Commit 69625ea7 authored by David S. Miller

Merge branch 'cxgb4-add-support-for-TC-MQPRIO-Qdisc-Offload'

Rahul Lakkireddy says:

====================
cxgb4: add support for TC-MQPRIO Qdisc Offload

This series of patches add support for offloading TC-MQPRIO Qdisc
to Chelsio T5/T6 NICs. Offloading QoS traffic shaping and pacing
requires using Ethernet Offload (ETHOFLD) resources available on
Chelsio NICs. The ETHOFLD resources are configured by firmware
and taken from the resource pool shared with other Chelsio Upper
Layer Drivers. Traffic flowing through ETHOFLD region requires a
software netdev Tx queue (EOSW_TXQ) exposed to networking stack,
and an underlying hardware Tx queue (EOHW_TXQ) used for sending
packets through hardware.

ETHOFLD region is addressed using EOTIDs, which are per-connection
resource. Hence, EOTIDs are capable of storing only a very small
number of packets in flight. To allow more connections to share
the QoS rate limiting configuration, multiple EOTIDs must be
allocated to reduce packet drops. EOTIDs are 1-to-1 mapped with
software EOSW_TXQ. Several software EOSW_TXQs can post packets to
a single hardware EOHW_TXQ.

The series is broken down as follows:

Patch 1 queries firmware for maximum available traffic classes,
as well as, start and maximum available indices (EOTID) into ETHOFLD
region, supported by the underlying device.

Patch 2 reworks queue configuration and simplifies MSI-X allocation
logic in preparation for ETHOFLD queues support.

Patch 3 adds skeleton for validating and configuring TC-MQPRIO Qdisc
offload. Also, adds support for software EOSW_TXQs and exposes them
to the network stack. Updates Tx queue selection to use fallback NIC Tx
path for unsupported traffic that can't go through ETHOFLD queues.

Patch 4 adds support for managing hardware queues to rate limit
traffic flowing through them. The queues are allocated/removed based
on enabling/disabling TC-MQPRIO Qdisc offload, respectively.

Patch 5 adds Tx path for traffic flowing through software EOSW_TXQ
and EOHW_TXQ. Also, adds Rx path to handle Tx completions.

Patch 6 updates the existing SCHED API to configure FLOWC based QoS
offload. In the existing QUEUE based rate limiting, multiple queues
sharing a traffic class get the aggregated max rate limit value.
On the other hand, in FLOWC based rate limiting, multiple queues
sharing a traffic class get their own individual max rate limit
value. For example, if 2 queues are bound to class 0, which is rate
limited to 1 Gbps, then in QUEUE based rate limiting, both the
queues get the aggregate max output of 1 Gbps only. In FLOWC based
rate limiting, each queue gets its own output of max 1 Gbps each;
i.e. 2 queues * 1 Gbps rate limit = 2 Gbps max output.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1c8dd9cb 0e395b3c
......@@ -8,7 +8,7 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \
cxgb4_uld.o srq.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \
cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o cxgb4_mps.o \
cudbg_common.o cudbg_lib.o cudbg_zlib.o
cudbg_common.o cudbg_lib.o cudbg_zlib.o cxgb4_tc_mqprio.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
......
......@@ -325,6 +325,9 @@ enum cudbg_qdesc_qtype {
CUDBG_QTYPE_CRYPTO_FLQ,
CUDBG_QTYPE_TLS_RXQ,
CUDBG_QTYPE_TLS_FLQ,
CUDBG_QTYPE_ETHOFLD_TXQ,
CUDBG_QTYPE_ETHOFLD_RXQ,
CUDBG_QTYPE_ETHOFLD_FLQ,
CUDBG_QTYPE_MAX,
};
......
......@@ -2930,6 +2930,10 @@ void cudbg_fill_qdesc_num_and_size(const struct adapter *padap,
tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE *
MAX_RXQ_DESC_SIZE;
/* ETHOFLD TXQ, RXQ, and FLQ */
tot_entries += MAX_OFLD_QSETS * 3;
tot_size += MAX_OFLD_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;
tot_size += sizeof(struct cudbg_ver_hdr) +
sizeof(struct cudbg_qdesc_info) +
sizeof(struct cudbg_qdesc_entry) * tot_entries;
......@@ -3087,6 +3091,23 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
}
}
/* ETHOFLD TXQ */
if (s->eohw_txq)
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_TXQ(&s->eohw_txq[i].q,
CUDBG_QTYPE_ETHOFLD_TXQ, out);
/* ETHOFLD RXQ and FLQ */
if (s->eohw_rxq) {
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_RXQ(&s->eohw_rxq[i].rspq,
CUDBG_QTYPE_ETHOFLD_RXQ, out);
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_FLQ(&s->eohw_rxq[i].fl,
CUDBG_QTYPE_ETHOFLD_FLQ, out);
}
out_unlock:
mutex_unlock(&uld_mutex);
......
......@@ -392,6 +392,7 @@ struct adapter_params {
struct arch_specific_params arch; /* chip specific params */
unsigned char offload;
unsigned char crypto; /* HW capability for crypto */
unsigned char ethofld; /* QoS support */
unsigned char bypass;
unsigned char hash_filter;
......@@ -711,6 +712,7 @@ struct sge_eth_rxq { /* SW Ethernet Rx queue */
struct sge_rspq rspq;
struct sge_fl fl;
struct sge_eth_stats stats;
struct msix_info *msix;
} ____cacheline_aligned_in_smp;
struct sge_ofld_stats { /* offload queue statistics */
......@@ -724,6 +726,7 @@ struct sge_ofld_rxq { /* SW offload Rx queue */
struct sge_rspq rspq;
struct sge_fl fl;
struct sge_ofld_stats stats;
struct msix_info *msix;
} ____cacheline_aligned_in_smp;
struct tx_desc {
......@@ -788,7 +791,6 @@ struct sge_ctrl_txq { /* state for an SGE control Tx queue */
struct sge_uld_rxq_info {
char name[IFNAMSIZ]; /* name of ULD driver */
struct sge_ofld_rxq *uldrxq; /* Rxq's for ULD */
u16 *msix_tbl; /* msix_tbl for uld */
u16 *rspq_id; /* response queue id's of rxq */
u16 nrxq; /* # of ingress uld queues */
u16 nciq; /* # of completion queues */
......@@ -801,6 +803,55 @@ struct sge_uld_txq_info {
u16 ntxq; /* # of egress uld queues */
};
/* Lifecycle states of an ETHOFLD software Tx queue (EOSW_TXQ),
 * driven by the FLOWC open/close handshake with firmware.
 */
enum sge_eosw_state {
CXGB4_EO_STATE_CLOSED = 0, /* Not ready to accept traffic */
CXGB4_EO_STATE_FLOWC_OPEN_SEND, /* Send FLOWC open request */
CXGB4_EO_STATE_FLOWC_OPEN_REPLY, /* Waiting for FLOWC open reply */
CXGB4_EO_STATE_ACTIVE, /* Ready to accept traffic */
CXGB4_EO_STATE_FLOWC_CLOSE_SEND, /* Send FLOWC close request */
CXGB4_EO_STATE_FLOWC_CLOSE_REPLY, /* Waiting for FLOWC close reply */
};
/* One entry of the EOSW_TXQ descriptor ring: the pending skb and the
 * DMA address of its linear data plus one address per page fragment.
 */
struct sge_eosw_desc {
struct sk_buff *skb; /* SKB to free after getting completion */
dma_addr_t addr[MAX_SKB_FRAGS + 1]; /* DMA mapped addresses */
};
/* Software ETHOFLD Tx queue (EOSW_TXQ), exposed to the networking
 * stack and mapped 1-to-1 with an EOTID. Packets are staged in this
 * ring before being posted to an underlying hardware EOHW_TXQ.
 */
struct sge_eosw_txq {
spinlock_t lock; /* Per queue lock to synchronize completions */
enum sge_eosw_state state; /* Current ETHOFLD State */
struct sge_eosw_desc *desc; /* Descriptor ring to hold packets */
u32 ndesc; /* Number of descriptors */
u32 pidx; /* Current Producer Index */
u32 last_pidx; /* Last successfully transmitted Producer Index */
u32 cidx; /* Current Consumer Index */
u32 last_cidx; /* Last successfully reclaimed Consumer Index */
u32 flowc_idx; /* Descriptor containing a FLOWC request */
u32 inuse; /* Number of packets held in ring */
u32 cred; /* Current available credits */
u32 ncompl; /* # of completions posted */
u32 last_compl; /* # of credits consumed since last completion req */
u32 eotid; /* Index into EOTID table in software */
u32 hwtid; /* Hardware EOTID index */
u32 hwqid; /* Underlying hardware queue index */
struct net_device *netdev; /* Pointer to netdevice */
struct tasklet_struct qresume_tsk; /* Restarts the queue */
struct completion completion; /* completion for FLOWC rendezvous */
};
/* Hardware ETHOFLD Tx queue (EOHW_TXQ). Several software EOSW_TXQs
 * can post packets to a single EOHW_TXQ, which sends them through
 * hardware.
 */
struct sge_eohw_txq {
spinlock_t lock; /* Per queue lock */
struct sge_txq q; /* HW Txq */
struct adapter *adap; /* Backpointer to adapter */
unsigned long tso; /* # of TSO requests */
unsigned long tx_cso; /* # of Tx checksum offloads */
unsigned long vlan_ins; /* # of Tx VLAN insertions */
unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
};
struct sge {
struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
struct sge_eth_txq ptptxq;
......@@ -814,11 +865,16 @@ struct sge {
struct sge_rspq intrq ____cacheline_aligned_in_smp;
spinlock_t intrq_lock;
struct sge_eohw_txq *eohw_txq;
struct sge_ofld_rxq *eohw_rxq;
u16 max_ethqsets; /* # of available Ethernet queue sets */
u16 ethqsets; /* # of active Ethernet queue sets */
u16 ethtxq_rover; /* Tx queue to clean up next */
u16 ofldqsets; /* # of active ofld queue sets */
u16 nqs_per_uld; /* # of Rx queues per ULD */
u16 eoqsets; /* # of ETHOFLD queues */
u16 timer_val[SGE_NTIMERS];
u8 counter_val[SGE_NCOUNTERS];
u16 dbqtimer_tick;
......@@ -841,6 +897,9 @@ struct sge {
unsigned long *blocked_fl;
struct timer_list rx_timer; /* refills starving FLs */
struct timer_list tx_timer; /* checks Tx queues */
int fwevtq_msix_idx; /* Index to firmware event queue MSI-X info */
int nd_msix_idx; /* Index to non-data interrupts MSI-X info */
};
#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
......@@ -870,13 +929,13 @@ struct hash_mac_addr {
unsigned int iface_mac;
};
struct uld_msix_bmap {
struct msix_bmap {
unsigned long *msix_bmap;
unsigned int mapsize;
spinlock_t lock; /* lock for acquiring bitmap */
};
struct uld_msix_info {
struct msix_info {
unsigned short vec;
char desc[IFNAMSIZ + 10];
unsigned int idx;
......@@ -945,14 +1004,9 @@ struct adapter {
struct cxgb4_virt_res vres;
unsigned int swintr;
struct msix_info {
unsigned short vec;
char desc[IFNAMSIZ + 10];
cpumask_var_t aff_mask;
} msix_info[MAX_INGQ + 1];
struct uld_msix_info *msix_info_ulds; /* msix info for uld's */
struct uld_msix_bmap msix_bmap_ulds; /* msix bitmap for all uld */
int msi_idx;
/* MSI-X Info for NIC and OFLD queues */
struct msix_info *msix_info;
struct msix_bmap msix_bmap;
struct doorbell_stats db_stats;
struct sge sge;
......@@ -1044,6 +1098,9 @@ struct adapter {
#if IS_ENABLED(CONFIG_THERMAL)
struct ch_thermal ch_thermal;
#endif
/* TC MQPRIO offload */
struct cxgb4_tc_mqprio *tc_mqprio;
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be
......@@ -1077,6 +1134,7 @@ enum {
enum {
SCHED_CLASS_MODE_CLASS = 0, /* per-class scheduling */
SCHED_CLASS_MODE_FLOW, /* per-flow scheduling */
};
enum {
......@@ -1100,6 +1158,14 @@ struct ch_sched_queue {
s8 class; /* class index */
};
/* Support for "sched_flowc" command to allow one or more FLOWC
 * to be bound to a TX Scheduling Class. Unlike QUEUE based binding,
 * each FLOWC bound this way gets its own individual rate limit
 * rather than sharing the class's aggregate limit.
 */
struct ch_sched_flowc {
s32 tid; /* TID to bind */
s8 class; /* class index */
};
/* Defined bit width of user definable filter tuples
*/
#define ETHTYPE_BITWIDTH 16
......@@ -1293,6 +1359,11 @@ static inline int is_uld(const struct adapter *adap)
return (adap->params.offload || adap->params.crypto);
}
static inline int is_ethofld(const struct adapter *adap)
{
return adap->params.ethofld;
}
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
return readl(adap->regs + reg_addr);
......@@ -1426,6 +1497,9 @@ int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
struct net_device *dev, unsigned int iqid,
unsigned int uld_type);
int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
struct net_device *dev, u32 iqid);
void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq);
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
......@@ -1890,6 +1964,12 @@ int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
unsigned int n, bool unmap);
void cxgb4_eosw_txq_free_desc(struct adapter *adap, struct sge_eosw_txq *txq,
u32 ndesc);
int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc);
void cxgb4_ethofld_restart(unsigned long data);
int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *si);
void free_txq(struct adapter *adap, struct sge_txq *q);
void cxgb4_reclaim_completed_tx(struct adapter *adap,
struct sge_txq *q, bool unmap);
......@@ -1948,5 +2028,10 @@ int cxgb4_alloc_raw_mac_filt(struct adapter *adap,
int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
int *tcam_idx, const u8 *addr,
bool persistent, u8 *smt_idx);
int cxgb4_get_msix_idx_from_bmap(struct adapter *adap);
void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx);
int cxgb_open(struct net_device *dev);
int cxgb_close(struct net_device *dev);
void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
void cxgb4_quiesce_rx(struct sge_rspq *q);
#endif /* __CXGB4_H__ */
......@@ -2658,6 +2658,7 @@ static int sge_qinfo_uld_ciq_entries(const struct adapter *adap, int uld)
static int sge_qinfo_show(struct seq_file *seq, void *v)
{
int eth_entries, ctrl_entries, eo_entries = 0;
int uld_rxq_entries[CXGB4_ULD_MAX] = { 0 };
int uld_ciq_entries[CXGB4_ULD_MAX] = { 0 };
int uld_txq_entries[CXGB4_TX_MAX] = { 0 };
......@@ -2665,11 +2666,12 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
const struct sge_uld_rxq_info *urxq_info;
struct adapter *adap = seq->private;
int i, n, r = (uintptr_t)v - 1;
int eth_entries, ctrl_entries;
struct sge *s = &adap->sge;
eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
if (adap->sge.eohw_txq)
eo_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4);
mutex_lock(&uld_mutex);
if (s->uld_txq_info)
......@@ -2761,6 +2763,54 @@ do { \
}
r -= eth_entries;
if (r < eo_entries) {
int base_qset = r * 4;
const struct sge_ofld_rxq *rx = &s->eohw_rxq[base_qset];
const struct sge_eohw_txq *tx = &s->eohw_txq[base_qset];
n = min(4, s->eoqsets - 4 * r);
S("QType:", "ETHOFLD");
S("Interface:",
rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
T("TxQ ID:", q.cntxt_id);
T("TxQ size:", q.size);
T("TxQ inuse:", q.in_use);
T("TxQ CIDX:", q.cidx);
T("TxQ PIDX:", q.pidx);
R("RspQ ID:", rspq.abs_id);
R("RspQ size:", rspq.size);
R("RspQE size:", rspq.iqe_len);
R("RspQ CIDX:", rspq.cidx);
R("RspQ Gen:", rspq.gen);
S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
R("FL ID:", fl.cntxt_id);
S3("u", "FL size:", rx->fl.size ? rx->fl.size - 8 : 0);
R("FL pend:", fl.pend_cred);
R("FL avail:", fl.avail);
R("FL PIDX:", fl.pidx);
R("FL CIDX:", fl.cidx);
RL("RxPackets:", stats.pkts);
RL("RxImm:", stats.imm);
RL("RxAN", stats.an);
RL("RxNoMem", stats.nomem);
TL("TSO:", tso);
TL("TxCSO:", tx_cso);
TL("VLANins:", vlan_ins);
TL("TxQFull:", q.stops);
TL("TxQRestarts:", q.restarts);
TL("TxMapErr:", mapping_err);
RL("FLAllocErr:", fl.alloc_failed);
RL("FLLrgAlcErr:", fl.large_alloc_failed);
RL("FLMapErr:", fl.mapping_err);
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
goto unlock;
}
r -= eo_entries;
if (r < uld_txq_entries[CXGB4_TX_OFLD]) {
const struct sge_uld_txq *tx;
......@@ -3007,6 +3057,7 @@ static int sge_queue_entries(const struct adapter *adap)
mutex_unlock(&uld_mutex);
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
(adap->sge.eohw_txq ? DIV_ROUND_UP(adap->sge.eoqsets, 4) : 0) +
tot_uld_entries +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
}
......
......@@ -65,6 +65,7 @@
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#include "cxgb4.h"
#include "cxgb4_filter.h"
......@@ -82,6 +83,7 @@
#include "sched.h"
#include "cxgb4_tc_u32.h"
#include "cxgb4_tc_flower.h"
#include "cxgb4_tc_mqprio.h"
#include "cxgb4_ptp.h"
#include "cxgb4_cudbg.h"
......@@ -685,31 +687,6 @@ static irqreturn_t t4_nondata_intr(int irq, void *cookie)
return IRQ_HANDLED;
}
/*
* Name the MSI-X interrupts.
*/
static void name_msix_vecs(struct adapter *adap)
{
int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
/* non-data interrupts */
snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
/* FW events */
snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
adap->port[0]->name);
/* Ethernet queues */
for_each_port(adap, j) {
struct net_device *d = adap->port[j];
const struct port_info *pi = netdev_priv(d);
for (i = 0; i < pi->nqsets; i++, msi_idx++)
snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
d->name, i);
}
}
int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
cpumask_var_t *aff_mask, int idx)
{
......@@ -743,15 +720,19 @@ static int request_msix_queue_irqs(struct adapter *adap)
struct sge *s = &adap->sge;
struct msix_info *minfo;
int err, ethqidx;
int msi_index = 2;
err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
adap->msix_info[1].desc, &s->fw_evtq);
if (s->fwevtq_msix_idx < 0)
return -ENOMEM;
err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec,
t4_sge_intr_msix, 0,
adap->msix_info[s->fwevtq_msix_idx].desc,
&s->fw_evtq);
if (err)
return err;
for_each_ethrxq(s, ethqidx) {
minfo = &adap->msix_info[msi_index];
minfo = s->ethrxq[ethqidx].msix;
err = request_irq(minfo->vec,
t4_sge_intr_msix, 0,
minfo->desc,
......@@ -761,18 +742,16 @@ static int request_msix_queue_irqs(struct adapter *adap)
cxgb4_set_msix_aff(adap, minfo->vec,
&minfo->aff_mask, ethqidx);
msi_index++;
}
return 0;
unwind:
while (--ethqidx >= 0) {
msi_index--;
minfo = &adap->msix_info[msi_index];
minfo = s->ethrxq[ethqidx].msix;
cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq);
}
free_irq(adap->msix_info[1].vec, &s->fw_evtq);
free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
return err;
}
......@@ -780,11 +759,11 @@ static void free_msix_queue_irqs(struct adapter *adap)
{
struct sge *s = &adap->sge;
struct msix_info *minfo;
int i, msi_index = 2;
int i;
free_irq(adap->msix_info[1].vec, &s->fw_evtq);
free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
for_each_ethrxq(s, i) {
minfo = &adap->msix_info[msi_index++];
minfo = s->ethrxq[i].msix;
cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
free_irq(minfo->vec, &s->ethrxq[i].rspq);
}
......@@ -901,6 +880,12 @@ static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}
/* Stop NAPI processing on an Rx response queue; a no-op for queues
 * that have no NAPI handler installed.
 */
void cxgb4_quiesce_rx(struct sge_rspq *q)
{
	if (!q->handler)
		return;

	napi_disable(&q->napi);
}
/*
* Wait until all NAPI handlers are descheduled.
*/
......@@ -911,19 +896,24 @@ static void quiesce_rx(struct adapter *adap)
for (i = 0; i < adap->sge.ingr_sz; i++) {
struct sge_rspq *q = adap->sge.ingr_map[i];
if (q && q->handler)
napi_disable(&q->napi);
if (!q)
continue;
cxgb4_quiesce_rx(q);
}
}
/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
struct sge *s = &adap->sge;
if (adap->flags & CXGB4_FULL_INIT_DONE) {
t4_intr_disable(adap);
if (adap->flags & CXGB4_USING_MSIX) {
free_msix_queue_irqs(adap);
free_irq(adap->msix_info[0].vec, adap);
free_irq(adap->msix_info[s->nd_msix_idx].vec,
adap);
} else {
free_irq(adap->pdev->irq, adap);
}
......@@ -931,6 +921,17 @@ static void disable_interrupts(struct adapter *adap)
}
}
/* Re-enable an Rx response queue: restart NAPI (when the queue has a
 * handler) and re-arm the queue's interrupt/timer via a GTS update.
 */
void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q)
{
if (q->handler)
napi_enable(&q->napi);
/* 0-increment GTS to start the timer and enable interrupts */
t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
SEINTARM_V(q->intr_params) |
INGRESSQID_V(q->cntxt_id));
}
/*
* Enable NAPI scheduling and interrupt generation for all Rx queues.
*/
......@@ -943,37 +944,63 @@ static void enable_rx(struct adapter *adap)
if (!q)
continue;
if (q->handler)
napi_enable(&q->napi);
/* 0-increment GTS to start the timer and enable interrupts */
t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
SEINTARM_V(q->intr_params) |
INGRESSQID_V(q->cntxt_id));
cxgb4_enable_rx(adap, q);
}
}
/* Reserve an MSI-X vector for non-data (slow path) interrupts.
 * Stores the allocated bitmap index in sge.nd_msix_idx, which is left
 * at -1 when the adapter is not using MSI-X. Returns 0 on success or
 * -ENOMEM when no free vector remains in the bitmap.
 */
static int setup_non_data_intr(struct adapter *adap)
{
int msix;
adap->sge.nd_msix_idx = -1;
if (!(adap->flags & CXGB4_USING_MSIX))
return 0;
/* Request MSI-X vector for non-data interrupt */
msix = cxgb4_get_msix_idx_from_bmap(adap);
if (msix < 0)
return -ENOMEM;
/* Name the vector after the first port's netdev */
snprintf(adap->msix_info[msix].desc,
sizeof(adap->msix_info[msix].desc),
"%s", adap->port[0]->name);
adap->sge.nd_msix_idx = msix;
return 0;
}
static int setup_fw_sge_queues(struct adapter *adap)
{
struct sge *s = &adap->sge;
int err = 0;
int msix, err = 0;
bitmap_zero(s->starving_fl, s->egr_sz);
bitmap_zero(s->txq_maperr, s->egr_sz);
if (adap->flags & CXGB4_USING_MSIX)
adap->msi_idx = 1; /* vector 0 is for non-queue interrupts */
else {
if (adap->flags & CXGB4_USING_MSIX) {
s->fwevtq_msix_idx = -1;
msix = cxgb4_get_msix_idx_from_bmap(adap);
if (msix < 0)
return -ENOMEM;
snprintf(adap->msix_info[msix].desc,
sizeof(adap->msix_info[msix].desc),
"%s-FWeventq", adap->port[0]->name);
} else {
err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
NULL, NULL, NULL, -1);
if (err)
return err;
adap->msi_idx = -((int)s->intrq.abs_id + 1);
msix = -((int)s->intrq.abs_id + 1);
}
err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
msix, NULL, fwevtq_handler, NULL, -1);
if (err && msix >= 0)
cxgb4_free_msix_idx_in_bmap(adap, msix);
s->fwevtq_msix_idx = msix;
return err;
}
......@@ -987,14 +1014,17 @@ static int setup_fw_sge_queues(struct adapter *adap)
*/
static int setup_sge_queues(struct adapter *adap)
{
int err, i, j;
struct sge *s = &adap->sge;
struct sge_uld_rxq_info *rxq_info = NULL;
struct sge *s = &adap->sge;
unsigned int cmplqid = 0;
int err, i, j, msix = 0;
if (is_uld(adap))
rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
if (!(adap->flags & CXGB4_USING_MSIX))
msix = -((int)s->intrq.abs_id + 1);
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
struct port_info *pi = netdev_priv(dev);
......@@ -1002,10 +1032,21 @@ static int setup_sge_queues(struct adapter *adap)
struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
for (j = 0; j < pi->nqsets; j++, q++) {
if (adap->msi_idx > 0)
adap->msi_idx++;
if (msix >= 0) {
msix = cxgb4_get_msix_idx_from_bmap(adap);
if (msix < 0) {
err = msix;
goto freeout;
}
snprintf(adap->msix_info[msix].desc,
sizeof(adap->msix_info[msix].desc),
"%s-Rx%d", dev->name, j);
q->msix = &adap->msix_info[msix];
}
err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
adap->msi_idx, &q->fl,
msix, &q->fl,
t4_ethrx_handler,
NULL,
t4_get_tp_ch_map(adap,
......@@ -1092,6 +1133,18 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
}
#endif /* CONFIG_CHELSIO_T4_DCB */
if (dev->num_tc) {
struct port_info *pi = netdev2pinfo(dev);
/* Send unsupported traffic pattern to normal NIC queues. */
txq = netdev_pick_tx(dev, skb, sb_dev);
if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
ip_hdr(skb)->protocol != IPPROTO_TCP)
txq = txq % pi->nqsets;
return txq;
}
if (select_queue) {
txq = (skb_rx_queue_recorded(skb)
? skb_get_rx_queue(skb)
......@@ -1458,19 +1511,23 @@ static int tid_init(struct tid_info *t)
struct adapter *adap = container_of(t, struct adapter, tids);
unsigned int max_ftids = t->nftids + t->nsftids;
unsigned int natids = t->natids;
unsigned int eotid_bmap_size;
unsigned int stid_bmap_size;
unsigned int ftid_bmap_size;
size_t size;
stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
ftid_bmap_size = BITS_TO_LONGS(t->nftids);
eotid_bmap_size = BITS_TO_LONGS(t->neotids);
size = t->ntids * sizeof(*t->tid_tab) +
natids * sizeof(*t->atid_tab) +
t->nstids * sizeof(*t->stid_tab) +
t->nsftids * sizeof(*t->stid_tab) +
stid_bmap_size * sizeof(long) +
max_ftids * sizeof(*t->ftid_tab) +
ftid_bmap_size * sizeof(long);
ftid_bmap_size * sizeof(long) +
t->neotids * sizeof(*t->eotid_tab) +
eotid_bmap_size * sizeof(long);
t->tid_tab = kvzalloc(size, GFP_KERNEL);
if (!t->tid_tab)
......@@ -1481,6 +1538,8 @@ static int tid_init(struct tid_info *t)
t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size];
t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids];
spin_lock_init(&t->stid_lock);
spin_lock_init(&t->atid_lock);
spin_lock_init(&t->ftid_lock);
......@@ -1507,6 +1566,9 @@ static int tid_init(struct tid_info *t)
if (!t->stid_base &&
CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
__set_bit(0, t->stid_bmap);
if (t->neotids)
bitmap_zero(t->eotid_bmap, t->neotids);
}
bitmap_zero(t->ftid_bmap, t->nftids);
......@@ -2363,6 +2425,7 @@ static void update_clip(const struct adapter *adap)
*/
static int cxgb_up(struct adapter *adap)
{
struct sge *s = &adap->sge;
int err;
mutex_lock(&uld_mutex);
......@@ -2374,16 +2437,20 @@ static int cxgb_up(struct adapter *adap)
goto freeq;
if (adap->flags & CXGB4_USING_MSIX) {
name_msix_vecs(adap);
err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
adap->msix_info[0].desc, adap);
if (s->nd_msix_idx < 0) {
err = -ENOMEM;
goto irq_err;
}
err = request_irq(adap->msix_info[s->nd_msix_idx].vec,
t4_nondata_intr, 0,
adap->msix_info[s->nd_msix_idx].desc, adap);
if (err)
goto irq_err;
err = request_msix_queue_irqs(adap);
if (err) {
free_irq(adap->msix_info[0].vec, adap);
goto irq_err;
}
if (err)
goto irq_err_free_nd_msix;
} else {
err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
(adap->flags & CXGB4_USING_MSI) ? 0
......@@ -2405,11 +2472,13 @@ static int cxgb_up(struct adapter *adap)
#endif
return err;
irq_err:
irq_err_free_nd_msix:
free_irq(adap->msix_info[s->nd_msix_idx].vec, adap);
irq_err:
dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
freeq:
freeq:
t4_free_sge_resources(adap);
rel_lock:
rel_lock:
mutex_unlock(&uld_mutex);
return err;
}
......@@ -2431,11 +2500,11 @@ static void cxgb_down(struct adapter *adapter)
/*
* net_device operations
*/
static int cxgb_open(struct net_device *dev)
int cxgb_open(struct net_device *dev)
{
int err;
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
int err;
netif_carrier_off(dev);
......@@ -2458,7 +2527,7 @@ static int cxgb_open(struct net_device *dev)
return err;
}
static int cxgb_close(struct net_device *dev)
int cxgb_close(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
......@@ -3192,6 +3261,17 @@ static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
}
/* Entry point for TC-MQPRIO Qdisc offload requests. Rejects adapters
 * without ETHOFLD (QoS) support or whose MQPRIO state was never
 * allocated, otherwise hands off to the cxgb4_tc_mqprio code.
 * NOTE(review): -ENOMEM for "feature not supported" reads oddly;
 * -EOPNOTSUPP looks more appropriate — confirm against callers.
 */
static int cxgb_setup_tc_mqprio(struct net_device *dev,
struct tc_mqprio_qopt_offload *mqprio)
{
struct adapter *adap = netdev2adap(dev);
if (!is_ethofld(adap) || !adap->tc_mqprio)
return -ENOMEM;
return cxgb4_setup_tc_mqprio(dev, mqprio);
}
static LIST_HEAD(cxgb_block_cb_list);
static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
......@@ -3200,6 +3280,8 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
struct port_info *pi = netdev2pinfo(dev);
switch (type) {
case TC_SETUP_QDISC_MQPRIO:
return cxgb_setup_tc_mqprio(dev, type_data);
case TC_SETUP_BLOCK:
return flow_block_cb_setup_simple(type_data,
&cxgb_block_cb_list,
......@@ -4604,11 +4686,18 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
adap->clipt_start = val[0];
adap->clipt_end = val[1];
/* We don't yet have a PARAMs calls to retrieve the number of Traffic
* Classes supported by the hardware/firmware so we hard code it here
* for now.
/* Get the supported number of traffic classes */
params[0] = FW_PARAM_DEV(NUM_TM_CLASS);
ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
if (ret < 0) {
/* We couldn't retrieve the number of Traffic Classes
* supported by the hardware/firmware. So we hard
* code it here.
*/
adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
} else {
adap->params.nsched_cls = val[0];
}
/* query params related to active filter region */
params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
......@@ -4693,7 +4782,8 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
adap->params.offload = 1;
if (caps_cmd.ofldcaps ||
(caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER))) {
(caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) ||
(caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD))) {
/* query offload-related parameters */
params[0] = FW_PARAM_DEV(NTID);
params[1] = FW_PARAM_PFVF(SERVER_START);
......@@ -4735,6 +4825,19 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
} else {
adap->num_ofld_uld += 1;
}
if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD)) {
params[0] = FW_PARAM_PFVF(ETHOFLD_START);
params[1] = FW_PARAM_PFVF(ETHOFLD_END);
ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
params, val);
if (!ret) {
adap->tids.eotid_base = val[0];
adap->tids.neotids = min_t(u32, MAX_ATIDS,
val[1] - val[0] + 1);
adap->params.ethofld = 1;
}
}
}
if (caps_cmd.rdmacaps) {
params[0] = FW_PARAM_PFVF(STAG_START);
......@@ -5157,26 +5260,25 @@ static inline bool is_x_10g_port(const struct link_config *lc)
return high_speeds != 0;
}
/*
* Perform default configuration of DMA queues depending on the number and type
/* Perform default configuration of DMA queues depending on the number and type
* of ports we found and the number of available CPUs. Most settings can be
* modified by the admin prior to actual use.
*/
static int cfg_queues(struct adapter *adap)
{
u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
u32 niqflint, neq, num_ulds;
struct sge *s = &adap->sge;
int i, n10g = 0, qidx = 0;
int niqflint, neq, avail_eth_qsets;
int max_eth_qsets = 32;
u32 i, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
int q10g = 0;
#endif
/* Reduce memory usage in kdump environment, disable all offload.
*/
/* Reduce memory usage in kdump environment, disable all offload. */
if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
adap->params.offload = 0;
adap->params.crypto = 0;
adap->params.ethofld = 0;
}
/* Calculate the number of Ethernet Queue Sets available based on
......@@ -5195,14 +5297,11 @@ static int cfg_queues(struct adapter *adap)
if (!(adap->flags & CXGB4_USING_MSIX))
niqflint--;
neq = adap->params.pfres.neq / 2;
avail_eth_qsets = min(niqflint, neq);
if (avail_eth_qsets > max_eth_qsets)
avail_eth_qsets = max_eth_qsets;
avail_qsets = min(niqflint, neq);
if (avail_eth_qsets < adap->params.nports) {
if (avail_qsets < adap->params.nports) {
dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n",
avail_eth_qsets, adap->params.nports);
avail_qsets, adap->params.nports);
return -ENOMEM;
}
......@@ -5210,6 +5309,7 @@ static int cfg_queues(struct adapter *adap)
for_each_port(adap, i)
n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS);
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging support we need to be able to support up
* to 8 Traffic Priorities; each of which will be assigned to its
......@@ -5229,8 +5329,7 @@ static int cfg_queues(struct adapter *adap)
qidx += pi->nqsets;
}
#else /* !CONFIG_CHELSIO_T4_DCB */
/*
* We default to 1 queue per non-10G port and up to # of cores queues
/* We default to 1 queue per non-10G port and up to # of cores queues
* per 10G port.
*/
if (n10g)
......@@ -5252,19 +5351,40 @@ static int cfg_queues(struct adapter *adap)
s->ethqsets = qidx;
s->max_ethqsets = qidx; /* MSI-X may lower it later */
avail_qsets -= qidx;
if (is_uld(adap)) {
/*
* For offload we use 1 queue/channel if all ports are up to 1G,
/* For offload we use 1 queue/channel if all ports are up to 1G,
* otherwise we divide all available queues amongst the channels
* capped by the number of available cores.
*/
if (n10g) {
i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
s->ofldqsets = roundup(i, adap->params.nports);
} else {
num_ulds = adap->num_uld + adap->num_ofld_uld;
i = min_t(u32, MAX_OFLD_QSETS, num_online_cpus());
avail_uld_qsets = roundup(i, adap->params.nports);
if (avail_qsets < num_ulds * adap->params.nports) {
adap->params.offload = 0;
adap->params.crypto = 0;
s->ofldqsets = 0;
} else if (avail_qsets < num_ulds * avail_uld_qsets || !n10g) {
s->ofldqsets = adap->params.nports;
} else {
s->ofldqsets = avail_uld_qsets;
}
avail_qsets -= num_ulds * s->ofldqsets;
}
/* ETHOFLD Queues used for QoS offload should follow same
* allocation scheme as normal Ethernet Queues.
*/
if (is_ethofld(adap)) {
if (avail_qsets < s->max_ethqsets) {
adap->params.ethofld = 0;
s->eoqsets = 0;
} else {
s->eoqsets = s->max_ethqsets;
}
avail_qsets -= s->eoqsets;
}
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
......@@ -5317,42 +5437,62 @@ static void reduce_ethqs(struct adapter *adap, int n)
}
}
/* Allocate the adapter-wide MSI-X bookkeeping for @num_vec vectors: the
 * per-vector msix_info array and the bitmap used to hand vector indices
 * out to queue groups at runtime.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int alloc_msix_info(struct adapter *adap, u32 num_vec)
{
	struct msix_info *msix_info;

	msix_info = kcalloc(num_vec, sizeof(*msix_info), GFP_KERNEL);
	if (!msix_info)
		return -ENOMEM;

	adap->msix_bmap.msix_bmap = kcalloc(BITS_TO_LONGS(num_vec),
					    sizeof(long), GFP_KERNEL);
	if (!adap->msix_bmap.msix_bmap) {
		kfree(msix_info);
		return -ENOMEM;
	}

	spin_lock_init(&adap->msix_bmap.lock);
	adap->msix_bmap.mapsize = num_vec;

	adap->msix_info = msix_info;
	return 0;
}
/* Release the MSI-X bookkeeping set up by alloc_msix_info().
 *
 * The old ULD-only guard is gone: msix_info and the bitmap are now
 * allocated whenever MSI-X is enabled, regardless of ULD presence, so
 * skipping the kfree()s when no ULDs are registered would leak them.
 * kfree(NULL) is a no-op, so no guard is needed at all.
 */
static void free_msix_info(struct adapter *adap)
{
	kfree(adap->msix_bmap.msix_bmap);
	kfree(adap->msix_info);
}
/* Grab a free MSI-X vector index from the adapter-wide bitmap.
 *
 * Returns the claimed index on success, or -ENOSPC when every vector is
 * already in use.
 */
int cxgb4_get_msix_idx_from_bmap(struct adapter *adap)
{
	struct msix_bmap *bmap = &adap->msix_bmap;
	unsigned long flags;
	unsigned int idx;
	int ret = -ENOSPC;

	spin_lock_irqsave(&bmap->lock, flags);
	idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
	if (idx < bmap->mapsize) {
		/* Claim the vector while still holding the lock */
		__set_bit(idx, bmap->msix_bmap);
		ret = idx;
	}
	spin_unlock_irqrestore(&bmap->lock, flags);

	return ret;
}
/* Return a previously claimed MSI-X vector index to the bitmap so it can
 * be reused by another queue group.
 *
 * The stray kfree(adap->msix_info_ulds)/kfree(adap->msix_bmap_ulds...)
 * calls (residue of the removed free_msix_info() for the old per-ULD
 * scheme) are gone: freeing live adapter MSI-X state on every vector
 * release would cause an immediate use-after-free.
 */
void cxgb4_free_msix_idx_in_bmap(struct adapter *adap,
				 unsigned int msix_idx)
{
	struct msix_bmap *bmap = &adap->msix_bmap;
	unsigned long flags;

	spin_lock_irqsave(&bmap->lock, flags);
	__clear_bit(msix_idx, bmap->msix_bmap);
	spin_unlock_irqrestore(&bmap->lock, flags);
}
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
......@@ -5360,88 +5500,161 @@ static void free_msix_info(struct adapter *adap)
static int enable_msix(struct adapter *adap)
{
int ofld_need = 0, uld_need = 0;
int i, j, want, need, allocated;
u32 eth_need, uld_need = 0, ethofld_need = 0;
u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0;
u8 num_uld = 0, nchan = adap->params.nports;
u32 i, want, need, num_vec;
struct sge *s = &adap->sge;
unsigned int nchan = adap->params.nports;
struct msix_entry *entries;
int max_ingq = MAX_INGQ;
if (is_pci_uld(adap))
max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
if (is_offload(adap))
max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld);
entries = kmalloc_array(max_ingq + 1, sizeof(*entries),
GFP_KERNEL);
if (!entries)
return -ENOMEM;
/* map for msix */
if (get_msix_info(adap)) {
adap->params.offload = 0;
adap->params.crypto = 0;
}
for (i = 0; i < max_ingq + 1; ++i)
entries[i].entry = i;
struct port_info *pi;
int allocated, ret;
want = s->max_ethqsets + EXTRA_VECS;
if (is_offload(adap)) {
want += adap->num_ofld_uld * s->ofldqsets;
ofld_need = adap->num_ofld_uld * nchan;
}
if (is_pci_uld(adap)) {
want += adap->num_uld * s->ofldqsets;
uld_need = adap->num_uld * nchan;
}
want = s->max_ethqsets;
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
* each port.
*/
need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
need = 8 * nchan;
#else
need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
need = nchan;
#endif
eth_need = need;
if (is_uld(adap)) {
num_uld = adap->num_ofld_uld + adap->num_uld;
want += num_uld * s->ofldqsets;
uld_need = num_uld * nchan;
need += uld_need;
}
if (is_ethofld(adap)) {
want += s->eoqsets;
ethofld_need = eth_need;
need += ethofld_need;
}
want += EXTRA_VECS;
need += EXTRA_VECS;
entries = kmalloc_array(want, sizeof(*entries), GFP_KERNEL);
if (!entries)
return -ENOMEM;
for (i = 0; i < want; i++)
entries[i].entry = i;
allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
if (allocated < 0) {
dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
" not using MSI-X\n");
kfree(entries);
return allocated;
/* Disable offload and attempt to get vectors for NIC
* only mode.
*/
want = s->max_ethqsets + EXTRA_VECS;
need = eth_need + EXTRA_VECS;
allocated = pci_enable_msix_range(adap->pdev, entries,
need, want);
if (allocated < 0) {
dev_info(adap->pdev_dev,
"Disabling MSI-X due to insufficient MSI-X vectors\n");
ret = allocated;
goto out_free;
}
dev_info(adap->pdev_dev,
"Disabling offload due to insufficient MSI-X vectors\n");
adap->params.offload = 0;
adap->params.crypto = 0;
adap->params.ethofld = 0;
s->ofldqsets = 0;
s->eoqsets = 0;
uld_need = 0;
ethofld_need = 0;
}
num_vec = allocated;
if (num_vec < want) {
/* Distribute available vectors to the various queue groups.
* Every group gets its minimum requirement and NIC gets top
* priority for leftovers.
*/
i = allocated - EXTRA_VECS - ofld_need - uld_need;
if (i < s->max_ethqsets) {
s->max_ethqsets = i;
if (i < s->ethqsets)
reduce_ethqs(adap, i);
ethqsets = eth_need;
if (is_uld(adap))
ofldqsets = nchan;
if (is_ethofld(adap))
eoqsets = ethofld_need;
num_vec -= need;
while (num_vec) {
if (num_vec < eth_need + ethofld_need ||
ethqsets > s->max_ethqsets)
break;
for_each_port(adap, i) {
pi = adap2pinfo(adap, i);
if (pi->nqsets < 2)
continue;
ethqsets++;
num_vec--;
if (ethofld_need) {
eoqsets++;
num_vec--;
}
}
}
if (is_uld(adap)) {
if (allocated < want)
s->nqs_per_uld = nchan;
else
s->nqs_per_uld = s->ofldqsets;
while (num_vec) {
if (num_vec < uld_need ||
ofldqsets > s->ofldqsets)
break;
ofldqsets++;
num_vec -= uld_need;
}
}
} else {
ethqsets = s->max_ethqsets;
if (is_uld(adap))
ofldqsets = s->ofldqsets;
if (is_ethofld(adap))
eoqsets = s->eoqsets;
}
if (ethqsets < s->max_ethqsets) {
s->max_ethqsets = ethqsets;
reduce_ethqs(adap, ethqsets);
}
for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i)
adap->msix_info[i].vec = entries[i].vector;
if (is_uld(adap)) {
for (j = 0 ; i < allocated; ++i, j++) {
adap->msix_info_ulds[j].vec = entries[i].vector;
adap->msix_info_ulds[j].idx = i;
s->ofldqsets = ofldqsets;
s->nqs_per_uld = s->ofldqsets;
}
adap->msix_bmap_ulds.mapsize = j;
if (is_ethofld(adap))
s->eoqsets = eoqsets;
/* map for msix */
ret = alloc_msix_info(adap, allocated);
if (ret)
goto out_disable_msix;
for (i = 0; i < allocated; i++) {
adap->msix_info[i].vec = entries[i].vector;
adap->msix_info[i].idx = i;
}
dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
"nic %d per uld %d\n",
allocated, s->max_ethqsets, s->nqs_per_uld);
dev_info(adap->pdev_dev,
"%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d\n",
allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld);
kfree(entries);
return 0;
out_disable_msix:
pci_disable_msix(adap->pdev);
out_free:
kfree(entries);
return ret;
}
#undef EXTRA_VECS
......@@ -5528,6 +5741,7 @@ static void free_some_resources(struct adapter *adapter)
kvfree(adapter->srq);
t4_cleanup_sched(adapter);
kvfree(adapter->tids.tid_tab);
cxgb4_cleanup_tc_mqprio(adapter);
cxgb4_cleanup_tc_flower(adapter);
cxgb4_cleanup_tc_u32(adapter);
kfree(adapter->sge.egr_map);
......@@ -5942,8 +6156,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_LIST_HEAD(&adapter->mac_hlist);
for_each_port(adapter, i) {
/* For supporting MQPRIO Offload, need some extra
* queues for each ETHOFLD TIDs. Keep it equal to
* MAX_ATIDs for now. Once we connect to firmware
* later and query the EOTID params, we'll come to
* know the actual # of EOTIDs supported.
*/
netdev = alloc_etherdev_mq(sizeof(struct port_info),
MAX_ETH_QSETS);
MAX_ETH_QSETS + MAX_ATIDS);
if (!netdev) {
err = -ENOMEM;
goto out_free_dev;
......@@ -6091,6 +6311,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (cxgb4_init_tc_flower(adapter))
dev_warn(&pdev->dev,
"could not offload tc flower, continuing\n");
if (cxgb4_init_tc_mqprio(adapter))
dev_warn(&pdev->dev,
"could not offload tc mqprio, continuing\n");
}
if (is_offload(adapter) || is_hashfilter(adapter)) {
......@@ -6127,6 +6351,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto out_free_dev;
err = setup_non_data_intr(adapter);
if (err) {
dev_err(adapter->pdev_dev,
"Non Data interrupt allocation failed, err: %d\n", err);
goto out_free_dev;
}
err = setup_fw_sge_queues(adapter);
if (err) {
dev_err(adapter->pdev_dev,
......
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */
#include "cxgb4.h"
#include "cxgb4_tc_mqprio.h"
#include "sched.h"
/* Validate an incoming TC-MQPRIO offload request against hardware
 * capabilities: offload mode/shaper, number of traffic classes, EOTID
 * availability, and total requested rate vs. the current link speed.
 *
 * Returns 0 when the request can be offloaded, a negative errno
 * otherwise.
 */
static int cxgb4_mqprio_validate(struct net_device *dev,
				 struct tc_mqprio_qopt_offload *mqprio)
{
	u64 min_rate = 0, max_rate = 0, max_link_rate;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	u32 qcount = 0, qoffset = 0;
	u32 link_ok, speed, mtu;
	int ret;
	u8 i;

	/* num_tc == 0 is a teardown request; nothing to validate */
	if (!mqprio->qopt.num_tc)
		return 0;

	if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS) {
		netdev_err(dev, "Only full TC hardware offload is supported\n");
		return -EINVAL;
	}

	if (mqprio->mode != TC_MQPRIO_MODE_CHANNEL) {
		netdev_err(dev, "Only channel mode offload is supported\n");
		return -EINVAL;
	}

	if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
		netdev_err(dev, "Only bandwidth rate shaper supported\n");
		return -EINVAL;
	}

	if (mqprio->qopt.num_tc > adap->params.nsched_cls) {
		netdev_err(dev,
			   "Only %u traffic classes supported by hardware\n",
			   adap->params.nsched_cls);
		return -ERANGE;
	}

	ret = t4_get_link_params(pi, &link_ok, &speed, &mtu);
	if (ret) {
		netdev_err(dev, "Failed to get link speed, ret: %d\n", ret);
		return -EINVAL;
	}

	/* Convert from Mbps to bps */
	max_link_rate = (u64)speed * 1000 * 1000;

	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		qoffset = max_t(u16, mqprio->qopt.offset[i], qoffset);
		qcount += mqprio->qopt.count[i];

		/* Convert byte per second to bits per second */
		min_rate += (mqprio->min_rate[i] * 8);
		max_rate += (mqprio->max_rate[i] * 8);
	}

	/* Every requested queue must be backed by an available EOTID */
	if (qoffset >= adap->tids.neotids || qcount > adap->tids.neotids)
		return -ENOMEM;

	if (min_rate > max_link_rate || max_rate > max_link_rate) {
		netdev_err(dev,
			   "Total Min/Max (%llu/%llu) Rate > supported (%llu)\n",
			   min_rate, max_rate, max_link_rate);
		return -EINVAL;
	}

	return 0;
}
/* Initialize a software ETHOFLD Tx queue bound to EOTID @eotid and
 * backed by hardware Tx queue @hwqid.  Allocates the descriptor ring and
 * arms the restart tasklet.  Returns 0 on success or -ENOMEM.
 */
static int cxgb4_init_eosw_txq(struct net_device *dev,
			       struct sge_eosw_txq *eosw_txq,
			       u32 eotid, u32 hwqid)
{
	struct adapter *adap = netdev2adap(dev);
	struct sge_eosw_desc *descs;

	memset(eosw_txq, 0, sizeof(*eosw_txq));

	descs = kcalloc(CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM, sizeof(*descs),
			GFP_KERNEL);
	if (!descs)
		return -ENOMEM;

	eosw_txq->desc = descs;
	eosw_txq->ndesc = CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM;
	spin_lock_init(&eosw_txq->lock);

	eosw_txq->state = CXGB4_EO_STATE_CLOSED;
	eosw_txq->eotid = eotid;
	/* hwtid is the absolute TID seen by hardware/firmware */
	eosw_txq->hwtid = adap->tids.eotid_base + eosw_txq->eotid;
	eosw_txq->cred = adap->params.ofldq_wr_cred;
	eosw_txq->hwqid = hwqid;
	eosw_txq->netdev = dev;

	tasklet_init(&eosw_txq->qresume_tsk, cxgb4_ethofld_restart,
		     (unsigned long)eosw_txq);
	return 0;
}
/* Reset a software ETHOFLD Tx queue back to a pristine CLOSED state,
 * dropping any descriptors still queued on it and restoring the full
 * work-request credit budget.
 */
static void cxgb4_clean_eosw_txq(struct net_device *dev,
				 struct sge_eosw_txq *eosw_txq)
{
	struct adapter *adap = netdev2adap(dev);

	/* Drop everything still sitting in the descriptor ring */
	cxgb4_eosw_txq_free_desc(adap, eosw_txq, eosw_txq->ndesc);

	/* Rewind producer/consumer and flowc bookkeeping */
	eosw_txq->pidx = 0;
	eosw_txq->last_pidx = 0;
	eosw_txq->cidx = 0;
	eosw_txq->last_cidx = 0;
	eosw_txq->flowc_idx = 0;
	eosw_txq->inuse = 0;

	/* Restore credits and completion counters */
	eosw_txq->cred = adap->params.ofldq_wr_cred;
	eosw_txq->ncompl = 0;
	eosw_txq->last_compl = 0;
	eosw_txq->state = CXGB4_EO_STATE_CLOSED;
}
/* Tear down a software ETHOFLD Tx queue: flush and free its descriptor
 * ring under the queue lock, then make sure the restart tasklet can no
 * longer run.
 */
static void cxgb4_free_eosw_txq(struct net_device *dev,
				struct sge_eosw_txq *eosw_txq)
{
	spin_lock_bh(&eosw_txq->lock);
	cxgb4_clean_eosw_txq(dev, eosw_txq);
	kfree(eosw_txq->desc);
	spin_unlock_bh(&eosw_txq->lock);
	/* Must be outside the lock: tasklet_kill() may sleep-wait for a
	 * running tasklet, which itself takes eosw_txq->lock.
	 */
	tasklet_kill(&eosw_txq->qresume_tsk);
}
/* Allocate the hardware side of the ETHOFLD path for this port: one
 * Rx queue (for Tx completions) and one hardware Tx queue per port qset,
 * plus the adapter-wide queue arrays on first use (tracked by
 * tc_mqprio->refcnt).
 *
 * Fixes over the previous version:
 *  - @ret is set when cxgb4_get_msix_idx_from_bmap() fails (it was
 *    returned uninitialized before);
 *  - the error path frees the adapter-wide eohw_rxq/eohw_txq arrays only
 *    when no other port holds a reference — unconditionally kfree()ing
 *    them freed live state of an active offload on another port.
 *
 * Returns 0 on success or a negative errno.
 */
static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_ofld_rxq *eorxq;
	struct sge_eohw_txq *eotxq;
	int ret, msix = 0;
	u32 i;

	/* Allocate ETHOFLD hardware queue structures if not done already */
	if (!refcount_read(&adap->tc_mqprio->refcnt)) {
		adap->sge.eohw_rxq = kcalloc(adap->sge.eoqsets,
					     sizeof(struct sge_ofld_rxq),
					     GFP_KERNEL);
		if (!adap->sge.eohw_rxq)
			return -ENOMEM;

		adap->sge.eohw_txq = kcalloc(adap->sge.eoqsets,
					     sizeof(struct sge_eohw_txq),
					     GFP_KERNEL);
		if (!adap->sge.eohw_txq) {
			kfree(adap->sge.eohw_rxq);
			return -ENOMEM;
		}
	}

	if (!(adap->flags & CXGB4_USING_MSIX))
		msix = -((int)adap->sge.intrq.abs_id + 1);

	for (i = 0; i < pi->nqsets; i++) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
		eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

		/* Allocate Rxqs for receiving ETHOFLD Tx completions */
		if (msix >= 0) {
			msix = cxgb4_get_msix_idx_from_bmap(adap);
			if (msix < 0) {
				ret = msix;
				goto out_free_queues;
			}

			eorxq->msix = &adap->msix_info[msix];
			snprintf(eorxq->msix->desc,
				 sizeof(eorxq->msix->desc),
				 "%s-eorxq%d", dev->name, i);
		}

		init_rspq(adap, &eorxq->rspq,
			  CXGB4_EOHW_RXQ_DEFAULT_INTR_USEC,
			  CXGB4_EOHW_RXQ_DEFAULT_PKT_CNT,
			  CXGB4_EOHW_RXQ_DEFAULT_DESC_NUM,
			  CXGB4_EOHW_RXQ_DEFAULT_DESC_SIZE);

		eorxq->fl.size = CXGB4_EOHW_FLQ_DEFAULT_DESC_NUM;

		ret = t4_sge_alloc_rxq(adap, &eorxq->rspq, false,
				       dev, msix, &eorxq->fl,
				       cxgb4_ethofld_rx_handler,
				       NULL, 0);
		if (ret)
			goto out_free_queues;

		/* Allocate ETHOFLD hardware Txqs */
		eotxq->q.size = CXGB4_EOHW_TXQ_DEFAULT_DESC_NUM;
		ret = t4_sge_alloc_ethofld_txq(adap, eotxq, dev,
					       eorxq->rspq.cntxt_id);
		if (ret)
			goto out_free_queues;

		/* Allocate IRQs, set IRQ affinity, and start Rx */
		if (adap->flags & CXGB4_USING_MSIX) {
			ret = request_irq(eorxq->msix->vec, t4_sge_intr_msix, 0,
					  eorxq->msix->desc, &eorxq->rspq);
			if (ret)
				goto out_free_msix;

			cxgb4_set_msix_aff(adap, eorxq->msix->vec,
					   &eorxq->msix->aff_mask, i);
		}

		if (adap->flags & CXGB4_FULL_INIT_DONE)
			cxgb4_enable_rx(adap, &eorxq->rspq);
	}

	refcount_inc(&adap->tc_mqprio->refcnt);
	return 0;

out_free_msix:
	while (i-- > 0) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];

		if (adap->flags & CXGB4_FULL_INIT_DONE)
			cxgb4_quiesce_rx(&eorxq->rspq);

		if (adap->flags & CXGB4_USING_MSIX) {
			cxgb4_clear_msix_aff(eorxq->msix->vec,
					     eorxq->msix->aff_mask);
			free_irq(eorxq->msix->vec, &eorxq->rspq);
		}
	}

out_free_queues:
	for (i = 0; i < pi->nqsets; i++) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
		eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

		if (eorxq->rspq.desc)
			free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
		if (eorxq->msix)
			cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx);
		t4_sge_free_ethofld_txq(adap, eotxq);
	}

	/* Only drop the adapter-wide arrays when no other port is still
	 * using them (we never took a reference on this failed attempt).
	 */
	if (!refcount_read(&adap->tc_mqprio->refcnt)) {
		kfree(adap->sge.eohw_txq);
		kfree(adap->sge.eohw_rxq);
	}
	return ret;
}
/* Release this port's ETHOFLD hardware Rx/Tx queues and, when the last
 * port drops its reference, the adapter-wide queue arrays themselves.
 * Counterpart of cxgb4_mqprio_alloc_hw_resources().
 */
void cxgb4_mqprio_free_hw_resources(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_ofld_rxq *eorxq;
	struct sge_eohw_txq *eotxq;
	u32 i;

	/* Return if no ETHOFLD structures have been allocated yet */
	if (!refcount_read(&adap->tc_mqprio->refcnt))
		return;

	/* Return if no hardware queues have been allocated */
	if (!adap->sge.eohw_rxq[pi->first_qset].rspq.desc)
		return;

	for (i = 0; i < pi->nqsets; i++) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
		eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

		/* Device removal path will already disable NAPI
		 * before unregistering netdevice. So, only disable
		 * NAPI if we're not in device removal path
		 */
		if (!(adap->flags & CXGB4_SHUTTING_DOWN))
			cxgb4_quiesce_rx(&eorxq->rspq);

		/* NOTE(review): the MSI-X bitmap index claimed via
		 * cxgb4_get_msix_idx_from_bmap() in the alloc path does
		 * not appear to be returned here — confirm whether a
		 * cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx)
		 * call is missing.
		 */
		if (adap->flags & CXGB4_USING_MSIX) {
			cxgb4_clear_msix_aff(eorxq->msix->vec,
					     eorxq->msix->aff_mask);
			free_irq(eorxq->msix->vec, &eorxq->rspq);
		}

		free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
		t4_sge_free_ethofld_txq(adap, eotxq);
	}

	/* Free up ETHOFLD structures if there are no users */
	if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) {
		kfree(adap->sge.eohw_txq);
		kfree(adap->sge.eohw_rxq);
	}
}
static int cxgb4_mqprio_alloc_tc(struct net_device *dev,
struct tc_mqprio_qopt_offload *mqprio)
{
struct ch_sched_params p = {
.type = SCHED_CLASS_TYPE_PACKET,
.u.params.level = SCHED_CLASS_LEVEL_CL_RL,
.u.params.mode = SCHED_CLASS_MODE_FLOW,
.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
.u.params.class = SCHED_CLS_NONE,
.u.params.weight = 0,
.u.params.pktsize = dev->mtu,
};
struct cxgb4_tc_port_mqprio *tc_port_mqprio;
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
struct sched_class *e;
int ret;
u8 i;
tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
p.u.params.channel = pi->tx_chan;
for (i = 0; i < mqprio->qopt.num_tc; i++) {
/* Convert from bytes per second to Kbps */
p.u.params.minrate = mqprio->min_rate[i] * 8 / 1000;
p.u.params.maxrate = mqprio->max_rate[i] * 8 / 1000;
e = cxgb4_sched_class_alloc(dev, &p);
if (!e) {
ret = -ENOMEM;
goto out_err;
}
tc_port_mqprio->tc_hwtc_map[i] = e->idx;
}
return 0;
out_err:
while (i--)
cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);
return ret;
}
/* Release every hardware scheduling class this port allocated for its
 * currently recorded MQPRIO configuration.
 */
static void cxgb4_mqprio_free_tc(struct net_device *dev)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	u8 i;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++)
		cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);
}
/* Bind a software ETHOFLD Tx queue's EOTID to hardware traffic class
 * @tc via a FLOWC work request, and wait (bounded) for the firmware
 * completion.  Returns 0 on success, a negative errno or -ETIMEDOUT.
 */
static int cxgb4_mqprio_class_bind(struct net_device *dev,
				   struct sge_eosw_txq *eosw_txq,
				   u8 tc)
{
	struct ch_sched_flowc fe;
	int ret;

	init_completion(&eosw_txq->completion);

	fe.tid = eosw_txq->eotid;
	fe.class = tc;

	ret = cxgb4_sched_class_bind(dev, &fe, SCHED_FLOWC);
	if (ret)
		return ret;

	/* Zero means the firmware reply never arrived in time */
	if (!wait_for_completion_timeout(&eosw_txq->completion,
					 CXGB4_FLOWC_WAIT_TIMEOUT))
		return -ETIMEDOUT;

	return 0;
}
/* Unbind a software ETHOFLD Tx queue from hardware traffic class @tc
 * (a FLOWC work request carrying FW_SCHED_CLS_NONE).  Best effort: the
 * wait for the firmware reply is bounded and its result ignored.
 */
static void cxgb4_mqprio_class_unbind(struct net_device *dev,
				      struct sge_eosw_txq *eosw_txq,
				      u8 tc)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_sched_flowc fe;

	/* If we're shutting down, interrupts are disabled and no completions
	 * come back. So, skip waiting for completions in this scenario.
	 */
	if (!(adap->flags & CXGB4_SHUTTING_DOWN))
		init_completion(&eosw_txq->completion);

	fe.tid = eosw_txq->eotid;
	fe.class = tc;
	cxgb4_sched_class_unbind(dev, &fe, SCHED_FLOWC);

	if (!(adap->flags & CXGB4_SHUTTING_DOWN))
		wait_for_completion_timeout(&eosw_txq->completion,
					    CXGB4_FLOWC_WAIT_TIMEOUT);
}
/* Activate MQPRIO offload on this port: allocate hardware queues, claim
 * one EOTID + software Tx queue per requested queue, bind each queue to
 * its hardware traffic class, then publish the tc/queue layout to the
 * networking stack.
 *
 * Fixes over the previous version:
 *  - propagate the real error from cxgb4_mqprio_alloc_hw_resources()
 *    instead of hard-coding -ENOMEM;
 *  - the unwind used to run `while (i-- > 0)` only, leaking every
 *    already-initialized entry of the traffic class that failed mid-row
 *    (and, on a class-bind failure, the eotid/queue of the failing entry
 *    itself).  The error path now releases the partial row first.
 */
static int cxgb4_mqprio_enable_offload(struct net_device *dev,
				       struct tc_mqprio_qopt_offload *mqprio)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	u32 qoffset, qcount, tot_qcount, qid, hwqid;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_eosw_txq *eosw_txq;
	int eotid, ret;
	u16 i, j;
	u8 hwtc;

	ret = cxgb4_mqprio_alloc_hw_resources(dev);
	if (ret)
		return ret;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		qoffset = mqprio->qopt.offset[i];
		qcount = mqprio->qopt.count[i];
		for (j = 0; j < qcount; j++) {
			eotid = cxgb4_get_free_eotid(&adap->tids);
			if (eotid < 0) {
				ret = -ENOMEM;
				goto out_free_eotids;
			}

			qid = qoffset + j;
			/* Spread EOTIDs across this port's hardware qsets */
			hwqid = pi->first_qset + (eotid % pi->nqsets);
			eosw_txq = &tc_port_mqprio->eosw_txq[qid];
			ret = cxgb4_init_eosw_txq(dev, eosw_txq,
						  eotid, hwqid);
			if (ret)
				goto out_free_eotids;

			cxgb4_alloc_eotid(&adap->tids, eotid, eosw_txq);

			hwtc = tc_port_mqprio->tc_hwtc_map[i];
			ret = cxgb4_mqprio_class_bind(dev, eosw_txq, hwtc);
			if (ret) {
				/* Entry j was never bound, so the generic
				 * unwind below (which unbinds) must not see
				 * it — release it here.
				 */
				cxgb4_free_eotid(&adap->tids, eotid);
				cxgb4_free_eosw_txq(dev, eosw_txq);
				goto out_free_eotids;
			}
		}
	}

	memcpy(&tc_port_mqprio->mqprio, mqprio,
	       sizeof(struct tc_mqprio_qopt_offload));

	/* Inform the stack about the configured tc params.
	 *
	 * Set the correct queue map. If no queue count has been
	 * specified, then send the traffic through default NIC
	 * queues; instead of ETHOFLD queues.
	 */
	ret = netdev_set_num_tc(dev, mqprio->qopt.num_tc);
	if (ret)
		goto out_free_all;

	tot_qcount = pi->nqsets;
	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		qcount = mqprio->qopt.count[i];
		if (qcount) {
			qoffset = mqprio->qopt.offset[i] + pi->nqsets;
		} else {
			qcount = pi->nqsets;
			qoffset = 0;
		}

		ret = netdev_set_tc_queue(dev, i, qcount, qoffset);
		if (ret)
			goto out_reset_tc;

		tot_qcount += mqprio->qopt.count[i];
	}

	ret = netif_set_real_num_tx_queues(dev, tot_qcount);
	if (ret)
		goto out_reset_tc;

	tc_port_mqprio->state = CXGB4_MQPRIO_STATE_ACTIVE;
	return 0;

out_reset_tc:
	netdev_reset_tc(dev);

out_free_all:
	/* Every traffic class was fully set up; no partial row remains */
	i = mqprio->qopt.num_tc;
	j = 0;

out_free_eotids:
	/* First release the partially set up entries of traffic class i */
	while (j-- > 0) {
		eosw_txq = &tc_port_mqprio->eosw_txq[mqprio->qopt.offset[i] + j];
		hwtc = tc_port_mqprio->tc_hwtc_map[i];
		cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);
		cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
		cxgb4_free_eosw_txq(dev, eosw_txq);
	}

	/* Then release all fully set up traffic classes below i */
	while (i-- > 0) {
		qoffset = mqprio->qopt.offset[i];
		qcount = mqprio->qopt.count[i];
		for (j = 0; j < qcount; j++) {
			eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];

			hwtc = tc_port_mqprio->tc_hwtc_map[i];
			cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);

			cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
			cxgb4_free_eosw_txq(dev, eosw_txq);
		}
	}

	cxgb4_mqprio_free_hw_resources(dev);
	return ret;
}
/* Deactivate MQPRIO offload on this port: restore the default tc/queue
 * layout, unbind and free every software Tx queue and its EOTID, drop
 * the hardware queues and the traffic classes, and clear the recorded
 * configuration.  No-op unless the port is currently ACTIVE.
 */
static void cxgb4_mqprio_disable_offload(struct net_device *dev)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_eosw_txq *eosw_txq;
	u32 qoffset, qcount;
	u16 i, j;
	u8 hwtc;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	if (tc_port_mqprio->state != CXGB4_MQPRIO_STATE_ACTIVE)
		return;

	/* Put the stack's view back to plain per-port queues first */
	netdev_reset_tc(dev);
	netif_set_real_num_tx_queues(dev, pi->nqsets);

	for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++) {
		qoffset = tc_port_mqprio->mqprio.qopt.offset[i];
		qcount = tc_port_mqprio->mqprio.qopt.count[i];
		hwtc = tc_port_mqprio->tc_hwtc_map[i];
		for (j = 0; j < qcount; j++) {
			eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];
			cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);
			cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
			cxgb4_free_eosw_txq(dev, eosw_txq);
		}
	}

	cxgb4_mqprio_free_hw_resources(dev);

	/* Free up the traffic classes */
	cxgb4_mqprio_free_tc(dev);

	memset(&tc_port_mqprio->mqprio, 0,
	       sizeof(struct tc_mqprio_qopt_offload));

	tc_port_mqprio->state = CXGB4_MQPRIO_STATE_DISABLED;
}
/* Entry point for TC-MQPRIO Qdisc offload requests.  Validates the
 * request, quiesces the interface if it is running, tears down any
 * existing configuration, and (unless this is a plain teardown request)
 * allocates traffic classes and enables the new offload.
 */
int cxgb4_setup_tc_mqprio(struct net_device *dev,
			  struct tc_mqprio_qopt_offload *mqprio)
{
	bool needs_bring_up = false;
	int ret;

	ret = cxgb4_mqprio_validate(dev, mqprio);
	if (ret)
		return ret;

	/* To configure tc params, the current allocated EOTIDs must
	 * be freed up. However, they can't be freed up if there's
	 * traffic running on the interface. So, ensure interface is
	 * down before configuring tc params.
	 */
	if (netif_running(dev)) {
		cxgb_close(dev);
		needs_bring_up = true;
	}

	cxgb4_mqprio_disable_offload(dev);

	/* If requested for clear, then just return since resources are
	 * already freed up by now.
	 */
	if (!mqprio->qopt.num_tc)
		goto out;

	/* Allocate free available traffic classes and configure
	 * their rate parameters.
	 */
	ret = cxgb4_mqprio_alloc_tc(dev, mqprio);
	if (ret)
		goto out;

	ret = cxgb4_mqprio_enable_offload(dev, mqprio);
	if (ret)
		cxgb4_mqprio_free_tc(dev);

out:
	if (needs_bring_up)
		cxgb_open(dev);

	return ret;
}
/* Allocate the adapter-wide MQPRIO state: the top-level container, the
 * per-port array, and one software Tx queue slot per EOTID per port.
 * Returns 0 on success or -ENOMEM, unwinding partial allocations.
 */
int cxgb4_init_tc_mqprio(struct adapter *adap)
{
	struct cxgb4_tc_port_mqprio *port_mqprio;
	struct cxgb4_tc_mqprio *tc_mqprio;
	struct sge_eosw_txq *eosw_txq;
	int ret = 0;
	u8 i;

	tc_mqprio = kzalloc(sizeof(*tc_mqprio), GFP_KERNEL);
	if (!tc_mqprio)
		return -ENOMEM;

	tc_mqprio->port_mqprio = kcalloc(adap->params.nports,
					 sizeof(*tc_mqprio->port_mqprio),
					 GFP_KERNEL);
	if (!tc_mqprio->port_mqprio) {
		ret = -ENOMEM;
		goto out_free_mqprio;
	}

	for (i = 0; i < adap->params.nports; i++) {
		port_mqprio = &tc_mqprio->port_mqprio[i];
		eosw_txq = kcalloc(adap->tids.neotids, sizeof(*eosw_txq),
				   GFP_KERNEL);
		if (!eosw_txq) {
			ret = -ENOMEM;
			goto out_free_ports;
		}
		port_mqprio->eosw_txq = eosw_txq;
	}

	refcount_set(&tc_mqprio->refcnt, 0);
	adap->tc_mqprio = tc_mqprio;
	return 0;

out_free_ports:
	/* kfree(NULL) is fine for the ports that were never reached */
	for (i = 0; i < adap->params.nports; i++)
		kfree(tc_mqprio->port_mqprio[i].eosw_txq);
	kfree(tc_mqprio->port_mqprio);

out_free_mqprio:
	kfree(tc_mqprio);
	return ret;
}
void cxgb4_cleanup_tc_mqprio(struct adapter *adap)
{
struct cxgb4_tc_port_mqprio *port_mqprio;
u8 i;
if (adap->tc_mqprio) {
if (adap->tc_mqprio->port_mqprio) {
for (i = 0; i < adap->params.nports; i++) {
struct net_device *dev = adap->port[i];
if (dev)
cxgb4_mqprio_disable_offload(dev);
port_mqprio = &adap->tc_mqprio->port_mqprio[i];
kfree(port_mqprio->eosw_txq);
}
kfree(adap->tc_mqprio->port_mqprio);
}
kfree(adap->tc_mqprio);
}
}
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */
#ifndef __CXGB4_TC_MQPRIO_H__
#define __CXGB4_TC_MQPRIO_H__
#include <net/pkt_cls.h>
/* Default ring sizing for the ETHOFLD data path */
#define CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM 128

#define CXGB4_EOHW_TXQ_DEFAULT_DESC_NUM 1024

#define CXGB4_EOHW_RXQ_DEFAULT_DESC_NUM 1024
#define CXGB4_EOHW_RXQ_DEFAULT_DESC_SIZE 64
/* Interrupt coalescing defaults for the ETHOFLD completion Rx queues */
#define CXGB4_EOHW_RXQ_DEFAULT_INTR_USEC 5
#define CXGB4_EOHW_RXQ_DEFAULT_PKT_CNT 8

#define CXGB4_EOHW_FLQ_DEFAULT_DESC_NUM 72

/* Upper bound on waiting for a FLOWC work request completion */
#define CXGB4_FLOWC_WAIT_TIMEOUT (5 * HZ)

/* Per-port MQPRIO offload lifecycle */
enum cxgb4_mqprio_state {
	CXGB4_MQPRIO_STATE_DISABLED = 0,
	CXGB4_MQPRIO_STATE_ACTIVE,
};

struct cxgb4_tc_port_mqprio {
	enum cxgb4_mqprio_state state; /* Current MQPRIO offload state */
	struct tc_mqprio_qopt_offload mqprio; /* MQPRIO offload params */
	struct sge_eosw_txq *eosw_txq; /* Netdev SW Tx queue array */
	u8 tc_hwtc_map[TC_QOPT_MAX_QUEUE]; /* MQPRIO tc to hardware tc map */
};

struct cxgb4_tc_mqprio {
	refcount_t refcnt; /* Refcount for adapter-wide resources */
	struct cxgb4_tc_port_mqprio *port_mqprio; /* Per port MQPRIO info */
};
int cxgb4_setup_tc_mqprio(struct net_device *dev,
struct tc_mqprio_qopt_offload *mqprio);
int cxgb4_init_tc_mqprio(struct adapter *adap);
void cxgb4_cleanup_tc_mqprio(struct adapter *adap);
#endif /* __CXGB4_TC_MQPRIO_H__ */
......@@ -53,35 +53,6 @@
#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)
/* Claim a free index from the ULD MSI-X bitmap.
 * Returns the index on success or -ENOSPC when none remain.
 */
static int get_msix_idx_from_bmap(struct adapter *adap)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;
	unsigned int idx;
	int ret = -ENOSPC;

	spin_lock_irqsave(&bmap->lock, flags);
	idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
	if (idx < bmap->mapsize) {
		/* Mark it used while still under the lock */
		__set_bit(idx, bmap->msix_bmap);
		ret = idx;
	}
	spin_unlock_irqrestore(&bmap->lock, flags);

	return ret;
}
/* Return index @msix_idx to the ULD MSI-X bitmap under its lock. */
static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;

	spin_lock_irqsave(&bmap->lock, flags);
	__clear_bit(msix_idx, bmap->msix_bmap);
	spin_unlock_irqrestore(&bmap->lock, flags);
}
/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
......@@ -138,9 +109,9 @@ static int alloc_uld_rxqs(struct adapter *adap,
struct sge_uld_rxq_info *rxq_info, bool lro)
{
unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
struct sge_ofld_rxq *q = rxq_info->uldrxq;
unsigned short *ids = rxq_info->rspq_id;
int i, err, msi_idx, que_idx = 0;
struct sge *s = &adap->sge;
unsigned int per_chan;
......@@ -159,12 +130,18 @@ static int alloc_uld_rxqs(struct adapter *adap,
}
if (msi_idx >= 0) {
bmap_idx = get_msix_idx_from_bmap(adap);
if (bmap_idx < 0) {
msi_idx = cxgb4_get_msix_idx_from_bmap(adap);
if (msi_idx < 0) {
err = -ENOSPC;
goto freeout;
}
msi_idx = adap->msix_info_ulds[bmap_idx].idx;
snprintf(adap->msix_info[msi_idx].desc,
sizeof(adap->msix_info[msi_idx].desc),
"%s-%s%d",
adap->port[0]->name, rxq_info->name, i);
q->msix = &adap->msix_info[msi_idx];
}
err = t4_sge_alloc_rxq(adap, &q->rspq, false,
adap->port[que_idx++ / per_chan],
......@@ -175,8 +152,7 @@ static int alloc_uld_rxqs(struct adapter *adap,
0);
if (err)
goto freeout;
if (msi_idx >= 0)
rxq_info->msix_tbl[i] = bmap_idx;
memset(&q->stats, 0, sizeof(q->stats));
if (ids)
ids[i] = q->rspq.abs_id;
......@@ -188,6 +164,8 @@ static int alloc_uld_rxqs(struct adapter *adap,
if (q->rspq.desc)
free_rspq_fl(adap, &q->rspq,
q->fl.size ? &q->fl : NULL);
if (q->msix)
cxgb4_free_msix_idx_in_bmap(adap, q->msix->idx);
}
return err;
}
......@@ -198,14 +176,6 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int i, ret = 0;
if (adap->flags & CXGB4_USING_MSIX) {
rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
sizeof(unsigned short),
GFP_KERNEL);
if (!rxq_info->msix_tbl)
return -ENOMEM;
}
ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));
/* Tell uP to route control queue completions to rdma rspq */
......@@ -261,8 +231,6 @@ static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
t4_free_uld_rxqs(adap, rxq_info->nciq,
rxq_info->uldrxq + rxq_info->nrxq);
t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
if (adap->flags & CXGB4_USING_MSIX)
kfree(rxq_info->msix_tbl);
}
static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
......@@ -355,13 +323,12 @@ static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
struct uld_msix_info *minfo;
struct msix_info *minfo;
unsigned int idx;
int err = 0;
unsigned int idx, bmap_idx;
for_each_uldrxq(rxq_info, idx) {
bmap_idx = rxq_info->msix_tbl[idx];
minfo = &adap->msix_info_ulds[bmap_idx];
minfo = rxq_info->uldrxq[idx].msix;
err = request_irq(minfo->vec,
t4_sge_intr_msix, 0,
minfo->desc,
......@@ -376,10 +343,9 @@ request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
unwind:
while (idx-- > 0) {
bmap_idx = rxq_info->msix_tbl[idx];
minfo = &adap->msix_info_ulds[bmap_idx];
minfo = rxq_info->uldrxq[idx].msix;
cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
free_msix_idx_in_bmap(adap, bmap_idx);
cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
}
return err;
......@@ -389,69 +355,45 @@ static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	/* Release the IRQ, its affinity hint and its bitmap index for
	 * every Rx queue of this ULD.  The vector descriptor now lives
	 * directly on the rxq (uldrxq[idx].msix); the old duplicate
	 * declarations and the removed msix_tbl/bmap_idx indirection
	 * made this function ill-formed and are dropped.
	 */
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	struct msix_info *minfo;
	unsigned int idx;

	for_each_uldrxq(rxq_info, idx) {
		minfo = rxq_info->uldrxq[idx].msix;
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
		free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
	}
}
static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int n = sizeof(adap->msix_info_ulds[0].desc);
unsigned int idx, bmap_idx;
int idx;
for_each_uldrxq(rxq_info, idx) {
bmap_idx = rxq_info->msix_tbl[idx];
struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
adap->port[0]->name, rxq_info->name, idx);
}
}
static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
if (!q)
return;
if (q->handler)
napi_enable(&q->napi);
/* 0-increment GTS to start the timer and enable interrupts */
t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
SEINTARM_V(q->intr_params) |
INGRESSQID_V(q->cntxt_id));
}
continue;
static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
{
if (q && q->handler)
napi_disable(&q->napi);
cxgb4_enable_rx(adap, q);
}
}
static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int idx;
for_each_uldrxq(rxq_info, idx)
enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
}
for_each_uldrxq(rxq_info, idx) {
struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int idx;
if (!q)
continue;
for_each_uldrxq(rxq_info, idx)
quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
cxgb4_quiesce_rx(q);
}
}
static void
......@@ -750,7 +692,6 @@ void cxgb4_register_uld(enum cxgb4_uld type,
if (ret)
goto free_queues;
if (adap->flags & CXGB4_USING_MSIX) {
name_msix_vecs_uld(adap, type);
ret = request_msix_queue_irqs_uld(adap, type);
if (ret)
goto free_rxq;
......
......@@ -89,6 +89,10 @@ union aopen_entry {
union aopen_entry *next;
};
/* Entry in the ETHOFLD TID table.  @data holds the per-EOTID cookie;
 * in this driver it points to the software Tx queue (struct sge_eosw_txq)
 * bound to the EOTID — see cxgb4_ethofld_send_flowc().
 */
struct eotid_entry {
	void *data;
};
/*
* Holds the size, base address, free list start, etc of the TID, server TID,
* and active-open TID tables. The tables themselves are allocated dynamically.
......@@ -126,6 +130,12 @@ struct tid_info {
unsigned int v6_stids_in_use;
unsigned int sftids_in_use;
/* ETHOFLD range */
struct eotid_entry *eotid_tab;
unsigned long *eotid_bmap;
unsigned int eotid_base;
unsigned int neotids;
/* TIDs in the TCAM */
atomic_t tids_in_use;
/* TIDs in the HASH */
......@@ -176,6 +186,35 @@ static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
atomic_inc(&t->conns_in_use);
}
/* Translate an EOTID index into its table entry, or NULL when the index
 * is outside the ETHOFLD range.
 */
static inline struct eotid_entry *cxgb4_lookup_eotid(struct tid_info *t,
						     u32 eotid)
{
	if (eotid >= t->neotids)
		return NULL;

	return &t->eotid_tab[eotid];
}
/* Find the first unused EOTID.  Returns the free index, or -1 when the
 * whole bitmap is allocated.  Does not claim it — see cxgb4_alloc_eotid().
 */
static inline int cxgb4_get_free_eotid(struct tid_info *t)
{
	int pos = find_first_zero_bit(t->eotid_bmap, t->neotids);

	return (pos >= t->neotids) ? -1 : pos;
}
/* Claim @eotid in the bitmap and attach @data as its per-TID cookie.
 * Caller must have obtained a free index via cxgb4_get_free_eotid().
 */
static inline void cxgb4_alloc_eotid(struct tid_info *t, u32 eotid, void *data)
{
	set_bit(eotid, t->eotid_bmap);
	t->eotid_tab[eotid].data = data;
}
/* Release @eotid back to the bitmap and drop its cookie. */
static inline void cxgb4_free_eotid(struct tid_info *t, u32 eotid)
{
	clear_bit(eotid, t->eotid_bmap);
	t->eotid_tab[eotid].data = NULL;
}
int cxgb4_alloc_atid(struct tid_info *t, void *data);
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data);
......
......@@ -92,45 +92,69 @@ static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
pf = adap->pf;
vf = 0;
err = t4_set_params(adap, adap->mbox, pf, vf, 1,
&fw_param, &fw_class);
break;
}
case SCHED_FLOWC: {
struct sched_flowc_entry *fe;
fe = (struct sched_flowc_entry *)arg;
fw_class = bind ? fe->param.class : FW_SCHED_CLS_NONE;
err = cxgb4_ethofld_send_flowc(adap->port[pi->port_id],
fe->param.tid, fw_class);
break;
}
default:
err = -ENOTSUPP;
goto out;
break;
}
err = t4_set_params(adap, adap->mbox, pf, vf, 1, &fw_param, &fw_class);
out:
return err;
}
static struct sched_class *t4_sched_queue_lookup(struct port_info *pi,
const unsigned int qid,
int *index)
static void *t4_sched_entry_lookup(struct port_info *pi,
enum sched_bind_type type,
const u32 val)
{
struct sched_table *s = pi->sched_tbl;
struct sched_class *e, *end;
struct sched_class *found = NULL;
int i;
void *found = NULL;
/* Look for a class with matching bound queue parameters */
/* Look for an entry with matching @val */
end = &s->tab[s->sched_size];
for (e = &s->tab[0]; e != end; ++e) {
if (e->state == SCHED_STATE_UNUSED ||
e->bind_type != type)
continue;
switch (type) {
case SCHED_QUEUE: {
struct sched_queue_entry *qe;
i = 0;
if (e->state == SCHED_STATE_UNUSED)
continue;
list_for_each_entry(qe, &e->entry_list, list) {
if (qe->cntxt_id == val) {
found = qe;
break;
}
}
break;
}
case SCHED_FLOWC: {
struct sched_flowc_entry *fe;
list_for_each_entry(qe, &e->queue_list, list) {
if (qe->cntxt_id == qid) {
found = e;
if (index)
*index = i;
list_for_each_entry(fe, &e->entry_list, list) {
if (fe->param.tid == val) {
found = fe;
break;
}
}
break;
}
i++;
default:
return NULL;
}
if (found)
......@@ -142,35 +166,26 @@ static struct sched_class *t4_sched_queue_lookup(struct port_info *pi,
static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
{
struct adapter *adap = pi->adapter;
struct sched_class *e;
struct sched_queue_entry *qe = NULL;
struct adapter *adap = pi->adapter;
struct sge_eth_txq *txq;
unsigned int qid;
int index = -1;
struct sched_class *e;
int err = 0;
if (p->queue < 0 || p->queue >= pi->nqsets)
return -ERANGE;
txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
qid = txq->q.cntxt_id;
/* Find the existing class that the queue is bound to */
e = t4_sched_queue_lookup(pi, qid, &index);
if (e && index >= 0) {
int i = 0;
list_for_each_entry(qe, &e->queue_list, list) {
if (i == index)
break;
i++;
}
/* Find the existing entry that the queue is bound to */
qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
if (qe) {
err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
false);
if (err)
return err;
e = &pi->sched_tbl->tab[qe->param.class];
list_del(&qe->list);
kvfree(qe);
if (atomic_dec_and_test(&e->refcnt)) {
......@@ -183,11 +198,11 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
{
struct adapter *adap = pi->adapter;
struct sched_table *s = pi->sched_tbl;
struct sched_class *e;
struct sched_queue_entry *qe = NULL;
struct adapter *adap = pi->adapter;
struct sge_eth_txq *txq;
struct sched_class *e;
unsigned int qid;
int err = 0;
......@@ -215,7 +230,8 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
if (err)
goto out_err;
list_add_tail(&qe->list, &e->queue_list);
list_add_tail(&qe->list, &e->entry_list);
e->bind_type = SCHED_QUEUE;
atomic_inc(&e->refcnt);
return err;
......@@ -224,6 +240,73 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
return err;
}
/* Unbind a flowc (ETHOFLD TID) from whatever scheduling class it is
 * currently attached to.  A no-op (returns 0) when the tid has no binding.
 * When the last entry leaves a class, the class is returned to the
 * UNUSED state.
 */
static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p)
{
	struct adapter *adap = pi->adapter;
	struct sched_flowc_entry *fe;
	struct sched_class *e;
	int ret;

	if (p->tid < 0 || p->tid >= adap->tids.neotids)
		return -ERANGE;

	/* Find the existing entry that the flowc is bound to */
	fe = t4_sched_entry_lookup(pi, SCHED_FLOWC, p->tid);
	if (!fe)
		return 0;

	/* Tell firmware first; keep the entry if that fails */
	ret = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC, false);
	if (ret)
		return ret;

	e = &pi->sched_tbl->tab[fe->param.class];
	list_del(&fe->list);
	kvfree(fe);
	if (atomic_dec_and_test(&e->refcnt)) {
		e->state = SCHED_STATE_UNUSED;
		memset(&e->info, 0, sizeof(e->info));
	}

	return 0;
}
/* Bind a flowc (ETHOFLD TID) to the scheduling class named in @p.
 * Any existing binding for the tid is removed first.  On success the new
 * entry is linked on the class's entry_list and the class refcount is
 * bumped; on failure the allocated entry is freed.
 */
static int t4_sched_flowc_bind(struct port_info *pi, struct ch_sched_flowc *p)
{
	struct adapter *adap = pi->adapter;
	struct sched_table *s = pi->sched_tbl;
	struct sched_flowc_entry *fe;
	struct sched_class *e;
	int ret;

	if (p->tid < 0 || p->tid >= adap->tids.neotids)
		return -ERANGE;

	fe = kvzalloc(sizeof(*fe), GFP_KERNEL);
	if (!fe)
		return -ENOMEM;

	/* Detach the flowc from any class it is currently bound to */
	ret = t4_sched_flowc_unbind(pi, p);
	if (ret)
		goto free_entry;

	/* Bind flowc to the requested class */
	memcpy(&fe->param, p, sizeof(fe->param));

	e = &s->tab[fe->param.class];
	ret = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC, true);
	if (ret)
		goto free_entry;

	list_add_tail(&fe->list, &e->entry_list);
	e->bind_type = SCHED_FLOWC;
	atomic_inc(&e->refcnt);
	return 0;

free_entry:
	kvfree(fe);
	return ret;
}
static void t4_sched_class_unbind_all(struct port_info *pi,
struct sched_class *e,
enum sched_bind_type type)
......@@ -235,10 +318,17 @@ static void t4_sched_class_unbind_all(struct port_info *pi,
case SCHED_QUEUE: {
struct sched_queue_entry *qe;
list_for_each_entry(qe, &e->queue_list, list)
list_for_each_entry(qe, &e->entry_list, list)
t4_sched_queue_unbind(pi, &qe->param);
break;
}
case SCHED_FLOWC: {
struct sched_flowc_entry *fe;
list_for_each_entry(fe, &e->entry_list, list)
t4_sched_flowc_unbind(pi, &fe->param);
break;
}
default:
break;
}
......@@ -262,6 +352,15 @@ static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg,
err = t4_sched_queue_unbind(pi, qe);
break;
}
case SCHED_FLOWC: {
struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
if (bind)
err = t4_sched_flowc_bind(pi, fe);
else
err = t4_sched_flowc_unbind(pi, fe);
break;
}
default:
err = -ENOTSUPP;
break;
......@@ -299,6 +398,12 @@ int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
class_id = qe->class;
break;
}
case SCHED_FLOWC: {
struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
class_id = fe->class;
break;
}
default:
return -ENOTSUPP;
}
......@@ -340,6 +445,12 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
class_id = qe->class;
break;
}
case SCHED_FLOWC: {
struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
class_id = fe->class;
break;
}
default:
return -ENOTSUPP;
}
......@@ -355,10 +466,13 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
const struct ch_sched_params *p)
{
struct sched_table *s = pi->sched_tbl;
struct sched_class *e, *end;
struct sched_class *found = NULL;
struct sched_class *e, *end;
if (!p) {
/* Only allow tc to be shared among SCHED_FLOWC types. For
* other types, always allocate a new tc.
*/
if (!p || p->u.params.mode != SCHED_CLASS_MODE_FLOW) {
/* Get any available unused class */
end = &s->tab[s->sched_size];
for (e = &s->tab[0]; e != end; ++e) {
......@@ -467,9 +581,32 @@ struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
return t4_sched_class_alloc(pi, p);
}
static void t4_sched_class_free(struct port_info *pi, struct sched_class *e)
/**
 * cxgb4_sched_class_free - free a scheduling class
 * @dev: net_device pointer
 * @classid: scheduling class id to free
 *
 * Frees a scheduling class if there are no users.
 */
void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e;

	/* Only reset the class when nothing still holds a reference */
	e = &s->tab[classid];
	if (!atomic_read(&e->refcnt)) {
		e->state = SCHED_STATE_UNUSED;
		memset(&e->info, 0, sizeof(e->info));
	}
}
/* Internal teardown helper: detach every entry still bound to @e (queues
 * or flowcs, according to e->bind_type) and then release the class via
 * cxgb4_sched_class_free().
 *
 * Fix: the chunk contained a stray leftover line calling
 * t4_sched_class_unbind_all(pi, e, SCHED_QUEUE) before @pi was declared,
 * which cannot compile; only the bind_type-aware call belongs here.
 */
static void t4_sched_class_free(struct net_device *dev, struct sched_class *e)
{
	struct port_info *pi = netdev2pinfo(dev);

	t4_sched_class_unbind_all(pi, e, e->bind_type);
	cxgb4_sched_class_free(dev, e->idx);
}
struct sched_table *t4_init_sched(unsigned int sched_size)
......@@ -487,7 +624,7 @@ struct sched_table *t4_init_sched(unsigned int sched_size)
memset(&s->tab[i], 0, sizeof(struct sched_class));
s->tab[i].idx = i;
s->tab[i].state = SCHED_STATE_UNUSED;
INIT_LIST_HEAD(&s->tab[i].queue_list);
INIT_LIST_HEAD(&s->tab[i].entry_list);
atomic_set(&s->tab[i].refcnt, 0);
}
return s;
......@@ -510,7 +647,7 @@ void t4_cleanup_sched(struct adapter *adap)
e = &s->tab[i];
if (e->state == SCHED_STATE_ACTIVE)
t4_sched_class_free(pi, e);
t4_sched_class_free(adap->port[j], e);
}
kvfree(s);
}
......
......@@ -56,6 +56,7 @@ enum sched_fw_ops {
enum sched_bind_type {
SCHED_QUEUE,
SCHED_FLOWC,
};
struct sched_queue_entry {
......@@ -64,11 +65,17 @@ struct sched_queue_entry {
struct ch_sched_queue param;
};
/* A flowc (ETHOFLD TID) binding to a scheduling class; linked on the
 * owning class's entry_list (see t4_sched_flowc_bind()).
 */
struct sched_flowc_entry {
	struct list_head list;		/* node on sched_class->entry_list */
	struct ch_sched_flowc param;	/* tid and class of this binding */
};
struct sched_class {
u8 state;
u8 idx;
struct ch_sched_params info;
struct list_head queue_list;
enum sched_bind_type bind_type;
struct list_head entry_list;
atomic_t refcnt;
};
......@@ -102,6 +109,7 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
struct ch_sched_params *p);
void cxgb4_sched_class_free(struct net_device *dev, u8 classid);
struct sched_table *t4_init_sched(unsigned int size);
void t4_cleanup_sched(struct adapter *adap);
......
......@@ -55,6 +55,8 @@
#include "t4fw_api.h"
#include "cxgb4_ptp.h"
#include "cxgb4_uld.h"
#include "cxgb4_tc_mqprio.h"
#include "sched.h"
/*
* Rx buffer size. We use largish buffers if possible but settle for single
......@@ -269,7 +271,6 @@ int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
}
EXPORT_SYMBOL(cxgb4_map_skb);
#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
const dma_addr_t *addr)
{
......@@ -284,6 +285,7 @@ static void unmap_skb(struct device *dev, const struct sk_buff *skb,
dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}
#ifdef CONFIG_NEED_DMA_MAP_STATE
/**
* deferred_unmap_destructor - unmap a packet when it is freed
* @skb: the packet
......@@ -1309,6 +1311,35 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb,
tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
}
/* Fill in a CPL_TX_PKT_LSO control message for a GSO skb and return the
 * address immediately after it, where the caller writes the next CPL.
 * T4 takes the raw length in lso->len; T5 and later encode it with
 * LSO_T5_XFER_SIZE_V.
 */
static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb,
				 struct cpl_tx_pkt_lso_core *lso)
{
	const struct skb_shared_info *ssi = skb_shinfo(skb);
	int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
	int l3hdr_len = skb_network_header_len(skb);
	bool ipv6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;

	lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
			      LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
			      LSO_IPV6_V(ipv6) |
			      LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
			      LSO_IPHDR_LEN_V(l3hdr_len / 4) |
			      LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
	lso->ipid_ofst = htons(0);
	lso->mss = htons(ssi->gso_size);
	lso->seqno_offset = htonl(0);
	lso->len = htonl(is_t4(adap->params.chip) ?
			 skb->len : LSO_T5_XFER_SIZE_V(skb->len));

	return (void *)(lso + 1);
}
/**
* t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update
* @adap: the adapter
......@@ -1347,6 +1378,31 @@ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
return reclaimed;
}
/* Sanity-check an skb before transmission.  Rejects packets shorter than
 * @min_pkt_len (the chip minimum is 10 octets, but some firmware commands
 * need more) and non-GSO packets longer than the device MTU (plus VLAN
 * header when tagged).  Returns 0 if the skb may be sent, -EINVAL if not.
 */
static inline int cxgb4_validate_skb(struct sk_buff *skb,
				     struct net_device *dev,
				     u32 min_pkt_len)
{
	u32 max_len;

	if (unlikely(skb->len < min_pkt_len))
		return -EINVAL;

	/* GSO packets are segmented by hardware, so only enforce the MTU
	 * bound on non-GSO traffic.
	 */
	max_len = ETH_HLEN + dev->mtu;
	if (skb_vlan_tagged(skb))
		max_len += VLAN_HLEN;

	if (!skb_shinfo(skb)->gso_size && unlikely(skb->len > max_len))
		return -EINVAL;

	return 0;
}
/**
* cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
* @skb: the packet
......@@ -1356,41 +1412,24 @@ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
*/
static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
u32 wr_mid, ctrl0, op;
u64 cntrl, *end, *sgl;
int qidx, credits;
unsigned int flits, ndesc;
struct adapter *adap;
struct sge_eth_txq *q;
const struct port_info *pi;
enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
bool ptp_enabled = is_ptp_enabled(skb, dev);
dma_addr_t addr[MAX_SKB_FRAGS + 1];
const struct skb_shared_info *ssi;
struct fw_eth_tx_pkt_wr *wr;
struct cpl_tx_pkt_core *cpl;
const struct skb_shared_info *ssi;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
int len, qidx, credits, ret;
const struct port_info *pi;
unsigned int flits, ndesc;
bool immediate = false;
int len, max_pkt_len;
bool ptp_enabled = is_ptp_enabled(skb, dev);
u32 wr_mid, ctrl0, op;
u64 cntrl, *end, *sgl;
struct sge_eth_txq *q;
unsigned int chip_ver;
enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
#ifdef CONFIG_CHELSIO_T4_FCOE
int err;
#endif /* CONFIG_CHELSIO_T4_FCOE */
/*
* The chip min packet length is 10 octets but play safe and reject
* anything shorter than an Ethernet header.
*/
if (unlikely(skb->len < ETH_HLEN)) {
out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
struct adapter *adap;
/* Discard the packet if the length is greater than mtu */
max_pkt_len = ETH_HLEN + dev->mtu;
if (skb_vlan_tagged(skb))
max_pkt_len += VLAN_HLEN;
if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
if (ret)
goto out_free;
pi = netdev_priv(dev);
......@@ -1421,8 +1460,8 @@ out_free: dev_kfree_skb_any(skb);
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
#ifdef CONFIG_CHELSIO_T4_FCOE
err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
if (unlikely(err == -ENOTSUPP)) {
ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
if (unlikely(ret == -ENOTSUPP)) {
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
goto out_free;
......@@ -1490,9 +1529,6 @@ out_free: dev_kfree_skb_any(skb);
len += sizeof(*cpl);
if (ssi->gso_size) {
struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
int l3hdr_len = skb_network_header_len(skb);
int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);
if (tnl_type)
......@@ -1519,30 +1555,8 @@ out_free: dev_kfree_skb_any(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL)
cntrl = hwcsum(adap->params.chip, skb);
} else {
lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
LSO_IPV6_V(v6) |
LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
LSO_IPHDR_LEN_V(l3hdr_len / 4) |
LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
lso->ipid_ofst = htons(0);
lso->mss = htons(ssi->gso_size);
lso->seqno_offset = htonl(0);
if (is_t4(adap->params.chip))
lso->len = htonl(skb->len);
else
lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
cpl = (void *)(lso + 1);
if (CHELSIO_CHIP_VERSION(adap->params.chip)
<= CHELSIO_T5)
cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
else
cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
TXPKT_IPHDR_LEN_V(l3hdr_len);
cpl = write_tso_wr(adap, skb, lso);
cntrl = hwcsum(adap->params.chip, skb);
}
sgl = (u64 *)(cpl + 1); /* sgl start here */
if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
......@@ -1622,6 +1636,10 @@ out_free: dev_kfree_skb_any(skb);
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
return NETDEV_TX_OK;
out_free:
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
/* Constants ... */
......@@ -1710,32 +1728,25 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
dma_addr_t addr[MAX_SKB_FRAGS + 1];
const struct skb_shared_info *ssi;
struct fw_eth_tx_pkt_vm_wr *wr;
int qidx, credits, max_pkt_len;
struct cpl_tx_pkt_core *cpl;
const struct port_info *pi;
unsigned int flits, ndesc;
struct sge_eth_txq *txq;
struct adapter *adapter;
int qidx, credits, ret;
size_t fw_hdr_copy_len;
u64 cntrl, *end;
u32 wr_mid;
const size_t fw_hdr_copy_len = sizeof(wr->ethmacdst) +
sizeof(wr->ethmacsrc) +
sizeof(wr->ethtype) +
sizeof(wr->vlantci);
/* The chip minimum packet length is 10 octets but the firmware
* command that we are using requires that we copy the Ethernet header
* (including the VLAN tag) into the header so we reject anything
* smaller than that ...
*/
if (unlikely(skb->len < fw_hdr_copy_len))
goto out_free;
/* Discard the packet if the length is greater than mtu */
max_pkt_len = ETH_HLEN + dev->mtu;
if (skb_vlan_tag_present(skb))
max_pkt_len += VLAN_HLEN;
if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
fw_hdr_copy_len = sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) +
sizeof(wr->ethtype) + sizeof(wr->vlantci);
ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len);
if (ret)
goto out_free;
/* Figure out which TX Queue we're going to use. */
......@@ -1991,16 +2002,6 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM))
return cxgb4_vf_eth_xmit(skb, dev);
return cxgb4_eth_xmit(skb, dev);
}
/**
* reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
* @q: the SGE control Tx queue
......@@ -2021,6 +2022,433 @@ static inline void reclaim_completed_tx_imm(struct sge_txq *q)
q->cidx = hw_cidx;
}
/* Advance a ring index by @n slots, wrapping at @max.  Assumes a single
 * wrap suffices, i.e. n < max (true for all callers, which advance by 1
 * or by a count bounded by the ring size).
 */
static inline void eosw_txq_advance_index(u32 *idx, u32 n, u32 max)
{
	u32 next = *idx + n;

	*idx = (next >= max) ? next - max : next;
}
/* Free @ndesc completed descriptors of a software ETHOFLD Tx queue,
 * starting at last_cidx and advancing it past them.  DMA mappings are
 * released only when d->addr[0] is set, i.e. the skb's payload was mapped
 * by ethofld_hard_xmit(); skbs are consumed, not dropped.
 */
void cxgb4_eosw_txq_free_desc(struct adapter *adap,
			      struct sge_eosw_txq *eosw_txq, u32 ndesc)
{
	struct sge_eosw_desc *d;

	d = &eosw_txq->desc[eosw_txq->last_cidx];
	while (ndesc--) {
		if (d->skb) {
			if (d->addr[0]) {
				unmap_skb(adap->pdev_dev, d->skb, d->addr);
				memset(d->addr, 0, sizeof(d->addr));
			}
			dev_consume_skb_any(d->skb);
			d->skb = NULL;
		}
		eosw_txq_advance_index(&eosw_txq->last_cidx, 1,
				       eosw_txq->ndesc);
		d = &eosw_txq->desc[eosw_txq->last_cidx];
	}
}
/* Move the producer index forward by @n slots and account them as in use.
 * Callers in this file hold eosw_txq->lock when invoking this.
 */
static inline void eosw_txq_advance(struct sge_eosw_txq *eosw_txq, u32 n)
{
	eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc);
	eosw_txq->inuse += n;
}
static inline int eosw_txq_enqueue(struct sge_eosw_txq *eosw_txq,
struct sk_buff *skb)
{
if (eosw_txq->inuse == eosw_txq->ndesc)
return -ENOMEM;
eosw_txq->desc[eosw_txq->pidx].skb = skb;
return 0;
}
/* Return the skb at the transmit position (last_pidx) without consuming
 * it; NULL when that slot is empty.
 */
static inline struct sk_buff *eosw_txq_peek(struct sge_eosw_txq *eosw_txq)
{
	return eosw_txq->desc[eosw_txq->last_pidx].skb;
}
/* Compute the number of 8-byte flits an ETHOFLD WR for @skb will occupy:
 * the FW_ETH_TX_EO_WR + CPL(s) + inlined headers, plus the SGL entries
 * needed for any payload beyond @hdr_len.
 */
static inline u8 ethofld_calc_tx_flits(struct adapter *adap,
				       struct sk_buff *skb, u32 hdr_len)
{
	u32 wrlen = sizeof(struct fw_eth_tx_eo_wr) +
		    sizeof(struct cpl_tx_pkt_core);
	u8 flits, nsgl = 0;

	if (skb_shinfo(skb)->gso_size)
		wrlen += sizeof(struct cpl_tx_pkt_lso_core);

	/* Inlined packet headers are padded to a 16-byte boundary */
	wrlen += roundup(hdr_len, 16);

	/* Packet headers + WR + CPLs */
	flits = DIV_ROUND_UP(wrlen, 8);

	if (skb_shinfo(skb)->nr_frags > 0)
		nsgl = sgl_len(skb_shinfo(skb)->nr_frags);
	else if (skb->len - hdr_len)
		nsgl = sgl_len(1);

	return flits + nsgl;
}
/* Build the FW_ETH_TX_EO_WR TCP-segmentation header for @skb in @wr and
 * return the address where the caller must write the CPL_TX_PKT_CORE.
 * Charges the WR's 16-byte credits against eosw_txq->cred and requests a
 * firmware completion either on the very first WR or once half of the
 * offload-queue WR credits have been used since the last completion.
 * Callers hold eosw_txq->lock (via ethofld_hard_xmit()).
 */
static inline void *write_eo_wr(struct adapter *adap,
				struct sge_eosw_txq *eosw_txq,
				struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
				u32 hdr_len, u32 wrlen)
{
	const struct skb_shared_info *ssi = skb_shinfo(skb);
	struct cpl_tx_pkt_core *cpl;
	u32 immd_len, wrlen16;
	bool compl = false;

	wrlen16 = DIV_ROUND_UP(wrlen, 16);

	/* Immediate data: the CPL(s) plus the inlined packet headers */
	immd_len = sizeof(struct cpl_tx_pkt_core);
	if (skb_shinfo(skb)->gso_size) {
		if (skb->encapsulation &&
		    CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
			immd_len += sizeof(struct cpl_tx_tnl_lso);
		else
			immd_len += sizeof(struct cpl_tx_pkt_lso_core);
	}
	immd_len += hdr_len;

	/* Ask firmware for a completion on the first WR, or after half the
	 * WR credits have been consumed since the last completion.
	 */
	if (!eosw_txq->ncompl ||
	    eosw_txq->last_compl >= adap->params.ofldq_wr_cred / 2) {
		compl = true;
		eosw_txq->ncompl++;
		eosw_txq->last_compl = 0;
	}

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
				     FW_ETH_TX_EO_WR_IMMDLEN_V(immd_len) |
				     FW_WR_COMPL_V(compl));
	wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16_V(wrlen16) |
					 FW_WR_FLOWID_V(eosw_txq->hwtid));
	wr->r3 = 0;
	wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
	wr->u.tcpseg.ethlen = skb_network_offset(skb);
	wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
	wr->u.tcpseg.tcplen = tcp_hdrlen(skb);
	wr->u.tcpseg.tsclk_tsoff = 0;
	wr->u.tcpseg.r4 = 0;
	wr->u.tcpseg.r5 = 0;
	wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len);

	/* GSO skbs additionally carry a CPL_TX_PKT_LSO before the core CPL */
	if (ssi->gso_size) {
		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);

		wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size);
		cpl = write_tso_wr(adap, skb, lso);
	} else {
		wr->u.tcpseg.mss = cpu_to_be16(0xffff);
		cpl = (void *)(wr + 1);
	}

	eosw_txq->cred -= wrlen16;
	eosw_txq->last_compl += wrlen16;
	return cpl;
}
/* Push the packet at last_pidx of a software ETHOFLD Tx queue (EOSW) onto
 * its backing hardware queue (EOHW).  Called with eosw_txq->lock held;
 * takes eohw_txq->lock for the hardware ring.
 *
 * When the queue is mid state-transition and last_pidx sits on flowc_idx,
 * the queued skb is a pre-built FLOWC WR: it is copied out verbatim
 * (skip_eotx_wr) and the queue moves to the matching *_REPLY state.
 * Otherwise a FW_ETH_TX_EO_WR is built around the packet by write_eo_wr().
 */
static void ethofld_hard_xmit(struct net_device *dev,
			      struct sge_eosw_txq *eosw_txq)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	u32 wrlen, wrlen16, hdr_len, data_len;
	enum sge_eosw_state next_state;
	u64 cntrl, *start, *end, *sgl;
	struct sge_eohw_txq *eohw_txq;
	struct cpl_tx_pkt_core *cpl;
	struct fw_eth_tx_eo_wr *wr;
	bool skip_eotx_wr = false;
	struct sge_eosw_desc *d;
	struct sk_buff *skb;
	u8 flits, ndesc;
	int left;

	eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid];
	spin_lock(&eohw_txq->lock);
	reclaim_completed_tx_imm(&eohw_txq->q);

	d = &eosw_txq->desc[eosw_txq->last_pidx];
	skb = d->skb;
	skb_tx_timestamp(skb);

	wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx];
	if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE &&
		     eosw_txq->last_pidx == eosw_txq->flowc_idx)) {
		/* FLOWC skb: already a complete WR, send it as-is */
		hdr_len = skb->len;
		data_len = 0;
		flits = DIV_ROUND_UP(hdr_len, 8);
		if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND)
			next_state = CXGB4_EO_STATE_FLOWC_OPEN_REPLY;
		else
			next_state = CXGB4_EO_STATE_FLOWC_CLOSE_REPLY;
		skip_eotx_wr = true;
	} else {
		hdr_len = eth_get_headlen(dev, skb->data, skb_headlen(skb));
		data_len = skb->len - hdr_len;
		flits = ethofld_calc_tx_flits(adap, skb, hdr_len);
	}
	ndesc = flits_to_desc(flits);
	wrlen = flits * 8;
	wrlen16 = DIV_ROUND_UP(wrlen, 16);

	/* If there are no CPL credits, then wait for credits
	 * to come back and retry again
	 */
	if (unlikely(wrlen16 > eosw_txq->cred))
		goto out_unlock;

	if (unlikely(skip_eotx_wr)) {
		start = (u64 *)wr;
		eosw_txq->state = next_state;
		goto write_wr_headers;
	}

	cpl = write_eo_wr(adap, eosw_txq, skb, wr, hdr_len, wrlen);
	cntrl = hwcsum(adap->params.chip, skb);
	if (skb_vlan_tag_present(skb))
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));

	cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
				 TXPKT_INTF_V(pi->tx_chan) |
				 TXPKT_PF_V(adap->pf));
	cpl->pack = 0;
	cpl->len = cpu_to_be16(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	start = (u64 *)(cpl + 1);

write_wr_headers:
	sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start,
					  hdr_len);
	if (data_len) {
		/* NOTE(review): on mapping failure the skb stays queued at
		 * last_pidx with no obvious retry trigger — confirm intended.
		 */
		if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, d->addr))) {
			memset(d->addr, 0, sizeof(d->addr));
			eohw_txq->mapping_err++;
			goto out_unlock;
		}

		end = (u64 *)wr + flits;
		if (unlikely(start > sgl)) {
			left = (u8 *)end - (u8 *)eohw_txq->q.stat;
			end = (void *)eohw_txq->q.desc + left;
		}

		if (unlikely((u8 *)sgl >= (u8 *)eohw_txq->q.stat)) {
			/* If current position is already at the end of the
			 * txq, reset the current to point to start of the queue
			 * and update the end ptr as well.
			 */
			left = (u8 *)end - (u8 *)eohw_txq->q.stat;
			end = (void *)eohw_txq->q.desc + left;
			sgl = (void *)eohw_txq->q.desc;
		}

		cxgb4_write_sgl(skb, &eohw_txq->q, (void *)sgl, end, hdr_len,
				d->addr);
	}

	txq_advance(&eohw_txq->q, ndesc);
	cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
	eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc);

out_unlock:
	spin_unlock(&eohw_txq->lock);
}
/* Drain pending packets (between last_pidx and pidx) of a software
 * ETHOFLD Tx queue to hardware.  Only runs in the ACTIVE and FLOWC_*_SEND
 * states; in every other state there is nothing eligible to transmit.
 * Caller holds eosw_txq->lock.
 */
static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq)
{
	struct sk_buff *skb;
	int npkts;

	switch (eosw_txq->state) {
	case CXGB4_EO_STATE_ACTIVE:
	case CXGB4_EO_STATE_FLOWC_OPEN_SEND:
	case CXGB4_EO_STATE_FLOWC_CLOSE_SEND:
		npkts = eosw_txq->pidx - eosw_txq->last_pidx;
		if (npkts < 0)
			npkts += eosw_txq->ndesc;
		break;
	default:
		/* REPLY/CLOSED states: nothing to send */
		return;
	}

	while (npkts--) {
		skb = eosw_txq_peek(eosw_txq);
		if (!skb) {
			/* Empty slot: just step over it */
			eosw_txq_advance_index(&eosw_txq->last_pidx, 1,
					       eosw_txq->ndesc);
			continue;
		}

		ethofld_hard_xmit(dev, eosw_txq);
	}
}
/* Transmit entry point for ETHOFLD (TC-MQPRIO) queues.  The skb's queue
 * mapping minus the port's normal NIC qsets indexes the per-TC software
 * Tx queue (see t4_start_xmit()).  The skb is queued on the EOSW ring and
 * transmission is attempted immediately; on a non-ACTIVE queue state or a
 * full ring the skb is dropped (freed) and NETDEV_TX_OK is returned.
 */
static netdev_tx_t cxgb4_ethofld_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_eosw_txq *eosw_txq;
	u32 qid;
	int ret;

	ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
	if (ret)
		goto out_free;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	qid = skb_get_queue_mapping(skb) - pi->nqsets;
	eosw_txq = &tc_port_mqprio->eosw_txq[qid];
	spin_lock_bh(&eosw_txq->lock);
	if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
		goto out_unlock;

	ret = eosw_txq_enqueue(eosw_txq, skb);
	if (ret)
		goto out_unlock;

	/* SKB is queued for processing until credits are available.
	 * So, call the destructor now and we'll free the skb later
	 * after it has been successfully transmitted.
	 */
	skb_orphan(skb);

	eosw_txq_advance(eosw_txq, 1);
	ethofld_xmit(dev, eosw_txq);
	spin_unlock_bh(&eosw_txq->lock);
	return NETDEV_TX_OK;

out_unlock:
	spin_unlock_bh(&eosw_txq->lock);
out_free:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
/* Top-level ndo_start_xmit dispatcher: VF-style work requests take the
 * VM path, queue mappings beyond the port's NIC qsets are ETHOFLD
 * (TC-MQPRIO) queues, everything else goes down the regular NIC path.
 */
netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	u16 map = skb_get_queue_mapping(skb);

	if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM))
		return cxgb4_vf_eth_xmit(skb, dev);

	if (unlikely(map >= pi->nqsets))
		return cxgb4_ethofld_xmit(skb, dev);

	return cxgb4_eth_xmit(skb, dev);
}
/**
 * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
 * @dev: netdevice
 * @eotid: ETHOFLD tid to bind/unbind
 * @tc: traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
 *
 * Send a FLOWC work request to bind an ETHOFLD TID to a traffic class.
 * If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from
 * a traffic class.
 */
int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	enum sge_eosw_state next_state;
	struct sge_eosw_txq *eosw_txq;
	u32 len, len16, nparams = 6;
	struct fw_flowc_wr *flowc;
	struct eotid_entry *entry;
	struct sge_ofld_rxq *rxq;
	struct sk_buff *skb;
	int ret = 0;

	len = sizeof(*flowc) + sizeof(struct fw_flowc_mnemval) * nparams;
	len16 = DIV_ROUND_UP(len, 16);

	/* Translate the EOTID to its software Tx queue */
	entry = cxgb4_lookup_eotid(&adap->tids, eotid);
	if (!entry)
		return -ENOMEM;

	eosw_txq = (struct sge_eosw_txq *)entry->data;
	if (!eosw_txq)
		return -ENOMEM;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	spin_lock_bh(&eosw_txq->lock);
	if (tc != FW_SCHED_CLS_NONE) {
		/* Binding is only valid from the CLOSED state */
		if (eosw_txq->state != CXGB4_EO_STATE_CLOSED)
			goto out_free_skb;

		next_state = CXGB4_EO_STATE_FLOWC_OPEN_SEND;
	} else {
		/* Unbinding is only valid from the ACTIVE state */
		if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
			goto out_free_skb;

		next_state = CXGB4_EO_STATE_FLOWC_CLOSE_SEND;
	}

	flowc = __skb_put(skb, len);
	memset(flowc, 0, len);

	rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid];
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(len16) |
					  FW_WR_FLOWID_V(eosw_txq->hwtid));
	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(nparams) |
					   FW_WR_COMPL_V(1));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V(adap->pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(pi->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(pi->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(rxq->rspq.abs_id);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
	flowc->mnemval[4].val = cpu_to_be32(tc);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_EOSTATE;
	flowc->mnemval[5].val = cpu_to_be32(tc == FW_SCHED_CLS_NONE ?
					    FW_FLOWC_MNEM_EOSTATE_CLOSING :
					    FW_FLOWC_MNEM_EOSTATE_ESTABLISHED);

	/* The FLOWC WR consumes credits and always requests a completion */
	eosw_txq->cred -= len16;
	eosw_txq->ncompl++;
	eosw_txq->last_compl = 0;

	ret = eosw_txq_enqueue(eosw_txq, skb);
	if (ret)
		goto out_free_skb;

	eosw_txq->state = next_state;
	eosw_txq->flowc_idx = eosw_txq->pidx;
	eosw_txq_advance(eosw_txq, 1);
	ethofld_xmit(dev, eosw_txq);

	spin_unlock_bh(&eosw_txq->lock);
	return 0;

out_free_skb:
	/* Fix: the skb was previously leaked on these early-exit paths */
	dev_consume_skb_any(skb);
	spin_unlock_bh(&eosw_txq->lock);
	return ret;
}
/**
* is_imm - check whether a packet can be sent as immediate data
* @skb: the packet
......@@ -3311,6 +3739,113 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
return work_done;
}
/* Tasklet callback (eosw_txq->qresume_tsk, scheduled from
 * cxgb4_ethofld_rx_handler()): reclaim descriptors completed by hardware
 * (between last_cidx and cidx) and then retry transmission of packets
 * that were waiting for credits.  @data is the sge_eosw_txq pointer.
 */
void cxgb4_ethofld_restart(unsigned long data)
{
	struct sge_eosw_txq *eosw_txq = (struct sge_eosw_txq *)data;
	int pktcount;

	spin_lock(&eosw_txq->lock);
	pktcount = eosw_txq->cidx - eosw_txq->last_cidx;
	if (pktcount < 0)
		pktcount += eosw_txq->ndesc;

	if (pktcount) {
		cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq->netdev),
					 eosw_txq, pktcount);
		eosw_txq->inuse -= pktcount;
	}

	/* There may be some packets waiting for completions. So,
	 * attempt to send these packets now.
	 */
	ethofld_xmit(eosw_txq->netdev, eosw_txq);
	spin_unlock(&eosw_txq->lock);
}
/* cxgb4_ethofld_rx_handler - Process ETHOFLD Tx completions
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the CPL message
 * @si: the gather list of packet fragments
 *
 * Process a ETHOFLD Tx completion. Increment the cidx here, but
 * free up the descriptors in a tasklet later.
 */
int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
			     const struct pkt_gl *si)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	/* skip RSS header */
	rsp++;

	if (opcode == CPL_FW4_ACK) {
		const struct cpl_fw4_ack *cpl;
		struct sge_eosw_txq *eosw_txq;
		struct eotid_entry *entry;
		struct sk_buff *skb;
		u32 hdr_len, eotid;
		u8 flits, wrlen16;
		int credits;

		cpl = (const struct cpl_fw4_ack *)rsp;
		/* The flowid carries the absolute EOTID; rebase it to index
		 * the adapter's eotid table.
		 */
		eotid = CPL_FW4_ACK_FLOWID_G(ntohl(OPCODE_TID(cpl))) -
			q->adap->tids.eotid_base;
		entry = cxgb4_lookup_eotid(&q->adap->tids, eotid);
		if (!entry)
			goto out_done;

		eosw_txq = (struct sge_eosw_txq *)entry->data;
		if (!eosw_txq)
			goto out_done;

		spin_lock(&eosw_txq->lock);
		credits = cpl->credits;
		/* Walk completed descriptors from cidx, recomputing each
		 * WR's 16-byte credit cost until the acked credits are
		 * consumed.
		 */
		while (credits > 0) {
			skb = eosw_txq->desc[eosw_txq->cidx].skb;
			if (!skb)
				break;

			if (unlikely((eosw_txq->state ==
				      CXGB4_EO_STATE_FLOWC_OPEN_REPLY ||
				      eosw_txq->state ==
				      CXGB4_EO_STATE_FLOWC_CLOSE_REPLY) &&
				     eosw_txq->cidx == eosw_txq->flowc_idx)) {
				/* FLOWC WR completed: finish the state
				 * transition and wake any waiter.
				 */
				hdr_len = skb->len;
				flits = DIV_ROUND_UP(skb->len, 8);
				if (eosw_txq->state ==
				    CXGB4_EO_STATE_FLOWC_OPEN_REPLY)
					eosw_txq->state = CXGB4_EO_STATE_ACTIVE;
				else
					eosw_txq->state = CXGB4_EO_STATE_CLOSED;
				complete(&eosw_txq->completion);
			} else {
				hdr_len = eth_get_headlen(eosw_txq->netdev,
							  skb->data,
							  skb_headlen(skb));
				flits = ethofld_calc_tx_flits(q->adap, skb,
							      hdr_len);
			}
			eosw_txq_advance_index(&eosw_txq->cidx, 1,
					       eosw_txq->ndesc);
			wrlen16 = DIV_ROUND_UP(flits * 8, 16);
			credits -= wrlen16;
		}

		eosw_txq->cred += cpl->credits;
		eosw_txq->ncompl--;

		spin_unlock(&eosw_txq->lock);

		/* Schedule a tasklet to reclaim SKBs and restart ETHOFLD Tx,
		 * if there were packets waiting for completion.
		 */
		tasklet_schedule(&eosw_txq->qresume_tsk);
	}

out_done:
	return 0;
}
/*
* The MSI-X interrupt handler for an SGE response queue.
*/
......@@ -3912,30 +4447,30 @@ int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
}
int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
struct net_device *dev, unsigned int iqid,
unsigned int uld_type)
static int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q,
struct net_device *dev, u32 cmd, u32 iqid)
{
unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
int ret, nentries;
struct fw_eq_ofld_cmd c;
struct sge *s = &adap->sge;
struct port_info *pi = netdev_priv(dev);
int cmd = FW_EQ_OFLD_CMD;
struct sge *s = &adap->sge;
struct fw_eq_ofld_cmd c;
u32 fb_min, nentries;
int ret;
/* Add status entries */
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
NUMA_NO_NODE);
if (!txq->q.desc)
nentries = q->size + s->stat_len / sizeof(struct tx_desc);
q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc),
sizeof(struct tx_sw_desc), &q->phys_addr,
&q->sdesc, s->stat_len, NUMA_NO_NODE);
if (!q->desc)
return -ENOMEM;
if (chip_ver <= CHELSIO_T5)
fb_min = FETCHBURSTMIN_64B_X;
else
fb_min = FETCHBURSTMIN_64B_T6_X;
memset(&c, 0, sizeof(c));
if (unlikely(uld_type == CXGB4_TX_CRYPTO))
cmd = FW_EQ_CTRL_CMD;
c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
......@@ -3947,27 +4482,42 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
c.dcaen_to_eqsize =
htonl(FW_EQ_OFLD_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
? FETCHBURSTMIN_64B_X
: FETCHBURSTMIN_64B_T6_X) |
htonl(FW_EQ_OFLD_CMD_FBMIN_V(fb_min) |
FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
c.eqaddr = cpu_to_be64(q->phys_addr);
ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret) {
kfree(txq->q.sdesc);
txq->q.sdesc = NULL;
kfree(q->sdesc);
q->sdesc = NULL;
dma_free_coherent(adap->pdev_dev,
nentries * sizeof(struct tx_desc),
txq->q.desc, txq->q.phys_addr);
txq->q.desc = NULL;
q->desc, q->phys_addr);
q->desc = NULL;
return ret;
}
init_txq(adap, q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
return 0;
}
int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
struct net_device *dev, unsigned int iqid,
unsigned int uld_type)
{
u32 cmd = FW_EQ_OFLD_CMD;
int ret;
if (unlikely(uld_type == CXGB4_TX_CRYPTO))
cmd = FW_EQ_CTRL_CMD;
ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid);
if (ret)
return ret;
txq->q.q_type = CXGB4_TXQ_ULD;
init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
......@@ -3976,6 +4526,25 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
return 0;
}
/* t4_sge_alloc_ethofld_txq - allocate an ETHOFLD hardware Tx queue
 * @adap: the adapter
 * @txq: the hardware EOHW_TXQ to set up
 * @dev: the network device the queue belongs to
 * @iqid: ingress queue that receives completions for this Tx queue
 *
 * Allocates the underlying offload egress queue via firmware and resets
 * the software bookkeeping (lock, owner, statistics counters).
 * Returns 0 on success or a negative error from the allocation.
 */
int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
			     struct net_device *dev, u32 iqid)
{
	int ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD,
					iqid);

	if (ret != 0)
		return ret;

	txq->q.q_type = CXGB4_TXQ_ULD;
	spin_lock_init(&txq->lock);
	txq->adap = adap;

	/* Start with clean per-queue statistics */
	txq->tso = 0;
	txq->tx_cso = 0;
	txq->vlan_ins = 0;
	txq->mapping_err = 0;

	return 0;
}
void free_txq(struct adapter *adap, struct sge_txq *q)
{
struct sge *s = &adap->sge;
......@@ -4031,6 +4600,17 @@ void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
q->fl.size ? &q->fl : NULL);
}
/* t4_sge_free_ethofld_txq - free an ETHOFLD hardware Tx queue
 * @adap: the adapter
 * @txq: the queue to release
 *
 * Frees the firmware egress queue, any pending Tx descriptors, the
 * software descriptor array and the ring memory.  A queue that was
 * never allocated (txq->q.desc == NULL) is ignored.
 */
void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq)
{
	if (!txq->q.desc)
		return;

	t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0, txq->q.cntxt_id);
	free_tx_desc(adap, &txq->q, txq->q.in_use, false);
	kfree(txq->q.sdesc);
	free_txq(adap, &txq->q);
}
/**
* t4_free_sge_resources - free SGE resources
* @adap: the adapter
......@@ -4060,6 +4640,10 @@ void t4_free_sge_resources(struct adapter *adap)
if (eq->rspq.desc)
free_rspq_fl(adap, &eq->rspq,
eq->fl.size ? &eq->fl : NULL);
if (eq->msix) {
cxgb4_free_msix_idx_in_bmap(adap, eq->msix->idx);
eq->msix = NULL;
}
etq = &adap->sge.ethtxq[i];
if (etq->q.desc) {
......@@ -4086,8 +4670,15 @@ void t4_free_sge_resources(struct adapter *adap)
}
}
if (adap->sge.fw_evtq.desc)
if (adap->sge.fw_evtq.desc) {
free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
if (adap->sge.fwevtq_msix_idx >= 0)
cxgb4_free_msix_idx_in_bmap(adap,
adap->sge.fwevtq_msix_idx);
}
if (adap->sge.nd_msix_idx >= 0)
cxgb4_free_msix_idx_in_bmap(adap, adap->sge.nd_msix_idx);
if (adap->sge.intrq.desc)
free_rspq_fl(adap, &adap->sge.intrq, NULL);
......
......@@ -1421,6 +1421,11 @@ enum {
CPL_FW4_ACK_FLAGS_FLOWC = 0x4, /* fw_flowc_wr complete */
};
/* FLOWID field of CPL_FW4_ACK: the low 24 bits of the opcode/TID word.
 * The ETHOFLD completion path extracts it to recover the EOTID of the
 * software Tx queue the ACK refers to.
 */
#define CPL_FW4_ACK_FLOWID_S    0
#define CPL_FW4_ACK_FLOWID_M    0xffffff
#define CPL_FW4_ACK_FLOWID_G(x) \
	(((x) >> CPL_FW4_ACK_FLOWID_S) & CPL_FW4_ACK_FLOWID_M)
struct cpl_fw6_msg {
u8 opcode;
u8 type;
......
......@@ -87,6 +87,7 @@ enum fw_wr_opcodes {
FW_ULPTX_WR = 0x04,
FW_TP_WR = 0x05,
FW_ETH_TX_PKT_WR = 0x08,
FW_ETH_TX_EO_WR = 0x1c,
FW_OFLD_CONNECTION_WR = 0x2f,
FW_FLOWC_WR = 0x0a,
FW_OFLD_TX_DATA_WR = 0x0b,
......@@ -534,6 +535,35 @@ struct fw_eth_tx_pkt_wr {
__be64 r3;
};
/* Sub-types of the Ethernet offload (ETHOFLD) Tx work request.
 * Only the TCP segmentation variant is currently defined.
 */
enum fw_eth_tx_eo_type {
	FW_ETH_TX_EO_TYPE_TCPSEG = 1,
};

/* FW_ETH_TX_EO_WR - Ethernet offload Tx work request.
 * Firmware-defined layout; all multi-byte fields are big-endian.
 * NOTE(review): field semantics below are inferred from names — confirm
 * against the Chelsio firmware interface specification.
 */
struct fw_eth_tx_eo_wr {
	__be32 op_immdlen;	/* opcode and immediate-data length */
	__be32 equiq_to_len16;
	__be64 r3;		/* reserved */
	union fw_eth_tx_eo {
		struct fw_eth_tx_eo_tcpseg {
			__u8 type;	/* FW_ETH_TX_EO_TYPE_TCPSEG */
			__u8 ethlen;	/* presumably Ethernet header length */
			__be16 iplen;	/* presumably IP header length */
			__u8 tcplen;	/* presumably TCP header length */
			__u8 tsclk_tsoff;
			__be16 r4;	/* reserved */
			__be16 mss;	/* presumably TCP segment size */
			__be16 r5;	/* reserved */
			__be32 plen;	/* presumably total payload length */
		} tcpseg;
	} u;
};
/* IMMDLEN field of fw_eth_tx_eo_wr::op_immdlen: length (in bytes) of
 * the immediate data following the work request header (9-bit field).
 */
#define FW_ETH_TX_EO_WR_IMMDLEN_S	0
#define FW_ETH_TX_EO_WR_IMMDLEN_M	0x1ff
#define FW_ETH_TX_EO_WR_IMMDLEN_V(x)	((x) << FW_ETH_TX_EO_WR_IMMDLEN_S)
#define FW_ETH_TX_EO_WR_IMMDLEN_G(x)	\
	(((x) >> FW_ETH_TX_EO_WR_IMMDLEN_S) & FW_ETH_TX_EO_WR_IMMDLEN_M)
struct fw_ofld_connection_wr {
__be32 op_compl;
__be32 len16_pkd;
......@@ -660,6 +690,12 @@ enum fw_flowc_mnem_tcpstate {
FW_FLOWC_MNEM_TCPSTATE_TIMEWAIT = 10, /* not expected */
};
enum fw_flowc_mnem_eostate {
FW_FLOWC_MNEM_EOSTATE_ESTABLISHED = 1, /* default */
/* graceful close, after sending outstanding payload */
FW_FLOWC_MNEM_EOSTATE_CLOSING = 2,
};
enum fw_flowc_mnem {
FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */
FW_FLOWC_MNEM_CH,
......@@ -1134,6 +1170,7 @@ enum fw_caps_config_nic {
FW_CAPS_CONFIG_NIC = 0x00000001,
FW_CAPS_CONFIG_NIC_VM = 0x00000002,
FW_CAPS_CONFIG_NIC_HASHFILTER = 0x00000020,
FW_CAPS_CONFIG_NIC_ETHOFLD = 0x00000040,
};
enum fw_caps_config_ofld {
......@@ -1276,6 +1313,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD = 0x28,
FW_PARAMS_PARAM_DEV_DBQ_TIMER = 0x29,
FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK = 0x2A,
FW_PARAMS_PARAM_DEV_NUM_TM_CLASS = 0x2B,
FW_PARAMS_PARAM_DEV_FILTER = 0x2E,
};
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment