Commit 4d22de3e authored by Divy Le Ray, committed by Jeff Garzik

Add support for the latest 1G/10G Chelsio adapter, T3.

This driver is required by the Chelsio T3 RDMA driver posted by
Steve Wise.
Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
parent 0bf94faf
@@ -2389,6 +2389,24 @@ config CHELSIO_T1_NAPI
NAPI is a driver API designed to reduce CPU and interrupt load
when the driver is receiving lots of packets from the card.
config CHELSIO_T3
tristate "Chelsio Communications T3 10Gb Ethernet support"
depends on PCI
help
This driver supports Chelsio T3-based gigabit and 10Gb Ethernet
adapters.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.
For customer support, please visit our customer support page at
<http://www.chelsio.com/support.htm>.
Please send feedback to <linux-bugs@chelsio.com>.
To compile this driver as a module, choose M here: the module
will be called cxgb3.
config EHEA
tristate "eHEA Ethernet support"
depends on IBMEBUS
...
@@ -6,6 +6,7 @@ obj-$(CONFIG_E1000) += e1000/
obj-$(CONFIG_IBM_EMAC) += ibm_emac/
obj-$(CONFIG_IXGB) += ixgb/
obj-$(CONFIG_CHELSIO_T1) += chelsio/
obj-$(CONFIG_CHELSIO_T3) += cxgb3/
obj-$(CONFIG_EHEA) += ehea/
obj-$(CONFIG_BONDING) += bonding/
obj-$(CONFIG_GIANFAR) += gianfar_driver.o
...
#
# Chelsio T3 driver
#
obj-$(CONFIG_CHELSIO_T3) += cxgb3.o
cxgb3-objs := cxgb3_main.o ael1002.o vsc8211.o t3_hw.o mc5.o \
xgmac.o sge.o l2t.o cxgb3_offload.o
/*
* This file is part of the Chelsio T3 Ethernet driver for Linux.
*
* Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
* release for licensing terms and conditions.
*/
/* This file should not be included directly. Include common.h instead. */
#ifndef __T3_ADAPTER_H__
#define __T3_ADAPTER_H__
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include "t3cdev.h"
#include <asm/semaphore.h>
#include <asm/bitops.h>
#include <asm/io.h>
typedef irqreturn_t(*intr_handler_t) (int, void *);
struct vlan_group;
struct port_info {
struct vlan_group *vlan_grp;
const struct port_type_info *port_type;
u8 port_id;
u8 rx_csum_offload;
u8 nqsets;
u8 first_qset;
struct cphy phy;
struct cmac mac;
struct link_config link_config;
struct net_device_stats netstats;
int activity;
};
enum { /* adapter flags */
FULL_INIT_DONE = (1 << 0),
USING_MSI = (1 << 1),
USING_MSIX = (1 << 2),
};
struct rx_desc;
struct rx_sw_desc;
struct sge_fl { /* SGE per free-buffer list state */
unsigned int buf_size; /* size of each Rx buffer */
unsigned int credits; /* # of available Rx buffers */
unsigned int size; /* capacity of free list */
unsigned int cidx; /* consumer index */
unsigned int pidx; /* producer index */
unsigned int gen; /* free list generation */
struct rx_desc *desc; /* address of HW Rx descriptor ring */
struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
dma_addr_t phys_addr; /* physical address of HW ring start */
unsigned int cntxt_id; /* SGE context id for the free list */
unsigned long empty; /* # of times queue ran out of buffers */
};
/*
* Bundle size for grouping offload RX packets for delivery to the stack.
* Don't make this too big as we do prefetch on each packet in a bundle.
*/
# define RX_BUNDLE_SIZE 8
struct rsp_desc;
struct sge_rspq { /* state for an SGE response queue */
unsigned int credits; /* # of pending response credits */
unsigned int size; /* capacity of response queue */
unsigned int cidx; /* consumer index */
unsigned int gen; /* current generation bit */
unsigned int polling; /* is the queue serviced through NAPI? */
unsigned int holdoff_tmr; /* interrupt holdoff timer in 100ns */
unsigned int next_holdoff; /* holdoff time for next interrupt */
struct rsp_desc *desc; /* address of HW response ring */
dma_addr_t phys_addr; /* physical address of the ring */
unsigned int cntxt_id; /* SGE context id for the response q */
spinlock_t lock; /* guards response processing */
struct sk_buff *rx_head; /* offload packet receive queue head */
struct sk_buff *rx_tail; /* offload packet receive queue tail */
unsigned long offload_pkts;
unsigned long offload_bundles;
unsigned long eth_pkts; /* # of ethernet packets */
unsigned long pure_rsps; /* # of pure (non-data) responses */
unsigned long imm_data; /* responses with immediate data */
unsigned long rx_drops; /* # of packets dropped due to no mem */
unsigned long async_notif; /* # of asynchronous notification events */
unsigned long empty; /* # of times queue ran out of credits */
unsigned long nomem; /* # of responses deferred due to no mem */
unsigned long unhandled_irqs; /* # of spurious intrs */
};
struct tx_desc;
struct tx_sw_desc;
struct sge_txq { /* state for an SGE Tx queue */
unsigned long flags; /* HW DMA fetch status */
unsigned int in_use; /* # of in-use Tx descriptors */
unsigned int size; /* # of descriptors */
unsigned int processed; /* total # of descs HW has processed */
unsigned int cleaned; /* total # of descs SW has reclaimed */
unsigned int stop_thres; /* SW TX queue suspend threshold */
unsigned int cidx; /* consumer index */
unsigned int pidx; /* producer index */
unsigned int gen; /* current value of generation bit */
unsigned int unacked; /* Tx descriptors used since last COMPL */
struct tx_desc *desc; /* address of HW Tx descriptor ring */
struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */
spinlock_t lock; /* guards enqueueing of new packets */
unsigned int token; /* WR token */
dma_addr_t phys_addr; /* physical address of the ring */
struct sk_buff_head sendq; /* List of backpressured offload packets */
struct tasklet_struct qresume_tsk; /* restarts the queue */
unsigned int cntxt_id; /* SGE context id for the Tx q */
unsigned long stops; /* # of times q has been stopped */
unsigned long restarts; /* # of queue restarts */
};
enum { /* per port SGE statistics */
SGE_PSTAT_TSO, /* # of TSO requests */
SGE_PSTAT_RX_CSUM_GOOD, /* # of successful RX csum offloads */
SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
SGE_PSTAT_MAX /* must be last */
};
struct sge_qset { /* an SGE queue set */
struct sge_rspq rspq;
struct sge_fl fl[SGE_RXQ_PER_SET];
struct sge_txq txq[SGE_TXQ_PER_SET];
struct net_device *netdev; /* associated net device */
unsigned long txq_stopped; /* which Tx queues are stopped */
struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
unsigned long port_stats[SGE_PSTAT_MAX];
} ____cacheline_aligned;
struct sge {
struct sge_qset qs[SGE_QSETS];
spinlock_t reg_lock; /* guards non-atomic SGE registers (eg context) */
};
struct adapter {
struct t3cdev tdev;
struct list_head adapter_list;
void __iomem *regs;
struct pci_dev *pdev;
unsigned long registered_device_map;
unsigned long open_device_map;
unsigned long flags;
const char *name;
int msg_enable;
unsigned int mmio_len;
struct adapter_params params;
unsigned int slow_intr_mask;
unsigned long irq_stats[IRQ_NUM_STATS];
struct {
unsigned short vec;
char desc[22];
} msix_info[SGE_QSETS + 1];
/* T3 modules */
struct sge sge;
struct mc7 pmrx;
struct mc7 pmtx;
struct mc7 cm;
struct mc5 mc5;
struct net_device *port[MAX_NPORTS];
unsigned int check_task_cnt;
struct delayed_work adap_check_task;
struct work_struct ext_intr_handler_task;
/*
* Dummy netdevices are needed when using multiple receive queues with
* NAPI as each netdevice can service only one queue.
*/
struct net_device *dummy_netdev[SGE_QSETS - 1];
struct dentry *debugfs_root;
struct mutex mdio_lock;
spinlock_t stats_lock;
spinlock_t work_lock;
};
static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr)
{
u32 val = readl(adapter->regs + reg_addr);
CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr, val);
return val;
}
static inline void t3_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
{
CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr, val);
writel(val, adapter->regs + reg_addr);
}
static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
{
return netdev_priv(adap->port[idx]);
}
/*
* We use the spare atalk_ptr to map a net device to its SGE queue set.
* This is a macro so it can be used as an l-value.
*/
#define dev2qset(netdev) ((netdev)->atalk_ptr)
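/*
 * Illustrative usage (a sketch, not a verbatim call site from this commit):
 * because dev2qset() expands to a structure member, it can be written to as
 * well as read, e.g.
 *
 *	dev2qset(netdev) = qs;		bind a queue set to the device
 *	qs = dev2qset(netdev);		look the queue set up again later
 */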
#define OFFLOAD_DEVMAP_BIT 15
#define tdev2adap(d) container_of(d, struct adapter, tdev)
static inline int offload_running(struct adapter *adapter)
{
return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
}
int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb);
void t3_os_ext_intr_handler(struct adapter *adapter);
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_status,
int speed, int duplex, int fc);
void t3_sge_start(struct adapter *adap);
void t3_sge_stop(struct adapter *adap);
void t3_free_sge_resources(struct adapter *adap);
void t3_sge_err_intr_handler(struct adapter *adapter);
intr_handler_t t3_intr_handler(struct adapter *adap, int polling);
int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
int irq_vec_idx, const struct qset_params *p,
int ntxq, struct net_device *netdev);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
unsigned char *data);
irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
#endif /* __T3_ADAPTER_H__ */
/*
* This file is part of the Chelsio T3 Ethernet driver.
*
* Copyright (C) 2005-2006 Chelsio Communications. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
* release for licensing terms and conditions.
*/
#include "common.h"
#include "regs.h"
enum {
AEL100X_TX_DISABLE = 9,
AEL100X_TX_CONFIG1 = 0xc002,
AEL1002_PWR_DOWN_HI = 0xc011,
AEL1002_PWR_DOWN_LO = 0xc012,
AEL1002_XFI_EQL = 0xc015,
AEL1002_LB_EN = 0xc017,
LASI_CTRL = 0x9002,
LASI_STAT = 0x9005
};
static void ael100x_txon(struct cphy *phy)
{
int tx_on_gpio = phy->addr == 0 ? F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL;
msleep(100);
t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, 0, tx_on_gpio);
msleep(30);
}
static int ael1002_power_down(struct cphy *phy, int enable)
{
int err;
err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_DISABLE, !!enable);
if (!err)
err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
return err;
}
static int ael1002_reset(struct cphy *phy, int wait)
{
int err;
if ((err = ael1002_power_down(phy, 0)) ||
(err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_CONFIG1, 1)) ||
(err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_HI, 0)) ||
(err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_LO, 0)) ||
(err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_XFI_EQL, 0x18)) ||
(err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, AEL1002_LB_EN,
0, 1 << 5)))
return err;
return 0;
}
static int ael1002_intr_noop(struct cphy *phy)
{
return 0;
}
static int ael100x_get_link_status(struct cphy *phy, int *link_ok,
int *speed, int *duplex, int *fc)
{
if (link_ok) {
unsigned int status;
int err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &status);
/*
* BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
* once more to get the current link state.
*/
if (!err && !(status & BMSR_LSTATUS))
err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR,
&status);
if (err)
return err;
*link_ok = !!(status & BMSR_LSTATUS);
}
if (speed)
*speed = SPEED_10000;
if (duplex)
*duplex = DUPLEX_FULL;
return 0;
}
static struct cphy_ops ael1002_ops = {
.reset = ael1002_reset,
.intr_enable = ael1002_intr_noop,
.intr_disable = ael1002_intr_noop,
.intr_clear = ael1002_intr_noop,
.intr_handler = ael1002_intr_noop,
.get_link_status = ael100x_get_link_status,
.power_down = ael1002_power_down,
};
void t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops)
{
cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops);
ael100x_txon(phy);
}
static int ael1006_reset(struct cphy *phy, int wait)
{
return t3_phy_reset(phy, MDIO_DEV_PMA_PMD, wait);
}
static int ael1006_intr_enable(struct cphy *phy)
{
return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
}
static int ael1006_intr_disable(struct cphy *phy)
{
return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
}
static int ael1006_intr_clear(struct cphy *phy)
{
u32 val;
return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
}
static int ael1006_intr_handler(struct cphy *phy)
{
unsigned int status;
int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
if (err)
return err;
return (status & 1) ? cphy_cause_link_change : 0;
}
static int ael1006_power_down(struct cphy *phy, int enable)
{
return t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
}
static struct cphy_ops ael1006_ops = {
.reset = ael1006_reset,
.intr_enable = ael1006_intr_enable,
.intr_disable = ael1006_intr_disable,
.intr_clear = ael1006_intr_clear,
.intr_handler = ael1006_intr_handler,
.get_link_status = ael100x_get_link_status,
.power_down = ael1006_power_down,
};
void t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops)
{
cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops);
ael100x_txon(phy);
}
static struct cphy_ops qt2045_ops = {
.reset = ael1006_reset,
.intr_enable = ael1006_intr_enable,
.intr_disable = ael1006_intr_disable,
.intr_clear = ael1006_intr_clear,
.intr_handler = ael1006_intr_handler,
.get_link_status = ael100x_get_link_status,
.power_down = ael1006_power_down,
};
void t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops)
{
unsigned int stat;
cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops);
/*
* Some cards where the PHY is supposed to be at address 0 actually
* have it at 1.
*/
if (!phy_addr && !mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &stat) &&
stat == 0xffff)
phy->addr = 1;
}
static int xaui_direct_reset(struct cphy *phy, int wait)
{
return 0;
}
static int xaui_direct_get_link_status(struct cphy *phy, int *link_ok,
int *speed, int *duplex, int *fc)
{
if (link_ok) {
unsigned int status;
status = t3_read_reg(phy->adapter,
XGM_REG(A_XGM_SERDES_STAT0, phy->addr));
*link_ok = !(status & F_LOWSIG0);
}
if (speed)
*speed = SPEED_10000;
if (duplex)
*duplex = DUPLEX_FULL;
return 0;
}
static int xaui_direct_power_down(struct cphy *phy, int enable)
{
return 0;
}
static struct cphy_ops xaui_direct_ops = {
.reset = xaui_direct_reset,
.intr_enable = ael1002_intr_noop,
.intr_disable = ael1002_intr_noop,
.intr_clear = ael1002_intr_noop,
.intr_handler = ael1002_intr_noop,
.get_link_status = xaui_direct_get_link_status,
.power_down = xaui_direct_power_down,
};
void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops)
{
cphy_init(phy, adapter, 1, &xaui_direct_ops, mdio_ops);
}
/*
* This file is part of the Chelsio T3 Ethernet driver.
*
* Copyright (C) 2005-2006 Chelsio Communications. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
* release for licensing terms and conditions.
*/
#ifndef __CHELSIO_COMMON_H
#define __CHELSIO_COMMON_H
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include "version.h"
#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__)
#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__)
#define CH_ALERT(adap, fmt, ...) \
dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__)
/*
* More powerful macro that selectively prints messages based on msg_enable.
* For info and debugging messages.
*/
#define CH_MSG(adapter, level, category, fmt, ...) do { \
if ((adapter)->msg_enable & NETIF_MSG_##category) \
dev_printk(KERN_##level, &adapter->pdev->dev, fmt, \
## __VA_ARGS__); \
} while (0)
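/*
 * Example (illustrative only, not a call site from this commit): a message
 * gated on the NETIF_MSG_LINK bit would be written as
 *
 *	CH_MSG(adapter, INFO, LINK, "port %d link is up\n", port_id);
 *
 * and is emitted at KERN_INFO level only when (adapter)->msg_enable has
 * NETIF_MSG_LINK set.
 */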
#ifdef DEBUG
# define CH_DBG(adapter, category, fmt, ...) \
CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
#else
# define CH_DBG(adapter, category, fmt, ...)
#endif
/* Additional NETIF_MSG_* categories */
#define NETIF_MSG_MMIO 0x8000000
struct t3_rx_mode {
struct net_device *dev;
struct dev_mc_list *mclist;
unsigned int idx;
};
static inline void init_rx_mode(struct t3_rx_mode *p, struct net_device *dev,
struct dev_mc_list *mclist)
{
p->dev = dev;
p->mclist = mclist;
p->idx = 0;
}
static inline u8 *t3_get_next_mcaddr(struct t3_rx_mode *rm)
{
u8 *addr = NULL;
if (rm->mclist && rm->idx < rm->dev->mc_count) {
addr = rm->mclist->dmi_addr;
rm->mclist = rm->mclist->next;
rm->idx++;
}
return addr;
}
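/*
 * Typical iteration over the multicast list (a sketch; the real consumer is
 * the MAC rx-mode code, and program_mcast_addr() is a hypothetical helper
 * used only for illustration):
 *
 *	struct t3_rx_mode rm;
 *	u8 *addr;
 *
 *	init_rx_mode(&rm, dev, dev->mc_list);
 *	while ((addr = t3_get_next_mcaddr(&rm)) != NULL)
 *		program_mcast_addr(addr);
 */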
enum {
MAX_NPORTS = 2, /* max # of ports */
MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */
EEPROMSIZE = 8192, /* Serial EEPROM size */
RSS_TABLE_SIZE = 64, /* size of RSS lookup and mapping tables */
TCB_SIZE = 128, /* TCB size */
NMTUS = 16, /* size of MTU table */
NCCTRL_WIN = 32, /* # of congestion control windows */
};
#define MAX_RX_COALESCING_LEN 16224U
enum {
PAUSE_RX = 1 << 0,
PAUSE_TX = 1 << 1,
PAUSE_AUTONEG = 1 << 2
};
enum {
SUPPORTED_OFFLOAD = 1 << 24,
SUPPORTED_IRQ = 1 << 25
};
enum { /* adapter interrupt-maintained statistics */
STAT_ULP_CH0_PBL_OOB,
STAT_ULP_CH1_PBL_OOB,
STAT_PCI_CORR_ECC,
IRQ_NUM_STATS /* keep last */
};
enum {
SGE_QSETS = 8, /* # of SGE Tx/Rx/RspQ sets */
SGE_RXQ_PER_SET = 2, /* # of Rx queues per set */
SGE_TXQ_PER_SET = 3 /* # of Tx queues per set */
};
enum sge_context_type { /* SGE egress context types */
SGE_CNTXT_RDMA = 0,
SGE_CNTXT_ETH = 2,
SGE_CNTXT_OFLD = 4,
SGE_CNTXT_CTRL = 5
};
enum {
AN_PKT_SIZE = 32, /* async notification packet size */
IMMED_PKT_SIZE = 48 /* packet size for immediate data */
};
struct sg_ent { /* SGE scatter/gather entry */
u32 len[2];
u64 addr[2];
};
#ifndef SGE_NUM_GENBITS
/* Must be 1 or 2 */
# define SGE_NUM_GENBITS 2
#endif
#define TX_DESC_FLITS 16U
#define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS)
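/*
 * With the default SGE_NUM_GENBITS of 2 this works out to
 * WR_FLITS = 16 + 1 - 2 = 15 flits per work request (arithmetic shown for
 * illustration only).
 */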
struct cphy;
struct adapter;
struct mdio_ops {
int (*read)(struct adapter *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int *val);
int (*write)(struct adapter *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int val);
};
struct adapter_info {
unsigned char nports; /* # of ports */
unsigned char phy_base_addr; /* MDIO PHY base address */
unsigned char mdien;
unsigned char mdiinv;
unsigned int gpio_out; /* GPIO output settings */
unsigned int gpio_intr; /* GPIO IRQ enable mask */
unsigned long caps; /* adapter capabilities */
const struct mdio_ops *mdio_ops; /* MDIO operations */
const char *desc; /* product description */
};
struct port_type_info {
void (*phy_prep)(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *ops);
unsigned int caps;
const char *desc;
};
struct mc5_stats {
unsigned long parity_err;
unsigned long active_rgn_full;
unsigned long nfa_srch_err;
unsigned long unknown_cmd;
unsigned long reqq_parity_err;
unsigned long dispq_parity_err;
unsigned long del_act_empty;
};
struct mc7_stats {
unsigned long corr_err;
unsigned long uncorr_err;
unsigned long parity_err;
unsigned long addr_err;
};
struct mac_stats {
u64 tx_octets; /* total # of octets in good frames */
u64 tx_octets_bad; /* total # of octets in error frames */
u64 tx_frames; /* all good frames */
u64 tx_mcast_frames; /* good multicast frames */
u64 tx_bcast_frames; /* good broadcast frames */
u64 tx_pause; /* # of transmitted pause frames */
u64 tx_deferred; /* frames with deferred transmissions */
u64 tx_late_collisions; /* # of late collisions */
u64 tx_total_collisions; /* # of total collisions */
u64 tx_excess_collisions; /* frame errors from excessive collisions */
u64 tx_underrun; /* # of Tx FIFO underruns */
u64 tx_len_errs; /* # of Tx length errors */
u64 tx_mac_internal_errs; /* # of internal MAC errors on Tx */
u64 tx_excess_deferral; /* # of frames with excessive deferral */
u64 tx_fcs_errs; /* # of frames with bad FCS */
u64 tx_frames_64; /* # of Tx frames in a particular range */
u64 tx_frames_65_127;
u64 tx_frames_128_255;
u64 tx_frames_256_511;
u64 tx_frames_512_1023;
u64 tx_frames_1024_1518;
u64 tx_frames_1519_max;
u64 rx_octets; /* total # of octets in good frames */
u64 rx_octets_bad; /* total # of octets in error frames */
u64 rx_frames; /* all good frames */
u64 rx_mcast_frames; /* good multicast frames */
u64 rx_bcast_frames; /* good broadcast frames */
u64 rx_pause; /* # of received pause frames */
u64 rx_fcs_errs; /* # of received frames with bad FCS */
u64 rx_align_errs; /* alignment errors */
u64 rx_symbol_errs; /* symbol errors */
u64 rx_data_errs; /* data errors */
u64 rx_sequence_errs; /* sequence errors */
u64 rx_runt; /* # of runt frames */
u64 rx_jabber; /* # of jabber frames */
u64 rx_short; /* # of short frames */
u64 rx_too_long; /* # of oversized frames */
u64 rx_mac_internal_errs; /* # of internal MAC errors on Rx */
u64 rx_frames_64; /* # of Rx frames in a particular range */
u64 rx_frames_65_127;
u64 rx_frames_128_255;
u64 rx_frames_256_511;
u64 rx_frames_512_1023;
u64 rx_frames_1024_1518;
u64 rx_frames_1519_max;
u64 rx_cong_drops; /* # of Rx drops due to SGE congestion */
unsigned long tx_fifo_parity_err;
unsigned long rx_fifo_parity_err;
unsigned long tx_fifo_urun;
unsigned long rx_fifo_ovfl;
unsigned long serdes_signal_loss;
unsigned long xaui_pcs_ctc_err;
unsigned long xaui_pcs_align_change;
};
struct tp_mib_stats {
u32 ipInReceive_hi;
u32 ipInReceive_lo;
u32 ipInHdrErrors_hi;
u32 ipInHdrErrors_lo;
u32 ipInAddrErrors_hi;
u32 ipInAddrErrors_lo;
u32 ipInUnknownProtos_hi;
u32 ipInUnknownProtos_lo;
u32 ipInDiscards_hi;
u32 ipInDiscards_lo;
u32 ipInDelivers_hi;
u32 ipInDelivers_lo;
u32 ipOutRequests_hi;
u32 ipOutRequests_lo;
u32 ipOutDiscards_hi;
u32 ipOutDiscards_lo;
u32 ipOutNoRoutes_hi;
u32 ipOutNoRoutes_lo;
u32 ipReasmTimeout;
u32 ipReasmReqds;
u32 ipReasmOKs;
u32 ipReasmFails;
u32 reserved[8];
u32 tcpActiveOpens;
u32 tcpPassiveOpens;
u32 tcpAttemptFails;
u32 tcpEstabResets;
u32 tcpOutRsts;
u32 tcpCurrEstab;
u32 tcpInSegs_hi;
u32 tcpInSegs_lo;
u32 tcpOutSegs_hi;
u32 tcpOutSegs_lo;
u32 tcpRetransSeg_hi;
u32 tcpRetransSeg_lo;
u32 tcpInErrs_hi;
u32 tcpInErrs_lo;
u32 tcpRtoMin;
u32 tcpRtoMax;
};
struct tp_params {
unsigned int nchan; /* # of channels */
unsigned int pmrx_size; /* total PMRX capacity */
unsigned int pmtx_size; /* total PMTX capacity */
unsigned int cm_size; /* total CM capacity */
unsigned int chan_rx_size; /* per channel Rx size */
unsigned int chan_tx_size; /* per channel Tx size */
unsigned int rx_pg_size; /* Rx page size */
unsigned int tx_pg_size; /* Tx page size */
unsigned int rx_num_pgs; /* # of Rx pages */
unsigned int tx_num_pgs; /* # of Tx pages */
unsigned int ntimer_qs; /* # of timer queues */
};
struct qset_params { /* SGE queue set parameters */
unsigned int polling; /* polling/interrupt service for rspq */
unsigned int coalesce_usecs; /* irq coalescing timer */
unsigned int rspq_size; /* # of entries in response queue */
unsigned int fl_size; /* # of entries in regular free list */
unsigned int jumbo_size; /* # of entries in jumbo free list */
unsigned int txq_size[SGE_TXQ_PER_SET]; /* Tx queue sizes */
unsigned int cong_thres; /* FL congestion threshold */
};
struct sge_params {
unsigned int max_pkt_size; /* max offload pkt size */
struct qset_params qset[SGE_QSETS];
};
struct mc5_params {
unsigned int mode; /* selects MC5 width */
unsigned int nservers; /* size of server region */
unsigned int nfilters; /* size of filter region */
unsigned int nroutes; /* size of routing region */
};
/* Default MC5 region sizes */
enum {
DEFAULT_NSERVERS = 512,
DEFAULT_NFILTERS = 128
};
/* MC5 modes, these must be non-0 */
enum {
MC5_MODE_144_BIT = 1,
MC5_MODE_72_BIT = 2
};
struct vpd_params {
unsigned int cclk;
unsigned int mclk;
unsigned int uclk;
unsigned int mdc;
unsigned int mem_timing;
u8 eth_base[6];
u8 port_type[MAX_NPORTS];
unsigned short xauicfg[2];
};
struct pci_params {
unsigned int vpd_cap_addr;
unsigned int pcie_cap_addr;
unsigned short speed;
unsigned char width;
unsigned char variant;
};
enum {
PCI_VARIANT_PCI,
PCI_VARIANT_PCIX_MODE1_PARITY,
PCI_VARIANT_PCIX_MODE1_ECC,
PCI_VARIANT_PCIX_266_MODE2,
PCI_VARIANT_PCIE
};
struct adapter_params {
struct sge_params sge;
struct mc5_params mc5;
struct tp_params tp;
struct vpd_params vpd;
struct pci_params pci;
const struct adapter_info *info;
unsigned short mtus[NMTUS];
unsigned short a_wnd[NCCTRL_WIN];
unsigned short b_wnd[NCCTRL_WIN];
unsigned int nports; /* # of ethernet ports */
unsigned int stats_update_period; /* MAC stats accumulation period */
unsigned int linkpoll_period; /* link poll period in 0.1s */
unsigned int rev; /* chip revision */
};
struct trace_params {
u32 sip;
u32 sip_mask;
u32 dip;
u32 dip_mask;
u16 sport;
u16 sport_mask;
u16 dport;
u16 dport_mask;
u32 vlan:12;
u32 vlan_mask:12;
u32 intf:4;
u32 intf_mask:4;
u8 proto;
u8 proto_mask;
};
struct link_config {
unsigned int supported; /* link capabilities */
unsigned int advertising; /* advertised capabilities */
unsigned short requested_speed; /* speed user has requested */
unsigned short speed; /* actual link speed */
unsigned char requested_duplex; /* duplex user has requested */
unsigned char duplex; /* actual link duplex */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
unsigned char autoneg; /* autonegotiating? */
unsigned int link_ok; /* link up? */
};
#define SPEED_INVALID 0xffff
#define DUPLEX_INVALID 0xff
struct mc5 {
struct adapter *adapter;
unsigned int tcam_size;
unsigned char part_type;
unsigned char parity_enabled;
unsigned char mode;
struct mc5_stats stats;
};
static inline unsigned int t3_mc5_size(const struct mc5 *p)
{
return p->tcam_size;
}
struct mc7 {
struct adapter *adapter; /* backpointer to adapter */
unsigned int size; /* memory size in bytes */
unsigned int width; /* MC7 interface width */
unsigned int offset; /* register address offset for MC7 instance */
const char *name; /* name of MC7 instance */
struct mc7_stats stats; /* MC7 statistics */
};
static inline unsigned int t3_mc7_size(const struct mc7 *p)
{
return p->size;
}
struct cmac {
struct adapter *adapter;
unsigned int offset;
unsigned int nucast; /* # of address filters for unicast MACs */
struct mac_stats stats;
};
enum {
MAC_DIRECTION_RX = 1,
MAC_DIRECTION_TX = 2,
MAC_RXFIFO_SIZE = 32768
};
/* IEEE 802.3ae specified MDIO devices */
enum {
MDIO_DEV_PMA_PMD = 1,
MDIO_DEV_WIS = 2,
MDIO_DEV_PCS = 3,
MDIO_DEV_XGXS = 4
};
/* PHY loopback direction */
enum {
PHY_LOOPBACK_TX = 1,
PHY_LOOPBACK_RX = 2
};
/* PHY interrupt types */
enum {
cphy_cause_link_change = 1,
cphy_cause_fifo_error = 2
};
/* PHY operations */
struct cphy_ops {
void (*destroy)(struct cphy *phy);
int (*reset)(struct cphy *phy, int wait);
int (*intr_enable)(struct cphy *phy);
int (*intr_disable)(struct cphy *phy);
int (*intr_clear)(struct cphy *phy);
int (*intr_handler)(struct cphy *phy);
int (*autoneg_enable)(struct cphy *phy);
int (*autoneg_restart)(struct cphy *phy);
int (*advertise)(struct cphy *phy, unsigned int advertise_map);
int (*set_loopback)(struct cphy *phy, int mmd, int dir, int enable);
int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
int *duplex, int *fc);
int (*power_down)(struct cphy *phy, int enable);
};
/* A PHY instance */
struct cphy {
int addr; /* PHY address */
struct adapter *adapter; /* associated adapter */
unsigned long fifo_errors; /* FIFO over/under-flows */
const struct cphy_ops *ops; /* PHY operations */
int (*mdio_read)(struct adapter *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int *val);
int (*mdio_write)(struct adapter *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int val);
};
/* Convenience MDIO read/write wrappers */
static inline int mdio_read(struct cphy *phy, int mmd, int reg,
unsigned int *valp)
{
return phy->mdio_read(phy->adapter, phy->addr, mmd, reg, valp);
}
static inline int mdio_write(struct cphy *phy, int mmd, int reg,
unsigned int val)
{
return phy->mdio_write(phy->adapter, phy->addr, mmd, reg, val);
}
/* Convenience initializer */
static inline void cphy_init(struct cphy *phy, struct adapter *adapter,
int phy_addr, struct cphy_ops *phy_ops,
const struct mdio_ops *mdio_ops)
{
phy->adapter = adapter;
phy->addr = phy_addr;
phy->ops = phy_ops;
if (mdio_ops) {
phy->mdio_read = mdio_ops->read;
phy->mdio_write = mdio_ops->write;
}
}
/* Accumulate MAC statistics every 180 seconds. For 1G we multiply by 10. */
#define MAC_STATS_ACCUM_SECS 180
#define XGM_REG(reg_addr, idx) \
((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR))
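/*
 * Example (derived from the macro above, for illustration): XGM_REG(reg, 0)
 * leaves the address unchanged, while XGM_REG(A_XGM_SERDES_STAT0, 1)
 * addresses the same register in the second XGMAC instance by adding the
 * distance between the two XGMAC register blocks.
 */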
struct addr_val_pair {
unsigned int reg_addr;
unsigned int val;
};
#include "adapter.h"
#ifndef PCI_VENDOR_ID_CHELSIO
# define PCI_VENDOR_ID_CHELSIO 0x1425
#endif
#define for_each_port(adapter, iter) \
for (iter = 0; iter < (adapter)->params.nports; ++iter)
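/*
 * Example (illustrative): walking every port of an adapter, e.g. to enable
 * per-port interrupts with the helpers declared later in this header:
 *
 *	int i;
 *
 *	for_each_port(adapter, i)
 *		t3_port_intr_enable(adapter, i);
 */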
#define adapter_info(adap) ((adap)->params.info)
static inline int uses_xaui(const struct adapter *adap)
{
return adapter_info(adap)->caps & SUPPORTED_AUI;
}
static inline int is_10G(const struct adapter *adap)
{
return adapter_info(adap)->caps & SUPPORTED_10000baseT_Full;
}
static inline int is_offload(const struct adapter *adap)
{
return adapter_info(adap)->caps & SUPPORTED_OFFLOAD;
}
static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
return adap->params.vpd.cclk / 1000;
}
static inline unsigned int is_pcie(const struct adapter *adap)
{
return adap->params.pci.variant == PCI_VARIANT_PCIE;
}
void t3_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
u32 val);
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
int n, unsigned int offset);
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
int polarity, int attempts, int delay, u32 *valp);
static inline int t3_wait_op_done(struct adapter *adapter, int reg, u32 mask,
int polarity, int attempts, int delay)
{
return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts,
delay, NULL);
}
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
unsigned int set);
int t3_phy_reset(struct cphy *phy, int mmd, int wait);
int t3_phy_advertise(struct cphy *phy, unsigned int advert);
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex);
void t3_intr_enable(struct adapter *adapter);
void t3_intr_disable(struct adapter *adapter);
void t3_intr_clear(struct adapter *adapter);
void t3_port_intr_enable(struct adapter *adapter, int idx);
void t3_port_intr_disable(struct adapter *adapter, int idx);
void t3_port_intr_clear(struct adapter *adapter, int idx);
int t3_slow_intr_handler(struct adapter *adapter);
int t3_phy_intr_handler(struct adapter *adapter);
void t3_link_changed(struct adapter *adapter, int port_id);
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
int t3_seeprom_wp(struct adapter *adapter, int enable);
int t3_read_flash(struct adapter *adapter, unsigned int addr,
unsigned int nwords, u32 *data, int byte_oriented);
int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size);
int t3_get_fw_version(struct adapter *adapter, u32 *vers);
int t3_check_fw_version(struct adapter *adapter);
int t3_init_hw(struct adapter *adapter, u32 fw_params);
void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
int reset);
void t3_led_ready(struct adapter *adapter);
void t3_fatal_err(struct adapter *adapter);
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
const u8 * cpus, const u16 *rspq);
int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map);
int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask);
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
unsigned int n, unsigned int *valp);
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
u64 *buf);
int t3_mac_reset(struct cmac *mac);
void t3b_pcs_reset(struct cmac *mac);
int t3_mac_enable(struct cmac *mac, int which);
int t3_mac_disable(struct cmac *mac, int which);
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm);
int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
int t3_mac_set_num_ucast(struct cmac *mac, int n);
const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode);
int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
unsigned int nroutes);
void t3_mc5_intr_handler(struct mc5 *mc5);
int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start, unsigned int n,
u32 *buf);
int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh);
void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size);
void t3_tp_set_offload_mode(struct adapter *adap, int enable);
void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps);
void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
unsigned short alpha[NCCTRL_WIN],
unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap);
void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS]);
void t3_get_cong_cntl_tab(struct adapter *adap,
unsigned short incr[NMTUS][NCCTRL_WIN]);
void t3_config_trace_filter(struct adapter *adapter,
const struct trace_params *tp, int filter_index,
int invert, int enable);
int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched);
void t3_sge_prep(struct adapter *adap, struct sge_params *p);
void t3_sge_init(struct adapter *adap, struct sge_params *p);
int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
enum sge_context_type type, int respq, u64 base_addr,
unsigned int size, unsigned int token, int gen,
unsigned int cidx);
int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
int gts_enable, u64 base_addr, unsigned int size,
unsigned int esize, unsigned int cong_thres, int gen,
unsigned int cidx);
int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
int irq_vec_idx, u64 base_addr, unsigned int size,
unsigned int fl_thres, int gen, unsigned int cidx);
int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
unsigned int size, int rspq, int ovfl_mode,
unsigned int credits, unsigned int credit_thres);
int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable);
int t3_sge_disable_fl(struct adapter *adapter, unsigned int id);
int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id);
int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id);
int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4]);
int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4]);
int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4]);
int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4]);
int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
unsigned int credits);
void t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops);
void t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops);
void t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops);
void t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
const struct mdio_ops *mdio_ops);
void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops);
#endif /* __CHELSIO_COMMON_H */
/*
* Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
* release for licensing terms and conditions.
*/
#ifndef _CXGB3_OFFLOAD_CTL_DEFS_H
#define _CXGB3_OFFLOAD_CTL_DEFS_H
enum {
GET_MAX_OUTSTANDING_WR,
GET_TX_MAX_CHUNK,
GET_TID_RANGE,
GET_STID_RANGE,
GET_RTBL_RANGE,
GET_L2T_CAPACITY,
GET_MTUS,
GET_WR_LEN,
GET_IFF_FROM_MAC,
GET_DDP_PARAMS,
GET_PORTS,
ULP_ISCSI_GET_PARAMS,
ULP_ISCSI_SET_PARAMS,
RDMA_GET_PARAMS,
RDMA_CQ_OP,
RDMA_CQ_SETUP,
RDMA_CQ_DISABLE,
RDMA_CTRL_QP_SETUP,
RDMA_GET_MEM,
};
/*
* Structure used to describe a TID range. Valid TIDs are [base, base+num).
*/
struct tid_range {
unsigned int base; /* first TID */
unsigned int num; /* number of TIDs in range */
};
/*
* Structure used to request the size and contents of the MTU table.
*/
struct mtutab {
unsigned int size; /* # of entries in the MTU table */
const unsigned short *mtus; /* the MTU table values */
};
struct net_device;
/*
* Structure used to request the adapter net_device owning a given MAC address.
*/
struct iff_mac {
struct net_device *dev; /* the net_device */
const unsigned char *mac_addr; /* MAC address to lookup */
u16 vlan_tag;
};
struct pci_dev;
/*
* Structure used to request the TCP DDP parameters.
*/
struct ddp_params {
unsigned int llimit; /* TDDP region start address */
unsigned int ulimit; /* TDDP region end address */
unsigned int tag_mask; /* TDDP tag mask */
struct pci_dev *pdev;
};
struct adap_ports {
unsigned int nports; /* number of ports on this adapter */
struct net_device *lldevs[2];
};
/*
* Structure used to return information to the iscsi layer.
*/
struct ulp_iscsi_info {
unsigned int offset;
unsigned int llimit;
unsigned int ulimit;
unsigned int tagmask;
unsigned int pgsz3;
unsigned int pgsz2;
unsigned int pgsz1;
unsigned int pgsz0;
unsigned int max_rxsz;
unsigned int max_txsz;
struct pci_dev *pdev;
};
/*
* Structure used to return information to the RDMA layer.
*/
struct rdma_info {
unsigned int tpt_base; /* TPT base address */
unsigned int tpt_top; /* TPT last entry address */
unsigned int pbl_base; /* PBL base address */
unsigned int pbl_top; /* PBL last entry address */
unsigned int rqt_base; /* RQT base address */
unsigned int rqt_top; /* RQT last entry address */
unsigned int udbell_len; /* user doorbell region length */
unsigned long udbell_physbase; /* user doorbell physical start addr */
void __iomem *kdb_addr; /* kernel doorbell register address */
struct pci_dev *pdev; /* associated PCI device */
};
/*
* Structure used to request an operation on an RDMA completion queue.
*/
struct rdma_cq_op {
unsigned int id;
unsigned int op;
unsigned int credits;
};
/*
* Structure used to setup RDMA completion queues.
*/
struct rdma_cq_setup {
unsigned int id;
unsigned long long base_addr;
unsigned int size;
unsigned int credits;
unsigned int credit_thres;
unsigned int ovfl_mode;
};
/*
* Structure used to setup the RDMA control egress context.
*/
struct rdma_ctrlqp_setup {
unsigned long long base_addr;
unsigned int size;
};
#endif /* _CXGB3_OFFLOAD_CTL_DEFS_H */
/*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
* Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _CHELSIO_DEFS_H
#define _CHELSIO_DEFS_H
#include <linux/skbuff.h>
#include <net/tcp.h>
#include "t3cdev.h"
#include "cxgb3_offload.h"
#define VALIDATE_TID 1
void *cxgb_alloc_mem(unsigned long size);
void cxgb_free_mem(void *addr);
void cxgb_neigh_update(struct neighbour *neigh);
void cxgb_redirect(struct dst_entry *old, struct dst_entry *new);
/*
* Map an ATID or STID to their entries in the corresponding TID tables.
*/
static inline union active_open_entry *atid2entry(const struct tid_info *t,
unsigned int atid)
{
return &t->atid_tab[atid - t->atid_base];
}
static inline union listen_entry *stid2entry(const struct tid_info *t,
unsigned int stid)
{
return &t->stid_tab[stid - t->stid_base];
}
/*
* Find the connection corresponding to a TID.
*/
static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
unsigned int tid)
{
return tid < t->ntids ? &(t->tid_tab[tid]) : NULL;
}
/*
* Find the connection corresponding to a server TID.
*/
static inline struct t3c_tid_entry *lookup_stid(const struct tid_info *t,
unsigned int tid)
{
if (tid < t->stid_base || tid >= t->stid_base + t->nstids)
return NULL;
return &(stid2entry(t, tid)->t3c_tid);
}
/*
* Find the connection corresponding to an active-open TID.
*/
static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t,
unsigned int tid)
{
if (tid < t->atid_base || tid >= t->atid_base + t->natids)
return NULL;
return &(atid2entry(t, tid)->t3c_tid);
}
int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n);
int attach_t3cdev(struct t3cdev *dev);
void detach_t3cdev(struct t3cdev *dev);
#endif
/*
* This file is part of the Chelsio T3 Ethernet driver for Linux.
*
* Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
* release for licensing terms and conditions.
*/
#ifndef __CHIOCTL_H__
#define __CHIOCTL_H__
/*
* Ioctl commands specific to this driver.
*/
enum {
CHELSIO_SETREG = 1024,
CHELSIO_GETREG,
CHELSIO_SETTPI,
CHELSIO_GETTPI,
CHELSIO_GETMTUTAB,
CHELSIO_SETMTUTAB,
CHELSIO_GETMTU,
CHELSIO_SET_PM,
CHELSIO_GET_PM,
CHELSIO_GET_TCAM,
CHELSIO_SET_TCAM,
CHELSIO_GET_TCB,
CHELSIO_GET_MEM,
CHELSIO_LOAD_FW,
CHELSIO_GET_PROTO,
CHELSIO_SET_PROTO,
CHELSIO_SET_TRACE_FILTER,
CHELSIO_SET_QSET_PARAMS,
CHELSIO_GET_QSET_PARAMS,
CHELSIO_SET_QSET_NUM,
CHELSIO_GET_QSET_NUM,
CHELSIO_SET_PKTSCHED,
};
struct ch_reg {
uint32_t cmd;
uint32_t addr;
uint32_t val;
};
struct ch_cntxt {
uint32_t cmd;
uint32_t cntxt_type;
uint32_t cntxt_id;
uint32_t data[4];
};
/* context types */
enum { CNTXT_TYPE_EGRESS, CNTXT_TYPE_FL, CNTXT_TYPE_RSP, CNTXT_TYPE_CQ };
struct ch_desc {
uint32_t cmd;
uint32_t queue_num;
uint32_t idx;
uint32_t size;
uint8_t data[128];
};
struct ch_mem_range {
uint32_t cmd;
uint32_t mem_id;
uint32_t addr;
uint32_t len;
uint32_t version;
uint8_t buf[0];
};
struct ch_qset_params {
uint32_t cmd;
uint32_t qset_idx;
int32_t txq_size[3];
int32_t rspq_size;
int32_t fl_size[2];
int32_t intr_lat;
int32_t polling;
int32_t cong_thres;
};
struct ch_pktsched_params {
uint32_t cmd;
uint8_t sched;
uint8_t idx;
uint8_t min;
uint8_t max;
uint8_t binding;
};
#ifndef TCB_SIZE
# define TCB_SIZE 128
#endif
/* TCB size in 32-bit words */
#define TCB_WORDS (TCB_SIZE / 4)
enum { MEM_CM, MEM_PMRX, MEM_PMTX }; /* ch_mem_range.mem_id values */
struct ch_mtus {
uint32_t cmd;
uint32_t nmtus;
uint16_t mtus[NMTUS];
};
struct ch_pm {
uint32_t cmd;
uint32_t tx_pg_sz;
uint32_t tx_num_pg;
uint32_t rx_pg_sz;
uint32_t rx_num_pg;
uint32_t pm_total;
};
struct ch_tcam {
uint32_t cmd;
uint32_t tcam_size;
uint32_t nservers;
uint32_t nroutes;
uint32_t nfilters;
};
struct ch_tcb {
uint32_t cmd;
uint32_t tcb_index;
uint32_t tcb_data[TCB_WORDS];
};
struct ch_tcam_word {
uint32_t cmd;
uint32_t addr;
uint32_t buf[3];
};
struct ch_trace {
uint32_t cmd;
uint32_t sip;
uint32_t sip_mask;
uint32_t dip;
uint32_t dip_mask;
uint16_t sport;
uint16_t sport_mask;
uint16_t dport;
uint16_t dport_mask;
uint32_t vlan:12;
uint32_t vlan_mask:12;
uint32_t intf:4;
uint32_t intf_mask:4;
uint8_t proto;
uint8_t proto_mask;
uint8_t invert_match:1;
uint8_t config_tx:1;
uint8_t config_rx:1;
uint8_t trace_tx:1;
uint8_t trace_rx:1;
};
#define SIOCCHIOCTL SIOCDEVPRIVATE
#endif
/*
* This file is part of the Chelsio T3 Ethernet driver for Linux.
*
* Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
* release for licensing terms and conditions.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <asm/uaccess.h>
#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"
#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
enum {
MAX_TXQ_ENTRIES = 16384,
MAX_CTRL_TXQ_ENTRIES = 1024,
MAX_RSPQ_ENTRIES = 16384,
MAX_RX_BUFFERS = 16384,
MAX_RX_JUMBO_BUFFERS = 16384,
MIN_TXQ_ENTRIES = 4,
MIN_CTRL_TXQ_ENTRIES = 4,
MIN_RSPQ_ENTRIES = 32,
MIN_FL_ENTRIES = 32
};
#define PORT_MASK ((1 << MAX_NPORTS) - 1)
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
#define EEPROM_MAGIC 0x38E2F10C
#define to_net_dev(class) container_of(class, struct net_device, class_dev)
#define CH_DEVICE(devid, ssid, idx) \
{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
static const struct pci_device_id cxgb3_pci_tbl[] = {
CH_DEVICE(0x20, 1, 0), /* PE9000 */
CH_DEVICE(0x21, 1, 1), /* T302E */
CH_DEVICE(0x22, 1, 2), /* T310E */
CH_DEVICE(0x23, 1, 3), /* T320X */
CH_DEVICE(0x24, 1, 1), /* T302X */
CH_DEVICE(0x25, 1, 3), /* T320E */
CH_DEVICE(0x26, 1, 2), /* T310X */
CH_DEVICE(0x30, 1, 2), /* T3B10 */
CH_DEVICE(0x31, 1, 3), /* T3B20 */
CH_DEVICE(0x32, 1, 1), /* T3B02 */
{0,}
};
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
static int dflt_msg_enable = DFLT_MSG_ENABLE;
module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
/*
* The driver uses the best interrupt scheme available on a platform in the
* order MSI-X, MSI, legacy pin interrupts. This parameter determines which
* of these schemes the driver may consider as follows:
*
* msi = 2: choose from among all three options
* msi = 1: only consider MSI and pin interrupts
* msi = 0: force pin interrupts
*/
static int msi = 2;
module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
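/*
 * Example (illustrative): to rule out MSI-X and let the driver choose
 * between MSI and legacy pin interrupts, load the module with
 *
 *	modprobe cxgb3 msi=1
 *
 * The 0644 permissions above also expose the value read/write at
 * /sys/module/cxgb3/parameters/msi.
 */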
/*
* The driver enables offload by default.
* To disable it, set ofld_disable = 1.
*/
static int ofld_disable = 0;
module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
/*
* We have work elements that we need to cancel when an interface is taken
* down. Normally the work elements would be executed by keventd but that
* can deadlock because of linkwatch. If our close method takes the rtnl
* lock and linkwatch is ahead of our work elements in keventd, linkwatch
* will block keventd as it needs the rtnl lock, and we'll deadlock waiting
* for our work to complete. Get our own work queue to solve this.
*/
static struct workqueue_struct *cxgb3_wq;
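/*
 * A minimal sketch of how a private workqueue like this is typically set up
 * and used (the actual allocation and teardown live in the module init/exit
 * paths; the work item queued here is just an example taken from struct
 * adapter above):
 *
 *	cxgb3_wq = create_singlethread_workqueue("cxgb3");
 *	if (!cxgb3_wq)
 *		return -ENOMEM;
 *
 *	queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
 *
 *	destroy_workqueue(cxgb3_wq);
 */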
/**
* link_report - show link status and link speed/duplex
* @dev: the net device whose port settings are to be reported
*
* Shows the link status, speed, and duplex of a port.
*/
static void link_report(struct net_device *dev)
{
if (!netif_carrier_ok(dev))
printk(KERN_INFO "%s: link down\n", dev->name);
else {
const char *s = "10Mbps";
const struct port_info *p = netdev_priv(dev);
switch (p->link_config.speed) {
case SPEED_10000:
s = "10Gbps";
break;
case SPEED_1000:
s = "1000Mbps";
break;
case SPEED_100:
s = "100Mbps";
break;
}
printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
}
}
/**
* t3_os_link_changed - handle link status changes
* @adapter: the adapter associated with the link change
* @port_id: the port index whose link status has changed
* @link_stat: the new status of the link
* @speed: the new speed setting
* @duplex: the new duplex setting
* @pause: the new flow-control setting
*
* This is the OS-dependent handler for link status changes. The OS
* neutral handler takes care of most of the processing for these events,
* then calls this handler for any OS-specific processing.
*/
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
int speed, int duplex, int pause)
{
struct net_device *dev = adapter->port[port_id];
/* Skip changes from disabled ports. */
if (!netif_running(dev))
return;
if (link_stat != netif_carrier_ok(dev)) {
if (link_stat)
netif_carrier_on(dev);
else
netif_carrier_off(dev);
link_report(dev);
}
}
static void cxgb_set_rxmode(struct net_device *dev)
{
struct t3_rx_mode rm;
struct port_info *pi = netdev_priv(dev);
init_rx_mode(&rm, dev, dev->mc_list);
t3_mac_set_rx_mode(&pi->mac, &rm);
}
/**
* link_start - enable a port
* @dev: the device to enable
*
* Performs the MAC and PHY actions needed to enable a port.
*/
static void link_start(struct net_device *dev)
{
struct t3_rx_mode rm;
struct port_info *pi = netdev_priv(dev);
struct cmac *mac = &pi->mac;
init_rx_mode(&rm, dev, dev->mc_list);
t3_mac_reset(mac);
t3_mac_set_mtu(mac, dev->mtu);
t3_mac_set_address(mac, 0, dev->dev_addr);
t3_mac_set_rx_mode(mac, &rm);
t3_link_start(&pi->phy, mac, &pi->link_config);
t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
static inline void cxgb_disable_msi(struct adapter *adapter)
{
if (adapter->flags & USING_MSIX) {
pci_disable_msix(adapter->pdev);
adapter->flags &= ~USING_MSIX;
} else if (adapter->flags & USING_MSI) {
pci_disable_msi(adapter->pdev);
adapter->flags &= ~USING_MSI;
}
}
/*
* Interrupt handler for asynchronous events used with MSI-X.
*/
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
t3_slow_intr_handler(cookie);
return IRQ_HANDLED;
}
/*
* Name the MSI-X interrupts.
*/
static void name_msix_vecs(struct adapter *adap)
{
int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
adap->msix_info[0].desc[n] = 0;
for_each_port(adap, j) {
struct net_device *d = adap->port[j];
const struct port_info *pi = netdev_priv(d);
for (i = 0; i < pi->nqsets; i++, msi_idx++) {
snprintf(adap->msix_info[msi_idx].desc, n,
"%s (queue %d)", d->name, i);
adap->msix_info[msi_idx].desc[n] = 0;
}
}
}
static int request_msix_data_irqs(struct adapter *adap)
{
int i, j, err, qidx = 0;
for_each_port(adap, i) {
int nqsets = adap2pinfo(adap, i)->nqsets;
for (j = 0; j < nqsets; ++j) {
err = request_irq(adap->msix_info[qidx + 1].vec,
t3_intr_handler(adap,
adap->sge.qs[qidx].
rspq.polling), 0,
adap->msix_info[qidx + 1].desc,
&adap->sge.qs[qidx]);
if (err) {
while (--qidx >= 0)
free_irq(adap->msix_info[qidx + 1].vec,
&adap->sge.qs[qidx]);
return err;
}
qidx++;
}
}
return 0;
}
/**
* setup_rss - configure RSS
* @adap: the adapter
*
* Sets up RSS to distribute packets to multiple receive queues. We
* configure the RSS CPU lookup table to distribute to the number of HW
* receive queues, and the response queue lookup table to narrow that
* down to the response queues actually configured for each port.
* We always configure the RSS mapping for two ports since the mapping
* table has plenty of entries.
*/
static void setup_rss(struct adapter *adap)
{
int i;
unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
u8 cpus[SGE_QSETS + 1];
u16 rspq_map[RSS_TABLE_SIZE];
for (i = 0; i < SGE_QSETS; ++i)
cpus[i] = i;
cpus[SGE_QSETS] = 0xff; /* terminator */
for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
rspq_map[i] = i % nq0;
rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
}
t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
V_RRCPLCPUSIZE(6), cpus, rspq_map);
}
/*
* If we have multiple receive queues per port serviced by NAPI we need one
* netdevice per queue as NAPI operates on netdevices. We already have one
* netdevice, namely the one associated with the interface, so we use dummy
* ones for any additional queues. Note that these netdevices exist purely
* so that NAPI has something to work with; they do not represent network
* ports and are not registered.
*/
static int init_dummy_netdevs(struct adapter *adap)
{
int i, j, dummy_idx = 0;
struct net_device *nd;
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
const struct port_info *pi = netdev_priv(dev);
for (j = 0; j < pi->nqsets - 1; j++) {
if (!adap->dummy_netdev[dummy_idx]) {
nd = alloc_netdev(0, "", ether_setup);
if (!nd)
goto free_all;
nd->priv = adap;
nd->weight = 64;
set_bit(__LINK_STATE_START, &nd->state);
adap->dummy_netdev[dummy_idx] = nd;
}
strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
dummy_idx++;
}
}
return 0;
free_all:
while (--dummy_idx >= 0) {
free_netdev(adap->dummy_netdev[dummy_idx]);
adap->dummy_netdev[dummy_idx] = NULL;
}
return -ENOMEM;
}
/*
* Wait until all NAPI handlers are descheduled. This includes the handlers of
* both netdevices representing interfaces and the dummy ones for the extra
* queues.
*/
static void quiesce_rx(struct adapter *adap)
{
int i;
struct net_device *dev;
for_each_port(adap, i) {
dev = adap->port[i];
while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
msleep(1);
}
for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
dev = adap->dummy_netdev[i];
if (dev)
while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
msleep(1);
}
}
/**
* setup_sge_qsets - configure SGE Tx/Rx/response queues
* @adap: the adapter
*
* Determines how many sets of SGE queues to use and initializes them.
* We support multiple queue sets per port if we have MSI-X, otherwise
* just one queue set per port.
*/
static int setup_sge_qsets(struct adapter *adap)
{
int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;
if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
irq_idx = -1;
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
const struct port_info *pi = netdev_priv(dev);
for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
err = t3_sge_alloc_qset(adap, qset_idx, 1,
(adap->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
&adap->params.sge.qset[qset_idx], ntxq,
j == 0 ? dev : adap->dummy_netdev[dummy_dev_idx++]);
if (err) {
t3_free_sge_resources(adap);
return err;
}
}
}
return 0;
}
static ssize_t attr_show(struct class_device *cd, char *buf,
ssize_t(*format) (struct adapter *, char *))
{
ssize_t len;
struct adapter *adap = to_net_dev(cd)->priv;
/* Synchronize with ioctls that may shut down the device */
rtnl_lock();
len = (*format) (adap, buf);
rtnl_unlock();
return len;
}
static ssize_t attr_store(struct class_device *cd, const char *buf, size_t len,
ssize_t(*set) (struct adapter *, unsigned int),
unsigned int min_val, unsigned int max_val)
{
char *endp;
ssize_t ret;
unsigned int val;
struct adapter *adap = to_net_dev(cd)->priv;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
val = simple_strtoul(buf, &endp, 0);
if (endp == buf || val < min_val || val > max_val)
return -EINVAL;
rtnl_lock();
ret = (*set) (adap, val);
if (!ret)
ret = len;
rtnl_unlock();
return ret;
}
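/*
 * The CXGB3_SHOW/CXGB3_ATTR_* macros and the setters below expose the MC5
 * (TCAM) partitioning through sysfs: total CAM size plus the number of
 * entries reserved for filters and for server (listening) TIDs. The
 * nfilters/nservers setters reject changes once the adapter is fully
 * initialized and keep both regions within the CAM capacity.
 */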
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct adapter *adap, char *buf) \
{ \
return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct class_device *cd, char *buf) \
{ \
return attr_show(cd, buf, format_##name); \
}
static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
{
if (adap->flags & FULL_INIT_DONE)
return -EBUSY;
if (val && adap->params.rev == 0)
return -EINVAL;
if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
return -EINVAL;
adap->params.mc5.nfilters = val;
return 0;
}
static ssize_t store_nfilters(struct class_device *cd, const char *buf,
size_t len)
{
return attr_store(cd, buf, len, set_nfilters, 0, ~0);
}
static ssize_t set_nservers(struct adapter *adap, unsigned int val)
{
if (adap->flags & FULL_INIT_DONE)
return -EBUSY;
if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
return -EINVAL;
adap->params.mc5.nservers = val;
return 0;
}
static ssize_t store_nservers(struct class_device *cd, const char *buf,
size_t len)
{
return attr_store(cd, buf, len, set_nservers, 0, ~0);
}
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
static struct attribute *cxgb3_attrs[] = {
&class_device_attr_cam_size.attr,
&class_device_attr_nfilters.attr,
&class_device_attr_nservers.attr,
NULL
};
static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
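/*
 * sysfs attributes for the Tx traffic schedulers. A scheduler's rate is read
 * back from the TP traffic-manager PIO registers as a bytes-per-tick and
 * clocks-per-tick pair and reported in Kbps; a zero clock count means the
 * scheduler is disabled. Stores pass the requested rate to t3_config_sched().
 */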
static ssize_t tm_attr_show(struct class_device *cd, char *buf, int sched)
{
ssize_t len;
unsigned int v, addr, bpt, cpt;
struct adapter *adap = to_net_dev(cd)->priv;
addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
rtnl_lock();
t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
if (sched & 1)
v >>= 16;
bpt = (v >> 8) & 0xff;
cpt = v & 0xff;
if (!cpt)
len = sprintf(buf, "disabled\n");
else {
v = (adap->params.vpd.cclk * 1000) / cpt;
len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
}
rtnl_unlock();
return len;
}
static ssize_t tm_attr_store(struct class_device *cd, const char *buf,
size_t len, int sched)
{
char *endp;
ssize_t ret;
unsigned int val;
struct adapter *adap = to_net_dev(cd)->priv;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
val = simple_strtoul(buf, &endp, 0);
if (endp == buf || val > 10000000)
return -EINVAL;
rtnl_lock();
ret = t3_config_sched(adap, val, sched);
if (!ret)
ret = len;
rtnl_unlock();
return ret;
}
#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct class_device *cd, char *buf) \
{ \
return tm_attr_show(cd, buf, sched); \
} \
static ssize_t store_##name(struct class_device *cd, const char *buf, size_t len) \
{ \
return tm_attr_store(cd, buf, len, sched); \
} \
static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);
static struct attribute *offload_attrs[] = {
&class_device_attr_sched0.attr,
&class_device_attr_sched1.attr,
&class_device_attr_sched2.attr,
&class_device_attr_sched3.attr,
&class_device_attr_sched4.attr,
&class_device_attr_sched5.attr,
&class_device_attr_sched6.attr,
&class_device_attr_sched7.attr,
NULL
};
static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
/*
* Sends an sk_buff to an offload queue driver
* after dealing with any active network taps.
*/
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
int ret;
local_bh_disable();
ret = t3_offload_tx(tdev, skb);
local_bh_enable();
return ret;
}
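/*
 * Program the source MAC table (SMT) entry for the given port by sending a
 * CPL_SMT_WRITE_REQ through the offload Tx path.
 */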
static int write_smt_entry(struct adapter *adapter, int idx)
{
struct cpl_smt_write_req *req;
struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
if (!skb)
return -ENOMEM;
req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
req->iff = idx;
memset(req->src_mac1, 0, sizeof(req->src_mac1));
memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
skb->priority = 1;
offload_tx(&adapter->tdev, skb);
return 0;
}
static int init_smt(struct adapter *adapter)
{
int i;
for_each_port(adapter, i)
write_smt_entry(adapter, i);
return 0;
}
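/*
 * Write the per-port MTUs into the TP MTU port table, with port 1's MTU in
 * the upper 16 bits of the register.
 */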
static void init_port_mtus(struct adapter *adapter)
{
unsigned int mtus = adapter->port[0]->mtu;
if (adapter->port[1])
mtus |= adapter->port[1]->mtu << 16;
t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
/**
* cxgb_up - enable the adapter
* @adapter: adapter being enabled
*
* Called when the first port is enabled, this function performs the
* actions necessary to make an adapter operational, such as completing
* the initialization of HW modules, and enabling interrupts.
*
* Must be called with the rtnl lock held.
*/
static int cxgb_up(struct adapter *adap)
{
int err = 0;
if (!(adap->flags & FULL_INIT_DONE)) {
err = t3_check_fw_version(adap);
if (err) {
dev_err(&adap->pdev->dev,
"adapter FW is not compatible with driver\n");
goto out;
}
err = init_dummy_netdevs(adap);
if (err)
goto out;
err = t3_init_hw(adap, 0);
if (err)
goto out;
err = setup_sge_qsets(adap);
if (err)
goto out;
setup_rss(adap);
adap->flags |= FULL_INIT_DONE;
}
t3_intr_clear(adap);
if (adap->flags & USING_MSIX) {
name_msix_vecs(adap);
err = request_irq(adap->msix_info[0].vec,
t3_async_intr_handler, 0,
adap->msix_info[0].desc, adap);
if (err)
goto irq_err;
if (request_msix_data_irqs(adap)) {
free_irq(adap->msix_info[0].vec, adap);
goto irq_err;
}
} else if ((err = request_irq(adap->pdev->irq,
t3_intr_handler(adap,
adap->sge.qs[0].rspq.
polling),
(adap->flags & USING_MSI) ? 0 : SA_SHIRQ,
adap->name, adap)))
goto irq_err;
t3_sge_start(adap);
t3_intr_enable(adap);
out:
return err;
irq_err:
CH_ERR(adap, "request_irq failed, err %d\n", err);
goto out;
}
/*
* Release resources when all the ports and offloading have been stopped.
*/
static void cxgb_down(struct adapter *adapter)
{
t3_sge_stop(adapter);
spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
t3_intr_disable(adapter);
spin_unlock_irq(&adapter->work_lock);
if (adapter->flags & USING_MSIX) {
int i, n = 0;
free_irq(adapter->msix_info[0].vec, adapter);
for_each_port(adapter, i)
n += adap2pinfo(adapter, i)->nqsets;
for (i = 0; i < n; ++i)
free_irq(adapter->msix_info[i + 1].vec,
&adapter->sge.qs[i]);
} else
free_irq(adapter->pdev->irq, adapter);
flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
quiesce_rx(adapter);
}
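/*
 * Schedule the periodic adapter check task. The interval comes from the
 * link-poll period if one is set, otherwise from the MAC statistics update
 * period.
 */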
static void schedule_chk_task(struct adapter *adap)
{
unsigned int timeo;
timeo = adap->params.linkpoll_period ?
(HZ * adap->params.linkpoll_period) / 10 :
adap->params.stats_update_period * HZ;
if (timeo)
queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
static int offload_open(struct net_device *dev)
{
struct adapter *adapter = dev->priv;
struct t3cdev *tdev = T3CDEV(dev);
int adap_up = adapter->open_device_map & PORT_MASK;
int err = 0;
if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
return 0;
if (!adap_up && (err = cxgb_up(adapter)) < 0)
return err;
t3_tp_set_offload_mode(adapter, 1);
tdev->lldev = adapter->port[0];
err = cxgb3_offload_activate(adapter);
if (err)
goto out;
init_port_mtus(adapter);
t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
adapter->params.b_wnd,
adapter->params.rev == 0 ?
adapter->port[0]->mtu : 0xffff);
init_smt(adapter);
/* Never mind if the next step fails */
sysfs_create_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);
/* Call back all registered clients */
cxgb3_add_clients(tdev);
out:
/* restore them in case the offload module has changed them */
if (err) {
t3_tp_set_offload_mode(adapter, 0);
clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
cxgb3_set_dummy_ops(tdev);
}
return err;
}
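/*
 * Undo offload_open(): notify registered clients, remove the offload sysfs
 * group, turn off offload mode, and bring the adapter down if no ports
 * remain open.
 */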
static int offload_close(struct t3cdev *tdev)
{
struct adapter *adapter = tdev2adap(tdev);
if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
return 0;
/* Call back all registered clients */
cxgb3_remove_clients(tdev);
sysfs_remove_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);
tdev->lldev = NULL;
cxgb3_set_dummy_ops(tdev);
t3_tp_set_offload_mode(adapter, 0);
clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
if (!adapter->open_device_map)
cxgb_down(adapter);
cxgb3_offload_deactivate(adapter);
return 0;
}
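/*
 * net_device open handler: bring the adapter up on the first open, mark the
 * port open, optionally enable offload, start the link and the Tx queue, and
 * kick off the periodic check task when this is the first active port.
 */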
static int cxgb_open(struct net_device *dev)
{
int err;
struct adapter *adapter = dev->priv;
struct port_info *pi = netdev_priv(dev);
int other_ports = adapter->open_device_map & PORT_MASK;
if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
return err;
set_bit(pi->port_id, &adapter->open_device_map);
if (!ofld_disable) {
err = offload_open(dev);
if (err)
printk(KERN_WARNING
"Could not initialize offload capabilities\n");
}
link_start(dev);
t3_port_intr_enable(adapter, pi->port_id);
netif_start_queue(dev);
if (!other_ports)
schedule_chk_task(adapter);
return 0;
}
static int cxgb_close(struct net_device *dev)
{
struct adapter *adapter = dev->priv;
struct port_info *p = netdev_priv(dev);
t3_port_intr_disable(adapter, p->port_id);
netif_stop_queue(dev);
p->phy.ops->power_down(&p->phy, 1);
netif_carrier_off(dev);
t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
spin_lock(&adapter->work_lock); /* sync with update task */
clear_bit(p->port_id, &adapter->open_device_map);
spin_unlock(&adapter->work_lock);
if (!(adapter->open_device_map & PORT_MASK))
cancel_rearming_delayed_workqueue(cxgb3_wq,
&adapter->adap_check_task);
if (!adapter->open_device_map)
cxgb_down(adapter);
return 0;
}
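/* Fold the accumulated MAC statistics into the standard net_device_stats. */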
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
struct adapter *adapter = dev->priv;
struct port_info *p = netdev_priv(dev);
struct net_device_stats *ns = &p->netstats;
const struct mac_stats *pstats;
spin_lock(&adapter->stats_lock);
pstats = t3_mac_update_stats(&p->mac);
spin_unlock(&adapter->stats_lock);
ns->tx_bytes = pstats->tx_octets;
ns->tx_packets = pstats->tx_frames;
ns->rx_bytes = pstats->rx_octets;
ns->rx_packets = pstats->rx_frames;
ns->multicast = pstats->rx_mcast_frames;
ns->tx_errors = pstats->tx_underrun;
ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
pstats->rx_fifo_ovfl;
/* detailed rx_errors */
ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
ns->rx_over_errors = 0;
ns->rx_crc_errors = pstats->rx_fcs_errs;
ns->rx_frame_errors = pstats->rx_symbol_errs;
ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
ns->rx_missed_errors = pstats->rx_cong_drops;
/* detailed tx_errors */
ns->tx_aborted_errors = 0;
ns->tx_carrier_errors = 0;
ns->tx_fifo_errors = pstats->tx_underrun;
ns->tx_heartbeat_errors = 0;
ns->tx_window_errors = 0;
return ns;
}
static u32 get_msglevel(struct net_device *dev)
{
struct adapter *adapter = dev->priv;
return adapter->msg_enable;
}
static void set_msglevel(struct net_device *dev, u32 val)
{
struct adapter *adapter = dev->priv;
adapter->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
"TxOctetsOK ",
"TxFramesOK ",
"TxMulticastFramesOK",
"TxBroadcastFramesOK",
"TxPauseFrames ",
"TxUnderrun ",
"TxExtUnderrun ",
"TxFrames64 ",
"TxFrames65To127 ",
"TxFrames128To255 ",
"TxFrames256To511 ",
"TxFrames512To1023 ",
"TxFrames1024To1518 ",
"TxFrames1519ToMax ",
"RxOctetsOK ",
"RxFramesOK ",
"RxMulticastFramesOK",
"RxBroadcastFramesOK",
"RxPauseFrames ",
"RxFCSErrors ",
"RxSymbolErrors ",
"RxShortErrors ",
"RxJabberErrors ",
"RxLengthErrors ",
"RxFIFOoverflow ",
"RxFrames64 ",
"RxFrames65To127 ",
"RxFrames128To255 ",
"RxFrames256To511 ",
"RxFrames512To1023 ",
"RxFrames1024To1518 ",
"RxFrames1519ToMax ",
"PhyFIFOErrors ",
"TSO ",
"VLANextractions ",
"VLANinsertions ",
"TxCsumOffload ",
"RxCsumGood ",
"RxDrops "
};
static int get_stats_count(struct net_device *dev)
{
return ARRAY_SIZE(stats_strings);
}
#define T3_REGMAP_SIZE (3 * 1024)
static int get_regs_len(struct net_device *dev)
{
return T3_REGMAP_SIZE;
}
static int get_eeprom_len(struct net_device *dev)
{
return EEPROMSIZE;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
u32 fw_vers = 0;
struct adapter *adapter = dev->priv;
t3_get_fw_version(adapter, &fw_vers);
strcpy(info->driver, DRV_NAME);
strcpy(info->version, DRV_VERSION);
strcpy(info->bus_info, pci_name(adapter->pdev));
if (!fw_vers)
strcpy(info->fw_version, "N/A");
else
snprintf(info->fw_version, sizeof(info->fw_version),
"%s %u.%u", (fw_vers >> 24) ? "T" : "N",
(fw_vers >> 12) & 0xfff, fw_vers & 0xfff);
}
static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
if (stringset == ETH_SS_STATS)
memcpy(data, stats_strings, sizeof(stats_strings));
}
static unsigned long collect_sge_port_stats(struct adapter *adapter,
struct port_info *p, int idx)
{
int i;
unsigned long tot = 0;
for (i = 0; i < p->nqsets; ++i)
tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
return tot;
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
u64 *data)
{
struct adapter *adapter = dev->priv;
struct port_info *pi = netdev_priv(dev);
const struct mac_stats *s;
spin_lock(&adapter->stats_lock);
s = t3_mac_update_stats(&pi->mac);
spin_unlock(&adapter->stats_lock);
*data++ = s->tx_octets;
*data++ = s->tx_frames;
*data++ = s->tx_mcast_frames;
*data++ = s->tx_bcast_frames;
*data++ = s->tx_pause;
*data++ = s->tx_underrun;
*data++ = s->tx_fifo_urun;
*data++ = s->tx_frames_64;
*data++ = s->tx_frames_65_127;
*data++ = s->tx_frames_128_255;
*data++ = s->tx_frames_256_511;
*data++ = s->tx_frames_512_1023;
*data++ = s->tx_frames_1024_1518;
*data++ = s->tx_frames_1519_max;
*data++ = s->rx_octets;
*data++ = s->rx_frames;
*data++ = s->rx_mcast_frames;
*data++ = s->rx_bcast_frames;
*data++ = s->rx_pause;
*data++ = s->rx_fcs_errs;
*data++ = s->rx_symbol_errs;
*data++ = s->rx_short;
*data++ = s->rx_jabber;
*data++ = s->rx_too_long;
*data++ = s->rx_fifo_ovfl;
*data++ = s->rx_frames_64;
*data++ = s->rx_frames_65_127;
*data++ = s->rx_frames_128_255;
*data++ = s->rx_frames_256_511;
*data++ = s->rx_frames_512_1023;
*data++ = s->rx_frames_1024_1518;
*data++ = s->rx_frames_1519_max;
*data++ = pi->phy.fifo_errors;
*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
*data++ = s->rx_cong_drops;
}
static inline void reg_block_dump(struct adapter *ap, void *buf,
unsigned int start, unsigned int end)
{
u32 *p = buf + start;
for (; start <= end; start += sizeof(u32))
*p++ = t3_read_reg(ap, start);
}
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *buf)
{
struct adapter *ap = dev->priv;
/*
* Version scheme:
* bits 0..9: chip version
* bits 10..15: chip revision
* bit 31: set for PCIe cards
*/
regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
/*
* We skip the MAC statistics registers because they are clear-on-read.
* Also reading multi-register stats would need to synchronize with the
* periodic mac stats accumulation. Hard to justify the complexity.
*/
memset(buf, 0, T3_REGMAP_SIZE);
reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
XGM_REG(A_XGM_SERDES_STAT3, 1));
reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
static int restart_autoneg(struct net_device *dev)
{
struct port_info *p = netdev_priv(dev);
if (!netif_running(dev))
return -EAGAIN;
if (p->link_config.autoneg != AUTONEG_ENABLE)
return -EINVAL;
p->phy.ops->autoneg_restart(&p->phy);
return 0;
}
static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
int i;
struct adapter *adapter = dev->priv;
if (data == 0)
data = 2;
for (i = 0; i < data * 2; i++) {
t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
(i & 1) ? F_GPIO0_OUT_VAL : 0);
if (msleep_interruptible(500))
break;
}
t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
F_GPIO0_OUT_VAL);
return 0;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct port_info *p = netdev_priv(dev);
cmd->supported = p->link_config.supported;
cmd->advertising = p->link_config.advertising;
if (netif_carrier_ok(dev)) {
cmd->speed = p->link_config.speed;
cmd->duplex = p->link_config.duplex;
} else {
cmd->speed = -1;
cmd->duplex = -1;
}
cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
cmd->phy_address = p->phy.addr;
cmd->transceiver = XCVR_EXTERNAL;
cmd->autoneg = p->link_config.autoneg;
cmd->maxtxpkt = 0;
cmd->maxrxpkt = 0;
return 0;
}
static int speed_duplex_to_caps(int speed, int duplex)
{
int cap = 0;
switch (speed) {
case SPEED_10:
if (duplex == DUPLEX_FULL)
cap = SUPPORTED_10baseT_Full;
else
cap = SUPPORTED_10baseT_Half;
break;
case SPEED_100:
if (duplex == DUPLEX_FULL)
cap = SUPPORTED_100baseT_Full;
else
cap = SUPPORTED_100baseT_Half;
break;
case SPEED_1000:
if (duplex == DUPLEX_FULL)
cap = SUPPORTED_1000baseT_Full;
else
cap = SUPPORTED_1000baseT_Half;
break;
case SPEED_10000:
if (duplex == DUPLEX_FULL)
cap = SUPPORTED_10000baseT_Full;
}
return cap;
}
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
ADVERTISED_10000baseT_Full)
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct port_info *p = netdev_priv(dev);
struct link_config *lc = &p->link_config;
if (!(lc->supported & SUPPORTED_Autoneg))
return -EOPNOTSUPP; /* can't change speed/duplex */
if (cmd->autoneg == AUTONEG_DISABLE) {
int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
return -EINVAL;
lc->requested_speed = cmd->speed;
lc->requested_duplex = cmd->duplex;
lc->advertising = 0;
} else {
cmd->advertising &= ADVERTISED_MASK;
cmd->advertising &= lc->supported;
if (!cmd->advertising)
return -EINVAL;
lc->requested_speed = SPEED_INVALID;
lc->requested_duplex = DUPLEX_INVALID;
lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
}
lc->autoneg = cmd->autoneg;
if (netif_running(dev))
t3_link_start(&p->phy, &p->mac, lc);
return 0;
}
static void get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *epause)
{
struct port_info *p = netdev_priv(dev);
epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}
static int set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *epause)
{
struct port_info *p = netdev_priv(dev);
struct link_config *lc = &p->link_config;
if (epause->autoneg == AUTONEG_DISABLE)
lc->requested_fc = 0;
else if (lc->supported & SUPPORTED_Autoneg)
lc->requested_fc = PAUSE_AUTONEG;
else
return -EINVAL;
if (epause->rx_pause)
lc->requested_fc |= PAUSE_RX;
if (epause->tx_pause)
lc->requested_fc |= PAUSE_TX;
if (lc->autoneg == AUTONEG_ENABLE) {
if (netif_running(dev))
t3_link_start(&p->phy, &p->mac, lc);
} else {
lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
if (netif_running(dev))
t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
}
return 0;
}
static u32 get_rx_csum(struct net_device *dev)
{
struct port_info *p = netdev_priv(dev);
return p->rx_csum_offload;
}
static int set_rx_csum(struct net_device *dev, u32 data)
{
struct port_info *p = netdev_priv(dev);
p->rx_csum_offload = data;
return 0;
}
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
struct adapter *adapter = dev->priv;
e->rx_max_pending = MAX_RX_BUFFERS;
e->rx_mini_max_pending = 0;
e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
e->tx_max_pending = MAX_TXQ_ENTRIES;
e->rx_pending = adapter->params.sge.qset[0].fl_size;
e->rx_mini_pending = adapter->params.sge.qset[0].rspq_size;
e->rx_jumbo_pending = adapter->params.sge.qset[0].jumbo_size;
e->tx_pending = adapter->params.sge.qset[0].txq_size[0];
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
int i;
struct adapter *adapter = dev->priv;
if (e->rx_pending > MAX_RX_BUFFERS ||
e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
e->tx_pending > MAX_TXQ_ENTRIES ||
e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
e->rx_pending < MIN_FL_ENTRIES ||
e->rx_jumbo_pending < MIN_FL_ENTRIES ||
e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
return -EINVAL;
if (adapter->flags & FULL_INIT_DONE)
return -EBUSY;
for (i = 0; i < SGE_QSETS; ++i) {
struct qset_params *q = &adapter->params.sge.qset[i];
q->rspq_size = e->rx_mini_pending;
q->fl_size = e->rx_pending;
q->jumbo_size = e->rx_jumbo_pending;
q->txq_size[0] = e->tx_pending;
q->txq_size[1] = e->tx_pending;
q->txq_size[2] = e->tx_pending;
}
return 0;
}
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
struct adapter *adapter = dev->priv;
struct qset_params *qsp = &adapter->params.sge.qset[0];
struct sge_qset *qs = &adapter->sge.qs[0];
if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
return -EINVAL;
qsp->coalesce_usecs = c->rx_coalesce_usecs;
t3_update_qset_coalesce(qs, qsp);
return 0;
}
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
struct adapter *adapter = dev->priv;
struct qset_params *q = adapter->params.sge.qset;
c->rx_coalesce_usecs = q->coalesce_usecs;
return 0;
}
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
u8 * data)
{
int i, err = 0;
struct adapter *adapter = dev->priv;
u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
e->magic = EEPROM_MAGIC;
for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);
if (!err)
memcpy(data, buf + e->offset, e->len);
kfree(buf);
return err;
}
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
u8 * data)
{
u8 *buf;
int err = 0;
u32 aligned_offset, aligned_len, *p;
struct adapter *adapter = dev->priv;
if (eeprom->magic != EEPROM_MAGIC)
return -EINVAL;
aligned_offset = eeprom->offset & ~3;
aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
buf = kmalloc(aligned_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
if (!err && aligned_len > 4)
err = t3_seeprom_read(adapter,
aligned_offset + aligned_len - 4,
(u32 *) & buf[aligned_len - 4]);
if (err)
goto out;
memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
} else
buf = data;
err = t3_seeprom_wp(adapter, 0);
if (err)
goto out;
for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
err = t3_seeprom_write(adapter, aligned_offset, *p);
aligned_offset += 4;
}
if (!err)
err = t3_seeprom_wp(adapter, 1);
out:
if (buf != data)
kfree(buf);
return err;
}
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
wol->supported = 0;
wol->wolopts = 0;
memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static const struct ethtool_ops cxgb_ethtool_ops = {
.get_settings = get_settings,
.set_settings = set_settings,
.get_drvinfo = get_drvinfo,
.get_msglevel = get_msglevel,
.set_msglevel = set_msglevel,
.get_ringparam = get_sge_param,
.set_ringparam = set_sge_param,
.get_coalesce = get_coalesce,
.set_coalesce = set_coalesce,
.get_eeprom_len = get_eeprom_len,
.get_eeprom = get_eeprom,
.set_eeprom = set_eeprom,
.get_pauseparam = get_pauseparam,
.set_pauseparam = set_pauseparam,
.get_rx_csum = get_rx_csum,
.set_rx_csum = set_rx_csum,
.get_tx_csum = ethtool_op_get_tx_csum,
.set_tx_csum = ethtool_op_set_tx_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_link = ethtool_op_get_link,
.get_strings = get_strings,
.phys_id = cxgb3_phys_id,
.nway_reset = restart_autoneg,
.get_stats_count = get_stats_count,
.get_ethtool_stats = get_stats,
.get_regs_len = get_regs_len,
.get_regs = get_regs,
.get_wol = get_wol,
.get_tso = ethtool_op_get_tso,
.set_tso = ethtool_op_set_tso,
.get_perm_addr = ethtool_op_get_perm_addr
};
static int in_range(int val, int lo, int hi)
{
return val < 0 || (val <= hi && val >= lo);
}
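/*
 * Handler for the driver-private SIOCCHIOCTL extension commands: register
 * access, queue set and MTU table configuration, firmware load, adapter
 * memory reads, trace filters, and packet-scheduler setup.
 */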
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
int ret;
u32 cmd;
struct adapter *adapter = dev->priv;
if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
return -EFAULT;
switch (cmd) {
case CHELSIO_SETREG:{
struct ch_reg edata;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&edata, useraddr, sizeof(edata)))
return -EFAULT;
if ((edata.addr & 3) != 0
|| edata.addr >= adapter->mmio_len)
return -EINVAL;
writel(edata.val, adapter->regs + edata.addr);
break;
}
case CHELSIO_GETREG:{
struct ch_reg edata;
if (copy_from_user(&edata, useraddr, sizeof(edata)))
return -EFAULT;
if ((edata.addr & 3) != 0
|| edata.addr >= adapter->mmio_len)
return -EINVAL;
edata.val = readl(adapter->regs + edata.addr);
if (copy_to_user(useraddr, &edata, sizeof(edata)))
return -EFAULT;
break;
}
case CHELSIO_SET_QSET_PARAMS:{
int i;
struct qset_params *q;
struct ch_qset_params t;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
if (t.qset_idx >= SGE_QSETS)
return -EINVAL;
if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
!in_range(t.cong_thres, 0, 255) ||
!in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
MAX_TXQ_ENTRIES) ||
!in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
MAX_TXQ_ENTRIES) ||
!in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
MAX_CTRL_TXQ_ENTRIES) ||
!in_range(t.fl_size[0], MIN_FL_ENTRIES,
MAX_RX_BUFFERS) ||
!in_range(t.fl_size[1], MIN_FL_ENTRIES,
MAX_RX_JUMBO_BUFFERS) ||
!in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
MAX_RSPQ_ENTRIES))
return -EINVAL;
if ((adapter->flags & FULL_INIT_DONE) &&
(t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
t.polling >= 0 || t.cong_thres >= 0))
return -EBUSY;
q = &adapter->params.sge.qset[t.qset_idx];
if (t.rspq_size >= 0)
q->rspq_size = t.rspq_size;
if (t.fl_size[0] >= 0)
q->fl_size = t.fl_size[0];
if (t.fl_size[1] >= 0)
q->jumbo_size = t.fl_size[1];
if (t.txq_size[0] >= 0)
q->txq_size[0] = t.txq_size[0];
if (t.txq_size[1] >= 0)
q->txq_size[1] = t.txq_size[1];
if (t.txq_size[2] >= 0)
q->txq_size[2] = t.txq_size[2];
if (t.cong_thres >= 0)
q->cong_thres = t.cong_thres;
if (t.intr_lat >= 0) {
struct sge_qset *qs =
&adapter->sge.qs[t.qset_idx];
q->coalesce_usecs = t.intr_lat;
t3_update_qset_coalesce(qs, q);
}
if (t.polling >= 0) {
if (adapter->flags & USING_MSIX)
q->polling = t.polling;
else {
/* No polling with INTx for T3A */
if (adapter->params.rev == 0 &&
!(adapter->flags & USING_MSI))
t.polling = 0;
for (i = 0; i < SGE_QSETS; i++) {
q = &adapter->params.sge.
qset[i];
q->polling = t.polling;
}
}
}
break;
}
case CHELSIO_GET_QSET_PARAMS:{
struct qset_params *q;
struct ch_qset_params t;
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
if (t.qset_idx >= SGE_QSETS)
return -EINVAL;
q = &adapter->params.sge.qset[t.qset_idx];
t.rspq_size = q->rspq_size;
t.txq_size[0] = q->txq_size[0];
t.txq_size[1] = q->txq_size[1];
t.txq_size[2] = q->txq_size[2];
t.fl_size[0] = q->fl_size;
t.fl_size[1] = q->jumbo_size;
t.polling = q->polling;
t.intr_lat = q->coalesce_usecs;
t.cong_thres = q->cong_thres;
if (copy_to_user(useraddr, &t, sizeof(t)))
return -EFAULT;
break;
}
case CHELSIO_SET_QSET_NUM:{
struct ch_reg edata;
struct port_info *pi = netdev_priv(dev);
unsigned int i, first_qset = 0, other_qsets = 0;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (adapter->flags & FULL_INIT_DONE)
return -EBUSY;
if (copy_from_user(&edata, useraddr, sizeof(edata)))
return -EFAULT;
if (edata.val < 1 ||
(edata.val > 1 && !(adapter->flags & USING_MSIX)))
return -EINVAL;
for_each_port(adapter, i)
if (adapter->port[i] && adapter->port[i] != dev)
other_qsets += adap2pinfo(adapter, i)->nqsets;
if (edata.val + other_qsets > SGE_QSETS)
return -EINVAL;
pi->nqsets = edata.val;
for_each_port(adapter, i)
if (adapter->port[i]) {
pi = adap2pinfo(adapter, i);
pi->first_qset = first_qset;
first_qset += pi->nqsets;
}
break;
}
case CHELSIO_GET_QSET_NUM:{
struct ch_reg edata;
struct port_info *pi = netdev_priv(dev);
edata.cmd = CHELSIO_GET_QSET_NUM;
edata.val = pi->nqsets;
if (copy_to_user(useraddr, &edata, sizeof(edata)))
return -EFAULT;
break;
}
case CHELSIO_LOAD_FW:{
u8 *fw_data;
struct ch_mem_range t;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
fw_data = kmalloc(t.len, GFP_KERNEL);
if (!fw_data)
return -ENOMEM;
if (copy_from_user(fw_data, useraddr + sizeof(t), t.len)) {
kfree(fw_data);
return -EFAULT;
}
ret = t3_load_fw(adapter, fw_data, t.len);
kfree(fw_data);
if (ret)
return ret;
break;
}
case CHELSIO_SETMTUTAB:{
struct ch_mtus m;
int i;
if (!is_offload(adapter))
return -EOPNOTSUPP;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (offload_running(adapter))
return -EBUSY;
if (copy_from_user(&m, useraddr, sizeof(m)))
return -EFAULT;
if (m.nmtus != NMTUS)
return -EINVAL;
if (m.mtus[0] < 81) /* accommodate SACK */
return -EINVAL;
/* MTUs must be in ascending order */
for (i = 1; i < NMTUS; ++i)
if (m.mtus[i] < m.mtus[i - 1])
return -EINVAL;
memcpy(adapter->params.mtus, m.mtus,
sizeof(adapter->params.mtus));
break;
}
case CHELSIO_GET_PM:{
struct tp_params *p = &adapter->params.tp;
struct ch_pm m = {.cmd = CHELSIO_GET_PM };
if (!is_offload(adapter))
return -EOPNOTSUPP;
m.tx_pg_sz = p->tx_pg_size;
m.tx_num_pg = p->tx_num_pgs;
m.rx_pg_sz = p->rx_pg_size;
m.rx_num_pg = p->rx_num_pgs;
m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
if (copy_to_user(useraddr, &m, sizeof(m)))
return -EFAULT;
break;
}
case CHELSIO_SET_PM:{
struct ch_pm m;
struct tp_params *p = &adapter->params.tp;
if (!is_offload(adapter))
return -EOPNOTSUPP;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (adapter->flags & FULL_INIT_DONE)
return -EBUSY;
if (copy_from_user(&m, useraddr, sizeof(m)))
return -EFAULT;
if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
!m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
return -EINVAL; /* not power of 2 */
if (!(m.rx_pg_sz & 0x14000))
return -EINVAL; /* not 16KB or 64KB */
if (!(m.tx_pg_sz & 0x1554000))
return -EINVAL;
if (m.tx_num_pg == -1)
m.tx_num_pg = p->tx_num_pgs;
if (m.rx_num_pg == -1)
m.rx_num_pg = p->rx_num_pgs;
if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
return -EINVAL;
if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
return -EINVAL;
p->rx_pg_size = m.rx_pg_sz;
p->tx_pg_size = m.tx_pg_sz;
p->rx_num_pgs = m.rx_num_pg;
p->tx_num_pgs = m.tx_num_pg;
break;
}
case CHELSIO_GET_MEM:{
struct ch_mem_range t;
struct mc7 *mem;
u64 buf[32];
if (!is_offload(adapter))
return -EOPNOTSUPP;
if (!(adapter->flags & FULL_INIT_DONE))
return -EIO; /* need the memory controllers */
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
if ((t.addr & 7) || (t.len & 7))
return -EINVAL;
if (t.mem_id == MEM_CM)
mem = &adapter->cm;
else if (t.mem_id == MEM_PMRX)
mem = &adapter->pmrx;
else if (t.mem_id == MEM_PMTX)
mem = &adapter->pmtx;
else
return -EINVAL;
/*
* Version scheme:
* bits 0..9: chip version
* bits 10..15: chip revision
*/
t.version = 3 | (adapter->params.rev << 10);
if (copy_to_user(useraddr, &t, sizeof(t)))
return -EFAULT;
/*
* Read 256 bytes at a time as len can be large and we don't
* want to use huge intermediate buffers.
*/
useraddr += sizeof(t); /* advance to start of buffer */
while (t.len) {
unsigned int chunk = min_t(unsigned int, t.len, sizeof(buf));
ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
if (ret)
return ret;
if (copy_to_user(useraddr, buf, chunk))
return -EFAULT;
useraddr += chunk;
t.addr += chunk;
t.len -= chunk;
}
break;
}
case CHELSIO_SET_TRACE_FILTER:{
struct ch_trace t;
const struct trace_params *tp;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (!offload_running(adapter))
return -EAGAIN;
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
tp = (const struct trace_params *)&t.sip;
if (t.config_tx)
t3_config_trace_filter(adapter, tp, 0,
t.invert_match,
t.trace_tx);
if (t.config_rx)
t3_config_trace_filter(adapter, tp, 1,
t.invert_match,
t.trace_rx);
break;
}
case CHELSIO_SET_PKTSCHED:{
struct sk_buff *skb;
struct ch_pktsched_params p;
struct mngt_pktsched_wr *req;
if (!(adapter->flags & FULL_INIT_DONE))
return -EIO; /* uP must be up and running */
if (copy_from_user(&p, useraddr, sizeof(p)))
return -EFAULT;
skb = alloc_skb(sizeof(*req), GFP_KERNEL);
if (!skb)
return -ENOMEM;
req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
req->sched = p.sched;
req->idx = p.idx;
req->min = p.min;
req->max = p.max;
req->binding = p.binding;
printk(KERN_INFO
"pktsched: sched %u idx %u min %u max %u binding %u\n",
req->sched, req->idx, req->min, req->max,
req->binding);
skb->priority = 1;
offload_tx(&adapter->tdev, skb);
break;
}
default:
return -EOPNOTSUPP;
}
return 0;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
int ret, mmd;
struct adapter *adapter = dev->priv;
struct port_info *pi = netdev_priv(dev);
struct mii_ioctl_data *data = if_mii(req);
switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = pi->phy.addr;
/* FALLTHRU */
case SIOCGMIIREG:{
u32 val;
struct cphy *phy = &pi->phy;
if (!phy->mdio_read)
return -EOPNOTSUPP;
if (is_10G(adapter)) {
mmd = data->phy_id >> 8;
if (!mmd)
mmd = MDIO_DEV_PCS;
else if (mmd > MDIO_DEV_XGXS)
return -EINVAL;
ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
mmd, data->reg_num, &val);
} else
ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
0, data->reg_num & 0x1f, &val);
if (!ret)
data->val_out = val;
break;
}
case SIOCSMIIREG:{
struct cphy *phy = &pi->phy;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (!phy->mdio_write)
return -EOPNOTSUPP;
if (is_10G(adapter)) {
mmd = data->phy_id >> 8;
if (!mmd)
mmd = MDIO_DEV_PCS;
else if (mmd > MDIO_DEV_XGXS)
return -EINVAL;
ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
mmd, data->reg_num, data->val_in);
} else
ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
0, data->reg_num & 0x1f, data->val_in);
break;
}
case SIOCCHIOCTL:
return cxgb_extension_ioctl(dev, req->ifr_data);
default:
return -EOPNOTSUPP;
}
return ret;
}
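/*
 * Change a port's MTU: program the MAC, update the TP per-port MTU table,
 * and for rev 0 parts reload the offload MTU table as well.
 */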
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
int ret;
struct adapter *adapter = dev->priv;
struct port_info *pi = netdev_priv(dev);
if (new_mtu < 81) /* accommodate SACK */
return -EINVAL;
if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
return ret;
dev->mtu = new_mtu;
init_port_mtus(adapter);
if (adapter->params.rev == 0 && offload_running(adapter))
t3_load_mtus(adapter, adapter->params.mtus,
adapter->params.a_wnd, adapter->params.b_wnd,
adapter->port[0]->mtu);
return 0;
}
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
struct adapter *adapter = dev->priv;
struct port_info *pi = netdev_priv(dev);
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
if (offload_running(adapter))
write_smt_entry(adapter, pi->port_id);
return 0;
}
/**
* t3_synchronize_rx - wait for current Rx processing on a port to complete
* @adap: the adapter
* @p: the port
*
* Ensures that current Rx processing on any of the queues associated with
* the given port completes before returning. We do this by acquiring and
* releasing the locks of the response queues associated with the port.
*/
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
int i;
for (i = 0; i < p->nqsets; i++) {
struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
spin_lock_irq(&q->lock);
spin_unlock_irq(&q->lock);
}
}
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
struct adapter *adapter = dev->priv;
struct port_info *pi = netdev_priv(dev);
pi->vlan_grp = grp;
if (adapter->params.rev > 0)
t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
else {
/* single control for all ports */
unsigned int i, have_vlans = 0;
for_each_port(adapter, i)
have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
t3_set_vlan_accel(adapter, 1, have_vlans);
}
t3_synchronize_rx(adapter, pi);
}
static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
/* nothing */
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
struct adapter *adapter = dev->priv;
struct sge_qset *qs = dev2qset(dev);
t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
adapter);
}
#endif
/*
* Periodic accumulation of MAC statistics.
*/
static void mac_stats_update(struct adapter *adapter)
{
int i;
for_each_port(adapter, i) {
struct net_device *dev = adapter->port[i];
struct port_info *p = netdev_priv(dev);
if (netif_running(dev)) {
spin_lock(&adapter->stats_lock);
t3_mac_update_stats(&p->mac);
spin_unlock(&adapter->stats_lock);
}
}
}
static void check_link_status(struct adapter *adapter)
{
int i;
for_each_port(adapter, i) {
struct net_device *dev = adapter->port[i];
struct port_info *p = netdev_priv(dev);
if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
t3_link_changed(adapter, i);
}
}
static void t3_adap_check_task(struct work_struct *work)
{
struct adapter *adapter = container_of(work, struct adapter,
adap_check_task.work);
const struct adapter_params *p = &adapter->params;
adapter->check_task_cnt++;
/* Check link status for PHYs without interrupts */
if (p->linkpoll_period)
check_link_status(adapter);
/* Accumulate MAC stats if needed */
if (!p->linkpoll_period ||
(adapter->check_task_cnt * p->linkpoll_period) / 10 >=
p->stats_update_period) {
mac_stats_update(adapter);
adapter->check_task_cnt = 0;
}
/* Schedule the next check update if any port is active. */
spin_lock(&adapter->work_lock);
if (adapter->open_device_map & PORT_MASK)
schedule_chk_task(adapter);
spin_unlock(&adapter->work_lock);
}
/*
* Processes external (PHY) interrupts in process context.
*/
static void ext_intr_task(struct work_struct *work)
{
struct adapter *adapter = container_of(work, struct adapter,
ext_intr_handler_task);
t3_phy_intr_handler(adapter);
/* Now reenable external interrupts */
spin_lock_irq(&adapter->work_lock);
if (adapter->slow_intr_mask) {
adapter->slow_intr_mask |= F_T3DBG;
t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
t3_write_reg(adapter, A_PL_INT_ENABLE0,
adapter->slow_intr_mask);
}
spin_unlock_irq(&adapter->work_lock);
}
/*
* Interrupt-context handler for external (PHY) interrupts.
*/
void t3_os_ext_intr_handler(struct adapter *adapter)
{
/*
* Schedule a task to handle external interrupts as they may be slow
* and we use a mutex to protect MDIO registers. We disable PHY
* interrupts in the meantime and let the task reenable them when
* it's done.
*/
spin_lock(&adapter->work_lock);
if (adapter->slow_intr_mask) {
adapter->slow_intr_mask &= ~F_T3DBG;
t3_write_reg(adapter, A_PL_INT_ENABLE0,
adapter->slow_intr_mask);
queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
}
spin_unlock(&adapter->work_lock);
}
void t3_fatal_err(struct adapter *adapter)
{
unsigned int fw_status[4];
if (adapter->flags & FULL_INIT_DONE) {
t3_sge_stop(adapter);
t3_intr_disable(adapter);
}
CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
fw_status[0], fw_status[1],
fw_status[2], fw_status[3]);
}
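/*
 * Try to enable MSI-X with one vector per queue set plus one for slow-path
 * (async) events; on failure the probe code falls back to MSI or legacy
 * interrupts.
 */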
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
struct msix_entry entries[SGE_QSETS + 1];
int i, err;
for (i = 0; i < ARRAY_SIZE(entries); ++i)
entries[i].entry = i;
err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
if (!err) {
for (i = 0; i < ARRAY_SIZE(entries); ++i)
adap->msix_info[i].vec = entries[i].vector;
} else if (err > 0)
dev_info(&adap->pdev->dev,
"only %d MSI-X vectors left, not using MSI-X\n", err);
return err;
}
static void __devinit print_port_info(struct adapter *adap,
const struct adapter_info *ai)
{
static const char *pci_variant[] = {
"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
};
int i;
char buf[80];
if (is_pcie(adap))
snprintf(buf, sizeof(buf), "%s x%d",
pci_variant[adap->params.pci.variant],
adap->params.pci.width);
else
snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
pci_variant[adap->params.pci.variant],
adap->params.pci.speed, adap->params.pci.width);
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
const struct port_info *pi = netdev_priv(dev);
if (!test_bit(i, &adap->registered_device_map))
continue;
printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
dev->name, ai->desc, pi->port_type->desc,
adap->params.rev, buf,
(adap->flags & USING_MSIX) ? " MSI-X" :
(adap->flags & USING_MSI) ? " MSI" : "");
if (adap->name == dev->name && adap->params.vpd.mclk)
printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
adap->name, t3_mc7_size(&adap->cm) >> 20,
t3_mc7_size(&adap->pmtx) >> 20,
t3_mc7_size(&adap->pmrx) >> 20);
}
}
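/*
 * PCI probe: map the device, allocate the adapter and its net_devices,
 * prepare the hardware, register the ports, set up offload and interrupts,
 * and create the sysfs attribute group.
 */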
static int __devinit init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
static int version_printed;
int i, err, pci_using_dac = 0;
unsigned long mmio_start, mmio_len;
const struct adapter_info *ai;
struct adapter *adapter = NULL;
struct port_info *pi;
if (!version_printed) {
printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
++version_printed;
}
if (!cxgb3_wq) {
cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
if (!cxgb3_wq) {
printk(KERN_ERR DRV_NAME
": cannot initialize work queue\n");
return -ENOMEM;
}
}
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
/* Just info, some other driver may have claimed the device. */
dev_info(&pdev->dev, "cannot obtain PCI resources\n");
return err;
}
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "cannot enable PCI device\n");
goto out_release_regions;
}
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
pci_using_dac = 1;
err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
if (err) {
dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
"coherent allocations\n");
goto out_disable_device;
}
} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
dev_err(&pdev->dev, "no usable DMA configuration\n");
goto out_disable_device;
}
pci_set_master(pdev);
mmio_start = pci_resource_start(pdev, 0);
mmio_len = pci_resource_len(pdev, 0);
ai = t3_get_adapter_info(ent->driver_data);
adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
if (!adapter) {
err = -ENOMEM;
goto out_disable_device;
}
adapter->regs = ioremap_nocache(mmio_start, mmio_len);
if (!adapter->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
err = -ENOMEM;
goto out_free_adapter;
}
adapter->pdev = pdev;
adapter->name = pci_name(pdev);
adapter->msg_enable = dflt_msg_enable;
adapter->mmio_len = mmio_len;
mutex_init(&adapter->mdio_lock);
spin_lock_init(&adapter->work_lock);
spin_lock_init(&adapter->stats_lock);
INIT_LIST_HEAD(&adapter->adapter_list);
INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
for (i = 0; i < ai->nports; ++i) {
struct net_device *netdev;
netdev = alloc_etherdev(sizeof(struct port_info));
if (!netdev) {
err = -ENOMEM;
goto out_free_dev;
}
SET_MODULE_OWNER(netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
adapter->port[i] = netdev;
pi = netdev_priv(netdev);
pi->rx_csum_offload = 1;
pi->nqsets = 1;
pi->first_qset = i;
pi->activity = 0;
pi->port_id = i;
netif_carrier_off(netdev);
netdev->irq = pdev->irq;
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len - 1;
netdev->priv = adapter;
netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
netdev->features |= NETIF_F_LLTX;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
netdev->vlan_rx_register = vlan_rx_register;
netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
netdev->open = cxgb_open;
netdev->stop = cxgb_close;
netdev->hard_start_xmit = t3_eth_xmit;
netdev->get_stats = cxgb_get_stats;
netdev->set_multicast_list = cxgb_set_rxmode;
netdev->do_ioctl = cxgb_ioctl;
netdev->change_mtu = cxgb_change_mtu;
netdev->set_mac_address = cxgb_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
netdev->poll_controller = cxgb_netpoll;
#endif
netdev->weight = 64;
SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
}
pci_set_drvdata(pdev, adapter->port[0]);
if (t3_prep_adapter(adapter, ai, 1) < 0) {
err = -ENODEV;
goto out_free_dev;
}
/*
* The card is now ready to go. If any errors occur during device
* registration we do not fail the whole card but rather proceed only
* with the ports we manage to register successfully. However, we must
* register at least one net device.
*/
for_each_port(adapter, i) {
err = register_netdev(adapter->port[i]);
if (err)
dev_warn(&pdev->dev,
"cannot register net device %s, skipping\n",
adapter->port[i]->name);
else {
/*
* Change the name we use for messages to the name of
* the first successfully registered interface.
*/
if (!adapter->registered_device_map)
adapter->name = adapter->port[i]->name;
__set_bit(i, &adapter->registered_device_map);
}
}
if (!adapter->registered_device_map) {
dev_err(&pdev->dev, "could not register any net devices\n");
goto out_free_dev;
}
/* Driver's ready. Reflect it on LEDs */
t3_led_ready(adapter);
if (is_offload(adapter)) {
__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
cxgb3_adapter_ofld(adapter);
}
/* See what interrupts we'll be using */
if (msi > 1 && cxgb_enable_msix(adapter) == 0)
adapter->flags |= USING_MSIX;
else if (msi > 0 && pci_enable_msi(pdev) == 0)
adapter->flags |= USING_MSI;
err = sysfs_create_group(&adapter->port[0]->class_dev.kobj,
&cxgb3_attr_group);
print_port_info(adapter, ai);
return 0;
out_free_dev:
iounmap(adapter->regs);
for (i = ai->nports - 1; i >= 0; --i)
if (adapter->port[i])
free_netdev(adapter->port[i]);
out_free_adapter:
kfree(adapter);
out_disable_device:
pci_disable_device(pdev);
out_release_regions:
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
return err;
}
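/*
 * PCI remove: tear down in roughly the reverse order of init_one(), including
 * the offload state and the dummy NAPI netdevices.
 */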
static void __devexit remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
if (dev) {
int i;
struct adapter *adapter = dev->priv;
t3_sge_stop(adapter);
sysfs_remove_group(&adapter->port[0]->class_dev.kobj,
&cxgb3_attr_group);
for_each_port(adapter, i)
if (test_bit(i, &adapter->registered_device_map))
unregister_netdev(adapter->port[i]);
if (is_offload(adapter)) {
cxgb3_adapter_unofld(adapter);
if (test_bit(OFFLOAD_DEVMAP_BIT,
&adapter->open_device_map))
offload_close(&adapter->tdev);
}
t3_free_sge_resources(adapter);
cxgb_disable_msi(adapter);
for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
if (adapter->dummy_netdev[i]) {
free_netdev(adapter->dummy_netdev[i]);
adapter->dummy_netdev[i] = NULL;
}
for_each_port(adapter, i)
if (adapter->port[i])
free_netdev(adapter->port[i]);
iounmap(adapter->regs);
kfree(adapter);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
}
static struct pci_driver driver = {
.name = DRV_NAME,
.id_table = cxgb3_pci_tbl,
.probe = init_one,
.remove = __devexit_p(remove_one),
};
static int __init cxgb3_init_module(void)
{
int ret;
cxgb3_offload_init();
ret = pci_register_driver(&driver);
return ret;
}
static void __exit cxgb3_cleanup_module(void)
{
pci_unregister_driver(&driver);
if (cxgb3_wq)
destroy_workqueue(cxgb3_wq);
}
module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);
/*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
* Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/list.h>
#include <net/neighbour.h>
#include <linux/notifier.h>
#include <asm/atomic.h>
#include <linux/proc_fs.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include "common.h"
#include "regs.h"
#include "cxgb3_ioctl.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"
static LIST_HEAD(client_list);
static LIST_HEAD(ofld_dev_list);
static DEFINE_MUTEX(cxgb3_db_lock);
static DEFINE_RWLOCK(adapter_list_lock);
static LIST_HEAD(adapter_list);
static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x100000;
static inline int offload_activated(struct t3cdev *tdev)
{
const struct adapter *adapter = tdev2adap(tdev);
return (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map));
}
/**
* cxgb3_register_client - register an offload client
* @client: the client
*
* Adds the client to the client list and calls back the client for each
* activated offload device.
*/
void cxgb3_register_client(struct cxgb3_client *client)
{
struct t3cdev *tdev;
mutex_lock(&cxgb3_db_lock);
list_add_tail(&client->client_list, &client_list);
if (client->add) {
list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
if (offload_activated(tdev))
client->add(tdev);
}
}
mutex_unlock(&cxgb3_db_lock);
}
EXPORT_SYMBOL(cxgb3_register_client);
/**
* cxgb3_unregister_client - unregister an offload client
* @client: the client
*
* Removes the client from the client list and calls back the client for each
* activated offload device.
*/
void cxgb3_unregister_client(struct cxgb3_client *client)
{
struct t3cdev *tdev;
mutex_lock(&cxgb3_db_lock);
list_del(&client->client_list);
if (client->remove) {
list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
if (offload_activated(tdev))
client->remove(tdev);
}
}
mutex_unlock(&cxgb3_db_lock);
}
EXPORT_SYMBOL(cxgb3_unregister_client);
/**
* cxgb3_add_clients - activate registered clients for an offload device
* @tdev: the offload device
*
* Calls back all registered clients once an offload device is activated.
*/
void cxgb3_add_clients(struct t3cdev *tdev)
{
struct cxgb3_client *client;
mutex_lock(&cxgb3_db_lock);
list_for_each_entry(client, &client_list, client_list) {
if (client->add)
client->add(tdev);
}
mutex_unlock(&cxgb3_db_lock);
}
/**
* cxgb3_remove_clients - deactivate registered clients for an offload device
* @tdev: the offload device
*
* Calls back all registered clients once an offload device is deactivated.
*/
void cxgb3_remove_clients(struct t3cdev *tdev)
{
struct cxgb3_client *client;
mutex_lock(&cxgb3_db_lock);
list_for_each_entry(client, &client_list, client_list) {
if (client->remove)
client->remove(tdev);
}
mutex_unlock(&cxgb3_db_lock);
}
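/*
 * Find the net_device for a given MAC address, descending into the matching
 * VLAN device when a VLAN tag is supplied, or following the bonding master
 * otherwise.
 */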
static struct net_device *get_iff_from_mac(struct adapter *adapter,
const unsigned char *mac,
unsigned int vlan)
{
int i;
for_each_port(adapter, i) {
const struct vlan_group *grp;
struct net_device *dev = adapter->port[i];
const struct port_info *p = netdev_priv(dev);
if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
if (vlan && vlan != VLAN_VID_MASK) {
grp = p->vlan_grp;
dev = grp ? grp->vlan_devices[vlan] : NULL;
} else
while (dev->master)
dev = dev->master;
return dev;
}
}
return NULL;
}
static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
void *data)
{
int ret = 0;
struct ulp_iscsi_info *uiip = data;
switch (req) {
case ULP_ISCSI_GET_PARAMS:
uiip->pdev = adapter->pdev;
uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
/*
* On tx, the iscsi pdu has to be <= tx page size and has to
* fit into the Tx PM FIFO.
*/
uiip->max_txsz = min(adapter->params.tp.tx_pg_size,
t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
/*
* On Rx, the iSCSI PDU has to be < the Rx page size and the whole
* PDU plus CPL headers has to fit into one SGE buffer.
*/
uiip->max_rxsz = min_t(unsigned int,
adapter->params.tp.rx_pg_size,
(adapter->sge.qs[0].fl[1].buf_size -
sizeof(struct cpl_rx_data) * 2 -
sizeof(struct cpl_rx_data_ddp)));
break;
case ULP_ISCSI_SET_PARAMS:
t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
break;
default:
ret = -EOPNOTSUPP;
}
return ret;
}
/* Response queue used for RDMA events. */
#define ASYNC_NOTIF_RSPQ 0
static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
{
int ret = 0;
switch (req) {
case RDMA_GET_PARAMS:{
struct rdma_info *req = data;
struct pci_dev *pdev = adapter->pdev;
req->udbell_physbase = pci_resource_start(pdev, 2);
req->udbell_len = pci_resource_len(pdev, 2);
req->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
req->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
req->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
req->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
req->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
req->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
req->kdb_addr = adapter->regs + A_SG_KDOORBELL;
req->pdev = pdev;
break;
}
case RDMA_CQ_OP:{
unsigned long flags;
struct rdma_cq_op *req = data;
/* may be called in any context */
spin_lock_irqsave(&adapter->sge.reg_lock, flags);
ret = t3_sge_cqcntxt_op(adapter, req->id, req->op,
req->credits);
spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
break;
}
case RDMA_GET_MEM:{
struct ch_mem_range *t = data;
struct mc7 *mem;
if ((t->addr & 7) || (t->len & 7))
return -EINVAL;
if (t->mem_id == MEM_CM)
mem = &adapter->cm;
else if (t->mem_id == MEM_PMRX)
mem = &adapter->pmrx;
else if (t->mem_id == MEM_PMTX)
mem = &adapter->pmtx;
else
return -EINVAL;
ret = t3_mc7_bd_read(mem, t->addr / 8, t->len / 8, (u64 *)t->buf);
if (ret)
return ret;
break;
}
case RDMA_CQ_SETUP:{
struct rdma_cq_setup *req = data;
spin_lock_irq(&adapter->sge.reg_lock);
ret = t3_sge_init_cqcntxt(adapter, req->id, req->base_addr,
req->size, ASYNC_NOTIF_RSPQ, req->ovfl_mode,
req->credits, req->credit_thres);
spin_unlock_irq(&adapter->sge.reg_lock);
break;
}
case RDMA_CQ_DISABLE:
spin_lock_irq(&adapter->sge.reg_lock);
ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
spin_unlock_irq(&adapter->sge.reg_lock);
break;
case RDMA_CTRL_QP_SETUP:{
struct rdma_ctrlqp_setup *req = data;
spin_lock_irq(&adapter->sge.reg_lock);
ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
SGE_CNTXT_RDMA,
ASYNC_NOTIF_RSPQ,
req->base_addr, req->size,
FW_RI_TID_START, 1, 0);
spin_unlock_irq(&adapter->sge.reg_lock);
break;
}
default:
ret = -EOPNOTSUPP;
}
return ret;
}
static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
{
struct adapter *adapter = tdev2adap(tdev);
struct tid_range *tid;
struct mtutab *mtup;
struct iff_mac *iffmacp;
struct ddp_params *ddpp;
struct adap_ports *ports;
int i;
switch (req) {
case GET_MAX_OUTSTANDING_WR:
*(unsigned int *)data = FW_WR_NUM;
break;
case GET_WR_LEN:
*(unsigned int *)data = WR_FLITS;
break;
case GET_TX_MAX_CHUNK:
*(unsigned int *)data = 1 << 20; /* 1MB */
break;
case GET_TID_RANGE:
tid = data;
tid->num = t3_mc5_size(&adapter->mc5) -
adapter->params.mc5.nroutes -
adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
tid->base = 0;
break;
case GET_STID_RANGE:
tid = data;
tid->num = adapter->params.mc5.nservers;
tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
break;
case GET_L2T_CAPACITY:
*(unsigned int *)data = 2048;
break;
case GET_MTUS:
mtup = data;
mtup->size = NMTUS;
mtup->mtus = adapter->params.mtus;
break;
case GET_IFF_FROM_MAC:
iffmacp = data;
iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
iffmacp->vlan_tag &
VLAN_VID_MASK);
break;
case GET_DDP_PARAMS:
ddpp = data;
ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
break;
case GET_PORTS:
ports = data;
ports->nports = adapter->params.nports;
for_each_port(adapter, i)
ports->lldevs[i] = adapter->port[i];
break;
case ULP_ISCSI_GET_PARAMS:
case ULP_ISCSI_SET_PARAMS:
if (!offload_running(adapter))
return -EAGAIN;
return cxgb_ulp_iscsi_ctl(adapter, req, data);
case RDMA_GET_PARAMS:
case RDMA_CQ_OP:
case RDMA_CQ_SETUP:
case RDMA_CQ_DISABLE:
case RDMA_CTRL_QP_SETUP:
case RDMA_GET_MEM:
if (!offload_running(adapter))
return -EAGAIN;
return cxgb_rdma_ctl(adapter, req, data);
default:
return -EOPNOTSUPP;
}
return 0;
}
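/*
* Illustrative sketch only: an upper-layer driver that has been handed this
* t3cdev typically queries offload parameters through the ->ctl hook. The
* function and variable names below are hypothetical.
*
*	static int example_query(struct t3cdev *tdev)
*	{
*		struct ddp_params dp;
*		struct adap_ports ports;
*
*		if (tdev->ctl(tdev, GET_DDP_PARAMS, &dp) < 0 ||
*		    tdev->ctl(tdev, GET_PORTS, &ports) < 0)
*			return -EOPNOTSUPP;
*		return 0;
*	}
*/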
/*
* Dummy handler for Rx offload packets in case we get an offload packet before
* proper processing is set up. This complains and drops the packet as it isn't
* normal to get offload packets at this stage.
*/
static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
int n)
{
CH_ERR(tdev2adap(dev), "%d unexpected offload packets, first data %u\n",
n, ntohl(*(u32 *)skbs[0]->data));
while (n--)
dev_kfree_skb_any(skbs[n]);
return 0;
}
static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
{
}
void cxgb3_set_dummy_ops(struct t3cdev *dev)
{
dev->recv = rx_offload_blackhole;
dev->neigh_update = dummy_neigh_update;
}
/*
* Free an active-open TID.
*/
void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
{
struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
union active_open_entry *p = atid2entry(t, atid);
void *ctx = p->t3c_tid.ctx;
spin_lock_bh(&t->atid_lock);
p->next = t->afree;
t->afree = p;
t->atids_in_use--;
spin_unlock_bh(&t->atid_lock);
return ctx;
}
EXPORT_SYMBOL(cxgb3_free_atid);
/*
* Free a server TID and return it to the free pool.
*/
void cxgb3_free_stid(struct t3cdev *tdev, int stid)
{
struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
union listen_entry *p = stid2entry(t, stid);
spin_lock_bh(&t->stid_lock);
p->next = t->sfree;
t->sfree = p;
t->stids_in_use--;
spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb3_free_stid);
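/*
* Record the client and its context for a hardware TID so later CPL messages
* carrying this TID can be dispatched to the client's handlers.
*/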
void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
void *ctx, unsigned int tid)
{
struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
t->tid_tab[tid].client = client;
t->tid_tab[tid].ctx = ctx;
atomic_inc(&t->tids_in_use);
}
EXPORT_SYMBOL(cxgb3_insert_tid);
/*
* Populate a TID_RELEASE WR. The skb must already be properly sized.
*/
static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
{
struct cpl_tid_release *req;
skb->priority = CPL_PRIORITY_SETUP;
req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
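/*
* Work item that drains the deferred-release list built by
* cxgb3_queue_tid_release(), sending a CPL_TID_RELEASE for each entry. The
* lock is dropped around the sleeping allocation and the send.
*/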
static void t3_process_tid_release_list(struct work_struct *work)
{
struct t3c_data *td = container_of(work, struct t3c_data,
tid_release_task);
struct sk_buff *skb;
struct t3cdev *tdev = td->dev;
spin_lock_bh(&td->tid_release_lock);
while (td->tid_release_list) {
struct t3c_tid_entry *p = td->tid_release_list;
td->tid_release_list = (struct t3c_tid_entry *)p->ctx;
spin_unlock_bh(&td->tid_release_lock);
skb = alloc_skb(sizeof(struct cpl_tid_release),
GFP_KERNEL | __GFP_NOFAIL);
mk_tid_release(skb, p - td->tid_maps.tid_tab);
cxgb3_ofld_send(tdev, skb);
p->ctx = NULL;
spin_lock_bh(&td->tid_release_lock);
}
spin_unlock_bh(&td->tid_release_lock);
}
/* use ctx as a next pointer in the tid release list */
void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
{
struct t3c_data *td = T3C_DATA(tdev);
struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];
spin_lock_bh(&td->tid_release_lock);
p->ctx = (void *)td->tid_release_list;
td->tid_release_list = p;
if (!p->ctx)
schedule_work(&td->tid_release_task);
spin_unlock_bh(&td->tid_release_lock);
}
EXPORT_SYMBOL(cxgb3_queue_tid_release);
/*
* Remove a tid from the TID table. A client may defer processing its last
* CPL message if it is locked at the time it arrives, and while the message
* sits in the client's backlog the TID may be reused for another connection.
* To handle this we atomically switch the TID association if it still points
* to the original client context.
*/
void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
{
struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
BUG_ON(tid >= t->ntids);
if (tdev->type == T3A)
(void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
else {
struct sk_buff *skb;
skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
if (likely(skb)) {
mk_tid_release(skb, tid);
cxgb3_ofld_send(tdev, skb);
t->tid_tab[tid].ctx = NULL;
} else
cxgb3_queue_tid_release(tdev, tid);
}
atomic_dec(&t->tids_in_use);
}
EXPORT_SYMBOL(cxgb3_remove_tid);
int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
void *ctx)
{
int atid = -1;
struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
spin_lock_bh(&t->atid_lock);
if (t->afree) {
union active_open_entry *p = t->afree;
atid = (p - t->atid_tab) + t->atid_base;
t->afree = p->next;
p->t3c_tid.ctx = ctx;
p->t3c_tid.client = client;
t->atids_in_use++;
}
spin_unlock_bh(&t->atid_lock);
return atid;
}
EXPORT_SYMBOL(cxgb3_alloc_atid);
int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
void *ctx)
{
int stid = -1;
struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
spin_lock_bh(&t->stid_lock);
if (t->sfree) {
union listen_entry *p = t->sfree;
stid = (p - t->stid_tab) + t->stid_base;
t->sfree = p->next;
p->t3c_tid.ctx = ctx;
p->t3c_tid.client = client;
t->stids_in_use++;
}
spin_unlock_bh(&t->stid_lock);
return stid;
}
EXPORT_SYMBOL(cxgb3_alloc_stid);
static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
struct cpl_smt_write_rpl *rpl = cplhdr(skb);
if (rpl->status != CPL_ERR_NONE)
printk(KERN_ERR
"Unexpected SMT_WRITE_RPL status %u for entry %u\n",
rpl->status, GET_TID(rpl));
return CPL_RET_BUF_DONE;
}
static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
struct cpl_l2t_write_rpl *rpl = cplhdr(skb);
if (rpl->status != CPL_ERR_NONE)
printk(KERN_ERR
"Unexpected L2T_WRITE_RPL status %u for entry %u\n",
rpl->status, GET_TID(rpl));
return CPL_RET_BUF_DONE;
}
static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
struct cpl_act_open_rpl *rpl = cplhdr(skb);
unsigned int atid = G_TID(ntohl(rpl->atid));
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
if (t3c_tid->ctx && t3c_tid->client && t3c_tid->client->handlers &&
t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
t3c_tid->ctx);
} else {
printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
dev->name, CPL_ACT_OPEN_RPL);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
}
static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
union opcode_tid *p = cplhdr(skb);
unsigned int stid = G_TID(ntohl(p->opcode_tid));
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
if (t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[p->opcode]) {
return t3c_tid->client->handlers[p->opcode] (dev, skb,
t3c_tid->ctx);
} else {
printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
dev->name, p->opcode);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
}
static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
union opcode_tid *p = cplhdr(skb);
unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
if (t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[p->opcode]) {
return t3c_tid->client->handlers[p->opcode]
(dev, skb, t3c_tid->ctx);
} else {
printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
dev->name, p->opcode);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
}
static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
{
struct cpl_pass_accept_req *req = cplhdr(skb);
unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
if (t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
(dev, skb, t3c_tid->ctx);
} else {
printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
dev->name, CPL_PASS_ACCEPT_REQ);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
}
static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
{
union opcode_tid *p = cplhdr(skb);
unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
if (t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[p->opcode]) {
return t3c_tid->client->handlers[p->opcode]
(dev, skb, t3c_tid->ctx);
} else {
struct cpl_abort_req_rss *req = cplhdr(skb);
struct cpl_abort_rpl *rpl;
struct sk_buff *skb =
alloc_skb(sizeof(struct cpl_abort_rpl), GFP_ATOMIC);
if (!skb) {
printk("do_abort_req_rss: couldn't get skb!\n");
goto out;
}
skb->priority = CPL_PRIORITY_DATA;
__skb_put(skb, sizeof(struct cpl_abort_rpl));
rpl = cplhdr(skb);
rpl->wr.wr_hi =
htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req)));
OPCODE_TID(rpl) =
htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req)));
rpl->cmd = req->status;
cxgb3_ofld_send(dev, skb);
out:
return CPL_RET_BUF_DONE;
}
}
static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
{
struct cpl_act_establish *req = cplhdr(skb);
unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
if (t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
(dev, skb, t3c_tid->ctx);
} else {
printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
dev->name, CPL_ACT_ESTABLISH);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
}
static int do_set_tcb_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
if (rpl->status != CPL_ERR_NONE)
printk(KERN_ERR
"Unexpected SET_TCB_RPL status %u for tid %u\n",
rpl->status, GET_TID(rpl));
return CPL_RET_BUF_DONE;
}
static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
{
struct cpl_trace_pkt *p = cplhdr(skb);
skb->protocol = 0xffff;
skb->dev = dev->lldev;
skb_pull(skb, sizeof(*p));
skb->mac.raw = skb->data;
netif_receive_skb(skb);
return 0;
}
static int do_term(struct t3cdev *dev, struct sk_buff *skb)
{
unsigned int hwtid = ntohl(skb->priority) >> 8 & 0xfffff;
unsigned int opcode = G_OPCODE(ntohl(skb->csum));
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
if (t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[opcode]) {
return t3c_tid->client->handlers[opcode] (dev, skb,
t3c_tid->ctx);
} else {
printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
dev->name, opcode);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
}
static int nb_callback(struct notifier_block *self, unsigned long event,
void *ctx)
{
switch (event) {
case (NETEVENT_NEIGH_UPDATE):{
cxgb_neigh_update((struct neighbour *)ctx);
break;
}
case (NETEVENT_PMTU_UPDATE):
break;
case (NETEVENT_REDIRECT):{
struct netevent_redirect *nr = ctx;
cxgb_redirect(nr->old, nr->new);
cxgb_neigh_update(nr->new->neighbour);
break;
}
default:
break;
}
return 0;
}
static struct notifier_block nb = {
.notifier_call = nb_callback
};
/*
* Process a received packet with an unknown/unexpected CPL opcode.
*/
static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
{
printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
*skb->data);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
/*
* Handlers for each CPL opcode
*/
static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];
/*
* Add a new handler to the CPL dispatch table. A NULL handler may be supplied
* to unregister an existing handler.
*/
void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
{
if (opcode < NUM_CPL_CMDS)
cpl_handlers[opcode] = h ? h : do_bad_cpl;
else
printk(KERN_ERR "T3C: handler registration for "
"opcode %x failed\n", opcode);
}
EXPORT_SYMBOL(t3_register_cpl_handler);
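/*
* For illustration only, a module could redirect a particular opcode and
* later restore the default (the handler name is hypothetical):
*
*	static int my_rx_data(struct t3cdev *dev, struct sk_buff *skb)
*	{
*		... consume skb ...
*		return CPL_RET_BUF_DONE;
*	}
*
*	t3_register_cpl_handler(CPL_RX_DATA, my_rx_data);
*	t3_register_cpl_handler(CPL_RX_DATA, NULL);	back to do_bad_cpl
*/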
/*
* T3CDEV's receive method.
*/
int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
{
while (n--) {
struct sk_buff *skb = *skbs++;
unsigned int opcode = G_OPCODE(ntohl(skb->csum));
int ret = cpl_handlers[opcode] (dev, skb);
#if VALIDATE_TID
if (ret & CPL_RET_UNKNOWN_TID) {
union opcode_tid *p = cplhdr(skb);
printk(KERN_ERR "%s: CPL message (opcode %u) had "
"unknown TID %u\n", dev->name, opcode,
G_TID(ntohl(p->opcode_tid)));
}
#endif
if (ret & CPL_RET_BUF_DONE)
kfree_skb(skb);
}
return 0;
}
/*
* Sends an sk_buff to a T3C driver after dealing with any active network taps.
*/
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
{
int r;
local_bh_disable();
r = dev->send(dev, skb);
local_bh_enable();
return r;
}
EXPORT_SYMBOL(cxgb3_ofld_send);
static int is_offloading(struct net_device *dev)
{
struct adapter *adapter;
int i;
read_lock_bh(&adapter_list_lock);
list_for_each_entry(adapter, &adapter_list, adapter_list) {
for_each_port(adapter, i) {
if (dev == adapter->port[i]) {
read_unlock_bh(&adapter_list_lock);
return 1;
}
}
}
read_unlock_bh(&adapter_list_lock);
return 0;
}
void cxgb_neigh_update(struct neighbour *neigh)
{
struct net_device *dev = neigh->dev;
if (dev && (is_offloading(dev))) {
struct t3cdev *tdev = T3CDEV(dev);
BUG_ON(!tdev);
t3_l2t_update(tdev, neigh);
}
}
static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
{
struct sk_buff *skb;
struct cpl_set_tcb_field *req;
skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
if (!skb) {
printk(KERN_ERR "%s: cannot allocate skb!\n", __FUNCTION__);
return;
}
skb->priority = CPL_PRIORITY_CONTROL;
req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
req->reply = 0;
req->cpu_idx = 0;
req->word = htons(W_TCB_L2T_IX);
req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
tdev->send(tdev, skb);
}
void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
{
struct net_device *olddev, *newdev;
struct tid_info *ti;
struct t3cdev *tdev;
u32 tid;
int update_tcb;
struct l2t_entry *e;
struct t3c_tid_entry *te;
olddev = old->neighbour->dev;
newdev = new->neighbour->dev;
if (!is_offloading(olddev))
return;
if (!is_offloading(newdev)) {
printk(KERN_WARNING "%s: Redirect to non-offload"
"device ignored.\n", __FUNCTION__);
return;
}
tdev = T3CDEV(olddev);
BUG_ON(!tdev);
if (tdev != T3CDEV(newdev)) {
printk(KERN_WARNING "%s: Redirect to different "
"offload device ignored.\n", __FUNCTION__);
return;
}
/* Add new L2T entry */
e = t3_l2t_get(tdev, new->neighbour, newdev);
if (!e) {
printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
__FUNCTION__);
return;
}
/* Walk tid table and notify clients of dst change. */
ti = &(T3C_DATA(tdev))->tid_maps;
for (tid = 0; tid < ti->ntids; tid++) {
te = lookup_tid(ti, tid);
BUG_ON(!te);
if (te->ctx && te->client && te->client->redirect) {
update_tcb = te->client->redirect(te->ctx, old, new, e);
if (update_tcb) {
l2t_hold(L2DATA(tdev), e);
set_l2t_ix(tdev, tid, e);
}
}
}
l2t_release(L2DATA(tdev), e);
}
/*
* Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
* The allocated memory is cleared.
*/
void *cxgb_alloc_mem(unsigned long size)
{
void *p = kmalloc(size, GFP_KERNEL);
if (!p)
p = vmalloc(size);
if (p)
memset(p, 0, size);
return p;
}
/*
* Free memory allocated through cxgb_alloc_mem().
*/
void cxgb_free_mem(void *addr)
{
unsigned long p = (unsigned long)addr;
if (p >= VMALLOC_START && p < VMALLOC_END)
vfree(addr);
else
kfree(addr);
}
/*
* Allocate and initialize the TID tables. Returns 0 on success.
*/
static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
unsigned int natids, unsigned int nstids,
unsigned int atid_base, unsigned int stid_base)
{
unsigned long size = ntids * sizeof(*t->tid_tab) +
natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);
t->tid_tab = cxgb_alloc_mem(size);
if (!t->tid_tab)
return -ENOMEM;
t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
t->ntids = ntids;
t->nstids = nstids;
t->stid_base = stid_base;
t->sfree = NULL;
t->natids = natids;
t->atid_base = atid_base;
t->afree = NULL;
t->stids_in_use = t->atids_in_use = 0;
atomic_set(&t->tids_in_use, 0);
spin_lock_init(&t->stid_lock);
spin_lock_init(&t->atid_lock);
/*
* Setup the free lists for stid_tab and atid_tab.
*/
if (nstids) {
while (--nstids)
t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
t->sfree = t->stid_tab;
}
if (natids) {
while (--natids)
t->atid_tab[natids - 1].next = &t->atid_tab[natids];
t->afree = t->atid_tab;
}
return 0;
}
static void free_tid_maps(struct tid_info *t)
{
cxgb_free_mem(t->tid_tab);
}
static inline void add_adapter(struct adapter *adap)
{
write_lock_bh(&adapter_list_lock);
list_add_tail(&adap->adapter_list, &adapter_list);
write_unlock_bh(&adapter_list_lock);
}
static inline void remove_adapter(struct adapter *adap)
{
write_lock_bh(&adapter_list_lock);
list_del(&adap->adapter_list);
write_unlock_bh(&adapter_list_lock);
}
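/*
* Bring up the offload side of an adapter: query the offload parameters
* through the ->ctl hook, allocate the L2 and TID tables, install the real
* receive and neighbour-update handlers, and register the netevent notifier
* for the first adapter.
*/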
int cxgb3_offload_activate(struct adapter *adapter)
{
struct t3cdev *dev = &adapter->tdev;
int natids, err;
struct t3c_data *t;
struct tid_range stid_range, tid_range;
struct mtutab mtutab;
unsigned int l2t_capacity;
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (!t)
return -ENOMEM;
err = -EOPNOTSUPP;
if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
goto out_free;
err = -ENOMEM;
L2DATA(dev) = t3_init_l2t(l2t_capacity);
if (!L2DATA(dev))
goto out_free;
natids = min(tid_range.num / 2, MAX_ATIDS);
err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
stid_range.num, ATID_BASE, stid_range.base);
if (err)
goto out_free_l2t;
t->mtus = mtutab.mtus;
t->nmtus = mtutab.size;
INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
spin_lock_init(&t->tid_release_lock);
INIT_LIST_HEAD(&t->list_node);
t->dev = dev;
T3C_DATA(dev) = t;
dev->recv = process_rx;
dev->neigh_update = t3_l2t_update;
/* Register netevent handler once */
if (list_empty(&adapter_list))
register_netevent_notifier(&nb);
add_adapter(adapter);
return 0;
out_free_l2t:
t3_free_l2t(L2DATA(dev));
L2DATA(dev) = NULL;
out_free:
kfree(t);
return err;
}
void cxgb3_offload_deactivate(struct adapter *adapter)
{
struct t3cdev *tdev = &adapter->tdev;
struct t3c_data *t = T3C_DATA(tdev);
remove_adapter(adapter);
if (list_empty(&adapter_list))
unregister_netevent_notifier(&nb);
free_tid_maps(&t->tid_maps);
T3C_DATA(tdev) = NULL;
t3_free_l2t(L2DATA(tdev));
L2DATA(tdev) = NULL;
kfree(t);
}
static inline void register_tdev(struct t3cdev *tdev)
{
static int unit;
mutex_lock(&cxgb3_db_lock);
snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
mutex_unlock(&cxgb3_db_lock);
}
static inline void unregister_tdev(struct t3cdev *tdev)
{
mutex_lock(&cxgb3_db_lock);
list_del(&tdev->ofld_dev_list);
mutex_unlock(&cxgb3_db_lock);
}
void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
{
struct t3cdev *tdev = &adapter->tdev;
INIT_LIST_HEAD(&tdev->ofld_dev_list);
cxgb3_set_dummy_ops(tdev);
tdev->send = t3_offload_tx;
tdev->ctl = cxgb_offload_ctl;
tdev->type = adapter->params.rev == 0 ? T3A : T3B;
register_tdev(tdev);
}
void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
{
struct t3cdev *tdev = &adapter->tdev;
tdev->recv = NULL;
tdev->neigh_update = NULL;
unregister_tdev(tdev);
}
void __init cxgb3_offload_init(void)
{
int i;
for (i = 0; i < NUM_CPL_CMDS; ++i)
cpl_handlers[i] = do_bad_cpl;
t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
t3_register_cpl_handler(CPL_SET_TCB_RPL, do_set_tcb_rpl);
t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
}
/*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
* Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _CXGB3_OFFLOAD_H
#define _CXGB3_OFFLOAD_H
#include <linux/list.h>
#include <linux/skbuff.h>
#include "l2t.h"
#include "t3cdev.h"
#include "t3_cpl.h"
struct adapter;
void cxgb3_offload_init(void);
void cxgb3_adapter_ofld(struct adapter *adapter);
void cxgb3_adapter_unofld(struct adapter *adapter);
int cxgb3_offload_activate(struct adapter *adapter);
void cxgb3_offload_deactivate(struct adapter *adapter);
void cxgb3_set_dummy_ops(struct t3cdev *dev);
/*
* Client registration. Users of the T3 driver must register themselves.
* The T3 driver will call the add function of every client for each T3
* adapter activated, passing up the t3cdev ptr. Each client fills out an
* array of callback functions to process CPL messages.
*/
void cxgb3_register_client(struct cxgb3_client *client);
void cxgb3_unregister_client(struct cxgb3_client *client);
void cxgb3_add_clients(struct t3cdev *tdev);
void cxgb3_remove_clients(struct t3cdev *tdev);
typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev,
struct sk_buff *skb, void *ctx);
struct cxgb3_client {
char *name;
void (*add) (struct t3cdev *);
void (*remove) (struct t3cdev *);
cxgb3_cpl_handler_func *handlers;
int (*redirect)(void *ctx, struct dst_entry *old,
struct dst_entry *new, struct l2t_entry *l2t);
struct list_head client_list;
};
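/*
* Sketch of a typical registration, for illustration only; all names below
* are hypothetical:
*
*	static cxgb3_cpl_handler_func my_handlers[NUM_CPL_CMDS] = {
*		[CPL_ACT_ESTABLISH] = my_act_establish,
*		[CPL_RX_DATA]       = my_rx_data,
*	};
*
*	static struct cxgb3_client my_client = {
*		.name     = "my_ulp",
*		.add      = my_add,
*		.remove   = my_remove,
*		.handlers = my_handlers,
*	};
*
*	cxgb3_register_client(&my_client);
*/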
/*
* TID allocation services.
*/
int cxgb3_alloc_atid(struct t3cdev *dev, struct cxgb3_client *client,
void *ctx);
int cxgb3_alloc_stid(struct t3cdev *dev, struct cxgb3_client *client,
void *ctx);
void *cxgb3_free_atid(struct t3cdev *dev, int atid);
void cxgb3_free_stid(struct t3cdev *dev, int stid);
void cxgb3_insert_tid(struct t3cdev *dev, struct cxgb3_client *client,
void *ctx, unsigned int tid);
void cxgb3_queue_tid_release(struct t3cdev *dev, unsigned int tid);
void cxgb3_remove_tid(struct t3cdev *dev, void *ctx, unsigned int tid);
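/*
* Rough active-open lifecycle, for illustration (error handling omitted,
* names hypothetical): allocate an atid before sending the open request,
* switch to the hardware TID once the connection is established, and remove
* the TID on teardown.
*
*	atid = cxgb3_alloc_atid(tdev, &my_client, my_ctx);
*	... in the CPL_ACT_ESTABLISH handler ...
*	cxgb3_insert_tid(tdev, &my_client, my_ctx, hwtid);
*	cxgb3_free_atid(tdev, atid);
*	... on teardown ...
*	cxgb3_remove_tid(tdev, my_ctx, hwtid);
*/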
struct t3c_tid_entry {
struct cxgb3_client *client;
void *ctx;
};
/* CPL message priority levels */
enum {
CPL_PRIORITY_DATA = 0, /* data messages */
CPL_PRIORITY_SETUP = 1, /* connection setup messages */
CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */
CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */
CPL_PRIORITY_ACK = 1, /* RX ACK messages */
CPL_PRIORITY_CONTROL = 1 /* offload control messages */
};
/* Flags for return value of CPL message handlers */
enum {
CPL_RET_BUF_DONE = 1, /* buffer processing done, buffer may be freed */
CPL_RET_BAD_MSG = 2, /* bad CPL message (e.g., unknown opcode) */
CPL_RET_UNKNOWN_TID = 4 /* unexpected unknown TID */
};
typedef int (*cpl_handler_func)(struct t3cdev *dev, struct sk_buff *skb);
/*
* Returns a pointer to the first byte of the CPL header in an sk_buff that
* contains a CPL message.
*/
static inline void *cplhdr(struct sk_buff *skb)
{
return skb->data;
}
void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h);
union listen_entry {
struct t3c_tid_entry t3c_tid;
union listen_entry *next;
};
union active_open_entry {
struct t3c_tid_entry t3c_tid;
union active_open_entry *next;
};
/*
* Holds the size, base address, free list start, etc. of the TID, server TID,
* and active-open TID tables for an offload device.
* The tables themselves are allocated dynamically.
*/
struct tid_info {
struct t3c_tid_entry *tid_tab;
unsigned int ntids;
atomic_t tids_in_use;
union listen_entry *stid_tab;
unsigned int nstids;
unsigned int stid_base;
union active_open_entry *atid_tab;
unsigned int natids;
unsigned int atid_base;
/*
* The following members are accessed R/W so we put them in their own
* cache lines.
*
* XXX We could combine the atid fields above with the lock here since
* atids are used once (unlike other tids). OTOH the above fields are
* usually in cache due to tid_tab.
*/
spinlock_t atid_lock ____cacheline_aligned_in_smp;
union active_open_entry *afree;
unsigned int atids_in_use;
spinlock_t stid_lock ____cacheline_aligned;
union listen_entry *sfree;
unsigned int stids_in_use;
};
struct t3c_data {
struct list_head list_node;
struct t3cdev *dev;
unsigned int tx_max_chunk; /* max payload for TX_DATA */
unsigned int max_wrs; /* max in-flight WRs per connection */
unsigned int nmtus;
const unsigned short *mtus;
struct tid_info tid_maps;
struct t3c_tid_entry *tid_release_list;
spinlock_t tid_release_lock;
struct work_struct tid_release_task;
};
/*
* t3cdev -> t3c_data accessor
*/
#define T3C_DATA(dev) (*(struct t3c_data **)&(dev)->l4opt)
#endif
/*
* ----------------------------------------------------------------------------
* >>>>>>>>>>>>>>>>>>>>>>>>>>>>> COPYRIGHT NOTICE <<<<<<<<<<<<<<<<<<<<<<<<<<<<<
* ----------------------------------------------------------------------------
* Copyright 2004 (C) Chelsio Communications, Inc. (Chelsio)
*
* Chelsio Communications, Inc. owns the sole copyright to this software.
* You may not make a copy, you may not derive works herefrom, and you may
* not distribute this work to others. Other restrictions of rights may apply
* as well. This is unpublished, confidential information. All rights reserved.
* This software contains confidential information and trade secrets of Chelsio
* Communications, Inc. Use, disclosure, or reproduction is prohibited without
* the prior express written permission of Chelsio Communications, Inc.
* ----------------------------------------------------------------------------
* >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Warranty <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
* ----------------------------------------------------------------------------
* CHELSIO MAKES NO WARRANTY OF ANY KIND WITH REGARD TO THE USE OF THIS
* SOFTWARE, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
* ----------------------------------------------------------------------------
*
* This is the firmware_exports.h header file, firmware interface defines.
*
* Written January 2005 by felix marti (felix@chelsio.com)
*/
#ifndef _FIRMWARE_EXPORTS_H_
#define _FIRMWARE_EXPORTS_H_
/* WR OPCODES supported by the firmware.
*/
#define FW_WROPCODE_FORWARD 0x01
#define FW_WROPCODE_BYPASS 0x05
#define FW_WROPCODE_TUNNEL_TX_PKT 0x03
#define FW_WROPOCDE_ULPTX_DATA_SGL 0x00
#define FW_WROPCODE_ULPTX_MEM_READ 0x02
#define FW_WROPCODE_ULPTX_PKT 0x04
#define FW_WROPCODE_ULPTX_INVALIDATE 0x06
#define FW_WROPCODE_TUNNEL_RX_PKT 0x07
#define FW_WROPCODE_OFLD_GETTCB_RPL 0x08
#define FW_WROPCODE_OFLD_CLOSE_CON 0x09
#define FW_WROPCODE_OFLD_TP_ABORT_CON_REQ 0x0A
#define FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL 0x0F
#define FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ 0x0B
#define FW_WROPCODE_OFLD_TP_ABORT_CON_RPL 0x0C
#define FW_WROPCODE_OFLD_TX_DATA 0x0D
#define FW_WROPCODE_OFLD_TX_DATA_ACK 0x0E
#define FW_WROPCODE_RI_RDMA_INIT 0x10
#define FW_WROPCODE_RI_RDMA_WRITE 0x11
#define FW_WROPCODE_RI_RDMA_READ_REQ 0x12
#define FW_WROPCODE_RI_RDMA_READ_RESP 0x13
#define FW_WROPCODE_RI_SEND 0x14
#define FW_WROPCODE_RI_TERMINATE 0x15
#define FW_WROPCODE_RI_RDMA_READ 0x16
#define FW_WROPCODE_RI_RECEIVE 0x17
#define FW_WROPCODE_RI_BIND_MW 0x18
#define FW_WROPCODE_RI_FASTREGISTER_MR 0x19
#define FW_WROPCODE_RI_LOCAL_INV 0x1A
#define FW_WROPCODE_RI_MODIFY_QP 0x1B
#define FW_WROPCODE_RI_BYPASS 0x1C
#define FW_WROPOCDE_RSVD 0x1E
#define FW_WROPCODE_SGE_EGRESSCONTEXT_RR 0x1F
#define FW_WROPCODE_MNGT 0x1D
#define FW_MNGTOPCODE_PKTSCHED_SET 0x00
/* Maximum size of a WR sent from the host, limited by the SGE.
*
* Note: WR coming from ULP or TP are only limited by CIM.
*/
#define FW_WR_SIZE 128
/* Maximum number of outstanding WRs sent from the host. Value must be
* programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by
* offload modules to limit the number of WRs per connection.
*/
#define FW_T3_WR_NUM 16
#define FW_N3_WR_NUM 7
#ifndef N3
# define FW_WR_NUM FW_T3_WR_NUM
#else
# define FW_WR_NUM FW_N3_WR_NUM
#endif
/* FW_TUNNEL_NUM corresponds to the number of supported TUNNEL Queues. These
* queues must start at SGE Egress Context FW_TUNNEL_SGEEC_START and must
* start at 'TID' (or 'uP Token') FW_TUNNEL_TID_START.
*
* Ingress Traffic (e.g. DMA completion credit) for TUNNEL Queue[i] is sent
* to RESP Queue[i].
*/
#define FW_TUNNEL_NUM 8
#define FW_TUNNEL_SGEEC_START 8
#define FW_TUNNEL_TID_START 65544
/* FW_CTRL_NUM corresponds to the number of supported CTRL Queues. These queues
* must start at SGE Egress Context FW_CTRL_SGEEC_START and must start at 'TID'
* (or 'uP Token') FW_CTRL_TID_START.
*
* Ingress Traffic for CTRL Queue[i] is sent to RESP Queue[i].
*/
#define FW_CTRL_NUM 8
#define FW_CTRL_SGEEC_START 65528
#define FW_CTRL_TID_START 65536
/* FW_OFLD_NUM corresponds to the number of supported OFFLOAD Queues. These
* queues must start at SGE Egress Context FW_OFLD_SGEEC_START.
*
* Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for
* OFFLOAD Queues, as the host is responsible for providing the correct TID in
* every WR.
*
* Ingress Traffic for OFFLOAD Queue[i] is sent to RESP Queue[i].
*/
#define FW_OFLD_NUM 8
#define FW_OFLD_SGEEC_START 0
/* FW_RI_NUM corresponds to the number of supported RI (RDMA Interface)
* Queues. These queues must start at SGE Egress Context FW_RI_SGEEC_START
* and at 'TID' (or 'uP Token') FW_RI_TID_START.
*/
#define FW_RI_NUM 1
#define FW_RI_SGEEC_START 65527
#define FW_RI_TID_START 65552
/*
* The RX_PKT_TID
*/
#define FW_RX_PKT_NUM 1
#define FW_RX_PKT_TID_START 65553
/* FW_WRC_NUM corresponds to the number of Work Request Contexts supported
* by the firmware.
*/
#define FW_WRC_NUM \
(65536 + FW_TUNNEL_NUM + FW_CTRL_NUM + FW_RI_NUM + FW_RX_PKT_NUM)
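/* With the values above this evaluates to 65536 + 8 + 8 + 1 + 1 = 65554. */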
#endif /* _FIRMWARE_EXPORTS_H_ */
/*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
* Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <net/neighbour.h>
#include "common.h"
#include "t3cdev.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
#define VLAN_NONE 0xfff
/*
* Module locking notes: There is a RW lock protecting the L2 table as a
* whole plus a spinlock per L2T entry. Entry lookups and allocations happen
* under the protection of the table lock, individual entry changes happen
* while holding that entry's spinlock. The table lock nests outside the
* entry locks. Allocations of new entries take the table lock as writers so
* no other lookups can happen while allocating new entries. Entry updates
* take the table lock as readers so multiple entries can be updated in
* parallel. An L2T entry can be dropped by decrementing its reference count;
* dropping can therefore happen in parallel with entry allocation, but no entry
* can change state or increment its ref count during allocation as both of
* these perform lookups.
*/
static inline unsigned int vlan_prio(const struct l2t_entry *e)
{
return e->vlan >> 13;
}
static inline unsigned int arp_hash(u32 key, int ifindex,
const struct l2t_data *d)
{
return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
}
static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
{
neigh_hold(n);
if (e->neigh)
neigh_release(e->neigh);
e->neigh = n;
}
/*
* Set up an L2T entry and send any packets waiting in the arp queue. The
* supplied skb is used for the CPL_L2T_WRITE_REQ. Must be called with the
* entry locked.
*/
static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e)
{
struct cpl_l2t_write_req *req;
if (!skb) {
skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
if (!skb)
return -ENOMEM;
}
req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
V_L2T_W_PRIO(vlan_prio(e)));
memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
skb->priority = CPL_PRIORITY_CONTROL;
cxgb3_ofld_send(dev, skb);
while (e->arpq_head) {
skb = e->arpq_head;
e->arpq_head = skb->next;
skb->next = NULL;
cxgb3_ofld_send(dev, skb);
}
e->arpq_tail = NULL;
e->state = L2T_STATE_VALID;
return 0;
}
/*
* Add a packet to an L2T entry's queue of packets awaiting resolution.
* Must be called with the entry's lock held.
*/
static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
{
skb->next = NULL;
if (e->arpq_head)
e->arpq_tail->next = skb;
else
e->arpq_head = skb;
e->arpq_tail = skb;
}
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e)
{
again:
switch (e->state) {
case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
neigh_event_send(e->neigh, NULL);
spin_lock_bh(&e->lock);
if (e->state == L2T_STATE_STALE)
e->state = L2T_STATE_VALID;
spin_unlock_bh(&e->lock);
case L2T_STATE_VALID: /* fast-path, send the packet on */
return cxgb3_ofld_send(dev, skb);
case L2T_STATE_RESOLVING:
spin_lock_bh(&e->lock);
if (e->state != L2T_STATE_RESOLVING) {
/* ARP already completed */
spin_unlock_bh(&e->lock);
goto again;
}
arpq_enqueue(e, skb);
spin_unlock_bh(&e->lock);
/*
* Only the first packet added to the arpq should kick off
* resolution. However, because the alloc_skb below can fail,
* we allow each packet added to the arpq to retry resolution
* as a way of recovering from transient memory exhaustion.
* A better way would be to use a work request to retry L2T
* entries when there's no memory.
*/
if (!neigh_event_send(e->neigh, NULL)) {
skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
GFP_ATOMIC);
if (!skb)
break;
spin_lock_bh(&e->lock);
if (e->arpq_head)
setup_l2e_send_pending(dev, skb, e);
else /* we lost the race */
__kfree_skb(skb);
spin_unlock_bh(&e->lock);
}
}
return 0;
}
EXPORT_SYMBOL(t3_l2t_send_slow);
void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
{
again:
switch (e->state) {
case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
neigh_event_send(e->neigh, NULL);
spin_lock_bh(&e->lock);
if (e->state == L2T_STATE_STALE) {
e->state = L2T_STATE_VALID;
}
spin_unlock_bh(&e->lock);
return;
case L2T_STATE_VALID: /* fast-path, send the packet on */
return;
case L2T_STATE_RESOLVING:
spin_lock_bh(&e->lock);
if (e->state != L2T_STATE_RESOLVING) {
/* ARP already completed */
spin_unlock_bh(&e->lock);
goto again;
}
spin_unlock_bh(&e->lock);
/*
* Only the first packet added to the arpq should kick off
* resolution. However, because the alloc_skb below can fail,
* we allow each packet added to the arpq to retry resolution
* as a way of recovering from transient memory exhaustion.
* A better way would be to use a work request to retry L2T
* entries when there's no memory.
*/
neigh_event_send(e->neigh, NULL);
}
return;
}
EXPORT_SYMBOL(t3_l2t_send_event);
/*
* Allocate a free L2T entry. Must be called with l2t_data.lock held.
*/
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
struct l2t_entry *end, *e, **p;
if (!atomic_read(&d->nfree))
return NULL;
/* there's definitely a free entry */
for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
if (atomic_read(&e->refcnt) == 0)
goto found;
for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e) ;
found:
d->rover = e + 1;
atomic_dec(&d->nfree);
/*
* The entry we found may be an inactive entry that is
* presently in the hash table. We need to remove it.
*/
if (e->state != L2T_STATE_UNUSED) {
int hash = arp_hash(e->addr, e->ifindex, d);
for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
if (*p == e) {
*p = e->next;
break;
}
e->state = L2T_STATE_UNUSED;
}
return e;
}
/*
* Called when an L2T entry has no more users. The entry is left in the hash
* table since it is likely to be reused but we also bump nfree to indicate
* that the entry can be reallocated for a different neighbor. We also drop
* the existing neighbor reference in case the neighbor is going away and is
* waiting on our reference.
*
* Because entries can be reallocated to other neighbors once their ref count
* drops to 0 we need to take the entry's lock to avoid races with a new
* incarnation.
*/
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
{
spin_lock_bh(&e->lock);
if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
if (e->neigh) {
neigh_release(e->neigh);
e->neigh = NULL;
}
}
spin_unlock_bh(&e->lock);
atomic_inc(&d->nfree);
}
EXPORT_SYMBOL(t3_l2e_free);
/*
* Update an L2T entry that was previously used for the same next hop as neigh.
* Must be called with softirqs disabled.
*/
static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
{
unsigned int nud_state;
spin_lock(&e->lock); /* avoid race with t3_l2t_free */
if (neigh != e->neigh)
neigh_replace(e, neigh);
nud_state = neigh->nud_state;
if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
!(nud_state & NUD_VALID))
e->state = L2T_STATE_RESOLVING;
else if (nud_state & NUD_CONNECTED)
e->state = L2T_STATE_VALID;
else
e->state = L2T_STATE_STALE;
spin_unlock(&e->lock);
}
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
struct net_device *dev)
{
struct l2t_entry *e;
struct l2t_data *d = L2DATA(cdev);
u32 addr = *(u32 *) neigh->primary_key;
int ifidx = neigh->dev->ifindex;
int hash = arp_hash(addr, ifidx, d);
struct port_info *p = netdev_priv(dev);
int smt_idx = p->port_id;
write_lock_bh(&d->lock);
for (e = d->l2tab[hash].first; e; e = e->next)
if (e->addr == addr && e->ifindex == ifidx &&
e->smt_idx == smt_idx) {
l2t_hold(d, e);
if (atomic_read(&e->refcnt) == 1)
reuse_entry(e, neigh);
goto done;
}
/* Need to allocate a new entry */
e = alloc_l2e(d);
if (e) {
spin_lock(&e->lock); /* avoid race with t3_l2t_free */
e->next = d->l2tab[hash].first;
d->l2tab[hash].first = e;
e->state = L2T_STATE_RESOLVING;
e->addr = addr;
e->ifindex = ifidx;
e->smt_idx = smt_idx;
atomic_set(&e->refcnt, 1);
neigh_replace(e, neigh);
if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
e->vlan = VLAN_DEV_INFO(neigh->dev)->vlan_id;
else
e->vlan = VLAN_NONE;
spin_unlock(&e->lock);
}
done:
write_unlock_bh(&d->lock);
return e;
}
EXPORT_SYMBOL(t3_l2t_get);
/*
* Called when address resolution fails for an L2T entry to handle packets
* on the arpq head. If a packet specifies a failure handler it is invoked,
* otherwise the packet is sent to the offload device.
*
* XXX: maybe we should abandon the latter behavior and just require a failure
* handler.
*/
static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff *arpq)
{
while (arpq) {
struct sk_buff *skb = arpq;
struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
arpq = skb->next;
skb->next = NULL;
if (cb->arp_failure_handler)
cb->arp_failure_handler(dev, skb);
else
cxgb3_ofld_send(dev, skb);
}
}
/*
* Called when the host's ARP layer makes a change to some entry that is
* loaded into the HW L2 table.
*/
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
{
struct l2t_entry *e;
struct sk_buff *arpq = NULL;
struct l2t_data *d = L2DATA(dev);
u32 addr = *(u32 *) neigh->primary_key;
int ifidx = neigh->dev->ifindex;
int hash = arp_hash(addr, ifidx, d);
read_lock_bh(&d->lock);
for (e = d->l2tab[hash].first; e; e = e->next)
if (e->addr == addr && e->ifindex == ifidx) {
spin_lock(&e->lock);
goto found;
}
read_unlock_bh(&d->lock);
return;
found:
read_unlock(&d->lock);
if (atomic_read(&e->refcnt)) {
if (neigh != e->neigh)
neigh_replace(e, neigh);
if (e->state == L2T_STATE_RESOLVING) {
if (neigh->nud_state & NUD_FAILED) {
arpq = e->arpq_head;
e->arpq_head = e->arpq_tail = NULL;
} else if (neigh_is_connected(neigh))
setup_l2e_send_pending(dev, NULL, e);
} else {
e->state = neigh_is_connected(neigh) ?
L2T_STATE_VALID : L2T_STATE_STALE;
if (memcmp(e->dmac, neigh->ha, 6))
setup_l2e_send_pending(dev, NULL, e);
}
}
spin_unlock_bh(&e->lock);
if (arpq)
handle_failed_resolution(dev, arpq);
}
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
{
struct l2t_data *d;
int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);
d = cxgb_alloc_mem(size);
if (!d)
return NULL;
d->nentries = l2t_capacity;
d->rover = &d->l2tab[1]; /* entry 0 is not used */
atomic_set(&d->nfree, l2t_capacity - 1);
rwlock_init(&d->lock);
for (i = 0; i < l2t_capacity; ++i) {
d->l2tab[i].idx = i;
d->l2tab[i].state = L2T_STATE_UNUSED;
spin_lock_init(&d->l2tab[i].lock);
atomic_set(&d->l2tab[i].refcnt, 0);
}
return d;
}
void t3_free_l2t(struct l2t_data *d)
{
cxgb_free_mem(d);
}
/*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
* Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _CHELSIO_L2T_H
#define _CHELSIO_L2T_H
#include <linux/spinlock.h>
#include "t3cdev.h"
#include <asm/atomic.h>
enum {
L2T_STATE_VALID, /* entry is up to date */
L2T_STATE_STALE, /* entry may be used but needs revalidation */
L2T_STATE_RESOLVING, /* entry needs address resolution */
L2T_STATE_UNUSED /* entry not in use */
};
struct neighbour;
struct sk_buff;
/*
* Each L2T entry plays multiple roles. First of all, it keeps state for the
* corresponding entry of the HW L2 table and maintains a queue of offload
* packets awaiting address resolution. Second, it is a node of a hash table
* chain, where the nodes of the chain are linked together through their next
* pointer. Finally, each node is a bucket of a hash table, pointing to the
* first element in its chain through its first pointer.
*/
struct l2t_entry {
u16 state; /* entry state */
u16 idx; /* entry index */
u32 addr; /* dest IP address */
int ifindex; /* neighbor's net_device's ifindex */
u16 smt_idx; /* SMT index */
u16 vlan; /* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
struct neighbour *neigh; /* associated neighbour */
struct l2t_entry *first; /* start of hash chain */
struct l2t_entry *next; /* next l2t_entry on chain */
struct sk_buff *arpq_head; /* queue of packets awaiting resolution */
struct sk_buff *arpq_tail;
spinlock_t lock;
atomic_t refcnt; /* entry reference count */
u8 dmac[6]; /* neighbour's MAC address */
};
struct l2t_data {
unsigned int nentries; /* number of entries */
struct l2t_entry *rover; /* starting point for next allocation */
atomic_t nfree; /* number of free entries */
rwlock_t lock;
struct l2t_entry l2tab[0];
};
typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
struct sk_buff * skb);
/*
* Callback stored in an skb to handle address resolution failure.
*/
struct l2t_skb_cb {
arp_failure_handler_func arp_failure_handler;
};
#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
static inline void set_arp_failure_handler(struct sk_buff *skb,
arp_failure_handler_func hnd)
{
L2T_SKB_CB(skb)->arp_failure_handler = hnd;
}
/*
* Getting to the L2 data from an offload device.
*/
#define L2DATA(dev) ((dev)->l2opt)
#define W_TCB_L2T_IX 0
#define S_TCB_L2T_IX 7
#define M_TCB_L2T_IX 0x7ffULL
#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
struct net_device *dev);
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e);
void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
void t3_free_l2t(struct l2t_data *d);
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e)
{
if (likely(e->state == L2T_STATE_VALID))
return cxgb3_ofld_send(dev, skb);
return t3_l2t_send_slow(dev, skb, e);
}
static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e)
{
if (atomic_dec_and_test(&e->refcnt))
t3_l2e_free(d, e);
}
static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
atomic_dec(&d->nfree);
}
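/*
* Typical usage, sketched for illustration only (error handling and the
* arp-failure handler name are hypothetical):
*
*	e = t3_l2t_get(tdev, route_neighbour, egress_dev);
*	if (e) {
*		set_arp_failure_handler(skb, my_arp_failure);
*		l2t_send(tdev, skb, e);
*		...
*		l2t_release(L2DATA(tdev), e);
*	}
*/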
#endif
/*
* This file is part of the Chelsio T3 Ethernet driver.
*
* Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
* release for licensing terms and conditions.
*/
#include "common.h"
#include "regs.h"
enum {
IDT75P52100 = 4,
IDT75N43102 = 5
};
/* DBGI command mode */
enum {
DBGI_MODE_MBUS = 0,
DBGI_MODE_IDT52100 = 5
};
/* IDT 75P52100 commands */
#define IDT_CMD_READ 0
#define IDT_CMD_WRITE 1
#define IDT_CMD_SEARCH 2
#define IDT_CMD_LEARN 3
/* IDT LAR register address and value for 144-bit mode (low 32 bits) */
#define IDT_LAR_ADR0 0x180006
#define IDT_LAR_MODE144 0xffff0000
/* IDT SCR and SSR addresses (low 32 bits) */
#define IDT_SCR_ADR0 0x180000
#define IDT_SSR0_ADR0 0x180002
#define IDT_SSR1_ADR0 0x180004
/* IDT GMR base address (low 32 bits) */
#define IDT_GMR_BASE_ADR0 0x180020
/* IDT data and mask array base addresses (low 32 bits) */
#define IDT_DATARY_BASE_ADR0 0
#define IDT_MSKARY_BASE_ADR0 0x80000
/* IDT 75N43102 commands */
#define IDT4_CMD_SEARCH144 3
#define IDT4_CMD_WRITE 4
#define IDT4_CMD_READ 5
/* IDT 75N43102 SCR address (low 32 bits) */
#define IDT4_SCR_ADR0 0x3
/* IDT 75N43102 GMR base addresses (low 32 bits) */
#define IDT4_GMR_BASE0 0x10
#define IDT4_GMR_BASE1 0x20
#define IDT4_GMR_BASE2 0x30
/* IDT 75N43102 data and mask array base addresses (low 32 bits) */
#define IDT4_DATARY_BASE_ADR0 0x1000000
#define IDT4_MSKARY_BASE_ADR0 0x2000000
#define MAX_WRITE_ATTEMPTS 5
#define MAX_ROUTES 2048
/*
* Issue a command to the TCAM and wait for its completion. The address and
* any data required by the command must have been set up by the caller.
*/
static int mc5_cmd_write(struct adapter *adapter, u32 cmd)
{
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_CMD, cmd);
return t3_wait_op_done(adapter, A_MC5_DB_DBGI_RSP_STATUS,
F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1);
}
static inline void dbgi_wr_addr3(struct adapter *adapter, u32 v1, u32 v2,
u32 v3)
{
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, v1);
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR1, v2);
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR2, v3);
}
static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
u32 v3)
{
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA0, v1);
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA1, v2);
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3);
}
static inline void dbgi_rd_rsp3(struct adapter *adapter, u32 *v1, u32 *v2,
u32 *v3)
{
*v1 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA0);
*v2 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA1);
*v3 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA2);
}
/*
* Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM
* command cmd. The data to be written must have been set up by the caller.
* Returns -1 on failure, 0 on success.
*/
static int mc5_write(struct adapter *adapter, u32 addr_lo, u32 cmd)
{
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, addr_lo);
if (mc5_cmd_write(adapter, cmd) == 0)
return 0;
CH_ERR(adapter, "MC5 timeout writing to TCAM address 0x%x\n",
addr_lo);
return -1;
}
static int init_mask_data_array(struct mc5 *mc5, u32 mask_array_base,
u32 data_array_base, u32 write_cmd,
int addr_shift)
{
unsigned int i;
struct adapter *adap = mc5->adapter;
/*
* We need the size of the TCAM data and mask arrays in terms of
* 72-bit entries.
*/
unsigned int size72 = mc5->tcam_size;
unsigned int server_base = t3_read_reg(adap, A_MC5_DB_SERVER_INDEX);
if (mc5->mode == MC5_MODE_144_BIT) {
size72 *= 2; /* 1 144-bit entry is 2 72-bit entries */
server_base *= 2;
}
/* Clear the data array */
dbgi_wr_data3(adap, 0, 0, 0);
for (i = 0; i < size72; i++)
if (mc5_write(adap, data_array_base + (i << addr_shift),
write_cmd))
return -1;
/* Initialize the mask array. */
dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
for (i = 0; i < size72; i++) {
if (i == server_base) /* entering server or routing region */
t3_write_reg(adap, A_MC5_DB_DBGI_REQ_DATA0,
mc5->mode == MC5_MODE_144_BIT ?
0xfffffff9 : 0xfffffffd);
if (mc5_write(adap, mask_array_base + (i << addr_shift),
write_cmd))
return -1;
}
return 0;
}
static int init_idt52100(struct mc5 *mc5)
{
int i;
struct adapter *adap = mc5->adapter;
t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
V_RDLAT(0x15) | V_LRNLAT(0x15) | V_SRCHLAT(0x15));
t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 2);
/*
* Use GMRs 14-15 for ELOOKUP, GMRs 12-13 for SYN lookups, and
* GMRs 8-9 for ACK- and AOPEN searches.
*/
t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD, IDT_CMD_SEARCH);
t3_write_reg(adap, A_MC5_DB_AOPEN_LRN_CMD, IDT_CMD_LEARN);
t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT_CMD_SEARCH | 0x6000);
t3_write_reg(adap, A_MC5_DB_SYN_LRN_CMD, IDT_CMD_LEARN);
t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT_CMD_SEARCH);
t3_write_reg(adap, A_MC5_DB_ACK_LRN_CMD, IDT_CMD_LEARN);
t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT_CMD_SEARCH);
t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT_CMD_SEARCH | 0x7000);
t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT_CMD_READ);
/* Set DBGI command mode for IDT TCAM. */
t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
/* Set up LAR */
dbgi_wr_data3(adap, IDT_LAR_MODE144, 0, 0);
if (mc5_write(adap, IDT_LAR_ADR0, IDT_CMD_WRITE))
goto err;
/* Set up SSRs */
dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0);
if (mc5_write(adap, IDT_SSR0_ADR0, IDT_CMD_WRITE) ||
mc5_write(adap, IDT_SSR1_ADR0, IDT_CMD_WRITE))
goto err;
/* Set up GMRs */
for (i = 0; i < 32; ++i) {
if (i >= 12 && i < 15)
dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
else if (i == 15)
dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
else
dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
if (mc5_write(adap, IDT_GMR_BASE_ADR0 + i, IDT_CMD_WRITE))
goto err;
}
/* Set up SCR */
dbgi_wr_data3(adap, 1, 0, 0);
if (mc5_write(adap, IDT_SCR_ADR0, IDT_CMD_WRITE))
goto err;
return init_mask_data_array(mc5, IDT_MSKARY_BASE_ADR0,
IDT_DATARY_BASE_ADR0, IDT_CMD_WRITE, 0);
err:
return -EIO;
}
static int init_idt43102(struct mc5 *mc5)
{
int i;
struct adapter *adap = mc5->adapter;
t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
adap->params.rev == 0 ? V_RDLAT(0xd) | V_SRCHLAT(0x11) :
V_RDLAT(0xd) | V_SRCHLAT(0x12));
/*
* Use GMRs 24-25 for ELOOKUP, GMRs 20-21 for SYN lookups, and no mask
* for ACK- and AOPEN searches.
*/
t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT4_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT4_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD,
IDT4_CMD_SEARCH144 | 0x3800);
t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT4_CMD_SEARCH144);
t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT4_CMD_SEARCH144 | 0x3800);
t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x3800);
t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x800);
t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT4_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT4_CMD_READ);
t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 3);
/* Set DBGI command mode for IDT TCAM. */
t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
/* Set up GMRs */
dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
for (i = 0; i < 7; ++i)
if (mc5_write(adap, IDT4_GMR_BASE0 + i, IDT4_CMD_WRITE))
goto err;
for (i = 0; i < 4; ++i)
if (mc5_write(adap, IDT4_GMR_BASE2 + i, IDT4_CMD_WRITE))
goto err;
dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
if (mc5_write(adap, IDT4_GMR_BASE1, IDT4_CMD_WRITE) ||
mc5_write(adap, IDT4_GMR_BASE1 + 1, IDT4_CMD_WRITE) ||
mc5_write(adap, IDT4_GMR_BASE1 + 4, IDT4_CMD_WRITE))
goto err;
dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
if (mc5_write(adap, IDT4_GMR_BASE1 + 5, IDT4_CMD_WRITE))
goto err;
/* Set up SCR */
dbgi_wr_data3(adap, 0xf0000000, 0, 0);
if (mc5_write(adap, IDT4_SCR_ADR0, IDT4_CMD_WRITE))
goto err;
return init_mask_data_array(mc5, IDT4_MSKARY_BASE_ADR0,
IDT4_DATARY_BASE_ADR0, IDT4_CMD_WRITE, 1);
err:
return -EIO;
}
/* Put MC5 in DBGI mode. */
static inline void mc5_dbgi_mode_enable(const struct mc5 *mc5)
{
t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_DBGIEN);
}
/* Put MC5 in M-Bus mode. */
static void mc5_dbgi_mode_disable(const struct mc5 *mc5)
{
t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
V_TMMODE(mc5->mode == MC5_MODE_72_BIT) |
V_COMPEN(mc5->mode == MC5_MODE_72_BIT) |
V_PRTYEN(mc5->parity_enabled) | F_MBUSEN);
}
/*
* Initialization that requires the OS and protocol layers to already
* be initialized goes here.
*/
int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
unsigned int nroutes)
{
u32 cfg;
int err;
unsigned int tcam_size = mc5->tcam_size;
struct adapter *adap = mc5->adapter;
if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size)
return -EINVAL;
/* Reset the TCAM */
cfg = t3_read_reg(adap, A_MC5_DB_CONFIG) & ~F_TMMODE;
cfg |= V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_TMRST;
t3_write_reg(adap, A_MC5_DB_CONFIG, cfg);
if (t3_wait_op_done(adap, A_MC5_DB_CONFIG, F_TMRDY, 1, 500, 0)) {
CH_ERR(adap, "TCAM reset timed out\n");
return -1;
}
t3_write_reg(adap, A_MC5_DB_ROUTING_TABLE_INDEX, tcam_size - nroutes);
t3_write_reg(adap, A_MC5_DB_FILTER_TABLE,
tcam_size - nroutes - nfilters);
t3_write_reg(adap, A_MC5_DB_SERVER_INDEX,
tcam_size - nroutes - nfilters - nservers);
mc5->parity_enabled = 1;
/* All the TCAM addresses we access have only the low 32 bits non 0 */
t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR1, 0);
t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR2, 0);
mc5_dbgi_mode_enable(mc5);
switch (mc5->part_type) {
case IDT75P52100:
err = init_idt52100(mc5);
break;
case IDT75N43102:
err = init_idt43102(mc5);
break;
default:
CH_ERR(adap, "Unsupported TCAM type %d\n", mc5->part_type);
err = -EINVAL;
break;
}
mc5_dbgi_mode_disable(mc5);
return err;
}
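/*
 * Editorial sketch (not part of the driver): a hypothetical caller might
 * partition the TCAM by reserving a few routing entries and a filter
 * region, handing the remainder to the server region.  Everything below
 * other than t3_mc5_init itself is invented for illustration; real sizes
 * come from the offload configuration, and nroutes is assumed not to
 * exceed MAX_ROUTES.
 */
#if 0
static int example_partition_tcam(struct mc5 *mc5)
{
        const unsigned int nroutes = 8;         /* assumed <= MAX_ROUTES */
        const unsigned int nfilters = 1024;     /* made-up filter region size */
        unsigned int nservers = mc5->tcam_size - nroutes - nfilters;

        return t3_mc5_init(mc5, nservers, nfilters, nroutes);
}
#endif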
/**
* t3_read_mc5_range - dump a part of the memory managed by MC5
* @mc5: the MC5 handle
* @start: the start address for the dump
* @n: number of 72-bit words to read
* @buf: result buffer
*
* Read n 72-bit words from MC5 memory, starting at the given location.
*/
int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start,
unsigned int n, u32 *buf)
{
u32 read_cmd;
int err = 0;
struct adapter *adap = mc5->adapter;
if (mc5->part_type == IDT75P52100)
read_cmd = IDT_CMD_READ;
else if (mc5->part_type == IDT75N43102)
read_cmd = IDT4_CMD_READ;
else
return -EINVAL;
mc5_dbgi_mode_enable(mc5);
while (n--) {
t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR0, start++);
if (mc5_cmd_write(adap, read_cmd)) {
err = -EIO;
break;
}
dbgi_rd_rsp3(adap, buf + 2, buf + 1, buf);
buf += 3;
}
mc5_dbgi_mode_disable(mc5);
return err;
}
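/*
 * Editorial sketch (not part of the driver): t3_read_mc5_range() returns
 * each 72-bit word as three consecutive u32s, so a caller reading N words
 * must supply room for 3 * N u32s.  example_dump_tcam is a made-up name
 * used purely for illustration.
 */
#if 0
static int example_dump_tcam(const struct mc5 *mc5, u32 *buf)
{
        /* dump the first 16 TCAM words into buf[0..47] */
        return t3_read_mc5_range(mc5, 0, 16, buf);
}
#endif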
#define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR)
/*
* MC5 interrupt handler
*/
void t3_mc5_intr_handler(struct mc5 *mc5)
{
struct adapter *adap = mc5->adapter;
u32 cause = t3_read_reg(adap, A_MC5_DB_INT_CAUSE);
if ((cause & F_PARITYERR) && mc5->parity_enabled) {
CH_ALERT(adap, "MC5 parity error\n");
mc5->stats.parity_err++;
}
if (cause & F_REQQPARERR) {
CH_ALERT(adap, "MC5 request queue parity error\n");
mc5->stats.reqq_parity_err++;
}
if (cause & F_DISPQPARERR) {
CH_ALERT(adap, "MC5 dispatch queue parity error\n");
mc5->stats.dispq_parity_err++;
}
if (cause & F_ACTRGNFULL)
mc5->stats.active_rgn_full++;
if (cause & F_NFASRCHFAIL)
mc5->stats.nfa_srch_err++;
if (cause & F_UNKNOWNCMD)
mc5->stats.unknown_cmd++;
if (cause & F_DELACTEMPTY)
mc5->stats.del_act_empty++;
if (cause & MC5_INT_FATAL)
t3_fatal_err(adap);
t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause);
}
void __devinit t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode)
{
#define K * 1024
static unsigned int tcam_part_size[] = { /* in K 72-bit entries */
64 K, 128 K, 256 K, 32 K
};
#undef K
u32 cfg = t3_read_reg(adapter, A_MC5_DB_CONFIG);
mc5->adapter = adapter;
mc5->mode = (unsigned char)mode;
mc5->part_type = (unsigned char)G_TMTYPE(cfg);
if (cfg & F_TMTYPEHI)
mc5->part_type |= 4;
mc5->tcam_size = tcam_part_size[G_TMPARTSIZE(cfg)];
if (mode == MC5_MODE_144_BIT)
mc5->tcam_size /= 2;
}
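/*
 * Editorial note on the register definitions below (not part of the
 * generated header): for a field FOO, S_FOO is its bit offset, M_FOO its
 * unshifted mask, V_FOO(x) positions a value within the register, F_FOO
 * is the single-bit form, and G_FOO(x) extracts the field from a register
 * value.  A typical read-modify-write of a field, sketched here with the
 * real t3_read_reg/t3_write_reg accessors and the A_SG_CONTROL field
 * macros defined below (example_set_pktshift is an invented name), might
 * look like this:
 */
#if 0
static void example_set_pktshift(struct adapter *adapter, unsigned int shift)
{
        u32 v = t3_read_reg(adapter, A_SG_CONTROL);

        v &= ~V_PKTSHIFT(M_PKTSHIFT);   /* clear the PKTSHIFT field */
        v |= V_PKTSHIFT(shift);         /* insert the new value */
        t3_write_reg(adapter, A_SG_CONTROL, v);
}
#endif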
#define A_SG_CONTROL 0x0
#define S_DROPPKT 20
#define V_DROPPKT(x) ((x) << S_DROPPKT)
#define F_DROPPKT V_DROPPKT(1U)
#define S_EGRGENCTRL 19
#define V_EGRGENCTRL(x) ((x) << S_EGRGENCTRL)
#define F_EGRGENCTRL V_EGRGENCTRL(1U)
#define S_USERSPACESIZE 14
#define M_USERSPACESIZE 0x1f
#define V_USERSPACESIZE(x) ((x) << S_USERSPACESIZE)
#define S_HOSTPAGESIZE 11
#define M_HOSTPAGESIZE 0x7
#define V_HOSTPAGESIZE(x) ((x) << S_HOSTPAGESIZE)
#define S_FLMODE 9
#define V_FLMODE(x) ((x) << S_FLMODE)
#define F_FLMODE V_FLMODE(1U)
#define S_PKTSHIFT 6
#define M_PKTSHIFT 0x7
#define V_PKTSHIFT(x) ((x) << S_PKTSHIFT)
#define S_ONEINTMULTQ 5
#define V_ONEINTMULTQ(x) ((x) << S_ONEINTMULTQ)
#define F_ONEINTMULTQ V_ONEINTMULTQ(1U)
#define S_BIGENDIANINGRESS 2
#define V_BIGENDIANINGRESS(x) ((x) << S_BIGENDIANINGRESS)
#define F_BIGENDIANINGRESS V_BIGENDIANINGRESS(1U)
#define S_ISCSICOALESCING 1
#define V_ISCSICOALESCING(x) ((x) << S_ISCSICOALESCING)
#define F_ISCSICOALESCING V_ISCSICOALESCING(1U)
#define S_GLOBALENABLE 0
#define V_GLOBALENABLE(x) ((x) << S_GLOBALENABLE)
#define F_GLOBALENABLE V_GLOBALENABLE(1U)
#define S_AVOIDCQOVFL 24
#define V_AVOIDCQOVFL(x) ((x) << S_AVOIDCQOVFL)
#define F_AVOIDCQOVFL V_AVOIDCQOVFL(1U)
#define S_OPTONEINTMULTQ 23
#define V_OPTONEINTMULTQ(x) ((x) << S_OPTONEINTMULTQ)
#define F_OPTONEINTMULTQ V_OPTONEINTMULTQ(1U)
#define S_CQCRDTCTRL 22
#define V_CQCRDTCTRL(x) ((x) << S_CQCRDTCTRL)
#define F_CQCRDTCTRL V_CQCRDTCTRL(1U)
#define A_SG_KDOORBELL 0x4
#define S_SELEGRCNTX 31
#define V_SELEGRCNTX(x) ((x) << S_SELEGRCNTX)
#define F_SELEGRCNTX V_SELEGRCNTX(1U)
#define S_EGRCNTX 0
#define M_EGRCNTX 0xffff
#define V_EGRCNTX(x) ((x) << S_EGRCNTX)
#define A_SG_GTS 0x8
#define S_RSPQ 29
#define M_RSPQ 0x7
#define V_RSPQ(x) ((x) << S_RSPQ)
#define G_RSPQ(x) (((x) >> S_RSPQ) & M_RSPQ)
#define S_NEWTIMER 16
#define M_NEWTIMER 0x1fff
#define V_NEWTIMER(x) ((x) << S_NEWTIMER)
#define S_NEWINDEX 0
#define M_NEWINDEX 0xffff
#define V_NEWINDEX(x) ((x) << S_NEWINDEX)
#define A_SG_CONTEXT_CMD 0xc
#define S_CONTEXT_CMD_OPCODE 28
#define M_CONTEXT_CMD_OPCODE 0xf
#define V_CONTEXT_CMD_OPCODE(x) ((x) << S_CONTEXT_CMD_OPCODE)
#define S_CONTEXT_CMD_BUSY 27
#define V_CONTEXT_CMD_BUSY(x) ((x) << S_CONTEXT_CMD_BUSY)
#define F_CONTEXT_CMD_BUSY V_CONTEXT_CMD_BUSY(1U)
#define S_CQ_CREDIT 20
#define M_CQ_CREDIT 0x7f
#define V_CQ_CREDIT(x) ((x) << S_CQ_CREDIT)
#define G_CQ_CREDIT(x) (((x) >> S_CQ_CREDIT) & M_CQ_CREDIT)
#define S_CQ 19
#define V_CQ(x) ((x) << S_CQ)
#define F_CQ V_CQ(1U)
#define S_RESPONSEQ 18
#define V_RESPONSEQ(x) ((x) << S_RESPONSEQ)
#define F_RESPONSEQ V_RESPONSEQ(1U)
#define S_EGRESS 17
#define V_EGRESS(x) ((x) << S_EGRESS)
#define F_EGRESS V_EGRESS(1U)
#define S_FREELIST 16
#define V_FREELIST(x) ((x) << S_FREELIST)
#define F_FREELIST V_FREELIST(1U)
#define S_CONTEXT 0
#define M_CONTEXT 0xffff
#define V_CONTEXT(x) ((x) << S_CONTEXT)
#define G_CONTEXT(x) (((x) >> S_CONTEXT) & M_CONTEXT)
#define A_SG_CONTEXT_DATA0 0x10
#define A_SG_CONTEXT_DATA1 0x14
#define A_SG_CONTEXT_DATA2 0x18
#define A_SG_CONTEXT_DATA3 0x1c
#define A_SG_CONTEXT_MASK0 0x20
#define A_SG_CONTEXT_MASK1 0x24
#define A_SG_CONTEXT_MASK2 0x28
#define A_SG_CONTEXT_MASK3 0x2c
#define A_SG_RSPQ_CREDIT_RETURN 0x30
#define S_CREDITS 0
#define M_CREDITS 0xffff
#define V_CREDITS(x) ((x) << S_CREDITS)
#define A_SG_DATA_INTR 0x34
#define S_ERRINTR 31
#define V_ERRINTR(x) ((x) << S_ERRINTR)
#define F_ERRINTR V_ERRINTR(1U)
#define A_SG_HI_DRB_HI_THRSH 0x38
#define A_SG_HI_DRB_LO_THRSH 0x3c
#define A_SG_LO_DRB_HI_THRSH 0x40
#define A_SG_LO_DRB_LO_THRSH 0x44
#define A_SG_RSPQ_FL_STATUS 0x4c
#define S_RSPQ0DISABLED 8
#define A_SG_EGR_RCQ_DRB_THRSH 0x54
#define S_HIRCQDRBTHRSH 16
#define M_HIRCQDRBTHRSH 0x7ff
#define V_HIRCQDRBTHRSH(x) ((x) << S_HIRCQDRBTHRSH)
#define S_LORCQDRBTHRSH 0
#define M_LORCQDRBTHRSH 0x7ff
#define V_LORCQDRBTHRSH(x) ((x) << S_LORCQDRBTHRSH)
#define A_SG_EGR_CNTX_BADDR 0x58
#define A_SG_INT_CAUSE 0x5c
#define S_RSPQDISABLED 3
#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
#define F_RSPQDISABLED V_RSPQDISABLED(1U)
#define S_RSPQCREDITOVERFOW 2
#define V_RSPQCREDITOVERFOW(x) ((x) << S_RSPQCREDITOVERFOW)
#define F_RSPQCREDITOVERFOW V_RSPQCREDITOVERFOW(1U)
#define A_SG_INT_ENABLE 0x60
#define A_SG_CMDQ_CREDIT_TH 0x64
#define S_TIMEOUT 8
#define M_TIMEOUT 0xffffff
#define V_TIMEOUT(x) ((x) << S_TIMEOUT)
#define S_THRESHOLD 0
#define M_THRESHOLD 0xff
#define V_THRESHOLD(x) ((x) << S_THRESHOLD)
#define A_SG_TIMER_TICK 0x68
#define A_SG_CQ_CONTEXT_BADDR 0x6c
#define A_SG_OCO_BASE 0x70
#define S_BASE1 16
#define M_BASE1 0xffff
#define V_BASE1(x) ((x) << S_BASE1)
#define A_SG_DRB_PRI_THRESH 0x74
#define A_PCIX_INT_ENABLE 0x80
#define S_MSIXPARERR 22
#define M_MSIXPARERR 0x7
#define V_MSIXPARERR(x) ((x) << S_MSIXPARERR)
#define S_CFPARERR 18
#define M_CFPARERR 0xf
#define V_CFPARERR(x) ((x) << S_CFPARERR)
#define S_RFPARERR 14
#define M_RFPARERR 0xf
#define V_RFPARERR(x) ((x) << S_RFPARERR)
#define S_WFPARERR 12
#define M_WFPARERR 0x3
#define V_WFPARERR(x) ((x) << S_WFPARERR)
#define S_PIOPARERR 11
#define V_PIOPARERR(x) ((x) << S_PIOPARERR)
#define F_PIOPARERR V_PIOPARERR(1U)
#define S_DETUNCECCERR 10
#define V_DETUNCECCERR(x) ((x) << S_DETUNCECCERR)
#define F_DETUNCECCERR V_DETUNCECCERR(1U)
#define S_DETCORECCERR 9
#define V_DETCORECCERR(x) ((x) << S_DETCORECCERR)
#define F_DETCORECCERR V_DETCORECCERR(1U)
#define S_RCVSPLCMPERR 8
#define V_RCVSPLCMPERR(x) ((x) << S_RCVSPLCMPERR)
#define F_RCVSPLCMPERR V_RCVSPLCMPERR(1U)
#define S_UNXSPLCMP 7
#define V_UNXSPLCMP(x) ((x) << S_UNXSPLCMP)
#define F_UNXSPLCMP V_UNXSPLCMP(1U)
#define S_SPLCMPDIS 6
#define V_SPLCMPDIS(x) ((x) << S_SPLCMPDIS)
#define F_SPLCMPDIS V_SPLCMPDIS(1U)
#define S_DETPARERR 5
#define V_DETPARERR(x) ((x) << S_DETPARERR)
#define F_DETPARERR V_DETPARERR(1U)
#define S_SIGSYSERR 4
#define V_SIGSYSERR(x) ((x) << S_SIGSYSERR)
#define F_SIGSYSERR V_SIGSYSERR(1U)
#define S_RCVMSTABT 3
#define V_RCVMSTABT(x) ((x) << S_RCVMSTABT)
#define F_RCVMSTABT V_RCVMSTABT(1U)
#define S_RCVTARABT 2
#define V_RCVTARABT(x) ((x) << S_RCVTARABT)
#define F_RCVTARABT V_RCVTARABT(1U)
#define S_SIGTARABT 1
#define V_SIGTARABT(x) ((x) << S_SIGTARABT)
#define F_SIGTARABT V_SIGTARABT(1U)
#define S_MSTDETPARERR 0
#define V_MSTDETPARERR(x) ((x) << S_MSTDETPARERR)
#define F_MSTDETPARERR V_MSTDETPARERR(1U)
#define A_PCIX_INT_CAUSE 0x84
#define A_PCIX_CFG 0x88
#define S_CLIDECEN 18
#define V_CLIDECEN(x) ((x) << S_CLIDECEN)
#define F_CLIDECEN V_CLIDECEN(1U)
#define A_PCIX_MODE 0x8c
#define S_PCLKRANGE 6
#define M_PCLKRANGE 0x3
#define V_PCLKRANGE(x) ((x) << S_PCLKRANGE)
#define G_PCLKRANGE(x) (((x) >> S_PCLKRANGE) & M_PCLKRANGE)
#define S_PCIXINITPAT 2
#define M_PCIXINITPAT 0xf
#define V_PCIXINITPAT(x) ((x) << S_PCIXINITPAT)
#define G_PCIXINITPAT(x) (((x) >> S_PCIXINITPAT) & M_PCIXINITPAT)
#define S_64BIT 0
#define V_64BIT(x) ((x) << S_64BIT)
#define F_64BIT V_64BIT(1U)
#define A_PCIE_INT_ENABLE 0x80
#define S_BISTERR 15
#define M_BISTERR 0xff
#define V_BISTERR(x) ((x) << S_BISTERR)
#define S_PCIE_MSIXPARERR 12
#define M_PCIE_MSIXPARERR 0x7
#define V_PCIE_MSIXPARERR(x) ((x) << S_PCIE_MSIXPARERR)
#define S_PCIE_CFPARERR 11
#define V_PCIE_CFPARERR(x) ((x) << S_PCIE_CFPARERR)
#define F_PCIE_CFPARERR V_PCIE_CFPARERR(1U)
#define S_PCIE_RFPARERR 10
#define V_PCIE_RFPARERR(x) ((x) << S_PCIE_RFPARERR)
#define F_PCIE_RFPARERR V_PCIE_RFPARERR(1U)
#define S_PCIE_WFPARERR 9
#define V_PCIE_WFPARERR(x) ((x) << S_PCIE_WFPARERR)
#define F_PCIE_WFPARERR V_PCIE_WFPARERR(1U)
#define S_PCIE_PIOPARERR 8
#define V_PCIE_PIOPARERR(x) ((x) << S_PCIE_PIOPARERR)
#define F_PCIE_PIOPARERR V_PCIE_PIOPARERR(1U)
#define S_UNXSPLCPLERRC 7
#define V_UNXSPLCPLERRC(x) ((x) << S_UNXSPLCPLERRC)
#define F_UNXSPLCPLERRC V_UNXSPLCPLERRC(1U)
#define S_UNXSPLCPLERRR 6
#define V_UNXSPLCPLERRR(x) ((x) << S_UNXSPLCPLERRR)
#define F_UNXSPLCPLERRR V_UNXSPLCPLERRR(1U)
#define S_PEXERR 0
#define V_PEXERR(x) ((x) << S_PEXERR)
#define F_PEXERR V_PEXERR(1U)
#define A_PCIE_INT_CAUSE 0x84
#define A_PCIE_CFG 0x88
#define S_PCIE_CLIDECEN 16
#define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
#define F_PCIE_CLIDECEN V_PCIE_CLIDECEN(1U)
#define S_CRSTWRMMODE 0
#define V_CRSTWRMMODE(x) ((x) << S_CRSTWRMMODE)
#define F_CRSTWRMMODE V_CRSTWRMMODE(1U)
#define A_PCIE_MODE 0x8c
#define S_NUMFSTTRNSEQRX 10
#define M_NUMFSTTRNSEQRX 0xff
#define V_NUMFSTTRNSEQRX(x) ((x) << S_NUMFSTTRNSEQRX)
#define G_NUMFSTTRNSEQRX(x) (((x) >> S_NUMFSTTRNSEQRX) & M_NUMFSTTRNSEQRX)
#define A_PCIE_PEX_CTRL0 0x98
#define S_NUMFSTTRNSEQ 22
#define M_NUMFSTTRNSEQ 0xff
#define V_NUMFSTTRNSEQ(x) ((x) << S_NUMFSTTRNSEQ)
#define G_NUMFSTTRNSEQ(x) (((x) >> S_NUMFSTTRNSEQ) & M_NUMFSTTRNSEQ)
#define S_REPLAYLMT 2
#define M_REPLAYLMT 0xfffff
#define V_REPLAYLMT(x) ((x) << S_REPLAYLMT)
#define A_PCIE_PEX_CTRL1 0x9c
#define S_T3A_ACKLAT 0
#define M_T3A_ACKLAT 0x7ff
#define V_T3A_ACKLAT(x) ((x) << S_T3A_ACKLAT)
#define S_ACKLAT 0
#define M_ACKLAT 0x1fff
#define V_ACKLAT(x) ((x) << S_ACKLAT)
#define A_PCIE_PEX_ERR 0xa4
#define A_T3DBG_GPIO_EN 0xd0
#define S_GPIO11_OEN 27
#define V_GPIO11_OEN(x) ((x) << S_GPIO11_OEN)
#define F_GPIO11_OEN V_GPIO11_OEN(1U)
#define S_GPIO10_OEN 26
#define V_GPIO10_OEN(x) ((x) << S_GPIO10_OEN)
#define F_GPIO10_OEN V_GPIO10_OEN(1U)
#define S_GPIO7_OEN 23
#define V_GPIO7_OEN(x) ((x) << S_GPIO7_OEN)
#define F_GPIO7_OEN V_GPIO7_OEN(1U)
#define S_GPIO6_OEN 22
#define V_GPIO6_OEN(x) ((x) << S_GPIO6_OEN)
#define F_GPIO6_OEN V_GPIO6_OEN(1U)
#define S_GPIO5_OEN 21
#define V_GPIO5_OEN(x) ((x) << S_GPIO5_OEN)
#define F_GPIO5_OEN V_GPIO5_OEN(1U)
#define S_GPIO4_OEN 20
#define V_GPIO4_OEN(x) ((x) << S_GPIO4_OEN)
#define F_GPIO4_OEN V_GPIO4_OEN(1U)
#define S_GPIO2_OEN 18
#define V_GPIO2_OEN(x) ((x) << S_GPIO2_OEN)
#define F_GPIO2_OEN V_GPIO2_OEN(1U)
#define S_GPIO1_OEN 17
#define V_GPIO1_OEN(x) ((x) << S_GPIO1_OEN)
#define F_GPIO1_OEN V_GPIO1_OEN(1U)
#define S_GPIO0_OEN 16
#define V_GPIO0_OEN(x) ((x) << S_GPIO0_OEN)
#define F_GPIO0_OEN V_GPIO0_OEN(1U)
#define S_GPIO10_OUT_VAL 10
#define V_GPIO10_OUT_VAL(x) ((x) << S_GPIO10_OUT_VAL)
#define F_GPIO10_OUT_VAL V_GPIO10_OUT_VAL(1U)
#define S_GPIO7_OUT_VAL 7
#define V_GPIO7_OUT_VAL(x) ((x) << S_GPIO7_OUT_VAL)
#define F_GPIO7_OUT_VAL V_GPIO7_OUT_VAL(1U)
#define S_GPIO6_OUT_VAL 6
#define V_GPIO6_OUT_VAL(x) ((x) << S_GPIO6_OUT_VAL)
#define F_GPIO6_OUT_VAL V_GPIO6_OUT_VAL(1U)
#define S_GPIO5_OUT_VAL 5
#define V_GPIO5_OUT_VAL(x) ((x) << S_GPIO5_OUT_VAL)
#define F_GPIO5_OUT_VAL V_GPIO5_OUT_VAL(1U)
#define S_GPIO4_OUT_VAL 4
#define V_GPIO4_OUT_VAL(x) ((x) << S_GPIO4_OUT_VAL)
#define F_GPIO4_OUT_VAL V_GPIO4_OUT_VAL(1U)
#define S_GPIO2_OUT_VAL 2
#define V_GPIO2_OUT_VAL(x) ((x) << S_GPIO2_OUT_VAL)
#define F_GPIO2_OUT_VAL V_GPIO2_OUT_VAL(1U)
#define S_GPIO1_OUT_VAL 1
#define V_GPIO1_OUT_VAL(x) ((x) << S_GPIO1_OUT_VAL)
#define F_GPIO1_OUT_VAL V_GPIO1_OUT_VAL(1U)
#define S_GPIO0_OUT_VAL 0
#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL)
#define F_GPIO0_OUT_VAL V_GPIO0_OUT_VAL(1U)
#define A_T3DBG_INT_ENABLE 0xd8
#define S_GPIO11 11
#define V_GPIO11(x) ((x) << S_GPIO11)
#define F_GPIO11 V_GPIO11(1U)
#define S_GPIO10 10
#define V_GPIO10(x) ((x) << S_GPIO10)
#define F_GPIO10 V_GPIO10(1U)
#define S_GPIO7 7
#define V_GPIO7(x) ((x) << S_GPIO7)
#define F_GPIO7 V_GPIO7(1U)
#define S_GPIO6 6
#define V_GPIO6(x) ((x) << S_GPIO6)
#define F_GPIO6 V_GPIO6(1U)
#define S_GPIO5 5
#define V_GPIO5(x) ((x) << S_GPIO5)
#define F_GPIO5 V_GPIO5(1U)
#define S_GPIO4 4
#define V_GPIO4(x) ((x) << S_GPIO4)
#define F_GPIO4 V_GPIO4(1U)
#define S_GPIO3 3
#define V_GPIO3(x) ((x) << S_GPIO3)
#define F_GPIO3 V_GPIO3(1U)
#define S_GPIO2 2
#define V_GPIO2(x) ((x) << S_GPIO2)
#define F_GPIO2 V_GPIO2(1U)
#define S_GPIO1 1
#define V_GPIO1(x) ((x) << S_GPIO1)
#define F_GPIO1 V_GPIO1(1U)
#define S_GPIO0 0
#define V_GPIO0(x) ((x) << S_GPIO0)
#define F_GPIO0 V_GPIO0(1U)
#define A_T3DBG_INT_CAUSE 0xdc
#define A_T3DBG_GPIO_ACT_LOW 0xf0
#define MC7_PMRX_BASE_ADDR 0x100
#define A_MC7_CFG 0x100
#define S_IFEN 13
#define V_IFEN(x) ((x) << S_IFEN)
#define F_IFEN V_IFEN(1U)
#define S_TERM150 11
#define V_TERM150(x) ((x) << S_TERM150)
#define F_TERM150 V_TERM150(1U)
#define S_SLOW 10
#define V_SLOW(x) ((x) << S_SLOW)
#define F_SLOW V_SLOW(1U)
#define S_WIDTH 8
#define M_WIDTH 0x3
#define V_WIDTH(x) ((x) << S_WIDTH)
#define G_WIDTH(x) (((x) >> S_WIDTH) & M_WIDTH)
#define S_BKS 6
#define V_BKS(x) ((x) << S_BKS)
#define F_BKS V_BKS(1U)
#define S_ORG 5
#define V_ORG(x) ((x) << S_ORG)
#define F_ORG V_ORG(1U)
#define S_DEN 2
#define M_DEN 0x7
#define V_DEN(x) ((x) << S_DEN)
#define G_DEN(x) (((x) >> S_DEN) & M_DEN)
#define S_RDY 1
#define V_RDY(x) ((x) << S_RDY)
#define F_RDY V_RDY(1U)
#define S_CLKEN 0
#define V_CLKEN(x) ((x) << S_CLKEN)
#define F_CLKEN V_CLKEN(1U)
#define A_MC7_MODE 0x104
#define S_BUSY 31
#define V_BUSY(x) ((x) << S_BUSY)
#define F_BUSY V_BUSY(1U)
#define A_MC7_EXT_MODE1 0x108
#define A_MC7_EXT_MODE2 0x10c
#define A_MC7_EXT_MODE3 0x110
#define A_MC7_PRE 0x114
#define A_MC7_REF 0x118
#define S_PREREFDIV 1
#define M_PREREFDIV 0x3fff
#define V_PREREFDIV(x) ((x) << S_PREREFDIV)
#define S_PERREFEN 0
#define V_PERREFEN(x) ((x) << S_PERREFEN)
#define F_PERREFEN V_PERREFEN(1U)
#define A_MC7_DLL 0x11c
#define S_DLLENB 1
#define V_DLLENB(x) ((x) << S_DLLENB)
#define F_DLLENB V_DLLENB(1U)
#define S_DLLRST 0
#define V_DLLRST(x) ((x) << S_DLLRST)
#define F_DLLRST V_DLLRST(1U)
#define A_MC7_PARM 0x120
#define S_ACTTOPREDLY 26
#define M_ACTTOPREDLY 0xf
#define V_ACTTOPREDLY(x) ((x) << S_ACTTOPREDLY)
#define S_ACTTORDWRDLY 23
#define M_ACTTORDWRDLY 0x7
#define V_ACTTORDWRDLY(x) ((x) << S_ACTTORDWRDLY)
#define S_PRECYC 20
#define M_PRECYC 0x7
#define V_PRECYC(x) ((x) << S_PRECYC)
#define S_REFCYC 13
#define M_REFCYC 0x7f
#define V_REFCYC(x) ((x) << S_REFCYC)
#define S_BKCYC 8
#define M_BKCYC 0x1f
#define V_BKCYC(x) ((x) << S_BKCYC)
#define S_WRTORDDLY 4
#define M_WRTORDDLY 0xf
#define V_WRTORDDLY(x) ((x) << S_WRTORDDLY)
#define S_RDTOWRDLY 0
#define M_RDTOWRDLY 0xf
#define V_RDTOWRDLY(x) ((x) << S_RDTOWRDLY)
#define A_MC7_CAL 0x128
#define S_BUSY 31
#define V_BUSY(x) ((x) << S_BUSY)
#define F_BUSY V_BUSY(1U)
#define S_CAL_FAULT 30
#define V_CAL_FAULT(x) ((x) << S_CAL_FAULT)
#define F_CAL_FAULT V_CAL_FAULT(1U)
#define S_SGL_CAL_EN 20
#define V_SGL_CAL_EN(x) ((x) << S_SGL_CAL_EN)
#define F_SGL_CAL_EN V_SGL_CAL_EN(1U)
#define A_MC7_ERR_ADDR 0x12c
#define A_MC7_ECC 0x130
#define S_ECCCHKEN 1
#define V_ECCCHKEN(x) ((x) << S_ECCCHKEN)
#define F_ECCCHKEN V_ECCCHKEN(1U)
#define S_ECCGENEN 0
#define V_ECCGENEN(x) ((x) << S_ECCGENEN)
#define F_ECCGENEN V_ECCGENEN(1U)
#define A_MC7_CE_ADDR 0x134
#define A_MC7_CE_DATA0 0x138
#define A_MC7_CE_DATA1 0x13c
#define A_MC7_CE_DATA2 0x140
#define S_DATA 0
#define M_DATA 0xff
#define G_DATA(x) (((x) >> S_DATA) & M_DATA)
#define A_MC7_UE_ADDR 0x144
#define A_MC7_UE_DATA0 0x148
#define A_MC7_UE_DATA1 0x14c
#define A_MC7_UE_DATA2 0x150
#define A_MC7_BD_ADDR 0x154
#define S_ADDR 3
#define M_ADDR 0x1fffffff
#define A_MC7_BD_DATA0 0x158
#define A_MC7_BD_DATA1 0x15c
#define A_MC7_BD_OP 0x164
#define S_OP 0
#define V_OP(x) ((x) << S_OP)
#define F_OP V_OP(1U)
#define A_MC7_BIST_ADDR_BEG 0x168
#define A_MC7_BIST_ADDR_END 0x16c
#define A_MC7_BIST_DATA 0x170
#define A_MC7_BIST_OP 0x174
#define S_CONT 3
#define V_CONT(x) ((x) << S_CONT)
#define F_CONT V_CONT(1U)
#define A_MC7_INT_ENABLE 0x178
#define S_AE 17
#define V_AE(x) ((x) << S_AE)
#define F_AE V_AE(1U)
#define S_PE 2
#define M_PE 0x7fff
#define V_PE(x) ((x) << S_PE)
#define G_PE(x) (((x) >> S_PE) & M_PE)
#define S_UE 1
#define V_UE(x) ((x) << S_UE)
#define F_UE V_UE(1U)
#define S_CE 0
#define V_CE(x) ((x) << S_CE)
#define F_CE V_CE(1U)
#define A_MC7_INT_CAUSE 0x17c
#define MC7_PMTX_BASE_ADDR 0x180
#define MC7_CM_BASE_ADDR 0x200
#define A_CIM_BOOT_CFG 0x280
#define S_BOOTADDR 2
#define M_BOOTADDR 0x3fffffff
#define V_BOOTADDR(x) ((x) << S_BOOTADDR)
#define A_CIM_SDRAM_BASE_ADDR 0x28c
#define A_CIM_SDRAM_ADDR_SIZE 0x290
#define A_CIM_HOST_INT_ENABLE 0x298
#define A_CIM_HOST_INT_CAUSE 0x29c
#define S_BLKWRPLINT 12
#define V_BLKWRPLINT(x) ((x) << S_BLKWRPLINT)
#define F_BLKWRPLINT V_BLKWRPLINT(1U)
#define S_BLKRDPLINT 11
#define V_BLKRDPLINT(x) ((x) << S_BLKRDPLINT)
#define F_BLKRDPLINT V_BLKRDPLINT(1U)
#define S_BLKWRCTLINT 10
#define V_BLKWRCTLINT(x) ((x) << S_BLKWRCTLINT)
#define F_BLKWRCTLINT V_BLKWRCTLINT(1U)
#define S_BLKRDCTLINT 9
#define V_BLKRDCTLINT(x) ((x) << S_BLKRDCTLINT)
#define F_BLKRDCTLINT V_BLKRDCTLINT(1U)
#define S_BLKWRFLASHINT 8
#define V_BLKWRFLASHINT(x) ((x) << S_BLKWRFLASHINT)
#define F_BLKWRFLASHINT V_BLKWRFLASHINT(1U)
#define S_BLKRDFLASHINT 7
#define V_BLKRDFLASHINT(x) ((x) << S_BLKRDFLASHINT)
#define F_BLKRDFLASHINT V_BLKRDFLASHINT(1U)
#define S_SGLWRFLASHINT 6
#define V_SGLWRFLASHINT(x) ((x) << S_SGLWRFLASHINT)
#define F_SGLWRFLASHINT V_SGLWRFLASHINT(1U)
#define S_WRBLKFLASHINT 5
#define V_WRBLKFLASHINT(x) ((x) << S_WRBLKFLASHINT)
#define F_WRBLKFLASHINT V_WRBLKFLASHINT(1U)
#define S_BLKWRBOOTINT 4
#define V_BLKWRBOOTINT(x) ((x) << S_BLKWRBOOTINT)
#define F_BLKWRBOOTINT V_BLKWRBOOTINT(1U)
#define S_FLASHRANGEINT 2
#define V_FLASHRANGEINT(x) ((x) << S_FLASHRANGEINT)
#define F_FLASHRANGEINT V_FLASHRANGEINT(1U)
#define S_SDRAMRANGEINT 1
#define V_SDRAMRANGEINT(x) ((x) << S_SDRAMRANGEINT)
#define F_SDRAMRANGEINT V_SDRAMRANGEINT(1U)
#define S_RSVDSPACEINT 0
#define V_RSVDSPACEINT(x) ((x) << S_RSVDSPACEINT)
#define F_RSVDSPACEINT V_RSVDSPACEINT(1U)
#define A_CIM_HOST_ACC_CTRL 0x2b0
#define S_HOSTBUSY 17
#define V_HOSTBUSY(x) ((x) << S_HOSTBUSY)
#define F_HOSTBUSY V_HOSTBUSY(1U)
#define A_CIM_HOST_ACC_DATA 0x2b4
#define A_TP_IN_CONFIG 0x300
#define S_NICMODE 14
#define V_NICMODE(x) ((x) << S_NICMODE)
#define F_NICMODE V_NICMODE(1U)
#define S_IPV6ENABLE 15
#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE)
#define F_IPV6ENABLE V_IPV6ENABLE(1U)
#define A_TP_OUT_CONFIG 0x304
#define S_VLANEXTRACTIONENABLE 12
#define A_TP_GLOBAL_CONFIG 0x308
#define S_TXPACINGENABLE 24
#define V_TXPACINGENABLE(x) ((x) << S_TXPACINGENABLE)
#define F_TXPACINGENABLE V_TXPACINGENABLE(1U)
#define S_PATHMTU 15
#define V_PATHMTU(x) ((x) << S_PATHMTU)
#define F_PATHMTU V_PATHMTU(1U)
#define S_IPCHECKSUMOFFLOAD 13
#define V_IPCHECKSUMOFFLOAD(x) ((x) << S_IPCHECKSUMOFFLOAD)
#define F_IPCHECKSUMOFFLOAD V_IPCHECKSUMOFFLOAD(1U)
#define S_UDPCHECKSUMOFFLOAD 12
#define V_UDPCHECKSUMOFFLOAD(x) ((x) << S_UDPCHECKSUMOFFLOAD)
#define F_UDPCHECKSUMOFFLOAD V_UDPCHECKSUMOFFLOAD(1U)
#define S_TCPCHECKSUMOFFLOAD 11
#define V_TCPCHECKSUMOFFLOAD(x) ((x) << S_TCPCHECKSUMOFFLOAD)
#define F_TCPCHECKSUMOFFLOAD V_TCPCHECKSUMOFFLOAD(1U)
#define S_IPTTL 0
#define M_IPTTL 0xff
#define V_IPTTL(x) ((x) << S_IPTTL)
#define A_TP_CMM_MM_BASE 0x314
#define A_TP_CMM_TIMER_BASE 0x318
#define S_CMTIMERMAXNUM 28
#define M_CMTIMERMAXNUM 0x3
#define V_CMTIMERMAXNUM(x) ((x) << S_CMTIMERMAXNUM)
#define A_TP_PMM_SIZE 0x31c
#define A_TP_PMM_TX_BASE 0x320
#define A_TP_PMM_RX_BASE 0x328
#define A_TP_PMM_RX_PAGE_SIZE 0x32c
#define A_TP_PMM_RX_MAX_PAGE 0x330
#define A_TP_PMM_TX_PAGE_SIZE 0x334
#define A_TP_PMM_TX_MAX_PAGE 0x338
#define A_TP_TCP_OPTIONS 0x340
#define S_MTUDEFAULT 16
#define M_MTUDEFAULT 0xffff
#define V_MTUDEFAULT(x) ((x) << S_MTUDEFAULT)
#define S_MTUENABLE 10
#define V_MTUENABLE(x) ((x) << S_MTUENABLE)
#define F_MTUENABLE V_MTUENABLE(1U)
#define S_SACKRX 8
#define V_SACKRX(x) ((x) << S_SACKRX)
#define F_SACKRX V_SACKRX(1U)
#define S_SACKMODE 4
#define M_SACKMODE 0x3
#define V_SACKMODE(x) ((x) << S_SACKMODE)
#define S_WINDOWSCALEMODE 2
#define M_WINDOWSCALEMODE 0x3
#define V_WINDOWSCALEMODE(x) ((x) << S_WINDOWSCALEMODE)
#define S_TIMESTAMPSMODE 0
#define M_TIMESTAMPSMODE 0x3
#define V_TIMESTAMPSMODE(x) ((x) << S_TIMESTAMPSMODE)
#define A_TP_DACK_CONFIG 0x344
#define S_AUTOSTATE3 30
#define M_AUTOSTATE3 0x3
#define V_AUTOSTATE3(x) ((x) << S_AUTOSTATE3)
#define S_AUTOSTATE2 28
#define M_AUTOSTATE2 0x3
#define V_AUTOSTATE2(x) ((x) << S_AUTOSTATE2)
#define S_AUTOSTATE1 26
#define M_AUTOSTATE1 0x3
#define V_AUTOSTATE1(x) ((x) << S_AUTOSTATE1)
#define S_BYTETHRESHOLD 5
#define M_BYTETHRESHOLD 0xfffff
#define V_BYTETHRESHOLD(x) ((x) << S_BYTETHRESHOLD)
#define S_MSSTHRESHOLD 3
#define M_MSSTHRESHOLD 0x3
#define V_MSSTHRESHOLD(x) ((x) << S_MSSTHRESHOLD)
#define S_AUTOCAREFUL 2
#define V_AUTOCAREFUL(x) ((x) << S_AUTOCAREFUL)
#define F_AUTOCAREFUL V_AUTOCAREFUL(1U)
#define S_AUTOENABLE 1
#define V_AUTOENABLE(x) ((x) << S_AUTOENABLE)
#define F_AUTOENABLE V_AUTOENABLE(1U)
#define S_DACK_MODE 0
#define V_DACK_MODE(x) ((x) << S_DACK_MODE)
#define F_DACK_MODE V_DACK_MODE(1U)
#define A_TP_PC_CONFIG 0x348
#define S_TXTOSQUEUEMAPMODE 26
#define V_TXTOSQUEUEMAPMODE(x) ((x) << S_TXTOSQUEUEMAPMODE)
#define F_TXTOSQUEUEMAPMODE V_TXTOSQUEUEMAPMODE(1U)
#define S_ENABLEEPCMDAFULL 23
#define V_ENABLEEPCMDAFULL(x) ((x) << S_ENABLEEPCMDAFULL)
#define F_ENABLEEPCMDAFULL V_ENABLEEPCMDAFULL(1U)
#define S_MODULATEUNIONMODE 22
#define V_MODULATEUNIONMODE(x) ((x) << S_MODULATEUNIONMODE)
#define F_MODULATEUNIONMODE V_MODULATEUNIONMODE(1U)
#define S_TXDEFERENABLE 20
#define V_TXDEFERENABLE(x) ((x) << S_TXDEFERENABLE)
#define F_TXDEFERENABLE V_TXDEFERENABLE(1U)
#define S_RXCONGESTIONMODE 19
#define V_RXCONGESTIONMODE(x) ((x) << S_RXCONGESTIONMODE)
#define F_RXCONGESTIONMODE V_RXCONGESTIONMODE(1U)
#define S_HEARBEATDACK 16
#define V_HEARBEATDACK(x) ((x) << S_HEARBEATDACK)
#define F_HEARBEATDACK V_HEARBEATDACK(1U)
#define S_TXCONGESTIONMODE 15
#define V_TXCONGESTIONMODE(x) ((x) << S_TXCONGESTIONMODE)
#define F_TXCONGESTIONMODE V_TXCONGESTIONMODE(1U)
#define S_ENABLEOCSPIFULL 30
#define V_ENABLEOCSPIFULL(x) ((x) << S_ENABLEOCSPIFULL)
#define F_ENABLEOCSPIFULL V_ENABLEOCSPIFULL(1U)
#define S_LOCKTID 28
#define V_LOCKTID(x) ((x) << S_LOCKTID)
#define F_LOCKTID V_LOCKTID(1U)
#define A_TP_PC_CONFIG2 0x34c
#define S_CHDRAFULL 4
#define V_CHDRAFULL(x) ((x) << S_CHDRAFULL)
#define F_CHDRAFULL V_CHDRAFULL(1U)
#define A_TP_TCP_BACKOFF_REG0 0x350
#define A_TP_TCP_BACKOFF_REG1 0x354
#define A_TP_TCP_BACKOFF_REG2 0x358
#define A_TP_TCP_BACKOFF_REG3 0x35c
#define A_TP_PARA_REG2 0x368
#define S_MAXRXDATA 16
#define M_MAXRXDATA 0xffff
#define V_MAXRXDATA(x) ((x) << S_MAXRXDATA)
#define S_RXCOALESCESIZE 0
#define M_RXCOALESCESIZE 0xffff
#define V_RXCOALESCESIZE(x) ((x) << S_RXCOALESCESIZE)
#define A_TP_PARA_REG3 0x36c
#define S_TXDATAACKIDX 16
#define M_TXDATAACKIDX 0xf
#define V_TXDATAACKIDX(x) ((x) << S_TXDATAACKIDX)
#define S_TXPACEAUTOSTRICT 10
#define V_TXPACEAUTOSTRICT(x) ((x) << S_TXPACEAUTOSTRICT)
#define F_TXPACEAUTOSTRICT V_TXPACEAUTOSTRICT(1U)
#define S_TXPACEFIXED 9
#define V_TXPACEFIXED(x) ((x) << S_TXPACEFIXED)
#define F_TXPACEFIXED V_TXPACEFIXED(1U)
#define S_TXPACEAUTO 8
#define V_TXPACEAUTO(x) ((x) << S_TXPACEAUTO)
#define F_TXPACEAUTO V_TXPACEAUTO(1U)
#define S_RXCOALESCEENABLE 1
#define V_RXCOALESCEENABLE(x) ((x) << S_RXCOALESCEENABLE)
#define F_RXCOALESCEENABLE V_RXCOALESCEENABLE(1U)
#define S_RXCOALESCEPSHEN 0
#define V_RXCOALESCEPSHEN(x) ((x) << S_RXCOALESCEPSHEN)
#define F_RXCOALESCEPSHEN V_RXCOALESCEPSHEN(1U)
#define A_TP_PARA_REG4 0x370
#define A_TP_PARA_REG6 0x378
#define S_T3A_ENABLEESND 13
#define V_T3A_ENABLEESND(x) ((x) << S_T3A_ENABLEESND)
#define F_T3A_ENABLEESND V_T3A_ENABLEESND(1U)
#define S_ENABLEESND 11
#define V_ENABLEESND(x) ((x) << S_ENABLEESND)
#define F_ENABLEESND V_ENABLEESND(1U)
#define A_TP_PARA_REG7 0x37c
#define S_PMMAXXFERLEN1 16
#define M_PMMAXXFERLEN1 0xffff
#define V_PMMAXXFERLEN1(x) ((x) << S_PMMAXXFERLEN1)
#define S_PMMAXXFERLEN0 0
#define M_PMMAXXFERLEN0 0xffff
#define V_PMMAXXFERLEN0(x) ((x) << S_PMMAXXFERLEN0)
#define A_TP_TIMER_RESOLUTION 0x390
#define S_TIMERRESOLUTION 16
#define M_TIMERRESOLUTION 0xff
#define V_TIMERRESOLUTION(x) ((x) << S_TIMERRESOLUTION)
#define S_TIMESTAMPRESOLUTION 8
#define M_TIMESTAMPRESOLUTION 0xff
#define V_TIMESTAMPRESOLUTION(x) ((x) << S_TIMESTAMPRESOLUTION)
#define S_DELAYEDACKRESOLUTION 0
#define M_DELAYEDACKRESOLUTION 0xff
#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION)
#define A_TP_MSL 0x394
#define A_TP_RXT_MIN 0x398
#define A_TP_RXT_MAX 0x39c
#define A_TP_PERS_MIN 0x3a0
#define A_TP_PERS_MAX 0x3a4
#define A_TP_KEEP_IDLE 0x3a8
#define A_TP_KEEP_INTVL 0x3ac
#define A_TP_INIT_SRTT 0x3b0
#define A_TP_DACK_TIMER 0x3b4
#define A_TP_FINWAIT2_TIMER 0x3b8
#define A_TP_SHIFT_CNT 0x3c0
#define S_SYNSHIFTMAX 24
#define M_SYNSHIFTMAX 0xff
#define V_SYNSHIFTMAX(x) ((x) << S_SYNSHIFTMAX)
#define S_RXTSHIFTMAXR1 20
#define M_RXTSHIFTMAXR1 0xf
#define V_RXTSHIFTMAXR1(x) ((x) << S_RXTSHIFTMAXR1)
#define S_RXTSHIFTMAXR2 16
#define M_RXTSHIFTMAXR2 0xf
#define V_RXTSHIFTMAXR2(x) ((x) << S_RXTSHIFTMAXR2)
#define S_PERSHIFTBACKOFFMAX 12
#define M_PERSHIFTBACKOFFMAX 0xf
#define V_PERSHIFTBACKOFFMAX(x) ((x) << S_PERSHIFTBACKOFFMAX)
#define S_PERSHIFTMAX 8
#define M_PERSHIFTMAX 0xf
#define V_PERSHIFTMAX(x) ((x) << S_PERSHIFTMAX)
#define S_KEEPALIVEMAX 0
#define M_KEEPALIVEMAX 0xff
#define V_KEEPALIVEMAX(x) ((x) << S_KEEPALIVEMAX)
#define A_TP_MTU_PORT_TABLE 0x3d0
#define A_TP_CCTRL_TABLE 0x3dc
#define A_TP_MTU_TABLE 0x3e4
#define A_TP_RSS_MAP_TABLE 0x3e8
#define A_TP_RSS_LKP_TABLE 0x3ec
#define A_TP_RSS_CONFIG 0x3f0
#define S_TNL4TUPEN 29
#define V_TNL4TUPEN(x) ((x) << S_TNL4TUPEN)
#define F_TNL4TUPEN V_TNL4TUPEN(1U)
#define S_TNL2TUPEN 28
#define V_TNL2TUPEN(x) ((x) << S_TNL2TUPEN)
#define F_TNL2TUPEN V_TNL2TUPEN(1U)
#define S_TNLPRTEN 26
#define V_TNLPRTEN(x) ((x) << S_TNLPRTEN)
#define F_TNLPRTEN V_TNLPRTEN(1U)
#define S_TNLMAPEN 25
#define V_TNLMAPEN(x) ((x) << S_TNLMAPEN)
#define F_TNLMAPEN V_TNLMAPEN(1U)
#define S_TNLLKPEN 24
#define V_TNLLKPEN(x) ((x) << S_TNLLKPEN)
#define F_TNLLKPEN V_TNLLKPEN(1U)
#define S_RRCPLCPUSIZE 4
#define M_RRCPLCPUSIZE 0x7
#define V_RRCPLCPUSIZE(x) ((x) << S_RRCPLCPUSIZE)
#define S_RQFEEDBACKENABLE 3
#define V_RQFEEDBACKENABLE(x) ((x) << S_RQFEEDBACKENABLE)
#define F_RQFEEDBACKENABLE V_RQFEEDBACKENABLE(1U)
#define S_DISABLE 0
#define A_TP_TM_PIO_ADDR 0x418
#define A_TP_TM_PIO_DATA 0x41c
#define A_TP_TX_MOD_QUE_TABLE 0x420
#define A_TP_TX_RESOURCE_LIMIT 0x424
#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x428
#define S_TX_MOD_QUEUE_REQ_MAP 0
#define M_TX_MOD_QUEUE_REQ_MAP 0xff
#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP)
#define A_TP_TX_MOD_QUEUE_WEIGHT1 0x42c
#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x430
#define A_TP_MOD_CHANNEL_WEIGHT 0x434
#define A_TP_PIO_ADDR 0x440
#define A_TP_PIO_DATA 0x444
#define A_TP_RESET 0x44c
#define S_FLSTINITENABLE 1
#define V_FLSTINITENABLE(x) ((x) << S_FLSTINITENABLE)
#define F_FLSTINITENABLE V_FLSTINITENABLE(1U)
#define S_TPRESET 0
#define V_TPRESET(x) ((x) << S_TPRESET)
#define F_TPRESET V_TPRESET(1U)
#define A_TP_CMM_MM_RX_FLST_BASE 0x460
#define A_TP_CMM_MM_TX_FLST_BASE 0x464
#define A_TP_CMM_MM_PS_FLST_BASE 0x468
#define A_TP_MIB_INDEX 0x450
#define A_TP_MIB_RDATA 0x454
#define A_TP_CMM_MM_MAX_PSTRUCT 0x46c
#define A_TP_INT_ENABLE 0x470
#define A_TP_INT_CAUSE 0x474
#define A_TP_TX_MOD_Q1_Q0_RATE_LIMIT 0x8
#define A_TP_TX_DROP_CFG_CH0 0x12b
#define A_TP_TX_DROP_MODE 0x12f
#define A_TP_EGRESS_CONFIG 0x145
#define S_REWRITEFORCETOSIZE 0
#define V_REWRITEFORCETOSIZE(x) ((x) << S_REWRITEFORCETOSIZE)
#define F_REWRITEFORCETOSIZE V_REWRITEFORCETOSIZE(1U)
#define A_TP_TX_TRC_KEY0 0x20
#define A_TP_RX_TRC_KEY0 0x120
#define A_ULPRX_CTL 0x500
#define S_ROUND_ROBIN 4
#define V_ROUND_ROBIN(x) ((x) << S_ROUND_ROBIN)
#define F_ROUND_ROBIN V_ROUND_ROBIN(1U)
#define A_ULPRX_INT_ENABLE 0x504
#define S_PARERR 0
#define V_PARERR(x) ((x) << S_PARERR)
#define F_PARERR V_PARERR(1U)
#define A_ULPRX_INT_CAUSE 0x508
#define A_ULPRX_ISCSI_LLIMIT 0x50c
#define A_ULPRX_ISCSI_ULIMIT 0x510
#define A_ULPRX_ISCSI_TAGMASK 0x514
#define A_ULPRX_TDDP_LLIMIT 0x51c
#define A_ULPRX_TDDP_ULIMIT 0x520
#define A_ULPRX_STAG_LLIMIT 0x52c
#define A_ULPRX_STAG_ULIMIT 0x530
#define A_ULPRX_RQ_LLIMIT 0x534
#define A_ULPRX_RQ_ULIMIT 0x538
#define A_ULPRX_PBL_LLIMIT 0x53c
#define A_ULPRX_PBL_ULIMIT 0x540
#define A_ULPRX_TDDP_TAGMASK 0x524
#define A_ULPTX_CONFIG 0x580
#define S_CFG_RR_ARB 0
#define V_CFG_RR_ARB(x) ((x) << S_CFG_RR_ARB)
#define F_CFG_RR_ARB V_CFG_RR_ARB(1U)
#define A_ULPTX_INT_ENABLE 0x584
#define S_PBL_BOUND_ERR_CH1 1
#define V_PBL_BOUND_ERR_CH1(x) ((x) << S_PBL_BOUND_ERR_CH1)
#define F_PBL_BOUND_ERR_CH1 V_PBL_BOUND_ERR_CH1(1U)
#define S_PBL_BOUND_ERR_CH0 0
#define V_PBL_BOUND_ERR_CH0(x) ((x) << S_PBL_BOUND_ERR_CH0)
#define F_PBL_BOUND_ERR_CH0 V_PBL_BOUND_ERR_CH0(1U)
#define A_ULPTX_INT_CAUSE 0x588
#define A_ULPTX_TPT_LLIMIT 0x58c
#define A_ULPTX_TPT_ULIMIT 0x590
#define A_ULPTX_PBL_LLIMIT 0x594
#define A_ULPTX_PBL_ULIMIT 0x598
#define A_ULPTX_DMA_WEIGHT 0x5ac
#define S_D1_WEIGHT 16
#define M_D1_WEIGHT 0xffff
#define V_D1_WEIGHT(x) ((x) << S_D1_WEIGHT)
#define S_D0_WEIGHT 0
#define M_D0_WEIGHT 0xffff
#define V_D0_WEIGHT(x) ((x) << S_D0_WEIGHT)
#define A_PM1_RX_CFG 0x5c0
#define A_PM1_RX_INT_ENABLE 0x5d8
#define S_ZERO_E_CMD_ERROR 18
#define V_ZERO_E_CMD_ERROR(x) ((x) << S_ZERO_E_CMD_ERROR)
#define F_ZERO_E_CMD_ERROR V_ZERO_E_CMD_ERROR(1U)
#define S_IESPI0_FIFO2X_RX_FRAMING_ERROR 17
#define V_IESPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_FIFO2X_RX_FRAMING_ERROR)
#define F_IESPI0_FIFO2X_RX_FRAMING_ERROR V_IESPI0_FIFO2X_RX_FRAMING_ERROR(1U)
#define S_IESPI1_FIFO2X_RX_FRAMING_ERROR 16
#define V_IESPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_FIFO2X_RX_FRAMING_ERROR)
#define F_IESPI1_FIFO2X_RX_FRAMING_ERROR V_IESPI1_FIFO2X_RX_FRAMING_ERROR(1U)
#define S_IESPI0_RX_FRAMING_ERROR 15
#define V_IESPI0_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_RX_FRAMING_ERROR)
#define F_IESPI0_RX_FRAMING_ERROR V_IESPI0_RX_FRAMING_ERROR(1U)
#define S_IESPI1_RX_FRAMING_ERROR 14
#define V_IESPI1_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_RX_FRAMING_ERROR)
#define F_IESPI1_RX_FRAMING_ERROR V_IESPI1_RX_FRAMING_ERROR(1U)
#define S_IESPI0_TX_FRAMING_ERROR 13
#define V_IESPI0_TX_FRAMING_ERROR(x) ((x) << S_IESPI0_TX_FRAMING_ERROR)
#define F_IESPI0_TX_FRAMING_ERROR V_IESPI0_TX_FRAMING_ERROR(1U)
#define S_IESPI1_TX_FRAMING_ERROR 12
#define V_IESPI1_TX_FRAMING_ERROR(x) ((x) << S_IESPI1_TX_FRAMING_ERROR)
#define F_IESPI1_TX_FRAMING_ERROR V_IESPI1_TX_FRAMING_ERROR(1U)
#define S_OCSPI0_RX_FRAMING_ERROR 11
#define V_OCSPI0_RX_FRAMING_ERROR(x) ((x) << S_OCSPI0_RX_FRAMING_ERROR)
#define F_OCSPI0_RX_FRAMING_ERROR V_OCSPI0_RX_FRAMING_ERROR(1U)
#define S_OCSPI1_RX_FRAMING_ERROR 10
#define V_OCSPI1_RX_FRAMING_ERROR(x) ((x) << S_OCSPI1_RX_FRAMING_ERROR)
#define F_OCSPI1_RX_FRAMING_ERROR V_OCSPI1_RX_FRAMING_ERROR(1U)
#define S_OCSPI0_TX_FRAMING_ERROR 9
#define V_OCSPI0_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_TX_FRAMING_ERROR)
#define F_OCSPI0_TX_FRAMING_ERROR V_OCSPI0_TX_FRAMING_ERROR(1U)
#define S_OCSPI1_TX_FRAMING_ERROR 8
#define V_OCSPI1_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_TX_FRAMING_ERROR)
#define F_OCSPI1_TX_FRAMING_ERROR V_OCSPI1_TX_FRAMING_ERROR(1U)
#define S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR 7
#define V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR)
#define F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
#define S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR 6
#define V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
#define F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
#define S_IESPI_PAR_ERROR 3
#define M_IESPI_PAR_ERROR 0x7
#define V_IESPI_PAR_ERROR(x) ((x) << S_IESPI_PAR_ERROR)
#define S_OCSPI_PAR_ERROR 0
#define M_OCSPI_PAR_ERROR 0x7
#define V_OCSPI_PAR_ERROR(x) ((x) << S_OCSPI_PAR_ERROR)
#define A_PM1_RX_INT_CAUSE 0x5dc
#define A_PM1_TX_CFG 0x5e0
#define A_PM1_TX_INT_ENABLE 0x5f8
#define S_ZERO_C_CMD_ERROR 18
#define V_ZERO_C_CMD_ERROR(x) ((x) << S_ZERO_C_CMD_ERROR)
#define F_ZERO_C_CMD_ERROR V_ZERO_C_CMD_ERROR(1U)
#define S_ICSPI0_FIFO2X_RX_FRAMING_ERROR 17
#define V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_FIFO2X_RX_FRAMING_ERROR)
#define F_ICSPI0_FIFO2X_RX_FRAMING_ERROR V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(1U)
#define S_ICSPI1_FIFO2X_RX_FRAMING_ERROR 16
#define V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_FIFO2X_RX_FRAMING_ERROR)
#define F_ICSPI1_FIFO2X_RX_FRAMING_ERROR V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(1U)
#define S_ICSPI0_RX_FRAMING_ERROR 15
#define V_ICSPI0_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_RX_FRAMING_ERROR)
#define F_ICSPI0_RX_FRAMING_ERROR V_ICSPI0_RX_FRAMING_ERROR(1U)
#define S_ICSPI1_RX_FRAMING_ERROR 14
#define V_ICSPI1_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_RX_FRAMING_ERROR)
#define F_ICSPI1_RX_FRAMING_ERROR V_ICSPI1_RX_FRAMING_ERROR(1U)
#define S_ICSPI0_TX_FRAMING_ERROR 13
#define V_ICSPI0_TX_FRAMING_ERROR(x) ((x) << S_ICSPI0_TX_FRAMING_ERROR)
#define F_ICSPI0_TX_FRAMING_ERROR V_ICSPI0_TX_FRAMING_ERROR(1U)
#define S_ICSPI1_TX_FRAMING_ERROR 12
#define V_ICSPI1_TX_FRAMING_ERROR(x) ((x) << S_ICSPI1_TX_FRAMING_ERROR)
#define F_ICSPI1_TX_FRAMING_ERROR V_ICSPI1_TX_FRAMING_ERROR(1U)
#define S_OESPI0_RX_FRAMING_ERROR 11
#define V_OESPI0_RX_FRAMING_ERROR(x) ((x) << S_OESPI0_RX_FRAMING_ERROR)
#define F_OESPI0_RX_FRAMING_ERROR V_OESPI0_RX_FRAMING_ERROR(1U)
#define S_OESPI1_RX_FRAMING_ERROR 10
#define V_OESPI1_RX_FRAMING_ERROR(x) ((x) << S_OESPI1_RX_FRAMING_ERROR)
#define F_OESPI1_RX_FRAMING_ERROR V_OESPI1_RX_FRAMING_ERROR(1U)
#define S_OESPI0_TX_FRAMING_ERROR 9
#define V_OESPI0_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_TX_FRAMING_ERROR)
#define F_OESPI0_TX_FRAMING_ERROR V_OESPI0_TX_FRAMING_ERROR(1U)
#define S_OESPI1_TX_FRAMING_ERROR 8
#define V_OESPI1_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_TX_FRAMING_ERROR)
#define F_OESPI1_TX_FRAMING_ERROR V_OESPI1_TX_FRAMING_ERROR(1U)
#define S_OESPI0_OFIFO2X_TX_FRAMING_ERROR 7
#define V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_OFIFO2X_TX_FRAMING_ERROR)
#define F_OESPI0_OFIFO2X_TX_FRAMING_ERROR V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
#define S_OESPI1_OFIFO2X_TX_FRAMING_ERROR 6
#define V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
#define F_OESPI1_OFIFO2X_TX_FRAMING_ERROR V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
#define S_ICSPI_PAR_ERROR 3
#define M_ICSPI_PAR_ERROR 0x7
#define V_ICSPI_PAR_ERROR(x) ((x) << S_ICSPI_PAR_ERROR)
#define S_OESPI_PAR_ERROR 0
#define M_OESPI_PAR_ERROR 0x7
#define V_OESPI_PAR_ERROR(x) ((x) << S_OESPI_PAR_ERROR)
#define A_PM1_TX_INT_CAUSE 0x5fc
#define A_MPS_CFG 0x600
#define S_TPRXPORTEN 4
#define V_TPRXPORTEN(x) ((x) << S_TPRXPORTEN)
#define F_TPRXPORTEN V_TPRXPORTEN(1U)
#define S_TPTXPORT1EN 3
#define V_TPTXPORT1EN(x) ((x) << S_TPTXPORT1EN)
#define F_TPTXPORT1EN V_TPTXPORT1EN(1U)
#define S_TPTXPORT0EN 2
#define V_TPTXPORT0EN(x) ((x) << S_TPTXPORT0EN)
#define F_TPTXPORT0EN V_TPTXPORT0EN(1U)
#define S_PORT1ACTIVE 1
#define V_PORT1ACTIVE(x) ((x) << S_PORT1ACTIVE)
#define F_PORT1ACTIVE V_PORT1ACTIVE(1U)
#define S_PORT0ACTIVE 0
#define V_PORT0ACTIVE(x) ((x) << S_PORT0ACTIVE)
#define F_PORT0ACTIVE V_PORT0ACTIVE(1U)
#define S_ENFORCEPKT 11
#define V_ENFORCEPKT(x) ((x) << S_ENFORCEPKT)
#define F_ENFORCEPKT V_ENFORCEPKT(1U)
#define A_MPS_INT_ENABLE 0x61c
#define S_MCAPARERRENB 6
#define M_MCAPARERRENB 0x7
#define V_MCAPARERRENB(x) ((x) << S_MCAPARERRENB)
#define S_RXTPPARERRENB 4
#define M_RXTPPARERRENB 0x3
#define V_RXTPPARERRENB(x) ((x) << S_RXTPPARERRENB)
#define S_TX1TPPARERRENB 2
#define M_TX1TPPARERRENB 0x3
#define V_TX1TPPARERRENB(x) ((x) << S_TX1TPPARERRENB)
#define S_TX0TPPARERRENB 0
#define M_TX0TPPARERRENB 0x3
#define V_TX0TPPARERRENB(x) ((x) << S_TX0TPPARERRENB)
#define A_MPS_INT_CAUSE 0x620
#define S_MCAPARERR 6
#define M_MCAPARERR 0x7
#define V_MCAPARERR(x) ((x) << S_MCAPARERR)
#define S_RXTPPARERR 4
#define M_RXTPPARERR 0x3
#define V_RXTPPARERR(x) ((x) << S_RXTPPARERR)
#define S_TX1TPPARERR 2
#define M_TX1TPPARERR 0x3
#define V_TX1TPPARERR(x) ((x) << S_TX1TPPARERR)
#define S_TX0TPPARERR 0
#define M_TX0TPPARERR 0x3
#define V_TX0TPPARERR(x) ((x) << S_TX0TPPARERR)
#define A_CPL_SWITCH_CNTRL 0x640
#define A_CPL_INTR_ENABLE 0x650
#define S_CIM_OVFL_ERROR 4
#define V_CIM_OVFL_ERROR(x) ((x) << S_CIM_OVFL_ERROR)
#define F_CIM_OVFL_ERROR V_CIM_OVFL_ERROR(1U)
#define S_TP_FRAMING_ERROR 3
#define V_TP_FRAMING_ERROR(x) ((x) << S_TP_FRAMING_ERROR)
#define F_TP_FRAMING_ERROR V_TP_FRAMING_ERROR(1U)
#define S_SGE_FRAMING_ERROR 2
#define V_SGE_FRAMING_ERROR(x) ((x) << S_SGE_FRAMING_ERROR)
#define F_SGE_FRAMING_ERROR V_SGE_FRAMING_ERROR(1U)
#define S_CIM_FRAMING_ERROR 1
#define V_CIM_FRAMING_ERROR(x) ((x) << S_CIM_FRAMING_ERROR)
#define F_CIM_FRAMING_ERROR V_CIM_FRAMING_ERROR(1U)
#define S_ZERO_SWITCH_ERROR 0
#define V_ZERO_SWITCH_ERROR(x) ((x) << S_ZERO_SWITCH_ERROR)
#define F_ZERO_SWITCH_ERROR V_ZERO_SWITCH_ERROR(1U)
#define A_CPL_INTR_CAUSE 0x654
#define A_CPL_MAP_TBL_DATA 0x65c
#define A_SMB_GLOBAL_TIME_CFG 0x660
#define A_I2C_CFG 0x6a0
#define S_I2C_CLKDIV 0
#define M_I2C_CLKDIV 0xfff
#define V_I2C_CLKDIV(x) ((x) << S_I2C_CLKDIV)
#define A_MI1_CFG 0x6b0
#define S_CLKDIV 5
#define M_CLKDIV 0xff
#define V_CLKDIV(x) ((x) << S_CLKDIV)
#define S_ST 3
#define M_ST 0x3
#define V_ST(x) ((x) << S_ST)
#define G_ST(x) (((x) >> S_ST) & M_ST)
#define S_PREEN 2
#define V_PREEN(x) ((x) << S_PREEN)
#define F_PREEN V_PREEN(1U)
#define S_MDIINV 1
#define V_MDIINV(x) ((x) << S_MDIINV)
#define F_MDIINV V_MDIINV(1U)
#define S_MDIEN 0
#define V_MDIEN(x) ((x) << S_MDIEN)
#define F_MDIEN V_MDIEN(1U)
#define A_MI1_ADDR 0x6b4
#define S_PHYADDR 5
#define M_PHYADDR 0x1f
#define V_PHYADDR(x) ((x) << S_PHYADDR)
#define S_REGADDR 0
#define M_REGADDR 0x1f
#define V_REGADDR(x) ((x) << S_REGADDR)
#define A_MI1_DATA 0x6b8
#define A_MI1_OP 0x6bc
#define S_MDI_OP 0
#define M_MDI_OP 0x3
#define V_MDI_OP(x) ((x) << S_MDI_OP)
#define A_SF_DATA 0x6d8
#define A_SF_OP 0x6dc
#define S_BYTECNT 1
#define M_BYTECNT 0x3
#define V_BYTECNT(x) ((x) << S_BYTECNT)
#define A_PL_INT_ENABLE0 0x6e0
#define S_T3DBG 23
#define V_T3DBG(x) ((x) << S_T3DBG)
#define F_T3DBG V_T3DBG(1U)
#define S_XGMAC0_1 20
#define V_XGMAC0_1(x) ((x) << S_XGMAC0_1)
#define F_XGMAC0_1 V_XGMAC0_1(1U)
#define S_XGMAC0_0 19
#define V_XGMAC0_0(x) ((x) << S_XGMAC0_0)
#define F_XGMAC0_0 V_XGMAC0_0(1U)
#define S_MC5A 18
#define V_MC5A(x) ((x) << S_MC5A)
#define F_MC5A V_MC5A(1U)
#define S_CPL_SWITCH 12
#define V_CPL_SWITCH(x) ((x) << S_CPL_SWITCH)
#define F_CPL_SWITCH V_CPL_SWITCH(1U)
#define S_MPS0 11
#define V_MPS0(x) ((x) << S_MPS0)
#define F_MPS0 V_MPS0(1U)
#define S_PM1_TX 10
#define V_PM1_TX(x) ((x) << S_PM1_TX)
#define F_PM1_TX V_PM1_TX(1U)
#define S_PM1_RX 9
#define V_PM1_RX(x) ((x) << S_PM1_RX)
#define F_PM1_RX V_PM1_RX(1U)
#define S_ULP2_TX 8
#define V_ULP2_TX(x) ((x) << S_ULP2_TX)
#define F_ULP2_TX V_ULP2_TX(1U)
#define S_ULP2_RX 7
#define V_ULP2_RX(x) ((x) << S_ULP2_RX)
#define F_ULP2_RX V_ULP2_RX(1U)
#define S_TP1 6
#define V_TP1(x) ((x) << S_TP1)
#define F_TP1 V_TP1(1U)
#define S_CIM 5
#define V_CIM(x) ((x) << S_CIM)
#define F_CIM V_CIM(1U)
#define S_MC7_CM 4
#define V_MC7_CM(x) ((x) << S_MC7_CM)
#define F_MC7_CM V_MC7_CM(1U)
#define S_MC7_PMTX 3
#define V_MC7_PMTX(x) ((x) << S_MC7_PMTX)
#define F_MC7_PMTX V_MC7_PMTX(1U)
#define S_MC7_PMRX 2
#define V_MC7_PMRX(x) ((x) << S_MC7_PMRX)
#define F_MC7_PMRX V_MC7_PMRX(1U)
#define S_PCIM0 1
#define V_PCIM0(x) ((x) << S_PCIM0)
#define F_PCIM0 V_PCIM0(1U)
#define S_SGE3 0
#define V_SGE3(x) ((x) << S_SGE3)
#define F_SGE3 V_SGE3(1U)
#define A_PL_INT_CAUSE0 0x6e4
#define A_PL_RST 0x6f0
#define S_CRSTWRM 1
#define V_CRSTWRM(x) ((x) << S_CRSTWRM)
#define F_CRSTWRM V_CRSTWRM(1U)
#define A_PL_REV 0x6f4
#define A_PL_CLI 0x6f8
#define A_MC5_DB_CONFIG 0x704
#define S_TMTYPEHI 30
#define V_TMTYPEHI(x) ((x) << S_TMTYPEHI)
#define F_TMTYPEHI V_TMTYPEHI(1U)
#define S_TMPARTSIZE 28
#define M_TMPARTSIZE 0x3
#define V_TMPARTSIZE(x) ((x) << S_TMPARTSIZE)
#define G_TMPARTSIZE(x) (((x) >> S_TMPARTSIZE) & M_TMPARTSIZE)
#define S_TMTYPE 26
#define M_TMTYPE 0x3
#define V_TMTYPE(x) ((x) << S_TMTYPE)
#define G_TMTYPE(x) (((x) >> S_TMTYPE) & M_TMTYPE)
#define S_COMPEN 17
#define V_COMPEN(x) ((x) << S_COMPEN)
#define F_COMPEN V_COMPEN(1U)
#define S_PRTYEN 6
#define V_PRTYEN(x) ((x) << S_PRTYEN)
#define F_PRTYEN V_PRTYEN(1U)
#define S_MBUSEN 5
#define V_MBUSEN(x) ((x) << S_MBUSEN)
#define F_MBUSEN V_MBUSEN(1U)
#define S_DBGIEN 4
#define V_DBGIEN(x) ((x) << S_DBGIEN)
#define F_DBGIEN V_DBGIEN(1U)
#define S_TMRDY 2
#define V_TMRDY(x) ((x) << S_TMRDY)
#define F_TMRDY V_TMRDY(1U)
#define S_TMRST 1
#define V_TMRST(x) ((x) << S_TMRST)
#define F_TMRST V_TMRST(1U)
#define S_TMMODE 0
#define V_TMMODE(x) ((x) << S_TMMODE)
#define F_TMMODE V_TMMODE(1U)
#define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c
#define A_MC5_DB_FILTER_TABLE 0x710
#define A_MC5_DB_SERVER_INDEX 0x714
#define A_MC5_DB_RSP_LATENCY 0x720
#define S_RDLAT 16
#define M_RDLAT 0x1f
#define V_RDLAT(x) ((x) << S_RDLAT)
#define S_LRNLAT 8
#define M_LRNLAT 0x1f
#define V_LRNLAT(x) ((x) << S_LRNLAT)
#define S_SRCHLAT 0
#define M_SRCHLAT 0x1f
#define V_SRCHLAT(x) ((x) << S_SRCHLAT)
#define A_MC5_DB_PART_ID_INDEX 0x72c
#define A_MC5_DB_INT_ENABLE 0x740
#define S_DELACTEMPTY 18
#define V_DELACTEMPTY(x) ((x) << S_DELACTEMPTY)
#define F_DELACTEMPTY V_DELACTEMPTY(1U)
#define S_DISPQPARERR 17
#define V_DISPQPARERR(x) ((x) << S_DISPQPARERR)
#define F_DISPQPARERR V_DISPQPARERR(1U)
#define S_REQQPARERR 16
#define V_REQQPARERR(x) ((x) << S_REQQPARERR)
#define F_REQQPARERR V_REQQPARERR(1U)
#define S_UNKNOWNCMD 15
#define V_UNKNOWNCMD(x) ((x) << S_UNKNOWNCMD)
#define F_UNKNOWNCMD V_UNKNOWNCMD(1U)
#define S_NFASRCHFAIL 8
#define V_NFASRCHFAIL(x) ((x) << S_NFASRCHFAIL)
#define F_NFASRCHFAIL V_NFASRCHFAIL(1U)
#define S_ACTRGNFULL 7
#define V_ACTRGNFULL(x) ((x) << S_ACTRGNFULL)
#define F_ACTRGNFULL V_ACTRGNFULL(1U)
#define S_PARITYERR 6
#define V_PARITYERR(x) ((x) << S_PARITYERR)
#define F_PARITYERR V_PARITYERR(1U)
#define A_MC5_DB_INT_CAUSE 0x744
#define A_MC5_DB_DBGI_CONFIG 0x774
#define A_MC5_DB_DBGI_REQ_CMD 0x778
#define A_MC5_DB_DBGI_REQ_ADDR0 0x77c
#define A_MC5_DB_DBGI_REQ_ADDR1 0x780
#define A_MC5_DB_DBGI_REQ_ADDR2 0x784
#define A_MC5_DB_DBGI_REQ_DATA0 0x788
#define A_MC5_DB_DBGI_REQ_DATA1 0x78c
#define A_MC5_DB_DBGI_REQ_DATA2 0x790
#define A_MC5_DB_DBGI_RSP_STATUS 0x7b0
#define S_DBGIRSPVALID 0
#define V_DBGIRSPVALID(x) ((x) << S_DBGIRSPVALID)
#define F_DBGIRSPVALID V_DBGIRSPVALID(1U)
#define A_MC5_DB_DBGI_RSP_DATA0 0x7b4
#define A_MC5_DB_DBGI_RSP_DATA1 0x7b8
#define A_MC5_DB_DBGI_RSP_DATA2 0x7bc
#define A_MC5_DB_POPEN_DATA_WR_CMD 0x7cc
#define A_MC5_DB_POPEN_MASK_WR_CMD 0x7d0
#define A_MC5_DB_AOPEN_SRCH_CMD 0x7d4
#define A_MC5_DB_AOPEN_LRN_CMD 0x7d8
#define A_MC5_DB_SYN_SRCH_CMD 0x7dc
#define A_MC5_DB_SYN_LRN_CMD 0x7e0
#define A_MC5_DB_ACK_SRCH_CMD 0x7e4
#define A_MC5_DB_ACK_LRN_CMD 0x7e8
#define A_MC5_DB_ILOOKUP_CMD 0x7ec
#define A_MC5_DB_ELOOKUP_CMD 0x7f0
#define A_MC5_DB_DATA_WRITE_CMD 0x7f4
#define A_MC5_DB_DATA_READ_CMD 0x7f8
#define XGMAC0_0_BASE_ADDR 0x800
#define A_XGM_TX_CTRL 0x800
#define S_TXEN 0
#define V_TXEN(x) ((x) << S_TXEN)
#define F_TXEN V_TXEN(1U)
#define A_XGM_TX_CFG 0x804
#define S_TXPAUSEEN 0
#define V_TXPAUSEEN(x) ((x) << S_TXPAUSEEN)
#define F_TXPAUSEEN V_TXPAUSEEN(1U)
#define A_XGM_RX_CTRL 0x80c
#define S_RXEN 0
#define V_RXEN(x) ((x) << S_RXEN)
#define F_RXEN V_RXEN(1U)
#define A_XGM_RX_CFG 0x810
#define S_DISPAUSEFRAMES 9
#define V_DISPAUSEFRAMES(x) ((x) << S_DISPAUSEFRAMES)
#define F_DISPAUSEFRAMES V_DISPAUSEFRAMES(1U)
#define S_EN1536BFRAMES 8
#define V_EN1536BFRAMES(x) ((x) << S_EN1536BFRAMES)
#define F_EN1536BFRAMES V_EN1536BFRAMES(1U)
#define S_ENJUMBO 7
#define V_ENJUMBO(x) ((x) << S_ENJUMBO)
#define F_ENJUMBO V_ENJUMBO(1U)
#define S_RMFCS 6
#define V_RMFCS(x) ((x) << S_RMFCS)
#define F_RMFCS V_RMFCS(1U)
#define S_ENHASHMCAST 2
#define V_ENHASHMCAST(x) ((x) << S_ENHASHMCAST)
#define F_ENHASHMCAST V_ENHASHMCAST(1U)
#define S_COPYALLFRAMES 0
#define V_COPYALLFRAMES(x) ((x) << S_COPYALLFRAMES)
#define F_COPYALLFRAMES V_COPYALLFRAMES(1U)
#define A_XGM_RX_HASH_LOW 0x814
#define A_XGM_RX_HASH_HIGH 0x818
#define A_XGM_RX_EXACT_MATCH_LOW_1 0x81c
#define A_XGM_RX_EXACT_MATCH_HIGH_1 0x820
#define A_XGM_RX_EXACT_MATCH_LOW_2 0x824
#define A_XGM_RX_EXACT_MATCH_LOW_3 0x82c
#define A_XGM_RX_EXACT_MATCH_LOW_4 0x834
#define A_XGM_RX_EXACT_MATCH_LOW_5 0x83c
#define A_XGM_RX_EXACT_MATCH_LOW_6 0x844
#define A_XGM_RX_EXACT_MATCH_LOW_7 0x84c
#define A_XGM_RX_EXACT_MATCH_LOW_8 0x854
#define A_XGM_STAT_CTRL 0x880
#define S_CLRSTATS 2
#define V_CLRSTATS(x) ((x) << S_CLRSTATS)
#define F_CLRSTATS V_CLRSTATS(1U)
#define A_XGM_RXFIFO_CFG 0x884
#define S_RXFIFOPAUSEHWM 17
#define M_RXFIFOPAUSEHWM 0xfff
#define V_RXFIFOPAUSEHWM(x) ((x) << S_RXFIFOPAUSEHWM)
#define G_RXFIFOPAUSEHWM(x) (((x) >> S_RXFIFOPAUSEHWM) & M_RXFIFOPAUSEHWM)
#define S_RXFIFOPAUSELWM 5
#define M_RXFIFOPAUSELWM 0xfff
#define V_RXFIFOPAUSELWM(x) ((x) << S_RXFIFOPAUSELWM)
#define G_RXFIFOPAUSELWM(x) (((x) >> S_RXFIFOPAUSELWM) & M_RXFIFOPAUSELWM)
#define S_RXSTRFRWRD 1
#define V_RXSTRFRWRD(x) ((x) << S_RXSTRFRWRD)
#define F_RXSTRFRWRD V_RXSTRFRWRD(1U)
#define S_DISERRFRAMES 0
#define V_DISERRFRAMES(x) ((x) << S_DISERRFRAMES)
#define F_DISERRFRAMES V_DISERRFRAMES(1U)
#define A_XGM_TXFIFO_CFG 0x888
#define S_TXFIFOTHRESH 4
#define M_TXFIFOTHRESH 0x1ff
#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH)
#define A_XGM_SERDES_CTRL 0x890
#define A_XGM_SERDES_CTRL0 0x8e0
#define S_SERDESRESET_ 24
#define V_SERDESRESET_(x) ((x) << S_SERDESRESET_)
#define F_SERDESRESET_ V_SERDESRESET_(1U)
#define S_RXENABLE 4
#define V_RXENABLE(x) ((x) << S_RXENABLE)
#define F_RXENABLE V_RXENABLE(1U)
#define S_TXENABLE 3
#define V_TXENABLE(x) ((x) << S_TXENABLE)
#define F_TXENABLE V_TXENABLE(1U)
#define A_XGM_PAUSE_TIMER 0x890
#define A_XGM_RGMII_IMP 0x89c
#define S_XGM_IMPSETUPDATE 6
#define V_XGM_IMPSETUPDATE(x) ((x) << S_XGM_IMPSETUPDATE)
#define F_XGM_IMPSETUPDATE V_XGM_IMPSETUPDATE(1U)
#define S_RGMIIIMPPD 3
#define M_RGMIIIMPPD 0x7
#define V_RGMIIIMPPD(x) ((x) << S_RGMIIIMPPD)
#define S_RGMIIIMPPU 0
#define M_RGMIIIMPPU 0x7
#define V_RGMIIIMPPU(x) ((x) << S_RGMIIIMPPU)
#define S_CALRESET 8
#define V_CALRESET(x) ((x) << S_CALRESET)
#define F_CALRESET V_CALRESET(1U)
#define S_CALUPDATE 7
#define V_CALUPDATE(x) ((x) << S_CALUPDATE)
#define F_CALUPDATE V_CALUPDATE(1U)
#define A_XGM_XAUI_IMP 0x8a0
#define S_CALBUSY 31
#define V_CALBUSY(x) ((x) << S_CALBUSY)
#define F_CALBUSY V_CALBUSY(1U)
#define S_XGM_CALFAULT 29
#define V_XGM_CALFAULT(x) ((x) << S_XGM_CALFAULT)
#define F_XGM_CALFAULT V_XGM_CALFAULT(1U)
#define S_CALIMP 24
#define M_CALIMP 0x1f
#define V_CALIMP(x) ((x) << S_CALIMP)
#define G_CALIMP(x) (((x) >> S_CALIMP) & M_CALIMP)
#define S_XAUIIMP 0
#define M_XAUIIMP 0x7
#define V_XAUIIMP(x) ((x) << S_XAUIIMP)
#define A_XGM_RX_MAX_PKT_SIZE 0x8a8
#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4
#define A_XGM_RESET_CTRL 0x8ac
#define S_XG2G_RESET_ 3
#define V_XG2G_RESET_(x) ((x) << S_XG2G_RESET_)
#define F_XG2G_RESET_ V_XG2G_RESET_(1U)
#define S_RGMII_RESET_ 2
#define V_RGMII_RESET_(x) ((x) << S_RGMII_RESET_)
#define F_RGMII_RESET_ V_RGMII_RESET_(1U)
#define S_PCS_RESET_ 1
#define V_PCS_RESET_(x) ((x) << S_PCS_RESET_)
#define F_PCS_RESET_ V_PCS_RESET_(1U)
#define S_MAC_RESET_ 0
#define V_MAC_RESET_(x) ((x) << S_MAC_RESET_)
#define F_MAC_RESET_ V_MAC_RESET_(1U)
#define A_XGM_PORT_CFG 0x8b8
#define S_CLKDIVRESET_ 3
#define V_CLKDIVRESET_(x) ((x) << S_CLKDIVRESET_)
#define F_CLKDIVRESET_ V_CLKDIVRESET_(1U)
#define S_PORTSPEED 1
#define M_PORTSPEED 0x3
#define V_PORTSPEED(x) ((x) << S_PORTSPEED)
#define S_ENRGMII 0
#define V_ENRGMII(x) ((x) << S_ENRGMII)
#define F_ENRGMII V_ENRGMII(1U)
#define A_XGM_INT_ENABLE 0x8d4
#define S_TXFIFO_PRTY_ERR 17
#define M_TXFIFO_PRTY_ERR 0x7
#define V_TXFIFO_PRTY_ERR(x) ((x) << S_TXFIFO_PRTY_ERR)
#define S_RXFIFO_PRTY_ERR 14
#define M_RXFIFO_PRTY_ERR 0x7
#define V_RXFIFO_PRTY_ERR(x) ((x) << S_RXFIFO_PRTY_ERR)
#define S_TXFIFO_UNDERRUN 13
#define V_TXFIFO_UNDERRUN(x) ((x) << S_TXFIFO_UNDERRUN)
#define F_TXFIFO_UNDERRUN V_TXFIFO_UNDERRUN(1U)
#define S_RXFIFO_OVERFLOW 12
#define V_RXFIFO_OVERFLOW(x) ((x) << S_RXFIFO_OVERFLOW)
#define F_RXFIFO_OVERFLOW V_RXFIFO_OVERFLOW(1U)
#define S_SERDES_LOS 4
#define M_SERDES_LOS 0xf
#define V_SERDES_LOS(x) ((x) << S_SERDES_LOS)
#define S_XAUIPCSCTCERR 3
#define V_XAUIPCSCTCERR(x) ((x) << S_XAUIPCSCTCERR)
#define F_XAUIPCSCTCERR V_XAUIPCSCTCERR(1U)
#define S_XAUIPCSALIGNCHANGE 2
#define V_XAUIPCSALIGNCHANGE(x) ((x) << S_XAUIPCSALIGNCHANGE)
#define F_XAUIPCSALIGNCHANGE V_XAUIPCSALIGNCHANGE(1U)
#define A_XGM_INT_CAUSE 0x8d8
#define A_XGM_XAUI_ACT_CTRL 0x8dc
#define S_TXACTENABLE 1
#define V_TXACTENABLE(x) ((x) << S_TXACTENABLE)
#define F_TXACTENABLE V_TXACTENABLE(1U)
#define A_XGM_SERDES_CTRL0 0x8e0
#define S_RESET3 23
#define V_RESET3(x) ((x) << S_RESET3)
#define F_RESET3 V_RESET3(1U)
#define S_RESET2 22
#define V_RESET2(x) ((x) << S_RESET2)
#define F_RESET2 V_RESET2(1U)
#define S_RESET1 21
#define V_RESET1(x) ((x) << S_RESET1)
#define F_RESET1 V_RESET1(1U)
#define S_RESET0 20
#define V_RESET0(x) ((x) << S_RESET0)
#define F_RESET0 V_RESET0(1U)
#define S_PWRDN3 19
#define V_PWRDN3(x) ((x) << S_PWRDN3)
#define F_PWRDN3 V_PWRDN3(1U)
#define S_PWRDN2 18
#define V_PWRDN2(x) ((x) << S_PWRDN2)
#define F_PWRDN2 V_PWRDN2(1U)
#define S_PWRDN1 17
#define V_PWRDN1(x) ((x) << S_PWRDN1)
#define F_PWRDN1 V_PWRDN1(1U)
#define S_PWRDN0 16
#define V_PWRDN0(x) ((x) << S_PWRDN0)
#define F_PWRDN0 V_PWRDN0(1U)
#define S_RESETPLL23 15
#define V_RESETPLL23(x) ((x) << S_RESETPLL23)
#define F_RESETPLL23 V_RESETPLL23(1U)
#define S_RESETPLL01 14
#define V_RESETPLL01(x) ((x) << S_RESETPLL01)
#define F_RESETPLL01 V_RESETPLL01(1U)
#define A_XGM_SERDES_STAT0 0x8f0
#define S_LOWSIG0 0
#define V_LOWSIG0(x) ((x) << S_LOWSIG0)
#define F_LOWSIG0 V_LOWSIG0(1U)
#define A_XGM_SERDES_STAT3 0x8fc
#define A_XGM_STAT_TX_BYTE_LOW 0x900
#define A_XGM_STAT_TX_BYTE_HIGH 0x904
#define A_XGM_STAT_TX_FRAME_LOW 0x908
#define A_XGM_STAT_TX_FRAME_HIGH 0x90c
#define A_XGM_STAT_TX_BCAST 0x910
#define A_XGM_STAT_TX_MCAST 0x914
#define A_XGM_STAT_TX_PAUSE 0x918
#define A_XGM_STAT_TX_64B_FRAMES 0x91c
#define A_XGM_STAT_TX_65_127B_FRAMES 0x920
#define A_XGM_STAT_TX_128_255B_FRAMES 0x924
#define A_XGM_STAT_TX_256_511B_FRAMES 0x928
#define A_XGM_STAT_TX_512_1023B_FRAMES 0x92c
#define A_XGM_STAT_TX_1024_1518B_FRAMES 0x930
#define A_XGM_STAT_TX_1519_MAXB_FRAMES 0x934
#define A_XGM_STAT_TX_ERR_FRAMES 0x938
#define A_XGM_STAT_RX_BYTES_LOW 0x93c
#define A_XGM_STAT_RX_BYTES_HIGH 0x940
#define A_XGM_STAT_RX_FRAMES_LOW 0x944
#define A_XGM_STAT_RX_FRAMES_HIGH 0x948
#define A_XGM_STAT_RX_BCAST_FRAMES 0x94c
#define A_XGM_STAT_RX_MCAST_FRAMES 0x950
#define A_XGM_STAT_RX_PAUSE_FRAMES 0x954
#define A_XGM_STAT_RX_64B_FRAMES 0x958
#define A_XGM_STAT_RX_65_127B_FRAMES 0x95c
#define A_XGM_STAT_RX_128_255B_FRAMES 0x960
#define A_XGM_STAT_RX_256_511B_FRAMES 0x964
#define A_XGM_STAT_RX_512_1023B_FRAMES 0x968
#define A_XGM_STAT_RX_1024_1518B_FRAMES 0x96c
#define A_XGM_STAT_RX_1519_MAXB_FRAMES 0x970
#define A_XGM_STAT_RX_SHORT_FRAMES 0x974
#define A_XGM_STAT_RX_OVERSIZE_FRAMES 0x978
#define A_XGM_STAT_RX_JABBER_FRAMES 0x97c
#define A_XGM_STAT_RX_CRC_ERR_FRAMES 0x980
#define A_XGM_STAT_RX_LENGTH_ERR_FRAMES 0x984
#define A_XGM_STAT_RX_SYM_CODE_ERR_FRAMES 0x988
#define A_XGM_SERDES_STATUS0 0x98c
#define A_XGM_SERDES_STATUS1 0x990
#define S_CMULOCK 31
#define V_CMULOCK(x) ((x) << S_CMULOCK)
#define F_CMULOCK V_CMULOCK(1U)
#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4
#define A_XGM_RX_SPI4_SOP_EOP_CNT 0x9ac
#define XGMAC0_1_BASE_ADDR 0xa00
/*
* This file is part of the Chelsio T3 Ethernet driver.
*
* Copyright (C) 2005-2006 Chelsio Communications. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
* release for licensing terms and conditions.
*/
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/dma-mapping.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
#define USE_GTS 0
#define SGE_RX_SM_BUF_SIZE 1536
#define SGE_RX_COPY_THRES 256
#define SGE_RX_DROP_THRES 16
/*
* Period of the Tx buffer reclaim timer. This timer does not need to run
* frequently as Tx buffers are usually reclaimed by new Tx packets.
*/
#define TX_RECLAIM_PERIOD (HZ / 4)
/* WR size in bytes */
#define WR_LEN (WR_FLITS * 8)
/*
* Types of Tx queues in each queue set. Order here matters, do not change.
*/
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
/* Values for sge_txq.flags */
enum {
TXQ_RUNNING = 1 << 0, /* fetch engine is running */
TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
};
struct tx_desc {
u64 flit[TX_DESC_FLITS];
};
struct rx_desc {
__be32 addr_lo;
__be32 len_gen;
__be32 gen2;
__be32 addr_hi;
};
struct tx_sw_desc { /* SW state per Tx descriptor */
struct sk_buff *skb;
};
struct rx_sw_desc { /* SW state per Rx descriptor */
struct sk_buff *skb;
DECLARE_PCI_UNMAP_ADDR(dma_addr);
};
struct rsp_desc { /* response queue descriptor */
struct rss_header rss_hdr;
__be32 flags;
__be32 len_cq;
u8 imm_data[47];
u8 intr_gen;
};
struct unmap_info { /* packet unmapping info, overlays skb->cb */
int sflit; /* start flit of first SGL entry in Tx descriptor */
u16 fragidx; /* first page fragment in current Tx descriptor */
u16 addr_idx; /* buffer index of first SGL entry in descriptor */
u32 len; /* mapped length of skb main body */
};
/*
* Maps a number of flits to the number of Tx descriptors that can hold them.
* The formula is
*
* desc = 1 + (flits - 2) / (WR_FLITS - 1).
*
* HW allows up to 4 descriptors to be combined into a WR.
*/
static u8 flit_desc_map[] = {
0,
#if SGE_NUM_GENBITS == 1
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};
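/*
 * Worked example of the formula above (assuming WR_FLITS is 16 with one
 * generation bit and 15 with two, as the table implies): 30 flits map to
 * 1 + (30 - 2) / 15 = 2 descriptors with one generation bit and to
 * 1 + (30 - 2) / 14 = 3 descriptors with two, matching the table entries.
 */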
static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
{
return container_of(q, struct sge_qset, fl[qidx]);
}
static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
return container_of(q, struct sge_qset, rspq);
}
static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
{
return container_of(q, struct sge_qset, txq[qidx]);
}
/**
* refill_rspq - replenish an SGE response queue
* @adapter: the adapter
* @q: the response queue to replenish
* @credits: how many new responses to make available
*
* Replenishes a response queue by making the supplied number of responses
* available to HW.
*/
static inline void refill_rspq(struct adapter *adapter,
const struct sge_rspq *q, unsigned int credits)
{
t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}
/**
* need_skb_unmap - does the platform need unmapping of sk_buffs?
*
 * Returns true if the platform needs sk_buff unmapping. Because the
 * result is a compile-time constant, the compiler can optimize away the
 * unmapping code when it returns false.
*/
static inline int need_skb_unmap(void)
{
/*
 * This structure is used to tell if the platform needs buffer
* unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
*/
struct dummy {
DECLARE_PCI_UNMAP_ADDR(addr);
};
return sizeof(struct dummy) != 0;
}
/**
* unmap_skb - unmap a packet main body and its page fragments
* @skb: the packet
* @q: the Tx queue containing Tx descriptors for the packet
* @cidx: index of Tx descriptor
* @pdev: the PCI device
*
* Unmap the main body of an sk_buff and its page fragments, if any.
* Because of the fairly complicated structure of our SGLs and the desire
* to conserve space for metadata, we keep the information necessary to
* unmap an sk_buff partly in the sk_buff itself (in its cb), and partly
* in the Tx descriptors (the physical addresses of the various data
* buffers). The send functions initialize the state in skb->cb so we
* can unmap the buffers held in the first Tx descriptor here, and we
* have enough information at this point to update the state for the next
* Tx descriptor.
*/
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
unsigned int cidx, struct pci_dev *pdev)
{
const struct sg_ent *sgp;
struct unmap_info *ui = (struct unmap_info *)skb->cb;
int nfrags, frag_idx, curflit, j = ui->addr_idx;
sgp = (struct sg_ent *)&q->desc[cidx].flit[ui->sflit];
if (ui->len) {
pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), ui->len,
PCI_DMA_TODEVICE);
ui->len = 0; /* so we know for next descriptor for this skb */
j = 1;
}
frag_idx = ui->fragidx;
curflit = ui->sflit + 1 + j;
nfrags = skb_shinfo(skb)->nr_frags;
while (frag_idx < nfrags && curflit < WR_FLITS) {
pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
skb_shinfo(skb)->frags[frag_idx].size,
PCI_DMA_TODEVICE);
j ^= 1;
if (j == 0) {
sgp++;
curflit++;
}
curflit++;
frag_idx++;
}
if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
ui->fragidx = frag_idx;
ui->addr_idx = j;
ui->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
}
}
/**
* free_tx_desc - reclaims Tx descriptors and their buffers
* @adapter: the adapter
* @q: the Tx queue to reclaim descriptors from
* @n: the number of descriptors to reclaim
*
* Reclaims Tx descriptors from an SGE Tx queue and frees the associated
* Tx buffers. Called with the Tx queue lock held.
*/
static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
unsigned int n)
{
struct tx_sw_desc *d;
struct pci_dev *pdev = adapter->pdev;
unsigned int cidx = q->cidx;
d = &q->sdesc[cidx];
while (n--) {
if (d->skb) { /* an SGL is present */
if (need_skb_unmap())
unmap_skb(d->skb, q, cidx, pdev);
if (d->skb->priority == cidx)
kfree_skb(d->skb);
}
++d;
if (++cidx == q->size) {
cidx = 0;
d = q->sdesc;
}
}
q->cidx = cidx;
}
/**
* reclaim_completed_tx - reclaims completed Tx descriptors
* @adapter: the adapter
* @q: the Tx queue to reclaim completed descriptors from
*
* Reclaims Tx descriptors that the SGE has indicated it has processed,
* and frees the associated buffers if possible. Called with the Tx
* queue's lock held.
*/
static inline void reclaim_completed_tx(struct adapter *adapter,
struct sge_txq *q)
{
unsigned int reclaim = q->processed - q->cleaned;
if (reclaim) {
free_tx_desc(adapter, q, reclaim);
q->cleaned += reclaim;
q->in_use -= reclaim;
}
}
/**
* should_restart_tx - are there enough resources to restart a Tx queue?
* @q: the Tx queue
*
* Checks if there are enough descriptors to restart a suspended Tx queue.
*/
static inline int should_restart_tx(const struct sge_txq *q)
{
unsigned int r = q->processed - q->cleaned;
return q->in_use - r < (q->size >> 1);
}
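/*
 * For example, with a 1024-descriptor Tx queue a suspended queue becomes
 * eligible for restart once fewer than 512 descriptors remain in use and
 * not yet processed by the SGE.
 */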
/**
* free_rx_bufs - free the Rx buffers on an SGE free list
* @pdev: the PCI device associated with the adapter
* @rxq: the SGE free list to clean up
*
* Release the buffers on an SGE free-buffer Rx queue. HW fetching from
* this queue should be stopped before calling this function.
*/
static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
{
unsigned int cidx = q->cidx;
while (q->credits--) {
struct rx_sw_desc *d = &q->sdesc[cidx];
pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
q->buf_size, PCI_DMA_FROMDEVICE);
kfree_skb(d->skb);
d->skb = NULL;
if (++cidx == q->size)
cidx = 0;
}
}
/**
* add_one_rx_buf - add a packet buffer to a free-buffer list
* @skb: the buffer to add
* @len: the buffer length
* @d: the HW Rx descriptor to write
* @sd: the SW Rx descriptor to write
* @gen: the generation bit value
* @pdev: the PCI device associated with the adapter
*
* Add a buffer of the given length to the supplied HW and SW Rx
* descriptors.
*/
static inline void add_one_rx_buf(struct sk_buff *skb, unsigned int len,
struct rx_desc *d, struct rx_sw_desc *sd,
unsigned int gen, struct pci_dev *pdev)
{
dma_addr_t mapping;
sd->skb = skb;
mapping = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
pci_unmap_addr_set(sd, dma_addr, mapping);
d->addr_lo = cpu_to_be32(mapping);
d->addr_hi = cpu_to_be32((u64) mapping >> 32);
wmb();
d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
}
/**
* refill_fl - refill an SGE free-buffer list
* @adapter: the adapter
* @q: the free-list to refill
* @n: the number of new buffers to allocate
* @gfp: the gfp flags for allocating new buffers
*
* (Re)populate an SGE free-buffer list with up to @n new packet buffers,
 * allocated with the supplied gfp flags. The caller must ensure that
* @n does not exceed the queue's capacity.
*/
static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
struct rx_desc *d = &q->desc[q->pidx];
while (n--) {
struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
if (!skb)
break;
add_one_rx_buf(skb, q->buf_size, d, sd, q->gen, adap->pdev);
d++;
sd++;
if (++q->pidx == q->size) {
q->pidx = 0;
q->gen ^= 1;
sd = q->sdesc;
d = q->desc;
}
q->credits++;
}
t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}
static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
}
/**
* recycle_rx_buf - recycle a receive buffer
* @adapter: the adapter
* @q: the SGE free list
* @idx: index of buffer to recycle
*
* Recycles the specified buffer on the given free list by adding it at
* the next available slot on the list.
*/
static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
unsigned int idx)
{
struct rx_desc *from = &q->desc[idx];
struct rx_desc *to = &q->desc[q->pidx];
q->sdesc[q->pidx] = q->sdesc[idx];
to->addr_lo = from->addr_lo; /* already big endian */
to->addr_hi = from->addr_hi; /* likewise */
wmb();
to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
q->credits++;
if (++q->pidx == q->size) {
q->pidx = 0;
q->gen ^= 1;
}
t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}
/**
* alloc_ring - allocate resources for an SGE descriptor ring
* @pdev: the PCI device
* @nelem: the number of descriptors
* @elem_size: the size of each descriptor
* @sw_size: the size of the SW state associated with each ring element
* @phys: the physical address of the allocated ring
* @metadata: address of the array holding the SW state for the ring
*
* Allocates resources for an SGE descriptor ring, such as Tx queues,
* free buffer lists, or response queues. Each SGE ring requires
* space for its HW descriptors plus, optionally, space for the SW state
* associated with each HW entry (the metadata). The function returns
* three values: the virtual address for the HW ring (the return value
* of the function), the physical address of the HW ring, and the address
* of the SW ring.
*/
static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
size_t sw_size, dma_addr_t *phys, void *metadata)
{
size_t len = nelem * elem_size;
void *s = NULL;
void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
if (!p)
return NULL;
if (sw_size) {
s = kcalloc(nelem, sw_size, GFP_KERNEL);
if (!s) {
dma_free_coherent(&pdev->dev, len, p, *phys);
return NULL;
}
}
if (metadata)
*(void **)metadata = s;
memset(p, 0, len);
return p;
}
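/*
 * Illustrative call (a sketch based on the free-list fields used in this
 * file, not necessarily how the driver sets up its rings):
 *
 *	q->desc = alloc_ring(pdev, q->size, sizeof(struct rx_desc),
 *			     sizeof(struct rx_sw_desc), &q->phys_addr,
 *			     &q->sdesc);
 *	if (!q->desc)
 *		return -ENOMEM;
 */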
/**
* free_qset - free the resources of an SGE queue set
* @adapter: the adapter owning the queue set
* @q: the queue set
*
* Release the HW and SW resources associated with an SGE queue set, such
* as HW contexts, packet buffers, and descriptor rings. Traffic to the
* queue set must be quiesced prior to calling this.
*/
void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
{
int i;
struct pci_dev *pdev = adapter->pdev;
if (q->tx_reclaim_timer.function)
del_timer_sync(&q->tx_reclaim_timer);
for (i = 0; i < SGE_RXQ_PER_SET; ++i)
if (q->fl[i].desc) {
spin_lock(&adapter->sge.reg_lock);
t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
spin_unlock(&adapter->sge.reg_lock);
free_rx_bufs(pdev, &q->fl[i]);
kfree(q->fl[i].sdesc);
dma_free_coherent(&pdev->dev,
q->fl[i].size *
sizeof(struct rx_desc), q->fl[i].desc,
q->fl[i].phys_addr);
}
for (i = 0; i < SGE_TXQ_PER_SET; ++i)
if (q->txq[i].desc) {
spin_lock(&adapter->sge.reg_lock);
t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
spin_unlock(&adapter->sge.reg_lock);
if (q->txq[i].sdesc) {
free_tx_desc(adapter, &q->txq[i],
q->txq[i].in_use);
kfree(q->txq[i].sdesc);
}
dma_free_coherent(&pdev->dev,
q->txq[i].size *
sizeof(struct tx_desc),
q->txq[i].desc, q->txq[i].phys_addr);
__skb_queue_purge(&q->txq[i].sendq);
}
if (q->rspq.desc) {
spin_lock(&adapter->sge.reg_lock);
t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
spin_unlock(&adapter->sge.reg_lock);
dma_free_coherent(&pdev->dev,
q->rspq.size * sizeof(struct rsp_desc),
q->rspq.desc, q->rspq.phys_addr);
}
if (q->netdev)
q->netdev->atalk_ptr = NULL;
memset(q, 0, sizeof(*q));
}
/**
* init_qset_cntxt - initialize an SGE queue set context info
* @qs: the queue set
* @id: the queue set id
*
* Initializes the TIDs and context ids for the queues of a queue set.
*/
static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
{
qs->rspq.cntxt_id = id;
qs->fl[0].cntxt_id = 2 * id;
qs->fl[1].cntxt_id = 2 * id + 1;
qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
}
/**
* sgl_len - calculates the size of an SGL of the given capacity
* @n: the number of SGL entries
*
* Calculates the number of flits needed for a scatter/gather list that
* can hold the given number of entries.
*/
static inline unsigned int sgl_len(unsigned int n)
{
/* alternatively: 3 * (n / 2) + 2 * (n & 1) */
return (3 * n) / 2 + (n & 1);
}
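/*
 * For example, sgl_len(3) = 9 / 2 + 1 = 5 flits and sgl_len(4) = 6 flits:
 * each pair of SGL entries packs into 3 flits and an odd final entry
 * takes 2.
 */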
/**
* flits_to_desc - returns the num of Tx descriptors for the given flits
* @n: the number of flits
*
* Calculates the number of Tx descriptors needed for the supplied number
* of flits.
*/
static inline unsigned int flits_to_desc(unsigned int n)
{
BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
return flit_desc_map[n];
}
/**
* get_packet - return the next ingress packet buffer from a free list
* @adap: the adapter that received the packet
* @fl: the SGE free list holding the packet
* @len: the packet length including any SGE padding
* @drop_thres: # of remaining buffers before we start dropping packets
*
* Get the next packet from a free list and complete setup of the
* sk_buff. If the packet is small we make a copy and recycle the
* original buffer, otherwise we use the original buffer itself. If a
* positive drop threshold is supplied packets are dropped and their
* buffers recycled if (a) the number of remaining buffers is under the
* threshold and the packet is too big to copy, or (b) the packet should
* be copied but there is no memory for the copy.
*/
static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
unsigned int len, unsigned int drop_thres)
{
struct sk_buff *skb = NULL;
struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
prefetch(sd->skb->data);
if (len <= SGE_RX_COPY_THRES) {
skb = alloc_skb(len, GFP_ATOMIC);
if (likely(skb != NULL)) {
__skb_put(skb, len);
pci_dma_sync_single_for_cpu(adap->pdev,
pci_unmap_addr(sd,
dma_addr),
len, PCI_DMA_FROMDEVICE);
memcpy(skb->data, sd->skb->data, len);
pci_dma_sync_single_for_device(adap->pdev,
pci_unmap_addr(sd,
dma_addr),
len, PCI_DMA_FROMDEVICE);
} else if (!drop_thres)
goto use_orig_buf;
recycle:
recycle_rx_buf(adap, fl, fl->cidx);
return skb;
}
if (unlikely(fl->credits < drop_thres))
goto recycle;
use_orig_buf:
pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
fl->buf_size, PCI_DMA_FROMDEVICE);
skb = sd->skb;
skb_put(skb, len);
__refill_fl(adap, fl);
return skb;
}
/**
* get_imm_packet - return the next ingress packet buffer from a response
* @resp: the response descriptor containing the packet data
*
* Return a packet containing the immediate data of the given response.
*/
static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
{
struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
if (skb) {
__skb_put(skb, IMMED_PKT_SIZE);
memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE);
}
return skb;
}
/**
* calc_tx_descs - calculate the number of Tx descriptors for a packet
* @skb: the packet
*
* Returns the number of Tx descriptors needed for the given Ethernet
* packet. Ethernet packets require addition of WR and CPL headers.
*/
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
unsigned int flits;
if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
return 1;
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
if (skb_shinfo(skb)->gso_size)
flits++;
return flits_to_desc(flits);
}
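/*
 * For example, a non-immediate packet with a linear body and two page
 * fragments needs sgl_len(3) + 2 = 7 flits (8 with TSO), which
 * flits_to_desc() maps to a single Tx descriptor.
 */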
/**
* make_sgl - populate a scatter/gather list for a packet
* @skb: the packet
* @sgp: the SGL to populate
* @start: start address of skb main body data to include in the SGL
* @len: length of skb main body data to include in the SGL
* @pdev: the PCI device
*
* Generates a scatter/gather list for the buffers that make up a packet
* and returns the SGL size in 8-byte words. The caller must size the SGL
* appropriately.
*/
static inline unsigned int make_sgl(const struct sk_buff *skb,
struct sg_ent *sgp, unsigned char *start,
unsigned int len, struct pci_dev *pdev)
{
dma_addr_t mapping;
unsigned int i, j = 0, nfrags;
if (len) {
mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
sgp->len[0] = cpu_to_be32(len);
sgp->addr[0] = cpu_to_be64(mapping);
j = 1;
}
nfrags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < nfrags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
mapping = pci_map_page(pdev, frag->page, frag->page_offset,
frag->size, PCI_DMA_TODEVICE);
sgp->len[j] = cpu_to_be32(frag->size);
sgp->addr[j] = cpu_to_be64(mapping);
j ^= 1;
if (j == 0)
++sgp;
}
if (j)
sgp->len[j] = 0;
return ((nfrags + (len != 0)) * 3) / 2 + j;
}
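/*
 * Note that the value returned above equals sgl_len() of the number of
 * SGL entries written (the skb main body, if any, plus its page
 * fragments).
 */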
/**
* check_ring_tx_db - check and potentially ring a Tx queue's doorbell
* @adap: the adapter
* @q: the Tx queue
*
 * Ring the doorbell if a Tx queue is asleep. There is a natural race
 * where the HW may go to sleep just after we check; in that case the
 * interrupt handler will detect the outstanding TX packet and ring the
 * doorbell for us.
*
* When GTS is disabled we unconditionally ring the doorbell.
*/
static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
{
#if USE_GTS
clear_bit(TXQ_LAST_PKT_DB, &q->flags);
if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
set_bit(TXQ_LAST_PKT_DB, &q->flags);
t3_write_reg(adap, A_SG_KDOORBELL,
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}
#else
wmb(); /* write descriptors before telling HW */
t3_write_reg(adap, A_SG_KDOORBELL,
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
#endif
}
static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
{
#if SGE_NUM_GENBITS == 2
d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
#endif
}
/**
* write_wr_hdr_sgl - write a WR header and, optionally, SGL
* @ndesc: number of Tx descriptors spanned by the SGL
* @skb: the packet corresponding to the WR
* @d: first Tx descriptor to be written
* @pidx: index of above descriptors
* @q: the SGE Tx queue
* @sgl: the SGL
* @flits: number of flits to the start of the SGL in the first descriptor
* @sgl_flits: the SGL size in flits
* @gen: the Tx descriptor generation
* @wr_hi: top 32 bits of WR header based on WR type (big endian)
* @wr_lo: low 32 bits of WR header based on WR type (big endian)
*
* Write a work request header and an associated SGL. If the SGL is
* small enough to fit into one Tx descriptor it has already been written
* and we just need to write the WR header. Otherwise we distribute the
* SGL across the number of descriptors it spans.
*/
static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
struct tx_desc *d, unsigned int pidx,
const struct sge_txq *q,
const struct sg_ent *sgl,
unsigned int flits, unsigned int sgl_flits,
unsigned int gen, unsigned int wr_hi,
unsigned int wr_lo)
{
struct work_request_hdr *wrp = (struct work_request_hdr *)d;
struct tx_sw_desc *sd = &q->sdesc[pidx];
sd->skb = skb;
if (need_skb_unmap()) {
struct unmap_info *ui = (struct unmap_info *)skb->cb;
ui->fragidx = 0;
ui->addr_idx = 0;
ui->sflit = flits;
}
if (likely(ndesc == 1)) {
skb->priority = pidx;
wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
V_WR_SGLSFLT(flits)) | wr_hi;
wmb();
wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
V_WR_GEN(gen)) | wr_lo;
wr_gen2(d, gen);
} else {
unsigned int ogen = gen;
const u64 *fp = (const u64 *)sgl;
struct work_request_hdr *wp = wrp;
wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
V_WR_SGLSFLT(flits)) | wr_hi;
while (sgl_flits) {
unsigned int avail = WR_FLITS - flits;
if (avail > sgl_flits)
avail = sgl_flits;
memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
sgl_flits -= avail;
ndesc--;
if (!sgl_flits)
break;
fp += avail;
d++;
sd++;
if (++pidx == q->size) {
pidx = 0;
gen ^= 1;
d = q->desc;
sd = q->sdesc;
}
sd->skb = skb;
wrp = (struct work_request_hdr *)d;
wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
V_WR_SGLSFLT(1)) | wr_hi;
wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
sgl_flits + 1)) |
V_WR_GEN(gen)) | wr_lo;
wr_gen2(d, gen);
flits = 1;
}
skb->priority = pidx;
wrp->wr_hi |= htonl(F_WR_EOP);
wmb();
wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
wr_gen2((struct tx_desc *)wp, ogen);
WARN_ON(ndesc != 0);
}
}
/**
* write_tx_pkt_wr - write a TX_PKT work request
* @adap: the adapter
* @skb: the packet to send
* @pi: the egress interface
* @pidx: index of the first Tx descriptor to write
* @gen: the generation value to use
* @q: the Tx queue
* @ndesc: number of descriptors the packet will occupy
* @compl: the value of the COMPL bit to use
*
* Generate a TX_PKT work request to send the supplied packet.
*/
static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
const struct port_info *pi,
unsigned int pidx, unsigned int gen,
struct sge_txq *q, unsigned int ndesc,
unsigned int compl)
{
unsigned int flits, sgl_flits, cntrl, tso_info;
struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
struct tx_desc *d = &q->desc[pidx];
struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
cpl->len = htonl(skb->len | 0x80000000);
cntrl = V_TXPKT_INTF(pi->port_id);
if (vlan_tx_tag_present(skb) && pi->vlan_grp)
cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
if (tso_info) {
int eth_type;
struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
d->flit[2] = 0;
cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
hdr->cntrl = htonl(cntrl);
eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
CPL_ETH_II : CPL_ETH_II_VLAN;
tso_info |= V_LSO_ETH_TYPE(eth_type) |
V_LSO_IPHDR_WORDS(skb->nh.iph->ihl) |
V_LSO_TCPHDR_WORDS(skb->h.th->doff);
hdr->lso_info = htonl(tso_info);
flits = 3;
} else {
cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
cpl->cntrl = htonl(cntrl);
if (skb->len <= WR_LEN - sizeof(*cpl)) {
q->sdesc[pidx].skb = NULL;
if (!skb->data_len)
memcpy(&d->flit[2], skb->data, skb->len);
else
skb_copy_bits(skb, 0, &d->flit[2], skb->len);
flits = (skb->len + 7) / 8 + 2;
cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
| F_WR_SOP | F_WR_EOP | compl);
wmb();
cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
V_WR_TID(q->token));
wr_gen2(d, gen);
kfree_skb(skb);
return;
}
flits = 2;
}
sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
if (need_skb_unmap())
((struct unmap_info *)skb->cb)->len = skb_headlen(skb);
write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
htonl(V_WR_TID(q->token)));
}
/**
 * t3_eth_xmit - add a packet to the Ethernet Tx queue
* @skb: the packet
* @dev: the egress net device
*
* Add a packet to an SGE Tx queue. Runs with softirqs disabled.
*/
int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned int ndesc, pidx, credits, gen, compl;
const struct port_info *pi = netdev_priv(dev);
struct adapter *adap = dev->priv;
struct sge_qset *qs = dev2qset(dev);
struct sge_txq *q = &qs->txq[TXQ_ETH];
/*
 * The chip's minimum packet length is 9 octets, but play it safe and
 * reject anything shorter than an Ethernet header.
*/
if (unlikely(skb->len < ETH_HLEN)) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
spin_lock(&q->lock);
reclaim_completed_tx(adap, q);
credits = q->size - q->in_use;
ndesc = calc_tx_descs(skb);
if (unlikely(credits < ndesc)) {
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
set_bit(TXQ_ETH, &qs->txq_stopped);
q->stops++;
dev_err(&adap->pdev->dev,
"%s: Tx ring %u full while queue awake!\n",
dev->name, q->cntxt_id & 7);
}
spin_unlock(&q->lock);
return NETDEV_TX_BUSY;
}
q->in_use += ndesc;
if (unlikely(credits - ndesc < q->stop_thres)) {
q->stops++;
netif_stop_queue(dev);
set_bit(TXQ_ETH, &qs->txq_stopped);
#if !USE_GTS
if (should_restart_tx(q) &&
test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
q->restarts++;
netif_wake_queue(dev);
}
#endif
}
gen = q->gen;
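/*
 * Request a Tx completion for roughly one in every 8 descriptors: bit 3
 * of the running unacked count is shifted into the WR COMPL position and
 * the counter is then folded back modulo 8.
 */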
q->unacked += ndesc;
compl = (q->unacked & 8) << (S_WR_COMPL - 3);
q->unacked &= 7;
pidx = q->pidx;
q->pidx += ndesc;
if (q->pidx >= q->size) {
q->pidx -= q->size;
q->gen ^= 1;
}
/* update port statistics */
if (skb->ip_summed == CHECKSUM_COMPLETE)
qs->port_stats[SGE_PSTAT_TX_CSUM]++;
if (skb_shinfo(skb)->gso_size)
qs->port_stats[SGE_PSTAT_TSO]++;
if (vlan_tx_tag_present(skb) && pi->vlan_grp)
qs->port_stats[SGE_PSTAT_VLANINS]++;
dev->trans_start = jiffies;
spin_unlock(&q->lock);
/*
* We do not use Tx completion interrupts to free DMAd Tx packets.
 * This is good for performance but means that we rely on new Tx
 * packets arriving to run the destructors of completed packets,
 * which open up space in their sockets' send queues. Sometimes
 * we do not get such new packets, causing Tx to stall. A single
 * UDP transmitter is a good example of this situation. We have
 * a cleanup timer that periodically reclaims completed packets
 * but it doesn't run often enough (nor do we want it to) to prevent
 * lengthy stalls. A solution to this problem is to run the
 * destructor early, after the packet is queued but before it's DMAd.
 * A downside is that we lie to socket memory accounting, but the
 * amount of extra memory is reasonable (limited by the number of Tx
 * descriptors), the packets almost always do get freed quickly by
 * new packets, and for protocols like TCP that wait for acks to
 * really free up the data the extra memory is even less.
* On the positive side we run the destructors on the sending CPU
* rather than on a potentially different completing CPU, usually a
* good thing. We also run them without holding our Tx queue lock,
* unlike what reclaim_completed_tx() would otherwise do.
*
* Run the destructor before telling the DMA engine about the packet
* to make sure it doesn't complete and get freed prematurely.
*/
if (likely(!skb_shared(skb)))
skb_orphan(skb);
write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
check_ring_tx_db(adap, q);
return NETDEV_TX_OK;
}
/**
* write_imm - write a packet into a Tx descriptor as immediate data
* @d: the Tx descriptor to write
* @skb: the packet
* @len: the length of packet data to write as immediate data
* @gen: the generation bit value to write
*
* Writes a packet as immediate data into a Tx descriptor. The packet
* contains a work request at its beginning. We must write the packet
 * carefully so the SGE doesn't accidentally read it before it has been
 * written in its entirety.
*/
static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
unsigned int len, unsigned int gen)
{
struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
struct work_request_hdr *to = (struct work_request_hdr *)d;
memcpy(&to[1], &from[1], len - sizeof(*from));
to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
V_WR_BCNTLFLT(len & 7));
wmb();
to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
V_WR_LEN((len + 7) / 8));
wr_gen2(d, gen);
kfree_skb(skb);
}
/**
* check_desc_avail - check descriptor availability on a send queue
* @adap: the adapter
* @q: the send queue
* @skb: the packet needing the descriptors
* @ndesc: the number of Tx descriptors needed
* @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
*
* Checks if the requested number of Tx descriptors is available on an
* SGE send queue. If the queue is already suspended or not enough
* descriptors are available the packet is queued for later transmission.
* Must be called with the Tx queue locked.
*
* Returns 0 if enough descriptors are available, 1 if there aren't
* enough descriptors and the packet has been queued, and 2 if the caller
* needs to retry because there weren't enough descriptors at the
 * beginning of the call but some freed up in the meantime.
*/
static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
struct sk_buff *skb, unsigned int ndesc,
unsigned int qid)
{
if (unlikely(!skb_queue_empty(&q->sendq))) {
addq_exit:
__skb_queue_tail(&q->sendq, skb);
return 1;
}
if (unlikely(q->size - q->in_use < ndesc)) {
struct sge_qset *qs = txq_to_qset(q, qid);
set_bit(qid, &qs->txq_stopped);
smp_mb__after_clear_bit();
if (should_restart_tx(q) &&
test_and_clear_bit(qid, &qs->txq_stopped))
return 2;
q->stops++;
goto addq_exit;
}
return 0;
}
/**
* reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
* @q: the SGE control Tx queue
*
* This is a variant of reclaim_completed_tx() that is used for Tx queues
* that send only immediate data (presently just the control queues) and
* thus do not have any sk_buffs to release.
*/
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
unsigned int reclaim = q->processed - q->cleaned;
q->in_use -= reclaim;
q->cleaned += reclaim;
}
static inline int immediate(const struct sk_buff *skb)
{
return skb->len <= WR_LEN && !skb->data_len;
}
/**
* ctrl_xmit - send a packet through an SGE control Tx queue
* @adap: the adapter
* @q: the control queue
* @skb: the packet
*
* Send a packet through an SGE control Tx queue. Packets sent through
* a control queue must fit entirely as immediate data in a single Tx
* descriptor and have no page fragments.
*/
static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
struct sk_buff *skb)
{
int ret;
struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
if (unlikely(!immediate(skb))) {
WARN_ON(1);
dev_kfree_skb(skb);
return NET_XMIT_SUCCESS;
}
wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
wrp->wr_lo = htonl(V_WR_TID(q->token));
spin_lock(&q->lock);
again:
reclaim_completed_tx_imm(q);
ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
if (unlikely(ret)) {
if (ret == 1) {
spin_unlock(&q->lock);
return NET_XMIT_CN;
}
goto again;
}
write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
q->in_use++;
if (++q->pidx >= q->size) {
q->pidx = 0;
q->gen ^= 1;
}
spin_unlock(&q->lock);
wmb();
t3_write_reg(adap, A_SG_KDOORBELL,
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
return NET_XMIT_SUCCESS;
}
/**
* restart_ctrlq - restart a suspended control queue
 * @qs: the queue set containing the control queue
*
* Resumes transmission on a suspended Tx control queue.
*/
static void restart_ctrlq(unsigned long data)
{
struct sk_buff *skb;
struct sge_qset *qs = (struct sge_qset *)data;
struct sge_txq *q = &qs->txq[TXQ_CTRL];
struct adapter *adap = qs->netdev->priv;
spin_lock(&q->lock);
again:
reclaim_completed_tx_imm(q);
while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) {
write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
if (++q->pidx >= q->size) {
q->pidx = 0;
q->gen ^= 1;
}
q->in_use++;
}
if (!skb_queue_empty(&q->sendq)) {
set_bit(TXQ_CTRL, &qs->txq_stopped);
smp_mb__after_clear_bit();
if (should_restart_tx(q) &&
test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
goto again;
q->stops++;
}
spin_unlock(&q->lock);
t3_write_reg(adap, A_SG_KDOORBELL,
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}
/**
* write_ofld_wr - write an offload work request
* @adap: the adapter
* @skb: the packet to send
* @q: the Tx queue
* @pidx: index of the first Tx descriptor to write
* @gen: the generation value to use
* @ndesc: number of descriptors the packet will occupy
*
* Write an offload work request to send the supplied packet. The packet
* data already carry the work request with most fields populated.
*/
static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
struct sge_txq *q, unsigned int pidx,
unsigned int gen, unsigned int ndesc)
{
unsigned int sgl_flits, flits;
struct work_request_hdr *from;
struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
struct tx_desc *d = &q->desc[pidx];
if (immediate(skb)) {
q->sdesc[pidx].skb = NULL;
write_imm(d, skb, skb->len, gen);
return;
}
/* Only TX_DATA builds SGLs */
from = (struct work_request_hdr *)skb->data;
memcpy(&d->flit[1], &from[1], skb->h.raw - skb->data - sizeof(*from));
flits = (skb->h.raw - skb->data) / 8;
sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
adap->pdev);
if (need_skb_unmap())
((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
gen, from->wr_hi, from->wr_lo);
}
/**
* calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
* @skb: the packet
*
* Returns the number of Tx descriptors needed for the given offload
* packet. These packets are already fully constructed.
*/
static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
{
unsigned int flits, cnt = skb_shinfo(skb)->nr_frags;
if (skb->len <= WR_LEN && cnt == 0)
return 1; /* packet fits as immediate data */
flits = (skb->h.raw - skb->data) / 8; /* headers */
if (skb->tail != skb->h.raw)
cnt++;
return flits_to_desc(flits + sgl_len(cnt));
}
/**
* ofld_xmit - send a packet through an offload queue
* @adap: the adapter
* @q: the Tx offload queue
* @skb: the packet
*
* Send an offload packet through an SGE offload queue.
*/
static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
struct sk_buff *skb)
{
int ret;
unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
spin_lock(&q->lock);
again:
reclaim_completed_tx(adap, q);
ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
if (unlikely(ret)) {
if (ret == 1) {
skb->priority = ndesc; /* save for restart */
spin_unlock(&q->lock);
return NET_XMIT_CN;
}
goto again;
}
gen = q->gen;
q->in_use += ndesc;
pidx = q->pidx;
q->pidx += ndesc;
if (q->pidx >= q->size) {
q->pidx -= q->size;
q->gen ^= 1;
}
spin_unlock(&q->lock);
write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
check_ring_tx_db(adap, q);
return NET_XMIT_SUCCESS;
}
/**
* restart_offloadq - restart a suspended offload queue
 * @qs: the queue set containing the offload queue
*
* Resumes transmission on a suspended Tx offload queue.
*/
static void restart_offloadq(unsigned long data)
{
struct sk_buff *skb;
struct sge_qset *qs = (struct sge_qset *)data;
struct sge_txq *q = &qs->txq[TXQ_OFLD];
struct adapter *adap = qs->netdev->priv;
spin_lock(&q->lock);
again:
reclaim_completed_tx(adap, q);
while ((skb = skb_peek(&q->sendq)) != NULL) {
unsigned int gen, pidx;
unsigned int ndesc = skb->priority;
if (unlikely(q->size - q->in_use < ndesc)) {
set_bit(TXQ_OFLD, &qs->txq_stopped);
smp_mb__after_clear_bit();
if (should_restart_tx(q) &&
test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
goto again;
q->stops++;
break;
}
gen = q->gen;
q->in_use += ndesc;
pidx = q->pidx;
q->pidx += ndesc;
if (q->pidx >= q->size) {
q->pidx -= q->size;
q->gen ^= 1;
}
__skb_unlink(skb, &q->sendq);
spin_unlock(&q->lock);
write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
spin_lock(&q->lock);
}
spin_unlock(&q->lock);
#if USE_GTS
set_bit(TXQ_RUNNING, &q->flags);
set_bit(TXQ_LAST_PKT_DB, &q->flags);
#endif
t3_write_reg(adap, A_SG_KDOORBELL,
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}
/**
* queue_set - return the queue set a packet should use
* @skb: the packet
*
* Maps a packet to the SGE queue set it should use. The desired queue
* set is carried in bits 1-3 in the packet's priority.
*/
static inline int queue_set(const struct sk_buff *skb)
{
return skb->priority >> 1;
}
/**
* is_ctrl_pkt - return whether an offload packet is a control packet
* @skb: the packet
*
* Determines whether an offload packet should use an OFLD or a CTRL
* Tx queue. This is indicated by bit 0 in the packet's priority.
*/
static inline int is_ctrl_pkt(const struct sk_buff *skb)
{
return skb->priority & 1;
}
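/*
 * Example encoding: skb->priority == 5 (binary 101) selects queue set 2
 * and marks the packet as a control packet.
 */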
/**
* t3_offload_tx - send an offload packet
* @tdev: the offload device to send to
* @skb: the packet
*
* Sends an offload packet. We use the packet priority to select the
* appropriate Tx queue as follows: bit 0 indicates whether the packet
* should be sent as regular or control, bits 1-3 select the queue set.
*/
int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
struct adapter *adap = tdev2adap(tdev);
struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
if (unlikely(is_ctrl_pkt(skb)))
return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
}
/**
* offload_enqueue - add an offload packet to an SGE offload receive queue
* @q: the SGE response queue
* @skb: the packet
*
* Add a new offload packet to an SGE response queue's offload packet
* queue. If the packet is the first on the queue it schedules the RX
* softirq to process the queue.
*/
static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
{
skb->next = skb->prev = NULL;
if (q->rx_tail)
q->rx_tail->next = skb;
else {
struct sge_qset *qs = rspq_to_qset(q);
if (__netif_rx_schedule_prep(qs->netdev))
__netif_rx_schedule(qs->netdev);
q->rx_head = skb;
}
q->rx_tail = skb;
}
/**
* deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
* @tdev: the offload device that will be receiving the packets
* @q: the SGE response queue that assembled the bundle
* @skbs: the partial bundle
* @n: the number of packets in the bundle
*
* Delivers a (partial) bundle of Rx offload packets to an offload device.
*/
static inline void deliver_partial_bundle(struct t3cdev *tdev,
struct sge_rspq *q,
struct sk_buff *skbs[], int n)
{
if (n) {
q->offload_bundles++;
tdev->recv(tdev, skbs, n);
}
}
/**
* ofld_poll - NAPI handler for offload packets in interrupt mode
* @dev: the network device doing the polling
* @budget: polling budget
*
* The NAPI handler for offload packets when a response queue is serviced
* by the hard interrupt handler, i.e., when it's operating in non-polling
* mode. Creates small packet batches and sends them through the offload
* receive handler. Batches need to be of modest size as we do prefetches
* on the packets in each.
*/
static int ofld_poll(struct net_device *dev, int *budget)
{
struct adapter *adapter = dev->priv;
struct sge_qset *qs = dev2qset(dev);
struct sge_rspq *q = &qs->rspq;
int work_done, limit = min(*budget, dev->quota), avail = limit;
while (avail) {
struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
int ngathered;
spin_lock_irq(&q->lock);
head = q->rx_head;
if (!head) {
work_done = limit - avail;
*budget -= work_done;
dev->quota -= work_done;
__netif_rx_complete(dev);
spin_unlock_irq(&q->lock);
return 0;
}
tail = q->rx_tail;
q->rx_head = q->rx_tail = NULL;
spin_unlock_irq(&q->lock);
for (ngathered = 0; avail && head; avail--) {
prefetch(head->data);
skbs[ngathered] = head;
head = head->next;
skbs[ngathered]->next = NULL;
if (++ngathered == RX_BUNDLE_SIZE) {
q->offload_bundles++;
adapter->tdev.recv(&adapter->tdev, skbs,
ngathered);
ngathered = 0;
}
}
if (head) { /* splice remaining packets back onto Rx queue */
spin_lock_irq(&q->lock);
tail->next = q->rx_head;
if (!q->rx_head)
q->rx_tail = tail;
q->rx_head = head;
spin_unlock_irq(&q->lock);
}
deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
}
work_done = limit - avail;
*budget -= work_done;
dev->quota -= work_done;
return 1;
}
/**
* rx_offload - process a received offload packet
* @tdev: the offload device receiving the packet
* @rq: the response queue that received the packet
* @skb: the packet
* @rx_gather: a gather list of packets if we are building a bundle
* @gather_idx: index of the next available slot in the bundle
*
 * Process an ingress offload packet and add it to the offload ingress
* queue. Returns the index of the next available slot in the bundle.
*/
static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
struct sk_buff *skb, struct sk_buff *rx_gather[],
unsigned int gather_idx)
{
rq->offload_pkts++;
skb->mac.raw = skb->nh.raw = skb->h.raw = skb->data;
if (rq->polling) {
rx_gather[gather_idx++] = skb;
if (gather_idx == RX_BUNDLE_SIZE) {
tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
gather_idx = 0;
rq->offload_bundles++;
}
} else
offload_enqueue(rq, skb);
return gather_idx;
}
/**
* update_tx_completed - update the number of processed Tx descriptors
* @qs: the queue set to update
* @idx: which Tx queue within the set to update
* @credits: number of new processed descriptors
* @tx_completed: accumulates credits for the queues
*
* Updates the number of completed Tx descriptors for a queue set's Tx
 * queue. On UP systems we update the information immediately but on
* MP we accumulate the credits locally and update the Tx queue when we
* reach a threshold to avoid cache-line bouncing.
*/
static inline void update_tx_completed(struct sge_qset *qs, int idx,
unsigned int credits,
unsigned int tx_completed[])
{
#ifdef CONFIG_SMP
tx_completed[idx] += credits;
if (tx_completed[idx] > 32) {
qs->txq[idx].processed += tx_completed[idx];
tx_completed[idx] = 0;
}
#else
qs->txq[idx].processed += credits;
#endif
}
/**
* restart_tx - check whether to restart suspended Tx queues
* @qs: the queue set to resume
*
* Restarts suspended Tx queues of an SGE queue set if they have enough
* free resources to resume operation.
*/
static void restart_tx(struct sge_qset *qs)
{
if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
should_restart_tx(&qs->txq[TXQ_ETH]) &&
test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
qs->txq[TXQ_ETH].restarts++;
if (netif_running(qs->netdev))
netif_wake_queue(qs->netdev);
}
if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
should_restart_tx(&qs->txq[TXQ_OFLD]) &&
test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
qs->txq[TXQ_OFLD].restarts++;
tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
}
if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
should_restart_tx(&qs->txq[TXQ_CTRL]) &&
test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
qs->txq[TXQ_CTRL].restarts++;
tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
}
}
/**
* rx_eth - process an ingress ethernet packet
* @adap: the adapter
* @rq: the response queue that received the packet
* @skb: the packet
* @pad: amount of padding at the start of the buffer
*
 * Process an ingress Ethernet packet and deliver it to the stack.
* The padding is 2 if the packet was delivered in an Rx buffer and 0
* if it was immediate data in a response.
*/
static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
struct sk_buff *skb, int pad)
{
struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
struct port_info *pi;
rq->eth_pkts++;
skb_pull(skb, sizeof(*p) + pad);
skb->dev = adap->port[p->iff];
skb->dev->last_rx = jiffies;
skb->protocol = eth_type_trans(skb, skb->dev);
pi = netdev_priv(skb->dev);
if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
!p->fragment) {
rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else
skb->ip_summed = CHECKSUM_NONE;
if (unlikely(p->vlan_valid)) {
struct vlan_group *grp = pi->vlan_grp;
rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
if (likely(grp))
__vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
rq->polling);
else
dev_kfree_skb_any(skb);
} else if (rq->polling)
netif_receive_skb(skb);
else
netif_rx(skb);
}
/**
* handle_rsp_cntrl_info - handles control information in a response
* @qs: the queue set corresponding to the response
* @flags: the response control flags
* @tx_completed: accumulates completion credits for the Tx queues
*
* Handles the control information of an SGE response, such as GTS
* indications and completion credits for the queue set's Tx queues.
*/
static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags,
unsigned int tx_completed[])
{
unsigned int credits;
#if USE_GTS
if (flags & F_RSPD_TXQ0_GTS)
clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
#endif
/* ETH credits are already coalesced, return them immediately. */
credits = G_RSPD_TXQ0_CR(flags);
if (credits)
qs->txq[TXQ_ETH].processed += credits;
# if USE_GTS
if (flags & F_RSPD_TXQ1_GTS)
clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
# endif
update_tx_completed(qs, TXQ_OFLD, G_RSPD_TXQ1_CR(flags), tx_completed);
update_tx_completed(qs, TXQ_CTRL, G_RSPD_TXQ2_CR(flags), tx_completed);
}
/**
* flush_tx_completed - returns accumulated Tx completions to Tx queues
* @qs: the queue set to update
* @tx_completed: pending completion credits to return to Tx queues
*
* Updates the number of completed Tx descriptors for a queue set's Tx
 * queues with the credits pending in @tx_completed. This has an effect
 * only on MP systems, as on UP systems the credits are returned
 * immediately.
*/
static inline void flush_tx_completed(struct sge_qset *qs,
unsigned int tx_completed[])
{
#if defined(CONFIG_SMP)
if (tx_completed[TXQ_OFLD])
qs->txq[TXQ_OFLD].processed += tx_completed[TXQ_OFLD];
if (tx_completed[TXQ_CTRL])
qs->txq[TXQ_CTRL].processed += tx_completed[TXQ_CTRL];
#endif
}
/**
* check_ring_db - check if we need to ring any doorbells
* @adapter: the adapter
* @qs: the queue set whose Tx queues are to be examined
* @sleeping: indicates which Tx queue sent GTS
*
* Checks if some of a queue set's Tx queues need to ring their doorbells
* to resume transmission after idling while they still have unprocessed
* descriptors.
*/
static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
unsigned int sleeping)
{
if (sleeping & F_RSPD_TXQ0_GTS) {
struct sge_txq *txq = &qs->txq[TXQ_ETH];
if (txq->cleaned + txq->in_use != txq->processed &&
!test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
set_bit(TXQ_RUNNING, &txq->flags);
t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
V_EGRCNTX(txq->cntxt_id));
}
}
if (sleeping & F_RSPD_TXQ1_GTS) {
struct sge_txq *txq = &qs->txq[TXQ_OFLD];
if (txq->cleaned + txq->in_use != txq->processed &&
!test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
set_bit(TXQ_RUNNING, &txq->flags);
t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
V_EGRCNTX(txq->cntxt_id));
}
}
}
/**
* is_new_response - check if a response is newly written
* @r: the response descriptor
* @q: the response queue
*
* Returns true if a response descriptor contains a yet unprocessed
* response.
*/
static inline int is_new_response(const struct rsp_desc *r,
const struct sge_rspq *q)
{
return (r->intr_gen & F_RSPD_GEN2) == q->gen;
}
#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
#define NOMEM_INTR_DELAY 2500
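/* i.e., 2500 * 0.1 us = 250 us before the next interrupt */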
/**
* process_responses - process responses from an SGE response queue
* @adap: the adapter
* @qs: the queue set to which the response queue belongs
* @budget: how many responses can be processed in this round
*
* Process responses from an SGE response queue up to the supplied budget.
* Responses include received packets as well as credits and other events
* for the queues that belong to the response queue's queue set.
* A negative budget is effectively unlimited.
*
* Additionally choose the interrupt holdoff time for the next interrupt
 * on this queue. If the system is experiencing a memory shortage, use a
 * fairly long delay to help recovery.
*/
static int process_responses(struct adapter *adap, struct sge_qset *qs,
int budget)
{
struct sge_rspq *q = &qs->rspq;
struct rsp_desc *r = &q->desc[q->cidx];
int budget_left = budget;
unsigned int sleeping = 0, tx_completed[3] = { 0, 0, 0 };
struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
int ngathered = 0;
q->next_holdoff = q->holdoff_tmr;
while (likely(budget_left && is_new_response(r, q))) {
int eth, ethpad = 0;
struct sk_buff *skb = NULL;
u32 len, flags = ntohl(r->flags);
u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
eth = r->rss_hdr.opcode == CPL_RX_PKT;
if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
if (!skb)
goto no_mem;
memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
skb->data[0] = CPL_ASYNC_NOTIF;
rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
q->async_notif++;
} else if (flags & F_RSPD_IMM_DATA_VALID) {
skb = get_imm_packet(r);
if (unlikely(!skb)) {
no_mem:
q->next_holdoff = NOMEM_INTR_DELAY;
q->nomem++;
/* consume one credit since we tried */
budget_left--;
break;
}
q->imm_data++;
} else if ((len = ntohl(r->len_cq)) != 0) {
struct sge_fl *fl;
fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
fl->credits--;
skb = get_packet(adap, fl, G_RSPD_LEN(len),
eth ? SGE_RX_DROP_THRES : 0);
if (!skb)
q->rx_drops++;
else if (r->rss_hdr.opcode == CPL_TRACE_PKT)
__skb_pull(skb, 2);
ethpad = 2;
if (++fl->cidx == fl->size)
fl->cidx = 0;
} else
q->pure_rsps++;
if (flags & RSPD_CTRL_MASK) {
sleeping |= flags & RSPD_GTS_MASK;
handle_rsp_cntrl_info(qs, flags, tx_completed);
}
r++;
if (unlikely(++q->cidx == q->size)) {
q->cidx = 0;
q->gen ^= 1;
r = q->desc;
}
prefetch(r);
if (++q->credits >= (q->size / 4)) {
refill_rspq(adap, q, q->credits);
q->credits = 0;
}
if (likely(skb != NULL)) {
if (eth)
rx_eth(adap, q, skb, ethpad);
else {
/* Preserve the RSS info in csum & priority */
skb->csum = rss_hi;
skb->priority = rss_lo;
ngathered = rx_offload(&adap->tdev, q, skb,
offload_skbs, ngathered);
}
}
--budget_left;
}
flush_tx_completed(qs, tx_completed);
deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
if (sleeping)
check_ring_db(adap, qs, sleeping);
smp_mb(); /* commit Tx queue .processed updates */
if (unlikely(qs->txq_stopped != 0))
restart_tx(qs);
budget -= budget_left;
return budget;
}
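/*
 * A response is "pure" when it carries no packet data: no async
 * notification, no immediate data, and a zero len_cq (i.e., no free-list
 * buffer was consumed).
 */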
static inline int is_pure_response(const struct rsp_desc *r)
{
u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
return (n | r->len_cq) == 0;
}
/**
* napi_rx_handler - the NAPI handler for Rx processing
* @dev: the net device
* @budget: how many packets we can process in this round
*
* Handler for new data events when using NAPI.
*/
static int napi_rx_handler(struct net_device *dev, int *budget)
{
struct adapter *adap = dev->priv;
struct sge_qset *qs = dev2qset(dev);
int effective_budget = min(*budget, dev->quota);
int work_done = process_responses(adap, qs, effective_budget);
*budget -= work_done;
dev->quota -= work_done;
if (work_done >= effective_budget)
return 1;
netif_rx_complete(dev);
/*
* Because we don't atomically flush the following write it is
* possible that in very rare cases it can reach the device in a way
* that races with a new response being written plus an error interrupt
* causing the NAPI interrupt handler below to return unhandled status
* to the OS. To protect against this would require flushing the write
* and doing both the write and the flush with interrupts off. Way too
* expensive and unjustifiable given the rarity of the race.
*
* The race cannot happen at all with MSI-X.
*/
t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
V_NEWTIMER(qs->rspq.next_holdoff) |
V_NEWINDEX(qs->rspq.cidx));
return 0;
}
/*
* Returns true if the device is already scheduled for polling.
*/
static inline int napi_is_scheduled(struct net_device *dev)
{
return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
}
/**
* process_pure_responses - process pure responses from a response queue
* @adap: the adapter
* @qs: the queue set owning the response queue
* @r: the first pure response to process
*
* A simpler version of process_responses() that handles only pure (i.e.,
 * non data-carrying) responses. Such responses are too lightweight to
* justify calling a softirq under NAPI, so we handle them specially in
* the interrupt handler. The function is called with a pointer to a
* response, which the caller must ensure is a valid pure response.
*
* Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
*/
static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
struct rsp_desc *r)
{
struct sge_rspq *q = &qs->rspq;
unsigned int sleeping = 0, tx_completed[3] = { 0, 0, 0 };
do {
u32 flags = ntohl(r->flags);
r++;
if (unlikely(++q->cidx == q->size)) {
q->cidx = 0;
q->gen ^= 1;
r = q->desc;
}
prefetch(r);
if (flags & RSPD_CTRL_MASK) {
sleeping |= flags & RSPD_GTS_MASK;
handle_rsp_cntrl_info(qs, flags, tx_completed);
}
q->pure_rsps++;
if (++q->credits >= (q->size / 4)) {
refill_rspq(adap, q, q->credits);
q->credits = 0;
}
} while (is_new_response(r, q) && is_pure_response(r));
flush_tx_completed(qs, tx_completed);
if (sleeping)
check_ring_db(adap, qs, sleeping);
smp_mb(); /* commit Tx queue .processed updates */
if (unlikely(qs->txq_stopped != 0))
restart_tx(qs);
return is_new_response(r, q);
}
/**
* handle_responses - decide what to do with new responses in NAPI mode
* @adap: the adapter
* @q: the response queue
*
* This is used by the NAPI interrupt handlers to decide what to do with
* new SGE responses. If there are no new responses it returns -1. If
* there are new responses and they are pure (i.e., non-data carrying)
* it handles them straight in hard interrupt context as they are very
* cheap and don't deliver any packets. Finally, if there are any data
* signaling responses it schedules the NAPI handler. Returns 1 if it
* schedules NAPI, 0 if all new responses were pure.
*
* The caller must ascertain NAPI is not already running.
*/
static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
{
struct sge_qset *qs = rspq_to_qset(q);
struct rsp_desc *r = &q->desc[q->cidx];
if (!is_new_response(r, q))
return -1;
if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
return 0;
}
if (likely(__netif_rx_schedule_prep(qs->netdev)))
__netif_rx_schedule(qs->netdev);
return 1;
}
/*
* The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
* (i.e., response queue serviced in hard interrupt).
*/
irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
{
struct sge_qset *qs = cookie;
struct adapter *adap = qs->netdev->priv;
struct sge_rspq *q = &qs->rspq;
spin_lock(&q->lock);
if (process_responses(adap, qs, -1) == 0)
q->unhandled_irqs++;
t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
spin_unlock(&q->lock);
return IRQ_HANDLED;
}
/*
* The MSI-X interrupt handler for an SGE response queue for the NAPI case
* (i.e., response queue serviced by NAPI polling).
*/
irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
{
struct sge_qset *qs = cookie;
struct adapter *adap = qs->netdev->priv;
struct sge_rspq *q = &qs->rspq;
spin_lock(&q->lock);
BUG_ON(napi_is_scheduled(qs->netdev));
if (handle_responses(adap, q) < 0)
q->unhandled_irqs++;
spin_unlock(&q->lock);
return IRQ_HANDLED;
}
/*
* The non-NAPI MSI interrupt handler. This needs to handle data events from
* SGE response queues as well as error and other async events as they all use
* the same MSI vector. We use one SGE response queue per port in this mode
* and protect all response queues with queue 0's lock.
*/
static irqreturn_t t3_intr_msi(int irq, void *cookie)
{
int new_packets = 0;
struct adapter *adap = cookie;
struct sge_rspq *q = &adap->sge.qs[0].rspq;
spin_lock(&q->lock);
if (process_responses(adap, &adap->sge.qs[0], -1)) {
t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
new_packets = 1;
}
if (adap->params.nports == 2 &&
process_responses(adap, &adap->sge.qs[1], -1)) {
struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
V_NEWTIMER(q1->next_holdoff) |
V_NEWINDEX(q1->cidx));
new_packets = 1;
}
if (!new_packets && t3_slow_intr_handler(adap) == 0)
q->unhandled_irqs++;
spin_unlock(&q->lock);
return IRQ_HANDLED;
}
static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q)
{
if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) {
if (likely(__netif_rx_schedule_prep(dev)))
__netif_rx_schedule(dev);
return 1;
}
return 0;
}
/*
* The MSI interrupt handler for the NAPI case (i.e., response queues serviced
* by NAPI polling). Handles data events from SGE response queues as well as
* error and other async events as they all use the same MSI vector. We use
* one SGE response queue per port in this mode and protect all response
* queues with queue 0's lock.
*/
irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
{
int new_packets;
struct adapter *adap = cookie;
struct sge_rspq *q = &adap->sge.qs[0].rspq;
spin_lock(&q->lock);
new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q);
if (adap->params.nports == 2)
new_packets += rspq_check_napi(adap->sge.qs[1].netdev,
&adap->sge.qs[1].rspq);
if (!new_packets && t3_slow_intr_handler(adap) == 0)
q->unhandled_irqs++;
spin_unlock(&q->lock);
return IRQ_HANDLED;
}
/*
* A helper function that processes responses and issues GTS.
*/
static inline int process_responses_gts(struct adapter *adap,
struct sge_rspq *rq)
{
int work;
work = process_responses(adap, rspq_to_qset(rq), -1);
t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
return work;
}
/*
* The legacy INTx interrupt handler. This needs to handle data events from
* SGE response queues as well as error and other async events as they all use
* the same interrupt pin. We use one SGE response queue per port in this mode
* and protect all response queues with queue 0's lock.
*/
static irqreturn_t t3_intr(int irq, void *cookie)
{
int work_done, w0, w1;
struct adapter *adap = cookie;
struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
spin_lock(&q0->lock);
w0 = is_new_response(&q0->desc[q0->cidx], q0);
w1 = adap->params.nports == 2 &&
is_new_response(&q1->desc[q1->cidx], q1);
if (likely(w0 | w1)) {
t3_write_reg(adap, A_PL_CLI, 0);
t3_read_reg(adap, A_PL_CLI); /* flush */
if (likely(w0))
process_responses_gts(adap, q0);
if (w1)
process_responses_gts(adap, q1);
work_done = w0 | w1;
} else
work_done = t3_slow_intr_handler(adap);
spin_unlock(&q0->lock);
return IRQ_RETVAL(work_done != 0);
}
/*
* Interrupt handler for legacy INTx interrupts for T3B-based cards.
* Handles data events from SGE response queues as well as error and other
* async events as they all use the same interrupt pin. We use one SGE
* response queue per port in this mode and protect all response queues with
* queue 0's lock.
*/
static irqreturn_t t3b_intr(int irq, void *cookie)
{
u32 map;
struct adapter *adap = cookie;
struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
t3_write_reg(adap, A_PL_CLI, 0);
map = t3_read_reg(adap, A_SG_DATA_INTR);
if (unlikely(!map)) /* shared interrupt, most likely */
return IRQ_NONE;
spin_lock(&q0->lock);
if (unlikely(map & F_ERRINTR))
t3_slow_intr_handler(adap);
if (likely(map & 1))
process_responses_gts(adap, q0);
if (map & 2)
process_responses_gts(adap, &adap->sge.qs[1].rspq);
spin_unlock(&q0->lock);
return IRQ_HANDLED;
}
/*
* NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
* Handles data events from SGE response queues as well as error and other
* async events as they all use the same interrupt pin. We use one SGE
* response queue per port in this mode and protect all response queues with
* queue 0's lock.
*/
static irqreturn_t t3b_intr_napi(int irq, void *cookie)
{
u32 map;
struct net_device *dev;
struct adapter *adap = cookie;
struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
t3_write_reg(adap, A_PL_CLI, 0);
map = t3_read_reg(adap, A_SG_DATA_INTR);
if (unlikely(!map)) /* shared interrupt, most likely */
return IRQ_NONE;
spin_lock(&q0->lock);
if (unlikely(map & F_ERRINTR))
t3_slow_intr_handler(adap);
if (likely(map & 1)) {
dev = adap->sge.qs[0].netdev;
BUG_ON(napi_is_scheduled(dev));
if (likely(__netif_rx_schedule_prep(dev)))
__netif_rx_schedule(dev);
}
if (map & 2) {
dev = adap->sge.qs[1].netdev;
BUG_ON(napi_is_scheduled(dev));
if (likely(__netif_rx_schedule_prep(dev)))
__netif_rx_schedule(dev);
}
spin_unlock(&q0->lock);
return IRQ_HANDLED;
}
/**
* t3_intr_handler - select the top-level interrupt handler
* @adap: the adapter
* @polling: whether using NAPI to service response queues
*
* Selects the top-level interrupt handler based on the type of interrupts
* (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
* response queues.
*/
intr_handler_t t3_intr_handler(struct adapter *adap, int polling)
{
if (adap->flags & USING_MSIX)
return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
if (adap->flags & USING_MSI)
return polling ? t3_intr_msi_napi : t3_intr_msi;
if (adap->params.rev > 0)
return polling ? t3b_intr_napi : t3b_intr;
return t3_intr;
}
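/*
 * Illustrative sketch, not part of the driver: one way a caller might hook
 * up the handler selected above for the MSI/INTx cases, where the interrupt
 * cookie is the adapter itself (the MSI-X handlers take a queue set
 * instead).  The "cxgb3-example" name, the function name, and the
 * unconditional IRQF_SHARED flag are assumptions made for illustration only.
 */
static int example_register_intr(struct adapter *adap, int using_napi)
{
	intr_handler_t handler = t3_intr_handler(adap, using_napi);

	/* Legacy INTx pins may be shared with other devices */
	return request_irq(adap->pdev->irq, handler, IRQF_SHARED,
			   "cxgb3-example", adap);
}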
/**
* t3_sge_err_intr_handler - SGE async event interrupt handler
* @adapter: the adapter
*
* Interrupt handler for SGE asynchronous (non-data) events.
*/
void t3_sge_err_intr_handler(struct adapter *adapter)
{
unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
if (status & F_RSPQCREDITOVERFOW)
CH_ALERT(adapter, "SGE response queue credit overflow\n");
if (status & F_RSPQDISABLED) {
v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
CH_ALERT(adapter,
"packet delivered to disabled response queue "
"(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
}
t3_write_reg(adapter, A_SG_INT_CAUSE, status);
if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
t3_fatal_err(adapter);
}
/**
* sge_timer_cb - perform periodic maintenance of an SGE qset
* @data: the SGE queue set to maintain
*
* Runs periodically from a timer to perform maintenance of an SGE queue
* set. It performs two tasks:
*
* a) Cleans up any completed Tx descriptors that may still be pending.
 * Normal descriptor cleanup happens when new packets are added to a Tx
 * queue, so this timer is relatively infrequent and does any cleanup only
 * if the Tx queue has not seen any new packets in a while. We make a
 * best-effort attempt to reclaim descriptors, in that we don't wait
 * around if we cannot get a queue's lock (most likely because someone
 * else is queueing new packets and so will also handle the cleanup).
 * Since control queues use immediate data exclusively we don't bother
 * cleaning them up here.
 *
 * b) Replenishes Rx queues that have run out due to memory shortage.
 * Normally new Rx buffers are added when existing ones are consumed, but
 * when out of memory a queue can become empty. We try to add only a few
 * buffers here; the queue will be replenished fully as these new buffers
 * are used up, provided the memory shortage has subsided.
*/
static void sge_timer_cb(unsigned long data)
{
spinlock_t *lock;
struct sge_qset *qs = (struct sge_qset *)data;
struct adapter *adap = qs->netdev->priv;
if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
spin_unlock(&qs->txq[TXQ_ETH].lock);
}
if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
spin_unlock(&qs->txq[TXQ_OFLD].lock);
}
lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
&adap->sge.qs[0].rspq.lock;
if (spin_trylock_irq(lock)) {
if (!napi_is_scheduled(qs->netdev)) {
if (qs->fl[0].credits < qs->fl[0].size)
__refill_fl(adap, &qs->fl[0]);
if (qs->fl[1].credits < qs->fl[1].size)
__refill_fl(adap, &qs->fl[1]);
}
spin_unlock_irq(lock);
}
mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
}
/**
* t3_update_qset_coalesce - update coalescing settings for a queue set
* @qs: the SGE queue set
* @p: new queue set parameters
*
* Update the coalescing settings for an SGE queue set. Nothing is done
* if the queue set is not initialized yet.
*/
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
{
if (!qs->netdev)
return;
qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); /* can't be 0 */
qs->rspq.polling = p->polling;
qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll;
}
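/*
 * Illustrative sketch, not part of the driver: applying a new interrupt
 * holdoff to every queue set of an adapter via t3_update_qset_coalesce().
 * The adap->params.sge.qset[] path and the function name are assumptions
 * for the example; in the driver the parameters normally come from ethtool.
 */
static void example_set_coalesce(struct adapter *adap, unsigned int usecs)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i) {
		struct qset_params *qp = &adap->params.sge.qset[i];

		qp->coalesce_usecs = usecs;
		t3_update_qset_coalesce(&adap->sge.qs[i], qp);
	}
}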
/**
* t3_sge_alloc_qset - initialize an SGE queue set
* @adapter: the adapter
* @id: the queue set id
* @nports: how many Ethernet ports will be using this queue set
* @irq_vec_idx: the IRQ vector index for response queue interrupts
* @p: configuration parameters for this queue set
* @ntxq: number of Tx queues for the queue set
* @netdev: net device associated with this queue set
*
* Allocate resources and initialize an SGE queue set. A queue set
* comprises a response queue, two Rx free-buffer queues, and up to 3
* Tx queues. The Tx queues are assigned roles in the order Ethernet
* queue, offload queue, and control queue.
*/
int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
int irq_vec_idx, const struct qset_params *p,
int ntxq, struct net_device *netdev)
{
int i, ret = -ENOMEM;
struct sge_qset *q = &adapter->sge.qs[id];
init_qset_cntxt(q, id);
init_timer(&q->tx_reclaim_timer);
q->tx_reclaim_timer.data = (unsigned long)q;
q->tx_reclaim_timer.function = sge_timer_cb;
q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
sizeof(struct rx_desc),
sizeof(struct rx_sw_desc),
&q->fl[0].phys_addr, &q->fl[0].sdesc);
if (!q->fl[0].desc)
goto err;
q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
sizeof(struct rx_desc),
sizeof(struct rx_sw_desc),
&q->fl[1].phys_addr, &q->fl[1].sdesc);
if (!q->fl[1].desc)
goto err;
q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
sizeof(struct rsp_desc), 0,
&q->rspq.phys_addr, NULL);
if (!q->rspq.desc)
goto err;
for (i = 0; i < ntxq; ++i) {
/*
 * The control queue always uses immediate data, so it does not
 * need to keep track of any sk_buffs.
*/
size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
sizeof(struct tx_desc), sz,
&q->txq[i].phys_addr,
&q->txq[i].sdesc);
if (!q->txq[i].desc)
goto err;
q->txq[i].gen = 1;
q->txq[i].size = p->txq_size[i];
spin_lock_init(&q->txq[i].lock);
skb_queue_head_init(&q->txq[i].sendq);
}
tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
(unsigned long)q);
tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
(unsigned long)q);
q->fl[0].gen = q->fl[1].gen = 1;
q->fl[0].size = p->fl_size;
q->fl[1].size = p->jumbo_size;
q->rspq.gen = 1;
q->rspq.size = p->rspq_size;
spin_lock_init(&q->rspq.lock);
q->txq[TXQ_ETH].stop_thres = nports *
flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
if (ntxq == 1) {
q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
sizeof(struct cpl_rx_pkt);
q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
sizeof(struct cpl_rx_pkt);
} else {
q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
sizeof(struct cpl_rx_data);
q->fl[1].buf_size = (16 * 1024) -
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
spin_lock(&adapter->sge.reg_lock);
/* FL threshold comparison uses < */
ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
q->rspq.phys_addr, q->rspq.size,
q->fl[0].buf_size, 1, 0);
if (ret)
goto err_unlock;
for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
q->fl[i].phys_addr, q->fl[i].size,
q->fl[i].buf_size, p->cong_thres, 1,
0);
if (ret)
goto err_unlock;
}
ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
1, 0);
if (ret)
goto err_unlock;
if (ntxq > 1) {
ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
USE_GTS, SGE_CNTXT_OFLD, id,
q->txq[TXQ_OFLD].phys_addr,
q->txq[TXQ_OFLD].size, 0, 1, 0);
if (ret)
goto err_unlock;
}
if (ntxq > 2) {
ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
SGE_CNTXT_CTRL, id,
q->txq[TXQ_CTRL].phys_addr,
q->txq[TXQ_CTRL].size,
q->txq[TXQ_CTRL].token, 1, 0);
if (ret)
goto err_unlock;
}
spin_unlock(&adapter->sge.reg_lock);
q->netdev = netdev;
t3_update_qset_coalesce(q, p);
/*
 * We use atalk_ptr as a backpointer to a qset. If a device is
 * associated with multiple queue sets, only the first one sets
 * atalk_ptr.
*/
if (netdev->atalk_ptr == NULL)
netdev->atalk_ptr = q;
refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
V_NEWTIMER(q->rspq.holdoff_tmr));
mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
return 0;
err_unlock:
spin_unlock(&adapter->sge.reg_lock);
err:
t3_free_qset(adapter, q);
return ret;
}
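/*
 * Illustrative sketch, not part of the driver: allocating one queue set per
 * port with the defaults prepared by t3_sge_prep().  The adap->port[] array,
 * the SGE_TXQ_PER_SET count and the "MSI-X vector i + 1" convention are
 * assumptions for the example; error unwinding is left to
 * t3_free_sge_resources().
 */
static int example_alloc_qsets(struct adapter *adap)
{
	int i, err;

	for (i = 0; i < adap->params.nports; ++i) {
		err = t3_sge_alloc_qset(adap, i, 1, i + 1,
					&adap->params.sge.qset[i],
					SGE_TXQ_PER_SET, adap->port[i]);
		if (err)
			return err;
	}
	return 0;
}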
/**
* t3_free_sge_resources - free SGE resources
* @adap: the adapter
*
* Frees resources used by the SGE queue sets.
*/
void t3_free_sge_resources(struct adapter *adap)
{
int i;
for (i = 0; i < SGE_QSETS; ++i)
t3_free_qset(adap, &adap->sge.qs[i]);
}
/**
* t3_sge_start - enable SGE
* @adap: the adapter
*
* Enables the SGE for DMAs. This is the last step in starting packet
* transfers.
*/
void t3_sge_start(struct adapter *adap)
{
t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
}
/**
* t3_sge_stop - disable SGE operation
* @adap: the adapter
*
 * Disables the DMA engine. This can be called in emergencies (e.g.,
 * from error interrupts) or from normal process context. In the latter
 * case it also disables any pending queue restart tasklets. Note that
 * if it is called in interrupt context it cannot disable the restart
 * tasklets as it cannot wait; however, the tasklets will have no effect
 * since the doorbells are disabled and the driver will call this again
 * later from process context, at which time the tasklets will be
 * stopped if they are still running.
*/
void t3_sge_stop(struct adapter *adap)
{
t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
if (!in_interrupt()) {
int i;
for (i = 0; i < SGE_QSETS; ++i) {
struct sge_qset *qs = &adap->sge.qs[i];
tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
}
}
}
/**
* t3_sge_init - initialize SGE
* @adap: the adapter
* @p: the SGE parameters
*
* Performs SGE initialization needed every time after a chip reset.
 * We do not initialize any of the queue sets here; instead the
 * top-level driver must request those individually. We also do not
 * enable DMA here; that should be done after the queues have been set up.
*/
void t3_sge_init(struct adapter *adap, struct sge_params *p)
{
unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
F_CQCRDTCTRL |
V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
#if SGE_NUM_GENBITS == 1
ctrl |= F_EGRGENCTRL;
#endif
if (adap->params.rev > 0) {
if (!(adap->flags & (USING_MSIX | USING_MSI)))
ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
}
t3_write_reg(adap, A_SG_CONTROL, ctrl);
t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
V_LORCQDRBTHRSH(512));
t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
V_TIMEOUT(100 * core_ticks_per_usec(adap)));
t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
}
/**
* t3_sge_prep - one-time SGE initialization
* @adap: the associated adapter
* @p: SGE parameters
*
 * Performs one-time initialization of SGE SW state, including determining
 * defaults for the various SGE parameters, which administrators can change
 * until they are used to initialize the SGE.
*/
void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
{
int i;
p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
for (i = 0; i < SGE_QSETS; ++i) {
struct qset_params *q = p->qset + i;
q->polling = adap->params.rev > 0;
q->coalesce_usecs = 5;
q->rspq_size = 1024;
q->fl_size = 4096;
q->jumbo_size = 512;
q->txq_size[TXQ_ETH] = 1024;
q->txq_size[TXQ_OFLD] = 1024;
q->txq_size[TXQ_CTRL] = 256;
q->cong_thres = 0;
}
spin_lock_init(&adap->sge.reg_lock);
}
/**
* t3_get_desc - dump an SGE descriptor for debugging purposes
* @qs: the queue set
 * @qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
* @idx: the descriptor index in the queue
* @data: where to dump the descriptor contents
*
* Dumps the contents of a HW descriptor of an SGE queue. Returns the
* size of the descriptor.
*/
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
unsigned char *data)
{
if (qnum >= 6)
return -EINVAL;
if (qnum < 3) {
if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
return -EINVAL;
memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
return sizeof(struct tx_desc);
}
if (qnum == 3) {
if (!qs->rspq.desc || idx >= qs->rspq.size)
return -EINVAL;
memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
return sizeof(struct rsp_desc);
}
qnum -= 4;
if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
return -EINVAL;
memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
return sizeof(struct rx_desc);
}
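/*
 * Illustrative sketch, not part of the driver: dumping a response queue
 * descriptor with t3_get_desc(), using the qnum convention documented above
 * (0..2: Tx, 3: response, 4..5: Rx).  The function name and the raw printk
 * hex dump are assumptions for the example.
 */
static void example_dump_rspq_desc(const struct sge_qset *qs, unsigned int idx)
{
	unsigned char buf[sizeof(struct rsp_desc)];
	int i, len = t3_get_desc(qs, 3, idx, buf);

	if (len < 0)
		return;
	for (i = 0; i < len; i++)
		printk("%02x%c", buf[i], (i & 15) == 15 ? '\n' : ' ');
	printk("\n");
}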
/*
* This file is automatically generated --- any changes will be lost.
*/
#ifndef _SGE_DEFS_H
#define _SGE_DEFS_H
#define S_EC_CREDITS 0
#define M_EC_CREDITS 0x7FFF
#define V_EC_CREDITS(x) ((x) << S_EC_CREDITS)
#define G_EC_CREDITS(x) (((x) >> S_EC_CREDITS) & M_EC_CREDITS)
#define S_EC_GTS 15
#define V_EC_GTS(x) ((x) << S_EC_GTS)
#define F_EC_GTS V_EC_GTS(1U)
#define S_EC_INDEX 16
#define M_EC_INDEX 0xFFFF
#define V_EC_INDEX(x) ((x) << S_EC_INDEX)
#define G_EC_INDEX(x) (((x) >> S_EC_INDEX) & M_EC_INDEX)
#define S_EC_SIZE 0
#define M_EC_SIZE 0xFFFF
#define V_EC_SIZE(x) ((x) << S_EC_SIZE)
#define G_EC_SIZE(x) (((x) >> S_EC_SIZE) & M_EC_SIZE)
#define S_EC_BASE_LO 16
#define M_EC_BASE_LO 0xFFFF
#define V_EC_BASE_LO(x) ((x) << S_EC_BASE_LO)
#define G_EC_BASE_LO(x) (((x) >> S_EC_BASE_LO) & M_EC_BASE_LO)
#define S_EC_BASE_HI 0
#define M_EC_BASE_HI 0xF
#define V_EC_BASE_HI(x) ((x) << S_EC_BASE_HI)
#define G_EC_BASE_HI(x) (((x) >> S_EC_BASE_HI) & M_EC_BASE_HI)
#define S_EC_RESPQ 4
#define M_EC_RESPQ 0x7
#define V_EC_RESPQ(x) ((x) << S_EC_RESPQ)
#define G_EC_RESPQ(x) (((x) >> S_EC_RESPQ) & M_EC_RESPQ)
#define S_EC_TYPE 7
#define M_EC_TYPE 0x7
#define V_EC_TYPE(x) ((x) << S_EC_TYPE)
#define G_EC_TYPE(x) (((x) >> S_EC_TYPE) & M_EC_TYPE)
#define S_EC_GEN 10
#define V_EC_GEN(x) ((x) << S_EC_GEN)
#define F_EC_GEN V_EC_GEN(1U)
#define S_EC_UP_TOKEN 11
#define M_EC_UP_TOKEN 0xFFFFF
#define V_EC_UP_TOKEN(x) ((x) << S_EC_UP_TOKEN)
#define G_EC_UP_TOKEN(x) (((x) >> S_EC_UP_TOKEN) & M_EC_UP_TOKEN)
#define S_EC_VALID 31
#define V_EC_VALID(x) ((x) << S_EC_VALID)
#define F_EC_VALID V_EC_VALID(1U)
#define S_RQ_MSI_VEC 20
#define M_RQ_MSI_VEC 0x3F
#define V_RQ_MSI_VEC(x) ((x) << S_RQ_MSI_VEC)
#define G_RQ_MSI_VEC(x) (((x) >> S_RQ_MSI_VEC) & M_RQ_MSI_VEC)
#define S_RQ_INTR_EN 26
#define V_RQ_INTR_EN(x) ((x) << S_RQ_INTR_EN)
#define F_RQ_INTR_EN V_RQ_INTR_EN(1U)
#define S_RQ_GEN 28
#define V_RQ_GEN(x) ((x) << S_RQ_GEN)
#define F_RQ_GEN V_RQ_GEN(1U)
#define S_CQ_INDEX 0
#define M_CQ_INDEX 0xFFFF
#define V_CQ_INDEX(x) ((x) << S_CQ_INDEX)
#define G_CQ_INDEX(x) (((x) >> S_CQ_INDEX) & M_CQ_INDEX)
#define S_CQ_SIZE 16
#define M_CQ_SIZE 0xFFFF
#define V_CQ_SIZE(x) ((x) << S_CQ_SIZE)
#define G_CQ_SIZE(x) (((x) >> S_CQ_SIZE) & M_CQ_SIZE)
#define S_CQ_BASE_HI 0
#define M_CQ_BASE_HI 0xFFFFF
#define V_CQ_BASE_HI(x) ((x) << S_CQ_BASE_HI)
#define G_CQ_BASE_HI(x) (((x) >> S_CQ_BASE_HI) & M_CQ_BASE_HI)
#define S_CQ_RSPQ 20
#define M_CQ_RSPQ 0x3F
#define V_CQ_RSPQ(x) ((x) << S_CQ_RSPQ)
#define G_CQ_RSPQ(x) (((x) >> S_CQ_RSPQ) & M_CQ_RSPQ)
#define S_CQ_ASYNC_NOTIF 26
#define V_CQ_ASYNC_NOTIF(x) ((x) << S_CQ_ASYNC_NOTIF)
#define F_CQ_ASYNC_NOTIF V_CQ_ASYNC_NOTIF(1U)
#define S_CQ_ARMED 27
#define V_CQ_ARMED(x) ((x) << S_CQ_ARMED)
#define F_CQ_ARMED V_CQ_ARMED(1U)
#define S_CQ_ASYNC_NOTIF_SOL 28
#define V_CQ_ASYNC_NOTIF_SOL(x) ((x) << S_CQ_ASYNC_NOTIF_SOL)
#define F_CQ_ASYNC_NOTIF_SOL V_CQ_ASYNC_NOTIF_SOL(1U)
#define S_CQ_GEN 29
#define V_CQ_GEN(x) ((x) << S_CQ_GEN)
#define F_CQ_GEN V_CQ_GEN(1U)
#define S_CQ_OVERFLOW_MODE 31
#define V_CQ_OVERFLOW_MODE(x) ((x) << S_CQ_OVERFLOW_MODE)
#define F_CQ_OVERFLOW_MODE V_CQ_OVERFLOW_MODE(1U)
#define S_CQ_CREDITS 0
#define M_CQ_CREDITS 0xFFFF
#define V_CQ_CREDITS(x) ((x) << S_CQ_CREDITS)
#define G_CQ_CREDITS(x) (((x) >> S_CQ_CREDITS) & M_CQ_CREDITS)
#define S_CQ_CREDIT_THRES 16
#define M_CQ_CREDIT_THRES 0x1FFF
#define V_CQ_CREDIT_THRES(x) ((x) << S_CQ_CREDIT_THRES)
#define G_CQ_CREDIT_THRES(x) (((x) >> S_CQ_CREDIT_THRES) & M_CQ_CREDIT_THRES)
#define S_FL_BASE_HI 0
#define M_FL_BASE_HI 0xFFFFF
#define V_FL_BASE_HI(x) ((x) << S_FL_BASE_HI)
#define G_FL_BASE_HI(x) (((x) >> S_FL_BASE_HI) & M_FL_BASE_HI)
#define S_FL_INDEX_LO 20
#define M_FL_INDEX_LO 0xFFF
#define V_FL_INDEX_LO(x) ((x) << S_FL_INDEX_LO)
#define G_FL_INDEX_LO(x) (((x) >> S_FL_INDEX_LO) & M_FL_INDEX_LO)
#define S_FL_INDEX_HI 0
#define M_FL_INDEX_HI 0xF
#define V_FL_INDEX_HI(x) ((x) << S_FL_INDEX_HI)
#define G_FL_INDEX_HI(x) (((x) >> S_FL_INDEX_HI) & M_FL_INDEX_HI)
#define S_FL_SIZE 4
#define M_FL_SIZE 0xFFFF
#define V_FL_SIZE(x) ((x) << S_FL_SIZE)
#define G_FL_SIZE(x) (((x) >> S_FL_SIZE) & M_FL_SIZE)
#define S_FL_GEN 20
#define V_FL_GEN(x) ((x) << S_FL_GEN)
#define F_FL_GEN V_FL_GEN(1U)
#define S_FL_ENTRY_SIZE_LO 21
#define M_FL_ENTRY_SIZE_LO 0x7FF
#define V_FL_ENTRY_SIZE_LO(x) ((x) << S_FL_ENTRY_SIZE_LO)
#define G_FL_ENTRY_SIZE_LO(x) (((x) >> S_FL_ENTRY_SIZE_LO) & M_FL_ENTRY_SIZE_LO)
#define S_FL_ENTRY_SIZE_HI 0
#define M_FL_ENTRY_SIZE_HI 0x1FFFFF
#define V_FL_ENTRY_SIZE_HI(x) ((x) << S_FL_ENTRY_SIZE_HI)
#define G_FL_ENTRY_SIZE_HI(x) (((x) >> S_FL_ENTRY_SIZE_HI) & M_FL_ENTRY_SIZE_HI)
#define S_FL_CONG_THRES 21
#define M_FL_CONG_THRES 0x3FF
#define V_FL_CONG_THRES(x) ((x) << S_FL_CONG_THRES)
#define G_FL_CONG_THRES(x) (((x) >> S_FL_CONG_THRES) & M_FL_CONG_THRES)
#define S_FL_GTS 31
#define V_FL_GTS(x) ((x) << S_FL_GTS)
#define F_FL_GTS V_FL_GTS(1U)
#define S_FLD_GEN1 31
#define V_FLD_GEN1(x) ((x) << S_FLD_GEN1)
#define F_FLD_GEN1 V_FLD_GEN1(1U)
#define S_FLD_GEN2 0
#define V_FLD_GEN2(x) ((x) << S_FLD_GEN2)
#define F_FLD_GEN2 V_FLD_GEN2(1U)
#define S_RSPD_TXQ1_CR 0
#define M_RSPD_TXQ1_CR 0x7F
#define V_RSPD_TXQ1_CR(x) ((x) << S_RSPD_TXQ1_CR)
#define G_RSPD_TXQ1_CR(x) (((x) >> S_RSPD_TXQ1_CR) & M_RSPD_TXQ1_CR)
#define S_RSPD_TXQ1_GTS 7
#define V_RSPD_TXQ1_GTS(x) ((x) << S_RSPD_TXQ1_GTS)
#define F_RSPD_TXQ1_GTS V_RSPD_TXQ1_GTS(1U)
#define S_RSPD_TXQ2_CR 8
#define M_RSPD_TXQ2_CR 0x7F
#define V_RSPD_TXQ2_CR(x) ((x) << S_RSPD_TXQ2_CR)
#define G_RSPD_TXQ2_CR(x) (((x) >> S_RSPD_TXQ2_CR) & M_RSPD_TXQ2_CR)
#define S_RSPD_TXQ2_GTS 15
#define V_RSPD_TXQ2_GTS(x) ((x) << S_RSPD_TXQ2_GTS)
#define F_RSPD_TXQ2_GTS V_RSPD_TXQ2_GTS(1U)
#define S_RSPD_TXQ0_CR 16
#define M_RSPD_TXQ0_CR 0x7F
#define V_RSPD_TXQ0_CR(x) ((x) << S_RSPD_TXQ0_CR)
#define G_RSPD_TXQ0_CR(x) (((x) >> S_RSPD_TXQ0_CR) & M_RSPD_TXQ0_CR)
#define S_RSPD_TXQ0_GTS 23
#define V_RSPD_TXQ0_GTS(x) ((x) << S_RSPD_TXQ0_GTS)
#define F_RSPD_TXQ0_GTS V_RSPD_TXQ0_GTS(1U)
#define S_RSPD_EOP 24
#define V_RSPD_EOP(x) ((x) << S_RSPD_EOP)
#define F_RSPD_EOP V_RSPD_EOP(1U)
#define S_RSPD_SOP 25
#define V_RSPD_SOP(x) ((x) << S_RSPD_SOP)
#define F_RSPD_SOP V_RSPD_SOP(1U)
#define S_RSPD_ASYNC_NOTIF 26
#define V_RSPD_ASYNC_NOTIF(x) ((x) << S_RSPD_ASYNC_NOTIF)
#define F_RSPD_ASYNC_NOTIF V_RSPD_ASYNC_NOTIF(1U)
#define S_RSPD_FL0_GTS 27
#define V_RSPD_FL0_GTS(x) ((x) << S_RSPD_FL0_GTS)
#define F_RSPD_FL0_GTS V_RSPD_FL0_GTS(1U)
#define S_RSPD_FL1_GTS 28
#define V_RSPD_FL1_GTS(x) ((x) << S_RSPD_FL1_GTS)
#define F_RSPD_FL1_GTS V_RSPD_FL1_GTS(1U)
#define S_RSPD_IMM_DATA_VALID 29
#define V_RSPD_IMM_DATA_VALID(x) ((x) << S_RSPD_IMM_DATA_VALID)
#define F_RSPD_IMM_DATA_VALID V_RSPD_IMM_DATA_VALID(1U)
#define S_RSPD_OFFLOAD 30
#define V_RSPD_OFFLOAD(x) ((x) << S_RSPD_OFFLOAD)
#define F_RSPD_OFFLOAD V_RSPD_OFFLOAD(1U)
#define S_RSPD_GEN1 31
#define V_RSPD_GEN1(x) ((x) << S_RSPD_GEN1)
#define F_RSPD_GEN1 V_RSPD_GEN1(1U)
#define S_RSPD_LEN 0
#define M_RSPD_LEN 0x7FFFFFFF
#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN)
#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN)
#define S_RSPD_FLQ 31
#define V_RSPD_FLQ(x) ((x) << S_RSPD_FLQ)
#define F_RSPD_FLQ V_RSPD_FLQ(1U)
#define S_RSPD_GEN2 0
#define V_RSPD_GEN2(x) ((x) << S_RSPD_GEN2)
#define F_RSPD_GEN2 V_RSPD_GEN2(1U)
#define S_RSPD_INR_VEC 1
#define M_RSPD_INR_VEC 0x7F
#define V_RSPD_INR_VEC(x) ((x) << S_RSPD_INR_VEC)
#define G_RSPD_INR_VEC(x) (((x) >> S_RSPD_INR_VEC) & M_RSPD_INR_VEC)
#endif /* _SGE_DEFS_H */
/*
* Definitions of the CPL 5 commands and status codes.
*
* Copyright (C) 2004-2006 Chelsio Communications. All rights reserved.
*
* Written by Dimitris Michailidis (dm@chelsio.com)
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
* release for licensing terms and conditions.
*/
#ifndef T3_CPL_H
#define T3_CPL_H
#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
# include <asm/byteorder.h>
#endif
enum CPL_opcode {
CPL_PASS_OPEN_REQ = 0x1,
CPL_PASS_ACCEPT_RPL = 0x2,
CPL_ACT_OPEN_REQ = 0x3,
CPL_SET_TCB = 0x4,
CPL_SET_TCB_FIELD = 0x5,
CPL_GET_TCB = 0x6,
CPL_PCMD = 0x7,
CPL_CLOSE_CON_REQ = 0x8,
CPL_CLOSE_LISTSRV_REQ = 0x9,
CPL_ABORT_REQ = 0xA,
CPL_ABORT_RPL = 0xB,
CPL_TX_DATA = 0xC,
CPL_RX_DATA_ACK = 0xD,
CPL_TX_PKT = 0xE,
CPL_RTE_DELETE_REQ = 0xF,
CPL_RTE_WRITE_REQ = 0x10,
CPL_RTE_READ_REQ = 0x11,
CPL_L2T_WRITE_REQ = 0x12,
CPL_L2T_READ_REQ = 0x13,
CPL_SMT_WRITE_REQ = 0x14,
CPL_SMT_READ_REQ = 0x15,
CPL_TX_PKT_LSO = 0x16,
CPL_PCMD_READ = 0x17,
CPL_BARRIER = 0x18,
CPL_TID_RELEASE = 0x1A,
CPL_CLOSE_LISTSRV_RPL = 0x20,
CPL_ERROR = 0x21,
CPL_GET_TCB_RPL = 0x22,
CPL_L2T_WRITE_RPL = 0x23,
CPL_PCMD_READ_RPL = 0x24,
CPL_PCMD_RPL = 0x25,
CPL_PEER_CLOSE = 0x26,
CPL_RTE_DELETE_RPL = 0x27,
CPL_RTE_WRITE_RPL = 0x28,
CPL_RX_DDP_COMPLETE = 0x29,
CPL_RX_PHYS_ADDR = 0x2A,
CPL_RX_PKT = 0x2B,
CPL_RX_URG_NOTIFY = 0x2C,
CPL_SET_TCB_RPL = 0x2D,
CPL_SMT_WRITE_RPL = 0x2E,
CPL_TX_DATA_ACK = 0x2F,
CPL_ABORT_REQ_RSS = 0x30,
CPL_ABORT_RPL_RSS = 0x31,
CPL_CLOSE_CON_RPL = 0x32,
CPL_ISCSI_HDR = 0x33,
CPL_L2T_READ_RPL = 0x34,
CPL_RDMA_CQE = 0x35,
CPL_RDMA_CQE_READ_RSP = 0x36,
CPL_RDMA_CQE_ERR = 0x37,
CPL_RTE_READ_RPL = 0x38,
CPL_RX_DATA = 0x39,
CPL_ACT_OPEN_RPL = 0x40,
CPL_PASS_OPEN_RPL = 0x41,
CPL_RX_DATA_DDP = 0x42,
CPL_SMT_READ_RPL = 0x43,
CPL_ACT_ESTABLISH = 0x50,
CPL_PASS_ESTABLISH = 0x51,
CPL_PASS_ACCEPT_REQ = 0x70,
CPL_ASYNC_NOTIF = 0x80, /* fake opcode for async notifications */
CPL_TX_DMA_ACK = 0xA0,
CPL_RDMA_READ_REQ = 0xA1,
CPL_RDMA_TERMINATE = 0xA2,
CPL_TRACE_PKT = 0xA3,
CPL_RDMA_EC_STATUS = 0xA5,
NUM_CPL_CMDS /* must be last and previous entries must be sorted */
};
enum CPL_error {
CPL_ERR_NONE = 0,
CPL_ERR_TCAM_PARITY = 1,
CPL_ERR_TCAM_FULL = 3,
CPL_ERR_CONN_RESET = 20,
CPL_ERR_CONN_EXIST = 22,
CPL_ERR_ARP_MISS = 23,
CPL_ERR_BAD_SYN = 24,
CPL_ERR_CONN_TIMEDOUT = 30,
CPL_ERR_XMIT_TIMEDOUT = 31,
CPL_ERR_PERSIST_TIMEDOUT = 32,
CPL_ERR_FINWAIT2_TIMEDOUT = 33,
CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
CPL_ERR_RTX_NEG_ADVICE = 35,
CPL_ERR_PERSIST_NEG_ADVICE = 36,
CPL_ERR_ABORT_FAILED = 42,
CPL_ERR_GENERAL = 99
};
enum {
CPL_CONN_POLICY_AUTO = 0,
CPL_CONN_POLICY_ASK = 1,
CPL_CONN_POLICY_DENY = 3
};
enum {
ULP_MODE_NONE = 0,
ULP_MODE_ISCSI = 2,
ULP_MODE_RDMA = 4,
ULP_MODE_TCPDDP = 5
};
enum {
ULP_CRC_HEADER = 1 << 0,
ULP_CRC_DATA = 1 << 1
};
enum {
CPL_PASS_OPEN_ACCEPT,
CPL_PASS_OPEN_REJECT
};
enum {
CPL_ABORT_SEND_RST = 0,
CPL_ABORT_NO_RST,
CPL_ABORT_POST_CLOSE_REQ = 2
};
enum { /* TX_PKT_LSO ethernet types */
CPL_ETH_II,
CPL_ETH_II_VLAN,
CPL_ETH_802_3,
CPL_ETH_802_3_VLAN
};
enum { /* TCP congestion control algorithms */
CONG_ALG_RENO,
CONG_ALG_TAHOE,
CONG_ALG_NEWRENO,
CONG_ALG_HIGHSPEED
};
union opcode_tid {
__be32 opcode_tid;
__u8 opcode;
};
#define S_OPCODE 24
#define V_OPCODE(x) ((x) << S_OPCODE)
#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
#define G_TID(x) ((x) & 0xFFFFFF)
/* tid is assumed to be 24-bits */
#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))
#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
/* extract the TID from a CPL command */
#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
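/*
 * Illustrative sketch, not part of the driver: packing and unpacking the
 * combined opcode/TID word with the macros above.  The local struct exists
 * only to give GET_TID() the ->ot member it expects, and the TID must fit
 * in 24 bits for the round trip to hold; the function name is an invention
 * for the example.
 */
static inline int example_opcode_tid_roundtrip(unsigned int tid)
{
	struct { union opcode_tid ot; } cmd;

	cmd.ot.opcode_tid = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));

	/* G_OPCODE() and GET_TID() recover the two fields from the word */
	return G_OPCODE(ntohl(cmd.ot.opcode_tid)) == CPL_TID_RELEASE &&
	       GET_TID(&cmd) == tid;
}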
struct tcp_options {
__be16 mss;
__u8 wsf;
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8:5;
__u8 ecn:1;
__u8 sack:1;
__u8 tstamp:1;
#else
__u8 tstamp:1;
__u8 sack:1;
__u8 ecn:1;
__u8:5;
#endif
};
struct rss_header {
__u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 cpu_idx:6;
__u8 hash_type:2;
#else
__u8 hash_type:2;
__u8 cpu_idx:6;
#endif
__be16 cq_idx;
__be32 rss_hash_val;
};
#ifndef CHELSIO_FW
struct work_request_hdr {
__be32 wr_hi;
__be32 wr_lo;
};
/* wr_hi fields */
#define S_WR_SGE_CREDITS 0
#define M_WR_SGE_CREDITS 0xFF
#define V_WR_SGE_CREDITS(x) ((x) << S_WR_SGE_CREDITS)
#define G_WR_SGE_CREDITS(x) (((x) >> S_WR_SGE_CREDITS) & M_WR_SGE_CREDITS)
#define S_WR_SGLSFLT 8
#define M_WR_SGLSFLT 0xFF
#define V_WR_SGLSFLT(x) ((x) << S_WR_SGLSFLT)
#define G_WR_SGLSFLT(x) (((x) >> S_WR_SGLSFLT) & M_WR_SGLSFLT)
#define S_WR_BCNTLFLT 16
#define M_WR_BCNTLFLT 0xF
#define V_WR_BCNTLFLT(x) ((x) << S_WR_BCNTLFLT)
#define G_WR_BCNTLFLT(x) (((x) >> S_WR_BCNTLFLT) & M_WR_BCNTLFLT)
#define S_WR_DATATYPE 20
#define V_WR_DATATYPE(x) ((x) << S_WR_DATATYPE)
#define F_WR_DATATYPE V_WR_DATATYPE(1U)
#define S_WR_COMPL 21
#define V_WR_COMPL(x) ((x) << S_WR_COMPL)
#define F_WR_COMPL V_WR_COMPL(1U)
#define S_WR_EOP 22
#define V_WR_EOP(x) ((x) << S_WR_EOP)
#define F_WR_EOP V_WR_EOP(1U)
#define S_WR_SOP 23
#define V_WR_SOP(x) ((x) << S_WR_SOP)
#define F_WR_SOP V_WR_SOP(1U)
#define S_WR_OP 24
#define M_WR_OP 0xFF
#define V_WR_OP(x) ((x) << S_WR_OP)
#define G_WR_OP(x) (((x) >> S_WR_OP) & M_WR_OP)
/* wr_lo fields */
#define S_WR_LEN 0
#define M_WR_LEN 0xFF
#define V_WR_LEN(x) ((x) << S_WR_LEN)
#define G_WR_LEN(x) (((x) >> S_WR_LEN) & M_WR_LEN)
#define S_WR_TID 8
#define M_WR_TID 0xFFFFF
#define V_WR_TID(x) ((x) << S_WR_TID)
#define G_WR_TID(x) (((x) >> S_WR_TID) & M_WR_TID)
#define S_WR_CR_FLUSH 30
#define V_WR_CR_FLUSH(x) ((x) << S_WR_CR_FLUSH)
#define F_WR_CR_FLUSH V_WR_CR_FLUSH(1U)
#define S_WR_GEN 31
#define V_WR_GEN(x) ((x) << S_WR_GEN)
#define F_WR_GEN V_WR_GEN(1U)
# define WR_HDR struct work_request_hdr wr
# define RSS_HDR
#else
# define WR_HDR
# define RSS_HDR struct rss_header rss_hdr;
#endif
/* option 0 lower-half fields */
#define S_CPL_STATUS 0
#define M_CPL_STATUS 0xFF
#define V_CPL_STATUS(x) ((x) << S_CPL_STATUS)
#define G_CPL_STATUS(x) (((x) >> S_CPL_STATUS) & M_CPL_STATUS)
#define S_INJECT_TIMER 6
#define V_INJECT_TIMER(x) ((x) << S_INJECT_TIMER)
#define F_INJECT_TIMER V_INJECT_TIMER(1U)
#define S_NO_OFFLOAD 7
#define V_NO_OFFLOAD(x) ((x) << S_NO_OFFLOAD)
#define F_NO_OFFLOAD V_NO_OFFLOAD(1U)
#define S_ULP_MODE 8
#define M_ULP_MODE 0xF
#define V_ULP_MODE(x) ((x) << S_ULP_MODE)
#define G_ULP_MODE(x) (((x) >> S_ULP_MODE) & M_ULP_MODE)
#define S_RCV_BUFSIZ 12
#define M_RCV_BUFSIZ 0x3FFF
#define V_RCV_BUFSIZ(x) ((x) << S_RCV_BUFSIZ)
#define G_RCV_BUFSIZ(x) (((x) >> S_RCV_BUFSIZ) & M_RCV_BUFSIZ)
#define S_TOS 26
#define M_TOS 0x3F
#define V_TOS(x) ((x) << S_TOS)
#define G_TOS(x) (((x) >> S_TOS) & M_TOS)
/* option 0 upper-half fields */
#define S_DELACK 0
#define V_DELACK(x) ((x) << S_DELACK)
#define F_DELACK V_DELACK(1U)
#define S_NO_CONG 1
#define V_NO_CONG(x) ((x) << S_NO_CONG)
#define F_NO_CONG V_NO_CONG(1U)
#define S_SRC_MAC_SEL 2
#define M_SRC_MAC_SEL 0x3
#define V_SRC_MAC_SEL(x) ((x) << S_SRC_MAC_SEL)
#define G_SRC_MAC_SEL(x) (((x) >> S_SRC_MAC_SEL) & M_SRC_MAC_SEL)
#define S_L2T_IDX 4
#define M_L2T_IDX 0x7FF
#define V_L2T_IDX(x) ((x) << S_L2T_IDX)
#define G_L2T_IDX(x) (((x) >> S_L2T_IDX) & M_L2T_IDX)
#define S_TX_CHANNEL 15
#define V_TX_CHANNEL(x) ((x) << S_TX_CHANNEL)
#define F_TX_CHANNEL V_TX_CHANNEL(1U)
#define S_TCAM_BYPASS 16
#define V_TCAM_BYPASS(x) ((x) << S_TCAM_BYPASS)
#define F_TCAM_BYPASS V_TCAM_BYPASS(1U)
#define S_NAGLE 17
#define V_NAGLE(x) ((x) << S_NAGLE)
#define F_NAGLE V_NAGLE(1U)
#define S_WND_SCALE 18
#define M_WND_SCALE 0xF
#define V_WND_SCALE(x) ((x) << S_WND_SCALE)
#define G_WND_SCALE(x) (((x) >> S_WND_SCALE) & M_WND_SCALE)
#define S_KEEP_ALIVE 22
#define V_KEEP_ALIVE(x) ((x) << S_KEEP_ALIVE)
#define F_KEEP_ALIVE V_KEEP_ALIVE(1U)
#define S_MAX_RETRANS 23
#define M_MAX_RETRANS 0xF
#define V_MAX_RETRANS(x) ((x) << S_MAX_RETRANS)
#define G_MAX_RETRANS(x) (((x) >> S_MAX_RETRANS) & M_MAX_RETRANS)
#define S_MAX_RETRANS_OVERRIDE 27
#define V_MAX_RETRANS_OVERRIDE(x) ((x) << S_MAX_RETRANS_OVERRIDE)
#define F_MAX_RETRANS_OVERRIDE V_MAX_RETRANS_OVERRIDE(1U)
#define S_MSS_IDX 28
#define M_MSS_IDX 0xF
#define V_MSS_IDX(x) ((x) << S_MSS_IDX)
#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX)
/* option 1 fields */
#define S_RSS_ENABLE 0
#define V_RSS_ENABLE(x) ((x) << S_RSS_ENABLE)
#define F_RSS_ENABLE V_RSS_ENABLE(1U)
#define S_RSS_MASK_LEN 1
#define M_RSS_MASK_LEN 0x7
#define V_RSS_MASK_LEN(x) ((x) << S_RSS_MASK_LEN)
#define G_RSS_MASK_LEN(x) (((x) >> S_RSS_MASK_LEN) & M_RSS_MASK_LEN)
#define S_CPU_IDX 4
#define M_CPU_IDX 0x3F
#define V_CPU_IDX(x) ((x) << S_CPU_IDX)
#define G_CPU_IDX(x) (((x) >> S_CPU_IDX) & M_CPU_IDX)
#define S_MAC_MATCH_VALID 18
#define V_MAC_MATCH_VALID(x) ((x) << S_MAC_MATCH_VALID)
#define F_MAC_MATCH_VALID V_MAC_MATCH_VALID(1U)
#define S_CONN_POLICY 19
#define M_CONN_POLICY 0x3
#define V_CONN_POLICY(x) ((x) << S_CONN_POLICY)
#define G_CONN_POLICY(x) (((x) >> S_CONN_POLICY) & M_CONN_POLICY)
#define S_SYN_DEFENSE 21
#define V_SYN_DEFENSE(x) ((x) << S_SYN_DEFENSE)
#define F_SYN_DEFENSE V_SYN_DEFENSE(1U)
#define S_VLAN_PRI 22
#define M_VLAN_PRI 0x3
#define V_VLAN_PRI(x) ((x) << S_VLAN_PRI)
#define G_VLAN_PRI(x) (((x) >> S_VLAN_PRI) & M_VLAN_PRI)
#define S_VLAN_PRI_VALID 24
#define V_VLAN_PRI_VALID(x) ((x) << S_VLAN_PRI_VALID)
#define F_VLAN_PRI_VALID V_VLAN_PRI_VALID(1U)
#define S_PKT_TYPE 25
#define M_PKT_TYPE 0x3
#define V_PKT_TYPE(x) ((x) << S_PKT_TYPE)
#define G_PKT_TYPE(x) (((x) >> S_PKT_TYPE) & M_PKT_TYPE)
#define S_MAC_MATCH 27
#define M_MAC_MATCH 0x1F
#define V_MAC_MATCH(x) ((x) << S_MAC_MATCH)
#define G_MAC_MATCH(x) (((x) >> S_MAC_MATCH) & M_MAC_MATCH)
/* option 2 fields */
#define S_CPU_INDEX 0
#define M_CPU_INDEX 0x7F
#define V_CPU_INDEX(x) ((x) << S_CPU_INDEX)
#define G_CPU_INDEX(x) (((x) >> S_CPU_INDEX) & M_CPU_INDEX)
#define S_CPU_INDEX_VALID 7
#define V_CPU_INDEX_VALID(x) ((x) << S_CPU_INDEX_VALID)
#define F_CPU_INDEX_VALID V_CPU_INDEX_VALID(1U)
#define S_RX_COALESCE 8
#define M_RX_COALESCE 0x3
#define V_RX_COALESCE(x) ((x) << S_RX_COALESCE)
#define G_RX_COALESCE(x) (((x) >> S_RX_COALESCE) & M_RX_COALESCE)
#define S_RX_COALESCE_VALID 10
#define V_RX_COALESCE_VALID(x) ((x) << S_RX_COALESCE_VALID)
#define F_RX_COALESCE_VALID V_RX_COALESCE_VALID(1U)
#define S_CONG_CONTROL_FLAVOR 11
#define M_CONG_CONTROL_FLAVOR 0x3
#define V_CONG_CONTROL_FLAVOR(x) ((x) << S_CONG_CONTROL_FLAVOR)
#define G_CONG_CONTROL_FLAVOR(x) (((x) >> S_CONG_CONTROL_FLAVOR) & M_CONG_CONTROL_FLAVOR)
#define S_PACING_FLAVOR 13
#define M_PACING_FLAVOR 0x3
#define V_PACING_FLAVOR(x) ((x) << S_PACING_FLAVOR)
#define G_PACING_FLAVOR(x) (((x) >> S_PACING_FLAVOR) & M_PACING_FLAVOR)
#define S_FLAVORS_VALID 15
#define V_FLAVORS_VALID(x) ((x) << S_FLAVORS_VALID)
#define F_FLAVORS_VALID V_FLAVORS_VALID(1U)
#define S_RX_FC_DISABLE 16
#define V_RX_FC_DISABLE(x) ((x) << S_RX_FC_DISABLE)
#define F_RX_FC_DISABLE V_RX_FC_DISABLE(1U)
#define S_RX_FC_VALID 17
#define V_RX_FC_VALID(x) ((x) << S_RX_FC_VALID)
#define F_RX_FC_VALID V_RX_FC_VALID(1U)
struct cpl_pass_open_req {
WR_HDR;
union opcode_tid ot;
__be16 local_port;
__be16 peer_port;
__be32 local_ip;
__be32 peer_ip;
__be32 opt0h;
__be32 opt0l;
__be32 peer_netmask;
__be32 opt1;
};
struct cpl_pass_open_rpl {
RSS_HDR union opcode_tid ot;
__be16 local_port;
__be16 peer_port;
__be32 local_ip;
__be32 peer_ip;
__u8 resvd[7];
__u8 status;
};
struct cpl_pass_establish {
RSS_HDR union opcode_tid ot;
__be16 local_port;
__be16 peer_port;
__be32 local_ip;
__be32 peer_ip;
__be32 tos_tid;
__be16 l2t_idx;
__be16 tcp_opt;
__be32 snd_isn;
__be32 rcv_isn;
};
/* cpl_pass_establish.tos_tid fields */
#define S_PASS_OPEN_TID 0
#define M_PASS_OPEN_TID 0xFFFFFF
#define V_PASS_OPEN_TID(x) ((x) << S_PASS_OPEN_TID)
#define G_PASS_OPEN_TID(x) (((x) >> S_PASS_OPEN_TID) & M_PASS_OPEN_TID)
#define S_PASS_OPEN_TOS 24
#define M_PASS_OPEN_TOS 0xFF
#define V_PASS_OPEN_TOS(x) ((x) << S_PASS_OPEN_TOS)
#define G_PASS_OPEN_TOS(x) (((x) >> S_PASS_OPEN_TOS) & M_PASS_OPEN_TOS)
/* cpl_pass_establish.l2t_idx fields */
#define S_L2T_IDX16 5
#define M_L2T_IDX16 0x7FF
#define V_L2T_IDX16(x) ((x) << S_L2T_IDX16)
#define G_L2T_IDX16(x) (((x) >> S_L2T_IDX16) & M_L2T_IDX16)
/* cpl_pass_establish.tcp_opt fields (also applies to cpl_act_establish) */
#define G_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1)
#define G_TCPOPT_SACK(x) (((x) >> 6) & 1)
#define G_TCPOPT_TSTAMP(x) (((x) >> 7) & 1)
#define G_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
#define G_TCPOPT_MSS(x) (((x) >> 12) & 0xf)
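/*
 * Illustrative sketch, not part of the driver: decoding the negotiated TCP
 * options reported in cpl_pass_establish.tcp_opt (and, per the comment
 * above, cpl_act_establish.tcp_opt).  The function name and the debug
 * printk are assumptions for the example.
 */
static inline void example_print_tcp_opt(const struct cpl_pass_establish *p)
{
	unsigned int opt = ntohs(p->tcp_opt);

	printk(KERN_DEBUG "wscale_ok=%u sack=%u tstamp=%u snd_wscale=%u "
	       "mss_idx=%u\n", G_TCPOPT_WSCALE_OK(opt), G_TCPOPT_SACK(opt),
	       G_TCPOPT_TSTAMP(opt), G_TCPOPT_SND_WSCALE(opt),
	       G_TCPOPT_MSS(opt));
}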
struct cpl_pass_accept_req {
RSS_HDR union opcode_tid ot;
__be16 local_port;
__be16 peer_port;
__be32 local_ip;
__be32 peer_ip;
__be32 tos_tid;
struct tcp_options tcp_options;
__u8 dst_mac[6];
__be16 vlan_tag;
__u8 src_mac[6];
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8:3;
__u8 addr_idx:3;
__u8 port_idx:1;
__u8 exact_match:1;
#else
__u8 exact_match:1;
__u8 port_idx:1;
__u8 addr_idx:3;
__u8:3;
#endif
__u8 rsvd;
__be32 rcv_isn;
__be32 rsvd2;
};
struct cpl_pass_accept_rpl {
WR_HDR;
union opcode_tid ot;
__be32 opt2;
__be32 rsvd;
__be32 peer_ip;
__be32 opt0h;
__be32 opt0l_status;
};
struct cpl_act_open_req {
WR_HDR;
union opcode_tid ot;
__be16 local_port;
__be16 peer_port;
__be32 local_ip;
__be32 peer_ip;
__be32 opt0h;
__be32 opt0l;
__be32 params;
__be32 opt2;
};
/* cpl_act_open_req.params fields */
#define S_AOPEN_VLAN_PRI 9
#define M_AOPEN_VLAN_PRI 0x3
#define V_AOPEN_VLAN_PRI(x) ((x) << S_AOPEN_VLAN_PRI)
#define G_AOPEN_VLAN_PRI(x) (((x) >> S_AOPEN_VLAN_PRI) & M_AOPEN_VLAN_PRI)
#define S_AOPEN_VLAN_PRI_VALID 11
#define V_AOPEN_VLAN_PRI_VALID(x) ((x) << S_AOPEN_VLAN_PRI_VALID)
#define F_AOPEN_VLAN_PRI_VALID V_AOPEN_VLAN_PRI_VALID(1U)
#define S_AOPEN_PKT_TYPE 12
#define M_AOPEN_PKT_TYPE 0x3
#define V_AOPEN_PKT_TYPE(x) ((x) << S_AOPEN_PKT_TYPE)
#define G_AOPEN_PKT_TYPE(x) (((x) >> S_AOPEN_PKT_TYPE) & M_AOPEN_PKT_TYPE)
#define S_AOPEN_MAC_MATCH 14
#define M_AOPEN_MAC_MATCH 0x1F
#define V_AOPEN_MAC_MATCH(x) ((x) << S_AOPEN_MAC_MATCH)
#define G_AOPEN_MAC_MATCH(x) (((x) >> S_AOPEN_MAC_MATCH) & M_AOPEN_MAC_MATCH)
#define S_AOPEN_MAC_MATCH_VALID 19
#define V_AOPEN_MAC_MATCH_VALID(x) ((x) << S_AOPEN_MAC_MATCH_VALID)
#define F_AOPEN_MAC_MATCH_VALID V_AOPEN_MAC_MATCH_VALID(1U)
#define S_AOPEN_IFF_VLAN 20
#define M_AOPEN_IFF_VLAN 0xFFF
#define V_AOPEN_IFF_VLAN(x) ((x) << S_AOPEN_IFF_VLAN)
#define G_AOPEN_IFF_VLAN(x) (((x) >> S_AOPEN_IFF_VLAN) & M_AOPEN_IFF_VLAN)
struct cpl_act_open_rpl {
RSS_HDR union opcode_tid ot;
__be16 local_port;
__be16 peer_port;
__be32 local_ip;
__be32 peer_ip;
__be32 atid;
__u8 rsvd[3];
__u8 status;
};
struct cpl_act_establish {
RSS_HDR union opcode_tid ot;
__be16 local_port;
__be16 peer_port;
__be32 local_ip;
__be32 peer_ip;
__be32 tos_tid;
__be16 l2t_idx;
__be16 tcp_opt;
__be32 snd_isn;
__be32 rcv_isn;
};
struct cpl_get_tcb {
WR_HDR;
union opcode_tid ot;
__be16 cpuno;
__be16 rsvd;
};
struct cpl_get_tcb_rpl {
RSS_HDR union opcode_tid ot;
__u8 rsvd;
__u8 status;
__be16 len;
};
struct cpl_set_tcb {
WR_HDR;
union opcode_tid ot;
__u8 reply;
__u8 cpu_idx;
__be16 len;
};
/* cpl_set_tcb.reply fields */
#define S_NO_REPLY 7
#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
#define F_NO_REPLY V_NO_REPLY(1U)
struct cpl_set_tcb_field {
WR_HDR;
union opcode_tid ot;
__u8 reply;
__u8 cpu_idx;
__be16 word;
__be64 mask;
__be64 val;
};
struct cpl_set_tcb_rpl {
RSS_HDR union opcode_tid ot;
__u8 rsvd[3];
__u8 status;
};
struct cpl_pcmd {
WR_HDR;
union opcode_tid ot;
__u8 rsvd[3];
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 src:1;
__u8 bundle:1;
__u8 channel:1;
__u8:5;
#else
__u8:5;
__u8 channel:1;
__u8 bundle:1;
__u8 src:1;
#endif
__be32 pcmd_parm[2];
};
struct cpl_pcmd_reply {
RSS_HDR union opcode_tid ot;
__u8 status;
__u8 rsvd;
__be16 len;
};
struct cpl_close_con_req {
WR_HDR;
union opcode_tid ot;
__be32 rsvd;
};
struct cpl_close_con_rpl {
RSS_HDR union opcode_tid ot;
__u8 rsvd[3];
__u8 status;
__be32 snd_nxt;
__be32 rcv_nxt;
};
struct cpl_close_listserv_req {
WR_HDR;
union opcode_tid ot;
__u8 rsvd0;
__u8 cpu_idx;
__be16 rsvd1;
};
struct cpl_close_listserv_rpl {
RSS_HDR union opcode_tid ot;
__u8 rsvd[3];
__u8 status;
};
struct cpl_abort_req_rss {
RSS_HDR union opcode_tid ot;
__be32 rsvd0;
__u8 rsvd1;
__u8 status;
__u8 rsvd2[6];
};
struct cpl_abort_req {
WR_HDR;
union opcode_tid ot;
__be32 rsvd0;
__u8 rsvd1;
__u8 cmd;
__u8 rsvd2[6];
};
struct cpl_abort_rpl_rss {
RSS_HDR union opcode_tid ot;
__be32 rsvd0;
__u8 rsvd1;
__u8 status;
__u8 rsvd2[6];
};
struct cpl_abort_rpl {
WR_HDR;
union opcode_tid ot;
__be32 rsvd0;
__u8 rsvd1;
__u8 cmd;
__u8 rsvd2[6];
};
struct cpl_peer_close {
RSS_HDR union opcode_tid ot;
__be32 rcv_nxt;
};
struct tx_data_wr {
__be32 wr_hi;
__be32 wr_lo;
__be32 len;
__be32 flags;
__be32 sndseq;
__be32 param;
};
/* tx_data_wr.param fields */
#define S_TX_PORT 0
#define M_TX_PORT 0x7
#define V_TX_PORT(x) ((x) << S_TX_PORT)
#define G_TX_PORT(x) (((x) >> S_TX_PORT) & M_TX_PORT)
#define S_TX_MSS 4
#define M_TX_MSS 0xF
#define V_TX_MSS(x) ((x) << S_TX_MSS)
#define G_TX_MSS(x) (((x) >> S_TX_MSS) & M_TX_MSS)
#define S_TX_QOS 8
#define M_TX_QOS 0xFF
#define V_TX_QOS(x) ((x) << S_TX_QOS)
#define G_TX_QOS(x) (((x) >> S_TX_QOS) & M_TX_QOS)
#define S_TX_SNDBUF 16
#define M_TX_SNDBUF 0xFFFF
#define V_TX_SNDBUF(x) ((x) << S_TX_SNDBUF)
#define G_TX_SNDBUF(x) (((x) >> S_TX_SNDBUF) & M_TX_SNDBUF)
struct cpl_tx_data {
union opcode_tid ot;
__be32 len;
__be32 rsvd;
__be16 urg;
__be16 flags;
};
/* cpl_tx_data.flags fields */
#define S_TX_ULP_SUBMODE 6
#define M_TX_ULP_SUBMODE 0xF
#define V_TX_ULP_SUBMODE(x) ((x) << S_TX_ULP_SUBMODE)
#define G_TX_ULP_SUBMODE(x) (((x) >> S_TX_ULP_SUBMODE) & M_TX_ULP_SUBMODE)
#define S_TX_ULP_MODE 10
#define M_TX_ULP_MODE 0xF
#define V_TX_ULP_MODE(x) ((x) << S_TX_ULP_MODE)
#define G_TX_ULP_MODE(x) (((x) >> S_TX_ULP_MODE) & M_TX_ULP_MODE)
#define S_TX_SHOVE 14
#define V_TX_SHOVE(x) ((x) << S_TX_SHOVE)
#define F_TX_SHOVE V_TX_SHOVE(1U)
#define S_TX_MORE 15
#define V_TX_MORE(x) ((x) << S_TX_MORE)
#define F_TX_MORE V_TX_MORE(1U)
/* additional tx_data_wr.flags fields */
#define S_TX_CPU_IDX 0
#define M_TX_CPU_IDX 0x3F
#define V_TX_CPU_IDX(x) ((x) << S_TX_CPU_IDX)
#define G_TX_CPU_IDX(x) (((x) >> S_TX_CPU_IDX) & M_TX_CPU_IDX)
#define S_TX_URG 16
#define V_TX_URG(x) ((x) << S_TX_URG)
#define F_TX_URG V_TX_URG(1U)
#define S_TX_CLOSE 17
#define V_TX_CLOSE(x) ((x) << S_TX_CLOSE)
#define F_TX_CLOSE V_TX_CLOSE(1U)
#define S_TX_INIT 18
#define V_TX_INIT(x) ((x) << S_TX_INIT)
#define F_TX_INIT V_TX_INIT(1U)
#define S_TX_IMM_ACK 19
#define V_TX_IMM_ACK(x) ((x) << S_TX_IMM_ACK)
#define F_TX_IMM_ACK V_TX_IMM_ACK(1U)
#define S_TX_IMM_DMA 20
#define V_TX_IMM_DMA(x) ((x) << S_TX_IMM_DMA)
#define F_TX_IMM_DMA V_TX_IMM_DMA(1U)
struct cpl_tx_data_ack {
RSS_HDR union opcode_tid ot;
__be32 ack_seq;
};
struct cpl_wr_ack {
RSS_HDR union opcode_tid ot;
__be16 credits;
__be16 rsvd;
__be32 snd_nxt;
__be32 snd_una;
};
struct cpl_rdma_ec_status {
RSS_HDR union opcode_tid ot;
__u8 rsvd[3];
__u8 status;
};
struct mngt_pktsched_wr {
__be32 wr_hi;
__be32 wr_lo;
__u8 mngt_opcode;
__u8 rsvd[7];
__u8 sched;
__u8 idx;
__u8 min;
__u8 max;
__u8 binding;
__u8 rsvd1[3];
};
struct cpl_iscsi_hdr {
RSS_HDR union opcode_tid ot;
__be16 pdu_len_ddp;
__be16 len;
__be32 seq;
__be16 urg;
__u8 rsvd;
__u8 status;
};
/* cpl_iscsi_hdr.pdu_len_ddp fields */
#define S_ISCSI_PDU_LEN 0
#define M_ISCSI_PDU_LEN 0x7FFF
#define V_ISCSI_PDU_LEN(x) ((x) << S_ISCSI_PDU_LEN)
#define G_ISCSI_PDU_LEN(x) (((x) >> S_ISCSI_PDU_LEN) & M_ISCSI_PDU_LEN)
#define S_ISCSI_DDP 15
#define V_ISCSI_DDP(x) ((x) << S_ISCSI_DDP)
#define F_ISCSI_DDP V_ISCSI_DDP(1U)
struct cpl_rx_data {
RSS_HDR union opcode_tid ot;
__be16 rsvd;
__be16 len;
__be32 seq;
__be16 urg;
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 dack_mode:2;
__u8 psh:1;
__u8 heartbeat:1;
__u8:4;
#else
__u8:4;
__u8 heartbeat:1;
__u8 psh:1;
__u8 dack_mode:2;
#endif
__u8 status;
};
struct cpl_rx_data_ack {
WR_HDR;
union opcode_tid ot;
__be32 credit_dack;
};
/* cpl_rx_data_ack.ack_seq fields */
#define S_RX_CREDITS 0
#define M_RX_CREDITS 0x7FFFFFF
#define V_RX_CREDITS(x) ((x) << S_RX_CREDITS)
#define G_RX_CREDITS(x) (((x) >> S_RX_CREDITS) & M_RX_CREDITS)
#define S_RX_MODULATE 27
#define V_RX_MODULATE(x) ((x) << S_RX_MODULATE)
#define F_RX_MODULATE V_RX_MODULATE(1U)
#define S_RX_FORCE_ACK 28
#define V_RX_FORCE_ACK(x) ((x) << S_RX_FORCE_ACK)
#define F_RX_FORCE_ACK V_RX_FORCE_ACK(1U)
#define S_RX_DACK_MODE 29
#define M_RX_DACK_MODE 0x3
#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)
#define S_RX_DACK_CHANGE 31
#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
struct cpl_rx_urg_notify {
RSS_HDR union opcode_tid ot;
__be32 seq;
};
struct cpl_rx_ddp_complete {
RSS_HDR union opcode_tid ot;
__be32 ddp_report;
};
struct cpl_rx_data_ddp {
RSS_HDR union opcode_tid ot;
__be16 urg;
__be16 len;
__be32 seq;
union {
__be32 nxt_seq;
__be32 ddp_report;
};
__be32 ulp_crc;
__be32 ddpvld_status;
};
/* cpl_rx_data_ddp.ddpvld_status fields */
#define S_DDP_STATUS 0
#define M_DDP_STATUS 0xFF
#define V_DDP_STATUS(x) ((x) << S_DDP_STATUS)
#define G_DDP_STATUS(x) (((x) >> S_DDP_STATUS) & M_DDP_STATUS)
#define S_DDP_VALID 15
#define M_DDP_VALID 0x1FFFF
#define V_DDP_VALID(x) ((x) << S_DDP_VALID)
#define G_DDP_VALID(x) (((x) >> S_DDP_VALID) & M_DDP_VALID)
#define S_DDP_PPOD_MISMATCH 15
#define V_DDP_PPOD_MISMATCH(x) ((x) << S_DDP_PPOD_MISMATCH)
#define F_DDP_PPOD_MISMATCH V_DDP_PPOD_MISMATCH(1U)
#define S_DDP_PDU 16
#define V_DDP_PDU(x) ((x) << S_DDP_PDU)
#define F_DDP_PDU V_DDP_PDU(1U)
#define S_DDP_LLIMIT_ERR 17
#define V_DDP_LLIMIT_ERR(x) ((x) << S_DDP_LLIMIT_ERR)
#define F_DDP_LLIMIT_ERR V_DDP_LLIMIT_ERR(1U)
#define S_DDP_PPOD_PARITY_ERR 18
#define V_DDP_PPOD_PARITY_ERR(x) ((x) << S_DDP_PPOD_PARITY_ERR)
#define F_DDP_PPOD_PARITY_ERR V_DDP_PPOD_PARITY_ERR(1U)
#define S_DDP_PADDING_ERR 19
#define V_DDP_PADDING_ERR(x) ((x) << S_DDP_PADDING_ERR)
#define F_DDP_PADDING_ERR V_DDP_PADDING_ERR(1U)
#define S_DDP_HDRCRC_ERR 20
#define V_DDP_HDRCRC_ERR(x) ((x) << S_DDP_HDRCRC_ERR)
#define F_DDP_HDRCRC_ERR V_DDP_HDRCRC_ERR(1U)
#define S_DDP_DATACRC_ERR 21
#define V_DDP_DATACRC_ERR(x) ((x) << S_DDP_DATACRC_ERR)
#define F_DDP_DATACRC_ERR V_DDP_DATACRC_ERR(1U)
#define S_DDP_INVALID_TAG 22
#define V_DDP_INVALID_TAG(x) ((x) << S_DDP_INVALID_TAG)
#define F_DDP_INVALID_TAG V_DDP_INVALID_TAG(1U)
#define S_DDP_ULIMIT_ERR 23
#define V_DDP_ULIMIT_ERR(x) ((x) << S_DDP_ULIMIT_ERR)
#define F_DDP_ULIMIT_ERR V_DDP_ULIMIT_ERR(1U)
#define S_DDP_OFFSET_ERR 24
#define V_DDP_OFFSET_ERR(x) ((x) << S_DDP_OFFSET_ERR)
#define F_DDP_OFFSET_ERR V_DDP_OFFSET_ERR(1U)
#define S_DDP_COLOR_ERR 25
#define V_DDP_COLOR_ERR(x) ((x) << S_DDP_COLOR_ERR)
#define F_DDP_COLOR_ERR V_DDP_COLOR_ERR(1U)
#define S_DDP_TID_MISMATCH 26
#define V_DDP_TID_MISMATCH(x) ((x) << S_DDP_TID_MISMATCH)
#define F_DDP_TID_MISMATCH V_DDP_TID_MISMATCH(1U)
#define S_DDP_INVALID_PPOD 27
#define V_DDP_INVALID_PPOD(x) ((x) << S_DDP_INVALID_PPOD)
#define F_DDP_INVALID_PPOD V_DDP_INVALID_PPOD(1U)
#define S_DDP_ULP_MODE 28
#define M_DDP_ULP_MODE 0xF
#define V_DDP_ULP_MODE(x) ((x) << S_DDP_ULP_MODE)
#define G_DDP_ULP_MODE(x) (((x) >> S_DDP_ULP_MODE) & M_DDP_ULP_MODE)
/* cpl_rx_data_ddp.ddp_report fields */
#define S_DDP_OFFSET 0
#define M_DDP_OFFSET 0x3FFFFF
#define V_DDP_OFFSET(x) ((x) << S_DDP_OFFSET)
#define G_DDP_OFFSET(x) (((x) >> S_DDP_OFFSET) & M_DDP_OFFSET)
#define S_DDP_URG 24
#define V_DDP_URG(x) ((x) << S_DDP_URG)
#define F_DDP_URG V_DDP_URG(1U)
#define S_DDP_PSH 25
#define V_DDP_PSH(x) ((x) << S_DDP_PSH)
#define F_DDP_PSH V_DDP_PSH(1U)
#define S_DDP_BUF_COMPLETE 26
#define V_DDP_BUF_COMPLETE(x) ((x) << S_DDP_BUF_COMPLETE)
#define F_DDP_BUF_COMPLETE V_DDP_BUF_COMPLETE(1U)
#define S_DDP_BUF_TIMED_OUT 27
#define V_DDP_BUF_TIMED_OUT(x) ((x) << S_DDP_BUF_TIMED_OUT)
#define F_DDP_BUF_TIMED_OUT V_DDP_BUF_TIMED_OUT(1U)
#define S_DDP_BUF_IDX 28
#define V_DDP_BUF_IDX(x) ((x) << S_DDP_BUF_IDX)
#define F_DDP_BUF_IDX V_DDP_BUF_IDX(1U)
struct cpl_tx_pkt {
WR_HDR;
__be32 cntrl;
__be32 len;
};
struct cpl_tx_pkt_lso {
WR_HDR;
__be32 cntrl;
__be32 len;
__be32 rsvd;
__be32 lso_info;
};
/* cpl_tx_pkt*.cntrl fields */
#define S_TXPKT_VLAN 0
#define M_TXPKT_VLAN 0xFFFF
#define V_TXPKT_VLAN(x) ((x) << S_TXPKT_VLAN)
#define G_TXPKT_VLAN(x) (((x) >> S_TXPKT_VLAN) & M_TXPKT_VLAN)
#define S_TXPKT_INTF 16
#define M_TXPKT_INTF 0xF
#define V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF)
#define G_TXPKT_INTF(x) (((x) >> S_TXPKT_INTF) & M_TXPKT_INTF)
#define S_TXPKT_IPCSUM_DIS 20
#define V_TXPKT_IPCSUM_DIS(x) ((x) << S_TXPKT_IPCSUM_DIS)
#define F_TXPKT_IPCSUM_DIS V_TXPKT_IPCSUM_DIS(1U)
#define S_TXPKT_L4CSUM_DIS 21
#define V_TXPKT_L4CSUM_DIS(x) ((x) << S_TXPKT_L4CSUM_DIS)
#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1U)
#define S_TXPKT_VLAN_VLD 22
#define V_TXPKT_VLAN_VLD(x) ((x) << S_TXPKT_VLAN_VLD)
#define F_TXPKT_VLAN_VLD V_TXPKT_VLAN_VLD(1U)
#define S_TXPKT_LOOPBACK 23
#define V_TXPKT_LOOPBACK(x) ((x) << S_TXPKT_LOOPBACK)
#define F_TXPKT_LOOPBACK V_TXPKT_LOOPBACK(1U)
#define S_TXPKT_OPCODE 24
#define M_TXPKT_OPCODE 0xFF
#define V_TXPKT_OPCODE(x) ((x) << S_TXPKT_OPCODE)
#define G_TXPKT_OPCODE(x) (((x) >> S_TXPKT_OPCODE) & M_TXPKT_OPCODE)
/* cpl_tx_pkt_lso.lso_info fields */
#define S_LSO_MSS 0
#define M_LSO_MSS 0x3FFF
#define V_LSO_MSS(x) ((x) << S_LSO_MSS)
#define G_LSO_MSS(x) (((x) >> S_LSO_MSS) & M_LSO_MSS)
#define S_LSO_ETH_TYPE 14
#define M_LSO_ETH_TYPE 0x3
#define V_LSO_ETH_TYPE(x) ((x) << S_LSO_ETH_TYPE)
#define G_LSO_ETH_TYPE(x) (((x) >> S_LSO_ETH_TYPE) & M_LSO_ETH_TYPE)
#define S_LSO_TCPHDR_WORDS 16
#define M_LSO_TCPHDR_WORDS 0xF
#define V_LSO_TCPHDR_WORDS(x) ((x) << S_LSO_TCPHDR_WORDS)
#define G_LSO_TCPHDR_WORDS(x) (((x) >> S_LSO_TCPHDR_WORDS) & M_LSO_TCPHDR_WORDS)
#define S_LSO_IPHDR_WORDS 20
#define M_LSO_IPHDR_WORDS 0xF
#define V_LSO_IPHDR_WORDS(x) ((x) << S_LSO_IPHDR_WORDS)
#define G_LSO_IPHDR_WORDS(x) (((x) >> S_LSO_IPHDR_WORDS) & M_LSO_IPHDR_WORDS)
#define S_LSO_IPV6 24
#define V_LSO_IPV6(x) ((x) << S_LSO_IPV6)
#define F_LSO_IPV6 V_LSO_IPV6(1U)
struct cpl_trace_pkt {
#ifdef CHELSIO_FW
__u8 rss_opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 err:1;
__u8:7;
#else
__u8:7;
__u8 err:1;
#endif
__u8 rsvd0;
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 qid:4;
__u8:4;
#else
__u8:4;
__u8 qid:4;
#endif
__be32 tstamp;
#endif /* CHELSIO_FW */
__u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 iff:4;
__u8:4;
#else
__u8:4;
__u8 iff:4;
#endif
__u8 rsvd[4];
__be16 len;
};
struct cpl_rx_pkt {
RSS_HDR __u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 iff:4;
__u8 csum_valid:1;
__u8 ipmi_pkt:1;
__u8 vlan_valid:1;
__u8 fragment:1;
#else
__u8 fragment:1;
__u8 vlan_valid:1;
__u8 ipmi_pkt:1;
__u8 csum_valid:1;
__u8 iff:4;
#endif
__be16 csum;
__be16 vlan;
__be16 len;
};
struct cpl_l2t_write_req {
WR_HDR;
union opcode_tid ot;
__be32 params;
__u8 rsvd[2];
__u8 dst_mac[6];
};
/* cpl_l2t_write_req.params fields */
#define S_L2T_W_IDX 0
#define M_L2T_W_IDX 0x7FF
#define V_L2T_W_IDX(x) ((x) << S_L2T_W_IDX)
#define G_L2T_W_IDX(x) (((x) >> S_L2T_W_IDX) & M_L2T_W_IDX)
#define S_L2T_W_VLAN 11
#define M_L2T_W_VLAN 0xFFF
#define V_L2T_W_VLAN(x) ((x) << S_L2T_W_VLAN)
#define G_L2T_W_VLAN(x) (((x) >> S_L2T_W_VLAN) & M_L2T_W_VLAN)
#define S_L2T_W_IFF 23
#define M_L2T_W_IFF 0xF
#define V_L2T_W_IFF(x) ((x) << S_L2T_W_IFF)
#define G_L2T_W_IFF(x) (((x) >> S_L2T_W_IFF) & M_L2T_W_IFF)
#define S_L2T_W_PRIO 27
#define M_L2T_W_PRIO 0x7
#define V_L2T_W_PRIO(x) ((x) << S_L2T_W_PRIO)
#define G_L2T_W_PRIO(x) (((x) >> S_L2T_W_PRIO) & M_L2T_W_PRIO)
struct cpl_l2t_write_rpl {
RSS_HDR union opcode_tid ot;
__u8 status;
__u8 rsvd[3];
};
struct cpl_l2t_read_req {
WR_HDR;
union opcode_tid ot;
__be16 rsvd;
__be16 l2t_idx;
};
struct cpl_l2t_read_rpl {
RSS_HDR union opcode_tid ot;
__be32 params;
__u8 rsvd[2];
__u8 dst_mac[6];
};
/* cpl_l2t_read_rpl.params fields */
#define S_L2T_R_PRIO 0
#define M_L2T_R_PRIO 0x7
#define V_L2T_R_PRIO(x) ((x) << S_L2T_R_PRIO)
#define G_L2T_R_PRIO(x) (((x) >> S_L2T_R_PRIO) & M_L2T_R_PRIO)
#define S_L2T_R_VLAN 8
#define M_L2T_R_VLAN 0xFFF
#define V_L2T_R_VLAN(x) ((x) << S_L2T_R_VLAN)
#define G_L2T_R_VLAN(x) (((x) >> S_L2T_R_VLAN) & M_L2T_R_VLAN)
#define S_L2T_R_IFF 20
#define M_L2T_R_IFF 0xF
#define V_L2T_R_IFF(x) ((x) << S_L2T_R_IFF)
#define G_L2T_R_IFF(x) (((x) >> S_L2T_R_IFF) & M_L2T_R_IFF)
#define S_L2T_STATUS 24
#define M_L2T_STATUS 0xFF
#define V_L2T_STATUS(x) ((x) << S_L2T_STATUS)
#define G_L2T_STATUS(x) (((x) >> S_L2T_STATUS) & M_L2T_STATUS)
struct cpl_smt_write_req {
WR_HDR;
union opcode_tid ot;
__u8 rsvd0;
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 mtu_idx:4;
__u8 iff:4;
#else
__u8 iff:4;
__u8 mtu_idx:4;
#endif
__be16 rsvd2;
__be16 rsvd3;
__u8 src_mac1[6];
__be16 rsvd4;
__u8 src_mac0[6];
};
struct cpl_smt_write_rpl {
RSS_HDR union opcode_tid ot;
__u8 status;
__u8 rsvd[3];
};
struct cpl_smt_read_req {
WR_HDR;
union opcode_tid ot;
__u8 rsvd0;
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8:4;
__u8 iff:4;
#else
__u8 iff:4;
__u8:4;
#endif
__be16 rsvd2;
};
struct cpl_smt_read_rpl {
RSS_HDR union opcode_tid ot;
__u8 status;
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 mtu_idx:4;
__u8:4;
#else
__u8:4;
__u8 mtu_idx:4;
#endif
__be16 rsvd2;
__be16 rsvd3;
__u8 src_mac1[6];
__be16 rsvd4;
__u8 src_mac0[6];
};
struct cpl_rte_delete_req {
WR_HDR;
union opcode_tid ot;
__be32 params;
};
/* { cpl_rte_delete_req, cpl_rte_read_req }.params fields */
#define S_RTE_REQ_LUT_IX 8
#define M_RTE_REQ_LUT_IX 0x7FF
#define V_RTE_REQ_LUT_IX(x) ((x) << S_RTE_REQ_LUT_IX)
#define G_RTE_REQ_LUT_IX(x) (((x) >> S_RTE_REQ_LUT_IX) & M_RTE_REQ_LUT_IX)
#define S_RTE_REQ_LUT_BASE 19
#define M_RTE_REQ_LUT_BASE 0x7FF
#define V_RTE_REQ_LUT_BASE(x) ((x) << S_RTE_REQ_LUT_BASE)
#define G_RTE_REQ_LUT_BASE(x) (((x) >> S_RTE_REQ_LUT_BASE) & M_RTE_REQ_LUT_BASE)
#define S_RTE_READ_REQ_SELECT 31
#define V_RTE_READ_REQ_SELECT(x) ((x) << S_RTE_READ_REQ_SELECT)
#define F_RTE_READ_REQ_SELECT V_RTE_READ_REQ_SELECT(1U)
struct cpl_rte_delete_rpl {
RSS_HDR union opcode_tid ot;
__u8 status;
__u8 rsvd[3];
};
struct cpl_rte_write_req {
WR_HDR;
union opcode_tid ot;
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8:6;
__u8 write_tcam:1;
__u8 write_l2t_lut:1;
#else
__u8 write_l2t_lut:1;
__u8 write_tcam:1;
__u8:6;
#endif
__u8 rsvd[3];
__be32 lut_params;
__be16 rsvd2;
__be16 l2t_idx;
__be32 netmask;
__be32 faddr;
};
/* cpl_rte_write_req.lut_params fields */
#define S_RTE_WRITE_REQ_LUT_IX 10
#define M_RTE_WRITE_REQ_LUT_IX 0x7FF
#define V_RTE_WRITE_REQ_LUT_IX(x) ((x) << S_RTE_WRITE_REQ_LUT_IX)
#define G_RTE_WRITE_REQ_LUT_IX(x) (((x) >> S_RTE_WRITE_REQ_LUT_IX) & M_RTE_WRITE_REQ_LUT_IX)
#define S_RTE_WRITE_REQ_LUT_BASE 21
#define M_RTE_WRITE_REQ_LUT_BASE 0x7FF
#define V_RTE_WRITE_REQ_LUT_BASE(x) ((x) << S_RTE_WRITE_REQ_LUT_BASE)
#define G_RTE_WRITE_REQ_LUT_BASE(x) (((x) >> S_RTE_WRITE_REQ_LUT_BASE) & M_RTE_WRITE_REQ_LUT_BASE)
struct cpl_rte_write_rpl {
RSS_HDR union opcode_tid ot;
__u8 status;
__u8 rsvd[3];
};
struct cpl_rte_read_req {
WR_HDR;
union opcode_tid ot;
__be32 params;
};
struct cpl_rte_read_rpl {
RSS_HDR union opcode_tid ot;
__u8 status;
__u8 rsvd0;
__be16 l2t_idx;
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8:7;
__u8 select:1;
#else
__u8 select:1;
__u8:7;
#endif
__u8 rsvd2[3];
__be32 addr;
};
struct cpl_tid_release {
WR_HDR;
union opcode_tid ot;
__be32 rsvd;
};
struct cpl_barrier {
WR_HDR;
__u8 opcode;
__u8 rsvd[7];
};
struct cpl_rdma_read_req {
__u8 opcode;
__u8 rsvd[15];
};
struct cpl_rdma_terminate {
#ifdef CHELSIO_FW
__u8 opcode;
__u8 rsvd[2];
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 rspq:3;
__u8:5;
#else
__u8:5;
__u8 rspq:3;
#endif
__be32 tid_len;
#endif
__be32 msn;
__be32 mo;
__u8 data[0];
};
/* cpl_rdma_terminate.tid_len fields */
#define S_FLIT_CNT 0
#define M_FLIT_CNT 0xFF
#define V_FLIT_CNT(x) ((x) << S_FLIT_CNT)
#define G_FLIT_CNT(x) (((x) >> S_FLIT_CNT) & M_FLIT_CNT)
#define S_TERM_TID 8
#define M_TERM_TID 0xFFFFF
#define V_TERM_TID(x) ((x) << S_TERM_TID)
#define G_TERM_TID(x) (((x) >> S_TERM_TID) & M_TERM_TID)
#endif /* T3_CPL_H */
/*
* Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _T3CDEV_H_
#define _T3CDEV_H_
#include <linux/list.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <net/neighbour.h>
#define T3CNAMSIZ 16
/* Get the t3cdev associated with a net_device */
#define T3CDEV(netdev) ((struct t3cdev *)((netdev)->priv))
struct cxgb3_client;
enum t3ctype {
T3A = 0,
T3B
};
struct t3cdev {
char name[T3CNAMSIZ]; /* T3C device name */
enum t3ctype type;
struct list_head ofld_dev_list; /* for list linking */
struct net_device *lldev; /* LL dev associated with T3C messages */
struct proc_dir_entry *proc_dir; /* root of proc dir for this T3C */
int (*send)(struct t3cdev *dev, struct sk_buff *skb);
int (*recv)(struct t3cdev *dev, struct sk_buff **skb, int n);
int (*ctl)(struct t3cdev *dev, unsigned int req, void *data);
void (*neigh_update)(struct t3cdev *dev, struct neighbour *neigh);
void *priv; /* driver private data */
void *l2opt; /* optional layer 2 data */
void *l3opt; /* optional layer 3 data */
void *l4opt; /* optional layer 4 data */
void *ulp; /* ulp stuff */
};
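/*
 * Illustrative sketch only (not part of this commit): upper-layer offload
 * code reaches the low-level driver through the hooks above, e.g. queuing a
 * CPL message via the send hook.  The wrapper below is hypothetical and
 * exists only to show the calling convention.
 */
static inline int example_t3c_send(struct t3cdev *dev, struct sk_buff *skb)
{
	return dev->send(dev, skb);
}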
#endif /* _T3CDEV_H_ */
/*****************************************************************************
* *
* File: *
* version.h *
* *
* Description: *
* Chelsio driver version defines. *
* *
* Copyright (c) 2003 - 2006 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* http://www.chelsio.com *
* *
****************************************************************************/
/* $Date: 2006/10/31 18:57:51 $ $RCSfile: version.h,v $ $Revision: 1.3 $ */
#ifndef __CHELSIO_VERSION_H
#define __CHELSIO_VERSION_H
#define DRV_DESC "Chelsio T3 Network Driver"
#define DRV_NAME "cxgb3"
/* Driver version */
#define DRV_VERSION "1.0"
#endif /* __CHELSIO_VERSION_H */
/*
* This file is part of the Chelsio T3 Ethernet driver.
*
* Copyright (C) 2005-2006 Chelsio Communications. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
* release for licensing terms and conditions.
*/
#include "common.h"
/* VSC8211 PHY specific registers. */
enum {
VSC8211_INTR_ENABLE = 25,
VSC8211_INTR_STATUS = 26,
VSC8211_AUX_CTRL_STAT = 28,
};
enum {
VSC_INTR_RX_ERR = 1 << 0,
VSC_INTR_MS_ERR = 1 << 1, /* master/slave resolution error */
VSC_INTR_CABLE = 1 << 2, /* cable impairment */
VSC_INTR_FALSE_CARR = 1 << 3, /* false carrier */
VSC_INTR_MEDIA_CHG = 1 << 4, /* AMS media change */
VSC_INTR_RX_FIFO = 1 << 5, /* Rx FIFO over/underflow */
VSC_INTR_TX_FIFO = 1 << 6, /* Tx FIFO over/underflow */
VSC_INTR_DESCRAMBL = 1 << 7, /* descrambler lock-lost */
VSC_INTR_SYMBOL_ERR = 1 << 8, /* symbol error */
VSC_INTR_NEG_DONE = 1 << 10, /* autoneg done */
VSC_INTR_NEG_ERR = 1 << 11, /* autoneg error */
VSC_INTR_LINK_CHG = 1 << 13, /* link change */
VSC_INTR_ENABLE = 1 << 15, /* interrupt enable */
};
#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \
VSC_INTR_NEG_DONE)
#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \
VSC_INTR_ENABLE)
/* PHY specific auxiliary control & status register fields */
#define S_ACSR_ACTIPHY_TMR 0
#define M_ACSR_ACTIPHY_TMR 0x3
#define V_ACSR_ACTIPHY_TMR(x) ((x) << S_ACSR_ACTIPHY_TMR)
#define S_ACSR_SPEED 3
#define M_ACSR_SPEED 0x3
#define G_ACSR_SPEED(x) (((x) >> S_ACSR_SPEED) & M_ACSR_SPEED)
#define S_ACSR_DUPLEX 5
#define F_ACSR_DUPLEX (1 << S_ACSR_DUPLEX)
#define S_ACSR_ACTIPHY 6
#define F_ACSR_ACTIPHY (1 << S_ACSR_ACTIPHY)
/*
* Reset the PHY. This PHY completes reset immediately so we never wait.
*/
static int vsc8211_reset(struct cphy *cphy, int wait)
{
return t3_phy_reset(cphy, 0, 0);
}
static int vsc8211_intr_enable(struct cphy *cphy)
{
return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, INTR_MASK);
}
static int vsc8211_intr_disable(struct cphy *cphy)
{
return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, 0);
}
static int vsc8211_intr_clear(struct cphy *cphy)
{
u32 val;
/* Clear PHY interrupts by reading the register. */
return mdio_read(cphy, 0, VSC8211_INTR_STATUS, &val);
}
static int vsc8211_autoneg_enable(struct cphy *cphy)
{
return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
BMCR_ANENABLE | BMCR_ANRESTART);
}
static int vsc8211_autoneg_restart(struct cphy *cphy)
{
return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
BMCR_ANRESTART);
}
static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok,
int *speed, int *duplex, int *fc)
{
unsigned int bmcr, status, lpa, adv;
int err, sp = -1, dplx = -1, pause = 0;
err = mdio_read(cphy, 0, MII_BMCR, &bmcr);
if (!err)
err = mdio_read(cphy, 0, MII_BMSR, &status);
if (err)
return err;
if (link_ok) {
/*
* BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
* once more to get the current link state.
*/
if (!(status & BMSR_LSTATUS))
err = mdio_read(cphy, 0, MII_BMSR, &status);
if (err)
return err;
*link_ok = (status & BMSR_LSTATUS) != 0;
}
if (!(bmcr & BMCR_ANENABLE)) {
dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
if (bmcr & BMCR_SPEED1000)
sp = SPEED_1000;
else if (bmcr & BMCR_SPEED100)
sp = SPEED_100;
else
sp = SPEED_10;
} else if (status & BMSR_ANEGCOMPLETE) {
err = mdio_read(cphy, 0, VSC8211_AUX_CTRL_STAT, &status);
if (err)
return err;
dplx = (status & F_ACSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
sp = G_ACSR_SPEED(status);
if (sp == 0)
sp = SPEED_10;
else if (sp == 1)
sp = SPEED_100;
else
sp = SPEED_1000;
if (fc && dplx == DUPLEX_FULL) {
err = mdio_read(cphy, 0, MII_LPA, &lpa);
if (!err)
err = mdio_read(cphy, 0, MII_ADVERTISE, &adv);
if (err)
return err;
if (lpa & adv & ADVERTISE_PAUSE_CAP)
pause = PAUSE_RX | PAUSE_TX;
else if ((lpa & ADVERTISE_PAUSE_CAP) &&
(lpa & ADVERTISE_PAUSE_ASYM) &&
(adv & ADVERTISE_PAUSE_ASYM))
pause = PAUSE_TX;
else if ((lpa & ADVERTISE_PAUSE_ASYM) &&
(adv & ADVERTISE_PAUSE_CAP))
pause = PAUSE_RX;
}
}
if (speed)
*speed = sp;
if (duplex)
*duplex = dplx;
if (fc)
*fc = pause;
return 0;
}
static int vsc8211_power_down(struct cphy *cphy, int enable)
{
return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN,
enable ? BMCR_PDOWN : 0);
}
static int vsc8211_intr_handler(struct cphy *cphy)
{
unsigned int cause;
int err, cphy_cause = 0;
err = mdio_read(cphy, 0, VSC8211_INTR_STATUS, &cause);
if (err)
return err;
cause &= INTR_MASK;
if (cause & CFG_CHG_INTR_MASK)
cphy_cause |= cphy_cause_link_change;
if (cause & (VSC_INTR_RX_FIFO | VSC_INTR_TX_FIFO))
cphy_cause |= cphy_cause_fifo_error;
return cphy_cause;
}
static struct cphy_ops vsc8211_ops = {
.reset = vsc8211_reset,
.intr_enable = vsc8211_intr_enable,
.intr_disable = vsc8211_intr_disable,
.intr_clear = vsc8211_intr_clear,
.intr_handler = vsc8211_intr_handler,
.autoneg_enable = vsc8211_autoneg_enable,
.autoneg_restart = vsc8211_autoneg_restart,
.advertise = t3_phy_advertise,
.set_speed_duplex = t3_set_phy_speed_duplex,
.get_link_status = vsc8211_get_link_status,
.power_down = vsc8211_power_down,
};
void t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops)
{
cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops);
}
/*
* This file is part of the Chelsio T3 Ethernet driver.
*
* Copyright (C) 2005-2006 Chelsio Communications. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
* release for licensing terms and conditions.
*/
#include "common.h"
#include "regs.h"
/*
* # of exact address filters. The first one is used for the station address,
* the rest are available for multicast addresses.
*/
#define EXACT_ADDR_FILTERS 8
static inline int macidx(const struct cmac *mac)
{
return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR);
}
static void xaui_serdes_reset(struct cmac *mac)
{
static const unsigned int clear[] = {
F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
};
int i;
struct adapter *adap = mac->adapter;
u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;
t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
F_RESETPLL23 | F_RESETPLL01);
t3_read_reg(adap, ctrl);
udelay(15);
for (i = 0; i < ARRAY_SIZE(clear); i++) {
t3_set_reg_field(adap, ctrl, clear[i], 0);
udelay(15);
}
}
void t3b_pcs_reset(struct cmac *mac)
{
t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
F_PCS_RESET_, 0);
udelay(20);
t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
F_PCS_RESET_);
}
int t3_mac_reset(struct cmac *mac)
{
static const struct addr_val_pair mac_reset_avp[] = {
{A_XGM_TX_CTRL, 0},
{A_XGM_RX_CTRL, 0},
{A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
F_RMFCS | F_ENJUMBO | F_ENHASHMCAST},
{A_XGM_RX_HASH_LOW, 0},
{A_XGM_RX_HASH_HIGH, 0},
{A_XGM_RX_EXACT_MATCH_LOW_1, 0},
{A_XGM_RX_EXACT_MATCH_LOW_2, 0},
{A_XGM_RX_EXACT_MATCH_LOW_3, 0},
{A_XGM_RX_EXACT_MATCH_LOW_4, 0},
{A_XGM_RX_EXACT_MATCH_LOW_5, 0},
{A_XGM_RX_EXACT_MATCH_LOW_6, 0},
{A_XGM_RX_EXACT_MATCH_LOW_7, 0},
{A_XGM_RX_EXACT_MATCH_LOW_8, 0},
{A_XGM_STAT_CTRL, F_CLRSTATS}
};
u32 val;
struct adapter *adap = mac->adapter;
unsigned int oft = mac->offset;
t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
F_RXSTRFRWRD | F_DISERRFRAMES,
uses_xaui(adap) ? 0 : F_RXSTRFRWRD);
if (uses_xaui(adap)) {
if (adap->params.rev == 0) {
t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
F_RXENABLE | F_TXENABLE);
if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
F_CMULOCK, 1, 5, 2)) {
CH_ERR(adap,
"MAC %d XAUI SERDES CMU lock failed\n",
macidx(mac));
return -1;
}
t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
F_SERDESRESET_);
} else
xaui_serdes_reset(mac);
}
if (adap->params.rev > 0)
t3_write_reg(adap, A_XGM_PAUSE_TIMER + oft, 0xf000);
val = F_MAC_RESET_;
if (is_10G(adap))
val |= F_PCS_RESET_;
else if (uses_xaui(adap))
val |= F_PCS_RESET_ | F_XG2G_RESET_;
else
val |= F_RGMII_RESET_ | F_XG2G_RESET_;
t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
if ((val & F_PCS_RESET_) && adap->params.rev) {
msleep(1);
t3b_pcs_reset(mac);
}
memset(&mac->stats, 0, sizeof(mac->stats));
return 0;
}
/*
* Set the exact match register 'idx' to recognize the given Ethernet address.
*/
static void set_addr_filter(struct cmac *mac, int idx, const u8 *addr)
{
u32 addr_lo, addr_hi;
unsigned int oft = mac->offset + idx * 8;
addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
addr_hi = (addr[5] << 8) | addr[4];
t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
}
/* Set one of the station's unicast MAC addresses. */
int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
{
if (idx >= mac->nucast)
return -EINVAL;
set_addr_filter(mac, idx, addr);
return 0;
}
/*
* Specify the number of exact address filters that should be reserved for
* unicast addresses. Caller should reload the unicast and multicast addresses
* after calling this.
*/
int t3_mac_set_num_ucast(struct cmac *mac, int n)
{
if (n > EXACT_ADDR_FILTERS)
return -EINVAL;
mac->nucast = n;
return 0;
}
/* Calculate the RX hash filter index of an Ethernet address */
static int hash_hw_addr(const u8 *addr)
{
int hash = 0, octet, bit, i = 0, c;
for (octet = 0; octet < 6; ++octet)
for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
hash ^= (c & 1) << i;
if (++i == 6)
i = 0;
}
return hash;
}
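/*
 * Note (descriptive, not functional): the XOR folding above yields a 6-bit
 * value (0-63) that selects one bit in the 64-bit {A_XGM_RX_HASH_HIGH,
 * A_XGM_RX_HASH_LOW} register pair, as used by t3_mac_set_rx_mode() below.
 */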
int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm)
{
u32 val, hash_lo, hash_hi;
struct adapter *adap = mac->adapter;
unsigned int oft = mac->offset;
val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
if (rm->dev->flags & IFF_PROMISC)
val |= F_COPYALLFRAMES;
t3_write_reg(adap, A_XGM_RX_CFG + oft, val);
if (rm->dev->flags & IFF_ALLMULTI)
hash_lo = hash_hi = 0xffffffff;
else {
u8 *addr;
int exact_addr_idx = mac->nucast;
hash_lo = hash_hi = 0;
while ((addr = t3_get_next_mcaddr(rm)))
if (exact_addr_idx < EXACT_ADDR_FILTERS)
set_addr_filter(mac, exact_addr_idx++, addr);
else {
int hash = hash_hw_addr(addr);
if (hash < 32)
hash_lo |= (1 << hash);
else
hash_hi |= (1 << (hash - 32));
}
}
t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
return 0;
}
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
{
int hwm, lwm;
unsigned int thres, v;
struct adapter *adap = mac->adapter;
/*
* MAX_FRAME_SIZE includes header + FCS, mtu doesn't. The HW max
* packet size register includes header, but not FCS.
*/
mtu += 14;
if (mtu > MAX_FRAME_SIZE - 4)
return -EINVAL;
t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
/*
* Adjust the PAUSE frame watermarks. We always set the LWM, and the
* HWM only if flow-control is enabled.
*/
hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, MAC_RXFIFO_SIZE / 2U);
hwm = min(hwm, 3 * MAC_RXFIFO_SIZE / 4 + 1024);
lwm = hwm - 1024;
v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
v |= V_RXFIFOPAUSELWM(lwm / 8);
if (G_RXFIFOPAUSEHWM(v))
v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
V_RXFIFOPAUSEHWM(hwm / 8);
t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);
/* Adjust the TX FIFO threshold based on the MTU */
thres = (adap->params.vpd.cclk * 1000) / 15625;
thres = (thres * mtu) / 1000;
if (is_10G(adap))
thres /= 10;
thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
thres = max(thres, 8U); /* need at least 8 */
t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
V_TXFIFOTHRESH(M_TXFIFOTHRESH), V_TXFIFOTHRESH(thres));
return 0;
}
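/*
 * Worked example (illustrative; assumes a 32 KB MAC Rx FIFO, i.e.
 * MAC_RXFIFO_SIZE == 32768): for a standard 1500-byte MTU, mtu becomes 1514
 * after adding the Ethernet header, so
 *	hwm = min(max(32768 - 3 * 1514, 16384), 3 * 32768 / 4 + 1024) = 25600
 *	lwm = 25600 - 1024 = 24576
 * and, in units of 8 bytes, the watermarks are programmed as 3200 (HWM, only
 * when flow control is enabled) and 3072 (LWM).
 */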
int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
{
u32 val;
struct adapter *adap = mac->adapter;
unsigned int oft = mac->offset;
if (duplex >= 0 && duplex != DUPLEX_FULL)
return -EINVAL;
if (speed >= 0) {
if (speed == SPEED_10)
val = V_PORTSPEED(0);
else if (speed == SPEED_100)
val = V_PORTSPEED(1);
else if (speed == SPEED_1000)
val = V_PORTSPEED(2);
else if (speed == SPEED_10000)
val = V_PORTSPEED(3);
else
return -EINVAL;
t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
V_PORTSPEED(M_PORTSPEED), val);
}
val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
if (fc & PAUSE_TX)
val |= V_RXFIFOPAUSEHWM(G_RXFIFOPAUSELWM(val) + 128); /* +1KB */
t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
(fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
return 0;
}
int t3_mac_enable(struct cmac *mac, int which)
{
int idx = macidx(mac);
struct adapter *adap = mac->adapter;
unsigned int oft = mac->offset;
if (which & MAC_DIRECTION_TX) {
t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
t3_write_reg(adap, A_TP_PIO_DATA, 0xbf000001);
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
}
if (which & MAC_DIRECTION_RX)
t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
return 0;
}
int t3_mac_disable(struct cmac *mac, int which)
{
int idx = macidx(mac);
struct adapter *adap = mac->adapter;
if (which & MAC_DIRECTION_TX) {
t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
t3_write_reg(adap, A_TP_PIO_DATA, 0xc000001f);
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 0);
}
if (which & MAC_DIRECTION_RX)
t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
return 0;
}
/*
* This function is called periodically to accumulate the current values of the
* RMON counters into the port statistics. Since the packet counters are only
* 32 bits they can overflow in ~286 secs at 10G, so the function should be
 * called more frequently than that. The byte counters are 45 bits wide and
 * would overflow in ~7.8 hours.
*/
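/*
 * Derivation of the figures above (illustrative): at 10 Gb/s the worst case
 * is roughly 14.88 million minimum-size frames per second, so a 32-bit frame
 * counter wraps after about 2^32 / 14.88e6 ~ 288 seconds, while a 45-bit
 * byte counter at 1.25 GB/s wraps after about 2^45 / 1.25e9 ~ 28000 seconds,
 * i.e. ~7.8 hours.
 */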
const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
{
#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
#define RMON_UPDATE(mac, name, reg) \
(mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
(mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)
u32 v, lo;
RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES);
RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES);
RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES);
RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES);
RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES);
RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);
RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);
mac->stats.rx_too_long += RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);
RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST);
RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST);
RMON_UPDATE(mac, tx_pause, TX_PAUSE);
/* This counts error frames in general (bad FCS, underrun, etc.). */
RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);
RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);
/* The next stat isn't clear-on-read. */
t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
lo = (u32) mac->stats.rx_cong_drops;
mac->stats.rx_cong_drops += (u64) (v - lo);
return &mac->stats;
}