Commit 078086f3 authored by Rasesh Mody, committed by David S. Miller

bna: ENET and Tx Rx Redesign Enablement

Change details:
This patch contains additional structure and function definition changes
that are required to enable the new msgq/enet/txrx redesign introduced
by the previous 4 patches.
 - Structure and function definition changes to header files as a result
   of the Ethport, Enet, IOCEth, Tx and Rx redesign.
 - Ethtool changes to use the new Enet functions and definitions.
 - Set the number of Tx and Rx queues based on the underlying hardware. Define
   separate macros for the maximum and supported numbers of Tx and Rx queues
   based on the underlying hardware. Take the VLAN header into account in the
   MTU calculation. Default to INTx mode when pci_enable_msix() fails. Set a
   bit in the Rx poll routine, and check and wait for that bit to be cleared
   in the cleanup routine before proceeding.
 - The Tx and Rx coalescing settings are programmed in steps of 5 us. Values
   that are not divisible by 5 were rounded down to the next lower multiple,
   which turned values of 1 to 4 into 0, an invalid setting (see the sketch
   after this list). When creating Rx and Tx objects, we are currently
   assigning the default values of the Rx and Tx coalescing_timeo. If these
   values are changed in the driver, the change is lost during operations
   such as an MTU change. To avoid that, pass the configured value of
   coalescing_timeo before Rx and Tx object creation. Fix
   bnad_tx_coalescing_timeo_set() so it applies to all the Tx objects.
 - Reorganize the uninitialization path in case of pci_probe failure.
 - Hardware clock setup changes to pass the ASIC generation, port modes and
   ASIC mode to firmware as part of the firmware boot parameters.
 - FW mailbox interface changes to define ASIC-specific mailbox interfaces.
   The h/w mailbox interfaces take 8-bit FIDs and a 2-bit port id for the
   owner. Cleaned up mailbox definitions and usage for new and old HW.
   Eliminated usage of the ASIC ID. MSI-X vector assignment and programming
   are done by firmware. Fixed host offsets for the CPE/RME queue registers.
 - Implement a polling mechanism for FW readiness, replacing the current
   interrupt-based FW READY method. The timer-based poll routine in the IOC
   queries the ioc_fwstate register for a state change in FW and sends the
   READY event. Removed the infrastructure needed to support the mbox READY
   event from FW as well as from the IOC code.
 - Move FW initialization to HW initialization. Handle the case where the
   PCI mapping goes away while the IOCPF state machine is waiting for the
   semaphore.
 - Add an IOC mbox callback to the client indicating that the command has
   been sent.
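
A minimal sketch of the coalescing rounding fix described above; the
macro and helper names here are illustrative, not the driver's actual
code:

	#define COALESCING_TICK_US	5

	/* Convert a requested timeout in microseconds to 5 us ticks.
	 * Plain integer division truncates 1-4 us to 0 ticks, an invalid
	 * setting, so small non-zero requests are clamped to one tick.
	 */
	static unsigned int usecs_to_coalescing_ticks(unsigned int usecs)
	{
		unsigned int ticks = usecs / COALESCING_TICK_US;

		if (ticks == 0 && usecs != 0)
			ticks = 1;
		return ticks;
	}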
Signed-off-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6849c6b3
@@ -5,7 +5,7 @@
 obj-$(CONFIG_BNA) += bna.o
-bna-objs := bnad.o bnad_ethtool.o bna_ctrl.o bna_txrx.o
+bna-objs := bnad.o bnad_ethtool.o bna_enet.o bna_tx_rx.o
 bna-objs += bfa_msgq.o bfa_ioc.o bfa_ioc_ct.o bfa_cee.o
 bna-objs += cna_fwimg.o
...
@@ -124,6 +124,7 @@ enum bfa_ioc_state {
 	BFA_IOC_DISABLED	= 10,	/*!< IOC is disabled */
 	BFA_IOC_FWMISMATCH	= 11,	/*!< IOC f/w different from drivers */
 	BFA_IOC_ENABLING	= 12,	/*!< IOC is being enabled */
+	BFA_IOC_HWFAIL		= 13,	/*!< PCI mapping doesn't exist */
 };

 /**
@@ -180,7 +181,18 @@ struct bfa_ioc_attr {
 	struct bfa_ioc_driver_attr driver_attr;	/*!< driver attr */
 	struct bfa_ioc_pci_attr pci_attr;
 	u8	port_id;	/*!< port number */
-	u8	rsvd[7];	/*!< 64bit align */
+	u8	port_mode;	/*!< enum bfa_mode */
+	u8	cap_bm;		/*!< capability */
+	u8	port_mode_cfg;	/*!< enum bfa_mode */
+	u8	rsvd[4];	/*!< 64bit align */
+};
+
+/**
+ * Adapter capability mask definition
+ */
+enum {
+	BFA_CM_HBA = 0x01,
+	BFA_CM_CNA = 0x02,
 };

 /**
@@ -228,7 +240,7 @@ struct bfa_mfg_block {
 	mac_t	mfg_mac;	/*!< mac address */
 	u8	num_mac;	/*!< number of mac addresses */
 	u8	rsv2;
-	u32	mfg_type;	/*!< card type */
+	u32	card_type;	/*!< card type */
 	u8	rsv3[108];
 	u8	md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
 };
@@ -242,5 +254,12 @@ struct bfa_mfg_block {
 #define bfa_asic_id_ct(devid)			\
 	((devid) == PCI_DEVICE_ID_BROCADE_CT ||	\
 	 (devid) == PCI_DEVICE_ID_BROCADE_CT_FC)
+#define bfa_asic_id_ctc(devid) (bfa_asic_id_ct(devid))
+
+enum bfa_mode {
+	BFA_MODE_HBA	= 1,
+	BFA_MODE_CNA	= 2,
+	BFA_MODE_NIC	= 3
+};

 #endif /* __BFA_DEFS_H__ */
@@ -19,11 +19,12 @@
 #define __BFA_DEFS_MFG_COMM_H__

 #include "cna.h"
+#include "bfa_defs.h"

 /**
  * Manufacturing block version
  */
-#define BFA_MFG_VERSION		2
+#define BFA_MFG_VERSION		3
 #define BFA_MFG_VERSION_UNINIT	0xFF

 /**
@@ -95,27 +96,14 @@ enum {
 	 (type) == BFA_MFG_TYPE_CNA10P1 ||	\
 	 bfa_mfg_is_mezz(type)))

-#define bfa_mfg_adapter_prop_init_flash(card_type, prop)	\
+#define bfa_mfg_adapter_prop_init_flash_ct(mfgblk, prop)	\
 do {								\
-	switch ((card_type)) {					\
-	case BFA_MFG_TYPE_FC8P2:				\
+	switch ((mfgblk)->card_type) {				\
 	case BFA_MFG_TYPE_JAYHAWK:				\
 	case BFA_MFG_TYPE_ASTRA:				\
 		(prop) = BFI_ADAPTER_SETP(NPORTS, 2) |		\
 			BFI_ADAPTER_SETP(SPEED, 8);		\
 		break;						\
-	case BFA_MFG_TYPE_FC8P1:				\
-		(prop) = BFI_ADAPTER_SETP(NPORTS, 1) |		\
-			BFI_ADAPTER_SETP(SPEED, 8);		\
-		break;						\
-	case BFA_MFG_TYPE_FC4P2:				\
-		(prop) = BFI_ADAPTER_SETP(NPORTS, 2) |		\
-			BFI_ADAPTER_SETP(SPEED, 4);		\
-		break;						\
-	case BFA_MFG_TYPE_FC4P1:				\
-		(prop) = BFI_ADAPTER_SETP(NPORTS, 1) |		\
-			BFI_ADAPTER_SETP(SPEED, 4);		\
-		break;						\
 	case BFA_MFG_TYPE_CNA10P2:				\
 	case BFA_MFG_TYPE_WANCHESE:				\
 	case BFA_MFG_TYPE_LIGHTNING_P0:				\
...
... (diff collapsed) ...
@@ -27,6 +27,7 @@
 #define BFA_IOC_HWSEM_TOV	500	/* msecs */
 #define BFA_IOC_HB_TOV		500	/* msecs */
 #define BFA_IOC_HWINIT_MAX	5
+#define BFA_IOC_POLL_TOV	200	/* msecs */

 /**
  * PCI device information required by IOC
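
A hedged sketch of how the new BFA_IOC_POLL_TOV drives the timer-based
FW-ready poll described in the commit message; apart from
BFA_IOC_POLL_TOV itself, the field and helper names here are assumptions:

	/* Re-armed every BFA_IOC_POLL_TOV ms: re-read the firmware state
	 * register and deliver a READY event on a state change.
	 */
	static void iocpf_poll_timeout(struct bfa_ioc *ioc)
	{
		u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);

		if (fwstate != ioc->iocpf.poll_fwstate) {	/* assumed field */
			ioc->iocpf.poll_fwstate = fwstate;
			iocpf_send_ready_event(ioc);		/* hypothetical helper */
		} else {
			mod_timer(&ioc->ioc_timer, jiffies +	/* assumed timer */
				  msecs_to_jiffies(BFA_IOC_POLL_TOV));
		}
	}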
@@ -169,8 +170,9 @@ struct bfa_ioc_hbfail_notify {
 struct bfa_iocpf {
 	bfa_fsm_t	fsm;
 	struct bfa_ioc	*ioc;
-	u32		retry_count;
+	bool		fw_mismatch_notified;
 	bool		auto_recover;
+	u32		poll_time;
 };

 struct bfa_ioc {
@@ -186,12 +188,10 @@ struct bfa_ioc {
 	void		*dbg_fwsave;
 	int		dbg_fwsave_len;
 	bool		dbg_fwsave_once;
-	enum bfi_mclass	ioc_mc;
+	enum bfi_pcifn_class clscode;
 	struct bfa_ioc_regs ioc_regs;
 	struct bfa_ioc_drv_stats stats;
 	bool		fcmode;
-	bool		ctdev;
-	bool		cna;
 	bool		pllinit;
 	bool		stats_busy;	/*!< outstanding stats */
 	u8		port_id;
@@ -202,10 +202,18 @@ struct bfa_ioc {
 	struct bfa_ioc_mbox_mod mbox_mod;
 	struct bfa_ioc_hwif *ioc_hwif;
 	struct bfa_iocpf iocpf;
+	enum bfi_asic_gen asic_gen;
+	enum bfi_asic_mode asic_mode;
+	enum bfi_port_mode port0_mode;
+	enum bfi_port_mode port1_mode;
+	enum bfa_mode port_mode;
+	u8		ad_cap_bm;	/*!< adapter cap bit mask */
+	u8		port_mode_cfg;	/*!< config port mode */
 };

 struct bfa_ioc_hwif {
-	enum bfa_status (*ioc_pll_init) (void __iomem *rb, bool fcmode);
+	enum bfa_status (*ioc_pll_init) (void __iomem *rb,
+					 enum bfi_asic_mode m);
 	bool		(*ioc_firmware_lock)	(struct bfa_ioc *ioc);
 	void		(*ioc_firmware_unlock)	(struct bfa_ioc *ioc);
 	void		(*ioc_reg_init)	(struct bfa_ioc *ioc);
@@ -219,12 +227,14 @@ struct bfa_ioc_hwif {
 	void		(*ioc_sync_leave)	(struct bfa_ioc *ioc);
 	void		(*ioc_sync_ack)		(struct bfa_ioc *ioc);
 	bool		(*ioc_sync_complete)	(struct bfa_ioc *ioc);
+	bool		(*ioc_lpu_read_stat)	(struct bfa_ioc *ioc);
 };

 #define bfa_ioc_pcifn(__ioc)	((__ioc)->pcidev.pci_func)
 #define bfa_ioc_devid(__ioc)	((__ioc)->pcidev.device_id)
 #define bfa_ioc_bar0(__ioc)	((__ioc)->pcidev.pci_bar_kva)
 #define bfa_ioc_portid(__ioc)	((__ioc)->port_id)
+#define bfa_ioc_asic_gen(__ioc)	((__ioc)->asic_gen)
 #define bfa_ioc_fetch_stats(__ioc, __stats) \
 		(((__stats)->drv_stats) = (__ioc)->stats)
 #define bfa_ioc_clr_stats(__ioc)	\
@@ -245,7 +255,8 @@ struct bfa_ioc_hwif {
 	 (((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) : \
 	 BFI_IMAGE_CB_FC)
 #define BFA_IOC_FW_SMEM_SIZE(__ioc)	\
-	(((__ioc)->ctdev) ? BFI_SMEM_CT_SIZE : BFI_SMEM_CB_SIZE)
+	((bfa_ioc_asic_gen(__ioc) == BFI_ASIC_GEN_CB)	\
+	 ? BFI_SMEM_CB_SIZE : BFI_SMEM_CT_SIZE)
 #define BFA_IOC_FLASH_CHUNK_NO(off)	(off / BFI_FLASH_CHUNK_SZ_WORDS)
 #define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
 #define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
@@ -266,13 +277,18 @@ void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
 #define bfa_ioc_pll_init_asic(__ioc) \
 	((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
-			   (__ioc)->fcmode))
+			   (__ioc)->asic_mode))

 #define bfa_ioc_isr_mode_set(__ioc, __msix)	\
 			((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
 #define bfa_ioc_ownership_reset(__ioc)		\
 			((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
+#define bfa_ioc_lpu_read_stat(__ioc) do {	\
+		if ((__ioc)->ioc_hwif->ioc_lpu_read_stat)	\
+			((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc));	\
+} while (0)

 void bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc);

 void bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa,
@@ -280,7 +296,7 @@ void bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa,
 void bfa_nw_ioc_auto_recover(bool auto_recover);
 void bfa_nw_ioc_detach(struct bfa_ioc *ioc);
 void bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
-		enum bfi_mclass mc);
+		enum bfi_pcifn_class clscode);
 u32 bfa_nw_ioc_meminfo(void);
 void bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa);
 void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
@@ -311,7 +327,7 @@ void bfa_nw_iocpf_sem_timeout(void *ioc);
 /*
  * F/W Image Size & Chunk
  */
-u32 *bfa_cb_image_get_chunk(int type, u32 off);
-u32 bfa_cb_image_get_size(int type);
+u32 *bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off);
+u32 bfa_cb_image_get_size(enum bfi_asic_gen asic_gen);

 #endif /* __BFA_IOC_H__ */
@@ -46,7 +46,8 @@ static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
-static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
+static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
+				enum bfi_asic_mode asic_mode);

 static struct bfa_ioc_hwif nw_hwif_ct;
@@ -92,7 +93,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
 	/**
 	 * If bios boot (flash based) -- do not increment usage count
 	 */
-	if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
+	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
 						BFA_IOC_FWIMG_MINSZ)
 		return true;
@@ -142,7 +143,7 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
 	/**
 	 * If bios boot (flash based) -- do not decrement usage count
 	 */
-	if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
+	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
 						BFA_IOC_FWIMG_MINSZ)
 		return;
@@ -165,22 +166,17 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
 static void
 bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
 {
-	if (ioc->cna) {
-		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
-		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
-		/* Wait for halt to take effect */
-		readl(ioc->ioc_regs.ll_halt);
-		readl(ioc->ioc_regs.alt_ll_halt);
-	} else {
-		writel(~0U, ioc->ioc_regs.err_set);
-		readl(ioc->ioc_regs.err_set);
-	}
+	writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
+	writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
+	/* Wait for halt to take effect */
+	readl(ioc->ioc_regs.ll_halt);
+	readl(ioc->ioc_regs.alt_ll_halt);
 }

 /**
  * Host to LPU mailbox message addresses
  */
-static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
+static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = {
 	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
 	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
 	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
@@ -215,9 +211,9 @@ bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
 	rb = bfa_ioc_bar0(ioc);

-	ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
-	ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
-	ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
+	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
+	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
+	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

 	if (ioc->port_id == 0) {
 		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
@@ -323,11 +319,9 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
 static void
 bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
 {
-	if (ioc->cna) {
-		bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
-		writel(0, ioc->ioc_regs.ioc_usage_reg);
-		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
-	}
+	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+	writel(0, ioc->ioc_regs.ioc_usage_reg);
+	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);

 	/*
 	 * Read the hw sem reg to make sure that it is locked
@@ -436,9 +430,10 @@ bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
 }

 static enum bfa_status
-bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
+bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
 {
 	u32	pll_sclk, pll_fclk, r32;
+	bool fcmode = (asic_mode == BFI_ASIC_MODE_FC);

 	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
 		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
...
@@ -43,17 +43,21 @@ struct bfi_mhdr {
 	u8	msg_id;		/*!< msg opcode with in the class */
 	union {
 		struct {
-			u8	rsvd;
-			u8	lpu_id;	/*!< msg destination */
+			u8	qid;
+			u8	fn_lpu;	/*!< msg destination */
 		} h2i;
 		u16	i2htok;	/*!< token in msgs to host */
 	} mtag;
 };

-#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do {		\
+#define bfi_fn_lpu(__fn, __lpu)	((__fn) << 1 | (__lpu))
+#define bfi_mhdr_2_fn(_mh)	((_mh)->mtag.h2i.fn_lpu >> 1)
+#define bfi_mhdr_2_qid(_mh)	((_mh)->mtag.h2i.qid)
+
+#define bfi_h2i_set(_mh, _mc, _op, _fn_lpu) do {	\
 	(_mh).msg_class		= (_mc);		\
 	(_mh).msg_id		= (_op);		\
-	(_mh).mtag.h2i.lpu_id	= (_lpuid);		\
+	(_mh).mtag.h2i.fn_lpu	= (_fn_lpu);		\
 } while (0)

 #define bfi_i2h_set(_mh, _mc, _op, _i2htok) do {	\
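
A worked example of the fn/lpu encoding above: bfi_fn_lpu() shifts the
function id left one bit and ORs in the LPU id, and bfi_mhdr_2_fn()
undoes the shift.

	u8 fn_lpu = bfi_fn_lpu(5, 1);	/* (5 << 1) | 1 == 0x0b */
	/* bfi_mhdr_2_fn() then recovers the function id: 0x0b >> 1 == 5 */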
@@ -148,6 +152,14 @@ struct bfi_mbmsg {
 	u32	pl[BFI_MBMSG_SZ];
 };

+/**
+ * Supported PCI function class codes (personality)
+ */
+enum bfi_pcifn_class {
+	BFI_PCIFN_CLASS_FC	= 0x0c04,
+	BFI_PCIFN_CLASS_ETH	= 0x0200,
+};
+
 /**
  * Message Classes
  */
@@ -203,6 +215,21 @@ enum bfi_mclass {
  *----------------------------------------------------------------------
  */

+/**
+ * Different asic generations
+ */
+enum bfi_asic_gen {
+	BFI_ASIC_GEN_CB		= 1,
+	BFI_ASIC_GEN_CT		= 2,
+};
+
+enum bfi_asic_mode {
+	BFI_ASIC_MODE_FC	= 1,	/* FC upto 8G speed */
+	BFI_ASIC_MODE_FC16	= 2,	/* FC upto 16G speed */
+	BFI_ASIC_MODE_ETH	= 3,	/* Ethernet ports */
+	BFI_ASIC_MODE_COMBO	= 4,	/* FC 16G and Ethernet 10G port */
+};
+
 enum bfi_ioc_h2i_msgs {
 	BFI_IOC_H2I_ENABLE_REQ		= 1,
 	BFI_IOC_H2I_DISABLE_REQ		= 2,
@@ -215,8 +242,7 @@ enum bfi_ioc_i2h_msgs {
 	BFI_IOC_I2H_ENABLE_REPLY	= BFA_I2HM(1),
 	BFI_IOC_I2H_DISABLE_REPLY	= BFA_I2HM(2),
 	BFI_IOC_I2H_GETATTR_REPLY	= BFA_I2HM(3),
-	BFI_IOC_I2H_READY_EVENT		= BFA_I2HM(4),
-	BFI_IOC_I2H_HBEAT		= BFA_I2HM(5),
+	BFI_IOC_I2H_HBEAT		= BFA_I2HM(4),
 };
/** /**
...@@ -231,7 +257,8 @@ struct bfi_ioc_attr { ...@@ -231,7 +257,8 @@ struct bfi_ioc_attr {
u64 mfg_pwwn; /*!< Mfg port wwn */ u64 mfg_pwwn; /*!< Mfg port wwn */
u64 mfg_nwwn; /*!< Mfg node wwn */ u64 mfg_nwwn; /*!< Mfg node wwn */
mac_t mfg_mac; /*!< Mfg mac */ mac_t mfg_mac; /*!< Mfg mac */
u16 rsvd_a; u8 port_mode; /* enum bfi_port_mode */
u8 rsvd_a;
u64 pwwn; u64 pwwn;
u64 nwwn; u64 nwwn;
mac_t mac; /*!< PBC or Mfg mac */ mac_t mac; /*!< PBC or Mfg mac */
@@ -284,19 +311,36 @@ struct bfi_ioc_getattr_reply {
 #define BFI_IOC_MD5SUM_SZ	4
 struct bfi_ioc_image_hdr {
 	u32	signature;	/*!< constant signature */
-	u32	rsvd_a;
+	u8	asic_gen;	/*!< asic generation */
+	u8	asic_mode;
+	u8	port0_mode;	/*!< device mode for port 0 */
+	u8	port1_mode;	/*!< device mode for port 1 */
 	u32	exec;		/*!< exec vector */
-	u32	param;		/*!< parameters */
+	u32	bootenv;	/*!< firmware boot env */
 	u32	rsvd_b[4];
 	u32	md5sum[BFI_IOC_MD5SUM_SZ];
 };

+#define BFI_FWBOOT_DEVMODE_OFF		4
+#define BFI_FWBOOT_TYPE_OFF		8
+#define BFI_FWBOOT_ENV_OFF		12
+#define BFI_FWBOOT_DEVMODE(__asic_gen, __asic_mode, __p0_mode, __p1_mode) \
+	(((u32)(__asic_gen)) << 24 |	\
+	 ((u32)(__asic_mode)) << 16 |	\
+	 ((u32)(__p0_mode)) << 8 |	\
+	 ((u32)(__p1_mode)))
+
 enum bfi_fwboot_type {
 	BFI_FWBOOT_TYPE_NORMAL	= 0,
 	BFI_FWBOOT_TYPE_FLASH	= 1,
 	BFI_FWBOOT_TYPE_MEMTEST	= 2,
 };

+enum bfi_port_mode {
+	BFI_PORT_MODE_FC	= 1,
+	BFI_PORT_MODE_ETH	= 2,
+};
+
 /**
  * BFI_IOC_I2H_READY_EVENT message
  */
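
A worked example of the boot-parameter packing above, using only the
enums introduced by this patch: a CT ASIC booted in Ethernet mode with
both ports as Ethernet packs into a single 32-bit word.

	u32 devmode = BFI_FWBOOT_DEVMODE(BFI_ASIC_GEN_CT,	/* 2 -> bits 31..24 */
					 BFI_ASIC_MODE_ETH,	/* 3 -> bits 23..16 */
					 BFI_PORT_MODE_ETH,	/* 2 -> bits 15..8  */
					 BFI_PORT_MODE_ETH);	/* 2 -> bits 7..0   */
	/* devmode == 0x02030202 */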
@@ -362,8 +406,8 @@ enum {
  */
 struct bfi_ioc_ctrl_req {
 	struct bfi_mhdr mh;
-	u8	ioc_class;
-	u8	rsvd[3];
+	u16	clscode;
+	u16	rsvd;
 	u32	tv_sec;
 };
@@ -373,7 +417,9 @@ struct bfi_ioc_ctrl_req {
 struct bfi_ioc_ctrl_reply {
 	struct bfi_mhdr mh;	/*!< Common msg header */
 	u8	status;		/*!< enable/disable status */
-	u8	rsvd[3];
+	u8	port_mode;	/*!< enum bfa_mode */
+	u8	cap_bm;		/*!< capability bit mask */
+	u8	rsvd;
 };

 #define BFI_IOC_MSGSZ   8
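
The enable reply now carries the negotiated port mode and capability
mask; a hedged sketch of a driver-side check against the BFA_CM_* bits
defined earlier (the helper name is illustrative, not the driver's):

	static bool reply_supports_cna(const struct bfi_ioc_ctrl_reply *rsp)
	{
		return (rsp->cap_bm & BFA_CM_CNA) != 0;
	}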
@@ -393,7 +439,7 @@ union bfi_ioc_h2i_msg_u {
  */
 union bfi_ioc_i2h_msg_u {
 	struct bfi_mhdr mh;
-	struct bfi_ioc_rdy_event rdy_event;
+	struct bfi_ioc_ctrl_reply fw_event;
 	u32	mboxmsg[BFI_IOC_MSGSZ];
 };
...
... (diff collapsed) ...
... (diff collapsed) ...
@@ -44,6 +44,7 @@
 #define BNAD_MAX_RXS		1
 #define BNAD_MAX_RXPS_PER_RX	16
+#define BNAD_MAX_RXQ_PER_RXP	2

 /*
  * Control structure pointed to ccb->ctrl, which
@@ -76,6 +77,8 @@ struct bnad_rx_ctrl {
 #define BNAD_STATS_TIMER_FREQ	1000	/* in msecs */
 #define BNAD_DIM_TIMER_FREQ	1000	/* in msecs */

+#define BNAD_IOCETH_TIMEOUT	10000
+
 #define BNAD_MAX_Q_DEPTH	0x10000
 #define BNAD_MIN_Q_DEPTH	0x200
@@ -93,6 +96,10 @@ struct bnad_rx_ctrl {
 #define BNAD_RXQ_REFILL		0
 #define BNAD_RXQ_STARTED	1

+/* Resource limits */
+#define BNAD_NUM_TXQ	(bnad->num_tx * bnad->num_txq_per_tx)
+#define BNAD_NUM_RXP	(bnad->num_rx * bnad->num_rxp_per_rx)
+
 /*
  * DATA STRUCTURES
  */
@@ -115,7 +122,8 @@ struct bnad_completion {
 	struct completion	tx_comp;
 	struct completion	rx_comp;
 	struct completion	stats_comp;
-	struct completion	port_comp;
+	struct completion	enet_comp;
+	struct completion	mtu_comp;

 	u8			ioc_comp_status;
 	u8			ucast_comp_status;
@@ -124,6 +132,7 @@ struct bnad_completion {
 	u8			rx_comp_status;
 	u8			stats_comp_status;
 	u8			port_comp_status;
+	u8			mtu_comp_status;
 };

 /* Tx Rx Control Stats */
@@ -145,6 +154,7 @@ struct bnad_drv_stats {
 	u64		netif_rx_dropped;

 	u64		link_toggle;
+	u64		cee_toggle;
 	u64		cee_up;

 	u64		rxp_info_alloc_failed;
@@ -174,12 +184,14 @@ struct bnad_rx_res_info {
 struct bnad_tx_info {
 	struct bna_tx *tx; /* 1:1 between tx_info & tx */
 	struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
+	u32 tx_id;
 } ____cacheline_aligned;

 struct bnad_rx_info {
 	struct bna_rx *rx; /* 1:1 between rx_info & rx */
 	struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXPS_PER_RX];
+	u32 rx_id;
 } ____cacheline_aligned;

 /* Unmap queues for Tx / Rx cleanup */
@@ -205,13 +217,18 @@ struct bnad_unmap_q {
 /* Defines for run_flags bit-mask */
 /* Set, tested & cleared using xxx_bit() functions */
 /* Values indicated bit positions */
-#define BNAD_RF_CEE_RUNNING		1
+#define BNAD_RF_CEE_RUNNING		0
+#define BNAD_RF_MTU_SET			1
 #define BNAD_RF_MBOX_IRQ_DISABLED	2
-#define BNAD_RF_RX_STARTED		3
+#define BNAD_RF_NETDEV_REGISTERED	3
 #define BNAD_RF_DIM_TIMER_RUNNING	4
 #define BNAD_RF_STATS_TIMER_RUNNING	5
-#define BNAD_RF_TX_SHUTDOWN_DELAYED	6
-#define BNAD_RF_RX_SHUTDOWN_DELAYED	7
+#define BNAD_RF_TX_PRIO_SET		6
+
+/* Define for Fast Path flags */
+/* Defined as bit positions */
+#define BNAD_FP_IN_RX_PATH	0

 struct bnad {
 	struct net_device	*netdev;
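
A hedged sketch of the Rx poll/cleanup handshake from the commit
message, built on the BNAD_FP_IN_RX_PATH bit above; the flags word and
the function names are assumptions, not the driver's actual code:

	/* The poll path marks itself busy; cleanup waits until it leaves. */
	static int rx_poll(struct bnad_rx_ctrl *rx_ctrl, int budget)
	{
		set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);	/* assumed field */
		/* ... process receive completions up to budget ... */
		clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
		return 0;
	}

	static void rx_cleanup_wait(struct bnad_rx_ctrl *rx_ctrl)
	{
		while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
			cpu_relax();
	}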
@@ -265,6 +282,7 @@ struct bnad {

 	/* Control path resources, memory & irq */
 	struct bna_res_info res_info[BNA_RES_T_MAX];
+	struct bna_res_info mod_res_info[BNA_MOD_RES_T_MAX];
 	struct bnad_tx_res_info tx_res_info[BNAD_MAX_TXS];
 	struct bnad_rx_res_info rx_res_info[BNAD_MAX_RXS];
@@ -302,10 +320,10 @@ extern void bnad_set_ethtool_ops(struct net_device *netdev);
 extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
 extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad);

-extern int bnad_setup_rx(struct bnad *bnad, uint rx_id);
-extern int bnad_setup_tx(struct bnad *bnad, uint tx_id);
-extern void bnad_cleanup_tx(struct bnad *bnad, uint tx_id);
-extern void bnad_cleanup_rx(struct bnad *bnad, uint rx_id);
+extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
+extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
+extern void bnad_cleanup_tx(struct bnad *bnad, u32 tx_id);
+extern void bnad_cleanup_rx(struct bnad *bnad, u32 rx_id);

 /* Timer start/stop protos */
 extern void bnad_dim_timer_start(struct bnad *bnad);
...
@@ -29,14 +29,14 @@

 #define BNAD_NUM_TXF_COUNTERS 12
 #define BNAD_NUM_RXF_COUNTERS 10
-#define BNAD_NUM_CQ_COUNTERS 3
+#define BNAD_NUM_CQ_COUNTERS (3 + 5)
 #define BNAD_NUM_RXQ_COUNTERS 6
 #define BNAD_NUM_TXQ_COUNTERS 5

 #define BNAD_ETHTOOL_STATS_NUM						\
 	(sizeof(struct rtnl_link_stats64) / sizeof(u64) +		\
 	sizeof(struct bnad_drv_stats) / sizeof(u64) +			\
-	offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64))
+	offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))

 static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
 	"rx_packets",
@@ -277,7 +277,7 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
 	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
 	if (ioc_attr) {
 		spin_lock_irqsave(&bnad->bna_lock, flags);
-		bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
+		bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
 		strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
@@ -462,8 +462,8 @@ bnad_get_pauseparam(struct net_device *netdev,
 	struct bnad *bnad = netdev_priv(netdev);

 	pauseparam->autoneg = 0;
-	pauseparam->rx_pause = bnad->bna.port.pause_config.rx_pause;
-	pauseparam->tx_pause = bnad->bna.port.pause_config.tx_pause;
+	pauseparam->rx_pause = bnad->bna.enet.pause_config.rx_pause;
+	pauseparam->tx_pause = bnad->bna.enet.pause_config.tx_pause;
 }

 static int
@@ -478,12 +478,12 @@ bnad_set_pauseparam(struct net_device *netdev,
 		return -EINVAL;

 	mutex_lock(&bnad->conf_mutex);
-	if (pauseparam->rx_pause != bnad->bna.port.pause_config.rx_pause ||
-	    pauseparam->tx_pause != bnad->bna.port.pause_config.tx_pause) {
+	if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause ||
+	    pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) {
 		pause_config.rx_pause = pauseparam->rx_pause;
 		pause_config.tx_pause = pauseparam->tx_pause;
 		spin_lock_irqsave(&bnad->bna_lock, flags);
-		bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
+		bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
 	}
 	mutex_unlock(&bnad->conf_mutex);
@@ -495,7 +495,7 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string)
 {
 	struct bnad *bnad = netdev_priv(netdev);
 	int i, j, q_num;
-	u64 bmap;
+	u32 bmap;

 	mutex_lock(&bnad->conf_mutex);
@@ -508,9 +508,8 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string)
 				       ETH_GSTRING_LEN);
 			string += ETH_GSTRING_LEN;
 		}
-		bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
-			((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
-		for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
+		bmap = bna_tx_rid_mask(&bnad->bna);
+		for (i = 0; bmap; i++) {
 			if (bmap & 1) {
 				sprintf(string, "txf%d_ucast_octets", i);
 				string += ETH_GSTRING_LEN;
@@ -540,9 +539,8 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string)
 			bmap >>= 1;
 		}

-		bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
-			((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
-		for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+		bmap = bna_rx_rid_mask(&bnad->bna);
+		for (i = 0; bmap; i++) {
 			if (bmap & 1) {
 				sprintf(string, "rxf%d_ucast_octets", i);
 				string += ETH_GSTRING_LEN;
@@ -663,18 +661,16 @@ bnad_get_stats_count_locked(struct net_device *netdev)
 {
 	struct bnad *bnad = netdev_priv(netdev);
 	int i, j, count, rxf_active_num = 0, txf_active_num = 0;
-	u64 bmap;
+	u32 bmap;

-	bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
-		((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
-	for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
+	bmap = bna_tx_rid_mask(&bnad->bna);
+	for (i = 0; bmap; i++) {
 		if (bmap & 1)
 			txf_active_num++;
 		bmap >>= 1;
 	}
-	bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
-		((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
-	for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+	bmap = bna_rx_rid_mask(&bnad->bna);
+	for (i = 0; bmap; i++) {
 		if (bmap & 1)
 			rxf_active_num++;
 		bmap >>= 1;
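
The rid-mask walk above visits one bit per function id; for example,
with bmap == 0x0b (binary 1011) the loops see i = 0, 1 and 3 and count
three active functions.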
@@ -787,7 +783,7 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
 	unsigned long flags;
 	struct rtnl_link_stats64 *net_stats64;
 	u64 *stats64;
-	u64 bmap;
+	u32 bmap;

 	mutex_lock(&bnad->conf_mutex);
 	if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
@@ -818,20 +814,20 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
 		buf[bi++] = stats64[i];

 	/* Fill hardware stats excluding the rxf/txf into ethtool bufs */
-	stats64 = (u64 *) bnad->stats.bna_stats->hw_stats;
+	stats64 = (u64 *) &bnad->stats.bna_stats->hw_stats;
 	for (i = 0;
-	     i < offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64);
+	     i < offsetof(struct bfi_enet_stats, rxf_stats[0]) /
+		sizeof(u64);
 	     i++)
 		buf[bi++] = stats64[i];

 	/* Fill txf stats into ethtool buffers */
-	bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
-		((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
-	for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
+	bmap = bna_tx_rid_mask(&bnad->bna);
+	for (i = 0; bmap; i++) {
 		if (bmap & 1) {
 			stats64 = (u64 *)&bnad->stats.bna_stats->
-						hw_stats->txf_stats[i];
-			for (j = 0; j < sizeof(struct bfi_ll_stats_txf) /
+						hw_stats.txf_stats[i];
+			for (j = 0; j < sizeof(struct bfi_enet_stats_txf) /
 					sizeof(u64); j++)
 				buf[bi++] = stats64[j];
 		}
@@ -839,13 +835,12 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
 	}

 	/* Fill rxf stats into ethtool buffers */
-	bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
-		((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
-	for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+	bmap = bna_rx_rid_mask(&bnad->bna);
+	for (i = 0; bmap; i++) {
 		if (bmap & 1) {
 			stats64 = (u64 *)&bnad->stats.bna_stats->
-						hw_stats->rxf_stats[i];
-			for (j = 0; j < sizeof(struct bfi_ll_stats_rxf) /
+						hw_stats.rxf_stats[i];
+			for (j = 0; j < sizeof(struct bfi_enet_stats_rxf) /
 					sizeof(u64); j++)
 				buf[bi++] = stats64[j];
 		}
...
@@ -40,7 +40,7 @@

 extern char bfa_version[];

-#define CNA_FW_FILE_CT	"ctfw_cna.bin"
+#define CNA_FW_FILE_CT	"ctfw.bin"
 #define FC_SYMNAME_MAX	256	/*!< max name server symbolic name size */

 #pragma pack(1)
@@ -77,4 +77,33 @@ typedef struct mac { u8 mac[MAC_ADDRLEN]; } mac_t;
 	}								\
 }

+/*
+ * bfa_q_deq_tail - dequeue an element from tail of the queue
+ */
+#define bfa_q_deq_tail(_q, _qe) {					\
+	if (!list_empty(_q)) {						\
+		*((struct list_head **) (_qe)) = bfa_q_prev(_q);	\
+		bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) =	\
+						(struct list_head *) (_q); \
+		bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe); \
+		bfa_q_qe_init(*((struct list_head **) _qe));		\
+	} else {							\
+		*((struct list_head **) (_qe)) = (struct list_head *) NULL; \
+	}								\
+}
+
+/*
+ * bfa_add_tail_head - enqueue an element at the head of queue
+ */
+#define bfa_q_enq_head(_q, _qe) {					\
+	if (!(bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL))	\
+		pr_err("Assertion failure: %s:%d: %d",			\
+			__FILE__, __LINE__,				\
+			(bfa_q_next(_qe) == NULL) &&			\
+			(bfa_q_prev(_qe) == NULL));			\
+	bfa_q_next(_qe) = bfa_q_next(_q);				\
+	bfa_q_prev(_qe) = (struct list_head *) (_q);			\
+	bfa_q_prev(bfa_q_next(_q)) = (struct list_head *) (_qe);	\
+	bfa_q_next(_q) = (struct list_head *) (_qe);			\
+}
+
 #endif /* __CNA_H__ */
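
A hedged usage sketch of the queue helpers added above, draining a
bfa-style queue from its tail; the queue variable is illustrative:

	struct list_head *qe;

	bfa_q_deq_tail(&pending_q, &qe);	/* hypothetical queue */
	while (qe != NULL) {
		/* ... free or recycle the dequeued element ... */
		bfa_q_deq_tail(&pending_q, &qe);
	}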