Commit 16a5fd92 authored by Yuval Mintz, committed by David S. Miller

bnx2x: Revise comments and alignment

This patch corrects various typos, fixes comment conventions, and
adds/removes a few comments.
Signed-off-by: Yuval Mintz <yuvalmin@broadcom.com>
Signed-off-by: Ariel Elior <ariele@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d76a6111
@@ -362,7 +362,7 @@ union db_prod {
 /*
  * Number of required SGEs is the sum of two:
  * 1. Number of possible opened aggregations (next packet for
- *    these aggregations will probably consume SGE immidiatelly)
+ *    these aggregations will probably consume SGE immediately)
  * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only
  *    after placement on BD for new TPA aggregation)
  *
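A hypothetical sketch of the sizing rule this comment describes (the macro name and parameters here are illustrative, not the driver's actual definitions):

    /* SGEs needed = one per possible open aggregation, plus half of the
     * BRB blocks left over once every aggregation holds one block.
     */
    #define EXAMPLE_NUM_SGE_REQ(max_aggs, brb_blocks) \
            ((max_aggs) + ((brb_blocks) - (max_aggs)) / 2)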
@@ -486,10 +486,10 @@ struct bnx2x_fastpath {
     struct napi_struct napi;
     union host_hc_status_block status_blk;
-    /* chip independed shortcuts into sb structure */
+    /* chip independent shortcuts into sb structure */
     __le16 *sb_index_values;
     __le16 *sb_running_index;
-    /* chip independed shortcut into rx_prods_offset memory */
+    /* chip independent shortcut into rx_prods_offset memory */
     u32 ustorm_rx_prods_offset;
     u32 rx_buf_size;
@@ -603,7 +603,7 @@ struct bnx2x_fastpath {
  * START_BD(splitted) - includes unpaged data segment for GSO
  * PARSING_BD - for TSO and CSUM data
  * PARSING_BD2 - for encapsulation data
- * Frag BDs - decribes pages for frags
+ * Frag BDs - describes pages for frags
  */
 #define BDS_PER_TX_PKT 4
 #define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT)
@@ -886,14 +886,14 @@ struct bnx2x_common {
 #define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \
                           (CHIP_REV(bp) == CHIP_REV_Ax))
 /* This define is used in two main places:
- * 1. In the early stages of nic_load, to know if to configrue Parser / Searcher
+ * 1. In the early stages of nic_load, to know if to configure Parser / Searcher
  * to nic-only mode or to offload mode. Offload mode is configured if either the
  * chip is E1x (where MIC_MODE register is not applicable), or if cnic already
  * registered for this port (which means that the user wants storage services).
  * 2. During cnic-related load, to know if offload mode is already configured in
- * the HW or needs to be configrued.
+ * the HW or needs to be configured.
  * Since the transition from nic-mode to offload-mode in HW causes traffic
- * coruption, nic-mode is configured only in ports on which storage services
+ * corruption, nic-mode is configured only in ports on which storage services
  * where never requested.
  */
 #define CONFIGURE_NIC_MODE(bp) (!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp))
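A minimal, illustrative sketch of how the two call sites described above might consult this define (the example_* helpers are hypothetical stand-ins, not driver functions):

    static void example_init_searcher_mode(struct bnx2x *bp)
    {
            if (CONFIGURE_NIC_MODE(bp))
                    example_searcher_set_nic_only(bp); /* no storage offload */
            else
                    example_searcher_set_offload(bp);  /* E1x or CNIC registered */
    }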
@@ -994,14 +994,14 @@ extern struct workqueue_struct *bnx2x_wq;
  * If the maximum number of FP-SB available is X then:
  * a. If CNIC is supported it consumes 1 FP-SB thus the max number of
  *    regular L2 queues is Y=X-1
- * b. in MF mode the actual number of L2 queues is Y= (X-1/MF_factor)
+ * b. In MF mode the actual number of L2 queues is Y= (X-1/MF_factor)
  * c. If the FCoE L2 queue is supported the actual number of L2 queues
  *    is Y+1
  * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for
  *    slow-path interrupts) or Y+2 if CNIC is supported (one additional
  *    FP interrupt context for the CNIC).
  * e. The number of HW context (CID count) is always X or X+1 if FCoE
- *    L2 queue is supported. the cid for the FCoE L2 queue is always X.
+ *    L2 queue is supported. The cid for the FCoE L2 queue is always X.
  */
 /* fast-path interrupt contexts E1x */
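A worked example of the bookkeeping above, assuming both CNIC and FCoE are supported (plain arithmetic, not driver code):

    int x = 8;          /* FP-SBs available                */
    int y = x - 1;      /* CNIC consumes one FP-SB -> 7    */
    int l2 = y + 1;     /* FCoE L2 queue added -> 8 queues */
    int msix = y + 2;   /* slow-path + CNIC vectors -> 9   */
    int cids = x + 1;   /* 9 contexts; FCoE cid is X (= 8) */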
@@ -1568,7 +1568,7 @@ struct bnx2x {
     struct mutex cnic_mutex;
     struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj;
-    /* Start index of the "special" (CNIC related) L2 cleints */
+    /* Start index of the "special" (CNIC related) L2 clients */
     u8 cnic_base_cl_id;
     int dmae_ready;
@@ -1682,7 +1682,7 @@ struct bnx2x {
     /* operation indication for the sp_rtnl task */
     unsigned long sp_rtnl_state;
-    /* DCBX Negotation results */
+    /* DCBX Negotiation results */
     struct dcbx_features dcbx_local_feat;
     u32 dcbx_error;
@@ -2042,7 +2042,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 #define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
 #define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit
-                                    indicates eror */
+                                  * indicates error
+                                  */
 #define MAX_DMAE_C_PER_PORT 8
 #define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
...
@@ -124,7 +124,7 @@ static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
     int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
     /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
-     * backward along the array could cause memory to be overriden
+     * backward along the array could cause memory to be overridden
      */
     for (cos = 1; cos < bp->max_cos; cos++) {
         for (i = 0; i < old_eth_num - delta; i++) {
@@ -258,7 +258,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
     smp_mb();
     if (unlikely(netif_tx_queue_stopped(txq))) {
-        /* Taking tx_lock() is needed to prevent reenabling the queue
+        /* Taking tx_lock() is needed to prevent re-enabling the queue
          * while it's empty. This could have happen if rx_action() gets
          * suspended in bnx2x_tx_int() after the condition before
          * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
@@ -571,7 +571,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
         return err;
     }
-    /* Unmap the page as we r going to pass it to the stack */
+    /* Unmap the page as we're going to pass it to the stack */
     dma_unmap_page(&bp->pdev->dev,
                    dma_unmap_addr(&old_rx_pg, mapping),
                    SGE_PAGES, DMA_FROM_DEVICE);
@@ -1114,7 +1114,7 @@ static void bnx2x_fill_report_data(struct bnx2x *bp,
     memset(data, 0, sizeof(*data));
-    /* Fill the report data: efective line speed */
+    /* Fill the report data: effective line speed */
     data->line_speed = line_speed;
     /* Link is down */
@@ -1157,7 +1157,7 @@ void bnx2x_link_report(struct bnx2x *bp)
  *
  * @bp: driver handle
  *
- * None atomic inmlementation.
+ * None atomic implementation.
  * Should be called under the phy_lock.
  */
 void __bnx2x_link_report(struct bnx2x *bp)
@@ -1300,7 +1300,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
        "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
     if (!fp->disable_tpa) {
-        /* Fill the per-aggregtion pool */
+        /* Fill the per-aggregation pool */
         for (i = 0; i < MAX_AGG_QS(bp); i++) {
             struct bnx2x_agg_info *tpa_info =
                 &fp->tpa_info[i];
@@ -1858,7 +1858,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
  *
  * If the actual number of Tx queues (for each CoS) is less than 16 then there
  * will be the holes at the end of each group of 16 ETh L2 indices (0..15,
- * 16..31,...) with indicies that are not coupled with any real Tx queue.
+ * 16..31,...) with indices that are not coupled with any real Tx queue.
  *
  * The proper configuration of skb->queue_mapping is handled by
  * bnx2x_select_queue() and __skb_tx_hash().
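For example, with 3 CoS and only 4 Tx queues per CoS, the ETh L2 indices actually used would be 0..3, 16..19 and 32..35; indices 4..15, 20..31 and 36..47 are the holes the comment refers to.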
@@ -1920,7 +1920,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
                       ETH_OVREHEAD +
                       mtu +
                       BNX2X_FW_RX_ALIGN_END;
-    /* Note : rx_buf_size doesnt take into account NET_SKB_PAD */
+    /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
     if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
         fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
     else
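As a rough illustration (assuming 4 KiB pages): a standard 1500-byte mtu keeps rx_buf_size + NET_SKB_PAD within PAGE_SIZE, so the frag-based path with rx_frag_size = rx_buf_size + NET_SKB_PAD is taken, while a jumbo mtu overflows the page and falls through to the else branch.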
@@ -1933,7 +1933,7 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp)
     int i;
     u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
-    /* Prepare the initial contents fo the indirection table if RSS is
+    /* Prepare the initial contents for the indirection table if RSS is
      * enabled
      */
     for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
@@ -2011,7 +2011,7 @@ static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
 /*
  * Cleans the object that have internal lists without sending
- * ramrods. Should be run when interrutps are disabled.
+ * ramrods. Should be run when interrupts are disabled.
  */
 void bnx2x_squeeze_objects(struct bnx2x *bp)
 {
@@ -2347,8 +2347,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
     fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
         BNX2X_NUM_ETH_QUEUES(bp) + index];
-    /*
-     * set the tpa flag for each queue. The tpa flag determines the queue
+    /* set the tpa flag for each queue. The tpa flag determines the queue
      * minimal size so it must be set prior to queue memory allocation
      */
     fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
@@ -2473,6 +2472,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
     bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
+    /* zero the structure w/o any lock, before SP handler is initialized */
     memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
     __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
               &bp->last_reported_link.link_report_flags);
@@ -2537,8 +2537,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
     }
     /* configure multi cos mappings in kernel.
-     * this configuration may be overriden by a multi class queue discipline
-     * or by a dcbx negotiation result.
+     * this configuration may be overridden by a multi class queue
+     * discipline or by a dcbx negotiation result.
      */
     bnx2x_setup_tc(bp->dev, bp->max_cos);
@@ -2697,7 +2697,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
     /* Start the Tx */
     switch (load_mode) {
     case LOAD_NORMAL:
-        /* Tx queue should be only reenabled */
+        /* Tx queue should be only re-enabled */
         netif_tx_wake_all_queues(bp->dev);
         break;
@@ -2842,7 +2842,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
     }
     /* Nothing to do during unload if previous bnx2x_nic_load()
-     * have not completed succesfully - all resourses are released.
+     * have not completed successfully - all resources are released.
      *
      * we can get here only after unsuccessful ndo_* callback, during which
      * dev->IFF_UP flag is still on.
@@ -2891,10 +2891,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
     /* Send the UNLOAD_REQUEST to the MCP */
     bnx2x_send_unload_req(bp, unload_mode);
-    /*
-     * Prevent transactions to host from the functions on the
+    /* Prevent transactions to host from the functions on the
      * engine that doesn't reset global blocks in case of global
-     * attention once gloabl blocks are reset and gates are opened
+     * attention once global blocks are reset and gates are opened
      * (the engine which leader will perform the recovery
      * last).
      */
@@ -2915,7 +2914,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
     }
     /*
-     * At this stage no more interrupts will arrive so we may safly clean
+     * At this stage no more interrupts will arrive so we may safely clean
      * the queueable objects here in case they failed to get cleaned so far.
      */
     if (IS_PF(bp))
@@ -3587,7 +3586,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
        DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
           txq_index, fp_index, txdata_index); */
-    /* enable this debug print to view the tranmission details
+    /* enable this debug print to view the transmission details
     DP(NETIF_MSG_TX_QUEUED,
        "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
        txdata->cid, fp_index, txdata_index, txdata, fp); */
@@ -3970,7 +3969,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
     /* setup tc must be called under rtnl lock */
     ASSERT_RTNL();
-    /* no traffic classes requested. aborting */
+    /* no traffic classes requested. Aborting */
     if (!num_tc) {
         netdev_reset_tc(dev);
         return 0;
@@ -3997,8 +3996,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
            prio, bp->prio_to_cos[prio]);
     }
-
-    /* Use this configuration to diffrentiate tc0 from other COSes
+    /* Use this configuration to differentiate tc0 from other COSes
        This can be used for ets or pfc, and save the effort of setting
        up a multio class queue disc or negotiating DCBX with a switch
     netdev_set_prio_tc_map(dev, 0, 0);
@@ -4629,7 +4627,7 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
     changes = flags ^ bp->flags;
-    /* if GRO is changed while LRO is enabled, dont force a reload */
+    /* if GRO is changed while LRO is enabled, don't force a reload */
     if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
         changes &= ~GRO_ENABLE_FLAG;
...
@@ -1331,8 +1331,8 @@ static inline bool bnx2x_mtu_allows_gro(int mtu)
     int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);
     /*
-     * 1. number of frags should not grow above MAX_SKB_FRAGS
-     * 2. frag must fit the page
+     * 1. Number of frags should not grow above MAX_SKB_FRAGS
+     * 2. Frag must fit the page
      */
     return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
 }
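Reading of the check: fpp is the number of full MTU-sized payload pieces one SGE page can hold, and an aggregation may span up to U_ETH_SGL_SIZE such pages, so the worst-case frag count after re-segmentation is U_ETH_SGL_SIZE * fpp, which must stay within MAX_SKB_FRAGS for GRO-compatible aggregation.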
...
@@ -687,7 +687,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
     }
     /* setup tc must be called under rtnl lock, but we can't take it here
-     * as we are handling an attetntion on a work queue which must be
+     * as we are handling an attention on a work queue which must be
      * flushed at some rtnl-locked contexts (e.g. if down)
      */
     if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
@@ -707,7 +707,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
      */
     bnx2x_dcbnl_update_applist(bp, true);
-    /* Read rmeote mib if dcbx is in the FW */
+    /* Read remote mib if dcbx is in the FW */
     if (bnx2x_dcbx_read_shmem_remote_mib(bp))
         return;
 #endif
@@ -738,7 +738,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
     bnx2x_dcbx_update_tc_mapping(bp);
     /*
-     * allow other funtions to update their netdevices
+     * allow other functions to update their netdevices
      * accordingly
      */
     if (IS_MF(bp))
@@ -860,7 +860,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
            i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i));
     }
-    /*For IEEE admin_recommendation_bw_precentage
+    /*For IEEE admin_recommendation_bw_percentage
      *For IEEE admin_recommendation_ets_pg */
     af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
     for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
@@ -1070,7 +1070,7 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
     bool pg_found = false;
     u32 i, traf_type, add_traf_type, add_pg;
     u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
-    struct pg_entry_help_data *data = help_data->data; /*shotcut*/
+    struct pg_entry_help_data *data = help_data->data; /*shortcut*/
     /* Set to invalid */
     for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
@@ -1166,7 +1166,8 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
                 DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry));
         else
             /* If we join a group and one is strict
-             * than the bw rulls */
+             * than the bw rules
+             */
             cos_data->data[entry].strict =
                 BNX2X_DCBX_STRICT_COS_HIGHEST;
     }
@@ -1277,7 +1278,7 @@ static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp,
     } else {
         /* If there are only pauseable priorities or
          * only non-pauseable,* the lower priorities go
-         * to the first queue and the higherpriorities go
+         * to the first queue and the higher priorities go
          * to the second queue.
          */
         cos_data->data[0].pausable =
@@ -1477,7 +1478,7 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
      * queue and one priority goes to the second queue.
      *
      * We will join this two cases:
-     * if one is BW limited it will go to the secoend queue
+     * if one is BW limited it will go to the second queue
      * otherwise the last priority will get it
      */
@@ -1497,7 +1498,8 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
         false == b_found_strict)
         /* last entry will be handled separately
          * If no priority is strict than last
-         * enty goes to last queue.*/
+         * entry goes to last queue.
+         */
         entry = 1;
     cos_data->data[entry].pri_join_mask |=
         pri_tested;
@@ -1509,7 +1511,8 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
     b_found_strict = true;
     cos_data->data[1].pri_join_mask |= pri_tested;
     /* If we join a group and one is strict
-     * than the bw rulls */
+     * than the bw rules
+     */
     cos_data->data[1].strict =
         BNX2X_DCBX_STRICT_COS_HIGHEST;
     }
@@ -1838,7 +1841,7 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
 void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
 {
-    /* if we need to syncronize DCBX result from prev PMF
+    /* if we need to synchronize DCBX result from prev PMF
      * read it from shmem and update bp and netdev accordingly
      */
     if (SHMEM2_HAS(bp, drv_flags) &&
@@ -1932,14 +1935,14 @@ static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio,
     return;
     /**
-     * bw_pct ingnored -    band-width percentage devision between user
+     * bw_pct ignored -     band-width percentage devision between user
      *                      priorities within the same group is not
      *                      standard and hence not supported
      *
-     * prio_type igonred -  priority levels within the same group are not
+     * prio_type ignored -  priority levels within the same group are not
      *                      standard and hence are not supported. According
      *                      to the standard pgid 15 is dedicated to strict
-     *                      prioirty traffic (on the port level).
+     *                      priority traffic (on the port level).
      *
      * up_map ignored
      */
@@ -1984,14 +1987,14 @@ static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio,
     DP(BNX2X_MSG_DCB, "prio = %d\n", prio);
     /**
-     * bw_pct ingnored -    band-width percentage devision between user
+     * bw_pct ignored -     band-width percentage devision between user
      *                      priorities within the same group is not
      *                      standard and hence not supported
      *
-     * prio_type igonred -  priority levels within the same group are not
+     * prio_type ignored -  priority levels within the same group are not
      *                      standard and hence are not supported. According
      *                      to the standard pgid 15 is dedicated to strict
-     *                      prioirty traffic (on the port level).
+     *                      priority traffic (on the port level).
      *
      * up_map ignored
      */
...
@@ -13,12 +13,6 @@
  * consent.
  */
-
-/* This struct holds a signature to ensure the dump returned from the driver
- * match the meta data file inserted to grc_dump.tcl
- * The signature is time stamp, diag version and grc_dump version
- */
-
 #ifndef BNX2X_DUMP_H
 #define BNX2X_DUMP_H
...
@@ -320,7 +320,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
     speed = ethtool_cmd_speed(cmd);
-    /* If recieved a request for an unknown duplex, assume full*/
+    /* If received a request for an unknown duplex, assume full*/
     if (cmd->duplex == DUPLEX_UNKNOWN)
         cmd->duplex = DUPLEX_FULL;
@@ -849,7 +849,7 @@ static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset)
     /* Paged registers are supported in E2 & E3 only */
     if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) {
-        /* Read "paged" registes */
+        /* Read "paged" registers */
         bnx2x_read_pages_regs(bp, p, preset);
     }
@@ -1154,8 +1154,8 @@ static int bnx2x_get_eeprom_len(struct net_device *dev)
     return bp->common.flash_size;
 }
-/* Per pf misc lock must be aquired before the per port mcp lock. Otherwise, had
- * we done things the other way around, if two pfs from the same port would
+/* Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
+ * had we done things the other way around, if two pfs from the same port would
  * attempt to access nvram at the same time, we could run into a scenario such
  * as:
  * pf A takes the port lock.
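A minimal sketch of the ordering rule this comment states, with generic mutexes standing in for the driver's per-pf misc lock and per-port mcp lock (the names here are illustrative):

    static DEFINE_MUTEX(pf_misc_lock);  /* stand-in: per-pf misc lock  */
    static DEFINE_MUTEX(port_mcp_lock); /* stand-in: per-port mcp lock */

    static void example_nvram_access(void)
    {
            mutex_lock(&pf_misc_lock);   /* always taken first  */
            mutex_lock(&port_mcp_lock);  /* always taken second */
            /* ... access nvram ... */
            mutex_unlock(&port_mcp_lock);
            mutex_unlock(&pf_misc_lock);
    }

Taking the two locks in the same order on every path is what rules out the pf A / pf B deadlock the comment goes on to describe.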
@@ -2070,7 +2070,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
         EEE_MODE_OVERRIDE_NVRAM |
         EEE_MODE_OUTPUT_TIME;
-    /* Restart link to propogate changes */
+    /* Restart link to propagate changes */
     if (netif_running(dev)) {
         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
         bnx2x_force_link_reset(bp);
...
@@ -34,8 +34,7 @@ enum {
     RAMROD_RESTORE,
     /* Execute the next command now */
     RAMROD_EXEC,
-    /*
-     * Don't add a new command and continue execution of posponed
+    /* Don't add a new command and continue execution of postponed
      * commands. If not set a new command will be added to the
      * pending commands list.
      */
@@ -129,8 +128,7 @@ enum bnx2x_vlan_mac_cmd {
 struct bnx2x_vlan_mac_data {
     /* Requested command: BNX2X_VLAN_MAC_XX */
     enum bnx2x_vlan_mac_cmd cmd;
-    /*
-     * used to contain the data related vlan_mac_flags bits from
+    /* used to contain the data related vlan_mac_flags bits from
      * ramrod parameters.
      */
     unsigned long vlan_mac_flags;
@@ -190,14 +188,10 @@ typedef struct bnx2x_exeq_elem *
         struct bnx2x_exeq_elem *elem);
 struct bnx2x_exe_queue_obj {
-    /*
-     * Commands pending for an execution.
-     */
+    /* Commands pending for an execution. */
     struct list_head exe_queue;
-    /*
-     * Commands pending for an completion.
-     */
+    /* Commands pending for an completion. */
     struct list_head pending_comp;
     spinlock_t lock;
@@ -245,14 +239,13 @@ struct bnx2x_exe_queue_obj {
 };
 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
 /*
- * Element in the VLAN_MAC registry list having all currenty configured
+ * Element in the VLAN_MAC registry list having all currently configured
  * rules.
  */
 struct bnx2x_vlan_mac_registry_elem {
     struct list_head link;
-    /*
-     * Used to store the cam offset used for the mac/vlan/vlan-mac.
+    /* Used to store the cam offset used for the mac/vlan/vlan-mac.
      * Relevant for 57710 and 57711 only. VLANs and MACs share the
      * same CAM for these chips.
      */
@@ -310,7 +303,7 @@ struct bnx2x_vlan_mac_obj {
      * @param n number of elements to get
      * @param buf buffer preallocated by caller into which elements
      *            will be copied. Note elements are 4-byte aligned
-     *            so buffer size must be able to accomodate the
+     *            so buffer size must be able to accommodate the
      *            aligned elements.
      *
      * @return number of copied bytes
@@ -395,7 +388,7 @@ struct bnx2x_vlan_mac_obj {
      * @param bp
      * @param p Command parameters (RAMROD_COMP_WAIT bit in
      *          ramrod_flags is only taken into an account)
-     * @param ppos a pointer to the cooky that should be given back in the
+     * @param ppos a pointer to the cookie that should be given back in the
      *        next call to make function handle the next element. If
      *        *ppos is set to NULL it will restart the iterator.
      *        If returned *ppos == NULL this means that the last
@@ -408,7 +401,7 @@ struct bnx2x_vlan_mac_obj {
         struct bnx2x_vlan_mac_registry_elem **ppos);
     /**
-     * Should be called on a completion arival.
+     * Should be called on a completion arrival.
      *
      * @param bp
     * @param o
@@ -447,7 +440,7 @@ void bnx2x_set_mac_in_nig(struct bnx2x *bp,
 /** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
-/* RX_MODE ramrod spesial flags: set in rx_mode_flags field in
+/* RX_MODE ramrod special flags: set in rx_mode_flags field in
  * a bnx2x_rx_mode_ramrod_params.
  */
 enum {
@@ -475,8 +468,7 @@ struct bnx2x_rx_mode_ramrod_params {
     unsigned long ramrod_flags;
     unsigned long rx_mode_flags;
-    /*
-     * rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to
+    /* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to
      * a tstorm_eth_mac_filter_config (e1x).
      */
     void *rdata;
@@ -646,12 +638,11 @@ struct bnx2x_credit_pool_obj {
     /* Maximum allowed credit. put() will check against it. */
     int pool_sz;
-    /*
-     * Allocate a pool table statically.
+    /* Allocate a pool table statically.
      *
-     * Currently the mamimum allowed size is MAX_MAC_CREDIT_E2(272)
+     * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272)
      *
      * The set bit in the table will mean that the entry is available.
      */
 #define BNX2X_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64)
     u64 pool_mirror[BNX2X_POOL_VEC_SIZE];
@@ -832,7 +823,7 @@ enum {
     BNX2X_Q_FLG_TUN_INC_INNER_IP_ID
 };
-/* Queue type options: queue type may be a compination of below. */
+/* Queue type options: queue type may be a combination of below. */
 enum bnx2x_q_type {
     /** TODO: Consider moving both these flags into the init()
      * ramrod params.
@@ -1002,10 +993,9 @@ struct bnx2x_queue_sp_obj {
     u8 cl_id;
     u8 func_id;
-    /*
-     * number of traffic classes supported by queue.
-     * The primary connection of the queue suppotrs the first traffic
-     * class. Any further traffic class is suppoted by a tx-only
+    /* number of traffic classes supported by queue.
+     * The primary connection of the queue supports the first traffic
+     * class. Any further traffic class is supported by a tx-only
      * connection.
      *
      * Therefore max_cos is also a number of valid entries in the cids
@@ -1021,7 +1011,7 @@ struct bnx2x_queue_sp_obj {
     /* BNX2X_Q_CMD_XX bits. This object implements "one
      * pending" paradigm but for debug and tracing purposes it's
-     * more convinient to have different bits for different
+     * more convenient to have different bits for different
      * commands.
      */
     unsigned long pending;
@@ -1210,7 +1200,7 @@ struct bnx2x_func_sp_obj {
     /* BNX2X_FUNC_CMD_XX bits. This object implements "one
      * pending" paradigm but for debug and tracing purposes it's
-     * more convinient to have different bits for different
+     * more convenient to have different bits for different
      * commands.
      */
     unsigned long pending;
@@ -1329,7 +1319,7 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
  *
  * @p: Command parameters
  *
- * Return: 0 - if operation was successfull and there is no pending completions,
+ * Return: 0 - if operation was successful and there is no pending completions,
  *         positive number - if there are pending completions,
  *         negative - if there were errors
  */
@@ -1361,7 +1351,7 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
  *         the current command will be enqueued to the tail of the
  *         pending commands list.
  *
- * Return: 0 is operation was successfull and there are no pending completions,
+ * Return: 0 is operation was successful and there are no pending completions,
  *         negative if there were errors, positive if there are pending
  *         completions.
  */
...
@@ -1341,7 +1341,7 @@ int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
  */
 /* internal vf enable - until vf is enabled internally all transactions
- * are blocked. this routine should always be called last with pretend.
+ * are blocked. This routine should always be called last with pretend.
  */
 static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
 {
@@ -1743,7 +1743,7 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
     REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
     REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
-    /* set the number of VF alllowed doorbells to the full DQ range */
+    /* set the number of VF allowed doorbells to the full DQ range */
     REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);
     /* set the VF doorbell threshold */
@@ -2403,7 +2403,7 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
     /* extract vf and rxq index from vf_cid - relies on the following:
      * 1. vfid on cid reflects the true abs_vfid
-     * 2. the max number of VFs (per path) is 64
+     * 2. The max number of VFs (per path) is 64
      */
     qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
     abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
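Worked example of the extraction (treating BNX2X_VF_CID_WND as 4 bits purely for illustration): cid 0x135 gives qidx = 0x135 & 0xf = 5 and abs_vfid = (0x135 >> 4) & 0x3f = 19, the 0x3f mask following from the 64-VFs-per-path limit.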
@@ -2461,7 +2461,7 @@ static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
 {
     /* extract the vf from vf_cid - relies on the following:
      * 1. vfid on cid reflects the true abs_vfid
-     * 2. the max number of VFs (per path) is 64
+     * 2. The max number of VFs (per path) is 64
      */
     int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
     return bnx2x_vf_by_abs_fid(bp, abs_vfid);
@@ -2480,7 +2480,7 @@ void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
     if (vf) {
         /* extract queue index from vf_cid - relies on the following:
          * 1. vfid on cid reflects the true abs_vfid
-         * 2. the max number of VFs (per path) is 64
+         * 2. The max number of VFs (per path) is 64
          */
         int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
         *q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
@@ -2705,7 +2705,7 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
     }
     /* static allocation:
-     * the global maximum number are fixed per VF. fail the request if
+     * the global maximum number are fixed per VF. Fail the request if
      * requested number exceed these globals
      */
     if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
@@ -2890,7 +2890,7 @@ int bnx2x_vfop_close_cmd(struct bnx2x *bp,
     return -ENOMEM;
 }
-/* VF release can be called either: 1. the VF was acquired but
+/* VF release can be called either: 1. The VF was acquired but
  * not enabled 2. the vf was enabled or in the process of being
  * enabled
  */
@@ -3140,7 +3140,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
         /* mac configured by ndo so its in bulletin board */
         memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
     else
-        /* funtion has not been loaded yet. Show mac as 0s */
+        /* function has not been loaded yet. Show mac as 0s */
         memset(&ivi->mac, 0, ETH_ALEN);
     /* vlan */
@@ -3148,7 +3148,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
         /* vlan configured by ndo so its in bulletin board */
         memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
     else
-        /* funtion has not been loaded yet. Show vlans as 0s */
+        /* function has not been loaded yet. Show vlans as 0s */
         memset(&ivi->vlan, 0, VLAN_HLEN);
 }
@@ -3188,7 +3188,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
         return -EINVAL;
     }
-    /* update PF's copy of the VF's bulletin. will no longer accept mac
+    /* update PF's copy of the VF's bulletin. Will no longer accept mac
      * configuration requests from vf unless match this mac
      */
     bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
@@ -3357,8 +3357,11 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
     return 0;
 }
-/* crc is the first field in the bulletin board. compute the crc over the
- * entire bulletin board excluding the crc field itself
+/* crc is the first field in the bulletin board. Compute the crc over the
+ * entire bulletin board excluding the crc field itself. Use the length field
+ * as the Bulletin Board was posted by a PF with possibly a different version
+ * from the vf which will sample it. Therefore, the length is computed by the
+ * PF and the used blindly by the VF.
  */
 u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
                           struct pf_vf_bulletin_content *bulletin)
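A minimal sketch of the rule this comment describes, assuming a crc32 helper, a seed constant, and that crc is the first field of the structure (illustrative only, not necessarily the function body that follows; EXAMPLE_CRC_SEED is a made-up name):

    u32 example_bulletin_crc(struct pf_vf_bulletin_content *b)
    {
            /* skip the crc field itself; trust the PF-written length */
            return crc32(EXAMPLE_CRC_SEED,
                         (u8 *)b + sizeof(b->crc),
                         b->length - sizeof(b->crc));
    }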
@@ -3451,7 +3454,7 @@ int bnx2x_open_epilog(struct bnx2x *bp)
      * register_netdevice which must have rtnl lock taken. As we are holding
      * the lock right now, that could only work if the probe would not take
      * the lock. However, as the probe of the vf may be called from other
-     * contexts as well (such as passthrough to vm failes) it can't assume
+     * contexts as well (such as passthrough to vm fails) it can't assume
      * the lock is being held for it. Using delayed work here allows the
      * probe code to simply take the lock (i.e. wait for it to be released
      * if it is being held). We only want to do this if the number of VFs
...
@@ -197,7 +197,7 @@ struct bnx2x_virtf {
     u8 state;
 #define VF_FREE     0 /* VF ready to be acquired holds no resc */
-#define VF_ACQUIRED 1 /* VF aquired, but not initalized */
+#define VF_ACQUIRED 1 /* VF acquired, but not initialized */
 #define VF_ENABLED  2 /* VF Enabled */
 #define VF_RESET    3 /* VF FLR'd, pending cleanup */
...
@@ -233,7 +233,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
     attempts++;
-    /* test whether the PF accepted our request. If not, humble the
+    /* test whether the PF accepted our request. If not, humble
      * the request and try again.
      */
     if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
@@ -787,7 +787,7 @@ static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
     storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
 }
-/* enable vf_pf mailbox (aka vf-pf-chanell) */
+/* enable vf_pf mailbox (aka vf-pf-channel) */
 void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
 {
     bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);
@@ -1072,7 +1072,7 @@ static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
     if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
         __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
-    /* outer vlan removal is set according to the PF's multi fuction mode */
+    /* outer vlan removal is set according to PF's multi function mode */
     if (IS_MF_SD(bp))
         __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
 }
@@ -1104,7 +1104,7 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
     struct bnx2x_queue_init_params *init_p;
     struct bnx2x_queue_setup_params *setup_p;
-    /* reinit the VF operation context */
+    /* re-init the VF operation context */
     memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
     setup_p = &vf->op_params.qctor.prep_qsetup;
     init_p = &vf->op_params.qctor.qstate.params.init;