Commit 9005df38 authored by Carolyn Wyborny, committed by Jeff Kirsher

igb: Cleanups to fix incorrect indentation

This patch fixes the WARNING:LEADING_SPACE, WARNING:SPACING, ERROR:SPACING,
WARNING:SPACE_BEFORE_TAB and ERROR:CODE_INDENT issues reported by the checkpatch file check.
Signed-off-by: Carolyn Wyborny <carolyn.wyborny@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent d34a15ab
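For reference, the warning names in the commit message are checkpatch message types. A report of this kind can be reproduced (this invocation is an illustrative sketch, not part of the commit) by running the kernel's checkpatch script in file mode against one of the sources touched here, for example:

    # From the top of a kernel tree:
    #   -f           check a source file directly instead of a patch
    #   --show-types print the type tags (e.g. WARNING:LEADING_SPACE, ERROR:CODE_INDENT)
    #   --terse      keep the output to one line per finding
    ./scripts/checkpatch.pl -f --show-types --terse drivers/net/ethernet/intel/igb/igb_main.c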
@@ -1011,8 +1011,7 @@
 #define E1000_VFTA_ENTRY_BIT_SHIFT_MASK    0x1F
 /* DMA Coalescing register fields */
-#define E1000_PCIEMISC_LX_DECISION    0x00000080 /* Lx power decision based
-                                                     on DMA coal */
+#define E1000_PCIEMISC_LX_DECISION    0x00000080 /* Lx power on DMA coal */
 /* Tx Rate-Scheduler Config fields */
 #define E1000_RTTBCNRC_RS_ENA         0x80000000
...
@@ -1297,7 +1297,7 @@ static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
 	}
 	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
-		switch(hw->phy.media_type) {
+		switch (hw->phy.media_type) {
 		case e1000_media_type_internal_serdes:
 			*data = ID_LED_DEFAULT_82575_SERDES;
 			break;
...
@@ -480,6 +480,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 	/* Loop to allow for up to whole page write of eeprom */
 	while (widx < words) {
 		u16 word_out = data[widx];
+
 		word_out = (word_out >> 8) | (word_out << 8);
 		igb_shift_out_eec_bits(hw, word_out, 16);
 		widx++;
...
@@ -358,8 +358,7 @@
 #define E1000_VMBMEM(_n)    (0x00800 + (64 * (_n)))
 #define E1000_VMOLR(_n)     (0x05AD0 + (4 * (_n)))
 #define E1000_DVMOLR(_n)    (0x0C038 + (64 * (_n)))
-#define E1000_VLVF(_n)      (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
-                                                     * Filter - RW */
+#define E1000_VLVF(_n)      (0x05D00 + (4 * (_n))) /* VLAN VM Filter */
 #define E1000_VMVIR(_n)     (0x03700 + (4 * (_n)))
 struct e1000_hw;
...
@@ -198,6 +198,7 @@ struct igb_tx_buffer {
 	unsigned int bytecount;
 	u16 gso_segs;
 	__be16 protocol;
+
 	DEFINE_DMA_UNMAP_ADDR(dma);
 	DEFINE_DMA_UNMAP_LEN(len);
 	u32 tx_flags;
...
@@ -1156,7 +1156,7 @@ static struct igb_reg_test reg_test_82576[] = {
 	{ E1000_RA,   0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
 	{ E1000_RA2,  0, 8,  TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
 	{ E1000_RA2,  0, 8,  TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
-	{ E1000_MTA,  0, 128,TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_MTA,  0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
 	{ 0, 0, 0, 0 }
 };

@@ -1218,6 +1218,7 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 val;
+
 	wr32(reg, write & mask);
 	val = rd32(reg);
 	if ((write & mask) != (val & mask)) {

@@ -1949,6 +1950,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
 	*data = 0;
 	if (hw->phy.media_type == e1000_media_type_internal_serdes) {
 		int i = 0;
+
 		hw->mac.serdes_has_link = false;
 		/* On some blade server designs, link establishment
...
@@ -674,9 +674,9 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
 static int __init igb_init_module(void)
 {
 	int ret;
-	pr_info("%s - version %s\n",
-	       igb_driver_string, igb_driver_version);
+	pr_info("%s - version %s\n",
+		igb_driver_string, igb_driver_version);
 	pr_info("%s\n", igb_copyright);
 #ifdef CONFIG_IGB_DCA

@@ -1338,6 +1338,7 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
 	for (; v_idx < q_vectors; v_idx++) {
 		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
 		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+
 		err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
 					 tqpv, txr_idx, rqpv, rxr_idx);

@@ -1477,6 +1478,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
 	 */
 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
 		u32 regval = rd32(E1000_EIAM);
+
 		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
 		wr32(E1000_EIMC, adapter->eims_enable_mask);
 		regval = rd32(E1000_EIAC);

@@ -1488,6 +1490,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
 	wrfl();
 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
 		int i;
+
 		for (i = 0; i < adapter->num_q_vectors; i++)
 			synchronize_irq(adapter->msix_entries[i].vector);
 	} else {

@@ -1506,6 +1509,7 @@ static void igb_irq_enable(struct igb_adapter *adapter)
 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
 		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
 		u32 regval = rd32(E1000_EIAC);
+
 		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
 		regval = rd32(E1000_EIAM);
 		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);

@@ -1738,6 +1742,7 @@ int igb_up(struct igb_adapter *adapter)
 	/* notify VFs that reset has been completed */
 	if (adapter->vfs_allocated_count) {
 		u32 reg_data = rd32(E1000_CTRL_EXT);
+
 		reg_data |= E1000_CTRL_EXT_PFRSTD;
 		wr32(E1000_CTRL_EXT, reg_data);
 	}

@@ -1953,6 +1958,7 @@ void igb_reset(struct igb_adapter *adapter)
 	/* disable receive for all VFs and wait one second */
 	if (adapter->vfs_allocated_count) {
 		int i;
+
 		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
 			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;

@@ -3070,6 +3076,7 @@ static int __igb_open(struct net_device *netdev, bool resuming)
 	/* notify VFs that reset has been completed */
 	if (adapter->vfs_allocated_count) {
 		u32 reg_data = rd32(E1000_CTRL_EXT);
+
 		reg_data |= E1000_CTRL_EXT_PFRSTD;
 		wr32(E1000_CTRL_EXT, reg_data);
 	}

@@ -3423,6 +3430,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
 	if (hw->mac.type > e1000_82575) {
 		/* Set the default pool for the PF's first queue */
 		u32 vtctl = rd32(E1000_VT_CTL);
+
 		vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
 			   E1000_VT_CTL_DISABLE_DEF_POOL);
 		vtctl |= adapter->vfs_allocated_count <<

@@ -4070,7 +4078,7 @@ static void igb_spoof_check(struct igb_adapter *adapter)
 	if (!adapter->wvbr)
 		return;
-	for(j = 0; j < adapter->vfs_allocated_count; j++) {
+	for (j = 0; j < adapter->vfs_allocated_count; j++) {
 		if (adapter->wvbr & (1 << j) ||
 		    adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
 			dev_warn(&adapter->pdev->dev,

@@ -4202,6 +4210,7 @@ static void igb_watchdog_task(struct work_struct *work)
 		if (!netif_carrier_ok(netdev)) {
 			u32 ctrl;
+
 			hw->mac.ops.get_speed_and_duplex(hw,
 							 &adapter->link_speed,
 							 &adapter->link_duplex);

@@ -4333,6 +4342,7 @@ static void igb_watchdog_task(struct work_struct *work)
 	/* Cause software interrupt to ensure Rx ring is cleaned */
 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
 		u32 eics = 0;
+
 		for (i = 0; i < adapter->num_q_vectors; i++)
 			eics |= adapter->q_vector[i]->eims_value;
 		wr32(E1000_EICS, eics);

@@ -4663,6 +4673,7 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
 		return;
 	} else {
 		u8 l4_hdr = 0;
+
 		switch (first->protocol) {
 		case htons(ETH_P_IP):
 			vlan_macip_lens |= skb_network_header_len(skb);

@@ -4950,6 +4961,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 	 */
 	if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
 		unsigned short f;
+
 		for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
 			count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 	} else {

@@ -5607,6 +5619,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
 			vmolr |= E1000_VMOLR_MPME;
 		} else if (vf_data->num_vf_mc_hashes) {
 			int j;
+
 			vmolr |= E1000_VMOLR_ROMPE;
 			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
 				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);

@@ -5658,6 +5671,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
 	for (i = 0; i < adapter->vfs_allocated_count; i++) {
 		u32 vmolr = rd32(E1000_VMOLR(i));
+
 		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
 		vf_data = &adapter->vf_data[i];

@@ -5756,6 +5770,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
 			if (!adapter->vf_data[vf].vlans_enabled) {
 				u32 size;
+
 				reg = rd32(E1000_VMOLR(vf));
 				size = reg & E1000_VMOLR_RLPML_MASK;
 				size += 4;

@@ -5784,6 +5799,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
 			adapter->vf_data[vf].vlans_enabled--;
 			if (!adapter->vf_data[vf].vlans_enabled) {
 				u32 size;
+
 				reg = rd32(E1000_VMOLR(vf));
 				size = reg & E1000_VMOLR_RLPML_MASK;
 				size -= 4;

@@ -5888,8 +5904,8 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
 		 */
 		if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
 			u32 vlvf, bits;
-			int regndx = igb_find_vlvf_entry(adapter, vid);
-			if (regndx < 0)
+			int regndx = igb_find_vlvf_entry(adapter, vid);
+			if (regndx < 0)
 				goto out;
 			/* See if any other pools are set for this VLAN filter

@@ -6949,6 +6965,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
 	    igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
 		u16 vid;
+
 		if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
 		    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
 			vid = be16_to_cpu(rx_desc->wb.upper.vlan);

@@ -8035,6 +8052,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
 	} /* endif adapter->dmac is not disabled */
 	} else if (hw->mac.type == e1000_82580) {
 		u32 reg = rd32(E1000_PCIEMISC);
+
 		wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
 		wr32(E1000_DMACR, 0);
 	}
...