Commit d19efb72 authored by David S. Miller

Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2018-04-24

This series contains fixes to ixgbevf, igb and ice drivers.

Colin Ian King fixes the return value on error for the new XDP support
that went into ixgbevf for 4.17.

Vinicius provides a fix for queue 0 for igb, which was not receiving all
the credits it needed when QAV mode was enabled.

Anirudh provides several fixes for the new ice driver, starting with
properly initializing num_nodes_added to zero.  Fixed up a code comment
to better reflect what is really going on in the code.  Changed the
detection of OICR interrupts to a more reliable method.

Md Fahad fixes the ice driver to allocate the right amount of memory
when reading and storing the device's MAC addresses.  The device can
report up to two MAC addresses (LAN and WoL); while WoL is currently not
supported, we need to ensure it can be properly handled when support is
added.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f8d62037 d6fef10c
...@@ -586,7 +586,7 @@ struct ice_sw_rule_lg_act { ...@@ -586,7 +586,7 @@ struct ice_sw_rule_lg_act {
#define ICE_LG_ACT_MIRROR_VSI_ID_S 3 #define ICE_LG_ACT_MIRROR_VSI_ID_S 3
#define ICE_LG_ACT_MIRROR_VSI_ID_M (0x3FF << ICE_LG_ACT_MIRROR_VSI_ID_S) #define ICE_LG_ACT_MIRROR_VSI_ID_M (0x3FF << ICE_LG_ACT_MIRROR_VSI_ID_S)
/* Action type = 5 - Large Action */ /* Action type = 5 - Generic Value */
#define ICE_LG_ACT_GENERIC 0x5 #define ICE_LG_ACT_GENERIC 0x5
#define ICE_LG_ACT_GENERIC_VALUE_S 3 #define ICE_LG_ACT_GENERIC_VALUE_S 3
#define ICE_LG_ACT_GENERIC_VALUE_M (0xFFFF << ICE_LG_ACT_GENERIC_VALUE_S) #define ICE_LG_ACT_GENERIC_VALUE_M (0xFFFF << ICE_LG_ACT_GENERIC_VALUE_S)
......
...@@ -78,6 +78,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, ...@@ -78,6 +78,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
struct ice_aq_desc desc; struct ice_aq_desc desc;
enum ice_status status; enum ice_status status;
u16 flags; u16 flags;
u8 i;
cmd = &desc.params.mac_read; cmd = &desc.params.mac_read;
...@@ -98,8 +99,16 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, ...@@ -98,8 +99,16 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
return ICE_ERR_CFG; return ICE_ERR_CFG;
} }
ether_addr_copy(hw->port_info->mac.lan_addr, resp->mac_addr); /* A single port can report up to two (LAN and WoL) addresses */
ether_addr_copy(hw->port_info->mac.perm_addr, resp->mac_addr); for (i = 0; i < cmd->num_addr; i++)
if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
ether_addr_copy(hw->port_info->mac.lan_addr,
resp[i].mac_addr);
ether_addr_copy(hw->port_info->mac.perm_addr,
resp[i].mac_addr);
break;
}
return 0; return 0;
} }
...@@ -464,9 +473,12 @@ enum ice_status ice_init_hw(struct ice_hw *hw) ...@@ -464,9 +473,12 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status) if (status)
goto err_unroll_sched; goto err_unroll_sched;
/* Get port MAC information */ /* Get MAC information */
mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp); /* A single port can report up to two (LAN and WoL) addresses */
mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL); mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
sizeof(struct ice_aqc_manage_mac_read_resp),
GFP_KERNEL);
mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
if (!mac_buf) { if (!mac_buf) {
status = ICE_ERR_NO_MEMORY; status = ICE_ERR_NO_MEMORY;
......
...@@ -121,8 +121,6 @@ ...@@ -121,8 +121,6 @@
#define PFINT_FW_CTL_CAUSE_ENA_S 30 #define PFINT_FW_CTL_CAUSE_ENA_S 30
#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S) #define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S)
#define PFINT_OICR 0x0016CA00 #define PFINT_OICR 0x0016CA00
#define PFINT_OICR_INTEVENT_S 0
#define PFINT_OICR_INTEVENT_M BIT(PFINT_OICR_INTEVENT_S)
#define PFINT_OICR_HLP_RDY_S 14 #define PFINT_OICR_HLP_RDY_S 14
#define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S) #define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S)
#define PFINT_OICR_CPM_RDY_S 15 #define PFINT_OICR_CPM_RDY_S 15
......
...@@ -1722,9 +1722,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) ...@@ -1722,9 +1722,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
oicr = rd32(hw, PFINT_OICR); oicr = rd32(hw, PFINT_OICR);
ena_mask = rd32(hw, PFINT_OICR_ENA); ena_mask = rd32(hw, PFINT_OICR_ENA);
if (!(oicr & PFINT_OICR_INTEVENT_M))
goto ena_intr;
if (oicr & PFINT_OICR_GRST_M) { if (oicr & PFINT_OICR_GRST_M) {
u32 reset; u32 reset;
/* we have a reset warning */ /* we have a reset warning */
...@@ -1782,7 +1779,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) ...@@ -1782,7 +1779,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
} }
ret = IRQ_HANDLED; ret = IRQ_HANDLED;
ena_intr:
/* re-enable interrupt causes that are not handled during this pass */ /* re-enable interrupt causes that are not handled during this pass */
wr32(hw, PFINT_OICR_ENA, ena_mask); wr32(hw, PFINT_OICR_ENA, ena_mask);
if (!test_bit(__ICE_DOWN, pf->state)) { if (!test_bit(__ICE_DOWN, pf->state)) {
......
...@@ -751,14 +751,14 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, ...@@ -751,14 +751,14 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
u16 num_added = 0; u16 num_added = 0;
u32 temp; u32 temp;
*num_nodes_added = 0;
if (!num_nodes) if (!num_nodes)
return status; return status;
if (!parent || layer < hw->sw_entry_point_layer) if (!parent || layer < hw->sw_entry_point_layer)
return ICE_ERR_PARAM; return ICE_ERR_PARAM;
*num_nodes_added = 0;
/* max children per node per layer */ /* max children per node per layer */
max_child_nodes = max_child_nodes =
le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children); le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children);
......
...@@ -1700,7 +1700,22 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue, ...@@ -1700,7 +1700,22 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
WARN_ON(hw->mac.type != e1000_i210); WARN_ON(hw->mac.type != e1000_i210);
WARN_ON(queue < 0 || queue > 1); WARN_ON(queue < 0 || queue > 1);
if (enable) { if (enable || queue == 0) {
/* i210 does not allow the queue 0 to be in the Strict
* Priority mode while the Qav mode is enabled, so,
* instead of disabling strict priority mode, we give
* queue 0 the maximum of credits possible.
*
* See section 8.12.19 of the i210 datasheet, "Note:
* Queue0 QueueMode must be set to 1b when
* TransmitMode is set to Qav."
*/
if (queue == 0 && !enable) {
/* max "linkspeed" idleslope in kbps */
idleslope = 1000000;
hicredit = ETH_FRAME_LEN;
}
set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
......
...@@ -3420,7 +3420,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) ...@@ -3420,7 +3420,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
if (!err) if (!err)
continue; continue;
hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j); hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j);
break; goto err_setup_tx;
} }
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment