Commit 71f9b61c authored by David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2018-09-25

This series contains updates to i40e and xsk.

Mariusz fixes an issue where the VF link state was not being updated
properly when the PF went down or up.  He also cleans up the promiscuous
configuration during a VF reset.

Patryk simplifies the code a bit by using the PF and HW variables that
are already declared, rather than going through the VSI pointers.  He
also removes the message length parameter from several virtchnl
functions, since it was neither used nor needed.

Harshitha fixes two potential race conditions when trying to change VF
settings, by creating a helper function that validates that the VF is
enabled and that its VSI is set up.

Sergey corrects a duplicate "link down" message by adding a check for
whether the link is up or going down.

Björn addresses an AF_XDP zero-copy issue where buffers passed
from userspace to the kernel were leaked when the hardware descriptor
ring was torn down.  A zero-copy capable driver picks buffers off the
fill ring and places them on the hardware receive ring, to be completed
at a later point when the DMA is done.  The transmit side is similar:
the driver picks buffers off the transmit ring and places them on the
transmit hardware ring.

In the typical flow, the receive buffer will be placed onto a receive
ring (completed to the user), and the transmit buffer will be placed on
the completion ring to notify the user that the transfer is done.

However, if the driver needs to tear down the hardware rings for some
reason (the interface goes down, reconfiguration, and so on), the
userspace buffers must not be leaked; they have to be reused or
completed back to userspace.
====================
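To make the recycling concrete, here is a minimal sketch of how a
zero-copy driver consumes and recycles userspace buffers with the
reuse-queue helpers added in this series; the my_* types and fields are
hypothetical stand-ins for driver state, not code from these patches:

	/* Minimal sketch, not from these patches: my_ring and my_rx_buffer
	 * are hypothetical driver types; only the xsk_umem_* helpers are
	 * the reuse-queue API introduced in this series.
	 */
	static bool my_alloc_rx_buffer(struct my_ring *ring, struct my_rx_buffer *bi)
	{
		struct xdp_umem *umem = ring->xsk_umem;
		u64 handle;

		/* Prefer a recycled address; fall back to the fill ring. */
		if (!xsk_umem_peek_addr_rq(umem, &handle))
			return false;

		bi->handle = handle & umem->chunk_mask;
		xsk_umem_discard_addr_rq(umem);
		return true;
	}

	static void my_clean_rx_ring(struct my_ring *ring)
	{
		u16 i;

		/* Teardown: hand every outstanding buffer to the reuse queue
		 * instead of leaking it; it will satisfy future allocations.
		 */
		for (i = 0; i < ring->count; i++)
			xsk_umem_fq_reuse(ring->xsk_umem, ring->rx_bi[i].handle);
	}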
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 7a153655 3ab52af5
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -5,6 +5,7 @@
 #include "i40e.h"
 #include "i40e_diag.h"
+#include "i40e_txrx_common.h"
 
 /* ethtool statistics helpers */
 
@@ -1710,6 +1711,13 @@ static int i40e_set_ringparam(struct net_device *netdev,
 	    (new_rx_count == vsi->rx_rings[0]->count))
 		return 0;
 
+	/* If there is a AF_XDP UMEM attached to any of Rx rings,
+	 * disallow changing the number of descriptors -- regardless
+	 * if the netdev is running or not.
+	 */
+	if (i40e_xsk_any_rx_ring_enabled(vsi))
+		return -EBUSY;
+
 	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
 		timeout--;
 		if (!timeout)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1532,8 +1532,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 		return 0;
 	}
 
-	if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
-	    test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
+	if (test_bit(__I40E_DOWN, pf->state) ||
+	    test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
 		return -EADDRNOTAVAIL;
 
 	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
@@ -1557,8 +1557,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 	if (vsi->type == I40E_VSI_MAIN) {
 		i40e_status ret;
 
-		ret = i40e_aq_mac_address_write(&vsi->back->hw,
-						I40E_AQC_WRITE_TYPE_LAA_WOL,
+		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
 						addr->sa_data, NULL);
 		if (ret)
 			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
@@ -1569,7 +1568,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 	/* schedule our worker thread which will take care of
 	 * applying the new filter changes
 	 */
-	i40e_service_event_schedule(vsi->back);
+	i40e_service_event_schedule(pf);
 	return 0;
 }
 
@@ -6432,7 +6431,10 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 	char *req_fec = "";
 	char *an = "";
 
-	new_speed = pf->hw.phy.link_info.link_speed;
+	if (isup)
+		new_speed = pf->hw.phy.link_info.link_speed;
+	else
+		new_speed = I40E_LINK_SPEED_UNKNOWN;
 
 	if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
 		return;
@@ -8509,14 +8511,9 @@ static void i40e_link_event(struct i40e_pf *pf)
 	i40e_status status;
 	bool new_link, old_link;
 
-	/* save off old link status information */
-	pf->hw.phy.link_info_old = pf->hw.phy.link_info;
-
 	/* set this to force the get_link_status call to refresh state */
 	pf->hw.phy.get_link_info = true;
-
 	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
-
 	status = i40e_get_link_status(&pf->hw, &new_link);
 
 	/* On success, disable temp link polling */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -636,13 +636,18 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
 	unsigned long bi_size;
 	u16 i;
 
-	/* ring already cleared, nothing to do */
-	if (!tx_ring->tx_bi)
-		return;
+	if (ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
+		i40e_xsk_clean_tx_ring(tx_ring);
+	} else {
+		/* ring already cleared, nothing to do */
+		if (!tx_ring->tx_bi)
+			return;
 
-	/* Free all the Tx ring sk_buffs */
-	for (i = 0; i < tx_ring->count; i++)
-		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
+		/* Free all the Tx ring sk_buffs */
+		for (i = 0; i < tx_ring->count; i++)
+			i40e_unmap_and_free_tx_resource(tx_ring,
+							&tx_ring->tx_bi[i]);
+	}
 
 	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
 	memset(tx_ring->tx_bi, 0, bi_size);
 
@@ -1350,8 +1355,10 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
 		rx_ring->skb = NULL;
 	}
 
-	if (rx_ring->xsk_umem)
+	if (rx_ring->xsk_umem) {
+		i40e_xsk_clean_rx_ring(rx_ring);
 		goto skip_free;
+	}
 
 	/* Free all the Rx ring sk_buffs */
 	for (i = 0; i < rx_ring->count; i++) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
@@ -87,4 +87,8 @@ static inline void i40e_arm_wb(struct i40e_ring *tx_ring,
 	}
 }
 
+void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring);
+void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring);
+bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi);
+
 #endif /* I40E_TXRX_COMMON_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1084,6 +1084,136 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
 	return -EIO;
 }
 
+static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi);
+
+/**
+ * i40e_config_vf_promiscuous_mode
+ * @vf: pointer to the VF info
+ * @vsi_id: VSI id
+ * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
+ * @alluni: set MAC L2 layer unicast promiscuous enable/disable
+ *
+ * Called from the VF to configure the promiscuous mode of
+ * VF vsis and from the VF reset path to reset promiscuous mode.
+ **/
+static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
+						   u16 vsi_id,
+						   bool allmulti,
+						   bool alluni)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_mac_filter *f;
+	i40e_status aq_ret = 0;
+	struct i40e_vsi *vsi;
+	int bkt;
+
+	vsi = i40e_find_vsi_from_id(pf, vsi_id);
+	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
+		return I40E_ERR_PARAM;
+
+	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+		dev_err(&pf->pdev->dev,
+			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
+			vf->vf_id);
+		/* Lie to the VF on purpose. */
+		return 0;
+	}
+
+	if (vf->port_vlan_id) {
+		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
+							    allmulti,
+							    vf->port_vlan_id,
+							    NULL);
+		if (aq_ret) {
+			int aq_err = pf->hw.aq.asq_last_status;
+
+			dev_err(&pf->pdev->dev,
+				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+				vf->vf_id,
+				i40e_stat_str(&pf->hw, aq_ret),
+				i40e_aq_str(&pf->hw, aq_err));
+			return aq_ret;
+		}
+
+		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
+							    alluni,
+							    vf->port_vlan_id,
+							    NULL);
+		if (aq_ret) {
+			int aq_err = pf->hw.aq.asq_last_status;
+
+			dev_err(&pf->pdev->dev,
+				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
+				vf->vf_id,
+				i40e_stat_str(&pf->hw, aq_ret),
+				i40e_aq_str(&pf->hw, aq_err));
+		}
+		return aq_ret;
+	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
+		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
+			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
+				continue;
+			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
+								    vsi->seid,
+								    allmulti,
+								    f->vlan,
+								    NULL);
+			if (aq_ret) {
+				int aq_err = pf->hw.aq.asq_last_status;
+
+				dev_err(&pf->pdev->dev,
+					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
+					f->vlan,
+					i40e_stat_str(&pf->hw, aq_ret),
+					i40e_aq_str(&pf->hw, aq_err));
+			}
+
+			aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
+								    vsi->seid,
+								    alluni,
+								    f->vlan,
+								    NULL);
+			if (aq_ret) {
+				int aq_err = pf->hw.aq.asq_last_status;
+
+				dev_err(&pf->pdev->dev,
+					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
+					f->vlan,
+					i40e_stat_str(&pf->hw, aq_ret),
+					i40e_aq_str(&pf->hw, aq_err));
+			}
+		}
+		return aq_ret;
+	}
+
+	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, allmulti,
+						       NULL);
+	if (aq_ret) {
+		int aq_err = pf->hw.aq.asq_last_status;
+
+		dev_err(&pf->pdev->dev,
+			"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+			vf->vf_id,
+			i40e_stat_str(&pf->hw, aq_ret),
+			i40e_aq_str(&pf->hw, aq_err));
+		return aq_ret;
+	}
+
+	aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, alluni,
+						     NULL, true);
+	if (aq_ret) {
+		int aq_err = pf->hw.aq.asq_last_status;
+
+		dev_err(&pf->pdev->dev,
+			"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
+			vf->vf_id,
+			i40e_stat_str(&pf->hw, aq_ret),
+			i40e_aq_str(&pf->hw, aq_err));
+	}
+	return aq_ret;
+}
+
 /**
  * i40e_trigger_vf_reset
  * @vf: pointer to the VF structure
@@ -1145,6 +1275,9 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
 	struct i40e_hw *hw = &pf->hw;
 	u32 reg;
 
+	/* disable promisc modes in case they were enabled */
+	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);
+
 	/* free VF resources to begin resetting the VSI state */
 	i40e_free_vf_res(vf);
@@ -1840,143 +1973,55 @@ static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
  * i40e_vc_config_promiscuous_mode_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
  *
  * called from the VF to configure the promiscuous mode of
  * VF vsis
  **/
-static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
-					       u8 *msg, u16 msglen)
+static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_promisc_info *info =
 	    (struct virtchnl_promisc_info *)msg;
 	struct i40e_pf *pf = vf->pf;
-	struct i40e_hw *hw = &pf->hw;
-	struct i40e_mac_filter *f;
 	i40e_status aq_ret = 0;
 	bool allmulti = false;
-	struct i40e_vsi *vsi;
 	bool alluni = false;
-	int aq_err = 0;
-	int bkt;
 
-	vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
-	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
-	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
-	    !vsi) {
-		aq_ret = I40E_ERR_PARAM;
-		goto error_param;
-	}
-	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
-		dev_err(&pf->pdev->dev,
-			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
-			vf->vf_id);
-		/* Lie to the VF on purpose. */
-		aq_ret = 0;
-		goto error_param;
-	}
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
+		return I40E_ERR_PARAM;
+
 	/* Multicast promiscuous handling*/
 	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
 		allmulti = true;
 
-	if (vf->port_vlan_id) {
-		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
-							    allmulti,
-							    vf->port_vlan_id,
-							    NULL);
-	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
-		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
-			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
-				continue;
-			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
-								    vsi->seid,
-								    allmulti,
-								    f->vlan,
-								    NULL);
-			aq_err = pf->hw.aq.asq_last_status;
-			if (aq_ret) {
-				dev_err(&pf->pdev->dev,
-					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
-					f->vlan,
-					i40e_stat_str(&pf->hw, aq_ret),
-					i40e_aq_str(&pf->hw, aq_err));
-				break;
-			}
-		}
-	} else {
-		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
-							       allmulti, NULL);
-		aq_err = pf->hw.aq.asq_last_status;
-		if (aq_ret) {
-			dev_err(&pf->pdev->dev,
-				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
-				vf->vf_id,
-				i40e_stat_str(&pf->hw, aq_ret),
-				i40e_aq_str(&pf->hw, aq_err));
-			goto error_param;
-		}
-	}
-
+	if (info->flags & FLAG_VF_UNICAST_PROMISC)
+		alluni = true;
+	aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
+						 alluni);
 	if (!aq_ret) {
-		dev_info(&pf->pdev->dev,
-			 "VF %d successfully set multicast promiscuous mode\n",
-			 vf->vf_id);
-		if (allmulti)
+		if (allmulti) {
+			dev_info(&pf->pdev->dev,
+				 "VF %d successfully set multicast promiscuous mode\n",
+				 vf->vf_id);
 			set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
-		else
-			clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
-	}
-
-	if (info->flags & FLAG_VF_UNICAST_PROMISC)
-		alluni = true;
-	if (vf->port_vlan_id) {
-		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
-							    alluni,
-							    vf->port_vlan_id,
-							    NULL);
-	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
-		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
-			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
-				continue;
-			aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
-								    vsi->seid,
-								    alluni,
-								    f->vlan,
-								    NULL);
-			aq_err = pf->hw.aq.asq_last_status;
-			if (aq_ret)
-				dev_err(&pf->pdev->dev,
-					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
-					f->vlan,
-					i40e_stat_str(&pf->hw, aq_ret),
-					i40e_aq_str(&pf->hw, aq_err));
-		}
-	} else {
-		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
-							     alluni, NULL,
-							     true);
-		aq_err = pf->hw.aq.asq_last_status;
-		if (aq_ret) {
-			dev_err(&pf->pdev->dev,
-				"VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
-				vf->vf_id, info->flags,
-				i40e_stat_str(&pf->hw, aq_ret),
-				i40e_aq_str(&pf->hw, aq_err));
-			goto error_param;
+		} else {
+			dev_info(&pf->pdev->dev,
+				 "VF %d successfully unset multicast promiscuous mode\n",
+				 vf->vf_id);
+			clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
 		}
-	}
-
-	if (alluni) {
-		if (!aq_ret) {
+		if (alluni) {
 			dev_info(&pf->pdev->dev,
 				 "VF %d successfully set unicast promiscuous mode\n",
 				 vf->vf_id);
 			set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
-		else
+		} else {
+			dev_info(&pf->pdev->dev,
+				 "VF %d successfully unset unicast promiscuous mode\n",
+				 vf->vf_id);
 			clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
 		}
 	}
-
-error_param:
 	/* send the response to the VF */
 	return i40e_vc_send_resp_to_vf(vf,
 				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
@@ -1987,12 +2032,11 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
  * i40e_vc_config_queues_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
  *
  * called from the VF to configure the rx/tx
  * queues
  **/
-static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_vsi_queue_config_info *qci =
 	    (struct virtchnl_vsi_queue_config_info *)msg;
@@ -2105,12 +2149,11 @@ static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
  * i40e_vc_config_irq_map_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
  *
  * called from the VF to configure the irq to
  * queue map
  **/
-static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_irq_map_info *irqmap_info =
 	    (struct virtchnl_irq_map_info *)msg;
@@ -2202,11 +2245,10 @@ static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
  * i40e_vc_enable_queues_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
  *
  * called from the VF to enable all or specific queue(s)
  **/
-static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_queue_select *vqs =
 	    (struct virtchnl_queue_select *)msg;
@@ -2261,12 +2303,11 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
  * i40e_vc_disable_queues_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
  *
  * called from the VF to disable all or specific
  * queue(s)
  **/
-static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_queue_select *vqs =
 	    (struct virtchnl_queue_select *)msg;
@@ -2309,14 +2350,13 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
  * i40e_vc_request_queues_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
  *
  * VFs get a default number of queues but can use this message to request a
  * different number. If the request is successful, PF will reset the VF and
  * return 0. If unsuccessful, PF will send message informing VF of number of
  * available queues and return result of sending VF a message.
  **/
-static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
+static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_vf_res_request *vfres =
 	    (struct virtchnl_vf_res_request *)msg;
@@ -2360,11 +2400,10 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
  * i40e_vc_get_stats_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
  *
  * called from the VF to get vsi stats
  **/
-static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_queue_select *vqs =
 	    (struct virtchnl_queue_select *)msg;
@@ -2470,11 +2509,10 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
  * i40e_vc_add_mac_addr_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
  *
  * add guest mac address filter
  **/
-static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_ether_addr_list *al =
 	    (struct virtchnl_ether_addr_list *)msg;
@@ -2541,11 +2579,10 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
  * i40e_vc_del_mac_addr_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
  *
  * remove guest mac address filter
  **/
-static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_ether_addr_list *al =
 	    (struct virtchnl_ether_addr_list *)msg;
@@ -2611,11 +2648,10 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
  * i40e_vc_add_vlan_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
  *
  * program guest vlan id
  **/
-static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_vlan_filter_list *vfl =
 	    (struct virtchnl_vlan_filter_list *)msg;
@@ -2684,11 +2720,10 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
  * i40e_vc_remove_vlan_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
  *
  * remove programmed guest vlan id
  **/
-static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_vlan_filter_list *vfl =
 	    (struct virtchnl_vlan_filter_list *)msg;
@@ -2771,13 +2806,11 @@ static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
  * i40e_vc_iwarp_qvmap_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
  * @config: config qvmap or release it
  *
  * called from the VF for the iwarp msgs
  **/
-static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
-				   bool config)
+static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
 {
 	struct virtchnl_iwarp_qvlist_info *qvlist_info =
 	    (struct virtchnl_iwarp_qvlist_info *)msg;
@@ -2808,11 +2841,10 @@ static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
  * i40e_vc_config_rss_key
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
  *
  * Configure the VF's RSS key
  **/
-static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_rss_key *vrk =
 	    (struct virtchnl_rss_key *)msg;
@@ -2840,11 +2872,10 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
  * i40e_vc_config_rss_lut
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
  *
  * Configure the VF's RSS LUT
  **/
-static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_rss_lut *vrl =
 	    (struct virtchnl_rss_lut *)msg;
@@ -2872,11 +2903,10 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
  * i40e_vc_get_rss_hena
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
  *
  * Return the RSS HENA bits allowed by the hardware
  **/
-static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_rss_hena *vrh = NULL;
 	struct i40e_pf *pf = vf->pf;
@@ -2908,11 +2938,10 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
  * i40e_vc_set_rss_hena
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
  *
  * Set the RSS HENA bits for the VF
  **/
-static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
 {
 	struct virtchnl_rss_hena *vrh =
 	    (struct virtchnl_rss_hena *)msg;
@@ -2937,12 +2966,10 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
  * i40e_vc_enable_vlan_stripping
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
  *
  * Enable vlan header stripping for the VF
  **/
-static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
-					 u16 msglen)
+static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
 {
 	struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
 	i40e_status aq_ret = 0;
@@ -2964,12 +2991,10 @@ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
  * i40e_vc_disable_vlan_stripping
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
- * @msglen: msg length
  *
  * Disable vlan header stripping for the VF
  **/
-static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
-					  u16 msglen)
+static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
 {
 	struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
 	i40e_status aq_ret = 0;
@@ -3669,65 +3694,65 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
 		ret = 0;
 		break;
 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
-		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
+		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
 		break;
 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
-		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
+		ret = i40e_vc_config_queues_msg(vf, msg);
 		break;
 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
-		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
+		ret = i40e_vc_config_irq_map_msg(vf, msg);
 		break;
 	case VIRTCHNL_OP_ENABLE_QUEUES:
-		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
+		ret = i40e_vc_enable_queues_msg(vf, msg);
 		i40e_vc_notify_vf_link_state(vf);
 		break;
 	case VIRTCHNL_OP_DISABLE_QUEUES:
-		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
+		ret = i40e_vc_disable_queues_msg(vf, msg);
 		break;
 	case VIRTCHNL_OP_ADD_ETH_ADDR:
-		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
+		ret = i40e_vc_add_mac_addr_msg(vf, msg);
 		break;
 	case VIRTCHNL_OP_DEL_ETH_ADDR:
-		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
+		ret = i40e_vc_del_mac_addr_msg(vf, msg);
 		break;
 	case VIRTCHNL_OP_ADD_VLAN:
-		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
+		ret = i40e_vc_add_vlan_msg(vf, msg);
 		break;
 	case VIRTCHNL_OP_DEL_VLAN:
-		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
+		ret = i40e_vc_remove_vlan_msg(vf, msg);
 		break;
 	case VIRTCHNL_OP_GET_STATS:
-		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
+		ret = i40e_vc_get_stats_msg(vf, msg);
 		break;
 	case VIRTCHNL_OP_IWARP:
 		ret = i40e_vc_iwarp_msg(vf, msg, msglen);
 		break;
 	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
-		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
+		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true);
 		break;
 	case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
-		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
+		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false);
 		break;
 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
-		ret = i40e_vc_config_rss_key(vf, msg, msglen);
+		ret = i40e_vc_config_rss_key(vf, msg);
 		break;
 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
-		ret = i40e_vc_config_rss_lut(vf, msg, msglen);
+		ret = i40e_vc_config_rss_lut(vf, msg);
 		break;
 	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
-		ret = i40e_vc_get_rss_hena(vf, msg, msglen);
+		ret = i40e_vc_get_rss_hena(vf, msg);
 		break;
 	case VIRTCHNL_OP_SET_RSS_HENA:
-		ret = i40e_vc_set_rss_hena(vf, msg, msglen);
+		ret = i40e_vc_set_rss_hena(vf, msg);
 		break;
 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
-		ret = i40e_vc_enable_vlan_stripping(vf, msg, msglen);
+		ret = i40e_vc_enable_vlan_stripping(vf, msg);
 		break;
 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
-		ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen);
+		ret = i40e_vc_disable_vlan_stripping(vf, msg);
 		break;
 	case VIRTCHNL_OP_REQUEST_QUEUES:
-		ret = i40e_vc_request_queues_msg(vf, msg, msglen);
+		ret = i40e_vc_request_queues_msg(vf, msg);
 		break;
 	case VIRTCHNL_OP_ENABLE_CHANNELS:
 		ret = i40e_vc_add_qch_msg(vf, msg);
@@ -3795,6 +3820,35 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
 	return 0;
 }
 
+/**
+ * i40e_validate_vf
+ * @pf: the physical function
+ * @vf_id: VF identifier
+ *
+ * Check that the VF is enabled and the VSI exists.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
+{
+	struct i40e_vsi *vsi;
+	struct i40e_vf *vf;
+	int ret = 0;
+
+	if (vf_id >= pf->num_alloc_vfs) {
+		dev_err(&pf->pdev->dev,
+			"Invalid VF Identifier %d\n", vf_id);
+		ret = -EINVAL;
+		goto err_out;
+	}
+	vf = &pf->vf[vf_id];
+	vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
+	if (!vsi)
+		ret = -EINVAL;
+err_out:
+	return ret;
+}
+
 /**
  * i40e_ndo_set_vf_mac
  * @netdev: network interface device structure
@@ -3816,14 +3870,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 	u8 i;
 
 	/* validate the request */
-	if (vf_id >= pf->num_alloc_vfs) {
-		dev_err(&pf->pdev->dev,
-			"Invalid VF Identifier %d\n", vf_id);
-		ret = -EINVAL;
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
 		goto error_param;
-	}
 
-	vf = &(pf->vf[vf_id]);
+	vf = &pf->vf[vf_id];
 	vsi = pf->vsi[vf->lan_vsi_idx];
 
 	/* When the VF is resetting wait until it is done.
@@ -3942,11 +3993,9 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
 	int ret = 0;
 
 	/* validate the request */
-	if (vf_id >= pf->num_alloc_vfs) {
-		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
-		ret = -EINVAL;
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
 		goto error_pvid;
-	}
 
 	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
 		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
@@ -3960,7 +4009,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
 		goto error_pvid;
 	}
 
-	vf = &(pf->vf[vf_id]);
+	vf = &pf->vf[vf_id];
 	vsi = pf->vsi[vf->lan_vsi_idx];
 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
 		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
@@ -4080,11 +4129,9 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
 	int ret = 0;
 
 	/* validate the request */
-	if (vf_id >= pf->num_alloc_vfs) {
-		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
-		ret = -EINVAL;
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
 		goto error;
-	}
 
 	if (min_tx_rate) {
 		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
@@ -4092,7 +4139,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
 		return -EINVAL;
 	}
 
-	vf = &(pf->vf[vf_id]);
+	vf = &pf->vf[vf_id];
 	vsi = pf->vsi[vf->lan_vsi_idx];
 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
 		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
@@ -4128,13 +4175,11 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
 	int ret = 0;
 
 	/* validate the request */
-	if (vf_id >= pf->num_alloc_vfs) {
-		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
-		ret = -EINVAL;
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
 		goto error_param;
-	}
 
-	vf = &(pf->vf[vf_id]);
+	vf = &pf->vf[vf_id];
 	/* first vsi is always the LAN vsi */
 	vsi = pf->vsi[vf->lan_vsi_idx];
 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -140,6 +140,7 @@ static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
 static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
 				u16 qid)
 {
+	struct xdp_umem_fq_reuse *reuseq;
 	bool if_running;
 	int err;
 
@@ -156,6 +157,12 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
 		return -EBUSY;
 	}
 
+	reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
+	if (!reuseq)
+		return -ENOMEM;
+
+	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
+
 	err = i40e_xsk_umem_dma_map(vsi, umem);
 	if (err)
 		return err;
@@ -353,16 +360,46 @@ static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring,
 }
 
 /**
- * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers
+ * i40e_alloc_buffer_slow_zc - Allocates an i40e_rx_buffer
  * @rx_ring: Rx ring
- * @count: The number of buffers to allocate
+ * @bi: Rx buffer to populate
  *
- * This function allocates a number of Rx buffers and places them on
- * the Rx ring.
+ * This function allocates an Rx buffer. The buffer can come from fill
+ * queue, or via the reuse queue.
  *
  * Returns true for a successful allocation, false otherwise
  **/
-bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
+static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring,
+				      struct i40e_rx_buffer *bi)
+{
+	struct xdp_umem *umem = rx_ring->xsk_umem;
+	u64 handle, hr;
+
+	if (!xsk_umem_peek_addr_rq(umem, &handle)) {
+		rx_ring->rx_stats.alloc_page_failed++;
+		return false;
+	}
+
+	handle &= rx_ring->xsk_umem->chunk_mask;
+
+	hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+	bi->dma = xdp_umem_get_dma(umem, handle);
+	bi->dma += hr;
+
+	bi->addr = xdp_umem_get_data(umem, handle);
+	bi->addr += hr;
+
+	bi->handle = handle + umem->headroom;
+
+	xsk_umem_discard_addr_rq(umem);
+	return true;
+}
+
+static __always_inline bool
+__i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count,
+			   bool alloc(struct i40e_ring *rx_ring,
+				      struct i40e_rx_buffer *bi))
 {
 	u16 ntu = rx_ring->next_to_use;
 	union i40e_rx_desc *rx_desc;
@@ -372,7 +409,7 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
 	rx_desc = I40E_RX_DESC(rx_ring, ntu);
 	bi = &rx_ring->rx_bi[ntu];
 	do {
-		if (!i40e_alloc_buffer_zc(rx_ring, bi)) {
+		if (!alloc(rx_ring, bi)) {
 			ok = false;
 			goto no_buffers;
 		}
@@ -404,6 +441,38 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
 	return ok;
 }
 
+/**
+ * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers
+ * @rx_ring: Rx ring
+ * @count: The number of buffers to allocate
+ *
+ * This function allocates a number of Rx buffers from the reuse queue
+ * or fill ring and places them on the Rx ring.
+ *
+ * Returns true for a successful allocation, false otherwise
+ **/
+bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
+{
+	return __i40e_alloc_rx_buffers_zc(rx_ring, count,
+					  i40e_alloc_buffer_slow_zc);
+}
+
+/**
+ * i40e_alloc_rx_buffers_fast_zc - Allocates a number of Rx buffers
+ * @rx_ring: Rx ring
+ * @count: The number of buffers to allocate
+ *
+ * This function allocates a number of Rx buffers from the fill ring
+ * or the internal recycle mechanism and places them on the Rx ring.
+ *
+ * Returns true for a successful allocation, false otherwise
+ **/
+static bool i40e_alloc_rx_buffers_fast_zc(struct i40e_ring *rx_ring, u16 count)
+{
+	return __i40e_alloc_rx_buffers_zc(rx_ring, count,
+					  i40e_alloc_buffer_zc);
+}
+
 /**
  * i40e_get_rx_buffer_zc - Return the current Rx buffer
  * @rx_ring: Rx ring
@@ -571,7 +640,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
 			failure = failure ||
-				  !i40e_alloc_rx_buffers_zc(rx_ring,
-							    cleaned_count);
+				  !i40e_alloc_rx_buffers_fast_zc(rx_ring,
+								 cleaned_count);
 			cleaned_count = 0;
 		}
 
@@ -830,3 +899,69 @@ int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id)
 	return 0;
 }
+
+void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
+{
+	u16 i;
+
+	for (i = 0; i < rx_ring->count; i++) {
+		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+
+		if (!rx_bi->addr)
+			continue;
+
+		xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_bi->handle);
+		rx_bi->addr = NULL;
+	}
+}
+
+/**
+ * i40e_xsk_clean_xdp_ring - Clean the XDP Tx ring on shutdown
+ * @xdp_ring: XDP Tx ring
+ **/
+void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
+{
+	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
+	struct xdp_umem *umem = tx_ring->xsk_umem;
+	struct i40e_tx_buffer *tx_bi;
+	u32 xsk_frames = 0;
+
+	while (ntc != ntu) {
+		tx_bi = &tx_ring->tx_bi[ntc];
+
+		if (tx_bi->xdpf)
+			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
+		else
+			xsk_frames++;
+
+		tx_bi->xdpf = NULL;
+
+		ntc++;
+		if (ntc >= tx_ring->count)
+			ntc = 0;
+	}
+
+	if (xsk_frames)
+		xsk_umem_complete_tx(umem, xsk_frames);
+}
+
+/**
+ * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have AF_XDP UMEM attached
+ * @vsi: vsi
+ *
+ * Returns true if any of the Rx rings has an AF_XDP UMEM attached
+ **/
+bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
+{
+	int i;
+
+	if (!vsi->xsk_umems)
+		return false;
+
+	for (i = 0; i < vsi->num_queue_pairs; i++) {
+		if (vsi->xsk_umems[i])
+			return true;
+	}
+
+	return false;
+}
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
@@ -21,6 +21,12 @@ struct xdp_umem_page {
 	dma_addr_t dma;
 };
 
+struct xdp_umem_fq_reuse {
+	u32 nentries;
+	u32 length;
+	u64 handles[];
+};
+
 struct xdp_umem {
 	struct xsk_queue *fq;
 	struct xsk_queue *cq;
@@ -37,6 +43,7 @@ struct xdp_umem {
 	struct page **pgs;
 	u32 npgs;
 	struct net_device *dev;
+	struct xdp_umem_fq_reuse *fq_reuse;
 	u16 queue_id;
 	bool zc;
 	spinlock_t xsk_list_lock;
@@ -75,6 +82,10 @@ void xsk_umem_discard_addr(struct xdp_umem *umem);
 void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
 bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
 void xsk_umem_consume_tx_done(struct xdp_umem *umem);
+struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
+struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
+					  struct xdp_umem_fq_reuse *newq);
+void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
 
 static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
 {
@@ -85,6 +96,35 @@ static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
 {
 	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
 }
+
+/* Reuse-queue aware version of FILL queue helpers */
+static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
+{
+	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
+
+	if (!rq->length)
+		return xsk_umem_peek_addr(umem, addr);
+
+	*addr = rq->handles[rq->length - 1];
+	return addr;
+}
+
+static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
+{
+	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
+
+	if (!rq->length)
+		xsk_umem_discard_addr(umem);
+	else
+		rq->length--;
+}
+
+static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
+{
+	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
+
+	rq->handles[rq->length++] = addr;
+}
+
 #else
 static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
@@ -128,6 +168,21 @@ static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
 {
 }
 
+static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
+{
+	return NULL;
+}
+
+static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
+	struct xdp_umem *umem,
+	struct xdp_umem_fq_reuse *newq)
+{
+	return NULL;
+}
+
+static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
+{
+}
+
 static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
 {
 	return NULL;
@@ -137,6 +192,20 @@ static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
 {
 	return 0;
 }
+
+static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
+{
+	return NULL;
+}
+
+static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
+{
+}
+
+static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
+{
+}
+
 #endif /* CONFIG_XDP_SOCKETS */
 
 #endif /* _LINUX_XDP_SOCK_H */
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
@@ -165,6 +165,8 @@ static void xdp_umem_release(struct xdp_umem *umem)
 		umem->cq = NULL;
 	}
 
+	xsk_reuseq_destroy(umem);
+
 	xdp_umem_unpin_pages(umem);
 
 	task = get_pid_task(umem->pid, PIDTYPE_PID);
diff --git a/net/xdp/xsk_queue.c b/net/xdp/xsk_queue.c
@@ -3,7 +3,9 @@
  * Copyright(c) 2018 Intel Corporation.
  */
 
+#include <linux/log2.h>
 #include <linux/slab.h>
+#include <linux/overflow.h>
 
 #include "xsk_queue.h"
 
@@ -62,3 +64,56 @@ void xskq_destroy(struct xsk_queue *q)
 	page_frag_free(q->ring);
 	kfree(q);
 }
+
+struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
+{
+	struct xdp_umem_fq_reuse *newq;
+
+	/* Check for overflow */
+	if (nentries > (u32)roundup_pow_of_two(nentries))
+		return NULL;
+	nentries = roundup_pow_of_two(nentries);
+
+	newq = kvmalloc(struct_size(newq, handles, nentries), GFP_KERNEL);
+	if (!newq)
+		return NULL;
+	memset(newq, 0, offsetof(typeof(*newq), handles));
+
+	newq->nentries = nentries;
+	return newq;
+}
+EXPORT_SYMBOL_GPL(xsk_reuseq_prepare);
+
+struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
+					  struct xdp_umem_fq_reuse *newq)
+{
+	struct xdp_umem_fq_reuse *oldq = umem->fq_reuse;
+
+	if (!oldq) {
+		umem->fq_reuse = newq;
+		return NULL;
+	}
+
+	if (newq->nentries < oldq->length)
+		return newq;
+
+	memcpy(newq->handles, oldq->handles,
+	       array_size(oldq->length, sizeof(u64)));
+	newq->length = oldq->length;
+
+	umem->fq_reuse = newq;
+	return oldq;
+}
+EXPORT_SYMBOL_GPL(xsk_reuseq_swap);
+
+void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
+{
+	kvfree(rq);
+}
+EXPORT_SYMBOL_GPL(xsk_reuseq_free);
+
+void xsk_reuseq_destroy(struct xdp_umem *umem)
+{
+	xsk_reuseq_free(umem->fq_reuse);
+	umem->fq_reuse = NULL;
+}
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
@@ -258,4 +258,7 @@ void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask);
 struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
 void xskq_destroy(struct xsk_queue *q_ops);
 
+/* Executed by the core when the entire UMEM gets freed */
+void xsk_reuseq_destroy(struct xdp_umem *umem);
+
 #endif /* _LINUX_XSK_QUEUE_H */
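For completeness, the setup side of the reuse queue, as a driver would
drive it with the functions exported above, looks roughly as follows.
This is a sketch, not code from the series: my_enable_umem and
num_descs are hypothetical stand-ins for the driver's own enable path
and Rx ring size.

	/* Sketch of reuse-queue setup using only functions added above. */
	static int my_enable_umem(struct xdp_umem *umem, u32 num_descs)
	{
		struct xdp_umem_fq_reuse *reuseq;

		/* Size the reuse queue to cover every descriptor the HW ring
		 * may have outstanding at teardown time.
		 */
		reuseq = xsk_reuseq_prepare(num_descs);
		if (!reuseq)
			return -ENOMEM;

		/* Install it; the swap returns the previous queue (or NULL),
		 * which the caller must free.
		 */
		xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
		return 0;
	}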