Commit 77079683 authored by David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2018-02-13

This series contains updates to i40e and i40evf.

Wei Yongjun fixes a function that needed to be "static".  He also changes
allocations from GFP_KERNEL to GFP_ATOMIC where we have taken a spinlock.
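
As a reminder of the rule behind that change (a minimal sketch; the
example_send_list() helper and its argument are illustrative, not the
driver's exact code): kzalloc() with GFP_KERNEL may sleep, and sleeping
while holding a spinlock is a bug, so allocations made under the lock
must use GFP_ATOMIC.

static void example_send_list(struct i40evf_adapter *adapter, size_t len)
{
        struct virtchnl_ether_addr_list *veal;

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        /* GFP_KERNEL may sleep; mac_vlan_list_lock is held here, so
         * the allocation has to be atomic.
         */
        veal = kzalloc(len, GFP_ATOMIC);
        if (!veal) {
                spin_unlock_bh(&adapter->mac_vlan_list_lock);
                return;
        }

        /* ... fill in and send the list via the AdminQ ... */

        spin_unlock_bh(&adapter->mac_vlan_list_lock);
        kfree(veal);
}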

Mitch cleans up several info messages so that they no longer print kernel
memory addresses, on the off chance this information could be used
maliciously.

Alan provides several fixes to broadcast filter handling, starting with
triggering overflow promiscuous mode when we run out of space for
broadcast filters, so that traffic is not unexpectedly dropped.  He also
refactors the code around when and how overflow promiscuous is changed,
to improve readability and maintainability.
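
The shape of that refactor, condensed from the i40e_sync_vsi_filters()
hunks below (a sketch, not the complete function): the overflow state is
sampled before and after the filter sync, and the exit threshold is only
recomputed when overflow promiscuous is actually being entered.

        bool old_overflow, new_overflow;

        old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);

        /* ... add/delete filters; the AQ helpers now set the overflow
         * bit themselves when firmware runs out of filter space ...
         */

        new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);

        /* Entering overflow promiscuous: record how far the filter
         * count must drop before it is safe to exit again.
         */
        if (!old_overflow && new_overflow)
                vsi->promisc_threshold = (vsi->active_filters * 3) / 4;

        /* Touch hardware promiscuous mode only if the netdev flag or
         * the overflow state actually changed.
         */
        if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
                bool cur_promisc = !!(vsi->current_netdev_flags & IFF_PROMISC) ||
                                   new_overflow;

                aq_ret = i40e_set_promiscuous(pf, cur_promisc);
        }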

Harshitha cleans up a message to make it clearer what is being reset, so
users are not left thinking the PF itself is resetting.

Dave reworks the check for whether shutting off the firmware LLDP engine
is supported: instead of testing the MAC type, firmware version, and NPAR
state in the ethtool path, a hardware feature flag is set at init time
which ethtool can then check.
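
Condensed from the hunks below, the capability is now determined once in
i40e_sw_init() and the ethtool path only tests the flag:

        /* i40e_sw_init(): stopping the FW LLDP engine is only supported
         * on XL710 parts with FW API version 1.7 or later and with NPAR
         * disabled.
         */
        if (pf->hw.mac.type == I40E_MAC_XL710 &&
            !pf->hw.func_caps.npar_enable &&
            (pf->hw.aq.api_maj_ver > 1 ||
             (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver > 6)))
                pf->hw_features |= I40E_HW_STOPPABLE_FW_LLDP;

        /* i40e_set_priv_flags(): ethtool now only checks the flag */
        if (!(pf->hw_features & I40E_HW_STOPPABLE_FW_LLDP)) {
                dev_warn(&pf->pdev->dev,
                         "Device does not support changing FW LLDP\n");
                return -EOPNOTSUPP;
        }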

Jake updates the VF driver to use __dev_uc_sync and __dev_mc_sync, like
the PF driver.
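
The resulting .ndo_set_rx_mode pattern, condensed from the i40evf hunks
below: the core sync helpers call back into the driver for each address
to add or remove, while the driver holds its MAC/VLAN list lock.

static int i40evf_addr_sync(struct net_device *netdev, const u8 *addr)
{
        struct i40evf_adapter *adapter = netdev_priv(netdev);

        /* called with mac_vlan_list_lock held (taken in .set_rx_mode) */
        return i40evf_add_filter(adapter, addr) ? 0 : -ENOMEM;
}

static int i40evf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
        struct i40evf_adapter *adapter = netdev_priv(netdev);
        struct i40evf_mac_filter *f;

        /* never remove our own device address via this path */
        if (ether_addr_equal(addr, netdev->dev_addr))
                return 0;

        f = i40evf_find_filter(adapter, addr);
        if (f) {
                f->remove = true;
                adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
        }
        return 0;
}

static void i40evf_set_rx_mode(struct net_device *netdev)
{
        struct i40evf_adapter *adapter = netdev_priv(netdev);

        spin_lock_bh(&adapter->mac_vlan_list_lock);
        __dev_uc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync);
        __dev_mc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync);
        spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
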
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 153e1b84 693acdd0
......@@ -507,6 +507,7 @@ struct i40e_pf {
#define I40E_HW_STOP_FW_LLDP BIT(16)
#define I40E_HW_PORT_ID_VALID BIT(17)
#define I40E_HW_RESTART_AUTONEG BIT(18)
#define I40E_HW_STOPPABLE_FW_LLDP BIT(19)
u64 flags;
#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(0)
......
......@@ -155,8 +155,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
(unsigned long int)nd->vlan_features);
}
dev_info(&pf->pdev->dev,
" vlgrp: & = %p\n", vsi->active_vlans);
dev_info(&pf->pdev->dev, " active_vlans is %s\n",
vsi->active_vlans ? "<valid>" : "<null>");
dev_info(&pf->pdev->dev,
" flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);
......@@ -269,14 +269,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
if (!rx_ring)
continue;
dev_info(&pf->pdev->dev,
" rx_rings[%i]: desc = %p\n",
i, rx_ring->desc);
dev_info(&pf->pdev->dev,
" rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
i, rx_ring->dev,
rx_ring->netdev,
rx_ring->rx_bi);
dev_info(&pf->pdev->dev,
" rx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
i, *rx_ring->state,
......@@ -307,13 +299,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
rx_ring->rx_stats.realloc_count,
rx_ring->rx_stats.page_reuse_count);
dev_info(&pf->pdev->dev,
" rx_rings[%i]: size = %i, dma = 0x%08lx\n",
i, rx_ring->size,
(unsigned long int)rx_ring->dma);
dev_info(&pf->pdev->dev,
" rx_rings[%i]: vsi = %p, q_vector = %p\n",
i, rx_ring->vsi,
rx_ring->q_vector);
" rx_rings[%i]: size = %i\n",
i, rx_ring->size);
dev_info(&pf->pdev->dev,
" rx_rings[%i]: itr_setting = %d (%s)\n",
i, rx_ring->itr_setting,
......@@ -325,14 +312,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
if (!tx_ring)
continue;
dev_info(&pf->pdev->dev,
" tx_rings[%i]: desc = %p\n",
i, tx_ring->desc);
dev_info(&pf->pdev->dev,
" tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
i, tx_ring->dev,
tx_ring->netdev,
tx_ring->tx_bi);
dev_info(&pf->pdev->dev,
" tx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
i, *tx_ring->state,
......@@ -355,13 +334,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
tx_ring->tx_stats.tx_busy,
tx_ring->tx_stats.tx_done_old);
dev_info(&pf->pdev->dev,
" tx_rings[%i]: size = %i, dma = 0x%08lx\n",
i, tx_ring->size,
(unsigned long int)tx_ring->dma);
dev_info(&pf->pdev->dev,
" tx_rings[%i]: vsi = %p, q_vector = %p\n",
i, tx_ring->vsi,
tx_ring->q_vector);
" tx_rings[%i]: size = %i\n",
i, tx_ring->size);
dev_info(&pf->pdev->dev,
" tx_rings[%i]: DCB tc = %d\n",
i, tx_ring->dcb_tc);
......@@ -466,8 +440,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
vsi->info.resp_reserved[6], vsi->info.resp_reserved[7],
vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
if (vsi->back)
dev_info(&pf->pdev->dev, " PF = %p\n", vsi->back);
dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx);
dev_info(&pf->pdev->dev,
" tc_config: numtc = %d, enabled_tc = 0x%x\n",
......
......@@ -4426,17 +4426,9 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
* unsupported FW versions.
*/
if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
if (pf->hw.func_caps.npar_enable) {
if (!(pf->hw_features & I40E_HW_STOPPABLE_FW_LLDP)) {
dev_warn(&pf->pdev->dev,
"Unable to change FW LLDP if NPAR active\n");
return -EOPNOTSUPP;
}
if (pf->hw.aq.api_maj_ver < 1 ||
(pf->hw.aq.api_maj_ver == 1 &&
pf->hw.aq.api_min_ver < 7)) {
dev_warn(&pf->pdev->dev,
"FW ver does not support changing FW LLDP\n");
"Device does not support changing FW LLDP\n");
return -EOPNOTSUPP;
}
}
......
......@@ -215,8 +215,8 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
dev_info(&pf->pdev->dev,
"param err: pile=%p needed=%d id=0x%04x\n",
pile, needed, id);
"param err: pile=%s needed=%d id=0x%04x\n",
pile ? "<valid>" : "<null>", needed, id);
return -EINVAL;
}
......@@ -1380,14 +1380,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
ether_addr_copy(f->macaddr, macaddr);
f->vlan = vlan;
/* If we're in overflow promisc mode, set the state directly
* to failed, so we don't bother to try sending the filter
* to the hardware.
*/
if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))
f->state = I40E_FILTER_FAILED;
else
f->state = I40E_FILTER_NEW;
f->state = I40E_FILTER_NEW;
INIT_HLIST_NODE(&f->hlist);
key = i40e_addr_to_hkey(macaddr);
......@@ -2116,17 +2109,16 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
* @list: the list of filters to send to firmware
* @add_head: Position in the add hlist
* @num_add: the number of filters to add
* @promisc_change: set to true on exit if promiscuous mode was forced on
*
* Send a request to firmware via AdminQ to add a chunk of filters. Will set
* promisc_changed to true if the firmware has run out of space for more
* filters.
* __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
* space for more filters.
*/
static
void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
struct i40e_aqc_add_macvlan_element_data *list,
struct i40e_new_mac_filter *add_head,
int num_add, bool *promisc_changed)
int num_add)
{
struct i40e_hw *hw = &vsi->back->hw;
int aq_err, fcnt;
......@@ -2136,7 +2128,6 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
fcnt = i40e_update_filter_state(num_add, list, add_head);
if (fcnt != num_add) {
*promisc_changed = true;
set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, promiscuous mode forced on\n",
......@@ -2177,11 +2168,13 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
NULL);
}
if (aq_ret)
if (aq_ret) {
set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
dev_warn(&vsi->back->pdev->dev,
"Error %s setting broadcast promiscuous mode on %s\n",
"Error %s, forcing overflow promiscuous on %s\n",
i40e_aq_str(hw, hw->aq.asq_last_status),
vsi_name);
}
return aq_ret;
}
......@@ -2267,9 +2260,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
struct i40e_mac_filter *f;
struct i40e_new_mac_filter *new, *add_head = NULL;
struct i40e_hw *hw = &vsi->back->hw;
bool old_overflow, new_overflow;
unsigned int failed_filters = 0;
unsigned int vlan_filters = 0;
bool promisc_changed = false;
char vsi_name[16] = "PF";
int filter_list_len = 0;
i40e_status aq_ret = 0;
......@@ -2291,6 +2284,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
usleep_range(1000, 2000);
pf = vsi->back;
old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
if (vsi->netdev) {
changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
vsi->current_netdev_flags = vsi->netdev->flags;
......@@ -2423,12 +2418,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
num_add = 0;
hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
if (test_bit(__I40E_VSI_OVERFLOW_PROMISC,
vsi->state)) {
new->state = I40E_FILTER_FAILED;
continue;
}
/* handle broadcast filters by updating the broadcast
* promiscuous flag instead of adding a MAC filter.
*/
......@@ -2464,15 +2453,14 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* flush a full buffer */
if (num_add == filter_list_len) {
i40e_aqc_add_filters(vsi, vsi_name, add_list,
add_head, num_add,
&promisc_changed);
add_head, num_add);
memset(add_list, 0, list_size);
num_add = 0;
}
}
if (num_add) {
i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
num_add, &promisc_changed);
num_add);
}
/* Now move all of the filters from the temp add list back to
* the VSI's list.
......@@ -2501,24 +2489,16 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* If promiscuous mode has changed, we need to calculate a new
* threshold for when we are safe to exit
*/
if (promisc_changed)
vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
/* Check if we are able to exit overflow promiscuous mode. We can
* safely exit if we didn't just enter, we no longer have any failed
* filters, and we have reduced filters below the threshold value.
*/
if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) &&
!promisc_changed && !failed_filters &&
(vsi->active_filters < vsi->promisc_threshold)) {
if (old_overflow && !failed_filters &&
vsi->active_filters < vsi->promisc_threshold) {
dev_info(&pf->pdev->dev,
"filter logjam cleared on %s, leaving overflow promiscuous mode\n",
vsi_name);
clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
promisc_changed = true;
vsi->promisc_threshold = 0;
}
......@@ -2528,6 +2508,14 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
goto out;
}
new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
/* If we are entering overflow promiscuous, we need to calculate a new
* threshold for when we are safe to exit
*/
if (!old_overflow && new_overflow)
vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
/* check for changes in promiscuous modes */
if (changed_flags & IFF_ALLMULTI) {
bool cur_multipromisc;
......@@ -2548,12 +2536,11 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
}
}
if ((changed_flags & IFF_PROMISC) || promisc_changed) {
if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
bool cur_promisc;
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
test_bit(__I40E_VSI_OVERFLOW_PROMISC,
vsi->state));
new_overflow);
aq_ret = i40e_set_promiscuous(pf, cur_promisc);
if (aq_ret) {
retval = i40e_aq_rc_to_posix(aq_ret,
......@@ -5381,7 +5368,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
* @vsi: VSI to be configured
*
**/
int i40e_get_link_speed(struct i40e_vsi *vsi)
static int i40e_get_link_speed(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
......@@ -9954,18 +9941,17 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
mutex_lock(&pf->switch_mutex);
if (!pf->vsi[vsi->idx]) {
dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
vsi->idx, vsi->idx, vsi, vsi->type);
dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
vsi->idx, vsi->idx, vsi->type);
goto unlock_vsi;
}
if (pf->vsi[vsi->idx] != vsi) {
dev_err(&pf->pdev->dev,
"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
"pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
pf->vsi[vsi->idx]->idx,
pf->vsi[vsi->idx],
pf->vsi[vsi->idx]->type,
vsi->idx, vsi, vsi->type);
vsi->idx, vsi->type);
goto unlock_vsi;
}
......@@ -11103,6 +11089,16 @@ static int i40e_sw_init(struct i40e_pf *pf)
/* IWARP needs one extra vector for CQP just like MISC.*/
pf->num_iwarp_msix = (int)num_online_cpus() + 1;
}
/* Stopping the FW LLDP engine is only supported on the
* XL710 with a FW ver >= 1.7. Also, stopping FW LLDP
* engine is not supported if NPAR is functioning on this
* part
*/
if (pf->hw.mac.type == I40E_MAC_XL710 &&
!pf->hw.func_caps.npar_enable &&
(pf->hw.aq.api_maj_ver > 1 ||
(pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver > 6)))
pf->hw_features |= I40E_HW_STOPPABLE_FW_LLDP;
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
......
......@@ -785,7 +785,7 @@ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
**/
static struct
i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
u8 *macaddr)
const u8 *macaddr)
{
struct i40evf_mac_filter *f;
......@@ -808,7 +808,7 @@ i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
**/
static struct
i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
u8 *macaddr)
const u8 *macaddr)
{
struct i40evf_mac_filter *f;
......@@ -880,50 +880,64 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
}
/**
* i40evf_set_rx_mode - NDO callback to set the netdev filters
* @netdev: network interface device structure
**/
static void i40evf_set_rx_mode(struct net_device *netdev)
* i40evf_addr_sync - Callback for dev_(mc|uc)_sync to add address
* @netdev: the netdevice
* @addr: address to add
*
* Called by __dev_(mc|uc)_sync when an address needs to be added. We call
* __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
*/
static int i40evf_addr_sync(struct net_device *netdev, const u8 *addr)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40evf_mac_filter *f, *ftmp;
struct netdev_hw_addr *uca;
struct netdev_hw_addr *mca;
struct netdev_hw_addr *ha;
/* add addr if not already in the filter list */
netdev_for_each_uc_addr(uca, netdev) {
i40evf_add_filter(adapter, uca->addr);
}
netdev_for_each_mc_addr(mca, netdev) {
i40evf_add_filter(adapter, mca->addr);
}
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
netdev_for_each_mc_addr(mca, netdev)
if (ether_addr_equal(mca->addr, f->macaddr))
goto bottom_of_search_loop;
netdev_for_each_uc_addr(uca, netdev)
if (ether_addr_equal(uca->addr, f->macaddr))
goto bottom_of_search_loop;
if (i40evf_add_filter(adapter, addr))
return 0;
else
return -ENOMEM;
}
for_each_dev_addr(netdev, ha)
if (ether_addr_equal(ha->addr, f->macaddr))
goto bottom_of_search_loop;
/**
* i40evf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
* @netdev: the netdevice
* @addr: address to add
*
* Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
* __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
*/
static int i40evf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40evf_mac_filter *f;
if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr))
goto bottom_of_search_loop;
/* Under some circumstances, we might receive a request to delete
* our own device address from our uc list. Because we store the
* device address in the VSI's MAC/VLAN filter list, we need to ignore
* such requests and not delete our device address from this list.
*/
if (ether_addr_equal(addr, netdev->dev_addr))
return 0;
/* f->macaddr wasn't found in uc, mc, or ha list so delete it */
f = i40evf_find_filter(adapter, addr);
if (f) {
f->remove = true;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
bottom_of_search_loop:
continue;
}
return 0;
}
/**
* i40evf_set_rx_mode - NDO callback to set the netdev filters
* @netdev: network interface device structure
**/
static void i40evf_set_rx_mode(struct net_device *netdev)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
spin_lock_bh(&adapter->mac_vlan_list_lock);
__dev_uc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync);
__dev_mc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync);
spin_unlock_bh(&adapter->mac_vlan_list_lock);
if (netdev->flags & IFF_PROMISC &&
!(adapter->flags & I40EVF_FLAG_PROMISC_ON))
......@@ -938,8 +952,6 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
else if (!(netdev->flags & IFF_ALLMULTI) &&
adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;
spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
/**
......@@ -1027,6 +1039,7 @@ static void i40evf_up_complete(struct i40evf_adapter *adapter)
void i40evf_down(struct i40evf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct i40evf_vlan_filter *vlf;
struct i40evf_mac_filter *f;
if (adapter->state <= __I40EVF_DOWN_PENDING)
......@@ -1040,12 +1053,17 @@ void i40evf_down(struct i40evf_adapter *adapter)
spin_lock_bh(&adapter->mac_vlan_list_lock);
/* clear the sync flag on all filters */
__dev_uc_unsync(adapter->netdev, NULL);
__dev_mc_unsync(adapter->netdev, NULL);
/* remove all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->remove = true;
}
/* remove all VLAN filters */
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
f->remove = true;
vlf->remove = true;
}
......@@ -3067,6 +3085,7 @@ static void i40evf_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40evf_vlan_filter *vlf, *vlftmp;
struct i40evf_mac_filter *f, *ftmp;
struct i40e_hw *hw = &adapter->hw;
int err;
......@@ -3129,9 +3148,10 @@ static void i40evf_remove(struct pci_dev *pdev)
list_del(&f->list);
kfree(f);
}
list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
list_del(&f->list);
kfree(f);
list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
list) {
list_del(&vlf->list);
kfree(vlf);
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
......
......@@ -465,7 +465,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
more = true;
}
veal = kzalloc(len, GFP_KERNEL);
veal = kzalloc(len, GFP_ATOMIC);
if (!veal) {
spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
......@@ -538,7 +538,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
(count * sizeof(struct virtchnl_ether_addr));
more = true;
}
veal = kzalloc(len, GFP_KERNEL);
veal = kzalloc(len, GFP_ATOMIC);
if (!veal) {
spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
......@@ -612,7 +612,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
(count * sizeof(u16));
more = true;
}
vvfl = kzalloc(len, GFP_KERNEL);
vvfl = kzalloc(len, GFP_ATOMIC);
if (!vvfl) {
spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
......@@ -684,7 +684,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
(count * sizeof(u16));
more = true;
}
vvfl = kzalloc(len, GFP_KERNEL);
vvfl = kzalloc(len, GFP_ATOMIC);
if (!vvfl) {
spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
......@@ -1037,7 +1037,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
i40evf_print_link_message(adapter);
break;
case VIRTCHNL_EVENT_RESET_IMPENDING:
dev_info(&adapter->pdev->dev, "PF reset warning received\n");
dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n");
if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
adapter->flags |= I40EVF_FLAG_RESET_PENDING;
dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
......