Commit c5870942 authored by David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2016-10-31

This series contains updates to i40e and i40evf.

Colin Ian King fixes a minor issue in a dev_err message, where the
newline character was missing from the end of the message.
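
The fix simply appends the missing "\n"; the i40e_ndo_set_vf_bw() hunk
later in this diff shows the corrected call:

    dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.\n",
            max_tx_rate, vf->vf_id);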

Jake provides most of the changes in the series, starting with dropping
the is_vf and is_netdev fields in the i40e_mac_filter structure, since
they are not needed (along with the checks that used these fields).  The
reason is that we use separate VSIs for SRIOV VFs and for netdev VSIs,
so a single VSI should only ever have one type of filter.  He then
simplifies our .set_rx_mode handler by using the kernel-provided
__dev_uc_sync() and __dev_mc_sync() functions for notification of added
and deleted filters.  Refactored i40e_put_mac_in_vlan() to resolve an
issue where the function arbitrarily modified all filters to have the
same VLAN, which is incorrect because it could modify active filters
without putting them into the NEW state.  Refactored the delete-filter
logic so that the functionality can be re-used, where appropriate,
without having to search for the filter twice.  Reduced the latency of
operations related to searching for specific MAC filters by using a
static hash table instead of a list.  Reduced code duplication in the
adminq command handling for add/delete of filters.  Fixed an issue
where the TSYNVALID bit was not being checked as the true indicator of
whether a packet has an associated timestamp.  Cleaned up a second
msleep() call by simply re-ordering the code so that the extra wait is
no longer needed.
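
As a minimal sketch of the new .set_rx_mode direction (the
i40e_addr_sync()/i40e_addr_unsync() callbacks here are illustrative
stand-ins, not necessarily the exact helpers in the driver):

    static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
    {
            struct i40e_netdev_priv *np = netdev_priv(netdev);

            /* illustrative: add a filter for this address on any VLAN */
            if (i40e_add_filter(np->vsi, addr, I40E_VLAN_ANY))
                    return 0;
            return -ENOMEM;
    }

    static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
    {
            struct i40e_netdev_priv *np = netdev_priv(netdev);

            /* illustrative: drop the corresponding filter again */
            i40e_del_filter(np->vsi, addr, I40E_VLAN_ANY);
            return 0;
    }

    static void i40e_set_rx_mode(struct net_device *netdev)
    {
            struct i40e_netdev_priv *np = netdev_priv(netdev);
            struct i40e_vsi *vsi = np->vsi;

            spin_lock_bh(&vsi->mac_filter_hash_lock);
            /* the kernel walks netdev->uc/->mc and only calls sync/unsync
             * for addresses added or removed since the last invocation
             */
            __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
            __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
            spin_unlock_bh(&vsi->mac_filter_hash_lock);
    }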

Alan provides an additional fix to the work Jake has been doing,
resolving a bug where adding at least one VLAN and then removing all
VLANs left the VSI's MAC filters with an incorrect VID value (the field
that indicates a MAC filter's VLAN status).

Alex adds a common method for finding a VSI by type.  Also cleaned up
the logic for coalescing RS bits, which was convoluted and larger than
it needed to be.
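
In practice the helper turns open-coded scans of pf->vsi[] into a
one-liner; the i40e_program_fdir_filter() hunk below, for example,
reduces to:

    /* find the existing Flow Director VSI, if any */
    vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
    if (!vsi)
            return -ENOENT;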

Mitch fixes a failure to add filters when the VF driver is reloaded, by
simply setting the number of filters to zero when freeing VF resources.

Maciej implements an I40E_NVMUPD_STATE_ERROR state for NVM updates,
giving the driver the ability to report an NVM image write failure.
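
Concretely (mirroring the NVM update hunks below), a failed admin queue
write now latches the new state in i40e_nvmupd_check_wait_event(), which
i40e_nvmupd_command() later reports and clears back to INIT:

    if (hw->aq.arq_last_status) {
            hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
            return;
    }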

Filip removes unreachable code which was found using static analysis,
where "if" statements could never be true, cleaning up the unnecessary
conditionals.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents bd68a2a8 3aa7b74d
@@ -39,6 +39,7 @@
#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/hashtable.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
@@ -428,11 +429,13 @@ struct i40e_pf {
struct ptp_clock_info ptp_caps;
struct sk_buff *ptp_tx_skb;
struct hwtstamp_config tstamp_config;
unsigned long last_rx_ptp_check;
spinlock_t tmreg_lock; /* Used to protect the device time registers. */
struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
u64 ptp_base_adj;
u32 tx_hwtstamp_timeouts;
u32 rx_hwtstamp_cleared;
u32 latch_event_flags;
spinlock_t ptp_rx_lock; /* Used to protect Rx timestamp registers. */
unsigned long latch_events[4];
bool ptp_tx;
bool ptp_rx;
u16 rss_table_size; /* HW RSS table size */
@@ -445,6 +448,20 @@ struct i40e_pf {
u16 phy_led_val;
};
/**
* i40e_addr_to_hkey - Convert a 6-byte MAC Address to a u64 hash key
* @macaddr: the MAC Address as the base key
*
* Simply copies the address and returns it as a u64 for hashing
**/
static inline u64 i40e_addr_to_hkey(const u8 *macaddr)
{
u64 key = 0;
ether_addr_copy((u8 *)&key, macaddr);
return key;
}
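/* Usage sketch (illustrative, not part of this patch): with the per-VSI
 * table declared as DECLARE_HASHTABLE(mac_filter_hash, 8), filters can be
 * inserted and looked up via the <linux/hashtable.h> helpers, e.g.:
 *
 *	u64 key = i40e_addr_to_hkey(macaddr);
 *
 *	hash_add(vsi->mac_filter_hash, &f->hlist, key);
 *	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key)
 *		if (ether_addr_equal(f->macaddr, macaddr) && f->vlan == vlan)
 *			return f;
 */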
enum i40e_filter_state {
I40E_FILTER_INVALID = 0, /* Invalid state */
I40E_FILTER_NEW, /* New, not sent to FW yet */
@@ -454,13 +471,10 @@ enum i40e_filter_state {
/* There is no 'removed' state; the filter struct is freed */
};
struct i40e_mac_filter {
struct list_head list;
struct hlist_node hlist;
u8 macaddr[ETH_ALEN];
#define I40E_VLAN_ANY -1
s16 vlan;
u8 counter; /* number of instances of this filter */
bool is_vf; /* filter belongs to a VF */
bool is_netdev; /* filter belongs to a netdev */
enum i40e_filter_state state;
};
@@ -501,9 +515,11 @@ struct i40e_vsi {
#define I40E_VSI_FLAG_VEB_OWNER BIT(1)
unsigned long flags;
/* Per VSI lock to protect elements/list (MAC filter) */
spinlock_t mac_filter_list_lock;
struct list_head mac_filter_list;
/* Per VSI lock to protect elements/hash (MAC filter) */
spinlock_t mac_filter_hash_lock;
/* Fixed size hash table with 2^8 buckets for MAC filters */
DECLARE_HASHTABLE(mac_filter_hash, 8);
bool has_vlan_filter;
/* VSI stats */
struct rtnl_link_stats64 net_stats;
@@ -707,6 +723,25 @@ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
u16 rss_table_size, u16 rss_size);
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
/**
* i40e_find_vsi_by_type - Find and return the first VSI of a given type
* @pf: PF to search for VSI
* @type: Value indicating type of VSI we are looking for
**/
static inline struct i40e_vsi *
i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type)
{
int i;
for (i = 0; i < pf->num_alloc_vsi; i++) {
struct i40e_vsi *vsi = pf->vsi[i];
if (vsi && vsi->type == type)
return vsi;
}
return NULL;
}
void i40e_update_stats(struct i40e_vsi *vsi);
void i40e_update_eth_stats(struct i40e_vsi *vsi);
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
@@ -723,10 +758,8 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf);
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
void i40e_set_ethtool_ops(struct net_device *netdev);
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
u8 *macaddr, s16 vlan,
bool is_vf, bool is_netdev);
void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan,
bool is_vf, bool is_netdev);
const u8 *macaddr, s16 vlan);
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan);
int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
u16 uplink, u32 param1);
@@ -740,7 +773,8 @@ void i40e_service_event_schedule(struct i40e_pf *pf);
void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id,
u8 *msg, u16 len);
int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable);
int i40e_vsi_start_rings(struct i40e_vsi *vsi);
void i40e_vsi_stop_rings(struct i40e_vsi *vsi);
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc);
@@ -816,14 +850,12 @@ int i40e_close(struct net_device *netdev);
int i40e_vsi_open(struct i40e_vsi *vsi);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev);
int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev);
void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
const u8 *macaddr);
int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr);
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
#ifdef I40E_FCOE
int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
struct tc_to_netdev *tc);
......
@@ -964,11 +964,11 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
desc_idx = ntc;
hw->aq.arq_last_status =
(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
if (flags & I40E_AQ_FLAG_ERR) {
ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
hw->aq.arq_last_status =
(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n",
......
@@ -3313,8 +3313,10 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
/* partition id is 1-based, and functions are evenly spread
* across the ports as partitions
*/
if (hw->num_ports != 0) {
hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
hw->num_partitions = num_functions / hw->num_ports;
}
/* additional HW specific goodies that might
* someday be HW version specific
......
@@ -134,7 +134,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
struct rtnl_link_stats64 *nstat;
struct i40e_mac_filter *f;
struct i40e_vsi *vsi;
int i;
int i, bkt;
vsi = i40e_dbg_find_vsi(pf, seid);
if (!vsi) {
@@ -166,11 +166,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
pf->hw.mac.addr,
pf->hw.mac.san_addr,
pf->hw.mac.port_addr);
list_for_each_entry(f, &vsi->mac_filter_list, list) {
hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
dev_info(&pf->pdev->dev,
" mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d, state %s\n",
f->macaddr, f->vlan, f->is_netdev, f->is_vf,
f->counter, i40e_filter_state_string[f->state]);
" mac_filter_hash: %pM vid=%d, state %s\n",
f->macaddr, f->vlan,
i40e_filter_state_string[f->state]);
}
dev_info(&pf->pdev->dev, " active_filters %d, promisc_threshold %d, overflow promisc %s\n",
vsi->active_filters, vsi->promisc_threshold,
@@ -867,86 +867,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
i40e_veb_release(pf->veb[i]);
} else if (strncmp(cmd_buf, "add macaddr", 11) == 0) {
struct i40e_mac_filter *f;
int vlan = 0;
u8 ma[6];
int ret;
cnt = sscanf(&cmd_buf[11],
"%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i",
&vsi_seid,
&ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5],
&vlan);
if (cnt == 7) {
vlan = 0;
} else if (cnt != 8) {
dev_info(&pf->pdev->dev,
"add macaddr: bad command string, cnt=%d\n",
cnt);
goto command_write_done;
}
vsi = i40e_dbg_find_vsi(pf, vsi_seid);
if (!vsi) {
dev_info(&pf->pdev->dev,
"add macaddr: VSI %d not found\n", vsi_seid);
goto command_write_done;
}
spin_lock_bh(&vsi->mac_filter_list_lock);
f = i40e_add_filter(vsi, ma, vlan, false, false);
spin_unlock_bh(&vsi->mac_filter_list_lock);
ret = i40e_sync_vsi_filters(vsi);
if (f && !ret)
dev_info(&pf->pdev->dev,
"add macaddr: %pM vlan=%d added to VSI %d\n",
ma, vlan, vsi_seid);
else
dev_info(&pf->pdev->dev,
"add macaddr: %pM vlan=%d to VSI %d failed, f=%p ret=%d\n",
ma, vlan, vsi_seid, f, ret);
} else if (strncmp(cmd_buf, "del macaddr", 11) == 0) {
int vlan = 0;
u8 ma[6];
int ret;
cnt = sscanf(&cmd_buf[11],
"%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i",
&vsi_seid,
&ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5],
&vlan);
if (cnt == 7) {
vlan = 0;
} else if (cnt != 8) {
dev_info(&pf->pdev->dev,
"del macaddr: bad command string, cnt=%d\n",
cnt);
goto command_write_done;
}
vsi = i40e_dbg_find_vsi(pf, vsi_seid);
if (!vsi) {
dev_info(&pf->pdev->dev,
"del macaddr: VSI %d not found\n", vsi_seid);
goto command_write_done;
}
spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_del_filter(vsi, ma, vlan, false, false);
spin_unlock_bh(&vsi->mac_filter_list_lock);
ret = i40e_sync_vsi_filters(vsi);
if (!ret)
dev_info(&pf->pdev->dev,
"del macaddr: %pM vlan=%d removed from VSI %d\n",
ma, vlan, vsi_seid);
else
dev_info(&pf->pdev->dev,
"del macaddr: %pM vlan=%d from VSI %d failed, ret=%d\n",
ma, vlan, vsi_seid, ret);
} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
i40e_status ret;
u16 vid;
@@ -1615,8 +1535,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n");
dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n");
dev_info(&pf->pdev->dev, " del relay <relay_seid>\n");
dev_info(&pf->pdev->dev, " add macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n");
dev_info(&pf->pdev->dev, " del macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n");
dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n");
dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n");
dev_info(&pf->pdev->dev, " dump switch\n");
......
@@ -216,7 +216,6 @@ enum i40e_ethtool_test_id {
I40E_ETH_TEST_REG = 0,
I40E_ETH_TEST_EEPROM,
I40E_ETH_TEST_INTR,
I40E_ETH_TEST_LOOPBACK,
I40E_ETH_TEST_LINK,
};
@@ -224,7 +223,6 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)",
"Eeprom test (offline)",
"Interrupt test (offline)",
"Loopback test (offline)",
"Link test (on/offline)"
};
@@ -1744,17 +1742,6 @@ static int i40e_intr_test(struct net_device *netdev, u64 *data)
return *data;
}
static int i40e_loopback_test(struct net_device *netdev, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
netif_info(pf, hw, netdev, "loopback test not implemented\n");
*data = 0;
return *data;
}
static inline bool i40e_active_vfs(struct i40e_pf *pf)
{
struct i40e_vf *vfs = pf->vf;
@@ -1768,17 +1755,7 @@ static inline bool i40e_active_vfs(struct i40e_pf *pf)
static inline bool i40e_active_vmdqs(struct i40e_pf *pf)
{
struct i40e_vsi **vsi = pf->vsi;
int i;
for (i = 0; i < pf->num_alloc_vsi; i++) {
if (!vsi[i])
continue;
if (vsi[i]->type == I40E_VSI_VMDQ2)
return true;
}
return false;
return !!i40e_find_vsi_by_type(pf, I40E_VSI_VMDQ2);
}
static void i40e_diag_test(struct net_device *netdev,
@@ -1800,7 +1777,6 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_REG] = 1;
data[I40E_ETH_TEST_EEPROM] = 1;
data[I40E_ETH_TEST_INTR] = 1;
data[I40E_ETH_TEST_LOOPBACK] = 1;
data[I40E_ETH_TEST_LINK] = 1;
eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__I40E_TESTING, &pf->state);
@@ -1828,9 +1804,6 @@ static void i40e_diag_test(struct net_device *netdev,
if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
eth_test->flags |= ETH_TEST_FL_FAILED;
if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
/* run reg test last, a reset is required after it */
if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1851,7 +1824,6 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_REG] = 0;
data[I40E_ETH_TEST_EEPROM] = 0;
data[I40E_ETH_TEST_INTR] = 0;
data[I40E_ETH_TEST_LOOPBACK] = 0;
}
skip_ol_tests:
......
@@ -1522,12 +1522,12 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
* same PCI function.
*/
netdev->dev_port = 1;
spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false);
i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false);
i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false);
i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false);
spin_unlock_bh(&vsi->mac_filter_list_lock);
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_add_filter(vsi, hw->mac.san_addr, 0);
i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0);
i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0);
i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* use san mac */
ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
......
@@ -722,9 +722,20 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
}
/* Clear error status on read */
if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
return 0;
}
/* Clear the error status even if it was not read, and log it */
if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
i40e_debug(hw, I40E_DEBUG_NVM,
"Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
}
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT:
status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
@@ -1074,6 +1085,11 @@ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
}
hw->nvm_wait_opcode = 0;
if (hw->aq.arq_last_status) {
hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
return;
}
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT_WAIT:
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
......
@@ -159,16 +159,15 @@ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
struct timespec64 now, then;
unsigned long flags;
then = ns_to_timespec64(delta);
spin_lock_irqsave(&pf->tmreg_lock, flags);
mutex_lock(&pf->tmreg_lock);
i40e_ptp_read(pf, &now);
now = timespec64_add(now, then);
i40e_ptp_write(pf, (const struct timespec64 *)&now);
spin_unlock_irqrestore(&pf->tmreg_lock, flags);
mutex_unlock(&pf->tmreg_lock);
return 0;
}
@@ -184,11 +183,10 @@ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
unsigned long flags;
spin_lock_irqsave(&pf->tmreg_lock, flags);
mutex_lock(&pf->tmreg_lock);
i40e_ptp_read(pf, ts);
spin_unlock_irqrestore(&pf->tmreg_lock, flags);
mutex_unlock(&pf->tmreg_lock);
return 0;
}
@@ -205,11 +203,10 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
unsigned long flags;
spin_lock_irqsave(&pf->tmreg_lock, flags);
mutex_lock(&pf->tmreg_lock);
i40e_ptp_write(pf, ts);
spin_unlock_irqrestore(&pf->tmreg_lock, flags);
mutex_unlock(&pf->tmreg_lock);
return 0;
}
@@ -229,6 +226,47 @@ static int i40e_ptp_feature_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}
/**
* i40e_ptp_update_latch_events - Read I40E_PRTTSYN_STAT_1 and latch events
* @pf: the PF data structure
*
* This function reads I40E_PRTTSYN_STAT_1 and updates the corresponding timers
* for noticed latch events. This allows the driver to keep track of the first
* time a latch event was noticed which will be used to help clear out Rx
* timestamps for packets that got dropped or lost.
*
* This function will return the current value of I40E_PRTTSYN_STAT_1 and is
* expected to be called only while under the ptp_rx_lock.
**/
static u32 i40e_ptp_get_rx_events(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
u32 prttsyn_stat, new_latch_events;
int i;
prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
new_latch_events = prttsyn_stat & ~pf->latch_event_flags;
/* Update the jiffies time for any newly latched timestamp. This
* ensures that we store the time that we first discovered a timestamp
* was latched by the hardware. The service task will later determine
* if we should free the latch and drop that timestamp should too much
* time pass. This flow ensures that we only update jiffies for new
* events latched since the last time we checked, and not all events
* currently latched, so that the service task accounting remains
* accurate.
*/
for (i = 0; i < 4; i++) {
if (new_latch_events & BIT(i))
pf->latch_events[i] = jiffies;
}
/* Finally, we store the current status of the Rx timestamp latches */
pf->latch_event_flags = prttsyn_stat;
return prttsyn_stat;
}
/**
* i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung
* @vsi: The VSI with the rings relevant to 1588
@@ -242,10 +280,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_ring *rx_ring;
unsigned long rx_event;
u32 prttsyn_stat;
int n;
int i;
/* Since we cannot turn off the Rx timestamp logic if the device is
* configured for Tx timestamping, we check if Rx timestamping is
@@ -255,42 +290,30 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
return;
prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
spin_lock_bh(&pf->ptp_rx_lock);
/* Update current latch times for Rx events */
i40e_ptp_get_rx_events(pf);
/* Unless all four receive timestamp registers are latched, we are not
* concerned about a possible PTP Rx hang, so just update the timeout
* counter and exit.
/* Check all the currently latched Rx events and see whether they have
* been latched for over a second. It is assumed that any timestamp
* should have been cleared within this time, or else it was captured
* for a dropped frame that the driver never received. Thus, we will
* clear any timestamp that has been latched for over 1 second.
*/
if (!(prttsyn_stat & ((I40E_PRTTSYN_STAT_1_RXT0_MASK <<
I40E_PRTTSYN_STAT_1_RXT0_SHIFT) |
(I40E_PRTTSYN_STAT_1_RXT1_MASK <<
I40E_PRTTSYN_STAT_1_RXT1_SHIFT) |
(I40E_PRTTSYN_STAT_1_RXT2_MASK <<
I40E_PRTTSYN_STAT_1_RXT2_SHIFT) |
(I40E_PRTTSYN_STAT_1_RXT3_MASK <<
I40E_PRTTSYN_STAT_1_RXT3_SHIFT)))) {
pf->last_rx_ptp_check = jiffies;
return;
for (i = 0; i < 4; i++) {
if ((pf->latch_event_flags & BIT(i)) &&
time_is_before_jiffies(pf->latch_events[i] + HZ)) {
rd32(hw, I40E_PRTTSYN_RXTIME_H(i));
pf->latch_event_flags &= ~BIT(i);
pf->rx_hwtstamp_cleared++;
dev_warn(&pf->pdev->dev,
"Clearing a missed Rx timestamp event for RXTIME[%d]\n",
i);
}
/* Determine the most recent watchdog or rx_timestamp event. */
rx_event = pf->last_rx_ptp_check;
for (n = 0; n < vsi->num_queue_pairs; n++) {
rx_ring = vsi->rx_rings[n];
if (time_after(rx_ring->last_rx_timestamp, rx_event))
rx_event = rx_ring->last_rx_timestamp;
}
/* Only need to read the high RXSTMP register to clear the lock */
if (time_is_before_jiffies(rx_event + 5 * HZ)) {
rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
pf->last_rx_ptp_check = jiffies;
pf->rx_hwtstamp_cleared++;
WARN_ONCE(1, "Detected Rx timestamp register hang\n");
}
spin_unlock_bh(&pf->ptp_rx_lock);
}
/**
@@ -353,14 +376,25 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
hw = &pf->hw;
prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
spin_lock_bh(&pf->ptp_rx_lock);
/* Get current Rx events and update latch times */
prttsyn_stat = i40e_ptp_get_rx_events(pf);
if (!(prttsyn_stat & BIT(index)))
/* TODO: Should we warn about missing Rx timestamp event? */
if (!(prttsyn_stat & BIT(index))) {
spin_unlock_bh(&pf->ptp_rx_lock);
return;
}
/* Clear the latched event since we're about to read its register */
pf->latch_event_flags &= ~BIT(index);
lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
hi = rd32(hw, I40E_PRTTSYN_RXTIME_H(index));
spin_unlock_bh(&pf->ptp_rx_lock);
ns = (((u64)hi) << 32) | lo;
i40e_ptp_convert_to_hwtstamp(skb_hwtstamps(skb), ns);
@@ -514,12 +548,15 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
}
/* Clear out all 1588-related registers to clear and unlatch them. */
spin_lock_bh(&pf->ptp_rx_lock);
rd32(hw, I40E_PRTTSYN_STAT_0);
rd32(hw, I40E_PRTTSYN_TXTIME_H);
rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
pf->latch_event_flags = 0;
spin_unlock_bh(&pf->ptp_rx_lock);
/* Enable/disable the Tx timestamp interrupt based on user input. */
regval = rd32(hw, I40E_PRTTSYN_CTL0);
@@ -658,10 +695,8 @@ void i40e_ptp_init(struct i40e_pf *pf)
return;
}
/* we have to initialize the lock first, since we can't control
* when the user will enter the PHC device entry points
*/
spin_lock_init(&pf->tmreg_lock);
mutex_init(&pf->tmreg_lock);
spin_lock_init(&pf->ptp_rx_lock);
/* ensure we have a clock device */
err = i40e_ptp_create_clock(pf);
......
@@ -125,10 +125,7 @@ static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
u16 i;
/* find existing FDIR VSI */
vsi = NULL;
for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
vsi = pf->vsi[i];
vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
if (!vsi)
return -ENOENT;
@@ -619,7 +616,7 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
return 0;
}
#define WB_STRIDE 0x3
#define WB_STRIDE 4
/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
@@ -735,7 +732,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
unsigned int j = i40e_get_tx_pending(tx_ring, false);
if (budget &&
((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
((j / WB_STRIDE) == 0) && (j > 0) &&
!test_bit(__I40E_DOWN, &vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
@@ -1410,13 +1407,12 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT;
u32 rsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
if (unlikely(rsyn)) {
i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, rsyn);
rx_ring->last_rx_timestamp = jiffies;
}
if (unlikely(tsynvalid))
i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
@@ -2704,9 +2700,7 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
u32 td_tag = 0;
dma_addr_t dma;
u16 gso_segs;
u16 desc_count = 0;
bool tail_bump = true;
bool do_rs = false;
u16 desc_count = 1;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -2789,8 +2783,7 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = &tx_ring->tx_bi[i];
}
/* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc;
netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i++;
if (i == tx_ring->count)
@@ -2798,66 +2791,72 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* write last descriptor with EOP bit */
td_cmd |= I40E_TX_DESC_CMD_EOP;
/* We can OR these values together as they both are checked against
* 4 below and at this point desc_count will be used as a boolean value
* after this if/else block.
*/
desc_count |= ++tx_ring->packet_stride;
/* Algorithm to optimize tail and RS bit setting:
* if xmit_more is supported
* if xmit_more is true
* do not update tail and do not mark RS bit.
* if xmit_more is false and last xmit_more was false
* if every packet spanned less than 4 desc
* then set RS bit on 4th packet and update tail
* on every packet
* else
* update tail and set RS bit on every packet.
* if xmit_more is false and last_xmit_more was true
* update tail and set RS bit.
* if queue is stopped
* mark RS bit
* reset packet counter
* else if xmit_more is supported and is true
* advance packet counter to 4
* reset desc_count to 0
*
* Optimization: wmb to be issued only in case of tail update.
* Also optimize the Descriptor WB path for RS bit with the same
* algorithm.
* if desc_count >= 4
* mark RS bit
* reset packet counter
* if desc_count > 0
* update tail
*
* Note: If there are less than 4 packets
* Note: If there are less than 4 descriptors
* pending and interrupts were disabled the service task will
* trigger a force WB.
*/
if (skb->xmit_more &&
!netif_xmit_stopped(txring_txq(tx_ring))) {
tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
tail_bump = false;
} else if (!skb->xmit_more &&
!netif_xmit_stopped(txring_txq(tx_ring)) &&
(!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
(tx_ring->packet_stride < WB_STRIDE) &&
(desc_count < WB_STRIDE)) {
tx_ring->packet_stride++;
} else {
if (netif_xmit_stopped(txring_txq(tx_ring))) {
goto do_rs;
} else if (skb->xmit_more) {
/* set stride to arm on next packet and reset desc_count */
tx_ring->packet_stride = WB_STRIDE;
desc_count = 0;
} else if (desc_count >= WB_STRIDE) {
do_rs:
/* write last descriptor with RS bit set */
td_cmd |= I40E_TX_DESC_CMD_RS;
tx_ring->packet_stride = 0;
tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
do_rs = true;
}
if (do_rs)
tx_ring->packet_stride = 0;
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset, size, td_tag) |
cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
I40E_TX_DESC_CMD_EOP) <<
I40E_TXD_QW1_CMD_SHIFT);
build_ctob(td_cmd, td_offset, size, td_tag);
/* notify HW of packet */
if (!tail_bump) {
prefetchw(tx_desc + 1);
} else {
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
*
* We also use this memory barrier to make certain all of the
* status bits have been updated before next_to_watch is written.
*/
wmb();
/* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc;
/* notify HW of packet */
if (desc_count) {
writel(i, tx_ring->tail);
/* we need this if more than one processor can write to our tail
* at a time, it synchronizes IO on IA64/Altix systems
*/
mmiowb();
}
return;
dma_error:
......
@@ -307,15 +307,12 @@ struct i40e_ring {
u8 atr_sample_rate;
u8 atr_count;
unsigned long last_rx_timestamp;
bool ring_active; /* is ring online or not */
bool arm_wb; /* do something to arm write back */
u8 packet_stride;
u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
/* stats structs */
struct i40e_queue_stats stats;
......
@@ -366,6 +366,7 @@ enum i40e_nvmupd_state {
I40E_NVMUPD_STATE_WRITING,
I40E_NVMUPD_STATE_INIT_WAIT,
I40E_NVMUPD_STATE_WRITE_WAIT,
I40E_NVMUPD_STATE_ERROR
};
/* nvm_access definition and its masks/shifts need to be accessible to
......
@@ -686,17 +686,17 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
if (vf->port_vlan_id)
i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
spin_lock_bh(&vsi->mac_filter_list_lock);
spin_lock_bh(&vsi->mac_filter_hash_lock);
if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
vf->port_vlan_id ? vf->port_vlan_id : -1,
true, false);
vf->port_vlan_id ?
vf->port_vlan_id : -1);
if (!f)
dev_info(&pf->pdev->dev,
"Could not add MAC filter %pM for VF %d\n",
vf->default_lan_addr.addr, vf->vf_id);
}
spin_unlock_bh(&vsi->mac_filter_list_lock);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id),
(u32)hena);
i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id),
@@ -811,6 +811,7 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
vf->lan_vsi_idx = 0;
vf->lan_vsi_id = 0;
vf->num_mac = 0;
}
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
@@ -990,7 +991,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
if (vf->lan_vsi_idx == 0)
goto complete_reset;
i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false);
i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
complete_reset:
/* reallocate VF resources to reset the VSI state */
i40e_free_vf_res(vf);
@@ -1031,8 +1032,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
i40e_notify_client_of_vf_enable(pf, 0);
for (i = 0; i < pf->num_alloc_vfs; i++)
if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
false);
i40e_vsi_stop_rings(pf->vsi[pf->vf[i].lan_vsi_idx]);
/* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank
@@ -1449,9 +1449,9 @@ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
struct i40e_mac_filter *f;
int num_vlans = 0;
int num_vlans = 0, bkt;
list_for_each_entry(f, &vsi->mac_filter_list, list) {
hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
num_vlans++;
}
@@ -1481,6 +1481,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
struct i40e_vsi *vsi;
bool alluni = false;
int aq_err = 0;
int bkt;
vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
@@ -1507,7 +1508,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
vf->port_vlan_id,
NULL);
} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
list_for_each_entry(f, &vsi->mac_filter_list, list) {
hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
continue;
aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
@@ -1557,7 +1558,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
vf->port_vlan_id,
NULL);
} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
list_for_each_entry(f, &vsi->mac_filter_list, list) {
hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
aq_ret = 0;
if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) {
aq_ret =
@@ -1757,7 +1758,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], true))
if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx]))
aq_ret = I40E_ERR_TIMEOUT;
error_param:
/* send the response to the VF */
@@ -1796,8 +1797,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false))
aq_ret = I40E_ERR_TIMEOUT;
i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
error_param:
/* send the response to the VF */
@@ -1927,20 +1927,18 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
/* Lock once, because all function inside for loop accesses VSI's
* MAC filter list which needs to be protected using same lock.
*/
spin_lock_bh(&vsi->mac_filter_list_lock);
spin_lock_bh(&vsi->mac_filter_hash_lock);
/* add new addresses to the list */
for (i = 0; i < al->num_elements; i++) {
struct i40e_mac_filter *f;
f = i40e_find_mac(vsi, al->list[i].addr, true, false);
f = i40e_find_mac(vsi, al->list[i].addr);
if (!f) {
if (i40e_is_vsi_in_vlan(vsi))
f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
true, false);
f = i40e_put_mac_in_vlan(vsi, al->list[i].addr);
else
f = i40e_add_filter(vsi, al->list[i].addr, -1,
true, false);
f = i40e_add_filter(vsi, al->list[i].addr, -1);
}
if (!f) {
@@ -1948,13 +1946,13 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
"Unable to add MAC filter %pM for VF %d\n",
al->list[i].addr, vf->vf_id);
ret = I40E_ERR_PARAM;
spin_unlock_bh(&vsi->mac_filter_list_lock);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
} else {
vf->num_mac++;
}
}
spin_unlock_bh(&vsi->mac_filter_list_lock);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* program the updated filter list */
ret = i40e_sync_vsi_filters(vsi);
@@ -2003,18 +2001,18 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
}
vsi = pf->vsi[vf->lan_vsi_idx];
spin_lock_bh(&vsi->mac_filter_list_lock);
spin_lock_bh(&vsi->mac_filter_hash_lock);
/* delete addresses from the list */
for (i = 0; i < al->num_elements; i++)
if (i40e_del_mac_all_vlan(vsi, al->list[i].addr, true, false)) {
if (i40e_del_mac_all_vlan(vsi, al->list[i].addr)) {
ret = I40E_ERR_INVALID_MAC_ADDR;
spin_unlock_bh(&vsi->mac_filter_list_lock);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
} else {
vf->num_mac--;
}
spin_unlock_bh(&vsi->mac_filter_list_lock);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* program the updated filter list */
ret = i40e_sync_vsi_filters(vsi);
@@ -2139,8 +2137,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
}
for (i = 0; i < vfl->num_elements; i++) {
int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
if (!ret)
i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
vf->num_vlan--;
if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states))
@@ -2153,11 +2150,6 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
false,
vfl->vlan_id[i],
NULL);
if (ret)
dev_err(&pf->pdev->dev,
"Unable to delete VLAN filter %d for VF %d, error %d\n",
vfl->vlan_id[i], vf->vf_id, ret);
}
error_param:
@@ -2689,6 +2681,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
struct i40e_mac_filter *f;
struct i40e_vf *vf;
int ret = 0;
int bkt;
/* validate the request */
if (vf_id >= pf->num_alloc_vfs) {
@@ -2715,23 +2708,22 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
}
/* Lock once because below invoked function add/del_filter requires
* mac_filter_list_lock to be held
* mac_filter_hash_lock to be held
*/
spin_lock_bh(&vsi->mac_filter_list_lock);
spin_lock_bh(&vsi->mac_filter_hash_lock);
/* delete the temporary mac address */
if (!is_zero_ether_addr(vf->default_lan_addr.addr))
i40e_del_filter(vsi, vf->default_lan_addr.addr,
vf->port_vlan_id ? vf->port_vlan_id : -1,
true, false);
vf->port_vlan_id ? vf->port_vlan_id : -1);
/* Delete all the filters for this VSI - we're going to kill it
* anyway.
*/
list_for_each_entry(f, &vsi->mac_filter_list, list)
i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
hash_for_each(vsi->mac_filter_hash, bkt, f, hlist)
i40e_del_filter(vsi, f->macaddr, f->vlan);
spin_unlock_bh(&vsi->mac_filter_list_lock);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
/* program mac filter */
@@ -2803,9 +2795,9 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
/* duplicate request, so just return success */
goto error_pvid;
spin_lock_bh(&vsi->mac_filter_list_lock);
spin_lock_bh(&vsi->mac_filter_hash_lock);
is_vsi_in_vlan = i40e_is_vsi_in_vlan(vsi);
spin_unlock_bh(&vsi->mac_filter_list_lock);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
if (le16_to_cpu(vsi->info.pvid) == 0 && is_vsi_in_vlan) {
dev_err(&pf->pdev->dev,
@@ -2835,13 +2827,8 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
if (vsi->info.pvid) {
/* kill old VLAN */
ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
VLAN_VID_MASK));
if (ret) {
dev_info(&vsi->back->pdev->dev,
"remove VLAN failed, ret=%d, aq_err=%d\n",
ret, pf->hw.aq.asq_last_status);
}
}
if (vlan_id || qos)
ret = i40e_vsi_add_pvid(vsi, vlanprio);
@@ -2940,7 +2927,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
}
if (max_tx_rate > speed) {
dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.",
dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.\n",
max_tx_rate, vf->vf_id);
ret = -EINVAL;
goto error;
......
@@ -912,11 +912,11 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
desc_idx = ntc;
hw->aq.arq_last_status =
(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
if (flags & I40E_AQ_FLAG_ERR) {
ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
hw->aq.arq_last_status =
(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n",
......
@@ -150,7 +150,7 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
return 0;
}
#define WB_STRIDE 0x3
#define WB_STRIDE 4
/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
@@ -266,7 +266,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
unsigned int j = i40evf_get_tx_pending(tx_ring, false);
if (budget &&
((j / (WB_STRIDE + 1)) == 0) && (j > 0) &&
((j / WB_STRIDE) == 0) && (j > 0) &&
!test_bit(__I40E_DOWN, &vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
@@ -1950,9 +1950,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
u32 td_tag = 0;
dma_addr_t dma;
u16 gso_segs;
u16 desc_count = 0;
bool tail_bump = true;
bool do_rs = false;
u16 desc_count = 1;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -2035,8 +2033,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = &tx_ring->tx_bi[i];
}
/* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc;
netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i++;
if (i == tx_ring->count)
@@ -2044,66 +2041,72 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* write last descriptor with EOP bit */
td_cmd |= I40E_TX_DESC_CMD_EOP;
/* We can OR these values together as they both are checked against
* 4 below and at this point desc_count will be used as a boolean value
* after this if/else block.
*/
desc_count |= ++tx_ring->packet_stride;
/* Algorithm to optimize tail and RS bit setting:
* if xmit_more is supported
* if xmit_more is true
* do not update tail and do not mark RS bit.
* if xmit_more is false and last xmit_more was false
* if every packet spanned less than 4 desc
* then set RS bit on 4th packet and update tail
* on every packet
* else
* update tail and set RS bit on every packet.
* if xmit_more is false and last_xmit_more was true
* update tail and set RS bit.
* if queue is stopped
* mark RS bit
* reset packet counter
* else if xmit_more is supported and is true
* advance packet counter to 4
* reset desc_count to 0
*
* Optimization: wmb to be issued only in case of tail update.
* Also optimize the Descriptor WB path for RS bit with the same
* algorithm.
* if desc_count >= 4
* mark RS bit
* reset packet counter
* if desc_count > 0
* update tail
*
* Note: If there are less than 4 packets
* Note: If there are less than 4 descriptors
* pending and interrupts were disabled the service task will
* trigger a force WB.
*/
if (skb->xmit_more &&
!netif_xmit_stopped(txring_txq(tx_ring))) {
tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
tail_bump = false;
} else if (!skb->xmit_more &&
!netif_xmit_stopped(txring_txq(tx_ring)) &&
(!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
(tx_ring->packet_stride < WB_STRIDE) &&
(desc_count < WB_STRIDE)) {
tx_ring->packet_stride++;
} else {
if (netif_xmit_stopped(txring_txq(tx_ring))) {
goto do_rs;
} else if (skb->xmit_more) {
/* set stride to arm on next packet and reset desc_count */
tx_ring->packet_stride = WB_STRIDE;
desc_count = 0;
} else if (desc_count >= WB_STRIDE) {
do_rs:
/* write last descriptor with RS bit set */
td_cmd |= I40E_TX_DESC_CMD_RS;
tx_ring->packet_stride = 0;
tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
do_rs = true;
}
if (do_rs)
tx_ring->packet_stride = 0;
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset, size, td_tag) |
cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
I40E_TX_DESC_CMD_EOP) <<
I40E_TXD_QW1_CMD_SHIFT);
build_ctob(td_cmd, td_offset, size, td_tag);
/* notify HW of packet */
if (!tail_bump) {
prefetchw(tx_desc + 1);
} else {
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
*
* We also use this memory barrier to make certain all of the
* status bits have been updated before next_to_watch is written.
*/
wmb();
/* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc;
/* notify HW of packet */
if (desc_count) {
writel(i, tx_ring->tail);
/* we need this if more than one processor can write to our tail
* at a time, it synchronizes IO on IA64/Altix systems
*/
mmiowb();
}
return;
dma_error:
......
@@ -309,7 +309,6 @@ struct i40e_ring {
bool ring_active; /* is ring online or not */
bool arm_wb; /* do something to arm write back */
u8 packet_stride;
#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
......
@@ -348,6 +348,7 @@ enum i40e_nvmupd_state {
I40E_NVMUPD_STATE_WRITING,
I40E_NVMUPD_STATE_INIT_WAIT,
I40E_NVMUPD_STATE_WRITE_WAIT,
I40E_NVMUPD_STATE_ERROR
};
/* nvm_access definition and its masks/shifts need to be accessible to
......
@@ -1746,15 +1746,17 @@ static void i40evf_reset_task(struct work_struct *work)
/* wait until the reset is complete and the PF is responding to us */
for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
/* sleep first to make sure a minimum wait time is met */
msleep(I40EVF_RESET_WAIT_MS);
reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if (reg_val == I40E_VFR_VFACTIVE)
break;
msleep(I40EVF_RESET_WAIT_MS);
}
pci_set_master(adapter->pdev);
/* extra wait to make sure minimum wait is met */
msleep(I40EVF_RESET_WAIT_MS);
if (i == I40EVF_RESET_WAIT_COUNT) {
struct i40evf_mac_filter *ftmp;
struct i40evf_vlan_filter *fv, *fvtmp;
......