Commit 13d5a30a authored by David S. Miller's avatar David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2018-03-26

This series contains updates to i40e only.

Jake provides several patches which remove the need for cmpxchg64(),
starting with moving I40E_FLAG_[UDP]_FILTER_SYNC from pf->flags to pf->state
since they are modified during run time possibly when the RTNL lock is not
held so they should be state bits and not flags.  Moved additional
"flags" which should be state fields, into pf->state.  Ensure we hold
the RTNL lock for the entire sequence of preparing for reset and when
resuming, which will protect the flags related to interrupt scheme under
RTNL lock so that their modification is properly threaded.  Finally,
cleanup the use of cmpxchg64() since it is no longer needed.  Cleaned up
the holes in the feature flags created by moving some flags to the state
field.

Björn Töpel adds XDP_REDIRECT support as well as tweaking the page
counting for XDP_REDIRECT so that it will function properly.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 34fd03b9 d9314c47
...@@ -159,9 +159,17 @@ enum i40e_state_t { ...@@ -159,9 +159,17 @@ enum i40e_state_t {
__I40E_BAD_EEPROM, __I40E_BAD_EEPROM,
__I40E_DOWN_REQUESTED, __I40E_DOWN_REQUESTED,
__I40E_FD_FLUSH_REQUESTED, __I40E_FD_FLUSH_REQUESTED,
__I40E_FD_ATR_AUTO_DISABLED,
__I40E_FD_SB_AUTO_DISABLED,
__I40E_RESET_FAILED, __I40E_RESET_FAILED,
__I40E_PORT_SUSPENDED, __I40E_PORT_SUSPENDED,
__I40E_VF_DISABLE, __I40E_VF_DISABLE,
__I40E_MACVLAN_SYNC_PENDING,
__I40E_UDP_FILTER_SYNC_PENDING,
__I40E_TEMP_LINK_POLLING,
__I40E_CLIENT_SERVICE_REQUESTED,
__I40E_CLIENT_L2_CHANGE,
__I40E_CLIENT_RESET,
/* This must be last as it determines the size of the BITMAP */ /* This must be last as it determines the size of the BITMAP */
__I40E_STATE_SIZE__, __I40E_STATE_SIZE__,
}; };
...@@ -510,40 +518,32 @@ struct i40e_pf { ...@@ -510,40 +518,32 @@ struct i40e_pf {
#define I40E_HW_RESTART_AUTONEG BIT(18) #define I40E_HW_RESTART_AUTONEG BIT(18)
#define I40E_HW_STOPPABLE_FW_LLDP BIT(19) #define I40E_HW_STOPPABLE_FW_LLDP BIT(19)
u64 flags; u32 flags;
#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(0) #define I40E_FLAG_RX_CSUM_ENABLED BIT(0)
#define I40E_FLAG_MSI_ENABLED BIT_ULL(1) #define I40E_FLAG_MSI_ENABLED BIT(1)
#define I40E_FLAG_MSIX_ENABLED BIT_ULL(2) #define I40E_FLAG_MSIX_ENABLED BIT(2)
#define I40E_FLAG_RSS_ENABLED BIT_ULL(3) #define I40E_FLAG_RSS_ENABLED BIT(3)
#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(4) #define I40E_FLAG_VMDQ_ENABLED BIT(4)
#define I40E_FLAG_FILTER_SYNC BIT_ULL(5) #define I40E_FLAG_SRIOV_ENABLED BIT(5)
#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(6) #define I40E_FLAG_DCB_CAPABLE BIT(6)
#define I40E_FLAG_DCB_CAPABLE BIT_ULL(7) #define I40E_FLAG_DCB_ENABLED BIT(7)
#define I40E_FLAG_DCB_ENABLED BIT_ULL(8) #define I40E_FLAG_FD_SB_ENABLED BIT(8)
#define I40E_FLAG_FD_SB_ENABLED BIT_ULL(9) #define I40E_FLAG_FD_ATR_ENABLED BIT(9)
#define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(10) #define I40E_FLAG_MFP_ENABLED BIT(10)
#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT_ULL(11) #define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT(11)
#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT_ULL(12) #define I40E_FLAG_VEB_MODE_ENABLED BIT(12)
#define I40E_FLAG_MFP_ENABLED BIT_ULL(13) #define I40E_FLAG_VEB_STATS_ENABLED BIT(13)
#define I40E_FLAG_UDP_FILTER_SYNC BIT_ULL(14) #define I40E_FLAG_LINK_POLLING_ENABLED BIT(14)
#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT_ULL(15) #define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT(15)
#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(16) #define I40E_FLAG_LEGACY_RX BIT(16)
#define I40E_FLAG_VEB_STATS_ENABLED BIT_ULL(17) #define I40E_FLAG_PTP BIT(17)
#define I40E_FLAG_LINK_POLLING_ENABLED BIT_ULL(18) #define I40E_FLAG_IWARP_ENABLED BIT(18)
#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(19) #define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT(19)
#define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(20) #define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT(20)
#define I40E_FLAG_LEGACY_RX BIT_ULL(21) #define I40E_FLAG_TC_MQPRIO BIT(21)
#define I40E_FLAG_PTP BIT_ULL(22) #define I40E_FLAG_FD_SB_INACTIVE BIT(22)
#define I40E_FLAG_IWARP_ENABLED BIT_ULL(23) #define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT(23)
#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(24) #define I40E_FLAG_DISABLE_FW_LLDP BIT(24)
#define I40E_FLAG_CLIENT_L2_CHANGE BIT_ULL(25)
#define I40E_FLAG_CLIENT_RESET BIT_ULL(26)
#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT_ULL(27)
#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT_ULL(28)
#define I40E_FLAG_TC_MQPRIO BIT_ULL(29)
#define I40E_FLAG_FD_SB_INACTIVE BIT_ULL(30)
#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT_ULL(31)
#define I40E_FLAG_DISABLE_FW_LLDP BIT_ULL(32)
struct i40e_client_instance *cinst; struct i40e_client_instance *cinst;
bool stat_offsets_loaded; bool stat_offsets_loaded;
......
...@@ -376,9 +376,8 @@ void i40e_client_subtask(struct i40e_pf *pf) ...@@ -376,9 +376,8 @@ void i40e_client_subtask(struct i40e_pf *pf)
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
int ret = 0; int ret = 0;
if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED)) if (!test_and_clear_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state))
return; return;
pf->flags &= ~I40E_FLAG_SERVICE_CLIENT_REQUESTED;
cdev = pf->cinst; cdev = pf->cinst;
/* If we're down or resetting, just bail */ /* If we're down or resetting, just bail */
...@@ -459,7 +458,7 @@ int i40e_lan_add_device(struct i40e_pf *pf) ...@@ -459,7 +458,7 @@ int i40e_lan_add_device(struct i40e_pf *pf)
* added, we can schedule a subtask to go initiate the clients if * added, we can schedule a subtask to go initiate the clients if
* they can be launched at probe time. * they can be launched at probe time.
*/ */
pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED; set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
i40e_service_event_schedule(pf); i40e_service_event_schedule(pf);
out: out:
...@@ -554,7 +553,7 @@ static void i40e_client_prepare(struct i40e_client *client) ...@@ -554,7 +553,7 @@ static void i40e_client_prepare(struct i40e_client *client)
pf = ldev->pf; pf = ldev->pf;
i40e_client_add_instance(pf); i40e_client_add_instance(pf);
/* Start the client subtask */ /* Start the client subtask */
pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED; set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
i40e_service_event_schedule(pf); i40e_service_event_schedule(pf);
} }
mutex_unlock(&i40e_device_mutex); mutex_unlock(&i40e_device_mutex);
......
...@@ -3951,7 +3951,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, ...@@ -3951,7 +3951,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) if (test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
return -ENOSPC; return -ENOSPC;
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
...@@ -4436,21 +4436,12 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) ...@@ -4436,21 +4436,12 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
} }
} }
/* Compare and exchange the new flags into place. If we failed, that /* Now that we've checked to ensure that the new flags are valid, load
* is if cmpxchg returns anything but the old value, this means that * them into place. Since we only modify flags either (a) during
* something else has modified the flags variable since we copied it * initialization or (b) while holding the RTNL lock, we don't need
* originally. We'll just punt with an error and log something in the * anything fancy here.
* message buffer.
*
* This is the point of no return for this function. We need to have
* checked any discrepancies or misconfigurations and returned
* EOPNOTSUPP before updating pf->flags here.
*/ */
if (cmpxchg64(&pf->flags, orig_flags, new_flags) != orig_flags) { pf->flags = new_flags;
dev_warn(&pf->pdev->dev,
"Unable to update pf->flags as it was modified by another thread...\n");
return -EAGAIN;
}
/* Process any additional changes needed as a result of flag changes. /* Process any additional changes needed as a result of flag changes.
* The changed_flags value reflects the list of bits that were * The changed_flags value reflects the list of bits that were
...@@ -4460,7 +4451,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) ...@@ -4460,7 +4451,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
/* Flush current ATR settings if ATR was disabled */ /* Flush current ATR settings if ATR was disabled */
if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) && if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) &&
!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) { !(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) {
pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED; set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
} }
......
...@@ -1083,13 +1083,13 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) ...@@ -1083,13 +1083,13 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
&osd->rx_lpi_count, &nsd->rx_lpi_count); &osd->rx_lpi_count, &nsd->rx_lpi_count);
if (pf->flags & I40E_FLAG_FD_SB_ENABLED && if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
!(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)) !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
nsd->fd_sb_status = true; nsd->fd_sb_status = true;
else else
nsd->fd_sb_status = false; nsd->fd_sb_status = false;
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED && if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
!(pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)) !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
nsd->fd_atr_status = true; nsd->fd_atr_status = true;
else else
nsd->fd_atr_status = false; nsd->fd_atr_status = false;
...@@ -1382,7 +1382,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, ...@@ -1382,7 +1382,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
hash_add(vsi->mac_filter_hash, &f->hlist, key); hash_add(vsi->mac_filter_hash, &f->hlist, key);
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
vsi->back->flags |= I40E_FLAG_FILTER_SYNC; set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
} }
/* If we're asked to add a filter that has been marked for removal, it /* If we're asked to add a filter that has been marked for removal, it
...@@ -1432,7 +1432,7 @@ void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f) ...@@ -1432,7 +1432,7 @@ void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
} }
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
vsi->back->flags |= I40E_FLAG_FILTER_SYNC; set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->state);
} }
/** /**
...@@ -1955,7 +1955,7 @@ static void i40e_set_rx_mode(struct net_device *netdev) ...@@ -1955,7 +1955,7 @@ static void i40e_set_rx_mode(struct net_device *netdev)
/* check for other flag changes */ /* check for other flag changes */
if (vsi->current_netdev_flags != vsi->netdev->flags) { if (vsi->current_netdev_flags != vsi->netdev->flags) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
vsi->back->flags |= I40E_FLAG_FILTER_SYNC; set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
} }
} }
...@@ -2577,9 +2577,10 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) ...@@ -2577,9 +2577,10 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{ {
int v; int v;
if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC)) if (!pf)
return;
if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
return; return;
pf->flags &= ~I40E_FLAG_FILTER_SYNC;
for (v = 0; v < pf->num_alloc_vsi; v++) { for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v] && if (pf->vsi[v] &&
...@@ -2588,7 +2589,8 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) ...@@ -2588,7 +2589,8 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
if (ret) { if (ret) {
/* come back and try again later */ /* come back and try again later */
pf->flags |= I40E_FLAG_FILTER_SYNC; set_bit(__I40E_MACVLAN_SYNC_PENDING,
pf->state);
break; break;
} }
} }
...@@ -2632,8 +2634,8 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu) ...@@ -2632,8 +2634,8 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu = new_mtu; netdev->mtu = new_mtu;
if (netif_running(netdev)) if (netif_running(netdev))
i40e_vsi_reinit_locked(vsi); i40e_vsi_reinit_locked(vsi);
pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED | set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
I40E_FLAG_CLIENT_L2_CHANGE); set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
return 0; return 0;
} }
...@@ -4720,9 +4722,9 @@ static void i40e_vsi_close(struct i40e_vsi *vsi) ...@@ -4720,9 +4722,9 @@ static void i40e_vsi_close(struct i40e_vsi *vsi)
i40e_vsi_free_tx_resources(vsi); i40e_vsi_free_tx_resources(vsi);
i40e_vsi_free_rx_resources(vsi); i40e_vsi_free_rx_resources(vsi);
vsi->current_netdev_flags = 0; vsi->current_netdev_flags = 0;
pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED; set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
pf->flags |= I40E_FLAG_CLIENT_RESET; set_bit(__I40E_CLIENT_RESET, pf->state);
} }
/** /**
...@@ -6493,7 +6495,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi) ...@@ -6493,7 +6495,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
/* On the next run of the service_task, notify any clients of the new /* On the next run of the service_task, notify any clients of the new
* opened netdev * opened netdev
*/ */
pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED; set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
i40e_service_event_schedule(pf); i40e_service_event_schedule(pf);
return 0; return 0;
...@@ -8035,8 +8037,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, ...@@ -8035,8 +8037,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
i40e_service_event_schedule(pf); i40e_service_event_schedule(pf);
} else { } else {
i40e_pf_unquiesce_all_vsi(pf); i40e_pf_unquiesce_all_vsi(pf);
pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED | set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
I40E_FLAG_CLIENT_L2_CHANGE); set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
} }
exit: exit:
...@@ -8142,12 +8144,10 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf) ...@@ -8142,12 +8144,10 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf)
**/ **/
static void i40e_reenable_fdir_sb(struct i40e_pf *pf) static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
{ {
if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) { if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
pf->flags &= ~I40E_FLAG_FD_SB_AUTO_DISABLED;
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
(I40E_DEBUG_FD & pf->hw.debug_mask)) (I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
}
} }
/** /**
...@@ -8156,7 +8156,7 @@ static void i40e_reenable_fdir_sb(struct i40e_pf *pf) ...@@ -8156,7 +8156,7 @@ static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
**/ **/
static void i40e_reenable_fdir_atr(struct i40e_pf *pf) static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
{ {
if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) { if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
/* ATR uses the same filtering logic as SB rules. It only /* ATR uses the same filtering logic as SB rules. It only
* functions properly if the input set mask is at the default * functions properly if the input set mask is at the default
* settings. It is safe to restore the default input set * settings. It is safe to restore the default input set
...@@ -8166,7 +8166,6 @@ static void i40e_reenable_fdir_atr(struct i40e_pf *pf) ...@@ -8166,7 +8166,6 @@ static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
I40E_L3_SRC_MASK | I40E_L3_DST_MASK | I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK); I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
(I40E_DEBUG_FD & pf->hw.debug_mask)) (I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n"); dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
...@@ -8289,7 +8288,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) ...@@ -8289,7 +8288,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
} }
pf->fd_flush_timestamp = jiffies; pf->fd_flush_timestamp = jiffies;
pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED; set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
/* flush all filters */ /* flush all filters */
wr32(&pf->hw, I40E_PFQF_CTL_1, wr32(&pf->hw, I40E_PFQF_CTL_1,
I40E_PFQF_CTL_1_CLEARFDTABLE_MASK); I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
...@@ -8309,7 +8308,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) ...@@ -8309,7 +8308,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
/* replay sideband filters */ /* replay sideband filters */
i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
if (!disable_atr && !pf->fd_tcp4_filter_cnt) if (!disable_atr && !pf->fd_tcp4_filter_cnt)
pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED; clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
if (I40E_DEBUG_FD & pf->hw.debug_mask) if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
...@@ -8433,13 +8432,12 @@ static void i40e_link_event(struct i40e_pf *pf) ...@@ -8433,13 +8432,12 @@ static void i40e_link_event(struct i40e_pf *pf)
/* On success, disable temp link polling */ /* On success, disable temp link polling */
if (status == I40E_SUCCESS) { if (status == I40E_SUCCESS) {
if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING) clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING;
} else { } else {
/* Enable link polling temporarily until i40e_get_link_status /* Enable link polling temporarily until i40e_get_link_status
* returns I40E_SUCCESS * returns I40E_SUCCESS
*/ */
pf->flags |= I40E_FLAG_TEMP_LINK_POLLING; set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
status); status);
return; return;
...@@ -8491,7 +8489,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) ...@@ -8491,7 +8489,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
pf->service_timer_previous = jiffies; pf->service_timer_previous = jiffies;
if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) || if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
(pf->flags & I40E_FLAG_TEMP_LINK_POLLING)) test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
i40e_link_event(pf); i40e_link_event(pf);
/* Update the stats for active netdevs so the network stack /* Update the stats for active netdevs so the network stack
...@@ -9719,7 +9717,7 @@ static void i40e_sync_udp_filters(struct i40e_pf *pf) ...@@ -9719,7 +9717,7 @@ static void i40e_sync_udp_filters(struct i40e_pf *pf)
pf->pending_udp_bitmap |= BIT_ULL(i); pf->pending_udp_bitmap |= BIT_ULL(i);
} }
pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
} }
/** /**
...@@ -9733,11 +9731,9 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) ...@@ -9733,11 +9731,9 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
u16 port; u16 port;
int i; int i;
if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC)) if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
return; return;
pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;
for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
if (pf->pending_udp_bitmap & BIT_ULL(i)) { if (pf->pending_udp_bitmap & BIT_ULL(i)) {
pf->pending_udp_bitmap &= ~BIT_ULL(i); pf->pending_udp_bitmap &= ~BIT_ULL(i);
...@@ -9789,17 +9785,15 @@ static void i40e_service_task(struct work_struct *work) ...@@ -9789,17 +9785,15 @@ static void i40e_service_task(struct work_struct *work)
i40e_vc_process_vflr_event(pf); i40e_vc_process_vflr_event(pf);
i40e_watchdog_subtask(pf); i40e_watchdog_subtask(pf);
i40e_fdir_reinit_subtask(pf); i40e_fdir_reinit_subtask(pf);
if (pf->flags & I40E_FLAG_CLIENT_RESET) { if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
/* Client subtask will reopen next time through. */ /* Client subtask will reopen next time through. */
i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true); i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
pf->flags &= ~I40E_FLAG_CLIENT_RESET;
} else { } else {
i40e_client_subtask(pf); i40e_client_subtask(pf);
if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) { if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
pf->state))
i40e_notify_client_of_l2_param_changes( i40e_notify_client_of_l2_param_changes(
pf->vsi[pf->lan_vsi]); pf->vsi[pf->lan_vsi]);
pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE;
}
} }
i40e_sync_filters_subtask(pf); i40e_sync_filters_subtask(pf);
i40e_sync_udp_filters_subtask(pf); i40e_sync_udp_filters_subtask(pf);
...@@ -11291,21 +11285,19 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) ...@@ -11291,21 +11285,19 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
need_reset = true; need_reset = true;
i40e_fdir_filter_exit(pf); i40e_fdir_filter_exit(pf);
} }
pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED | pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
I40E_FLAG_FD_SB_AUTO_DISABLED); clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
pf->flags |= I40E_FLAG_FD_SB_INACTIVE; pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
/* reset fd counters */ /* reset fd counters */
pf->fd_add_err = 0; pf->fd_add_err = 0;
pf->fd_atr_cnt = 0; pf->fd_atr_cnt = 0;
/* if ATR was auto disabled it can be re-enabled. */ /* if ATR was auto disabled it can be re-enabled. */
if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) { if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
(I40E_DEBUG_FD & pf->hw.debug_mask)) (I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
} }
}
return need_reset; return need_reset;
} }
...@@ -11437,7 +11429,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev, ...@@ -11437,7 +11429,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
/* New port: add it and mark its index in the bitmap */ /* New port: add it and mark its index in the bitmap */
pf->udp_ports[next_idx].port = port; pf->udp_ports[next_idx].port = port;
pf->pending_udp_bitmap |= BIT_ULL(next_idx); pf->pending_udp_bitmap |= BIT_ULL(next_idx);
pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
} }
/** /**
...@@ -11478,7 +11470,7 @@ static void i40e_udp_tunnel_del(struct net_device *netdev, ...@@ -11478,7 +11470,7 @@ static void i40e_udp_tunnel_del(struct net_device *netdev,
*/ */
pf->udp_ports[idx].port = 0; pf->udp_ports[idx].port = 0;
pf->pending_udp_bitmap |= BIT_ULL(idx); pf->pending_udp_bitmap |= BIT_ULL(idx);
pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
return; return;
not_found: not_found:
...@@ -11823,6 +11815,8 @@ static const struct net_device_ops i40e_netdev_ops = { ...@@ -11823,6 +11815,8 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_bridge_getlink = i40e_ndo_bridge_getlink, .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
.ndo_bridge_setlink = i40e_ndo_bridge_setlink, .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
.ndo_bpf = i40e_xdp, .ndo_bpf = i40e_xdp,
.ndo_xdp_xmit = i40e_xdp_xmit,
.ndo_xdp_flush = i40e_xdp_flush,
}; };
/** /**
...@@ -12240,7 +12234,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ...@@ -12240,7 +12234,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
if (f_count) { if (f_count) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
pf->flags |= I40E_FLAG_FILTER_SYNC; set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
} }
/* Update VSI BW information */ /* Update VSI BW information */
...@@ -14356,7 +14350,13 @@ static int __maybe_unused i40e_suspend(struct device *dev) ...@@ -14356,7 +14350,13 @@ static int __maybe_unused i40e_suspend(struct device *dev)
if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
i40e_enable_mc_magic_wake(pf); i40e_enable_mc_magic_wake(pf);
i40e_prep_for_reset(pf, false); /* Since we're going to destroy queues during the
* i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
* whole section
*/
rtnl_lock();
i40e_prep_for_reset(pf, true);
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
...@@ -14368,6 +14368,8 @@ static int __maybe_unused i40e_suspend(struct device *dev) ...@@ -14368,6 +14368,8 @@ static int __maybe_unused i40e_suspend(struct device *dev)
*/ */
i40e_clear_interrupt_scheme(pf); i40e_clear_interrupt_scheme(pf);
rtnl_unlock();
return 0; return 0;
} }
...@@ -14385,6 +14387,11 @@ static int __maybe_unused i40e_resume(struct device *dev) ...@@ -14385,6 +14387,11 @@ static int __maybe_unused i40e_resume(struct device *dev)
if (!test_bit(__I40E_SUSPENDED, pf->state)) if (!test_bit(__I40E_SUSPENDED, pf->state))
return 0; return 0;
/* We need to hold the RTNL lock prior to restoring interrupt schemes,
* since we're going to be restoring queues
*/
rtnl_lock();
/* We cleared the interrupt scheme when we suspended, so we need to /* We cleared the interrupt scheme when we suspended, so we need to
* restore it now to resume device functionality. * restore it now to resume device functionality.
*/ */
...@@ -14395,7 +14402,9 @@ static int __maybe_unused i40e_resume(struct device *dev) ...@@ -14395,7 +14402,9 @@ static int __maybe_unused i40e_resume(struct device *dev)
} }
clear_bit(__I40E_DOWN, pf->state); clear_bit(__I40E_DOWN, pf->state);
i40e_reset_and_rebuild(pf, false, false); i40e_reset_and_rebuild(pf, false, true);
rtnl_unlock();
/* Clear suspended state last after everything is recovered */ /* Clear suspended state last after everything is recovered */
clear_bit(__I40E_SUSPENDED, pf->state); clear_bit(__I40E_SUSPENDED, pf->state);
......
...@@ -336,7 +336,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, ...@@ -336,7 +336,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
I40E_DEBUG_FD & pf->hw.debug_mask) I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED; set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
} else { } else {
pf->fd_tcp4_filter_cnt--; pf->fd_tcp4_filter_cnt--;
} }
...@@ -594,8 +594,14 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, ...@@ -594,8 +594,14 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf); pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) && if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) { test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED; /* These set_bit() calls aren't atomic with the
* test_bit() here, but worse case we potentially
* disable ATR and queue a flush right after SB
* support is re-enabled. That shouldn't cause an
* issue in practice
*/
set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
} }
...@@ -608,12 +614,11 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, ...@@ -608,12 +614,11 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
*/ */
if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
!(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)) { !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
pf->flags |= I40E_FLAG_FD_SB_AUTO_DISABLED; pf->state))
if (I40E_DEBUG_FD & pf->hw.debug_mask) if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
} }
}
} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
if (I40E_DEBUG_FD & pf->hw.debug_mask) if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n", dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
...@@ -1583,9 +1588,8 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, ...@@ -1583,9 +1588,8 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
bi->dma = dma; bi->dma = dma;
bi->page = page; bi->page = page;
bi->page_offset = i40e_rx_offset(rx_ring); bi->page_offset = i40e_rx_offset(rx_ring);
page_ref_add(page, USHRT_MAX - 1);
/* initialize pagecnt_bias to 1 representing we fully own page */ bi->pagecnt_bias = USHRT_MAX;
bi->pagecnt_bias = 1;
return true; return true;
} }
...@@ -1951,8 +1955,8 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer) ...@@ -1951,8 +1955,8 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
* the pagecnt_bias and page count so that we fully restock the * the pagecnt_bias and page count so that we fully restock the
* number of references the driver holds. * number of references the driver holds.
*/ */
if (unlikely(!pagecnt_bias)) { if (unlikely(pagecnt_bias == 1)) {
page_ref_add(page, USHRT_MAX); page_ref_add(page, USHRT_MAX - 1);
rx_buffer->pagecnt_bias = USHRT_MAX; rx_buffer->pagecnt_bias = USHRT_MAX;
} }
...@@ -2210,7 +2214,7 @@ static int i40e_xmit_xdp_ring(struct xdp_buff *xdp, ...@@ -2210,7 +2214,7 @@ static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring, static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
struct xdp_buff *xdp) struct xdp_buff *xdp)
{ {
int result = I40E_XDP_PASS; int err, result = I40E_XDP_PASS;
struct i40e_ring *xdp_ring; struct i40e_ring *xdp_ring;
struct bpf_prog *xdp_prog; struct bpf_prog *xdp_prog;
u32 act; u32 act;
...@@ -2229,6 +2233,10 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring, ...@@ -2229,6 +2233,10 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
result = i40e_xmit_xdp_ring(xdp, xdp_ring); result = i40e_xmit_xdp_ring(xdp, xdp_ring);
break; break;
case XDP_REDIRECT:
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
break;
default: default:
bpf_warn_invalid_xdp_action(act); bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED: case XDP_ABORTED:
...@@ -2264,6 +2272,15 @@ static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring, ...@@ -2264,6 +2272,15 @@ static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
#endif #endif
} }
/**
 * i40e_xdp_ring_update_tail - bump the XDP Tx ring tail register
 * @xdp_ring: XDP Tx ring whose tail should be advanced to next_to_use
 *
 * Tells the hardware that new descriptors up to next_to_use are ready.
 * Shared by the Rx clean path (after XDP_TX/XDP_REDIRECT batching) and
 * the ndo_xdp_flush hook.
 **/
static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
{
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();
	/* The relaxed (non-ordering) MMIO write is sufficient here
	 * because the wmb() above already orders the descriptor
	 * writes against this doorbell update.
	 */
	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
}
/** /**
* i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on * @rx_ring: rx descriptor ring to transact packets on
...@@ -2398,16 +2415,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) ...@@ -2398,16 +2415,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
} }
if (xdp_xmit) { if (xdp_xmit) {
struct i40e_ring *xdp_ring; struct i40e_ring *xdp_ring =
rx_ring->vsi->xdp_rings[rx_ring->queue_index];
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; i40e_xdp_ring_update_tail(xdp_ring);
xdp_do_flush_map();
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch.
*/
wmb();
writel(xdp_ring->next_to_use, xdp_ring->tail);
} }
rx_ring->skb = skb; rx_ring->skb = skb;
...@@ -2647,7 +2659,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2647,7 +2659,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
return; return;
if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
return; return;
/* if sampling is disabled do nothing */ /* if sampling is disabled do nothing */
...@@ -2687,7 +2699,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2687,7 +2699,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
th = (struct tcphdr *)(hdr.network + hlen); th = (struct tcphdr *)(hdr.network + hlen);
/* Due to lack of space, no more new filters can be programmed */ /* Due to lack of space, no more new filters can be programmed */
if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)) if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
return; return;
if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) { if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
/* HW ATR eviction will take care of removing filters on FIN /* HW ATR eviction will take care of removing filters on FIN
...@@ -3655,3 +3667,49 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ...@@ -3655,3 +3667,49 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
return i40e_xmit_frame_ring(skb, tx_ring); return i40e_xmit_frame_ring(skb, tx_ring);
} }
/**
 * i40e_xdp_xmit - Implements ndo_xdp_xmit
 * @dev: netdev
 * @xdp: XDP buffer
 *
 * Queues the buffer on the XDP Tx ring of the current CPU's queue pair.
 * The tail register is not bumped here; the caller flushes via
 * ndo_xdp_flush.
 *
 * Returns Zero if sent, else an error code
 **/
int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
{
	struct i40e_netdev_priv *priv = netdev_priv(dev);
	unsigned int qid = smp_processor_id();
	struct i40e_vsi *vsi = priv->vsi;

	/* VSI going down: nothing may be transmitted */
	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	/* XDP must be enabled and there must be an XDP ring for this CPU */
	if (!i40e_enabled_xdp_vsi(vsi) || qid >= vsi->num_queue_pairs)
		return -ENXIO;

	/* Anything other than I40E_XDP_TX means the descriptor was not
	 * placed on the ring (e.g. the ring was full).
	 */
	if (i40e_xmit_xdp_ring(xdp, vsi->xdp_rings[qid]) != I40E_XDP_TX)
		return -ENOSPC;

	return 0;
}
/**
 * i40e_xdp_flush - Implements ndo_xdp_flush
 * @dev: netdev
 *
 * Bumps the tail of the current CPU's XDP Tx ring so the hardware
 * fetches the descriptors queued by earlier ndo_xdp_xmit calls.
 * Silently does nothing when the VSI is down or XDP is not enabled
 * for this queue.
 **/
void i40e_xdp_flush(struct net_device *dev)
{
	struct i40e_netdev_priv *priv = netdev_priv(dev);
	unsigned int qid = smp_processor_id();
	struct i40e_vsi *vsi = priv->vsi;

	/* Same eligibility checks as i40e_xdp_xmit, collapsed into one
	 * guard since every failure is a silent no-op here.
	 */
	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	    !i40e_enabled_xdp_vsi(vsi) || qid >= vsi->num_queue_pairs)
		return;

	i40e_xdp_ring_update_tail(vsi->xdp_rings[qid]);
}
...@@ -510,6 +510,8 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw); ...@@ -510,6 +510,8 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
void i40e_detect_recover_hung(struct i40e_vsi *vsi); void i40e_detect_recover_hung(struct i40e_vsi *vsi);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb); bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp);
void i40e_xdp_flush(struct net_device *dev);
/** /**
* i40e_get_head - Retrieve head from head writeback * i40e_get_head - Retrieve head from head writeback
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment