Commit 7bdaae27 authored by David S. Miller

Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
10GbE Intel Wired LAN Driver Updates 2018-10-03

This series contains updates to ixgbe/ixgbevf and a few fixes for i40e and iavf.

Shannon Nelson fixes the message length for IPsec mailbox messages.

Radoslaw fixes a transmit hang that occurs when XDP_TX exceeds the queue
limit, and fixes a crash when we restore flow director filters after a reset.

YueHaibing cleans up dead code that did not have any callers.

Dan Carpenter fixes an "off by one" error in the ixgbevf IPsec code.

Nathan Chancellor fixes the i40e driver to use the correct enum for link
speed.  He also removes a debug statement that produced no useful
information, since its condition always evaluated to true.

Most notably, Björn introduces zero-copy AF_XDP support for the ixgbe
driver.  The ixgbe zero-copy code is located in its own file, ixgbe_xsk.[ch],
analogous to the i40e ZC support. Again, as in i40e, code paths have
been copied from the XDP path to the zero-copy path. Going forward we
will try to generalize more code between the AF_XDP ZC drivers, and
also reduce the heavy copy-and-paste.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 072eff2d 37ebb5fa
@@ -132,8 +132,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 		dev_info(&pf->pdev->dev, "    vlan_features = 0x%08lx\n",
 			 (unsigned long int)nd->vlan_features);
 	}
-	dev_info(&pf->pdev->dev, "    active_vlans is %s\n",
-		 vsi->active_vlans ? "<valid>" : "<null>");
 	dev_info(&pf->pdev->dev,
 		 "    flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
 		 vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);
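Background on the hunk above: in struct i40e_vsi, active_vlans is declared via DECLARE_BITMAP(), i.e. it is an array embedded in the struct, not a pointer, so the "<valid>"/"<null>" ternary could never take the "<null>" arm. A minimal userspace sketch of the pitfall, with invented names:

	#include <stdio.h>

	/* Stand-in for struct i40e_vsi: active_vlans is an embedded array
	 * (DECLARE_BITMAP expands to one), not a pointer. */
	struct vsi {
		unsigned long active_vlans[64];
	};

	int main(void)
	{
		struct vsi v = { { 0 } };

		/* An array member decays to a non-NULL pointer, so the "<null>"
		 * arm below is unreachable -- exactly why the dev_info() went. */
		printf("active_vlans is %s\n", v.active_vlans ? "<valid>" : "<null>");
		return 0;
	}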
@@ -4256,7 +4256,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
 		vf->link_forced = true;
 		vf->link_up = true;
 		pfe.event_data.link_event.link_status = true;
-		pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
+		pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
 		break;
 	case IFLA_VF_LINK_STATE_DISABLE:
 		vf->link_forced = true;
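For context on the one-line fix above: pfe is a struct virtchnl_pf_event, so its link_speed field is meant to carry virtchnl's link-speed enum, not i40e's adminq enum; the two are distinct types whose numeric encodings need not line up. A small sketch of the general hazard, with made-up enum values (not the real kernel ones):

	#include <stdio.h>

	/* Two unrelated enums that both name a 40G speed; the values below
	 * are invented for illustration and are NOT the kernel's. */
	enum aq_link_speed { AQ_LINK_SPEED_40GB = 0x10 };
	enum vc_link_speed { VC_LINK_SPEED_40GB = 0x08 };

	int main(void)
	{
		/* C happily lets one enum's value land in the other's slot... */
		enum vc_link_speed reported = (enum vc_link_speed)AQ_LINK_SPEED_40GB;

		/* ...but a consumer decoding vc_link_speed sees a bogus value. */
		printf("consumer sees 0x%x, expected 0x%x\n",
		       (unsigned int)reported, (unsigned int)VC_LINK_SPEED_40GB);
		return 0;
	}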
@@ -342,7 +342,7 @@ struct iavf_adapter {
 	struct iavf_channel_config ch_config;
 	u8 num_tc;
 	struct list_head cloud_filter_list;
-	/* lock to protest access to the cloud filter list */
+	/* lock to protect access to the cloud filter list */
 	spinlock_t cloud_filter_list_lock;
 	u16 num_cloud_filters;
 };
@@ -8,7 +8,8 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
 
 ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
               ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
-              ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o
+              ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \
+              ixgbe_xsk.o
 
 ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
                              ixgbe_dcb_82599.o ixgbe_dcb_nl.o
@@ -228,13 +228,17 @@ struct ixgbe_tx_buffer {
 struct ixgbe_rx_buffer {
 	struct sk_buff *skb;
 	dma_addr_t dma;
-	struct page *page;
-#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
-	__u32 page_offset;
-#else
-	__u16 page_offset;
-#endif
-	__u16 pagecnt_bias;
+	union {
+		struct {
+			struct page *page;
+			__u32 page_offset;
+			__u16 pagecnt_bias;
+		};
+		struct {
+			void *addr;
+			u64 handle;
+		};
+	};
 };
 
 struct ixgbe_queue_stats {
@@ -271,6 +275,7 @@ enum ixgbe_ring_state_t {
 	__IXGBE_TX_DETECT_HANG,
 	__IXGBE_HANG_CHECK_ARMED,
 	__IXGBE_TX_XDP_RING,
+	__IXGBE_TX_DISABLED,
 };
 
 #define ring_uses_build_skb(ring) \
@@ -347,6 +352,10 @@ struct ixgbe_ring {
 		struct ixgbe_rx_queue_stats rx_stats;
 	};
 	struct xdp_rxq_info xdp_rxq;
+	struct xdp_umem *xsk_umem;
+	struct zero_copy_allocator zca; /* ZC allocator anchor */
+	u16 ring_idx;		/* {rx,tx,xdp}_ring back reference idx */
+	u16 rx_buf_len;
 } ____cacheline_internodealigned_in_smp;
 
 enum ixgbe_ring_f_enum {
@@ -764,6 +773,11 @@ struct ixgbe_adapter {
 #ifdef CONFIG_XFRM_OFFLOAD
 	struct ixgbe_ipsec *ipsec;
 #endif /* CONFIG_XFRM_OFFLOAD */
+
+	/* AF_XDP zero-copy */
+	struct xdp_umem **xsk_umems;
+	u16 num_xsk_umems_used;
+	u16 num_xsk_umems;
 };
 
 static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
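A note on the ixgbe.h changes above: an Rx buffer is either page-backed (page/page_offset/pagecnt_bias) or, on a zero-copy socket, umem-backed (addr/handle), never both at once, so the union lets the ZC fields ride along without growing struct ixgbe_rx_buffer. A self-contained sketch of the pattern, with simplified types and invented field sizes:

	#include <stdio.h>
	#include <stdint.h>

	/* Simplified stand-in for struct ixgbe_rx_buffer (C11 anonymous
	 * union/struct): the two views are mutually exclusive per ring. */
	struct rx_buffer {
		union {
			struct {			/* page-backed path */
				void	*page;
				uint32_t page_offset;
				uint16_t pagecnt_bias;
			};
			struct {			/* AF_XDP zero-copy path */
				void	*addr;		/* kernel view of the umem frame */
				uint64_t handle;	/* frame handle for user space */
			};
		};
	};

	int main(void)
	{
		/* The union costs nothing: the struct is as big as its larger leg. */
		printf("sizeof(struct rx_buffer) = %zu\n", sizeof(struct rx_buffer));
		return 0;
	}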
@@ -3484,17 +3484,6 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
 }
 
-/**
- * ixgbe_fw_recovery_mode - Check if in FW NVM recovery mode
- * @hw: pointer to hardware structure
- */
-bool ixgbe_fw_recovery_mode(struct ixgbe_hw *hw)
-{
-	if (hw->mac.ops.fw_recovery_mode)
-		return hw->mac.ops.fw_recovery_mode(hw);
-	return false;
-}
-
 /**
  * ixgbe_get_device_caps_generic - Get additional device capabilities
  * @hw: pointer to hardware structure
@@ -1055,7 +1055,7 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 	int txr_remaining = adapter->num_tx_queues;
 	int xdp_remaining = adapter->num_xdp_queues;
 	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
-	int err;
+	int err, i;
 
 	/* only one q_vector if MSI-X is disabled. */
 	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
@@ -1097,6 +1097,21 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 		xdp_idx += xqpv;
 	}
 
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		if (adapter->rx_ring[i])
+			adapter->rx_ring[i]->ring_idx = i;
+	}
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		if (adapter->tx_ring[i])
+			adapter->tx_ring[i]->ring_idx = i;
+	}
+
+	for (i = 0; i < adapter->num_xdp_queues; i++) {
+		if (adapter->xdp_ring[i])
+			adapter->xdp_ring[i]->ring_idx = i;
+	}
+
 	return 0;
 
 err_out:
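The three loops above stamp every ring with its position in the adapter's rx_ring/tx_ring/xdp_ring arrays, so code that only holds a ring pointer can find its slot again (for example when attaching a umem to a given queue id). A minimal sketch of the back-reference idea, with hypothetical names:

	#include <stdio.h>

	#define NUM_RINGS 4

	struct ring {
		unsigned short ring_idx;	/* back reference into rx_ring[] */
	};

	static struct ring  rings[NUM_RINGS];
	static struct ring *rx_ring[NUM_RINGS];

	int main(void)
	{
		struct ring *r;
		int i;

		/* Same pattern as the loops in ixgbe_alloc_q_vectors(): stamp
		 * each ring with its own slot at setup time... */
		for (i = 0; i < NUM_RINGS; i++) {
			rx_ring[i] = &rings[i];
			rx_ring[i]->ring_idx = i;
		}

		/* ...so that later, holding only the ring pointer, its slot
		 * (and e.g. the matching queue id) can be recovered in O(1). */
		r = rx_ring[2];
		printf("ring %p lives at rx_ring[%u]\n", (void *)r, r->ring_idx);
		return 0;
	}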
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2018 Intel Corporation. */

#ifndef _IXGBE_TXRX_COMMON_H_
#define _IXGBE_TXRX_COMMON_H_

#define IXGBE_XDP_PASS		0
#define IXGBE_XDP_CONSUMED	BIT(0)
#define IXGBE_XDP_TX		BIT(1)
#define IXGBE_XDP_REDIR		BIT(2)

#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
		       IXGBE_TXD_CMD_RS)

int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
			struct xdp_frame *xdpf);
bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
			   union ixgbe_adv_rx_desc *rx_desc,
			   struct sk_buff *skb);
void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
			      union ixgbe_adv_rx_desc *rx_desc,
			      struct sk_buff *skb);
void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
		  struct sk_buff *skb);

void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring);
void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask);

void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring);
void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring);

struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *ring);
int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem,
			 u16 qid);
int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
			 u16 qid);

void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);

void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count);
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
			  struct ixgbe_ring *rx_ring,
			  const int budget);
void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring);
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
			    struct ixgbe_ring *tx_ring, int napi_budget);
int ixgbe_xsk_async_xmit(struct net_device *dev, u32 queue_id);
void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring);

#endif /* #define _IXGBE_TXRX_COMMON_H_ */
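The IXGBE_XDP_* verdicts above are single bits rather than sequential codes so that the Rx cleanup loop can OR per-packet results into one status word and perform the costly follow-ups (tail bump for XDP_TX, redirect flush for XDP_REDIR) once per NAPI poll instead of once per packet. A sketch of that accumulate-then-flush pattern, with invented helper names:

	#include <stdio.h>

	/* Mirrors the IXGBE_XDP_* encoding: one bit per verdict. */
	#define XDP_PASS	0
	#define XDP_CONSUMED	(1u << 0)
	#define XDP_TX		(1u << 1)
	#define XDP_REDIR	(1u << 2)

	/* Invented stand-in for running the XDP program on one packet. */
	static unsigned int run_xdp(int pkt)
	{
		return (pkt & 1) ? XDP_TX : XDP_REDIR;
	}

	int main(void)
	{
		unsigned int status = 0;
		int pkt;

		/* Accumulate verdicts across the whole NAPI budget... */
		for (pkt = 0; pkt < 64; pkt++)
			status |= run_xdp(pkt);

		/* ...then pay for each side effect at most once per poll. */
		if (status & XDP_REDIR)
			printf("flush redirects once\n");
		if (status & XDP_TX)
			printf("bump the XDP Tx tail once\n");
		return 0;
	}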
This diff is collapsed.
@@ -21,7 +21,6 @@ static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
 	u32 msgbuf[IXGBE_VFMAILBOX_SIZE] = { 0 };
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct sa_mbx_msg *sam;
-	u16 msglen;
 	int ret;
 
 	/* send the important bits to the PF */
@@ -38,16 +37,14 @@ static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
 	memcpy(sam->key, xs->aead->alg_key, sizeof(sam->key));
 
 	msgbuf[0] = IXGBE_VF_IPSEC_ADD;
-	msglen = sizeof(*sam) + sizeof(msgbuf[0]);
 
 	spin_lock_bh(&adapter->mbx_lock);
 
-	ret = hw->mbx.ops.write_posted(hw, msgbuf, msglen);
+	ret = hw->mbx.ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
 	if (ret)
 		goto out;
 
-	msglen = sizeof(msgbuf[0]) * 2;
-	ret = hw->mbx.ops.read_posted(hw, msgbuf, msglen);
+	ret = hw->mbx.ops.read_posted(hw, msgbuf, 2);
 	if (ret)
 		goto out;
 
@@ -80,11 +77,11 @@ static int ixgbevf_ipsec_del_pf_sa(struct ixgbevf_adapter *adapter, int pfsa)
 
 	spin_lock_bh(&adapter->mbx_lock);
 
-	err = hw->mbx.ops.write_posted(hw, msgbuf, sizeof(msgbuf));
+	err = hw->mbx.ops.write_posted(hw, msgbuf, 2);
 	if (err)
 		goto out;
 
-	err = hw->mbx.ops.read_posted(hw, msgbuf, sizeof(msgbuf));
+	err = hw->mbx.ops.read_posted(hw, msgbuf, 2);
 	if (err)
 		goto out;
 
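As I read the two mailbox hunks above, both bugs are the same unit confusion: write_posted()/read_posted() expect a length in 32-bit words, while the removed code computed byte counts via sizeof(). The fix passes word counts directly (the full IXGBE_VFMAILBOX_SIZE buffer on write, 2 words on read). A tiny sketch of the bytes-vs-dwords distinction, with stand-in types:

	#include <stdio.h>
	#include <stdint.h>

	/* Stand-in for sa_mbx_msg; the real layout lives in the driver. */
	struct sa_msg {
		uint32_t pfsa;
		uint8_t	 key[16];
	};

	int main(void)
	{
		uint32_t msgbuf[16] = { 0 };

		/* The buggy pattern: sizeof() counts bytes... */
		size_t bytes = sizeof(struct sa_msg) + sizeof(msgbuf[0]);

		/* ...but a dword-based mailbox API wants 32-bit words, a
		 * value four times smaller. */
		size_t dwords = bytes / sizeof(uint32_t);

		printf("%zu bytes != %zu mailbox words\n", bytes, dwords);
		return 0;
	}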
@@ -470,7 +467,7 @@ int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
 	}
 
 	sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
-	if (unlikely(sa_idx > IXGBE_IPSEC_MAX_SA_COUNT)) {
+	if (unlikely(sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
 		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
 			   __func__, sa_idx, xs->xso.offload_handle);
 		return 0;
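The final hunk is the classic inclusive-bound off-by-one: a table of IXGBE_IPSEC_MAX_SA_COUNT entries has valid indices 0 through MAX-1, so the guard must also reject sa_idx == MAX, hence '>' becoming '>='. A tiny sketch (the table size here is illustrative):

	#include <stdio.h>

	#define MAX_SA_COUNT 1024	/* illustrative: valid indices are 0..1023 */

	static int sa_table[MAX_SA_COUNT];

	static int sa_lookup(unsigned int sa_idx)
	{
		/* With '>' the value sa_idx == MAX_SA_COUNT slipped through
		 * and indexed one past the end of sa_table; '>=' rejects it. */
		if (sa_idx >= MAX_SA_COUNT) {
			fprintf(stderr, "bad sa_idx=%u\n", sa_idx);
			return -1;
		}
		return sa_table[sa_idx];
	}

	int main(void)
	{
		printf("lookup(%d) -> %d\n", MAX_SA_COUNT, sa_lookup(MAX_SA_COUNT));
		return 0;
	}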