Commit f89370d4 authored by David S. Miller

Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
1GbE Intel Wired LAN Driver Updates 2020-04-17

This series contains updates to the e1000 and igc drivers only.

Sasha adds partial generic segmentation offload (GSO partial) support to
the igc driver, as well as support for translating taprio schedules into
i225 cycles.  He also cleans up dead code and unused defines in the igc
driver, refactors the code to avoid forward declarations where possible,
and enables the NETIF_F_HW_TC flag for igc by default.
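
As a rough sketch of that translation (condensed from igc_save_qbv_schedule()
in the igc_main.c hunk below), each gate-list entry's interval advances a
window inside the cycle, and every queue whose gate bit is set in the entry
gets that window as its transmit slot:

    u32 start_time = 0, end_time = 0;
    size_t n;
    int i;

    for (n = 0; n < qopt->num_entries; n++) {
        struct tc_taprio_sched_entry *e = &qopt->entries[n];

        end_time += e->interval;
        for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
            if (e->gate_mask & BIT(i)) {
                adapter->tx_ring[i]->start_time = start_time;
                adapter->tx_ring[i]->end_time = end_time;
            }
        }
        start_time += e->interval;
    }

The per-queue windows are later programmed into the IGC_STQT/IGC_ENDQT
registers and the cycle length into IGC_QBVCYCLET (see igc_tsn.c below).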

Vinicius adds support for ETF offloading, using an approach similar to
the one used for taprio offload.
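
In that scheme, the per-packet skb->tstamp requested by ETF is folded into
an offset within the current Qbv cycle before being written to the i225
launch-time descriptor field (see igc_tx_launchtime() below): with a
base_time of 0 and a cycle_time of 1 ms, for example, a requested transmit
time of 2.3 ms is programmed as a launch time 300000 ns into the cycle.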

Kees Cook fixes a clang warning in the e1000 driver by moving declared
variables either into the switch case that uses them or lifting them up
into the main function body, to help the compiler.
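
The problem pattern and its fix look roughly like this (abridged from the
e1000_main.c hunk below); a declaration placed in switch scope ahead of the
first case label is not part of any execution flow, which is what the
compiler complains about:

    /* before */
    switch (hw->mac_type) {
        unsigned int pull_size;
    case e1000_82544:
        ...

    /* after: scope the variable to the case block that uses it */
    switch (hw->mac_type) {
    case e1000_82544: {
        unsigned int pull_size;
        ...
        break;
    }
    default:
        break;
    }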

Andre fixes registers being overwritten when dumping registers via
ethtool for the igc driver.  He also fixes support for ethtool Network
Flow Classification (NFC) queue redirection by adding the missing code
needed to enable the queue selection feature from the Receive Address
High (RAH) register, and removes the code bits designed to support
tc-flower filters, since this client part does not support them.
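
Sketched from the igc_rar_set_index() hunk and the new defines below: when a
MAC filter is marked for queue steering, the target queue is written into the
QSEL field of RAH and the QSEL enable bit is set:

    if (adapter->mac_table[index].state & IGC_MAC_STATE_QUEUE_STEERING) {
        u8 queue = adapter->mac_table[index].queue;

        rar_high |= IGC_RAH_QSEL_MASK & (queue << IGC_RAH_QSEL_SHIFT);
        rar_high |= IGC_RAH_QSEL_ENABLE;
    }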
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 123aff2a ac9156b2
drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3136,8 +3136,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
         hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
         if (skb->data_len && hdr_len == len) {
             switch (hw->mac_type) {
+            case e1000_82544: {
                 unsigned int pull_size;
-            case e1000_82544:
+
                 /* Make sure we have room to chop off 4 bytes,
                  * and that the end alignment will work out to
                  * this hardware's requirements
@@ -3158,6 +3159,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                 }
                 len = skb_headlen(skb);
                 break;
+            }
             default:
                 /* do nothing */
                 break;
drivers/net/ethernet/intel/igc/Makefile
@@ -8,4 +8,4 @@
 obj-$(CONFIG_IGC) += igc.o

 igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
-            igc_ethtool.o igc_ptp.o igc_dump.o
+            igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o
drivers/net/ethernet/intel/igc/igc_defines.h
@@ -44,9 +44,6 @@
 /* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */
 #define IGC_WUPM_BYTES			128

-/* Physical Func Reset Done Indication */
-#define IGC_CTRL_EXT_LINK_MODE_MASK	0x00C00000
-
 /* Loop limit on how long we wait for auto-negotiation to complete */
 #define COPPER_LINK_UP_LIMIT		10
 #define PHY_AUTO_NEG_LIMIT		45
@@ -66,8 +63,11 @@
  * (RAR[15]) for our directed address used by controllers with
  * manageability enabled, allowing us room for 15 multicast addresses.
  */
+#define IGC_RAH_QSEL_MASK	0x000C0000
+#define IGC_RAH_QSEL_SHIFT	18
+#define IGC_RAH_QSEL_ENABLE	BIT(28)
 #define IGC_RAH_AV		0x80000000 /* Receive descriptor valid */
 #define IGC_RAH_POOL_1		0x00040000
 #define IGC_RAL_MAC_ADDR_LEN	4
 #define IGC_RAH_MAC_ADDR_LEN	2
@@ -94,8 +94,6 @@
 #define IGC_CTRL_RFCE		0x08000000 /* Receive Flow Control enable */
 #define IGC_CTRL_TFCE		0x10000000 /* Transmit flow control enable */

-#define IGC_CONNSW_AUTOSENSE_EN	0x1
-
 /* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */
 #define MAX_JUMBO_FRAME_SIZE	0x2600
@@ -377,6 +375,11 @@
 #define I225_TXPBSIZE_DEFAULT	0x04000014 /* TXPBSIZE default */
 #define IGC_RXPBS_CFG_TS_EN	0x80000000 /* Timestamp in Rx buffer */

+#define IGC_TXPBSIZE_TSN	0x04145145 /* 5k bytes buffer for each queue */
+
+#define IGC_DTXMXPKTSZ_TSN	0x19 /* 1600 bytes of max TX DMA packet size */
+#define IGC_DTXMXPKTSZ_DEFAULT	0x98 /* 9728-byte Jumbo frames */
+
 /* Time Sync Interrupt Causes */
 #define IGC_TSICR_SYS_WRAP	BIT(0) /* SYSTIM Wrap around. */
 #define IGC_TSICR_TXTS		BIT(1) /* Transmit Timestamp. */
@@ -431,6 +434,14 @@
 #define IGC_TSYNCTXCTL_START_SYNC	0x80000000 /* initiate sync */
 #define IGC_TSYNCTXCTL_TXSYNSIG		0x00000020 /* Sample TX tstamp in PHY sop */

+/* Transmit Scheduling */
+#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN	0x00000001
+#define IGC_TQAVCTRL_ENHANCED_QAV	0x00000008
+
+#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT	0x00000001
+#define IGC_TXQCTL_STRICT_CYCLE		0x00000002
+#define IGC_TXQCTL_STRICT_END		0x00000004
+
 /* Receive Checksum Control */
 #define IGC_RXCSUM_CRCOFL	0x00000800 /* CRC32 offload enable */
 #define IGC_RXCSUM_PCSD		0x00002000 /* packet checksum disabled */
@@ -497,7 +508,6 @@
 #define IGC_MDIC_READY		0x10000000
 #define IGC_MDIC_INT_EN		0x20000000
 #define IGC_MDIC_ERROR		0x40000000
-#define IGC_MDIC_DEST		0x80000000

 #define IGC_N0_QUEUE		-1
drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -153,7 +153,7 @@ static void igc_get_regs(struct net_device *netdev,
     memset(p, 0, IGC_REGS_LEN * sizeof(u32));

-    regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;
+    regs->version = (2u << 24) | (hw->revision_id << 16) | hw->device_id;

     /* General Registers */
     regs_buff[0] = rd32(IGC_CTRL);
@@ -306,6 +306,15 @@ static void igc_get_regs(struct net_device *netdev,
         regs_buff[164 + i] = rd32(IGC_TDT(i));
     for (i = 0; i < 4; i++)
         regs_buff[168 + i] = rd32(IGC_TXDCTL(i));
+
+    /* XXX: Due to a bug few lines above, RAL and RAH registers are
+     * overwritten. To preserve the ABI, we write these registers again in
+     * regs_buff.
+     */
+    for (i = 0; i < 16; i++)
+        regs_buff[172 + i] = rd32(IGC_RAL(i));
+    for (i = 0; i < 16; i++)
+        regs_buff[188 + i] = rd32(IGC_RAH(i));
 }

 static void igc_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
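
For reference, a filter that exercises this queue redirection path can be
installed from user space along these lines (device name, MAC address and
queue number are placeholders):

    ethtool -N eth0 flow-type ether dst 3c:fd:fe:00:00:01 action 1

The dump version is bumped to 2 so that consumers of "ethtool -d" can tell
the corrected register layout apart from the old one.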
drivers/net/ethernet/intel/igc/igc_main.c
@@ -9,11 +9,13 @@
 #include <linux/udp.h>
 #include <linux/ip.h>
 #include <linux/pm_runtime.h>
+#include <net/pkt_sched.h>
 #include <net/ipv6.h>

 #include "igc.h"
 #include "igc_hw.h"
+#include "igc_tsn.h"

 #define DRV_VERSION	"0.0.1-k"
 #define DRV_SUMMARY	"Intel(R) 2.5G Ethernet Linux Driver"
@@ -106,6 +108,9 @@ void igc_reset(struct igc_adapter *adapter)
     /* Re-enable PTP, where applicable. */
     igc_ptp_reset(adapter);

+    /* Re-enable TSN offloading, where applicable. */
+    igc_tsn_offload_apply(adapter);
+
     igc_get_phy_info(hw);
 }
@@ -775,13 +780,18 @@ static void igc_rar_set_index(struct igc_adapter *adapter, u32 index)
     rar_low = le32_to_cpup((__le32 *)(addr));
     rar_high = le16_to_cpup((__le16 *)(addr + 4));

+    if (adapter->mac_table[index].state & IGC_MAC_STATE_QUEUE_STEERING) {
+        u8 queue = adapter->mac_table[index].queue;
+        u32 qsel = IGC_RAH_QSEL_MASK & (queue << IGC_RAH_QSEL_SHIFT);
+
+        rar_high |= qsel;
+        rar_high |= IGC_RAH_QSEL_ENABLE;
+    }
+
     /* Indicate to hardware the Address is Valid. */
     if (adapter->mac_table[index].state & IGC_MAC_STATE_IN_USE) {
         if (is_valid_ether_addr(addr))
             rar_high |= IGC_RAH_AV;
-
-        rar_high |= IGC_RAH_POOL_1 <<
-            adapter->mac_table[index].queue;
     }

     wr32(IGC_RAL(index), rar_low);
@@ -864,6 +874,23 @@ static int igc_write_mc_addr_list(struct net_device *netdev)
     return netdev_mc_count(netdev);
 }

+static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
+{
+    ktime_t cycle_time = adapter->cycle_time;
+    ktime_t base_time = adapter->base_time;
+    u32 launchtime;
+
+    /* FIXME: when using ETF together with taprio, we may have a
+     * case where 'delta' is larger than the cycle_time, this may
+     * cause problems if we don't read the current value of
+     * IGC_BASET, as the value writen into the launchtime
+     * descriptor field may be misinterpreted.
+     */
+    div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);
+
+    return cpu_to_le32(launchtime);
+}
+
 static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
                             struct igc_tx_buffer *first,
                             u32 vlan_macip_lens, u32 type_tucmd,
@@ -871,7 +898,6 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
 {
     struct igc_adv_tx_context_desc *context_desc;
     u16 i = tx_ring->next_to_use;
-    struct timespec64 ts;

     context_desc = IGC_TX_CTXTDESC(tx_ring, i);
@@ -893,9 +919,12 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
      * should have been handled by the upper layers.
      */
     if (tx_ring->launchtime_enable) {
-        ts = ktime_to_timespec64(first->skb->tstamp);
+        struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
+        ktime_t txtime = first->skb->tstamp;
+
         first->skb->tstamp = ktime_set(0, 0);
-        context_desc->launch_time = cpu_to_le32(ts.tv_nsec / 32);
+        context_desc->launch_time = igc_tx_launchtime(adapter,
+                                                      txtime);
     } else {
         context_desc->launch_time = 0;
     }
@@ -2325,7 +2354,9 @@ static void igc_configure(struct igc_adapter *adapter)
     igc_setup_mrqc(adapter);
     igc_setup_rctl(adapter);

     igc_set_default_mac_filter(adapter);
+    igc_nfc_filter_restore(adapter);
+
     igc_configure_tx(adapter);
     igc_configure_rx(adapter);
@@ -3458,9 +3489,6 @@ static void igc_nfc_filter_exit(struct igc_adapter *adapter)
     hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
         igc_erase_filter(adapter, rule);

-    hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
-        igc_erase_filter(adapter, rule);
-
     spin_unlock(&adapter->nfc_lock);
 }
@@ -4009,7 +4037,6 @@ static void igc_watchdog_task(struct work_struct *work)
     struct igc_hw *hw = &adapter->hw;
     struct igc_phy_info *phy = &hw->phy;
     u16 phy_data, retry_count = 20;
-    u32 connsw;
     u32 link;
     int i;
@@ -4022,14 +4049,6 @@ static void igc_watchdog_task(struct work_struct *work)
         link = false;
     }

-    /* Force link down if we have fiber to swap to */
-    if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
-        if (hw->phy.media_type == igc_media_type_copper) {
-            connsw = rd32(IGC_CONNSW);
-            if (!(connsw & IGC_CONNSW_AUTOSENSE_EN))
-                link = 0;
-        }
-    }
     if (link) {
         /* Cancel scheduled suspend requests. */
         pm_runtime_resume(netdev->dev.parent);
@@ -4491,6 +4510,158 @@ static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
     }
 }

+static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
+                                      bool enable)
+{
+    struct igc_ring *ring;
+    int i;
+
+    if (queue < 0 || queue >= adapter->num_tx_queues)
+        return -EINVAL;
+
+    ring = adapter->tx_ring[queue];
+    ring->launchtime_enable = enable;
+
+    if (adapter->base_time)
+        return 0;
+
+    adapter->cycle_time = NSEC_PER_SEC;
+
+    for (i = 0; i < adapter->num_tx_queues; i++) {
+        ring = adapter->tx_ring[i];
+        ring->start_time = 0;
+        ring->end_time = NSEC_PER_SEC;
+    }
+
+    return 0;
+}
+
+static bool validate_schedule(const struct tc_taprio_qopt_offload *qopt)
+{
+    int queue_uses[IGC_MAX_TX_QUEUES] = { };
+    size_t n;
+
+    if (qopt->cycle_time_extension)
+        return false;
+
+    for (n = 0; n < qopt->num_entries; n++) {
+        const struct tc_taprio_sched_entry *e;
+        int i;
+
+        e = &qopt->entries[n];
+
+        /* i225 only supports "global" frame preemption
+         * settings.
+         */
+        if (e->command != TC_TAPRIO_CMD_SET_GATES)
+            return false;
+
+        for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
+            if (e->gate_mask & BIT(i))
+                queue_uses[i]++;
+
+            if (queue_uses[i] > 1)
+                return false;
+        }
+    }
+
+    return true;
+}
+
+static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
+                                     struct tc_etf_qopt_offload *qopt)
+{
+    struct igc_hw *hw = &adapter->hw;
+    int err;
+
+    if (hw->mac.type != igc_i225)
+        return -EOPNOTSUPP;
+
+    err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable);
+    if (err)
+        return err;
+
+    return igc_tsn_offload_apply(adapter);
+}
+
+static int igc_save_qbv_schedule(struct igc_adapter *adapter,
+                                 struct tc_taprio_qopt_offload *qopt)
+{
+    u32 start_time = 0, end_time = 0;
+    size_t n;
+
+    if (!qopt->enable) {
+        adapter->base_time = 0;
+        return 0;
+    }
+
+    if (adapter->base_time)
+        return -EALREADY;
+
+    if (!validate_schedule(qopt))
+        return -EINVAL;
+
+    adapter->cycle_time = qopt->cycle_time;
+    adapter->base_time = qopt->base_time;
+
+    /* FIXME: be a little smarter about cases when the gate for a
+     * queue stays open for more than one entry.
+     */
+    for (n = 0; n < qopt->num_entries; n++) {
+        struct tc_taprio_sched_entry *e = &qopt->entries[n];
+        int i;
+
+        end_time += e->interval;
+
+        for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
+            struct igc_ring *ring = adapter->tx_ring[i];
+
+            if (!(e->gate_mask & BIT(i)))
+                continue;
+
+            ring->start_time = start_time;
+            ring->end_time = end_time;
+        }
+
+        start_time += e->interval;
+    }
+
+    return 0;
+}
+
+static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
+                                         struct tc_taprio_qopt_offload *qopt)
+{
+    struct igc_hw *hw = &adapter->hw;
+    int err;
+
+    if (hw->mac.type != igc_i225)
+        return -EOPNOTSUPP;
+
+    err = igc_save_qbv_schedule(adapter, qopt);
+    if (err)
+        return err;
+
+    return igc_tsn_offload_apply(adapter);
+}
+
+static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
+                        void *type_data)
+{
+    struct igc_adapter *adapter = netdev_priv(dev);
+
+    switch (type) {
+    case TC_SETUP_QDISC_TAPRIO:
+        return igc_tsn_enable_qbv_scheduling(adapter, type_data);
+
+    case TC_SETUP_QDISC_ETF:
+        return igc_tsn_enable_launchtime(adapter, type_data);
+
+    default:
+        return -EOPNOTSUPP;
+    }
+}
+
 static const struct net_device_ops igc_netdev_ops = {
     .ndo_open		= igc_open,
     .ndo_stop		= igc_close,
@@ -4503,6 +4674,7 @@ static const struct net_device_ops igc_netdev_ops = {
     .ndo_set_features	= igc_set_features,
     .ndo_features_check	= igc_features_check,
     .ndo_do_ioctl		= igc_ioctl,
+    .ndo_setup_tc		= igc_setup_tc,
 };

 /* PCIe configuration access */
@@ -4726,6 +4898,17 @@ static int igc_probe(struct pci_dev *pdev,
     netdev->features |= NETIF_F_RXCSUM;
     netdev->features |= NETIF_F_HW_CSUM;
     netdev->features |= NETIF_F_SCTP_CRC;
+    netdev->features |= NETIF_F_HW_TC;
+
+#define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+                                  NETIF_F_GSO_GRE_CSUM | \
+                                  NETIF_F_GSO_IPXIP4 | \
+                                  NETIF_F_GSO_IPXIP6 | \
+                                  NETIF_F_GSO_UDP_TUNNEL | \
+                                  NETIF_F_GSO_UDP_TUNNEL_CSUM)
+    netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES;
+    netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES;

     /* setup the private structure */
     err = igc_sw_init(adapter);
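
With NETIF_F_GSO_PARTIAL, the stack performs all segmentation work except
the final step for the tunnel protocols listed in IGC_GSO_PARTIAL_FEATURES,
leaving the hardware the simpler job of replicating the prepared headers
while slicing the payload into MSS-sized chunks.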
drivers/net/ethernet/intel/igc/igc_regs.h
@@ -231,6 +231,18 @@
 #define IGC_RXPBS	0x02404  /* Rx Packet Buffer Size - RW */

+/* Transmit Scheduling Registers */
+#define IGC_TQAVCTRL		0x3570
+#define IGC_TXQCTL(_n)		(0x3344 + 0x4 * (_n))
+#define IGC_BASET_L		0x3314
+#define IGC_BASET_H		0x3318
+#define IGC_QBVCYCLET		0x331C
+#define IGC_QBVCYCLET_S		0x3320
+
+#define IGC_STQT(_n)		(0x3324 + 0x4 * (_n))
+#define IGC_ENDQT(_n)		(0x3334 + 0x4 * (_n))
+
+#define IGC_DTXMXPKTSZ		0x355C
+
 /* System Time Registers */
 #define IGC_SYSTIML	0x0B600  /* System time register Low - RO */
 #define IGC_SYSTIMH	0x0B604  /* System time register High - RO */
drivers/net/ethernet/intel/igc/igc_tsn.c (new file)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Intel Corporation */

#include "igc.h"
#include "igc_tsn.h"

static bool is_any_launchtime(struct igc_adapter *adapter)
{
    int i;

    for (i = 0; i < adapter->num_tx_queues; i++) {
        struct igc_ring *ring = adapter->tx_ring[i];

        if (ring->launchtime_enable)
            return true;
    }

    return false;
}

/* Returns the TSN specific registers to their default values after
 * TSN offloading is disabled.
 */
static int igc_tsn_disable_offload(struct igc_adapter *adapter)
{
    struct igc_hw *hw = &adapter->hw;
    u32 tqavctrl;
    int i;

    if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED))
        return 0;

    adapter->cycle_time = 0;

    wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
    wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);

    tqavctrl = rd32(IGC_TQAVCTRL);
    tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
                  IGC_TQAVCTRL_ENHANCED_QAV);
    wr32(IGC_TQAVCTRL, tqavctrl);

    for (i = 0; i < adapter->num_tx_queues; i++) {
        struct igc_ring *ring = adapter->tx_ring[i];

        ring->start_time = 0;
        ring->end_time = 0;
        ring->launchtime_enable = false;

        wr32(IGC_TXQCTL(i), 0);
        wr32(IGC_STQT(i), 0);
        wr32(IGC_ENDQT(i), NSEC_PER_SEC);
    }

    wr32(IGC_QBVCYCLET_S, NSEC_PER_SEC);
    wr32(IGC_QBVCYCLET, NSEC_PER_SEC);

    adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;

    return 0;
}

static int igc_tsn_enable_offload(struct igc_adapter *adapter)
{
    struct igc_hw *hw = &adapter->hw;
    u32 tqavctrl, baset_l, baset_h;
    u32 sec, nsec, cycle;
    ktime_t base_time, systim;
    int i;

    if (adapter->flags & IGC_FLAG_TSN_QBV_ENABLED)
        return 0;

    cycle = adapter->cycle_time;
    base_time = adapter->base_time;

    wr32(IGC_TSAUXC, 0);
    wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
    wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);

    tqavctrl = rd32(IGC_TQAVCTRL);
    tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
    wr32(IGC_TQAVCTRL, tqavctrl);

    wr32(IGC_QBVCYCLET_S, cycle);
    wr32(IGC_QBVCYCLET, cycle);

    for (i = 0; i < adapter->num_tx_queues; i++) {
        struct igc_ring *ring = adapter->tx_ring[i];
        u32 txqctl = 0;

        wr32(IGC_STQT(i), ring->start_time);
        wr32(IGC_ENDQT(i), ring->end_time);

        if (adapter->base_time) {
            /* If we have a base_time we are in "taprio"
             * mode and we need to be strict about the
             * cycles: only transmit a packet if it can be
             * completed during that cycle.
             */
            txqctl |= IGC_TXQCTL_STRICT_CYCLE |
                      IGC_TXQCTL_STRICT_END;
        }

        if (ring->launchtime_enable)
            txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;

        wr32(IGC_TXQCTL(i), txqctl);
    }

    nsec = rd32(IGC_SYSTIML);
    sec = rd32(IGC_SYSTIMH);

    systim = ktime_set(sec, nsec);

    if (ktime_compare(systim, base_time) > 0) {
        s64 n;

        n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
        base_time = ktime_add_ns(base_time, (n + 1) * cycle);
    }

    baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l);

    wr32(IGC_BASET_H, baset_h);
    wr32(IGC_BASET_L, baset_l);

    adapter->flags |= IGC_FLAG_TSN_QBV_ENABLED;

    return 0;
}

int igc_tsn_offload_apply(struct igc_adapter *adapter)
{
    bool is_any_enabled = adapter->base_time || is_any_launchtime(adapter);

    if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED) && !is_any_enabled)
        return 0;

    if (!is_any_enabled) {
        int err = igc_tsn_disable_offload(adapter);

        if (err < 0)
            return err;

        /* The BASET registers aren't cleared when writing
         * into them, force a reset if the interface is
         * running.
         */
        if (netif_running(adapter->netdev))
            schedule_work(&adapter->reset_task);

        return 0;
    }

    return igc_tsn_enable_offload(adapter);
}
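
For context, these offloads are driven from user space through the taprio
and etf qdiscs; an illustrative configuration (placeholder device, times and
schedule; "flags 0x2" requests full offload) might look like:

    tc qdisc replace dev eth0 parent root handle 100 taprio \
        num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
        queues 1@0 1@1 2@2 \
        base-time 1000000000 \
        sched-entry S 01 300000 \
        sched-entry S 02 300000 \
        sched-entry S 04 400000 \
        flags 0x2

    tc qdisc replace dev eth0 parent 100:1 etf \
        clockid CLOCK_TAI delta 500000 offload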
drivers/net/ethernet/intel/igc/igc_tsn.h (new file)
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2020 Intel Corporation */

#ifndef _IGC_TSN_H_
#define _IGC_TSN_H_

int igc_tsn_offload_apply(struct igc_adapter *adapter);

#endif /* _IGC_TSN_H_ */