Commit 89529367 authored by David S. Miller

Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2022-12-15 (igc)

Muhammad Husaini Zulkifli says:

This patch series fixes bugs in the Time-Sensitive Networking (TSN)
Qbv scheduling feature.

An overview of each patch is given below:

Patch 1: Use a first flag bit to schedule a packet into the next cycle if
the packet cannot fit in the current Qbv cycle (a sketch of this cycle
arithmetic follows the list).
Patch 2: Enable strict cycle mode for Qbv scheduling.
Patch 3: Prevent the user from setting a basetime less than zero during
tc configuration.
Patch 4: Allow basetime enrollment with a zero value.
Patch 5: Calculate a new end time value that excludes any time interval
exceeding the cycle time, since the user can specify the cycle time in
the tc configuration.
Patch 6: Resolve a HW bug where the gate is not fully closed.
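
To make the cycle arithmetic behind Patch 1 and Patch 5 concrete, here is a
minimal sketch in plain C. It is illustrative only: the helper names are
hypothetical, plain signed 64-bit nanoseconds stand in for ktime_t, and the
authoritative driver logic is igc_tx_launchtime() and igc_save_qbv_schedule()
in the diff below.

#include <stdbool.h>
#include <stdint.h>

typedef int64_t ns_t;	/* nanoseconds, standing in for ktime_t */

/* Hypothetical model: estimate the boundaries of the Qbv cycle running
 * at 'now', flag a txtime that belongs to a later cycle (Patch 1), and
 * reduce it to an offset within the cycle, which is what the hardware
 * launchtime field expects.
 */
static ns_t qbv_launch_offset(ns_t now, ns_t base_time, ns_t cycle_time,
			      ns_t txtime, bool *next_cycle)
{
	ns_t n = (now - base_time) / cycle_time;	/* cycles elapsed */
	ns_t cycle_start = base_time + n * cycle_time;
	ns_t cycle_end = cycle_start + cycle_time;
	ns_t offset = txtime - cycle_start;

	/* A txtime at or beyond the end of the running cycle cannot fit
	 * in it; the driver marks the first such packet with a "first
	 * flag" context descriptor so it launches in the next cycle.
	 */
	*next_cycle = txtime >= cycle_end;

	/* Reduce modulo the cycle length; times in the past clamp to 0. */
	return offset > 0 ? offset % cycle_time : 0;
}

/* Patch 5 in the same model: an end time accumulated from gate control
 * list intervals is clamped so it never extends past the user-supplied
 * cycle time.
 */
static ns_t qbv_clamp_end_time(ns_t end_time, ns_t cycle_time, bool last_entry)
{
	return (end_time > cycle_time || last_entry) ? cycle_time : end_time;
}

In the driver the same computation runs on ktime_t values via div64_s64()
and div_s64_rem(), as seen in igc_tx_launchtime() below.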
---
This contains the net patches from this original pull request:
https://lore.kernel.org/netdev/20221205212414.3197525-1-anthony.l.nguyen@intel.com/

====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Parents: 2d7afdcb 72abeedd
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -94,6 +94,8 @@ struct igc_ring {
 	u8 queue_index;		/* logical index of the ring*/
 	u8 reg_idx;		/* physical index of the ring */
 	bool launchtime_enable;	/* true if LaunchTime is enabled */
+	ktime_t last_tx_cycle;	/* end of the cycle with a launchtime transmission */
+	ktime_t last_ff_cycle;	/* Last cycle with an active first flag */
 
 	u32 start_time;
 	u32 end_time;
@@ -182,6 +184,7 @@ struct igc_adapter {
 	ktime_t base_time;
 	ktime_t cycle_time;
+	bool qbv_enable;
 
 	/* OS defined structs */
 	struct pci_dev *pdev;
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -321,6 +321,8 @@
 #define IGC_ADVTXD_L4LEN_SHIFT	8  /* Adv ctxt L4LEN shift */
 #define IGC_ADVTXD_MSS_SHIFT	16 /* Adv ctxt MSS shift */
 
+#define IGC_ADVTXD_TSN_CNTX_FIRST	0x00000080
+
 /* Transmit Control */
 #define IGC_TCTL_EN		0x00000002 /* enable Tx */
 #define IGC_TCTL_PSP		0x00000008 /* pad short packets */
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1000,25 +1000,118 @@ static int igc_write_mc_addr_list(struct net_device *netdev)
 	return netdev_mc_count(netdev);
 }
 
-static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
+static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
+				bool *first_flag, bool *insert_empty)
 {
+	struct igc_adapter *adapter = netdev_priv(ring->netdev);
 	ktime_t cycle_time = adapter->cycle_time;
 	ktime_t base_time = adapter->base_time;
+	ktime_t now = ktime_get_clocktai();
+	ktime_t baset_est, end_of_cycle;
 	u32 launchtime;
+	s64 n;
 
-	/* FIXME: when using ETF together with taprio, we may have a
-	 * case where 'delta' is larger than the cycle_time, this may
-	 * cause problems if we don't read the current value of
-	 * IGC_BASET, as the value writen into the launchtime
-	 * descriptor field may be misinterpreted.
+	n = div64_s64(ktime_sub_ns(now, base_time), cycle_time);
+
+	baset_est = ktime_add_ns(base_time, cycle_time * (n));
+	end_of_cycle = ktime_add_ns(baset_est, cycle_time);
+
+	if (ktime_compare(txtime, end_of_cycle) >= 0) {
+		if (baset_est != ring->last_ff_cycle) {
+			*first_flag = true;
+			ring->last_ff_cycle = baset_est;
+
+			if (ktime_compare(txtime, ring->last_tx_cycle) > 0)
+				*insert_empty = true;
+		}
+	}
+
+	/* Introducing a window at end of cycle on which packets
+	 * potentially not honor launchtime. Window of 5us chosen
+	 * considering software update the tail pointer and packets
+	 * are dma'ed to packet buffer.
 	 */
-	div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);
+	if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC))
+		netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n",
+			    txtime);
+
+	ring->last_tx_cycle = end_of_cycle;
+
+	launchtime = ktime_sub_ns(txtime, baset_est);
+	if (launchtime > 0)
+		div_s64_rem(launchtime, cycle_time, &launchtime);
+	else
+		launchtime = 0;
 
 	return cpu_to_le32(launchtime);
 }
 
+static int igc_init_empty_frame(struct igc_ring *ring,
+				struct igc_tx_buffer *buffer,
+				struct sk_buff *skb)
+{
+	unsigned int size;
+	dma_addr_t dma;
+
+	size = skb_headlen(skb);
+
+	dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE);
+	if (dma_mapping_error(ring->dev, dma)) {
+		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
+		return -ENOMEM;
+	}
+
+	buffer->skb = skb;
+	buffer->protocol = 0;
+	buffer->bytecount = skb->len;
+	buffer->gso_segs = 1;
+	buffer->time_stamp = jiffies;
+	dma_unmap_len_set(buffer, len, skb->len);
+	dma_unmap_addr_set(buffer, dma, dma);
+
+	return 0;
+}
+
+static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
+					struct sk_buff *skb,
+					struct igc_tx_buffer *first)
+{
+	union igc_adv_tx_desc *desc;
+	u32 cmd_type, olinfo_status;
+	int err;
+
+	if (!igc_desc_unused(ring))
+		return -EBUSY;
+
+	err = igc_init_empty_frame(ring, first, skb);
+	if (err)
+		return err;
+
+	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
+		   first->bytecount;
+	olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
+
+	desc = IGC_TX_DESC(ring, ring->next_to_use);
+	desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma));
+
+	netdev_tx_sent_queue(txring_txq(ring), skb->len);
+
+	first->next_to_watch = desc;
+
+	ring->next_to_use++;
+	if (ring->next_to_use == ring->count)
+		ring->next_to_use = 0;
+
+	return 0;
+}
+
+#define IGC_EMPTY_FRAME_SIZE	60
+
 static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
-			    struct igc_tx_buffer *first,
+			    __le32 launch_time, bool first_flag,
 			    u32 vlan_macip_lens, u32 type_tucmd,
 			    u32 mss_l4len_idx)
 {
@@ -1037,26 +1130,17 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
 	if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
 		mss_l4len_idx |= tx_ring->reg_idx << 4;
 
+	if (first_flag)
+		mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST;
+
 	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
 	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
 	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
-
-	/* We assume there is always a valid Tx time available. Invalid times
-	 * should have been handled by the upper layers.
-	 */
-	if (tx_ring->launchtime_enable) {
-		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
-		ktime_t txtime = first->skb->tstamp;
-
-		skb_txtime_consumed(first->skb);
-		context_desc->launch_time = igc_tx_launchtime(adapter,
-							      txtime);
-	} else {
-		context_desc->launch_time = 0;
-	}
+	context_desc->launch_time	= launch_time;
 }
 
-static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
+static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first,
+			__le32 launch_time, bool first_flag)
 {
 	struct sk_buff *skb = first->skb;
 	u32 vlan_macip_lens = 0;
@@ -1096,7 +1180,8 @@ static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
 	vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
 
-	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
+	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
+			vlan_macip_lens, type_tucmd, 0);
 }
 
 static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
@@ -1320,6 +1405,7 @@ static int igc_tx_map(struct igc_ring *tx_ring,
 
 static int igc_tso(struct igc_ring *tx_ring,
 		   struct igc_tx_buffer *first,
+		   __le32 launch_time, bool first_flag,
 		   u8 *hdr_len)
 {
 	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
@@ -1406,8 +1492,8 @@ static int igc_tso(struct igc_ring *tx_ring,
 	vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
 
-	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
-			type_tucmd, mss_l4len_idx);
+	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
+			vlan_macip_lens, type_tucmd, mss_l4len_idx);
 
 	return 1;
 }
@@ -1415,11 +1501,14 @@ static int igc_tso(struct igc_ring *tx_ring,
 static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
 				       struct igc_ring *tx_ring)
 {
+	bool first_flag = false, insert_empty = false;
 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
 	__be16 protocol = vlan_get_protocol(skb);
 	struct igc_tx_buffer *first;
+	__le32 launch_time = 0;
 	u32 tx_flags = 0;
 	unsigned short f;
+	ktime_t txtime;
 	u8 hdr_len = 0;
 	int tso = 0;
@@ -1433,11 +1522,40 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
 			count += TXD_USE_COUNT(skb_frag_size(
 						&skb_shinfo(skb)->frags[f]));
 
-	if (igc_maybe_stop_tx(tx_ring, count + 3)) {
+	if (igc_maybe_stop_tx(tx_ring, count + 5)) {
 		/* this is a hard error */
 		return NETDEV_TX_BUSY;
 	}
 
+	if (!tx_ring->launchtime_enable)
+		goto done;
+
+	txtime = skb->tstamp;
+	skb->tstamp = ktime_set(0, 0);
+	launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);
+
+	if (insert_empty) {
+		struct igc_tx_buffer *empty_info;
+		struct sk_buff *empty;
+		void *data;
+
+		empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+		empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
+		if (!empty)
+			goto done;
+
+		data = skb_put(empty, IGC_EMPTY_FRAME_SIZE);
+		memset(data, 0, IGC_EMPTY_FRAME_SIZE);
+
+		igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
+
+		if (igc_init_tx_empty_descriptor(tx_ring,
+						 empty,
+						 empty_info) < 0)
+			dev_kfree_skb_any(empty);
+	}
+
+done:
 	/* record the location of the first descriptor for this packet */
 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
 	first->type = IGC_TX_BUFFER_TYPE_SKB;
@@ -1474,11 +1592,11 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
 	first->tx_flags = tx_flags;
 	first->protocol = protocol;
 
-	tso = igc_tso(tx_ring, first, &hdr_len);
+	tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
 	if (tso < 0)
 		goto out_drop;
 	else if (!tso)
-		igc_tx_csum(tx_ring, first);
+		igc_tx_csum(tx_ring, first, launch_time, first_flag);
 
 	igc_tx_map(tx_ring, first, hdr_len);
@@ -5925,10 +6043,16 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 	bool queue_configured[IGC_MAX_TX_QUEUES] = { };
 	u32 start_time = 0, end_time = 0;
 	size_t n;
+	int i;
+
+	adapter->qbv_enable = qopt->enable;
 
 	if (!qopt->enable)
 		return igc_tsn_clear_schedule(adapter);
 
+	if (qopt->base_time < 0)
+		return -ERANGE;
+
 	if (adapter->base_time)
 		return -EALREADY;
@@ -5940,10 +6064,24 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 	for (n = 0; n < qopt->num_entries; n++) {
 		struct tc_taprio_sched_entry *e = &qopt->entries[n];
-		int i;
 
 		end_time += e->interval;
 
+		/* If any of the conditions below are true, we need to manually
+		 * control the end time of the cycle.
+		 * 1. Qbv users can specify a cycle time that is not equal
+		 *    to the total GCL intervals. Hence, recalculation is
+		 *    necessary here to exclude the time interval that
+		 *    exceeds the cycle time.
+		 * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2,
+		 *    once the end of the list is reached, it will switch
+		 *    to the END_OF_CYCLE state and leave the gates in the
+		 *    same state until the next cycle is started.
+		 */
+		if (end_time > adapter->cycle_time ||
+		    n + 1 == qopt->num_entries)
+			end_time = adapter->cycle_time;
+
 		for (i = 0; i < adapter->num_tx_queues; i++) {
 			struct igc_ring *ring = adapter->tx_ring[i];
@@ -5964,6 +6102,18 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 		start_time += e->interval;
 	}
 
+	/* Check whether a queue gets configured.
+	 * If not, set the start and end time to be end time.
+	 */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		if (!queue_configured[i]) {
+			struct igc_ring *ring = adapter->tx_ring[i];
+
+			ring->start_time = end_time;
+			ring->end_time = end_time;
+		}
+	}
+
 	return 0;
 }
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -36,7 +36,7 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
 {
 	unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED;
 
-	if (adapter->base_time)
+	if (adapter->qbv_enable)
 		new_flags |= IGC_FLAG_TSN_QBV_ENABLED;
 
 	if (is_any_launchtime(adapter))
@@ -140,15 +140,8 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
 		wr32(IGC_STQT(i), ring->start_time);
 		wr32(IGC_ENDQT(i), ring->end_time);
 
-		if (adapter->base_time) {
-			/* If we have a base_time we are in "taprio"
-			 * mode and we need to be strict about the
-			 * cycles: only transmit a packet if it can be
-			 * completed during that cycle.
-			 */
-			txqctl |= IGC_TXQCTL_STRICT_CYCLE |
-				  IGC_TXQCTL_STRICT_END;
-		}
+		txqctl |= IGC_TXQCTL_STRICT_CYCLE |
+			  IGC_TXQCTL_STRICT_END;
 
 		if (ring->launchtime_enable)
 			txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;