Commit 8e7610e6 authored by Robert Hancock, committed by David S. Miller

net: macb: simplify TX timestamp handling

This driver was capturing the TX timestamp values from the TX ring
during the TX completion path, but deferring the actual packet TX
timestamp updating to a workqueue. There does not seem to be much of a
reason for this with the current state of the driver. Simplify this to
just do the TX timestamping as part of the TX completion path, to avoid
the need for the extra timestamp buffer and workqueue.
Signed-off-by: Robert Hancock <robert.hancock@calian.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Reviewed-by: Claudiu Beznea <claudiu.beznea@microchip.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent eb1d929f
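For context before the diff: the core of the change is that the TX completion path now converts the descriptor timestamp and reports it to the stack immediately via skb_tstamp_tx(), instead of copying it into a per-queue ring and deferring the report to a workqueue. Below is a minimal, hypothetical sketch of that direct-report pattern; it is not the driver's code, and the helper name report_tx_hwtstamp and its ns parameter are illustrative only.

#include <linux/ktime.h>
#include <linux/skbuff.h>

/* Illustrative sketch: hand a hardware TX timestamp (already converted to
 * nanoseconds) straight to the networking core from the completion path.
 * skb_tstamp_tx() clones the skb onto the socket's error queue, so the
 * caller keeps ownership of its own skb reference and frees it as usual.
 */
static void report_tx_hwtstamp(struct sk_buff *skb, u64 ns)
{
	struct skb_shared_hwtstamps shhwtstamps = { };

	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);
}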
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
@@ -768,8 +768,6 @@
 #define gem_readl_n(port, reg, idx)		(port)->macb_reg_readl((port), GEM_##reg + idx * 4)
 #define gem_writel_n(port, reg, idx, value)	(port)->macb_reg_writel((port), GEM_##reg + idx * 4, (value))
 
-#define PTP_TS_BUFFER_SIZE		128 /* must be power of 2 */
-
 /* Conditional GEM/MACB macros.  These perform the operation to the correct
  * register dependent on whether the device is a GEM or a MACB.  For registers
  * and bitfields that are common across both devices, use macb_{read,write}l
@@ -819,11 +817,6 @@ struct macb_dma_desc_ptp {
 	u32	ts_1;
 	u32	ts_2;
 };
-
-struct gem_tx_ts {
-	struct sk_buff *skb;
-	struct macb_dma_desc_ptp desc_ptp;
-};
 #endif
 
 /* DMA descriptor bitfields */
@@ -1224,12 +1217,6 @@ struct macb_queue {
 	void			*rx_buffers;
 	struct napi_struct	napi_rx;
 	struct queue_stats	stats;
-
-#ifdef CONFIG_MACB_USE_HWSTAMP
-	struct work_struct	tx_ts_task;
-	unsigned int		tx_ts_head, tx_ts_tail;
-	struct gem_tx_ts	tx_timestamps[PTP_TS_BUFFER_SIZE];
-#endif
 };
 
 struct ethtool_rx_fs_item {
@@ -1340,14 +1327,14 @@ enum macb_bd_control {
 void gem_ptp_init(struct net_device *ndev);
 void gem_ptp_remove(struct net_device *ndev);
 
-int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *des);
+void gem_ptp_txstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc);
 void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc);
-static inline int gem_ptp_do_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *desc)
+static inline void gem_ptp_do_txstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc)
 {
-	if (queue->bp->tstamp_config.tx_type == TSTAMP_DISABLED)
-		return -ENOTSUPP;
+	if (bp->tstamp_config.tx_type == TSTAMP_DISABLED)
+		return;
 
-	return gem_ptp_txstamp(queue, skb, desc);
+	gem_ptp_txstamp(bp, skb, desc);
 }
 
 static inline void gem_ptp_do_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc)
@@ -1363,11 +1350,7 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd);
 static inline void gem_ptp_init(struct net_device *ndev) { }
 static inline void gem_ptp_remove(struct net_device *ndev) { }
 
-static inline int gem_ptp_do_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *desc)
-{
-	return -1;
-}
-
+static inline void gem_ptp_do_txstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc) { }
 static inline void gem_ptp_do_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc) { }
 #endif
 
...
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
@@ -1246,13 +1246,9 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
 			/* First, update TX stats if needed */
 			if (skb) {
 				if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
-				    !ptp_one_step_sync(skb) &&
-				    gem_ptp_do_txstamp(queue, skb, desc) == 0) {
-					/* skb now belongs to timestamp buffer
-					 * and will be removed later
-					 */
-					tx_skb->skb = NULL;
-				}
+				    !ptp_one_step_sync(skb))
+					gem_ptp_do_txstamp(bp, skb, desc);
+
 				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
 					    macb_tx_ring_wrap(bp, tail),
 					    skb->data);
@@ -2315,6 +2311,12 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return ret;
 	}
 
+#ifdef CONFIG_MACB_USE_HWSTAMP
+	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+	    (bp->hw_dma_cap & HW_DMA_CAP_PTP))
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+#endif
+
 	is_lso = (skb_shinfo(skb)->gso_size != 0);
 
 	if (is_lso) {
...
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -292,79 +292,39 @@ void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb,
 	}
 }
 
-static void gem_tstamp_tx(struct macb *bp, struct sk_buff *skb,
-			  struct macb_dma_desc_ptp *desc_ptp)
-{
-	struct skb_shared_hwtstamps shhwtstamps;
-	struct timespec64 ts;
-
-	gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
-	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-	shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
-	skb_tstamp_tx(skb, &shhwtstamps);
-}
-
-int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
-		    struct macb_dma_desc *desc)
+void gem_ptp_txstamp(struct macb *bp, struct sk_buff *skb,
+		     struct macb_dma_desc *desc)
 {
-	unsigned long tail = READ_ONCE(queue->tx_ts_tail);
-	unsigned long head = queue->tx_ts_head;
+	struct skb_shared_hwtstamps shhwtstamps;
 	struct macb_dma_desc_ptp *desc_ptp;
-	struct gem_tx_ts *tx_timestamp;
-
-	if (!GEM_BFEXT(DMA_TXVALID, desc->ctrl))
-		return -EINVAL;
+	struct timespec64 ts;
 
-	if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0)
-		return -ENOMEM;
+	if (!GEM_BFEXT(DMA_TXVALID, desc->ctrl)) {
+		dev_warn_ratelimited(&bp->pdev->dev,
+				     "Timestamp not set in TX BD as expected\n");
+		return;
+	}
 
-	desc_ptp = macb_ptp_desc(queue->bp, desc);
+	desc_ptp = macb_ptp_desc(bp, desc);
 	/* Unlikely but check */
-	if (!desc_ptp)
-		return -EINVAL;
-	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-	tx_timestamp = &queue->tx_timestamps[head];
-	tx_timestamp->skb = skb;
+	if (!desc_ptp) {
+		dev_warn_ratelimited(&bp->pdev->dev,
+				     "Timestamp not supported in BD\n");
+		return;
+	}
+
 	/* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */
 	dma_rmb();
-	tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1;
-	tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2;
-	/* move head */
-	smp_store_release(&queue->tx_ts_head,
-			  (head + 1) & (PTP_TS_BUFFER_SIZE - 1));
-
-	schedule_work(&queue->tx_ts_task);
-	return 0;
-}
+	gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
 
-static void gem_tx_timestamp_flush(struct work_struct *work)
-{
-	struct macb_queue *queue =
-		container_of(work, struct macb_queue, tx_ts_task);
-	unsigned long head, tail;
-	struct gem_tx_ts *tx_ts;
-
-	/* take current head */
-	head = smp_load_acquire(&queue->tx_ts_head);
-	tail = queue->tx_ts_tail;
-
-	while (CIRC_CNT(head, tail, PTP_TS_BUFFER_SIZE)) {
-		tx_ts = &queue->tx_timestamps[tail];
-		gem_tstamp_tx(queue->bp, tx_ts->skb, &tx_ts->desc_ptp);
-		/* cleanup */
-		dev_kfree_skb_any(tx_ts->skb);
-		/* remove old tail */
-		smp_store_release(&queue->tx_ts_tail,
-				  (tail + 1) & (PTP_TS_BUFFER_SIZE - 1));
-		tail = queue->tx_ts_tail;
-	}
+	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+	shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+	skb_tstamp_tx(skb, &shhwtstamps);
 }
 
 void gem_ptp_init(struct net_device *dev)
 {
 	struct macb *bp = netdev_priv(dev);
-	struct macb_queue *queue;
-	unsigned int q;
 
 	bp->ptp_clock_info = gem_ptp_caps_template;
@@ -384,11 +344,6 @@ void gem_ptp_init(struct net_device *dev)
 	}
 	spin_lock_init(&bp->tsu_clk_lock);
 
-	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-		queue->tx_ts_head = 0;
-		queue->tx_ts_tail = 0;
-		INIT_WORK(&queue->tx_ts_task, gem_tx_timestamp_flush);
-	}
-
 	gem_ptp_init_tsu(bp);
...
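Not part of the patch, but for completeness: the gem_ptp_do_txstamp() path above is only taken once tstamp_config.tx_type is no longer TSTAMP_DISABLED, i.e. after userspace has enabled hardware TX timestamping on the interface. A minimal userspace sketch of how that is typically requested via the standard SIOCSHWTSTAMP ioctl follows; the interface name "eth0" is a placeholder.

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct hwtstamp_config cfg = { 0 };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* "eth0" is a placeholder for the macb/GEM interface name */
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	cfg.tx_type = HWTSTAMP_TX_ON;		/* request hardware TX timestamps */
	cfg.rx_filter = HWTSTAMP_FILTER_NONE;	/* leave RX timestamping off */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");

	close(fd);
	return 0;
}

To actually receive the timestamps reported by skb_tstamp_tx(), the sending application would additionally enable SOF_TIMESTAMPING_TX_HARDWARE and SOF_TIMESTAMPING_RAW_HARDWARE through the SO_TIMESTAMPING socket option and read the results from the socket error queue.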