Commit c8f8e47e authored by Grygorii Strashko, committed by David S. Miller

net: ethernet: ti: cpts: move tx timestamp processing to ptp worker only

Now TX timestamp processing happens in different contexts: softirq and
thread/PTP worker. Enabling the IRQ will add one more hard_irq context.
This makes the overall deferred TX timestamp processing and its locking
overcomplicated. Instead, move TX timestamp processing to the PTP worker only:

napi_rx->cpts_tx_timestamp
 if ptp_packet then
    push to txq
    ptp_schedule_worker()

do_aux_work->cpts_overflow_check
 cpts_process_events()
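
A condensed C sketch of this path, pieced together from the hunks below. The
worker side is heavily abbreviated: the seqid/ret handling, the timecounter
overflow update and the txq purge are omitted, and cpts->ov_check_period is the
driver's existing polling period (not shown in the hunks below):

    /* softirq/NAPI side: do not touch the event FIFO here, only queue and defer */
    void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
    {
            struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;

            if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
                    return;

            skb_get(skb);                           /* hold the skb until matched or expired */
            skb_cb->tmo = jiffies + msecs_to_jiffies(CPTS_SKB_RX_TX_TMO);
            skb_queue_tail(&cpts->txq, skb);        /* takes txq.lock internally */
            ptp_schedule_worker(cpts->clock, 0);    /* kick the PTP kworker now */
    }

    /* PTP worker side: .do_aux_work == cpts_overflow_check */
    static long cpts_overflow_check(struct ptp_clock_info *ptp)
    {
            struct cpts *cpts = container_of(ptp, struct cpts, info);
            unsigned long delay = cpts->ov_check_period;

            cpts_process_events(cpts);              /* match queued CPTS_EV_TX events against txq skbs */

            if (!skb_queue_empty(&cpts->txq))       /* unmatched skbs left, poll again sooner */
                    delay = CPTS_SKB_TX_WORK_TIMEOUT;

            return (long)delay;                     /* jiffies until the worker runs again */
    }

Because .do_aux_work is re-scheduled after the returned delay, an skb still
waiting for its CPTS_EV_TX event is simply retried on the next worker run and
dropped once skb_cb->tmo expires.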
Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
Acked-by: Richard Cochran <richardcochran@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3bfd41b5
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -21,6 +21,8 @@
 #include "cpts.h"
 
 #define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
+#define CPTS_SKB_RX_TX_TMO 100 /*ms */
+#define CPTS_EVENT_RX_TX_TIMEOUT (100) /* ms */
 
 struct cpts_skb_cb_data {
         u32 skb_mtype_seqid;
@@ -92,46 +94,6 @@ static void cpts_purge_txq(struct cpts *cpts)
         dev_dbg(cpts->dev, "txq cleaned up %d\n", removed);
 }
 
-static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
-{
-        struct sk_buff *skb, *tmp;
-        bool found = false;
-        u32 mtype_seqid;
-
-        mtype_seqid = event->high &
-                      ((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
-                       (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
-                       (EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));
-
-        /* no need to grab txq.lock as access is always done under cpts->lock */
-        skb_queue_walk_safe(&cpts->txq, skb, tmp) {
-                struct skb_shared_hwtstamps ssh;
-                struct cpts_skb_cb_data *skb_cb =
-                        (struct cpts_skb_cb_data *)skb->cb;
-
-                if (mtype_seqid == skb_cb->skb_mtype_seqid) {
-                        memset(&ssh, 0, sizeof(ssh));
-                        ssh.hwtstamp = ns_to_ktime(event->timestamp);
-                        skb_tstamp_tx(skb, &ssh);
-                        found = true;
-                        __skb_unlink(skb, &cpts->txq);
-                        dev_consume_skb_any(skb);
-                        dev_dbg(cpts->dev, "match tx timestamp mtype_seqid %08x\n",
-                                mtype_seqid);
-                        break;
-                }
-
-                if (time_after(jiffies, skb_cb->tmo)) {
-                        /* timeout any expired skbs over 1s */
-                        dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
-                        __skb_unlink(skb, &cpts->txq);
-                        dev_consume_skb_any(skb);
-                }
-        }
-
-        return found;
-}
-
 /*
  * Returns zero if matching event type was found.
  */
@@ -151,7 +113,6 @@ static int cpts_fifo_read(struct cpts *cpts, int match)
                 }
 
                 event = list_first_entry(&cpts->pool, struct cpts_event, list);
-                event->tmo = jiffies + 2;
                 event->high = hi;
                 event->low = lo;
                 event->timestamp = timecounter_cyc2time(&cpts->tc, event->low);
@@ -169,14 +130,10 @@ static int cpts_fifo_read(struct cpts *cpts, int match)
                         }
                         break;
                 case CPTS_EV_TX:
-                        if (cpts_match_tx_ts(cpts, event)) {
-                                /* if the new event matches an existing skb,
-                                 * then don't queue it
-                                 */
-                                break;
-                        }
-                        /* fall through */
                 case CPTS_EV_RX:
+                        event->tmo = jiffies +
+                                msecs_to_jiffies(CPTS_EVENT_RX_TX_TIMEOUT);
+
                         list_del_init(&event->list);
                         list_add_tail(&event->list, &cpts->events);
                         break;
@@ -297,6 +254,84 @@ static int cpts_ptp_enable(struct ptp_clock_info *ptp,
         return -EOPNOTSUPP;
 }
 
+static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
+{
+        struct sk_buff_head txq_list;
+        struct sk_buff *skb, *tmp;
+        unsigned long flags;
+        bool found = false;
+        u32 mtype_seqid;
+
+        mtype_seqid = event->high &
+                      ((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
+                       (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
+                       (EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));
+
+        __skb_queue_head_init(&txq_list);
+
+        spin_lock_irqsave(&cpts->txq.lock, flags);
+        skb_queue_splice_init(&cpts->txq, &txq_list);
+        spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+        skb_queue_walk_safe(&txq_list, skb, tmp) {
+                struct skb_shared_hwtstamps ssh;
+                struct cpts_skb_cb_data *skb_cb =
+                        (struct cpts_skb_cb_data *)skb->cb;
+
+                if (mtype_seqid == skb_cb->skb_mtype_seqid) {
+                        memset(&ssh, 0, sizeof(ssh));
+                        ssh.hwtstamp = ns_to_ktime(event->timestamp);
+                        skb_tstamp_tx(skb, &ssh);
+                        found = true;
+                        __skb_unlink(skb, &txq_list);
+                        dev_consume_skb_any(skb);
+                        dev_dbg(cpts->dev, "match tx timestamp mtype_seqid %08x\n",
+                                mtype_seqid);
+                        break;
+                }
+
+                if (time_after(jiffies, skb_cb->tmo)) {
+                        /* timeout any expired skbs over 1s */
+                        dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
+                        __skb_unlink(skb, &txq_list);
+                        dev_consume_skb_any(skb);
+                }
+        }
+
+        spin_lock_irqsave(&cpts->txq.lock, flags);
+        skb_queue_splice(&txq_list, &cpts->txq);
+        spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+        return found;
+}
+
+static void cpts_process_events(struct cpts *cpts)
+{
+        struct list_head *this, *next;
+        struct cpts_event *event;
+        LIST_HEAD(events_free);
+        unsigned long flags;
+        LIST_HEAD(events);
+
+        spin_lock_irqsave(&cpts->lock, flags);
+        list_splice_init(&cpts->events, &events);
+        spin_unlock_irqrestore(&cpts->lock, flags);
+
+        list_for_each_safe(this, next, &events) {
+                event = list_entry(this, struct cpts_event, list);
+                if (cpts_match_tx_ts(cpts, event) ||
+                    time_after(jiffies, event->tmo)) {
+                        list_del_init(&event->list);
+                        list_add(&event->list, &events_free);
+                }
+        }
+
+        spin_lock_irqsave(&cpts->lock, flags);
+        list_splice_tail(&events, &cpts->events);
+        list_splice_tail(&events_free, &cpts->pool);
+        spin_unlock_irqrestore(&cpts->lock, flags);
+}
+
 static long cpts_overflow_check(struct ptp_clock_info *ptp)
 {
         struct cpts *cpts = container_of(ptp, struct cpts, info);
@@ -305,17 +340,20 @@ static long cpts_overflow_check(struct ptp_clock_info *ptp)
         u64 ns;
 
         spin_lock_irqsave(&cpts->lock, flags);
         cpts_update_cur_time(cpts, -1, NULL);
+        spin_unlock_irqrestore(&cpts->lock, flags);
 
         ns = timecounter_read(&cpts->tc);
 
+        cpts_process_events(cpts);
+        spin_lock_irqsave(&cpts->txq.lock, flags);
         if (!skb_queue_empty(&cpts->txq)) {
                 cpts_purge_txq(cpts);
                 if (!skb_queue_empty(&cpts->txq))
                         delay = CPTS_SKB_TX_WORK_TIMEOUT;
         }
-        spin_unlock_irqrestore(&cpts->lock, flags);
+        spin_unlock_irqrestore(&cpts->txq.lock, flags);
 
         dev_dbg(cpts->dev, "cpts overflow check at %lld\n", ns);
 
         return (long)delay;
@@ -409,19 +447,6 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb,
                         break;
                 }
         }
 
-        if (ev_type == CPTS_EV_TX && !ns) {
-                struct cpts_skb_cb_data *skb_cb =
-                        (struct cpts_skb_cb_data *)skb->cb;
-                /* Not found, add frame to queue for processing later.
-                 * The periodic FIFO check will handle this.
-                 */
-                skb_get(skb);
-                /* get the timestamp for timeouts */
-                skb_cb->tmo = jiffies + msecs_to_jiffies(100);
-                __skb_queue_tail(&cpts->txq, skb);
-                ptp_schedule_worker(cpts->clock, 0);
-        }
-
         spin_unlock_irqrestore(&cpts->lock, flags);
 
         return ns;
@@ -455,9 +480,7 @@ EXPORT_SYMBOL_GPL(cpts_rx_timestamp);
 void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
 {
         struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
-        struct skb_shared_hwtstamps ssh;
         int ret;
-        u64 ns;
 
         if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
                 return;
@@ -471,12 +494,12 @@ void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
         dev_dbg(cpts->dev, "%s mtype seqid %08x\n",
                 __func__, skb_cb->skb_mtype_seqid);
 
-        ns = cpts_find_ts(cpts, skb, CPTS_EV_TX, skb_cb->skb_mtype_seqid);
-        if (!ns)
-                return;
-
-        memset(&ssh, 0, sizeof(ssh));
-        ssh.hwtstamp = ns_to_ktime(ns);
-        skb_tstamp_tx(skb, &ssh);
+        /* Always defer TX TS processing to PTP worker */
+        skb_get(skb);
+        /* get the timestamp for timeouts */
+        skb_cb->tmo = jiffies + msecs_to_jiffies(CPTS_SKB_RX_TX_TMO);
+        skb_queue_tail(&cpts->txq, skb);
+        ptp_schedule_worker(cpts->clock, 0);
 }
 EXPORT_SYMBOL_GPL(cpts_tx_timestamp);