Commit 2e7b4711 authored by David S. Miller

Merge branch 'bnxt_en-ptp' into main

Michael Chan says:

====================
bnxt_en: PTP updates for net-next

The first 5 patches implement the PTP feature on the new BCM5760X
chips.  The main new hardware feature is the new TX timestamp
completion, which enables the driver to retrieve the TX timestamp
in NAPI without deferring to the PTP worker (see the first sketch
below).

The last 5 patches increase the number of TX PTP packets in-flight
from 1 to 4 on the older BCM5750X chips.  On these older chips, we
need to call firmware in the PTP worker to retrieve the timestamp.
We use an array to keep track of the in-flight TX PTP packets (see
the second sketch below).

v2: Patch #2: Fix the unwind of txr->is_ts_pkt when bnxt_start_xmit() aborts.
    Patch #4: Set the SKBTX_IN_PROGRESS flag for timestamp packets.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 42c45ac1 06033839
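
First sketch: on the new chips, the TX timestamp arrives in a dedicated
completion record and is 48 bits wide, split across two 32-bit completion
words (see struct tx_ts_cmp and BNXT_GET_TX_TS_48B_NS in the diff below).
Below is a minimal userspace C model of that reassembly; the field layout
is simplified and the names are illustrative, not the driver's.

/* Minimal userspace model (not driver code): reassembling the 48-bit
 * nanosecond timestamp carried by the new TX timestamp completion.
 * The low 32 bits arrive in one completion word and the middle 16 bits
 * sit in bits 16..31 of the flags/type word, mirroring
 * BNXT_GET_TX_TS_48B_NS() in the diff below.
 */
#include <stdint.h>
#include <stdio.h>

#define TS_NS_MID_MASK	0xffff0000u	/* bits 16..31 of flags_type */
#define TS_NS_MID_SFT	16

static uint64_t get_tx_ts_48b_ns(uint32_t flags_type, uint32_t ts_ns_lo)
{
	/* the middle 16 bits shift up by another 16, landing at bits 32..47 */
	return (uint64_t)ts_ns_lo |
	       ((uint64_t)(flags_type & TS_NS_MID_MASK) << TS_NS_MID_SFT);
}

int main(void)
{
	/* example: mid = 0x1234, lo = 0x89abcdef -> 0x123489abcdef ns */
	printf("%llx\n",
	       (unsigned long long)get_tx_ts_48b_ns(0x12340000u, 0x89abcdefu));
	return 0;
}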
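
Second sketch: on the older chips, the series tracks up to 4 in-flight TX
PTP packets with a power-of-two ring: bnxt_ptp_get_txts_prod() claims a
producer slot under ptp_tx_lock, and the PTP worker retires slots in order
after querying firmware. Below is a minimal userspace C model of that
scheme, with a pthread mutex standing in for the kernel spinlock and
illustrative names throughout.

/* Minimal userspace model (not driver code) of the 4-slot in-flight
 * TX PTP tracking on the older chips: the xmit path claims a producer
 * slot under a lock, the worker retires slots in order and returns
 * them to the pool.
 */
#include <pthread.h>
#include <stdio.h>

#define MAX_TX_TS	4
#define NEXT_TXTS(idx)	(((idx) + 1) & (MAX_TX_TS - 1))	/* power-of-2 wrap */

struct tx_req {
	int in_use;		/* stands in for the tx_skb pointer */
};

static struct tx_req slots[MAX_TX_TS];
static unsigned int prod, cons, avail = MAX_TX_TS;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* xmit path: reserve a slot, or fail when all 4 are already in flight */
static int get_txts_prod(unsigned int *slot)
{
	int rc = -1;

	pthread_mutex_lock(&lock);
	if (avail) {
		*slot = prod;
		prod = NEXT_TXTS(prod);
		avail--;
		rc = 0;
	}
	pthread_mutex_unlock(&lock);
	return rc;
}

/* worker path: retire the oldest slot and make it available again */
static void complete_txts(void)
{
	pthread_mutex_lock(&lock);
	slots[cons].in_use = 0;
	cons = NEXT_TXTS(cons);
	avail++;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	unsigned int slot;

	while (!get_txts_prod(&slot)) {	/* claims slots 0,1,2,3 */
		slots[slot].in_use = 1;
		printf("claimed slot %u, %u left\n", slot, avail);
	}
	complete_txts();		/* frees the oldest slot */
	printf("after one completion, %u slot(s) free\n", avail);
	return 0;
}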
@@ -456,8 +456,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_addr_t mapping;
unsigned int length, pad = 0;
u32 len, free_size, vlan_tag_flags, cfa_action, flags;
u16 prod, last_frag;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct pci_dev *pdev = bp->pdev;
u16 prod, last_frag, txts_prod;
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_tx_bd *tx_buf;
__le32 lflags = 0;
@@ -509,27 +510,29 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
}
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp &&
ptp->tx_tstamp_en) {
if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) {
lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
tx_buf->is_ts_pkt = 1;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
} else if (!skb_is_gso(skb)) {
u16 seq_id, hdr_off;
if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb)) {
if (atomic_dec_if_positive(&ptp->tx_avail) < 0) {
atomic64_inc(&ptp->stats.ts_err);
goto tx_no_ts;
}
if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
&ptp->tx_hdr_off)) {
if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) &&
!bnxt_ptp_get_txts_prod(ptp, &txts_prod)) {
if (vlan_tag_flags)
ptp->tx_hdr_off += VLAN_HLEN;
hdr_off += VLAN_HLEN;
lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
tx_buf->is_ts_pkt = 1;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
} else {
atomic_inc(&bp->ptp_cfg->tx_avail);
ptp->txts_req[txts_prod].tx_seqid = seq_id;
ptp->txts_req[txts_prod].tx_hdr_off = hdr_off;
tx_buf->txts_prod = txts_prod;
}
}
}
tx_no_ts:
if (unlikely(skb->no_fcs))
lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
@@ -758,8 +761,11 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb_any(skb);
tx_kick_pending:
if (BNXT_TX_PTP_IS_SET(lflags)) {
txr->tx_buf_ring[txr->tx_prod].is_ts_pkt = 0;
atomic64_inc(&bp->ptp_cfg->stats.ts_err);
atomic_inc(&bp->ptp_cfg->tx_avail);
if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
/* set SKB to err so PTP worker will clean up */
ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO);
}
if (txr->kick_pending)
bnxt_txr_db_kick(bp, txr, txr->tx_prod);
@@ -768,7 +774,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
/* Returns true if some remaining TX packets not processed. */
static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
int budget)
{
struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
@@ -777,24 +784,33 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
unsigned int tx_bytes = 0;
u16 cons = txr->tx_cons;
int tx_pkts = 0;
bool rc = false;
while (RING_TX(bp, cons) != hw_cons) {
struct bnxt_sw_tx_bd *tx_buf;
struct sk_buff *skb;
bool is_ts_pkt;
int j, last;
tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
cons = NEXT_TX(cons);
skb = tx_buf->skb;
tx_buf->skb = NULL;
if (unlikely(!skb)) {
bnxt_sched_reset_txr(bp, txr, cons);
return;
return rc;
}
is_ts_pkt = tx_buf->is_ts_pkt;
if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) {
rc = true;
break;
}
cons = NEXT_TX(cons);
tx_pkts++;
tx_bytes += skb->len;
tx_buf->skb = NULL;
tx_buf->is_ts_pkt = 0;
if (tx_buf->is_push) {
tx_buf->is_push = 0;
@@ -814,15 +830,11 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
skb_frag_size(&skb_shinfo(skb)->frags[j]),
DMA_TO_DEVICE);
}
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
if (unlikely(is_ts_pkt)) {
if (BNXT_CHIP_P5(bp)) {
/* PTP worker takes ownership of the skb */
if (!bnxt_get_tx_ts_p5(bp, skb)) {
skb = NULL;
} else {
atomic64_inc(&bp->ptp_cfg->stats.ts_err);
atomic_inc(&bp->ptp_cfg->tx_avail);
}
bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod);
skb = NULL;
}
}
@@ -837,18 +849,22 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
__netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
return rc;
}
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
struct bnxt_tx_ring_info *txr;
bool more = false;
int i;
bnxt_for_each_napi_tx(i, bnapi, txr) {
if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
__bnxt_tx_int(bp, txr, budget);
more |= __bnxt_tx_int(bp, txr, budget);
}
bnapi->events &= ~BNXT_TX_CMP_EVENT;
if (!more)
bnapi->events &= ~BNXT_TX_CMP_EVENT;
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -2914,6 +2930,8 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
cpr->has_more_work = 1;
break;
}
} else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) {
bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp);
} else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
if (likely(budget))
@@ -2945,8 +2963,10 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
}
if (event & BNXT_REDIRECT_EVENT)
if (event & BNXT_REDIRECT_EVENT) {
xdp_do_flush();
event &= ~BNXT_REDIRECT_EVENT;
}
if (event & BNXT_TX_EVENT) {
struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
@@ -2956,6 +2976,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
wmb();
bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
event &= ~BNXT_TX_EVENT;
}
cpr->cp_raw_cons = raw_cons;
@@ -2973,13 +2994,14 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
bnapi->events &= ~BNXT_RX_EVENT;
}
if (bnapi->events & BNXT_AGG_EVENT) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
bnapi->events &= ~BNXT_AGG_EVENT;
}
bnapi->events &= BNXT_TX_CMP_EVENT;
}
static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
@@ -6788,6 +6810,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
switch (ring_type) {
case HWRM_RING_ALLOC_TX: {
struct bnxt_tx_ring_info *txr;
u16 flags = 0;
txr = container_of(ring, struct bnxt_tx_ring_info,
tx_ring_struct);
@@ -6801,6 +6824,9 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
req->cmpl_coal_cnt =
RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg)
flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE;
req->flags = cpu_to_le16(flags);
break;
}
case HWRM_RING_ALLOC_RX:
@@ -8981,7 +9007,7 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
u8 flags;
int rc;
if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5(bp)) {
if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) {
rc = -ENODEV;
goto no_ptp;
}
@@ -8997,7 +9023,8 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
goto exit;
flags = resp->flags;
if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
if (BNXT_CHIP_P5_AND_MINUS(bp) &&
!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
rc = -ENODEV;
goto exit;
}
@@ -9010,10 +9037,13 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
ptp->bp = bp;
bp->ptp_cfg = ptp;
}
if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
if (flags &
(PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK |
PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) {
ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
} else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
} else if (BNXT_CHIP_P5(bp)) {
ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
} else {
@@ -9095,6 +9125,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
bp->tx_push_thresh = 0;
if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
@@ -12136,8 +12168,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
if (bp->ptp_cfg)
atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
WRITE_ONCE(bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
bnxt_ptp_init_rtc(bp, true);
bnxt_ptp_cfg_tstamp_filters(bp);
if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
......
@@ -181,6 +181,32 @@ struct tx_cmp {
#define TX_CMP_SQ_CONS_IDX(txcmp) \
(le32_to_cpu((txcmp)->sq_cons_idx) & TX_CMP_SQ_CONS_IDX_MASK)
struct tx_ts_cmp {
__le32 tx_ts_cmp_flags_type;
#define TX_TS_CMP_FLAGS_ERROR (1 << 6)
#define TX_TS_CMP_FLAGS_TS_TYPE (1 << 7)
#define TX_TS_CMP_FLAGS_TS_TYPE_PM (0 << 7)
#define TX_TS_CMP_FLAGS_TS_TYPE_PA (1 << 7)
#define TX_TS_CMP_FLAGS_TS_FALLBACK (1 << 8)
#define TX_TS_CMP_TS_SUB_NS (0xf << 12)
#define TX_TS_CMP_TS_NS_MID (0xffff << 16)
#define TX_TS_CMP_TS_NS_MID_SFT 16
u32 tx_ts_cmp_opaque;
__le32 tx_ts_cmp_errors_v;
#define TX_TS_CMP_V (1 << 0)
#define TX_TS_CMP_TS_INVALID_ERR (1 << 10)
__le32 tx_ts_cmp_ts_ns_lo;
};
#define BNXT_GET_TX_TS_48B_NS(tscmp) \
(le32_to_cpu((tscmp)->tx_ts_cmp_ts_ns_lo) | \
((u64)(le32_to_cpu((tscmp)->tx_ts_cmp_flags_type) & \
TX_TS_CMP_TS_NS_MID) << TX_TS_CMP_TS_NS_MID_SFT))
#define BNXT_TX_TS_ERR(tscmp) \
(((tscmp)->tx_ts_cmp_flags_type & cpu_to_le32(TX_TS_CMP_FLAGS_ERROR)) &&\
((tscmp)->tx_ts_cmp_errors_v & cpu_to_le32(TX_TS_CMP_TS_INVALID_ERR)))
struct rx_cmp {
__le32 rx_cmp_len_flags_type;
#define RX_CMP_CMP_TYPE (0x3f << 0)
@@ -848,11 +874,14 @@ struct bnxt_sw_tx_bd {
DEFINE_DMA_UNMAP_ADDR(mapping);
DEFINE_DMA_UNMAP_LEN(len);
struct page *page;
u8 is_gso;
u8 is_ts_pkt;
u8 is_push;
u8 action;
unsigned short nr_frags;
u16 rx_prod;
union {
u16 rx_prod;
u16 txts_prod;
};
};
struct bnxt_sw_rx_bd {
@@ -2237,9 +2266,17 @@ struct bnxt {
(BNXT_CHIP_NUM_58700((bp)->chip_num) && \
!BNXT_CHIP_TYPE_NITRO_A0(bp)))
/* Chip class phase 3.x */
#define BNXT_CHIP_P3(bp) \
(BNXT_CHIP_NUM_57X0X((bp)->chip_num) || \
BNXT_CHIP_TYPE_NITRO_A0(bp))
#define BNXT_CHIP_P4_PLUS(bp) \
(BNXT_CHIP_P4(bp) || BNXT_CHIP_P5_PLUS(bp))
#define BNXT_CHIP_P5_AND_MINUS(bp) \
(BNXT_CHIP_P3(bp) || BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
struct bnxt_aux_priv *aux_priv;
struct bnxt_en_dev *edev;
@@ -2384,6 +2421,7 @@ struct bnxt {
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 BIT_ULL(16)
#define BNXT_FW_CAP_PCIE_STATS_SUPPORTED BIT_ULL(17)
#define BNXT_FW_CAP_EXT_STATS_SUPPORTED BIT_ULL(18)
#define BNXT_FW_CAP_TX_TS_CMP BIT_ULL(19)
#define BNXT_FW_CAP_ERR_RECOVER_RELOAD BIT_ULL(20)
#define BNXT_FW_CAP_HOT_RESET BIT_ULL(21)
#define BNXT_FW_CAP_PTP_RTC BIT_ULL(22)
......
@@ -110,7 +110,7 @@ static void bnxt_ptp_get_current_time(struct bnxt *bp)
}
static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts,
u32 txts_tmo)
u32 txts_tmo, int slot)
{
struct hwrm_port_ts_query_output *resp;
struct hwrm_port_ts_query_input *req;
@@ -123,11 +123,12 @@ static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts,
req->flags = cpu_to_le32(flags);
if ((flags & PORT_TS_QUERY_REQ_FLAGS_PATH) ==
PORT_TS_QUERY_REQ_FLAGS_PATH_TX) {
struct bnxt_ptp_tx_req *txts_req = &bp->ptp_cfg->txts_req[slot];
u32 tmo_us = txts_tmo * 1000;
req->enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
req->ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
req->ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off);
req->ptp_seq_id = cpu_to_le32(txts_req->tx_seqid);
req->ptp_hdr_offset = cpu_to_le16(txts_req->tx_hdr_off);
if (!tmo_us)
tmo_us = BNXT_PTP_QTS_TIMEOUT;
tmo_us = min(tmo_us, BNXT_PTP_QTS_MAX_TMO_US);
@@ -656,6 +657,14 @@ static int bnxt_map_ptp_regs(struct bnxt *bp)
(ptp->refclk_regs[i] & BNXT_GRC_OFFSET_MASK);
return 0;
}
if (bp->flags & BNXT_FLAG_CHIP_P7) {
for (i = 0; i < 2; i++) {
if (reg_arr[i] & BNXT_GRC_BASE_MASK)
return -EINVAL;
ptp->refclk_mapped_regs[i] = reg_arr[i];
}
return 0;
}
return -ENODEV;
}
@@ -674,43 +683,44 @@ static u64 bnxt_cc_read(const struct cyclecounter *cc)
return ns;
}
static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
static int bnxt_stamp_tx_skb(struct bnxt *bp, int slot)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct skb_shared_hwtstamps timestamp;
struct bnxt_ptp_tx_req *txts_req;
unsigned long now = jiffies;
u64 ts = 0, ns = 0;
u32 tmo = 0;
int rc;
if (!ptp->txts_pending)
ptp->abs_txts_tmo = now + msecs_to_jiffies(ptp->txts_tmo);
if (!time_after_eq(now, ptp->abs_txts_tmo))
tmo = jiffies_to_msecs(ptp->abs_txts_tmo - now);
txts_req = &ptp->txts_req[slot];
/* make sure bnxt_get_tx_ts_p5() has updated abs_txts_tmo */
smp_rmb();
if (!time_after_eq(now, txts_req->abs_txts_tmo))
tmo = jiffies_to_msecs(txts_req->abs_txts_tmo - now);
rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts,
tmo);
tmo, slot);
if (!rc) {
memset(&timestamp, 0, sizeof(timestamp));
spin_lock_bh(&ptp->ptp_lock);
ns = timecounter_cyc2time(&ptp->tc, ts);
spin_unlock_bh(&ptp->ptp_lock);
timestamp.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(ptp->tx_skb, &timestamp);
skb_tstamp_tx(txts_req->tx_skb, &timestamp);
ptp->stats.ts_pkts++;
} else {
if (!time_after_eq(jiffies, ptp->abs_txts_tmo)) {
ptp->txts_pending = true;
return;
}
if (!time_after_eq(jiffies, txts_req->abs_txts_tmo))
return -EAGAIN;
ptp->stats.ts_lost++;
netdev_warn_once(bp->dev,
"TS query for TX timer failed rc = %x\n", rc);
}
dev_kfree_skb_any(ptp->tx_skb);
ptp->tx_skb = NULL;
atomic_inc(&ptp->tx_avail);
ptp->txts_pending = false;
dev_kfree_skb_any(txts_req->tx_skb);
txts_req->tx_skb = NULL;
return 0;
}
static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
@@ -719,12 +729,30 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
ptp_info);
unsigned long now = jiffies;
struct bnxt *bp = ptp->bp;
u16 cons = ptp->txts_cons;
u32 num_requests;
int rc = 0;
num_requests = BNXT_MAX_TX_TS - READ_ONCE(ptp->tx_avail);
while (num_requests--) {
if (IS_ERR(ptp->txts_req[cons].tx_skb))
goto next_slot;
if (!ptp->txts_req[cons].tx_skb)
break;
rc = bnxt_stamp_tx_skb(bp, cons);
if (rc == -EAGAIN)
break;
next_slot:
BNXT_PTP_INC_TX_AVAIL(ptp);
cons = NEXT_TXTS(cons);
}
ptp->txts_cons = cons;
if (ptp->tx_skb)
bnxt_stamp_tx_skb(bp, ptp->tx_skb);
if (!time_after_eq(now, ptp->next_period))
if (!time_after_eq(now, ptp->next_period)) {
if (rc == -EAGAIN)
return 0;
return ptp->next_period - now;
}
bnxt_ptp_get_current_time(bp);
ptp->next_period = now + HZ;
@@ -734,22 +762,37 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
spin_unlock_bh(&ptp->ptp_lock);
ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD;
}
if (ptp->txts_pending)
if (rc == -EAGAIN)
return 0;
return HZ;
}
int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb)
int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod)
{
spin_lock_bh(&ptp->ptp_tx_lock);
if (ptp->tx_avail) {
*prod = ptp->txts_prod;
ptp->txts_prod = NEXT_TXTS(*prod);
ptp->tx_avail--;
spin_unlock_bh(&ptp->ptp_tx_lock);
return 0;
}
spin_unlock_bh(&ptp->ptp_tx_lock);
atomic64_inc(&ptp->stats.ts_err);
return -ENOSPC;
}
void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct bnxt_ptp_tx_req *txts_req;
if (ptp->tx_skb) {
netdev_err(bp->dev, "deferring skb:one SKB is still outstanding\n");
return -EBUSY;
}
ptp->tx_skb = skb;
txts_req = &ptp->txts_req[prod];
txts_req->abs_txts_tmo = jiffies + msecs_to_jiffies(ptp->txts_tmo);
/* make sure abs_txts_tmo is written first */
smp_wmb();
txts_req->tx_skb = skb;
ptp_schedule_worker(ptp->ptp_clock, 0);
return 0;
}
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts)
@@ -768,6 +811,38 @@ int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts)
return 0;
}
void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
struct tx_ts_cmp *tscmp)
{
struct skb_shared_hwtstamps timestamp = {};
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
u32 opaque = tscmp->tx_ts_cmp_opaque;
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_tx_bd *tx_buf;
u64 ts, ns;
u16 cons;
txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
ts = BNXT_GET_TX_TS_48B_NS(tscmp);
cons = TX_OPAQUE_IDX(opaque);
tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
if (tx_buf->is_ts_pkt) {
if (BNXT_TX_TS_ERR(tscmp)) {
netdev_err(bp->dev,
"timestamp completion error 0x%x 0x%x\n",
le32_to_cpu(tscmp->tx_ts_cmp_flags_type),
le32_to_cpu(tscmp->tx_ts_cmp_errors_v));
} else {
spin_lock_bh(&ptp->ptp_lock);
ns = timecounter_cyc2time(&ptp->tc, ts);
spin_unlock_bh(&ptp->ptp_lock);
timestamp.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(tx_buf->skb, &timestamp);
}
tx_buf->is_ts_pkt = 0;
}
}
static const struct ptp_clock_info bnxt_ptp_caps = {
.owner = THIS_MODULE,
.name = "bnxt clock",
@@ -914,7 +989,7 @@ int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg)
return rc;
} else {
rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME,
&ns, 0);
&ns, 0, 0);
if (rc)
return rc;
}
@@ -954,8 +1029,9 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
bnxt_ptp_free(bp);
atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS);
WRITE_ONCE(ptp->tx_avail, BNXT_MAX_TX_TS);
spin_lock_init(&ptp->ptp_lock);
spin_lock_init(&ptp->ptp_tx_lock);
if (BNXT_PTP_USE_RTC(bp)) {
bnxt_ptp_timecounter_init(bp, false);
@@ -986,7 +1062,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
ptp->stats.ts_lost = 0;
atomic64_set(&ptp->stats.ts_err, 0);
if (BNXT_CHIP_P5(bp)) {
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
spin_lock_bh(&ptp->ptp_lock);
bnxt_refclk_read(bp, NULL, &ptp->current_time);
WRITE_ONCE(ptp->old_time, ptp->current_time);
@@ -1005,6 +1081,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
void bnxt_ptp_clear(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
int i;
if (!ptp)
return;
@@ -1016,9 +1093,11 @@ void bnxt_ptp_clear(struct bnxt *bp)
kfree(ptp->ptp_info.pin_config);
ptp->ptp_info.pin_config = NULL;
if (ptp->tx_skb) {
dev_kfree_skb_any(ptp->tx_skb);
ptp->tx_skb = NULL;
for (i = 0; i < BNXT_MAX_TX_TS; i++) {
if (ptp->txts_req[i].tx_skb) {
dev_kfree_skb_any(ptp->txts_req[i].tx_skb);
ptp->txts_req[i].tx_skb = NULL;
}
}
bnxt_unmap_ptp_regs(bp);
......
@@ -85,6 +85,16 @@ struct bnxt_ptp_stats {
atomic64_t ts_err;
};
#define BNXT_MAX_TX_TS 4
#define NEXT_TXTS(idx) (((idx) + 1) & (BNXT_MAX_TX_TS - 1))
struct bnxt_ptp_tx_req {
struct sk_buff *tx_skb;
u16 tx_seqid;
u16 tx_hdr_off;
unsigned long abs_txts_tmo;
};
struct bnxt_ptp_cfg {
struct ptp_clock_info ptp_info;
struct ptp_clock *ptp_clock;
@@ -93,7 +103,8 @@ struct bnxt_ptp_cfg {
struct bnxt_pps pps_info;
/* serialize timecounter access */
spinlock_t ptp_lock;
struct sk_buff *tx_skb;
/* serialize ts tx request queuing */
spinlock_t ptp_tx_lock;
u64 current_time;
u64 old_time;
unsigned long next_period;
@@ -102,11 +113,10 @@ struct bnxt_ptp_cfg {
/* a 23b shift cyclecounter will overflow in ~36 mins. Check overflow every 18 mins. */
#define BNXT_PHC_OVERFLOW_PERIOD (18 * 60 * HZ)
u16 tx_seqid;
u16 tx_hdr_off;
struct bnxt_ptp_tx_req txts_req[BNXT_MAX_TX_TS];
struct bnxt *bp;
atomic_t tx_avail;
#define BNXT_MAX_TX_TS 1
u32 tx_avail;
u16 rxctl;
#define BNXT_PTP_MSG_SYNC (1 << 0)
#define BNXT_PTP_MSG_DELAY_REQ (1 << 1)
@@ -123,14 +133,14 @@ struct bnxt_ptp_cfg {
BNXT_PTP_MSG_PDELAY_REQ | \
BNXT_PTP_MSG_PDELAY_RESP)
u8 tx_tstamp_en:1;
u8 txts_pending:1;
int rx_filter;
u32 tstamp_filters;
u32 refclk_regs[2];
u32 refclk_mapped_regs[2];
u32 txts_tmo;
unsigned long abs_txts_tmo;
u16 txts_prod;
u16 txts_cons;
struct bnxt_ptp_stats stats;
};
@@ -147,6 +157,13 @@ do { \
((dst) = READ_ONCE(src))
#endif
#define BNXT_PTP_INC_TX_AVAIL(ptp) \
do { \
spin_lock_bh(&(ptp)->ptp_tx_lock); \
(ptp)->tx_avail++; \
spin_unlock_bh(&(ptp)->ptp_tx_lock); \
} while (0)
int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
void bnxt_ptp_update_current_time(struct bnxt *bp);
void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2);
@@ -154,8 +171,11 @@ int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
void bnxt_ptp_reapply_pps(struct bnxt *bp);
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod);
void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod);
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
struct tx_ts_cmp *tscmp);
void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns);
int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg);
int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg);
......