Commit f244e256 authored by David S. Miller

Merge branch 'hns3-ptp'

Guangbin Huang says:

====================
net: hns3: add support for PTP

This series adds PTP support for the HNS3 ethernet driver.

change log:
V1 -> V2:
1. use a spinlock to prevent concurrent access
2. add the handling when ptp_clock_register() returns NULL
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 76cf404c b34c157f
......@@ -102,6 +102,7 @@ config HNS3_HCLGE
tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support"
default m
depends on PCI_MSI
imply PTP_1588_CLOCK
help
This selects the HNS3_HCLGE network acceleration engine & its hardware
compatibility layer. The engine would be used in Hisilicon hip08 family of
......
......@@ -276,6 +276,7 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_MAC_MC,
HNAE3_DBG_CMD_MNG_TBL,
HNAE3_DBG_CMD_LOOPBACK,
HNAE3_DBG_CMD_PTP_INFO,
HNAE3_DBG_CMD_INTERRUPT_INFO,
HNAE3_DBG_CMD_RESET_INFO,
HNAE3_DBG_CMD_IMP_INFO,
......@@ -525,6 +526,12 @@ struct hnae3_ae_dev {
* Check if any cls flower rule exist
* dbg_read_cmd
* Execute debugfs read command.
* set_tx_hwts_info
* Save information for 1588 tx packet
* get_rx_hwts
* Get 1588 rx hwstamp
* get_ts_info
* Get phc info
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
......@@ -710,6 +717,12 @@ struct hnae3_ae_ops {
struct ethtool_link_ksettings *cmd);
int (*set_phy_link_ksettings)(struct hnae3_handle *handle,
const struct ethtool_link_ksettings *cmd);
bool (*set_tx_hwts_info)(struct hnae3_handle *handle,
struct sk_buff *skb);
void (*get_rx_hwts)(struct hnae3_handle *handle, struct sk_buff *skb,
u32 nsec, u32 sec);
int (*get_ts_info)(struct hnae3_handle *handle,
struct ethtool_ts_info *info);
};
struct hnae3_dcb_ops {
......
......@@ -316,6 +316,13 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.buf_len = HNS3_DBG_READ_LEN,
.init = hns3_dbg_common_file_init,
},
{
.name = "ptp_info",
.cmd = HNAE3_DBG_CMD_PTP_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
.buf_len = HNS3_DBG_READ_LEN,
.init = hns3_dbg_common_file_init,
},
};
static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
......@@ -1059,8 +1066,10 @@ int hns3_dbg_init(struct hnae3_handle *handle)
handle->hnae3_dbgfs);
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
if (hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES &&
ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES &&
ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) ||
(hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_PTP_INFO &&
!test_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps)))
continue;
if (!hns3_dbg_cmd[i].init) {
......
......@@ -1799,6 +1799,18 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
WRITE_ONCE(ring->last_to_use, ring->next_to_use);
}
/* Request a hardware Tx timestamp for @skb: if the AE layer accepts the
 * packet for timestamping, set the TSYN bit in the Tx BD so the hardware
 * latches the transmit time.
 */
static void hns3_tsyn(struct net_device *netdev, struct sk_buff *skb,
		      struct hns3_desc *desc)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	/* Only mark the descriptor when the op exists and it accepted
	 * the skb (it may decline, e.g. when a request is in flight).
	 */
	if (h->ae_algo->ops->set_tx_hwts_info &&
	    h->ae_algo->ops->set_tx_hwts_info(h, skb))
		desc->tx.bdtp_fe_sc_vld_ra_ri |=
			cpu_to_le16(BIT(HNS3_TXD_TSYN_B));
}
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
......@@ -1851,10 +1863,16 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
(ring->desc_num - 1);
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
hns3_tsyn(netdev, skb, &ring->desc[pre_ntu]);
ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
cpu_to_le16(BIT(HNS3_TXD_FE_B));
trace_hns3_tx_desc(ring, pre_ntu);
skb_tx_timestamp(skb);
/* Complete translate all packets */
dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes,
......@@ -3585,6 +3603,15 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
ol_info = le32_to_cpu(desc->rx.ol_info);
csum = le16_to_cpu(desc->csum);
if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) {
struct hnae3_handle *h = hns3_get_handle(netdev);
u32 nsec = le32_to_cpu(desc->ts_nsec);
u32 sec = le32_to_cpu(desc->ts_sec);
if (h->ae_algo->ops->get_rx_hwts)
h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec);
}
/* Based on hw strategy, the tag offloaded will be stored at
* ot_vlan_tag in two layer tag case, and stored at vlan_tag
* in one layer tag case.
......
......@@ -122,8 +122,9 @@ enum hns3_nic_state {
#define HNS3_RXD_LUM_B 9
#define HNS3_RXD_CRCP_B 10
#define HNS3_RXD_L3L4P_B 11
#define HNS3_RXD_TSIND_S 12
#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S)
#define HNS3_RXD_TSIDX_S 12
#define HNS3_RXD_TSIDX_M (0x3 << HNS3_RXD_TSIDX_S)
#define HNS3_RXD_TS_VLD_B 14
#define HNS3_RXD_LKBK_B 15
#define HNS3_RXD_GRO_SIZE_S 16
#define HNS3_RXD_GRO_SIZE_M (0x3fff << HNS3_RXD_GRO_SIZE_S)
......@@ -240,6 +241,10 @@ struct __packed hns3_desc {
union {
__le64 addr;
__le16 csum;
struct {
__le32 ts_nsec;
__le32 ts_sec;
};
};
union {
struct {
......
......@@ -1598,6 +1598,17 @@ static int hns3_set_priv_flags(struct net_device *netdev, u32 pflags)
ETHTOOL_COALESCE_TX_USECS_HIGH | \
ETHTOOL_COALESCE_MAX_FRAMES)
static int hns3_get_ts_info(struct net_device *netdev,
struct ethtool_ts_info *info)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
if (handle->ae_algo->ops->get_ts_info)
return handle->ae_algo->ops->get_ts_info(handle, info);
return ethtool_op_get_ts_info(netdev, info);
}
static const struct ethtool_ops hns3vf_ethtool_ops = {
.supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
.get_drvinfo = hns3_get_drvinfo,
......@@ -1662,6 +1673,7 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_module_eeprom = hns3_get_module_eeprom,
.get_priv_flags = hns3_get_priv_flags,
.set_priv_flags = hns3_set_priv_flags,
.get_ts_info = hns3_get_ts_info,
};
void hns3_ethtool_set_ops(struct net_device *netdev)
......
......@@ -7,6 +7,6 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
ccflags-y += -I $(srctree)/$(src)
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o hclge_ptp.o
hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
......@@ -130,6 +130,10 @@ enum hclge_opcode_type {
HCLGE_OPC_COMMON_LOOPBACK = 0x0315,
HCLGE_OPC_CONFIG_FEC_MODE = 0x031A,
/* PTP commands */
HCLGE_OPC_PTP_INT_EN = 0x0501,
HCLGE_OPC_PTP_MODE_CFG = 0x0507,
/* PFC/Pause commands */
HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701,
HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702,
......
......@@ -2173,6 +2173,57 @@ static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf,
return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos);
}
static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
{
struct hclge_ptp *ptp = hdev->ptp;
u32 sw_cfg = ptp->ptp_cfg;
unsigned int tx_start;
unsigned int last_rx;
int pos = 0;
u32 hw_cfg;
int ret;
pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
ptp->info.name);
pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
"yes" : "no");
pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
"yes" : "no");
pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
"yes" : "no");
last_rx = jiffies_to_msecs(ptp->last_rx);
pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt);
tx_start = jiffies_to_msecs(ptp->tx_start);
pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n",
tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt);
pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n",
ptp->tx_skipped);
pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n",
ptp->tx_timeout);
pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n",
ptp->last_tx_seqid);
ret = hclge_ptp_cfg_qry(hdev, &hw_cfg);
if (ret)
return ret;
pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n",
sw_cfg, hw_cfg);
pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n",
ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
return 0;
}
static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len)
{
hclge_dbg_dump_mac_list(hdev, buf, len, true);
......@@ -2244,6 +2295,10 @@ static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
.cmd = HNAE3_DBG_CMD_LOOPBACK,
.dbg_dump = hclge_dbg_dump_loopback,
},
{
.cmd = HNAE3_DBG_CMD_PTP_INFO,
.dbg_dump = hclge_dbg_dump_ptp_info,
},
{
.cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
.dbg_dump = hclge_dbg_dump_interrupt,
......
......@@ -3346,6 +3346,12 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
return HCLGE_VECTOR0_EVENT_ERR;
/* check for vector0 ptp event source */
if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
*clearval = msix_src_reg;
return HCLGE_VECTOR0_EVENT_PTP;
}
/* check for vector0 mailbox(=CMDQ RX) event source */
if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
......@@ -3365,6 +3371,7 @@ static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
u32 regclr)
{
switch (event_type) {
case HCLGE_VECTOR0_EVENT_PTP:
case HCLGE_VECTOR0_EVENT_RST:
hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
break;
......@@ -3393,6 +3400,7 @@ static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
struct hclge_dev *hdev = data;
unsigned long flags;
u32 clearval = 0;
u32 event_cause;
......@@ -3407,6 +3415,11 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
case HCLGE_VECTOR0_EVENT_RST:
hclge_reset_task_schedule(hdev);
break;
case HCLGE_VECTOR0_EVENT_PTP:
spin_lock_irqsave(&hdev->ptp->lock, flags);
hclge_ptp_clean_tx_hwts(hdev);
spin_unlock_irqrestore(&hdev->ptp->lock, flags);
break;
case HCLGE_VECTOR0_EVENT_MBX:
/* If we are here then,
* 1. Either we are not handling any mbx task and we are not
......@@ -3428,7 +3441,8 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
hclge_clear_event_cause(hdev, event_cause, clearval);
/* Enable interrupt if it is not caused by reset event or error event */
if (event_cause == HCLGE_VECTOR0_EVENT_MBX ||
if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
event_cause == HCLGE_VECTOR0_EVENT_MBX ||
event_cause == HCLGE_VECTOR0_EVENT_OTHER)
hclge_enable_vector(&hdev->misc_vector, true);
......@@ -4375,6 +4389,27 @@ static void hclge_periodic_service_task(struct hclge_dev *hdev)
hclge_task_schedule(hdev, delta);
}
/* Periodic fallback for Tx timestamp handling: if a hardware Tx timestamp
 * request has been pending for more than HZ jiffies (~1s) without the PTP
 * interrupt reaping it, clean it up from the service task so the pending
 * skb does not leak and block further timestamp requests.
 */
static void hclge_ptp_service_task(struct hclge_dev *hdev)
{
	unsigned long flags;

	/* Fast path: nothing to do unless PTP is enabled, a Tx timestamp
	 * is in flight, and it has been outstanding for over one second.
	 */
	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
	    !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
	    !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
		return;

	/* to prevent concurrence with the irq handler */
	spin_lock_irqsave(&hdev->ptp->lock, flags);

	/* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
	 * handler may handle it just before spin_lock_irqsave().
	 */
	if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
		hclge_ptp_clean_tx_hwts(hdev);

	spin_unlock_irqrestore(&hdev->ptp->lock, flags);
}
static void hclge_service_task(struct work_struct *work)
{
struct hclge_dev *hdev =
......@@ -4382,6 +4417,7 @@ static void hclge_service_task(struct work_struct *work)
hclge_errhand_service_task(hdev);
hclge_reset_service_task(hdev);
hclge_ptp_service_task(hdev);
hclge_mailbox_service_task(hdev);
hclge_periodic_service_task(hdev);
......@@ -9413,8 +9449,15 @@ static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
switch (cmd) {
case SIOCGHWTSTAMP:
return hclge_ptp_get_cfg(hdev, ifr);
case SIOCSHWTSTAMP:
return hclge_ptp_set_cfg(hdev, ifr);
default:
if (!hdev->hw.mac.phydev)
return hclge_mii_ioctl(hdev, ifr, cmd);
}
return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}
......@@ -11530,6 +11573,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
ret = hclge_ptp_init(hdev);
if (ret)
goto err_mdiobus_unreg;
INIT_KFIFO(hdev->mac_tnl_log);
hclge_dcb_ops_set(hdev);
......@@ -11901,6 +11948,10 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
ret = hclge_ptp_init(hdev);
if (ret)
return ret;
/* Log and clear the hw errors those already occurred */
if (hnae3_dev_ras_imp_supported(hdev))
hclge_handle_occurred_error(hdev);
......@@ -11954,6 +12005,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_clear_vf_vlan(hdev);
hclge_misc_affinity_teardown(hdev);
hclge_state_uninit(hdev);
hclge_ptp_uninit(hdev);
hclge_uninit_rxd_adv_layout(hdev);
hclge_uninit_mac_table(hdev);
hclge_del_all_fd_entries(hdev);
......@@ -12850,6 +12902,9 @@ static const struct hnae3_ae_ops hclge_ops = {
.cls_flower_active = hclge_is_cls_flower_active,
.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
.set_tx_hwts_info = hclge_ptp_set_tx_info,
.get_rx_hwts = hclge_ptp_get_rx_hwts,
.get_ts_info = hclge_ptp_get_ts_info,
};
static struct hnae3_ae_algo ae_algo = {
......
......@@ -10,6 +10,7 @@
#include <linux/kfifo.h>
#include "hclge_cmd.h"
#include "hclge_ptp.h"
#include "hnae3.h"
#define HCLGE_MOD_VERSION "1.0"
......@@ -178,6 +179,7 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_FUN_RST_ING_B 0
/* Vector0 register bits define */
#define HCLGE_VECTOR0_REG_PTP_INT_B 0
#define HCLGE_VECTOR0_GLOBALRESET_INT_B 5
#define HCLGE_VECTOR0_CORERESET_INT_B 6
#define HCLGE_VECTOR0_IMPRESET_INT_B 7
......@@ -230,6 +232,8 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_FD_TBL_CHANGED,
HCLGE_STATE_FD_CLEAR_ALL,
HCLGE_STATE_FD_USER_DEF_CHANGED,
HCLGE_STATE_PTP_EN,
HCLGE_STATE_PTP_TX_HANDLING,
HCLGE_STATE_MAX
};
......@@ -237,6 +241,7 @@ enum hclge_evt_cause {
HCLGE_VECTOR0_EVENT_RST,
HCLGE_VECTOR0_EVENT_MBX,
HCLGE_VECTOR0_EVENT_ERR,
HCLGE_VECTOR0_EVENT_PTP,
HCLGE_VECTOR0_EVENT_OTHER,
};
......@@ -935,6 +940,7 @@ struct hclge_dev {
/* affinity mask and notify for misc interrupt */
cpumask_t affinity_mask;
struct irq_affinity_notify affinity_notify;
struct hclge_ptp *ptp;
};
/* VPort level vlan tag configuration for TX direction */
......
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2021 Hisilicon Limited.
#ifndef __HCLGE_PTP_H
#define __HCLGE_PTP_H
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/types.h>
#define HCLGE_PTP_REG_OFFSET 0x29000
#define HCLGE_PTP_TX_TS_SEQID_REG 0x0
#define HCLGE_PTP_TX_TS_NSEC_REG 0x4
#define HCLGE_PTP_TX_TS_NSEC_MASK GENMASK(29, 0)
#define HCLGE_PTP_TX_TS_SEC_L_REG 0x8
#define HCLGE_PTP_TX_TS_SEC_H_REG 0xC
#define HCLGE_PTP_TX_TS_SEC_H_MASK GENMASK(15, 0)
#define HCLGE_PTP_TX_TS_CNT_REG 0x30
#define HCLGE_PTP_TIME_SEC_H_REG 0x50
#define HCLGE_PTP_TIME_SEC_H_MASK GENMASK(15, 0)
#define HCLGE_PTP_TIME_SEC_L_REG 0x54
#define HCLGE_PTP_TIME_NSEC_REG 0x58
#define HCLGE_PTP_TIME_NSEC_MASK GENMASK(29, 0)
#define HCLGE_PTP_TIME_NSEC_NEG BIT(31)
#define HCLGE_PTP_TIME_SYNC_REG 0x5C
#define HCLGE_PTP_TIME_SYNC_EN BIT(0)
#define HCLGE_PTP_TIME_ADJ_REG 0x60
#define HCLGE_PTP_TIME_ADJ_EN BIT(0)
#define HCLGE_PTP_CYCLE_QUO_REG 0x64
#define HCLGE_PTP_CYCLE_DEN_REG 0x68
#define HCLGE_PTP_CYCLE_NUM_REG 0x6C
#define HCLGE_PTP_CYCLE_CFG_REG 0x70
#define HCLGE_PTP_CYCLE_ADJ_EN BIT(0)
#define HCLGE_PTP_CUR_TIME_SEC_H_REG 0x74
#define HCLGE_PTP_CUR_TIME_SEC_L_REG 0x78
#define HCLGE_PTP_CUR_TIME_NSEC_REG 0x7C
#define HCLGE_PTP_CYCLE_ADJ_BASE 2
#define HCLGE_PTP_CYCLE_ADJ_MAX 500000000
#define HCLGE_PTP_CYCLE_ADJ_UNIT 100000000
#define HCLGE_PTP_SEC_H_OFFSET 32u
#define HCLGE_PTP_SEC_L_MASK GENMASK(31, 0)
#define HCLGE_PTP_FLAG_EN BIT(0)
#define HCLGE_PTP_FLAG_TX_EN BIT(1)
#define HCLGE_PTP_FLAG_RX_EN BIT(2)
/* Per-device PTP (IEEE 1588) state for the HCLGE backend. */
struct hclge_ptp {
	struct hclge_dev *hdev;		/* back-pointer to owning device */
	struct ptp_clock *clock;	/* registered PHC clock */
	struct sk_buff *tx_skb;		/* skb awaiting a hw Tx timestamp */
	unsigned long flags;		/* HCLGE_PTP_FLAG_* bits */
	void __iomem *io_base;		/* mapped PTP register block */
	struct ptp_clock_info info;
	struct hwtstamp_config ts_cfg;	/* current SIOCSHWTSTAMP config */
	spinlock_t lock; /* protects ptp registers */
	u32 ptp_cfg;			/* software copy of the cfg register */
	u32 last_tx_seqid;		/* seqid of the last timestamped Tx */
	/* The fields below look like debug/statistics counters (dumped by
	 * the ptp_info debugfs file); times are in jiffies — confirm against
	 * hclge_ptp.c, which is collapsed in this view.
	 */
	unsigned long tx_start;
	unsigned long tx_cnt;
	unsigned long tx_skipped;
	unsigned long tx_cleaned;
	unsigned long last_rx;
	unsigned long rx_cnt;
	unsigned long tx_timeout;
};
struct hclge_ptp_int_cmd {
#define HCLGE_PTP_INT_EN_B BIT(0)
u8 int_en;
u8 rsvd[23];
};
enum hclge_ptp_udp_type {
HCLGE_PTP_UDP_NOT_TYPE,
HCLGE_PTP_UDP_P13F_TYPE,
HCLGE_PTP_UDP_P140_TYPE,
HCLGE_PTP_UDP_FULL_TYPE,
};
enum hclge_ptp_msg_type {
HCLGE_PTP_MSG_TYPE_V2_L2,
HCLGE_PTP_MSG_TYPE_V2,
HCLGE_PTP_MSG_TYPE_V2_EVENT,
};
enum hclge_ptp_msg0_type {
HCLGE_PTP_MSG0_V2_DELAY_REQ = 1,
HCLGE_PTP_MSG0_V2_PDELAY_REQ,
HCLGE_PTP_MSG0_V2_DELAY_RESP,
HCLGE_PTP_MSG0_V2_EVENT = 0xF,
};
#define HCLGE_PTP_MSG1_V2_DEFAULT 1
struct hclge_ptp_cfg_cmd {
#define HCLGE_PTP_EN_B BIT(0)
#define HCLGE_PTP_TX_EN_B BIT(1)
#define HCLGE_PTP_RX_EN_B BIT(2)
#define HCLGE_PTP_UDP_EN_SHIFT 3
#define HCLGE_PTP_UDP_EN_MASK GENMASK(4, 3)
#define HCLGE_PTP_MSG_TYPE_SHIFT 8
#define HCLGE_PTP_MSG_TYPE_MASK GENMASK(9, 8)
#define HCLGE_PTP_MSG1_SHIFT 16
#define HCLGE_PTP_MSG1_MASK GENMASK(19, 16)
#define HCLGE_PTP_MSG0_SHIFT 24
#define HCLGE_PTP_MSG0_MASK GENMASK(27, 24)
__le32 cfg;
u8 rsvd[20];
};
/* Map a ptp_clock_info callback argument back to the owning hclge_dev. */
static inline struct hclge_dev *hclge_ptp_get_hdev(struct ptp_clock_info *info)
{
	return container_of(info, struct hclge_ptp, info)->hdev;
}
bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb);
void hclge_ptp_clean_tx_hwts(struct hclge_dev *dev);
void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb,
u32 nsec, u32 sec);
int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr);
int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr);
int hclge_ptp_init(struct hclge_dev *hdev);
void hclge_ptp_uninit(struct hclge_dev *hdev);
int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
struct ethtool_ts_info *info);
int hclge_ptp_cfg_qry(struct hclge_dev *hdev, u32 *cfg);
#endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment