Commit 7c39afb3 authored by Feras Daoud, committed by Saeed Mahameed

net/mlx5: PTP code migration to driver core section

PTP code is moved to the core section of the mlx5 driver so that it can be
shared between the Ethernet and InfiniBand drivers. The move involves the
following changes:
- Change the mlx5e_ prefix to mlx5_
- Add the clock structs to the core
- Add a clock object to mlx5_core_dev
- Call the clock init/uninit from core init/cleanup
- Rename mlx5e_tstamp to mlx5_clock
Signed-off-by: Feras Daoud <ferasda@mellanox.com>
Signed-off-by: Eitan Rabin <rabin@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent ae904bea
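
For context on what the diff below converts the datapath to: the removed mlx5e_fill_hwstamp()/mlx5e_tstamp pair is replaced by a per-ring pointer to the core mdev->clock plus the mlx5_timecounter_cyc2time() helper from the new lib/clock.h. The following is a minimal sketch of that usage pattern, mirroring the en_rx.c hunk; the function name sketch_rx_hwstamp and the exact include set are illustrative assumptions, not part of the patch.

#include <linux/skbuff.h>
#include "en.h"          /* struct mlx5e_rq, struct mlx5_cqe64, get_cqe_ts() - assumed visible here */
#include "lib/clock.h"   /* mlx5_timecounter_cyc2time() */

/* Illustrative sketch only: stamp an RX skb from a CQE the way the patched
 * en_rx.c does, using the shared core clock instead of mlx5e_tstamp.
 */
static void sketch_rx_hwstamp(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
                              struct sk_buff *skb)
{
        /* rq->tstamp is now a plain struct hwtstamp_config * held in mlx5e_priv */
        if (rq->tstamp->rx_filter != HWTSTAMP_FILTER_ALL)
                return;

        /* rq->clock points at mdev->clock; the helper takes the clock read
         * lock and runs the raw CQE cycle count through the timecounter.
         */
        skb_hwtstamps(skb)->hwtstamp =
                mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
}

The TX completion path in en_tx.c follows the same pattern through sq->clock, and the IPoIB RX path reuses it unchanged, which is what allows the InfiniBand side to share the clock.
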
@@ -267,28 +267,6 @@ struct mlx5e_dcbx {
};
#endif
#define MAX_PIN_NUM 8
struct mlx5e_pps {
u8 pin_caps[MAX_PIN_NUM];
struct work_struct out_work;
u64 start[MAX_PIN_NUM];
u8 enabled;
};
struct mlx5e_tstamp {
rwlock_t lock;
struct cyclecounter cycles;
struct timecounter clock;
struct hwtstamp_config hwtstamp_config;
u32 nominal_c_mult;
unsigned long overflow_period;
struct delayed_work overflow_work;
struct mlx5_core_dev *mdev;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
struct mlx5e_pps pps_info;
};
enum {
MLX5E_RQ_STATE_ENABLED,
MLX5E_RQ_STATE_AM,
@@ -375,9 +353,10 @@ struct mlx5e_txqsq {
u8 min_inline_mode;
u16 edge;
struct device *pdev;
struct mlx5e_tstamp *tstamp;
__be32 mkey_be;
unsigned long state;
struct hwtstamp_config *tstamp;
struct mlx5_clock *clock;
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
@@ -543,10 +522,11 @@ struct mlx5e_rq {
struct mlx5e_channel *channel;
struct device *pdev;
struct net_device *netdev;
struct mlx5e_tstamp *tstamp;
struct mlx5e_rq_stats stats;
struct mlx5e_cq cq;
struct mlx5e_page_cache page_cache;
struct hwtstamp_config *tstamp;
struct mlx5_clock *clock;
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_post_rx_wqes post_wqes;
@@ -588,7 +568,7 @@ struct mlx5e_channel {
/* control */
struct mlx5e_priv *priv;
struct mlx5_core_dev *mdev;
struct mlx5e_tstamp *tstamp;
struct hwtstamp_config *tstamp;
int ix;
};
@@ -789,7 +769,7 @@ struct mlx5e_priv {
struct mlx5_core_dev *mdev;
struct net_device *netdev;
struct mlx5e_stats stats;
struct mlx5e_tstamp tstamp;
struct hwtstamp_config tstamp;
u16 q_counter;
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx dcbx;
@@ -873,12 +853,6 @@ void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
void mlx5e_set_rx_mode_work(struct work_struct *work);
void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
struct skb_shared_hwtstamps *hwts);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);
void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv);
void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
struct ptp_clock_event *event);
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
@@ -889,6 +863,7 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
void mlx5e_timestamp_set(struct mlx5e_priv *priv);
struct mlx5e_redirect_rqt_param {
bool is_rss;
......
@@ -1417,14 +1417,15 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
struct ethtool_ts_info *info)
{
struct mlx5_core_dev *mdev = priv->mdev;
int ret;
ret = ethtool_op_get_ts_info(priv->netdev, info);
if (ret)
return ret;
info->phc_index = priv->tstamp.ptp ?
ptp_clock_index(priv->tstamp.ptp) : -1;
info->phc_index = mdev->clock.ptp ?
ptp_clock_index(mdev->clock.ptp) : -1;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
return 0;
@@ -1754,7 +1755,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
if (!MLX5_CAP_GEN(mdev, cqe_compression))
return -EOPNOTSUPP;
if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
if (enable && priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");
return -EINVAL;
}
......
@@ -373,8 +373,6 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
enum mlx5_dev_event event, unsigned long param)
{
struct mlx5e_priv *priv = vpriv;
struct ptp_clock_event ptp_event;
struct mlx5_eqe *eqe = NULL;
if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
return;
@@ -384,14 +382,6 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
case MLX5_DEV_EVENT_PORT_DOWN:
queue_work(priv->wq, &priv->update_carrier_work);
break;
case MLX5_DEV_EVENT_PPS:
eqe = (struct mlx5_eqe *)param;
ptp_event.index = eqe->data.pps.pin;
ptp_event.timestamp =
timecounter_cyc2time(&priv->tstamp.clock,
be64_to_cpu(eqe->data.pps.time_stamp));
mlx5e_pps_event_handler(vpriv, &ptp_event);
break;
default:
break;
}
@@ -585,6 +575,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->pdev = c->pdev;
rq->netdev = c->netdev;
rq->tstamp = c->tstamp;
rq->clock = &mdev->clock;
rq->channel = c;
rq->ix = c->ix;
rq->mdev = mdev;
@@ -1123,6 +1114,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->pdev = c->pdev;
sq->tstamp = c->tstamp;
sq->clock = &mdev->clock;
sq->mkey_be = c->mkey_be;
sq->channel = c;
sq->txq_ix = txq_ix;
@@ -2678,6 +2670,12 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
netif_carrier_on(netdev);
}
void mlx5e_timestamp_set(struct mlx5e_priv *priv)
{
priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
}
int mlx5e_open_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -2693,7 +2691,7 @@ int mlx5e_open_locked(struct net_device *netdev)
mlx5e_activate_priv_channels(priv);
if (priv->profile->update_carrier)
priv->profile->update_carrier(priv);
mlx5e_timestamp_init(priv);
mlx5e_timestamp_set(priv);
if (priv->profile->update_stats)
queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
@@ -2731,7 +2729,6 @@ int mlx5e_close_locked(struct net_device *netdev)
clear_bit(MLX5E_STATE_OPENED, &priv->state);
mlx5e_timestamp_cleanup(priv);
netif_carrier_off(priv->netdev);
mlx5e_deactivate_priv_channels(priv);
mlx5e_close_channels(&priv->channels);
@@ -3403,6 +3400,80 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
return err;
}
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
{
struct hwtstamp_config config;
int err;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
return -EOPNOTSUPP;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
/* TX HW timestamp */
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
break;
default:
return -ERANGE;
}
mutex_lock(&priv->state_lock);
/* RX HW timestamp */
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
/* Reset CQE compression to Admin default */
mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
/* Disable CQE compression */
netdev_warn(priv->netdev, "Disabling cqe compression");
err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
if (err) {
netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
mutex_unlock(&priv->state_lock);
return err;
}
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
mutex_unlock(&priv->state_lock);
return -ERANGE;
}
memcpy(&priv->tstamp, &config, sizeof(config));
mutex_unlock(&priv->state_lock);
return copy_to_user(ifr->ifr_data, &config,
sizeof(config)) ? -EFAULT : 0;
}
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
{
struct hwtstamp_config *cfg = &priv->tstamp;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
return -EOPNOTSUPP;
return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
}
static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mlx5e_priv *priv = netdev_priv(dev);
......
@@ -42,10 +42,11 @@
#include "en_rep.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"
#include "lib/clock.h"
static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
return tstamp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
return config->rx_filter == HWTSTAMP_FILTER_ALL;
}
static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,
@@ -661,7 +662,6 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
struct sk_buff *skb)
{
struct net_device *netdev = rq->netdev;
struct mlx5e_tstamp *tstamp = rq->tstamp;
int lro_num_seg;
lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
@@ -676,8 +676,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
rq->stats.lro_bytes += cqe_bcnt;
}
if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));
if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
skb_hwtstamps(skb)->hwtstamp =
mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
skb_record_rx_queue(skb, rq->ix);
@@ -1163,7 +1164,6 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
struct sk_buff *skb)
{
struct net_device *netdev = rq->netdev;
struct mlx5e_tstamp *tstamp = rq->tstamp;
char *pseudo_header;
u8 *dgid;
u8 g;
@@ -1188,8 +1188,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));
if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
skb_hwtstamps(skb)->hwtstamp =
mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
skb_record_rx_queue(skb, rq->ix);
......
@@ -35,6 +35,7 @@
#include "en.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"
#include "lib/clock.h"
#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
@@ -452,8 +453,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
SKBTX_HW_TSTAMP)) {
struct skb_shared_hwtstamps hwts = {};
mlx5e_fill_hwstamp(sq->tstamp,
get_cqe_ts(cqe), &hwts);
hwts.hwtstamp =
mlx5_timecounter_cyc2time(sq->clock,
get_cqe_ts(cqe));
skb_tstamp_tx(skb, &hwts);
}
......
@@ -491,8 +491,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
break;
case MLX5_EVENT_TYPE_PPS_EVENT:
if (dev->event)
dev->event(dev, MLX5_DEV_EVENT_PPS, (unsigned long)eqe);
mlx5_pps_event(dev, eqe);
break;
case MLX5_EVENT_TYPE_FPGA_ERROR:
......
@@ -404,7 +404,7 @@ static int mlx5i_open(struct net_device *netdev)
mlx5e_refresh_tirs(priv, false);
mlx5e_activate_priv_channels(priv);
mlx5e_timestamp_init(priv);
mlx5e_timestamp_set(priv);
mutex_unlock(&priv->state_lock);
return 0;
@@ -429,7 +429,6 @@ static int mlx5i_close(struct net_device *netdev)
clear_bit(MLX5E_STATE_OPENED, &priv->state);
mlx5e_timestamp_cleanup(priv);
netif_carrier_off(priv->netdev);
mlx5e_deactivate_priv_channels(priv);
mlx5e_close_channels(&priv->channels);
......
@@ -34,250 +34,164 @@
#include "en.h"
enum {
MLX5E_CYCLES_SHIFT = 23
MLX5_CYCLES_SHIFT = 23
};
enum {
MLX5E_PIN_MODE_IN = 0x0,
MLX5E_PIN_MODE_OUT = 0x1,
MLX5_PIN_MODE_IN = 0x0,
MLX5_PIN_MODE_OUT = 0x1,
};
enum {
MLX5E_OUT_PATTERN_PULSE = 0x0,
MLX5E_OUT_PATTERN_PERIODIC = 0x1,
MLX5_OUT_PATTERN_PULSE = 0x0,
MLX5_OUT_PATTERN_PERIODIC = 0x1,
};
enum {
MLX5E_EVENT_MODE_DISABLE = 0x0,
MLX5E_EVENT_MODE_REPETETIVE = 0x1,
MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2,
MLX5_EVENT_MODE_DISABLE = 0x0,
MLX5_EVENT_MODE_REPETETIVE = 0x1,
MLX5_EVENT_MODE_ONCE_TILL_ARM = 0x2,
};
enum {
MLX5E_MTPPS_FS_ENABLE = BIT(0x0),
MLX5E_MTPPS_FS_PATTERN = BIT(0x2),
MLX5E_MTPPS_FS_PIN_MODE = BIT(0x3),
MLX5E_MTPPS_FS_TIME_STAMP = BIT(0x4),
MLX5E_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
MLX5E_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
MLX5_MTPPS_FS_ENABLE = BIT(0x0),
MLX5_MTPPS_FS_PATTERN = BIT(0x2),
MLX5_MTPPS_FS_PIN_MODE = BIT(0x3),
MLX5_MTPPS_FS_TIME_STAMP = BIT(0x4),
MLX5_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
};
void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
struct skb_shared_hwtstamps *hwts)
static u64 read_internal_timer(const struct cyclecounter *cc)
{
u64 nsec;
struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
clock);
read_lock(&tstamp->lock);
nsec = timecounter_cyc2time(&tstamp->clock, timestamp);
read_unlock(&tstamp->lock);
hwts->hwtstamp = ns_to_ktime(nsec);
}
static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc)
{
struct mlx5e_tstamp *tstamp = container_of(cc, struct mlx5e_tstamp,
cycles);
return mlx5_read_internal_timer(tstamp->mdev) & cc->mask;
return mlx5_read_internal_timer(mdev) & cc->mask;
}
static void mlx5e_pps_out(struct work_struct *work)
static void mlx5_pps_out(struct work_struct *work)
{
struct mlx5e_pps *pps_info = container_of(work, struct mlx5e_pps,
out_work);
struct mlx5e_tstamp *tstamp = container_of(pps_info, struct mlx5e_tstamp,
pps_info);
struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
out_work);
struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
pps_info);
struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
clock);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
unsigned long flags;
int i;
for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
for (i = 0; i < clock->ptp_info.n_pins; i++) {
u64 tstart;
write_lock_irqsave(&tstamp->lock, flags);
tstart = tstamp->pps_info.start[i];
tstamp->pps_info.start[i] = 0;
write_unlock_irqrestore(&tstamp->lock, flags);
write_lock_irqsave(&clock->lock, flags);
tstart = clock->pps_info.start[i];
clock->pps_info.start[i] = 0;
write_unlock_irqrestore(&clock->lock, flags);
if (!tstart)
continue;
MLX5_SET(mtpps_reg, in, pin, i);
MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
MLX5_SET(mtpps_reg, in, field_select, MLX5E_MTPPS_FS_TIME_STAMP);
mlx5_set_mtpps(tstamp->mdev, in, sizeof(in));
MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
mlx5_set_mtpps(mdev, in, sizeof(in));
}
}
static void mlx5e_timestamp_overflow(struct work_struct *work)
static void mlx5_timestamp_overflow(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
overflow_work);
struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp);
struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock,
overflow_work);
unsigned long flags;
write_lock_irqsave(&tstamp->lock, flags);
timecounter_read(&tstamp->clock);
write_unlock_irqrestore(&tstamp->lock, flags);
queue_delayed_work(priv->wq, &tstamp->overflow_work,
msecs_to_jiffies(tstamp->overflow_period * 1000));
}
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
{
struct hwtstamp_config config;
int err;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
return -EOPNOTSUPP;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
/* TX HW timestamp */
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
break;
default:
return -ERANGE;
}
mutex_lock(&priv->state_lock);
/* RX HW timestamp */
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
/* Reset CQE compression to Admin default */
mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
/* Disable CQE compression */
netdev_warn(priv->netdev, "Disabling cqe compression");
err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
if (err) {
netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
mutex_unlock(&priv->state_lock);
return err;
}
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
mutex_unlock(&priv->state_lock);
return -ERANGE;
}
memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config));
mutex_unlock(&priv->state_lock);
return copy_to_user(ifr->ifr_data, &config,
sizeof(config)) ? -EFAULT : 0;
}
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
{
struct hwtstamp_config *cfg = &priv->tstamp.hwtstamp_config;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
return -EOPNOTSUPP;
return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
write_lock_irqsave(&clock->lock, flags);
timecounter_read(&clock->tc);
write_unlock_irqrestore(&clock->lock, flags);
schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
}
static int mlx5e_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
ptp_info);
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
ptp_info);
u64 ns = timespec64_to_ns(ts);
unsigned long flags;
write_lock_irqsave(&tstamp->lock, flags);
timecounter_init(&tstamp->clock, &tstamp->cycles, ns);
write_unlock_irqrestore(&tstamp->lock, flags);
write_lock_irqsave(&clock->lock, flags);
timecounter_init(&clock->tc, &clock->cycles, ns);
write_unlock_irqrestore(&clock->lock, flags);
return 0;
}
static int mlx5e_ptp_gettime(struct ptp_clock_info *ptp,
struct timespec64 *ts)
static int mlx5_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
ptp_info);
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
ptp_info);
u64 ns;
unsigned long flags;
write_lock_irqsave(&tstamp->lock, flags);
ns = timecounter_read(&tstamp->clock);
write_unlock_irqrestore(&tstamp->lock, flags);
write_lock_irqsave(&clock->lock, flags);
ns = timecounter_read(&clock->tc);
write_unlock_irqrestore(&clock->lock, flags);
*ts = ns_to_timespec64(ns);
return 0;
}
static int mlx5e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
ptp_info);
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
ptp_info);
unsigned long flags;
write_lock_irqsave(&tstamp->lock, flags);
timecounter_adjtime(&tstamp->clock, delta);
write_unlock_irqrestore(&tstamp->lock, flags);
write_lock_irqsave(&clock->lock, flags);
timecounter_adjtime(&clock->tc, delta);
write_unlock_irqrestore(&clock->lock, flags);
return 0;
}
static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
u64 adj;
u32 diff;
unsigned long flags;
int neg_adj = 0;
struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
ptp_info);
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
ptp_info);
if (delta < 0) {
neg_adj = 1;
delta = -delta;
}
adj = tstamp->nominal_c_mult;
adj = clock->nominal_c_mult;
adj *= delta;
diff = div_u64(adj, 1000000000ULL);
write_lock_irqsave(&tstamp->lock, flags);
timecounter_read(&tstamp->clock);
tstamp->cycles.mult = neg_adj ? tstamp->nominal_c_mult - diff :
tstamp->nominal_c_mult + diff;
write_unlock_irqrestore(&tstamp->lock, flags);
write_lock_irqsave(&clock->lock, flags);
timecounter_read(&clock->tc);
clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
clock->nominal_c_mult + diff;
write_unlock_irqrestore(&clock->lock, flags);
return 0;
}
static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq,
int on)
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq,
int on)
{
struct mlx5e_tstamp *tstamp =
container_of(ptp, struct mlx5e_tstamp, ptp_info);
struct mlx5e_priv *priv =
container_of(tstamp, struct mlx5e_priv, tstamp);
struct mlx5_clock *clock =
container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_core_dev *mdev =
container_of(clock, struct mlx5_core_dev, clock);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
u32 field_select = 0;
u8 pin_mode = 0;
@@ -285,24 +199,24 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
int pin = -1;
int err = 0;
if (!MLX5_PPS_CAP(priv->mdev))
if (!MLX5_PPS_CAP(mdev))
return -EOPNOTSUPP;
if (rq->extts.index >= tstamp->ptp_info.n_pins)
if (rq->extts.index >= clock->ptp_info.n_pins)
return -EINVAL;
if (on) {
pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index);
pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
if (pin < 0)
return -EBUSY;
pin_mode = MLX5E_PIN_MODE_IN;
pin_mode = MLX5_PIN_MODE_IN;
pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
field_select = MLX5E_MTPPS_FS_PIN_MODE |
MLX5E_MTPPS_FS_PATTERN |
MLX5E_MTPPS_FS_ENABLE;
field_select = MLX5_MTPPS_FS_PIN_MODE |
MLX5_MTPPS_FS_PATTERN |
MLX5_MTPPS_FS_ENABLE;
} else {
pin = rq->extts.index;
field_select = MLX5E_MTPPS_FS_ENABLE;
field_select = MLX5_MTPPS_FS_ENABLE;
}
MLX5_SET(mtpps_reg, in, pin, pin);
@@ -311,22 +225,22 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
MLX5_SET(mtpps_reg, in, enable, on);
MLX5_SET(mtpps_reg, in, field_select, field_select);
err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
err = mlx5_set_mtpps(mdev, in, sizeof(in));
if (err)
return err;
return mlx5_set_mtppse(priv->mdev, pin, 0,
MLX5E_EVENT_MODE_REPETETIVE & on);
return mlx5_set_mtppse(mdev, pin, 0,
MLX5_EVENT_MODE_REPETETIVE & on);
}
static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq,
int on)
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq,
int on)
{
struct mlx5e_tstamp *tstamp =
container_of(ptp, struct mlx5e_tstamp, ptp_info);
struct mlx5e_priv *priv =
container_of(tstamp, struct mlx5e_priv, tstamp);
struct mlx5_clock *clock =
container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_core_dev *mdev =
container_of(clock, struct mlx5_core_dev, clock);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
u64 nsec_now, nsec_delta, time_stamp = 0;
u64 cycles_now, cycles_delta;
@@ -339,20 +253,20 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
int err = 0;
s64 ns;
if (!MLX5_PPS_CAP(priv->mdev))
if (!MLX5_PPS_CAP(mdev))
return -EOPNOTSUPP;
if (rq->perout.index >= tstamp->ptp_info.n_pins)
if (rq->perout.index >= clock->ptp_info.n_pins)
return -EINVAL;
if (on) {
pin = ptp_find_pin(tstamp->ptp, PTP_PF_PEROUT,
pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
rq->perout.index);
if (pin < 0)
return -EBUSY;
pin_mode = MLX5E_PIN_MODE_OUT;
pattern = MLX5E_OUT_PATTERN_PERIODIC;
pin_mode = MLX5_PIN_MODE_OUT;
pattern = MLX5_OUT_PATTERN_PERIODIC;
ts.tv_sec = rq->perout.period.sec;
ts.tv_nsec = rq->perout.period.nsec;
ns = timespec64_to_ns(&ts);
@@ -363,21 +277,21 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
ts.tv_sec = rq->perout.start.sec;
ts.tv_nsec = rq->perout.start.nsec;
ns = timespec64_to_ns(&ts);
cycles_now = mlx5_read_internal_timer(tstamp->mdev);
write_lock_irqsave(&tstamp->lock, flags);
nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
cycles_now = mlx5_read_internal_timer(mdev);
write_lock_irqsave(&clock->lock, flags);
nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
nsec_delta = ns - nsec_now;
cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
tstamp->cycles.mult);
write_unlock_irqrestore(&tstamp->lock, flags);
cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
clock->cycles.mult);
write_unlock_irqrestore(&clock->lock, flags);
time_stamp = cycles_now + cycles_delta;
field_select = MLX5E_MTPPS_FS_PIN_MODE |
MLX5E_MTPPS_FS_PATTERN |
MLX5E_MTPPS_FS_ENABLE |
MLX5E_MTPPS_FS_TIME_STAMP;
field_select = MLX5_MTPPS_FS_PIN_MODE |
MLX5_MTPPS_FS_PATTERN |
MLX5_MTPPS_FS_ENABLE |
MLX5_MTPPS_FS_TIME_STAMP;
} else {
pin = rq->perout.index;
field_select = MLX5E_MTPPS_FS_ENABLE;
field_select = MLX5_MTPPS_FS_ENABLE;
}
MLX5_SET(mtpps_reg, in, pin, pin);
@@ -387,233 +301,225 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
MLX5_SET(mtpps_reg, in, field_select, field_select);
err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
err = mlx5_set_mtpps(mdev, in, sizeof(in));
if (err)
return err;
return mlx5_set_mtppse(priv->mdev, pin, 0,
MLX5E_EVENT_MODE_REPETETIVE & on);
return mlx5_set_mtppse(mdev, pin, 0,
MLX5_EVENT_MODE_REPETETIVE & on);
}
static int mlx5e_pps_configure(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq,
int on)
static int mlx5_pps_configure(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq,
int on)
{
struct mlx5e_tstamp *tstamp =
container_of(ptp, struct mlx5e_tstamp, ptp_info);
struct mlx5_clock *clock =
container_of(ptp, struct mlx5_clock, ptp_info);
tstamp->pps_info.enabled = !!on;
clock->pps_info.enabled = !!on;
return 0;
}
static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq,
int on)
static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq,
int on)
{
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
return mlx5e_extts_configure(ptp, rq, on);
return mlx5_extts_configure(ptp, rq, on);
case PTP_CLK_REQ_PEROUT:
return mlx5e_perout_configure(ptp, rq, on);
return mlx5_perout_configure(ptp, rq, on);
case PTP_CLK_REQ_PPS:
return mlx5e_pps_configure(ptp, rq, on);
return mlx5_pps_configure(ptp, rq, on);
default:
return -EOPNOTSUPP;
}
return 0;
}
static int mlx5e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
}
static const struct ptp_clock_info mlx5e_ptp_clock_info = {
static const struct ptp_clock_info mlx5_ptp_clock_info = {
.owner = THIS_MODULE,
.name = "mlx5_p2p",
.max_adj = 100000000,
.n_alarm = 0,
.n_ext_ts = 0,
.n_per_out = 0,
.n_pins = 0,
.pps = 0,
.adjfreq = mlx5e_ptp_adjfreq,
.adjtime = mlx5e_ptp_adjtime,
.gettime64 = mlx5e_ptp_gettime,
.settime64 = mlx5e_ptp_settime,
.adjfreq = mlx5_ptp_adjfreq,
.adjtime = mlx5_ptp_adjtime,
.gettime64 = mlx5_ptp_gettime,
.settime64 = mlx5_ptp_settime,
.enable = NULL,
.verify = NULL,
};
static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp)
{
tstamp->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
tstamp->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
}
static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp)
static int mlx5_init_pin_config(struct mlx5_clock *clock)
{
int i;
tstamp->ptp_info.pin_config =
kzalloc(sizeof(*tstamp->ptp_info.pin_config) *
tstamp->ptp_info.n_pins, GFP_KERNEL);
if (!tstamp->ptp_info.pin_config)
clock->ptp_info.pin_config =
kzalloc(sizeof(*clock->ptp_info.pin_config) *
clock->ptp_info.n_pins, GFP_KERNEL);
if (!clock->ptp_info.pin_config)
return -ENOMEM;
tstamp->ptp_info.enable = mlx5e_ptp_enable;
tstamp->ptp_info.verify = mlx5e_ptp_verify;
tstamp->ptp_info.pps = 1;
clock->ptp_info.enable = mlx5_ptp_enable;
clock->ptp_info.verify = mlx5_ptp_verify;
clock->ptp_info.pps = 1;
for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
snprintf(tstamp->ptp_info.pin_config[i].name,
sizeof(tstamp->ptp_info.pin_config[i].name),
for (i = 0; i < clock->ptp_info.n_pins; i++) {
snprintf(clock->ptp_info.pin_config[i].name,
sizeof(clock->ptp_info.pin_config[i].name),
"mlx5_pps%d", i);
tstamp->ptp_info.pin_config[i].index = i;
tstamp->ptp_info.pin_config[i].func = PTP_PF_NONE;
tstamp->ptp_info.pin_config[i].chan = i;
clock->ptp_info.pin_config[i].index = i;
clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
clock->ptp_info.pin_config[i].chan = i;
}
return 0;
}
static void mlx5e_get_pps_caps(struct mlx5e_priv *priv,
struct mlx5e_tstamp *tstamp)
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
mlx5_query_mtpps(priv->mdev, out, sizeof(out));
tstamp->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
cap_number_of_pps_pins);
tstamp->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
cap_max_num_of_pps_in_pins);
tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
cap_max_num_of_pps_out_pins);
tstamp->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
tstamp->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
tstamp->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
tstamp->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
tstamp->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
tstamp->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
tstamp->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
tstamp->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
mlx5_query_mtpps(mdev, out, sizeof(out));
clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
cap_number_of_pps_pins);
clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
cap_max_num_of_pps_in_pins);
clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
cap_max_num_of_pps_out_pins);
clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}
void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
struct ptp_clock_event *event)
void mlx5_pps_event(struct mlx5_core_dev *mdev,
struct mlx5_eqe *eqe)
{
struct net_device *netdev = priv->netdev;
struct mlx5e_tstamp *tstamp = &priv->tstamp;
struct mlx5_clock *clock = &mdev->clock;
struct ptp_clock_event ptp_event;
struct timespec64 ts;
u64 nsec_now, nsec_delta;
u64 cycles_now, cycles_delta;
int pin = event->index;
int pin = eqe->data.pps.pin;
s64 ns;
unsigned long flags;
switch (tstamp->ptp_info.pin_config[pin].func) {
switch (clock->ptp_info.pin_config[pin].func) {
case PTP_PF_EXTTS:
if (tstamp->pps_info.enabled) {
event->type = PTP_CLOCK_PPSUSR;
event->pps_times.ts_real = ns_to_timespec64(event->timestamp);
if (clock->pps_info.enabled) {
ptp_event.type = PTP_CLOCK_PPSUSR;
ptp_event.pps_times.ts_real = ns_to_timespec64(eqe->data.pps.time_stamp);
} else {
event->type = PTP_CLOCK_EXTTS;
ptp_event.type = PTP_CLOCK_EXTTS;
}
ptp_clock_event(tstamp->ptp, event);
ptp_clock_event(clock->ptp, &ptp_event);
break;
case PTP_PF_PEROUT:
mlx5e_ptp_gettime(&tstamp->ptp_info, &ts);
cycles_now = mlx5_read_internal_timer(tstamp->mdev);
mlx5_ptp_gettime(&clock->ptp_info, &ts);
cycles_now = mlx5_read_internal_timer(mdev);
ts.tv_sec += 1;
ts.tv_nsec = 0;
ns = timespec64_to_ns(&ts);
write_lock_irqsave(&tstamp->lock, flags);
nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
write_lock_irqsave(&clock->lock, flags);
nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
nsec_delta = ns - nsec_now;
cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
tstamp->cycles.mult);
tstamp->pps_info.start[pin] = cycles_now + cycles_delta;
queue_work(priv->wq, &tstamp->pps_info.out_work);
write_unlock_irqrestore(&tstamp->lock, flags);
cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
clock->cycles.mult);
clock->pps_info.start[pin] = cycles_now + cycles_delta;
schedule_work(&clock->pps_info.out_work);
write_unlock_irqrestore(&clock->lock, flags);
break;
default:
netdev_err(netdev, "%s: Unhandled event\n", __func__);
mlx5_core_err(mdev, " Unhandled event\n");
}
}
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
struct mlx5e_tstamp *tstamp = &priv->tstamp;
struct mlx5_clock *clock = &mdev->clock;
u64 ns;
u64 frac = 0;
u32 dev_freq;
mlx5e_timestamp_init_config(tstamp);
dev_freq = MLX5_CAP_GEN(priv->mdev, device_frequency_khz);
dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
if (!dev_freq) {
mlx5_core_warn(priv->mdev, "invalid device_frequency_khz, aborting HW clock init\n");
mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
return;
}
rwlock_init(&tstamp->lock);
tstamp->cycles.read = mlx5e_read_internal_timer;
tstamp->cycles.shift = MLX5E_CYCLES_SHIFT;
tstamp->cycles.mult = clocksource_khz2mult(dev_freq,
tstamp->cycles.shift);
tstamp->nominal_c_mult = tstamp->cycles.mult;
tstamp->cycles.mask = CLOCKSOURCE_MASK(41);
tstamp->mdev = priv->mdev;
timecounter_init(&tstamp->clock, &tstamp->cycles,
rwlock_init(&clock->lock);
clock->cycles.read = read_internal_timer;
clock->cycles.shift = MLX5_CYCLES_SHIFT;
clock->cycles.mult = clocksource_khz2mult(dev_freq,
clock->cycles.shift);
clock->nominal_c_mult = clock->cycles.mult;
clock->cycles.mask = CLOCKSOURCE_MASK(41);
timecounter_init(&clock->tc, &clock->cycles,
ktime_to_ns(ktime_get_real()));
/* Calculate period in seconds to call the overflow watchdog - to make
* sure counter is checked at least once every wrap around.
*/
ns = cyclecounter_cyc2ns(&tstamp->cycles, tstamp->cycles.mask,
ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask,
frac, &frac);
do_div(ns, NSEC_PER_SEC / 2 / HZ);
tstamp->overflow_period = ns;
clock->overflow_period = ns;
INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out);
INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
if (tstamp->overflow_period)
queue_delayed_work(priv->wq, &tstamp->overflow_work, 0);
INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
if (clock->overflow_period)
schedule_delayed_work(&clock->overflow_work, 0);
else
mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");
mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n");
/* Configure the PHC */
tstamp->ptp_info = mlx5e_ptp_clock_info;
snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");
clock->ptp_info = mlx5_ptp_clock_info;
/* Initialize 1PPS data structures */
if (MLX5_PPS_CAP(priv->mdev))
mlx5e_get_pps_caps(priv, tstamp);
if (tstamp->ptp_info.n_pins)
mlx5e_init_pin_config(tstamp);
tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
&priv->mdev->pdev->dev);
if (IS_ERR(tstamp->ptp)) {
mlx5_core_warn(priv->mdev, "ptp_clock_register failed %ld\n",
PTR_ERR(tstamp->ptp));
tstamp->ptp = NULL;
if (MLX5_PPS_CAP(mdev))
mlx5_get_pps_caps(mdev);
if (clock->ptp_info.n_pins)
mlx5_init_pin_config(clock);
clock->ptp = ptp_clock_register(&clock->ptp_info,
&mdev->pdev->dev);
if (IS_ERR(clock->ptp)) {
mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
PTR_ERR(clock->ptp));
clock->ptp = NULL;
}
}
void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv)
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
{
struct mlx5e_tstamp *tstamp = &priv->tstamp;
struct mlx5_clock *clock = &mdev->clock;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
return;
if (priv->tstamp.ptp) {
ptp_clock_unregister(priv->tstamp.ptp);
priv->tstamp.ptp = NULL;
if (clock->ptp) {
ptp_clock_unregister(clock->ptp);
clock->ptp = NULL;
}
cancel_work_sync(&tstamp->pps_info.out_work);
cancel_delayed_work_sync(&tstamp->overflow_work);
kfree(tstamp->ptp_info.pin_config);
cancel_work_sync(&clock->pps_info.out_work);
cancel_delayed_work_sync(&clock->overflow_work);
kfree(clock->ptp_info.pin_config);
}
/*
* Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __LIB_CLOCK_H__
#define __LIB_CLOCK_H__
void mlx5_init_clock(struct mlx5_core_dev *mdev);
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev);
static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
u64 timestamp)
{
u64 nsec;
read_lock(&clock->lock);
nsec = timecounter_cyc2time(&clock->tc, timestamp);
read_unlock(&clock->lock);
return ns_to_ktime(nsec);
}
#endif
@@ -59,6 +59,7 @@
#include "lib/mlx5.h"
#include "fpga/core.h"
#include "accel/ipsec.h"
#include "lib/clock.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
@@ -889,6 +890,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_init_reserved_gids(dev);
mlx5_init_clock(dev);
err = mlx5_init_rl_table(dev);
if (err) {
dev_err(&pdev->dev, "Failed to init rate limiting\n");
@@ -949,6 +952,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_eswitch_cleanup(dev->priv.eswitch);
mlx5_mpfs_cleanup(dev);
mlx5_cleanup_rl_table(dev);
mlx5_cleanup_clock(dev);
mlx5_cleanup_reserved_gids(dev);
mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_srq_table(dev);
......
@@ -93,6 +93,7 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param);
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
struct mlx5_pagefault *pfault);
void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
void mlx5_disable_device(struct mlx5_core_dev *dev);
......
@@ -49,6 +49,8 @@
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/srq.h>
#include <linux/timecounter.h>
#include <linux/ptp_clock_kernel.h>
enum {
MLX5_BOARD_ID_LEN = 64,
@@ -760,6 +762,27 @@ struct mlx5_rsvd_gids {
struct ida ida;
};
#define MAX_PIN_NUM 8
struct mlx5_pps {
u8 pin_caps[MAX_PIN_NUM];
struct work_struct out_work;
u64 start[MAX_PIN_NUM];
u8 enabled;
};
struct mlx5_clock {
rwlock_t lock;
struct cyclecounter cycles;
struct timecounter tc;
struct hwtstamp_config hwtstamp_config;
u32 nominal_c_mult;
unsigned long overflow_period;
struct delayed_work overflow_work;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
struct mlx5_pps pps_info;
};
struct mlx5_core_dev {
struct pci_dev *pdev;
/* sync pci state */
@@ -800,6 +823,7 @@ struct mlx5_core_dev {
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *rmap;
#endif
struct mlx5_clock clock;
};
struct mlx5_db {
......