Commit 90fecc14 authored by David S. Miller

Merge tag 'mlx5-fixes-2017-07-27-V2' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2017-07-27

This series contains some misc fixes to the mlx5 driver.

Please pull and let me know if there's any problem.

V1->V2:
 - removed redundant braces

for -stable:
4.7
net/mlx5: Fix command bad flow on command entry allocation failure

4.9
net/mlx5: Consider tx_enabled in all modes on remap
net/mlx5e: Fix outer_header_zero() check size

4.10
net/mlx5: Fix mlx5_add_flow_rules call with correct num of dests

4.11
net/mlx5: Fix mlx5_ifc_mtpps_reg_bits structure size
net/mlx5e: Add field select to MTPPS register
net/mlx5e: Fix broken disable 1PPS flow
net/mlx5e: Change 1PPS out scheme
net/mlx5e: Add missing support for PTP_CLK_REQ_PPS request
net/mlx5e: Fix wrong delay calculation for overflow check scheduling
net/mlx5e: Schedule overflow check work to mlx5e workqueue

4.12
net/mlx5: Fix command completion after timeout access invalid structure
net/mlx5e: IPoIB, Modify add/remove underlay QPN flows

I hope this is not too much, but most of the patches do apply cleanly on -stable.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 996f6e12 bcec601f
@@ -786,6 +786,10 @@ static void cb_timeout_handler(struct work_struct *work)
 	mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
 }
 
+static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
+static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
+			      struct mlx5_cmd_msg *msg);
+
 static void cmd_work_handler(struct work_struct *work)
 {
 	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
@@ -796,17 +800,28 @@ static void cmd_work_handler(struct work_struct *work)
 	struct semaphore *sem;
 	unsigned long flags;
 	bool poll_cmd = ent->polling;
+	int alloc_ret;
 
 	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
 	down(sem);
 	if (!ent->page_queue) {
-		ent->idx = alloc_ent(cmd);
-		if (ent->idx < 0) {
+		alloc_ret = alloc_ent(cmd);
+		if (alloc_ret < 0) {
 			mlx5_core_err(dev, "failed to allocate command entry\n");
+			if (ent->callback) {
+				ent->callback(-EAGAIN, ent->context);
+				mlx5_free_cmd_msg(dev, ent->out);
+				free_msg(dev, ent->in);
+				free_cmd(ent);
+			} else {
+				ent->ret = -EAGAIN;
+				complete(&ent->done);
+			}
 			up(sem);
 			return;
 		}
+		ent->idx = alloc_ret;
 	} else {
 		ent->idx = cmd->max_reg_cmds;
 		spin_lock_irqsave(&cmd->alloc_lock, flags);
@@ -967,7 +982,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 	err = wait_func(dev, ent);
 	if (err == -ETIMEDOUT)
-		goto out_free;
+		goto out;
 
 	ds = ent->ts2 - ent->ts1;
 	op = MLX5_GET(mbox_in, in->first.data, opcode);
@@ -1430,6 +1445,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
 				mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
 					      ent->idx);
 				free_ent(cmd, ent->idx);
+				free_cmd(ent);
 			}
 			continue;
 		}
@@ -1488,7 +1504,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
 			free_msg(dev, ent->in);
 
 			err = err ? err : ent->status;
-			free_cmd(ent);
+			if (!forced)
+				free_cmd(ent);
 			callback(err, context);
 		} else {
 			complete(&ent->done);
...
@@ -266,6 +266,14 @@ struct mlx5e_dcbx {
 };
 #endif
 
+#define MAX_PIN_NUM	8
+struct mlx5e_pps {
+	u8                         pin_caps[MAX_PIN_NUM];
+	struct work_struct         out_work;
+	u64                        start[MAX_PIN_NUM];
+	u8                         enabled;
+};
+
 struct mlx5e_tstamp {
 	rwlock_t                   lock;
 	struct cyclecounter        cycles;
@@ -277,7 +285,7 @@ struct mlx5e_tstamp {
 	struct mlx5_core_dev      *mdev;
 	struct ptp_clock          *ptp;
 	struct ptp_clock_info      ptp_info;
-	u8                        *pps_pin_caps;
+	struct mlx5e_pps           pps_info;
 };
 
 enum {
...
@@ -53,6 +53,15 @@ enum {
 	MLX5E_EVENT_MODE_ONCE_TILL_ARM	= 0x2,
 };
 
+enum {
+	MLX5E_MTPPS_FS_ENABLE			= BIT(0x0),
+	MLX5E_MTPPS_FS_PATTERN			= BIT(0x2),
+	MLX5E_MTPPS_FS_PIN_MODE			= BIT(0x3),
+	MLX5E_MTPPS_FS_TIME_STAMP		= BIT(0x4),
+	MLX5E_MTPPS_FS_OUT_PULSE_DURATION	= BIT(0x5),
+	MLX5E_MTPPS_FS_ENH_OUT_PER_ADJ		= BIT(0x7),
+};
+
 void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
 			struct skb_shared_hwtstamps *hwts)
 {
@@ -73,17 +82,46 @@ static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc)
 	return mlx5_read_internal_timer(tstamp->mdev) & cc->mask;
 }
 
+static void mlx5e_pps_out(struct work_struct *work)
+{
+	struct mlx5e_pps *pps_info = container_of(work, struct mlx5e_pps,
+						  out_work);
+	struct mlx5e_tstamp *tstamp = container_of(pps_info, struct mlx5e_tstamp,
+						   pps_info);
+	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+	unsigned long flags;
+	int i;
+
+	for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
+		u64 tstart;
+
+		write_lock_irqsave(&tstamp->lock, flags);
+		tstart = tstamp->pps_info.start[i];
+		tstamp->pps_info.start[i] = 0;
+		write_unlock_irqrestore(&tstamp->lock, flags);
+		if (!tstart)
+			continue;
+
+		MLX5_SET(mtpps_reg, in, pin, i);
+		MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
+		MLX5_SET(mtpps_reg, in, field_select, MLX5E_MTPPS_FS_TIME_STAMP);
+		mlx5_set_mtpps(tstamp->mdev, in, sizeof(in));
+	}
+}
+
 static void mlx5e_timestamp_overflow(struct work_struct *work)
 {
 	struct delayed_work *dwork = to_delayed_work(work);
 	struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
 						   overflow_work);
+	struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp);
 	unsigned long flags;
 
 	write_lock_irqsave(&tstamp->lock, flags);
 	timecounter_read(&tstamp->clock);
 	write_unlock_irqrestore(&tstamp->lock, flags);
-	schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
+	queue_delayed_work(priv->wq, &tstamp->overflow_work,
+			   msecs_to_jiffies(tstamp->overflow_period * 1000));
 }
 
 int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
@@ -213,18 +251,6 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
 	int neg_adj = 0;
 	struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
 						  ptp_info);
-	struct mlx5e_priv *priv =
-		container_of(tstamp, struct mlx5e_priv, tstamp);
-
-	if (MLX5_CAP_GEN(priv->mdev, pps_modify)) {
-		u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
-
-		/* For future use need to add a loop for finding all 1PPS out pins */
-		MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
-		MLX5_SET(mtpps_reg, in, out_periodic_adjustment, delta & 0xFFFF);
-
-		mlx5_set_mtpps(priv->mdev, in, sizeof(in));
-	}
 
 	if (delta < 0) {
 		neg_adj = 1;
@@ -253,12 +279,13 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
 	struct mlx5e_priv *priv =
 		container_of(tstamp, struct mlx5e_priv, tstamp);
 	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+	u32 field_select = 0;
+	u8 pin_mode = 0;
 	u8 pattern = 0;
 	int pin = -1;
 	int err = 0;
 
-	if (!MLX5_CAP_GEN(priv->mdev, pps) ||
-	    !MLX5_CAP_GEN(priv->mdev, pps_modify))
+	if (!MLX5_PPS_CAP(priv->mdev))
 		return -EOPNOTSUPP;
 
 	if (rq->extts.index >= tstamp->ptp_info.n_pins)
@@ -268,15 +295,21 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
 		pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index);
 		if (pin < 0)
 			return -EBUSY;
+		pin_mode = MLX5E_PIN_MODE_IN;
+		pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
+		field_select = MLX5E_MTPPS_FS_PIN_MODE |
+			       MLX5E_MTPPS_FS_PATTERN |
+			       MLX5E_MTPPS_FS_ENABLE;
+	} else {
+		pin = rq->extts.index;
+		field_select = MLX5E_MTPPS_FS_ENABLE;
 	}
 
-	if (rq->extts.flags & PTP_FALLING_EDGE)
-		pattern = 1;
-
 	MLX5_SET(mtpps_reg, in, pin, pin);
-	MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_IN);
+	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
 	MLX5_SET(mtpps_reg, in, pattern, pattern);
 	MLX5_SET(mtpps_reg, in, enable, on);
+	MLX5_SET(mtpps_reg, in, field_select, field_select);
 
 	err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
 	if (err)
@@ -295,14 +328,18 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
 	struct mlx5e_priv *priv =
 		container_of(tstamp, struct mlx5e_priv, tstamp);
 	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
-	u64 nsec_now, nsec_delta, time_stamp;
+	u64 nsec_now, nsec_delta, time_stamp = 0;
 	u64 cycles_now, cycles_delta;
 	struct timespec64 ts;
 	unsigned long flags;
+	u32 field_select = 0;
+	u8 pin_mode = 0;
+	u8 pattern = 0;
 	int pin = -1;
+	int err = 0;
 	s64 ns;
 
-	if (!MLX5_CAP_GEN(priv->mdev, pps_modify))
+	if (!MLX5_PPS_CAP(priv->mdev))
 		return -EOPNOTSUPP;
 
 	if (rq->perout.index >= tstamp->ptp_info.n_pins)
@@ -313,32 +350,60 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
 				   rq->perout.index);
 		if (pin < 0)
 			return -EBUSY;
-	}
 
-	ts.tv_sec = rq->perout.period.sec;
-	ts.tv_nsec = rq->perout.period.nsec;
-	ns = timespec64_to_ns(&ts);
-	if (on)
-		if ((ns >> 1) != 500000000LL)
-			return -EINVAL;
-	ts.tv_sec = rq->perout.start.sec;
-	ts.tv_nsec = rq->perout.start.nsec;
-	ns = timespec64_to_ns(&ts);
-	cycles_now = mlx5_read_internal_timer(tstamp->mdev);
-	write_lock_irqsave(&tstamp->lock, flags);
-	nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
-	nsec_delta = ns - nsec_now;
-	cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
-				 tstamp->cycles.mult);
-	write_unlock_irqrestore(&tstamp->lock, flags);
-	time_stamp = cycles_now + cycles_delta;
+		pin_mode = MLX5E_PIN_MODE_OUT;
+		pattern = MLX5E_OUT_PATTERN_PERIODIC;
+		ts.tv_sec = rq->perout.period.sec;
+		ts.tv_nsec = rq->perout.period.nsec;
+		ns = timespec64_to_ns(&ts);
+
+		if ((ns >> 1) != 500000000LL)
+			return -EINVAL;
+
+		ts.tv_sec = rq->perout.start.sec;
+		ts.tv_nsec = rq->perout.start.nsec;
+		ns = timespec64_to_ns(&ts);
+		cycles_now = mlx5_read_internal_timer(tstamp->mdev);
+		write_lock_irqsave(&tstamp->lock, flags);
+		nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
+		nsec_delta = ns - nsec_now;
+		cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
+					 tstamp->cycles.mult);
+		write_unlock_irqrestore(&tstamp->lock, flags);
+		time_stamp = cycles_now + cycles_delta;
+		field_select = MLX5E_MTPPS_FS_PIN_MODE |
+			       MLX5E_MTPPS_FS_PATTERN |
+			       MLX5E_MTPPS_FS_ENABLE |
+			       MLX5E_MTPPS_FS_TIME_STAMP;
+	} else {
+		pin = rq->perout.index;
+		field_select = MLX5E_MTPPS_FS_ENABLE;
+	}
+
 	MLX5_SET(mtpps_reg, in, pin, pin);
-	MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
-	MLX5_SET(mtpps_reg, in, pattern, MLX5E_OUT_PATTERN_PERIODIC);
+	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
+	MLX5_SET(mtpps_reg, in, pattern, pattern);
 	MLX5_SET(mtpps_reg, in, enable, on);
 	MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
+	MLX5_SET(mtpps_reg, in, field_select, field_select);
 
-	return mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+	err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+	if (err)
+		return err;
+
+	return mlx5_set_mtppse(priv->mdev, pin, 0,
+			       MLX5E_EVENT_MODE_REPETETIVE & on);
+}
+
+static int mlx5e_pps_configure(struct ptp_clock_info *ptp,
+			       struct ptp_clock_request *rq,
+			       int on)
+{
+	struct mlx5e_tstamp *tstamp =
+		container_of(ptp, struct mlx5e_tstamp, ptp_info);
+
+	tstamp->pps_info.enabled = !!on;
+	return 0;
 }
 
 static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
@@ -350,6 +415,8 @@ static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
 		return mlx5e_extts_configure(ptp, rq, on);
 	case PTP_CLK_REQ_PEROUT:
 		return mlx5e_perout_configure(ptp, rq, on);
+	case PTP_CLK_REQ_PPS:
+		return mlx5e_pps_configure(ptp, rq, on);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -395,6 +462,7 @@ static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp)
 		return -ENOMEM;
 	tstamp->ptp_info.enable = mlx5e_ptp_enable;
 	tstamp->ptp_info.verify = mlx5e_ptp_verify;
+	tstamp->ptp_info.pps = 1;
 
 	for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
 		snprintf(tstamp->ptp_info.pin_config[i].name,
@@ -422,22 +490,56 @@ static void mlx5e_get_pps_caps(struct mlx5e_priv *priv,
 	tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
 					      cap_max_num_of_pps_out_pins);
 
-	tstamp->pps_pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
-	tstamp->pps_pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
-	tstamp->pps_pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
-	tstamp->pps_pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
-	tstamp->pps_pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
-	tstamp->pps_pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
-	tstamp->pps_pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
-	tstamp->pps_pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
+	tstamp->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
+	tstamp->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
+	tstamp->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
+	tstamp->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
+	tstamp->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
+	tstamp->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
+	tstamp->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
+	tstamp->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
 }
 
 void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
 			     struct ptp_clock_event *event)
 {
+	struct net_device *netdev = priv->netdev;
 	struct mlx5e_tstamp *tstamp = &priv->tstamp;
+	struct timespec64 ts;
+	u64 nsec_now, nsec_delta;
+	u64 cycles_now, cycles_delta;
+	int pin = event->index;
+	s64 ns;
+	unsigned long flags;
 
-	ptp_clock_event(tstamp->ptp, event);
+	switch (tstamp->ptp_info.pin_config[pin].func) {
+	case PTP_PF_EXTTS:
+		if (tstamp->pps_info.enabled) {
+			event->type = PTP_CLOCK_PPSUSR;
+			event->pps_times.ts_real = ns_to_timespec64(event->timestamp);
+		} else {
+			event->type = PTP_CLOCK_EXTTS;
+		}
+		ptp_clock_event(tstamp->ptp, event);
+		break;
+	case PTP_PF_PEROUT:
+		mlx5e_ptp_gettime(&tstamp->ptp_info, &ts);
+		cycles_now = mlx5_read_internal_timer(tstamp->mdev);
+		ts.tv_sec += 1;
+		ts.tv_nsec = 0;
+		ns = timespec64_to_ns(&ts);
+		write_lock_irqsave(&tstamp->lock, flags);
+		nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
+		nsec_delta = ns - nsec_now;
+		cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
+					 tstamp->cycles.mult);
+		tstamp->pps_info.start[pin] = cycles_now + cycles_delta;
+		queue_work(priv->wq, &tstamp->pps_info.out_work);
+		write_unlock_irqrestore(&tstamp->lock, flags);
+		break;
+	default:
+		netdev_err(netdev, "%s: Unhandled event\n", __func__);
+	}
 }
 
 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
@@ -473,9 +575,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
 	do_div(ns, NSEC_PER_SEC / 2 / HZ);
 	tstamp->overflow_period = ns;
 
+	INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out);
 	INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
 	if (tstamp->overflow_period)
-		schedule_delayed_work(&tstamp->overflow_work, 0);
+		queue_delayed_work(priv->wq, &tstamp->overflow_work, 0);
 	else
 		mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");
@@ -484,16 +587,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
 	snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");
 
 	/* Initialize 1PPS data structures */
-#define MAX_PIN_NUM	8
-	tstamp->pps_pin_caps = kzalloc(sizeof(u8) * MAX_PIN_NUM, GFP_KERNEL);
-	if (tstamp->pps_pin_caps) {
-		if (MLX5_CAP_GEN(priv->mdev, pps))
-			mlx5e_get_pps_caps(priv, tstamp);
-		if (tstamp->ptp_info.n_pins)
-			mlx5e_init_pin_config(tstamp);
-	} else {
-		mlx5_core_warn(priv->mdev, "1PPS initialization failed\n");
-	}
+	if (MLX5_PPS_CAP(priv->mdev))
+		mlx5e_get_pps_caps(priv, tstamp);
+	if (tstamp->ptp_info.n_pins)
+		mlx5e_init_pin_config(tstamp);
 
 	tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
 					 &priv->mdev->pdev->dev);
@@ -516,8 +613,7 @@ void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv)
 		priv->tstamp.ptp = NULL;
 	}
 
-	kfree(tstamp->pps_pin_caps);
-	kfree(tstamp->ptp_info.pin_config);
-
+	cancel_work_sync(&tstamp->pps_info.out_work);
 	cancel_delayed_work_sync(&tstamp->overflow_work);
+	kfree(tstamp->ptp_info.pin_config);
...
@@ -276,7 +276,7 @@ static void add_rule_to_list(struct mlx5e_priv *priv,
 static bool outer_header_zero(u32 *match_criteria)
 {
-	int size = MLX5_ST_SZ_BYTES(fte_match_param);
+	int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
 	char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
 					     outer_headers);
@@ -320,7 +320,7 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
 	spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
 	flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
-	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1);
+	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
 		netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
...
@@ -377,7 +377,6 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
 		break;
 	case MLX5_DEV_EVENT_PPS:
 		eqe = (struct mlx5_eqe *)param;
-		ptp_event.type = PTP_CLOCK_EXTTS;
 		ptp_event.index = eqe->data.pps.pin;
 		ptp_event.timestamp =
 			timecounter_cyc2time(&priv->tstamp.clock,
...
@@ -698,7 +698,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
 	else
 		mlx5_core_dbg(dev, "port_module_event is not set\n");
 
-	if (MLX5_CAP_GEN(dev, pps))
+	if (MLX5_PPS_CAP(dev))
 		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);
 
 	if (MLX5_CAP_GEN(dev, fpga))
...
@@ -1668,7 +1668,8 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
 	int i;
 
 	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
-	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH ||
+	    esw->mode == SRIOV_NONE)
 		return;
 
 	esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
...
@@ -178,8 +178,6 @@ static int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core
 
 static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
 {
-	mlx5_fs_remove_rx_underlay_qpn(mdev, qp->qpn);
-
 	mlx5_core_destroy_qp(mdev, qp);
 }
@@ -194,8 +192,6 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
 		return err;
 	}
 
-	mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
-
 	err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
 	if (err) {
 		mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
@@ -253,6 +249,7 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
 
 static int mlx5i_init_rx(struct mlx5e_priv *priv)
 {
+	struct mlx5i_priv *ipriv = priv->ppriv;
 	int err;
 
 	err = mlx5e_create_indirect_rqt(priv);
@@ -271,12 +268,18 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
 	if (err)
 		goto err_destroy_indirect_tirs;
 
-	err = mlx5i_create_flow_steering(priv);
+	err = mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
 	if (err)
 		goto err_destroy_direct_tirs;
 
+	err = mlx5i_create_flow_steering(priv);
+	if (err)
+		goto err_remove_rx_underlay_qpn;
+
 	return 0;
 
+err_remove_rx_underlay_qpn:
+	mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
 err_destroy_direct_tirs:
 	mlx5e_destroy_direct_tirs(priv);
 err_destroy_indirect_tirs:
@@ -290,6 +293,9 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
 
 static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
 {
+	struct mlx5i_priv *ipriv = priv->ppriv;
+
+	mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
 	mlx5i_destroy_flow_steering(priv);
 	mlx5e_destroy_direct_tirs(priv);
 	mlx5e_destroy_indirect_tirs(priv);
...
@@ -162,22 +162,17 @@ static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
 static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
 					   u8 *port1, u8 *port2)
 {
-	if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
-		if (tracker->netdev_state[0].tx_enabled) {
-			*port1 = 1;
-			*port2 = 1;
-		} else {
-			*port1 = 2;
-			*port2 = 2;
-		}
-	} else {
-		*port1 = 1;
-		*port2 = 2;
-		if (!tracker->netdev_state[0].link_up)
-			*port1 = 2;
-		else if (!tracker->netdev_state[1].link_up)
-			*port2 = 1;
+	*port1 = 1;
+	*port2 = 2;
+	if (!tracker->netdev_state[0].tx_enabled ||
+	    !tracker->netdev_state[0].link_up) {
+		*port1 = 2;
+		return;
 	}
+
+	if (!tracker->netdev_state[1].tx_enabled ||
+	    !tracker->netdev_state[1].link_up)
+		*port2 = 1;
 }
 
 static void mlx5_activate_lag(struct mlx5_lag *ldev,
...
@@ -154,6 +154,11 @@ int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
 int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
 int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
 
+#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) &&		\
+			    MLX5_CAP_GEN((mdev), pps_modify) &&		\
+			    MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) &&	\
+			    MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj))
+
 int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw);
 
 void mlx5e_init(void);
...
@@ -88,7 +88,11 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
 	int vf;
 
 	if (!sriov->enabled_vfs)
+#ifdef CONFIG_MLX5_CORE_EN
+		goto disable_sriov_resources;
+#else
 		return;
+#endif
 
 	for (vf = 0; vf < sriov->num_vfs; vf++) {
 		if (!sriov->vfs_ctx[vf].enabled)
@@ -103,6 +107,7 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
 	}
 
 #ifdef CONFIG_MLX5_CORE_EN
+disable_sriov_resources:
 	mlx5_eswitch_disable_sriov(dev->priv.eswitch);
 #endif
...
@@ -7749,8 +7749,10 @@ struct mlx5_ifc_pcam_reg_bits {
 };
 
 struct mlx5_ifc_mcam_enhanced_features_bits {
-	u8         reserved_at_0[0x7f];
+	u8         reserved_at_0[0x7d];
+	u8         mtpps_enh_out_per_adj[0x1];
+	u8         mtpps_fs[0x1];
 	u8         pcie_performance_group[0x1];
 };
@@ -8159,7 +8161,8 @@ struct mlx5_ifc_mtpps_reg_bits {
 	u8         reserved_at_78[0x4];
 	u8         cap_pin_4_mode[0x4];
 
-	u8         reserved_at_80[0x80];
+	u8         field_select[0x20];
+	u8         reserved_at_a0[0x60];
 
 	u8         enable[0x1];
 	u8         reserved_at_101[0xb];
@@ -8174,8 +8177,9 @@ struct mlx5_ifc_mtpps_reg_bits {
 	u8         out_pulse_duration[0x10];
 	u8         out_periodic_adjustment[0x10];
+	u8         enhanced_out_periodic_adjustment[0x20];
 
-	u8         reserved_at_1a0[0x60];
+	u8         reserved_at_1c0[0x20];
 };
 
 struct mlx5_ifc_mtppse_reg_bits {
...