Commit 847c6bdb authored by Jakub Kicinski

Merge branch 'felix-dsa-driver-fixes'

Vladimir Oltean says:

====================
Felix DSA driver fixes

This is an assorted collection of fixes for issues seen on the NXP
LS1028A switch.

- PTP packet drops due to switch congestion result in catastrophic
  damage to the driver's state
- loops are not blocked by STP if using the ocelot-8021q tagger
- driver uses the wrong CPU port when two of them are defined in DT
- module autoloading is broken* with both tagging protocol drivers
  (ocelot and ocelot-8021q)

Changes in v2:
- Stop printing that we aren't going to take TX timestamps if we don't
  have TX timestamping anyway, and we are just carrying PTP frames for a
  cascaded DSA switch.
- Shorten the deferred xmit kthread name so that it fits the 16
  character limit (TASK_COMM_LEN)
====================

Link: https://lore.kernel.org/r/20211012114044.2526146-1-vladimir.oltean@nxp.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 3af760e4 8d5f7954
......@@ -266,12 +266,12 @@ static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
*/
static int felix_setup_mmio_filtering(struct felix *felix)
{
unsigned long user_ports = 0, cpu_ports = 0;
unsigned long user_ports = dsa_user_ports(felix->ds);
struct ocelot_vcap_filter *redirect_rule;
struct ocelot_vcap_filter *tagging_rule;
struct ocelot *ocelot = &felix->ocelot;
struct dsa_switch *ds = felix->ds;
int port, ret;
int cpu = -1, port, ret;
tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
if (!tagging_rule)
......@@ -284,12 +284,15 @@ static int felix_setup_mmio_filtering(struct felix *felix)
}
for (port = 0; port < ocelot->num_phys_ports; port++) {
if (dsa_is_user_port(ds, port))
user_ports |= BIT(port);
if (dsa_is_cpu_port(ds, port))
cpu_ports |= BIT(port);
if (dsa_is_cpu_port(ds, port)) {
cpu = port;
break;
}
}
if (cpu < 0)
return -EINVAL;
tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
*(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
*(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff);
......@@ -325,7 +328,7 @@ static int felix_setup_mmio_filtering(struct felix *felix)
* the CPU port module
*/
redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
redirect_rule->action.port_mask = cpu_ports;
redirect_rule->action.port_mask = BIT(cpu);
} else {
/* Trap PTP packets only to the CPU port module (which is
* redirected to the NPI port)
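Editor's note: the three felix_setup_mmio_filtering() hunks above interleave removed and added lines. Read together, the new code redirects trapped PTP frames to the first CPU port found instead of OR-ing all CPU ports into one mask. A consolidated sketch of just the changed parts, as reconstructed from this diff (surrounding context elided):

	unsigned long user_ports = dsa_user_ports(felix->ds);
	...
	int cpu = -1, port, ret;
	...
	/* Pick the first CPU port as the destination of the PTP redirect rule */
	for (port = 0; port < ocelot->num_phys_ports; port++) {
		if (dsa_is_cpu_port(ds, port)) {
			cpu = port;
			break;
		}
	}

	if (cpu < 0)
		return -EINVAL;
	...
	redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
	redirect_rule->action.port_mask = BIT(cpu);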
......@@ -1074,6 +1077,101 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
return 0;
}
static void ocelot_port_purge_txtstamp_skb(struct ocelot *ocelot, int port,
struct sk_buff *skb)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
struct sk_buff *skb_match = NULL, *skb_tmp;
unsigned long flags;
if (!clone)
return;
spin_lock_irqsave(&ocelot_port->tx_skbs.lock, flags);
skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
if (skb != clone)
continue;
__skb_unlink(skb, &ocelot_port->tx_skbs);
skb_match = skb;
break;
}
spin_unlock_irqrestore(&ocelot_port->tx_skbs.lock, flags);
WARN_ONCE(!skb_match,
"Could not find skb clone in TX timestamping list\n");
}
#define work_to_xmit_work(w) \
container_of((w), struct felix_deferred_xmit_work, work)
static void felix_port_deferred_xmit(struct kthread_work *work)
{
struct felix_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
struct dsa_switch *ds = xmit_work->dp->ds;
struct sk_buff *skb = xmit_work->skb;
u32 rew_op = ocelot_ptp_rew_op(skb);
struct ocelot *ocelot = ds->priv;
int port = xmit_work->dp->index;
int retries = 10;
do {
if (ocelot_can_inject(ocelot, 0))
break;
cpu_relax();
} while (--retries);
if (!retries) {
dev_err(ocelot->dev, "port %d failed to inject skb\n",
port);
ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
kfree_skb(skb);
return;
}
ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
consume_skb(skb);
kfree(xmit_work);
}
static int felix_port_setup_tagger_data(struct dsa_switch *ds, int port)
{
struct dsa_port *dp = dsa_to_port(ds, port);
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
struct felix_port *felix_port;
if (!dsa_port_is_user(dp))
return 0;
felix_port = kzalloc(sizeof(*felix_port), GFP_KERNEL);
if (!felix_port)
return -ENOMEM;
felix_port->xmit_worker = felix->xmit_worker;
felix_port->xmit_work_fn = felix_port_deferred_xmit;
dp->priv = felix_port;
return 0;
}
static void felix_port_teardown_tagger_data(struct dsa_switch *ds, int port)
{
struct dsa_port *dp = dsa_to_port(ds, port);
struct felix_port *felix_port = dp->priv;
if (!felix_port)
return;
dp->priv = NULL;
kfree(felix_port);
}
/* Hardware initialization done here so that we can allocate structures with
* devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
* us to allocate structures twice (leak memory) and map PCI memory twice
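Editor's note: the hunk above adds the consumer side of the deferred transmit path: a kthread_worker owned by the felix driver, with one work item allocated per deferred packet by the tagger (see tag_ocelot_8021q.c further down). For readers unfamiliar with the API, here is a minimal sketch of the same kthread_worker pattern; all my_* names are illustrative and not part of the patch.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct my_deferred_work {
	struct kthread_work work;
	void *payload;
};

static struct kthread_worker *my_worker;

static void my_work_fn(struct kthread_work *work)
{
	struct my_deferred_work *w =
		container_of(work, struct my_deferred_work, work);

	/* Process w->payload in sleepable kthread context, then free the
	 * work item, mirroring kfree(xmit_work) in felix_port_deferred_xmit().
	 */
	kfree(w);
}

static int my_setup(void)
{
	/* The worker name must fit in TASK_COMM_LEN (16 characters), which is
	 * why the driver uses the short "felix_xmit" (see changes in v2).
	 */
	my_worker = kthread_create_worker(0, "my_xmit");
	return IS_ERR(my_worker) ? PTR_ERR(my_worker) : 0;
}

static void my_defer(void *payload)
{
	/* Called from a context that must not sleep, hence GFP_ATOMIC, as in
	 * ocelot_defer_xmit() in the tagger.
	 */
	struct my_deferred_work *w = kzalloc(sizeof(*w), GFP_ATOMIC);

	if (!w)
		return;

	w->payload = payload;
	kthread_init_work(&w->work, my_work_fn);
	kthread_queue_work(my_worker, &w->work);
}

static void my_teardown(void)
{
	kthread_destroy_worker(my_worker);
}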
......@@ -1102,6 +1200,12 @@ static int felix_setup(struct dsa_switch *ds)
}
}
felix->xmit_worker = kthread_create_worker(0, "felix_xmit");
if (IS_ERR(felix->xmit_worker)) {
err = PTR_ERR(felix->xmit_worker);
goto out_deinit_timestamp;
}
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
continue;
......@@ -1112,6 +1216,14 @@ static int felix_setup(struct dsa_switch *ds)
* bits of vlan tag.
*/
felix_port_qos_map_init(ocelot, port);
err = felix_port_setup_tagger_data(ds, port);
if (err) {
dev_err(ds->dev,
"port %d failed to set up tagger data: %pe\n",
port, ERR_PTR(err));
goto out_deinit_ports;
}
}
err = ocelot_devlink_sb_register(ocelot);
......@@ -1126,6 +1238,7 @@ static int felix_setup(struct dsa_switch *ds)
* there's no real point in checking for errors.
*/
felix_set_tag_protocol(ds, port, felix->tag_proto);
break;
}
ds->mtu_enforcement_ingress = true;
......@@ -1138,9 +1251,13 @@ static int felix_setup(struct dsa_switch *ds)
if (dsa_is_unused_port(ds, port))
continue;
felix_port_teardown_tagger_data(ds, port);
ocelot_deinit_port(ocelot, port);
}
kthread_destroy_worker(felix->xmit_worker);
out_deinit_timestamp:
ocelot_deinit_timestamp(ocelot);
ocelot_deinit(ocelot);
......@@ -1162,19 +1279,23 @@ static void felix_teardown(struct dsa_switch *ds)
continue;
felix_del_tag_protocol(ds, port, felix->tag_proto);
break;
}
ocelot_devlink_sb_unregister(ocelot);
ocelot_deinit_timestamp(ocelot);
ocelot_deinit(ocelot);
for (port = 0; port < ocelot->num_phys_ports; port++) {
if (dsa_is_unused_port(ds, port))
continue;
felix_port_teardown_tagger_data(ds, port);
ocelot_deinit_port(ocelot, port);
}
kthread_destroy_worker(felix->xmit_worker);
ocelot_devlink_sb_unregister(ocelot);
ocelot_deinit_timestamp(ocelot);
ocelot_deinit(ocelot);
if (felix->info->mdio_bus_free)
felix->info->mdio_bus_free(ocelot);
}
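Editor's note: the felix_teardown() hunk above mixes the removed lines with their reordered counterparts, which obscures the new ordering. As reconstructed from this hunk, teardown now deinitializes the ports (including the new per-port tagger data) and destroys the xmit worker before tearing down the devlink shared buffers, timestamping and the switch core:

	for (port = 0; port < ocelot->num_phys_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		felix_port_teardown_tagger_data(ds, port);
		ocelot_deinit_port(ocelot, port);
	}

	kthread_destroy_worker(felix->xmit_worker);

	ocelot_devlink_sb_unregister(ocelot);
	ocelot_deinit_timestamp(ocelot);
	ocelot_deinit(ocelot);

	if (felix->info->mdio_bus_free)
		felix->info->mdio_bus_free(ocelot);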
......@@ -1291,8 +1412,12 @@ static void felix_txtstamp(struct dsa_switch *ds, int port,
if (!ocelot->ptp)
return;
if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone))
if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) {
dev_err_ratelimited(ds->dev,
"port %d delivering skb without TX timestamp\n",
port);
return;
}
if (clone)
OCELOT_SKB_CB(skb)->clone = clone;
......
......@@ -62,6 +62,7 @@ struct felix {
resource_size_t switch_base;
resource_size_t imdio_base;
enum dsa_tag_protocol tag_proto;
struct kthread_worker *xmit_worker;
};
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port);
......
......@@ -569,49 +569,44 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_up);
static void ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
struct sk_buff *clone)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
unsigned long flags;
spin_lock_irqsave(&ocelot->ts_id_lock, flags);
spin_lock(&ocelot_port->ts_id_lock);
if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID ||
ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
return -EBUSY;
}
skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
/* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id;
ocelot_port->ts_id = (ocelot_port->ts_id + 1) % 4;
skb_queue_tail(&ocelot_port->tx_skbs, clone);
spin_unlock(&ocelot_port->ts_id_lock);
}
ocelot_port->ts_id++;
if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID)
ocelot_port->ts_id = 0;
u32 ocelot_ptp_rew_op(struct sk_buff *skb)
{
struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
u8 ptp_cmd = OCELOT_SKB_CB(skb)->ptp_cmd;
u32 rew_op = 0;
ocelot_port->ptp_skbs_in_flight++;
ocelot->ptp_skbs_in_flight++;
if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && clone) {
rew_op = ptp_cmd;
rew_op |= OCELOT_SKB_CB(clone)->ts_id << 3;
} else if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
rew_op = ptp_cmd;
}
skb_queue_tail(&ocelot_port->tx_skbs, clone);
return rew_op;
spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
return 0;
}
EXPORT_SYMBOL(ocelot_ptp_rew_op);
static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb)
static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb,
unsigned int ptp_class)
{
struct ptp_header *hdr;
unsigned int ptp_class;
u8 msgtype, twostep;
ptp_class = ptp_classify_raw(skb);
if (ptp_class == PTP_CLASS_NONE)
return false;
hdr = ptp_parse_header(skb, ptp_class);
if (!hdr)
return false;
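Editor's note: the hunk above interleaves the old and new bodies of ocelot_port_add_txtstamp_skb() with the removal of the out-of-line ocelot_ptp_rew_op() (now a static inline in include/linux/dsa/ocelot.h, see below). Consolidated, the function as reconstructed from this hunk returns an error instead of silently recycling timestamp IDs: it takes the switch-wide ts_id_lock, refuses new two-step requests once 63 are in flight on the port (OCELOT_MAX_PTP_ID) or 128 switch-wide (OCELOT_PTP_FIFO_SIZE), and only then assigns an ID and queues the clone:

static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
					struct sk_buff *clone)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	unsigned long flags;

	spin_lock_irqsave(&ocelot->ts_id_lock, flags);

	if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID ||
	    ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
		spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
		return -EBUSY;
	}

	skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
	/* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
	OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id;

	ocelot_port->ts_id++;
	if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID)
		ocelot_port->ts_id = 0;

	ocelot_port->ptp_skbs_in_flight++;
	ocelot->ptp_skbs_in_flight++;

	skb_queue_tail(&ocelot_port->tx_skbs, clone);

	spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);

	return 0;
}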
......@@ -631,10 +626,20 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
u8 ptp_cmd = ocelot_port->ptp_cmd;
unsigned int ptp_class;
int err;
/* Don't do anything if PTP timestamping not enabled */
if (!ptp_cmd)
return 0;
ptp_class = ptp_classify_raw(skb);
if (ptp_class == PTP_CLASS_NONE)
return -EINVAL;
/* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */
if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
if (ocelot_ptp_is_onestep_sync(skb)) {
if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) {
OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
return 0;
}
......@@ -648,8 +653,12 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
if (!(*clone))
return -ENOMEM;
ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
if (err)
return err;
OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
OCELOT_SKB_CB(*clone)->ptp_class = ptp_class;
}
return 0;
......@@ -683,6 +692,17 @@ static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
}
static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid)
{
struct ptp_header *hdr;
hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class);
if (WARN_ON(!hdr))
return false;
return seqid == ntohs(hdr->sequence_id);
}
void ocelot_get_txtstamp(struct ocelot *ocelot)
{
int budget = OCELOT_PTP_QUEUE_SZ;
......@@ -690,10 +710,10 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
while (budget--) {
struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
struct skb_shared_hwtstamps shhwtstamps;
u32 val, id, seqid, txport;
struct ocelot_port *port;
struct timespec64 ts;
unsigned long flags;
u32 val, id, txport;
val = ocelot_read(ocelot, SYS_PTP_STATUS);
......@@ -706,10 +726,17 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
/* Retrieve the ts ID and Tx port */
id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val);
/* Retrieve its associated skb */
port = ocelot->ports[txport];
spin_lock(&ocelot->ts_id_lock);
port->ptp_skbs_in_flight--;
ocelot->ptp_skbs_in_flight--;
spin_unlock(&ocelot->ts_id_lock);
/* Retrieve its associated skb */
try_again:
spin_lock_irqsave(&port->tx_skbs.lock, flags);
skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
......@@ -722,12 +749,20 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
if (WARN_ON(!skb_match))
continue;
if (!ocelot_validate_ptp_skb(skb_match, seqid)) {
dev_err_ratelimited(ocelot->dev,
"port %d received stale TX timestamp for seqid %d, discarding\n",
txport, seqid);
dev_kfree_skb_any(skb);
goto try_again;
}
/* Get the h/w timestamp */
ocelot_get_hwtimestamp(ocelot, &ts);
if (unlikely(!skb_match))
continue;
/* Set the timestamp into the skb */
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
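Editor's note: the two ocelot_get_txtstamp() hunks above are likewise interleaved. Reconstructed from them, the loop body now also reads the sequence ID from the timestamp FIFO, drops the in-flight counters as soon as a FIFO entry is consumed, and cross-checks the sequence ID against the matched clone, discarding stale entries and retrying (the skb-queue walk itself is elided context and only summarized here):

		/* Retrieve the ts ID and Tx port */
		id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
		txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
		seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val);

		port = ocelot->ports[txport];

		spin_lock(&ocelot->ts_id_lock);
		port->ptp_skbs_in_flight--;
		ocelot->ptp_skbs_in_flight--;
		spin_unlock(&ocelot->ts_id_lock);

		/* Retrieve its associated skb */
try_again:
		spin_lock_irqsave(&port->tx_skbs.lock, flags);
		/* ... walk port->tx_skbs looking for ts_id == id, unlinking the
		 * match into skb_match (elided context) ...
		 */
		spin_unlock_irqrestore(&port->tx_skbs.lock, flags);

		if (WARN_ON(!skb_match))
			continue;

		if (!ocelot_validate_ptp_skb(skb_match, seqid)) {
			dev_err_ratelimited(ocelot->dev,
					    "port %d received stale TX timestamp for seqid %d, discarding\n",
					    txport, seqid);
			dev_kfree_skb_any(skb);
			goto try_again;
		}

		/* Get the h/w timestamp */
		ocelot_get_hwtimestamp(ocelot, &ts);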
......@@ -1948,7 +1983,6 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
struct ocelot_port *ocelot_port = ocelot->ports[port];
skb_queue_head_init(&ocelot_port->tx_skbs);
spin_lock_init(&ocelot_port->ts_id_lock);
/* Basic L2 initialization */
......@@ -2081,6 +2115,7 @@ int ocelot_init(struct ocelot *ocelot)
mutex_init(&ocelot->stats_lock);
mutex_init(&ocelot->ptp_lock);
spin_lock_init(&ocelot->ptp_clock_lock);
spin_lock_init(&ocelot->ts_id_lock);
snprintf(queue_name, sizeof(queue_name), "%s-stats",
dev_name(ocelot->dev));
ocelot->stats_queue = create_singlethread_workqueue(queue_name);
......
......@@ -8,6 +8,7 @@
* Copyright 2020-2021 NXP
*/
#include <linux/dsa/ocelot.h>
#include <linux/if_bridge.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
......
......@@ -5,7 +5,28 @@
#ifndef _NET_DSA_TAG_OCELOT_H
#define _NET_DSA_TAG_OCELOT_H
#include <linux/kthread.h>
#include <linux/packing.h>
#include <linux/skbuff.h>
struct ocelot_skb_cb {
struct sk_buff *clone;
unsigned int ptp_class; /* valid only for clones */
u8 ptp_cmd;
u8 ts_id;
};
#define OCELOT_SKB_CB(skb) \
((struct ocelot_skb_cb *)((skb)->cb))
#define IFH_TAG_TYPE_C 0
#define IFH_TAG_TYPE_S 1
#define IFH_REW_OP_NOOP 0x0
#define IFH_REW_OP_DSCP 0x1
#define IFH_REW_OP_ONE_STEP_PTP 0x2
#define IFH_REW_OP_TWO_STEP_PTP 0x3
#define IFH_REW_OP_ORIGIN_PTP 0x5
#define OCELOT_TAG_LEN 16
#define OCELOT_SHORT_PREFIX_LEN 4
......@@ -140,6 +161,17 @@
* +------+------+------+------+------+------+------+------+
*/
struct felix_deferred_xmit_work {
struct dsa_port *dp;
struct sk_buff *skb;
struct kthread_work work;
};
struct felix_port {
void (*xmit_work_fn)(struct kthread_work *work);
struct kthread_worker *xmit_worker;
};
static inline void ocelot_xfh_get_rew_val(void *extraction, u64 *rew_val)
{
packing(extraction, rew_val, 116, 85, OCELOT_TAG_LEN, UNPACK, 0);
......@@ -215,4 +247,21 @@ static inline void ocelot_ifh_set_vid(void *injection, u64 vid)
packing(injection, &vid, 11, 0, OCELOT_TAG_LEN, PACK, 0);
}
/* Determine the PTP REW_OP to use for injecting the given skb */
static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb)
{
struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
u8 ptp_cmd = OCELOT_SKB_CB(skb)->ptp_cmd;
u32 rew_op = 0;
if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && clone) {
rew_op = ptp_cmd;
rew_op |= OCELOT_SKB_CB(clone)->ts_id << 3;
} else if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
rew_op = ptp_cmd;
}
return rew_op;
}
#endif
......@@ -89,15 +89,6 @@
/* Source PGIDs, one per physical port */
#define PGID_SRC 80
#define IFH_TAG_TYPE_C 0
#define IFH_TAG_TYPE_S 1
#define IFH_REW_OP_NOOP 0x0
#define IFH_REW_OP_DSCP 0x1
#define IFH_REW_OP_ONE_STEP_PTP 0x2
#define IFH_REW_OP_TWO_STEP_PTP 0x3
#define IFH_REW_OP_ORIGIN_PTP 0x5
#define OCELOT_NUM_TC 8
#define OCELOT_SPEED_2500 0
......@@ -603,10 +594,10 @@ struct ocelot_port {
/* The VLAN ID that will be transmitted as untagged, on egress */
struct ocelot_vlan native_vlan;
unsigned int ptp_skbs_in_flight;
u8 ptp_cmd;
struct sk_buff_head tx_skbs;
u8 ts_id;
spinlock_t ts_id_lock;
phy_interface_t phy_mode;
......@@ -680,6 +671,9 @@ struct ocelot {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_info;
struct hwtstamp_config hwtstamp_config;
unsigned int ptp_skbs_in_flight;
/* Protects the 2-step TX timestamp ID logic */
spinlock_t ts_id_lock;
/* Protects the PTP interface state */
struct mutex ptp_lock;
/* Protects the PTP clock */
......@@ -692,15 +686,6 @@ struct ocelot_policer {
u32 burst; /* bytes */
};
struct ocelot_skb_cb {
struct sk_buff *clone;
u8 ptp_cmd;
u8 ts_id;
};
#define OCELOT_SKB_CB(skb) \
((struct ocelot_skb_cb *)((skb)->cb))
#define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
#define ocelot_read_gix(ocelot, reg, gi) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi))
#define ocelot_read_rix(ocelot, reg, ri) __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri))
......@@ -752,8 +737,6 @@ u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
void __ocelot_target_write_ix(struct ocelot *ocelot, enum ocelot_target target,
u32 val, u32 reg, u32 offset);
#if IS_ENABLED(CONFIG_MSCC_OCELOT_SWITCH_LIB)
/* Packet I/O */
bool ocelot_can_inject(struct ocelot *ocelot, int grp);
void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
......@@ -761,36 +744,6 @@ void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **skb);
void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp);
u32 ocelot_ptp_rew_op(struct sk_buff *skb);
#else
static inline bool ocelot_can_inject(struct ocelot *ocelot, int grp)
{
return false;
}
static inline void ocelot_port_inject_frame(struct ocelot *ocelot, int port,
int grp, u32 rew_op,
struct sk_buff *skb)
{
}
static inline int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp,
struct sk_buff **skb)
{
return -EIO;
}
static inline void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp)
{
}
static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb)
{
return 0;
}
#endif
/* Hardware initialization */
int ocelot_regfields_init(struct ocelot *ocelot,
const struct reg_field *const regfields);
......
......@@ -13,6 +13,9 @@
#include <linux/ptp_clock_kernel.h>
#include <soc/mscc/ocelot.h>
#define OCELOT_MAX_PTP_ID 63
#define OCELOT_PTP_FIFO_SIZE 128
#define PTP_PIN_CFG_RSZ 0x20
#define PTP_PIN_TOD_SEC_MSB_RSZ PTP_PIN_CFG_RSZ
#define PTP_PIN_TOD_SEC_LSB_RSZ PTP_PIN_CFG_RSZ
......
......@@ -101,8 +101,6 @@ config NET_DSA_TAG_RTL4_A
config NET_DSA_TAG_OCELOT
tristate "Tag driver for Ocelot family of switches, using NPI port"
depends on MSCC_OCELOT_SWITCH_LIB || \
(MSCC_OCELOT_SWITCH_LIB=n && COMPILE_TEST)
select PACKING
help
Say Y or M if you want to enable NPI tagging for the Ocelot switches
......@@ -114,8 +112,6 @@ config NET_DSA_TAG_OCELOT
config NET_DSA_TAG_OCELOT_8021Q
tristate "Tag driver for Ocelot family of switches, using VLAN"
depends on MSCC_OCELOT_SWITCH_LIB || \
(MSCC_OCELOT_SWITCH_LIB=n && COMPILE_TEST)
help
Say Y or M if you want to enable support for tagging frames with a
custom VLAN-based header. Frames that require timestamping, such as
......
......@@ -2,7 +2,6 @@
/* Copyright 2019 NXP
*/
#include <linux/dsa/ocelot.h>
#include <soc/mscc/ocelot.h>
#include "dsa_priv.h"
static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
......
......@@ -9,10 +9,32 @@
* that on egress
*/
#include <linux/dsa/8021q.h>
#include <soc/mscc/ocelot.h>
#include <soc/mscc/ocelot_ptp.h>
#include <linux/dsa/ocelot.h>
#include "dsa_priv.h"
static struct sk_buff *ocelot_defer_xmit(struct dsa_port *dp,
struct sk_buff *skb)
{
struct felix_deferred_xmit_work *xmit_work;
struct felix_port *felix_port = dp->priv;
xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
if (!xmit_work)
return NULL;
/* Calls felix_port_deferred_xmit in felix.c */
kthread_init_work(&xmit_work->work, felix_port->xmit_work_fn);
/* Increase refcount so the kfree_skb in dsa_slave_xmit
* won't really free the packet.
*/
xmit_work->dp = dp;
xmit_work->skb = skb_get(skb);
kthread_queue_work(felix_port->xmit_worker, &xmit_work->work);
return NULL;
}
static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
......@@ -20,18 +42,10 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
u16 tx_vid = dsa_8021q_tx_vid(dp->ds, dp->index);
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
struct ocelot *ocelot = dp->ds->priv;
int port = dp->index;
u32 rew_op = 0;
struct ethhdr *hdr = eth_hdr(skb);
rew_op = ocelot_ptp_rew_op(skb);
if (rew_op) {
if (!ocelot_can_inject(ocelot, 0))
return NULL;
ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
return NULL;
}
if (ocelot_ptp_rew_op(skb) || is_link_local_ether_addr(hdr->h_dest))
return ocelot_defer_xmit(dp, skb);
return dsa_8021q_xmit(skb, netdev, ETH_P_8021Q,
((pcp << VLAN_PRIO_SHIFT) | tx_vid));
......
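Editor's note: the ocelot_xmit() hunk above removes the direct injection path, which gave up (dropping the frame) whenever ocelot_can_inject() failed, and replaces it with deferral to the felix xmit worker, which retries and purges the TX timestamp clone if injection still fails. Link-local frames such as STP BPDUs now take the same injection path, which is the fix for the STP loop issue from the cover letter. As reconstructed from this hunk (the dsa_slave_to_port() line is elided diff context and assumed here), the transmit decision now reads:

static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
				   struct net_device *netdev)
{
	struct dsa_port *dp = dsa_slave_to_port(netdev); /* elided context, assumed */
	u16 tx_vid = dsa_8021q_tx_vid(dp->ds, dp->index);
	u16 queue_mapping = skb_get_queue_mapping(skb);
	u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
	struct ethhdr *hdr = eth_hdr(skb);

	/* PTP frames (nonzero rew_op) and link-local frames are handed to the
	 * felix driver's kthread worker and injected through the manual
	 * register interface; everything else keeps the VLAN-tagged path
	 * through the DSA master.
	 */
	if (ocelot_ptp_rew_op(skb) || is_link_local_ether_addr(hdr->h_dest))
		return ocelot_defer_xmit(dp, skb);

	return dsa_8021q_xmit(skb, netdev, ETH_P_8021Q,
			      ((pcp << VLAN_PRIO_SHIFT) | tx_vid));
}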