Commit e5b0a1b7 authored by David S. Miller

Merge branch 'sja1110-dsa-tagging'

Vladimir Oltean says:

====================
DSA tagging driver for NXP SJA1110

This series adds support for tagging data and control packets on the new
NXP SJA1110 switch (supported by the sja1105 driver). Up to this point
the SJA1110 used the sja1105 tagging protocol, which allowed it to send
data packets but not the PDUs required by STP and PTP.

To accommodate this new tagger, which has both a header and a trailer, we
need to refactor the DSA tagging scheme, replacing the "overhead" concept
with separate "needed_headroom" and "needed_tailroom" concepts so that the
SJA1110 can declare its need for both.

There is also some consolidation work for the receive path of tag_8021q
and its callers (sja1105 and ocelot-8021q).
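
For illustration, a rough sketch of what the consolidated receive path looks
like from a caller's perspective (modeled on the ocelot-8021q conversion
further down; the function name is made up):

static struct sk_buff *example_rcv(struct sk_buff *skb,
				   struct net_device *netdev,
				   struct packet_type *pt)
{
	int src_port, switch_id, subvlan;

	/* Shared helper: pops the tag_8021q VLAN and decodes the source
	 * port, switch ID and subvlan from the VID, and skb->priority
	 * from the PCP bits.
	 */
	dsa_8021q_rcv(skb, &src_port, &switch_id, &subvlan);

	skb->dev = dsa_master_find_slave(netdev, switch_id, src_port);
	if (!skb->dev)
		return NULL;

	skb->offload_fwd_mark = 1;

	return skb;
}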

Changes in v3:
Rebase in front of the "Port the SJA1105 DSA driver to XPCS" series
which seems to have stalled for now.

Changes in v2:
Export the dsa_8021q_rcv and sja1110_process_meta_tstamp symbols to
avoid build errors when their callers are built as modules.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f244e256 566b18c8
......@@ -93,14 +93,15 @@ A tagging protocol may tag all packets with switch tags of the same length, or
the tag length might vary (for example packets with PTP timestamps might
require an extended switch tag, or there might be one tag length on TX and a
different one on RX). Either way, the tagging protocol driver must populate the
``struct dsa_device_ops::overhead`` with the length in octets of the longest
switch frame header. The DSA framework will automatically adjust the MTU of the
master interface to accommodate for this extra size in order for DSA user ports
to support the standard MTU (L2 payload length) of 1500 octets. The ``overhead``
is also used to request from the network stack, on a best-effort basis, the
allocation of packets with a ``needed_headroom`` or ``needed_tailroom``
sufficient such that the act of pushing the switch tag on transmission of a
packet does not cause it to reallocate due to lack of memory.
``struct dsa_device_ops::needed_headroom`` and/or ``struct dsa_device_ops::needed_tailroom``
with the length in octets of the longest switch frame header/trailer. The DSA
framework will automatically adjust the MTU of the master interface to
accommodate for this extra size in order for DSA user ports to support the
standard MTU (L2 payload length) of 1500 octets. The ``needed_headroom`` and
``needed_tailroom`` properties are also used to request from the network stack,
on a best-effort basis, the allocation of packets with enough extra space such
that the act of pushing the switch tag on transmission of a packet does not
cause it to reallocate due to lack of memory.
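
As a minimal sketch of how a tagger with both a header and a trailer would
fill these in (the name, lengths and callbacks below are placeholders, not an
in-tree driver):

static const struct dsa_device_ops example_netdev_ops = {
	.name			= "example",
	.proto			= DSA_TAG_PROTO_NONE,	/* placeholder */
	.xmit			= example_xmit,
	.rcv			= example_rcv,
	/* longest frame header pushed on TX, in octets */
	.needed_headroom	= 4,
	/* longest frame trailer appended on TX, in octets */
	.needed_tailroom	= 4,
};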
Even though applications are not expected to parse DSA-specific frame headers,
the format on the wire of the tagging protocol represents an Application Binary
......@@ -169,8 +170,8 @@ The job of this method is to prepare the skb in a way that the switch will
understand what egress port the packet is for (and not deliver it towards other
ports). Typically this is fulfilled by pushing a frame header. Checking for
insufficient size in the skb headroom or tailroom is unnecessary provided that
the ``overhead`` and ``tail_tag`` properties were filled out properly, because
DSA ensures there is enough space before calling this method.
the ``needed_headroom`` and ``needed_tailroom`` properties were filled out
properly, because DSA ensures there is enough space before calling this method.
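
A hedged sketch of such an ``xmit`` method for a hypothetical header-only
tagger follows; ``EXAMPLE_HDR_LEN`` and the tag format are made up for
illustration:

static struct sk_buff *example_xmit(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	u8 *tag;

	/* needed_headroom was already reserved by DSA, so no check here */
	skb_push(skb, EXAMPLE_HDR_LEN);

	/* Shift the MAC addresses left to open a gap for the tag */
	memmove(skb->data, skb->data + EXAMPLE_HDR_LEN, 2 * ETH_ALEN);

	tag = skb->data + 2 * ETH_ALEN;
	tag[0] = BIT(dp->index);	/* made-up egress port encoding */

	return skb;
}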
The reception of a packet goes through the tagger's ``rcv`` function. The
passed ``struct sk_buff *skb`` has ``skb->data`` pointing at
......
......@@ -109,6 +109,8 @@ struct sja1105_info {
int num_cbs_shapers;
int max_frame_mem;
int num_ports;
bool multiple_cascade_ports;
enum dsa_tag_protocol tag_proto;
const struct sja1105_dynamic_table_ops *dyn_ops;
const struct sja1105_table_ops *static_ops;
const struct sja1105_regs *regs;
......@@ -128,6 +130,8 @@ struct sja1105_info {
const unsigned char *addr, u16 vid);
void (*ptp_cmd_packing)(u8 *buf, struct sja1105_ptp_cmd *cmd,
enum packing_op op);
bool (*rxtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb);
void (*txtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb);
int (*clocking_setup)(struct sja1105_private *priv);
const char *name;
bool supports_mii[SJA1105_MAX_NUM_PORTS];
......
......@@ -654,14 +654,6 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
.host_port = priv->ds->num_ports,
/* Default to an invalid value */
.mirr_port = priv->ds->num_ports,
/* Link-local traffic received on casc_port will be forwarded
* to host_port without embedding the source port and device ID
* info in the destination MAC address (presumably because it
* is a cascaded port and a downstream SJA switch already did
* that). Default to an invalid port (to disable the feature)
* and overwrite this if we find any DSA (cascaded) ports.
*/
.casc_port = priv->ds->num_ports,
/* No TTEthernet */
.vllupformat = SJA1105_VL_FORMAT_PSFP,
.vlmarker = 0,
......@@ -673,7 +665,12 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
*/
.tpid = ETH_P_SJA1105,
.tpid2 = ETH_P_SJA1105,
/* Enable the TTEthernet engine on SJA1110 */
.tte_en = true,
/* Set up the EtherType for control packets on SJA1110 */
.header_type = ETH_P_SJA1110,
};
struct sja1105_general_params_entry *general_params;
struct dsa_switch *ds = priv->ds;
struct sja1105_table *table;
int port;
......@@ -699,12 +696,26 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
table->entry_count = table->ops->max_entry_count;
general_params = table->entries;
/* This table only has a single entry */
((struct sja1105_general_params_entry *)table->entries)[0] =
default_general_params;
general_params[0] = default_general_params;
sja1110_select_tdmaconfigidx(priv);
/* Link-local traffic received on casc_port will be forwarded
* to host_port without embedding the source port and device ID
* info in the destination MAC address, and no RX timestamps will be
* taken either (presumably because it is a cascaded port and a
* downstream SJA switch already did that).
* To disable the feature, we need to do different things depending on
* switch generation. On SJA1105 we need to set an invalid port, while
on SJA1110, which supports multiple cascaded ports, this field is a
* bitmask so it must be left zero.
*/
if (!priv->info->multiple_cascade_ports)
general_params->casc_port = ds->num_ports;
return 0;
}
......@@ -2165,7 +2176,9 @@ static enum dsa_tag_protocol
sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_SJA1105;
struct sja1105_private *priv = ds->priv;
return priv->info->tag_proto;
}
static int sja1105_find_free_subvlan(u16 *subvlan_map, bool pvid)
......
......@@ -79,6 +79,7 @@ static int sja1105_change_rxtstamping(struct sja1105_private *priv,
priv->tagger_data.stampable_skb = NULL;
}
ptp_cancel_worker_sync(ptp_data->clock);
skb_queue_purge(&ptp_data->skb_txtstamp_queue);
skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING);
......@@ -397,7 +398,7 @@ static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
*shwt = (struct skb_shared_hwtstamps) {0};
ts = SJA1105_SKB_CB(skb)->meta_tstamp;
ts = SJA1105_SKB_CB(skb)->tstamp;
ts = sja1105_tstamp_reconstruct(ds, ticks, ts);
shwt->hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
......@@ -413,9 +414,7 @@ static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
return -1;
}
/* Called from dsa_skb_defer_rx_timestamp */
bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type)
bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
......@@ -431,6 +430,89 @@ bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
return true;
}
bool sja1110_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
u64 ts = SJA1105_SKB_CB(skb)->tstamp;
*shwt = (struct skb_shared_hwtstamps) {0};
shwt->hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
/* Don't defer */
return false;
}
/* Called from dsa_skb_defer_rx_timestamp */
bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type)
{
struct sja1105_private *priv = ds->priv;
return priv->info->rxtstamp(ds, port, skb);
}
void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
enum sja1110_meta_tstamp dir, u64 tstamp)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
struct skb_shared_hwtstamps shwt = {0};
/* We don't care about RX timestamps on the CPU port */
if (dir == SJA1110_META_TSTAMP_RX)
return;
spin_lock(&ptp_data->skb_txtstamp_queue.lock);
skb_queue_walk_safe(&ptp_data->skb_txtstamp_queue, skb, skb_tmp) {
if (SJA1105_SKB_CB(skb)->ts_id != ts_id)
continue;
__skb_unlink(skb, &ptp_data->skb_txtstamp_queue);
skb_match = skb;
break;
}
spin_unlock(&ptp_data->skb_txtstamp_queue.lock);
if (WARN_ON(!skb_match))
return;
shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp));
skb_complete_tx_timestamp(skb_match, &shwt);
}
EXPORT_SYMBOL_GPL(sja1110_process_meta_tstamp);
/* In addition to cloning the skb which is done by the common
* sja1105_port_txtstamp, we need to generate a timestamp ID and save the
* packet to the TX timestamping queue.
*/
void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
struct sja1105_private *priv = ds->priv;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
struct sja1105_port *sp = &priv->ports[port];
u8 ts_id;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
spin_lock(&sp->data->meta_lock);
ts_id = sp->data->ts_id;
/* Deal automatically with 8-bit wraparound */
sp->data->ts_id++;
SJA1105_SKB_CB(clone)->ts_id = ts_id;
spin_unlock(&sp->data->meta_lock);
skb_queue_tail(&ptp_data->skb_txtstamp_queue, clone);
}
/* Called from dsa_skb_tx_timestamp. This callback is just to clone
* the skb and have it available in SJA1105_SKB_CB in the .port_deferred_xmit
* callback, where we will timestamp it synchronously.
......@@ -449,6 +531,9 @@ void sja1105_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
return;
SJA1105_SKB_CB(skb)->clone = clone;
if (priv->info->txtstamp)
priv->info->txtstamp(ds, port, skb);
}
static int sja1105_ptp_reset(struct dsa_switch *ds)
......@@ -865,7 +950,10 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
.n_per_out = 1,
};
/* Only used on SJA1105 */
skb_queue_head_init(&ptp_data->skb_rxtstamp_queue);
/* Only used on SJA1110 */
skb_queue_head_init(&ptp_data->skb_txtstamp_queue);
spin_lock_init(&tagger_data->meta_lock);
ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev);
......@@ -890,6 +978,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
del_timer_sync(&ptp_data->extts_timer);
ptp_cancel_worker_sync(ptp_data->clock);
skb_queue_purge(&ptp_data->skb_txtstamp_queue);
skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
ptp_clock_unregister(ptp_data->clock);
ptp_data->clock = NULL;
......
......@@ -75,7 +75,12 @@ struct sja1105_ptp_cmd {
struct sja1105_ptp_data {
struct timer_list extts_timer;
/* Used only on SJA1105 to reconstruct partial timestamps */
struct sk_buff_head skb_rxtstamp_queue;
/* Used on SJA1110 where meta frames are generated only for
* 2-step TX timestamps
*/
struct sk_buff_head skb_txtstamp_queue;
struct ptp_clock_info caps;
struct ptp_clock *clock;
struct sja1105_ptp_cmd cmd;
......@@ -122,6 +127,10 @@ int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta);
int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd,
sja1105_spi_rw_mode_t rw);
bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
bool sja1110_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
#else
struct sja1105_ptp_cmd;
......@@ -184,6 +193,10 @@ static inline int sja1105_ptp_commit(struct dsa_switch *ds,
#define sja1105_hwtstamp_set NULL
#define sja1105_rxtstamp NULL
#define sja1110_rxtstamp NULL
#define sja1110_txtstamp NULL
#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */
#endif /* _SJA1105_PTP_H */
......@@ -569,6 +569,7 @@ const struct sja1105_info sja1105e_info = {
.static_ops = sja1105e_table_ops,
.dyn_ops = sja1105et_dyn_ops,
.qinq_tpid = ETH_P_8021Q,
.tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = false,
.ptp_ts_bits = 24,
.ptpegr_ts_bytes = 4,
......@@ -579,6 +580,7 @@ const struct sja1105_info sja1105e_info = {
.fdb_add_cmd = sja1105et_fdb_add,
.fdb_del_cmd = sja1105et_fdb_del,
.ptp_cmd_packing = sja1105et_ptp_cmd_packing,
.rxtstamp = sja1105_rxtstamp,
.clocking_setup = sja1105_clocking_setup,
.regs = &sja1105et_regs,
.port_speed = {
......@@ -600,6 +602,7 @@ const struct sja1105_info sja1105t_info = {
.static_ops = sja1105t_table_ops,
.dyn_ops = sja1105et_dyn_ops,
.qinq_tpid = ETH_P_8021Q,
.tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = false,
.ptp_ts_bits = 24,
.ptpegr_ts_bytes = 4,
......@@ -610,6 +613,7 @@ const struct sja1105_info sja1105t_info = {
.fdb_add_cmd = sja1105et_fdb_add,
.fdb_del_cmd = sja1105et_fdb_del,
.ptp_cmd_packing = sja1105et_ptp_cmd_packing,
.rxtstamp = sja1105_rxtstamp,
.clocking_setup = sja1105_clocking_setup,
.regs = &sja1105et_regs,
.port_speed = {
......@@ -631,6 +635,7 @@ const struct sja1105_info sja1105p_info = {
.static_ops = sja1105p_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.qinq_tpid = ETH_P_8021AD,
.tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
......@@ -642,6 +647,7 @@ const struct sja1105_info sja1105p_info = {
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1105_rxtstamp,
.clocking_setup = sja1105_clocking_setup,
.regs = &sja1105pqrs_regs,
.port_speed = {
......@@ -663,6 +669,7 @@ const struct sja1105_info sja1105q_info = {
.static_ops = sja1105q_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.qinq_tpid = ETH_P_8021AD,
.tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
......@@ -674,6 +681,7 @@ const struct sja1105_info sja1105q_info = {
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1105_rxtstamp,
.clocking_setup = sja1105_clocking_setup,
.regs = &sja1105pqrs_regs,
.port_speed = {
......@@ -695,6 +703,7 @@ const struct sja1105_info sja1105r_info = {
.static_ops = sja1105r_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.qinq_tpid = ETH_P_8021AD,
.tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
......@@ -706,6 +715,7 @@ const struct sja1105_info sja1105r_info = {
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1105_rxtstamp,
.clocking_setup = sja1105_clocking_setup,
.regs = &sja1105pqrs_regs,
.port_speed = {
......@@ -729,6 +739,7 @@ const struct sja1105_info sja1105s_info = {
.dyn_ops = sja1105pqrs_dyn_ops,
.regs = &sja1105pqrs_regs,
.qinq_tpid = ETH_P_8021AD,
.tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
......@@ -740,6 +751,7 @@ const struct sja1105_info sja1105s_info = {
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1105_rxtstamp,
.clocking_setup = sja1105_clocking_setup,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
......@@ -762,7 +774,9 @@ const struct sja1105_info sja1110a_info = {
.dyn_ops = sja1110_dyn_ops,
.regs = &sja1110_regs,
.qinq_tpid = ETH_P_8021AD,
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
......@@ -773,6 +787,8 @@ const struct sja1105_info sja1110a_info = {
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.clocking_setup = sja1110_clocking_setup,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
......@@ -807,7 +823,9 @@ const struct sja1105_info sja1110b_info = {
.dyn_ops = sja1110_dyn_ops,
.regs = &sja1110_regs,
.qinq_tpid = ETH_P_8021AD,
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
......@@ -818,6 +836,8 @@ const struct sja1105_info sja1110b_info = {
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.clocking_setup = sja1110_clocking_setup,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
......@@ -852,7 +872,9 @@ const struct sja1105_info sja1110c_info = {
.dyn_ops = sja1110_dyn_ops,
.regs = &sja1110_regs,
.qinq_tpid = ETH_P_8021AD,
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
......@@ -863,6 +885,8 @@ const struct sja1105_info sja1110c_info = {
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.clocking_setup = sja1110_clocking_setup,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
......@@ -897,7 +921,9 @@ const struct sja1105_info sja1110d_info = {
.dyn_ops = sja1110_dyn_ops,
.regs = &sja1110_regs,
.qinq_tpid = ETH_P_8021AD,
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
......@@ -908,6 +934,8 @@ const struct sja1105_info sja1110d_info = {
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.clocking_setup = sja1110_clocking_setup,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
......
......@@ -212,6 +212,7 @@ size_t sja1110_general_params_entry_packing(void *buf, void *entry_ptr,
sja1105_packing(buf, &entry->egrmirrdei, 110, 110, size, op);
sja1105_packing(buf, &entry->replay_port, 109, 106, size, op);
sja1105_packing(buf, &entry->tdmaconfigidx, 70, 67, size, op);
sja1105_packing(buf, &entry->header_type, 64, 49, size, op);
sja1105_packing(buf, &entry->tte_en, 16, 16, size, op);
return size;
}
......
......@@ -217,6 +217,7 @@ struct sja1105_general_params_entry {
/* SJA1110 only */
u64 tte_en;
u64 tdmaconfigidx;
u64 header_type;
};
struct sja1105_schedule_entry_points_entry {
......
......@@ -37,8 +37,6 @@ struct dsa_8021q_context {
#define DSA_8021Q_N_SUBVLAN 8
#if IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q)
int dsa_8021q_setup(struct dsa_8021q_context *ctx, bool enabled);
int dsa_8021q_crosschip_bridge_join(struct dsa_8021q_context *ctx, int port,
......@@ -52,6 +50,9 @@ int dsa_8021q_crosschip_bridge_leave(struct dsa_8021q_context *ctx, int port,
struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
u16 tpid, u16 tci);
void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
int *subvlan);
u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port);
u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port);
......@@ -70,78 +71,4 @@ bool vid_is_dsa_8021q_txvlan(u16 vid);
bool vid_is_dsa_8021q(u16 vid);
#else
int dsa_8021q_setup(struct dsa_8021q_context *ctx, bool enabled)
{
return 0;
}
int dsa_8021q_crosschip_bridge_join(struct dsa_8021q_context *ctx, int port,
struct dsa_8021q_context *other_ctx,
int other_port)
{
return 0;
}
int dsa_8021q_crosschip_bridge_leave(struct dsa_8021q_context *ctx, int port,
struct dsa_8021q_context *other_ctx,
int other_port)
{
return 0;
}
struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
u16 tpid, u16 tci)
{
return NULL;
}
u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port)
{
return 0;
}
u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port)
{
return 0;
}
u16 dsa_8021q_rx_vid_subvlan(struct dsa_switch *ds, int port, u16 subvlan)
{
return 0;
}
int dsa_8021q_rx_switch_id(u16 vid)
{
return 0;
}
int dsa_8021q_rx_source_port(u16 vid)
{
return 0;
}
u16 dsa_8021q_rx_subvlan(u16 vid)
{
return 0;
}
bool vid_is_dsa_8021q_rxvlan(u16 vid)
{
return false;
}
bool vid_is_dsa_8021q_txvlan(u16 vid)
{
return false;
}
bool vid_is_dsa_8021q(u16 vid)
{
return false;
}
#endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */
#endif /* _NET_DSA_8021Q_H */
......@@ -14,6 +14,7 @@
#define ETH_P_SJA1105 ETH_P_DSA_8021Q
#define ETH_P_SJA1105_META 0x0008
#define ETH_P_SJA1110 0xdadc
/* IEEE 802.3 Annex 57A: Slow Protocols PDUs (01:80:C2:xx:xx:xx) */
#define SJA1105_LINKLOCAL_FILTER_A 0x0180C2000000ull
......@@ -44,11 +45,14 @@ struct sja1105_tagger_data {
*/
spinlock_t meta_lock;
unsigned long state;
u8 ts_id;
};
struct sja1105_skb_cb {
struct sk_buff *clone;
u32 meta_tstamp;
u64 tstamp;
/* Only valid for packets cloned for 2-step TX timestamping */
u8 ts_id;
};
#define SJA1105_SKB_CB(skb) \
......@@ -65,4 +69,24 @@ struct sja1105_port {
u16 xmit_tpid;
};
enum sja1110_meta_tstamp {
SJA1110_META_TSTAMP_TX = 0,
SJA1110_META_TSTAMP_RX = 1,
};
#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP)
void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
enum sja1110_meta_tstamp dir, u64 tstamp);
#else
static inline void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port,
u8 ts_id, enum sja1110_meta_tstamp dir,
u64 tstamp)
{
}
#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */
#endif /* _NET_DSA_SJA1105_H */
......@@ -50,6 +50,7 @@ struct phylink_link_state;
#define DSA_TAG_PROTO_OCELOT_8021Q_VALUE 20
#define DSA_TAG_PROTO_SEVILLE_VALUE 21
#define DSA_TAG_PROTO_BRCM_LEGACY_VALUE 22
#define DSA_TAG_PROTO_SJA1110_VALUE 23
enum dsa_tag_protocol {
DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
......@@ -75,6 +76,7 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_XRS700X = DSA_TAG_PROTO_XRS700X_VALUE,
DSA_TAG_PROTO_OCELOT_8021Q = DSA_TAG_PROTO_OCELOT_8021Q_VALUE,
DSA_TAG_PROTO_SEVILLE = DSA_TAG_PROTO_SEVILLE_VALUE,
DSA_TAG_PROTO_SJA1110 = DSA_TAG_PROTO_SJA1110_VALUE,
};
struct packet_type;
......@@ -91,7 +93,8 @@ struct dsa_device_ops {
* as regular on the master net device.
*/
bool (*filter)(const struct sk_buff *skb, struct net_device *dev);
unsigned int overhead;
unsigned int needed_headroom;
unsigned int needed_tailroom;
const char *name;
enum dsa_tag_protocol proto;
/* Some tagging protocols either mangle or shift the destination MAC
......@@ -100,7 +103,6 @@ struct dsa_device_ops {
* its RX filter.
*/
bool promisc_on_master;
bool tail_tag;
};
/* This structure defines the control interfaces that are overlayed by the
......@@ -926,7 +928,7 @@ static inline void dsa_tag_generic_flow_dissect(const struct sk_buff *skb,
{
#if IS_ENABLED(CONFIG_NET_DSA)
const struct dsa_device_ops *ops = skb->dev->dsa_ptr->tag_ops;
int tag_len = ops->overhead;
int tag_len = ops->needed_headroom;
*offset = tag_len;
*proto = ((__be16 *)skb->data)[(tag_len / 2) - 1];
......
......@@ -944,7 +944,7 @@ bool __skb_flow_dissect(const struct net *net,
ops = skb->dev->dsa_ptr->tag_ops;
/* Tail taggers don't break flow dissection */
if (!ops->tail_tag) {
if (!ops->needed_headroom) {
if (ops->flow_dissect)
ops->flow_dissect(skb, &proto, &offset);
else
......
......@@ -154,6 +154,11 @@ const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
bool dsa_schedule_work(struct work_struct *work);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
{
return ops->needed_headroom + ops->needed_tailroom;
}
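/* Worked example (editor's sketch): for the sja1110 tagger introduced by
 * this series, needed_headroom = SJA1110_HEADER_LEN + VLAN_HLEN = 4 + 4 = 8
 * octets and needed_tailroom = SJA1110_RX_TRAILER_LEN +
 * SJA1110_MAX_PADDING_LEN = 13 + 15 = 28 octets, so this helper returns 36
 * and the master MTU becomes 1500 + 36 = 1536.
 */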
/* master.c */
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp);
void dsa_master_teardown(struct net_device *dev);
......
......@@ -346,10 +346,12 @@ static struct lock_class_key dsa_master_addr_list_lock_key;
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
{
int mtu = ETH_DATA_LEN + cpu_dp->tag_ops->overhead;
const struct dsa_device_ops *tag_ops = cpu_dp->tag_ops;
struct dsa_switch *ds = cpu_dp->ds;
struct device_link *consumer_link;
int ret;
int mtu, ret;
mtu = ETH_DATA_LEN + dsa_tag_protocol_overhead(tag_ops);
/* The DSA master must use SET_NETDEV_DEV for this to work. */
consumer_link = device_link_add(ds->dev, dev->dev.parent,
......
......@@ -1569,7 +1569,7 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
old_master_mtu = master->mtu;
new_master_mtu = largest_mtu + cpu_dp->tag_ops->overhead;
new_master_mtu = largest_mtu + dsa_tag_protocol_overhead(cpu_dp->tag_ops);
if (new_master_mtu > mtu_limit)
return -ERANGE;
......@@ -1605,7 +1605,7 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
out_port_failed:
if (new_master_mtu != old_master_mtu)
dsa_port_mtu_change(cpu_dp, old_master_mtu -
cpu_dp->tag_ops->overhead,
dsa_tag_protocol_overhead(cpu_dp->tag_ops),
true);
out_cpu_failed:
if (new_master_mtu != old_master_mtu)
......@@ -1824,10 +1824,8 @@ void dsa_slave_setup_tagger(struct net_device *slave)
const struct dsa_port *cpu_dp = dp->cpu_dp;
struct net_device *master = cpu_dp->master;
if (cpu_dp->tag_ops->tail_tag)
slave->needed_tailroom = cpu_dp->tag_ops->overhead;
else
slave->needed_headroom = cpu_dp->tag_ops->overhead;
slave->needed_headroom = cpu_dp->tag_ops->needed_headroom;
slave->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
/* Try to save one extra realloc later in the TX path (in the master)
* by also inheriting the master's needed headroom and tailroom.
* The 8021q driver also does this.
......
......@@ -471,4 +471,27 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
}
EXPORT_SYMBOL_GPL(dsa_8021q_xmit);
void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
int *subvlan)
{
u16 vid, tci;
skb_push_rcsum(skb, ETH_HLEN);
if (skb_vlan_tag_present(skb)) {
tci = skb_vlan_tag_get(skb);
__vlan_hwaccel_clear_tag(skb);
} else {
__skb_vlan_pop(skb, &tci);
}
skb_pull_rcsum(skb, ETH_HLEN);
vid = tci & VLAN_VID_MASK;
*source_port = dsa_8021q_rx_source_port(vid);
*switch_id = dsa_8021q_rx_switch_id(vid);
*subvlan = dsa_8021q_rx_subvlan(vid);
skb->priority = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}
EXPORT_SYMBOL_GPL(dsa_8021q_rcv);
MODULE_LICENSE("GPL v2");
......@@ -85,7 +85,7 @@ static const struct dsa_device_ops ar9331_netdev_ops = {
.proto = DSA_TAG_PROTO_AR9331,
.xmit = ar9331_tag_xmit,
.rcv = ar9331_tag_rcv,
.overhead = AR9331_HDR_LEN,
.needed_headroom = AR9331_HDR_LEN,
};
MODULE_LICENSE("GPL v2");
......
......@@ -205,7 +205,7 @@ static const struct dsa_device_ops brcm_netdev_ops = {
.proto = DSA_TAG_PROTO_BRCM,
.xmit = brcm_tag_xmit,
.rcv = brcm_tag_rcv,
.overhead = BRCM_TAG_LEN,
.needed_headroom = BRCM_TAG_LEN,
};
DSA_TAG_DRIVER(brcm_netdev_ops);
......@@ -286,7 +286,7 @@ static const struct dsa_device_ops brcm_legacy_netdev_ops = {
.proto = DSA_TAG_PROTO_BRCM_LEGACY,
.xmit = brcm_leg_tag_xmit,
.rcv = brcm_leg_tag_rcv,
.overhead = BRCM_LEG_TAG_LEN,
.needed_headroom = BRCM_LEG_TAG_LEN,
};
DSA_TAG_DRIVER(brcm_legacy_netdev_ops);
......@@ -314,7 +314,7 @@ static const struct dsa_device_ops brcm_prepend_netdev_ops = {
.proto = DSA_TAG_PROTO_BRCM_PREPEND,
.xmit = brcm_tag_xmit_prepend,
.rcv = brcm_tag_rcv_prepend,
.overhead = BRCM_TAG_LEN,
.needed_headroom = BRCM_TAG_LEN,
};
DSA_TAG_DRIVER(brcm_prepend_netdev_ops);
......
......@@ -303,7 +303,7 @@ static const struct dsa_device_ops dsa_netdev_ops = {
.proto = DSA_TAG_PROTO_DSA,
.xmit = dsa_xmit,
.rcv = dsa_rcv,
.overhead = DSA_HLEN,
.needed_headroom = DSA_HLEN,
};
DSA_TAG_DRIVER(dsa_netdev_ops);
......@@ -346,7 +346,7 @@ static const struct dsa_device_ops edsa_netdev_ops = {
.proto = DSA_TAG_PROTO_EDSA,
.xmit = edsa_xmit,
.rcv = edsa_rcv,
.overhead = EDSA_HLEN,
.needed_headroom = EDSA_HLEN,
};
DSA_TAG_DRIVER(edsa_netdev_ops);
......
......@@ -103,7 +103,7 @@ static const struct dsa_device_ops gswip_netdev_ops = {
.proto = DSA_TAG_PROTO_GSWIP,
.xmit = gswip_tag_xmit,
.rcv = gswip_tag_rcv,
.overhead = GSWIP_RX_HEADER_LEN,
.needed_headroom = GSWIP_RX_HEADER_LEN,
};
MODULE_LICENSE("GPL");
......
......@@ -54,8 +54,7 @@ static const struct dsa_device_ops hellcreek_netdev_ops = {
.proto = DSA_TAG_PROTO_HELLCREEK,
.xmit = hellcreek_xmit,
.rcv = hellcreek_rcv,
.overhead = HELLCREEK_TAG_LEN,
.tail_tag = true,
.needed_tailroom = HELLCREEK_TAG_LEN,
};
MODULE_LICENSE("Dual MIT/GPL");
......
......@@ -77,8 +77,7 @@ static const struct dsa_device_ops ksz8795_netdev_ops = {
.proto = DSA_TAG_PROTO_KSZ8795,
.xmit = ksz8795_xmit,
.rcv = ksz8795_rcv,
.overhead = KSZ_INGRESS_TAG_LEN,
.tail_tag = true,
.needed_tailroom = KSZ_INGRESS_TAG_LEN,
};
DSA_TAG_DRIVER(ksz8795_netdev_ops);
......@@ -149,8 +148,7 @@ static const struct dsa_device_ops ksz9477_netdev_ops = {
.proto = DSA_TAG_PROTO_KSZ9477,
.xmit = ksz9477_xmit,
.rcv = ksz9477_rcv,
.overhead = KSZ9477_INGRESS_TAG_LEN,
.tail_tag = true,
.needed_tailroom = KSZ9477_INGRESS_TAG_LEN,
};
DSA_TAG_DRIVER(ksz9477_netdev_ops);
......@@ -183,8 +181,7 @@ static const struct dsa_device_ops ksz9893_netdev_ops = {
.proto = DSA_TAG_PROTO_KSZ9893,
.xmit = ksz9893_xmit,
.rcv = ksz9477_rcv,
.overhead = KSZ_INGRESS_TAG_LEN,
.tail_tag = true,
.needed_tailroom = KSZ_INGRESS_TAG_LEN,
};
DSA_TAG_DRIVER(ksz9893_netdev_ops);
......
......@@ -125,7 +125,7 @@ static const struct dsa_device_ops lan9303_netdev_ops = {
.proto = DSA_TAG_PROTO_LAN9303,
.xmit = lan9303_xmit,
.rcv = lan9303_rcv,
.overhead = LAN9303_TAG_LEN,
.needed_headroom = LAN9303_TAG_LEN,
};
MODULE_LICENSE("GPL");
......
......@@ -102,7 +102,7 @@ static const struct dsa_device_ops mtk_netdev_ops = {
.proto = DSA_TAG_PROTO_MTK,
.xmit = mtk_tag_xmit,
.rcv = mtk_tag_rcv,
.overhead = MTK_HDR_LEN,
.needed_headroom = MTK_HDR_LEN,
};
MODULE_LICENSE("GPL");
......
......@@ -143,7 +143,7 @@ static const struct dsa_device_ops ocelot_netdev_ops = {
.proto = DSA_TAG_PROTO_OCELOT,
.xmit = ocelot_xmit,
.rcv = ocelot_rcv,
.overhead = OCELOT_TOTAL_TAG_LEN,
.needed_headroom = OCELOT_TOTAL_TAG_LEN,
.promisc_on_master = true,
};
......@@ -155,7 +155,7 @@ static const struct dsa_device_ops seville_netdev_ops = {
.proto = DSA_TAG_PROTO_SEVILLE,
.xmit = seville_xmit,
.rcv = ocelot_rcv,
.overhead = OCELOT_TOTAL_TAG_LEN,
.needed_headroom = OCELOT_TOTAL_TAG_LEN,
.promisc_on_master = true,
};
......
......@@ -41,29 +41,15 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
struct net_device *netdev,
struct packet_type *pt)
{
int src_port, switch_id, qos_class;
u16 vid, tci;
int src_port, switch_id, subvlan;
skb_push_rcsum(skb, ETH_HLEN);
if (skb_vlan_tag_present(skb)) {
tci = skb_vlan_tag_get(skb);
__vlan_hwaccel_clear_tag(skb);
} else {
__skb_vlan_pop(skb, &tci);
}
skb_pull_rcsum(skb, ETH_HLEN);
vid = tci & VLAN_VID_MASK;
src_port = dsa_8021q_rx_source_port(vid);
switch_id = dsa_8021q_rx_switch_id(vid);
qos_class = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
dsa_8021q_rcv(skb, &src_port, &switch_id, &subvlan);
skb->dev = dsa_master_find_slave(netdev, switch_id, src_port);
if (!skb->dev)
return NULL;
skb->offload_fwd_mark = 1;
skb->priority = qos_class;
return skb;
}
......@@ -73,7 +59,7 @@ static const struct dsa_device_ops ocelot_8021q_netdev_ops = {
.proto = DSA_TAG_PROTO_OCELOT_8021Q,
.xmit = ocelot_xmit,
.rcv = ocelot_rcv,
.overhead = VLAN_HLEN,
.needed_headroom = VLAN_HLEN,
.promisc_on_master = true,
};
......
......@@ -91,7 +91,7 @@ static const struct dsa_device_ops qca_netdev_ops = {
.proto = DSA_TAG_PROTO_QCA,
.xmit = qca_tag_xmit,
.rcv = qca_tag_rcv,
.overhead = QCA_HDR_LEN,
.needed_headroom = QCA_HDR_LEN,
};
MODULE_LICENSE("GPL");
......
......@@ -124,7 +124,7 @@ static const struct dsa_device_ops rtl4a_netdev_ops = {
.proto = DSA_TAG_PROTO_RTL4_A,
.xmit = rtl4a_tag_xmit,
.rcv = rtl4a_tag_rcv,
.overhead = RTL4_A_HDR_LEN,
.needed_headroom = RTL4_A_HDR_LEN,
};
module_dsa_tag_driver(rtl4a_netdev_ops);
......
......@@ -7,6 +7,52 @@
#include <linux/packing.h>
#include "dsa_priv.h"
/* Is this a TX or an RX header? */
#define SJA1110_HEADER_HOST_TO_SWITCH BIT(15)
/* RX header */
#define SJA1110_RX_HEADER_IS_METADATA BIT(14)
#define SJA1110_RX_HEADER_HOST_ONLY BIT(13)
#define SJA1110_RX_HEADER_HAS_TRAILER BIT(12)
/* Trap-to-host format (no trailer present) */
#define SJA1110_RX_HEADER_SRC_PORT(x) (((x) & GENMASK(7, 4)) >> 4)
#define SJA1110_RX_HEADER_SWITCH_ID(x) ((x) & GENMASK(3, 0))
/* Timestamp format (trailer present) */
#define SJA1110_RX_HEADER_TRAILER_POS(x) ((x) & GENMASK(11, 0))
#define SJA1110_RX_TRAILER_SWITCH_ID(x) (((x) & GENMASK(7, 4)) >> 4)
#define SJA1110_RX_TRAILER_SRC_PORT(x) ((x) & GENMASK(3, 0))
/* Meta frame format (for 2-step TX timestamps) */
#define SJA1110_RX_HEADER_N_TS(x) (((x) & GENMASK(8, 4)) >> 4)
/* TX header */
#define SJA1110_TX_HEADER_UPDATE_TC BIT(14)
#define SJA1110_TX_HEADER_TAKE_TS BIT(13)
#define SJA1110_TX_HEADER_TAKE_TS_CASC BIT(12)
#define SJA1110_TX_HEADER_HAS_TRAILER BIT(11)
/* Only valid if SJA1110_TX_HEADER_HAS_TRAILER is false */
#define SJA1110_TX_HEADER_PRIO(x) (((x) << 7) & GENMASK(10, 7))
#define SJA1110_TX_HEADER_TSTAMP_ID(x) ((x) & GENMASK(7, 0))
/* Only valid if SJA1110_TX_HEADER_HAS_TRAILER is true */
#define SJA1110_TX_HEADER_TRAILER_POS(x) ((x) & GENMASK(10, 0))
#define SJA1110_TX_TRAILER_TSTAMP_ID(x) (((x) << 24) & GENMASK(31, 24))
#define SJA1110_TX_TRAILER_PRIO(x) (((x) << 21) & GENMASK(23, 21))
#define SJA1110_TX_TRAILER_SWITCHID(x) (((x) << 12) & GENMASK(15, 12))
#define SJA1110_TX_TRAILER_DESTPORTS(x) (((x) << 1) & GENMASK(11, 1))
#define SJA1110_META_TSTAMP_SIZE 10
#define SJA1110_HEADER_LEN 4
#define SJA1110_RX_TRAILER_LEN 13
#define SJA1110_TX_TRAILER_LEN 4
#define SJA1110_MAX_PADDING_LEN 15
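/* Worked example (illustrative value, not taken from the hardware manual):
 * an RX header word of 0x2023 has SJA1110_RX_HEADER_HOST_ONLY set and the
 * metadata/trailer bits clear, and decodes to
 * SJA1110_RX_HEADER_SRC_PORT() = 2 and SJA1110_RX_HEADER_SWITCH_ID() = 3.
 */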
/* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */
static inline bool sja1105_is_link_local(const struct sk_buff *skb)
{
......@@ -140,6 +186,57 @@ static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
((pcp << VLAN_PRIO_SHIFT) | tx_vid));
}
static struct sk_buff *sja1110_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
struct dsa_port *dp = dsa_slave_to_port(netdev);
u16 tx_vid = dsa_8021q_tx_vid(dp->ds, dp->index);
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
struct ethhdr *eth_hdr;
__be32 *tx_trailer;
__be16 *tx_header;
int trailer_pos;
/* Transmitting control packets is done using in-band control
* extensions, while data packets are transmitted using
* tag_8021q TX VLANs.
*/
if (likely(!sja1105_is_link_local(skb)))
return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp->priv),
((pcp << VLAN_PRIO_SHIFT) | tx_vid));
skb_push(skb, SJA1110_HEADER_LEN);
/* Move Ethernet header to the left, making space for DSA tag */
memmove(skb->data, skb->data + SJA1110_HEADER_LEN, 2 * ETH_ALEN);
trailer_pos = skb->len;
/* On TX, skb->data points to skb_mac_header(skb) */
eth_hdr = (struct ethhdr *)skb->data;
tx_header = (__be16 *)(eth_hdr + 1);
tx_trailer = skb_put(skb, SJA1110_TX_TRAILER_LEN);
eth_hdr->h_proto = htons(ETH_P_SJA1110);
*tx_header = htons(SJA1110_HEADER_HOST_TO_SWITCH |
SJA1110_TX_HEADER_HAS_TRAILER |
SJA1110_TX_HEADER_TRAILER_POS(trailer_pos));
*tx_trailer = cpu_to_be32(SJA1110_TX_TRAILER_PRIO(pcp) |
SJA1110_TX_TRAILER_SWITCHID(dp->ds->index) |
SJA1110_TX_TRAILER_DESTPORTS(BIT(dp->index)));
if (clone) {
u8 ts_id = SJA1105_SKB_CB(clone)->ts_id;
*tx_header |= htons(SJA1110_TX_HEADER_TAKE_TS);
*tx_trailer |= cpu_to_be32(SJA1110_TX_TRAILER_TSTAMP_ID(ts_id));
}
return skb;
}
static void sja1105_transfer_meta(struct sk_buff *skb,
const struct sja1105_meta *meta)
{
......@@ -147,7 +244,7 @@ static void sja1105_transfer_meta(struct sk_buff *skb,
hdr->h_dest[3] = meta->dmac_byte_3;
hdr->h_dest[4] = meta->dmac_byte_4;
SJA1105_SKB_CB(skb)->meta_tstamp = meta->tstamp;
SJA1105_SKB_CB(skb)->tstamp = meta->tstamp;
}
/* This is a simple state machine which follows the hardware mechanism of
......@@ -275,46 +372,38 @@ static void sja1105_decode_subvlan(struct sk_buff *skb, u16 subvlan)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
}
static bool sja1105_skb_has_tag_8021q(const struct sk_buff *skb)
{
u16 tpid = ntohs(eth_hdr(skb)->h_proto);
return tpid == ETH_P_SJA1105 || tpid == ETH_P_8021Q ||
skb_vlan_tag_present(skb);
}
static bool sja1110_skb_has_inband_control_extension(const struct sk_buff *skb)
{
return ntohs(eth_hdr(skb)->h_proto) == ETH_P_SJA1110;
}
static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
struct net_device *netdev,
struct packet_type *pt)
{
int source_port, switch_id, subvlan = 0;
struct sja1105_meta meta = {0};
int source_port, switch_id;
struct ethhdr *hdr;
u16 tpid, vid, tci;
bool is_link_local;
u16 subvlan = 0;
bool is_tagged;
bool is_meta;
hdr = eth_hdr(skb);
tpid = ntohs(hdr->h_proto);
is_tagged = (tpid == ETH_P_SJA1105 || tpid == ETH_P_8021Q ||
skb_vlan_tag_present(skb));
is_link_local = sja1105_is_link_local(skb);
is_meta = sja1105_is_meta_frame(skb);
skb->offload_fwd_mark = 1;
if (is_tagged) {
if (sja1105_skb_has_tag_8021q(skb)) {
/* Normal traffic path. */
skb_push_rcsum(skb, ETH_HLEN);
if (skb_vlan_tag_present(skb)) {
tci = skb_vlan_tag_get(skb);
__vlan_hwaccel_clear_tag(skb);
} else {
__skb_vlan_pop(skb, &tci);
}
skb_pull_rcsum(skb, ETH_HLEN);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
vid = tci & VLAN_VID_MASK;
source_port = dsa_8021q_rx_source_port(vid);
switch_id = dsa_8021q_rx_switch_id(vid);
skb->priority = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
subvlan = dsa_8021q_rx_subvlan(vid);
dsa_8021q_rcv(skb, &source_port, &switch_id, &subvlan);
} else if (is_link_local) {
/* Management traffic path. Switch embeds the switch ID and
* port ID into bytes of the destination MAC, courtesy of
......@@ -346,6 +435,138 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
is_meta);
}
static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
{
int switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
int n_ts = SJA1110_RX_HEADER_N_TS(rx_header);
struct net_device *master = skb->dev;
struct dsa_port *cpu_dp;
u8 *buf = skb->data + 2;
struct dsa_switch *ds;
int i;
cpu_dp = master->dsa_ptr;
ds = dsa_switch_find(cpu_dp->dst->index, switch_id);
if (!ds) {
net_err_ratelimited("%s: cannot find switch id %d\n",
master->name, switch_id);
return NULL;
}
for (i = 0; i <= n_ts; i++) {
u8 ts_id, source_port, dir;
u64 tstamp;
ts_id = buf[0];
source_port = (buf[1] & GENMASK(7, 4)) >> 4;
dir = (buf[1] & BIT(3)) >> 3;
tstamp = be64_to_cpu(*(__be64 *)(buf + 2));
sja1110_process_meta_tstamp(ds, source_port, ts_id, dir,
tstamp);
buf += SJA1110_META_TSTAMP_SIZE;
}
/* Discard the meta frame, we've consumed the timestamps it contained */
return NULL;
}
static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb,
int *source_port,
int *switch_id)
{
u16 rx_header;
if (unlikely(!pskb_may_pull(skb, SJA1110_HEADER_LEN)))
return NULL;
/* skb->data points to skb_mac_header(skb) + ETH_HLEN, which is exactly
* what we need because the caller has checked the EtherType (which is
* located 2 bytes back) and we just need a pointer to the header that
* comes afterwards.
*/
rx_header = ntohs(*(__be16 *)skb->data);
if (rx_header & SJA1110_RX_HEADER_IS_METADATA)
return sja1110_rcv_meta(skb, rx_header);
/* Timestamp frame, we have a trailer */
if (rx_header & SJA1110_RX_HEADER_HAS_TRAILER) {
int start_of_padding = SJA1110_RX_HEADER_TRAILER_POS(rx_header);
u8 *rx_trailer = skb_tail_pointer(skb) - SJA1110_RX_TRAILER_LEN;
u64 *tstamp = &SJA1105_SKB_CB(skb)->tstamp;
u8 last_byte = rx_trailer[12];
/* The timestamp is unaligned, so we need to use packing()
* to get it
*/
packing(rx_trailer, tstamp, 63, 0, 8, UNPACK, 0);
*source_port = SJA1110_RX_TRAILER_SRC_PORT(last_byte);
*switch_id = SJA1110_RX_TRAILER_SWITCH_ID(last_byte);
/* skb->len counts from skb->data, while start_of_padding
* counts from the destination MAC address. Right now skb->data
* is still as set by the DSA master, so to trim away the
* padding and trailer we need to account for the fact that
* skb->data points to skb_mac_header(skb) + ETH_HLEN.
*/
pskb_trim_rcsum(skb, start_of_padding - ETH_HLEN);
/* Trap-to-host frame, no timestamp trailer */
} else {
*source_port = SJA1110_RX_HEADER_SRC_PORT(rx_header);
*switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
}
/* Advance skb->data past the DSA header */
skb_pull_rcsum(skb, SJA1110_HEADER_LEN);
/* Remove the DSA header */
memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - SJA1110_HEADER_LEN,
2 * ETH_ALEN);
/* With skb->data in its final place, update the MAC header
* so that eth_hdr() continues to work properly.
*/
skb_set_mac_header(skb, -ETH_HLEN);
return skb;
}
static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
struct net_device *netdev,
struct packet_type *pt)
{
int source_port = -1, switch_id = -1, subvlan = 0;
skb->offload_fwd_mark = 1;
if (sja1110_skb_has_inband_control_extension(skb)) {
skb = sja1110_rcv_inband_control_extension(skb, &source_port,
&switch_id);
if (!skb)
return NULL;
}
/* Packets with in-band control extensions might still have RX VLANs */
if (likely(sja1105_skb_has_tag_8021q(skb)))
dsa_8021q_rcv(skb, &source_port, &switch_id, &subvlan);
skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
if (!skb->dev) {
netdev_warn(netdev,
"Couldn't decode source port %d and switch id %d\n",
source_port, switch_id);
return NULL;
}
if (subvlan)
sja1105_decode_subvlan(skb, subvlan);
return skb;
}
static void sja1105_flow_dissect(const struct sk_buff *skb, __be16 *proto,
int *offset)
{
......@@ -356,18 +577,53 @@ static void sja1105_flow_dissect(const struct sk_buff *skb, __be16 *proto,
dsa_tag_generic_flow_dissect(skb, proto, offset);
}
static void sja1110_flow_dissect(const struct sk_buff *skb, __be16 *proto,
int *offset)
{
/* Management frames have 2 DSA tags on RX, so the needed_headroom we
* declared is fine for the generic dissector adjustment procedure.
*/
if (unlikely(sja1105_is_link_local(skb)))
return dsa_tag_generic_flow_dissect(skb, proto, offset);
/* For the rest, there is a single DSA tag, the tag_8021q one */
*offset = VLAN_HLEN;
*proto = ((__be16 *)skb->data)[(VLAN_HLEN / 2) - 1];
}
static const struct dsa_device_ops sja1105_netdev_ops = {
.name = "sja1105",
.proto = DSA_TAG_PROTO_SJA1105,
.xmit = sja1105_xmit,
.rcv = sja1105_rcv,
.filter = sja1105_filter,
.overhead = VLAN_HLEN,
.needed_headroom = VLAN_HLEN,
.flow_dissect = sja1105_flow_dissect,
.promisc_on_master = true,
};
MODULE_LICENSE("GPL v2");
DSA_TAG_DRIVER(sja1105_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1105);
module_dsa_tag_driver(sja1105_netdev_ops);
static const struct dsa_device_ops sja1110_netdev_ops = {
.name = "sja1110",
.proto = DSA_TAG_PROTO_SJA1110,
.xmit = sja1110_xmit,
.rcv = sja1110_rcv,
.filter = sja1105_filter,
.flow_dissect = sja1110_flow_dissect,
.needed_headroom = SJA1110_HEADER_LEN + VLAN_HLEN,
.needed_tailroom = SJA1110_RX_TRAILER_LEN + SJA1110_MAX_PADDING_LEN,
};
DSA_TAG_DRIVER(sja1110_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1110);
static struct dsa_tag_driver *sja1105_tag_driver_array[] = {
&DSA_TAG_DRIVER_NAME(sja1105_netdev_ops),
&DSA_TAG_DRIVER_NAME(sja1110_netdev_ops),
};
module_dsa_tag_drivers(sja1105_tag_driver_array);
MODULE_LICENSE("GPL v2");
......@@ -55,8 +55,7 @@ static const struct dsa_device_ops trailer_netdev_ops = {
.proto = DSA_TAG_PROTO_TRAILER,
.xmit = trailer_xmit,
.rcv = trailer_rcv,
.overhead = 4,
.tail_tag = true,
.needed_tailroom = 4,
};
MODULE_LICENSE("GPL");
......
......@@ -56,8 +56,7 @@ static const struct dsa_device_ops xrs700x_netdev_ops = {
.proto = DSA_TAG_PROTO_XRS700X,
.xmit = xrs700x_xmit,
.rcv = xrs700x_rcv,
.overhead = 1,
.tail_tag = true,
.needed_tailroom = 1,
};
MODULE_LICENSE("GPL");
......