Commit 0b6b0d31 authored by David S. Miller

Merge branch 'qca8k-mdio'

Ansuel Smith says:

====================
Add support for qca8k mdio rw in Ethernet packet

The main reason for this is that we noticed some routing problems in the
switch and it seems assisted learning is needed. Considering mdio is
quite slow due to the indirect write, using this Ethernet alternative
seems to be quicker.

The qca8k switch supports a special way to pass mdio read/write requests
using specially crafted Ethernet packets.
This works by putting some defined data in the Ethernet header where the
mac source and dst would normally be placed. The Ethernet type field is
set to the qca header type and the header is marked as an mdio read/write
type.
This is used to communicate to the switch that this is a special packet
and should be parsed differently.
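
For reference, the crafted header overlays the area where the mac
addresses would normally sit. A minimal sketch of the layout, matching
the qca_mgmt_ethhdr struct this series adds to
include/linux/dsa/tag_qca.h:

	struct qca_mgmt_ethhdr {
		u32 command;	/* command bit 31:0 */
		u32 seq;	/* seq 63:32 */
		u32 mdio_data;	/* first 4 bytes of mdio data */
		__be16 hdr;	/* qca hdr */
	} __packed;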

Currently we use Ethernet packets for
- MIB counter
- mdio read/write configuration
- phy read/write for each port

The current implementation of this uses the completion API to wait for the
packet to be processed by the tagger, has a timeout that falls back to the
legacy mdio way, and uses a mutex to enforce one transaction at a time.
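
As a rough sketch (condensed from qca8k_read_eth()/qca8k_write_eth() in
the patch), the request path looks like:

	mutex_lock(&mgmt_eth_data->mutex);
	reinit_completion(&mgmt_eth_data->rw_done);
	/* Rolling seq_num; the tagger handler matches it against the
	 * ack packet before signalling completion.
	 */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;
	dev_queue_xmit(skb);
	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
	mutex_unlock(&mgmt_eth_data->mutex);
	/* ret <= 0 (timeout) or a missing ack makes the caller fall
	 * back to the legacy mdio way.
	 */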

We now have connect()/disconnect() ops for the tagger. They are used to
allocate priv data in the dsa priv. The header still has to be put in a
global include to make it usable by a dsa driver.
They are called when the tag is connected to the dst, and the data is
freed using disconnect on tagger change.

(If someone wonders why the bind function is put in the general setup
function, it's because the tag is set in the cpu port where the notifier
is still not available, and we require the notifier to send the
tag_proto_connect() event.)

We now have a tag_proto_connect() for the dsa driver, used to put
additional data in the tagger priv (which is actually the dsa priv).
This is called using the switch event DSA_NOTIFIER_TAG_PROTO_CONNECT.
The current use for this is adding handlers for the Ethernet packets, to
keep the tagger code as dumb as possible.
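
On the driver side this reduces to installing the handlers in the tagger
priv, as done by qca8k_connect_tag_protocol() in the patch:

	tagger_data = ds->tagger_data;
	tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
	tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;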

The tagger priv implements only the handlers for the special packets. All
the other state is placed in qca8k_priv, and the tagger has to access it
under lock.

We use the new API from Vladimir to track whether the master port is
operational or not. We had to track many things to reach a usable state.
Checking if the port is UP is not enough, and tracking NETDEV_CHANGE is
also not enough since it is also used for other tasks. The correct way
was to track both whether the interface is UP and whether a qdisc has
been assigned to the interface. That tells us the port (and indirectly
the tagger) is ready to accept and process packets.
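
Concretely, the admin-up check replayed at setup time (see the dsa2.c
hunk below) is:

	bool admin_up = (master->flags & IFF_UP) &&
			!qdisc_tx_is_noop(master);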

I tested this with a multicpu port and with port6 set as the unique port,
and it's sad.
It seems they implemented this feature in a bad way and it is only
supported with cpu port0. When cpu port6 is the unique port, the switch
doesn't send ack packets. With a multicpu port, ack packets are not
duplicated and only cpu port0 sends them. The same applies to the MIB
counters.
For this reason this feature is enabled only when cpu port0 is enabled
and operational.

v8:
- Reworked to a rolling counter for the seq_num
- Reworked the hi/lo cache patch
- Fix multiple missing skb free and mutex lock errors
- Fix some spelling mistakes
- Add macro build check for mgmt packet size
- Change some struct naming to make them more descriptive
v7:
- Rebase on net-next changes
- Add bulk patches to speed this up even more
v6:
- Fix some error in ethtool handler caused by rebase/cleanup
v5:
- Adapt to new API fixes
- Fix wrong logic for noop
- Add additional lock for master_state change
- Limit mdio Ethernet to cpu port0 (switch limitation)
- Add priority to these special packets
- Move mdio cache to qca8k_priv
v4:
- Remove duplicate patch sent by mistake.
v3:
- Include MIB with Ethernet packet.
- Include phy read/write with Ethernet packet.
- Reorganize code with new API.
- Introduce master tracking by Vladimir
v2:
- Address all suggestion from Vladimir.
  Try to generalize this with connect/disconnect functions from the
  tagger and tag_proto_connect for the driver.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 000fe940 4f3701fc
......@@ -20,6 +20,7 @@
#include <linux/phylink.h>
#include <linux/gpio/consumer.h>
#include <linux/etherdevice.h>
#include <linux/dsa/tag_qca.h>
#include "qca8k.h"
......@@ -74,12 +75,6 @@ static const struct qca8k_mib_desc ar8327_mib[] = {
MIB_DESC(1, 0xac, "TXUnicast"),
};
/* The 32bit switch registers are accessed indirectly. To achieve this we need
* to set the page of the register. Track the last page that was set to reduce
* mdio writes
*/
static u16 qca8k_current_page = 0xffff;
static void
qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
{
......@@ -93,6 +88,44 @@ qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
*page = regaddr & 0x3ff;
}
static int
qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo)
{
u16 *cached_lo = &priv->mdio_cache.lo;
struct mii_bus *bus = priv->bus;
int ret;
if (lo == *cached_lo)
return 0;
ret = bus->write(bus, phy_id, regnum, lo);
if (ret < 0)
dev_err_ratelimited(&bus->dev,
"failed to write qca8k 32bit lo register\n");
*cached_lo = lo;
return 0;
}
static int
qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi)
{
u16 *cached_hi = &priv->mdio_cache.hi;
struct mii_bus *bus = priv->bus;
int ret;
if (hi == *cached_hi)
return 0;
ret = bus->write(bus, phy_id, regnum, hi);
if (ret < 0)
dev_err_ratelimited(&bus->dev,
"failed to write qca8k 32bit hi register\n");
*cached_hi = hi;
return 0;
}
static int
qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
{
......@@ -116,7 +149,7 @@ qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
}
static void
qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
{
u16 lo, hi;
int ret;
......@@ -124,20 +157,19 @@ qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
lo = val & 0xffff;
hi = (u16)(val >> 16);
ret = bus->write(bus, phy_id, regnum, lo);
ret = qca8k_set_lo(priv, phy_id, regnum, lo);
if (ret >= 0)
ret = bus->write(bus, phy_id, regnum + 1, hi);
if (ret < 0)
dev_err_ratelimited(&bus->dev,
"failed to write qca8k 32bit register\n");
ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi);
}
static int
qca8k_set_page(struct mii_bus *bus, u16 page)
qca8k_set_page(struct qca8k_priv *priv, u16 page)
{
u16 *cached_page = &priv->mdio_cache.page;
struct mii_bus *bus = priv->bus;
int ret;
if (page == qca8k_current_page)
if (page == *cached_page)
return 0;
ret = bus->write(bus, 0x18, 0, page);
......@@ -147,7 +179,7 @@ qca8k_set_page(struct mii_bus *bus, u16 page)
return ret;
}
qca8k_current_page = page;
*cached_page = page;
usleep_range(1000, 2000);
return 0;
}
......@@ -170,6 +202,252 @@ qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
return regmap_update_bits(priv->regmap, reg, mask, write_val);
}
static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
struct qca8k_mgmt_eth_data *mgmt_eth_data;
struct qca8k_priv *priv = ds->priv;
struct qca_mgmt_ethhdr *mgmt_ethhdr;
u8 len, cmd;
mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
mgmt_eth_data = &priv->mgmt_eth_data;
cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command);
len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command);
/* Make sure the seq matches the requested packet */
if (mgmt_ethhdr->seq == mgmt_eth_data->seq)
mgmt_eth_data->ack = true;
if (cmd == MDIO_READ) {
mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data;
/* Get the rest of the 12 bytes of data.
* The read/write function will extract the requested data.
*/
if (len > QCA_HDR_MGMT_DATA1_LEN)
memcpy(mgmt_eth_data->data + 1, skb->data,
QCA_HDR_MGMT_DATA2_LEN);
}
complete(&mgmt_eth_data->rw_done);
}
static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
int priority, unsigned int len)
{
struct qca_mgmt_ethhdr *mgmt_ethhdr;
unsigned int real_len;
struct sk_buff *skb;
u32 *data2;
u16 hdr;
skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
if (!skb)
return NULL;
/* Max value for len reg is 15 (0xf) but the switch actually returns 16 bytes.
* For some reason the steps are:
* 0: nothing
* 1-4: first 4 bytes
* 5-6: first 12 bytes
* 7-15: all 16 bytes
*/
if (len == 16)
real_len = 15;
else
real_len = len;
skb_reset_mac_header(skb);
skb_set_network_header(skb, skb->len);
mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);
hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
hdr |= QCA_HDR_XMIT_FROM_CPU;
hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
QCA_HDR_MGMT_CHECK_CODE_VAL);
if (cmd == MDIO_WRITE)
mgmt_ethhdr->mdio_data = *val;
mgmt_ethhdr->hdr = htons(hdr);
data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN)
memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN);
return skb;
}
static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
{
struct qca_mgmt_ethhdr *mgmt_ethhdr;
mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
}
static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
struct sk_buff *skb;
bool ack;
int ret;
skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
QCA8K_ETHERNET_MDIO_PRIORITY, len);
if (!skb)
return -ENOMEM;
mutex_lock(&mgmt_eth_data->mutex);
/* Check if mgmt_master is operational */
if (!priv->mgmt_master) {
kfree_skb(skb);
mutex_unlock(&mgmt_eth_data->mutex);
return -EINVAL;
}
skb->dev = priv->mgmt_master;
reinit_completion(&mgmt_eth_data->rw_done);
/* Increment seq_num and set it in the mdio pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
dev_queue_xmit(skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
*val = mgmt_eth_data->data[0];
if (len > QCA_HDR_MGMT_DATA1_LEN)
memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);
ack = mgmt_eth_data->ack;
mutex_unlock(&mgmt_eth_data->mutex);
if (ret <= 0)
return -ETIMEDOUT;
if (!ack)
return -EINVAL;
return 0;
}
static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
struct sk_buff *skb;
bool ack;
int ret;
skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
QCA8K_ETHERNET_MDIO_PRIORITY, len);
if (!skb)
return -ENOMEM;
mutex_lock(&mgmt_eth_data->mutex);
/* Check if mgmt_master is operational */
if (!priv->mgmt_master) {
kfree_skb(skb);
mutex_unlock(&mgmt_eth_data->mutex);
return -EINVAL;
}
skb->dev = priv->mgmt_master;
reinit_completion(&mgmt_eth_data->rw_done);
/* Increment seq_num and set it in the mdio pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
dev_queue_xmit(skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
ack = mgmt_eth_data->ack;
mutex_unlock(&mgmt_eth_data->mutex);
if (ret <= 0)
return -ETIMEDOUT;
if (!ack)
return -EINVAL;
return 0;
}
static int
qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
{
u32 val = 0;
int ret;
ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
if (ret)
return ret;
val &= ~mask;
val |= write_val;
return qca8k_write_eth(priv, reg, &val, sizeof(val));
}
static int
qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
int i, count = len / sizeof(u32), ret;
if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len))
return 0;
for (i = 0; i < count; i++) {
ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
if (ret < 0)
return ret;
}
return 0;
}
static int
qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
int i, count = len / sizeof(u32), ret;
u32 tmp;
if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len))
return 0;
for (i = 0; i < count; i++) {
tmp = val[i];
ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
if (ret < 0)
return ret;
}
return 0;
}
static int
qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
{
......@@ -178,11 +456,14 @@ qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
u16 r1, r2, page;
int ret;
if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
return 0;
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
ret = qca8k_set_page(bus, page);
ret = qca8k_set_page(priv, page);
if (ret < 0)
goto exit;
......@@ -201,15 +482,18 @@ qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
u16 r1, r2, page;
int ret;
if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
return 0;
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
ret = qca8k_set_page(bus, page);
ret = qca8k_set_page(priv, page);
if (ret < 0)
goto exit;
qca8k_mii_write32(bus, 0x10 | r2, r1, val);
qca8k_mii_write32(priv, 0x10 | r2, r1, val);
exit:
mutex_unlock(&bus->mdio_lock);
......@@ -225,11 +509,14 @@ qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_
u32 val;
int ret;
if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
return 0;
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
ret = qca8k_set_page(bus, page);
ret = qca8k_set_page(priv, page);
if (ret < 0)
goto exit;
......@@ -239,7 +526,7 @@ qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_
val &= ~mask;
val |= write_val;
qca8k_mii_write32(bus, 0x10 | r2, r1, val);
qca8k_mii_write32(priv, 0x10 | r2, r1, val);
exit:
mutex_unlock(&bus->mdio_lock);
......@@ -296,17 +583,13 @@ qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
static int
qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
{
u32 reg[4], val;
int i, ret;
u32 reg[3];
int ret;
/* load the ARL table into an array */
for (i = 0; i < 4; i++) {
ret = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4), &val);
if (ret < 0)
return ret;
reg[i] = val;
}
ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
if (ret)
return ret;
/* vid - 83:72 */
fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
......@@ -330,7 +613,6 @@ qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
u8 aging)
{
u32 reg[3] = { 0 };
int i;
/* vid - 83:72 */
reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
......@@ -347,8 +629,7 @@ qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
/* load the array into the ARL table */
for (i = 0; i < 3; i++)
qca8k_write(priv, QCA8K_REG_ATU_DATA0 + (i * 4), reg[i]);
qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
}
static int
......@@ -632,7 +913,10 @@ qca8k_mib_init(struct qca8k_priv *priv)
int ret;
mutex_lock(&priv->reg_mutex);
ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
QCA8K_MIB_BUSY);
if (ret)
goto exit;
......@@ -666,6 +950,199 @@ qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
}
static int
qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
struct sk_buff *read_skb, u32 *val)
{
struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
bool ack;
int ret;
if (!skb)
return -ENOMEM;
reinit_completion(&mgmt_eth_data->rw_done);
/* Increment seq_num and set it in the copy pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
dev_queue_xmit(skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
if (ret <= 0)
return -ETIMEDOUT;
if (!ack)
return -EINVAL;
*val = mgmt_eth_data->data[0];
return 0;
}
static int
qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
int regnum, u16 data)
{
struct sk_buff *write_skb, *clear_skb, *read_skb;
struct qca8k_mgmt_eth_data *mgmt_eth_data;
u32 write_val, clear_val = 0, val;
struct net_device *mgmt_master;
int ret, ret1;
bool ack;
if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
return -EINVAL;
mgmt_eth_data = &priv->mgmt_eth_data;
write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
QCA8K_MDIO_MASTER_REG_ADDR(regnum);
if (read) {
write_val |= QCA8K_MDIO_MASTER_READ;
} else {
write_val |= QCA8K_MDIO_MASTER_WRITE;
write_val |= QCA8K_MDIO_MASTER_DATA(data);
}
/* Prealloc all the needed skb before the lock */
write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
if (!write_skb)
return -ENOMEM;
clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
if (!clear_skb) {
ret = -ENOMEM;
goto err_clear_skb;
}
read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
if (!read_skb) {
ret = -ENOMEM;
goto err_read_skb;
}
/* Actually start the request:
* 1. Send mdio master packet
* 2. Busy Wait for mdio master command
* 3. Get the data if we are reading
* 4. Reset the mdio master (even with error)
*/
mutex_lock(&mgmt_eth_data->mutex);
/* Check if mgmt_master is operational */
mgmt_master = priv->mgmt_master;
if (!mgmt_master) {
mutex_unlock(&mgmt_eth_data->mutex);
ret = -EINVAL;
goto err_mgmt_master;
}
read_skb->dev = mgmt_master;
clear_skb->dev = mgmt_master;
write_skb->dev = mgmt_master;
reinit_completion(&mgmt_eth_data->rw_done);
/* Increment seq_num and set it in the write pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
dev_queue_xmit(write_skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
if (ret <= 0) {
ret = -ETIMEDOUT;
kfree_skb(read_skb);
goto exit;
}
if (!ack) {
ret = -EINVAL;
kfree_skb(read_skb);
goto exit;
}
ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
!(val & QCA8K_MDIO_MASTER_BUSY), 0,
QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
mgmt_eth_data, read_skb, &val);
if (ret < 0 && ret1 < 0) {
ret = ret1;
goto exit;
}
if (read) {
reinit_completion(&mgmt_eth_data->rw_done);
/* Increment seq_num and set it in the read pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
dev_queue_xmit(read_skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
if (ret <= 0) {
ret = -ETIMEDOUT;
goto exit;
}
if (!ack) {
ret = -EINVAL;
goto exit;
}
ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
} else {
kfree_skb(read_skb);
}
exit:
reinit_completion(&mgmt_eth_data->rw_done);
/* Increment seq_num and set it in the clear pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
dev_queue_xmit(clear_skb);
wait_for_completion_timeout(&mgmt_eth_data->rw_done,
QCA8K_ETHERNET_TIMEOUT);
mutex_unlock(&mgmt_eth_data->mutex);
return ret;
/* Error handling before lock */
err_mgmt_master:
kfree_skb(read_skb);
err_read_skb:
kfree_skb(clear_skb);
err_clear_skb:
kfree_skb(write_skb);
return ret;
}
static u32
qca8k_port_to_phy(int port)
{
......@@ -704,8 +1181,9 @@ qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
}
static int
qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data)
qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
{
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
......@@ -722,18 +1200,18 @@ qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data)
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
ret = qca8k_set_page(bus, page);
ret = qca8k_set_page(priv, page);
if (ret)
goto exit;
qca8k_mii_write32(bus, 0x10 | r2, r1, val);
qca8k_mii_write32(priv, 0x10 | r2, r1, val);
ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_BUSY);
exit:
/* even if the busy_wait times out, try to clear the MASTER_EN */
qca8k_mii_write32(bus, 0x10 | r2, r1, 0);
qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
mutex_unlock(&bus->mdio_lock);
......@@ -741,8 +1219,9 @@ qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data)
}
static int
qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum)
qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
{
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
......@@ -758,11 +1237,11 @@ qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum)
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
ret = qca8k_set_page(bus, page);
ret = qca8k_set_page(priv, page);
if (ret)
goto exit;
qca8k_mii_write32(bus, 0x10 | r2, r1, val);
qca8k_mii_write32(priv, 0x10 | r2, r1, val);
ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_BUSY);
......@@ -773,7 +1252,7 @@ qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum)
exit:
/* even if the busy_wait times out, try to clear the MASTER_EN */
qca8k_mii_write32(bus, 0x10 | r2, r1, 0);
qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
mutex_unlock(&bus->mdio_lock);
......@@ -787,24 +1266,35 @@ static int
qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
{
struct qca8k_priv *priv = slave_bus->priv;
struct mii_bus *bus = priv->bus;
int ret;
/* Use mdio Ethernet when available, fall back to the legacy way on error */
ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
if (!ret)
return 0;
return qca8k_mdio_write(bus, phy, regnum, data);
return qca8k_mdio_write(priv, phy, regnum, data);
}
static int
qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
{
struct qca8k_priv *priv = slave_bus->priv;
struct mii_bus *bus = priv->bus;
int ret;
return qca8k_mdio_read(bus, phy, regnum);
/* Use mdio Ethernet when available, fall back to the legacy way on error */
ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
if (ret >= 0)
return ret;
return qca8k_mdio_read(priv, phy, regnum);
}
static int
qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
{
struct qca8k_priv *priv = ds->priv;
int ret;
/* Check if the legacy mapping should be used and the
* port is not correctly mapped to the right PHY in the
......@@ -813,7 +1303,12 @@ qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
if (priv->legacy_phy_port_mapping)
port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
return qca8k_mdio_write(priv->bus, port, regnum, data);
/* Use mdio Ethernet when available, fall back to the legacy way on error */
ret = qca8k_phy_eth_command(priv, false, port, regnum, 0);
if (!ret)
return ret;
return qca8k_mdio_write(priv, port, regnum, data);
}
static int
......@@ -829,7 +1324,12 @@ qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
if (priv->legacy_phy_port_mapping)
port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
ret = qca8k_mdio_read(priv->bus, port, regnum);
/* Use mdio Ethernet when available, fall back to the legacy way on error */
ret = qca8k_phy_eth_command(priv, true, port, regnum, 0);
if (ret >= 0)
return ret;
ret = qca8k_mdio_read(priv, port, regnum);
if (ret < 0)
return 0xffff;
......@@ -1703,6 +2203,97 @@ qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
ETH_GSTRING_LEN);
}
static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
const struct qca8k_match_data *match_data;
struct qca8k_mib_eth_data *mib_eth_data;
struct qca8k_priv *priv = ds->priv;
const struct qca8k_mib_desc *mib;
struct mib_ethhdr *mib_ethhdr;
int i, mib_len, offset = 0;
u64 *data;
u8 port;
mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
mib_eth_data = &priv->mib_eth_data;
/* The switch autocasts MIB packets for every port. Ignore packets for
* other ports and parse only the requested one.
*/
port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
if (port != mib_eth_data->req_port)
goto exit;
match_data = device_get_match_data(priv->dev);
data = mib_eth_data->data;
for (i = 0; i < match_data->mib_count; i++) {
mib = &ar8327_mib[i];
/* The first 3 mib counters are present in the skb head */
if (i < 3) {
data[i] = mib_ethhdr->data[i];
continue;
}
mib_len = sizeof(uint32_t);
/* Some mibs are 64 bits wide */
if (mib->size == 2)
mib_len = sizeof(uint64_t);
/* Copy the mib value from the packet to the stats */
memcpy(data + i, skb->data + offset, mib_len);
/* Set the offset for the next mib */
offset += mib_len;
}
exit:
/* Complete on receiving all the mib packets */
if (refcount_dec_and_test(&mib_eth_data->port_parsed))
complete(&mib_eth_data->rw_done);
}
static int
qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
{
struct dsa_port *dp = dsa_to_port(ds, port);
struct qca8k_mib_eth_data *mib_eth_data;
struct qca8k_priv *priv = ds->priv;
int ret;
mib_eth_data = &priv->mib_eth_data;
mutex_lock(&mib_eth_data->mutex);
reinit_completion(&mib_eth_data->rw_done);
mib_eth_data->req_port = dp->index;
mib_eth_data->data = data;
refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
mutex_lock(&priv->reg_mutex);
/* Send mib autocast request */
ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
QCA8K_MIB_BUSY);
mutex_unlock(&priv->reg_mutex);
if (ret)
goto exit;
ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
exit:
mutex_unlock(&mib_eth_data->mutex);
return ret;
}
static void
qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
......@@ -1714,6 +2305,10 @@ qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
u32 hi = 0;
int ret;
if (priv->mgmt_master &&
qca8k_get_ethtool_stats_eth(ds, port, data) > 0)
return;
match_data = of_device_get_match_data(priv->dev);
for (i = 0; i < match_data->mib_count; i++) {
......@@ -2383,6 +2978,46 @@ qca8k_port_lag_leave(struct dsa_switch *ds, int port,
return qca8k_lag_refresh_portmap(ds, port, lag, true);
}
static void
qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
bool operational)
{
struct dsa_port *dp = master->dsa_ptr;
struct qca8k_priv *priv = ds->priv;
/* Ethernet MIB/MDIO is only supported for CPU port 0 */
if (dp->index != 0)
return;
mutex_lock(&priv->mgmt_eth_data.mutex);
mutex_lock(&priv->mib_eth_data.mutex);
priv->mgmt_master = operational ? (struct net_device *)master : NULL;
mutex_unlock(&priv->mib_eth_data.mutex);
mutex_unlock(&priv->mgmt_eth_data.mutex);
}
static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
enum dsa_tag_protocol proto)
{
struct qca_tagger_data *tagger_data;
switch (proto) {
case DSA_TAG_PROTO_QCA:
tagger_data = ds->tagger_data;
tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static const struct dsa_switch_ops qca8k_switch_ops = {
.get_tag_protocol = qca8k_get_tag_protocol,
.setup = qca8k_setup,
......@@ -2418,6 +3053,8 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.get_phy_flags = qca8k_get_phy_flags,
.port_lag_join = qca8k_port_lag_join,
.port_lag_leave = qca8k_port_lag_leave,
.master_state_change = qca8k_master_change,
.connect_tag_protocol = qca8k_connect_tag_protocol,
};
static int qca8k_read_switch_id(struct qca8k_priv *priv)
......@@ -2488,6 +3125,10 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
return PTR_ERR(priv->regmap);
}
priv->mdio_cache.page = 0xffff;
priv->mdio_cache.lo = 0xffff;
priv->mdio_cache.hi = 0xffff;
/* Check the detected switch id */
ret = qca8k_read_switch_id(priv);
if (ret)
......@@ -2497,6 +3138,12 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
if (!priv->ds)
return -ENOMEM;
mutex_init(&priv->mgmt_eth_data.mutex);
init_completion(&priv->mgmt_eth_data.rw_done);
mutex_init(&priv->mib_eth_data.mutex);
init_completion(&priv->mib_eth_data.rw_done);
priv->ds->dev = &mdiodev->dev;
priv->ds->num_ports = QCA8K_NUM_PORTS;
priv->ds->priv = priv;
......
......@@ -11,6 +11,11 @@
#include <linux/delay.h>
#include <linux/regmap.h>
#include <linux/gpio.h>
#include <linux/dsa/tag_qca.h>
#define QCA8K_ETHERNET_MDIO_PRIORITY 7
#define QCA8K_ETHERNET_PHY_PRIORITY 6
#define QCA8K_ETHERNET_TIMEOUT 100
#define QCA8K_NUM_PORTS 7
#define QCA8K_NUM_CPU_PORTS 2
......@@ -63,7 +68,7 @@
#define QCA8K_REG_MODULE_EN 0x030
#define QCA8K_MODULE_EN_MIB BIT(0)
#define QCA8K_REG_MIB 0x034
#define QCA8K_MIB_FLUSH BIT(24)
#define QCA8K_MIB_FUNC GENMASK(26, 24)
#define QCA8K_MIB_CPU_KEEP BIT(20)
#define QCA8K_MIB_BUSY BIT(17)
#define QCA8K_MDIO_MASTER_CTRL 0x3c
......@@ -313,6 +318,12 @@ enum qca8k_vlan_cmd {
QCA8K_VLAN_READ = 6,
};
enum qca8k_mib_cmd {
QCA8K_MIB_FLUSH = 1,
QCA8K_MIB_FLUSH_PORT = 2,
QCA8K_MIB_CAST = 3,
};
struct ar8xxx_port_status {
int enabled;
};
......@@ -328,6 +339,22 @@ enum {
QCA8K_CPU_PORT6,
};
struct qca8k_mgmt_eth_data {
struct completion rw_done;
struct mutex mutex; /* Enforce one mdio read/write at a time */
bool ack;
u32 seq;
u32 data[4];
};
struct qca8k_mib_eth_data {
struct completion rw_done;
struct mutex mutex; /* Process one command at a time */
refcount_t port_parsed; /* Counter to track parsed ports */
u8 req_port;
u64 *data; /* pointer to ethtool data */
};
struct qca8k_ports_config {
bool sgmii_rx_clk_falling_edge;
bool sgmii_tx_clk_falling_edge;
......@@ -336,6 +363,19 @@ struct qca8k_ports_config {
u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
};
struct qca8k_mdio_cache {
/* The 32bit switch registers are accessed indirectly. To achieve this we need
* to set the page of the register. Track the last page that was set to reduce
* mdio writes
*/
u16 page;
/* lo and hi can also be cached, and per the documentation we can skip
* one extra mdio write if lo or hi didn't change.
*/
u16 lo;
u16 hi;
};
struct qca8k_priv {
u8 switch_id;
u8 switch_revision;
......@@ -353,6 +393,10 @@ struct qca8k_priv {
struct dsa_switch_ops ops;
struct gpio_desc *reset_gpio;
unsigned int port_mtu[QCA8K_NUM_PORTS];
struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */
struct qca8k_mgmt_eth_data mgmt_eth_data;
struct qca8k_mib_eth_data mib_eth_data;
struct qca8k_mdio_cache mdio_cache;
};
struct qca8k_mib_desc {
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __TAG_QCA_H
#define __TAG_QCA_H
#define QCA_HDR_LEN 2
#define QCA_HDR_VERSION 0x2
#define QCA_HDR_RECV_VERSION GENMASK(15, 14)
#define QCA_HDR_RECV_PRIORITY GENMASK(13, 11)
#define QCA_HDR_RECV_TYPE GENMASK(10, 6)
#define QCA_HDR_RECV_FRAME_IS_TAGGED BIT(3)
#define QCA_HDR_RECV_SOURCE_PORT GENMASK(2, 0)
/* Packet type for recv */
#define QCA_HDR_RECV_TYPE_NORMAL 0x0
#define QCA_HDR_RECV_TYPE_MIB 0x1
#define QCA_HDR_RECV_TYPE_RW_REG_ACK 0x2
#define QCA_HDR_XMIT_VERSION GENMASK(15, 14)
#define QCA_HDR_XMIT_PRIORITY GENMASK(13, 11)
#define QCA_HDR_XMIT_CONTROL GENMASK(10, 8)
#define QCA_HDR_XMIT_FROM_CPU BIT(7)
#define QCA_HDR_XMIT_DP_BIT GENMASK(6, 0)
/* Packet type for xmit */
#define QCA_HDR_XMIT_TYPE_NORMAL 0x0
#define QCA_HDR_XMIT_TYPE_RW_REG 0x1
/* Check code for a valid mgmt packet. The switch will ignore the packet
* if this is wrong.
*/
#define QCA_HDR_MGMT_CHECK_CODE_VAL 0x5
/* Specific define for in-band MDIO read/write with Ethernet packet */
#define QCA_HDR_MGMT_SEQ_LEN 4 /* 4 bytes for the seq */
#define QCA_HDR_MGMT_COMMAND_LEN 4 /* 4 bytes for the command */
#define QCA_HDR_MGMT_DATA1_LEN 4 /* First 4 bytes of the mdio data */
#define QCA_HDR_MGMT_HEADER_LEN (QCA_HDR_MGMT_SEQ_LEN + \
QCA_HDR_MGMT_COMMAND_LEN + \
QCA_HDR_MGMT_DATA1_LEN)
#define QCA_HDR_MGMT_DATA2_LEN 12 /* Other 12 bytes of the mdio data */
#define QCA_HDR_MGMT_PADDING_LEN 34 /* Padding to reach the min Ethernet packet size */
#define QCA_HDR_MGMT_PKT_LEN (QCA_HDR_MGMT_HEADER_LEN + \
QCA_HDR_LEN + \
QCA_HDR_MGMT_DATA2_LEN + \
QCA_HDR_MGMT_PADDING_LEN)
#define QCA_HDR_MGMT_SEQ_NUM GENMASK(31, 0) /* 63, 32 */
#define QCA_HDR_MGMT_CHECK_CODE GENMASK(31, 29) /* 31, 29 */
#define QCA_HDR_MGMT_CMD BIT(28) /* 28 */
#define QCA_HDR_MGMT_LENGTH GENMASK(23, 20) /* 23, 20 */
#define QCA_HDR_MGMT_ADDR GENMASK(18, 0) /* 18, 0 */
/* Special struct emulating an Ethernet header */
struct qca_mgmt_ethhdr {
u32 command; /* command bit 31:0 */
u32 seq; /* seq 63:32 */
u32 mdio_data; /* first 4byte mdio */
__be16 hdr; /* qca hdr */
} __packed;
enum mdio_cmd {
MDIO_WRITE = 0x0,
MDIO_READ
};
struct mib_ethhdr {
u32 data[3]; /* first 3 mib counters */
__be16 hdr; /* qca hdr */
} __packed;
struct qca_tagger_data {
void (*rw_reg_ack_handler)(struct dsa_switch *ds,
struct sk_buff *skb);
void (*mib_autocast_handler)(struct dsa_switch *ds,
struct sk_buff *skb);
};
#endif /* __TAG_QCA_H */
......@@ -278,6 +278,10 @@ struct dsa_port {
u8 devlink_port_setup:1;
/* Master state bits, valid only on CPU ports */
u8 master_admin_up:1;
u8 master_oper_up:1;
u8 setup:1;
struct device_node *dn;
......@@ -478,6 +482,12 @@ static inline bool dsa_port_is_unused(struct dsa_port *dp)
return dp->type == DSA_PORT_TYPE_UNUSED;
}
static inline bool dsa_port_master_is_operational(struct dsa_port *dp)
{
return dsa_port_is_cpu(dp) && dp->master_admin_up &&
dp->master_oper_up;
}
static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
{
return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
......@@ -1036,6 +1046,13 @@ struct dsa_switch_ops {
int (*tag_8021q_vlan_add)(struct dsa_switch *ds, int port, u16 vid,
u16 flags);
int (*tag_8021q_vlan_del)(struct dsa_switch *ds, int port, u16 vid);
/*
* DSA master tracking operations
*/
void (*master_state_change)(struct dsa_switch *ds,
const struct net_device *master,
bool operational);
};
#define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes) \
......
......@@ -15,6 +15,7 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <net/devlink.h>
#include <net/sch_generic.h>
#include "dsa_priv.h"
......@@ -1064,9 +1065,18 @@ static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
list_for_each_entry(dp, &dst->ports, list) {
if (dsa_port_is_cpu(dp)) {
err = dsa_master_setup(dp->master, dp);
struct net_device *master = dp->master;
bool admin_up = (master->flags & IFF_UP) &&
!qdisc_tx_is_noop(master);
err = dsa_master_setup(master, dp);
if (err)
return err;
/* Replay master state event */
dsa_tree_master_admin_state_change(dst, master, admin_up);
dsa_tree_master_oper_state_change(dst, master,
netif_oper_up(master));
}
}
......@@ -1081,9 +1091,19 @@ static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
rtnl_lock();
list_for_each_entry(dp, &dst->ports, list)
if (dsa_port_is_cpu(dp))
dsa_master_teardown(dp->master);
list_for_each_entry(dp, &dst->ports, list) {
if (dsa_port_is_cpu(dp)) {
struct net_device *master = dp->master;
/* Synthesizing an "admin down" state is sufficient for
* the switches to get a notification if the master is
* currently up and running.
*/
dsa_tree_master_admin_state_change(dst, master, false);
dsa_master_teardown(master);
}
}
rtnl_unlock();
}
......@@ -1279,6 +1299,52 @@ int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
return err;
}
static void dsa_tree_master_state_change(struct dsa_switch_tree *dst,
struct net_device *master)
{
struct dsa_notifier_master_state_info info;
struct dsa_port *cpu_dp = master->dsa_ptr;
info.master = master;
info.operational = dsa_port_master_is_operational(cpu_dp);
dsa_tree_notify(dst, DSA_NOTIFIER_MASTER_STATE_CHANGE, &info);
}
void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst,
struct net_device *master,
bool up)
{
struct dsa_port *cpu_dp = master->dsa_ptr;
bool notify = false;
if ((dsa_port_master_is_operational(cpu_dp)) !=
(up && cpu_dp->master_oper_up))
notify = true;
cpu_dp->master_admin_up = up;
if (notify)
dsa_tree_master_state_change(dst, master);
}
void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst,
struct net_device *master,
bool up)
{
struct dsa_port *cpu_dp = master->dsa_ptr;
bool notify = false;
if ((dsa_port_master_is_operational(cpu_dp)) !=
(cpu_dp->master_admin_up && up))
notify = true;
cpu_dp->master_oper_up = up;
if (notify)
dsa_tree_master_state_change(dst, master);
}
static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
{
struct dsa_switch_tree *dst = ds->dst;
......
......@@ -40,6 +40,7 @@ enum {
DSA_NOTIFIER_TAG_PROTO_DISCONNECT,
DSA_NOTIFIER_TAG_8021Q_VLAN_ADD,
DSA_NOTIFIER_TAG_8021Q_VLAN_DEL,
DSA_NOTIFIER_MASTER_STATE_CHANGE,
};
/* DSA_NOTIFIER_AGEING_TIME */
......@@ -109,6 +110,12 @@ struct dsa_notifier_tag_8021q_vlan_info {
u16 vid;
};
/* DSA_NOTIFIER_MASTER_STATE_CHANGE */
struct dsa_notifier_master_state_info {
const struct net_device *master;
bool operational;
};
struct dsa_switchdev_event_work {
struct dsa_switch *ds;
int port;
......@@ -482,6 +489,12 @@ int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
struct net_device *master,
const struct dsa_device_ops *tag_ops,
const struct dsa_device_ops *old_tag_ops);
void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst,
struct net_device *master,
bool up);
void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst,
struct net_device *master,
bool up);
unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max);
void dsa_bridge_num_put(const struct net_device *bridge_dev,
unsigned int bridge_num);
......
......@@ -2346,6 +2346,36 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
err = dsa_port_lag_change(dp, info->lower_state_info);
return notifier_from_errno(err);
}
case NETDEV_CHANGE:
case NETDEV_UP: {
/* Track the state of the master port.
* A DSA driver may require the master port (and indirectly
* the tagger) to be available for some special operations.
*/
if (netdev_uses_dsa(dev)) {
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->ds->dst;
/* Track when the master port is UP */
dsa_tree_master_oper_state_change(dst, dev,
netif_oper_up(dev));
/* Track when the master port is ready and can accept
* packets.
* A NETDEV_UP event is not enough to flag a port as ready.
* We also have to wait for linkwatch_do_dev to dev_activate
* and emit a NETDEV_CHANGE event.
* We check if a master port is ready by checking if the dev
* has a qdisc assigned and it is not the noop qdisc.
*/
dsa_tree_master_admin_state_change(dst, dev,
!qdisc_tx_is_noop(dev));
return NOTIFY_OK;
}
return NOTIFY_DONE;
}
case NETDEV_GOING_DOWN: {
struct dsa_port *dp, *cpu_dp;
struct dsa_switch_tree *dst;
......@@ -2357,6 +2387,8 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
cpu_dp = dev->dsa_ptr;
dst = cpu_dp->ds->dst;
dsa_tree_master_admin_state_change(dst, dev, false);
list_for_each_entry(dp, &dst->ports, list) {
if (!dsa_port_is_user(dp))
continue;
......
......@@ -697,6 +697,18 @@ dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
return 0;
}
static int
dsa_switch_master_state_change(struct dsa_switch *ds,
struct dsa_notifier_master_state_info *info)
{
if (!ds->ops->master_state_change)
return 0;
ds->ops->master_state_change(ds, info->master, info->operational);
return 0;
}
static int dsa_switch_event(struct notifier_block *nb,
unsigned long event, void *info)
{
......@@ -770,6 +782,9 @@ static int dsa_switch_event(struct notifier_block *nb,
case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
err = dsa_switch_tag_8021q_vlan_del(ds, info);
break;
case DSA_NOTIFIER_MASTER_STATE_CHANGE:
err = dsa_switch_master_state_change(ds, info);
break;
default:
err = -EOPNOTSUPP;
break;
......
......@@ -4,30 +4,12 @@
*/
#include <linux/etherdevice.h>
#include <linux/bitfield.h>
#include <net/dsa.h>
#include <linux/dsa/tag_qca.h>
#include "dsa_priv.h"
#define QCA_HDR_LEN 2
#define QCA_HDR_VERSION 0x2
#define QCA_HDR_RECV_VERSION_MASK GENMASK(15, 14)
#define QCA_HDR_RECV_VERSION_S 14
#define QCA_HDR_RECV_PRIORITY_MASK GENMASK(13, 11)
#define QCA_HDR_RECV_PRIORITY_S 11
#define QCA_HDR_RECV_TYPE_MASK GENMASK(10, 6)
#define QCA_HDR_RECV_TYPE_S 6
#define QCA_HDR_RECV_FRAME_IS_TAGGED BIT(3)
#define QCA_HDR_RECV_SOURCE_PORT_MASK GENMASK(2, 0)
#define QCA_HDR_XMIT_VERSION_MASK GENMASK(15, 14)
#define QCA_HDR_XMIT_VERSION_S 14
#define QCA_HDR_XMIT_PRIORITY_MASK GENMASK(13, 11)
#define QCA_HDR_XMIT_PRIORITY_S 11
#define QCA_HDR_XMIT_CONTROL_MASK GENMASK(10, 8)
#define QCA_HDR_XMIT_CONTROL_S 8
#define QCA_HDR_XMIT_FROM_CPU BIT(7)
#define QCA_HDR_XMIT_DP_BIT_MASK GENMASK(6, 0)
static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
......@@ -40,8 +22,9 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
phdr = dsa_etype_header_pos_tx(skb);
/* Set the version field, and set destination port information */
hdr = QCA_HDR_VERSION << QCA_HDR_XMIT_VERSION_S |
QCA_HDR_XMIT_FROM_CPU | BIT(dp->index);
hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
hdr |= QCA_HDR_XMIT_FROM_CPU;
hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(dp->index));
*phdr = htons(hdr);
......@@ -50,10 +33,17 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev)
{
u8 ver;
u16 hdr;
int port;
struct qca_tagger_data *tagger_data;
struct dsa_port *dp = dev->dsa_ptr;
struct dsa_switch *ds = dp->ds;
u8 ver, pk_type;
__be16 *phdr;
int port;
u16 hdr;
BUILD_BUG_ON(sizeof(struct qca_mgmt_ethhdr) != QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);
tagger_data = ds->tagger_data;
if (unlikely(!pskb_may_pull(skb, QCA_HDR_LEN)))
return NULL;
......@@ -62,16 +52,33 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev)
hdr = ntohs(*phdr);
/* Make sure the version is correct */
ver = (hdr & QCA_HDR_RECV_VERSION_MASK) >> QCA_HDR_RECV_VERSION_S;
ver = FIELD_GET(QCA_HDR_RECV_VERSION, hdr);
if (unlikely(ver != QCA_HDR_VERSION))
return NULL;
/* Get pk type */
pk_type = FIELD_GET(QCA_HDR_RECV_TYPE, hdr);
/* Ethernet mgmt read/write packet */
if (pk_type == QCA_HDR_RECV_TYPE_RW_REG_ACK) {
if (likely(tagger_data->rw_reg_ack_handler))
tagger_data->rw_reg_ack_handler(ds, skb);
return NULL;
}
/* Ethernet MIB counter packet */
if (pk_type == QCA_HDR_RECV_TYPE_MIB) {
if (likely(tagger_data->mib_autocast_handler))
tagger_data->mib_autocast_handler(ds, skb);
return NULL;
}
/* Remove QCA tag and recalculate checksum */
skb_pull_rcsum(skb, QCA_HDR_LEN);
dsa_strip_etype_header(skb, QCA_HDR_LEN);
/* Get source port information */
port = (hdr & QCA_HDR_RECV_SOURCE_PORT_MASK);
port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, hdr);
skb->dev = dsa_master_find_slave(dev, 0, port);
if (!skb->dev)
......@@ -80,12 +87,34 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev)
return skb;
}
static int qca_tag_connect(struct dsa_switch *ds)
{
struct qca_tagger_data *tagger_data;
tagger_data = kzalloc(sizeof(*tagger_data), GFP_KERNEL);
if (!tagger_data)
return -ENOMEM;
ds->tagger_data = tagger_data;
return 0;
}
static void qca_tag_disconnect(struct dsa_switch *ds)
{
kfree(ds->tagger_data);
ds->tagger_data = NULL;
}
static const struct dsa_device_ops qca_netdev_ops = {
.name = "qca",
.proto = DSA_TAG_PROTO_QCA,
.connect = qca_tag_connect,
.disconnect = qca_tag_disconnect,
.xmit = qca_tag_xmit,
.rcv = qca_tag_rcv,
.needed_headroom = QCA_HDR_LEN,
.promisc_on_master = true,
};
MODULE_LICENSE("GPL");
......