Commit 9e9f6e72 authored by David S. Miller

Merge branch 'net-bcmgenet-add-support-for-Wake-on-Filter'

Doug Berger says:

====================
net: bcmgenet: add support for Wake on Filter

Changes in v2:
	Corrected Signed-off-by for commit 3/7.

This commit set adds support for waking from 'standby' using a
Rx Network Flow Classification filter specified with ethtool.

The first two commits are bug fixes that should be applied to the
stable branches, but are included in this patch set to reduce merge
conflicts that might occur if not applied before the other commits
in this set.

The next commit consolidates WoL clock management as part of the
overall WoL configuration.

The next commit restores a set of functions that were removed from
the driver just prior to the 4.9 kernel release.

The following commit relocates the functions in the file to avoid
the need for additional forward declarations.

Next, support for the Rx Network Flow Classification interface of
ethtool is added.

Finally, support for the WAKE_FILTER WoL method is added.
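
To illustrate the resulting interface, here is a minimal user-space
sketch (not part of this series; the interface name eth0, the
destination MAC and the rule location are placeholders) that installs
an ETHER_FLOW rule with ring_cookie set to RX_CLS_FLOW_WAKE and then
selects the WAKE_FILTER method:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/sockios.h>

int main(void)
{
    /* placeholder address; 0xff mask bytes mean "byte must match" */
    static const unsigned char dst[ETH_ALEN] = {
        0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
    struct ethtool_rxnfc nfc;
    struct ethtool_wolinfo wol;
    struct ifreq ifr;
    int fd, err;

    memset(&nfc, 0, sizeof(nfc));
    nfc.cmd = ETHTOOL_SRXCLSRLINS;
    nfc.fs.flow_type = ETHER_FLOW;
    nfc.fs.location = 0;                   /* rule/filter slot 0 */
    nfc.fs.ring_cookie = RX_CLS_FLOW_WAKE; /* wake-up, not steering */
    memcpy(nfc.fs.h_u.ether_spec.h_dest, dst, ETH_ALEN);
    memset(nfc.fs.m_u.ether_spec.h_dest, 0xff, ETH_ALEN);

    memset(&wol, 0, sizeof(wol));
    wol.cmd = ETHTOOL_SWOL;
    wol.wolopts = WAKE_FILTER;

    fd = socket(AF_INET, SOCK_DGRAM, 0);
    if (fd < 0)
        return 1;

    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* assumed name */

    ifr.ifr_data = (void *)&nfc;
    err = ioctl(fd, SIOCETHTOOL, &ifr);          /* install the rule */
    if (!err) {
        ifr.ifr_data = (void *)&wol;
        err = ioctl(fd, SIOCETHTOOL, &ifr);      /* select WAKE_FILTER */
    }
    if (err)
        perror("SIOCETHTOOL");
    close(fd);
    return err ? 1 : 0;
}

With a recent ethtool binary the equivalent should be roughly
'ethtool -N eth0 flow-type ether dst 00:10:18:aa:bb:cc loc 0 action -2'
followed by 'ethtool -s eth0 wol f'.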
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 323e395f f50932cc
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
-* Copyright (c) 2014-2019 Broadcom
+* Copyright (c) 2014-2020 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet: " fmt
@@ -65,6 +65,9 @@
#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \
TOTAL_DESC * DMA_DESC_SIZE)
/* Forward declarations */
static void bcmgenet_set_rx_mode(struct net_device *dev);
static inline void bcmgenet_writel(u32 value, void __iomem *offset)
{
/* MIPS chips strapped for BE will automagically configure the
@@ -456,6 +459,384 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
genet_dma_ring_regs[r]);
}
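/* Note on the enable-register layout used by the helpers below:
 * HFB_FLT_ENABLE_V3PLUS holds the enable bits for filters 32-63 and the
 * following word (HFB_FLT_ENABLE_V3PLUS + sizeof(u32)) holds filters
 * 0-31, which is why sizeof(u32) is added to the offset when
 * f_index < 32.
 */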
static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
u32 f_index)
{
u32 offset;
u32 reg;
offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
reg = bcmgenet_hfb_reg_readl(priv, offset);
return !!(reg & (1 << (f_index % 32)));
}
static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
u32 offset;
u32 reg;
offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
reg = bcmgenet_hfb_reg_readl(priv, offset);
reg |= (1 << (f_index % 32));
bcmgenet_hfb_reg_writel(priv, reg, offset);
reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
reg |= RBUF_HFB_EN;
bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
}
static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
u32 offset, reg, reg1;
offset = HFB_FLT_ENABLE_V3PLUS;
reg = bcmgenet_hfb_reg_readl(priv, offset);
reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
if (f_index < 32) {
reg1 &= ~(1 << (f_index % 32));
bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
} else {
reg &= ~(1 << (f_index % 32));
bcmgenet_hfb_reg_writel(priv, reg, offset);
}
if (!reg && !reg1) {
reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
reg &= ~RBUF_HFB_EN;
bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
}
}
static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
u32 f_index, u32 rx_queue)
{
u32 offset;
u32 reg;
offset = f_index / 8;
reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
reg &= ~(0xF << (4 * (f_index % 8)));
reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
}
static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
u32 f_index, u32 f_length)
{
u32 offset;
u32 reg;
offset = HFB_FLT_LEN_V3PLUS +
((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
sizeof(u32);
reg = bcmgenet_hfb_reg_readl(priv, offset);
reg &= ~(0xFF << (8 * (f_index % 4)));
reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
bcmgenet_hfb_reg_writel(priv, reg, offset);
}
static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
{
u32 f_index;
/* First MAX_NUM_OF_FS_RULES are reserved for Rx NFC filters */
for (f_index = MAX_NUM_OF_FS_RULES;
f_index < priv->hw_params->hfb_filter_cnt; f_index++)
if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
return f_index;
return -ENOMEM;
}
static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
{
while (size) {
switch (*(unsigned char *)mask++) {
case 0x00:
case 0x0f:
case 0xf0:
case 0xff:
size--;
continue;
default:
return -EINVAL;
}
}
return 0;
}
#define VALIDATE_MASK(x) \
bcmgenet_hfb_validate_mask(&(x), sizeof(x))
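/* bcmgenet_hfb_insert_data() packs two bytes of frame data into each
 * 32-bit filter word: a byte at an even frame offset lands in bits 15:8
 * (match-enable bits 19:18) and a byte at an odd offset in bits 7:0
 * (match-enable bits 17:16); a 0xf0 or 0x0f mask enables only the
 * corresponding nibble.
 */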
static int bcmgenet_hfb_insert_data(u32 *f, int offset,
void *val, void *mask, size_t size)
{
int index;
u32 tmp;
index = offset / 2;
tmp = f[index];
while (size--) {
if (offset++ & 1) {
tmp &= ~0x300FF;
tmp |= (*(unsigned char *)val++);
switch ((*(unsigned char *)mask++)) {
case 0xFF:
tmp |= 0x30000;
break;
case 0xF0:
tmp |= 0x20000;
break;
case 0x0F:
tmp |= 0x10000;
break;
}
f[index++] = tmp;
if (size)
tmp = f[index];
} else {
tmp &= ~0xCFF00;
tmp |= (*(unsigned char *)val++) << 8;
switch ((*(unsigned char *)mask++)) {
case 0xFF:
tmp |= 0xC0000;
break;
case 0xF0:
tmp |= 0x80000;
break;
case 0x0F:
tmp |= 0x40000;
break;
}
if (!size)
f[index] = tmp;
}
}
return 0;
}
static void bcmgenet_hfb_set_filter(struct bcmgenet_priv *priv, u32 *f_data,
u32 f_length, u32 rx_queue, int f_index)
{
u32 base = f_index * priv->hw_params->hfb_filter_size;
int i;
for (i = 0; i < f_length; i++)
bcmgenet_hfb_writel(priv, f_data[i], (base + i) * sizeof(u32));
bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length);
bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue);
}
static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
struct bcmgenet_rxnfc_rule *rule)
{
struct ethtool_rx_flow_spec *fs = &rule->fs;
int err = 0, offset = 0, f_length = 0;
u16 val_16, mask_16;
u8 val_8, mask_8;
size_t size;
u32 *f_data;
f_data = kcalloc(priv->hw_params->hfb_filter_size, sizeof(u32),
GFP_KERNEL);
if (!f_data)
return -ENOMEM;
if (fs->flow_type & FLOW_MAC_EXT) {
bcmgenet_hfb_insert_data(f_data, 0,
&fs->h_ext.h_dest, &fs->m_ext.h_dest,
sizeof(fs->h_ext.h_dest));
}
if (fs->flow_type & FLOW_EXT) {
if (fs->m_ext.vlan_etype ||
fs->m_ext.vlan_tci) {
bcmgenet_hfb_insert_data(f_data, 12,
&fs->h_ext.vlan_etype,
&fs->m_ext.vlan_etype,
sizeof(fs->h_ext.vlan_etype));
bcmgenet_hfb_insert_data(f_data, 14,
&fs->h_ext.vlan_tci,
&fs->m_ext.vlan_tci,
sizeof(fs->h_ext.vlan_tci));
offset += VLAN_HLEN;
f_length += DIV_ROUND_UP(VLAN_HLEN, 2);
}
}
switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
case ETHER_FLOW:
f_length += DIV_ROUND_UP(ETH_HLEN, 2);
bcmgenet_hfb_insert_data(f_data, 0,
&fs->h_u.ether_spec.h_dest,
&fs->m_u.ether_spec.h_dest,
sizeof(fs->h_u.ether_spec.h_dest));
bcmgenet_hfb_insert_data(f_data, ETH_ALEN,
&fs->h_u.ether_spec.h_source,
&fs->m_u.ether_spec.h_source,
sizeof(fs->h_u.ether_spec.h_source));
bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset,
&fs->h_u.ether_spec.h_proto,
&fs->m_u.ether_spec.h_proto,
sizeof(fs->h_u.ether_spec.h_proto));
break;
case IP_USER_FLOW:
f_length += DIV_ROUND_UP(ETH_HLEN + 20, 2);
/* Specify IP Ether Type */
val_16 = htons(ETH_P_IP);
mask_16 = 0xFFFF;
bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset,
&val_16, &mask_16, sizeof(val_16));
bcmgenet_hfb_insert_data(f_data, 15 + offset,
&fs->h_u.usr_ip4_spec.tos,
&fs->m_u.usr_ip4_spec.tos,
sizeof(fs->h_u.usr_ip4_spec.tos));
bcmgenet_hfb_insert_data(f_data, 23 + offset,
&fs->h_u.usr_ip4_spec.proto,
&fs->m_u.usr_ip4_spec.proto,
sizeof(fs->h_u.usr_ip4_spec.proto));
bcmgenet_hfb_insert_data(f_data, 26 + offset,
&fs->h_u.usr_ip4_spec.ip4src,
&fs->m_u.usr_ip4_spec.ip4src,
sizeof(fs->h_u.usr_ip4_spec.ip4src));
bcmgenet_hfb_insert_data(f_data, 30 + offset,
&fs->h_u.usr_ip4_spec.ip4dst,
&fs->m_u.usr_ip4_spec.ip4dst,
sizeof(fs->h_u.usr_ip4_spec.ip4dst));
if (!fs->m_u.usr_ip4_spec.l4_4_bytes)
break;
/* Only supports 20 byte IPv4 header */
val_8 = 0x45;
mask_8 = 0xFF;
bcmgenet_hfb_insert_data(f_data, ETH_HLEN + offset,
&val_8, &mask_8,
sizeof(val_8));
size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes);
bcmgenet_hfb_insert_data(f_data,
ETH_HLEN + 20 + offset,
&fs->h_u.usr_ip4_spec.l4_4_bytes,
&fs->m_u.usr_ip4_spec.l4_4_bytes,
size);
f_length += DIV_ROUND_UP(size, 2);
break;
}
if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
/* Ring 0 flows can be handled by the default Descriptor Ring
* We'll map them to ring 0, but don't enable the filter
*/
bcmgenet_hfb_set_filter(priv, f_data, f_length, 0,
fs->location);
rule->state = BCMGENET_RXNFC_STATE_DISABLED;
} else {
/* Other Rx rings are direct mapped here */
bcmgenet_hfb_set_filter(priv, f_data, f_length,
fs->ring_cookie, fs->location);
bcmgenet_hfb_enable_filter(priv, fs->location);
rule->state = BCMGENET_RXNFC_STATE_ENABLED;
}
kfree(f_data);
return err;
}
/* bcmgenet_hfb_add_filter
*
* Add new filter to Hardware Filter Block to match and direct Rx traffic to
* desired Rx queue.
*
* f_data is an array of unsigned 32-bit integers where each 32-bit integer
* provides filter data for 2 bytes (4 nibbles) of Rx frame:
*
* bits 31:20 - unused
* bit 19 - nibble 0 match enable
* bit 18 - nibble 1 match enable
* bit 17 - nibble 2 match enable
* bit 16 - nibble 3 match enable
* bits 15:12 - nibble 0 data
* bits 11:8 - nibble 1 data
* bits 7:4 - nibble 2 data
* bits 3:0 - nibble 3 data
*
* Example:
* In order to match:
* - Ethernet frame type = 0x0800 (IP)
* - IP version field = 4
* - IP protocol field = 0x11 (UDP)
*
* The following filter is needed:
* u32 hfb_filter_ipv4_udp[] = {
* Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000,
* Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000,
* Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011,
* };
*
* To add the filter to HFB and direct the traffic to Rx queue 0, call:
* bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
* ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
*/
int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
u32 f_length, u32 rx_queue)
{
int f_index;
f_index = bcmgenet_hfb_find_unused_filter(priv);
if (f_index < 0)
return -ENOMEM;
if (f_length > priv->hw_params->hfb_filter_size)
return -EINVAL;
bcmgenet_hfb_set_filter(priv, f_data, f_length, rx_queue, f_index);
bcmgenet_hfb_enable_filter(priv, f_index);
return 0;
}
/* bcmgenet_hfb_clear
*
* Clear Hardware Filter Block and disable all filtering.
*/
static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
{
u32 i;
bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
bcmgenet_rdma_writel(priv, 0x0, i);
for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
bcmgenet_hfb_reg_writel(priv, 0x0,
HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
for (i = 0; i < priv->hw_params->hfb_filter_cnt *
priv->hw_params->hfb_filter_size; i++)
bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
}
static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
{
int i;
if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
return;
INIT_LIST_HEAD(&priv->rxnfc_list);
for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
}
bcmgenet_hfb_clear(priv);
}
static int bcmgenet_begin(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -1040,6 +1421,229 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
return phy_ethtool_set_eee(dev->phydev, e);
}
static int bcmgenet_validate_flow(struct net_device *dev,
struct ethtool_rxnfc *cmd)
{
struct ethtool_usrip4_spec *l4_mask;
struct ethhdr *eth_mask;
if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) {
netdev_err(dev, "rxnfc: Invalid location (%d)\n",
cmd->fs.location);
return -EINVAL;
}
switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
case IP_USER_FLOW:
l4_mask = &cmd->fs.m_u.usr_ip4_spec;
/* don't allow mask which isn't valid */
if (VALIDATE_MASK(l4_mask->ip4src) ||
VALIDATE_MASK(l4_mask->ip4dst) ||
VALIDATE_MASK(l4_mask->l4_4_bytes) ||
VALIDATE_MASK(l4_mask->proto) ||
VALIDATE_MASK(l4_mask->ip_ver) ||
VALIDATE_MASK(l4_mask->tos)) {
netdev_err(dev, "rxnfc: Unsupported mask\n");
return -EINVAL;
}
break;
case ETHER_FLOW:
eth_mask = &cmd->fs.m_u.ether_spec;
/* don't allow mask which isn't valid */
if (VALIDATE_MASK(eth_mask->h_dest) ||
VALIDATE_MASK(eth_mask->h_source) ||
VALIDATE_MASK(eth_mask->h_proto)) {
netdev_err(dev, "rxnfc: Unsupported mask\n");
return -EINVAL;
}
break;
default:
netdev_err(dev, "rxnfc: Unsupported flow type (0x%x)\n",
cmd->fs.flow_type);
return -EINVAL;
}
if ((cmd->fs.flow_type & FLOW_EXT)) {
/* don't allow mask which isn't valid */
if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) ||
VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) {
netdev_err(dev, "rxnfc: Unsupported mask\n");
return -EINVAL;
}
if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) {
netdev_err(dev, "rxnfc: user-def not supported\n");
return -EINVAL;
}
}
if ((cmd->fs.flow_type & FLOW_MAC_EXT)) {
/* don't allow mask which isn't valid */
if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) {
netdev_err(dev, "rxnfc: Unsupported mask\n");
return -EINVAL;
}
}
return 0;
}
static int bcmgenet_insert_flow(struct net_device *dev,
struct ethtool_rxnfc *cmd)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct bcmgenet_rxnfc_rule *loc_rule;
int err;
if (priv->hw_params->hfb_filter_size < 128) {
netdev_err(dev, "rxnfc: Not supported by this device\n");
return -EINVAL;
}
if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) {
netdev_err(dev, "rxnfc: Unsupported action (%llu)\n",
cmd->fs.ring_cookie);
return -EINVAL;
}
err = bcmgenet_validate_flow(dev, cmd);
if (err)
return err;
loc_rule = &priv->rxnfc_rules[cmd->fs.location];
if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED)
list_del(&loc_rule->list);
loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memcpy(&loc_rule->fs, &cmd->fs,
sizeof(struct ethtool_rx_flow_spec));
err = bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule);
if (err) {
netdev_err(dev, "rxnfc: Could not install rule (%d)\n",
err);
return err;
}
list_add_tail(&loc_rule->list, &priv->rxnfc_list);
return 0;
}
static int bcmgenet_delete_flow(struct net_device *dev,
struct ethtool_rxnfc *cmd)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct bcmgenet_rxnfc_rule *rule;
int err = 0;
if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
return -EINVAL;
rule = &priv->rxnfc_rules[cmd->fs.location];
if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
err = -ENOENT;
goto out;
}
if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
list_del(&rule->list);
rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));
out:
return err;
}
static int bcmgenet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
int err = 0;
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
err = bcmgenet_insert_flow(dev, cmd);
break;
case ETHTOOL_SRXCLSRLDEL:
err = bcmgenet_delete_flow(dev, cmd);
break;
default:
netdev_warn(priv->dev, "Unsupported ethtool command. (%d)\n",
cmd->cmd);
return -EINVAL;
}
return err;
}
static int bcmgenet_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
int loc)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct bcmgenet_rxnfc_rule *rule;
int err = 0;
if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
return -EINVAL;
rule = &priv->rxnfc_rules[loc];
if (rule->state == BCMGENET_RXNFC_STATE_UNUSED)
err = -ENOENT;
else
memcpy(&cmd->fs, &rule->fs,
sizeof(struct ethtool_rx_flow_spec));
return err;
}
static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv)
{
struct list_head *pos;
int res = 0;
list_for_each(pos, &priv->rxnfc_list)
res++;
return res;
}
static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct bcmgenet_rxnfc_rule *rule;
int err = 0;
int i = 0;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = priv->hw_params->rx_queues ?: 1;
break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = bcmgenet_get_num_flows(priv);
cmd->data = MAX_NUM_OF_FS_RULES;
break;
case ETHTOOL_GRXCLSRULE:
err = bcmgenet_get_flow(dev, cmd, cmd->fs.location);
break;
case ETHTOOL_GRXCLSRLALL:
list_for_each_entry(rule, &priv->rxnfc_list, list)
if (i < cmd->rule_cnt)
rule_locs[i++] = rule->fs.location;
cmd->rule_cnt = i;
cmd->data = MAX_NUM_OF_FS_RULES;
break;
default:
err = -EOPNOTSUPP;
break;
}
return err;
}
/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
@@ -1064,6 +1668,8 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = {
.get_link_ksettings = bcmgenet_get_link_ksettings,
.set_link_ksettings = bcmgenet_set_link_ksettings,
.get_ts_info = ethtool_op_get_ts_info,
.get_rxnfc = bcmgenet_get_rxnfc,
.set_rxnfc = bcmgenet_set_rxnfc,
};
/* Power down the unimac, based on mode. */
@@ -2756,43 +3362,12 @@ static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
-/* bcmgenet_hfb_clear
-*
-* Clear Hardware Filter Block and disable all filtering.
-*/
-static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
-{
-u32 i;
-bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
-bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
-bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
-for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
-bcmgenet_rdma_writel(priv, 0x0, i);
-for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
-bcmgenet_hfb_reg_writel(priv, 0x0,
-HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
-for (i = 0; i < priv->hw_params->hfb_filter_cnt *
-priv->hw_params->hfb_filter_size; i++)
-bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
-}
-static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
-{
-if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
-return;
-bcmgenet_hfb_clear(priv);
-}
static void bcmgenet_netif_start(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
/* Start the network engine */
bcmgenet_set_rx_mode(dev);
bcmgenet_enable_rx_napi(priv);
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
@@ -3604,8 +4179,8 @@ static int bcmgenet_resume(struct device *d)
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
unsigned long dma_ctrl;
u32 offset, reg;
int ret;
-u32 reg;
if (!netif_running(dev))
return 0;
@@ -3615,6 +4190,10 @@ static int bcmgenet_resume(struct device *d)
if (ret)
return ret;
/* From WOL-enabled suspend, switch to regular clock */
if (device_may_wakeup(d) && priv->wolopts)
bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
/* If this is an internal GPHY, power it back on now, before UniMAC is
* brought out of reset as absolutely no UniMAC activity is allowed
*/
@@ -3625,10 +4204,6 @@ static int bcmgenet_resume(struct device *d)
init_umac(priv);
-/* From WOL-enabled suspend, switch to regular clock */
-if (priv->wolopts)
-clk_disable_unprepare(priv->clk_wol);
phy_init_hw(dev->phydev);
/* Speed settings must be restored */
@@ -3640,15 +4215,17 @@ static int bcmgenet_resume(struct device *d)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
offset = HFB_FLT_ENABLE_V3PLUS;
bcmgenet_hfb_reg_writel(priv, priv->hfb_en[1], offset);
bcmgenet_hfb_reg_writel(priv, priv->hfb_en[2], offset + sizeof(u32));
bcmgenet_hfb_reg_writel(priv, priv->hfb_en[0], HFB_CTRL);
if (priv->internal_phy) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
reg |= EXT_ENERGY_DET_MASK;
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
}
-if (priv->wolopts)
-bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
/* Disable RX/TX DMA and flush TX queues */
dma_ctrl = bcmgenet_dma_disable(priv);
@@ -3686,6 +4263,7 @@ static int bcmgenet_suspend(struct device *d)
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
int ret = 0;
u32 offset;
if (!netif_running(dev))
return 0;
@@ -3697,13 +4275,18 @@ static int bcmgenet_suspend(struct device *d)
if (!device_may_wakeup(d))
phy_suspend(dev->phydev);
/* Preserve filter state and disable filtering */
priv->hfb_en[0] = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
offset = HFB_FLT_ENABLE_V3PLUS;
priv->hfb_en[1] = bcmgenet_hfb_reg_readl(priv, offset);
priv->hfb_en[2] = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
/* Prepare the device for Wake-on-LAN and switch to the slow clock */
-if (device_may_wakeup(d) && priv->wolopts) {
+if (device_may_wakeup(d) && priv->wolopts)
ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
-clk_prepare_enable(priv->clk_wol);
-} else if (priv->internal_phy) {
+else if (priv->internal_phy)
ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
-}
/* Turn off the clocks */
clk_disable_unprepare(priv->clk);
...
/* SPDX-License-Identifier: GPL-2.0-only */
/*
-* Copyright (c) 2014-2017 Broadcom
+* Copyright (c) 2014-2020 Broadcom
*/
#ifndef __BCMGENET_H__
@@ -14,6 +14,7 @@
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include <linux/dim.h>
#include <linux/ethtool.h>
/* total number of Buffer Descriptors, same for Rx/Tx */
#define TOTAL_DESC 256
@@ -31,6 +32,7 @@
#define DMA_MAX_BURST_LENGTH 0x10
/* misc. configuration */
#define MAX_NUM_OF_FS_RULES 16
#define CLEAR_ALL_HFB 0xFF
#define DMA_FC_THRESH_HI (TOTAL_DESC >> 4)
#define DMA_FC_THRESH_LO 5
@@ -608,6 +610,18 @@ struct bcmgenet_rx_ring {
struct bcmgenet_priv *priv;
};
enum bcmgenet_rxnfc_state {
BCMGENET_RXNFC_STATE_UNUSED = 0,
BCMGENET_RXNFC_STATE_DISABLED,
BCMGENET_RXNFC_STATE_ENABLED
};
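/* Rules start UNUSED; bcmgenet_hfb_create_rxnfc_filter() leaves a rule
 * DISABLED when it targets ring 0 or is a wake-up rule
 * (ring_cookie == RX_CLS_FLOW_WAKE), in which case the filter is armed
 * only from the WoL suspend path, and marks it ENABLED when the filter
 * is armed immediately for Rx steering.
 */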
struct bcmgenet_rxnfc_rule {
struct list_head list;
struct ethtool_rx_flow_spec fs;
enum bcmgenet_rxnfc_state state;
};
/* device context */
struct bcmgenet_priv {
void __iomem *base;
@@ -626,6 +640,8 @@ struct bcmgenet_priv {
struct enet_cb *rx_cbs;
unsigned int num_rx_bds;
unsigned int rx_buf_len;
struct bcmgenet_rxnfc_rule rxnfc_rules[MAX_NUM_OF_FS_RULES];
struct list_head rxnfc_list;
struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1];
@@ -676,6 +692,9 @@ struct bcmgenet_priv {
/* WOL */
struct clk *clk_wol;
u32 wolopts;
u8 sopass[SOPASS_MAX];
bool wol_active;
u32 hfb_en[3];
struct bcmgenet_mib_counters mib;
...
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
*
-* Copyright (c) 2014-2017 Broadcom
+* Copyright (c) 2014-2020 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet_wol: " fmt
@@ -41,18 +41,13 @@
void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
-u32 reg;
-wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
+wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
wol->wolopts = priv->wolopts;
memset(wol->sopass, 0, sizeof(wol->sopass));
-if (wol->wolopts & WAKE_MAGICSECURE) {
-reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_MS);
-put_unaligned_be16(reg, &wol->sopass[0]);
-reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_LS);
-put_unaligned_be32(reg, &wol->sopass[2]);
-}
+if (wol->wolopts & WAKE_MAGICSECURE)
+memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}
/* ethtool function - set WOL (Wake on LAN) settings.
@@ -62,25 +57,15 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
-u32 reg;
if (!device_can_wakeup(kdev))
return -ENOTSUPP;
-if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE))
+if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER))
return -EINVAL;
-reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
-if (wol->wolopts & WAKE_MAGICSECURE) {
-bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
-UMAC_MPD_PW_MS);
-bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
-UMAC_MPD_PW_LS);
-reg |= MPD_PW_EN;
-} else {
-reg &= ~MPD_PW_EN;
-}
-bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+if (wol->wolopts & WAKE_MAGICSECURE)
+memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));
/* Flag the device and relevant IRQ as wakeup capable */
if (wol->wolopts) {
@@ -120,12 +105,21 @@ static int bcmgenet_poll_wol_status(struct bcmgenet_priv *priv)
return retries;
}
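/* bcmgenet_set_mpd_password() below splits the 6-byte SecureOn password
 * across two UniMAC registers: sopass bytes 0-1 go to UMAC_MPD_PW_MS
 * and bytes 2-5 to UMAC_MPD_PW_LS, both big-endian.
 */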
static void bcmgenet_set_mpd_password(struct bcmgenet_priv *priv)
{
bcmgenet_umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
UMAC_MPD_PW_MS);
bcmgenet_umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
UMAC_MPD_PW_LS);
}
int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode)
{
struct net_device *dev = priv->dev;
struct bcmgenet_rxnfc_rule *rule;
u32 reg, hfb_ctrl_reg, hfb_enable = 0;
int retries = 0;
-u32 reg;
if (mode != GENET_POWER_WOL_MAGIC) {
netif_err(priv, wol, dev, "unsupported mode: %d\n", mode);
@@ -142,22 +136,48 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
mdelay(10);
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
reg |= MPD_EN;
if (priv->wolopts & WAKE_MAGICSECURE) {
bcmgenet_set_mpd_password(priv);
reg |= MPD_PW_EN;
}
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
}
hfb_ctrl_reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
if (priv->wolopts & WAKE_FILTER) {
list_for_each_entry(rule, &priv->rxnfc_list, list)
if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE)
hfb_enable |= (1 << rule->fs.location);
reg = (hfb_ctrl_reg & ~RBUF_HFB_EN) | RBUF_ACPI_EN;
bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
}
/* Do not leave UniMAC in MPD mode only */
retries = bcmgenet_poll_wol_status(priv);
if (retries < 0) {
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
-reg &= ~MPD_EN;
+reg &= ~(MPD_EN | MPD_PW_EN);
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL);
return retries;
}
netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n",
retries);
clk_prepare_enable(priv->clk_wol);
priv->wol_active = 1;
if (hfb_enable) {
bcmgenet_hfb_reg_writel(priv, hfb_enable,
HFB_FLT_ENABLE_V3PLUS + 4);
hfb_ctrl_reg = RBUF_HFB_EN | RBUF_ACPI_EN;
bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL);
}
/* Enable CRC forward */
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
priv->crc_fwd_en = 1;
@@ -186,12 +206,22 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
return;
}
if (!priv->wol_active)
return; /* failed to suspend so skip the rest */
priv->wol_active = 0;
clk_disable_unprepare(priv->clk_wol);
/* Disable Magic Packet Detection */
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
-if (!(reg & MPD_EN))
-return;	/* already powered up so skip the rest */
-reg &= ~MPD_EN;
+reg &= ~(MPD_EN | MPD_PW_EN);
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
/* Disable WAKE_FILTER Detection */
reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN);
bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
/* Disable CRC Forward */
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~CMD_CRC_FWD;
...