Commit 7381f93a authored by David S. Miller

Merge branch 'fix-some-bugs-for-HNS3-driver'

Peng Li says:

====================
fix some bugs for HNS3 driver

This patchset fixes some bugs for HNS3 driver:
[Patch 1/11 - 5/11] fix various bugs reported by the Hisilicon test team.
[Patch 6/11 - 7/11] fix bugs about interrupt coalescing self-adaptive
function.
[Patch 8/11 - 11/11] fix bugs about ethtool_ops.get_link_ksettings.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 14530746 1931dc20
...@@ -411,6 +411,10 @@ struct hnae3_ae_ops { ...@@ -411,6 +411,10 @@ struct hnae3_ae_ops {
u32 *flowctrl_adv); u32 *flowctrl_adv);
int (*set_led_id)(struct hnae3_handle *handle, int (*set_led_id)(struct hnae3_handle *handle,
enum ethtool_phys_id_state status); enum ethtool_phys_id_state status);
void (*get_link_mode)(struct hnae3_handle *handle,
unsigned long *supported,
unsigned long *advertising);
void (*get_port_type)(struct hnae3_handle *handle, u8 *port_type);
}; };
struct hnae3_dcb_ops { struct hnae3_dcb_ops {
......
...@@ -214,6 +214,7 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector, ...@@ -214,6 +214,7 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
/* Default: disable RL */ /* Default: disable RL */
h->kinfo.int_rl_setting = 0; h->kinfo.int_rl_setting = 0;
tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW; tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW; tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
} }
...@@ -1404,11 +1405,15 @@ static int hns3_vlan_rx_add_vid(struct net_device *netdev, ...@@ -1404,11 +1405,15 @@ static int hns3_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto, u16 vid) __be16 proto, u16 vid)
{ {
struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_handle *h = hns3_get_handle(netdev);
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret = -EIO; int ret = -EIO;
if (h->ae_algo->ops->set_vlan_filter) if (h->ae_algo->ops->set_vlan_filter)
ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
if (!ret)
set_bit(vid, priv->active_vlans);
return ret; return ret;
} }
...@@ -1416,14 +1421,32 @@ static int hns3_vlan_rx_kill_vid(struct net_device *netdev, ...@@ -1416,14 +1421,32 @@ static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid) __be16 proto, u16 vid)
{ {
struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_handle *h = hns3_get_handle(netdev);
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret = -EIO; int ret = -EIO;
if (h->ae_algo->ops->set_vlan_filter) if (h->ae_algo->ops->set_vlan_filter)
ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
if (!ret)
clear_bit(vid, priv->active_vlans);
return ret; return ret;
} }
/* Re-program every VLAN filter recorded in priv->active_vlans into the
 * hardware; used after a PF reset has cleared the hardware VLAN table
 * (see the caller in hns3_reset_notify_init_enet()).  A failure for one
 * VLAN id is only logged so the remaining ids are still restored.
 */
static void hns3_restore_vlan(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
u16 vid;
int ret;
/* walk every VLAN id previously added via ndo_vlan_rx_add_vid */
for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
if (ret)
netdev_warn(netdev, "Restore vlan: %d filter, ret:%d\n",
vid, ret);
}
}
static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
u8 qos, __be16 vlan_proto) u8 qos, __be16 vlan_proto)
{ {
...@@ -2383,15 +2406,15 @@ int hns3_clean_rx_ring( ...@@ -2383,15 +2406,15 @@ int hns3_clean_rx_ring(
static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
{ {
#define HNS3_RX_ULTRA_PACKET_RATE 40000 struct hns3_enet_tqp_vector *tqp_vector =
ring_group->ring->tqp_vector;
enum hns3_flow_level_range new_flow_level; enum hns3_flow_level_range new_flow_level;
struct hns3_enet_tqp_vector *tqp_vector; int packets_per_msecs;
int packets_per_secs; int bytes_per_msecs;
int bytes_per_usecs; u32 time_passed_ms;
u16 new_int_gl; u16 new_int_gl;
int usecs;
if (!ring_group->coal.int_gl) if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies)
return false; return false;
if (ring_group->total_packets == 0) { if (ring_group->total_packets == 0) {
...@@ -2408,33 +2431,44 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) ...@@ -2408,33 +2431,44 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
*/ */
new_flow_level = ring_group->coal.flow_level; new_flow_level = ring_group->coal.flow_level;
new_int_gl = ring_group->coal.int_gl; new_int_gl = ring_group->coal.int_gl;
tqp_vector = ring_group->ring->tqp_vector; time_passed_ms =
usecs = (ring_group->coal.int_gl << 1); jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
bytes_per_usecs = ring_group->total_bytes / usecs;
/* 1000000 microseconds */ if (!time_passed_ms)
packets_per_secs = ring_group->total_packets * 1000000 / usecs; return false;
do_div(ring_group->total_packets, time_passed_ms);
packets_per_msecs = ring_group->total_packets;
do_div(ring_group->total_bytes, time_passed_ms);
bytes_per_msecs = ring_group->total_bytes;
#define HNS3_RX_LOW_BYTE_RATE 10000
#define HNS3_RX_MID_BYTE_RATE 20000
switch (new_flow_level) { switch (new_flow_level) {
case HNS3_FLOW_LOW: case HNS3_FLOW_LOW:
if (bytes_per_usecs > 10) if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
new_flow_level = HNS3_FLOW_MID; new_flow_level = HNS3_FLOW_MID;
break; break;
case HNS3_FLOW_MID: case HNS3_FLOW_MID:
if (bytes_per_usecs > 20) if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
new_flow_level = HNS3_FLOW_HIGH; new_flow_level = HNS3_FLOW_HIGH;
else if (bytes_per_usecs <= 10) else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
new_flow_level = HNS3_FLOW_LOW; new_flow_level = HNS3_FLOW_LOW;
break; break;
case HNS3_FLOW_HIGH: case HNS3_FLOW_HIGH:
case HNS3_FLOW_ULTRA: case HNS3_FLOW_ULTRA:
default: default:
if (bytes_per_usecs <= 20) if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
new_flow_level = HNS3_FLOW_MID; new_flow_level = HNS3_FLOW_MID;
break; break;
} }
if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) && #define HNS3_RX_ULTRA_PACKET_RATE 40
(&tqp_vector->rx_group == ring_group))
if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
&tqp_vector->rx_group == ring_group)
new_flow_level = HNS3_FLOW_ULTRA; new_flow_level = HNS3_FLOW_ULTRA;
switch (new_flow_level) { switch (new_flow_level) {
...@@ -2470,6 +2504,11 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) ...@@ -2470,6 +2504,11 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
bool rx_update, tx_update; bool rx_update, tx_update;
if (tqp_vector->int_adapt_down > 0) {
tqp_vector->int_adapt_down--;
return;
}
if (rx_group->coal.gl_adapt_enable) { if (rx_group->coal.gl_adapt_enable) {
rx_update = hns3_get_new_int_gl(rx_group); rx_update = hns3_get_new_int_gl(rx_group);
if (rx_update) if (rx_update)
...@@ -2483,6 +2522,9 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) ...@@ -2483,6 +2522,9 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
hns3_set_vector_coalesce_tx_gl(tqp_vector, hns3_set_vector_coalesce_tx_gl(tqp_vector,
tx_group->coal.int_gl); tx_group->coal.int_gl);
} }
tqp_vector->last_jiffies = jiffies;
tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
} }
static int hns3_nic_common_poll(struct napi_struct *napi, int budget) static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
...@@ -3341,6 +3383,10 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) ...@@ -3341,6 +3383,10 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
hns3_nic_set_rx_mode(netdev); hns3_nic_set_rx_mode(netdev);
hns3_recover_hw_addr(netdev); hns3_recover_hw_addr(netdev);
/* Hardware table is only clear when pf resets */
if (!(handle->flags & HNAE3_SUPPORT_VF))
hns3_restore_vlan(netdev);
/* Carrier off reporting is important to ethtool even BEFORE open */ /* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev); netif_carrier_off(netdev);
......
...@@ -10,6 +10,8 @@ ...@@ -10,6 +10,8 @@
#ifndef __HNS3_ENET_H #ifndef __HNS3_ENET_H
#define __HNS3_ENET_H #define __HNS3_ENET_H
#include <linux/if_vlan.h>
#include "hnae3.h" #include "hnae3.h"
extern const char hns3_driver_version[]; extern const char hns3_driver_version[];
...@@ -460,6 +462,8 @@ enum hns3_link_mode_bits { ...@@ -460,6 +462,8 @@ enum hns3_link_mode_bits {
#define HNS3_INT_RL_MAX 0x00EC #define HNS3_INT_RL_MAX 0x00EC
#define HNS3_INT_RL_ENABLE_MASK 0x40 #define HNS3_INT_RL_ENABLE_MASK 0x40
#define HNS3_INT_ADAPT_DOWN_START 100
struct hns3_enet_coalesce { struct hns3_enet_coalesce {
u16 int_gl; u16 int_gl;
u8 gl_adapt_enable; u8 gl_adapt_enable;
...@@ -495,6 +499,7 @@ struct hns3_enet_tqp_vector { ...@@ -495,6 +499,7 @@ struct hns3_enet_tqp_vector {
/* when 0 should adjust interrupt coalesce parameter */ /* when 0 should adjust interrupt coalesce parameter */
u8 int_adapt_down; u8 int_adapt_down;
unsigned long last_jiffies;
} ____cacheline_internodealigned_in_smp; } ____cacheline_internodealigned_in_smp;
enum hns3_udp_tnl_type { enum hns3_udp_tnl_type {
...@@ -539,6 +544,7 @@ struct hns3_nic_priv { ...@@ -539,6 +544,7 @@ struct hns3_nic_priv {
struct notifier_block notifier_block; struct notifier_block notifier_block;
/* Vxlan/Geneve information */ /* Vxlan/Geneve information */
struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX]; struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX];
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
}; };
union l3_hdr_info { union l3_hdr_info {
......
...@@ -74,19 +74,6 @@ struct hns3_link_mode_mapping { ...@@ -74,19 +74,6 @@ struct hns3_link_mode_mapping {
u32 ethtool_link_mode; u32 ethtool_link_mode;
}; };
/* Mapping from driver-private HNS3_LM_* link-mode bits to the
 * corresponding ETHTOOL_LINK_MODE_* bit numbers; consumed by
 * hns3_driv_to_eth_caps() when filling ethtool link-mode masks.
 */
static const struct hns3_link_mode_mapping hns3_lm_map[] = {
{HNS3_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT},
{HNS3_LM_AUTONEG_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT},
{HNS3_LM_TP_BIT, ETHTOOL_LINK_MODE_TP_BIT},
{HNS3_LM_PAUSE_BIT, ETHTOOL_LINK_MODE_Pause_BIT},
{HNS3_LM_BACKPLANE_BIT, ETHTOOL_LINK_MODE_Backplane_BIT},
{HNS3_LM_10BASET_HALF_BIT, ETHTOOL_LINK_MODE_10baseT_Half_BIT},
{HNS3_LM_10BASET_FULL_BIT, ETHTOOL_LINK_MODE_10baseT_Full_BIT},
{HNS3_LM_100BASET_HALF_BIT, ETHTOOL_LINK_MODE_100baseT_Half_BIT},
{HNS3_LM_100BASET_FULL_BIT, ETHTOOL_LINK_MODE_100baseT_Full_BIT},
{HNS3_LM_1000BASET_FULL_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
};
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop) static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop)
{ {
struct hnae3_handle *h = hns3_get_handle(ndev); struct hnae3_handle *h = hns3_get_handle(ndev);
...@@ -365,24 +352,6 @@ static void hns3_self_test(struct net_device *ndev, ...@@ -365,24 +352,6 @@ static void hns3_self_test(struct net_device *ndev,
dev_open(ndev); dev_open(ndev);
} }
/* Translate the driver-private HNS3_LM_* capability bits in @caps into
 * the matching ethtool link-mode bits of @cmd via hns3_lm_map, setting
 * either the "advertising" or the "supported" mask depending on
 * @is_advertised.
 */
static void hns3_driv_to_eth_caps(u32 caps, struct ethtool_link_ksettings *cmd,
bool is_advertised)
{
int i;
for (i = 0; i < ARRAY_SIZE(hns3_lm_map); i++) {
/* skip capabilities the caller did not request */
if (!(caps & hns3_lm_map[i].hns3_link_mode))
continue;
if (is_advertised)
__set_bit(hns3_lm_map[i].ethtool_link_mode,
cmd->link_modes.advertising);
else
__set_bit(hns3_lm_map[i].ethtool_link_mode,
cmd->link_modes.supported);
}
}
static int hns3_get_sset_count(struct net_device *netdev, int stringset) static int hns3_get_sset_count(struct net_device *netdev, int stringset)
{ {
struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_handle *h = hns3_get_handle(netdev);
...@@ -594,18 +563,19 @@ static int hns3_get_link_ksettings(struct net_device *netdev, ...@@ -594,18 +563,19 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
{ {
struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_handle *h = hns3_get_handle(netdev);
u32 flowctrl_adv = 0; u32 flowctrl_adv = 0;
u32 supported_caps;
u32 advertised_caps;
u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
u8 link_stat; u8 link_stat;
if (!h->ae_algo || !h->ae_algo->ops) if (!h->ae_algo || !h->ae_algo->ops)
return -EOPNOTSUPP; return -EOPNOTSUPP;
/* 1.auto_neg & speed & duplex from cmd */ /* 1.auto_neg & speed & duplex from cmd */
if (netdev->phydev) if (netdev->phydev) {
phy_ethtool_ksettings_get(netdev->phydev, cmd); phy_ethtool_ksettings_get(netdev->phydev, cmd);
else if (h->ae_algo->ops->get_ksettings_an_result)
return 0;
}
if (h->ae_algo->ops->get_ksettings_an_result)
h->ae_algo->ops->get_ksettings_an_result(h, h->ae_algo->ops->get_ksettings_an_result(h,
&cmd->base.autoneg, &cmd->base.autoneg,
&cmd->base.speed, &cmd->base.speed,
...@@ -619,62 +589,16 @@ static int hns3_get_link_ksettings(struct net_device *netdev, ...@@ -619,62 +589,16 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
cmd->base.duplex = DUPLEX_UNKNOWN; cmd->base.duplex = DUPLEX_UNKNOWN;
} }
/* 2.media_type get from bios parameter block */ /* 2.get link mode and port type*/
if (h->ae_algo->ops->get_media_type) { if (h->ae_algo->ops->get_link_mode)
h->ae_algo->ops->get_media_type(h, &media_type); h->ae_algo->ops->get_link_mode(h,
cmd->link_modes.supported,
switch (media_type) { cmd->link_modes.advertising);
case HNAE3_MEDIA_TYPE_FIBER:
cmd->base.port = PORT_FIBRE;
supported_caps = HNS3_LM_FIBRE_BIT |
HNS3_LM_AUTONEG_BIT |
HNS3_LM_PAUSE_BIT |
HNS3_LM_1000BASET_FULL_BIT;
advertised_caps = supported_caps;
break;
case HNAE3_MEDIA_TYPE_COPPER:
cmd->base.port = PORT_TP;
supported_caps = HNS3_LM_TP_BIT |
HNS3_LM_AUTONEG_BIT |
HNS3_LM_PAUSE_BIT |
HNS3_LM_1000BASET_FULL_BIT |
HNS3_LM_100BASET_FULL_BIT |
HNS3_LM_100BASET_HALF_BIT |
HNS3_LM_10BASET_FULL_BIT |
HNS3_LM_10BASET_HALF_BIT;
advertised_caps = supported_caps;
break;
case HNAE3_MEDIA_TYPE_BACKPLANE:
cmd->base.port = PORT_NONE; cmd->base.port = PORT_NONE;
supported_caps = HNS3_LM_BACKPLANE_BIT | if (h->ae_algo->ops->get_port_type)
HNS3_LM_PAUSE_BIT | h->ae_algo->ops->get_port_type(h,
HNS3_LM_AUTONEG_BIT | &cmd->base.port);
HNS3_LM_1000BASET_FULL_BIT |
HNS3_LM_100BASET_FULL_BIT |
HNS3_LM_100BASET_HALF_BIT |
HNS3_LM_10BASET_FULL_BIT |
HNS3_LM_10BASET_HALF_BIT;
advertised_caps = supported_caps;
break;
case HNAE3_MEDIA_TYPE_UNKNOWN:
default:
cmd->base.port = PORT_OTHER;
supported_caps = 0;
advertised_caps = 0;
break;
}
if (!cmd->base.autoneg)
advertised_caps &= ~HNS3_LM_AUTONEG_BIT;
advertised_caps &= ~HNS3_LM_PAUSE_BIT;
/* now, map driver link modes to ethtool link modes */
hns3_driv_to_eth_caps(supported_caps, cmd, false);
hns3_driv_to_eth_caps(advertised_caps, cmd, true);
}
/* 3.mdix_ctrl&mdix get from phy reg */ /* 3.mdix_ctrl&mdix get from phy reg */
if (h->ae_algo->ops->get_mdix_mode) if (h->ae_algo->ops->get_mdix_mode)
...@@ -1133,6 +1057,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = { ...@@ -1133,6 +1057,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_channels = hns3_get_channels, .get_channels = hns3_get_channels,
.get_coalesce = hns3_get_coalesce, .get_coalesce = hns3_get_coalesce,
.set_coalesce = hns3_set_coalesce, .set_coalesce = hns3_set_coalesce,
.get_link = hns3_get_link,
}; };
static const struct ethtool_ops hns3_ethtool_ops = { static const struct ethtool_ops hns3_ethtool_ops = {
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/io.h> #include <linux/io.h>
#define HCLGE_CMDQ_TX_TIMEOUT 1000 #define HCLGE_CMDQ_TX_TIMEOUT 30000
struct hclge_dev; struct hclge_dev;
struct hclge_desc { struct hclge_desc {
...@@ -414,6 +414,8 @@ struct hclge_pf_res_cmd { ...@@ -414,6 +414,8 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_DEFAULT_SPEED_M GENMASK(23, 16) #define HCLGE_CFG_DEFAULT_SPEED_M GENMASK(23, 16)
#define HCLGE_CFG_RSS_SIZE_S 24 #define HCLGE_CFG_RSS_SIZE_S 24
#define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24) #define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24)
#define HCLGE_CFG_SPEED_ABILITY_S 0
#define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0)
struct hclge_cfg_param_cmd { struct hclge_cfg_param_cmd {
__le32 offset; __le32 offset;
......
...@@ -55,6 +55,8 @@ static const struct pci_device_id ae_algo_pci_tbl[] = { ...@@ -55,6 +55,8 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
{0, } {0, }
}; };
MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
"Mac Loopback test", "Mac Loopback test",
"Serdes Loopback test", "Serdes Loopback test",
...@@ -1024,6 +1026,45 @@ static int hclge_parse_speed(int speed_cmd, int *speed) ...@@ -1024,6 +1026,45 @@ static int hclge_parse_speed(int speed_cmd, int *speed)
return 0; return 0;
} }
/* Populate the cached ethtool "supported" link-mode mask for a fiber
 * port from the HCLGE_SUPPORT_*_BIT flags in @speed_ability (read from
 * the device's static configuration).  FIBRE and Pause are always
 * marked supported for fiber media.
 */
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
u8 speed_ability)
{
unsigned long *supported = hdev->hw.mac.supported;
if (speed_ability & HCLGE_SUPPORT_1G_BIT)
set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
supported);
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
supported);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
supported);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
supported);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
supported);
/* always-supported attributes for fiber media */
set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}
/* Fill the cached link-mode masks from @speed_ability.  Only fiber
 * media is handled here; other media types are left untouched.
 */
static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
}
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{ {
struct hclge_cfg_param_cmd *req; struct hclge_cfg_param_cmd *req;
...@@ -1072,6 +1113,10 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) ...@@ -1072,6 +1113,10 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
req = (struct hclge_cfg_param_cmd *)desc[1].data; req = (struct hclge_cfg_param_cmd *)desc[1].data;
cfg->numa_node_map = __le32_to_cpu(req->param[0]); cfg->numa_node_map = __le32_to_cpu(req->param[0]);
cfg->speed_ability = hnae_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_SPEED_ABILITY_M,
HCLGE_CFG_SPEED_ABILITY_S);
} }
/* hclge_get_cfg: query the static parameter from flash /* hclge_get_cfg: query the static parameter from flash
...@@ -1160,6 +1205,8 @@ static int hclge_configure(struct hclge_dev *hdev) ...@@ -1160,6 +1205,8 @@ static int hclge_configure(struct hclge_dev *hdev)
return ret; return ret;
} }
hclge_parse_link_mode(hdev, cfg.speed_ability);
if ((hdev->tc_max > HNAE3_MAX_TC) || if ((hdev->tc_max > HNAE3_MAX_TC) ||
(hdev->tc_max < 1)) { (hdev->tc_max < 1)) {
dev_warn(&hdev->pdev->dev, "TC num = %d.\n", dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
...@@ -4772,11 +4819,9 @@ static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) ...@@ -4772,11 +4819,9 @@ static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
return hclge_set_vlan_rx_offload_cfg(vport); return hclge_set_vlan_rx_offload_cfg(vport);
} }
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
{ {
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_config_max_frm_size_cmd *req; struct hclge_config_max_frm_size_cmd *req;
struct hclge_dev *hdev = vport->back;
struct hclge_desc desc; struct hclge_desc desc;
int max_frm_size; int max_frm_size;
int ret; int ret;
...@@ -4805,6 +4850,27 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) ...@@ -4805,6 +4850,27 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
return 0; return 0;
} }
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
int ret;
ret = hclge_set_mac_mtu(hdev, new_mtu);
if (ret) {
dev_err(&hdev->pdev->dev,
"Change mtu fail, ret =%d\n", ret);
return ret;
}
ret = hclge_buffer_alloc(hdev);
if (ret)
dev_err(&hdev->pdev->dev,
"Allocate buffer fail, ret =%d\n", ret);
return ret;
}
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
bool enable) bool enable)
{ {
...@@ -4907,6 +4973,43 @@ void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) ...@@ -4907,6 +4973,43 @@ void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
} }
} }
/* Reset a single TQP belonging to a VF.  Sequence: translate the
 * VF-local queue id to the global id, assert the reset via a firmware
 * command, poll the reset status until it completes (bounded by
 * HCLGE_TQP_RESET_TRY_TIMES), then de-assert the reset.  Errors are
 * logged only; the caller receives no status.
 */
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
struct hclge_dev *hdev = vport->back;
int reset_try_times = 0;
int reset_status;
u16 queue_gid;
int ret;
/* VF-local queue id -> global queue id */
queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
if (ret) {
dev_warn(&hdev->pdev->dev,
"Send reset tqp cmd fail, ret = %d\n", ret);
return;
}
reset_try_times = 0;
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
/* Wait for tqp hw reset */
msleep(20);
reset_status = hclge_get_reset_status(hdev, queue_gid);
if (reset_status)
break;
}
/* give up if the hardware never reported reset completion */
if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
return;
}
/* release the queue from reset so it can be used again */
ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
if (ret)
dev_warn(&hdev->pdev->dev,
"Deassert the soft reset fail, ret = %d\n", ret);
}
static u32 hclge_get_fw_version(struct hnae3_handle *handle) static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{ {
struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_vport *vport = hclge_get_vport(handle);
...@@ -5392,11 +5495,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) ...@@ -5392,11 +5495,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
return ret; return ret;
} }
ret = hclge_buffer_alloc(hdev);
if (ret) {
dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
return ret;
}
ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
if (ret) { if (ret) {
...@@ -5503,12 +5601,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) ...@@ -5503,12 +5601,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret; return ret;
} }
ret = hclge_buffer_alloc(hdev);
if (ret) {
dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
return ret;
}
ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
if (ret) { if (ret) {
dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
...@@ -6014,6 +6106,42 @@ static int hclge_update_led_status(struct hclge_dev *hdev) ...@@ -6014,6 +6106,42 @@ static int hclge_update_led_status(struct hclge_dev *hdev)
HCLGE_LED_NO_CHANGE); HCLGE_LED_NO_CHANGE);
} }
static void hclge_get_link_mode(struct hnae3_handle *handle,
unsigned long *supported,
unsigned long *advertising)
{
unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
unsigned int idx = 0;
for (; idx < size; idx++) {
supported[idx] = hdev->hw.mac.supported[idx];
advertising[idx] = hdev->hw.mac.advertising[idx];
}
}
/* hnae3 get_port_type hook: map the cached media type to the ethtool
 * PORT_* value.  Unknown media types report PORT_OTHER.
 */
static void hclge_get_port_type(struct hnae3_handle *handle,
				u8 *port_type)
{
	struct hclge_dev *hdev = hclge_get_vport(handle)->back;
	u8 media = hdev->hw.mac.media_type;

	if (media == HNAE3_MEDIA_TYPE_FIBER)
		*port_type = PORT_FIBRE;
	else if (media == HNAE3_MEDIA_TYPE_COPPER)
		*port_type = PORT_TP;
	else
		*port_type = PORT_OTHER;
}
static const struct hnae3_ae_ops hclge_ops = { static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev, .init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev,
...@@ -6069,6 +6197,8 @@ static const struct hnae3_ae_ops hclge_ops = { ...@@ -6069,6 +6197,8 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_regs_len = hclge_get_regs_len, .get_regs_len = hclge_get_regs_len,
.get_regs = hclge_get_regs, .get_regs = hclge_get_regs,
.set_led_id = hclge_set_led_id, .set_led_id = hclge_set_led_id,
.get_link_mode = hclge_get_link_mode,
.get_port_type = hclge_get_port_type,
}; };
static struct hnae3_ae_algo ae_algo = { static struct hnae3_ae_algo ae_algo = {
......
...@@ -106,6 +106,12 @@ ...@@ -106,6 +106,12 @@
#define HCLGE_MAC_MIN_FRAME 64 #define HCLGE_MAC_MIN_FRAME 64
#define HCLGE_MAC_MAX_FRAME 9728 #define HCLGE_MAC_MAX_FRAME 9728
#define HCLGE_SUPPORT_1G_BIT BIT(0)
#define HCLGE_SUPPORT_10G_BIT BIT(1)
#define HCLGE_SUPPORT_25G_BIT BIT(2)
#define HCLGE_SUPPORT_50G_BIT BIT(3)
#define HCLGE_SUPPORT_100G_BIT BIT(4)
enum HCLGE_DEV_STATE { enum HCLGE_DEV_STATE {
HCLGE_STATE_REINITING, HCLGE_STATE_REINITING,
HCLGE_STATE_DOWN, HCLGE_STATE_DOWN,
...@@ -170,6 +176,8 @@ struct hclge_mac { ...@@ -170,6 +176,8 @@ struct hclge_mac {
struct phy_device *phydev; struct phy_device *phydev;
struct mii_bus *mdio_bus; struct mii_bus *mdio_bus;
phy_interface_t phy_if; phy_interface_t phy_if;
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
}; };
struct hclge_hw { struct hclge_hw {
...@@ -236,6 +244,7 @@ struct hclge_cfg { ...@@ -236,6 +244,7 @@ struct hclge_cfg {
u8 mac_addr[ETH_ALEN]; u8 mac_addr[ETH_ALEN];
u8 default_speed; u8 default_speed;
u32 numa_node_map; u32 numa_node_map;
u8 speed_ability;
}; };
struct hclge_tm_info { struct hclge_tm_info {
...@@ -646,5 +655,6 @@ void hclge_rss_indir_init_cfg(struct hclge_dev *hdev); ...@@ -646,5 +655,6 @@ void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
void hclge_mbx_handler(struct hclge_dev *hdev); void hclge_mbx_handler(struct hclge_dev *hdev);
void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id); void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
int hclge_cfg_flowctrl(struct hclge_dev *hdev); int hclge_cfg_flowctrl(struct hclge_dev *hdev);
#endif #endif
...@@ -309,27 +309,34 @@ static int hclge_get_link_info(struct hclge_vport *vport, ...@@ -309,27 +309,34 @@ static int hclge_get_link_info(struct hclge_vport *vport,
{ {
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
u16 link_status; u16 link_status;
u8 msg_data[2]; u8 msg_data[8];
u8 dest_vfid; u8 dest_vfid;
u16 duplex;
/* mac.link can only be 0 or 1 */ /* mac.link can only be 0 or 1 */
link_status = (u16)hdev->hw.mac.link; link_status = (u16)hdev->hw.mac.link;
duplex = hdev->hw.mac.duplex;
memcpy(&msg_data[0], &link_status, sizeof(u16)); memcpy(&msg_data[0], &link_status, sizeof(u16));
memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
memcpy(&msg_data[6], &duplex, sizeof(u16));
dest_vfid = mbx_req->mbx_src_vfid; dest_vfid = mbx_req->mbx_src_vfid;
/* send this requested info to VF */ /* send this requested info to VF */
return hclge_send_mbx_msg(vport, msg_data, sizeof(u8), return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid); HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
} }
static void hclge_reset_vf_queue(struct hclge_vport *vport, static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req) struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{ {
u16 queue_id; u16 queue_id;
memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id)); memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));
hclge_reset_tqp(&vport->nic, queue_id); hclge_reset_vf_queue(vport, queue_id);
/* send response msg to VF after queue reset complete*/
hclge_gen_resp_to_vf(vport, mbx_req, 0, NULL, 0);
} }
void hclge_mbx_handler(struct hclge_dev *hdev) void hclge_mbx_handler(struct hclge_dev *hdev)
...@@ -407,7 +414,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ...@@ -407,7 +414,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
ret); ret);
break; break;
case HCLGE_MBX_QUEUE_RESET: case HCLGE_MBX_QUEUE_RESET:
hclge_reset_vf_queue(vport, req); hclge_mbx_reset_vf_queue(vport, req);
break; break;
default: default:
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
#include <linux/types.h> #include <linux/types.h>
#include "hnae3.h" #include "hnae3.h"
#define HCLGEVF_CMDQ_TX_TIMEOUT 200 #define HCLGEVF_CMDQ_TX_TIMEOUT 30000
#define HCLGEVF_CMDQ_RX_INVLD_B 0 #define HCLGEVF_CMDQ_RX_INVLD_B 0
#define HCLGEVF_CMDQ_RX_OUTVLD_B 1 #define HCLGEVF_CMDQ_RX_OUTVLD_B 1
......
...@@ -18,6 +18,8 @@ static const struct pci_device_id ae_algovf_pci_tbl[] = { ...@@ -18,6 +18,8 @@ static const struct pci_device_id ae_algovf_pci_tbl[] = {
{0, } {0, }
}; };
MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
static inline struct hclgevf_dev *hclgevf_ae_get_hdev( static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
struct hnae3_handle *handle) struct hnae3_handle *handle)
{ {
...@@ -817,11 +819,17 @@ static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) ...@@ -817,11 +819,17 @@ static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{ {
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
u8 msg_data[2]; u8 msg_data[2];
int ret;
memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 2, false, /* disable vf queue before send queue reset msg to PF */
NULL, 0); ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
if (ret)
return;
hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
2, true, NULL, 0);
} }
static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
...@@ -1460,6 +1468,34 @@ static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, ...@@ -1460,6 +1468,34 @@ static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
*max_rss_size = hdev->rss_size_max; *max_rss_size = hdev->rss_size_max;
} }
static int hclgevf_get_status(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
return hdev->hw.mac.link;
}
/* hnae3 get_ksettings_an_result hook for the VF: report the cached
 * speed/duplex received from the PF.  Autonegotiation is always
 * reported as disabled on the VF.  Any of the output pointers may be
 * NULL, in which case that field is skipped.
 */
static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (speed)
		*speed = hdev->hw.mac.speed;
}
/* Cache the speed/duplex values pushed by the PF (mailbox link-status
 * message) so ethtool queries on the VF can report them.
 */
void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.duplex = duplex;
	hdev->hw.mac.speed = speed;
}
static const struct hnae3_ae_ops hclgevf_ops = { static const struct hnae3_ae_ops hclgevf_ops = {
.init_ae_dev = hclgevf_init_ae_dev, .init_ae_dev = hclgevf_init_ae_dev,
.uninit_ae_dev = hclgevf_uninit_ae_dev, .uninit_ae_dev = hclgevf_uninit_ae_dev,
...@@ -1492,6 +1528,8 @@ static const struct hnae3_ae_ops hclgevf_ops = { ...@@ -1492,6 +1528,8 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.set_vlan_filter = hclgevf_set_vlan_filter, .set_vlan_filter = hclgevf_set_vlan_filter,
.get_channels = hclgevf_get_channels, .get_channels = hclgevf_get_channels,
.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
.get_status = hclgevf_get_status,
.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
}; };
static struct hnae3_ae_algo ae_algovf = { static struct hnae3_ae_algo ae_algovf = {
......
...@@ -61,6 +61,8 @@ enum hclgevf_states { ...@@ -61,6 +61,8 @@ enum hclgevf_states {
struct hclgevf_mac { struct hclgevf_mac {
u8 mac_addr[ETH_ALEN]; u8 mac_addr[ETH_ALEN];
int link; int link;
u8 duplex;
u32 speed;
}; };
struct hclgevf_hw { struct hclgevf_hw {
...@@ -161,4 +163,6 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode, ...@@ -161,4 +163,6 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
u8 *resp_data, u16 resp_len); u8 *resp_data, u16 resp_len);
void hclgevf_mbx_handler(struct hclgevf_dev *hdev); void hclgevf_mbx_handler(struct hclgevf_dev *hdev);
void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state); void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state);
void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
u8 duplex);
#endif #endif
...@@ -133,6 +133,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) ...@@ -133,6 +133,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
struct hclgevf_cmq_ring *crq; struct hclgevf_cmq_ring *crq;
struct hclgevf_desc *desc; struct hclgevf_desc *desc;
u16 link_status, flag; u16 link_status, flag;
u32 speed;
u8 duplex;
u8 *temp; u8 *temp;
int i; int i;
...@@ -164,9 +166,12 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) ...@@ -164,9 +166,12 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
break; break;
case HCLGE_MBX_LINK_STAT_CHANGE: case HCLGE_MBX_LINK_STAT_CHANGE:
link_status = le16_to_cpu(req->msg[1]); link_status = le16_to_cpu(req->msg[1]);
memcpy(&speed, &req->msg[2], sizeof(speed));
duplex = (u8)le16_to_cpu(req->msg[4]);
/* update upper layer with new link link status */ /* update upper layer with new link link status */
hclgevf_update_link_status(hdev, link_status); hclgevf_update_link_status(hdev, link_status);
hclgevf_update_speed_duplex(hdev, speed, duplex);
break; break;
default: default:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment