Commit f04d402f authored by David S. Miller

Merge branch 'hns3-fixes'

Huazhong Tan says:

====================
net: hns3: code optimizations & bugfixes for HNS3 driver

This patchset includes bugfixes and code optimizations for the HNS3
ethernet controller driver.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 51a5365c 1f609492
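
For orientation, a minimal sketch of the client notification order a full PF reset follows once the new HNAE3_RESTORE_CLIENT stage is wired up. The names are taken from the hunks below; the helper itself is illustrative only (not part of the patchset), and error handling is simplified:

static int example_full_reset_notify(struct hclge_dev *hdev)
{
	int ret;

	/* Quiesce traffic and tear down the client's rings and vectors. */
	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* ... hardware reset happens here ... */

	/* Rebuild rings and vectors, then restore MAC/VLAN/flow-director
	 * state via the new RESTORE stage, and finally bring traffic up.
	 */
	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
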
@@ -124,6 +124,7 @@ enum hnae3_reset_notify_type {
HNAE3_DOWN_CLIENT,
HNAE3_INIT_CLIENT,
HNAE3_UNINIT_CLIENT,
HNAE3_RESTORE_CLIENT,
};
enum hnae3_reset_type {
@@ -500,6 +501,7 @@ struct hnae3_tc_info {
struct hnae3_knic_private_info {
struct net_device *netdev; /* Set by KNIC client when init instance */
u16 rss_size; /* Allocated RSS queues */
u16 req_rss_size;
u16 rx_buf_len;
u16 num_desc;
......
@@ -3185,6 +3185,9 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
for (i = 0; i < priv->vector_num; i++) {
tqp_vector = &priv->tqp_vector[i];
if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
continue;
ret = hns3_get_vector_ring_chain(tqp_vector,
&vector_ring_chain);
if (ret)
@@ -3205,7 +3208,6 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
}
priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
hns3_clear_ring_group(&tqp_vector->rx_group);
hns3_clear_ring_group(&tqp_vector->tx_group);
netif_napi_del(&priv->tqp_vector[i].napi);
@@ -3238,6 +3240,7 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
{
struct hns3_nic_ring_data *ring_data = priv->ring_data;
int queue_num = priv->ae_handle->kinfo.num_tqps;
int desc_num = priv->ae_handle->kinfo.num_desc;
struct pci_dev *pdev = priv->ae_handle->pdev;
struct hns3_enet_ring *ring;
@@ -3263,7 +3266,7 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
ring->dev = priv->dev;
ring->desc_dma_addr = 0;
ring->buf_size = q->buf_size;
ring->desc_num = q->desc_num;
ring->desc_num = desc_num;
ring->next_to_use = 0;
ring->next_to_clean = 0;
@@ -3725,7 +3728,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct net_device *ndev = kinfo->netdev;
bool if_running;
int ret;
if (tc > HNAE3_MAX_TC)
@@ -3734,24 +3736,13 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
if (!ndev)
return -ENODEV;
if_running = netif_running(ndev);
if (if_running) {
(void)hns3_nic_net_stop(ndev);
msleep(100);
}
ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
if (ret)
goto err_out;
return ret;
ret = hns3_nic_set_real_num_queue(ndev);
err_out:
if (if_running)
(void)hns3_nic_net_open(ndev);
return ret;
}
@@ -4013,41 +4004,18 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
{
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
bool vlan_filter_enable;
int ret;
ret = hns3_init_mac_addr(netdev, false);
if (ret)
return ret;
ret = hns3_recover_hw_addr(netdev);
if (ret)
return ret;
ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
if (ret)
return ret;
vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
hns3_enable_vlan_filter(netdev, vlan_filter_enable);
/* Hardware table is only clear when pf resets */
if (!(handle->flags & HNAE3_SUPPORT_VF)) {
ret = hns3_restore_vlan(netdev);
if (ret)
return ret;
}
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
ret = hns3_restore_fd_rules(netdev);
ret = hns3_get_ring_config(priv);
if (ret)
return ret;
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
ret = hns3_nic_alloc_vector_data(priv);
if (ret)
return ret;
goto err_put_ring;
hns3_restore_coal(priv);
@@ -4068,10 +4036,44 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
priv->ring_data = NULL;
err_dealloc_vector:
hns3_nic_dealloc_vector_data(priv);
err_put_ring:
hns3_put_ring_config(priv);
priv->ring_data = NULL;
return ret;
}
static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
{
struct net_device *netdev = handle->kinfo.netdev;
bool vlan_filter_enable;
int ret;
ret = hns3_init_mac_addr(netdev, false);
if (ret)
return ret;
ret = hns3_recover_hw_addr(netdev);
if (ret)
return ret;
ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
if (ret)
return ret;
vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
hns3_enable_vlan_filter(netdev, vlan_filter_enable);
/* Hardware table is only clear when pf resets */
if (!(handle->flags & HNAE3_SUPPORT_VF)) {
ret = hns3_restore_vlan(netdev);
if (ret)
return ret;
}
return hns3_restore_fd_rules(netdev);
}
static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
struct net_device *netdev = handle->kinfo.netdev;
@@ -4101,6 +4103,9 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
if (ret)
netdev_err(netdev, "uninit ring error\n");
hns3_put_ring_config(priv);
priv->ring_data = NULL;
clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
@@ -4124,6 +4129,9 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
case HNAE3_UNINIT_CLIENT:
ret = hns3_reset_notify_uninit_enet(handle);
break;
case HNAE3_RESTORE_CLIENT:
ret = hns3_reset_notify_restore_enet(handle);
break;
default:
break;
}
@@ -4131,57 +4139,11 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
return ret;
}
static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = hns3_get_handle(netdev);
int ret;
ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
if (ret)
return ret;
ret = hns3_get_ring_config(priv);
if (ret)
return ret;
ret = hns3_nic_alloc_vector_data(priv);
if (ret)
goto err_alloc_vector;
hns3_restore_coal(priv);
ret = hns3_nic_init_vector_data(priv);
if (ret)
goto err_uninit_vector;
ret = hns3_init_all_ring(priv);
if (ret)
goto err_put_ring;
return 0;
err_put_ring:
hns3_put_ring_config(priv);
err_uninit_vector:
hns3_nic_uninit_vector_data(priv);
err_alloc_vector:
hns3_nic_dealloc_vector_data(priv);
return ret;
}
static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
{
return (new_tqp_num / num_tc) * num_tc;
}
int hns3_set_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_knic_private_info *kinfo = &h->kinfo;
bool if_running = netif_running(netdev);
u32 new_tqp_num = ch->combined_count;
u16 org_tqp_num;
int ret;
@@ -4190,39 +4152,28 @@ int hns3_set_channels(struct net_device *netdev,
return -EINVAL;
if (new_tqp_num > hns3_get_max_available_channels(h) ||
new_tqp_num < kinfo->num_tc) {
new_tqp_num < 1) {
dev_err(&netdev->dev,
"Change tqps fail, the tqp range is from %d to %d",
kinfo->num_tc,
"Change tqps fail, the tqp range is from 1 to %d",
hns3_get_max_available_channels(h));
return -EINVAL;
}
new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
if (kinfo->num_tqps == new_tqp_num)
if (kinfo->rss_size == new_tqp_num)
return 0;
if (if_running)
hns3_nic_net_stop(netdev);
ret = hns3_nic_uninit_vector_data(priv);
if (ret) {
dev_err(&netdev->dev,
"Unbind vector with tqp fail, nothing is changed");
goto open_netdev;
}
hns3_store_coal(priv);
hns3_nic_dealloc_vector_data(priv);
ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
if (ret)
return ret;
hns3_uninit_all_ring(priv);
hns3_put_ring_config(priv);
ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
if (ret)
return ret;
org_tqp_num = h->kinfo.num_tqps;
ret = hns3_modify_tqp_num(netdev, new_tqp_num);
ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
if (ret) {
ret = hns3_modify_tqp_num(netdev, org_tqp_num);
ret = h->ae_algo->ops->set_channels(h, org_tqp_num);
if (ret) {
/* If revert to old tqp failed, fatal error occurred */
dev_err(&netdev->dev,
@@ -4232,12 +4183,11 @@ int hns3_set_channels(struct net_device *netdev,
dev_info(&netdev->dev,
"Change tqp num fail, Revert to old tqp num");
}
open_netdev:
if (if_running)
hns3_nic_net_open(netdev);
ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT);
if (ret)
return ret;
return hns3_reset_notify(h, HNAE3_UP_CLIENT);
}
static const struct hnae3_client_ops client_ops = {
......
@@ -412,7 +412,6 @@ struct hns3_enet_ring {
unsigned char *va; /* first buffer address for current packet */
u32 flag; /* ring attribute */
int irq_init_flag;
int numa_node;
cpumask_t affinity_mask;
......
@@ -222,6 +222,16 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
if (ret)
return ret;
if (map_changed) {
ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
if (ret)
return ret;
ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
if (ret)
return ret;
}
hclge_tm_schd_info_update(hdev, num_tc);
ret = hclge_ieee_ets_to_tm_info(hdev, ets);
@@ -232,6 +242,13 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
ret = hclge_client_setup_tc(hdev);
if (ret)
return ret;
ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
if (ret)
return ret;
ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
if (ret)
return ret;
}
return hclge_tm_dwrr_cfg(hdev);
......
@@ -1068,14 +1068,14 @@ static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
return ret;
}
static int hclge_assign_tqp(struct hclge_vport *vport)
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_dev *hdev = vport->back;
int i, alloced;
for (i = 0, alloced = 0; i < hdev->num_tqps &&
alloced < kinfo->num_tqps; i++) {
alloced < num_tqps; i++) {
if (!hdev->htqp[i].alloced) {
hdev->htqp[i].q.handle = &vport->nic;
hdev->htqp[i].q.tqp_index = alloced;
@@ -1085,7 +1085,9 @@ static int hclge_assign_tqp(struct hclge_vport *vport)
alloced++;
}
}
vport->alloc_tqps = kinfo->num_tqps;
vport->alloc_tqps = alloced;
kinfo->rss_size = min_t(u16, hdev->rss_size_max,
vport->alloc_tqps / hdev->tm_info.num_tc);
return 0;
}
@@ -1096,36 +1098,17 @@ static int hclge_knic_setup(struct hclge_vport *vport,
struct hnae3_handle *nic = &vport->nic;
struct hnae3_knic_private_info *kinfo = &nic->kinfo;
struct hclge_dev *hdev = vport->back;
int i, ret;
int ret;
kinfo->num_desc = num_desc;
kinfo->rx_buf_len = hdev->rx_buf_len;
kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
kinfo->rss_size
= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;
for (i = 0; i < HNAE3_MAX_TC; i++) {
if (hdev->hw_tc_map & BIT(i)) {
kinfo->tc_info[i].enable = true;
kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
kinfo->tc_info[i].tqp_count = kinfo->rss_size;
kinfo->tc_info[i].tc = i;
} else {
/* Set to default queue if TC is disable */
kinfo->tc_info[i].enable = false;
kinfo->tc_info[i].tqp_offset = 0;
kinfo->tc_info[i].tqp_count = 1;
kinfo->tc_info[i].tc = 0;
}
}
kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
sizeof(struct hnae3_queue *), GFP_KERNEL);
if (!kinfo->tqp)
return -ENOMEM;
ret = hclge_assign_tqp(vport);
ret = hclge_assign_tqp(vport, num_tqps);
if (ret)
dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
@@ -1140,7 +1123,7 @@ static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
u16 i;
kinfo = &nic->kinfo;
for (i = 0; i < kinfo->num_tqps; i++) {
for (i = 0; i < vport->alloc_tqps; i++) {
struct hclge_tqp *q =
container_of(kinfo->tqp[i], struct hclge_tqp, q);
bool is_pf;
@@ -2418,7 +2401,7 @@ static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
hclge_free_vector(hdev, 0);
}
static int hclge_notify_client(struct hclge_dev *hdev,
int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type)
{
struct hnae3_client *client = hdev->nic_client;
@@ -2883,6 +2866,10 @@ static void hclge_reset(struct hclge_dev *hdev)
if (ret)
goto err_reset_lock;
ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
if (ret)
goto err_reset_lock;
hclge_clear_reset_cause(hdev);
ret = hclge_reset_prepare_up(hdev);
@@ -5258,6 +5245,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
enum hnae3_loop loop_mode, bool en)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hnae3_knic_private_info *kinfo;
struct hclge_dev *hdev = vport->back;
int i, ret;
@@ -5276,7 +5264,8 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
break;
}
for (i = 0; i < vport->alloc_tqps; i++) {
kinfo = &vport->nic.kinfo;
for (i = 0; i < kinfo->num_tqps; i++) {
ret = hclge_tqp_enable(hdev, i, 0, en);
if (ret)
return ret;
@@ -5288,11 +5277,13 @@ static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hnae3_knic_private_info *kinfo;
struct hnae3_queue *queue;
struct hclge_tqp *tqp;
int i;
for (i = 0; i < vport->alloc_tqps; i++) {
kinfo = &vport->nic.kinfo;
for (i = 0; i < kinfo->num_tqps; i++) {
queue = handle->kinfo.tqp[i];
tqp = container_of(queue, struct hclge_tqp, q);
memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
@@ -7523,18 +7514,17 @@ static u32 hclge_get_max_channels(struct hnae3_handle *handle)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
return min_t(u32, hdev->rss_size_max,
vport->alloc_tqps / kinfo->num_tc);
}
static void hclge_get_channels(struct hnae3_handle *handle,
struct ethtool_channels *ch)
{
struct hclge_vport *vport = hclge_get_vport(handle);
ch->max_combined = hclge_get_max_channels(handle);
ch->other_count = 1;
ch->max_other = 1;
ch->combined_count = vport->alloc_tqps;
ch->combined_count = handle->kinfo.rss_size;
}
static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
@@ -7547,25 +7537,6 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
*max_rss_size = hdev->rss_size_max;
}
static void hclge_release_tqp(struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_dev *hdev = vport->back;
int i;
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp =
container_of(kinfo->tqp[i], struct hclge_tqp, q);
tqp->q.handle = NULL;
tqp->q.tqp_index = 0;
tqp->alloced = false;
}
devm_kfree(&hdev->pdev->dev, kinfo->tqp);
kinfo->tqp = NULL;
}
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -7580,24 +7551,11 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
u32 *rss_indir;
int ret, i;
/* Free old tqps, and reallocate with new tqp number when nic setup */
hclge_release_tqp(vport);
kinfo->req_rss_size = new_tqps_num;
ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc);
if (ret) {
dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
return ret;
}
ret = hclge_map_tqp_to_vport(hdev, vport);
if (ret) {
dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
return ret;
}
ret = hclge_tm_schd_init(hdev);
ret = hclge_tm_vport_map_update(hdev);
if (ret) {
dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
return ret;
}
......
@@ -878,4 +878,6 @@ void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type);
#endif
@@ -517,19 +517,32 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_dev *hdev = vport->back;
u16 max_rss_size;
u8 i;
vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
kinfo->num_tc =
min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc);
kinfo->rss_size
= min_t(u16, hdev->rss_size_max,
kinfo->num_tqps / kinfo->num_tc);
kinfo->num_tc = min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
max_rss_size = min_t(u16, hdev->rss_size_max,
vport->alloc_tqps / kinfo->num_tc);
if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
kinfo->req_rss_size <= max_rss_size) {
dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
kinfo->rss_size, kinfo->req_rss_size);
kinfo->rss_size = kinfo->req_rss_size;
} else if (kinfo->rss_size > max_rss_size ||
(!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
kinfo->rss_size, max_rss_size);
kinfo->rss_size = max_rss_size;
}
kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
vport->dwrr = 100; /* 100 percent as init */
vport->alloc_rss_size = kinfo->rss_size;
for (i = 0; i < kinfo->num_tc; i++) {
for (i = 0; i < HNAE3_MAX_TC; i++) {
if (hdev->hw_tc_map & BIT(i)) {
kinfo->tc_info[i].enable = true;
kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
@@ -1228,10 +1241,23 @@ static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}
static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
int ret = 0;
int i;
for (i = 0; i < hdev->tm_info.num_tc; i++) {
ret = hclge_bp_setup_hw(hdev, i);
if (ret)
return ret;
}
return ret;
}
int hclge_pause_setup_hw(struct hclge_dev *hdev)
{
int ret;
u8 i;
ret = hclge_pause_param_setup_hw(hdev);
if (ret)
@@ -1250,13 +1276,7 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
if (ret)
dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret);
for (i = 0; i < hdev->tm_info.num_tc; i++) {
ret = hclge_bp_setup_hw(hdev, i);
if (ret)
return ret;
}
return 0;
return hclge_tm_bp_setup(hdev);
}
void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
@@ -1327,3 +1347,20 @@ int hclge_tm_schd_init(struct hclge_dev *hdev)
return hclge_tm_init_hw(hdev);
}
int hclge_tm_vport_map_update(struct hclge_dev *hdev)
{
struct hclge_vport *vport = hdev->vport;
int ret;
hclge_tm_vport_tc_info_update(vport);
ret = hclge_vport_q_to_qs_map(hdev, vport);
if (ret)
return ret;
if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
return 0;
return hclge_tm_bp_setup(hdev);
}
@@ -142,6 +142,7 @@ struct hclge_port_shapping_cmd {
(HCLGE_TM_SHAP_##string##_LSH))
int hclge_tm_schd_init(struct hclge_dev *hdev);
int hclge_tm_vport_map_update(struct hclge_dev *hdev);
int hclge_pause_setup_hw(struct hclge_dev *hdev);
int hclge_tm_schd_mode_hw(struct hclge_dev *hdev);
void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc);
......
@@ -1264,7 +1264,7 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
if (ret)
return ret;
return 0;
return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
}
static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
......