Commit 44122887 authored by Jian Shen, committed by David S. Miller

net: hns3: refine the flow director handle

In order to be compatible with aRFS rules, this patch adds a spinlock
to protect flow director rule adding, deleting and querying, and
packages the rule configuration into a single helper, hclge_fd_config_rule().
Signed-off-by: Jian Shen <shenjian15@huawei.com>
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ffab9691
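Note that this commit only adds the lock and the fd_active_type/fd_bmap bookkeeping; no aRFS caller exists in the diff below. As a hypothetical sketch of the discipline the patch sets up (the function name and the EP/aRFS exclusion check are assumptions, not part of this commit), an aRFS-side insert path would be expected to take fd_rule_lock, tag the rule with HCLGE_FD_ARFS_ACTIVE, and reuse hclge_fd_config_rule(), which must be called with the lock held:

/* Hypothetical sketch only -- assumes the hclge_main.h definitions added by
 * this patch; not part of the commit itself.
 */
static int hclge_fd_add_arfs_rule_sketch(struct hclge_dev *hdev,
					 struct hclge_fd_rule *rule)
{
	int ret;

	spin_lock_bh(&hdev->fd_rule_lock);

	/* assumption: ethtool (EP) rules and aRFS rules are not mixed */
	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EOPNOTSUPP;
	}

	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;

	/* updates fd_rule_list/fd_bmap, then programs the action and key;
	 * expects fd_rule_lock to be held
	 */
	ret = hclge_fd_config_rule(hdev, rule);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return ret;
}

Whatever shape a real aRFS path takes, the invariant the diff enforces is that fd_rule_list, fd_bmap and fd_active_type are only touched under fd_rule_lock.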
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -1226,8 +1226,10 @@ static int hclge_configure(struct hclge_dev *hdev)
 	hdev->tm_info.hw_pfc_map = 0;
 	hdev->wanted_umv_size = cfg.umv_space;
 
-	if (hnae3_dev_fd_supported(hdev))
+	if (hnae3_dev_fd_supported(hdev)) {
 		hdev->fd_en = true;
+		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
+	}
 
 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
 	if (ret) {
@@ -4906,14 +4908,18 @@ static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
 	struct hclge_fd_rule *rule = NULL;
 	struct hlist_node *node2;
 
+	spin_lock_bh(&hdev->fd_rule_lock);
 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
 		if (rule->location >= location)
 			break;
 	}
 
+	spin_unlock_bh(&hdev->fd_rule_lock);
+
 	return rule && rule->location == location;
 }
 
+/* make sure being called after lock up with fd_rule_lock */
 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
 				     struct hclge_fd_rule *new_rule,
 				     u16 location,
@@ -4937,9 +4943,13 @@ static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
 		kfree(rule);
 		hdev->hclge_fd_rule_num--;
 
-		if (!is_add)
-			return 0;
+		if (!is_add) {
+			if (!hdev->hclge_fd_rule_num)
+				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
+			clear_bit(location, hdev->fd_bmap);
 
+			return 0;
+		}
 	} else if (!is_add) {
 		dev_err(&hdev->pdev->dev,
 			"delete fail, rule %d is inexistent\n",
@@ -4954,7 +4964,9 @@ static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
 	else
 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
 
+	set_bit(location, hdev->fd_bmap);
 	hdev->hclge_fd_rule_num++;
+	hdev->fd_active_type = new_rule->rule_type;
 
 	return 0;
 }
@@ -5112,6 +5124,36 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
 	return 0;
 }
 
+/* make sure being called after lock up with fd_rule_lock */
+static int hclge_fd_config_rule(struct hclge_dev *hdev,
+				struct hclge_fd_rule *rule)
+{
+	int ret;
+
+	if (!rule) {
+		dev_err(&hdev->pdev->dev,
+			"The flow director rule is NULL\n");
+		return -EINVAL;
+	}
+
+	/* it will never fail here, so needn't to check return value */
+	hclge_fd_update_rule_list(hdev, rule, rule->location, true);
+
+	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+	if (ret)
+		goto clear_rule;
+
+	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
+	if (ret)
+		goto clear_rule;
+
+	return 0;
+
+clear_rule:
+	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
+	return ret;
+}
+
 static int hclge_add_fd_entry(struct hnae3_handle *handle,
 			      struct ethtool_rxnfc *cmd)
 {
@@ -5174,8 +5216,10 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
 		return -ENOMEM;
 
 	ret = hclge_fd_get_tuple(hdev, fs, rule);
-	if (ret)
-		goto free_rule;
+	if (ret) {
+		kfree(rule);
+		return ret;
+	}
 
 	rule->flow_type = fs->flow_type;
@@ -5184,23 +5228,13 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
 	rule->vf_id = dst_vport_id;
 	rule->queue_id = q_index;
 	rule->action = action;
+	rule->rule_type = HCLGE_FD_EP_ACTIVE;
 
-	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
-	if (ret)
-		goto free_rule;
-
-	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
-	if (ret)
-		goto free_rule;
-
-	ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
-	if (ret)
-		goto free_rule;
+	spin_lock_bh(&hdev->fd_rule_lock);
+	ret = hclge_fd_config_rule(hdev, rule);
 
-	return ret;
+	spin_unlock_bh(&hdev->fd_rule_lock);
 
-free_rule:
-	kfree(rule);
 	return ret;
 }
@@ -5232,8 +5266,12 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
 	if (ret)
 		return ret;
 
-	return hclge_fd_update_rule_list(hdev, NULL, fs->location,
-					 false);
+	spin_lock_bh(&hdev->fd_rule_lock);
+	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
+
+	spin_unlock_bh(&hdev->fd_rule_lock);
+
+	return ret;
 }
 
 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
@@ -5243,25 +5281,30 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
 	struct hclge_dev *hdev = vport->back;
 	struct hclge_fd_rule *rule;
 	struct hlist_node *node;
+	u16 location;
 
 	if (!hnae3_dev_fd_supported(hdev))
 		return;
 
+	spin_lock_bh(&hdev->fd_rule_lock);
+	for_each_set_bit(location, hdev->fd_bmap,
+			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
+				     NULL, false);
+
 	if (clear_list) {
 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
 					  rule_node) {
-			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
-					     rule->location, NULL, false);
 			hlist_del(&rule->rule_node);
 			kfree(rule);
-			hdev->hclge_fd_rule_num--;
 		}
-	} else {
-		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
-					  rule_node)
-			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
-					     rule->location, NULL, false);
+		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
+		hdev->hclge_fd_rule_num = 0;
+		bitmap_zero(hdev->fd_bmap,
+			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
 	}
+
+	spin_unlock_bh(&hdev->fd_rule_lock);
 }
 
 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
@@ -5283,6 +5326,7 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
 	if (!hdev->fd_en)
 		return 0;
 
+	spin_lock_bh(&hdev->fd_rule_lock);
 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
 		if (!ret)
@@ -5292,11 +5336,18 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
 			dev_warn(&hdev->pdev->dev,
 				 "Restore rule %d failed, remove it\n",
 				 rule->location);
+			clear_bit(rule->location, hdev->fd_bmap);
 			hlist_del(&rule->rule_node);
 			kfree(rule);
 			hdev->hclge_fd_rule_num--;
 		}
 	}
+
+	if (hdev->hclge_fd_rule_num)
+		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
+
+	spin_unlock_bh(&hdev->fd_rule_lock);
+
 	return 0;
 }
@@ -5329,13 +5380,18 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
 
+	spin_lock_bh(&hdev->fd_rule_lock);
+
 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
 		if (rule->location >= fs->location)
 			break;
 	}
 
-	if (!rule || fs->location != rule->location)
+	if (!rule || fs->location != rule->location) {
+		spin_unlock_bh(&hdev->fd_rule_lock);
+
 		return -ENOENT;
+	}
 
 	fs->flow_type = rule->flow_type;
 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
@@ -5474,6 +5530,7 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
 		break;
 	default:
+		spin_unlock_bh(&hdev->fd_rule_lock);
 		return -EOPNOTSUPP;
 	}
@@ -5505,6 +5562,8 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
 		fs->ring_cookie |= vf_id;
 	}
 
+	spin_unlock_bh(&hdev->fd_rule_lock);
+
 	return 0;
 }
@@ -5522,15 +5581,20 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
 
+	spin_lock_bh(&hdev->fd_rule_lock);
 	hlist_for_each_entry_safe(rule, node2,
 				  &hdev->fd_rule_list, rule_node) {
-		if (cnt == cmd->rule_cnt)
+		if (cnt == cmd->rule_cnt) {
+			spin_unlock_bh(&hdev->fd_rule_lock);
 			return -EMSGSIZE;
+		}
 
 		rule_locs[cnt] = rule->location;
 		cnt++;
 	}
 
+	spin_unlock_bh(&hdev->fd_rule_lock);
+
 	cmd->rule_cnt = cnt;
 
 	return 0;
@@ -5565,10 +5629,12 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
+	bool clear;
 
 	hdev->fd_en = enable;
+	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE ? true : false;
 	if (!enable)
-		hclge_del_all_fd_entries(handle, false);
+		hclge_del_all_fd_entries(handle, clear);
 	else
 		hclge_restore_fd_entries(handle);
 }
@@ -8143,6 +8209,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	mutex_init(&hdev->vport_lock);
 	mutex_init(&hdev->vport_cfg_mutex);
+	spin_lock_init(&hdev->fd_rule_lock);
 
 	ret = hclge_pci_init(hdev);
 	if (ret) {
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -578,6 +578,15 @@ static const struct key_info tuple_key_info[] = {
 #define MAX_KEY_BYTES	(MAX_KEY_DWORDS * 4)
 #define MAX_META_DATA_LENGTH	32
 
+/* assigned by firmware, the real filter number for each pf may be less */
+#define MAX_FD_FILTER_NUM	4096
+
+enum HCLGE_FD_ACTIVE_RULE_TYPE {
+	HCLGE_FD_RULE_NONE,
+	HCLGE_FD_ARFS_ACTIVE,
+	HCLGE_FD_EP_ACTIVE,
+};
+
 enum HCLGE_FD_PACKET_TYPE {
 	NIC_PACKET,
 	ROCE_PACKET,
@@ -630,6 +639,7 @@ struct hclge_fd_rule {
 	u16 vf_id;
 	u16 queue_id;
 	u16 location;
+	enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type;
 };
 
 struct hclge_fd_ad_data {
@@ -809,7 +819,10 @@ struct hclge_dev {
 	struct hclge_fd_cfg fd_cfg;
 	struct hlist_head fd_rule_list;
+	spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */
 	u16 hclge_fd_rule_num;
+	unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
+	enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
 	u8 fd_en;
 	u16 wanted_umv_size;