Commit 6349a169 authored by David S. Miller

Merge branch 'mlxsw-Adapt-driver-to-upcoming-firmware-versions'

Ido Schimmel says:

====================
mlxsw: Adapt driver to upcoming firmware versions

The first two patches make sure that reserved fields are set to zero, as
required by the device's programmer's reference manual (PRM).

The last two patches prevent the driver from performing an invalid operation
that is going to be denied by upcoming firmware versions.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c846d8da 04719507
@@ -1519,8 +1519,7 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
 			      u8 *p_status)
 {
 	struct mlxsw_pci *mlxsw_pci = bus_priv;
-	dma_addr_t in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
-	dma_addr_t out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
+	dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
 	bool evreq = mlxsw_pci->cmd.nopoll;
 	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
 	bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
@@ -1532,11 +1531,15 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
 	if (err)
 		return err;

-	if (in_mbox)
+	if (in_mbox) {
 		memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
+		in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
+	}
 	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
 	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));

+	if (out_mbox)
+		out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
 	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
 	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));
...
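
The mlxsw_pci_cmd_exec() change above implements the "reserved fields must be
zero" requirement from the cover letter: the CIR_IN_PARAM/CIR_OUT_PARAM
registers now receive a mailbox DMA address only when the command actually
passes that mailbox, and zero otherwise. A minimal sketch of the same pattern,
with hypothetical names (struct cmd_mailboxes and cmd_fill_params are
illustrative, not mlxsw API):

#include <linux/types.h>

/* Illustrative only: a command descriptor whose mailbox addresses are
 * meaningful only when the corresponding mailbox is actually used.
 */
struct cmd_mailboxes {
	dma_addr_t in_mapaddr;
	dma_addr_t out_mapaddr;
	bool in_used;
	bool out_used;
};

static void cmd_fill_params(const struct cmd_mailboxes *mb,
			    u64 *in_param, u64 *out_param)
{
	/* Default both parameters to zero so that a command without an
	 * in/out mailbox never carries a stale DMA address in a field
	 * the PRM defines as reserved.
	 */
	*in_param = 0;
	*out_param = 0;

	if (mb->in_used)
		*in_param = mb->in_mapaddr;
	if (mb->out_used)
		*out_param = mb->out_mapaddr;
}

The key point is that the zero default is established once, up front, so a
command that takes no mailbox can no longer leak a previously mapped DMA
address into a reserved field.
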
@@ -3779,12 +3779,8 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
 }

 static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
-	.used_max_vepa_channels		= 1,
-	.max_vepa_channels		= 0,
 	.used_max_mid			= 1,
 	.max_mid			= MLXSW_SP_MID_MAX,
-	.used_max_pgt			= 1,
-	.max_pgt			= 0,
 	.used_flood_tables		= 1,
 	.used_flood_mode		= 1,
 	.flood_mode			= 3,
...
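
Dropping .used_max_vepa_channels and .used_max_pgt from mlxsw_sp_config_profile
means the Spectrum driver no longer asks the core to program those
CONFIG_PROFILE fields at all; since their values were zero anyway, the fields
now simply stay zero, as reserved fields must. A rough sketch of how such
used_*/value pairs typically gate what gets packed into the command payload
(field and helper names are simplified stand-ins, not the actual mlxsw API):

/* Simplified stand-in for the real config profile structure. */
struct config_profile {
	u8 used_max_mid:1;	/* 1 = max_mid should be programmed */
	u8 used_max_pgt:1;	/* left at 0 = field stays reserved/zero */
	u16 max_mid;
	u16 max_pgt;
};

static void config_profile_pack(u32 *mbox_field_max_mid,
				u32 *mbox_field_max_pgt,
				const struct config_profile *p)
{
	/* Only fields whose used_* bit is set are written into the
	 * command payload; everything else is left at zero.
	 */
	if (p->used_max_mid)
		*mbox_field_max_mid = p->max_mid;
	if (p->used_max_pgt)
		*mbox_field_max_pgt = p->max_pgt;
}
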
@@ -160,6 +160,13 @@ bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block)
 	return block->disable_count;
 }

+static bool
+mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
+{
+	/* We hold a reference on ruleset ourselves */
+	return ruleset->ref_count == 2;
+}
+
 static int
 mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
 			  struct mlxsw_sp_acl_block *block,
@@ -341,21 +348,8 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
 	if (err)
 		goto err_ht_insert;

-	if (!chain_index) {
-		/* We only need ruleset with chain index 0, the implicit one,
-		 * to be directly bound to device. The rest of the rulesets
-		 * are bound by "Goto action set".
-		 */
-		err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
-		if (err)
-			goto err_ruleset_bind;
-	}
-
 	return ruleset;

-err_ruleset_bind:
-	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
-			       mlxsw_sp_acl_ruleset_ht_params);
 err_ht_insert:
 	ops->ruleset_del(mlxsw_sp, ruleset->priv);
 err_ops_ruleset_add:
@@ -369,12 +363,8 @@ static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
 				      struct mlxsw_sp_acl_ruleset *ruleset)
 {
 	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
-	struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
-	u32 chain_index = ruleset->ht_key.chain_index;
 	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

-	if (!chain_index)
-		mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
 	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
 			       mlxsw_sp_acl_ruleset_ht_params);
 	ops->ruleset_del(mlxsw_sp, ruleset->priv);
@@ -688,10 +678,25 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
 	if (err)
 		goto err_rhashtable_insert;

+	if (!ruleset->ht_key.chain_index &&
+	    mlxsw_sp_acl_ruleset_is_singular(ruleset)) {
+		/* We only need ruleset with chain index 0, the implicit
+		 * one, to be directly bound to device. The rest of the
+		 * rulesets are bound by "Goto action set".
+		 */
+		err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset,
+						      ruleset->ht_key.block);
+		if (err)
+			goto err_ruleset_block_bind;
+	}
+
 	list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
 	ruleset->ht_key.block->rule_count++;
 	return 0;

+err_ruleset_block_bind:
+	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
+			       mlxsw_sp_acl_rule_ht_params);
 err_rhashtable_insert:
 	ops->rule_del(mlxsw_sp, rule->priv);
 	return err;
@@ -705,6 +710,10 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,

 	ruleset->ht_key.block->rule_count--;
 	list_del(&rule->list);
+	if (!ruleset->ht_key.chain_index &&
+	    mlxsw_sp_acl_ruleset_is_singular(ruleset))
+		mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset,
+						  ruleset->ht_key.block);
 	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
 			       mlxsw_sp_acl_rule_ht_params);
 	ops->rule_del(mlxsw_sp, rule->priv);
...
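
The mlxsw_sp_acl_* hunks above move the device binding of the implicit
(chain index 0) ruleset from ruleset create/destroy to rule add/delete: the
block is bound when the first rule is installed and unbound when the last one
is removed. The new mlxsw_sp_acl_ruleset_is_singular() helper treats
ref_count == 2 as "only one user", since one of the references is held by the
code doing the check (per its in-line comment), so the bind/unbind happens
exactly once. The net effect is that an empty ruleset is never bound to the
device, which is the invalid operation upcoming firmware will refuse. A very
reduced sketch of the bind-on-first-rule idea, under the assumption of a
simplified ruleset object (ruleset_sketch, device_bind and device_unbind are
made-up names, not mlxsw functions):

#include <stdbool.h>

/* Hypothetical, heavily reduced ruleset; not the mlxsw structure. */
struct ruleset_sketch {
	unsigned int rule_count;
	bool bound;
};

static int device_bind(struct ruleset_sketch *rs)
{
	rs->bound = true;	/* stand-in for the real binding operation */
	return 0;
}

static void device_unbind(struct ruleset_sketch *rs)
{
	rs->bound = false;
}

/* Called after a rule was successfully inserted into the ruleset. */
static int ruleset_rule_added(struct ruleset_sketch *rs)
{
	rs->rule_count++;
	/* Bind to the device only when the ruleset stops being empty;
	 * an empty ruleset is never presented to the hardware.
	 */
	if (rs->rule_count == 1)
		return device_bind(rs);
	return 0;
}

/* Called after a rule was removed from the ruleset. */
static void ruleset_rule_deleted(struct ruleset_sketch *rs)
{
	rs->rule_count--;
	if (rs->rule_count == 0)
		device_unbind(rs);
}

The real driver keys this off the ruleset reference count and the chain index
rather than a plain rule counter, but the effect is the same: bind on the
first rule, unbind on the last.
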
@@ -228,10 +228,6 @@ mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
 	if (err)
 		return err;

-	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
-	if (err)
-		goto err_group_update;
-
 	err = rhashtable_init(&group->chunk_ht,
 			      &mlxsw_sp_acl_tcam_chunk_ht_params);
 	if (err)
@@ -240,7 +236,6 @@ mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
 	return 0;

 err_rhashtable_init:
-err_group_update:
 	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
 	return err;
 }
...
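
In the same spirit, mlxsw_sp_acl_tcam_group_add() no longer calls
mlxsw_sp_acl_tcam_group_update() while the freshly created group is still
empty; the group is written to the device only later, once it actually
references regions, so the hardware is never asked to program an empty group.
A hedged sketch of the same "defer the hardware update until there is content"
pattern (tcam_group_sketch and tcam_group_update_hw are made-up names, not the
driver's):

/* Hypothetical, reduced TCAM group; not the mlxsw structure. */
struct tcam_group_sketch {
	unsigned short id;
	unsigned int region_count;
};

static int tcam_group_update_hw(struct tcam_group_sketch *group)
{
	/* Stand-in for the register write that programs the group. */
	return 0;
}

static int tcam_group_region_attach(struct tcam_group_sketch *group)
{
	group->region_count++;
	/* The group is pushed to the device only here, once it holds at
	 * least one region; creation alone does not touch the hardware.
	 */
	return tcam_group_update_hw(group);
}
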