Commit 84350051 authored by Jiri Pirko, committed by David S. Miller

mlxsw: spectrum_acl: Do rollback as another call to mlxsw_sp_acl_tcam_vchunk_migrate_all()

In order to simplify the code and to prepare it for an
interrupted/continued migration process, do the rollback in case of a
migration error as another call to mlxsw_sp_acl_tcam_vchunk_migrate_all().
It can be understood as "migrate all back".
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 844f01da
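The idea described in the commit message is easier to see outside the driver. Below is a minimal, self-contained C sketch of the pattern, using hypothetical names (migrate_all, migrate_one, migrate_ctx, and so on, none of which are mlxsw identifiers): a failed forward migration is undone by calling the very same migrate-all routine again with the old location as the destination, and a rollback flag prevents a rollback of the rollback.

/*
 * Hypothetical sketch only; not mlxsw code. It illustrates "rollback as
 * another forward call": one routine handles both directions, and the
 * direction is simply the destination argument.
 */
#include <stdbool.h>
#include <stdio.h>

#define N_ITEMS 4

struct migrate_ctx {
	bool this_is_rollback;	/* set before the "migrate back" call */
};

struct item {
	int location;		/* which "region" the item currently sits in */
};

/* Pretend migration of one item; the caller decides when it fails. */
static int migrate_one(struct item *it, int dst, bool fail_here)
{
	if (fail_here)
		return -1;
	it->location = dst;
	return 0;
}

/* Same routine for the forward pass and for the rollback pass. */
static int migrate_all(struct item *items, int dst, struct migrate_ctx *ctx)
{
	int i, err;

	for (i = 0; i < N_ITEMS; i++) {
		/* Inject a failure at item 2, but only on the forward pass. */
		err = migrate_one(&items[i], dst,
				  i == 2 && !ctx->this_is_rollback);
		if (err)
			return err;
	}
	return 0;
}

int main(void)
{
	struct item items[N_ITEMS] = { {0}, {0}, {0}, {0} };
	struct migrate_ctx ctx = { .this_is_rollback = false };
	int old_region = 0, new_region = 1;
	int err;

	err = migrate_all(items, new_region, &ctx);
	if (err) {
		/* Rollback: the same function, old region as destination. */
		ctx.this_is_rollback = true;
		if (migrate_all(items, old_region, &ctx))
			fprintf(stderr, "rollback failed\n");
	}

	for (int i = 0; i < N_ITEMS; i++)
		printf("item %d in region %d\n", i, items[i].location);
	return err ? 1 : 0;
}

Items that never left the old region are simply "migrated" back to where they already are, which is why the dedicated list_for_each_entry_continue_reverse() rollback loops in the diff below can be dropped; this is also what makes it possible to interrupt a migration and continue it later.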
@@ -1227,48 +1227,34 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
 {
 	struct mlxsw_sp_acl_tcam_ventry *ventry;
 	int err;
-	int err2;
 
-	err = mlxsw_sp_acl_tcam_vchunk_migrate_start(mlxsw_sp, vchunk,
-						     region, ctx);
-	if (err)
-		return err;
+	if (vchunk->chunk->region != region) {
+		err = mlxsw_sp_acl_tcam_vchunk_migrate_start(mlxsw_sp, vchunk,
+							     region, ctx);
+		if (err)
+			return err;
+	} else if (!vchunk->chunk2) {
+		/* The chunk is already as it should be, nothing to do. */
+		return 0;
+	}
 
 	list_for_each_entry(ventry, &vchunk->ventry_list, list) {
 		err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
 						       vchunk->chunk);
 		if (err) {
-			if (ctx->this_is_rollback) {
-				vchunk->vregion->failed_rollback = true;
+			if (ctx->this_is_rollback)
 				return err;
-			}
-			goto rollback;
+			/* Swap the chunk and chunk2 pointers so the follow-up
+			 * rollback call will see the original chunk pointer
+			 * in vchunk->chunk.
+			 */
+			swap(vchunk->chunk, vchunk->chunk2);
+			return err;
 		}
 	}
 
 	mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk);
 	return 0;
-
-rollback:
-	/* Migrate the entries back to the original chunk. If some entry
-	 * migration fails, there's no good way how to proceed. Set the
-	 * vregion with "failed_rollback" flag.
-	 */
-	swap(vchunk->chunk, vchunk->chunk2);
-	list_for_each_entry_continue_reverse(ventry, &vchunk->ventry_list,
-					     list) {
-		err2 = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
-							vchunk->chunk);
-		if (err2) {
-			vchunk->vregion->failed_rollback = true;
-			goto err_rollback;
-		}
-	}
-
-	mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk);
-
-err_rollback:
-	return err;
 }
 
 static int
@@ -1284,23 +1270,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
 						     vregion->region,
 						     ctx);
 		if (err)
-			goto rollback;
+			return err;
 	}
 	return 0;
-
-rollback:
-	/* In case migration was not successful, we need to swap
-	 * so the original region pointer is assigned again to vregion->region.
-	 */
-	swap(vregion->region, vregion->region2);
-	ctx->this_is_rollback = true;
-	list_for_each_entry_continue_reverse(vchunk, &vregion->vchunk_list,
-					     list) {
-		mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
-						     vregion->region,
-						     ctx);
-	}
-	return err;
 }
 
 static int
@@ -1308,11 +1280,22 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
 				  struct mlxsw_sp_acl_tcam_vregion *vregion,
 				  struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
 {
-	int err;
+	int err, err2;
 
 	trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
 	mutex_lock(&vregion->lock);
 	err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion, ctx);
+	if (err) {
+		/* In case migration was not successful, we need to swap
+		 * so the original region pointer is assigned again
+		 * to vregion->region.
+		 */
+		swap(vregion->region, vregion->region2);
+		ctx->this_is_rollback = true;
+		err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion, ctx);
+		if (err2)
+			vregion->failed_rollback = true;
+	}
 	mutex_unlock(&vregion->lock);
 	trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
 	return err;
...