Commit db701955 authored by Ido Schimmel's avatar Ido Schimmel Committed by David S. Miller

rocker: Implement FIB offload in deferred work

Convert rocker to offload FIBs in deferred work in a similar fashion to
mlxsw, which was converted in the previous commits.
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c1bb279c
...@@ -2166,28 +2166,70 @@ static const struct switchdev_ops rocker_port_switchdev_ops = { ...@@ -2166,28 +2166,70 @@ static const struct switchdev_ops rocker_port_switchdev_ops = {
.switchdev_port_obj_dump = rocker_port_obj_dump, .switchdev_port_obj_dump = rocker_port_obj_dump,
}; };
/* Context carried from the (atomic) FIB notifier callback to the
 * deferred work item that actually programs the hardware.
 */
struct rocker_fib_event_work {
	struct work_struct work;	/* queued on rocker->rocker_owq */
	/* Copy of the notifier payload; fen_info.fi holds a reference
	 * taken in rocker_router_fib_event() and released by the worker.
	 */
	struct fib_entry_notifier_info fen_info;
	struct rocker *rocker;		/* owning switch instance */
	unsigned long event;		/* FIB_EVENT_* that triggered the work */
};
/* Deferred-work handler for FIB notifier events: performs the actual
 * FIB offload to the device in process context (the notifier itself runs
 * in atomic context). Drops the fib_info reference taken when the work
 * was queued and frees the work item.
 */
static void rocker_router_fib_event_work(struct work_struct *work)
{
	struct rocker_fib_event_work *fib_work =
		container_of(work, struct rocker_fib_event_work, work);
	struct rocker *rocker = fib_work->rocker;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_ADD:
		err = rocker_world_fib4_add(rocker, &fib_work->fen_info);
		if (err)
			/* Offload failed: give up on FIB offload entirely
			 * rather than leave hardware/kernel out of sync.
			 */
			rocker_world_fib4_abort(rocker);
		/* Release the reference taken in rocker_router_fib_event() */
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		rocker_world_fib4_del(rocker, &fib_work->fen_info);
		/* Release the reference taken in rocker_router_fib_event() */
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		/* FIB rules are not offloaded; abort offload so routing
		 * decisions fall back to the kernel.
		 */
		rocker_world_fib4_abort(rocker);
		break;
	}
	rtnl_unlock();
	kfree(fib_work);
}
/* FIB notifier callback. Called with rcu_read_lock() held, i.e. in
 * atomic context, so the event cannot be handled here directly; instead
 * the payload is copied and the handling deferred to the rocker ordered
 * workqueue. Returns NOTIFY_DONE, or NOTIFY_BAD on allocation failure.
 */
static int rocker_router_fib_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct rocker *rocker = container_of(nb, struct rocker, fib_nb);
	struct rocker_fib_event_work *fib_work;

	/* GFP_ATOMIC: may not sleep under rcu_read_lock() */
	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
	if (WARN_ON(!fib_work))
		return NOTIFY_BAD;

	INIT_WORK(&fib_work->work, rocker_router_fib_event_work);
	fib_work->rocker = rocker;
	fib_work->event = event;

	switch (event) {
	case FIB_EVENT_ENTRY_ADD: /* fall through */
	case FIB_EVENT_ENTRY_DEL:
		/* ptr is only valid for the duration of the callback,
		 * so keep a private copy for the deferred worker.
		 */
		memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
		/* Take reference on fib_info to prevent it from being
		 * freed while work is queued. Release it afterwards.
		 */
		fib_info_hold(fib_work->fen_info.fi);
		break;
	}

	queue_work(rocker->rocker_owq, &fib_work->work);

	return NOTIFY_DONE;
}
......
...@@ -2516,6 +2516,7 @@ static void ofdpa_fini(struct rocker *rocker) ...@@ -2516,6 +2516,7 @@ static void ofdpa_fini(struct rocker *rocker)
int bkt; int bkt;
del_timer_sync(&ofdpa->fdb_cleanup_timer); del_timer_sync(&ofdpa->fdb_cleanup_timer);
flush_workqueue(rocker->rocker_owq);
spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags); spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment