Commit c66f6741 authored by Daniel Jurgens, committed by Jason Gunthorpe

IB/core: Don't register each MAD agent for LSM notifier

When creating many MAD agents in a short period of time, receive packet
processing can be delayed long enough to cause timeouts while new agents
are being added to the atomic notifier chain with IRQs disabled.  Notifier
chain registration and unregistration is an O(n) operation. With large
numbers of MAD agents being created and destroyed simultaneously, the CPUs
spend too much time with interrupts disabled.

Instead of each MAD agent registering for its own LSM notification,
maintain an internal list of agents and register once; this registration
already existed for handling the PKeys. The list is write-mostly, so a
normal spin lock is used rather than a read/write lock. All MAD agents must
be checked, so a single list is used instead of breaking them down per
device.
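
For illustration only, here is a minimal sketch of that pattern in the same
style as the patch. The example_* names and the recheck_policy() helper are
hypothetical stand-ins, not the real MAD structures or the
security_ib_endport_manage_subnet() call:

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* One global list of agents, guarded by an ordinary (non-rw) spinlock. */
static LIST_HEAD(example_agent_list);
static DEFINE_SPINLOCK(example_agent_list_lock);

struct example_agent {
	struct list_head list;
	bool smp_allowed;
};

/* Hypothetical stand-in for the per-agent LSM query
 * (security_ib_endport_manage_subnet() in the real code). */
static bool recheck_policy(struct example_agent *ag)
{
	return true;	/* placeholder decision */
}

/* Registration happens once per agent, under the list lock. */
static void example_agent_add(struct example_agent *ag)
{
	spin_lock(&example_agent_list_lock);
	WRITE_ONCE(ag->smp_allowed, true);
	list_add(&ag->list, &example_agent_list);
	spin_unlock(&example_agent_list_lock);
}

/* Invoked from the one shared LSM notifier: walk every agent and republish
 * its cached decision. WRITE_ONCE pairs with the lock-free READ_ONCE in the
 * receive path. */
static void example_policy_change(void)
{
	struct example_agent *ag;

	spin_lock(&example_agent_list_lock);
	list_for_each_entry(ag, &example_agent_list, list)
		WRITE_ONCE(ag->smp_allowed, recheck_policy(ag));
	spin_unlock(&example_agent_list_lock);
}

A plain spinlock suffices here because the list is touched only on agent
setup, teardown, and the rare policy change.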

Notifier calls are made under rcu_read_lock(), so there is no risk of
similar packet timeouts while checking the MAD agents' security settings
when a notification arrives.
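
Continuing the same hypothetical sketch, the hot receive path never takes
the list lock; it only samples the flag last published by the walk above:

#include <linux/errno.h>

/* Receive-path check (sketch): lock-free read of the cached policy decision. */
static int example_enforce(struct example_agent *ag)
{
	if (!READ_ONCE(ag->smp_allowed))
		return -EACCES;
	return 0;
}
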
Signed-off-by: Daniel Jurgens <danielj@mellanox.com>
Reviewed-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Acked-by: Paul Moore <paul@paul-moore.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 805b754d

--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -202,6 +202,7 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
 				enum ib_qp_type qp_type);
 void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent);
 int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index);
+void ib_mad_agent_security_change(void);
 #else
 static inline void ib_security_destroy_port_pkey_list(struct ib_device *device)
 {
@@ -267,6 +268,10 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
 {
 	return 0;
 }
+
+static inline void ib_mad_agent_security_change(void)
+{
+}
 #endif
 
 struct ib_device *ib_device_get_by_index(u32 ifindex);

--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -452,6 +452,7 @@ static int ib_security_change(struct notifier_block *nb, unsigned long event,
 		return NOTIFY_DONE;
 
 	schedule_work(&ib_policy_change_work);
+	ib_mad_agent_security_change();
 
 	return NOTIFY_OK;
 }

--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -39,6 +39,10 @@
 #include "core_priv.h"
 #include "mad_priv.h"
 
+static LIST_HEAD(mad_agent_list);
+/* Lock to protect mad_agent_list */
+static DEFINE_SPINLOCK(mad_agent_list_lock);
+
 static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
 {
 	struct pkey_index_qp_list *pkey = NULL;
@@ -676,19 +680,18 @@ static int ib_security_pkey_access(struct ib_device *dev,
 	return security_ib_pkey_access(sec, subnet_prefix, pkey);
 }
 
-static int ib_mad_agent_security_change(struct notifier_block *nb,
-					unsigned long event,
-					void *data)
+void ib_mad_agent_security_change(void)
 {
-	struct ib_mad_agent *ag = container_of(nb, struct ib_mad_agent, lsm_nb);
+	struct ib_mad_agent *ag;
 
-	if (event != LSM_POLICY_CHANGE)
-		return NOTIFY_DONE;
-
-	ag->smp_allowed = !security_ib_endport_manage_subnet(
-		ag->security, dev_name(&ag->device->dev), ag->port_num);
-
-	return NOTIFY_OK;
+	spin_lock(&mad_agent_list_lock);
+	list_for_each_entry(ag,
+			    &mad_agent_list,
+			    mad_agent_sec_list)
+		WRITE_ONCE(ag->smp_allowed,
+			   !security_ib_endport_manage_subnet(ag->security,
+				dev_name(&ag->device->dev), ag->port_num));
+	spin_unlock(&mad_agent_list_lock);
 }
 
 int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
@@ -699,6 +702,8 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
 	if (!rdma_protocol_ib(agent->device, agent->port_num))
 		return 0;
 
+	INIT_LIST_HEAD(&agent->mad_agent_sec_list);
+
 	ret = security_ib_alloc_security(&agent->security);
 	if (ret)
 		return ret;
@@ -706,22 +711,20 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
 	if (qp_type != IB_QPT_SMI)
 		return 0;
 
+	spin_lock(&mad_agent_list_lock);
 	ret = security_ib_endport_manage_subnet(agent->security,
 						dev_name(&agent->device->dev),
 						agent->port_num);
 	if (ret)
 		goto free_security;
 
-	agent->lsm_nb.notifier_call = ib_mad_agent_security_change;
-	ret = register_lsm_notifier(&agent->lsm_nb);
-	if (ret)
-		goto free_security;
-
-	agent->smp_allowed = true;
-	agent->lsm_nb_reg = true;
+	WRITE_ONCE(agent->smp_allowed, true);
+	list_add(&agent->mad_agent_sec_list, &mad_agent_list);
+	spin_unlock(&mad_agent_list_lock);
 	return 0;
 
 free_security:
+	spin_unlock(&mad_agent_list_lock);
 	security_ib_free_security(agent->security);
 	return ret;
 }
@@ -731,8 +734,11 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
 	if (!rdma_protocol_ib(agent->device, agent->port_num))
 		return;
 
-	if (agent->lsm_nb_reg)
-		unregister_lsm_notifier(&agent->lsm_nb);
+	if (agent->qp->qp_type == IB_QPT_SMI) {
+		spin_lock(&mad_agent_list_lock);
+		list_del(&agent->mad_agent_sec_list);
+		spin_unlock(&mad_agent_list_lock);
+	}
 
 	security_ib_free_security(agent->security);
 }
@@ -743,7 +749,7 @@ int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
 		return 0;
 
 	if (map->agent.qp->qp_type == IB_QPT_SMI) {
-		if (!map->agent.smp_allowed)
+		if (!READ_ONCE(map->agent.smp_allowed))
 			return -EACCES;
 		return 0;
 	}

--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -617,11 +617,10 @@ struct ib_mad_agent {
 	u32			hi_tid;
 	u32			flags;
 	void			*security;
-	struct notifier_block   lsm_nb;
+	struct list_head	mad_agent_sec_list;
 	u8			port_num;
 	u8			rmpp_version;
 	bool			smp_allowed;
-	bool			lsm_nb_reg;
 };
 
 /**