Commit d658f8a0 authored by Patrick McHardy, committed by David S. Miller

ipv4: ipmr: remove net pointer from struct mfc_cache

Now that cache entries in unres_queue don't need to be distinguished by their
network namespace pointer anymore, we can remove it from struct mfc_cache
and pass the namespace as a function argument to the functions that need it.
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e258beb2
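
For context, every hunk below applies the same refactoring pattern: instead of each mfc_cache entry carrying a back-pointer to its network namespace (recovered via mfc_net() and released via release_net() in ipmr_cache_free()), the callers that already know the namespace pass a struct net * argument down. The following is a minimal user-space sketch of that pattern, not kernel code; struct net, mfc_cache_old/mfc_cache_new and destroy_unres_old/destroy_unres_new are simplified illustrative stand-ins.

/*
 * Illustrative sketch only -- simplified stand-ins, not the kernel definitions.
 * "Before": each cache entry stores a namespace back-pointer.
 * "After":  the namespace is passed explicitly by the caller.
 */
#include <stdio.h>
#include <stdlib.h>

struct net {				/* stand-in for the kernel's struct net */
	int cache_resolve_queue_len;
};

/* Before: the entry carries its namespace; helpers dereference it. */
struct mfc_cache_old {
	struct net *mfc_net;		/* extra pointer held per entry */
	unsigned int mfc_parent;
};

static void destroy_unres_old(struct mfc_cache_old *c)
{
	struct net *net = c->mfc_net;	/* recover the namespace from the entry */
	net->cache_resolve_queue_len--;
	free(c);
}

/* After: no mfc_net field; the caller passes the namespace down. */
struct mfc_cache_new {
	unsigned int mfc_parent;
};

static void destroy_unres_new(struct net *net, struct mfc_cache_new *c)
{
	net->cache_resolve_queue_len--;	/* same bookkeeping, namespace from argument */
	free(c);
}

int main(void)
{
	struct net net = { .cache_resolve_queue_len = 2 };

	struct mfc_cache_old *oc = calloc(1, sizeof(*oc));
	oc->mfc_net = &net;
	destroy_unres_old(oc);

	struct mfc_cache_new *nc = calloc(1, sizeof(*nc));
	destroy_unres_new(&net, nc);

	printf("queue len now %d\n", net.cache_resolve_queue_len);
	return 0;
}

Each entry shrinks by one pointer and the namespace reference counting on allocation/free goes away, which matches the removal of mfc_net()/mfc_net_set() and release_net(mfc_net(c)) in the hunks below.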
@@ -192,9 +192,6 @@ struct vif_device {
 struct mfc_cache {
 	struct mfc_cache *next;		/* Next entry on cache line 	*/
-#ifdef CONFIG_NET_NS
-	struct net *mfc_net;
-#endif
 	__be32 mfc_mcastgrp;		/* Group the entry belongs to 	*/
 	__be32 mfc_origin;		/* Source of packet 		*/
 	vifi_t mfc_parent;		/* Source interface		*/
@@ -217,18 +214,6 @@ struct mfc_cache {
 	} mfc_un;
 };
 
-static inline
-struct net *mfc_net(const struct mfc_cache *mfc)
-{
-	return read_pnet(&mfc->mfc_net);
-}
-
-static inline
-void mfc_net_set(struct mfc_cache *mfc, struct net *net)
-{
-	write_pnet(&mfc->mfc_net, hold_net(net));
-}
-
 #define MFC_STATIC	1
 #define MFC_NOTIFY	2
@@ -93,10 +93,12 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
 static struct kmem_cache *mrt_cachep __read_mostly;
 
-static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
+static int ip_mr_forward(struct net *net, struct sk_buff *skb,
+			 struct mfc_cache *cache, int local);
 static int ipmr_cache_report(struct net *net,
 			     struct sk_buff *pkt, vifi_t vifi, int assert);
-static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);
+static int ipmr_fill_mroute(struct net *net, struct sk_buff *skb,
+			    struct mfc_cache *c, struct rtmsg *rtm);
 
 /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
@@ -325,7 +327,6 @@ static int vif_delete(struct net *net, int vifi, int notify,
 static inline void ipmr_cache_free(struct mfc_cache *c)
 {
-	release_net(mfc_net(c));
 	kmem_cache_free(mrt_cachep, c);
 }
@@ -333,11 +334,10 @@ static inline void ipmr_cache_free(struct mfc_cache *c)
    and reporting error to netlink readers.
  */
 
-static void ipmr_destroy_unres(struct mfc_cache *c)
+static void ipmr_destroy_unres(struct net *net, struct mfc_cache *c)
 {
 	struct sk_buff *skb;
 	struct nlmsgerr *e;
-	struct net *net = mfc_net(c);
 
 	atomic_dec(&net->ipv4.cache_resolve_queue_len);
@@ -392,7 +392,7 @@ static void ipmr_expire_process(unsigned long arg)
 		*cp = c->next;
-		ipmr_destroy_unres(c);
+		ipmr_destroy_unres(net, c);
 	}
 
 	if (net->ipv4.mfc_unres_queue != NULL)
@@ -404,10 +404,10 @@ static void ipmr_expire_process(unsigned long arg)
 /* Fill oifs list. It is called under write locked mrt_lock. */
 
-static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
+static void ipmr_update_thresholds(struct net *net, struct mfc_cache *cache,
+				   unsigned char *ttls)
 {
 	int vifi;
-	struct net *net = mfc_net(cache);
 
 	cache->mfc_un.res.minvif = MAXVIFS;
 	cache->mfc_un.res.maxvif = 0;
@@ -547,24 +547,22 @@ static struct mfc_cache *ipmr_cache_find(struct net *net,
 /*
  *	Allocate a multicast cache entry
  */
-static struct mfc_cache *ipmr_cache_alloc(struct net *net)
+static struct mfc_cache *ipmr_cache_alloc(void)
 {
 	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
 	if (c == NULL)
 		return NULL;
 	c->mfc_un.res.minvif = MAXVIFS;
-	mfc_net_set(c, net);
 	return c;
 }
 
-static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net)
+static struct mfc_cache *ipmr_cache_alloc_unres(void)
 {
 	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
 	if (c == NULL)
 		return NULL;
 
 	skb_queue_head_init(&c->mfc_un.unres.unresolved);
 	c->mfc_un.unres.expires = jiffies + 10*HZ;
-	mfc_net_set(c, net);
 	return c;
 }
@@ -572,7 +570,8 @@ static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net)
  *	A cache entry has gone into a resolved state from queued
  */
 
-static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
+static void ipmr_cache_resolve(struct net *net, struct mfc_cache *uc,
+			       struct mfc_cache *c)
 {
 	struct sk_buff *skb;
 	struct nlmsgerr *e;
@@ -585,7 +584,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
 		if (ip_hdr(skb)->version == 0) {
 			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
 
-			if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
+			if (ipmr_fill_mroute(net, skb, c, NLMSG_DATA(nlh)) > 0) {
 				nlh->nlmsg_len = (skb_tail_pointer(skb) -
 						  (u8 *)nlh);
 			} else {
@@ -597,9 +596,9 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
 				memset(&e->msg, 0, sizeof(e->msg));
 			}
 
-			rtnl_unicast(skb, mfc_net(c), NETLINK_CB(skb).pid);
+			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
 		} else
-			ip_mr_forward(skb, c, 0);
+			ip_mr_forward(net, skb, c, 0);
 	}
 }
@@ -717,7 +716,7 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
 	 */
 	if (atomic_read(&net->ipv4.cache_resolve_queue_len) >= 10 ||
-	    (c = ipmr_cache_alloc_unres(net)) == NULL) {
+	    (c = ipmr_cache_alloc_unres()) == NULL) {
 		spin_unlock_bh(&mfc_unres_lock);
 
 		kfree_skb(skb);
@@ -814,7 +813,7 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
 	if (c != NULL) {
 		write_lock_bh(&mrt_lock);
 		c->mfc_parent = mfc->mfcc_parent;
-		ipmr_update_thresholds(c, mfc->mfcc_ttls);
+		ipmr_update_thresholds(net, c, mfc->mfcc_ttls);
 		if (!mrtsock)
 			c->mfc_flags |= MFC_STATIC;
 		write_unlock_bh(&mrt_lock);
@@ -824,14 +823,14 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
 	if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
 		return -EINVAL;
 
-	c = ipmr_cache_alloc(net);
+	c = ipmr_cache_alloc();
 	if (c == NULL)
 		return -ENOMEM;
 
 	c->mfc_origin = mfc->mfcc_origin.s_addr;
 	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
 	c->mfc_parent = mfc->mfcc_parent;
-	ipmr_update_thresholds(c, mfc->mfcc_ttls);
+	ipmr_update_thresholds(net, c, mfc->mfcc_ttls);
 	if (!mrtsock)
 		c->mfc_flags |= MFC_STATIC;
@@ -859,7 +858,7 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
 	spin_unlock_bh(&mfc_unres_lock);
 
 	if (uc) {
-		ipmr_cache_resolve(uc, c);
+		ipmr_cache_resolve(net, uc, c);
 		ipmr_cache_free(uc);
 	}
 	return 0;
@@ -910,7 +909,7 @@ static void mroute_clean_tables(struct net *net)
 		cp = &net->ipv4.mfc_unres_queue;
 		while ((c = *cp) != NULL) {
 			*cp = c->next;
-			ipmr_destroy_unres(c);
+			ipmr_destroy_unres(net, c);
 		}
 		spin_unlock_bh(&mfc_unres_lock);
 	}
@@ -1221,9 +1220,9 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
  *	Processing handlers for ipmr_forward
  */
 
-static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
+static void ipmr_queue_xmit(struct net *net, struct sk_buff *skb,
+			    struct mfc_cache *c, int vifi)
 {
-	struct net *net = mfc_net(c);
 	const struct iphdr *iph = ip_hdr(skb);
 	struct vif_device *vif = &net->ipv4.vif_table[vifi];
 	struct net_device *dev;
@@ -1335,11 +1334,11 @@ static int ipmr_find_vif(struct net_device *dev)
 /* "local" means that we should preserve one skb (for local delivery) */
 
-static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local)
+static int ip_mr_forward(struct net *net, struct sk_buff *skb,
+			 struct mfc_cache *cache, int local)
 {
 	int psend = -1;
 	int vif, ct;
-	struct net *net = mfc_net(cache);
 
 	vif = cache->mfc_parent;
 	cache->mfc_un.res.pkt++;
@@ -1396,7 +1395,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
 			if (psend != -1) {
 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 				if (skb2)
-					ipmr_queue_xmit(skb2, cache, psend);
+					ipmr_queue_xmit(net, skb2, cache, psend);
 			}
 			psend = ct;
 		}
@@ -1405,9 +1404,9 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
 		if (local) {
 			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 			if (skb2)
-				ipmr_queue_xmit(skb2, cache, psend);
+				ipmr_queue_xmit(net, skb2, cache, psend);
 		} else {
-			ipmr_queue_xmit(skb, cache, psend);
+			ipmr_queue_xmit(net, skb, cache, psend);
 			return 0;
 		}
 	}
@@ -1488,7 +1487,7 @@ int ip_mr_input(struct sk_buff *skb)
 		return -ENODEV;
 	}
 
-	ip_mr_forward(skb, cache, local);
+	ip_mr_forward(net, skb, cache, local);
 
 	read_unlock(&mrt_lock);
@@ -1602,11 +1601,11 @@ static int pim_rcv(struct sk_buff * skb)
 #endif
 
 static int
-ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
+ipmr_fill_mroute(struct net *net, struct sk_buff *skb, struct mfc_cache *c,
+		 struct rtmsg *rtm)
 {
 	int ct;
 	struct rtnexthop *nhp;
-	struct net *net = mfc_net(c);
 	u8 *b = skb_tail_pointer(skb);
 	struct rtattr *mp_head;
@@ -1686,7 +1685,7 @@ int ipmr_get_route(struct net *net,
 	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
 		cache->mfc_flags |= MFC_NOTIFY;
 
-	err = ipmr_fill_mroute(skb, cache, rtm);
+	err = ipmr_fill_mroute(net, skb, cache, rtm);
 	read_unlock(&mrt_lock);
 	return err;
 }