Commit 9d930594 authored by David S. Miller
parents 738faca3 96120d86
@@ -270,7 +270,8 @@ struct nf_afinfo {
 				   unsigned int dataoff,
 				   unsigned int len,
 				   u_int8_t protocol);
-	int		(*route)(struct dst_entry **dst, struct flowi *fl);
+	int		(*route)(struct net *net, struct dst_entry **dst,
+				 struct flowi *fl, bool strict);
 	void		(*saveroute)(const struct sk_buff *skb,
 				     struct nf_queue_entry *entry);
 	int		(*reroute)(struct sk_buff *skb,
...
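Every caller of the route hook changes in lock-step with this signature; a minimal sketch of the new calling convention, modelled on the xt_TCPMSS and nf_conntrack_h323 hunks further down (the `&init_net` and `false` arguments are what those callers pass, not requirements of the hook):

```c
/* Sketch: resolve a route through the AF-specific netfilter ops.
 * Assumes a struct flowi *fl already filled in by the caller. */
const struct nf_afinfo *ai;
struct dst_entry *dst = NULL;

rcu_read_lock();
ai = nf_get_afinfo(family);	/* NFPROTO_IPV4 or NFPROTO_IPV6 */
if (ai != NULL)
	ai->route(&init_net, &dst, fl, false);	/* new: net + strict */
rcu_read_unlock();

if (dst != NULL) {
	/* ... use dst->dev, dst_mtu(dst), ... */
	dst_release(dst);
}
```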
@@ -293,7 +293,7 @@ struct ip_set {
 	/* Lock protecting the set data */
 	rwlock_t lock;
 	/* References to the set */
-	atomic_t ref;
+	u32 ref;
 	/* The core set type */
 	struct ip_set_type *type;
 	/* The type variant doing the real job */
...
@@ -515,8 +515,7 @@ type_pf_head(struct ip_set *set, struct sk_buff *skb)
 	if (h->netmask != HOST_MASK)
 		NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask);
 #endif
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
 	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize));
 	if (with_timeout(h->timeout))
 		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout));
...
@@ -52,7 +52,7 @@ static inline struct net *skb_net(const struct sk_buff *skb)
 	 */
 	if (likely(skb->dev && skb->dev->nd_net))
 		return dev_net(skb->dev);
-	if (skb_dst(skb)->dev)
+	if (skb_dst(skb) && skb_dst(skb)->dev)
 		return dev_net(skb_dst(skb)->dev);
 	WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
 	     __func__, __LINE__);
...
@@ -221,9 +221,10 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
 	return csum;
 }

-static int nf_ip_route(struct dst_entry **dst, struct flowi *fl)
+static int nf_ip_route(struct net *net, struct dst_entry **dst,
+		       struct flowi *fl, bool strict __always_unused)
 {
-	struct rtable *rt = ip_route_output_key(&init_net, &fl->u.ip4);
+	struct rtable *rt = ip_route_output_key(net, &fl->u.ip4);
 	if (IS_ERR(rt))
 		return PTR_ERR(rt);
 	*dst = &rt->dst;
...
@@ -90,9 +90,18 @@ static int nf_ip6_reroute(struct sk_buff *skb,
 	return 0;
 }

-static int nf_ip6_route(struct dst_entry **dst, struct flowi *fl)
+static int nf_ip6_route(struct net *net, struct dst_entry **dst,
+			struct flowi *fl, bool strict)
 {
-	*dst = ip6_route_output(&init_net, NULL, &fl->u.ip6);
+	static const struct ipv6_pinfo fake_pinfo;
+	static const struct inet_sock fake_sk = {
+		/* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */
+		.sk.sk_bound_dev_if = 1,
+		.pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
+	};
+	const void *sk = strict ? &fake_sk : NULL;
+
+	*dst = ip6_route_output(net, sk, &fl->u.ip6);
 	return (*dst)->error;
 }
...
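The dummy socket above is the whole point of the `strict` flag. A hedged paraphrase of the 2.6.39-era flag selection in `ip6_route_output()` that the fake `inet_sock` is aimed at (from memory, not verbatim):

```c
/* ip6_route_output() flag selection, paraphrased sketch: */
if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
	flags |= RT6_LOOKUP_F_IFACE;	/* what strict == true wants */

if (!ipv6_addr_any(&fl6->saddr))
	flags |= RT6_LOOKUP_F_HAS_SADDR;
else if (sk)	/* dereferences inet6_sk(sk): why fake_sk needs .pinet6 */
	flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
```

So `sk_bound_dev_if = 1` only has to be non-zero; the interface actually used for the lookup still comes from `fl->u.ip6.flowi6_oif`.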
@@ -652,7 +652,6 @@ comment "Xtables matches"
 config NETFILTER_XT_MATCH_ADDRTYPE
 	tristate '"addrtype" address type match support'
 	depends on NETFILTER_ADVANCED
-	depends on (IPV6 || IPV6=n)
 	---help---
 	  This option allows you to match what routing thinks of an address,
 	  eg. UNICAST, LOCAL, BROADCAST, ...
...
@@ -338,8 +338,7 @@ bitmap_ip_head(struct ip_set *set, struct sk_buff *skb)
 	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
 	if (map->netmask != 32)
 		NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask);
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
 	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
 		      htonl(sizeof(*map) + map->memsize));
 	if (with_timeout(map->timeout))
...
@@ -434,8 +434,7 @@ bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb)
 		goto nla_put_failure;
 	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
 	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
 	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
 		      htonl(sizeof(*map)
 			    + (map->last_ip - map->first_ip + 1) * map->dsize));
...
@@ -320,8 +320,7 @@ bitmap_port_head(struct ip_set *set, struct sk_buff *skb)
 		goto nla_put_failure;
 	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port));
 	NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
 	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
 		      htonl(sizeof(*map) + map->memsize));
 	if (with_timeout(map->timeout))
...
@@ -26,6 +26,7 @@

 static LIST_HEAD(ip_set_type_list);		/* all registered set types */
 static DEFINE_MUTEX(ip_set_type_mutex);		/* protects ip_set_type_list */
+static DEFINE_RWLOCK(ip_set_ref_lock);		/* protects the set refs */

 static struct ip_set **ip_set_list;		/* all individual sets */
 static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */
@@ -301,13 +302,18 @@ EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
 static inline void
 __ip_set_get(ip_set_id_t index)
 {
-	atomic_inc(&ip_set_list[index]->ref);
+	write_lock_bh(&ip_set_ref_lock);
+	ip_set_list[index]->ref++;
+	write_unlock_bh(&ip_set_ref_lock);
 }

 static inline void
 __ip_set_put(ip_set_id_t index)
 {
-	atomic_dec(&ip_set_list[index]->ref);
+	write_lock_bh(&ip_set_ref_lock);
+	BUG_ON(ip_set_list[index]->ref == 0);
+	ip_set_list[index]->ref--;
+	write_unlock_bh(&ip_set_ref_lock);
 }

 /*
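With the nfnl mutex no longer guarding references, the plain `u32` counter is safe only because every writer takes `ip_set_ref_lock` for writing. A minimal userspace analog of the pattern (pthreads standing in for the kernel rwlock; names are illustrative):

```c
#include <assert.h>
#include <pthread.h>

static pthread_rwlock_t ref_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned int ref;	/* plain counter, like set->ref */

static void set_get(void)
{
	pthread_rwlock_wrlock(&ref_lock);
	ref++;
	pthread_rwlock_unlock(&ref_lock);
}

static void set_put(void)
{
	pthread_rwlock_wrlock(&ref_lock);
	assert(ref != 0);	/* mirrors the BUG_ON() above */
	ref--;
	pthread_rwlock_unlock(&ref_lock);
}
```

The gain over `atomic_inc()`/`atomic_dec()` is not the increment itself but that DESTROY and RENAME below can take the read side and test `ref` with no window between the test and the action.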
@@ -324,7 +330,7 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
 	struct ip_set *set = ip_set_list[index];
 	int ret = 0;

-	BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+	BUG_ON(set == NULL);
 	pr_debug("set %s, index %u\n", set->name, index);

 	if (dim < set->type->dimension ||
@@ -356,7 +362,7 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
 	struct ip_set *set = ip_set_list[index];
 	int ret;

-	BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+	BUG_ON(set == NULL);
 	pr_debug("set %s, index %u\n", set->name, index);

 	if (dim < set->type->dimension ||
@@ -378,7 +384,7 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
 	struct ip_set *set = ip_set_list[index];
 	int ret = 0;

-	BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+	BUG_ON(set == NULL);
 	pr_debug("set %s, index %u\n", set->name, index);

 	if (dim < set->type->dimension ||
@@ -397,7 +403,6 @@ EXPORT_SYMBOL_GPL(ip_set_del);
  * Find set by name, reference it once. The reference makes sure the
  * thing pointed to, does not go away under our feet.
  *
- * The nfnl mutex must already be activated.
  */
 ip_set_id_t
 ip_set_get_byname(const char *name, struct ip_set **set)
@@ -423,15 +428,12 @@ EXPORT_SYMBOL_GPL(ip_set_get_byname);
  * reference count by 1. The caller shall not assume the index
  * to be valid, after calling this function.
  *
- * The nfnl mutex must already be activated.
  */
 void
 ip_set_put_byindex(ip_set_id_t index)
 {
-	if (ip_set_list[index] != NULL) {
-		BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
+	if (ip_set_list[index] != NULL)
 		__ip_set_put(index);
-	}
 }

 EXPORT_SYMBOL_GPL(ip_set_put_byindex);
@@ -441,7 +443,6 @@ EXPORT_SYMBOL_GPL(ip_set_put_byindex);
  * can't be destroyed. The set cannot be renamed due to
  * the referencing either.
  *
- * The nfnl mutex must already be activated.
  */
 const char *
 ip_set_name_byindex(ip_set_id_t index)
@@ -449,7 +450,7 @@ ip_set_name_byindex(ip_set_id_t index)
 	const struct ip_set *set = ip_set_list[index];

 	BUG_ON(set == NULL);
-	BUG_ON(atomic_read(&set->ref) == 0);
+	BUG_ON(set->ref == 0);

 	/* Referenced, so it's safe */
 	return set->name;
@@ -515,10 +516,7 @@ void
 ip_set_nfnl_put(ip_set_id_t index)
 {
 	nfnl_lock();
-	if (ip_set_list[index] != NULL) {
-		BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
-		__ip_set_put(index);
-	}
+	ip_set_put_byindex(index);
 	nfnl_unlock();
 }

 EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
@@ -526,7 +524,7 @@ EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
 /*
  * Communication protocol with userspace over netlink.
  *
- * We already locked by nfnl_lock.
+ * The commands are serialized by the nfnl mutex.
  */

 static inline bool
@@ -657,7 +655,6 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
 		return -ENOMEM;
 	rwlock_init(&set->lock);
 	strlcpy(set->name, name, IPSET_MAXNAMELEN);
-	atomic_set(&set->ref, 0);
 	set->family = family;

 	/*
@@ -690,8 +687,8 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
 	/*
 	 * Here, we have a valid, constructed set and we are protected
-	 * by nfnl_lock. Find the first free index in ip_set_list and
-	 * check clashing.
+	 * by the nfnl mutex. Find the first free index in ip_set_list
+	 * and check clashing.
 	 */
 	if ((ret = find_free_id(set->name, &index, &clash)) != 0) {
 		/* If this is the same set and requested, ignore error */
@@ -751,31 +748,51 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
 	       const struct nlattr * const attr[])
 {
 	ip_set_id_t i;
+	int ret = 0;

 	if (unlikely(protocol_failed(attr)))
 		return -IPSET_ERR_PROTOCOL;

-	/* References are protected by the nfnl mutex */
+	/* Commands are serialized and references are
+	 * protected by the ip_set_ref_lock.
+	 * External systems (i.e. xt_set) must call
+	 * ip_set_put|get_nfnl_* functions, that way we
+	 * can safely check references here.
+	 *
+	 * list:set timer can only decrement the reference
+	 * counter, so if it's already zero, we can proceed
+	 * without holding the lock.
+	 */
+	read_lock_bh(&ip_set_ref_lock);
 	if (!attr[IPSET_ATTR_SETNAME]) {
 		for (i = 0; i < ip_set_max; i++) {
-			if (ip_set_list[i] != NULL &&
-			    (atomic_read(&ip_set_list[i]->ref)))
-				return -IPSET_ERR_BUSY;
+			if (ip_set_list[i] != NULL && ip_set_list[i]->ref) {
+				ret = IPSET_ERR_BUSY;
+				goto out;
+			}
 		}
+		read_unlock_bh(&ip_set_ref_lock);
 		for (i = 0; i < ip_set_max; i++) {
 			if (ip_set_list[i] != NULL)
 				ip_set_destroy_set(i);
 		}
 	} else {
 		i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
-		if (i == IPSET_INVALID_ID)
-			return -ENOENT;
-		else if (atomic_read(&ip_set_list[i]->ref))
-			return -IPSET_ERR_BUSY;
+		if (i == IPSET_INVALID_ID) {
+			ret = -ENOENT;
+			goto out;
+		} else if (ip_set_list[i]->ref) {
+			ret = -IPSET_ERR_BUSY;
+			goto out;
+		}
+		read_unlock_bh(&ip_set_ref_lock);

 		ip_set_destroy_set(i);
 	}
 	return 0;
+
+out:
+	read_unlock_bh(&ip_set_ref_lock);
+	return ret;
 }

 /* Flush sets */
@@ -834,6 +851,7 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
 	struct ip_set *set;
 	const char *name2;
 	ip_set_id_t i;
+	int ret = 0;

 	if (unlikely(protocol_failed(attr) ||
 		     attr[IPSET_ATTR_SETNAME] == NULL ||
@@ -843,25 +861,33 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
 	set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
 	if (set == NULL)
 		return -ENOENT;
-	if (atomic_read(&set->ref) != 0)
-		return -IPSET_ERR_REFERENCED;
+
+	read_lock_bh(&ip_set_ref_lock);
+	if (set->ref != 0) {
+		ret = -IPSET_ERR_REFERENCED;
+		goto out;
+	}

 	name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
 	for (i = 0; i < ip_set_max; i++) {
 		if (ip_set_list[i] != NULL &&
-		    STREQ(ip_set_list[i]->name, name2))
-			return -IPSET_ERR_EXIST_SETNAME2;
+		    STREQ(ip_set_list[i]->name, name2)) {
+			ret = -IPSET_ERR_EXIST_SETNAME2;
+			goto out;
+		}
 	}
 	strncpy(set->name, name2, IPSET_MAXNAMELEN);

-	return 0;
+out:
+	read_unlock_bh(&ip_set_ref_lock);
+	return ret;
 }

 /* Swap two sets so that name/index points to the other.
  * References and set names are also swapped.
  *
- * We are protected by the nfnl mutex and references are
- * manipulated only by holding the mutex. The kernel interfaces
+ * The commands are serialized by the nfnl mutex and references are
+ * protected by the ip_set_ref_lock. The kernel interfaces
  * do not hold the mutex but the pointer settings are atomic
  * so the ip_set_list always contains valid pointers to the sets.
  */
@@ -874,7 +900,6 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
 	struct ip_set *from, *to;
 	ip_set_id_t from_id, to_id;
 	char from_name[IPSET_MAXNAMELEN];
-	u32 from_ref;

 	if (unlikely(protocol_failed(attr) ||
 		     attr[IPSET_ATTR_SETNAME] == NULL ||
@@ -899,17 +924,15 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
 	      from->type->family == to->type->family))
 		return -IPSET_ERR_TYPE_MISMATCH;

-	/* No magic here: ref munging protected by the nfnl_lock */
 	strncpy(from_name, from->name, IPSET_MAXNAMELEN);
-	from_ref = atomic_read(&from->ref);
-
 	strncpy(from->name, to->name, IPSET_MAXNAMELEN);
-	atomic_set(&from->ref, atomic_read(&to->ref));
 	strncpy(to->name, from_name, IPSET_MAXNAMELEN);
-	atomic_set(&to->ref, from_ref);

+	write_lock_bh(&ip_set_ref_lock);
+	swap(from->ref, to->ref);
 	ip_set_list[from_id] = to;
 	ip_set_list[to_id] = from;
+	write_unlock_bh(&ip_set_ref_lock);

 	return 0;
 }
@@ -926,7 +949,7 @@ ip_set_dump_done(struct netlink_callback *cb)
 {
 	if (cb->args[2]) {
 		pr_debug("release set %s\n", ip_set_list[cb->args[1]]->name);
-		__ip_set_put((ip_set_id_t) cb->args[1]);
+		ip_set_put_byindex((ip_set_id_t) cb->args[1]);
 	}
 	return 0;
 }
@@ -1068,7 +1091,7 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
 	/* If there was an error or set is done, release set */
 	if (ret || !cb->args[2]) {
 		pr_debug("release set %s\n", ip_set_list[index]->name);
-		__ip_set_put(index);
+		ip_set_put_byindex(index);
 	}

 	/* If we dump all sets, continue with dumping last ones */
...
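Why the swap path above now takes the write lock: per the comment added in ip_set_destroy(), the list:set garbage-collector timer decrements reference counters without holding the nfnl mutex. With the old bare atomics, the read-then-set pair in ip_set_swap() was not atomic as a unit, so a concurrent decrement could be lost (sketch of the window):

```
ip_set_swap(), old code              list:set gc timer
-----------------------              -----------------
from_ref = atomic_read(&from->ref);  /* from_ref == N */
                                     atomic_dec(&from->ref); /* N - 1 */
atomic_set(&to->ref, from_ref);      /* decrement lost: to->ref == N */
```

Since `__ip_set_put()` and `swap(from->ref, to->ref)` now serialize on `ip_set_ref_lock`, this interleaving cannot happen.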
@@ -43,14 +43,19 @@ struct list_set {
 static inline struct set_elem *
 list_set_elem(const struct list_set *map, u32 id)
 {
-	return (struct set_elem *)((char *)map->members + id * map->dsize);
+	return (struct set_elem *)((void *)map->members + id * map->dsize);
+}
+
+static inline struct set_telem *
+list_set_telem(const struct list_set *map, u32 id)
+{
+	return (struct set_telem *)((void *)map->members + id * map->dsize);
 }

 static inline bool
 list_set_timeout(const struct list_set *map, u32 id)
 {
-	const struct set_telem *elem =
-		(const struct set_telem *) list_set_elem(map, id);
+	const struct set_telem *elem = list_set_telem(map, id);

 	return ip_set_timeout_test(elem->timeout);
 }
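list:set keeps its elements in one flat allocation, where `dsize` is the per-slot size: `sizeof(struct set_elem)` for plain sets and `sizeof(struct set_telem)` for sets with timeouts. Both helpers are the same byte-offset arithmetic. A standalone illustration of the layout (toy structs, not the kernel's):

```c
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for the kernel's set_telem. */
struct telem { unsigned short id; unsigned int timeout; };

int main(void)
{
	size_t dsize = sizeof(struct telem);	/* set created with timeouts */
	size_t size = 4;			/* number of slots */
	void *members = calloc(size, dsize);

	/* Same arithmetic as list_set_telem(): slot i starts at
	 * members + i * dsize.  (The kernel code does arithmetic on
	 * void *, a GNU C extension; char * is the portable spelling.) */
	struct telem *e = (struct telem *)((char *)members + 2 * dsize);
	e->id = 7;
	e->timeout = 100;

	printf("slot 2: id=%hu timeout=%u\n", e->id, e->timeout);
	free(members);
	return 0;
}
```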
@@ -58,19 +63,11 @@ list_set_timeout(const struct list_set *map, u32 id)
 static inline bool
 list_set_expired(const struct list_set *map, u32 id)
 {
-	const struct set_telem *elem =
-		(const struct set_telem *) list_set_elem(map, id);
+	const struct set_telem *elem = list_set_telem(map, id);

 	return ip_set_timeout_expired(elem->timeout);
 }

-static inline int
-list_set_exist(const struct set_telem *elem)
-{
-	return elem->id != IPSET_INVALID_ID &&
-	       !ip_set_timeout_expired(elem->timeout);
-}
-
 /* Set list without and with timeout */

 static int
@@ -146,11 +143,11 @@ list_elem_tadd(struct list_set *map, u32 i, ip_set_id_t id,
 	struct set_telem *e;

 	for (; i < map->size; i++) {
-		e = (struct set_telem *)list_set_elem(map, i);
+		e = list_set_telem(map, i);
 		swap(e->id, id);
+		swap(e->timeout, timeout);
 		if (e->id == IPSET_INVALID_ID)
 			break;
-		swap(e->timeout, timeout);
 	}
 }
@@ -164,7 +161,7 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
 		/* Last element replaced: e.g. add new,before,last */
 		ip_set_put_byindex(e->id);
 	if (with_timeout(map->timeout))
-		list_elem_tadd(map, i, id, timeout);
+		list_elem_tadd(map, i, id, ip_set_timeout_set(timeout));
 	else
 		list_elem_add(map, i, id);
} }
static int static int
list_set_del(struct list_set *map, ip_set_id_t id, u32 i) list_set_del(struct list_set *map, u32 i)
{ {
struct set_elem *a = list_set_elem(map, i), *b; struct set_elem *a = list_set_elem(map, i), *b;
ip_set_put_byindex(id); ip_set_put_byindex(a->id);
for (; i < map->size - 1; i++) { for (; i < map->size - 1; i++) {
b = list_set_elem(map, i + 1); b = list_set_elem(map, i + 1);
@@ -308,11 +305,11 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
 			    (before == 0 ||
 			     (before > 0 &&
 			      next_id_eq(map, i, refid))))
-				ret = list_set_del(map, id, i);
+				ret = list_set_del(map, i);
 			else if (before < 0 &&
 				 elem->id == refid &&
 				 next_id_eq(map, i, id))
-				ret = list_set_del(map, id, i + 1);
+				ret = list_set_del(map, i + 1);
 		}
 		break;
 	default:
@@ -369,8 +366,7 @@ list_set_head(struct ip_set *set, struct sk_buff *skb)
 	NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size));
 	if (with_timeout(map->timeout))
 		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
 	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
 		      htonl(sizeof(*map) + map->size * map->dsize));
 	ipset_nest_end(skb, nested);
@@ -461,16 +457,13 @@ list_set_gc(unsigned long ul_set)
 	struct set_telem *e;
 	u32 i;

-	/* We run parallel with other readers (test element)
-	 * but adding/deleting new entries is locked out */
-	read_lock_bh(&set->lock);
-	for (i = map->size - 1; i >= 0; i--) {
-		e = (struct set_telem *) list_set_elem(map, i);
-		if (e->id != IPSET_INVALID_ID &&
-		    list_set_expired(map, i))
-			list_set_del(map, e->id, i);
+	write_lock_bh(&set->lock);
+	for (i = 0; i < map->size; i++) {
+		e = list_set_telem(map, i);
+		if (e->id != IPSET_INVALID_ID && list_set_expired(map, i))
+			list_set_del(map, i);
 	}
-	read_unlock_bh(&set->lock);
+	write_unlock_bh(&set->lock);

 	map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
 	add_timer(&map->gc);
...
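Two things changed in the gc walk besides the helper: it now takes the write lock, since `list_set_del()` modifies the array and must not run concurrently with readers; and it iterates forward, because with a `u32` index the old `for (i = map->size - 1; i >= 0; i--)` condition is always true, so the downward walk could only leave the loop by wrapping past zero and indexing out of bounds. A standalone demo of that unsigned-loop trap:

```c
#include <stdio.h>

int main(void)
{
	unsigned int size = 3, i, rounds = 0;

	/* The old loop shape: i >= 0 never becomes false for an
	 * unsigned index, so cap the iterations to let the demo halt. */
	for (i = size - 1; i >= 0; i--) {
		if (++rounds > 10) {
			printf("still looping, i wrapped to %u\n", i);
			return 0;
		}
	}
	printf("never reached\n");
	return 0;
}
```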
@@ -3120,7 +3120,7 @@ static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __be32 state,
 static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
 				   struct netlink_callback *cb)
 {
-	struct net *net = skb_net(skb);
+	struct net *net = skb_sknet(skb);
 	struct netns_ipvs *ipvs = net_ipvs(net);

 	mutex_lock(&__ip_vs_mutex);
...
@@ -631,7 +631,7 @@ static int decode_seqof(bitstr_t *bs, const struct field_t *f,
 		CHECK_BOUND(bs, 2);
 		count = *bs->cur++;
 		count <<= 8;
-		count = *bs->cur++;
+		count += *bs->cur++;
 		break;
 	case SEMI:
 		BYTE_ALIGN(bs);
...
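The one-character ASN.1 fix deserves a note: the two bytes are a big-endian 16-bit element count, and the old `=` threw away the shifted high byte immediately after computing it. A standalone check:

```c
#include <stdio.h>

int main(void)
{
	unsigned char buf[] = { 0x01, 0x02 };	/* count 258 on the wire */
	const unsigned char *cur = buf;
	unsigned int count;

	count = *cur++;
	count <<= 8;
	count += *cur++;	/* with '=' here the result is 2, not 258 */

	printf("count = %u\n", count);	/* prints 258 */
	return 0;
}
```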
@@ -731,10 +731,10 @@ static int callforward_do_filter(const union nf_inet_addr *src,

 		memset(&fl2, 0, sizeof(fl2));
 		fl2.daddr = dst->ip;
-		if (!afinfo->route((struct dst_entry **)&rt1,
-				   flowi4_to_flowi(&fl1))) {
-			if (!afinfo->route((struct dst_entry **)&rt2,
-					   flowi4_to_flowi(&fl2))) {
+		if (!afinfo->route(&init_net, (struct dst_entry **)&rt1,
+				   flowi4_to_flowi(&fl1), false)) {
+			if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
+					   flowi4_to_flowi(&fl2), false)) {
 				if (rt1->rt_gateway == rt2->rt_gateway &&
 				    rt1->dst.dev == rt2->dst.dev)
 					ret = 1;
@@ -755,10 +755,10 @@ static int callforward_do_filter(const union nf_inet_addr *src,

 		memset(&fl2, 0, sizeof(fl2));
 		ipv6_addr_copy(&fl2.daddr, &dst->in6);
-		if (!afinfo->route((struct dst_entry **)&rt1,
-				   flowi6_to_flowi(&fl1))) {
-			if (!afinfo->route((struct dst_entry **)&rt2,
-					   flowi6_to_flowi(&fl2))) {
+		if (!afinfo->route(&init_net, (struct dst_entry **)&rt1,
+				   flowi6_to_flowi(&fl1), false)) {
+			if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
+					   flowi6_to_flowi(&fl2), false)) {
 				if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway,
 					    sizeof(rt1->rt6i_gateway)) &&
 				    rt1->dst.dev == rt2->dst.dev)
...
@@ -166,7 +166,7 @@ static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
 	rcu_read_lock();
 	ai = nf_get_afinfo(family);
 	if (ai != NULL)
-		ai->route((struct dst_entry **)&rt, &fl);
+		ai->route(&init_net, (struct dst_entry **)&rt, &fl, false);
 	rcu_read_unlock();

 	if (rt != NULL) {
...
@@ -32,11 +32,32 @@ MODULE_ALIAS("ipt_addrtype");
 MODULE_ALIAS("ip6t_addrtype");

 #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
-static u32 xt_addrtype_rt6_to_type(const struct rt6_info *rt)
+static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
+			    const struct in6_addr *addr)
 {
+	const struct nf_afinfo *afinfo;
+	struct flowi6 flow;
+	struct rt6_info *rt;
 	u32 ret;
+	int route_err;

+	memset(&flow, 0, sizeof(flow));
+	ipv6_addr_copy(&flow.daddr, addr);
+	if (dev)
+		flow.flowi6_oif = dev->ifindex;
+
+	rcu_read_lock();
+
+	afinfo = nf_get_afinfo(NFPROTO_IPV6);
+	if (afinfo != NULL)
+		route_err = afinfo->route(net, (struct dst_entry **)&rt,
+					  flowi6_to_flowi(&flow), !!dev);
+	else
+		route_err = 1;
+
+	rcu_read_unlock();

-	if (!rt)
+	if (route_err)
 		return XT_ADDRTYPE_UNREACHABLE;

 	if (rt->rt6i_flags & RTF_REJECT)
@@ -48,6 +69,9 @@ static u32 xt_addrtype_rt6_to_type(const struct rt6_info *rt)
 		ret |= XT_ADDRTYPE_LOCAL;
 	if (rt->rt6i_flags & RTF_ANYCAST)
 		ret |= XT_ADDRTYPE_ANYCAST;
+
+	dst_release(&rt->dst);
 	return ret;
 }

@@ -65,18 +89,8 @@ static bool match_type6(struct net *net, const struct net_device *dev,
 		return false;

 	if ((XT_ADDRTYPE_LOCAL | XT_ADDRTYPE_ANYCAST |
-	     XT_ADDRTYPE_UNREACHABLE) & mask) {
-		struct rt6_info *rt;
-		u32 type;
-		int ifindex = dev ? dev->ifindex : 0;
-
-		rt = rt6_lookup(net, addr, NULL, ifindex, !!dev);
-
-		type = xt_addrtype_rt6_to_type(rt);
-
-		dst_release(&rt->dst);
-		return !!(mask & type);
-	}
+	     XT_ADDRTYPE_UNREACHABLE) & mask)
+		return !!(mask & match_lookup_rt6(net, dev, addr));
 	return true;
 }
...
@@ -195,7 +195,7 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
 		return info->match_flags & XT_CONNTRACK_STATE;
 	if ((info->match_flags & XT_CONNTRACK_DIRECTION) &&
 	    (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) ^
-	    !!(info->invert_flags & XT_CONNTRACK_DIRECTION))
+	    !(info->invert_flags & XT_CONNTRACK_DIRECTION))
 		return false;

 	if (info->match_flags & XT_CONNTRACK_ORIGSRC)
...
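The direction fix is easy to misread: the match must *fail* when `(CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) ^ X` is true, so `X` has to be 1 exactly when the user did *not* invert `--ctdir`, which is `!` of the invert bit, not `!!`. A standalone truth-table check (the constants are stand-ins mirroring the uapi values):

```c
#include <stdio.h>

#define IP_CT_DIR_ORIGINAL	0
#define IP_CT_DIR_REPLY		1
#define XT_CONNTRACK_DIRECTION	0x1000

/* Returns 0 where conntrack_mt() would "return false". */
static int dir_matches(int dir, unsigned int invert_flags)
{
	if ((dir == IP_CT_DIR_ORIGINAL) ^
	    !(invert_flags & XT_CONNTRACK_DIRECTION))
		return 0;
	return 1;
}

int main(void)
{
	/* --ctdir ORIGINAL: should match ORIGINAL only */
	printf("%d %d\n", dir_matches(IP_CT_DIR_ORIGINAL, 0),
	       dir_matches(IP_CT_DIR_REPLY, 0));		/* 1 0 */
	/* ! --ctdir ORIGINAL: should match REPLY only */
	printf("%d %d\n",
	       dir_matches(IP_CT_DIR_ORIGINAL, XT_CONNTRACK_DIRECTION),
	       dir_matches(IP_CT_DIR_REPLY, XT_CONNTRACK_DIRECTION)); /* 0 1 */
	return 0;
}
```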