Commit 76506a98 authored by Jiri Bohac, committed by David S. Miller

IPv6: fix DESYNC_FACTOR

The IPv6 temporary address generation uses a variable called DESYNC_FACTOR
to prevent hosts updating the addresses at the same time. Quoting RFC 4941:

   ... The value DESYNC_FACTOR is a random value (different for each
   client) that ensures that clients don't synchronize with each other and
   generate new addresses at exactly the same time ...

DESYNC_FACTOR is defined as:

   DESYNC_FACTOR -- A random value within the range 0 - MAX_DESYNC_FACTOR.
   It is computed once at system start (rather than each time it is used)
   and must never be greater than (TEMP_VALID_LIFETIME - REGEN_ADVANCE).

First, I believe the RFC has a typo in it and meant to say: "and must
never be greater than (TEMP_PREFERRED_LIFETIME - REGEN_ADVANCE)"

The reason is that at various places in the RFC, DESYNC_FACTOR is used in
a calculation like (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR) or
(TEMP_PREFERRED_LIFETIME - REGEN_ADVANCE - DESYNC_FACTOR). It needs to be
smaller than (TEMP_PREFERRED_LIFETIME - REGEN_ADVANCE) for the result of
these calculations to be larger than zero. It's never used in a
calculation together with TEMP_VALID_LIFETIME.

I already submitted an errata to the rfc-editor:
https://www.rfc-editor.org/errata_search.php?rfc=4941

The Linux implementation of DESYNC_FACTOR is very wrong:
max_desync_factor is used in places DESYNC_FACTOR should be used.
max_desync_factor is initialized to the RFC-recommended value for
MAX_DESYNC_FACTOR (600) but the whole point is to get a _random_ value.

And nothing ensures that the value used is not greater than
(TEMP_PREFERRED_LIFETIME - REGEN_ADVANCE), which leads to underflows.  The
effect can easily be observed when setting the temp_prefered_lft sysctl
e.g. to 60. The preferred lifetime of the temporary addresses will be
bogus.

TEMP_PREFERRED_LIFETIME and REGEN_ADVANCE are not constants and can be
influenced by these three sysctls: regen_max_retry, dad_transmits and
temp_prefered_lft. Thus, the upper bound for desync_factor needs to be
re-calculated each time a new address is generated and if desync_factor is
larger than the new upper bound, a new random value needs to be
re-generated.

And since we already have max_desync_factor configurable per interface, we
also need to calculate and store desync_factor per interface.
Signed-off-by: Jiri Bohac <jbohac@suse.cz>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9d6280da
...@@ -190,6 +190,7 @@ struct inet6_dev { ...@@ -190,6 +190,7 @@ struct inet6_dev {
__u32 if_flags; __u32 if_flags;
int dead; int dead;
u32 desync_factor;
u8 rndid[8]; u8 rndid[8];
struct list_head tempaddr_list; struct list_head tempaddr_list;
......
...@@ -422,6 +422,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) ...@@ -422,6 +422,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
#endif #endif
INIT_LIST_HEAD(&ndev->tempaddr_list); INIT_LIST_HEAD(&ndev->tempaddr_list);
ndev->desync_factor = U32_MAX;
if ((dev->flags&IFF_LOOPBACK) || if ((dev->flags&IFF_LOOPBACK) ||
dev->type == ARPHRD_TUNNEL || dev->type == ARPHRD_TUNNEL ||
dev->type == ARPHRD_TUNNEL6 || dev->type == ARPHRD_TUNNEL6 ||
...@@ -1183,6 +1184,7 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i ...@@ -1183,6 +1184,7 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
int ret = 0; int ret = 0;
u32 addr_flags; u32 addr_flags;
unsigned long now = jiffies; unsigned long now = jiffies;
long max_desync_factor;
write_lock_bh(&idev->lock); write_lock_bh(&idev->lock);
if (ift) { if (ift) {
...@@ -1218,20 +1220,41 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i ...@@ -1218,20 +1220,41 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
ipv6_try_regen_rndid(idev, tmpaddr); ipv6_try_regen_rndid(idev, tmpaddr);
memcpy(&addr.s6_addr[8], idev->rndid, 8); memcpy(&addr.s6_addr[8], idev->rndid, 8);
age = (now - ifp->tstamp) / HZ; age = (now - ifp->tstamp) / HZ;
regen_advance = idev->cnf.regen_max_retry *
idev->cnf.dad_transmits *
NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
/* recalculate max_desync_factor each time and update
* idev->desync_factor if it's larger
*/
max_desync_factor = min_t(__u32,
idev->cnf.max_desync_factor,
idev->cnf.temp_prefered_lft - regen_advance);
if (unlikely(idev->desync_factor > max_desync_factor)) {
if (max_desync_factor > 0) {
get_random_bytes(&idev->desync_factor,
sizeof(idev->desync_factor));
idev->desync_factor %= max_desync_factor;
} else {
idev->desync_factor = 0;
}
}
tmp_valid_lft = min_t(__u32, tmp_valid_lft = min_t(__u32,
ifp->valid_lft, ifp->valid_lft,
idev->cnf.temp_valid_lft + age); idev->cnf.temp_valid_lft + age);
tmp_prefered_lft = min_t(__u32, tmp_prefered_lft = idev->cnf.temp_prefered_lft + age -
ifp->prefered_lft, idev->desync_factor;
idev->cnf.temp_prefered_lft + age - /* guard against underflow in case of concurrent updates to cnf */
idev->cnf.max_desync_factor); if (unlikely(tmp_prefered_lft < 0))
tmp_prefered_lft = 0;
tmp_prefered_lft = min_t(__u32, ifp->prefered_lft, tmp_prefered_lft);
tmp_plen = ifp->prefix_len; tmp_plen = ifp->prefix_len;
tmp_tstamp = ifp->tstamp; tmp_tstamp = ifp->tstamp;
spin_unlock_bh(&ifp->lock); spin_unlock_bh(&ifp->lock);
regen_advance = idev->cnf.regen_max_retry *
idev->cnf.dad_transmits *
NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
write_unlock_bh(&idev->lock); write_unlock_bh(&idev->lock);
/* A temporary address is created only if this calculated Preferred /* A temporary address is created only if this calculated Preferred
...@@ -2316,7 +2339,7 @@ static void manage_tempaddrs(struct inet6_dev *idev, ...@@ -2316,7 +2339,7 @@ static void manage_tempaddrs(struct inet6_dev *idev,
max_valid = 0; max_valid = 0;
max_prefered = idev->cnf.temp_prefered_lft - max_prefered = idev->cnf.temp_prefered_lft -
idev->cnf.max_desync_factor - age; idev->desync_factor - age;
if (max_prefered < 0) if (max_prefered < 0)
max_prefered = 0; max_prefered = 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment