Commit 9f780efa authored by David S. Miller

Merge branch 'ipv6-devconf-lockless'

Eric Dumazet says:

====================
ipv6: lockless accesses to devconf

- The first patch groups the fields used in fast paths into a cacheline_group.

- The following patches annotate all data races around idev->cnf fields.

- The last patch in this series removes RTNL use for RTM_GETNETCONF dumps.

v3: addressed Jakub Kicinski feedback in addrconf_disable_ipv6()
    Added tags from Jiri and Florian.

v2: addressed Jiri Pirko feedback
 - Added "ipv6: addrconf_disable_ipv6() optimizations"
   and "ipv6: addrconf_disable_policy() optimization"
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 65f5dd4f 2a02f837
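
The whole series follows one pattern: these devconf fields are still written from the control path (serialized, e.g. under RTNL), but the packet path now reads them with no lock at all, so both sides get annotated to document the race and to prevent compiler load/store tearing. A minimal sketch of the idiom, with hypothetical helper names (not code from this series):

        /* Control path: writers stay serialized, but must not tear the store. */
        static void devconf_set_hop_limit(struct ipv6_devconf *cnf, __s32 val)
        {
                WRITE_ONCE(cnf->hop_limit, val);
        }

        /* Datapath: lockless reader, possibly concurrent with the writer above. */
        static __s32 devconf_get_hop_limit(const struct ipv6_devconf *cnf)
        {
                return READ_ONCE(cnf->hop_limit);
        }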
@@ -460,7 +460,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
                         set_tun->ttl = ip6_dst_hoplimit(dst);
                         dst_release(dst);
                 } else {
-                        set_tun->ttl = net->ipv6.devconf_all->hop_limit;
+                        set_tun->ttl = READ_ONCE(net->ipv6.devconf_all->hop_limit);
                 }
 #endif
         } else {
@@ -339,7 +339,7 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
         in6_dev = in6_dev_get(netdev);
         if (!in6_dev)
                 goto out;
-        is_router = !!in6_dev->cnf.forwarding;
+        is_router = !!READ_ONCE(in6_dev->cnf.forwarding);
         in6_dev_put(in6_dev);

         /* ipv6_stub != NULL if in6_dev_get returned an inet6_dev */
@@ -3,6 +3,7 @@
 #define _IPV6_H

 #include <uapi/linux/ipv6.h>
+#include <linux/cache.h>

 #define ipv6_optlen(p) (((p)->hdrlen+1) << 3)
 #define ipv6_authlen(p) (((p)->hdrlen+2) << 2)

@@ -10,9 +11,16 @@
  * This structure contains configuration options per IPv6 link.
  */
 struct ipv6_devconf {
-        __s32 forwarding;
+        /* RX & TX fastpath fields. */
+        __cacheline_group_begin(ipv6_devconf_read_txrx);
+        __s32 disable_ipv6;
         __s32 hop_limit;
         __s32 mtu6;
+        __s32 forwarding;
+        __s32 disable_policy;
+        __s32 proxy_ndp;
+        __cacheline_group_end(ipv6_devconf_read_txrx);
+
         __s32 accept_ra;
         __s32 accept_redirects;
         __s32 autoconf;

@@ -45,7 +53,6 @@ struct ipv6_devconf {
         __s32 accept_ra_rt_info_max_plen;
 #endif
 #endif
-        __s32 proxy_ndp;
         __s32 accept_source_route;
         __s32 accept_ra_from_local;
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD

@@ -55,7 +62,6 @@ struct ipv6_devconf {
 #ifdef CONFIG_IPV6_MROUTE
         atomic_t mc_forwarding;
 #endif
-        __s32 disable_ipv6;
         __s32 drop_unicast_in_l2_multicast;
         __s32 accept_dad;
         __s32 force_tllao;

@@ -76,7 +82,6 @@ struct ipv6_devconf {
 #endif
         __u32 enhanced_dad;
         __u32 addr_gen_mode;
-        __s32 disable_policy;
         __s32 ndisc_tclass;
         __s32 rpl_seg_enabled;
         __u32 ioam6_id;
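
The __cacheline_group_begin()/__cacheline_group_end() markers from <linux/cache.h> are zero-size fields delimiting the hot members, so their layout can be verified at build time. A hypothetical check in the style of the existing net_dev_struct_check() (this function is a sketch, not part of the series shown here):

        static void __used ipv6_devconf_struct_check(void)
        {
                /* 6 x __s32 fastpath fields = 24 bytes, well inside one cacheline. */
                CACHELINE_ASSERT_GROUP_MEMBER(struct ipv6_devconf, ipv6_devconf_read_txrx, disable_ipv6);
                CACHELINE_ASSERT_GROUP_MEMBER(struct ipv6_devconf, ipv6_devconf_read_txrx, hop_limit);
                CACHELINE_ASSERT_GROUP_MEMBER(struct ipv6_devconf, ipv6_devconf_read_txrx, mtu6);
                CACHELINE_ASSERT_GROUP_MEMBER(struct ipv6_devconf, ipv6_devconf_read_txrx, forwarding);
                CACHELINE_ASSERT_GROUP_MEMBER(struct ipv6_devconf, ipv6_devconf_read_txrx, disable_policy);
                CACHELINE_ASSERT_GROUP_MEMBER(struct ipv6_devconf, ipv6_devconf_read_txrx, proxy_ndp);
                CACHELINE_ASSERT_GROUP_SIZE(struct ipv6_devconf, ipv6_devconf_read_txrx, 24);
        }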
@@ -417,7 +417,7 @@ static inline bool ip6_ignore_linkdown(const struct net_device *dev)
         if (unlikely(!idev))
                 return true;

-        return !!idev->cnf.ignore_routes_with_linkdown;
+        return !!READ_ONCE(idev->cnf.ignore_routes_with_linkdown);
 }

 void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp);
@@ -332,7 +332,7 @@ static inline unsigned int ip6_dst_mtu_maybe_forward(const struct dst_entry *dst
         rcu_read_lock();
         idev = __in6_dev_get(dst->dev);
         if (idev)
-                mtu = idev->cnf.mtu6;
+                mtu = READ_ONCE(idev->cnf.mtu6);
         rcu_read_unlock();

 out:
@@ -534,13 +534,15 @@ static inline int ipv6_hopopt_jumbo_remove(struct sk_buff *skb)
         return 0;
 }

-static inline bool ipv6_accept_ra(struct inet6_dev *idev)
+static inline bool ipv6_accept_ra(const struct inet6_dev *idev)
 {
+        s32 accept_ra = READ_ONCE(idev->cnf.accept_ra);
+
         /* If forwarding is enabled, RA are not accepted unless the special
          * hybrid mode (accept_ra=2) is enabled.
          */
-        return idev->cnf.forwarding ? idev->cnf.accept_ra == 2 :
-               idev->cnf.accept_ra;
+        return READ_ONCE(idev->cnf.forwarding) ? accept_ra == 2 :
+               accept_ra;
 }

 #define IPV6_FRAG_HIGH_THRESH (4 * 1024*1024) /* 4194304 */
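
Note why accept_ra is snapshotted into a local: with two independent READ_ONCE() loads, a concurrent sysctl write could land between them, letting the forwarding test and the returned value judge different accept_ra values. Illustrative shape of the fix (hypothetical helper, not the patch itself):

        static bool ra_allowed(const struct inet6_dev *idev)
        {
                /* One load: the comparison and the result use the same value. */
                s32 accept_ra = READ_ONCE(idev->cnf.accept_ra);

                return READ_ONCE(idev->cnf.forwarding) ? accept_ra == 2 : accept_ra;
        }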
@@ -5988,7 +5988,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
                 return -ENODEV;

         idev = __in6_dev_get_safely(dev);
-        if (unlikely(!idev || !idev->cnf.forwarding))
+        if (unlikely(!idev || !READ_ONCE(idev->cnf.forwarding)))
                 return BPF_FIB_LKUP_RET_FWD_DISABLED;

         if (flags & BPF_FIB_LOOKUP_OUTPUT) {
This diff is collapsed.
@@ -379,9 +379,8 @@ static int ipv6_srh_rcv(struct sk_buff *skb)
         idev = __in6_dev_get(skb->dev);

-        accept_seg6 = net->ipv6.devconf_all->seg6_enabled;
-        if (accept_seg6 > idev->cnf.seg6_enabled)
-                accept_seg6 = idev->cnf.seg6_enabled;
+        accept_seg6 = min(READ_ONCE(net->ipv6.devconf_all->seg6_enabled),
+                          READ_ONCE(idev->cnf.seg6_enabled));

         if (!accept_seg6) {
                 kfree_skb(skb);

@@ -655,10 +654,13 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb)
         struct ipv6_rt_hdr *hdr;
         struct rt0_hdr *rthdr;
         struct net *net = dev_net(skb->dev);
-        int accept_source_route = net->ipv6.devconf_all->accept_source_route;
+        int accept_source_route;

-        if (idev && accept_source_route > idev->cnf.accept_source_route)
-                accept_source_route = idev->cnf.accept_source_route;
+        accept_source_route = READ_ONCE(net->ipv6.devconf_all->accept_source_route);
+
+        if (idev)
+                accept_source_route = min(accept_source_route,
+                                          READ_ONCE(idev->cnf.accept_source_route));

         if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
             !pskb_may_pull(skb, (skb_transport_offset(skb) +

@@ -919,7 +921,7 @@ static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff)
                 goto drop;

         /* Ignore if IOAM is not enabled on ingress */
-        if (!__in6_dev_get(skb->dev)->cnf.ioam6_enabled)
+        if (!READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_enabled))
                 goto ignore;

         /* Truncated Option header */
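
Both exthdrs rewrites above collapse the old "take the lower of conf/all and conf/<device>" compare-and-assign into a single min() over two READ_ONCE() loads, so each knob is read exactly once per packet. The same shape as a hypothetical helper (for illustration only):

        /* Effective per-packet value of a devconf knob: min(all, device). */
        static bool ipv6_seg6_enabled(const struct net *net,
                                      const struct inet6_dev *idev)
        {
                return min(READ_ONCE(net->ipv6.devconf_all->seg6_enabled),
                           READ_ONCE(idev->cnf.seg6_enabled)) > 0;
        }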
@@ -727,7 +727,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
                 if (!skb->dev)
                         raw16 = IOAM6_U16_UNAVAILABLE;
                 else
-                        raw16 = (__force u16)__in6_dev_get(skb->dev)->cnf.ioam6_id;
+                        raw16 = (__force u16)READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_id);

                 *(__be16 *)data = cpu_to_be16(raw16);
                 data += sizeof(__be16);

@@ -735,7 +735,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
                 if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
                         raw16 = IOAM6_U16_UNAVAILABLE;
                 else
-                        raw16 = (__force u16)__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id;
+                        raw16 = (__force u16)READ_ONCE(__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id);

                 *(__be16 *)data = cpu_to_be16(raw16);
                 data += sizeof(__be16);

@@ -822,7 +822,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
                 if (!skb->dev)
                         raw32 = IOAM6_U32_UNAVAILABLE;
                 else
-                        raw32 = __in6_dev_get(skb->dev)->cnf.ioam6_id_wide;
+                        raw32 = READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_id_wide);

                 *(__be32 *)data = cpu_to_be32(raw32);
                 data += sizeof(__be32);

@@ -830,7 +830,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
                 if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
                         raw32 = IOAM6_U32_UNAVAILABLE;
                 else
-                        raw32 = __in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id_wide;
+                        raw32 = READ_ONCE(__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id_wide);

                 *(__be32 *)data = cpu_to_be32(raw32);
                 data += sizeof(__be32);
@@ -168,9 +168,9 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
         SKB_DR_SET(reason, NOT_SPECIFIED);
         if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
-            !idev || unlikely(idev->cnf.disable_ipv6)) {
+            !idev || unlikely(READ_ONCE(idev->cnf.disable_ipv6))) {
                 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
-                if (idev && unlikely(idev->cnf.disable_ipv6))
+                if (idev && unlikely(READ_ONCE(idev->cnf.disable_ipv6)))
                         SKB_DR_SET(reason, IPV6DISABLED);
                 goto drop;
         }

@@ -236,7 +236,7 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
         if (!ipv6_addr_is_multicast(&hdr->daddr) &&
             (skb->pkt_type == PACKET_BROADCAST ||
              skb->pkt_type == PACKET_MULTICAST) &&
-            idev->cnf.drop_unicast_in_l2_multicast) {
+            READ_ONCE(idev->cnf.drop_unicast_in_l2_multicast)) {
                 SKB_DR_SET(reason, UNICAST_IN_L2_MULTICAST);
                 goto err;
         }
@@ -234,7 +234,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
         skb->protocol = htons(ETH_P_IPV6);
         skb->dev = dev;

-        if (unlikely(idev->cnf.disable_ipv6)) {
+        if (unlikely(READ_ONCE(idev->cnf.disable_ipv6))) {
                 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
                 kfree_skb_reason(skb, SKB_DROP_REASON_IPV6DISABLED);
                 return 0;

@@ -501,7 +501,7 @@ int ip6_forward(struct sk_buff *skb)
         u32 mtu;

         idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
-        if (net->ipv6.devconf_all->forwarding == 0)
+        if (READ_ONCE(net->ipv6.devconf_all->forwarding) == 0)
                 goto error;

         if (skb->pkt_type != PACKET_HOST)

@@ -513,8 +513,8 @@ int ip6_forward(struct sk_buff *skb)
         if (skb_warn_if_lro(skb))
                 goto drop;

-        if (!net->ipv6.devconf_all->disable_policy &&
-            (!idev || !idev->cnf.disable_policy) &&
+        if (!READ_ONCE(net->ipv6.devconf_all->disable_policy) &&
+            (!idev || !READ_ONCE(idev->cnf.disable_policy)) &&
             !xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
                 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
                 goto drop;

@@ -552,7 +552,7 @@ int ip6_forward(struct sk_buff *skb)
         }

         /* XXX: idev->cnf.proxy_ndp? */
-        if (net->ipv6.devconf_all->proxy_ndp &&
+        if (READ_ONCE(net->ipv6.devconf_all->proxy_ndp) &&
             pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
                 int proxied = ip6_forward_proxy_check(skb);

                 if (proxied > 0) {
@@ -1346,7 +1346,7 @@ int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                 }

                 if (val < 0)
-                        val = sock_net(sk)->ipv6.devconf_all->hop_limit;
+                        val = READ_ONCE(sock_net(sk)->ipv6.devconf_all->hop_limit);
                 break;
         }
@@ -159,9 +159,9 @@ static int unsolicited_report_interval(struct inet6_dev *idev)
         int iv;

         if (mld_in_v1_mode(idev))
-                iv = idev->cnf.mldv1_unsolicited_report_interval;
+                iv = READ_ONCE(idev->cnf.mldv1_unsolicited_report_interval);
         else
-                iv = idev->cnf.mldv2_unsolicited_report_interval;
+                iv = READ_ONCE(idev->cnf.mldv2_unsolicited_report_interval);

         return iv > 0 ? iv : 1;
 }

@@ -1202,15 +1202,15 @@ static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
 static int mld_force_mld_version(const struct inet6_dev *idev)
 {
+        const struct net *net = dev_net(idev->dev);
+        int all_force;
+
+        all_force = READ_ONCE(net->ipv6.devconf_all->force_mld_version);
         /* Normally, both are 0 here. If enforcement to a particular is
          * being used, individual device enforcement will have a lower
          * precedence over 'all' device (.../conf/all/force_mld_version).
          */
-        if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version != 0)
-                return dev_net(idev->dev)->ipv6.devconf_all->force_mld_version;
-        else
-                return idev->cnf.force_mld_version;
+        return all_force ?: READ_ONCE(idev->cnf.force_mld_version);
 }

 static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
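
The rewritten return relies on the GNU C shorthand a ?: b, which yields a when a is nonzero and b otherwise, evaluating a only once; that pairs naturally with the single READ_ONCE() snapshot in all_force. Expanded equivalent (for illustration):

        all_force = READ_ONCE(net->ipv6.devconf_all->force_mld_version);
        if (all_force != 0)
                return all_force;
        return READ_ONCE(idev->cnf.force_mld_version);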
@@ -451,7 +451,7 @@ static void ip6_nd_hdr(struct sk_buff *skb,
         rcu_read_lock();
         idev = __in6_dev_get(skb->dev);
-        tclass = idev ? idev->cnf.ndisc_tclass : 0;
+        tclass = idev ? READ_ONCE(idev->cnf.ndisc_tclass) : 0;
         rcu_read_unlock();

         skb_push(skb, sizeof(*hdr));

@@ -535,7 +535,7 @@ void ndisc_send_na(struct net_device *dev, const struct in6_addr *daddr,
                         src_addr = solicited_addr;
                 if (ifp->flags & IFA_F_OPTIMISTIC)
                         override = false;
-                inc_opt |= ifp->idev->cnf.force_tllao;
+                inc_opt |= READ_ONCE(ifp->idev->cnf.force_tllao);
                 in6_ifa_put(ifp);
         } else {
                 if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr,

@@ -903,8 +903,9 @@ static enum skb_drop_reason ndisc_recv_ns(struct sk_buff *skb)
         }

         if (ipv6_chk_acast_addr(net, dev, &msg->target) ||
-            (idev->cnf.forwarding &&
-             (net->ipv6.devconf_all->proxy_ndp || idev->cnf.proxy_ndp) &&
+            (READ_ONCE(idev->cnf.forwarding) &&
+             (READ_ONCE(net->ipv6.devconf_all->proxy_ndp) ||
+              READ_ONCE(idev->cnf.proxy_ndp)) &&
              (is_router = pndisc_is_router(&msg->target, dev)) >= 0)) {
                 if (!(NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED) &&
                     skb->pkt_type != PACKET_HOST &&

@@ -929,7 +930,7 @@ static enum skb_drop_reason ndisc_recv_ns(struct sk_buff *skb)
         }

         if (is_router < 0)
-                is_router = idev->cnf.forwarding;
+                is_router = READ_ONCE(idev->cnf.forwarding);

         if (dad) {
                 ndisc_send_na(dev, &in6addr_linklocal_allnodes, &msg->target,

@@ -973,7 +974,7 @@ static int accept_untracked_na(struct net_device *dev, struct in6_addr *saddr)
 {
         struct inet6_dev *idev = __in6_dev_get(dev);

-        switch (idev->cnf.accept_untracked_na) {
+        switch (READ_ONCE(idev->cnf.accept_untracked_na)) {
         case 0: /* Don't accept untracked na (absent in neighbor cache) */
                 return 0;
         case 1: /* Create new entries from na if currently untracked */

@@ -1024,7 +1025,7 @@ static enum skb_drop_reason ndisc_recv_na(struct sk_buff *skb)
          * drop_unsolicited_na takes precedence over accept_untracked_na
          */
         if (!msg->icmph.icmp6_solicited && idev &&
-            idev->cnf.drop_unsolicited_na)
+            READ_ONCE(idev->cnf.drop_unsolicited_na))
                 return reason;

         if (!ndisc_parse_options(dev, msg->opt, ndoptlen, &ndopts))

@@ -1080,7 +1081,7 @@ static enum skb_drop_reason ndisc_recv_na(struct sk_buff *skb)
          * Note that we don't do a (daddr == all-routers-mcast) check.
          */
         new_state = msg->icmph.icmp6_solicited ? NUD_REACHABLE : NUD_STALE;
-        if (!neigh && lladdr && idev && idev->cnf.forwarding) {
+        if (!neigh && lladdr && idev && READ_ONCE(idev->cnf.forwarding)) {
                 if (accept_untracked_na(dev, saddr)) {
                         neigh = neigh_create(&nd_tbl, &msg->target, dev);
                         new_state = NUD_STALE;

@@ -1100,7 +1101,8 @@ static enum skb_drop_reason ndisc_recv_na(struct sk_buff *skb)
          * has already sent a NA to us.
          */
         if (lladdr && !memcmp(lladdr, dev->dev_addr, dev->addr_len) &&
-            net->ipv6.devconf_all->forwarding && net->ipv6.devconf_all->proxy_ndp &&
+            READ_ONCE(net->ipv6.devconf_all->forwarding) &&
+            READ_ONCE(net->ipv6.devconf_all->proxy_ndp) &&
             pneigh_lookup(&nd_tbl, net, &msg->target, dev, 0)) {
                 /* XXX: idev->cnf.proxy_ndp */
                 goto out;

@@ -1148,7 +1150,7 @@ static enum skb_drop_reason ndisc_recv_rs(struct sk_buff *skb)
         }

         /* Don't accept RS if we're not in router mode */
-        if (!idev->cnf.forwarding)
+        if (!READ_ONCE(idev->cnf.forwarding))
                 goto out;

         /*

@@ -1318,7 +1320,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
         if (old_if_flags != in6_dev->if_flags)
                 send_ifinfo_notify = true;

-        if (!in6_dev->cnf.accept_ra_defrtr) {
+        if (!READ_ONCE(in6_dev->cnf.accept_ra_defrtr)) {
                 ND_PRINTK(2, info,
                           "RA: %s, defrtr is false for dev: %s\n",
                           __func__, skb->dev->name);

@@ -1326,7 +1328,8 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
         }

         lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);
-        if (lifetime != 0 && lifetime < in6_dev->cnf.accept_ra_min_lft) {
+        if (lifetime != 0 &&
+            lifetime < READ_ONCE(in6_dev->cnf.accept_ra_min_lft)) {
                 ND_PRINTK(2, info,
                           "RA: router lifetime (%ds) is too short: %s\n",
                           lifetime, skb->dev->name);

@@ -1337,7 +1340,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
          * accept_ra_from_local is set to true.
          */
         net = dev_net(in6_dev->dev);
-        if (!in6_dev->cnf.accept_ra_from_local &&
+        if (!READ_ONCE(in6_dev->cnf.accept_ra_from_local) &&
             ipv6_chk_addr(net, &ipv6_hdr(skb)->saddr, in6_dev->dev, 0)) {
                 ND_PRINTK(2, info,
                           "RA from local address detected on dev: %s: default router ignored\n",

@@ -1349,7 +1352,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
         pref = ra_msg->icmph.icmp6_router_pref;
         /* 10b is handled as if it were 00b (medium) */
         if (pref == ICMPV6_ROUTER_PREF_INVALID ||
-            !in6_dev->cnf.accept_ra_rtr_pref)
+            !READ_ONCE(in6_dev->cnf.accept_ra_rtr_pref))
                 pref = ICMPV6_ROUTER_PREF_MEDIUM;
 #endif
         /* routes added from RAs do not use nexthop objects */

@@ -1420,10 +1423,12 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
                 spin_unlock_bh(&table->tb6_lock);
         }

-        if (in6_dev->cnf.accept_ra_min_hop_limit < 256 &&
+        if (READ_ONCE(in6_dev->cnf.accept_ra_min_hop_limit) < 256 &&
             ra_msg->icmph.icmp6_hop_limit) {
-                if (in6_dev->cnf.accept_ra_min_hop_limit <= ra_msg->icmph.icmp6_hop_limit) {
-                        in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+                if (READ_ONCE(in6_dev->cnf.accept_ra_min_hop_limit) <=
+                    ra_msg->icmph.icmp6_hop_limit) {
+                        WRITE_ONCE(in6_dev->cnf.hop_limit,
+                                   ra_msg->icmph.icmp6_hop_limit);
                         fib6_metric_set(rt, RTAX_HOPLIMIT,
                                         ra_msg->icmph.icmp6_hop_limit);
                 } else {

@@ -1505,7 +1510,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
         }

 #ifdef CONFIG_IPV6_ROUTE_INFO
-        if (!in6_dev->cnf.accept_ra_from_local &&
+        if (!READ_ONCE(in6_dev->cnf.accept_ra_from_local) &&
             ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr,
                           in6_dev->dev, 0)) {
                 ND_PRINTK(2, info,

@@ -1514,7 +1519,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
                 goto skip_routeinfo;
         }

-        if (in6_dev->cnf.accept_ra_rtr_pref && ndopts.nd_opts_ri) {
+        if (READ_ONCE(in6_dev->cnf.accept_ra_rtr_pref) && ndopts.nd_opts_ri) {
                 struct nd_opt_hdr *p;
                 for (p = ndopts.nd_opts_ri;
                      p;

@@ -1526,14 +1531,14 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
                                 continue;
 #endif
                         if (ri->prefix_len == 0 &&
-                            !in6_dev->cnf.accept_ra_defrtr)
+                            !READ_ONCE(in6_dev->cnf.accept_ra_defrtr))
                                 continue;
                         if (ri->lifetime != 0 &&
-                            ntohl(ri->lifetime) < in6_dev->cnf.accept_ra_min_lft)
+                            ntohl(ri->lifetime) < READ_ONCE(in6_dev->cnf.accept_ra_min_lft))
                                 continue;
-                        if (ri->prefix_len < in6_dev->cnf.accept_ra_rt_info_min_plen)
+                        if (ri->prefix_len < READ_ONCE(in6_dev->cnf.accept_ra_rt_info_min_plen))
                                 continue;
-                        if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)
+                        if (ri->prefix_len > READ_ONCE(in6_dev->cnf.accept_ra_rt_info_max_plen))
                                 continue;
                         rt6_route_rcv(skb->dev, (u8 *)p, (p->nd_opt_len) << 3,
                                       &ipv6_hdr(skb)->saddr);

@@ -1553,7 +1558,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
         }
 #endif

-        if (in6_dev->cnf.accept_ra_pinfo && ndopts.nd_opts_pi) {
+        if (READ_ONCE(in6_dev->cnf.accept_ra_pinfo) && ndopts.nd_opts_pi) {
                 struct nd_opt_hdr *p;
                 for (p = ndopts.nd_opts_pi;
                      p;

@@ -1564,7 +1569,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
                 }
         }

-        if (ndopts.nd_opts_mtu && in6_dev->cnf.accept_ra_mtu) {
+        if (ndopts.nd_opts_mtu && READ_ONCE(in6_dev->cnf.accept_ra_mtu)) {
                 __be32 n;
                 u32 mtu;

@@ -1578,8 +1583,8 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
                 if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) {
                         ND_PRINTK(2, warn, "RA: invalid mtu: %d\n", mtu);
-                } else if (in6_dev->cnf.mtu6 != mtu) {
-                        in6_dev->cnf.mtu6 = mtu;
+                } else if (READ_ONCE(in6_dev->cnf.mtu6) != mtu) {
+                        WRITE_ONCE(in6_dev->cnf.mtu6, mtu);
                         fib6_metric_set(rt, RTAX_MTU, mtu);
                         rt6_mtu_change(skb->dev, mtu);
                 }

@@ -1813,7 +1818,7 @@ static bool ndisc_suppress_frag_ndisc(struct sk_buff *skb)
         if (!idev)
                 return true;
         if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED &&
-            idev->cnf.suppress_frag_ndisc) {
+            READ_ONCE(idev->cnf.suppress_frag_ndisc)) {
                 net_warn_ratelimited("Received fragmented ndisc packet. Carefully consider disabling suppress_frag_ndisc.\n");
                 return true;
         }

@@ -1890,8 +1895,8 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
                 idev = in6_dev_get(dev);
                 if (!idev)
                         break;
-                if (idev->cnf.ndisc_notify ||
-                    net->ipv6.devconf_all->ndisc_notify)
+                if (READ_ONCE(idev->cnf.ndisc_notify) ||
+                    READ_ONCE(net->ipv6.devconf_all->ndisc_notify))
                         ndisc_send_unsol_na(dev);
                 in6_dev_put(idev);
                 break;

@@ -1900,8 +1905,8 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
                 if (!idev)
                         evict_nocarrier = true;
                 else {
-                        evict_nocarrier = idev->cnf.ndisc_evict_nocarrier &&
-                                          net->ipv6.devconf_all->ndisc_evict_nocarrier;
+                        evict_nocarrier = READ_ONCE(idev->cnf.ndisc_evict_nocarrier) &&
+                                          READ_ONCE(net->ipv6.devconf_all->ndisc_evict_nocarrier);
                         in6_dev_put(idev);
                 }
@@ -83,7 +83,7 @@ struct sk_buff *nf_reject_skb_v6_tcp_reset(struct net *net,
         skb_reserve(nskb, LL_MAX_HEADER);
         nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
-                                     net->ipv6.devconf_all->hop_limit);
+                                     READ_ONCE(net->ipv6.devconf_all->hop_limit));
         nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);

         nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));

@@ -124,7 +124,7 @@ struct sk_buff *nf_reject_skb_v6_unreach(struct net *net,
         skb_reserve(nskb, LL_MAX_HEADER);
         nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
-                                     net->ipv6.devconf_all->hop_limit);
+                                     READ_ONCE(net->ipv6.devconf_all->hop_limit));

         skb_reset_transport_header(nskb);
         icmp6h = skb_put_zero(nskb, sizeof(struct icmp6hdr));
@@ -111,9 +111,9 @@ int ip6_dst_hoplimit(struct dst_entry *dst)
                 rcu_read_lock();
                 idev = __in6_dev_get(dev);
                 if (idev)
-                        hoplimit = idev->cnf.hop_limit;
+                        hoplimit = READ_ONCE(idev->cnf.hop_limit);
                 else
-                        hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
+                        hoplimit = READ_ONCE(dev_net(dev)->ipv6.devconf_all->hop_limit);
                 rcu_read_unlock();
         }
         return hoplimit;
@@ -645,14 +645,15 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
                 write_lock_bh(&neigh->lock);
                 if (!(neigh->nud_state & NUD_VALID) &&
                     time_after(jiffies,
-                               neigh->updated + idev->cnf.rtr_probe_interval)) {
+                               neigh->updated +
+                               READ_ONCE(idev->cnf.rtr_probe_interval))) {
                         work = kmalloc(sizeof(*work), GFP_ATOMIC);
                         if (work)
                                 __neigh_set_probe_once(neigh);
                 }
                 write_unlock_bh(&neigh->lock);
         } else if (time_after(jiffies, last_probe +
-                              idev->cnf.rtr_probe_interval)) {
+                              READ_ONCE(idev->cnf.rtr_probe_interval))) {
                 work = kmalloc(sizeof(*work), GFP_ATOMIC);
         }

@@ -1596,7 +1597,7 @@ static unsigned int fib6_mtu(const struct fib6_result *res)
                 rcu_read_lock();
                 idev = __in6_dev_get(dev);
-                mtu = idev->cnf.mtu6;
+                mtu = READ_ONCE(idev->cnf.mtu6);
                 rcu_read_unlock();
         }

@@ -2220,7 +2221,7 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
         strict |= flags & RT6_LOOKUP_F_IFACE;
         strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
-        if (net->ipv6.devconf_all->forwarding == 0)
+        if (READ_ONCE(net->ipv6.devconf_all->forwarding) == 0)
                 strict |= RT6_LOOKUP_F_REACHABLE;

         rcu_read_lock();

@@ -3249,8 +3250,8 @@ u32 ip6_mtu_from_fib6(const struct fib6_result *res,
                 mtu = IPV6_MIN_MTU;

                 idev = __in6_dev_get(dev);
-                if (idev && idev->cnf.mtu6 > mtu)
-                        mtu = idev->cnf.mtu6;
+                if (idev)
+                        mtu = max_t(u32, mtu, READ_ONCE(idev->cnf.mtu6));
         }

         mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

@@ -4149,7 +4150,8 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
         in6_dev = __in6_dev_get(skb->dev);
         if (!in6_dev)
                 return;
-        if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
+        if (READ_ONCE(in6_dev->cnf.forwarding) ||
+            !READ_ONCE(in6_dev->cnf.accept_redirects))
                 return;

         /* RFC2461 8.1:

@@ -4583,8 +4585,8 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
                 f6i->dst_nocount = true;

                 if (!anycast &&
-                    (net->ipv6.devconf_all->disable_policy ||
-                     idev->cnf.disable_policy))
+                    (READ_ONCE(net->ipv6.devconf_all->disable_policy) ||
+                     READ_ONCE(idev->cnf.disable_policy)))
                         f6i->dst_nopolicy = true;
         }
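
The ip6_mtu_from_fib6() hunk above fixes the same double-load hazard seen elsewhere in the series: the old compare-then-assign read cnf.mtu6 twice, so the test could pass on one value while the assignment copied another; max_t() over a single READ_ONCE() keeps one consistent value. Expanded form (for illustration):

        u32 dev_mtu = READ_ONCE(idev->cnf.mtu6);        /* single load */

        mtu = max_t(u32, mtu, dev_mtu);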
@@ -241,6 +241,7 @@ bool seg6_hmac_validate_skb(struct sk_buff *skb)
         struct sr6_tlv_hmac *tlv;
         struct ipv6_sr_hdr *srh;
         struct inet6_dev *idev;
+        int require_hmac;

         idev = __in6_dev_get(skb->dev);

@@ -248,16 +249,17 @@ bool seg6_hmac_validate_skb(struct sk_buff *skb)
         tlv = seg6_get_tlv_hmac(srh);

+        require_hmac = READ_ONCE(idev->cnf.seg6_require_hmac);
         /* mandatory check but no tlv */
-        if (idev->cnf.seg6_require_hmac > 0 && !tlv)
+        if (require_hmac > 0 && !tlv)
                 return false;

         /* no check */
-        if (idev->cnf.seg6_require_hmac < 0)
+        if (require_hmac < 0)
                 return true;

         /* check only if present */
-        if (idev->cnf.seg6_require_hmac == 0 && !tlv)
+        if (require_hmac == 0 && !tlv)
                 return true;

         /* now, seg6_require_hmac >= 0 && tlv */
@@ -800,7 +800,7 @@ synproxy_build_ip_ipv6(struct net *net, struct sk_buff *skb,
         skb_reset_network_header(skb);
         iph = skb_put(skb, sizeof(*iph));
         ip6_flow_hdr(iph, 0, 0);
-        iph->hop_limit = net->ipv6.devconf_all->hop_limit;
+        iph->hop_limit = READ_ONCE(net->ipv6.devconf_all->hop_limit);
         iph->nexthdr = IPPROTO_TCP;
         iph->saddr = *saddr;
         iph->daddr = *daddr;