/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	: 	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 * 		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 * 	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 * 	Ilia Sotnikov		:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#include <linux/kmemleak.h>
#endif
#include <net/secure_seq.h>

#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;

/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int	 ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu);
static void		 ip_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
static void		ipv4_dst_destroy(struct dst_entry *dst);

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	WARN_ON(1);
	return NULL;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr);

static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.protocol =		cpu_to_be16(ETH_P_IP),
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.mtu =			ipv4_mtu,
	.cow_metrics =		ipv4_cow_metrics,
	.destroy =		ipv4_dst_destroy,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.redirect =		ip_do_redirect,
	.local_out =		__ip_local_out,
	.neigh_lookup =		ipv4_neigh_lookup,
};

#define ECN_OR_COST(class)	TC_PRIO_##class

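/*
 * Map the TOS bits of the IPv4 header to a packet queueing priority.
 * Entries come in pairs: a base TC_PRIO_* class and its ECN_OR_COST()
 * variant, selected by the lowest bit of the table index.
 */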
const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)

#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos)
		return NULL;
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cache_seq_ops);
}

static const struct file_operations rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};


static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;

}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{

}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   0, /* st->in_hit */
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,

		   0, /* st->out_hit */
		   st->out_slow_tot,
		   st->out_slow_mc,

		   0, /* st->gc_total */
		   0, /* st->gc_ignored */
		   0, /* st->gc_goal_miss */
		   0, /* st->gc_dst_overflow */
		   0, /* st->in_hlist_search */
		   0  /* st->out_hlist_search */
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};


static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}

static int rt_acct_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rt_acct_proc_show, NULL);
}

static const struct file_operations rt_acct_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= rt_acct_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_create("rt_cache", S_IRUGO, net->proc_net,
			  &rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", S_IRUGO,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata =  {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */

static inline bool rt_is_expired(const struct rtable *rth)
{
	return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
}

void rt_cache_flush(struct net *net)
{
	rt_genid_bump_ipv4(net);
}

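/* Resolve the neighbour used to reach this dst: prefer the route's
 * gateway when one is set, otherwise fall back to the destination
 * address taken from the packet (or the supplied daddr).
 */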
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	struct net_device *dev = dst->dev;
	const __be32 *pkey = daddr;
	const struct rtable *rt;
	struct neighbour *n;

	rt = (const struct rtable *) dst;
	if (rt->rt_gateway)
		pkey = (const __be32 *) &rt->rt_gateway;
	else if (skb)
		pkey = &ip_hdr(skb)->daddr;

	n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
	if (n)
		return n;
	return neigh_create(&arp_tbl, pkey, dev);
}

/*
 * Peer allocation may fail only in serious out-of-memory conditions.  However
 * we can still generate some output.
 * Random ID selection looks a bit dangerous because we have no chance of
 * selecting an ID that is unique over a reasonable period of time.
 * But a broken packet identifier may be better than no packet at all.
 */
static void ip_select_fb_ident(struct iphdr *iph)
{
	static DEFINE_SPINLOCK(ip_fb_id_lock);
	static u32 ip_fallback_id;
	u32 salt;

	spin_lock_bh(&ip_fb_id_lock);
	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
	iph->id = htons(salt & 0xFFFF);
	ip_fallback_id = salt;
	spin_unlock_bh(&ip_fb_id_lock);
}

void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
	struct net *net = dev_net(dst->dev);
	struct inet_peer *peer;

	peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
	if (peer) {
		iph->id = htons(inet_getid(peer, more));
		inet_putpeer(peer);
		return;
	}

	ip_select_fb_ident(iph);
}
EXPORT_SYMBOL(__ip_select_ident);

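/* Build a flowi4 lookup key from an IP header.  When a socket is
 * supplied, its bound device, mark, connection TOS flags and protocol
 * override the values derived from the packet.
 */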
static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
			     const struct iphdr *iph,
			     int oif, u8 tos,
			     u8 prot, u32 mark, int flow_flags)
{
	if (sk) {
		const struct inet_sock *inet = inet_sk(sk);

		oif = sk->sk_bound_dev_if;
		mark = sk->sk_mark;
		tos = RT_CONN_FLAGS(sk);
		prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
	}
	flowi4_init_output(fl4, oif, mark, tos,
			   RT_SCOPE_UNIVERSE, prot,
			   flow_flags,
			   iph->daddr, iph->saddr, 0, 0);
}

static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
			       const struct sock *sk)
{
	const struct iphdr *iph = ip_hdr(skb);
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;

	__build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
}

static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
			   inet_sk_flowi_flags(sk),
			   daddr, inet->inet_saddr, 0, 0);
	rcu_read_unlock();
}

static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
				 const struct sk_buff *skb)
{
	if (skb)
		build_skb_flow_key(fl4, skb, sk);
	else
		build_sk_flow_key(fl4, sk);
}

static inline void rt_free(struct rtable *rt)
{
	call_rcu(&rt->dst.rcu_head, dst_rcu_free);
}

static DEFINE_SPINLOCK(fnhe_lock);

static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
	struct rtable *rt;

	rt = rcu_dereference(fnhe->fnhe_rth_input);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
		rt_free(rt);
	}
	rt = rcu_dereference(fnhe->fnhe_rth_output);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
		rt_free(rt);
	}
}

static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
{
	struct fib_nh_exception *fnhe, *oldest;

	oldest = rcu_dereference(hash->chain);
	for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
			oldest = fnhe;
	}
	fnhe_flush_routes(oldest);
	return oldest;
}

static inline u32 fnhe_hashfun(__be32 daddr)
{
	u32 hval;

	hval = (__force u32) daddr;
	hval ^= (hval >> 11) ^ (hval >> 22);

	return hval & (FNHE_HASH_SIZE - 1);
}

static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
{
	rt->rt_pmtu = fnhe->fnhe_pmtu;
	rt->dst.expires = fnhe->fnhe_expires;

	if (fnhe->fnhe_gw) {
		rt->rt_flags |= RTCF_REDIRECTED;
		rt->rt_gateway = fnhe->fnhe_gw;
		rt->rt_uses_gateway = 1;
	}
}

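/* Record (or refresh) a next-hop exception for daddr: a learned
 * redirect gateway and/or a learned PMTU with its expiry time.  The
 * per-nexthop hash is allocated lazily, the oldest entry in a chain is
 * recycled once the chain grows past FNHE_RECLAIM_DEPTH, and routes
 * already cached against this nexthop are updated or marked obsolete
 * so that they get re-validated.  Serialized by fnhe_lock.
 */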
static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
				  u32 pmtu, unsigned long expires)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe;
	struct rtable *rt;
	unsigned int i;
	int depth;
	u32 hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = nh->nh_exceptions;
	if (!hash) {
		hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
		if (!hash)
			goto out_unlock;
		nh->nh_exceptions = hash;
	}

	hash += hval;

	depth = 0;
	for (fnhe = rcu_dereference(hash->chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			break;
		depth++;
	}

	if (fnhe) {
		if (gw)
			fnhe->fnhe_gw = gw;
		if (pmtu) {
			fnhe->fnhe_pmtu = pmtu;
			fnhe->fnhe_expires = max(1UL, expires);
		}
		/* Update all cached dsts too */
		rt = rcu_dereference(fnhe->fnhe_rth_input);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
		rt = rcu_dereference(fnhe->fnhe_rth_output);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
	} else {
		if (depth > FNHE_RECLAIM_DEPTH)
			fnhe = fnhe_oldest(hash);
		else {
			fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
			if (!fnhe)
				goto out_unlock;

			fnhe->fnhe_next = hash->chain;
			rcu_assign_pointer(hash->chain, fnhe);
		}
		fnhe->fnhe_genid = fnhe_genid(dev_net(nh->nh_dev));
		fnhe->fnhe_daddr = daddr;
		fnhe->fnhe_gw = gw;
		fnhe->fnhe_pmtu = pmtu;
		fnhe->fnhe_expires = expires;

		/* Exception created; mark the cached routes for the nexthop
		 * stale, so anyone caching it rechecks if this exception
		 * applies to them.
		 */
		rt = rcu_dereference(nh->nh_rth_input);
		if (rt)
			rt->dst.obsolete = DST_OBSOLETE_KILL;

		for_each_possible_cpu(i) {
			struct rtable __rcu **prt;
			prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i);
			rt = rcu_dereference(*prt);
			if (rt)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
		}
	}

	fnhe->fnhe_stamp = jiffies;

out_unlock:
	spin_unlock_bh(&fnhe_lock);
}

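/* Handle an ICMP redirect for this route: sanity-check the advertised
 * gateway (redirects must be enabled on the interface and the new
 * gateway must be a sensible unicast, on-link address), then record it
 * as a next-hop exception and raise a netevent notification.  When
 * kill_route is set the cached route itself is marked obsolete.
 */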
static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
			     bool kill_route)
{
	__be32 new_gw = icmp_hdr(skb)->un.gateway;
	__be32 old_gw = ip_hdr(skb)->saddr;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct fib_result res;
	struct neighbour *n;
	struct net *net;

	switch (icmp_hdr(skb)->code & 7) {
	case ICMP_REDIR_NET:
	case ICMP_REDIR_NETTOS:
	case ICMP_REDIR_HOST:
	case ICMP_REDIR_HOSTTOS:
		break;

	default:
		return;
	}

	if (rt->rt_gateway != old_gw)
		return;

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
	if (n) {
		if (!(n->nud_state & NUD_VALID)) {
			neigh_event_send(n, NULL);
		} else {
			if (fib_lookup(net, fl4, &res) == 0) {
				struct fib_nh *nh = &FIB_RES_NH(res);

				update_or_create_fnhe(nh, fl4->daddr, new_gw,
						      0, 0);
			}
			if (kill_route)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
		}
		neigh_release(n);
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev)) {
		const struct iphdr *iph = (const struct iphdr *) skb->data;
		__be32 daddr = iph->daddr;
		__be32 saddr = iph->saddr;

		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
				     "  Advised path = %pI4 -> %pI4\n",
				     &old_gw, dev->name, &new_gw,
				     &saddr, &daddr);
	}
#endif
	;
}

static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt;
	struct flowi4 fl4;
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;

	rt = (struct rtable *) dst;

	__build_flow_key(&fl4, sk, iph, oif, tos, prot, mark, 0);
	__ip_do_redirect(rt, skb, &fl4, true);
}

static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete > 0) {
			ip_rt_put(rt);
			ret = NULL;
		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
			   rt->dst.expires) {
			ip_rt_put(rt);
			ret = NULL;
		}
	}
	return ret;
}

/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we have not seen packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   has forgotten the redirected route and start sending redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */

void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	struct net *net;
	int log_martians;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	rcu_read_unlock();

	net = dev_net(rt->dst.dev);
	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
	if (!peer) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
			  rt_nexthop(rt, ip_hdr(skb)->daddr));
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
		peer->rate_tokens = 0;

	/* Too many ignored redirects; do not send anything.
	 * Set peer->rate_last to the time of the last seen redirected packet.
	 */
	if (peer->rate_tokens >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		goto out_put_peer;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (peer->rate_tokens == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->rate_tokens)))) {
		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);

		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
		peer->rate_last = jiffies;
		++peer->rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (log_martians &&
		    peer->rate_tokens == ip_rt_redirect_number)
			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
					     &ip_hdr(skb)->saddr, inet_iif(skb),
					     &ip_hdr(skb)->daddr, &gw);
#endif
	}
out_put_peer:
	inet_putpeer(peer);
}

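/* Send an ICMP destination-unreachable error for a packet that hit an
 * error route.  The rate is limited with a simple token bucket
 * (ip_rt_error_cost / ip_rt_error_burst) kept in the inetpeer entry
 * for the packet's source address.
 */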
static int ip_error(struct sk_buff *skb)
{
	struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
	struct rtable *rt = skb_rtable(skb);
	struct inet_peer *peer;
	unsigned long now;
	struct net *net;
	bool send;
	int code;

	net = dev_net(rt->dst.dev);
	if (!IN_DEV_FORWARD(in_dev)) {
		switch (rt->dst.error) {
		case EHOSTUNREACH:
			IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
			break;

		case ENETUNREACH:
			IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
			break;
		}
		goto out;
	}

	switch (rt->dst.error) {
	case EINVAL:
	default:
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);

	send = true;
	if (peer) {
		now = jiffies;
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
		inet_putpeer(peer);
	}
	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:	kfree_skb(skb);
	return 0;
}

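/* Record a learned path MTU for this flow.  The update is ignored when
 * the MTU metric is locked or the device MTU is already smaller; the
 * value is clamped to ip_rt_min_pmtu and stored as a next-hop
 * exception that expires after ip_rt_mtu_expires.
 */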
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
	struct dst_entry *dst = &rt->dst;
	struct fib_result res;

	if (dst_metric_locked(dst, RTAX_MTU))
		return;

	if (dst->dev->mtu < mtu)
		return;

	if (mtu < ip_rt_min_pmtu)
		mtu = ip_rt_min_pmtu;

	if (rt->rt_pmtu == mtu &&
	    time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
		return;

	rcu_read_lock();
	if (fib_lookup(dev_net(dst->dev), fl4, &res) == 0) {
		struct fib_nh *nh = &FIB_RES_NH(res);

		update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
				      jiffies + ip_rt_mtu_expires);
	}
	rcu_read_unlock();
}

static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu)
{
	struct rtable *rt = (struct rtable *) dst;
	struct flowi4 fl4;

	ip_rt_build_flow_key(&fl4, sk, skb);
	__ip_rt_update_pmtu(rt, &fl4, mtu);
}

void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
		      int oif, u32 mark, u8 protocol, int flow_flags)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(&fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, mark, flow_flags);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);

static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
	rt = __ip_route_output_key(sock_net(sk), &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}

void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct dst_entry *odst = NULL;
	bool new = false;

	bh_lock_sock(sk);

	if (!ip_sk_accept_pmtu(sk))
		goto out;

	odst = sk_dst_get(sk);

	if (sock_owned_by_user(sk) || !odst) {
		__ipv4_sk_update_pmtu(skb, sk, mtu);
		goto out;
	}

	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);

	rt = (struct rtable *)odst;
	if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	__ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);

	if (!dst_check(&rt->dst, 0)) {
		if (new)
			dst_release(&rt->dst);

		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	if (new)
		sk_dst_set(sk, &rt->dst);

out:
	bh_unlock_sock(sk);
	dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);

void ipv4_redirect(struct sk_buff *skb, struct net *net,
		   int oif, u32 mark, u8 protocol, int flow_flags)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(&fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, mark, flow_flags);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_redirect);

void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
	rt = __ip_route_output_key(sock_net(sk), &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rtable *rt = (struct rtable *) dst;

	/* All IPV4 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 *
	 * When a PMTU/redirect information update invalidates a route,
	 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
	 * DST_OBSOLETE_DEAD by dst_free().
	 */
	if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
		return NULL;
	return dst;
}

static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

	rt = skb_rtable(skb);
	if (rt)
		dst_set_expires(&rt->dst, 0);
}

static int ip_rt_bug(struct sock *sk, struct sk_buff *skb)
{
	pr_debug("%s: %pI4 -> %pI4, %s\n",
		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		 skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	WARN_ON(1);
	return 0;
}

/*
   We do not cache the source address of the outgoing interface,
   because it is used only by the IP RR, TS and SRR options,
   so it is out of the fast path.

   BTW remember: "addr" is allowed to be unaligned
   in IP options!
 */

void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
	__be32 src;

	if (rt_is_output_route(rt))
		src = ip_hdr(skb)->saddr;
	else {
		struct fib_result res;
		struct flowi4 fl4;
		struct iphdr *iph;

		iph = ip_hdr(skb);

		memset(&fl4, 0, sizeof(fl4));
		fl4.daddr = iph->daddr;
		fl4.saddr = iph->saddr;
		fl4.flowi4_tos = RT_TOS(iph->tos);
		fl4.flowi4_oif = rt->dst.dev->ifindex;
		fl4.flowi4_iif = skb->dev->ifindex;
		fl4.flowi4_mark = skb->mark;

		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
			src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
		else
			src = inet_select_addr(rt->dst.dev,
					       rt_nexthop(rt, iph->daddr),
					       RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}

#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->dst.tclassid & 0xFFFF))
		rt->dst.tclassid |= tag & 0xFFFF;
	if (!(rt->dst.tclassid & 0xFFFF0000))
		rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif

static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
	unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);

	if (advmss == 0) {
		advmss = max_t(unsigned int, dst->dev->mtu - 40,
			       ip_rt_min_advmss);
		if (advmss > 65535 - 40)
			advmss = 65535 - 40;
	}
	return advmss;
}

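/* Effective MTU of the route: a still-valid learned PMTU if there is
 * one, otherwise the RTAX_MTU metric, otherwise the device MTU (capped
 * at 576 for locked-metric routes that go via a gateway and bounded by
 * IP_MAX_MTU).
 */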
static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *) dst;
	unsigned int mtu = rt->rt_pmtu;

	if (!mtu || time_after_eq(jiffies, rt->dst.expires))
		mtu = dst_metric_raw(dst, RTAX_MTU);

	if (mtu)
		return mtu;

	mtu = dst->dev->mtu;

	if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
		if (rt->rt_uses_gateway && mtu > 576)
			mtu = 576;
	}

	return min_t(unsigned int, mtu, IP_MAX_MTU);
}

static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
{
	struct fnhe_hash_bucket *hash = nh->nh_exceptions;
	struct fib_nh_exception *fnhe;
	u32 hval;

	if (!hash)
		return NULL;

	hval = fnhe_hashfun(daddr);

	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			return fnhe;
	}
	return NULL;
}

static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
			      __be32 daddr)
{
	bool ret = false;

	spin_lock_bh(&fnhe_lock);

	if (daddr == fnhe->fnhe_daddr) {
		struct rtable __rcu **porig;
		struct rtable *orig;
		int genid = fnhe_genid(dev_net(rt->dst.dev));

		if (rt_is_input_route(rt))
			porig = &fnhe->fnhe_rth_input;
		else
			porig = &fnhe->fnhe_rth_output;
		orig = rcu_dereference(*porig);

		if (fnhe->fnhe_genid != genid) {
			fnhe->fnhe_genid = genid;
			fnhe->fnhe_gw = 0;
			fnhe->fnhe_pmtu = 0;
			fnhe->fnhe_expires = 0;
			fnhe_flush_routes(fnhe);
			orig = NULL;
		}
		fill_route_from_fnhe(rt, fnhe);
		if (!rt->rt_gateway)
			rt->rt_gateway = daddr;

		if (!(rt->dst.flags & DST_NOCACHE)) {
			rcu_assign_pointer(*porig, rt);
			if (orig)
				rt_free(orig);
			ret = true;
		}

		fnhe->fnhe_stamp = jiffies;
	}
	spin_unlock_bh(&fnhe_lock);

	return ret;
}

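/* Try to cache rt on its nexthop: in nh_rth_input for input routes, or
 * in this CPU's nh_pcpu_rth_output slot for output routes.  The slot is
 * claimed with cmpxchg(); on success the previous entry is freed, on
 * failure (another CPU won the race) the route stays uncached.
 */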
static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
{
	struct rtable *orig, *prev, **p;
	bool ret = true;

	if (rt_is_input_route(rt)) {
		p = (struct rtable **)&nh->nh_rth_input;
	} else {
		p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output);
	}
	orig = *p;

	prev = cmpxchg(p, orig, rt);
	if (prev == orig) {
		if (orig)
			rt_free(orig);
	} else
		ret = false;

	return ret;
}

static DEFINE_SPINLOCK(rt_uncached_lock);
static LIST_HEAD(rt_uncached_list);

static void rt_add_uncached_list(struct rtable *rt)
{
	spin_lock_bh(&rt_uncached_lock);
	list_add_tail(&rt->rt_uncached, &rt_uncached_list);
	spin_unlock_bh(&rt_uncached_lock);
}

static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *) dst;

	if (!list_empty(&rt->rt_uncached)) {
		spin_lock_bh(&rt_uncached_lock);
		list_del(&rt->rt_uncached);
		spin_unlock_bh(&rt_uncached_lock);
	}
}

void rt_flush_dev(struct net_device *dev)
{
	if (!list_empty(&rt_uncached_list)) {
		struct net *net = dev_net(dev);
		struct rtable *rt;

		spin_lock_bh(&rt_uncached_lock);
		list_for_each_entry(rt, &rt_uncached_list, rt_uncached) {
			if (rt->dst.dev != dev)
				continue;
			rt->dst.dev = net->loopback_dev;
			dev_hold(rt->dst.dev);
			dev_put(dev);
		}
		spin_unlock_bh(&rt_uncached_lock);
	}
}

static bool rt_cache_valid(const struct rtable *rt)
{
	return	rt &&
		rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
		!rt_is_expired(rt);
}

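/* Finish building a route from a FIB lookup result: inherit the
 * gateway, metrics and tclassid from the nexthop, then try to cache
 * the route either in a matching exception entry or on the nexthop
 * itself.  Routes that cannot be cached are flagged DST_NOCACHE and
 * put on the uncached list.
 */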
static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
			   const struct fib_result *res,
			   struct fib_nh_exception *fnhe,
			   struct fib_info *fi, u16 type, u32 itag)
{
	bool cached = false;

	if (fi) {
		struct fib_nh *nh = &FIB_RES_NH(*res);

		if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) {
			rt->rt_gateway = nh->nh_gw;
			rt->rt_uses_gateway = 1;
		}
		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
#ifdef CONFIG_IP_ROUTE_CLASSID
		rt->dst.tclassid = nh->nh_tclassid;
#endif
		if (unlikely(fnhe))
			cached = rt_bind_exception(rt, fnhe, daddr);
		else if (!(rt->dst.flags & DST_NOCACHE))
			cached = rt_cache_route(nh, rt);
		if (unlikely(!cached)) {
			/* Routes we intend to cache in nexthop exception or
			 * FIB nexthop have the DST_NOCACHE bit clear.
			 * However, if we are unsuccessful at storing this
			 * route into the cache we really need to set it.
			 */
			rt->dst.flags |= DST_NOCACHE;
			if (!rt->rt_gateway)
				rt->rt_gateway = daddr;
			rt_add_uncached_list(rt);
		}
	} else
		rt_add_uncached_list(rt);

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, res->tclassid);
#endif
	set_class_tag(rt, itag);
#endif
}

static struct rtable *rt_dst_alloc(struct net_device *dev,
				   bool nopolicy, bool noxfrm, bool will_cache)
{
	return dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
			 (will_cache ? 0 : (DST_HOST | DST_NOCACHE)) |
			 (nopolicy ? DST_NOPOLICY : 0) |
			 (noxfrm ? DST_NOXFRM : 0));
}

/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
				u8 tos, struct net_device *dev, int our)
{
	struct rtable *rth;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	u32 itag = 0;
	int err;

	/* Primary sanity checks. */

	if (in_dev == NULL)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
		if (ipv4_is_loopback(saddr))
			goto e_inval;

	if (ipv4_is_zeronet(saddr)) {
		if (!ipv4_is_local_multicast(daddr))
			goto e_inval;
	} else {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, &itag);
		if (err < 0)
			goto e_err;
	}
	rth = rt_dst_alloc(dev_net(dev)->loopback_dev,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
	if (!rth)
		goto e_nobufs;

#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->dst.output = ip_rt_bug;

	rth->rt_genid	= rt_genid_ipv4(dev_net(dev));
	rth->rt_flags	= RTCF_MULTICAST;
	rth->rt_type	= RTN_MULTICAST;
	rth->rt_is_input= 1;
	rth->rt_iif	= 0;
	rth->rt_pmtu	= 0;
	rth->rt_gateway	= 0;
	rth->rt_uses_gateway = 0;
	INIT_LIST_HEAD(&rth->rt_uncached);
	if (our) {
		rth->dst.input= ip_local_deliver;
		rth->rt_flags |= RTCF_LOCAL;
	}

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	skb_dst_set(skb, &rth->dst);
	return 0;

e_nobufs:
	return -ENOBUFS;
e_inval:
	return -EINVAL;
e_err:
	return err;
}


static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 *	RFC1812 recommendation, if source is martian,
		 *	the only hint is MAC header.
		 */
		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
			&daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			print_hex_dump(KERN_WARNING, "ll header: ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       skb_mac_header(skb),
				       dev->hard_header_len, true);
		}
	}
#endif
}

/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
			   const struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos)
{
	struct fib_nh_exception *fnhe;
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	unsigned int flags = 0;
	bool do_cache;
	u32 itag = 0;

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
	if (out_dev == NULL) {
		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
		return -EINVAL;
	}

	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, in_dev, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		goto cleanup;
	}

	do_cache = res->fi && !itag;
	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
	    (IN_DEV_SHARED_MEDIA(out_dev) ||
	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) {
		flags |= RTCF_DOREDIRECT;
		do_cache = false;
	}

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP).  Do not create a route if it is
		 * invalid for proxy ARP.  DNAT routes are always valid.
		 *
		 * The proxy ARP feature has been extended to allow ARP
		 * replies back out the same interface, to support
		 * Private VLAN switch technologies.  See arp.c.
		 */
		if (out_dev == in_dev &&
		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	fnhe = find_exception(&FIB_RES_NH(*res), daddr);
	if (do_cache) {
		if (fnhe != NULL)
			rth = rcu_dereference(fnhe->fnhe_rth_input);
		else
			rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);

		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			goto out;
		}
	}

	rth = rt_dst_alloc(out_dev->dev,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	rth->rt_genid = rt_genid_ipv4(dev_net(rth->dst.dev));
	rth->rt_flags = flags;
	rth->rt_type = res->type;
	rth->rt_is_input = 1;
	rth->rt_iif 	= 0;
	rth->rt_pmtu	= 0;
	rth->rt_gateway	= 0;
	rth->rt_uses_gateway = 0;
	INIT_LIST_HEAD(&rth->rt_uncached);
	RT_CACHE_STAT_INC(in_slow_tot);

	rth->dst.input = ip_forward;
	rth->dst.output = ip_output;

	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag);
	skb_dst_set(skb, &rth->dst);
out:
	err = 0;
 cleanup:
	return err;
}

static int ip_mkroute_input(struct sk_buff *skb,
			    struct fib_result *res,
			    const struct flowi4 *fl4,
			    struct in_device *in_dev,
			    __be32 daddr, __be32 saddr, u32 tos)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi && res->fi->fib_nhs > 1)
		fib_select_multipath(res);
#endif

	/* create a routing cache entry */
	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}

/*
 *	NOTE. We drop all packets that have a local source
 *	address, because every properly looped-back packet
 *	must already have the correct destination attached by the output routine.
 *
 *	This approach solves two big problems:
 *	1. Non-simplex devices are handled properly.
 *	2. IP spoofing attempts are filtered with a 100% guarantee.
 *
 *	called with rcu_read_lock()
 */

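/* Slow-path input route lookup: reject martian sources and
 * destinations, consult the FIB, and build a broadcast, local-delivery
 * or forwarding route for the packet.  The result is attached to the
 * skb and, where possible, cached on the FIB nexthop.
 */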
static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			       u8 tos, struct net_device *dev)
{
	struct fib_result res;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct flowi4	fl4;
	unsigned int	flags = 0;
	u32		itag = 0;
	struct rtable	*rth;
	int		err = -EINVAL;
	struct net    *net = dev_net(dev);
	bool do_cache;

	/* IP on this device is disabled. */

	if (!in_dev)
		goto out;

	/* Check for the weirdest martians, which cannot be detected
	   by fib_lookup.
	 */

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
		goto martian_source;

	res.fi = NULL;
	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
		goto brd_input;

	/* Accept zero addresses only to limited broadcast;
	 * I am not even sure whether to fix this or not. Waiting for complaints :-)
	 */
	if (ipv4_is_zeronet(saddr))
		goto martian_source;

1668
	if (ipv4_is_zeronet(daddr))
Linus Torvalds's avatar
Linus Torvalds committed
1669 1670
		goto martian_destination;

1671 1672 1673 1674 1675
	/* Following code try to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
	 * and call it once if daddr or/and saddr are loopback addresses
	 */
	if (ipv4_is_loopback(daddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
1676
			goto martian_destination;
1677 1678
	} else if (ipv4_is_loopback(saddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
1679 1680 1681
			goto martian_source;
	}

Linus Torvalds's avatar
Linus Torvalds committed
1682 1683 1684
	/*
	 *	Now we are ready to route the packet.
	 */
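	/* Build the flow key for the FIB lookup from the received packet:
	 * input interface, firewall mark, TOS and the packet's addresses.
	 */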
	fl4.flowi4_oif = 0;
	fl4.flowi4_iif = dev->ifindex;
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_tos = tos;
	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	err = fib_lookup(net, &fl4, &res);
	if (err != 0) {
		if (!IN_DEV_FORWARD(in_dev))
			err = -EHOSTUNREACH;
		goto no_route;
	}

	if (res.type == RTN_BROADCAST)
		goto brd_input;

	if (res.type == RTN_LOCAL) {
		err = fib_validate_source(skb, saddr, daddr, tos,
					  0, dev, in_dev, &itag);
		if (err < 0)
			goto martian_source_keep_err;
		goto local_input;
	}

	if (!IN_DEV_FORWARD(in_dev)) {
		err = -EHOSTUNREACH;
		goto no_route;
	}
	if (res.type != RTN_UNICAST)
		goto martian_destination;

	err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
out:	return err;

brd_input:
	if (skb->protocol != htons(ETH_P_IP))
		goto e_inval;

1724
	if (!ipv4_is_zeronet(saddr)) {
1725 1726
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, &itag);
Linus Torvalds's avatar
Linus Torvalds committed
1727
		if (err < 0)
1728
			goto martian_source_keep_err;
Linus Torvalds's avatar
Linus Torvalds committed
1729 1730 1731 1732 1733 1734
	}
	flags |= RTCF_BROADCAST;
	res.type = RTN_BROADCAST;
	RT_CACHE_STAT_INC(in_brd);

local_input:
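	/* Reuse the next hop's cached input route when possible; only cache a
	 * new one if source validation did not hand back a tclassid (itag).
	 */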
	do_cache = false;
	if (res.fi) {
		if (!itag) {
			rth = rcu_dereference(FIB_RES_NH(res).nh_rth_input);
			if (rt_cache_valid(rth)) {
				skb_dst_set_noref(skb, &rth->dst);
				err = 0;
				goto out;
			}
			do_cache = true;
		}
	}

	rth = rt_dst_alloc(net->loopback_dev,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
	if (!rth)
		goto e_nobufs;

	rth->dst.input = ip_local_deliver;
	rth->dst.output = ip_rt_bug;
#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif

	rth->rt_genid = rt_genid_ipv4(net);
	rth->rt_flags	= flags|RTCF_LOCAL;
	rth->rt_type	= res.type;
	rth->rt_is_input = 1;
	rth->rt_iif	= 0;
	rth->rt_pmtu	= 0;
	rth->rt_gateway	= 0;
	rth->rt_uses_gateway = 0;
	INIT_LIST_HEAD(&rth->rt_uncached);
	RT_CACHE_STAT_INC(in_slow_tot);
	if (res.type == RTN_UNREACHABLE) {
		rth->dst.input = ip_error;
		rth->dst.error = -err;
		rth->rt_flags	&= ~RTCF_LOCAL;
	}
	if (do_cache) {
		if (unlikely(!rt_cache_route(&FIB_RES_NH(res), rth))) {
			rth->dst.flags |= DST_NOCACHE;
			rt_add_uncached_list(rth);
		}
	}
	skb_dst_set(skb, &rth->dst);
	err = 0;
	goto out;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	res.type = RTN_UNREACHABLE;
	if (err == -ESRCH)
		err = -ENETUNREACH;
	goto local_input;

	/*
	 *	Do not cache martian addresses: they should be logged (RFC1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev))
		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
				     &daddr, &saddr, dev->name);
#endif

e_inval:
	err = -EINVAL;
	goto out;

e_nobufs:
	err = -ENOBUFS;
	goto out;

martian_source:
	err = -EINVAL;
martian_source_keep_err:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto out;
}

int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			 u8 tos, struct net_device *dev)
{
	int res;

	rcu_read_lock();

	/* Multicast recognition logic is moved from the route cache to here.
	   The problem was that too many Ethernet cards have broken/missing
	   hardware multicast filters :-( As a result, a host on a multicast
	   network acquires a lot of useless route cache entries, a sort of
	   SDR messages from all over the world. Now we try to get rid of them.
	   Really, provided the software IP multicast filter is organized
	   reasonably (at least, hashed), it does not result in a slowdown
	   compared with route cache reject entries.
	   Note that multicast routers are not affected, because a
	   route cache entry is created eventually.
	 */
	if (ipv4_is_multicast(daddr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		if (in_dev) {
			int our = ip_check_mc_rcu(in_dev, daddr, saddr,
						  ip_hdr(skb)->protocol);
			if (our
#ifdef CONFIG_IP_MROUTE
				||
			    (!ipv4_is_local_multicast(daddr) &&
			     IN_DEV_MFORWARD(in_dev))
#endif
			   ) {
				int res = ip_route_input_mc(skb, daddr, saddr,
							    tos, dev, our);
				rcu_read_unlock();
				return res;
			}
		}
		rcu_read_unlock();
		return -EINVAL;
	}
	res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL(ip_route_input_noref);

/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
				       const struct flowi4 *fl4, int orig_oif,
				       struct net_device *dev_out,
				       unsigned int flags)
{
	struct fib_info *fi = res->fi;
	struct fib_nh_exception *fnhe;
	struct in_device *in_dev;
	u16 type = res->type;
	struct rtable *rth;
	bool do_cache;

	in_dev = __in_dev_get_rcu(dev_out);
	if (!in_dev)
		return ERR_PTR(-EINVAL);

	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
		if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
			return ERR_PTR(-EINVAL);

	if (ipv4_is_lbcast(fl4->daddr))
		type = RTN_BROADCAST;
	else if (ipv4_is_multicast(fl4->daddr))
		type = RTN_MULTICAST;
	else if (ipv4_is_zeronet(fl4->daddr))
		return ERR_PTR(-EINVAL);

	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	do_cache = true;
	if (type == RTN_BROADCAST) {
		flags |= RTCF_BROADCAST | RTCF_LOCAL;
		fi = NULL;
	} else if (type == RTN_MULTICAST) {
		flags |= RTCF_MULTICAST | RTCF_LOCAL;
		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
				     fl4->flowi4_proto))
			flags &= ~RTCF_LOCAL;
		else
			do_cache = false;
		/* If a multicast route does not exist, use the
		 * default one, but do not gateway in this case.
		 * Yes, it is a hack.
		 */
		if (fi && res->prefixlen < 4)
			fi = NULL;
	}

	fnhe = NULL;
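	/* Prefer a cached output route: a per-destination exception (fnhe)
	 * if one exists, otherwise the per-cpu cache hanging off the next hop.
	 */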
	do_cache &= fi != NULL;
	if (do_cache) {
		struct rtable __rcu **prth;
		struct fib_nh *nh = &FIB_RES_NH(*res);

		fnhe = find_exception(nh, fl4->daddr);
		if (fnhe)
			prth = &fnhe->fnhe_rth_output;
		else {
			if (unlikely(fl4->flowi4_flags &
				     FLOWI_FLAG_KNOWN_NH &&
				     !(nh->nh_gw &&
				       nh->nh_scope == RT_SCOPE_LINK))) {
				do_cache = false;
				goto add;
			}
			prth = __this_cpu_ptr(nh->nh_pcpu_rth_output);
		}
		rth = rcu_dereference(*prth);
		if (rt_cache_valid(rth)) {
			dst_hold(&rth->dst);
			return rth;
		}
	}

add:
	rth = rt_dst_alloc(dev_out,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(in_dev, NOXFRM),
			   do_cache);
	if (!rth)
		return ERR_PTR(-ENOBUFS);

	rth->dst.output = ip_output;

	rth->rt_genid = rt_genid_ipv4(dev_net(dev_out));
	rth->rt_flags	= flags;
	rth->rt_type	= type;
	rth->rt_is_input = 0;
	rth->rt_iif	= orig_oif ? : 0;
	rth->rt_pmtu	= 0;
	rth->rt_gateway = 0;
	rth->rt_uses_gateway = 0;
	INIT_LIST_HEAD(&rth->rt_uncached);

	RT_CACHE_STAT_INC(out_slow_tot);

	if (flags & RTCF_LOCAL)
		rth->dst.input = ip_local_deliver;
	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		if (flags & RTCF_LOCAL &&
		    !(dev_out->flags & IFF_LOOPBACK)) {
			rth->dst.output = ip_mc_output;
			RT_CACHE_STAT_INC(out_slow_mc);
		}
#ifdef CONFIG_IP_MROUTE
		if (type == RTN_MULTICAST) {
			if (IN_DEV_MFORWARD(in_dev) &&
			    !ipv4_is_local_multicast(fl4->daddr)) {
				rth->dst.input = ip_mr_input;
				rth->dst.output = ip_mc_output;
			}
		}
#endif
	}

	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);

	return rth;
}

/*
 * Major route resolver routine.
 */

struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
{
	struct net_device *dev_out = NULL;
	__u8 tos = RT_FL_TOS(fl4);
	unsigned int flags = 0;
	struct fib_result res;
	struct rtable *rth;
	int orig_oif;

	res.tclassid	= 0;
	res.fi		= NULL;
	res.table	= NULL;

	orig_oif = fl4->flowi4_oif;

	fl4->flowi4_iif = LOOPBACK_IFINDEX;
	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
			 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

	rcu_read_lock();
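	/* A caller-supplied source address must be a unicast address and,
	 * unless FLOWI_FLAG_ANYSRC is set, one configured on this host.
	 */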
	if (fl4->saddr) {
		rth = ERR_PTR(-EINVAL);
		if (ipv4_is_multicast(fl4->saddr) ||
		    ipv4_is_lbcast(fl4->saddr) ||
		    ipv4_is_zeronet(fl4->saddr))
			goto out;

		/* I removed check for oif == dev_out->oif here.
		   It was wrong for two reasons:
		   1. ip_dev_find(net, saddr) can return wrong iface, if saddr
		      is assigned to multiple interfaces.
		   2. Moreover, we are allowed to send packets with saddr
		      of another iface. --ANK
		 */

		if (fl4->flowi4_oif == 0 &&
		    (ipv4_is_multicast(fl4->daddr) ||
		     ipv4_is_lbcast(fl4->daddr))) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = __ip_dev_find(net, fl4->saddr, false);
			if (dev_out == NULL)
				goto out;

			/* Special hack: user can direct multicasts
			   and limited broadcast via necessary interface
			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			   This hack is not just for fun, it allows
			   vic, vat and friends to work.
			   They bind a socket to loopback, set ttl to zero
			   and expect that it will work.
			   From the viewpoint of the routing cache they are broken,
			   because we are not allowed to build a multicast path
			   with a loopback source addr (look, the routing cache
			   cannot know that ttl is zero, so that the packet
			   will not leave this host and the route is valid).
			   Luckily, this hack is a good workaround.
			 */

			fl4->flowi4_oif = dev_out->ifindex;
			goto make_route;
		}

		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			if (!__ip_dev_find(net, fl4->saddr, false))
				goto out;
		}
	}


	if (fl4->flowi4_oif) {
		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
		rth = ERR_PTR(-ENODEV);
		if (dev_out == NULL)
			goto out;

		/* RACE: Check return value of inet_select_addr instead. */
		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
			rth = ERR_PTR(-ENETUNREACH);
			goto out;
		}
		if (ipv4_is_local_multicast(fl4->daddr) ||
		    ipv4_is_lbcast(fl4->daddr)) {
			if (!fl4->saddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			goto make_route;
		}
		if (!fl4->saddr) {
			if (ipv4_is_multicast(fl4->daddr))
				fl4->saddr = inet_select_addr(dev_out, 0,
							      fl4->flowi4_scope);
			else if (!fl4->daddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_HOST);
		}
	}

	if (!fl4->daddr) {
		fl4->daddr = fl4->saddr;
		if (!fl4->daddr)
			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
		dev_out = net->loopback_dev;
		fl4->flowi4_oif = LOOPBACK_IFINDEX;
		res.type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	if (fib_lookup(net, fl4, &res)) {
		res.fi = NULL;
		res.table = NULL;
		if (fl4->flowi4_oif) {
			/* Apparently, routing tables are wrong. Assume
			   that the destination is on-link.

			   WHY? DW.
			   Because we are allowed to send to an iface
			   even if it has NO routes and NO assigned
			   addresses. When oif is specified, routing
			   tables are looked up with only one purpose:
			   to catch whether the destination is gatewayed,
			   rather than direct. Moreover, if MSG_DONTROUTE is
			   set, we send the packet, ignoring both routing
			   tables and ifaddr state. --ANK

			   We could do it even if oif is unknown,
			   likely IPv6, but we do not.
			 */

			if (fl4->saddr == 0)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res.type = RTN_UNICAST;
			goto make_route;
		}
		rth = ERR_PTR(-ENETUNREACH);
		goto out;
	}

	if (res.type == RTN_LOCAL) {
		if (!fl4->saddr) {
			if (res.fi->fib_prefsrc)
				fl4->saddr = res.fi->fib_prefsrc;
			else
				fl4->saddr = fl4->daddr;
		}
		dev_out = net->loopback_dev;
		fl4->flowi4_oif = dev_out->ifindex;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
		fib_select_multipath(&res);
	else
#endif
	if (!res.prefixlen &&
	    res.table->tb_num_default > 1 &&
	    res.type == RTN_UNICAST && !fl4->flowi4_oif)
		fib_select_default(&res);

	if (!fl4->saddr)
		fl4->saddr = FIB_RES_PREFSRC(net, res);

	dev_out = FIB_RES_DEV(res);
	fl4->flowi4_oif = dev_out->ifindex;


make_route:
	rth = __mkroute_output(&res, fl4, orig_oif, dev_out, flags);

out:
	rcu_read_unlock();
	return rth;
}
EXPORT_SYMBOL_GPL(__ip_route_output_key);

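/* A blackhole route copies the relevant fields of an existing route but
 * discards every packet sent through it; the dst_ops callbacks below are
 * deliberately no-ops.
 */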
static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}

static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					  struct sk_buff *skb, u32 mtu)
{
}

static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				       struct sk_buff *skb)
{
}

static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
					  unsigned long old)
{
	return NULL;
}

static struct dst_ops ipv4_dst_blackhole_ops = {
	.family			=	AF_INET,
	.protocol		=	cpu_to_be16(ETH_P_IP),
	.check			=	ipv4_blackhole_dst_check,
	.mtu			=	ipv4_blackhole_mtu,
	.default_advmss		=	ipv4_default_advmss,
	.update_pmtu		=	ipv4_rt_blackhole_update_pmtu,
	.redirect		=	ipv4_rt_blackhole_redirect,
	.cow_metrics		=	ipv4_rt_blackhole_cow_metrics,
	.neigh_lookup		=	ipv4_neigh_lookup,
};

struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rtable *ort = (struct rtable *) dst_orig;
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
	if (rt) {
		struct dst_entry *new = &rt->dst;

		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard_sk;

		new->dev = ort->dst.dev;
		if (new->dev)
			dev_hold(new->dev);

		rt->rt_is_input = ort->rt_is_input;
		rt->rt_iif = ort->rt_iif;
		rt->rt_pmtu = ort->rt_pmtu;

		rt->rt_genid = rt_genid_ipv4(net);
		rt->rt_flags = ort->rt_flags;
		rt->rt_type = ort->rt_type;
		rt->rt_gateway = ort->rt_gateway;
		rt->rt_uses_gateway = ort->rt_uses_gateway;

		INIT_LIST_HEAD(&rt->rt_uncached);

		dst_free(new);
	}

	dst_release(dst_orig);

	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
}

struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
				    struct sock *sk)
{
	struct rtable *rt = __ip_route_output_key(net, flp4);

	if (IS_ERR(rt))
		return rt;

	if (flp4->flowi4_proto)
		rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
						   flowi4_to_flowi(flp4),
						   sk, 0);

	return rt;
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);

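/* Build an RTM_NEWROUTE netlink message describing @rt for the flow in @fl4,
 * including metrics, gateway, cache expiry and error information.
 */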
static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
			struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
			u32 seq, int event, int nowait, unsigned int flags)
{
	struct rtable *rt = skb_rtable(skb);
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	unsigned long expires = 0;
	u32 error;
	u32 metrics[RTAX_MAX];

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family	 = AF_INET;
	r->rtm_dst_len	= 32;
	r->rtm_src_len	= 0;
	r->rtm_tos	= fl4->flowi4_tos;
	r->rtm_table	= RT_TABLE_MAIN;
	if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
		goto nla_put_failure;
	r->rtm_type	= rt->rt_type;
	r->rtm_scope	= RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;

	if (nla_put_be32(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (src) {
		r->rtm_src_len = 32;
		if (nla_put_be32(skb, RTA_SRC, src))
			goto nla_put_failure;
	}
	if (rt->dst.dev &&
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
		goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (rt->dst.tclassid &&
	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
		goto nla_put_failure;
#endif
	if (!rt_is_input_route(rt) &&
	    fl4->saddr != src) {
		if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr))
			goto nla_put_failure;
	}
	if (rt->rt_uses_gateway &&
	    nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
		goto nla_put_failure;

	expires = rt->dst.expires;
	if (expires) {
		unsigned long now = jiffies;

		if (time_before(now, expires))
			expires -= now;
		else
			expires = 0;
	}

	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
	if (rt->rt_pmtu && expires)
		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
	if (rtnetlink_put_metrics(skb, metrics) < 0)
		goto nla_put_failure;

	if (fl4->flowi4_mark &&
	    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
		goto nla_put_failure;

	error = rt->dst.error;

	if (rt_is_input_route(rt)) {
#ifdef CONFIG_IP_MROUTE
		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
			int err = ipmr_get_route(net, skb,
						 fl4->saddr, fl4->daddr,
						 r, nowait);
			if (err <= 0) {
				if (!nowait) {
					if (err == 0)
						return 0;
					goto nla_put_failure;
				} else {
					if (err == -EMSGSIZE)
						goto nla_put_failure;
					error = err;
				}
			}
		} else
#endif
			if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex))
				goto nla_put_failure;
	}

	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

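/* RTM_GETROUTE handler: build a dummy skb, resolve the route exactly as the
 * stack would for a real packet, and report the result back to the requester.
 */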
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(in_skb->sk);
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	struct rtable *rt = NULL;
	struct flowi4 fl4;
	__be32 dst = 0;
	__be32 src = 0;
	u32 iif;
	int err;
	int mark;
	struct sk_buff *skb;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL) {
		err = -ENOBUFS;
		goto errout;
	}

	/* Reserve room for dummy headers; this skb can pass
	   through a good chunk of the routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);

	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
	ip_hdr(skb)->protocol = IPPROTO_ICMP;
	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));

	src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = dst;
	fl4.saddr = src;
	fl4.flowi4_tos = rtm->rtm_tos;
	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
	fl4.flowi4_mark = mark;

	if (iif) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, iif);
		if (dev == NULL) {
			err = -ENODEV;
			goto errout_free;
		}

		skb->protocol	= htons(ETH_P_IP);
		skb->dev	= dev;
		skb->mark	= mark;
		local_bh_disable();
		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
		local_bh_enable();

		rt = skb_rtable(skb);
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;
	} else {
		rt = ip_route_output_key(net, &fl4);

		err = 0;
		if (IS_ERR(rt))
			err = PTR_ERR(rt);
	}

	if (err)
		goto errout_free;

	skb_dst_set(skb, &rt->dst);
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = rt_fill_info(net, dst, src, &fl4, skb,
			   NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
			   RTM_NEWROUTE, 0, 0);
	if (err <= 0)
		goto errout_free;

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;

errout_free:
	kfree_skb(skb);
	goto errout;
}

void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev));
}

#ifdef CONFIG_SYSCTL
static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
static int ip_rt_gc_elasticity __read_mostly	= 8;

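/* Writing any value to /proc/sys/net/ipv4/route/flush invalidates all cached
 * routes and PMTU/redirect exceptions in this namespace by bumping the
 * corresponding generation counters.
 */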
static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
					void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	struct net *net = (struct net *)__ctl->extra1;

	if (write) {
		rt_cache_flush(net);
		fnhe_genid_bump(net);
		return 0;
	}

	return -EINVAL;
}

static struct ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/*  Deprecated. Use gc_min_interval_ms */

		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
2607 2608 2609 2610 2611 2612

static struct ctl_table ipv4_route_flush_table[] = {
	{
		.procname	= "flush",
		.maxlen		= sizeof(int),
		.mode		= 0200,
Alexey Dobriyan's avatar
Alexey Dobriyan committed
2613
		.proc_handler	= ipv4_sysctl_rtcache_flush,
2614
	},
2615
	{ },
2616 2617 2618 2619 2620 2621 2622
};

static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
2623
	if (!net_eq(net, &init_net)) {
2624 2625 2626
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (tbl == NULL)
			goto err_dup;
2627 2628 2629 2630

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			tbl[0].procname = NULL;
2631 2632 2633
	}
	tbl[0].extra1 = net;

2634
	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659
	if (net->ipv4.route_hdr == NULL)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
Linus Torvalds's avatar
Linus Torvalds committed
2660 2661
#endif

2662
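/* Per-namespace initialisation of the generation counters used to invalidate
 * cached routes and next-hop exceptions.
 */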
static __net_init int rt_genid_init(struct net *net)
2663
{
fan.du's avatar
fan.du committed
2664
	atomic_set(&net->ipv4.rt_genid, 0);
2665
	atomic_set(&net->fnhe_genid, 0);
2666 2667
	get_random_bytes(&net->ipv4.dev_addr_genid,
			 sizeof(net->ipv4.dev_addr_genid));
2668 2669 2670
	return 0;
}

2671 2672
static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
2673 2674
};

2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690
static int __net_init ipv4_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv4.peers = bp;
	return 0;
}

static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv4.peers;

	net->ipv4.peers = NULL;
2691
	inetpeer_invalidate_tree(bp);
2692 2693 2694 2695 2696 2697 2698
	kfree(bp);
}

static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
	.init	=	ipv4_inetpeer_init,
	.exit	=	ipv4_inetpeer_exit,
};
2699

2700
#ifdef CONFIG_IP_ROUTE_CLASSID
2701
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
2702
#endif /* CONFIG_IP_ROUTE_CLASSID */
Linus Torvalds's avatar
Linus Torvalds committed
2703 2704 2705

int __init ip_rt_init(void)
{
2706
	int rc = 0;
Linus Torvalds's avatar
Linus Torvalds committed
2707

2708
#ifdef CONFIG_IP_ROUTE_CLASSID
2709
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
Linus Torvalds's avatar
Linus Torvalds committed
2710 2711 2712 2713
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

Alexey Dobriyan's avatar
Alexey Dobriyan committed
2714 2715
	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
2716
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
Linus Torvalds's avatar
Linus Torvalds committed
2717

2718 2719
	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

2720 2721 2722 2723 2724 2725
	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

David S. Miller's avatar
David S. Miller committed
2726 2727
	ipv4_dst_ops.gc_thresh = ~0;
	ip_rt_max_size = INT_MAX;
Linus Torvalds's avatar
Linus Torvalds committed
2728 2729 2730 2731

	devinet_init();
	ip_fib_init();

2732
	if (ip_rt_proc_init())
2733
		pr_err("Unable to create route proc files\n");
Linus Torvalds's avatar
Linus Torvalds committed
2734 2735
#ifdef CONFIG_XFRM
	xfrm_init();
2736
	xfrm4_init();
Linus Torvalds's avatar
Linus Torvalds committed
2737
#endif
2738
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);
2739

2740 2741 2742
#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
2743
	register_pernet_subsys(&rt_genid_ops);
2744
	register_pernet_subsys(&ipv4_inetpeer_ops);
Linus Torvalds's avatar
Linus Torvalds committed
2745 2746 2747
	return rc;
}

2748
#ifdef CONFIG_SYSCTL
2749 2750 2751 2752 2753 2754
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
2755
	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
2756
}
2757
#endif