/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	: 	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 * 		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 * 	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 * 	Ilia Sotnikov		:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#include <linux/kmemleak.h>
#endif
#include <net/secure_seq.h>

#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

/* IPv4 datagram length is stored into 16bit field (tot_len) */
#define IP_MAX_MTU	0xFFFF

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;

/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int	 ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu);
static void		 ip_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
static void		ipv4_dst_destroy(struct dst_entry *dst);

static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			    int how)
{
}

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	WARN_ON(1);
	return NULL;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr);

static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.protocol =		cpu_to_be16(ETH_P_IP),
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.mtu =			ipv4_mtu,
	.cow_metrics =		ipv4_cow_metrics,
	.destroy =		ipv4_dst_destroy,
	.ifdown =		ipv4_dst_ifdown,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.redirect =		ip_do_redirect,
	.local_out =		__ip_local_out,
	.neigh_lookup =		ipv4_neigh_lookup,
};

#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)

#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos)
		return NULL;
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cache_seq_ops);
}

static const struct file_operations rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};


static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;

}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{

}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   0, /* st->in_hit */
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,

		   0, /* st->out_hit */
		   st->out_slow_tot,
		   st->out_slow_mc,

		   0, /* st->gc_total */
		   0, /* st->gc_ignored */
		   0, /* st->gc_goal_miss */
		   0, /* st->gc_dst_overflow */
		   0, /* st->in_hlist_search */
		   0  /* st->out_hlist_search */
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};


static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}

static int rt_acct_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rt_acct_proc_show, NULL);
}

static const struct file_operations rt_acct_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= rt_acct_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_create("rt_cache", S_IRUGO, net->proc_net,
			  &rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", S_IRUGO,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata =  {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */

static inline bool rt_is_expired(const struct rtable *rth)
{
	return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
}

void rt_cache_flush(struct net *net)
{
	rt_genid_bump_ipv4(net);
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	struct net_device *dev = dst->dev;
	const __be32 *pkey = daddr;
	const struct rtable *rt;
	struct neighbour *n;

	rt = (const struct rtable *) dst;
	if (rt->rt_gateway)
		pkey = (const __be32 *) &rt->rt_gateway;
	else if (skb)
		pkey = &ip_hdr(skb)->daddr;

	n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
	if (n)
		return n;
	return neigh_create(&arp_tbl, pkey, dev);
}

/*
 * Peer allocation may fail only in serious out-of-memory conditions.  However
 * we still can generate some output.
 * Random ID selection looks a bit dangerous because we have no chances to
 * select ID being unique in a reasonable period of time.
 * But broken packet identifier may be better than no packet at all.
 */
static void ip_select_fb_ident(struct iphdr *iph)
{
	static DEFINE_SPINLOCK(ip_fb_id_lock);
	static u32 ip_fallback_id;
	u32 salt;

	spin_lock_bh(&ip_fb_id_lock);
	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
	iph->id = htons(salt & 0xFFFF);
	ip_fallback_id = salt;
	spin_unlock_bh(&ip_fb_id_lock);
}

void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
	struct net *net = dev_net(dst->dev);
	struct inet_peer *peer;

	peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
	if (peer) {
		iph->id = htons(inet_getid(peer, more));
		inet_putpeer(peer);
		return;
	}

	ip_select_fb_ident(iph);
}
EXPORT_SYMBOL(__ip_select_ident);

static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
			     const struct iphdr *iph,
			     int oif, u8 tos,
			     u8 prot, u32 mark, int flow_flags)
{
	if (sk) {
		const struct inet_sock *inet = inet_sk(sk);

		oif = sk->sk_bound_dev_if;
		mark = sk->sk_mark;
		tos = RT_CONN_FLAGS(sk);
		prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
	}
	flowi4_init_output(fl4, oif, mark, tos,
			   RT_SCOPE_UNIVERSE, prot,
			   flow_flags,
			   iph->daddr, iph->saddr, 0, 0);
}

static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
			       const struct sock *sk)
{
	const struct iphdr *iph = ip_hdr(skb);
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;

	__build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
}

static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
			   inet_sk_flowi_flags(sk),
			   daddr, inet->inet_saddr, 0, 0);
	rcu_read_unlock();
}

static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
				 const struct sk_buff *skb)
{
	if (skb)
		build_skb_flow_key(fl4, skb, sk);
	else
		build_sk_flow_key(fl4, sk);
}

static inline void rt_free(struct rtable *rt)
{
	call_rcu(&rt->dst.rcu_head, dst_rcu_free);
}

static DEFINE_SPINLOCK(fnhe_lock);

static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
	struct rtable *rt;

	rt = rcu_dereference(fnhe->fnhe_rth_input);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
		rt_free(rt);
	}
	rt = rcu_dereference(fnhe->fnhe_rth_output);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
		rt_free(rt);
	}
}

static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
{
	struct fib_nh_exception *fnhe, *oldest;

	oldest = rcu_dereference(hash->chain);
	for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
			oldest = fnhe;
	}
	fnhe_flush_routes(oldest);
	return oldest;
}

static inline u32 fnhe_hashfun(__be32 daddr)
{
	u32 hval;

	hval = (__force u32) daddr;
	hval ^= (hval >> 11) ^ (hval >> 22);

	return hval & (FNHE_HASH_SIZE - 1);
}

static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
{
	rt->rt_pmtu = fnhe->fnhe_pmtu;
	rt->dst.expires = fnhe->fnhe_expires;

	if (fnhe->fnhe_gw) {
		rt->rt_flags |= RTCF_REDIRECTED;
		rt->rt_gateway = fnhe->fnhe_gw;
		rt->rt_uses_gateway = 1;
	}
}

static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
				  u32 pmtu, unsigned long expires)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe;
	struct rtable *rt;
	unsigned int i;
	int depth;
	u32 hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = nh->nh_exceptions;
	if (!hash) {
		hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
		if (!hash)
			goto out_unlock;
		nh->nh_exceptions = hash;
	}

	hash += hval;

	depth = 0;
	for (fnhe = rcu_dereference(hash->chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			break;
		depth++;
	}

	if (fnhe) {
		if (gw)
			fnhe->fnhe_gw = gw;
		if (pmtu) {
			fnhe->fnhe_pmtu = pmtu;
			fnhe->fnhe_expires = max(1UL, expires);
		}
		/* Update all cached dsts too */
		rt = rcu_dereference(fnhe->fnhe_rth_input);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
		rt = rcu_dereference(fnhe->fnhe_rth_output);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
	} else {
		if (depth > FNHE_RECLAIM_DEPTH)
			fnhe = fnhe_oldest(hash);
		else {
			fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
			if (!fnhe)
				goto out_unlock;

			fnhe->fnhe_next = hash->chain;
			rcu_assign_pointer(hash->chain, fnhe);
		}
		fnhe->fnhe_genid = fnhe_genid(dev_net(nh->nh_dev));
		fnhe->fnhe_daddr = daddr;
		fnhe->fnhe_gw = gw;
		fnhe->fnhe_pmtu = pmtu;
		fnhe->fnhe_expires = expires;

		/* Exception created; mark the cached routes for the nexthop
		 * stale, so anyone caching it rechecks if this exception
		 * applies to them.
		 */
		rt = rcu_dereference(nh->nh_rth_input);
		if (rt)
			rt->dst.obsolete = DST_OBSOLETE_KILL;

		for_each_possible_cpu(i) {
			struct rtable __rcu **prt;
			prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i);
			rt = rcu_dereference(*prt);
			if (rt)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
		}
	}

	fnhe->fnhe_stamp = jiffies;

out_unlock:
	spin_unlock_bh(&fnhe_lock);
	return;
}

static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
			     bool kill_route)
{
	__be32 new_gw = icmp_hdr(skb)->un.gateway;
	__be32 old_gw = ip_hdr(skb)->saddr;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct fib_result res;
	struct neighbour *n;
	struct net *net;

	switch (icmp_hdr(skb)->code & 7) {
	case ICMP_REDIR_NET:
	case ICMP_REDIR_NETTOS:
	case ICMP_REDIR_HOST:
	case ICMP_REDIR_HOSTTOS:
		break;

	default:
		return;
	}

	if (rt->rt_gateway != old_gw)
		return;

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
	if (n) {
		if (!(n->nud_state & NUD_VALID)) {
			neigh_event_send(n, NULL);
		} else {
			if (fib_lookup(net, fl4, &res) == 0) {
				struct fib_nh *nh = &FIB_RES_NH(res);

				update_or_create_fnhe(nh, fl4->daddr, new_gw,
						      0, 0);
			}
			if (kill_route)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
		}
		neigh_release(n);
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev)) {
		const struct iphdr *iph = (const struct iphdr *) skb->data;
		__be32 daddr = iph->daddr;
		__be32 saddr = iph->saddr;

		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
				     "  Advised path = %pI4 -> %pI4\n",
				     &old_gw, dev->name, &new_gw,
				     &saddr, &daddr);
	}
#endif
	;
}

static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt;
	struct flowi4 fl4;
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;

	rt = (struct rtable *) dst;

	__build_flow_key(&fl4, sk, iph, oif, tos, prot, mark, 0);
	__ip_do_redirect(rt, skb, &fl4, true);
}

static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete > 0) {
			ip_rt_put(rt);
			ret = NULL;
		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
			   rt->dst.expires) {
			ip_rt_put(rt);
			ret = NULL;
		}
	}
	return ret;
}

/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */

void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	struct net *net;
	int log_martians;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	rcu_read_unlock();

	net = dev_net(rt->dst.dev);
	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
	if (!peer) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
			  rt_nexthop(rt, ip_hdr(skb)->daddr));
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
		peer->rate_tokens = 0;

	/* Too many ignored redirects; do not send anything
	 * set dst.rate_last to the last seen redirected packet.
	 */
	if (peer->rate_tokens >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		goto out_put_peer;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (peer->rate_tokens == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->rate_tokens)))) {
		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);

		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
		peer->rate_last = jiffies;
		++peer->rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (log_martians &&
		    peer->rate_tokens == ip_rt_redirect_number)
			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
					     &ip_hdr(skb)->saddr, inet_iif(skb),
					     &ip_hdr(skb)->daddr, &gw);
#endif
	}
out_put_peer:
	inet_putpeer(peer);
}

static int ip_error(struct sk_buff *skb)
{
	struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
	struct rtable *rt = skb_rtable(skb);
	struct inet_peer *peer;
	unsigned long now;
	struct net *net;
	bool send;
	int code;

	net = dev_net(rt->dst.dev);
	if (!IN_DEV_FORWARD(in_dev)) {
		switch (rt->dst.error) {
		case EHOSTUNREACH:
			IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
			break;

		case ENETUNREACH:
			IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
			break;
		}
		goto out;
	}

	switch (rt->dst.error) {
	case EINVAL:
	default:
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);

	send = true;
	if (peer) {
		now = jiffies;
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
		inet_putpeer(peer);
	}
	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:	kfree_skb(skb);
	return 0;
}

static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
	struct dst_entry *dst = &rt->dst;
	struct fib_result res;

	if (dst_metric_locked(dst, RTAX_MTU))
		return;

	if (dst->dev->mtu < mtu)
		return;

	if (mtu < ip_rt_min_pmtu)
		mtu = ip_rt_min_pmtu;

	if (rt->rt_pmtu == mtu &&
	    time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
		return;

	rcu_read_lock();
	if (fib_lookup(dev_net(dst->dev), fl4, &res) == 0) {
		struct fib_nh *nh = &FIB_RES_NH(res);

		update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
				      jiffies + ip_rt_mtu_expires);
	}
	rcu_read_unlock();
}

static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu)
{
	struct rtable *rt = (struct rtable *) dst;
	struct flowi4 fl4;

	ip_rt_build_flow_key(&fl4, sk, skb);
	__ip_rt_update_pmtu(rt, &fl4, mtu);
}

void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
		      int oif, u32 mark, u8 protocol, int flow_flags)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(&fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, mark, flow_flags);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);

static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
	rt = __ip_route_output_key(sock_net(sk), &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}

void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct dst_entry *dst;
	bool new = false;

	bh_lock_sock(sk);

	if (!ip_sk_accept_pmtu(sk))
		goto out;

	rt = (struct rtable *) __sk_dst_get(sk);

	if (sock_owned_by_user(sk) || !rt) {
		__ipv4_sk_update_pmtu(skb, sk, mtu);
		goto out;
	}

	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);

	if (!__sk_dst_check(sk, 0)) {
		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	__ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);

	dst = dst_check(&rt->dst, 0);
	if (!dst) {
		if (new)
			dst_release(&rt->dst);

		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	if (new)
		__sk_dst_set(sk, &rt->dst);

out:
	bh_unlock_sock(sk);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);

void ipv4_redirect(struct sk_buff *skb, struct net *net,
		   int oif, u32 mark, u8 protocol, int flow_flags)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(&fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, mark, flow_flags);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_redirect);

void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
	rt = __ip_route_output_key(sock_net(sk), &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rtable *rt = (struct rtable *) dst;

	/* All IPV4 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 *
	 * When a PMTU/redirect information update invalidates a route,
	 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
	 * DST_OBSOLETE_DEAD by dst_free().
	 */
	if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
		return NULL;
	return dst;
}

static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

	rt = skb_rtable(skb);
	if (rt)
		dst_set_expires(&rt->dst, 0);
}

static int ip_rt_bug(struct sk_buff *skb)
{
	pr_debug("%s: %pI4 -> %pI4, %s\n",
		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		 skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	WARN_ON(1);
	return 0;
}

/*
   We do not cache source address of outgoing interface,
   because it is used only by IP RR, TS and SRR options,
   so that it out of fast path.

   BTW remember: "addr" is allowed to be not aligned
   in IP options!
 */

void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
	__be32 src;

	if (rt_is_output_route(rt))
		src = ip_hdr(skb)->saddr;
	else {
		struct fib_result res;
		struct flowi4 fl4;
		struct iphdr *iph;

		iph = ip_hdr(skb);

		memset(&fl4, 0, sizeof(fl4));
		fl4.daddr = iph->daddr;
		fl4.saddr = iph->saddr;
		fl4.flowi4_tos = RT_TOS(iph->tos);
		fl4.flowi4_oif = rt->dst.dev->ifindex;
		fl4.flowi4_iif = skb->dev->ifindex;
		fl4.flowi4_mark = skb->mark;

		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
			src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
		else
			src = inet_select_addr(rt->dst.dev,
					       rt_nexthop(rt, iph->daddr),
					       RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}

#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->dst.tclassid & 0xFFFF))
		rt->dst.tclassid |= tag & 0xFFFF;
	if (!(rt->dst.tclassid & 0xFFFF0000))
		rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif

static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
	unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);

	if (advmss == 0) {
		advmss = max_t(unsigned int, dst->dev->mtu - 40,
			       ip_rt_min_advmss);
		if (advmss > 65535 - 40)
			advmss = 65535 - 40;
	}
	return advmss;
}

static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *) dst;
	unsigned int mtu = rt->rt_pmtu;

	if (!mtu || time_after_eq(jiffies, rt->dst.expires))
		mtu = dst_metric_raw(dst, RTAX_MTU);

	if (mtu)
		return mtu;

	mtu = dst->dev->mtu;

	if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
		if (rt->rt_uses_gateway && mtu > 576)
			mtu = 576;
	}

	return min_t(unsigned int, mtu, IP_MAX_MTU);
}

static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
{
	struct fnhe_hash_bucket *hash = nh->nh_exceptions;
	struct fib_nh_exception *fnhe;
	u32 hval;

	if (!hash)
		return NULL;

	hval = fnhe_hashfun(daddr);

	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			return fnhe;
	}
	return NULL;
}

static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
			      __be32 daddr)
{
	bool ret = false;

	spin_lock_bh(&fnhe_lock);

	if (daddr == fnhe->fnhe_daddr) {
		struct rtable __rcu **porig;
		struct rtable *orig;
		int genid = fnhe_genid(dev_net(rt->dst.dev));

		if (rt_is_input_route(rt))
			porig = &fnhe->fnhe_rth_input;
		else
			porig = &fnhe->fnhe_rth_output;
		orig = rcu_dereference(*porig);

		if (fnhe->fnhe_genid != genid) {
			fnhe->fnhe_genid = genid;
			fnhe->fnhe_gw = 0;
			fnhe->fnhe_pmtu = 0;
			fnhe->fnhe_expires = 0;
			fnhe_flush_routes(fnhe);
			orig = NULL;
		}
		fill_route_from_fnhe(rt, fnhe);
		if (!rt->rt_gateway)
			rt->rt_gateway = daddr;

		if (!(rt->dst.flags & DST_NOCACHE)) {
			rcu_assign_pointer(*porig, rt);
			if (orig)
				rt_free(orig);
			ret = true;
		}

		fnhe->fnhe_stamp = jiffies;
	}
	spin_unlock_bh(&fnhe_lock);

	return ret;
}

static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
{
	struct rtable *orig, *prev, **p;
	bool ret = true;

	if (rt_is_input_route(rt)) {
		p = (struct rtable **)&nh->nh_rth_input;
	} else {
		p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output);
	}
	orig = *p;

	prev = cmpxchg(p, orig, rt);
	if (prev == orig) {
		if (orig)
			rt_free(orig);
	} else
		ret = false;

	return ret;
}

static DEFINE_SPINLOCK(rt_uncached_lock);
static LIST_HEAD(rt_uncached_list);

static void rt_add_uncached_list(struct rtable *rt)
{
	spin_lock_bh(&rt_uncached_lock);
	list_add_tail(&rt->rt_uncached, &rt_uncached_list);
	spin_unlock_bh(&rt_uncached_lock);
}

static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *) dst;

	if (!list_empty(&rt->rt_uncached)) {
		spin_lock_bh(&rt_uncached_lock);
		list_del(&rt->rt_uncached);
		spin_unlock_bh(&rt_uncached_lock);
	}
}

void rt_flush_dev(struct net_device *dev)
{
	if (!list_empty(&rt_uncached_list)) {
		struct net *net = dev_net(dev);
		struct rtable *rt;

		spin_lock_bh(&rt_uncached_lock);
		list_for_each_entry(rt, &rt_uncached_list, rt_uncached) {
			if (rt->dst.dev != dev)
				continue;
			rt->dst.dev = net->loopback_dev;
			dev_hold(rt->dst.dev);
			dev_put(dev);
		}
		spin_unlock_bh(&rt_uncached_lock);
	}
}

static bool rt_cache_valid(const struct rtable *rt)
{
	return	rt &&
		rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
		!rt_is_expired(rt);
}

static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
			   const struct fib_result *res,
			   struct fib_nh_exception *fnhe,
			   struct fib_info *fi, u16 type, u32 itag)
{
	bool cached = false;

	if (fi) {
		struct fib_nh *nh = &FIB_RES_NH(*res);

		if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) {
			rt->rt_gateway = nh->nh_gw;
			rt->rt_uses_gateway = 1;
		}
		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
#ifdef CONFIG_IP_ROUTE_CLASSID
		rt->dst.tclassid = nh->nh_tclassid;
#endif
		if (unlikely(fnhe))
			cached = rt_bind_exception(rt, fnhe, daddr);
		else if (!(rt->dst.flags & DST_NOCACHE))
			cached = rt_cache_route(nh, rt);
		if (unlikely(!cached)) {
			/* Routes we intend to cache in nexthop exception or
			 * FIB nexthop have the DST_NOCACHE bit clear.
			 * However, if we are unsuccessful at storing this
			 * route into the cache we really need to set it.
			 */
			rt->dst.flags |= DST_NOCACHE;
			if (!rt->rt_gateway)
				rt->rt_gateway = daddr;
			rt_add_uncached_list(rt);
		}
	} else
		rt_add_uncached_list(rt);

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, res->tclassid);
#endif
	set_class_tag(rt, itag);
#endif
}

static struct rtable *rt_dst_alloc(struct net_device *dev,
				   bool nopolicy, bool noxfrm, bool will_cache)
{
	return dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
			 (will_cache ? 0 : (DST_HOST | DST_NOCACHE)) |
			 (nopolicy ? DST_NOPOLICY : 0) |
			 (noxfrm ? DST_NOXFRM : 0));
}

/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
				u8 tos, struct net_device *dev, int our)
{
	struct rtable *rth;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	u32 itag = 0;
	int err;

	/* Primary sanity checks. */

	if (in_dev == NULL)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
		if (ipv4_is_loopback(saddr))
			goto e_inval;

	if (ipv4_is_zeronet(saddr)) {
		if (!ipv4_is_local_multicast(daddr))
			goto e_inval;
	} else {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, &itag);
		if (err < 0)
			goto e_err;
	}
	rth = rt_dst_alloc(dev_net(dev)->loopback_dev,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
	if (!rth)
		goto e_nobufs;

#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->dst.output = ip_rt_bug;

	rth->rt_genid	= rt_genid_ipv4(dev_net(dev));
	rth->rt_flags	= RTCF_MULTICAST;
	rth->rt_type	= RTN_MULTICAST;
	rth->rt_is_input= 1;
	rth->rt_iif	= 0;
	rth->rt_pmtu	= 0;
	rth->rt_gateway	= 0;
	rth->rt_uses_gateway = 0;
	INIT_LIST_HEAD(&rth->rt_uncached);
	if (our) {
		rth->dst.input= ip_local_deliver;
		rth->rt_flags |= RTCF_LOCAL;
	}

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	skb_dst_set(skb, &rth->dst);
	return 0;

e_nobufs:
	return -ENOBUFS;
e_inval:
	return -EINVAL;
e_err:
	return err;
}


static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 *	RFC1812 recommendation, if source is martian,
		 *	the only hint is MAC header.
		 */
		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
			&daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			print_hex_dump(KERN_WARNING, "ll header: ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       skb_mac_header(skb),
				       dev->hard_header_len, true);
		}
	}
#endif
}

/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
			   const struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos)
{
	struct fib_nh_exception *fnhe;
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	unsigned int flags = 0;
	bool do_cache;
	u32 itag;

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
	if (out_dev == NULL) {
		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
		return -EINVAL;
	}

	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, in_dev, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		goto cleanup;
	}

	do_cache = res->fi && !itag;
	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
	    (IN_DEV_SHARED_MEDIA(out_dev) ||
	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) {
		flags |= RTCF_DOREDIRECT;
		do_cache = false;
	}

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create route, if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 *
		 * Proxy arp feature have been extended to allow, ARP
		 * replies back to the same interface, to support
		 * Private VLAN switch technologies. See arp.c.
		 */
		if (out_dev == in_dev &&
		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	fnhe = find_exception(&FIB_RES_NH(*res), daddr);
	if (do_cache) {
		if (fnhe != NULL)
			rth = rcu_dereference(fnhe->fnhe_rth_input);
		else
			rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);

		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			goto out;
		}
	}

	rth = rt_dst_alloc(out_dev->dev,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	rth->rt_genid = rt_genid_ipv4(dev_net(rth->dst.dev));
	rth->rt_flags = flags;
	rth->rt_type = res->type;
	rth->rt_is_input = 1;
	rth->rt_iif 	= 0;
	rth->rt_pmtu	= 0;
	rth->rt_gateway	= 0;
	rth->rt_uses_gateway = 0;
	INIT_LIST_HEAD(&rth->rt_uncached);

	rth->dst.input = ip_forward;
	rth->dst.output = ip_output;

	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag);
	skb_dst_set(skb, &rth->dst);
out:
	err = 0;
 cleanup:
	return err;
}

static int ip_mkroute_input(struct sk_buff *skb,
			    struct fib_result *res,
			    const struct flowi4 *fl4,
			    struct in_device *in_dev,
			    __be32 daddr, __be32 saddr, u32 tos)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi && res->fi->fib_nhs > 1)
		fib_select_multipath(res);
#endif

	/* create a routing cache entry */
	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}

/*
 *	NOTE. We drop all the packets that has local source
 *	addresses, because every properly looped back packet
 *	must have correct destination already attached by output routine.
 *
 *	Such approach solves two big problems:
 *	1. Not simplex devices are handled properly.
 *	2. IP spoofing attempts are filtered with 100% of guarantee.
 *	called with rcu_read_lock()
 */

static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			       u8 tos, struct net_device *dev)
{
	struct fib_result res;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct flowi4	fl4;
	unsigned int	flags = 0;
	u32		itag = 0;
	struct rtable	*rth;
	int		err = -EINVAL;
	struct net    *net = dev_net(dev);
	bool do_cache;

	/* IP on this device is disabled. */

	if (!in_dev)
		goto out;

	/* Check for the most weird martians, which can be not detected
	   by fib_lookup.
	 */

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
		goto martian_source;

	res.fi = NULL;
	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
		goto brd_input;

	/* Accept zero addresses only for limited broadcast;
	 * I do not even know whether to fix this or not. Waiting for complaints :-)
	 */
	if (ipv4_is_zeronet(saddr))
		goto martian_source;

	if (ipv4_is_zeronet(daddr))
		goto martian_destination;

	/* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET()
	 * more than once when daddr and/or saddr are loopback addresses.
	 */
	if (ipv4_is_loopback(daddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_destination;
	} else if (ipv4_is_loopback(saddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_source;
	}

	/*
	 *	Now we are ready to route the packet.
	 */
	fl4.flowi4_oif = 0;
	fl4.flowi4_iif = dev->ifindex;
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_tos = tos;
	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	err = fib_lookup(net, &fl4, &res);
	if (err != 0)
		goto no_route;

	RT_CACHE_STAT_INC(in_slow_tot);

	if (res.type == RTN_BROADCAST)
		goto brd_input;

	if (res.type == RTN_LOCAL) {
		err = fib_validate_source(skb, saddr, daddr, tos,
					  LOOPBACK_IFINDEX,
					  dev, in_dev, &itag);
		if (err < 0)
			goto martian_source_keep_err;
		goto local_input;
	}

	if (!IN_DEV_FORWARD(in_dev))
		goto no_route;
	if (res.type != RTN_UNICAST)
		goto martian_destination;

	err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
out:	return err;

brd_input:
	if (skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (!ipv4_is_zeronet(saddr)) {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, &itag);
		if (err < 0)
			goto martian_source_keep_err;
	}
	flags |= RTCF_BROADCAST;
	res.type = RTN_BROADCAST;
	RT_CACHE_STAT_INC(in_brd);

local_input:
	do_cache = false;
	if (res.fi) {
		if (!itag) {
			rth = rcu_dereference(FIB_RES_NH(res).nh_rth_input);
			if (rt_cache_valid(rth)) {
				skb_dst_set_noref(skb, &rth->dst);
				err = 0;
				goto out;
			}
			do_cache = true;
		}
	}

	rth = rt_dst_alloc(net->loopback_dev,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
	if (!rth)
		goto e_nobufs;

	rth->dst.input = ip_local_deliver;
	rth->dst.output = ip_rt_bug;
#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif

	rth->rt_genid = rt_genid_ipv4(net);
	rth->rt_flags	= flags|RTCF_LOCAL;
	rth->rt_type	= res.type;
	rth->rt_is_input = 1;
	rth->rt_iif	= 0;
	rth->rt_pmtu	= 0;
	rth->rt_gateway	= 0;
	rth->rt_uses_gateway = 0;
	INIT_LIST_HEAD(&rth->rt_uncached);
	if (res.type == RTN_UNREACHABLE) {
		rth->dst.input = ip_error;
		rth->dst.error = -err;
		rth->rt_flags	&= ~RTCF_LOCAL;
	}
	if (do_cache)
		rt_cache_route(&FIB_RES_NH(res), rth);
	skb_dst_set(skb, &rth->dst);
	err = 0;
	goto out;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	res.type = RTN_UNREACHABLE;
	if (err == -ESRCH)
		err = -ENETUNREACH;
	goto local_input;

	/*
	 *	Do not cache martian addresses: they should be logged (RFC1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev))
		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
				     &daddr, &saddr, dev->name);
#endif

e_inval:
	err = -EINVAL;
	goto out;

e_nobufs:
	err = -ENOBUFS;
	goto out;

martian_source:
	err = -EINVAL;
martian_source_keep_err:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto out;
}

int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			 u8 tos, struct net_device *dev)
{
	int res;

	rcu_read_lock();

	/* Multicast recognition logic is moved from the route cache to here.
	   The problem was that too many Ethernet cards have broken/missing
	   hardware multicast filters :-( As a result, a host on a multicast
	   network acquires a lot of useless route cache entries, sort of
	   SDR messages from all over the world. Now we try to get rid of them.
	   Really, provided the software IP multicast filter is organized
	   reasonably (at least, hashed), it does not result in a slowdown
	   compared with route cache reject entries.
	   Note that multicast routers are not affected, because a
	   route cache entry is created eventually.
	 */
	if (ipv4_is_multicast(daddr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		if (in_dev) {
			int our = ip_check_mc_rcu(in_dev, daddr, saddr,
						  ip_hdr(skb)->protocol);
			if (our
#ifdef CONFIG_IP_MROUTE
				||
			    (!ipv4_is_local_multicast(daddr) &&
			     IN_DEV_MFORWARD(in_dev))
#endif
			   ) {
				int res = ip_route_input_mc(skb, daddr, saddr,
							    tos, dev, our);
				rcu_read_unlock();
				return res;
			}
		}
		rcu_read_unlock();
		return -EINVAL;
	}
	res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL(ip_route_input_noref);
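
/*
 * Usage sketch (for illustration, not a caller in this file): a receive-path
 * caller such as ip_rcv_finish() typically resolves the input route with
 * something like
 *
 *	const struct iphdr *iph = ip_hdr(skb);
 *	int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
 *				       iph->tos, skb->dev);
 *	if (unlikely(err))
 *		goto drop;
 *
 * The _noref variant may attach a cached dst to the skb without taking a
 * reference, so it is only suitable for callers that do not keep the dst
 * beyond the packet's processing in the current RCU-protected context.
 */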

/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
				       const struct flowi4 *fl4, int orig_oif,
				       struct net_device *dev_out,
				       unsigned int flags)
{
	struct fib_info *fi = res->fi;
	struct fib_nh_exception *fnhe;
	struct in_device *in_dev;
	u16 type = res->type;
	struct rtable *rth;
	bool do_cache;

	in_dev = __in_dev_get_rcu(dev_out);
	if (!in_dev)
		return ERR_PTR(-EINVAL);

	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
		if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
			return ERR_PTR(-EINVAL);

	if (ipv4_is_lbcast(fl4->daddr))
		type = RTN_BROADCAST;
	else if (ipv4_is_multicast(fl4->daddr))
		type = RTN_MULTICAST;
	else if (ipv4_is_zeronet(fl4->daddr))
		return ERR_PTR(-EINVAL);

	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	do_cache = true;
	if (type == RTN_BROADCAST) {
		flags |= RTCF_BROADCAST | RTCF_LOCAL;
		fi = NULL;
	} else if (type == RTN_MULTICAST) {
		flags |= RTCF_MULTICAST | RTCF_LOCAL;
		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
				     fl4->flowi4_proto))
			flags &= ~RTCF_LOCAL;
		else
			do_cache = false;
		/* If a multicast route does not exist, use
		 * the default one, but do not gateway in this case.
		 * Yes, it is a hack.
		 */
		if (fi && res->prefixlen < 4)
			fi = NULL;
	}

	fnhe = NULL;
	do_cache &= fi != NULL;
	if (do_cache) {
		struct rtable __rcu **prth;
		struct fib_nh *nh = &FIB_RES_NH(*res);

		fnhe = find_exception(nh, fl4->daddr);
		if (fnhe)
			prth = &fnhe->fnhe_rth_output;
		else {
			if (unlikely(fl4->flowi4_flags &
				     FLOWI_FLAG_KNOWN_NH &&
				     !(nh->nh_gw &&
				       nh->nh_scope == RT_SCOPE_LINK))) {
				do_cache = false;
				goto add;
			}
			prth = __this_cpu_ptr(nh->nh_pcpu_rth_output);
		}
		rth = rcu_dereference(*prth);
		if (rt_cache_valid(rth)) {
			dst_hold(&rth->dst);
			return rth;
		}
	}

add:
	rth = rt_dst_alloc(dev_out,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(in_dev, NOXFRM),
			   do_cache);
	if (!rth)
		return ERR_PTR(-ENOBUFS);

	rth->dst.output = ip_output;

	rth->rt_genid = rt_genid_ipv4(dev_net(dev_out));
	rth->rt_flags	= flags;
	rth->rt_type	= type;
	rth->rt_is_input = 0;
	rth->rt_iif	= orig_oif ? : 0;
	rth->rt_pmtu	= 0;
	rth->rt_gateway = 0;
	rth->rt_uses_gateway = 0;
	INIT_LIST_HEAD(&rth->rt_uncached);

	RT_CACHE_STAT_INC(out_slow_tot);

	if (flags & RTCF_LOCAL)
		rth->dst.input = ip_local_deliver;
	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		if (flags & RTCF_LOCAL &&
		    !(dev_out->flags & IFF_LOOPBACK)) {
			rth->dst.output = ip_mc_output;
			RT_CACHE_STAT_INC(out_slow_mc);
		}
#ifdef CONFIG_IP_MROUTE
		if (type == RTN_MULTICAST) {
			if (IN_DEV_MFORWARD(in_dev) &&
			    !ipv4_is_local_multicast(fl4->daddr)) {
				rth->dst.input = ip_mr_input;
				rth->dst.output = ip_mc_output;
			}
		}
#endif
	}

	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);

	return rth;
}

/*
 * Major route resolver routine.
 */

struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
{
	struct net_device *dev_out = NULL;
	__u8 tos = RT_FL_TOS(fl4);
	unsigned int flags = 0;
	struct fib_result res;
	struct rtable *rth;
	int orig_oif;

	res.tclassid	= 0;
	res.fi		= NULL;
	res.table	= NULL;

	orig_oif = fl4->flowi4_oif;

	fl4->flowi4_iif = LOOPBACK_IFINDEX;
	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
			 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

	rcu_read_lock();
	if (fl4->saddr) {
		rth = ERR_PTR(-EINVAL);
		if (ipv4_is_multicast(fl4->saddr) ||
		    ipv4_is_lbcast(fl4->saddr) ||
		    ipv4_is_zeronet(fl4->saddr))
			goto out;

		/* I removed the check for oif == dev_out->oif here.
		   It was wrong for two reasons:
		   1. ip_dev_find(net, saddr) can return the wrong iface, if saddr
		      is assigned to multiple interfaces.
		   2. Moreover, we are allowed to send packets with the saddr
		      of another iface. --ANK
		 */

		if (fl4->flowi4_oif == 0 &&
		    (ipv4_is_multicast(fl4->daddr) ||
		     ipv4_is_lbcast(fl4->daddr))) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = __ip_dev_find(net, fl4->saddr, false);
			if (dev_out == NULL)
				goto out;

			/* Special hack: a user can direct multicasts
			   and limited broadcast via the necessary interface
			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			   This hack is not just for fun, it allows
			   vic, vat and friends to work.
			   They bind a socket to loopback, set ttl to zero
			   and expect that it will work.
			   From the viewpoint of the routing cache they are broken,
			   because we are not allowed to build a multicast path
			   with a loopback source addr (look, the routing cache
			   cannot know that ttl is zero, so the packet
			   will not leave this host and the route is valid).
			   Luckily, this hack is a good workaround.
			 */

			fl4->flowi4_oif = dev_out->ifindex;
			goto make_route;
		}

		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			if (!__ip_dev_find(net, fl4->saddr, false))
				goto out;
		}
	}


	if (fl4->flowi4_oif) {
		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
		rth = ERR_PTR(-ENODEV);
		if (dev_out == NULL)
			goto out;

		/* RACE: Check return value of inet_select_addr instead. */
		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
			rth = ERR_PTR(-ENETUNREACH);
			goto out;
		}
		if (ipv4_is_local_multicast(fl4->daddr) ||
		    ipv4_is_lbcast(fl4->daddr)) {
			if (!fl4->saddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			goto make_route;
		}
		if (!fl4->saddr) {
			if (ipv4_is_multicast(fl4->daddr))
				fl4->saddr = inet_select_addr(dev_out, 0,
							      fl4->flowi4_scope);
			else if (!fl4->daddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_HOST);
		}
	}

	if (!fl4->daddr) {
		fl4->daddr = fl4->saddr;
		if (!fl4->daddr)
			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
		dev_out = net->loopback_dev;
		fl4->flowi4_oif = LOOPBACK_IFINDEX;
		res.type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	if (fib_lookup(net, fl4, &res)) {
		res.fi = NULL;
		res.table = NULL;
		if (fl4->flowi4_oif) {
			/* Apparently, the routing tables are wrong. Assume
			   that the destination is on-link.

			   WHY? DW.
			   Because we are allowed to send to an iface
			   even if it has NO routes and NO assigned
			   addresses. When oif is specified, routing
			   tables are looked up with only one purpose:
			   to catch whether the destination is gatewayed rather
			   than direct. Moreover, if MSG_DONTROUTE is set,
			   we send the packet, ignoring both routing tables
			   and ifaddr state. --ANK

			   We could do this even if oif is unknown
			   (IPv6 likely does), but we do not.
			 */

			if (fl4->saddr == 0)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res.type = RTN_UNICAST;
			goto make_route;
		}
		rth = ERR_PTR(-ENETUNREACH);
		goto out;
	}

	if (res.type == RTN_LOCAL) {
		if (!fl4->saddr) {
			if (res.fi->fib_prefsrc)
				fl4->saddr = res.fi->fib_prefsrc;
			else
				fl4->saddr = fl4->daddr;
		}
		dev_out = net->loopback_dev;
		fl4->flowi4_oif = dev_out->ifindex;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
		fib_select_multipath(&res);
	else
#endif
	if (!res.prefixlen &&
	    res.table->tb_num_default > 1 &&
	    res.type == RTN_UNICAST && !fl4->flowi4_oif)
		fib_select_default(&res);

	if (!fl4->saddr)
		fl4->saddr = FIB_RES_PREFSRC(net, res);

	dev_out = FIB_RES_DEV(res);
	fl4->flowi4_oif = dev_out->ifindex;


make_route:
	rth = __mkroute_output(&res, fl4, orig_oif, dev_out, flags);

out:
	rcu_read_unlock();
	return rth;
}
EXPORT_SYMBOL_GPL(__ip_route_output_key);
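
/*
 * Usage sketch (for illustration, not a caller in this file): output routes
 * are normally looked up by filling a flowi4 key and calling this resolver,
 * roughly what the ip_route_output_ports() helper does:
 *
 *	struct flowi4 fl4;
 *	struct rtable *rt;
 *
 *	flowi4_init_output(&fl4, oif, mark, tos, RT_SCOPE_UNIVERSE,
 *			   proto, 0, daddr, saddr, dport, sport);
 *	rt = __ip_route_output_key(net, &fl4);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *
 * On success the returned rtable carries a dst reference that the caller
 * must drop with ip_rt_put() when it is done with the route.
 */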

static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}

static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					  struct sk_buff *skb, u32 mtu)
{
}

static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				       struct sk_buff *skb)
{
}

static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
					  unsigned long old)
{
	return NULL;
}

static struct dst_ops ipv4_dst_blackhole_ops = {
	.family			=	AF_INET,
	.protocol		=	cpu_to_be16(ETH_P_IP),
	.check			=	ipv4_blackhole_dst_check,
	.mtu			=	ipv4_blackhole_mtu,
	.default_advmss		=	ipv4_default_advmss,
	.update_pmtu		=	ipv4_rt_blackhole_update_pmtu,
	.redirect		=	ipv4_rt_blackhole_redirect,
	.cow_metrics		=	ipv4_rt_blackhole_cow_metrics,
	.neigh_lookup		=	ipv4_neigh_lookup,
};
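
/*
 * A blackhole route is a copy of an existing dst that keeps the original's
 * device and routing metadata but has both input and output handlers set
 * to dst_discard, so anything later queued on it is silently dropped.  The
 * reference on the original dst is released and the blackhole copy is
 * returned in its place (or an ERR_PTR on allocation failure).
 */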

struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rtable *ort = (struct rtable *) dst_orig;
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
	if (rt) {
		struct dst_entry *new = &rt->dst;

		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard;

		new->dev = ort->dst.dev;
		if (new->dev)
			dev_hold(new->dev);

		rt->rt_is_input = ort->rt_is_input;
		rt->rt_iif = ort->rt_iif;
		rt->rt_pmtu = ort->rt_pmtu;

		rt->rt_genid = rt_genid_ipv4(net);
		rt->rt_flags = ort->rt_flags;
		rt->rt_type = ort->rt_type;
		rt->rt_gateway = ort->rt_gateway;
		rt->rt_uses_gateway = ort->rt_uses_gateway;

		INIT_LIST_HEAD(&rt->rt_uncached);

		dst_free(new);
	}

	dst_release(dst_orig);

	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
}
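
/*
 * ip_route_output_flow() below resolves the route for the flow via
 * __ip_route_output_key() and, when the flow carries a transport protocol,
 * passes the result through xfrm_lookup() so that IPsec policy can
 * transform or reject it.  An ERR_PTR is returned on failure.
 */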

struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
				    struct sock *sk)
{
	struct rtable *rt = __ip_route_output_key(net, flp4);

	if (IS_ERR(rt))
		return rt;

	if (flp4->flowi4_proto)
		rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
						   flowi4_to_flowi(flp4),
						   sk, 0);

	return rt;
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);

static int rt_fill_info(struct net *net,  __be32 dst, __be32 src,
			struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
			u32 seq, int event, int nowait, unsigned int flags)
{
	struct rtable *rt = skb_rtable(skb);
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	unsigned long expires = 0;
	u32 error;
	u32 metrics[RTAX_MAX];

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family	 = AF_INET;
	r->rtm_dst_len	= 32;
	r->rtm_src_len	= 0;
	r->rtm_tos	= fl4->flowi4_tos;
	r->rtm_table	= RT_TABLE_MAIN;
	if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
		goto nla_put_failure;
	r->rtm_type	= rt->rt_type;
	r->rtm_scope	= RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;

	if (nla_put_be32(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (src) {
		r->rtm_src_len = 32;
		if (nla_put_be32(skb, RTA_SRC, src))
			goto nla_put_failure;
	}
	if (rt->dst.dev &&
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
		goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (rt->dst.tclassid &&
	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
		goto nla_put_failure;
#endif
	if (!rt_is_input_route(rt) &&
	    fl4->saddr != src) {
		if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr))
			goto nla_put_failure;
	}
	if (rt->rt_uses_gateway &&
	    nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
		goto nla_put_failure;

	expires = rt->dst.expires;
	if (expires) {
		unsigned long now = jiffies;

		if (time_before(now, expires))
			expires -= now;
		else
			expires = 0;
	}

	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
	if (rt->rt_pmtu && expires)
		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
	if (rtnetlink_put_metrics(skb, metrics) < 0)
		goto nla_put_failure;

	if (fl4->flowi4_mark &&
	    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
		goto nla_put_failure;

	error = rt->dst.error;

	if (rt_is_input_route(rt)) {
#ifdef CONFIG_IP_MROUTE
		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
			int err = ipmr_get_route(net, skb,
						 fl4->saddr, fl4->daddr,
						 r, nowait);
			if (err <= 0) {
				if (!nowait) {
					if (err == 0)
						return 0;
					goto nla_put_failure;
				} else {
					if (err == -EMSGSIZE)
						goto nla_put_failure;
					error = err;
				}
			}
		} else
#endif
			if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
				goto nla_put_failure;
	}

	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
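
/*
 * RTM_GETROUTE handler (registered against PF_INET in ip_rt_init() below):
 * it resolves a single route for the netlink request and answers with one
 * RTM_NEWROUTE message built by rt_fill_info().  This is the path
 * exercised by, for example, "ip route get <addr>" from userspace.
 */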

static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(in_skb->sk);
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	struct rtable *rt = NULL;
	struct flowi4 fl4;
	__be32 dst = 0;
	__be32 src = 0;
	u32 iif;
	int err;
	int mark;
	struct sk_buff *skb;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL) {
		err = -ENOBUFS;
		goto errout;
	}

	/* Reserve room for dummy headers, this skb can pass
	   through a good chunk of the routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);

	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
	ip_hdr(skb)->protocol = IPPROTO_ICMP;
	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));

	src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = dst;
	fl4.saddr = src;
	fl4.flowi4_tos = rtm->rtm_tos;
	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
	fl4.flowi4_mark = mark;

	if (iif) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, iif);
		if (dev == NULL) {
			err = -ENODEV;
			goto errout_free;
		}

		skb->protocol	= htons(ETH_P_IP);
		skb->dev	= dev;
		skb->mark	= mark;
		local_bh_disable();
		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
		local_bh_enable();

		rt = skb_rtable(skb);
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;
	} else {
		rt = ip_route_output_key(net, &fl4);

		err = 0;
		if (IS_ERR(rt))
			err = PTR_ERR(rt);
	}

	if (err)
		goto errout_free;

	skb_dst_set(skb, &rt->dst);
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = rt_fill_info(net, dst, src, &fl4, skb,
			   NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
			   RTM_NEWROUTE, 0, 0);
	if (err <= 0)
		goto errout_free;

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;

errout_free:
	kfree_skb(skb);
	goto errout;
}

int ip_rt_dump(struct sk_buff *skb,  struct netlink_callback *cb)
{
	return skb->len;
}

void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev));
}

#ifdef CONFIG_SYSCTL
static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
static int ip_rt_gc_elasticity __read_mostly	= 8;

static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
					void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	struct net *net = (struct net *)__ctl->extra1;

	if (write) {
		rt_cache_flush(net);
		fnhe_genid_bump(net);
		return 0;
	}

	return -EINVAL;
}
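
/*
 * The handler above backs the write-only per-netns "flush" sysctl
 * registered below (/proc/sys/net/ipv4/route/flush): writing any value,
 * e.g. "echo 1 > /proc/sys/net/ipv4/route/flush", invalidates that
 * namespace's cached routes by bumping the route and fnhe generation
 * counters.
 */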

static struct ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/*  Deprecated. Use gc_min_interval_ms */

		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table ipv4_route_flush_table[] = {
	{
		.procname	= "flush",
		.maxlen		= sizeof(int),
		.mode		= 0200,
Alexey Dobriyan's avatar
Alexey Dobriyan committed
2619
		.proc_handler	= ipv4_sysctl_rtcache_flush,
2620
	},
2621
	{ },
2622 2623 2624 2625 2626 2627 2628
};

static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
2629
	if (!net_eq(net, &init_net)) {
2630 2631 2632
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (tbl == NULL)
			goto err_dup;
2633 2634 2635 2636

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			tbl[0].procname = NULL;
2637 2638 2639
	}
	tbl[0].extra1 = net;

2640
	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665
	if (net->ipv4.route_hdr == NULL)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
Linus Torvalds's avatar
Linus Torvalds committed
2666 2667
#endif

2668
static __net_init int rt_genid_init(struct net *net)
2669
{
fan.du's avatar
fan.du committed
2670
	atomic_set(&net->ipv4.rt_genid, 0);
2671
	atomic_set(&net->fnhe_genid, 0);
2672 2673
	get_random_bytes(&net->ipv4.dev_addr_genid,
			 sizeof(net->ipv4.dev_addr_genid));
2674 2675 2676
	return 0;
}

2677 2678
static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
2679 2680
};

2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696
static int __net_init ipv4_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv4.peers = bp;
	return 0;
}

static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv4.peers;

	net->ipv4.peers = NULL;
2697
	inetpeer_invalidate_tree(bp);
2698 2699 2700 2701 2702 2703 2704
	kfree(bp);
}

static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
	.init	=	ipv4_inetpeer_init,
	.exit	=	ipv4_inetpeer_exit,
};
2705

2706
#ifdef CONFIG_IP_ROUTE_CLASSID
2707
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
2708
#endif /* CONFIG_IP_ROUTE_CLASSID */
Linus Torvalds's avatar
Linus Torvalds committed
2709 2710 2711

int __init ip_rt_init(void)
{
2712
	int rc = 0;
Linus Torvalds's avatar
Linus Torvalds committed
2713

2714
#ifdef CONFIG_IP_ROUTE_CLASSID
2715
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
Linus Torvalds's avatar
Linus Torvalds committed
2716 2717 2718 2719
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

Alexey Dobriyan's avatar
Alexey Dobriyan committed
2720 2721
	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
2722
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
Linus Torvalds's avatar
Linus Torvalds committed
2723

2724 2725
	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

2726 2727 2728 2729 2730 2731
	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

David S. Miller's avatar
David S. Miller committed
2732 2733
	ipv4_dst_ops.gc_thresh = ~0;
	ip_rt_max_size = INT_MAX;
Linus Torvalds's avatar
Linus Torvalds committed
2734 2735 2736 2737

	devinet_init();
	ip_fib_init();

2738
	if (ip_rt_proc_init())
2739
		pr_err("Unable to create route proc files\n");
Linus Torvalds's avatar
Linus Torvalds committed
2740 2741
#ifdef CONFIG_XFRM
	xfrm_init();
2742
	xfrm4_init();
Linus Torvalds's avatar
Linus Torvalds committed
2743
#endif
2744
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);
2745

2746 2747 2748
#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
2749
	register_pernet_subsys(&rt_genid_ops);
2750
	register_pernet_subsys(&ipv4_inetpeer_ops);
Linus Torvalds's avatar
Linus Torvalds committed
2751 2752 2753
	return rc;
}

2754
#ifdef CONFIG_SYSCTL
2755 2756 2757 2758 2759 2760
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
2761
	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
2762
}
2763
#endif