// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux NET3: IP/IP protocol decoder modified to support
 *		    virtual tunnel interface
 *
 *	Authors:
 *		Saurabh Mohan (saurabh.mohan@vyatta.com) 05/07/2012
 */

/*
   This version of net/ipv4/ip_vti.c is cloned of net/ipv4/ipip.c

   For comments look at net/ipv4/ip_gre.c --ANK
 */


#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>
#include <linux/icmpv6.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/ip_tunnels.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static struct rtnl_link_ops vti_link_ops __read_mostly;

static unsigned int vti_net_id __read_mostly;
static int vti_tunnel_init(struct net_device *dev);

/* vti_input - common receive path for VTI tunnel traffic.
 * @skb: received packet
 * @nexthdr: IPsec protocol number of the payload (ESP/AH/IPCOMP/IPIP)
 * @spi: SPI handed to xfrm_input() (0 lets xfrm derive it from the skb)
 * @encap_type: UDP encapsulation type, 0 for none
 * @update_skb_dev: if true, retarget skb->dev at the tunnel device
 *                  before xfrm processing
 *
 * Looks up the tunnel by the outer source/destination addresses; on a
 * match the packet is policy-checked and fed into xfrm_input().
 *
 * Returns the xfrm_input() result, -EINVAL when no tunnel matches the
 * outer header, or 0 after dropping the skb on a policy failure.
 */
static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
		     int encap_type, bool update_skb_dev)
{
	struct ip_tunnel *tunnel;
	const struct iphdr *iph = ip_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn = net_generic(net, vti_net_id);

	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
				  iph->saddr, iph->daddr, 0);
	if (tunnel) {
		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;

		/* Remember the owning tunnel for vti_rcv_cb(). */
		XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;

		if (update_skb_dev)
			skb->dev = tunnel->dev;

		return xfrm_input(skb, nexthdr, spi, encap_type);
	}

	return -EINVAL;
drop:
	kfree_skb(skb);
	return 0;
}

/* .input_handler for the xfrm4 protocols: receive without retargeting
 * skb->dev (vti_rcv_cb() switches the device later).
 */
static int vti_input_proto(struct sk_buff *skb, int nexthdr, __be32 spi,
			   int encap_type)
{
	return vti_input(skb, nexthdr, spi, encap_type, false);
}

/* Tag the skb for IPv4 SPI parsing by the xfrm layer, then feed it to
 * vti_input() using the outer header's protocol field as nexthdr.
 */
static int vti_rcv(struct sk_buff *skb, __be32 spi, bool update_skb_dev)
{
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);

	return vti_input(skb, ip_hdr(skb)->protocol, spi, 0, update_skb_dev);
}

/* .handler for the ESP/AH/IPCOMP xfrm4 protocols: SPI 0 (derive from
 * the packet), keep the receiving skb->dev.
 */
static int vti_rcv_proto(struct sk_buff *skb)
{
	return vti_rcv(skb, 0, false);
}

/* vti_rcv_cb - xfrm callback invoked after the IPsec transform ran.
 * @skb: decapsulated packet
 * @err: transform result; non-zero means the transform failed
 *
 * Re-checks inbound policy with the tunnel's i_key substituted as the
 * skb mark, scrubs the packet when it crosses a netns boundary,
 * retargets it at the tunnel device and bumps the per-CPU RX stats.
 *
 * Returns 1 when no tunnel was recorded for this skb (not ours),
 * 0 on success or after accounting an error, or a negative errno.
 */
static int vti_rcv_cb(struct sk_buff *skb, int err)
{
	unsigned short family;
	struct net_device *dev;
	struct pcpu_sw_netstats *tstats;
	struct xfrm_state *x;
	const struct xfrm_mode *inner_mode;
	struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4;
	u32 orig_mark = skb->mark;
	int ret;

	if (!tunnel)
		return 1;

	dev = tunnel->dev;

	if (err) {
		dev->stats.rx_errors++;
		dev->stats.rx_dropped++;

		return 0;
	}

	x = xfrm_input_state(skb);

	inner_mode = &x->inner_mode;

	/* Wildcard selector: the state does not pin the inner family, so
	 * derive the mode from the decapsulated protocol instead.
	 */
	if (x->sel.family == AF_UNSPEC) {
		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
		if (inner_mode == NULL) {
			XFRM_INC_STATS(dev_net(skb->dev),
				       LINUX_MIB_XFRMINSTATEMODEERROR);
			return -EINVAL;
		}
	}

	family = inner_mode->family;

	/* The policy lookup keys on skb->mark; temporarily substitute the
	 * tunnel's input key and restore the caller's mark afterwards.
	 */
	skb->mark = be32_to_cpu(tunnel->parms.i_key);
	ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
	skb->mark = orig_mark;

	if (!ret)
		return -EPERM;

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(skb->dev)));
	skb->dev = dev;

	tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	return 0;
}

152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172
static bool vti_state_check(const struct xfrm_state *x, __be32 dst, __be32 src)
{
	xfrm_address_t *daddr = (xfrm_address_t *)&dst;
	xfrm_address_t *saddr = (xfrm_address_t *)&src;

	/* if there is no transform then this tunnel is not functional.
	 * Or if the xfrm is not mode tunnel.
	 */
	if (!x || x->props.mode != XFRM_MODE_TUNNEL ||
	    x->props.family != AF_INET)
		return false;

	if (!dst)
		return xfrm_addr_equal(saddr, &x->props.saddr, AF_INET);

	if (!xfrm_state_addr_check(x, daddr, saddr, AF_INET))
		return false;

	return true;
}

/* vti_xmit - push a packet through the xfrm stack and out.
 * @skb: packet to send
 * @dev: VTI device
 * @fl: flow describing the inner packet (filled by the caller)
 *
 * Resolves a route if the skb carries none, runs the xfrm bundle
 * lookup, validates the bundle's state against the tunnel endpoints,
 * enforces PMTU (emitting ICMP/ICMPv6 too-big errors), then hands the
 * packet to dst_output().  Always consumes the skb and returns
 * NETDEV_TX_OK; failures are accounted in dev->stats.
 */
static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
			    struct flowi *fl)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_parm *parms = &tunnel->parms;
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *tdev;	/* Device to other host */
	int pkt_len = skb->len;
	int err;
	int mtu;

	if (!dst) {
		/* No cached route on the skb: resolve one per family. */
		switch (skb->protocol) {
		case htons(ETH_P_IP): {
			struct rtable *rt;

			fl->u.ip4.flowi4_oif = dev->ifindex;
			fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
			rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
			if (IS_ERR(rt)) {
				dev->stats.tx_carrier_errors++;
				goto tx_error_icmp;
			}
			dst = &rt->dst;
			skb_dst_set(skb, dst);
			break;
		}
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			fl->u.ip6.flowi6_oif = dev->ifindex;
			fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
			dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
			if (dst->error) {
				dst_release(dst);
				dst = NULL;
				dev->stats.tx_carrier_errors++;
				goto tx_error_icmp;
			}
			skb_dst_set(skb, dst);
			break;
#endif
		default:
			dev->stats.tx_carrier_errors++;
			goto tx_error_icmp;
		}
	}

	/* Extra reference for the bundle lookup; the skb keeps its own. */
	dst_hold(dst);
	dst = xfrm_lookup(tunnel->net, dst, fl, NULL, 0);
	if (IS_ERR(dst)) {
		dev->stats.tx_carrier_errors++;
		goto tx_error_icmp;
	}

	/* Without a matching tunnel-mode state the VTI is not usable. */
	if (!vti_state_check(dst->xfrm, parms->iph.daddr, parms->iph.saddr)) {
		dev->stats.tx_carrier_errors++;
		dst_release(dst);
		goto tx_error_icmp;
	}

	tdev = dst->dev;

	/* Routing back out of ourselves would loop forever. */
	if (tdev == dev) {
		dst_release(dst);
		dev->stats.collisions++;
		goto tx_error;
	}

	mtu = dst_mtu(dst);
	if (skb->len > mtu) {
		skb_dst_update_pmtu_no_confirm(skb, mtu);
		if (skb->protocol == htons(ETH_P_IP)) {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		} else {
			if (mtu < IPV6_MIN_MTU)
				mtu = IPV6_MIN_MTU;

			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		}

		dst_release(dst);
		goto tx_error;
	}

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
	skb_dst_set(skb, dst);
	skb->dev = skb_dst(skb)->dev;

	err = dst_output(tunnel->net, skb->sk, skb);
	/* Success: report the byte count instead of the xmit code. */
	if (net_xmit_eval(err) == 0)
		err = pkt_len;
	iptunnel_xmit_stats(dev, err);
	return NETDEV_TX_OK;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	dev->stats.tx_errors++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* This function assumes it is being called from dev_queue_xmit()
 * and that skb is filled properly by that function.
 *
 * Decodes the inner packet into a flowi, overrides the flow mark with
 * the tunnel's output key, and hands off to vti_xmit().  Always
 * consumes the skb.
 */
static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct flowi fl;

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	memset(&fl, 0, sizeof(fl));

	/* Derive the flow from the inner header and clear the per-layer
	 * control block before the packet re-enters the stack.
	 */
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		xfrm_decode_session(skb, &fl, AF_INET);
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		break;
	case htons(ETH_P_IPV6):
		xfrm_decode_session(skb, &fl, AF_INET6);
		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
		break;
	default:
		goto tx_err;
	}

	/* override mark with tunnel output key */
	fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key);

	return vti_xmit(skb, dev, &fl);

tx_err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

313 314 315
static int vti4_err(struct sk_buff *skb, u32 info)
{
	__be32 spi;
316
	__u32 mark;
317 318 319 320 321 322 323 324 325 326 327 328 329 330 331
	struct xfrm_state *x;
	struct ip_tunnel *tunnel;
	struct ip_esp_hdr *esph;
	struct ip_auth_hdr *ah ;
	struct ip_comp_hdr *ipch;
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	int protocol = iph->protocol;
	struct ip_tunnel_net *itn = net_generic(net, vti_net_id);

	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
				  iph->daddr, iph->saddr, 0);
	if (!tunnel)
		return -1;

332 333
	mark = be32_to_cpu(tunnel->parms.o_key);

334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360
	switch (protocol) {
	case IPPROTO_ESP:
		esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
		spi = esph->spi;
		break;
	case IPPROTO_AH:
		ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
		spi = ah->spi;
		break;
	case IPPROTO_COMP:
		ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
		spi = htonl(ntohs(ipch->cpi));
		break;
	default:
		return 0;
	}

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

361
	x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
362 363 364 365 366
			      spi, protocol, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
367
		ipv4_update_pmtu(skb, net, info, 0, protocol);
368
	else
369
		ipv4_redirect(skb, net, 0, protocol);
370 371 372 373 374
	xfrm_state_put(x);

	return 0;
}

/* vti_tunnel_ctl - backend for the SIOC{ADD,CHG,DEL,GET}TUNNEL ioctls.
 *
 * Validates the user-supplied header template for add/change, clears
 * keys whose GRE_KEY flag is absent, forces the VTI_ISVTI flag around
 * the generic ip_tunnel_ctl() call, and re-advertises GRE_KEY to
 * userspace on non-delete commands.  Returns 0 or a negative errno.
 */
static int
vti_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
{
	int err = 0;

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		/* Only a plain 20-byte IPv4/IPIP header template is valid. */
		if (p->iph.version != 4 || p->iph.protocol != IPPROTO_IPIP ||
		    p->iph.ihl != 5)
			return -EINVAL;
	}

	/* Keys are meaningful only when their GRE_KEY flag is set. */
	if (!(p->i_flags & GRE_KEY))
		p->i_key = 0;
	if (!(p->o_flags & GRE_KEY))
		p->o_key = 0;

	p->i_flags = VTI_ISVTI;

	err = ip_tunnel_ctl(dev, p, cmd);
	if (err)
		return err;

	if (cmd != SIOCDELTUNNEL) {
		/* Report the keys as present back to userspace. */
		p->i_flags |= GRE_KEY;
		p->o_flags |= GRE_KEY;
	}
	return 0;
}

/* Netdev ops: generic ip_tunnel helpers everywhere except transmit and
 * the tunnel ioctl backend, which are VTI-specific.
 */
static const struct net_device_ops vti_netdev_ops = {
	.ndo_init	= vti_tunnel_init,
	.ndo_uninit	= ip_tunnel_uninit,
	.ndo_start_xmit	= vti_tunnel_xmit,
	.ndo_do_ioctl	= ip_tunnel_ioctl,
	.ndo_change_mtu	= ip_tunnel_change_mtu,
	.ndo_get_stats64 = ip_tunnel_get_stats64,
	.ndo_get_iflink = ip_tunnel_get_iflink,
	.ndo_tunnel_ctl	= vti_tunnel_ctl,
};

/* Basic netdev configuration shared by all VTI devices; the generic
 * per-net tunnel wiring is done by ip_tunnel_setup().
 */
static void vti_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &vti_netdev_ops;
	dev->type		= ARPHRD_TUNNEL;
	ip_tunnel_setup(dev, vti_net_id);
}

/* .ndo_init: derive the link-layer identity from the tunnel endpoints
 * and finish with the generic ip_tunnel_init().
 */
static int vti_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	/* dev_addr/broadcast carry the 4-byte tunnel IPv4 addresses. */
	memcpy(dev->dev_addr, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags		= IFF_NOARP;
	dev->addr_len		= 4;
	dev->features		|= NETIF_F_LLTX;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

438
static void __net_init vti_fb_tunnel_init(struct net_device *dev)
439 440 441 442 443 444 445 446 447
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	iph->version		= 4;
	iph->protocol		= IPPROTO_IPIP;
	iph->ihl		= 5;
}

/* xfrm4 protocol hooks for ESP; registered in vti_init(). */
static struct xfrm4_protocol vti_esp4_protocol __read_mostly = {
	.handler	=	vti_rcv_proto,
	.input_handler	=	vti_input_proto,
	.cb_handler	=	vti_rcv_cb,
	.err_handler	=	vti4_err,
	.priority	=	100,
};

/* xfrm4 protocol hooks for AH; registered in vti_init(). */
static struct xfrm4_protocol vti_ah4_protocol __read_mostly = {
	.handler	=	vti_rcv_proto,
	.input_handler	=	vti_input_proto,
	.cb_handler	=	vti_rcv_cb,
	.err_handler	=	vti4_err,
	.priority	=	100,
};

/* xfrm4 protocol hooks for IPCOMP; registered in vti_init(). */
static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = {
	.handler	=	vti_rcv_proto,
	.input_handler	=	vti_input_proto,
	.cb_handler	=	vti_rcv_cb,
	.err_handler	=	vti4_err,
	.priority	=	100,
};

#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL)
/* Receive hook for IPIP payloads carried over IPsec.  IPIP has no SPI
 * of its own, so the outer source address is passed in the SPI slot.
 */
static int vti_rcv_tunnel(struct sk_buff *skb)
{
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);

	return vti_input(skb, IPPROTO_IPIP, ip_hdr(skb)->saddr, 0, false);
}

/* xfrm tunnel hooks for IPIP; registered in vti_init() for AF_INET
 * (and AF_INET6 when IPv6 is enabled).
 */
static struct xfrm_tunnel vti_ipip_handler __read_mostly = {
	.handler	=	vti_rcv_tunnel,
	.cb_handler	=	vti_rcv_cb,
	.err_handler	=	vti4_err,
	.priority	=	0,
};
#endif

/* Per-netns init: set up the tunnel bookkeeping and, when a fallback
 * device was created, fill in its IP header template.
 */
static int __net_init vti_init_net(struct net *net)
{
	int err;
	struct ip_tunnel_net *itn;

	err = ip_tunnel_init_net(net, vti_net_id, &vti_link_ops, "ip_vti0");
	if (err)
		return err;
	itn = net_generic(net, vti_net_id);
	if (itn->fb_tunnel_dev)
		vti_fb_tunnel_init(itn->fb_tunnel_dev);
	return 0;
}

/* Per-netns teardown: delete every VTI device in the dying namespaces. */
static void __net_exit vti_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, vti_net_id, &vti_link_ops);
}

/* Per-network-namespace lifecycle hooks for the VTI driver. */
static struct pernet_operations vti_net_ops = {
	.init = vti_init_net,
	.exit_batch = vti_exit_batch_net,
	.id   = &vti_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

/* Netlink validation hook: vti_policy already constrains every
 * attribute, so there is nothing further to check here.
 */
static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
			       struct netlink_ext_ack *extack)
{
	return 0;
}

/* Translate IFLA_VTI_* netlink attributes into tunnel parameters.
 * @data may be NULL (no attributes supplied); *@fwmark is only written
 * when an IFLA_VTI_FWMARK attribute is present.
 */
static void vti_netlink_parms(struct nlattr *data[],
			      struct ip_tunnel_parm *parms,
			      __u32 *fwmark)
{
	struct nlattr *attr;

	memset(parms, 0, sizeof(*parms));
	parms->iph.protocol = IPPROTO_IPIP;

	if (!data)
		return;

	parms->i_flags = VTI_ISVTI;

	attr = data[IFLA_VTI_LINK];
	if (attr)
		parms->link = nla_get_u32(attr);

	attr = data[IFLA_VTI_IKEY];
	if (attr)
		parms->i_key = nla_get_be32(attr);

	attr = data[IFLA_VTI_OKEY];
	if (attr)
		parms->o_key = nla_get_be32(attr);

	attr = data[IFLA_VTI_LOCAL];
	if (attr)
		parms->iph.saddr = nla_get_in_addr(attr);

	attr = data[IFLA_VTI_REMOTE];
	if (attr)
		parms->iph.daddr = nla_get_in_addr(attr);

	attr = data[IFLA_VTI_FWMARK];
	if (attr)
		*fwmark = nla_get_u32(attr);
}

static int vti_newlink(struct net *src_net, struct net_device *dev,
554 555
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
556
{
557
	struct ip_tunnel_parm parms;
558
	__u32 fwmark = 0;
559

560 561
	vti_netlink_parms(data, &parms, &fwmark);
	return ip_tunnel_newlink(dev, tb, &parms, fwmark);
562 563 564
}

static int vti_changelink(struct net_device *dev, struct nlattr *tb[],
565 566
			  struct nlattr *data[],
			  struct netlink_ext_ack *extack)
567
{
568 569
	struct ip_tunnel *t = netdev_priv(dev);
	__u32 fwmark = t->fwmark;
570 571
	struct ip_tunnel_parm p;

572 573
	vti_netlink_parms(data, &p, &fwmark);
	return ip_tunnel_changelink(dev, tb, &p, fwmark);
574 575 576 577 578 579 580 581 582 583 584 585 586 587 588
}

/* Worst-case netlink dump size for one VTI device. */
static size_t vti_get_size(const struct net_device *dev)
{
	size_t total = 0;

	total += nla_total_size(4);	/* IFLA_VTI_LINK */
	total += nla_total_size(4);	/* IFLA_VTI_IKEY */
	total += nla_total_size(4);	/* IFLA_VTI_OKEY */
	total += nla_total_size(4);	/* IFLA_VTI_LOCAL */
	total += nla_total_size(4);	/* IFLA_VTI_REMOTE */
	total += nla_total_size(4);	/* IFLA_VTI_FWMARK */

	return total;
}

static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

599 600 601 602 603 604 605
	if (nla_put_u32(skb, IFLA_VTI_LINK, p->link) ||
	    nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_VTI_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_VTI_REMOTE, p->iph.daddr) ||
	    nla_put_u32(skb, IFLA_VTI_FWMARK, t->fwmark))
		return -EMSGSIZE;
606 607 608 609 610 611 612 613

	return 0;
}

/* Netlink attribute policy: addresses are validated by length, the
 * rest as 32-bit values.
 */
static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
	[IFLA_VTI_LINK]		= { .type = NLA_U32 },
	[IFLA_VTI_IKEY]		= { .type = NLA_U32 },
	[IFLA_VTI_OKEY]		= { .type = NLA_U32 },
	[IFLA_VTI_LOCAL]	= { .len = sizeof_field(struct iphdr, saddr) },
	[IFLA_VTI_REMOTE]	= { .len = sizeof_field(struct iphdr, daddr) },
	[IFLA_VTI_FWMARK]	= { .type = NLA_U32 },
};

/* rtnl_link glue for "ip link ... type vti" management. */
static struct rtnl_link_ops vti_link_ops __read_mostly = {
	.kind		= "vti",
	.maxtype	= IFLA_VTI_MAX,
	.policy		= vti_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= vti_tunnel_setup,
	.validate	= vti_tunnel_validate,
	.newlink	= vti_newlink,
	.changelink	= vti_changelink,
	.dellink        = ip_tunnel_dellink,
	.get_size	= vti_get_size,
	.fill_info	= vti_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

/* Module init: register the per-net ops, the ESP/AH/IPCOMP protocol
 * handlers, optionally the IPIP tunnel handler, and finally the
 * netlink link ops.  'msg' names the stage currently being registered
 * so the failure message can identify it; on error everything
 * registered so far is unwound in reverse order.
 */
static int __init vti_init(void)
{
	const char *msg;
	int err;

	pr_info("IPv4 over IPsec tunneling driver\n");

	msg = "tunnel device";
	err = register_pernet_device(&vti_net_ops);
	if (err < 0)
		goto pernet_dev_failed;

	msg = "tunnel protocols";
	err = xfrm4_protocol_register(&vti_esp4_protocol, IPPROTO_ESP);
	if (err < 0)
		goto xfrm_proto_esp_failed;
	err = xfrm4_protocol_register(&vti_ah4_protocol, IPPROTO_AH);
	if (err < 0)
		goto xfrm_proto_ah_failed;
	err = xfrm4_protocol_register(&vti_ipcomp4_protocol, IPPROTO_COMP);
	if (err < 0)
		goto xfrm_proto_comp_failed;

#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL)
	msg = "ipip tunnel";
	err = xfrm4_tunnel_register(&vti_ipip_handler, AF_INET);
	if (err < 0)
		goto xfrm_tunnel_ipip_failed;
#if IS_ENABLED(CONFIG_IPV6)
	err = xfrm4_tunnel_register(&vti_ipip_handler, AF_INET6);
	if (err < 0)
		goto xfrm_tunnel_ipip6_failed;
#endif
#endif

	msg = "netlink interface";
	err = rtnl_link_register(&vti_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return err;

	/* Error unwind: labels run in reverse registration order. */
rtnl_link_failed:
#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL)
#if IS_ENABLED(CONFIG_IPV6)
	xfrm4_tunnel_deregister(&vti_ipip_handler, AF_INET6);
xfrm_tunnel_ipip6_failed:
#endif
	xfrm4_tunnel_deregister(&vti_ipip_handler, AF_INET);
xfrm_tunnel_ipip_failed:
#endif
	xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
xfrm_proto_comp_failed:
	xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
	xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
xfrm_proto_esp_failed:
	unregister_pernet_device(&vti_net_ops);
pernet_dev_failed:
	pr_err("vti init: failed to register %s\n", msg);
	return err;
}

/* Module exit: unregister everything in reverse order of vti_init(). */
static void __exit vti_fini(void)
{
	rtnl_link_unregister(&vti_link_ops);
#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL)
#if IS_ENABLED(CONFIG_IPV6)
	xfrm4_tunnel_deregister(&vti_ipip_handler, AF_INET6);
#endif
	xfrm4_tunnel_deregister(&vti_ipip_handler, AF_INET);
#endif
	xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
	xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
	xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
	unregister_pernet_device(&vti_net_ops);
}

/* Module registration and aliases for userspace auto-loading. */
module_init(vti_init);
module_exit(vti_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("vti");
MODULE_ALIAS_NETDEV("ip_vti0");