22 #include <linux/capability.h>
23 #include <linux/module.h>
24 #include <linux/types.h>
25 #include <linux/kernel.h>
28 #include <linux/netdevice.h>
30 #include <linux/tcp.h>
31 #include <linux/udp.h>
32 #include <linux/if_arp.h>
33 #include <linux/mroute.h>
35 #include <linux/netfilter_ipv4.h>
36 #include <linux/if_ether.h>
/*
 * Hash a tunnel endpoint address (__be32) into the tunnel table:
 * fold the next nibble into the low bits, then mask with
 * HASH_SIZE - 1 (HASH_SIZE must be a power of two).
 *
 * NOTE: the argument is evaluated twice -- callers must not pass an
 * expression with side effects.  The argument is now parenthesized so
 * that expression arguments (e.g. HASH(a ^ b)) bind correctly inside
 * the casts; previously "(__force u32)addr" would have cast only the
 * first operand of such an expression (CERT PRE01-C).
 */
#define HASH(addr) (((__force u32)(addr)^((__force u32)(addr)>>4))&(HASH_SIZE-1))
/*
 * Iterate over an RCU-protected singly linked list of tunnels whose
 * head is @start, following the ->next pointers.
 *
 * NOTE: this macro deliberately relies on a variable named 't'
 * (struct ip_tunnel *) already declared in the enclosing scope; 't'
 * serves as both the loop cursor and the value visible in the body.
 * Because it uses rcu_dereference(), callers must be inside an RCU
 * read-side critical section (rcu_read_lock() held).
 */
#define for_each_ip_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
83 #define VTI_XMIT(stats1, stats2) do { \
85 int pkt_len = skb->len; \
86 err = dst_output(skb); \
87 if (net_xmit_eval(err) == 0) { \
88 u64_stats_update_begin(&(stats1)->syncp); \
89 (stats1)->tx_bytes += pkt_len; \
90 (stats1)->tx_packets++; \
91 u64_stats_update_end(&(stats1)->syncp); \
93 (stats2)->tx_errors++; \
94 (stats2)->tx_aborted_errors++; \
110 start = u64_stats_fetch_begin_bh(&tstats->
syncp);
115 }
while (u64_stats_fetch_retry_bh(&tstats->
syncp, start));
140 unsigned h0 =
HASH(remote);
141 unsigned h1 =
HASH(local);
146 if (local == t->
parms.iph.saddr &&
166 __be32 remote = parms->iph.daddr;
167 __be32 local = parms->iph.saddr;
179 return &ipn->tunnels[
prio][
h];
185 return __vti_bucket(ipn, &t->
parms);
193 for (tp = vti_bucket(ipn, t);
211 static struct ip_tunnel *vti_tunnel_locate(
struct net *net,
223 for (tp = __vti_bucket(ipn, parms);
226 if (local == t->
parms.iph.saddr && remote == t->
parms.iph.daddr)
241 dev_net_set(dev, net);
243 nt = netdev_priv(dev);
247 vti_tunnel_bind_dev(dev);
253 vti_tunnel_link(ipn, nt);
261 static void vti_tunnel_uninit(
struct net_device *dev)
263 struct net *net = dev_net(dev);
266 vti_tunnel_unlink(ipn, netdev_priv(dev));
278 const int type = icmp_hdr(skb)->type;
279 const int code = icmp_hdr(skb)->code;
307 t = vti_tunnel_lookup(dev_net(skb->
dev), iph->
daddr, iph->
saddr);
332 static int vti_rcv(
struct sk_buff *skb)
335 const struct iphdr *iph = ip_hdr(skb);
337 tunnel = vti_tunnel_lookup(dev_net(skb->
dev), iph->
saddr, iph->
daddr);
338 if (tunnel != NULL) {
345 u64_stats_update_begin(&tstats->
syncp);
348 u64_stats_update_end(&tstats->
syncp);
365 struct ip_tunnel *tunnel = netdev_priv(dev);
371 struct iphdr *old_iph = ip_hdr(skb);
380 memset(&fl4, 0,
sizeof(fl4));
381 flowi4_init_output(&fl4, tunnel->
parms.link,
385 dst, tiph->
saddr, 0, 0);
386 rt = ip_route_output_key(dev_net(dev), &fl4);
388 dev->
stats.tx_carrier_errors++;
396 dev->
stats.tx_carrier_errors++;
403 dev->
stats.collisions++;
411 dst_link_failure(skb);
419 skb_dst_set(skb, &rt->
dst);
421 skb->
dev = skb_dst(skb)->dev;
428 dst_link_failure(skb);
430 dev->
stats.tx_errors++;
435 static int vti_tunnel_bind_dev(
struct net_device *dev)
441 tunnel = netdev_priv(dev);
442 iph = &tunnel->
parms.iph;
447 memset(&fl4, 0,
sizeof(fl4));
448 flowi4_init_output(&fl4, tunnel->
parms.link,
453 rt = ip_route_output_key(dev_net(dev), &fl4);
461 if (!tdev && tunnel->
parms.link)
466 sizeof(
struct iphdr);
479 struct net *net = dev_net(dev);
491 t = vti_tunnel_locate(net, &
p, 0);
494 t = netdev_priv(dev);
533 t = netdev_priv(dev);
534 vti_tunnel_unlink(ipn, t);
536 t->
parms.iph.saddr =
p.iph.saddr;
537 t->
parms.iph.daddr =
p.iph.daddr;
543 vti_tunnel_link(ipn, t);
553 if (t->
parms.link !=
p.link) {
555 vti_tunnel_bind_dev(dev);
580 t = vti_tunnel_locate(net, &
p, 0);
588 unregister_netdevice(dev);
600 static int vti_tunnel_change_mtu(
struct net_device *dev,
int new_mtu)
602 if (new_mtu < 68 || new_mtu > 0xFFF8)
609 .ndo_init = vti_tunnel_init,
610 .ndo_uninit = vti_tunnel_uninit,
611 .ndo_start_xmit = vti_tunnel_xmit,
612 .ndo_do_ioctl = vti_tunnel_ioctl,
613 .ndo_change_mtu = vti_tunnel_change_mtu,
614 .ndo_get_stats64 = vti_get_stats64,
617 static void vti_dev_free(
struct net_device *dev)
623 static void vti_tunnel_setup(
struct net_device *dev)
639 static int vti_tunnel_init(
struct net_device *dev)
641 struct ip_tunnel *tunnel = netdev_priv(dev);
658 struct ip_tunnel *tunnel = netdev_priv(dev);
680 .err_handler = vti_err,
688 for (prio = 1; prio < 4; prio++) {
702 static int __net_init vti_init_net(
struct net *net)
738 static void __net_exit vti_exit_net(
struct net *net)
744 vti_destroy_tunnels(ipn, &
list);
750 .init = vti_init_net,
751 .exit = vti_exit_net,
753 .size =
sizeof(
struct vti_net),
761 static void vti_netlink_parms(
struct nlattr *
data[],
764 memset(parms, 0,
sizeof(*parms));
772 parms->
link = nla_get_u32(data[IFLA_VTI_LINK]);
775 parms->
i_key = nla_get_be32(data[IFLA_VTI_IKEY]);
778 parms->
o_key = nla_get_be32(data[IFLA_VTI_OKEY]);
781 parms->
iph.saddr = nla_get_be32(data[IFLA_VTI_LOCAL]);
784 parms->
iph.daddr = nla_get_be32(data[IFLA_VTI_REMOTE]);
788 static int vti_newlink(
struct net *src_net,
struct net_device *dev,
792 struct net *net = dev_net(dev);
797 nt = netdev_priv(dev);
798 vti_netlink_parms(data, &nt->
parms);
800 if (vti_tunnel_locate(net, &nt->
parms, 0))
803 mtu = vti_tunnel_bind_dev(dev);
812 vti_tunnel_link(ipn, nt);
822 struct net *net = dev_net(dev);
830 nt = netdev_priv(dev);
831 vti_netlink_parms(data, &
p);
833 t = vti_tunnel_locate(net, &
p, 0);
841 vti_tunnel_unlink(ipn, t);
842 t->
parms.iph.saddr =
p.iph.saddr;
843 t->
parms.iph.daddr =
p.iph.daddr;
850 vti_tunnel_link(ipn, t);
854 if (t->
parms.link !=
p.link) {
856 mtu = vti_tunnel_bind_dev(dev);
865 static size_t vti_get_size(
const struct net_device *dev)
886 nla_put_u32(skb, IFLA_VTI_LINK, p->
link);
887 nla_put_be32(skb, IFLA_VTI_IKEY, p->
i_key);
888 nla_put_be32(skb, IFLA_VTI_OKEY, p->
o_key);
889 nla_put_be32(skb, IFLA_VTI_LOCAL, p->
iph.saddr);
906 .policy = vti_policy,
908 .
setup = vti_tunnel_setup,
909 .validate = vti_tunnel_validate,
910 .newlink = vti_newlink,
911 .changelink = vti_changelink,
912 .get_size = vti_get_size,
913 .fill_info = vti_fill_info,
916 static int __init vti_init(
void)
920 pr_info(
"IPv4 over IPSec tunneling driver\n");
933 goto rtnl_link_failed;
943 static void __exit vti_fini(
void)
947 pr_info(
"vti close: can't deregister tunnel\n");