94 #include <linux/capability.h>
95 #include <linux/module.h>
96 #include <linux/types.h>
97 #include <linux/kernel.h>
98 #include <linux/slab.h>
99 #include <asm/uaccess.h>
101 #include <linux/netdevice.h>
102 #include <linux/in.h>
103 #include <linux/tcp.h>
104 #include <linux/udp.h>
105 #include <linux/if_arp.h>
106 #include <linux/mroute.h>
108 #include <linux/netfilter_ipv4.h>
109 #include <linux/if_ether.h>
/*
 * Hash an IPv4 address into one of 16 tunnel hash buckets: XOR the two
 * low nibbles of the address together and mask to 4 bits.
 *
 * Fix: the argument is now fully parenthesized.  The original expanded
 * "addr" bare, so e.g. HASH(a ^ b) mis-bound — "(__force u32)addr>>4"
 * became "((u32)a) ^ (b >> 4)" by cast/shift precedence.
 * NOTE: "addr" is still evaluated twice; pass only side-effect-free
 * expressions (kernel callers pass plain locals).
 */
#define HASH(addr) (((__force u32)(addr)^((__force u32)(addr)>>4))&0xF)
/*
 * Module knob: when true, complain about packets whose ECN bits were
 * damaged in transit (presumably consulted around IP_ECN_decapsulate()
 * in the receive path — confirm against the full file).
 */
static bool log_ecn_error = true;
/*
 * Walk one hash-bucket chain of tunnels under RCU protection, fetching
 * each link with rcu_dereference().
 * NOTE: deliberately relies on a cursor variable "t" already declared in
 * the caller's scope (pre-consolidation kernel idiom) — not hygienic, so
 * only use inside functions that provide "t".
 */
#define for_each_ip_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
169 start = u64_stats_fetch_begin_bh(&tstats->
syncp);
174 }
while (u64_stats_fetch_retry_bh(&tstats->
syncp, start));
195 unsigned int h0 =
HASH(remote);
196 unsigned int h1 =
HASH(local);
201 if (local == t->
parms.iph.saddr &&
222 __be32 remote = parms->iph.daddr;
223 __be32 local = parms->iph.saddr;
235 return &ipn->tunnels[
prio][
h];
241 return __ipip_bucket(ipn, &t->
parms);
249 for (tp = ipip_bucket(ipn, t);
267 static struct ip_tunnel *ipip_tunnel_locate(
struct net *net,
278 for (tp = __ipip_bucket(ipn, parms);
281 if (local == t->
parms.iph.saddr && remote == t->
parms.iph.daddr)
292 dev =
alloc_netdev(
sizeof(*t), name, ipip_tunnel_setup);
296 dev_net_set(dev, net);
298 nt = netdev_priv(dev);
301 if (ipip_tunnel_init(dev) < 0)
310 ipip_tunnel_link(ipn, nt);
319 static void ipip_tunnel_uninit(
struct net_device *dev)
321 struct net *net = dev_net(dev);
327 ipip_tunnel_unlink(ipn, netdev_priv(dev));
339 const int type = icmp_hdr(skb)->type;
340 const int code = icmp_hdr(skb)->code;
372 t = ipip_tunnel_lookup(dev_net(skb->
dev), iph->
daddr, iph->
saddr);
390 if (t->
parms.iph.daddr == 0)
407 static int ipip_rcv(
struct sk_buff *skb)
410 const struct iphdr *iph = ip_hdr(skb);
413 tunnel = ipip_tunnel_lookup(dev_net(skb->
dev), iph->
saddr, iph->
daddr);
414 if (tunnel != NULL) {
423 skb_reset_network_header(skb);
427 __skb_tunnel_rx(skb, tunnel->
dev);
429 err = IP_ECN_decapsulate(iph, skb);
435 ++tunnel->
dev->stats.rx_frame_errors;
436 ++tunnel->
dev->stats.rx_errors;
442 u64_stats_update_begin(&tstats->
syncp);
445 u64_stats_update_end(&tstats->
syncp);
465 struct ip_tunnel *tunnel = netdev_priv(dev);
467 const struct iphdr *tiph = &tunnel->
parms.iph;
472 const struct iphdr *old_iph = ip_hdr(skb);
474 unsigned int max_headroom;
487 if ((rt = skb_rtable(skb)) == NULL) {
488 dev->
stats.tx_fifo_errors++;
491 dst = rt_nexthop(rt, old_iph->
daddr);
494 rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
500 dev->
stats.tx_carrier_errors++;
507 dev->
stats.collisions++;
514 mtu = dst_mtu(&rt->
dst) -
sizeof(
struct iphdr);
517 dev->
stats.collisions++;
523 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
538 dst_link_failure(skb);
548 if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
549 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
553 dev->
stats.tx_dropped++;
558 skb_set_owner_w(new_skb, skb->
sk);
561 old_iph = ip_hdr(skb);
566 skb_reset_network_header(skb);
571 skb_dst_set(skb, &rt->
dst);
579 iph->ihl =
sizeof(
struct iphdr)>>2;
582 iph->
tos = INET_ECN_encapsulate(tos, old_iph->
tos);
583 iph->
daddr = fl4.daddr;
584 iph->
saddr = fl4.saddr;
586 if ((iph->
ttl = tiph->
ttl) == 0)
595 dst_link_failure(skb);
597 dev->
stats.tx_errors++;
602 static void ipip_tunnel_bind_dev(
struct net_device *dev)
606 const struct iphdr *iph;
608 tunnel = netdev_priv(dev);
609 iph = &tunnel->
parms.iph;
615 rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
628 if (!tdev && tunnel->
parms.link)
644 struct net *net = dev_net(dev);
655 t = ipip_tunnel_locate(net, &
p, 0);
658 t = netdev_priv(dev);
695 t = netdev_priv(dev);
696 ipip_tunnel_unlink(ipn, t);
698 t->
parms.iph.saddr =
p.iph.saddr;
699 t->
parms.iph.daddr =
p.iph.daddr;
702 ipip_tunnel_link(ipn, t);
710 t->
parms.iph.ttl =
p.iph.ttl;
711 t->
parms.iph.tos =
p.iph.tos;
712 t->
parms.iph.frag_off =
p.iph.frag_off;
713 if (t->
parms.link !=
p.link) {
715 ipip_tunnel_bind_dev(dev);
735 if ((t = ipip_tunnel_locate(net, &
p, 0)) ==
NULL)
742 unregister_netdevice(dev);
754 static int ipip_tunnel_change_mtu(
struct net_device *dev,
int new_mtu)
756 if (new_mtu < 68 || new_mtu > 0xFFF8 -
sizeof(
struct iphdr))
763 .ndo_uninit = ipip_tunnel_uninit,
764 .ndo_start_xmit = ipip_tunnel_xmit,
765 .ndo_do_ioctl = ipip_tunnel_ioctl,
766 .ndo_change_mtu = ipip_tunnel_change_mtu,
767 .ndo_get_stats64 = ipip_get_stats64,
770 static void ipip_dev_free(
struct net_device *dev)
776 static void ipip_tunnel_setup(
struct net_device *dev)
792 static int ipip_tunnel_init(
struct net_device *dev)
794 struct ip_tunnel *tunnel = netdev_priv(dev);
801 ipip_tunnel_bind_dev(dev);
812 struct ip_tunnel *tunnel = netdev_priv(dev);
834 .err_handler = ipip_err,
839 KERN_INFO "IPv4 over IPv4 tunneling driver\n";
845 for (prio = 1; prio < 4; prio++) {
859 static int __net_init ipip_init_net(
struct net *net)
898 static void __net_exit ipip_exit_net(
struct net *net)
904 ipip_destroy_tunnels(ipn, &
list);
911 .init = ipip_init_net,
912 .exit = ipip_exit_net,
917 static int __init ipip_init(
void)
929 pr_info(
"%s: can't register tunnel\n", __func__);
934 static void __exit ipip_fini(
void)
937 pr_info(
"%s: can't deregister tunnel\n", __func__);