#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/netfilter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/netdevice.h>
#include <linux/icmpv6.h>
#include <asm/uaccess.h>
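/*
 * Each network namespace keeps an array of ICMPv6 control sockets,
 * one per CPU; icmpv6_sk() below returns the socket for the current
 * CPU, which is what the transmit paths in this file use.
 */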
static inline struct sock *icmpv6_sk(struct net *net)
static const struct inet6_protocol icmpv6_protocol = {
	.handler	= icmpv6_rcv,
	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
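/*
 * RFC 4443 forbids generating an ICMPv6 error in response to another
 * ICMPv6 error.  is_ineligible() skips past the extension headers of
 * the offending packet and reports whether it is itself an ICMPv6
 * error message (or is too mangled to tell).
 */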
static bool is_ineligible(const struct sk_buff *skb)
{
	int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
		tp = skb_header_pointer(skb,
			ptr + offsetof(struct icmp6hdr, icmp6_type),
			sizeof(_type), &_type);
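/*
 * icmpv6_xrlim_allow() enforces the outgoing error rate limit
 * (net.ipv6.icmp.ratelimit).  Informational messages and Packet Too
 * Big are never limited; other errors go through a per-destination
 * token bucket kept with the inet_peer entry for the route.
 */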
static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
				      struct flowi6 *fl6)
	struct net *net = sock_net(sk);

	int tmo = net->ipv6.sysctl.icmpv6_time;

	if (rt->rt6i_dst.plen < 128)
		tmo >>= ((128 - rt->rt6i_dst.plen)>>5);

	peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
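/*
 * opt_unrec() backs the Parameter Problem path: it re-reads the
 * offending option byte and checks its two high-order bits.  Only
 * options of the form 10xxxxxx ("discard and send ICMP even if the
 * destination was multicast") may trigger an unrecognized-option
 * error.
 */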
	offset += skb_network_offset(skb);
	op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
	if (op == NULL)
		return true;
	return (*op & 0xC0) == 0x80;
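/*
 * icmpv6_push_pending_frames() writes the ICMPv6 header into the
 * queued data, computes the checksum over the pseudo-header and
 * payload, and hands the queue to ip6_push_pending_frames() for
 * transmission.
 */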
static int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
				      struct icmp6hdr *thdr, int len)
	icmp6h = icmp6_hdr(skb);

	icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
					      len, fl6->flowi6_proto, skb->csum);

	tmp_csum = csum_add(tmp_csum, skb->csum);

	icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
					      len, fl6->flowi6_proto, tmp_csum);
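/*
 * icmpv6_getfrag() is the ip6_append_data() callback: it copies a
 * chunk of the offending packet into the outgoing message, folding
 * the copied bytes into skb->csum as it goes.
 */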
static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
	skb->csum = csum_block_add(skb->csum, csum, odd);
	if (!(msg->type & ICMPV6_INFOMSG_MASK))
		nf_ct_attach(skb, org_skb);
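/*
 * With Mobile IPv6 (CONFIG_IPV6_MIP6) the offending packet may carry
 * a Home Address destination option; mip6_addr_swap() swaps the
 * care-of address back with the home address so the error refers to
 * the address the sender actually used.  Without MIP6 it is a no-op.
 */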
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
static void mip6_addr_swap(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);

			hao = (struct ipv6_destopt_hao *)
					(skb_network_header(skb) + off);
#else
static inline void mip6_addr_swap(struct sk_buff *skb) {}
#endif
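/*
 * icmpv6_route_lookup() resolves the route for the error packet and
 * runs it through XFRM policy; if the direct lookup is refused it
 * retries with the flow reversed, so errors can still be sent for
 * traffic that only matches a reverse IPsec policy.
 */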
static struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb,
					     struct sock *sk, struct flowi6 *fl6)
	dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), sk, 0);

	if (PTR_ERR(dst) == -EPERM)

		err = xfrm_decode_session_reverse(skb, flowi6_to_flowi(&fl2),
						  AF_INET6);

			goto relookup_failed;

			goto relookup_failed;

			goto relookup_failed;
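/*
 * icmp6_send() builds and transmits an ICMPv6 error (Destination
 * Unreachable, Packet Too Big, Time Exceeded or Parameter Problem)
 * in response to the packet in skb, subject to the eligibility and
 * rate-limit checks above.
 */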
	struct net *net = dev_net(skb->dev);
	addr_type = ipv6_addr_type(&hdr->daddr);

		if (type != ICMPV6_PKT_TOOBIG &&
		    !(type == ICMPV6_PARAMPROB &&
		      code == ICMPV6_UNK_OPTION &&
		      (opt_unrec(skb, info))))
	addr_type = ipv6_addr_type(&hdr->saddr);

		iif = skb->dev->ifindex;
	if (is_ineligible(skb)) {

	memset(&fl6, 0, sizeof(fl6));

	fl6.flowi6_oif = iif;
	fl6.fl6_icmp_type = type;
	fl6.fl6_icmp_code = code;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
	sk = icmpv6_xmit_lock(net);

	if (!icmpv6_xrlim_allow(sk, type, &fl6))

	tmp_hdr.icmp6_pointer = htonl(info);

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))

	else if (!fl6.flowi6_oif)

	dst = icmpv6_route_lookup(net, skb, sk, &fl6);

	if (ipv6_addr_is_multicast(&fl6.daddr))

	msg.offset = skb_network_offset(skb);

		goto out_dst_release;

	idev = __in6_dev_get(skb->dev);
		err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
						 len + sizeof(struct icmp6hdr));

	icmpv6_xmit_unlock(sk);
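/*
 * icmpv6_echo_reply() answers an Echo Request: the reply reuses the
 * request's ICMPv6 header with the type changed to Echo Reply, is
 * addressed back to the sender, and is only generated for packets
 * that arrived as unicast.
 */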
static void icmpv6_echo_reply(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);

	struct icmp6hdr *icmph = icmp6_hdr(skb);
	saddr = &ipv6_hdr(skb)->daddr;

	if (!ipv6_unicast_destination(skb))

	memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));

	memset(&fl6, 0, sizeof(fl6));

	fl6.daddr = ipv6_hdr(skb)->saddr;

	fl6.flowi6_oif = skb->dev->ifindex;

	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
	sk = icmpv6_xmit_lock(net);

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))

	else if (!fl6.flowi6_oif)

	dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);

	if (ipv6_addr_is_multicast(&fl6.daddr))

	idev = __in6_dev_get(skb->dev);
		err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
						 skb->len + sizeof(struct icmp6hdr));

	icmpv6_xmit_unlock(sk);
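/*
 * icmpv6_notify() delivers a received ICMPv6 error to the protocol it
 * concerns: it skips the returned IPv6 header and any extension
 * headers, then calls the err_handler of the inet6_protocol
 * registered for the embedded upper-layer protocol.
 */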
	const struct inet6_protocol *ipprot;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
						&nexthdr, &frag_off);
	} else {
		inner_offset = sizeof(struct ipv6hdr);
	}
	if (!pskb_may_pull(skb, inner_offset+8))

	if (ipprot && ipprot->err_handler)
		ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
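/*
 * icmpv6_rcv() is the protocol handler registered in icmpv6_protocol
 * above.  It validates the ICMPv6 checksum, updates the MIB counters,
 * and dispatches on the message type: echo requests are answered
 * here, while errors are passed to icmpv6_notify() and on to the
 * owning transport protocol.
 */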
static int icmpv6_rcv(struct sk_buff *skb)

	struct inet6_dev *idev = __in6_dev_get(dev);
		if (!(sp && sp->xvec[sp->len - 1]->props.flags &
		      XFRM_STATE_ICMP))
		if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(struct ipv6hdr)))
		nh = skb_network_offset(skb);
		skb_set_network_header(skb, sizeof(*hdr));

		skb_set_network_header(skb, nh);

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	if (!pskb_pull(skb, sizeof(*hdr)))

	hdr = icmp6_hdr(skb);

		icmpv6_echo_reply(skb);

		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))

		hdr = icmp6_hdr(skb);
		if (type & ICMPV6_INFOMSG_MASK)
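/*
 * icmpv6_flow_init() fills in a flowi6 key for an ICMPv6 message so
 * callers (e.g. neighbour discovery) can route it: addresses, message
 * type and outgoing interface, plus the LSM flow classification.
 */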
	memset(fl6, 0, sizeof(*fl6));

	fl6->fl6_icmp_type = type;
	fl6->fl6_icmp_code = 0;
	fl6->flowi6_oif = oif;
	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
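/*
 * icmpv6_sk_init()/icmpv6_sk_exit() are the pernet operations that
 * create and tear down the per-CPU ICMPv6 control sockets for each
 * network namespace; on failure the sockets created so far are
 * destroyed again.
 */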
static int __net_init icmpv6_sk_init(struct net *net)

	if (net->ipv6.icmp_sk == NULL)
			pr_err("Failed to initialize the ICMP6 control socket (err %d)\n",
			       err);

		net->ipv6.icmp_sk[i] = sk;

		lockdep_set_class(&sk->sk_dst_lock,
				  &icmpv6_socket_sk_dst_lock_key);
	for (j = 0; j < i; j++)
		inet_ctl_sock_destroy(net->ipv6.icmp_sk[j]);
	kfree(net->ipv6.icmp_sk);
static void __net_exit icmpv6_sk_exit(struct net *net)

		inet_ctl_sock_destroy(net->ipv6.icmp_sk[i]);

	kfree(net->ipv6.icmp_sk);
	.init = icmpv6_sk_init,
	.exit = icmpv6_sk_exit,
		pr_err("Failed to register ICMP6 protocol\n");
static const struct icmp6_err {
	*err = tab_unreach[code].err;
	fatal = tab_unreach[code].fatal;
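/*
 * The per-namespace sysctl (net.ipv6.icmp.ratelimit) controls the
 * minimum interval between ICMPv6 errors; the template table below is
 * duplicated for each namespace and its data pointer rewired to that
 * namespace's icmpv6_time.
 */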
		.data		= &init_net.ipv6.sysctl.icmpv6_time,
		.maxlen		= sizeof(int),
	table = kmemdup(ipv6_icmp_table_template,
			sizeof(ipv6_icmp_table_template),
			GFP_KERNEL);

		table[0].data = &net->ipv6.sysctl.icmpv6_time;