29 #include <linux/errno.h>
30 #include <linux/kernel.h>
31 #include <linux/string.h>
32 #include <linux/socket.h>
33 #include <linux/net.h>
34 #include <linux/netdevice.h>
35 #include <linux/if_arp.h>
36 #include <linux/in6.h>
37 #include <linux/tcp.h>
39 #include <linux/module.h>
40 #include <linux/slab.h>
42 #include <linux/netfilter.h>
43 #include <linux/netfilter_ipv6.h>
57 #include <linux/mroute6.h>
/*
 * NOTE(review): fragmentary excerpt — the leading integers ("68", "71",
 * "80") are original source line numbers and the gaps mean interior lines
 * are missing; this does not compile as-is. The visible code sets the IPv6
 * payload_len and hands the skb to dst_output() — presumably the
 * __ip6_local_out()/ip6_local_out() pair; confirm against the upstream file.
 */
68 ipv6_hdr(skb)->payload_len =
htons(len);
71 skb_dst(skb)->
dev, dst_output);
80 err = dst_output(skb);
/*
 * NOTE(review): fragments of ip6_finish_output2() and ip6_finish_output();
 * most interior lines (neighbour lookup, multicast loopback handling) are
 * elided — the embedded numbers are the original line numbers.
 */
86 static int ip6_finish_output2(
struct sk_buff *skb)
/* Visible: multicast-destination test on the IPv6 header's daddr. */
96 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->
daddr)) {
103 &ipv6_hdr(skb)->
saddr))) {
/* Delivery via the cached neighbour entry. */
129 return dst_neigh_output(dst, neigh, skb);
/*
 * ip6_finish_output(): the visible condition (len > path MTU and not GSO,
 * or dst_allfrag) presumably selects fragmentation; the true-branch is
 * elided here. Otherwise it falls through to ip6_finish_output2().
 */
137 static int ip6_finish_output(
struct sk_buff *skb)
139 if ((skb->
len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
140 dst_allfrag(skb_dst(skb)))
143 return ip6_finish_output2(skb);
/*
 * NOTE(review): fragments — line 149 appears to belong to ip6_output()
 * (idev lookup for stats), the rest to ip6_xmit(). Interior lines are
 * elided throughout; do not treat this as a complete function.
 */
149 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
169 struct net *
net = sock_net(sk);
175 int seg_len = skb->
len;
/*
 * Extension-header headroom accounting: head_room is added to seg_len
 * first, then grown by the IPv6 header size plus the device's link-layer
 * reserve before the headroom check — presumably inside an `if (opt)`
 * branch that is elided here.
 */
180 unsigned int head_room;
186 seg_len += head_room;
187 head_room +=
sizeof(
struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
189 if (skb_headroom(skb) < head_room) {
199 skb_set_owner_w(skb, sk);
208 skb_reset_network_header(skb);
226 hdr->
daddr = *first_hop;
/*
 * Fast path: packet fits the MTU, or the socket allows DF-less sends
 * (local_df), or it is GSO — count it as OUT and hand to dst_output().
 */
232 if ((skb->
len <= mtu) || skb->
local_df || skb_is_gso(skb)) {
234 IPSTATS_MIB_OUT, skb->
len);
236 dst->
dev, dst_output);
266 skb_reset_network_header(skb);
/*
 * NOTE(review): fragment of ip6_call_ra_chain(). Visible: per-socket
 * router-alert match (ra->sel == sel) plus the bound-device check
 * against the receiving skb's device. The list walk and delivery code
 * are elided.
 */
282 static int ip6_call_ra_chain(
struct sk_buff *skb,
int sel)
290 if (sk && ra->
sel == sel &&
291 (!sk->sk_bound_dev_if ||
292 sk->sk_bound_dev_if == skb->
dev->ifindex)) {
/*
 * NOTE(review): fragments of ip6_forward_proxy_check() and
 * ip6_forward_finish(). The pskb_may_pull() guard makes sure the byte at
 * `offset` past the network header is linear before it is read; the
 * surrounding parsing logic is elided.
 */
311 static int ip6_forward_proxy_check(
struct sk_buff *skb)
323 offset =
sizeof(
struct ipv6hdr);
328 if (!pskb_may_pull(skb, (skb_network_header(skb) +
329 offset + 1 - skb->
data)))
356 dst_link_failure(skb);
/* ip6_forward_finish(): trivially forwards the skb to dst_output(). */
363 static inline int ip6_forward_finish(
struct sk_buff *skb)
365 return dst_output(skb);
/*
 * NOTE(review): fragment of ip6_forward(). Only scattered lines survive —
 * forwarding-enabled check, LRO warning, router-alert dispatch, proxy-NDP
 * check, xfrm policy check, redirect-eligibility test, source-address
 * type check, MTU check, and the skb_cow() before header rewrite. All the
 * connective logic (hop-limit handling, ICMP error generation, redirect
 * emission) is elided.
 */
371 struct ipv6hdr *hdr = ipv6_hdr(skb);
376 if (net->ipv6.devconf_all->forwarding == 0)
379 if (skb_warn_if_lro(skb))
390 skb_forward_csum(skb);
/* Router-alert option: bytes ptr[2..3] form the 16-bit RA selector. */
406 u8 *
ptr = skb_network_header(skb) + opt->
ra;
407 if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
426 if (net->ipv6.devconf_all->proxy_ndp &&
428 int proxied = ip6_forward_proxy_check(skb);
431 else if (proxied < 0) {
438 if (!xfrm6_route_forward(skb)) {
/*
 * Same-device, no-source-route, no-sec-path: presumably the redirect
 * eligibility test — confirm against upstream before relying on this.
 */
448 if (skb->
dev == dst->
dev && opt->
srcrt == 0 && !skb_sec_path(skb)) {
462 target = &hdr->
daddr;
464 peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
474 int addrtype = ipv6_addr_type(&hdr->
saddr);
/* Oversize check: also honours a conntrack-recorded frag_max_size. */
491 if ((!skb->
local_df && skb->
len > mtu && !skb_is_gso(skb)) ||
492 (
IP6CB(skb)->frag_max_size &&
IP6CB(skb)->frag_max_size > mtu)) {
504 if (skb_cow(skb, dst->
dev->hard_header_len)) {
/*
 * NOTE(review): fragment of ip6_copy_metadata() — copies dst reference,
 * tc_index (under CONFIG_NET_SCHED), nf_trace (under the XT_TARGET_TRACE
 * configs, body elided) and secmark from one skb to another.
 */
533 skb_dst_set(to, dst_clone(skb_dst(from)));
537 #ifdef CONFIG_NET_SCHED
538 to->tc_index = from->tc_index;
541 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
542 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
545 skb_copy_secmark(to, from);
/*
 * NOTE(review): fragment — walks the IPv6 extension-header chain starting
 * from the fixed header's nexthdr field; presumably ip6_find_1stfragopt().
 * Loop body and the MIP6 special case are elided.
 */
555 *nexthdr = &ipv6_hdr(skb)->nexthdr;
557 while (offset + 1 <= packet_len) {
567 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
580 exthdr = (
struct ipv6_opt_hdr *)(skb_network_header(skb) +
/*
 * NOTE(review): fragment of the fragmentation-ID selection path
 * (presumably ipv6_select_ident()). Visible: the global atomic counter,
 * a per-destination inetpeer lookup, and the tail of a CAS retry loop
 * that advances ipv6_fragmentation_id from `old` to `new`.
 */
589 static atomic_t ipv6_fragmentation_id;
596 net = dev_net(rt->
dst.dev);
597 peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
609 }
while (
atomic_cmpxchg(&ipv6_fragmentation_id, old,
new) != old);
/*
 * NOTE(review): fragment of ip6_fragment(). The embedded numbers are the
 * original line numbers; large spans (error paths, the frag_hdr field
 * setup, the slow-path loop control) are elided. Two strategies are
 * visible: a fast path that reuses an existing frag_list when every
 * fragment already satisfies the size/alignment/headroom constraints,
 * and a slow path that allocates fresh skbs and copies data into them.
 */
620 unsigned int mtu, hlen,
left,
len;
623 int ptr, offset = 0,
err=0;
624 u8 *prevhdr, nexthdr = 0;
625 struct net *
net = dev_net(skb_dst(skb)->dev);
630 mtu = ip6_skb_dst_mtu(skb);
636 (
IP6CB(skb)->frag_max_size &&
637 IP6CB(skb)->frag_max_size > mtu)) {
638 if (skb->
sk && dst_allfrag(skb_dst(skb)))
641 skb->
dev = skb_dst(skb)->dev;
/* Payload budget per fragment: MTU minus IPv6 header and frag header. */
653 mtu -= hlen +
sizeof(
struct frag_hdr);
/*
 * Fast path: every existing fragment must fit the MTU, be 8-byte aligned
 * (except the last), have headroom for the header, and be unshared.
 */
655 if (skb_has_frag_list(skb)) {
656 int first_len = skb_pagelen(skb);
659 if (first_len - hlen > mtu ||
660 ((first_len - hlen) & 7) ||
664 skb_walk_frags(skb, frag) {
666 if (frag->
len > mtu ||
667 ((frag->
len & 7) && frag->
next) ||
668 skb_headroom(frag) < hlen)
669 goto slow_path_clean;
672 if (skb_shared(frag))
673 goto slow_path_clean;
685 frag = skb_shinfo(skb)->frag_list;
686 skb_frag_list_init(skb);
697 __skb_pull(skb, hlen);
699 __skb_push(skb, hlen);
700 skb_reset_network_header(skb);
701 memcpy(skb_network_header(skb), tmp_hdr, hlen);
709 first_len = skb_pagelen(skb);
710 skb->
data_len = first_len - skb_headlen(skb);
711 skb->
len = first_len;
712 ipv6_hdr(skb)->payload_len =
htons(first_len -
722 skb_reset_transport_header(frag);
724 __skb_push(frag, hlen);
725 skb_reset_network_header(frag);
726 memcpy(skb_network_header(frag), tmp_hdr,
728 offset += skb->
len - hlen -
sizeof(
struct frag_hdr);
735 ipv6_hdr(frag)->payload_len =
738 ip6_copy_metadata(frag, skb);
775 skb_walk_frags(skb, frag2) {
/* Slow path: copy-based fragmentation into freshly allocated skbs. */
789 left = skb->
len - hlen;
798 troom = rt->
dst.dev->needed_tailroom;
817 if ((frag = alloc_skb(len + hlen +
sizeof(
struct frag_hdr) +
830 ip6_copy_metadata(frag, skb);
831 skb_reserve(frag, hroom);
833 skb_reset_network_header(frag);
834 fh = (
struct frag_hdr *)(skb_network_header(frag) + hlen);
843 skb_set_owner_w(frag, skb->
sk);
848 skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
864 if (
skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
871 ipv6_hdr(frag)->payload_len =
htons(frag->
len -
/*
 * NOTE(review): fragments of ip6_rt_check() (route-key vs. flow-address
 * mismatch test — true when neither the /128 key nor the cached address
 * matches fl_addr) and, below it, part of the subtree/oif validity check
 * from ip6_sk_dst_check(). Parameter list and braces are elided.
 */
899 static inline int ip6_rt_check(
const struct rt6key *rt_key,
903 return (rt_key->
plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->
addr)) &&
904 (addr_cache ==
NULL || !ipv6_addr_equal(fl_addr, addr_cache));
935 #ifdef CONFIG_IPV6_SUBTREES
938 (fl6->flowi6_oif && fl6->flowi6_oif != dst->
dev->ifindex)) {
/*
 * NOTE(review): fragments of ip6_dst_lookup_tail() and its callers
 * ip6_dst_lookup(), ip6_dst_lookup_flow() and ip6_sk_dst_lookup_flow().
 * Visible in the tail: error propagation from the dst, source-address
 * selection when fl6->saddr is unspecified (honouring the socket's
 * srcprefs), and an OPTIMISTIC_DAD-conditional section whose body is
 * elided.
 */
947 static int ip6_dst_lookup_tail(
struct sock *sk,
950 struct net *
net = sock_net(sk);
951 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
960 if ((err = (*dst)->error))
961 goto out_err_release;
963 if (ipv6_addr_any(&fl6->
saddr)) {
966 sk ? inet6_sk(sk)->srcprefs : 0,
969 goto out_err_release;
972 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1004 if ((err = (*dst)->error))
1005 goto out_err_release;
/* ip6_dst_lookup(): thin wrapper over the tail helper. */
1033 return ip6_dst_lookup_tail(sk, dst, fl6);
/*
 * ip6_dst_lookup_flow(): lookup, then pin the final destination into the
 * flow and run the result through xfrm_lookup(). Errors become ERR_PTR.
 */
1056 err = ip6_dst_lookup_tail(sk, &dst, fl6);
1058 return ERR_PTR(err);
1060 fl6->
daddr = *final_dst;
1064 return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
/* ip6_sk_dst_lookup_flow(): same, but first revalidates the socket dst. */
1090 dst = ip6_sk_dst_check(sk, dst, fl6);
1092 err = ip6_dst_lookup_tail(sk, &dst, fl6);
1094 return ERR_PTR(err);
1096 fl6->
daddr = *final_dst;
1100 return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
/*
 * NOTE(review): fragment of ip6_ufo_append_data() — UDP fragmentation
 * offload path: allocates/reserves one skb, sets gso_size from the
 * per-fragment payload budget and stores the fragmentation id in
 * shinfo. The skb allocation call and getfrag invocation are elided.
 */
1104 static inline int ip6_ufo_append_data(
struct sock *sk,
1105 int getfrag(
void *from,
char *to,
int offset,
int len,
1106 int odd,
struct sk_buff *skb),
1107 void *from,
int length,
int hh_len,
int fragheaderlen,
1108 int transhdrlen,
int mtu,
unsigned int flags,
1121 hh_len + fragheaderlen + transhdrlen + 20,
1127 skb_reserve(skb, hh_len);
1130 skb_put(skb,fragheaderlen + transhdrlen);
1133 skb_reset_network_header(skb);
1143 (length - transhdrlen));
1150 skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
1154 skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
/*
 * NOTE(review): fragment of ip6_append_data_mtu() — recomputes the
 * effective mtu (subtracting the dst's header_len, or taking the path
 * dst's mtu) and derives maxfraglen as the largest 8-byte-aligned
 * payload plus fragment header that fits. Branch conditions are elided.
 */
1179 static void ip6_append_data_mtu(
int *mtu,
1181 unsigned int fragheaderlen,
1188 *mtu = *mtu - rt->
dst.header_len;
1195 *mtu = dst_mtu(rt->
dst.path);
1197 *maxfraglen = ((*mtu - fragheaderlen) & ~7)
1198 + fragheaderlen -
sizeof(
struct frag_hdr);
/*
 * NOTE(review): fragment of ip6_append_data() — the largest function in
 * this excerpt, with most interior lines elided (the embedded numbers jump
 * from 1203 to 1529). Visible phases: cork setup (duplicating hop-by-hop
 * and routing-header options, recording flow/hop_limit/tclass), mtu and
 * fragheaderlen/maxfraglen derivation, the UFO shortcut, and the main
 * while(length) copy loop with its allocation-size arithmetic, fraggap
 * checksum carry-over and page-frag coalescing. Do not infer complete
 * control flow from these lines alone.
 */
1203 int offset,
int len,
int odd,
struct sk_buff *skb),
1204 void *from,
int length,
int transhdrlen,
1206 struct rt6_info *rt,
unsigned int flags,
int dontfrag)
1212 unsigned int maxfraglen, fragheaderlen;
/* Cork setup: options are deep-copied into the per-socket cork state. */
1224 cork = &inet->
cork.base;
1251 np->
cork.opt->hopopt = ip6_opt_dup(opt->
hopopt,
1256 np->
cork.opt->srcrt = ip6_rthdr_dup(opt->
srcrt,
1265 inet->
cork.fl.u.ip6 = *fl6;
1266 np->
cork.hop_limit = hlimit;
1267 np->
cork.tclass = tclass;
1270 rt->
dst.dev->mtu : dst_mtu(&rt->
dst);
1273 rt->
dst.dev->mtu : dst_mtu(rt->
dst.path);
1279 if (dst_allfrag(rt->
dst.path))
1283 length += exthdrlen;
1284 transhdrlen += exthdrlen;
1285 dst_exthdrlen = rt->
dst.header_len;
1288 fl6 = &inet->
cork.fl.u.ip6;
/* Per-fragment geometry, mirroring ip6_append_data_mtu() above. */
1298 fragheaderlen =
sizeof(
struct ipv6hdr) + rt->rt6i_nfheader_len +
1299 (opt ? opt->opt_nflen : 0);
1300 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
sizeof(struct frag_hdr);
1302 if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
/* UFO shortcut for offload-capable paths. */
1343 err = ip6_ufo_append_data(sk, getfrag, from, length,
1344 hh_len, fragheaderlen,
1345 transhdrlen, mtu, flags, rt);
/* Main copy loop: fill the current skb, allocating new ones as needed. */
1355 while (length > 0) {
1357 copy = (cork->length <= mtu && !(cork->flags &
IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->
len;
1359 copy = maxfraglen - skb->
len;
1364 unsigned int fraglen;
1365 unsigned int fraggap;
1366 unsigned int alloclen;
1370 fraggap = skb->
len - maxfraglen;
1374 if (skb ==
NULL || skb_prev ==
NULL)
1375 ip6_append_data_mtu(&mtu, &maxfraglen,
1376 fragheaderlen, skb, rt);
1384 datalen = length + fraggap;
1386 if (datalen > (cork->length <= mtu && !(cork->flags &
IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1387 datalen = maxfraglen - fragheaderlen - rt->
dst.trailer_len;
1392 alloclen = datalen + fragheaderlen;
1394 alloclen += dst_exthdrlen;
1396 if (datalen != length + fraggap) {
1401 datalen += rt->
dst.trailer_len;
1404 alloclen += rt->
dst.trailer_len;
1405 fraglen = datalen + fragheaderlen;
1412 alloclen +=
sizeof(
struct frag_hdr);
1423 alloclen + hh_len, 1,
1442 skb_reserve(skb, hh_len +
sizeof(
struct frag_hdr) +
1446 skb_shinfo(skb)->tx_flags = tx_flags;
1452 skb_set_network_header(skb, exthdrlen);
1453 data += fragheaderlen;
/* fraggap handling: move the overhang, fix up the previous checksum. */
1458 skb_prev, maxfraglen,
1459 data + transhdrlen, fraggap, 0);
1460 skb_prev->csum = csum_sub(skb_prev->csum,
1463 pskb_trim_unique(skb_prev, maxfraglen);
1465 copy = datalen - transhdrlen - fraggap;
1471 }
else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
1478 length -= datalen - fraggap;
1497 if (getfrag(from,
skb_put(skb, copy),
1498 offset, copy, off, skb) < 0) {
1499 __skb_trim(skb, off);
/* Page-frag path: coalesce into the socket's page_frag when possible. */
1504 int i = skb_shinfo(skb)->nr_frags;
1505 struct page_frag *pfrag = sk_page_frag(sk);
1511 if (!skb_can_coalesce(skb, i, pfrag->
page,
1517 __skb_fill_page_desc(skb, i, pfrag->
page,
1519 skb_shinfo(skb)->nr_frags = ++
i;
1520 get_page(pfrag->
page);
1525 offset, copy, skb->
len, skb) < 0)
1529 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
/*
 * NOTE(review): fragments of ip6_cork_release() (the cork.base.dst test),
 * ip6_push_pending_frames() (coalesce queued skbs into one, build the
 * IPv6 header with the corked tclass, attach the dst, release the cork)
 * and ip6_flush_pending_frames() (the final ip6_cork_release call).
 * Queue-walk loop heads and error paths are elided.
 */
1561 if (inet->
cork.base.dst) {
1573 struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
1576 struct net *net = sock_net(sk);
1581 unsigned char proto = fl6->flowi6_proto;
1586 tail_skb = &(skb_shinfo(skb)->frag_list);
/* Chain queued skbs onto the head skb's frag_list, summing lengths. */
1589 if (skb->
data < skb_network_header(skb))
1590 __skb_pull(skb, skb_network_offset(skb));
1592 __skb_pull(tmp_skb, skb_network_header_len(skb));
1593 *tail_skb = tmp_skb;
1594 tail_skb = &(tmp_skb->
next);
1595 skb->
len += tmp_skb->
len;
1606 *final_dst = fl6->
daddr;
1607 __skb_pull(skb, skb_network_header_len(skb));
/* Build the outgoing IPv6 header: version 6 plus corked traffic class. */
1614 skb_reset_network_header(skb);
1615 hdr = ipv6_hdr(skb);
1618 htonl(0x60000000 | ((
int)np->
cork.tclass << 20));
1623 hdr->
daddr = *final_dst;
1628 skb_dst_set(skb, dst_clone(&rt->
dst));
1631 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1646 ip6_cork_release(inet, np);
1665 ip6_cork_release(inet_sk(sk), inet6_sk(sk));