#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
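/*
 * Bind-conflict helper (fragment): compares the bound receive addresses of
 * two sockets, treating IPv4-mapped IPv6 addresses as plain IPv4.
 */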
	const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
	__be32 sk1_rcv_saddr = sk_rcv_saddr(sk);
	__be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
	int addr_type = ipv6_addr_type(sk_rcv_saddr6);
	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) :
			 IPV6_ADDR_MAPPED;

		return (!sk2_ipv6only &&
			(!sk1_rcv_saddr || !sk2_rcv_saddr ||
			 sk1_rcv_saddr == sk2_rcv_saddr));

	    ipv6_addr_equal(sk_rcv_saddr6, sk2_rcv_saddr6))
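/* Hash a (net, local address, port) triple for the secondary, address-keyed UDP hash table. */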
static unsigned int udp6_portaddr_hash(struct net *net,
				       const struct in6_addr *addr6,
				       unsigned int port)
{
	unsigned int hash, mix = net_hash_mix(net);

	if (ipv6_addr_any(addr6))
		hash = jhash_1word(0, mix);
	else if (ipv6_addr_v4mapped(addr6))
		hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
	else
		hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);

	return hash ^ port;
}
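/* Port binding (fragment): precompute the wildcard and per-address secondary hashes before taking a port. */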
	unsigned int hash2_nulladdr =
		udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		udp6_portaddr_hash(sock_net(sk), &inet6_sk(sk)->rcv_saddr, 0);

	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
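/* Recompute the port+address hash when the socket's local address or port changes. */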
static void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = udp6_portaddr_hash(sock_net(sk),
					  &inet6_sk(sk)->rcv_saddr,
					  inet_sk(sk)->inet_num);
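/* Score a candidate socket against an incoming packet: each matching field
 * (connected port/address, bound local address, bound device) adds one point,
 * any mismatch disqualifies the socket. */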
static inline int compute_score(struct sock *sk, struct net *net,

	if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&

		if (!ipv6_addr_equal(&np->rcv_saddr, daddr))

		if (!ipv6_addr_any(&np->daddr)) {
			if (!ipv6_addr_equal(&np->daddr, saddr))

		if (sk->sk_bound_dev_if) {
			if (sk->sk_bound_dev_if != dif)
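/* Scoring variant for the port+address hash table: the bound local address
 * must equal the packet's destination outright and does not add to the score. */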
#define SCORE2_MAX (1 + 1 + 1)
static inline int compute_score2(struct sock *sk, struct net *net,
				 const struct in6_addr *daddr, unsigned short hnum,

	if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&

		if (!ipv6_addr_equal(&np->rcv_saddr, daddr))

		if (!ipv6_addr_any(&np->daddr)) {
			if (!ipv6_addr_equal(&np->daddr, saddr))

		if (sk->sk_bound_dev_if) {
			if (sk->sk_bound_dev_if != dif)
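/* Look up one port+address hash slot under RCU; if the nulls value at the end
 * of the chain is not the expected slot, the chain moved and the scan restarts. */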
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *daddr, unsigned int hnum, int dif,

		score = compute_score2(sk, net, saddr, sport,
		if (score > badness) {

	if (get_nulls_value(node) != slot2)

		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
		else if (unlikely(compute_score2(result, net, saddr, sport,
						 daddr, hnum, dif) < badness)) {
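/* Generic lookup: when the destination port's primary slot is busy, try the
 * exact (address, port) secondary hash first, then the wildcard-address hash,
 * before falling back to a scored scan of the port-only slot. */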
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);

	if (hslot->count > 10) {
		hash2 = udp6_portaddr_hash(net, daddr, hnum);
		slot2 = hash2 & udptable->mask;

		result = udp6_lib_lookup2(net, saddr, sport,

		hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
		slot2 = hash2 & udptable->mask;

		result = udp6_lib_lookup2(net, saddr, sport,

		score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif);
		if (score > badness) {

	if (get_nulls_value(node) != slot)

		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
		else if (unlikely(compute_score(result, net, hnum, saddr, sport,
						daddr, dport, dif) < badness)) {
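/* skb wrapper around the lookup: a socket already attached by early demux
 * (skb_steal_sock) is reused instead of doing a fresh hash lookup. */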
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	if (unlikely(sk = skb_steal_sock(skb)))

			       &iph->daddr, dport, inet6_iif(skb),
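/* recvmsg (fragment): dequeue or peek a datagram, verify the checksum when
 * less than the full payload is copied, and report the source address
 * (v4-mapped for packets that arrived over IPv4). */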
		  int noblock, int flags, int *addr_len)

	unsigned int ulen, copied;

				  &peeked, &off, &err);

	else if (copied < ulen)

	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
		if (udp_lib_checksum_complete(skb))

	if (skb_csum_unnecessary(skb))

	sock_recv_ts_and_drops(msg, sk, skb);

			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,

	unlock_sock_fast(sk, slow);
			       saddr, uh->source, inet6_iif(skb), udptable);
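/* Queue a received datagram on the socket; for connected sockets the flow
 * hash is saved for receive packet steering first. */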
static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)

	if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
		sock_rps_save_rxhash(sk, skb);
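/* UDP encapsulation: the extra encap_rcv() hook is guarded by a static key
 * that is switched on the first time an encapsulating socket is configured. */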
	if (!static_key_enabled(&udpv6_encap_needed))
		static_key_slow_inc(&udpv6_encap_needed);

	if (static_key_false(&udpv6_encap_needed) && up->encap_type) {

		ret = encap_rcv(sk, skb);

			       " %d while full coverage %d requested\n",
				       "too small, need min %d\n",

	if (udp_lib_checksum_complete(skb))

	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))

		rc = __udpv6_queue_rcv_skb(sk, skb);
	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
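/* Scan a hash chain for the next socket willing to receive this multicast
 * datagram, matching port, addresses, and (if set) the bound device. */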
static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,

	unsigned short num = ntohs(loc_port);

		if (!net_eq(sock_net(s), net))

			if (!ipv6_addr_any(&np->daddr) &&
			    !ipv6_addr_equal(&np->daddr, rmt_addr))

			if (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)

				if (!ipv6_addr_equal(&np->rcv_saddr, loc_addr))
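/* Deliver the skb to every socket gathered on the stack, cloning it for all
 * entries except the one marked final, which consumes the original. */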
			struct sk_buff *skb, unsigned int final)

	for (i = 0; i < count; i++) {
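/* Multicast receive: collect matching sockets into an on-stack array while
 * holding the slot lock, take references, then deliver after unlocking. */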
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,

	struct sock *sk, *stack[256 / sizeof(struct sock *)];
	const struct udphdr *uh = udp_hdr(skb);

	unsigned int i, count = 0;

	spin_lock(&hslot->lock);
	sk = sk_nulls_head(&hslot->head);
	dif = inet6_iif(skb);
	sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);

		sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr,

			flush_stack(stack, count, skb, ~0);

	for (i = 0; i < count; i++)

	spin_unlock(&hslot->lock);

		flush_stack(stack, count, skb, count - 1);

		for (i = 0; i < count; i++)
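/* Checksum setup on receive: UDP-Lite gets its own coverage handling, a zero
 * checksum is not accepted for UDP over IPv6, and the pseudo-header sum is
 * prepared for later verification. */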
static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh,

		err = udplite_checksum_init(skb, uh);

	if (uh->check == 0) {

	if (!skb_csum_unnecessary(skb))
						 &ipv6_hdr(skb)->daddr,
						 skb->len, proto, 0));
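/* Main receive routine (fragment): pull and validate the UDP header, trim the
 * skb to the UDP length, initialise the checksum, then hand the packet to
 * multicast delivery or to a unicast socket lookup. */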
	struct net *net = dev_net(skb->dev);

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;

	if (ulen < sizeof(*uh))

	if (ulen < skb->len) {
		if (pskb_trim_rcsum(skb, ulen))

		saddr = &ipv6_hdr(skb)->saddr;
		daddr = &ipv6_hdr(skb)->daddr;

	if (udp6_csum_init(skb, uh, proto))

	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable);

	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);

	if (udp_lib_checksum_complete(skb))
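/* Discard any datagram still corked on the socket's write queue. */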
static void udp_v6_flush_pending_frames(struct sock *sk)
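/* Transmit checksum helper: set up hardware checksum offload for a single
 * buffer, or sum the queued fragments in software when there are several. */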
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,

	struct udphdr *uh = udp_hdr(skb);

		offset = skb_transport_offset(skb);

			csum = csum_add(csum, skb->csum);
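/* Finish a corked send: fill in the UDP header from the flow, compute the
 * checksum (UDP-Lite, hardware-assisted, or plain software), and push the
 * queued frames to the IPv6 output path. */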
static int udp_v6_push_pending_frames(struct sock *sk)

	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;

		csum = udplite_csum_outgoing(sk, skb);

		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr,

		csum = udp_csum_outgoing(sk, skb);

					    up->len, fl6->flowi6_proto, csum);

	if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
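/* sendmsg (fragment): handle v4-mapped destinations via the IPv4 path, build
 * the flow (addresses, ports, oif, mark), apply cmsg options, route the flow,
 * append the data, and push it unless the send is corked. */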
	struct in6_addr *daddr, *final_p, final;

		goto do_udp_sendmsg;

		if (ipv6_addr_v4mapped(daddr)) {

			sin.sin_addr.s_addr = daddr->s6_addr32[3];

		goto do_append_data;

	memset(&fl6, 0, sizeof(fl6));

		    ipv6_addr_equal(daddr, &np->daddr))

		if (!fl6.flowi6_oif)
			fl6.flowi6_oif = sk->sk_bound_dev_if;

		if (!fl6.flowi6_oif)

	fl6.flowi6_mark = sk->sk_mark;

		opt->tot_len = sizeof(*opt);

					    &hlimit, &tclass, &dontfrag);

		if (!(opt->opt_nflen|opt->opt_flen))

	if (!ipv6_addr_any(daddr))

		fl6.daddr.s6_addr[15] = 0x1;
	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {

	} else if (!fl6.flowi6_oif)

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

		if (ipv6_addr_is_multicast(&fl6.daddr))

			sizeof(struct udphdr), hlimit, tclass, opt, &fl6,

		udp_v6_flush_pending_frames(sk);

		err = udp_v6_push_pending_frames(sk);

		ip6_dst_store(sk, dst,
#ifdef CONFIG_IPV6_SUBTREES

		goto back_from_confirm;

	udp_v6_flush_pending_frames(sk);
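/* setsockopt/getsockopt wrappers (and their compat variants) defer to the
 * shared UDP helpers, passing udp_v6_push_pending_frames for cork handling. */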
		      char __user *optval, unsigned int optlen)

				  udp_v6_push_pending_frames);

#ifdef CONFIG_COMPAT
int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, unsigned int optlen)

				  udp_v6_push_pending_frames);

		      char __user *optval, int __user *optlen)

#ifdef CONFIG_COMPAT
int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen)
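/* UFO (UDP fragmentation offload): checksum preparation and software
 * segmentation of oversized datagrams, inserting an IPv6 fragment header. */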
static int udp6_ufo_send_check(struct sk_buff *skb)

	if (!pskb_may_pull(skb, sizeof(*uh)))

	ipv6h = ipv6_hdr(skb);
	unsigned int unfrag_ip6hlen, unfrag_len;

	u8 *mac_start, *prevhdr;

	mss = skb_shinfo(skb)->gso_size;

		int type = skb_shinfo(skb)->gso_type;

	offset = skb_checksum_start_offset(skb);

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);

	if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) &&

	unfrag_len = skb_network_header(skb) - skb_mac_header(skb) +

	mac_start = skb_mac_header(skb);
	memmove(mac_start - frag_hdr_sz, mac_start, unfrag_len);

	fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
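/* Protocol glue: receive and error handlers plus the GSO hooks registered for
 * IPPROTO_UDP on IPv6. */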
static const struct inet6_protocol udpv6_protocol = {
	.handler	= udpv6_rcv,
	.err_handler	= udpv6_err,
	.gso_send_check	= udp6_ufo_send_check,
	.gso_segment	= udp6_ufo_fragment,
	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
#ifdef CONFIG_PROC_FS
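/* /proc/net/udp6: one line per socket, mirroring the layout of the IPv4 udp file. */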
static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket)

		   "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sk_wmem_alloc_get(sp),
		   sk_rmem_alloc_get(sp),
int udp6_seq_show(struct seq_file *seq, void *v)

			   "st tx_queue rx_queue tr tm->when retrnsmt"
			   " uid timeout inode ref pointer drops\n");

	.seq_fops	= &udp6_afinfo_seq_fops,
	.show		= udp6_seq_show,

int __net_init udp6_proc_init(struct net *net)
{
	return udp_proc_register(net, &udp6_seq_afinfo);

void udp6_proc_exit(struct net *net) {
	udp_proc_unregister(net, &udp6_seq_afinfo);
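/* The udpv6_prot operations table: most entries are the shared udp_lib_*
 * helpers, with IPv6-specific queueing and rehash callbacks. */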
	.close		   = udp_lib_close,

	.backlog_rcv	   = __udpv6_queue_rcv_skb,
	.hash		   = udp_lib_hash,

	.rehash		   = udp_v6_rehash,

#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_udpv6_setsockopt,
	.compat_getsockopt = compat_udpv6_getsockopt,

		goto out_udpv6_protocol;