17 #include <linux/tcp.h>
18 #include <linux/random.h>
20 #include <linux/kernel.h>
/* Low COOKIEBITS bits of the hash carry the encoded data (mssind etc.). */
25 #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
/*
 * NOTE(review): MSS values representable in the cookie; index into this
 * table is what gets encoded.  This extract is incomplete — the table's
 * initializer body is not visible here.  TODO confirm against the full file.
 */
28 static __u16 const msstab[] = {
/* Retries when searching for a jiffies-count whose hash yields the cookie. */
45 #define COUNTER_TRIES 4
/*
 * NOTE(review): fragment of get_cookie_sock() — the enclosing function's
 * signature and the rest of its body are not visible in this extract.
 * Visible behavior: create the child socket for a validated syncookie
 * connection via the af-specific syn_recv_sock() hook, then add the
 * request + child to the listener's accept queue.
 * The stray "54"/"56" tokens are line-number residue from extraction,
 * preserved byte-for-byte; the full file should not contain them.
 */
54 child = icsk->
icsk_af_ops->syn_recv_sock(sk, skb, req, dst);
56 inet_csk_reqsk_queue_add(sk, req, child);
/*
 * NOTE(review): fragment of cookie_hash() — copies the 16-byte IPv6
 * destination address into the scratch buffer after the (presumably
 * 16-byte, tmp+4 in __u32 units) source address; the surrounding hash
 * setup and the hashing call itself are not visible here.
 */
78 memcpy(tmp + 4, daddr, 16);
/*
 * NOTE(review): fragment of secure_tcp_syn_cookie() — parameter list,
 * opening brace and intermediate lines are missing from this extract.
 * Visible structure matches the classic SYN-cookie construction:
 * hash1(addrs, ports) + sseq, plus (hash2(addrs, ports, count) + data)
 * folded in; cannot confirm the masking/shift details from what is shown.
 */
86 static __u32 secure_tcp_syn_cookie(
const struct in6_addr *saddr,
91 return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
93 ((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
/*
 * NOTE(review): fragment of check_tcp_syn_cookie() — the inverse of
 * secure_tcp_syn_cookie(): subtract the first hash + sseq, then (in the
 * missing surrounding loop, presumably over COUNTER_TRIES candidate
 * counter values "diff") re-derive and compare the second hash.
 * Control flow around these two lines is not visible — do not infer it.
 */
104 cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
111 cookie_hash(saddr, daddr, sport, dport, count - diff, 1))
/*
 * NOTE(review): fragment of cookie_v6_init_sequence() (signature not
 * visible).  Visible behavior: note the listen-queue overflow, then scan
 * msstab from the top down for the largest table MSS <= the peer's mss,
 * report the clamped value through *mssp.  Lines between the residual
 * numbers 118/122/124/128 are missing from this extract.
 */
117 const struct ipv6hdr *iph = ipv6_hdr(skb);
118 const struct tcphdr *
th = tcp_hdr(skb);
122 tcp_synq_overflow(sk);
/* Walk msstab high-to-low; index 0 is the implicit floor (loop stops there). */
124 for (mssind =
ARRAY_SIZE(msstab) - 1; mssind ; mssind--)
125 if (mss >= msstab[mssind])
128 *mssp = msstab[mssind];
/*
 * NOTE(review): fragment of cookie_check() — validates a received ACK's
 * sequence number as a SYN cookie.  The body lines that derive "mssind"
 * (presumably via check_tcp_syn_cookie() on iph/th fields) are missing
 * from this extract; only the header/tail are visible.  Returns the
 * decoded MSS on success, 0 if the cookie is invalid.
 */
137 static inline int cookie_check(
const struct sk_buff *
skb,
__u32 cookie)
139 const struct ipv6hdr *iph = ipv6_hdr(skb);
140 const struct tcphdr *
th = tcp_hdr(skb);
/* Out-of-range index means the cookie did not decode to a valid msstab slot. */
146 return mssind <
ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
/*
 * NOTE(review): fragment of cookie_v6_check() — rebuilds a request sock
 * from a syncookie ACK.  Large stretches (cookie extraction, req
 * allocation, option parsing results, route lookup, error paths) are
 * missing from this extract; only scattered lines survive.  Comments
 * below describe ONLY what the visible lines do.
 */
152 const u8 *hash_location;
158 const struct tcphdr *
th = tcp_hdr(skb);
/* Reject unless a recent overflow happened and the cookie decodes to an MSS. */
170 if (tcp_synq_no_recent_overflow(sk) ||
171 (mss = cookie_check(skb, cookie)) == 0) {
179 memset(&tcp_opt, 0,
sizeof(tcp_opt));
190 ireq = inet_rsk(req);
191 ireq6 = inet6_rsk(req);
/* LSM hook may veto the reconstructed request (error path not visible here). */
195 if (security_inet_conn_request(sk, skb, req))
/* Fill the request's addresses from the ACK's IPv6 header. */
201 ireq6->
rmt_addr = ipv6_hdr(skb)->saddr;
202 ireq6->
loc_addr = ipv6_hdr(skb)->daddr;
210 ireq6->
iif = sk->sk_bound_dev_if;
/* If the listener is unbound, take the interface the skb arrived on
 * (the rest of this condition at residual line 212/213 is not visible). */
212 if (!sk->sk_bound_dev_if &&
214 ireq6->
iif = inet6_iif(skb);
/* Build the flow for the route lookup back to the peer. */
236 memset(&fl6, 0,
sizeof(fl6));
241 fl6.flowi6_oif = sk->sk_bound_dev_if;
243 fl6.fl6_dport = inet_rsk(req)->rmt_port;
244 fl6.fl6_sport = inet_sk(sk)->inet_sport;
245 security_req_classify_flow(req, flowi6_to_flowi(&fl6));
/* Hand off to get_cookie_sock() to create and queue the child socket. */
260 ret = get_cookie_sock(sk, skb, req, dst);