22 #include <linux/module.h>
23 #include <linux/slab.h>
24 #include <linux/sysctl.h>
36 .sysctl_max_tw_buckets = NR_FILE * 2,
41 (unsigned long)&tcp_death_row),
48 (unsigned long)&tcp_death_row),
52 static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
56 if (after(end_seq, s_win) && before(seq, e_win))
58 return seq == e_win && seq == end_seq;
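/*
 * A minimal standalone sketch of the same check, assuming nothing from
 * the kernel tree: seq_before()/seq_after() are local stand-ins for the
 * before()/after() helpers in <net/tcp.h>, using signed 32-bit
 * subtraction so the comparison stays correct across sequence wraparound.
 */
#include <stdbool.h>
#include <stdint.h>

static bool seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static bool seq_after(uint32_t a, uint32_t b)  { return seq_before(b, a); }

static bool in_window_sketch(uint32_t seq, uint32_t end_seq,
			     uint32_t s_win, uint32_t e_win)
{
	if (seq_after(end_seq, s_win) && seq_before(seq, e_win))
		return true;			/* segment overlaps the window */
	return seq == e_win && seq == end_seq;	/* zero-length segment at the right edge */
}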
96 const u8 *hash_location;
98 bool paws_reject = false;
107 paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
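/*
 * Hedged sketch of the PAWS test (RFC 7323) that tcp_paws_reject() builds
 * on, not the kernel routine itself: a segment whose timestamp is older
 * than the most recently accepted one is presumed to come from an earlier
 * incarnation of the connection.  The names below are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

static bool paws_older(uint32_t tsval, uint32_t ts_recent)
{
	/* "older" under modular 32-bit timestamp arithmetic */
	return (int32_t)(tsval - ts_recent) < 0;
}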
228 if (th->syn && !th->rst && !th->ack && !paws_reject &&
249 if (paws_reject || th->ack)
270 const struct tcp_sock *tp = tcp_sk(sk);
271 bool recycle_ok = false;
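/*
 * Sketch of what recycle_ok gates, under the assumption (standard for
 * this era of the stack) that recycling shortens the TIME-WAIT period
 * from the fixed 60 s TCP_TIMEWAIT_LEN to roughly the retransmission
 * timeout when per-peer timestamps make old duplicates detectable.
 * Illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t tw_timeout_ms_sketch(bool recycle_ok, uint32_t rto_ms)
{
	return recycle_ok ? rto_ms : 60000;	/* 60000 ms = TCP_TIMEWAIT_LEN */
}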
292 #if IS_ENABLED(CONFIG_IPV6)
298 tw6 = inet6_twsk((struct sock *)tw);
301 tw->tw_tclass = np->tclass;
306 #ifdef CONFIG_TCP_MD5SIG
315 tcptw->tw_md5_key = NULL;
316 key = tp->af_specific->md5_lookup(sk, sk);
357 #ifdef CONFIG_TCP_MD5SIG
360 if (twsk->tw_md5_key) {
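/*
 * The destructor fragment above frees a key that the TIME-WAIT socket
 * owns.  Below is a hedged sketch of that ownership model: the key bytes
 * are duplicated when the socket enters TIME-WAIT so outgoing segments
 * can still be signed after the full sock is gone.  struct tw_md5 and
 * both helpers are illustrative, not kernel APIs.
 */
#include <stdlib.h>
#include <string.h>

struct tw_md5 {
	char  *key;	/* private copy owned by the timewait sock */
	size_t keylen;
};

static int tw_md5_copy_sketch(struct tw_md5 *tw, const char *key, size_t keylen)
{
	tw->key = malloc(keylen);
	if (!tw->key)
		return -1;
	memcpy(tw->key, key, keylen);
	tw->keylen = keylen;
	return 0;
}

static void tw_md5_destroy_sketch(struct tw_md5 *tw)
{
	free(tw->key);	/* mirrors the twsk destructor path above */
	tw->key = NULL;
}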
368 static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
388 struct tcp_sock *newtp = tcp_sk(newsk);
389 struct tcp_sock *oldtp = tcp_sk(sk);
400 if (oldcvp != NULL) {
405 if (newcvp != NULL) {
406 kref_init(&newcvp->kref);
424 treq->snt_isn + 1 + tcp_s_data_size(oldtp);
426 tcp_prequeue_init(newtp);
429 tcp_init_wl(newtp, treq->rcv_isn);
440 tcp_enable_early_retrans(newtp);
462 treq->snt_isn + 1 + tcp_s_data_size(oldtp);
464 newtp->rx_opt.saw_tstamp = 0;
467 newtp->rx_opt.num_sacks = 0;
473 keepalive_time_when(newtp));
478 tcp_enable_fack(newtp);
484 if (newtp->rx_opt.wscale_ok) {
488 newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
492 newtp->rx_opt.snd_wscale);
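/*
 * Minimal sketch of what the wscale_ok branch arranges: once window
 * scaling is negotiated, the 16-bit window field of every later segment
 * is left-shifted by the peer's advertised scale; without the option both
 * scales stay 0 and the field is taken literally.  Illustration only.
 */
#include <stdint.h>

static uint32_t effective_window_sketch(uint16_t raw_win, uint8_t snd_wscale)
{
	return (uint32_t)raw_win << snd_wscale;	/* scale is at most 14 (RFC 7323) */
}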
495 if (newtp->rx_opt.tstamp_ok) {
500 newtp->rx_opt.ts_recent_stamp = 0;
503 #ifdef CONFIG_TCP_MD5SIG
504 newtp->md5sig_info = NULL;
505 if (newtp->af_specific->md5_lookup(sk, newsk))
511 TCP_ECN_openreq_child(newtp, req);
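/*
 * Sketch of the flag inheritance TCP_ECN_openreq_child() performs: the
 * child starts with ECN enabled only if the listener recorded that the
 * peer negotiated it on the SYN.  The two structs and the flag value are
 * illustrative stand-ins for the kernel's types.
 */
struct ecn_req_sketch  { unsigned int ecn_ok : 1; };
struct ecn_sock_sketch { unsigned int ecn_flags; };
#define ECN_OK_SKETCH 0x1

static void ecn_openreq_child_sketch(struct ecn_sock_sketch *tp,
				     const struct ecn_req_sketch *req)
{
	tp->ecn_flags = req->ecn_ok ? ECN_OK_SKETCH : 0;
}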
538 const u8 *hash_location;
540 const struct tcphdr *th = tcp_hdr(skb);
542 bool paws_reject = false;
547 if (th->doff > (sizeof(struct tcphdr)>>2)) {
557 paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
562 if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
648 tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk))))
659 tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rcv_wnd)) {
662 req->rsk_ops->send_ack(sk, skb, req);
673 if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
684 goto embryonic_reset;
693 if (!(flg & TCP_FLAG_ACK))
698 tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
700 tcp_rsk(req)->snt_synack = 0;
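/*
 * Why snt_synack is recorded at all: when the handshake-completing ACK
 * echoes the SYN-ACK's timestamp, subtracting gives an initial RTT
 * sample; 0 means no usable sample.  A hedged sketch, not the kernel's
 * sampling code.
 */
#include <stdint.h>

static uint32_t synack_rtt_sketch(uint32_t now, uint32_t snt_synack)
{
	return snt_synack ? now - snt_synack : 0;	/* in timestamp ticks */
}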
709 if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
710 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
711 inet_rsk(req)->acked = 1;
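/*
 * Sketch of the defer-accept decision taken just above: a data-less
 * third-handshake ACK (end_seq == rcv_isn + 1) is marked acked but no
 * child socket is created until payload arrives or the retry budget in
 * rskq_defer_accept is spent.  Parameter names are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

static bool defer_accept_sketch(uint32_t end_seq, uint32_t rcv_isn,
				uint8_t retrans, uint8_t defer_limit)
{
	/* true: keep the request queued instead of creating the child */
	return retrans < defer_limit && end_seq == rcv_isn + 1;
}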
722 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
724 goto listen_overflow;
726 inet_csk_reqsk_queue_unlink(sk, req, prev);
727 inet_csk_reqsk_queue_removed(sk, req);
729 inet_csk_reqsk_queue_add(sk, req, child);
734 inet_rsk(req)->acked = 1;
745 req->rsk_ops->send_reset(sk, skb);
746 } else if (fastopen) {
751 inet_csk_reqsk_queue_drop(sk, req, prev);
774 int state = child->sk_state;
787 __sk_add_backlog(child, skb);
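/*
 * Hedged sketch of the pattern behind __sk_add_backlog(): if a process
 * currently owns the child socket's lock, the segment is queued and the
 * lock owner drains it on release, so softirq context never mutates
 * socket state concurrently.  The types below are illustrative, not the
 * kernel's.
 */
#include <stdbool.h>
#include <stddef.h>

struct pkt_sketch { struct pkt_sketch *next; };

struct child_sketch {
	bool owned_by_user;		/* lock held by process context */
	struct pkt_sketch *backlog_head;
	struct pkt_sketch *backlog_tail;
};

static bool backlog_sketch(struct child_sketch *sk, struct pkt_sketch *p)
{
	if (!sk->owned_by_user)
		return false;		/* caller processes the segment now */
	p->next = NULL;			/* otherwise defer to the lock owner */
	if (sk->backlog_tail)
		sk->backlog_tail->next = p;
	else
		sk->backlog_head = p;
	sk->backlog_tail = p;
	return true;
}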