53 #define pr_fmt(fmt) "TCP: " fmt
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
64 #include <linux/slab.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
93 #ifdef CONFIG_TCP_MD5SIG
141 static int tcp_repair_connect(struct sock *sk)
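/* Helper for the TCP_REPAIR interface: finishes connection setup and moves the
 * socket to ESTABLISHED without sending a SYN. The statements excerpted from
 * line 155 onward belong to tcp_v4_connect(), the normal connect() path. */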
155 __be16 orig_sport, orig_dport;
168 nexthop = daddr = usin->sin_addr.s_addr;
171 if (inet_opt && inet_opt->opt.srr) {
174 nexthop = inet_opt->opt.faddr;
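/* With an IP source-route (SRR) option the initial route lookup targets the
 * first hop taken from the option (opt.faddr), not the final destination. */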
179 fl4 = &inet->cork.fl.u.ip4;
180 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
183 orig_sport, orig_dport, sk, true);
196 if (!inet_opt || !inet_opt->opt.srr)
203 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
206 tp->rx_opt.ts_recent_stamp = 0;
212 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
216 inet->inet_daddr = daddr;
218 inet_csk(sk)->icsk_ext_hdr_len = 0;
220 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
234 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
256 err = tcp_repair_connect(sk);
282 static void tcp_v4_mtu_reduced(struct sock *sk)
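/* Called for ICMP fragmentation-needed: if the reported MTU is smaller than
 * the cached path MTU (and DF is in use), the pMTU cookie is updated and
 * outstanding data is retransmitted with the reduced MSS. */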
286 u32 mtu = tcp_sk(sk)->mtu_info;
302 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
308 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
320 static void do_redirect(struct sk_buff *skb, struct sock *sk)
325 dst->ops->redirect(dst, sk, skb);
351 const int type = icmp_hdr(icmp_skb)->type;
352 const int code = icmp_hdr(icmp_skb)->code;
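/* These lines are from tcp_v4_err(), the ICMP error handler for TCP: it looks
 * up the socket from the headers echoed in the ICMP payload and then handles
 * redirects, unreachable/frag-needed and related errors. */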
359 struct net *net = dev_net(icmp_skb->dev);
361 if (icmp_skb->len < (iph->ihl << 2) + 8) {
367 iph->saddr, th->source, inet_iif(icmp_skb));
402 (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
410 do_redirect(icmp_skb, sk);
425 tcp_v4_mtu_reduced(sk);
448 inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
452 skb = tcp_write_queue_head(sk);
480 if (req && req->sk == NULL)
483 switch (sk->sk_state) {
499 if (seq != tcp_rsk(req)->snt_isn) {
510 inet_csk_reqsk_queue_drop(sk, req, prev);
559 static void __tcp_v4_send_check(struct sk_buff *skb,
565 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
569 th->check = tcp_v4_check(skb->len, saddr, daddr,
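/* Two branches in __tcp_v4_send_check(): with CHECKSUM_PARTIAL only the
 * pseudo-header sum is stored in th->check and the device finishes the
 * checksum; otherwise the full checksum is computed in software. */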
579 const struct inet_sock *inet = inet_sk(sk);
581 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
587 const struct iphdr *iph;
590 if (!pskb_may_pull(skb, sizeof(*th)))
598 __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
615 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
617 const struct tcphdr *th = tcp_hdr(skb);
620 #ifdef CONFIG_TCP_MD5SIG
625 #ifdef CONFIG_TCP_MD5SIG
628 unsigned char newhash[16];
638 if (skb_rtable(skb)->rt_type != RTN_LOCAL)
653 skb->len - (th->doff << 2));
657 arg.iov[0].iov_base = (unsigned char *)&rep;
658 arg.iov[0].iov_len = sizeof(rep.th);
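/* The RST is built in the on-stack reply (rep) and handed to
 * ip_send_unicast_reply() through a single-element kernel iovec. */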
660 #ifdef CONFIG_TCP_MD5SIG
662 if (!sk && hash_location) {
683 if (genhash || memcmp(hash_location, newhash, 16) != 0)
698 rep.th.doff = arg.iov[0].iov_len / 4;
700 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
701 key, ip_hdr(skb)->saddr,
702 ip_hdr(skb)->daddr, &rep.th);
715 arg.bound_dev_if = sk->sk_bound_dev_if;
717 net = dev_net(skb_dst(skb)->dev);
718 arg.tos = ip_hdr(skb)->tos;
720 ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
725 #ifdef CONFIG_TCP_MD5SIG
741 int reply_flags, u8 tos)
743 const struct tcphdr *th = tcp_hdr(skb);
747 #ifdef CONFIG_TCP_MD5SIG
753 struct net *net = dev_net(skb_dst(skb)->dev);
758 arg.iov[0].iov_base = (unsigned char *)&rep;
759 arg.iov[0].iov_len = sizeof(rep.th);
772 rep.th.doff = arg.iov[0].iov_len / 4;
778 #ifdef CONFIG_TCP_MD5SIG
787 rep.th.doff = arg.iov[0].iov_len/4;
789 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
790 key, ip_hdr(skb)->saddr,
791 ip_hdr(skb)->daddr, &rep.th);
794 arg.flags = reply_flags;
800 arg.bound_dev_if = oif;
803 ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
808 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
825 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
831 tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
832 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
833 tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
836 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
847 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
867 skb_set_queue_mapping(skb, queue_mapping);
872 if (!tcp_rsk(req)->snt_synack && !err)
883 return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
889 static void tcp_v4_reqsk_destructor(struct request_sock *req)
901 const char *msg = "Dropping request";
902 bool want_cookie = false;
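/* tcp_syn_flood_action(): decides whether a listen-queue overflow is answered
 * with SYN cookies (when CONFIG_SYN_COOKIES and the sysctl allow it) and
 * emits the pr_info() warning below once per listening socket. */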
907 #ifdef CONFIG_SYN_COOKIES
909 msg = "Sending cookies";
916 lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
919 pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
935 int opt_size = sizeof(*dopt) + opt->optlen;
948 #ifdef CONFIG_TCP_MD5SIG
972 #if IS_ENABLED(CONFIG_IPV6)
976 hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
977 if (key->family != family)
987 struct sock *addr_sk)
991 addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
992 return tcp_md5_do_lookup(sk, addr, AF_INET);
1002 return tcp_md5_do_lookup(sk, addr, AF_INET);
1007 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
1025 md5sig = kmalloc(sizeof(*md5sig), gfp);
1048 hlist_add_head_rcu(&key->node, &md5sig->head);
1062 hlist_del_rcu(&key->node);
1067 if (hlist_empty(&md5sig->head))
1073 void tcp_clear_md5_list(struct sock *sk)
1082 if (!hlist_empty(&md5sig->head))
1085 hlist_del_rcu(&key->node);
1091 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1097 if (optlen < sizeof(cmd))
1106 if (!cmd.tcpm_key || !cmd.tcpm_keylen)
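/*
 * The struct tcp_md5sig copied from userspace here is installed with the
 * TCP_MD5SIG socket option. A minimal userspace sketch, illustrative only
 * ("fd", "peer" and "secret" are assumed to exist in the caller):
 *
 *	struct tcp_md5sig md5;
 *	memset(&md5, 0, sizeof(md5));
 *	memcpy(&md5.tcpm_addr, &peer, sizeof(peer));	// struct sockaddr_in peer
 *	md5.tcpm_keylen = strlen(secret);
 *	memcpy(md5.tcpm_key, secret, md5.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */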
1138 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1141 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1149 goto clear_hash_noput;
1152 if (crypto_hash_init(desc))
1154 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1160 if (crypto_hash_final(desc, md5_hash))
1179 const struct tcphdr *th = tcp_hdr(skb);
1183 saddr = inet_sk(sk)->inet_saddr;
1184 daddr = inet_sk(sk)->inet_daddr;
1186 saddr = inet_rsk(req)->loc_addr;
1187 daddr = inet_rsk(req)->rmt_addr;
1189 const struct iphdr *iph = ip_hdr(skb);
1196 goto clear_hash_noput;
1199 if (crypto_hash_init(desc))
1202 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1210 if (crypto_hash_final(desc, md5_hash))
1224 static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
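/* Validates RFC 2385 segments: no key and no option passes; a configured key
 * without an MD5 option, or an option without a key, is dropped; when both are
 * present the hash is recomputed and compared against the received option. */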
1236 const struct iphdr *iph = ip_hdr(skb);
1237 const struct tcphdr *th = tcp_hdr(skb);
1239 unsigned char newhash[16];
1246 if (!hash_expected && !hash_location)
1249 if (hash_expected && !hash_location) {
1254 if (!hash_expected && hash_location) {
1266 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1270 genhash ? " tcp_v4_calc_md5_hash failed"
1282 .rtx_syn_ack = tcp_v4_rtx_synack,
1283 .send_ack = tcp_v4_reqsk_send_ack,
1284 .destructor = tcp_v4_reqsk_destructor,
1285 .send_reset = tcp_v4_send_reset,
1289 #ifdef CONFIG_TCP_MD5SIG
1291 .md5_lookup = tcp_v4_reqsk_md5_lookup,
1296 static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
1301 bool skip_cookie = false;
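/* Server-side TCP Fast Open check: data carried in a SYN is only accepted when
 * the client presents a valid cookie (or the fastopen sysctl allows cookie-less
 * operation); otherwise a fresh cookie is returned to the client in valid_foc. */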
1304 if (likely(!fastopen_cookie_present(foc))) {
1313 fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
1333 spin_lock(&fastopenq->lock);
1336 spin_unlock(&fastopenq->lock);
1345 spin_unlock(&fastopenq->lock);
1349 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1359 valid_foc->len = -1;
1362 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1364 } else if (foc->len == 0) {
1377 static int tcp_v4_conn_req_fastopen(struct sock *sk,
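/* Fast Open path: the child socket is created directly from the SYN (via the
 * syn_recv_sock() call below) so that data carried in the SYN can be queued to
 * the child before the three-way handshake completes. */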
1392 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
1393 if (child == NULL) {
1422 tcp_rsk(req)->listener = sk;
1437 inet_csk_reqsk_queue_add(sk, req, child);
1440 inet_csk(child)->icsk_af_ops->rebuild_header(child);
1460 __skb_pull(skb, tcp_hdr(skb)->doff * 4);
1461 skb_set_owner_r(skb, child);
1477 const u8 *hash_location;
1482 __be32 saddr = ip_hdr(skb)->saddr;
1483 __be32 daddr = ip_hdr(skb)->daddr;
1485 bool want_cookie = false;
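/* tcp_v4_conn_request(): SYN processing on a listener - queue-overflow and
 * SYN-flood checks, option parsing, request_sock setup, ISN selection and the
 * SYNACK transmission excerpted below. */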
1500 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1511 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1518 #ifdef CONFIG_TCP_MD5SIG
1519 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1522 tcp_clear_options(&tmp_opt);
1526 want_cookie ? NULL : &foc);
1530 !tp->rx_opt.cookie_out_never &&
1539 goto drop_and_release;
1548 *c++ ^= *hash_location++;
1550 want_cookie = false;
1553 } else if (!tp->rx_opt.cookie_in_always) {
1558 goto drop_and_release;
1563 tcp_clear_options(&tmp_opt);
1566 tcp_openreq_init(req, &tmp_opt, skb);
1568 ireq = inet_rsk(req);
1572 ireq->opt = tcp_v4_save_options(skb);
1574 if (security_inet_conn_request(sk, skb, req))
1578 TCP_ECN_create_request(req, skb);
1596 fl4.daddr == saddr) {
1599 goto drop_and_release;
1616 goto drop_and_release;
1619 isn = tcp_v4_init_sequence(skb);
1621 tcp_rsk(req)->snt_isn = isn;
1628 do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);
1643 fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
1647 skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
1651 if (likely(!do_fastopen)) {
1656 if (err || want_cookie)
1660 tcp_rsk(req)->listener = NULL;
1663 if (fastopen_cookie_present(&foc) && foc.len != 0)
1666 } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req,
1694 #ifdef CONFIG_TCP_MD5SIG
1699 if (sk_acceptq_is_full(sk))
1709 newtp = tcp_sk(newsk);
1710 newinet = inet_sk(newsk);
1711 ireq = inet_rsk(req);
1712 newinet->inet_daddr = ireq->rmt_addr;
1713 newinet->inet_rcv_saddr = ireq->loc_addr;
1715 inet_opt = ireq->opt;
1719 newinet->mc_ttl = ip_hdr(skb)->ttl;
1720 newinet->rcv_tos = ip_hdr(skb)->tos;
1721 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1723 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1737 newtp->advmss = dst_metric_advmss(dst);
1738 if (tcp_sk(sk)->rx_opt.user_mss &&
1739 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1740 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1743 tcp_synack_rtt_meas(newsk, req);
1746 #ifdef CONFIG_TCP_MD5SIG
1748 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1777 tcp_clear_xmit_timers(newsk);
1785 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
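/* Matches an incoming segment on a listening socket against a pending
 * request_sock, an already established socket for the same 4-tuple, or (with
 * CONFIG_SYN_COOKIES) a SYN-cookie ACK. */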
1787 struct tcphdr *th = tcp_hdr(skb);
1788 const struct iphdr *iph = ip_hdr(skb);
1809 #ifdef CONFIG_SYN_COOKIES
1818 const struct iphdr *iph = ip_hdr(skb);
1821 if (!tcp_v4_check(skb->len, iph->saddr,
1831 if (skb->len <= 76) {
1849 #ifdef CONFIG_TCP_MD5SIG
1856 if (tcp_v4_inbound_md5_hash(sk, skb))
1863 sock_rps_save_rxhash(sk, skb);
1865 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1866 dst->ops->check(dst, 0) == NULL) {
1878 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1882 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1887 sock_rps_save_rxhash(nsk, skb);
1895 sock_rps_save_rxhash(sk, skb);
1904 tcp_v4_send_reset(rsk, skb);
1922 struct net *net = dev_net(skb->dev);
1923 const struct iphdr *iph;
1930 if (!pskb_may_pull(skb, ip_hdrlen(skb) + sizeof(struct tcphdr)))
1934 th = (struct tcphdr *)((char *)iph + ip_hdrlen(skb));
1936 if (th->doff < sizeof(struct tcphdr) / 4)
1950 dst = dst_check(dst, 0);
1952 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1964 const struct iphdr *iph;
1968 struct net *net = dev_net(skb->dev);
1976 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1981 if (th->doff < sizeof(struct tcphdr) / 4)
1983 if (!pskb_may_pull(skb, th->doff * 4))
1990 if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1997 skb->len - th->doff * 4);
2000 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
2013 goto discard_and_relse;
2017 goto discard_and_relse;
2021 goto discard_and_relse;
2028 #ifdef CONFIG_NET_DMA
2030 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
2032 if (tp->ucopy.dma_chan)
2037 if (!tcp_prequeue(sk, skb))
2040 } else if (unlikely(sk_add_backlog(sk, skb,
2044 goto discard_and_relse;
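/* Delivery in tcp_v4_rcv(): when the socket is not owned by user context the
 * segment is processed directly (or via the prequeue); otherwise it goes onto
 * the backlog and is dropped if the backlog limit is exceeded. */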
2056 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
2060 tcp_v4_send_reset(NULL, skb);
2078 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
2085 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
2098 tcp_v4_timewait_ack(sk, skb);
2119 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
2130 .net_header_len = sizeof(struct iphdr),
2136 #ifdef CONFIG_COMPAT
2143 #ifdef CONFIG_TCP_MD5SIG
2147 .md5_parse = tcp_v4_parse_md5_keys,
2154 static int tcp_v4_init_sock(struct sock *sk)
2162 #ifdef CONFIG_TCP_MD5SIG
2163 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2173 tcp_clear_xmit_timers(sk);
2178 tcp_write_queue_purge(sk);
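/* tcp_v4_destroy_sock(): stops the retransmit timers, purges the write and
 * prequeue queues, releases any MD5 keys and drops the inet bind bucket. */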
2183 #ifdef CONFIG_TCP_MD5SIG
2185 if (tp->md5sig_info) {
2186 tcp_clear_md5_list(sk);
2188 tp->md5sig_info = NULL;
2192 #ifdef CONFIG_NET_DMA
2194 __skb_queue_purge(&sk->sk_async_wait_queue);
2198 __skb_queue_purge(&tp->ucopy.prequeue);
2201 if (inet_csk(sk)->icsk_bind_hash)
2207 tcp_cookie_values_release);
2215 sk_sockets_allocated_dec(sk);
2216 sock_release_memcg(sk);
2220 #ifdef CONFIG_PROC_FS
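/* /proc/net/tcp support: a seq_file iterator that walks the listening hash
 * first (listening_get_*) and then the established and time-wait buckets
 * (established_get_*). */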
2225 return hlist_nulls_empty(head) ? NULL :
2231 return !is_a_nulls(tw->tw_node.next) ?
2240 static void *listening_get_next(struct seq_file *seq, void *cur)
2247 struct net *net = seq_file_net(seq);
2251 spin_lock_bh(&ilb->lock);
2252 sk = sk_nulls_head(&ilb->head);
2282 icsk = inet_csk(sk);
2287 sk = sk_nulls_next(sk);
2291 if (!net_eq(sock_net(sk), net))
2293 if (sk->sk_family == st->family) {
2297 icsk = inet_csk(sk);
2309 spin_unlock_bh(&ilb->lock);
2313 spin_lock_bh(&ilb->lock);
2314 sk = sk_nulls_head(&ilb->head);
2322 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2329 rc = listening_get_next(seq, NULL);
2331 while (rc && *pos) {
2332 rc = listening_get_next(seq, rc);
2348 static void *established_get_first(struct seq_file *seq)
2351 struct net *net = seq_file_net(seq);
2362 if (empty_bucket(st))
2367 if (sk->sk_family != st->family ||
2368 !net_eq(sock_net(sk), net)) {
2377 if (tw->tw_family != st->family ||
2378 !net_eq(twsk_net(tw), net)) {
2384 spin_unlock_bh(lock);
2391 static void *established_get_next(struct seq_file *seq, void *cur)
2397 struct net *net = seq_file_net(seq);
2406 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
2427 sk = sk_nulls_next(sk);
2430 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2443 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2449 rc = established_get_first(seq);
2452 rc = established_get_next(seq, rc);
2458 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2464 rc = listening_get_idx(seq, &pos);
2468 rc = established_get_idx(seq, pos);
2474 static void *tcp_seek_last_pos(struct seq_file *seq)
2478 int orig_num = st->num;
2481 switch (st->state) {
2487 rc = listening_get_next(seq, NULL);
2488 while (offset-- && rc)
2489 rc = listening_get_next(seq, rc);
2499 rc = established_get_first(seq);
2500 while (offset-- && rc)
2501 rc = established_get_next(seq, rc);
2509 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2514 if (*pos && *pos == st->last_pos) {
2515 rc = tcp_seek_last_pos(seq);
2531 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2537 rc = tcp_get_idx(seq, 0);
2541 switch (st->state) {
2544 rc = listening_get_next(seq, v);
2549 rc = established_get_first(seq);
2554 rc = established_get_next(seq, v);
2563 static void tcp_seq_stop(struct seq_file *seq, void *v)
2567 switch (st->state) {
2608 afinfo->seq_ops.start = tcp_seq_start;
2609 afinfo->seq_ops.next = tcp_seq_next;
2610 afinfo->seq_ops.stop = tcp_seq_stop;
2626 static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2633 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
2636 ntohs(inet_sk(sk)->inet_sport),
2642 jiffies_delta_to_clock_t(delta),
2652 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2656 const struct tcp_sock *tp = tcp_sk(sk);
2658 const struct inet_sock *inet = inet_sk(sk);
2672 } else if (timer_pending(&sk->sk_timer)) {
2674 timer_expires = sk->sk_timer.expires;
2688 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2689 "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
2690 i, src, srcp, dest, destp, sk->sk_state,
2694 jiffies_delta_to_clock_t(timer_expires - jiffies),
2705 (fastopenq ? fastopenq->max_qlen : 0) :
2706 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
2711 struct seq_file *f, int i, int *len)
2717 dest = tw->tw_daddr;
2718 src = tw->tw_rcv_saddr;
2723 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
2725 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2731 static int tcp4_seq_show(struct seq_file *seq, void *v)
2738 " sl local_address rem_address st tx_queue "
2739 "rx_queue tr tm->when retrnsmt uid timeout "
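/* Column header for /proc/net/tcp; addresses and ports in the per-socket lines
 * below are printed in hexadecimal. */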
2745 switch (st->state) {
2748 get_tcp4_sock(v, seq, st->num, &len);
2754 get_timewait4_sock(v, seq, st->num, &len);
2757 seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
2773 .seq_fops = &tcp_afinfo_seq_fops,
2775 .show = tcp4_seq_show,
2779 static int __net_init tcp4_proc_init_net(struct net *net)
2784 static void __net_exit tcp4_proc_exit_net(struct net *net)
2790 .init = tcp4_proc_init_net,
2791 .exit = tcp4_proc_exit_net,
2794 int __init tcp4_proc_init(void)
2799 void tcp4_proc_exit(void)
2807 const struct iphdr *iph = skb_gro_network_header(skb);
2813 if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
2826 skb_gro_offset(skb),
2841 const struct iphdr *iph = ip_hdr(skb);
2842 struct tcphdr *th = tcp_hdr(skb);
2844 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
2859 .init = tcp_v4_init_sock,
2869 .mtu_reduced = tcp_v4_mtu_reduced,
2881 .obj_size = sizeof(struct tcp_sock),
2883 .twsk_prot = &tcp_timewait_sock_ops,
2886 .no_autobind = true,
2887 #ifdef CONFIG_COMPAT
2891 #ifdef CONFIG_MEMCG_KMEM
2899 static int __net_init tcp_sk_init(struct net *net)
2904 static void __net_exit tcp_sk_exit(struct net *net)
2914 .init = tcp_sk_init,
2915 .exit = tcp_sk_exit,
2916 .exit_batch = tcp_sk_exit_batch,
2923 panic("Failed to create the TCP control socket.\n");