#define pr_fmt(fmt) "TCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
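/*
 * secs_to_retrans()/retrans_to_secs() convert between a timeout expressed
 * in seconds and an equivalent retransmission count, assuming exponential
 * backoff that starts at 'timeout' and is clamped at 'rto_max'.
 */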
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
		while (seconds > period && res < 255) {
			if (timeout > rto_max)
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
		if (timeout > rto_max)
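/* tcp_init_sock(): per-socket TCP state initialisation at socket creation. */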
	tcp_prequeue_init(tp);
	tcp_enable_early_retrans(tp);
	sock_update_memcg(sk);
	sk_sockets_allocated_inc(sk);
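/*
 * tcp_poll(): report POLLIN/POLLOUT readiness; a listening socket is
 * handled separately via inet_csk_listen_poll().
 */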
	struct sock *sk = sock->sk;
	const struct tcp_sock *tp = tcp_sk(sk);

	sock_poll_wait(file, sk_sleep(sk), wait);
		return inet_csk_listen_poll(sk);
	if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
	if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
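/*
 * forced_push() is true once more than half of the maximum window has been
 * sent since the last pushed byte, so a PSH is forced on the next segment.
 */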
static inline bool forced_push(const struct tcp_sock *tp)
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
static inline void tcp_push(struct sock *sk, int flags, int mss_now,
			    int nonagle)
	if (tcp_send_head(sk)) {
		if (!(flags & MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, tcp_write_queue_tail(sk));
		tcp_mark_urg(tp, flags);
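/* tcp_splice_read(): splice received data from the socket into a pipe. */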
	struct sock *sk = sock->sk;
	sock_rps_record_flow(sk);
	ret = __tcp_splice_read(sk, &tss);
	ret = sock_error(sk);
	ret = sock_intr_errno(timeo);
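/*
 * sk_stream_alloc_skb(): allocate an fclone skb with room reserved for the
 * protocol headers; on memory pressure the send buffer is moderated instead.
 */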
	size = ALIGN(size, 4);
	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
		if (sk_wmem_schedule(sk, skb->truesize)) {
			skb_reserve(skb, sk->sk_prot->max_header);
	sk->sk_prot->enter_memory_pressure(sk);
	sk_stream_moderate_sndbuf(sk);
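/*
 * tcp_xmit_size_goal(): how large an skb the sending path should try to
 * build.  For GSO-capable sockets this can be many MSS, bounded by half the
 * maximum window and, in this kernel generation, by half of
 * sysctl_tcp_limit_output_bytes.
 */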
static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
	u32 xmit_size_goal, old_size_goal;

	xmit_size_goal = mss_now;

	if (large_allowed && sk_can_gso(sk)) {
				  inet_csk(sk)->icsk_af_ops->net_header_len -
				  inet_csk(sk)->icsk_ext_hdr_len -
		xmit_size_goal = min_t(u32, xmit_size_goal,
				       sysctl_tcp_limit_output_bytes >> 1);
		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
		if (likely(old_size_goal <= xmit_size_goal &&
			   old_size_goal + mss_now > xmit_size_goal)) {
			xmit_size_goal = old_size_goal;
				min_t(u16, xmit_size_goal / mss_now,
	return max(xmit_size_goal, mss_now);
static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
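/*
 * do_tcp_sendpages(): zero-copy sendpage()/splice() path; pages are attached
 * to the tail skb as paged fragments (coalescing when possible) instead of
 * being copied.
 */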
				size_t size, int flags)
	int mss_now, size_goal;
	    !tcp_passive_fastopen(sk)) {
	mss_now = tcp_send_mss(sk, &size_goal, flags);
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;
				goto wait_for_memory;
		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
			tcp_mark_push(tp, skb);
		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb_fill_page_desc(skb, i, page, offset, copy);
		sk_mem_charge(sk, copy);
		skb_shinfo(skb)->gso_segs = 0;
		if (skb->len < size_goal || (flags & MSG_OOB))
		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
		} else if (skb == tcp_send_head(sk))
	mss_now = tcp_send_mss(sk, &size_goal, flags);
	tcp_push(sk, flags, mss_now, tp->nonagle);
		 size_t size, int flags)
	res = do_tcp_sendpages(sk, page, offset, size, flags);
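/*
 * select_size(): how much linear (header) room to allocate in a fresh skb
 * for sendmsg(), depending on GSO and scatter-gather support.
 */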
static inline int select_size(const struct sock *sk, bool sg)
	const struct tcp_sock *tp = tcp_sk(sk);
	if (sk_can_gso(sk)) {
			if (tmp >= pgbreak &&
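/*
 * tcp_sendmsg_fastopen(): TCP Fast Open client side; data passed with
 * MSG_FASTOPEN is queued and sent together with the SYN.
 */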
static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size)
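/*
 * tcp_sendmsg(): copy user data into the write queue, filling the tail skb
 * up to size_goal before allocating a new one, then push via tcp_push().
 */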
	int iovlen, flags, err, copied = 0;
	int mss_now = 0, size_goal, copied_syn = 0, offset = 0;
		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn);
		offset = copied_syn;
	    !tcp_passive_fastopen(sk)) {
	mss_now = tcp_send_mss(sk, &size_goal, flags);
	while (--iovlen >= 0) {
		if (offset >= seglen) {
		while (seglen > 0) {
			int max = size_goal;
			skb = tcp_write_queue_tail(sk);
			if (tcp_send_head(sk)) {
				copy = max - skb->len;
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;
							  select_size(sk, sg),
					goto wait_for_memory;
				skb_entail(sk, skb);
			if (skb_availroom(skb) > 0) {
				copy = min_t(int, copy, skb_availroom(skb));
				err = skb_add_data_nocache(sk, skb, from, copy);
				int i = skb_shinfo(skb)->nr_frags;
				struct page_frag *pfrag = sk_page_frag(sk);
					goto wait_for_memory;
				if (!skb_can_coalesce(skb, i, pfrag->page,
					tcp_mark_push(tp, skb);
				if (!sk_wmem_schedule(sk, copy))
					goto wait_for_memory;
				err = skb_copy_to_page_nocache(sk, from, skb,
					skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
					skb_fill_page_desc(skb, i, pfrag->page,
					get_page(pfrag->page);
			skb_shinfo(skb)->gso_segs = 0;
			if ((seglen -= copy) == 0 && iovlen == 0)
			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
			} else if (skb == tcp_send_head(sk))
	mss_now = tcp_send_mss(sk, &size_goal, flags);
		tcp_push(sk, flags, mss_now, tp->nonagle);
	return copied + copied_syn;
	tcp_unlink_write_queue(skb, sk);
	tcp_check_send_head(sk, skb);
	sk_wmem_free_skb(sk, skb);
	if (copied + copied_syn)
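/* tcp_recv_urg(): MSG_OOB handling, returns the single urgent byte. */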
static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
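/*
 * tcp_peek_sndq(): copy the not-yet-acknowledged send queue to userspace;
 * used by TCP_REPAIR-mode receives.
 */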
static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
	int copied = 0, err = 0;
	return err ?: copied;
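/*
 * tcp_cleanup_rbuf(): after the receiver has consumed data, decide whether
 * the freed receive space justifies sending an ACK / window update now.
 */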
	bool time_to_ack = false;
	     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
	if (inet_csk_ack_scheduled(sk)) {
		__u32 rcv_window_now = tcp_receive_window(tp);
		if (2*rcv_window_now <= tp->window_clamp) {
			if (new_window && new_window >= 2 * rcv_window_now)
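/* tcp_prequeue_process(): feed prequeued skbs through the normal receive path. */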
static void tcp_prequeue_process(struct sock *sk)
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk_backlog_rcv(sk, skb);
	tp->ucopy.memory = 0;
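/*
 * CONFIG_NET_DMA: asynchronous receive-copy offload via a DMA engine; this
 * waits for (or reaps) completed copies queued on sk_async_wait_queue.
 */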
#ifdef CONFIG_NET_DMA
static void tcp_service_net_dma(struct sock *sk, bool wait)
	if (!tp->ucopy.dma_chan)
	last_issued = tp->ucopy.dma_cookie;
		__skb_queue_purge(&sk->sk_async_wait_queue);
	while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
	       (dma_async_is_complete(skb->dma_cookie, done,
		__skb_dequeue(&sk->sk_async_wait_queue);
		if (tcp_hdr(skb)->syn)
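/*
 * tcp_read_sock(): hand received data directly to recv_actor() without an
 * intermediate copy; this is what splice() and friends build on.
 */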
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
				if (urg_offset < len)
			used = recv_actor(desc, skb, offset, len);
			} else if (used <= len) {
		skb = tcp_recv_skb(sk, seq-1, &offset);
		if (!skb || (offset+1 != skb->len))
		if (tcp_hdr(skb)->fin) {
			sk_eat_skb(sk, skb, false);
		sk_eat_skb(sk, skb, false);
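/*
 * tcp_recvmsg(): the main blocking receive path; copy from the receive
 * queue, optionally service the prequeue/backlog, and honour MSG_PEEK,
 * MSG_WAITALL, urgent data and the NET_DMA early-copy case.
 */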
		size_t len, int nonblock, int flags, int *addr_len)
	bool copied_early = false;
	timeo = sock_rcvtimeo(sk, nonblock);
	if (flags & MSG_OOB)
	if (!(flags & MSG_PEEK))
	if (flags & MSG_PEEK) {
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
#ifdef CONFIG_NET_DMA
		if ((available < target) &&
			tp->ucopy.pinned_list =
		if (signal_pending(current)) {
			copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
		     "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
			if (tcp_hdr(skb)->syn)
			if (tcp_hdr(skb)->fin)
		WARN(!(flags & MSG_PEEK),
		     "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
		if (copied >= target && !sk->sk_backlog.tail)
				copied = sock_error(sk);
			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
		if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
			tp->ucopy.task = user_recv;
		if (!skb_queue_empty(&tp->ucopy.prequeue))
#ifdef CONFIG_NET_DMA
		if (tp->ucopy.dma_chan) {
		    !skb_queue_empty(&sk->sk_async_wait_queue)) {
			tcp_service_net_dma(sk, true);
		if (copied >= target) {
#ifdef CONFIG_NET_DMA
		tcp_service_net_dma(sk, false);
		tp->ucopy.wakeup = 0;
			if ((chunk = len - tp->ucopy.len) != 0) {
			    !skb_queue_empty(&tp->ucopy.prequeue)) {
				tcp_prequeue_process(sk);
				if ((chunk = len - tp->ucopy.len) != 0) {
		if ((flags & MSG_PEEK) &&
		    (peek_seq - copied - urg_hole != tp->copied_seq)) {
			if (urg_offset < used) {
#ifdef CONFIG_NET_DMA
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
		if (tp->ucopy.dma_chan) {
					tp->ucopy.dma_chan, skb, offset,
					tp->ucopy.pinned_list);
			if (tp->ucopy.dma_cookie < 0) {
			if ((offset + used) == skb->len)
				copied_early = true;
			tcp_fast_path_check(sk);
		if (tcp_hdr(skb)->fin)
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = false;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = false;
		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
			tp->ucopy.len = copied > 0 ? len : 0;
			tcp_prequeue_process(sk);
			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
#ifdef CONFIG_NET_DMA
	tcp_service_net_dma(sk, true);
	if (tp->ucopy.pinned_list) {
	err = tcp_recv_urg(sk, msg, len, flags);
	err = tcp_peek_sndq(sk, msg, len);
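/*
 * tcp_set_state(): switch sk->sk_state, unhashing the socket and dropping
 * its bound port when it moves to TCP_CLOSE.
 */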
	int oldstate = sk->sk_state;
		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash &&
	sk->sk_state = state;
	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
static const unsigned char new_state[16] = {

static int tcp_close_state(struct sock *sk)
	int next = (int)new_state[sk->sk_state];
	if ((1 << sk->sk_state) &
		if (tcp_close_state(sk))
	bool too_many_orphans, out_of_socket_memory;

	too_many_orphans = tcp_too_many_orphans(sk, shift);
	out_of_socket_memory = tcp_out_of_memory(sk);

	if (too_many_orphans)
	if (out_of_socket_memory)
	return too_many_orphans || out_of_socket_memory;
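/*
 * tcp_close(): discard unread data (sending a RST if there was any), move
 * through the FIN states, and orphan the socket while it finishes dying.
 */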
	int data_was_unread = 0;
		goto adjudge_to_death;
		data_was_unread += len;
		goto adjudge_to_death;
	if (unlikely(tcp_sk(sk)->repair)) {
		sk->sk_prot->disconnect(sk, 0);
	} else if (data_was_unread) {
		sk->sk_prot->disconnect(sk, 0);
	} else if (tcp_close_state(sk)) {
	state = sk->sk_state;
	percpu_counter_inc(sk->sk_prot->orphan_count);
		const int tmo = tcp_fin_time(sk);
static inline bool tcp_need_reset(int state)
	return (1 << state) &
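/*
 * tcp_disconnect(): forcibly return an existing socket to TCP_CLOSE, e.g.
 * for connect(AF_UNSPEC), sending a RST first where the state requires it.
 */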
	int old_state = sk->sk_state;
	} else if (tcp_need_reset(old_state) ||
	tcp_clear_xmit_timers(sk);
	tcp_write_queue_purge(sk);
#ifdef CONFIG_NET_DMA
	__skb_queue_purge(&sk->sk_async_wait_queue);
	inet_reset_saddr(sk);
	inet_csk_delack_init(sk);
	tcp_init_send_head(sk);
	kfree(inet_csk(sk)->icsk_accept_queue.fastopenq);
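/*
 * TCP_REPAIR support (checkpoint/restore): only privileged sockets placed
 * in repair mode may have sequence numbers and negotiated options
 * (window scaling, SACK, timestamps) written back directly.
 */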
static inline bool tcp_can_repair_sock(const struct sock *sk)

static int tcp_repair_options_est(struct tcp_sock *tp,
	while (len >= sizeof(opt)) {
		switch (opt.opt_code) {
			u16 snd_wscale = opt.opt_val & 0xFFFF;
			u16 rcv_wscale = opt.opt_val >> 16;

			if (snd_wscale > 14 || rcv_wscale > 14)
			tp->rx_opt.snd_wscale = snd_wscale;
			tp->rx_opt.rcv_wscale = rcv_wscale;
			tp->rx_opt.wscale_ok = 1;
			if (opt.opt_val != 0)
			if (sysctl_tcp_fack)
				tcp_enable_fack(tp);
			if (opt.opt_val != 0)
			tp->rx_opt.tstamp_ok = 1;
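/*
 * do_tcp_setsockopt(): the TCP-level socket options (TCP_NODELAY, TCP_MAXSEG,
 * TCP_CORK, the keepalive knobs, TCP_REPAIR, TCP_FASTOPEN, MD5 signatures
 * and, in this kernel generation, TCP_COOKIE_TRANSACTIONS).  Reached from
 * userspace roughly like this (illustrative only):
 *
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
 */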
static int do_tcp_setsockopt(struct sock *sk, int level,
		int optname, char __user *optval, unsigned int optlen)
		if (sizeof(ctd) > optlen)
		if (ctd.tcpct_used > sizeof(ctd.tcpct_value) ||
		if (ctd.tcpct_cookie_desired == 0) {
		} else if ((0x1 & ctd.tcpct_cookie_desired) ||
					 tcp_cookie_values_release);
			tp->rx_opt.cookie_in_always = 0;
			tp->rx_opt.cookie_out_never = 1;
		if (ctd.tcpct_used > 0 ||
		    (sysctl_tcp_cookie_size > 0 ||
		     ctd.tcpct_cookie_desired > 0 ||
		     ctd.tcpct_s_data_desired > 0))) {
			cvp = kzalloc(sizeof(*cvp) + ctd.tcpct_used,
			kref_init(&cvp->kref);
			tp->rx_opt.cookie_in_always =
			tp->rx_opt.cookie_out_never = 0;
					 tcp_cookie_values_release);
			if (ctd.tcpct_used > 0) {

	if (optlen < sizeof(int))
	if (get_user(val, (int __user *)optval))
			tcp_push_pending_frames(sk);
		if (val < 0 || val > 1)
		if (val < 0 || val > 1)
			tcp_disable_early_retrans(tp);
		if (!tcp_can_repair_sock(sk))
		else if (val == 1) {
		} else if (val == 0) {
			err = tcp_repair_options_est(tp,
				tcp_push_pending_frames(sk);
		    !((1 << sk->sk_state) &
			u32 elapsed = keepalive_time_elapsed(tp);
		else if (val > sysctl_tcp_fin_timeout / HZ)
		if ((1 << sk->sk_state) &
		    inet_csk_ack_scheduled(sk)) {
#ifdef CONFIG_TCP_MD5SIG
		err = tp->af_specific->md5_parse(sk, optval, optlen);
		if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
			err = fastopen_init_queue(sk, val);
		   unsigned int optlen)
		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);

#ifdef CONFIG_COMPAT
			  char __user *optval, unsigned int optlen)
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
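/*
 * tcp_get_info(): fill struct tcp_info (rtt, cwnd, negotiated options, ...)
 * for getsockopt(TCP_INFO) and the inet_diag interface.
 */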
	const struct tcp_sock *tp = tcp_sk(sk);

	memset(info, 0, sizeof(*info));

	if (tp->rx_opt.tstamp_ok)
	if (tcp_is_sack(tp))
	if (tp->rx_opt.wscale_ok) {
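/*
 * do_tcp_getsockopt(): read back the TCP-level options; most answers are a
 * single int, TCP_INFO returns a struct tcp_info.
 */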
static int do_tcp_getsockopt(struct sock *sk, int level,
		int optname, char __user *optval, int __user *optlen)
	len = min_t(unsigned int, len, sizeof(int));
		val = tp->rx_opt.user_mss;
			val = tp->rx_opt.mss_clamp;
		val = keepalive_time_when(tp) / HZ;
		val = keepalive_intvl_when(tp) / HZ;
		len = min_t(unsigned int, len, sizeof(info));
		if (len < sizeof(ctd))
		memset(&ctd, 0, sizeof(ctd));
		ctd.tcpct_flags = (tp->rx_opt.cookie_in_always ?
				| (tp->rx_opt.cookie_out_never ?
		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);

#ifdef CONFIG_COMPAT
			  char __user *optval, int __user *optlen)
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
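/*
 * tcp_tso_segment(): software GSO; split a big TSO skb into mss-sized
 * segments, fixing up sequence numbers, flags and checksums on each piece.
 */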
	unsigned int oldlen;

	if (!pskb_may_pull(skb, sizeof(*th)))
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
	if (!pskb_may_pull(skb, thlen))
	__skb_pull(skb, thlen);
	mss = skb_shinfo(skb)->gso_size;
		int type = skb_shinfo(skb)->gso_type;
	delta = htonl(oldlen + (thlen + mss));
		th->fin = th->psh = 0;
	} while (skb->next);
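/*
 * tcp_gro_receive(): GRO aggregation; a packet is merged into a held one
 * only if ports and TCP options match and its sequence number exactly
 * continues the previous segment, otherwise the flow is flushed.
 */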
	unsigned int mss = 1;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
	skb_gro_pull(skb, thlen);
	len = skb_gro_len(skb);
	for (; (p = *head); head = &p->next) {
			goto out_check_final;
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
	mss = skb_shinfo(p)->gso_size;
	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
		goto out_check_final;
	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
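/*
 * TCP MD5 signatures (RFC 2385): a refcounted pool of per-cpu crypto
 * contexts shared by all sockets that use TCP_MD5SIG keys.
 */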
#ifdef CONFIG_TCP_MD5SIG
static unsigned long tcp_md5sig_users;

static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)

	spin_lock_bh(&tcp_md5sig_pool_lock);
	if (--tcp_md5sig_users == 0) {
		pool = tcp_md5sig_pool;
		tcp_md5sig_pool = NULL;
	spin_unlock_bh(&tcp_md5sig_pool_lock);
		__tcp_free_md5sig_pool(pool);
static struct tcp_md5sig_pool __percpu *
__tcp_alloc_md5sig_pool(struct sock *sk)
	struct tcp_md5sig_pool __percpu *pool;
		if (!hash || IS_ERR(hash))
	__tcp_free_md5sig_pool(pool);

	struct tcp_md5sig_pool __percpu *pool;

	spin_lock_bh(&tcp_md5sig_pool_lock);
	pool = tcp_md5sig_pool;
	if (tcp_md5sig_users++ == 0) {
		spin_unlock_bh(&tcp_md5sig_pool_lock);
		spin_unlock_bh(&tcp_md5sig_pool_lock);
	spin_unlock_bh(&tcp_md5sig_pool_lock);
	p = __tcp_alloc_md5sig_pool(sk);
	spin_lock_bh(&tcp_md5sig_pool_lock);
		spin_unlock_bh(&tcp_md5sig_pool_lock);
	pool = tcp_md5sig_pool;
		spin_unlock_bh(&tcp_md5sig_pool_lock);
		__tcp_free_md5sig_pool(p);
		tcp_md5sig_pool = pool = p;
	spin_unlock_bh(&tcp_md5sig_pool_lock);
	spin_lock(&tcp_md5sig_pool_lock);
	p = tcp_md5sig_pool;
	spin_unlock(&tcp_md5sig_pool_lock);
	err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr));
	const struct tcphdr *tp = tcp_hdr(skb);
	const unsigned int head_data_len = skb_headlen(skb) > header_len ?
					   skb_headlen(skb) - header_len : 0;
	sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
	if (crypto_hash_update(desc, &sg, head_data_len))
		struct page *page = skb_frag_page(f);
		sg_set_page(&sg, page, skb_frag_size(f), f->page_offset);
		if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
	skb_walk_frags(skb, frag_iter)
	return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
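/*
 * TCP cookie transactions (RFC 6013): the cookie secrets below are rotated
 * on roughly MSL-based lifetimes; the facility was experimental and was
 * later removed from the kernel.
 */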
#define TCP_SECRET_1MSL (HZ * TCP_PAWS_MSL)
#define TCP_SECRET_2MSL (HZ * TCP_PAWS_MSL * 2)
#define TCP_SECRET_LIFE (HZ * 600)

static inline u32 tcp_cookie_work(const u32 *ws, const int n)

	unsigned long jiffy = jiffies;

	spin_lock_bh(&tcp_secret_locker);
	if (!time_after_eq(jiffy, tcp_secret_generating->expires)) {
		       &tcp_secret_generating->secrets[0],
		if (unlikely(tcp_secret_primary->expires ==
			     tcp_secret_secondary->expires)) {
			tcp_secret_secondary->expires = jiffy
				+ (0x0f & tcp_cookie_work(bakery, 0));
			tcp_secret_secondary->expires = jiffy
				+ (0xff & tcp_cookie_work(bakery, 1));
			tcp_secret_primary->expires = jiffy
				+ (0x1f & tcp_cookie_work(bakery, 2));
		memcpy(&tcp_secret_secondary->secrets[0],
				   tcp_secret_secondary);
				   tcp_secret_primary);
	spin_unlock_bh(&tcp_secret_locker);
	rcu_read_unlock_bh();
	tcp_clear_xmit_timers(sk);

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
	ret = kstrtoul(str, 0, &thash_entries);
__setup("thash_entries=", set_thash_entries);
	limit = max(limit, 128UL);
	net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
	net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
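/*
 * tcp_init(): boot-time setup of the bind and established hash tables
 * (sized via totalram_pages and the thash_entries= parameter), the default
 * buffer limits, and the initial cookie secrets.
 */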
	unsigned long limit;
	int max_rshare, max_wshare, cnt;
	unsigned long jiffy = jiffies;
					(totalram_pages >= 128 * 1024) ?
					thash_entries ? 0 : 512 * 1024);
		panic("TCP: failed to alloc ehash_locks");
					(totalram_pages >= 128 * 1024) ?
	max_wshare = min(4UL*1024*1024, limit);
	max_rshare = min(6UL*1024*1024, limit);

	pr_info("Hash tables configured (established %u bind %u)\n",

	memset(&tcp_secret_one.secrets[0], 0, sizeof(tcp_secret_one.secrets));
	memset(&tcp_secret_two.secrets[0], 0, sizeof(tcp_secret_two.secrets));
	tcp_secret_one.expires = jiffy;
	tcp_secret_two.expires = jiffy;
	tcp_secret_generating = &tcp_secret_one;
	tcp_secret_primary = &tcp_secret_one;
	tcp_secret_retiring = &tcp_secret_two;
	tcp_secret_secondary = &tcp_secret_two;