#define pr_fmt(fmt) "TCP: " fmt

#include <linux/compiler.h>
#include <linux/module.h>
static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);
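/* Bookkeeping for a newly transmitted data skb: advance the send head past
 * it, update snd_nxt/packets_out and, when nothing else was in flight,
 * re-arm the retransmit timer.
 */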
static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
	tcp_advance_send_head(sk, skb);
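/* Sequence number to place in a bare (no data) ACK or RST: snd_nxt if it is
 * still inside the receiver's window, otherwise the right edge of the window.
 */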
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
	return tcp_wnd_end(tp);
static __u16 tcp_advertise_mss(struct sock *sk)
		unsigned int metric = dst_metric_advmss(dst);
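/* Congestion-window restart after an idle period (see
 * sysctl_tcp_slow_start_after_idle): the window is halved once for every RTO
 * that elapsed while the connection was idle, but never reduced below the
 * restart window.
 */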
static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
	restart_cwnd = min(restart_cwnd, cwnd);
	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
static void tcp_event_data_sent(struct tcp_sock *tp, struct sock *sk)
	if (sysctl_tcp_slow_start_after_idle &&
		tcp_cwnd_restart(sk, __sk_dst_get(sk));
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
	tcp_dec_quickack_mode(sk, pkts);
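/* The fragments below are from tcp_select_initial_window(), which picks the
 * initial advertised receive window, the window clamp and the receive window
 * scale for a new connection.
 */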
			       int wscale_ok, __u8 *rcv_wscale,
	unsigned int space = (__space < 0 ? 0 : __space);

	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);
	space = (space / mss) * mss;

	space = min_t(u32, space, *window_clamp);
	while (space > 65535 && (*rcv_wscale) < 14) {
	if (mss > (1 << *rcv_wscale)) {
		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
		*rcv_wnd = min(*rcv_wnd, init_cwnd * mss);
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
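/* In the full function, the while loop above halves 'space' and increments
 * *rcv_wscale until the window fits into the 16-bit window field.
 * Illustrative example (not from the source): starting with space = 1 MB
 * (1048576 bytes) it finishes with space = 32768 and *rcv_wscale = 5, so
 * advertised window values are sent right-shifted by 5 bits.
 */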
static u16 tcp_select_window(struct sock *sk)
	u32 cur_win = tcp_receive_window(tp);

	if (new_win < cur_win) {
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
	new_win >>= tp->rx_opt.rcv_wscale;
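/* Note on the fragments above: the advertised window is never shrunk below
 * what was previously offered, and with window scaling disabled it can be
 * capped at 32767 (sysctl_tcp_workaround_signed_windows) for peers that
 * treat the window field as a signed quantity.
 */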
static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb)

static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
	if (sysctl_tcp_ecn == 1) {

	if (inet_rsk(req)->ecn_ok)
static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
				int tcp_header_len)
	if (skb->len != tcp_header_len &&
			tcp_hdr(skb)->cwr = 1;
		INET_ECN_dontxmit(sk);
		tcp_hdr(skb)->ece = 1;
	skb_shinfo(skb)->gso_segs = 1;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_type = 0;

static inline bool tcp_urg_mode(const struct tcp_sock *tp)

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_COOKIE_EXTENSION	(1 << 4)
#define OPTION_FAST_OPEN_COOKIE	(1 << 8)
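/* Each OPTION_* bit is collected in struct tcp_out_options so that the
 * option writer knows which TCP options to emit into the header being built.
 */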
static u8 tcp_cookie_size_check(u8 desired)
	if (cookie_size <= 0)
	return (u8)cookie_size;

	if (0x2 & cookie_size) {
		*p++ = *cookie_copy++;
		*p++ = *cookie_copy++;
	if (cookie_size > 0) {
		memcpy(ptr, cookie_copy, cookie_size);
		ptr += (cookie_size / 4);

	if ((foc->len & 3) == 2) {
	ptr += (foc->len + 3) >> 2;
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
	u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);

	opts->mss = tcp_advertise_mss(sk);

	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
	if (likely(sysctl_tcp_window_scaling)) {
	if (likely(sysctl_tcp_sack)) {

	if (fastopen && fastopen->cookie.len >= 0) {
		need = (need + 3) & ~3U;
		if (remaining >= need) {
	if (need > remaining) {
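/* SYN option selection: the MSS is always advertised; timestamps, window
 * scaling, SACK-permitted, the experimental cookie extension and a Fast Open
 * cookie are only added while they still fit into the 40 bytes of TCP option
 * space tracked by 'remaining'.
 */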
static unsigned int tcp_synack_options(struct sock *sk,
				       unsigned int mss, struct sk_buff *skb,

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);

		need = (need + 3) & ~3U;
		if (remaining >= need) {
		int need = cookie_plus;
		if (need <= remaining) {
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
	unsigned int size = 0;
	unsigned int eff_sacks;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);

			min_t(unsigned int, eff_sacks,
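/* TCP Small Queues machinery: tcp_wfree() is the destructor of transmitted
 * skbs; when a throttled socket's packets leave the qdisc/NIC layer, the
 * socket is queued on a per-cpu list and the tasklet below (tcp_tasklet_func
 * -> tcp_tsq_handler) pushes its pending frames.  The TCP_*_DEFERRED bits
 * record work postponed because the socket was locked by the user; it is
 * performed later in tcp_release_cb().
 */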
static void tcp_tsq_handler(struct sock *sk)
	if ((1 << sk->sk_state) &

static void tcp_tasklet_func(unsigned long data)
	list_splice_init(&tsq->head, &list);
		sk = (struct sock *)tp;

#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) |		\
			  (1UL << TCP_WRITE_TIMER_DEFERRED) |	\
			  (1UL << TCP_DELACK_TIMER_DEFERRED) |	\
			  (1UL << TCP_MTU_REDUCED_DEFERRED))

	unsigned long flags, nflags;
	nflags = flags & ~TCP_DEFERRED_ALL;
		sk->sk_prot->mtu_reduced(sk);

		INIT_LIST_HEAD(&tsq->head);

static void tcp_wfree(struct sk_buff *skb)
	struct sock *sk = skb->sk;
		tasklet_schedule(&tsq->tasklet);
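/* tcp_transmit_skb() below is the single point where a TCP header is built
 * and the packet is handed to the IP layer: the skb is (optionally) cloned or
 * copied, the option set is sized (SYN vs. established), the tcphdr is filled
 * in (the 16-bit word at byte offset 12 packs data offset and flags), options
 * and the optional MD5 signature are written, and counters are updated.
 */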
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
			    gfp_t gfp_mask)
	unsigned int tcp_options_size, tcp_header_size;

	BUG_ON(!skb || !tcp_skb_pcount(skb));
		__net_timestamp(skb);
			skb = pskb_copy(skb, gfp_mask);

	memset(&opts, 0, sizeof(opts));
		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
		tcp_options_size = tcp_established_options(sk, skb, &opts,
	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

	if (tcp_packets_in_flight(tp) == 0) {

	skb_reset_transport_header(skb);
	*(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |

	if (before(tp->snd_up, tcb->seq + 0x10000)) {

	tcp_options_write((__be32 *)(th + 1), tp, &opts);
	TCP_ECN_send(sk, skb, tcp_header_size);

#ifdef CONFIG_TCP_MD5SIG
					       md5, sk, NULL, skb);

		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

	if (skb->len != tcp_header_size)
		tcp_event_data_sent(tp, sk);

		      tcp_skb_pcount(skb));
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
				 unsigned int mss_now)
	if (skb->len <= mss_now || !sk_can_gso(sk) ||
		skb_shinfo(skb)->gso_segs = 1;
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_type = 0;
		skb_shinfo(skb)->gso_size = mss_now;
static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,

static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
	if (tcp_is_reno(tp) && decr > 0)
	tcp_adjust_fackets_out(sk, skb, decr);
	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
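/* tcp_fragment() (fragments below) splits a queued skb at 'len' bytes: the
 * tail of the data and its checksum move into a new buffer, the TSO packet
 * counts of both halves are recomputed, and the new buffer is linked right
 * after the original skb on the write queue.
 */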
		 unsigned int mss_now)
	int nsize, old_factor;

	nsize = skb_headlen(skb) - len;

	if (skb_cloned(skb) &&
	    skb_is_nonlinear(skb) &&

		skb->csum = csum_block_sub(skb->csum, buff->csum, len);

	old_factor = tcp_skb_pcount(skb);
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
		int diff = old_factor - tcp_skb_pcount(skb) -
			   tcp_skb_pcount(buff);
			tcp_adjust_pcount(sk, skb, diff);

	skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk);
static void __pskb_trim_head(struct sk_buff *skb, int len)
	eat = min_t(int, len, skb_headlen(skb));
		__skb_pull(skb, eat);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
			skb_frag_unref(skb, i);
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
	skb_shinfo(skb)->nr_frags = k;
	skb_reset_tail_pointer(skb);
	__pskb_trim_head(skb, len);
	sk_mem_uncharge(sk, len);

	if (tcp_skb_pcount(skb) > 1)
		tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
	const struct tcp_sock *tp = tcp_sk(sk);
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_allfrag(dst))
			mss_now -= icsk->icsk_af_ops->net_frag_header_len;

	if (mss_now > tp->rx_opt.mss_clamp)
		mss_now = tp->rx_opt.mss_clamp;
	const struct tcp_sock *tp = tcp_sk(sk);
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_allfrag(dst))
	      icsk->icsk_af_ops->net_header_len;
	icsk->icsk_mtup.probe_size = 0;
	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
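/* The fragments below are from tcp_current_mss(): recompute the effective
 * MSS for this moment, re-syncing with the route if the path MTU changed and
 * subtracting whatever TCP option bytes will be present in every segment.
 */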
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);

		u32 mtu = dst_mtu(dst);
		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)

	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
static void tcp_cwnd_validate(struct sock *sk)
		if (sysctl_tcp_slow_start_after_idle &&
static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
					unsigned int mss_now, unsigned int max_segs)
	const struct tcp_sock *tp = tcp_sk(sk);

	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
	max_len = mss_now * max_segs;

	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))

	needed = min(skb->len, window);

	if (max_len <= needed)

	return needed - needed % mss_now;
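/* tcp_cwnd_test() returns how many more segments the congestion window
 * currently allows (snd_cwnd minus the packets in flight); a lone FIN
 * segment is always allowed through.
 */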
static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
	u32 in_flight, cwnd;
	    tcp_skb_pcount(skb) == 1)

	in_flight = tcp_packets_in_flight(tp);

	if (in_flight < cwnd)
		return (cwnd - in_flight);
static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
			     unsigned int mss_now)
	int tso_segs = tcp_skb_pcount(skb);

	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
		tcp_set_skb_tso_segs(sk, skb, mss_now);
		tso_segs = tcp_skb_pcount(skb);
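/* Nagle/Minshall checks: a segment shorter than the MSS is held back while
 * earlier small segments are still unacknowledged, unless Nagle is disabled
 * (TCP_NODELAY / TCP_NAGLE_PUSH), the connection is in urgent mode, or the
 * segment carries a FIN.
 */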
static inline bool tcp_minshall_check(const struct tcp_sock *tp)

static inline bool tcp_nagle_check(const struct tcp_sock *tp,
				   const struct sk_buff *skb,
				   unsigned int mss_now, int nonagle)
	return skb->len < mss_now &&
		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));

static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
				  unsigned int cur_mss, int nonagle)
	    (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))

	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
			     const struct sk_buff *skb,
			     unsigned int cur_mss)
	if (skb->len > cur_mss)

	return !after(end_seq, tcp_wnd_end(tp));
static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
	const struct tcp_sock *tp = tcp_sk(sk);
	unsigned int cwnd_quota;

	tcp_init_tso_segs(sk, skb, cur_mss);

	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))

	cwnd_quota = tcp_cwnd_test(tp, skb);
	if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))

	const struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = tcp_send_head(sk);
			     (tcp_skb_is_last(sk, skb) ?
			      tp->nonagle : TCP_NAGLE_PUSH));
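/* tso_fragment() is the fast-path analogue of tcp_fragment() for skbs that
 * have never been transmitted: it only has to split the payload and TSO
 * state, not preserve any SACK or retransmission bookkeeping.
 */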
static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
			unsigned int mss_now, gfp_t gfp)
	int nlen = skb->len - len;

	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk);
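/* tcp_tso_should_defer(): hold back a not-yet-full TSO skb in the hope that
 * more data arrives and a larger frame can be sent, but never defer beyond
 * what the send and congestion windows allow, and no more than a few MSS
 * worth (tcp_max_tso_deferred_mss()).
 */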
static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
	u32 send_win, cong_win, limit, in_flight;

	in_flight = tcp_packets_in_flight(tp);

	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;

	limit = min(send_win, cong_win);

	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))

	win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
		chunk /= win_divisor;

	if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
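/* MTU probing (RFC 4821): tcp_mtu_probe() copies data from the head of the
 * write queue into a single skb of probe_size bytes, inserts it ahead of the
 * remaining queue and transmits it; an ACK covering the probe lets the path
 * MTU estimate be raised.
 */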
static int tcp_mtu_probe(struct sock *sk)
	if (tp->snd_wnd < size_needed)
	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
		if (!tcp_packets_in_flight(tp))

	skb = tcp_send_head(sk);
	tcp_insert_write_queue_before(nskb, skb, sk);

		copy = min_t(int, skb->len, probe_size - len);

		if (skb->len <= copy) {
			tcp_unlink_write_queue(skb, sk);
			sk_wmem_free_skb(sk, skb);
			if (!skb_shinfo(skb)->nr_frags) {
				__pskb_trim_head(skb, copy);
			tcp_set_skb_tso_segs(sk, skb, mss_now);

		if (len >= probe_size)

	tcp_init_tso_segs(sk, nskb, nskb->len);

	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
		tcp_event_new_data_sent(sk, nskb);
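/* tcp_write_xmit() is the main transmit loop: for each skb at the send head
 * it checks the congestion window, the receive window, the Nagle test and
 * TSO deferral, splits over-sized skbs with tso_fragment(), transmits via
 * tcp_transmit_skb(), and finally revalidates the congestion window.
 */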
static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp)
	unsigned int tso_segs, sent_pkts;

		result = tcp_mtu_probe(sk);
		} else if (result > 0) {

	while ((skb = tcp_send_head(sk))) {
		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);

		cwnd_quota = tcp_cwnd_test(tp, skb);

		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))

		if (tso_segs == 1) {
			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
						     (tcp_skb_is_last(sk, skb) ?
						      nonagle : TCP_NAGLE_PUSH))))
			if (!push_one && tcp_tso_should_defer(sk, skb))

		if (tso_segs > 1 && !tcp_urg_mode(tp))
			limit = tcp_mss_split_point(sk, skb, mss_now,

		if (skb->len > limit &&
		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))

		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))

		tcp_event_new_data_sent(sk, skb);

		tcp_minshall_update(tp, mss_now, skb);
		sent_pkts += tcp_skb_pcount(skb);

		if (tcp_in_cwnd_reduction(sk))
		tcp_cwnd_validate(sk);
	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
		tcp_check_probe_timer(sk);

	struct sk_buff *skb = tcp_send_head(sk);
	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
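/* __tcp_select_window() (fragments below): base the advertised window on the
 * free receive buffer space; when less than one MSS is free, the offer drops
 * toward zero.  With window scaling in use the result is rounded to a
 * multiple of 1 << rcv_wscale, otherwise to a multiple of the MSS.
 */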
	int free_space = tcp_space(sk);

	if (mss > full_space)

	if (free_space < (full_space >> 1)) {
		if (sk_under_memory_pressure(sk))
		if (free_space < mss)

	if (tp->rx_opt.rcv_wscale) {
		window = free_space;
		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
				  << tp->rx_opt.rcv_wscale);
		if (window <= free_space - mss || window > free_space)
			window = (free_space / mss) * mss;
		else if (mss == full_space &&
			 free_space > window + (full_space >> 1))
			window = free_space;
static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
	struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
	int skb_size, next_skb_size;

	skb_size = skb->len;
	next_skb_size = next_skb->len;

	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);

	tcp_highest_sack_combine(sk, next_skb, skb);

	tcp_unlink_write_queue(next_skb, sk);

	skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size),

		skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);

	tcp_clear_retrans_hints_partial(tp);

	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));

	sk_wmem_free_skb(sk, next_skb);
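/* tcp_can_collapse(): a queued segment may be merged into its predecessor on
 * retransmit only if it is a single-MSS, linear, non-cloned skb that is not
 * the current send head and has not been SACKed.
 */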
static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
	if (tcp_skb_pcount(skb) > 1)
	if (skb_shinfo(skb)->nr_frags != 0)
	if (skb_cloned(skb))
	if (skb == tcp_send_head(sk))
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
	if (!sysctl_tcp_retrans_collapse)

		if (!tcp_can_collapse(sk, skb))

		if (skb->len > skb_availroom(to))

		tcp_collapse_retrans(sk, to);
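/* tcp_retransmit_skb() path (fragments below): give up if the header cannot
 * be rebuilt or the data now lies entirely beyond the receiver's window;
 * otherwise re-fragment or collapse the skb to the current MSS and push it
 * through tcp_transmit_skb() again.
 */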
	unsigned int cur_mss;

	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))

	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&

	if (skb->len > cur_mss) {
		int oldpcount = tcp_skb_pcount(skb);

			tcp_init_tso_segs(sk, skb, cur_mss);
			tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));

	tcp_retrans_try_collapse(sk, skb, cur_mss);

	    (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
	    tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
		if (!pskb_trim(skb, 0)) {
			tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1,

		return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :

	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
#if FASTRETRANS_DEBUG > 0
		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
static bool tcp_can_forward_retransmit(struct sock *sk)
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_is_reno(tp))
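/* tcp_xmit_retransmit_queue() (fragments below): walk the retransmit queue
 * and resend segments marked lost first, then ("forward retransmit")
 * never-retransmitted, un-SACKed segments below the highest SACKed sequence,
 * all within the congestion window; the RTO timer is re-armed when the head
 * of the queue is retransmitted.
 */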
	int fwd_rexmitting = 0;

		skb = tcp_write_queue_head(sk);

		if (skb == tcp_send_head(sk))

		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)

		if (fwd_rexmitting) {
			if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
			if (!tcp_can_forward_retransmit(sk))
		} else if (!(sacked & TCPCB_LOST)) {
			if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))

		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))

		if (tcp_in_cwnd_reduction(sk))
			tp->prr_out += tcp_skb_pcount(skb);

		if (skb == tcp_write_queue_head(sk))
						  inet_csk(sk)->icsk_rto,
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (tcp_send_head(sk) != NULL) {

		tcp_init_nondata_skb(skb, tp->write_seq,
	tcp_queue_skb(sk, skb);
	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),

	if (tcp_transmit_skb(sk, skb, 0, priority))
	skb = tcp_write_queue_head(sk);
		pr_debug("%s: wrong queue state\n", __func__);

		if (skb_cloned(skb)) {
			tcp_unlink_write_queue(skb, sk);
			skb_header_release(nskb);
			__tcp_add_write_queue_head(sk, nskb);
			sk_wmem_free_skb(sk, skb);

	TCP_ECN_send_synack(tcp_sk(sk), skb);
	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
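/* tcp_make_synack() (fragments below): build the SYN-ACK for a request
 * socket: pick the MSS to advertise (bounded by the user MSS if set), lay
 * out the SYN-ACK options, write the initial sequence number, and append an
 * MD5 signature when one is configured.
 */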
	int tcp_header_size;
	int s_data_desired = 0;

	skb_dst_set(skb, dst);

	mss = dst_metric_advmss(dst);
		mss = tp->rx_opt.user_mss;

	memset(&opts, 0, sizeof(opts));
#ifdef CONFIG_SYN_COOKIES
	tcp_header_size = tcp_synack_options(sk, req, mss,
					     skb, &opts, &md5, xvp, foc)

	skb_reset_transport_header(skb);
	TCP_ECN_make_synack(req, th);
	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,

	if (s_data_desired) {

		*tail-- ^= opts.tsval;
		*tail-- ^= tcp_rsk(req)->rcv_isn + 1;
		*tail-- ^= (u32)(unsigned long)cvp;

	tcp_options_write((__be32 *)(th + 1), tp, &opts);
	th->doff = (tcp_header_size >> 2);

#ifdef CONFIG_TCP_MD5SIG
		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
							 md5, NULL, req, skb);
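/* tcp_connect_init() (fragments below): per-connection setup done just
 * before the SYN is sent: TCP header length, MSS clamp and advertised MSS,
 * window clamp, the initial receive window and window scale (via
 * tcp_select_initial_window()), and the initial send/receive sequence state.
 */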
	const struct dst_entry *dst = __sk_dst_get(sk);
		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);

#ifdef CONFIG_TCP_MD5SIG
	if (tp->af_specific->md5_lookup(sk, sk) != NULL)

	if (tp->rx_opt.user_mss)
		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;

	if (!tp->window_clamp)
		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
	tp->advmss = dst_metric_advmss(dst);
	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
		tp->advmss = tp->rx_opt.user_mss;

	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
		tp->window_clamp = tcp_full_space(sk);

				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
				  dst_metric(dst, RTAX_INITRWND));

	tp->rx_opt.rcv_wscale = rcv_wscale;
	tp->rcv_ssthresh = tp->rcv_wnd;

	sock_reset_flag(sk, SOCK_DONE);
	tp->snd_una = tp->write_seq;
	tp->snd_sml = tp->write_seq;
	tp->snd_up = tp->write_seq;
	tp->snd_nxt = tp->write_seq;

	if (likely(!tp->repair))
	tp->rcv_wup = tp->rcv_nxt;
	tp->copied_seq = tp->rcv_nxt;
	inet_csk(sk)->icsk_retransmits = 0;
static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
	skb_header_release(skb);
	__tcp_add_write_queue_tail(sk, skb);
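/* TCP Fast Open, client side (tcp_send_syn_data() below): when a usable
 * cookie is cached for this peer, copy as much user data as fits from the
 * iovec into a syn_data skb and transmit it together with the SYN; otherwise
 * only the plain SYN goes out and the data follows after the handshake.
 */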
static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
	int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen;
	unsigned long last_syn_loss = 0;

			       &syn_loss, &last_syn_loss);

	else if (fo->cookie.len <= 0)

	if (syn_data == NULL)

	for (i = 0; i < iovlen && syn_data->len < space; ++i) {

		if (syn_data->len + len > space)
			len = space - syn_data->len;
		else if (i + 1 == iovlen)

		if (skb_add_data(syn_data, from, len))

	tcp_connect_queue_skb(sk, data);

	if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) {
	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);

	tcp_connect_queue_skb(sk, buff);
	TCP_ECN_send_syn(sk, buff);
	const struct tcp_sock *tp = tcp_sk(sk);
	int max_ato = HZ / 2;

	ato = min(ato, max_ato);
		inet_csk_schedule_ack(sk);

	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);

	tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);

	tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
	tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq;
	tcp_xmit_probe_skb(sk, 0);
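/* tcp_write_wakeup() (fragments below): the window-probe path.  If queued
 * data fits (or can be trimmed to fit) into the advertised window it is sent
 * directly; otherwise a bare probe carrying an already-acknowledged sequence
 * number is emitted via tcp_xmit_probe_skb().
 */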
	if ((skb = tcp_send_head(sk)) != NULL &&
	    before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;

		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))

			seg_size = min(seg_size, mss);
		} else if (!tcp_skb_pcount(skb))
			tcp_set_skb_tso_segs(sk, skb, mss);

		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
			tcp_event_new_data_sent(sk, skb);

		tcp_xmit_probe_skb(sk, 1);
		return tcp_xmit_probe_skb(sk, 0);