/* tcp.h, from Linux kernel 3.7.1 */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <[email protected]>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>
extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER		(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE	40

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16-bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Offer an initial receive window of 10 mss. */
#define TCP_DEFAULT_INIT_RCVWND	10

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The least MTU to use for probing */
#define TCP_BASE_MSS		512

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH	3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC 1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	6	/* This is how many retries are done
				 * when active opening a connection.
				 * RFC 1122 says the minimum retry MUST
				 * be at least 180secs. Nevertheless
				 * this value corresponds to 63secs of
				 * retransmission with the current
				 * initial RTO.
				 */

#define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
				 * when passive opening a connection.
				 * This corresponds to 31secs of
				 * retransmission with the current
				 * initial RTO.
				 */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds */
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC 6298 2.1 initial RTO value */
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal to
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_COOKIE		253	/* Cookie extension (experimental) */
#define TCPOPT_EXP		254	/* Experimental */
/* Magic number to be after the option value for sharing TCP
 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 */
#define TCPOPT_FASTOPEN_MAGIC	0xF989

/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18
#define TCPOLEN_EXP_FASTOPEN_BASE  4
#define TCPOLEN_COOKIE_BASE	2	/* Cookie-less header extension */
#define TCPOLEN_COOKIE_PAIR	3	/* Cookie pair header extension */
#define TCPOLEN_COOKIE_MIN	(TCPOLEN_COOKIE_BASE+TCP_COOKIE_MIN)
#define TCPOLEN_COOKIE_MAX	(TCPOLEN_COOKIE_BASE+TCP_COOKIE_MAX)

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6	/* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */
#define TCP_INIT_CWND		10

/* Bit Flags for sysctl_tcp_fastopen */
#define	TFO_CLIENT_ENABLE	1
#define	TFO_SERVER_ENABLE	2
#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */

/* Process SYN data but skip cookie validation */
#define	TFO_SERVER_COOKIE_NOT_CHKED	0x100
/* Accept SYN data w/o any cookie option */
#define	TFO_SERVER_COOKIE_NOT_REQD	0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option. SOCKOPT1/2 determine how to set max_qlen.
 */
#define	TFO_SERVER_WO_SOCKOPT1	0x400
#define	TFO_SERVER_WO_SOCKOPT2	0x800
/* Always create TFO child sockets on a TFO listener even when
 * cookie/data not present. (For testing purpose!)
 */
#define	TFO_SERVER_ALWAYS	0x1000

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_fastopen;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_frto_response;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_max_ssthresh;
extern int sysctl_tcp_cookie_size;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;
extern int sysctl_tcp_early_retrans;
extern int sysctl_tcp_limit_output_bytes;
extern int sysctl_tcp_challenge_ack_limit;

extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
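
/*
 * Editor's note, not in the original header: a worked example of the
 * wraparound arithmetic above. Sequence numbers live on a 32-bit ring,
 * so ordering is decided by the sign of the circular difference:
 *
 *	before(0xFFFFFFF0, 0x00000010)
 *		= (__s32)(0xFFFFFFF0 - 0x00000010) < 0
 *		= (__s32)0xFFFFFFE0 < 0   (i.e. -32 < 0)   -> true
 *
 * so 0xFFFFFFF0 is "before" 0x10 even though it is numerically larger,
 * because the sequence space has wrapped past zero.
 */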

static inline bool tcp_out_of_memory(struct sock *sk)
{
	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
		return true;
	return false;
}

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
	int orphans = percpu_counter_read_positive(ocp);

	if (orphans << shift > sysctl_tcp_max_orphans) {
		orphans = percpu_counter_sum_positive(ocp);
		if (orphans << shift > sysctl_tcp_max_orphans)
			return true;
	}
	return false;
}

extern bool tcp_check_oom(struct sock *sk, int shift);

/* syncookies: remember time of last synqueue overflow */
static inline void tcp_synq_overflow(struct sock *sk)
{
	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
}

extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

extern void tcp_init_mem(struct net *net);

extern void tcp_tasklet_init(void);

extern void tcp_v4_err(struct sk_buff *skb, u32);

extern void tcp_shutdown(struct sock *sk, int how);

extern void tcp_v4_early_demux(struct sk_buff *skb);
extern int tcp_v4_rcv(struct sk_buff *skb);

extern struct inet_peer *tcp_v4_get_peer(struct sock *sk);

extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t size);
extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
			size_t size, int flags);
extern void tcp_release_cb(struct sock *sk);
extern void tcp_write_timer_handler(struct sock *sk);
extern void tcp_delack_timer_handler(struct sock *sk);
extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
				 const struct tcphdr *th, unsigned int len);
extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			       const struct tcphdr *th, unsigned int len);
extern void tcp_rcv_space_adjust(struct sock *sk);
extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
extern void tcp_twsk_destructor(struct sock *sk);
extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4
#define	TCP_ECN_SEEN		8

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
						     struct sk_buff *skb,
						     const struct tcphdr *th);
extern struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct request_sock **prev,
				  bool fastopen);
extern int tcp_child_process(struct sock *parent, struct sock *child,
			     struct sk_buff *skb);
extern bool tcp_use_frto(struct sock *sk);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);
extern void tcp_init_metrics(struct sock *sk);
extern void tcp_metrics_init(void);
extern bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check);
extern bool tcp_remember_stamp(struct sock *sk);
extern bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
extern void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
extern void tcp_disable_fack(struct tcp_sock *tp);
extern void tcp_close(struct sock *sk, long timeout);
extern void tcp_init_sock(struct sock *sk);
extern unsigned int tcp_poll(struct file *file, struct socket *sock,
			     struct poll_table_struct *wait);
extern int tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
extern int tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen);
extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, int __user *optlen);
extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, unsigned int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t len, int nonblock, int flags, int *addr_len);
extern void tcp_parse_options(const struct sk_buff *skb,
			      struct tcp_options_received *opt_rx, const u8 **hvpp,
			      int estab, struct tcp_fastopen_cookie *foc);
extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);

/*
 *	TCP v4 functions exported for the inet6 API
 */

extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
extern struct sock *tcp_create_openreq_child(struct sock *sk,
					     struct request_sock *req,
					     struct sk_buff *skb);
extern struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst);
extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len);
extern int tcp_connect(struct sock *sk);
extern struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
				       struct request_sock *req,
				       struct request_values *rvp,
				       struct tcp_fastopen_cookie *foc);
extern int tcp_disconnect(struct sock *sk, int flags);

void tcp_connect_init(struct sock *sk);
void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
#ifdef CONFIG_SYN_COOKIES
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);
#else
static inline __u32 cookie_v4_init_sequence(struct sock *sk,
					    struct sk_buff *skb,
					    __u16 *mss)
{
	return 0;
}
#endif

extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);

/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
extern __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
				     __u16 *mss);
#else
static inline __u32 cookie_v6_init_sequence(struct sock *sk,
					    struct sk_buff *skb,
					    __u16 *mss)
{
	return 0;
}
#endif
/* tcp_output.c */

extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
				      int nonagle);
extern bool tcp_may_send_now(struct sock *sk);
extern int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_retransmit_timer(struct sock *sk);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern bool tcp_syn_flood_action(struct sock *sk,
				 const struct sk_buff *skb,
				 const char *proto);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

/* tcp_input.c */
extern void tcp_cwnd_application_limited(struct sock *sk);
extern void tcp_resume_early_retransmit(struct sock *sk);
extern void tcp_rearm_rto(struct sock *sk);
extern void tcp_reset(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}

extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window >= 512)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
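
/*
 * Editor's note, not in the original header: two illustrative cases of
 * the clamping above. With max_window = 65535, cutoff = 32767, so a
 * 49152-byte TSO chunk is cut down to max(32767, 68 - header_len) =
 * 32767. With a tiny max_window of 300 (< 512), cutoff = 300 and a
 * 1460-byte packet is bounded to 300.
 */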

/* tcp.c */
extern void tcp_get_info(const struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
			       unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);

extern void tcp_initialize_rcv_mss(struct sock *sk);

extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
extern int tcp_mss_to_mtu(struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);
extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);
extern void tcp_init_buffer_space(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return (tp->srtt >> 3) + tp->rttvar;
}

extern void tcp_set_rto(struct sock *sk);

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}
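
/*
 * Editor's note, not in the original header: pred_flags is the VJ
 * header-prediction template. __tcp_fast_path_on() builds the expected
 * 4th word of the TCP header: tcp_header_len << 26 places the 4-bit
 * data-offset field (header bytes / 4) in the top nibble, OR'd with the
 * ACK flag and the expected window, so the receive path can compare a
 * single 32-bit word of each incoming header against this template.
 */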

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}
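
/*
 * Editor's note, not in the original header: a worked example. If the
 * window was last advertised at rcv_wup = 1000 with rcv_wnd = 500, and
 * the peer has sent up to rcv_nxt = 1400, the remaining advertised
 * window is 1000 + 500 - 1400 = 100 bytes. Had the peer overrun the
 * offer (rcv_nxt = 1600), the result would clamp to 0 rather than go
 * negative.
 */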

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result. The caller does these things
 * if necessary. This is a "raw" window selection.
 */
extern u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below. We decided
 * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here too.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/
#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
	/* 1 byte hole */
	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

/* RFC 3168, 6.1.1: SYN packets must not have ECT/ECN bits set.
 *
 * If we receive a SYN packet with these bits set, it means a network is
 * playing bad games with TOS bits. In order to avoid possible false
 * congestion notifications, we disable TCP ECN negotiation.
 */
static inline void
TCP_ECN_create_request(struct request_sock *req, const struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);

	if (sysctl_tcp_ecn && th->ece && th->cwr &&
	    INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield))
		inet_rsk(req)->ecn_ok = 1;
}

/* Due to TSO, an SKB can be composed of multiple actual
 * packets. To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_FRTO,		/* fast recovery timeout */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_FAST_ACK,	/* in sequence ack */
	CA_EVENT_SLOW_ACK,	/* other ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CONG_NON_RESTRICTED 0x1
#define TCP_CONG_RTT_STAMP	0x2

struct tcp_congestion_ops {
	struct list_head	list;
	unsigned long		flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* lower bound for congestion window (optional) */
	u32 (*min_cwnd)(const struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* new value of cwnd after loss (optional) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
	/* get info for inet_diag (optional) */
	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};

extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern void tcp_get_available_congestion_control(char *buf, size_t len);
extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
extern int tcp_set_allowed_congestion_control(char *allowed);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);
extern void tcp_slow_start(struct tcp_sock *tp);
extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);

extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;
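
/*
 * Editor's note, not in the original header: an illustrative sketch of
 * a minimal congestion control module. Only ssthresh and cong_avoid are
 * required hooks, and the Reno helpers above are a valid no-frills
 * choice; "tcp_example" is a made-up name.
 *
 *	static struct tcp_congestion_ops tcp_example = {
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.min_cwnd	= tcp_reno_min_cwnd,
 *		.owner		= THIS_MODULE,
 *		.name		= "example",
 *	};
 *
 *	static int __init tcp_example_init(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 */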

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok;
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline bool tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
}

/* TCP early-retransmit (ER) is similar to but more conservative than
 * the thin-dupack feature. Enable ER only if thin-dupack is disabled.
 */
static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
{
	tp->do_early_retrans = sysctl_tcp_early_retrans &&
		!sysctl_tcp_thin_dupack && sysctl_tcp_reordering == 3;
	tp->early_retrans_delayed = 0;
}

static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
{
	tp->do_early_retrans = 0;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge. In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
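
/*
 * Editor's note, not in the original header: a worked example of the
 * equation above. With packets_out = 10, sacked_out = 2, lost_out = 1
 * and retrans_out = 1:
 *
 *	in_flight = 10 - (2 + 1) + 1 = 8
 *
 * two SACKed segments and one presumed-lost segment have left the
 * network, while one retransmission has re-entered it.
 */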

#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
{
	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
	       (1 << inet_csk(sk)->icsk_ca_state);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is cwnd reduction phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_in_cwnd_reduction(sk))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}
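
/*
 * Editor's note, not in the original header: (snd_cwnd >> 1) +
 * (snd_cwnd >> 2) is 3/4 * cwnd in integer arithmetic, so outside of
 * cwnd reduction, ssthresh is raised to within 25% of the current
 * window, e.g. cwnd = 40 yields max(snd_ssthresh, 20 + 10) =
 * max(snd_ssthresh, 30).
 */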

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
	return 3;
}

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto". This will be the default - same as
 * the default reordering threshold - but if reordering increases,
 * we must be able to allow cwnd to burst at least this much in order
 * to not pull it back when holes are filled.
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return tp->reordering;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}
extern bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);

static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
				       const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out && !icsk->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  icsk->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	tp->ucopy.wakeup = 0;
	tp->ucopy.pinned_list = NULL;
	tp->ucopy.dma_cookie = 0;
#endif
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)			--ANK
 *
 * NOTE: is this not too big to inline?
 */
static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}


#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
	"Unused", "Established", "Syn Sent", "Syn Recv",
	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
	"Close Wait", "Last ACK", "Listen", "Closing"
};
#endif
extern void tcp_set_state(struct sock *sk, int state);

extern void tcp_done(struct sock *sk);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale,
				      __u32 init_rcv_wnd);

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale <= 0 ?
		(space >> (-sysctl_tcp_adv_win_scale)) :
		space - (space >> sysctl_tcp_adv_win_scale);
}
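
/*
 * Editor's note, not in the original header: a worked example. With
 * sysctl_tcp_adv_win_scale = 2 (the default in this era) and
 * space = 65536, win = 65536 - (65536 >> 2) = 49152, i.e. 3/4 of the
 * buffer is offered as receive window and 1/4 is reserved for
 * application and metadata overhead. A scale <= 0 would instead offer
 * space >> (-scale).
 */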

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}

static inline void tcp_openreq_init(struct request_sock *req,
				    struct tcp_options_received *rx_opt,
				    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->cookie_ts = 0;
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
	tcp_rsk(req)->snt_synack = 0;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->rmt_port = tcp_hdr(skb)->source;
	ireq->loc_port = tcp_hdr(skb)->dest;
}

/* Compute time elapsed between SYNACK and the ACK completing 3WHS */
static inline void tcp_synack_rtt_meas(struct sock *sk,
				       struct request_sock *req)
{
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(sk,
			tcp_time_stamp - tcp_rsk(req)->snt_synack);
}

extern void tcp_enter_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
			  tcp_time_stamp - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}
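
/*
 * Editor's note, not in the original header: (rto << 2) - (rto >> 1)
 * is 3.5 * RTO, so the FIN_WAIT2 lifetime is never allowed below three
 * and a half retransmission timeouts; e.g. rto = 200ms gives a floor
 * of 700ms.
 */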

static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
				  int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return true;
	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
		return true;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
	 * then following tcp messages have valid values. Ignore 0 value,
	 * or else 'negative' tsval might forbid us to accept their packets.
	 */
	if (!rx_opt->ts_recent)
		return true;
	return false;
}
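
/*
 * Editor's note, not in the original header: a worked example of the
 * check above with paws_win = 0. With ts_recent = 5000 and an arriving
 * rcv_tsval = 4000, (s32)(5000 - 4000) = 1000 > 0, so the segment only
 * passes if the stored stamp is older than TCP_PAWS_24DAYS or
 * ts_recent is 0; otherwise it is treated as a PAWS duplicate. An
 * rcv_tsval of 5000 or newer passes immediately.
 */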

static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
				   int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return false;

	/* RST segments are not recommended to carry timestamp,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, it is a mistake. It is necessary to understand the reasons
	   of this constraint to relax it: if peer reboots, clock may go
	   out-of-sync and half-open connections will not be reset.
	   Actually, the problem would not exist if all
	   the implementations followed the draft about maintaining clock
	   via reboots. Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return false;
	return true;
}

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
	tp->scoreboard_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}

/* MD5 Signature */

union tcp_md5_addr {
	struct in_addr		a4;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr		a6;
#endif
};

/* - key database */
struct tcp_md5sig_key {
	struct hlist_node	node;
	u8			keylen;
	u8			family; /* AF_INET or AF_INET6 */
	union tcp_md5_addr	addr;
	u8			key[TCP_MD5SIG_MAXKEYLEN];
	struct rcu_head		rcu;
};

/* - sock block */
struct tcp_md5sig_info {
	struct hlist_head	head;
	struct rcu_head		rcu;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if IS_ENABLED(CONFIG_IPV6)
	struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
	struct hash_desc	md5_desc;
	union tcp_md5sum_block	md5_blk;
};

/* - functions */
extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb);
extern int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
			  int family, const u8 *newkey,
			  u8 newkeylen, gfp_t gfp);
extern int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
			  int family);
extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
						struct sock *addr_sk);

#ifdef CONFIG_TCP_MD5SIG
extern struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					const union tcp_md5_addr *addr, int family);
#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					const union tcp_md5_addr *addr,
					int family)
{
	return NULL;
}
#define tcp_twsk_md5_key(twsk)	NULL
#endif

extern struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *);
extern void tcp_free_md5sig_pool(void);

extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
extern void tcp_put_md5sig_pool(void);

extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
				 unsigned int header_len);
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
			    const struct tcp_md5sig_key *key);

/* From tcp_fastopen.c */
extern void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
				   struct tcp_fastopen_cookie *cookie,
				   int *syn_loss, unsigned long *last_syn_loss);
extern void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
				   struct tcp_fastopen_cookie *cookie,
				   bool syn_lost);
struct tcp_fastopen_request {
	/* Fast Open cookie. Size 0 means a cookie request */
	struct tcp_fastopen_cookie	cookie;
	struct msghdr			*data;	/* data in MSG_FASTOPEN */
	u16				copied;	/* queued in tcp_connect() */
};
void tcp_free_fastopen_req(struct tcp_sock *tp);

extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
int tcp_fastopen_reset_cipher(void *key, unsigned int len);
void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc);

#define TCP_FASTOPEN_KEY_LENGTH 16

/* Fastopen key context */
struct tcp_fastopen_context {
	struct crypto_cipher __rcu	*tfm;
	__u8				key[TCP_FASTOPEN_KEY_LENGTH];
	struct rcu_head			rcu;
};

/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_wmem_free_skb(sk, skb);
	sk_mem_reclaim(sk);
	tcp_clear_all_retrans_hints(tcp_sk(sk));
}

static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
						   const struct sk_buff *skb)
{
	return skb_queue_next(&sk->sk_write_queue, skb);
}

static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
						   const struct sk_buff *skb)
{
	return skb_queue_prev(&sk->sk_write_queue, skb);
}

#define tcp_for_write_queue(skb, sk)					\
	skb_queue_walk(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from(skb, sk)				\
	skb_queue_walk_from(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)

static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
	return sk->sk_send_head;
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}

static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
{
	if (tcp_skb_is_last(sk, skb))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = tcp_write_queue_next(sk, skb);
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
	if (sk->sk_send_head == skb_unlinked)
		sk->sk_send_head = NULL;
}

static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}

static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__tcp_add_write_queue_tail(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL) {
		sk->sk_send_head = skb;

		if (tcp_sk(sk)->highest_sack == NULL)
			tcp_sk(sk)->highest_sack = skb;
	}
}

static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_head(&sk->sk_write_queue, skb);
}

/* Insert buff after skb on the write queue of sk. */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
						struct sk_buff *buff,
						struct sock *sk)
{
	__skb_queue_after(&sk->sk_write_queue, skb, buff);
}

/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);

	if (sk->sk_send_head == skb)
		sk->sk_send_head = new;
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	__skb_unlink(skb, &sk->sk_write_queue);
}

static inline bool tcp_write_queue_empty(struct sock *sk)
{
	return skb_queue_empty(&sk->sk_write_queue);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
	if (tcp_send_head(sk)) {
		struct tcp_sock *tp = tcp_sk(sk);

		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
	}
}

/* Start sequence of the skb just after the highest skb with SACKed
 * bit, valid only if sacked_out > 0 or when the caller has ensured
 * validity by itself.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
	if (!tp->sacked_out)
		return tp->snd_una;

	if (tp->highest_sack == NULL)
		return tp->snd_nxt;

	return TCP_SKB_CB(tp->highest_sack)->seq;
}

static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
	tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
						tcp_write_queue_next(sk, skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
	return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
}

/* Called when old skb is about to be deleted (to be combined with new skb) */
static inline void tcp_highest_sack_combine(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
		tcp_sk(sk)->highest_sack = new;
}

/* Determines whether this is a thin stream (which may suffer from
 * increased latency). Used to trigger latency-reducing mechanisms.
 */
static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
{
	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
}

/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_OPENREQ,
	TCP_SEQ_STATE_ESTABLISHED,
	TCP_SEQ_STATE_TIME_WAIT,
};

int tcp_seq_open(struct inode *inode, struct file *file);

struct tcp_seq_afinfo {
	char				*name;
	sa_family_t			family;
	const struct file_operations	*seq_fops;
	struct seq_operations		seq_ops;
};

struct tcp_iter_state {
	struct seq_net_private	p;
	sa_family_t		family;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, offset, sbucket, num;
	kuid_t			uid;
	loff_t			last_pos;
};

extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

extern void tcp_v4_destroy_sock(struct sock *sk);

extern int tcp_v4_gso_send_check(struct sk_buff *skb);
extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
				       netdev_features_t features);
extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
					struct sk_buff *skb);
extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb);
extern int tcp_gro_complete(struct sk_buff *skb);
extern int tcp4_gro_complete(struct sk_buff *skb);

#ifdef CONFIG_PROC_FS
extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
#endif

/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct sock *addr_sk);
	int		(*calc_md5_hash) (char *location,
					  struct tcp_md5sig_key *md5,
					  const struct sock *sk,
					  const struct request_sock *req,
					  const struct sk_buff *skb);
	int		(*md5_parse) (struct sock *sk,
				      char __user *optval,
				      int optlen);
#endif
};

struct tcp_request_sock_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct request_sock *req);
	int		(*calc_md5_hash) (char *location,
					  struct tcp_md5sig_key *md5,
					  const struct sock *sk,
					  const struct request_sock *req,
					  const struct sk_buff *skb);
#endif
};

/* Using SHA1 for now, define some constants.
 */
#define COOKIE_DIGEST_WORDS	(SHA_DIGEST_WORDS)
#define COOKIE_MESSAGE_WORDS	(SHA_MESSAGE_BYTES / 4)
#define COOKIE_WORKSPACE_WORDS	(COOKIE_DIGEST_WORDS + COOKIE_MESSAGE_WORDS)

extern int tcp_cookie_generator(u32 *bakery);

struct tcp_cookie_values {
	struct kref	kref;
	u8		cookie_pair[TCP_COOKIE_PAIR_SIZE];
	u8		cookie_pair_size;
	u8		cookie_desired;
	u16		s_data_desired:11,
			s_data_constant:1,
			s_data_in:1,
			s_data_out:1,
			s_data_unused:2;
	u8		s_data_payload[0];
};

static inline void tcp_cookie_values_release(struct kref *kref)
{
	kfree(container_of(kref, struct tcp_cookie_values, kref));
}

/* The length of constant payload data. Note that s_data_desired is
 * overloaded, depending on s_data_constant: either the length of constant
 * data (returned here) or the limit on variable data.
 */
static inline int tcp_s_data_size(const struct tcp_sock *tp)
{
	return (tp->cookie_values != NULL && tp->cookie_values->s_data_constant)
		? tp->cookie_values->s_data_desired
		: 0;
}

struct tcp_extend_values {
	struct request_values		rv;
	u32				cookie_bakery[COOKIE_WORKSPACE_WORDS];
	u8				cookie_plus:6,
					cookie_out_never:1,
					cookie_in_always:1;
};

static inline struct tcp_extend_values *tcp_xv(struct request_values *rvp)
{
	return (struct tcp_extend_values *)rvp;
}

extern void tcp_v4_init(void);
extern void tcp_init(void);

#endif	/* _TCP_H */