Linux Kernel 3.7.1
net/ipv4/tcp_metrics.c
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

int sysctl_tcp_nometrics_save __read_mostly;

struct tcp_fastopen_metrics {
        u16 mss;
        u16 syn_loss:10;                /* Recurring Fast Open SYN losses */
        unsigned long last_syn_loss;    /* Last Fast Open SYN loss */
        struct tcp_fastopen_cookie cookie;
};

struct tcp_metrics_block {
        struct tcp_metrics_block __rcu *tcpm_next;
        struct inetpeer_addr tcpm_addr;
        unsigned long tcpm_stamp;
        u32 tcpm_ts;
        u32 tcpm_ts_stamp;
        u32 tcpm_lock;
        u32 tcpm_vals[TCP_METRIC_MAX + 1];
        struct tcp_fastopen_metrics tcpm_fastopen;

        struct rcu_head rcu_head;
};

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
                              enum tcp_metric_index idx)
{
        return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
                          enum tcp_metric_index idx)
{
        return tm->tcpm_vals[idx];
}

static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
                                  enum tcp_metric_index idx)
{
        return msecs_to_jiffies(tm->tcpm_vals[idx]);
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
                           enum tcp_metric_index idx,
                           u32 val)
{
        tm->tcpm_vals[idx] = val;
}

static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
                                 enum tcp_metric_index idx,
                                 u32 val)
{
        tm->tcpm_vals[idx] = jiffies_to_msecs(val);
}

static bool addr_same(const struct inetpeer_addr *a,
                      const struct inetpeer_addr *b)
{
        const struct in6_addr *a6, *b6;

        if (a->family != b->family)
                return false;
        if (a->family == AF_INET)
                return a->addr.a4 == b->addr.a4;

        a6 = (const struct in6_addr *) &a->addr.a6[0];
        b6 = (const struct in6_addr *) &b->addr.a6[0];

        return ipv6_addr_equal(a6, b6);
}

struct tcpm_hash_bucket {
        struct tcp_metrics_block __rcu *chain;
};

static DEFINE_SPINLOCK(tcp_metrics_lock);

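/* Seed a cache entry from its route: copy each dst metric into
 * tcpm_vals[] and record in tcpm_lock which metrics the route has
 * locked, so later updates leave those read-only.
 */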
static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
        u32 val;

        tm->tcpm_stamp = jiffies;

        val = 0;
        if (dst_metric_locked(dst, RTAX_RTT))
                val |= 1 << TCP_METRIC_RTT;
        if (dst_metric_locked(dst, RTAX_RTTVAR))
                val |= 1 << TCP_METRIC_RTTVAR;
        if (dst_metric_locked(dst, RTAX_SSTHRESH))
                val |= 1 << TCP_METRIC_SSTHRESH;
        if (dst_metric_locked(dst, RTAX_CWND))
                val |= 1 << TCP_METRIC_CWND;
        if (dst_metric_locked(dst, RTAX_REORDERING))
                val |= 1 << TCP_METRIC_REORDERING;
        tm->tcpm_lock = val;

        tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
        tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
        tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
        tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
        tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
        tm->tcpm_ts = 0;
        tm->tcpm_ts_stamp = 0;
        tm->tcpm_fastopen.mss = 0;
        tm->tcpm_fastopen.syn_loss = 0;
        tm->tcpm_fastopen.cookie.len = 0;
}

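/* Allocate a cache entry for @addr, or, when the caller saw a chain
 * deeper than TCP_METRICS_RECLAIM_DEPTH (reclaim == true), recycle the
 * entry with the oldest tcpm_stamp instead of growing the chain.
 */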
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
                                          struct inetpeer_addr *addr,
                                          unsigned int hash,
                                          bool reclaim)
{
        struct tcp_metrics_block *tm;
        struct net *net;

        spin_lock_bh(&tcp_metrics_lock);
        net = dev_net(dst->dev);
        if (unlikely(reclaim)) {
                struct tcp_metrics_block *oldest;

                oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
                for (tm = rcu_dereference(oldest->tcpm_next); tm;
                     tm = rcu_dereference(tm->tcpm_next)) {
                        if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
                                oldest = tm;
                }
                tm = oldest;
        } else {
                tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
                if (!tm)
                        goto out_unlock;
        }
        tm->tcpm_addr = *addr;

        tcpm_suck_dst(tm, dst);

        if (likely(!reclaim)) {
                tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
                rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
        }

out_unlock:
        spin_unlock_bh(&tcp_metrics_lock);
        return tm;
}

#define TCP_METRICS_TIMEOUT     (60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
        if (tm && unlikely(time_after(jiffies,
                                      tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
                tcpm_suck_dst(tm, dst);
}

#define TCP_METRICS_RECLAIM_DEPTH       5
#define TCP_METRICS_RECLAIM_PTR         (struct tcp_metrics_block *) 0x1UL

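/* A lookup can return the matching entry, NULL, or the 0x1 sentinel
 * TCP_METRICS_RECLAIM_PTR, which tells tcp_get_metrics() the chain is
 * already at maximum depth and the oldest entry should be recycled.
 */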
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
        if (tm)
                return tm;
        if (depth > TCP_METRICS_RECLAIM_DEPTH)
                return TCP_METRICS_RECLAIM_PTR;
        return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
                                                   struct net *net, unsigned int hash)
{
        struct tcp_metrics_block *tm;
        int depth = 0;

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, addr))
                        break;
                depth++;
        }
        return tcp_get_encode(tm, depth);
}

static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
                                                       struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net;

        addr.family = req->rsk_ops->family;
        switch (addr.family) {
        case AF_INET:
                addr.addr.a4 = inet_rsk(req)->rmt_addr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
        case AF_INET6:
                *(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
                hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr);
                break;
        default:
                return NULL;
        }

        net = dev_net(dst->dev);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, &addr))
                        break;
        }
        tcpm_check_stamp(tm, dst);
        return tm;
}

static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
        struct inet6_timewait_sock *tw6;
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net;

        addr.family = tw->tw_family;
        switch (addr.family) {
        case AF_INET:
                addr.addr.a4 = tw->tw_daddr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
        case AF_INET6:
                tw6 = inet6_twsk((struct sock *)tw);
                *(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
                hash = ipv6_addr_hash(&tw6->tw_v6_daddr);
                break;
        default:
                return NULL;
        }

        net = twsk_net(tw);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, &addr))
                        break;
        }
        return tm;
}

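/* Hashed lookup keyed on the peer address; with create == true a miss
 * allocates (or reclaims) a fresh entry, otherwise only an existing
 * entry is returned, refreshed from the route if it has gone stale.
 */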
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
                                                 struct dst_entry *dst,
                                                 bool create)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net;
        bool reclaim;

        addr.family = sk->sk_family;
        switch (addr.family) {
        case AF_INET:
                addr.addr.a4 = inet_sk(sk)->inet_daddr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
        case AF_INET6:
                *(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
                hash = ipv6_addr_hash(&inet6_sk(sk)->daddr);
                break;
        default:
                return NULL;
        }

        net = dev_net(dst->dev);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        tm = __tcp_get_metrics(&addr, net, hash);
        reclaim = false;
        if (tm == TCP_METRICS_RECLAIM_PTR) {
                reclaim = true;
                tm = NULL;
        }
        if (!tm && create)
                tm = tcpm_new(dst, &addr, hash, reclaim);
        else
                tcpm_check_stamp(tm, dst);

        return tm;
}

/* Save metrics learned by this TCP session. This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_metrics_block *tm;
        unsigned long rtt;
        u32 val;
        int m;

        if (sysctl_tcp_nometrics_save || !dst)
                return;

        if (dst->flags & DST_HOST)
                dst_confirm(dst);

        rcu_read_lock();
        if (icsk->icsk_backoff || !tp->srtt) {
                /* This session failed to estimate rtt. Why?
                 * Probably, no packets returned in time. Reset our
                 * results.
                 */
                tm = tcp_get_metrics(sk, dst, false);
                if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
                        tcp_metric_set(tm, TCP_METRIC_RTT, 0);
                goto out_unlock;
        } else
                tm = tcp_get_metrics(sk, dst, true);

        if (!tm)
                goto out_unlock;

        rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
        m = rtt - tp->srtt;

        /* If the newly calculated rtt is larger than the stored one,
         * store the new one. Otherwise, use EWMA. Remember, rtt
         * overestimation is always better than underestimation.
         */
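        /* tp->srtt is kept left-shifted by 3 (RFC 6298 fixed point) and the
         * cached RTT metric uses the same scale, so "rtt -= m >> 3" below is
         * the usual gain-1/8 smoothing: new = 7/8 * old + 1/8 * sample.
         */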
        if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
                if (m <= 0)
                        rtt = tp->srtt;
                else
                        rtt -= (m >> 3);
                tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
        }

        if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
                unsigned long var;

                if (m < 0)
                        m = -m;

                /* Scale deviation to rttvar fixed point */
                m >>= 1;
                if (m < tp->mdev)
                        m = tp->mdev;

                var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
                if (m >= var)
                        var = m;
                else
                        var -= (var - m) >> 2;

                tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
        }

        if (tcp_in_initial_slowstart(tp)) {
                /* Slow start still did not finish. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && (tp->snd_cwnd >> 1) > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tp->snd_cwnd >> 1);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        if (tp->snd_cwnd > val)
                                tcp_metric_set(tm, TCP_METRIC_CWND,
                                               tp->snd_cwnd);
                }
        } else if (tp->snd_cwnd > tp->snd_ssthresh &&
                   icsk->icsk_ca_state == TCP_CA_Open) {
                /* Cong. avoidance phase, cwnd is reliable. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
                        tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
                }
        } else {
                /* Else slow start did not finish, cwnd is non-sense,
                 * ssthresh may be also invalid.
                 */
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND,
                                       (val + tp->snd_ssthresh) >> 1);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && tp->snd_ssthresh > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tp->snd_ssthresh);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
                        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
                        if (val < tp->reordering &&
                            tp->reordering != sysctl_tcp_reordering)
                                tcp_metric_set(tm, TCP_METRIC_REORDERING,
                                               tp->reordering);
                }
        }
        tm->tcpm_stamp = jiffies;
out_unlock:
        rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_metrics_block *tm;
        u32 val;

        if (dst == NULL)
                goto reset;

        dst_confirm(dst);

        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (!tm) {
                rcu_read_unlock();
                goto reset;
        }

        if (tcp_metric_locked(tm, TCP_METRIC_CWND))
                tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
        if (val) {
                tp->snd_ssthresh = val;
                if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
                        tp->snd_ssthresh = tp->snd_cwnd_clamp;
        } else {
                /* ssthresh may have been reduced unnecessarily during
                 * 3WHS. Restore it back to its initial default.
                 */
                tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        }
        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
        if (val && tp->reordering != val) {
                tcp_disable_fack(tp);
                tcp_disable_early_retrans(tp);
                tp->reordering = val;
        }

        val = tcp_metric_get(tm, TCP_METRIC_RTT);
        if (val == 0 || tp->srtt == 0) {
                rcu_read_unlock();
                goto reset;
        }
        /* The initial rtt is determined from the SYN,SYN-ACK exchange.
         * Those segments are small and the rtt may appear much lower
         * than the real one. Use the per-dst memory to make it more
         * realistic.
         *
         * A bit of theory. RTT is the time passed after a "normal" sized
         * packet is sent until it is ACKed. In normal circumstances
         * sending small packets forces the peer to delay ACKs, so the
         * calculation is still correct. The algorithm is adaptive and,
         * provided we follow the specs, it NEVER underestimates RTT.
         * BUT! If the peer pulls tricks, sending "quick acks" long enough
         * to drive the measured RTT down and then abruptly switching to
         * delayed ACKs, expect trouble.
         */
        val = msecs_to_jiffies(val);
        if (val > tp->srtt) {
                tp->srtt = val;
                tp->rtt_seq = tp->snd_nxt;
        }
        val = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
        if (val > tp->mdev) {
                tp->mdev = val;
                tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
        }
        rcu_read_unlock();

        tcp_set_rto(sk);
reset:
        if (tp->srtt == 0) {
                /* RFC6298: 5.7 We've failed to get a valid RTT sample from
                 * 3WHS. This is most likely due to retransmission,
                 * including spurious one. Reset the RTO back to 3secs
                 * from the more aggressive 1sec to avoid more spurious
                 * retransmission.
                 */
                tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
                inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
        }
        /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
         * retransmitted. In light of RFC6298 more aggressive 1sec
         * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
         * retransmission has occurred.
         */
        if (tp->total_retrans > 1)
                tp->snd_cwnd = 1;
        else
                tp->snd_cwnd = tcp_init_cwnd(tp, dst);
        tp->snd_cwnd_stamp = tcp_time_stamp;
}

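/* Used when accepting a new connection request: with paws_check the
 * request is rejected only if we hold a recent cached timestamp that is
 * ahead of the request's ts_recent by more than TCP_PAWS_WINDOW;
 * without it, the peer counts as proven if we have both an RTT sample
 * and a timestamp cached for it.
 */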
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
{
        struct tcp_metrics_block *tm;
        bool ret;

        if (!dst)
                return false;

        rcu_read_lock();
        tm = __tcp_get_metrics_req(req, dst);
        if (paws_check) {
                if (tm &&
                    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
                    (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
                        ret = false;
                else
                        ret = true;
        } else {
                if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
                        ret = true;
                else
                        ret = false;
        }
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);

void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (tm) {
                struct tcp_sock *tp = tcp_sk(sk);

                if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
                        tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
                        tp->rx_opt.ts_recent = tm->tcpm_ts;
                }
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);

/* VJ's idea. Save last timestamp seen from this destination and hold
 * it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        bool ret = false;

        if (dst) {
                struct tcp_metrics_block *tm;

                rcu_read_lock();
                tm = tcp_get_metrics(sk, dst, true);
                if (tm) {
                        struct tcp_sock *tp = tcp_sk(sk);

                        if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
                            ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
                             tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
                                tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
                                tm->tcpm_ts = tp->rx_opt.ts_recent;
                        }
                        ret = true;
                }
                rcu_read_unlock();
        }
        return ret;
}

bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
        struct tcp_metrics_block *tm;
        bool ret = false;

        rcu_read_lock();
        tm = __tcp_get_metrics_tw(tw);
        if (tm) {
                const struct tcp_timewait_sock *tcptw;
                struct sock *sk = (struct sock *) tw;

                tcptw = tcp_twsk(sk);
                if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
                    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
                     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
                        tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
                        tm->tcpm_ts = tcptw->tw_ts_recent;
                }
                ret = true;
        }
        rcu_read_unlock();

        return ret;
}

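/* Fast Open metrics span several words (mss, cookie, loss counters), so
 * a seqlock guards them: readers in tcp_fastopen_cache_get() retry the
 * copy instead of blocking the writer in tcp_fastopen_cache_set().
 */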
static DEFINE_SEQLOCK(fastopen_seqlock);

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
                            struct tcp_fastopen_cookie *cookie,
                            int *syn_loss, unsigned long *last_syn_loss)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
                unsigned int seq;

                do {
                        seq = read_seqbegin(&fastopen_seqlock);
                        if (tfom->mss)
                                *mss = tfom->mss;
                        *cookie = tfom->cookie;
                        *syn_loss = tfom->syn_loss;
                        *last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
                } while (read_seqretry(&fastopen_seqlock, seq));
        }
        rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
                            struct tcp_fastopen_cookie *cookie, bool syn_lost)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, __sk_dst_get(sk), true);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

                write_seqlock_bh(&fastopen_seqlock);
                tfom->mss = mss;
                if (cookie->len > 0)
                        tfom->cookie = *cookie;
                if (syn_lost) {
                        ++tfom->syn_loss;
                        tfom->last_syn_loss = jiffies;
                } else
                        tfom->syn_loss = 0;
                write_sequnlock_bh(&fastopen_seqlock);
        }
        rcu_read_unlock();
}

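/* Generic netlink interface (the "tcp_metrics" family) through which
 * userspace, e.g. iproute2's "ip tcp_metrics", can dump, query or flush
 * the cache.
 */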
static struct genl_family tcp_metrics_nl_family = {
        .id             = GENL_ID_GENERATE,
        .hdrsize        = 0,
        .name           = TCP_METRICS_GENL_NAME,
        .version        = TCP_METRICS_GENL_VERSION,
        .maxattr        = TCP_METRICS_ATTR_MAX,
        .netnsok        = true,
};

static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
        [TCP_METRICS_ATTR_ADDR_IPV4]    = { .type = NLA_U32, },
        [TCP_METRICS_ATTR_ADDR_IPV6]    = { .type = NLA_BINARY,
                                            .len = sizeof(struct in6_addr), },
        /* Following attributes are not received for GET/DEL,
         * we keep them for reference
         */
#if 0
        [TCP_METRICS_ATTR_AGE]          = { .type = NLA_MSECS, },
        [TCP_METRICS_ATTR_TW_TSVAL]     = { .type = NLA_U32, },
        [TCP_METRICS_ATTR_TW_TS_STAMP]  = { .type = NLA_S32, },
        [TCP_METRICS_ATTR_VALS]         = { .type = NLA_NESTED, },
        [TCP_METRICS_ATTR_FOPEN_MSS]    = { .type = NLA_U16, },
        [TCP_METRICS_ATTR_FOPEN_SYN_DROPS]      = { .type = NLA_U16, },
        [TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]    = { .type = NLA_MSECS, },
        [TCP_METRICS_ATTR_FOPEN_COOKIE] = { .type = NLA_BINARY,
                                            .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
                                 struct tcp_metrics_block *tm)
{
        struct nlattr *nest;
        int i;

        switch (tm->tcpm_addr.family) {
        case AF_INET:
                if (nla_put_be32(msg, TCP_METRICS_ATTR_ADDR_IPV4,
                                 tm->tcpm_addr.addr.a4) < 0)
                        goto nla_put_failure;
                break;
        case AF_INET6:
                if (nla_put(msg, TCP_METRICS_ATTR_ADDR_IPV6, 16,
                            tm->tcpm_addr.addr.a6) < 0)
                        goto nla_put_failure;
                break;
        default:
                return -EAFNOSUPPORT;
        }

        if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
                          jiffies - tm->tcpm_stamp) < 0)
                goto nla_put_failure;
        if (tm->tcpm_ts_stamp) {
                if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
                                (s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
                        goto nla_put_failure;
                if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
                                tm->tcpm_ts) < 0)
                        goto nla_put_failure;
        }

        {
                int n = 0;

                nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
                if (!nest)
                        goto nla_put_failure;
                for (i = 0; i < TCP_METRIC_MAX + 1; i++) {
                        if (!tm->tcpm_vals[i])
                                continue;
                        if (nla_put_u32(msg, i + 1, tm->tcpm_vals[i]) < 0)
                                goto nla_put_failure;
                        n++;
                }
                if (n)
                        nla_nest_end(msg, nest);
                else
                        nla_nest_cancel(msg, nest);
        }

        {
                struct tcp_fastopen_metrics tfom_copy[1], *tfom;
                unsigned int seq;

                do {
                        seq = read_seqbegin(&fastopen_seqlock);
                        tfom_copy[0] = tm->tcpm_fastopen;
                } while (read_seqretry(&fastopen_seqlock, seq));

                tfom = tfom_copy;
                if (tfom->mss &&
                    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
                                tfom->mss) < 0)
                        goto nla_put_failure;
                if (tfom->syn_loss &&
                    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
                                 tfom->syn_loss) < 0 ||
                     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
                                   jiffies - tfom->last_syn_loss) < 0))
                        goto nla_put_failure;
                if (tfom->cookie.len > 0 &&
                    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
                            tfom->cookie.len, tfom->cookie.val) < 0)
                        goto nla_put_failure;
        }

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
                                 struct netlink_callback *cb,
                                 struct tcp_metrics_block *tm)
{
        void *hdr;

        hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &tcp_metrics_nl_family, NLM_F_MULTI,
                          TCP_METRICS_CMD_GET);
        if (!hdr)
                return -EMSGSIZE;

        if (tcp_metrics_fill_info(skb, tm) < 0)
                goto nla_put_failure;

        return genlmsg_end(skb, hdr);

nla_put_failure:
        genlmsg_cancel(skb, hdr);
        return -EMSGSIZE;
}

static int tcp_metrics_nl_dump(struct sk_buff *skb,
                               struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
        unsigned int row, s_row = cb->args[0];
        int s_col = cb->args[1], col = s_col;

        for (row = s_row; row < max_rows; row++, s_col = 0) {
                struct tcp_metrics_block *tm;
                struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash + row;

                rcu_read_lock();
                for (col = 0, tm = rcu_dereference(hb->chain); tm;
                     tm = rcu_dereference(tm->tcpm_next), col++) {
                        if (col < s_col)
                                continue;
                        if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
                                rcu_read_unlock();
                                goto done;
                        }
                }
                rcu_read_unlock();
        }

done:
        cb->args[0] = row;
        cb->args[1] = col;
        return skb->len;
}

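/* Parse TCP_METRICS_ATTR_ADDR_IPV4/IPV6 from a request into an
 * inetpeer_addr plus the raw hash input; with "optional", a missing
 * address becomes a "flush everything" request (returns 1) for DEL.
 */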
static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
                         unsigned int *hash, int optional)
{
        struct nlattr *a;

        a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV4];
        if (a) {
                addr->family = AF_INET;
                addr->addr.a4 = nla_get_be32(a);
                *hash = (__force unsigned int) addr->addr.a4;
                return 0;
        }
        a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV6];
        if (a) {
                if (nla_len(a) != sizeof(struct in6_addr))
                        return -EINVAL;
                addr->family = AF_INET6;
                memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6));
                *hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6);
                return 0;
        }
        return optional ? 1 : -EAFNOSUPPORT;
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct sk_buff *msg;
        struct net *net = genl_info_net(info);
        void *reply;
        int ret;

        ret = parse_nl_addr(info, &addr, &hash, 0);
        if (ret < 0)
                return ret;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
                                  info->genlhdr->cmd);
        if (!reply)
                goto nla_put_failure;

        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
        ret = -ESRCH;
        rcu_read_lock();
        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, &addr)) {
                        ret = tcp_metrics_fill_info(msg, tm);
                        break;
                }
        }
        rcu_read_unlock();
        if (ret < 0)
                goto out_free;

        genlmsg_end(msg, reply);
        return genlmsg_reply(msg, info);

nla_put_failure:
        ret = -EMSGSIZE;

out_free:
        nlmsg_free(msg);
        return ret;
}

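/* Write-side RCU dereference helpers: deref_locked_genl() requires both
 * the genl mutex and tcp_metrics_lock to be held, deref_genl() only the
 * genl mutex, and lockdep checks whichever applies.
 */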
#define deref_locked_genl(p)    \
        rcu_dereference_protected(p, lockdep_genl_is_held() && \
                                     lockdep_is_held(&tcp_metrics_lock))

#define deref_genl(p)   rcu_dereference_protected(p, lockdep_genl_is_held())

static int tcp_metrics_flush_all(struct net *net)
{
        unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
        struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash;
        struct tcp_metrics_block *tm;
        unsigned int row;

        for (row = 0; row < max_rows; row++, hb++) {
                spin_lock_bh(&tcp_metrics_lock);
                tm = deref_locked_genl(hb->chain);
                if (tm)
                        hb->chain = NULL;
                spin_unlock_bh(&tcp_metrics_lock);
                while (tm) {
                        struct tcp_metrics_block *next;

                        next = deref_genl(tm->tcpm_next);
                        kfree_rcu(tm, rcu_head);
                        tm = next;
                }
        }
        return 0;
}

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct tcpm_hash_bucket *hb;
        struct tcp_metrics_block *tm;
        struct tcp_metrics_block __rcu **pp;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net = genl_info_net(info);
        int ret;

        ret = parse_nl_addr(info, &addr, &hash, 1);
        if (ret < 0)
                return ret;
        if (ret > 0)
                return tcp_metrics_flush_all(net);

        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
        hb = net->ipv4.tcp_metrics_hash + hash;
        pp = &hb->chain;
        spin_lock_bh(&tcp_metrics_lock);
        for (tm = deref_locked_genl(*pp); tm;
             pp = &tm->tcpm_next, tm = deref_locked_genl(*pp)) {
                if (addr_same(&tm->tcpm_addr, &addr)) {
                        *pp = tm->tcpm_next;
                        break;
                }
        }
        spin_unlock_bh(&tcp_metrics_lock);
        if (!tm)
                return -ESRCH;
        kfree_rcu(tm, rcu_head);
        return 0;
}

static struct genl_ops tcp_metrics_nl_ops[] = {
        {
                .cmd = TCP_METRICS_CMD_GET,
                .doit = tcp_metrics_nl_cmd_get,
                .dumpit = tcp_metrics_nl_dump,
                .policy = tcp_metrics_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = TCP_METRICS_CMD_DEL,
                .doit = tcp_metrics_nl_cmd_del,
                .policy = tcp_metrics_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
};

static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
        ssize_t ret;

        if (!str)
                return 0;

        ret = kstrtouint(str, 0, &tcpmhash_entries);
        if (ret)
                return 0;

        return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

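/* The hash size can be overridden at boot, e.g. "tcpmhash_entries=4096"
 * on the kernel command line; otherwise tcp_net_metrics_init() below
 * sizes it from available memory.
 */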
static int __net_init tcp_net_metrics_init(struct net *net)
{
        size_t size;
        unsigned int slots;

        slots = tcpmhash_entries;
        if (!slots) {
                if (totalram_pages >= 128 * 1024)
                        slots = 16 * 1024;
                else
                        slots = 8 * 1024;
        }

        net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
        size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;

        net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
        if (!net->ipv4.tcp_metrics_hash)
                net->ipv4.tcp_metrics_hash = vzalloc(size);

        if (!net->ipv4.tcp_metrics_hash)
                return -ENOMEM;

        return 0;
}

static void __net_exit tcp_net_metrics_exit(struct net *net)
{
        unsigned int i;

        for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log); i++) {
                struct tcp_metrics_block *tm, *next;

                tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
                while (tm) {
                        next = rcu_dereference_protected(tm->tcpm_next, 1);
                        kfree(tm);
                        tm = next;
                }
        }
        if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash))
                vfree(net->ipv4.tcp_metrics_hash);
        else
                kfree(net->ipv4.tcp_metrics_hash);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
        .init = tcp_net_metrics_init,
        .exit = tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
        int ret;

        ret = register_pernet_subsys(&tcp_net_metrics_ops);
        if (ret < 0)
                goto cleanup;
        ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
                                            tcp_metrics_nl_ops,
                                            ARRAY_SIZE(tcp_metrics_nl_ops));
        if (ret < 0)
                goto cleanup_subsys;
        return;

cleanup_subsys:
        unregister_pernet_subsys(&tcp_net_metrics_ops);

cleanup:
        return;
}