4 #include <linux/module.h>
6 #include <linux/slab.h>
9 #include <linux/hash.h>
90 return ipv6_addr_equal(a6, b6);
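/* The two helpers excerpted next: tcpm_suck_dst() copies each routing metric
 * from the dst_entry into the cache entry unless the route has locked it
 * (dst_metric_locked), and tcpm_new() runs under tcp_metrics_lock to either
 * allocate a fresh entry or, when asked to reclaim, reuse the stalest entry
 * already on the chain (per the surrounding code in this version of the
 * file, only partially shown here). */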
106 if (dst_metric_locked(dst, RTAX_RTT))
138 spin_lock_bh(&tcp_metrics_lock);
139 net = dev_net(dst->dev);
157 tcpm_suck_dst(tm, dst);
165 spin_unlock_bh(&tcp_metrics_lock);
169 #define TCP_METRICS_TIMEOUT (60 * 60 * HZ)
174 tcpm_suck_dst(tm, dst);
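/* Entries older than TCP_METRICS_TIMEOUT (one hour) are refreshed from the
 * route by tcpm_check_stamp() above.  The lookup below walks a hash chain at
 * most TCP_METRICS_RECLAIM_DEPTH deep; when nothing is found and the chain is
 * already that long, tcp_get_encode() returns the TCP_METRICS_RECLAIM_PTR
 * sentinel so the caller recycles an existing entry instead of letting the
 * chain grow without bound. */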
177 #define TCP_METRICS_RECLAIM_DEPTH 5
178 #define TCP_METRICS_RECLAIM_PTR (struct tcp_metrics_block *) 0x1UL
190 struct net *net, unsigned int hash)
201 return tcp_get_encode(tm, depth);
215 addr.addr.a4 = inet_rsk(req)->rmt_addr;
219 *(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
220 hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr);
226 net = dev_net(dst->dev);
227 hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
234 tcpm_check_stamp(tm, dst);
246 addr.family = tw->tw_family;
249 addr.addr.a4 = tw->tw_daddr;
253 tw6 = inet6_twsk((struct sock *)tw);
262 hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
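/* tcp_get_metrics() follows the same pattern for full sockets: hash the peer
 * address (ipv6_addr_hash() for IPv6, the raw address for IPv4), fold it into
 * the per-namespace table with hash_32(), look the entry up, create one via
 * tcpm_new() on a miss when the caller asked for creation, and refresh a
 * stale hit with tcpm_check_stamp(). */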
282 addr.family = sk->sk_family;
285 addr.addr.a4 = inet_sk(sk)->inet_daddr;
290 hash = ipv6_addr_hash(&inet6_sk(sk)->daddr);
296 net = dev_net(dst->dev);
297 hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
299 tm = __tcp_get_metrics(&addr, net, hash);
306 tm = tcpm_new(dst, &addr, hash, reclaim);
308 tcpm_check_stamp(tm, dst);
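/* tcp_update_metrics() writes a closing connection's measurements back into
 * the cache.  RTT and RTTVAR are folded in with simple EWMAs; for example
 * "var -= (var - m) >> 2" below moves the cached variance a quarter of the
 * way toward the new sample.  ssthresh, cwnd and reordering are updated
 * similarly, with the snd_cwnd/2 check applying while the connection is
 * still in its initial slow start. */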
320 struct dst_entry *dst = __sk_dst_get(sk);
339 tm = tcp_get_metrics(sk, dst, false);
344 tm = tcp_get_metrics(sk, dst, true);
379 var -= (var - m) >> 2;
384 if (tcp_in_initial_slowstart(tp)) {
388 if (val && (tp->snd_cwnd >> 1) > val)
425 if (val < tp->reordering &&
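/* tcp_init_metrics() is the read side: a remembered reordering degree
 * disables early retransmit, and cached RTT/RTTVAR only ever raise the new
 * socket's srtt/mdev (the "val > tp->srtt" and "val > tp->mdev" checks).  A
 * zero cached RTT, or a socket with no srtt yet, falls through to the
 * default-initialisation path instead. */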
440 struct dst_entry *dst = __sk_dst_get(sk);
451 tm = tcp_get_metrics(sk, dst, true);
474 tcp_disable_early_retrans(tp);
479 if (val == 0 || tp->srtt == 0) {
498 if (val > tp->srtt) {
503 if (val > tp->mdev) {
542 tm = __tcp_get_metrics_req(req, dst);
567 tm = tcp_get_metrics(sk, dst, true);
587 struct dst_entry *dst = __sk_dst_get(sk);
594 tm = tcp_get_metrics(sk, dst, true);
617 tm = __tcp_get_metrics_tw(tw);
620 struct sock *sk = (struct sock *) tw;
622 tcptw = tcp_twsk(sk);
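/* TCP Fast Open state (MSS, cookie, SYN-loss count and timestamp) lives in
 * the same cache entry.  Readers copy it inside a read_seqbegin() /
 * read_seqretry() loop on fastopen_seqlock, retrying if a writer raced with
 * them, so the read side never takes a lock. */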
640 int *syn_loss, unsigned long *last_syn_loss)
645 tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
651 seq = read_seqbegin(&fastopen_seqlock);
657 } while (read_seqretry(&fastopen_seqlock, seq));
668 tm = tcp_get_metrics(sk, __sk_dst_get(sk), true);
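/* The rest of the file is the "tcp_metrics" generic netlink interface.
 * tcp_metrics_fill_info() serialises one cache entry: the peer address, the
 * entry's age, a TCP_METRICS_ATTR_VALS nest holding one u32 per non-zero
 * metric, and the Fast Open fields (read under the same fastopen_seqlock
 * retry loop).  Any failed nla_put unwinds through the nla_put_failure
 * label. */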
686 static struct genl_family tcp_metrics_nl_family = {
716 static int tcp_metrics_fill_info(struct sk_buff *msg,
726 goto nla_put_failure;
731 goto nla_put_failure;
739 goto nla_put_failure;
743 goto nla_put_failure;
746 goto nla_put_failure;
754 goto nla_put_failure;
758 if (nla_put_u32(msg, i + 1, tm->tcpm_vals[i]) < 0)
759 goto nla_put_failure;
763 nla_nest_end(msg, nest);
765 nla_nest_cancel(msg, nest);
773 seq = read_seqbegin(&fastopen_seqlock);
775 } while (read_seqretry(&fastopen_seqlock, seq));
781 goto nla_put_failure;
787 goto nla_put_failure;
788 if (tfom->cookie.len > 0 &&
791 goto nla_put_failure;
800 static int tcp_metrics_dump_info(struct sk_buff *skb,
812 if (tcp_metrics_fill_info(skb, tm) < 0)
813 goto nla_put_failure;
815 return genlmsg_end(skb, hdr);
818 genlmsg_cancel(skb, hdr);
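/* The dump callback iterates over every hash row.  cb->args[0] and
 * cb->args[1] record the row and the position within its chain, so when the
 * skb fills up the next invocation resumes exactly where this one stopped. */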
822 static int tcp_metrics_nl_dump(struct sk_buff *skb,
825 struct net *net = sock_net(skb->sk);
826 unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
827 unsigned int row, s_row = cb->args[0];
828 int s_col = cb->args[1], col = s_col;
830 for (row = s_row; row < max_rows; row++, s_col = 0) {
839 if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
854 unsigned int *hash, int optional)
861 addr->addr.a4 = nla_get_be32(a);
871 *hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6);
877 static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
883 struct net *net = genl_info_net(info);
887 ret = parse_nl_addr(info, &addr, &hash, 0);
895 reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
898 goto nla_put_failure;
900 hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
906 ret = tcp_metrics_fill_info(msg, tm);
914 genlmsg_end(msg, reply);
915 return genlmsg_reply(msg, info);
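/* The deref helpers below document the locking for the write paths:
 * deref_locked_genl() requires both the genl mutex and tcp_metrics_lock,
 * deref_genl() only the genl mutex.  Flush and delete unlink entries from the
 * chains under tcp_metrics_lock and then free them after an RCU grace period
 * (kfree_rcu in this version), so concurrent lockless readers stay safe. */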
925 #define deref_locked_genl(p) \
926 rcu_dereference_protected(p, lockdep_genl_is_held() && \
927 lockdep_is_held(&tcp_metrics_lock))
929 #define deref_genl(p) rcu_dereference_protected(p, lockdep_genl_is_held())
931 static int tcp_metrics_flush_all(struct net *net)
933 unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
938 for (row = 0; row < max_rows; row++, hb++) {
939 spin_lock_bh(&tcp_metrics_lock);
943 spin_unlock_bh(&tcp_metrics_lock);
955 static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
962 struct net *net = genl_info_net(info);
965 ret = parse_nl_addr(info, &addr, &hash, 1);
969 return tcp_metrics_flush_all(net);
971 hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
972 hb = net->ipv4.tcp_metrics_hash + hash;
974 spin_lock_bh(&tcp_metrics_lock);
982 spin_unlock_bh(&tcp_metrics_lock);
989 static struct genl_ops tcp_metrics_nl_ops[] = {
992 .doit = tcp_metrics_nl_cmd_get,
993 .dumpit = tcp_metrics_nl_dump,
994 .policy = tcp_metrics_nl_policy,
999 .doit = tcp_metrics_nl_cmd_del,
1000 .policy = tcp_metrics_nl_policy,
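/* Hash table sizing: the "tcpmhash_entries=" boot parameter overrides the
 * default; otherwise tcp_net_metrics_init() picks a size from available
 * memory (larger on machines with at least 128K pages of RAM, per the
 * totalram_pages check below). */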
1005 static unsigned int tcpmhash_entries;
1006 static int __init set_tcpmhash_entries(char *str)
1019 __setup("tcpmhash_entries=", set_tcpmhash_entries);
1021 static int __net_init tcp_net_metrics_init(struct net *net)
1026 slots = tcpmhash_entries;
1028 if (totalram_pages >= 128 * 1024)
1038 if (!net->ipv4.tcp_metrics_hash)
1041 if (!net->ipv4.tcp_metrics_hash)
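/* On namespace teardown every chain is walked and freed, then the table
 * itself is released with vfree() or kfree() depending on how it was
 * allocated, which is what the is_vmalloc_addr() test below distinguishes. */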
1047 static void __net_exit tcp_net_metrics_exit(struct net *net)
1051 for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) {
1061 if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash))
1068 .init = tcp_net_metrics_init,
1069 .exit = tcp_net_metrics_exit,
1083 goto cleanup_subsys;