Linux Kernel 3.7.1
gen_estimator.c
/*
 * net/sched/gen_estimator.c	Simple rate estimator.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <[email protected]>
 *
 * Changes:
 *		Jamal Hadi Salim - moved it to net/core and reshuffled
 *		names to make it usable in the general net subsystem.
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/gen_stats.h>

/*
   This code is NOT intended to be used for statistics collection.
   Its purpose is to provide a basis for statistical multiplexing
   for controlled load service.
   If you need only statistics, run a user level daemon which
   periodically reads byte counters.

   Unfortunately, rate estimation is not a very easy task.
   E.g. I did not find a simple way to estimate the current peak rate,
   and even failed to formulate the problem 8)8)

   So I preferred not to build an estimator into the scheduler,
   but to run this task separately.
   Ideally, it should be kernel thread(s), but for now it runs
   from timers, which puts an apparent upper bound on the number of rated
   flows, has minimal overhead when that number is small, but is enough
   to handle controlled load service and sets of aggregates.

   We measure rate over A=(1<<interval) seconds and evaluate EWMA:

   avrate = avrate*(1-W) + rate*W

   where W is chosen as a negative power of 2: W = 2^(-ewma_log)

   The resulting time constant is:

   T = A/(-ln(1-W))


   NOTES.

   * avbps is scaled by 2^5, avpps is scaled by 2^10.
   * both values are reported as 32 bit unsigned values. bps can
     overflow for fast links: the maximum representable speed is
     34360 Mbit/sec.
   * Minimal interval is HZ/4 = 250 msec (it is the greatest common divisor
     for HZ=100 and HZ=1024 8)), maximal interval
     is (HZ*2^EST_MAX_INTERVAL)/4 = 8 sec. Shorter intervals
     are too expensive, longer ones can be implemented
     at user level painlessly.
 */
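
/*
 * Editor's note, not part of the kernel source: a minimal user-space
 * sketch of the fixed-point EWMA update described above. The function
 * and variable names here are hypothetical; only the arithmetic mirrors
 * what est_timer() below does with avbps (scaled by 2^5).
 */
#if 0	/* illustrative only, never compiled */
#include <stdint.h>

static uint64_t avbps;	/* EWMA of the byte rate, scaled by 2^5 */

/* Fold one measured sample (bytes/sec) into the average with
 * W = 2^(-ewma_log): avrate = avrate*(1-W) + rate*W, which is the
 * same as avrate += (rate - avrate)*W, evaluated with shifts.
 */
static void ewma_sample(uint64_t rate_bps, unsigned int ewma_log)
{
	uint64_t scaled = rate_bps << 5;	/* match the 2^5 scaling */

	avbps += (scaled >> ewma_log) - (avbps >> ewma_log);
}

/* E.g. with ewma_log = 3 (W = 1/8), feeding a constant 1000 bytes/sec
 * makes avbps converge so that (avbps + 0xF) >> 5 reports ~1000.
 */
#endif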

#define EST_MAX_INTERVAL	5

struct gen_estimator
{
	struct list_head	list;
	struct gnet_stats_basic_packed	*bstats;
	struct gnet_stats_rate_est	*rate_est;
	spinlock_t		*stats_lock;
	int			ewma_log;
	u64			last_bytes;
	u64			avbps;
	u32			last_packets;
	u32			avpps;
	struct rcu_head		e_rcu;
	struct rb_node		node;
};

struct gen_estimator_head
{
	struct timer_list	timer;
	struct list_head	list;
};

static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];

/* Protects against NULL dereference */
static DEFINE_RWLOCK(est_lock);

/* Protects against soft lockup during large deletion */
static struct rb_root est_root = RB_ROOT;
static DEFINE_SPINLOCK(est_tree_lock);

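/*
 * Editor's note on the locking scheme, as it appears in this file:
 * est_tree_lock serializes all insertions into and removals from
 * est_root and the per-interval lists; est_lock only guards the
 * e->bstats pointer so that est_timer() can detect a concurrent
 * gen_kill_estimator(); and each estimator's stats_lock is taken
 * around reads of the counters it shares with the fast path.
 */
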
static void est_timer(unsigned long arg)
{
	int idx = (int)arg;
	struct gen_estimator *e;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &elist[idx].list, list) {
		u64 nbytes;
		u64 brate;
		u32 npackets;
		u32 rate;

		spin_lock(e->stats_lock);
		read_lock(&est_lock);
		if (e->bstats == NULL)
			goto skip;

		nbytes = e->bstats->bytes;
		npackets = e->bstats->packets;
		brate = (nbytes - e->last_bytes)<<(7 - idx);
		e->last_bytes = nbytes;
		e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
		e->rate_est->bps = (e->avbps+0xF)>>5;

		rate = (npackets - e->last_packets)<<(12 - idx);
		e->last_packets = npackets;
		e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
		e->rate_est->pps = (e->avpps+0x1FF)>>10;
skip:
		read_unlock(&est_lock);
		spin_unlock(e->stats_lock);
	}

	if (!list_empty(&elist[idx].list))
		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
	rcu_read_unlock();
}

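/*
 * Editor's worked example for the shifts in est_timer(): with idx = 2
 * (parm->interval == 0) the timer period is ((HZ/4) << 2) jiffies = 1 sec,
 * so (nbytes - last_bytes) << (7 - 2) is bytes/sec times 2^5, exactly the
 * avbps scaling; likewise << (12 - 2) yields packets/sec times 2^10 for
 * avpps. The final (avbps + 0xF) >> 5 and (avpps + 0x1FF) >> 10 convert
 * the fixed-point averages back to plain bps/pps, with rounding.
 */
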
static void gen_add_node(struct gen_estimator *est)
{
	struct rb_node **p = &est_root.rb_node, *parent = NULL;

	while (*p) {
		struct gen_estimator *e;

		parent = *p;
		e = rb_entry(parent, struct gen_estimator, node);

		if (est->bstats > e->bstats)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&est->node, parent, p);
	rb_insert_color(&est->node, &est_root);
}

static
struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats,
				    const struct gnet_stats_rate_est *rate_est)
{
	struct rb_node *p = est_root.rb_node;

	while (p) {
		struct gen_estimator *e;

		e = rb_entry(p, struct gen_estimator, node);

		if (bstats > e->bstats)
			p = p->rb_right;
		else if (bstats < e->bstats || rate_est != e->rate_est)
			p = p->rb_left;
		else
			return e;
	}
	return NULL;
}

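/*
 * Editor's note: est_root is keyed only by the bstats pointer, and
 * gen_add_node() sends equal keys to the left, so several estimators
 * sharing one bstats block can coexist in the tree. gen_find_node()
 * therefore keeps descending left on an equal bstats until the rate_est
 * pointer also matches, and gen_kill_estimator() below loops until no
 * matching (bstats, rate_est) pair remains.
 */
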
/**
 * gen_new_estimator - create a new rate estimator
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 * @stats_lock: statistics lock
 * @opt: rate estimator configuration TLV
 *
 * Creates a new rate estimator with &bstats as source and &rate_est
 * as destination. A new timer with the interval specified in the
 * configuration TLV is created. Upon each interval, the latest statistics
 * will be read and the estimate of the rate will be stored in rate_est
 * with the statistics lock grabbed during this period.
 *
 * Returns 0 on success or a negative error code.
 */
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
		      struct gnet_stats_rate_est *rate_est,
		      spinlock_t *stats_lock,
		      struct nlattr *opt)
{
	struct gen_estimator *est;
	struct gnet_estimator *parm = nla_data(opt);
	int idx;

	if (nla_len(opt) < sizeof(*parm))
		return -EINVAL;

	if (parm->interval < -2 || parm->interval > 3)
		return -EINVAL;

	est = kzalloc(sizeof(*est), GFP_KERNEL);
	if (est == NULL)
		return -ENOBUFS;

	idx = parm->interval + 2;
	est->bstats = bstats;
	est->rate_est = rate_est;
	est->stats_lock = stats_lock;
	est->ewma_log = parm->ewma_log;
	est->last_bytes = bstats->bytes;
	est->avbps = rate_est->bps<<5;
	est->last_packets = bstats->packets;
	est->avpps = rate_est->pps<<10;

	spin_lock_bh(&est_tree_lock);
	if (!elist[idx].timer.function) {
		INIT_LIST_HEAD(&elist[idx].list);
		setup_timer(&elist[idx].timer, est_timer, idx);
	}

	if (list_empty(&elist[idx].list))
		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));

	list_add_rcu(&est->list, &elist[idx].list);
	gen_add_node(est);
	spin_unlock_bh(&est_tree_lock);

	return 0;
}
EXPORT_SYMBOL(gen_new_estimator);

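/*
 * Editor's illustration, not part of the kernel source: roughly how a
 * caller such as a qdisc or policer would use this API. The variable
 * names and the surrounding lock are hypothetical; real callers pass
 * the TCA_RATE attribute they received over rtnetlink as @opt.
 */
#if 0	/* illustrative only, never compiled */
	struct gnet_stats_basic_packed bstats;	/* bumped on every packet */
	struct gnet_stats_rate_est rate_est;	/* refreshed by est_timer() */
	int err;

	/* @opt wraps a struct gnet_estimator: interval = 0 selects a
	 * 1 second period (idx = 2), ewma_log = 3 gives W = 1/8.
	 */
	err = gen_new_estimator(&bstats, &rate_est, &mylock, opt);
	if (err < 0)
		return err;

	/* ... rate_est.bps and rate_est.pps now track the flow ... */

	gen_kill_estimator(&bstats, &rate_est);	/* on teardown */
#endif
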
/**
 * gen_kill_estimator - remove a rate estimator
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 *
 * Removes the rate estimator specified by &bstats and rate_est.
 *
 * Note : Caller should respect an RCU grace period before freeing stats_lock
 */
void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_rate_est *rate_est)
{
	struct gen_estimator *e;

	spin_lock_bh(&est_tree_lock);
	while ((e = gen_find_node(bstats, rate_est))) {
		rb_erase(&e->node, &est_root);

		write_lock(&est_lock);
		e->bstats = NULL;
		write_unlock(&est_lock);

		list_del_rcu(&e->list);
		kfree_rcu(e, e_rcu);
	}
	spin_unlock_bh(&est_tree_lock);
}
EXPORT_SYMBOL(gen_kill_estimator);

/**
 * gen_replace_estimator - replace rate estimator configuration
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 * @stats_lock: statistics lock
 * @opt: rate estimator configuration TLV
 *
 * Replaces the configuration of a rate estimator by calling
 * gen_kill_estimator() and gen_new_estimator().
 *
 * Returns 0 on success or a negative error code.
 */
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
			  struct gnet_stats_rate_est *rate_est,
			  spinlock_t *stats_lock, struct nlattr *opt)
{
	gen_kill_estimator(bstats, rate_est);
	return gen_new_estimator(bstats, rate_est, stats_lock, opt);
}
EXPORT_SYMBOL(gen_replace_estimator);

/**
 * gen_estimator_active - test if estimator is currently in use
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 *
 * Returns true if estimator is currently in use, and false if not.
 */
bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
			  const struct gnet_stats_rate_est *rate_est)
{
	bool res;

	ASSERT_RTNL();

	spin_lock_bh(&est_tree_lock);
	res = gen_find_node(bstats, rate_est) != NULL;
	spin_unlock_bh(&est_tree_lock);

	return res;
}
EXPORT_SYMBOL(gen_estimator_active);