Linux Kernel 3.7.1
flow.c
/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <linux/atomic.h>
#include <linux/security.h>

struct flow_cache_entry {
	union {
		struct hlist_node	hlist;
		struct list_head	gc_list;
	} u;
	struct net			*net;
	u16				family;
	u8				dir;
	u32				genid;
	struct flowi			key;
	struct flow_cache_object	*object;
};

struct flow_cache_percpu {
	struct hlist_head		*hash_table;
	int				hash_count;
	u32				hash_rnd;
	int				hash_rnd_recalc;
	struct tasklet_struct		flush_tasklet;
};

struct flow_flush_info {
	struct flow_cache		*cache;
	atomic_t			cpuleft;
	struct completion		completion;
};

struct flow_cache {
	u32				hash_shift;
	struct flow_cache_percpu __percpu *percpu;
	struct notifier_block		hotcpu_notifier;
	int				low_watermark;
	int				high_watermark;
	struct timer_list		rnd_timer;
};

atomic_t flow_cache_genid = ATOMIC_INIT(0);
EXPORT_SYMBOL(flow_cache_genid);
static struct flow_cache flow_cache_global;
static struct kmem_cache *flow_cachep __read_mostly;

static DEFINE_SPINLOCK(flow_cache_gc_lock);
static LIST_HEAD(flow_cache_gc_list);

#define flow_cache_hash_size(cache)	(1 << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD		(10 * 60 * HZ)

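/* Timer callback: ask every CPU to pick a new hash seed on its next lookup,
 * then re-arm the timer for another FLOW_HASH_RND_PERIOD. */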
static void flow_cache_new_hashrnd(unsigned long arg)
{
	struct flow_cache *fc = (void *) arg;
	int i;

	for_each_possible_cpu(i)
		per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);
}

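/* An entry is stale once the global generation counter has moved past its
 * genid, or when its cached object fails the object's ->check() hook. */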
static int flow_entry_valid(struct flow_cache_entry *fle)
{
	if (atomic_read(&flow_cache_genid) != fle->genid)
		return 0;
	if (fle->object && !fle->object->ops->check(fle->object))
		return 0;
	return 1;
}

static void flow_entry_kill(struct flow_cache_entry *fle)
{
	if (fle->object)
		fle->object->ops->delete(fle->object);
	kmem_cache_free(flow_cachep, fle);
}

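/* Dead entries are queued on flow_cache_gc_list and freed later by this
 * work item, so their ->delete() hooks run in process context. */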
static void flow_cache_gc_task(struct work_struct *work)
{
	struct list_head gc_list;
	struct flow_cache_entry *fce, *n;

	INIT_LIST_HEAD(&gc_list);
	spin_lock_bh(&flow_cache_gc_lock);
	list_splice_tail_init(&flow_cache_gc_list, &gc_list);
	spin_unlock_bh(&flow_cache_gc_lock);

	list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
		flow_entry_kill(fce);
}
static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task);

static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
				     int deleted, struct list_head *gc_list)
{
	if (deleted) {
		fcp->hash_count -= deleted;
		spin_lock_bh(&flow_cache_gc_lock);
		list_splice_tail(gc_list, &flow_cache_gc_list);
		spin_unlock_bh(&flow_cache_gc_lock);
		schedule_work(&flow_cache_gc_work);
	}
}

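/* Walk every hash chain and keep at most shrink_to still-valid entries per
 * chain; everything else is queued for garbage collection. */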
static void __flow_cache_shrink(struct flow_cache *fc,
				struct flow_cache_percpu *fcp,
				int shrink_to)
{
	struct flow_cache_entry *fle;
	struct hlist_node *entry, *tmp;
	LIST_HEAD(gc_list);
	int i, deleted = 0;

	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		int saved = 0;

		hlist_for_each_entry_safe(fle, entry, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (saved < shrink_to &&
			    flow_entry_valid(fle)) {
				saved++;
			} else {
				deleted++;
				hlist_del(&fle->u.hlist);
				list_add_tail(&fle->u.gc_list, &gc_list);
			}
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list);
}

static void flow_cache_shrink(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

	__flow_cache_shrink(fc, fcp, shrink_to);
}

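/* Picking a new hash seed moves every key to a different bucket, so the
 * whole per-CPU table is emptied along with the reseed. */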
static void flow_new_hash_rnd(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	get_random_bytes(&fcp->hash_rnd, sizeof(u32));
	fcp->hash_rnd_recalc = 0;
	__flow_cache_shrink(fc, fcp, 0);
}

static u32 flow_hash_code(struct flow_cache *fc,
			  struct flow_cache_percpu *fcp,
			  const struct flowi *key,
			  size_t keysize)
{
	const u32 *k = (const u32 *) key;
	const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);

	return jhash2(k, length, fcp->hash_rnd)
		& (flow_cache_hash_size(fc) - 1);
}

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment.
 */
static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
			    size_t keysize)
{
	const flow_compare_t *k1, *k1_lim, *k2;

	k1 = (const flow_compare_t *) key1;
	k1_lim = k1 + keysize;

	k2 = (const flow_compare_t *) key2;

	do {
		if (*k1++ != *k2++)
			return 1;
	} while (k1 < k1_lim);

	return 0;
}

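/* Look up the flow cache object for (net, key, family, dir) on the local
 * CPU.  On a miss, or when the cached entry is stale, fall back to the
 * resolver and cache whatever it returns. */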
struct flow_cache_object *
flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
		  flow_resolve_t resolver, void *ctx)
{
	struct flow_cache *fc = &flow_cache_global;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle, *tfle;
	struct hlist_node *entry;
	struct flow_cache_object *flo;
	size_t keysize;
	unsigned int hash;

	local_bh_disable();
	fcp = this_cpu_ptr(fc->percpu);

	fle = NULL;
	flo = NULL;

	keysize = flow_key_size(family);
	if (!keysize)
		goto nocache;

	/* Packet really early in init?  Making flow_cache_init a
	 * pre-smp initcall would solve this.  --RR */
	if (!fcp->hash_table)
		goto nocache;

	if (fcp->hash_rnd_recalc)
		flow_new_hash_rnd(fc, fcp);

	hash = flow_hash_code(fc, fcp, key, keysize);
	hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
		if (tfle->net == net &&
		    tfle->family == family &&
		    tfle->dir == dir &&
		    flow_key_compare(key, &tfle->key, keysize) == 0) {
			fle = tfle;
			break;
		}
	}

	if (unlikely(!fle)) {
		if (fcp->hash_count > fc->high_watermark)
			flow_cache_shrink(fc, fcp);

		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
		if (fle) {
			fle->net = net;
			fle->family = family;
			fle->dir = dir;
			memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
			fle->object = NULL;
			hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
			fcp->hash_count++;
		}
	} else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
		flo = fle->object;
		if (!flo)
			goto ret_object;
		flo = flo->ops->get(flo);
		if (flo)
			goto ret_object;
	} else if (fle->object) {
		flo = fle->object;
		flo->ops->delete(flo);
		fle->object = NULL;
	}

nocache:
	flo = NULL;
	if (fle) {
		flo = fle->object;
		fle->object = NULL;
	}
	flo = resolver(net, key, family, dir, flo, ctx);
	if (fle) {
		fle->genid = atomic_read(&flow_cache_genid);
		if (!IS_ERR(flo))
			fle->object = flo;
		else
			fle->genid--;
	} else {
		if (flo && !IS_ERR(flo))
			flo->ops->delete(flo);
	}
ret_object:
	local_bh_enable();
	return flo;
}
EXPORT_SYMBOL(flow_cache_lookup);

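/* Per-CPU half of a full flush: drop every entry on this CPU that is no
 * longer valid, then signal the waiter once the last CPU has finished. */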
static void flow_cache_flush_tasklet(unsigned long data)
{
	struct flow_flush_info *info = (void *)data;
	struct flow_cache *fc = info->cache;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle;
	struct hlist_node *entry, *tmp;
	LIST_HEAD(gc_list);
	int i, deleted = 0;

	fcp = this_cpu_ptr(fc->percpu);
	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		hlist_for_each_entry_safe(fle, entry, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (flow_entry_valid(fle))
				continue;

			deleted++;
			hlist_del(&fle->u.hlist);
			list_add_tail(&fle->u.gc_list, &gc_list);
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list);

	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);
}

static void flow_cache_flush_per_cpu(void *data)
{
	struct flow_flush_info *info = data;
	int cpu;
	struct tasklet_struct *tasklet;

	cpu = smp_processor_id();
	tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet;
	tasklet->data = (unsigned long)info;
	tasklet_schedule(tasklet);
}

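/* Flush the cache on every online CPU and wait for all of them to finish. */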
void flow_cache_flush(void)
{
	struct flow_flush_info info;
	static DEFINE_MUTEX(flow_flush_sem);

	/* Don't want cpus going down or up during this. */
	get_online_cpus();
	mutex_lock(&flow_flush_sem);
	info.cache = &flow_cache_global;
	atomic_set(&info.cpuleft, num_online_cpus());
	init_completion(&info.completion);

	local_bh_disable();
	smp_call_function(flow_cache_flush_per_cpu, &info, 0);
	flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);
	mutex_unlock(&flow_flush_sem);
	put_online_cpus();
}

static void flow_cache_flush_task(struct work_struct *work)
{
	flow_cache_flush();
}

static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);

void flow_cache_flush_deferred(void)
{
	schedule_work(&flow_cache_flush_work);
}

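/* Allocate and initialize a CPU's hash table the first time it is needed,
 * at boot and again when a CPU is brought online. */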
static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
	size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);

	if (!fcp->hash_table) {
		fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
		if (!fcp->hash_table) {
			pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
			return -ENOMEM;
		}
		fcp->hash_rnd_recalc = 1;
		fcp->hash_count = 0;
		tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
	}
	return 0;
}

static int __cpuinit flow_cache_cpu(struct notifier_block *nfb,
				    unsigned long action,
				    void *hcpu)
{
	struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
	int res, cpu = (unsigned long) hcpu;
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		res = flow_cache_cpu_prepare(fc, cpu);
		if (res)
			return notifier_from_errno(res);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		__flow_cache_shrink(fc, fcp, 0);
		break;
	}
	return NOTIFY_OK;
}

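/* One-time setup: size the hash table, allocate per-CPU state, register the
 * CPU hotplug notifier and arm the periodic reseed timer. */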
static int __init flow_cache_init(struct flow_cache *fc)
{
	int i;

	fc->hash_shift = 10;
	fc->low_watermark = 2 * flow_cache_hash_size(fc);
	fc->high_watermark = 4 * flow_cache_hash_size(fc);

	fc->percpu = alloc_percpu(struct flow_cache_percpu);
	if (!fc->percpu)
		return -ENOMEM;

	for_each_online_cpu(i) {
		if (flow_cache_cpu_prepare(fc, i))
			goto err;
	}
	fc->hotcpu_notifier = (struct notifier_block){
		.notifier_call = flow_cache_cpu,
	};
	register_hotcpu_notifier(&fc->hotcpu_notifier);

	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
		    (unsigned long) fc);
	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);

	return 0;

err:
	for_each_possible_cpu(i) {
		struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
		kfree(fcp->hash_table);
		fcp->hash_table = NULL;
	}

	free_percpu(fc->percpu);
	fc->percpu = NULL;

	return -ENOMEM;
}

static int __init flow_cache_init_global(void)
{
	flow_cachep = kmem_cache_create("flow_cache",
					sizeof(struct flow_cache_entry),
					0, SLAB_PANIC, NULL);

	return flow_cache_init(&flow_cache_global);
}

module_init(flow_cache_init_global);