7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/list.h>
13 #include <linux/random.h>
15 #include <linux/slab.h>
19 #include <linux/bitops.h>
/* Number of hash buckets in a flow cache: always a power of two,
 * 2^hash_shift, so bucket indices can be derived by masking. */
#define flow_cache_hash_size(cache) (1 << (cache)->hash_shift)
/* Interval between hash-seed reseeds, in jiffies: 10 minutes
 * (HZ = timer ticks per second). Presumably consumed by the
 * flow_cache_new_hashrnd timer below — confirm against the timer setup. */
#define FLOW_HASH_RND_PERIOD (10 * 60 * HZ)
75 static void flow_cache_new_hashrnd(
unsigned long arg)
109 spin_lock_bh(&flow_cache_gc_lock);
110 list_splice_tail_init(&flow_cache_gc_list, &
gc_list);
111 spin_unlock_bh(&flow_cache_gc_lock);
114 flow_entry_kill(fce);
/* Deferred-destruction work item: flow_cache_gc_task drains entries
 * queued on flow_cache_gc_list (under flow_cache_gc_lock) and frees them
 * via flow_entry_kill, outside the fast-path locks. */
static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task);
122 fcp->hash_count -= deleted;
123 spin_lock_bh(&flow_cache_gc_lock);
124 list_splice_tail(gc_list, &flow_cache_gc_list);
125 spin_unlock_bh(&flow_cache_gc_lock);
144 if (saved < shrink_to &&
145 flow_entry_valid(fle)) {
155 flow_cache_queue_garbage(fcp, deleted, &gc_list);
158 static void flow_cache_shrink(
struct flow_cache *fc,
163 __flow_cache_shrink(fc, fcp, shrink_to);
166 static void flow_new_hash_rnd(
struct flow_cache *fc,
171 __flow_cache_shrink(fc, fcp, 0);
179 const u32 *
k = (
const u32 *) key;
182 return jhash2(k, length, fcp->
hash_rnd)
189 static int flow_key_compare(
const struct flowi *key1,
const struct flowi *key2,
202 }
while (k1 < k1_lim);
225 keysize = flow_key_size(family);
235 flow_new_hash_rnd(fc, fcp);
237 hash = flow_hash_code(fc, fcp, key, keysize);
239 if (tfle->
net == net &&
242 flow_key_compare(key, &tfle->
key, keysize) == 0) {
250 flow_cache_shrink(fc, fcp);
266 flo = flo->
ops->get(flo);
271 flo->
ops->delete(flo);
281 flo = resolver(net, key, family, dir, flo, ctx);
289 if (flo && !IS_ERR(flo))
290 flo->
ops->delete(flo);
298 static void flow_cache_flush_tasklet(
unsigned long data)
312 if (flow_entry_valid(fle))
321 flow_cache_queue_garbage(fcp, deleted, &gc_list);
327 static void flow_cache_flush_per_cpu(
void *data)
335 tasklet->
data = (
unsigned long)info;
336 tasklet_schedule(tasklet);
347 info.
cache = &flow_cache_global;
353 flow_cache_flush_tasklet((
unsigned long)&info);
361 static void flow_cache_flush_task(
struct work_struct *work)
/* Work item that runs flow_cache_flush_task in process context,
 * presumably so a full-cache flush can be requested from contexts
 * that must not block — confirm against callers. */
static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);
381 pr_err(
"NET: failed to allocate flow cache sz %zu\n", sz);
396 int res, cpu = (
unsigned long) hcpu;
402 res = flow_cache_cpu_prepare(fc, cpu);
404 return notifier_from_errno(res);
408 __flow_cache_shrink(fc, fcp, 0);
427 if (flow_cache_cpu_prepare(fc, i))
431 .notifier_call = flow_cache_cpu,
455 static int __init flow_cache_init_global(
void)
461 return flow_cache_init(&flow_cache_global);