#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/export.h>
	struct net *net = nf_ct_exp_net(exp);

	hlist_del_rcu(&exp->hnode);
	net->ct.expect_count--;

	hlist_del(&exp->lnode);
static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
	struct nf_conntrack_expect *exp = (void *)ul_expect;

	spin_lock_bh(&nf_conntrack_lock);
	nf_ct_unlink_expect(exp);
	spin_unlock_bh(&nf_conntrack_lock);
	nf_ct_expect_put(exp);
}

	if (unlikely(!nf_conntrack_hash_rnd)) {
		init_nf_conntrack_hash_rnd();
	}

	hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
		       (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
	return ((u64)hash * nf_ct_expect_hsize) >> 32;
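
/*
 * A minimal userspace sketch of the bucket selection above, assuming a
 * 32-bit hash value (the function name below is hypothetical, not kernel
 * API): instead of "hash % size", the hash is multiplied by the table size
 * and the top 32 bits are kept, which maps the full 32-bit hash range
 * evenly onto [0, size) without a division.
 */
#include <stdint.h>

static inline uint32_t toy_bucket(uint32_t hash, uint32_t size)
{
	/* keep the high half of the 64-bit product */
	return ((uint64_t)hash * size) >> 32;
}
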
	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone(i->master) == zone)
			return i;
	}
	return NULL;
	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
		    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone(i->master) == zone) {
	if (!nf_ct_is_confirmed(exp->master))
		return NULL;

	nf_ct_unlink_expect(exp);

			nf_ct_unlink_expect(exp);
	intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
		intersect_mask.src.u3.all[count] =
			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
	}

	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
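
/*
 * A minimal userspace sketch of the clash test above, assuming a single
 * 32-bit "address" with a bitwise mask (the struct and function below are
 * hypothetical, not kernel API): two expectations clash if, restricted to
 * the bits that BOTH masks care about, their tuples are identical, i.e.
 * some future packet could match either of them.
 */
#include <stdint.h>
#include <stdbool.h>

struct toy_expect {
	uint32_t addr;	/* stands in for the expected destination */
	uint32_t mask;	/* which bits of addr must match */
};

static bool toy_clash(const struct toy_expect *a, const struct toy_expect *b)
{
	uint32_t intersect = a->mask & b->mask;	/* bits both masks require */

	return (a->addr & intersect) == (b->addr & intersect);
}
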
	       nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&

	nf_ct_unlink_expect(exp);
		if (sizeof(exp->tuple.src.u3) > len)
			memset((void *)&exp->tuple.src.u3 + len, 0x00,
			       sizeof(exp->tuple.src.u3) - len);

		if (sizeof(exp->mask.src.u3) > len)
			memset((void *)&exp->mask.src.u3 + len, 0x00,
			       sizeof(exp->mask.src.u3) - len);

		exp->tuple.src.u.all = 0;
		exp->mask.src.u.all = 0;

	if (sizeof(exp->tuple.dst.u3) > len)
		memset((void *)&exp->tuple.dst.u3 + len, 0x00,
		       sizeof(exp->tuple.dst.u3) - len);
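
/*
 * A minimal userspace sketch of the padding trick above, assuming a
 * fixed-size address union shared by IPv4 and IPv6 (the union and function
 * below are hypothetical): only the first `len` bytes are meaningful, so
 * the remainder is cleared to 0x00, which lets later code compare whole
 * unions for equality regardless of address family.
 */
#include <string.h>

union toy_addr {
	unsigned char all[16];	/* large enough for an IPv6 address */
};

static void toy_set_addr(union toy_addr *dst, const void *src, size_t len)
{
	memcpy(dst->all, src, len);
	if (sizeof(dst->all) > len)
		memset(dst->all + len, 0x00, sizeof(dst->all) - len);
}
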
static void nf_ct_expect_free_rcu(struct rcu_head *head)
	struct net *net = nf_ct_exp_net(exp);
	unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

	hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
	net->ct.expect_count++;
					   lockdep_is_held(&nf_conntrack_lock));
static void evict_oldest_expect(struct nf_conn *master,

		if (exp->class == new->class)

		nf_ct_unlink_expect(last);
	struct net *net = nf_ct_exp_net(expect);

	h = nf_ct_expect_dst_hash(&expect->tuple);

		if (expect_matches(i, expect)) {

			nf_ct_unlink_expect(i);

		} else if (expect_clash(i, expect)) {

					   lockdep_is_held(&nf_conntrack_lock));

		evict_oldest_expect(master, expect);

	if (net->ct.expect_count >= nf_ct_expect_max) {
	ret = __nf_ct_expect_check(expect);

	ret = nf_ct_expect_insert(expect);

	nf_ct_expect_event_report(IPEXP_NEW, expect, pid, report);
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	struct net *net = seq_file_net(seq);

	struct net *net = seq_file_net(seq);

	while (head == NULL) {
		if (++st->bucket >= nf_ct_expect_hsize)
			return NULL;

	struct hlist_node *head = ct_expect_get_first(seq);

		while (pos && (head = ct_expect_get_next(seq, head)))
			pos--;
static void *exp_seq_start(struct seq_file *seq, loff_t *pos)

	return ct_expect_get_idx(seq, *pos);

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)

	return ct_expect_get_next(seq, v);

static void exp_seq_stop(struct seq_file *seq, void *v)

static int exp_seq_show(struct seq_file *s, void *v)
		   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);

		   expect->tuple.src.l3num,
		   expect->tuple.dst.protonum);

		    __nf_ct_l3proto_find(expect->tuple.src.l3num),

					 expect->tuple.dst.protonum));
	.start = exp_seq_start,
	.next = exp_seq_next,
	.stop = exp_seq_stop,
static int exp_proc_init(struct net *net)

#ifdef CONFIG_NF_CONNTRACK_PROCFS

static void exp_proc_remove(struct net *net)

#ifdef CONFIG_NF_CONNTRACK_PROCFS
	net->ct.expect_count = 0;

	if (net->ct.expect_hash == NULL)

	if (!nf_ct_expect_cachep)

	err = exp_proc_init(net);

	exp_proc_remove(net);