Linux Kernel  3.7.1
nf_conntrack_ecache.c
/* Event cache for netfilter. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
static DEFINE_MUTEX(nf_ct_ecache_mutex);

/* deliver cached events and clear cache entry - must be called with locally
 * disabled softirqs */
void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	unsigned long events, missed;
	struct nf_ct_event_notifier *notify;
	struct nf_conntrack_ecache *e;
	struct nf_ct_event item;
	int ret;

	rcu_read_lock();
	notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
	if (notify == NULL)
		goto out_unlock;

	e = nf_ct_ecache_find(ct);
	if (e == NULL)
		goto out_unlock;

	events = xchg(&e->cache, 0);

	if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct) || !events)
		goto out_unlock;

	/* We make a copy of the missed event cache without taking
	 * the lock, thus we may send missed events twice. However,
	 * this does not harm and it happens very rarely. */
	missed = e->missed;

	if (!((events | missed) & e->ctmask))
		goto out_unlock;

	item.ct = ct;
	item.portid = 0;
	item.report = 0;

	ret = notify->fcn(events | missed, &item);

	if (likely(ret >= 0 && !missed))
		goto out_unlock;

	spin_lock_bh(&ct->lock);
	if (ret < 0)
		e->missed |= events;
	else
		e->missed &= ~missed;
	spin_unlock_bh(&ct->lock);

out_unlock:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);

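For orientation, the bits delivered above are queued elsewhere in the conntrack code through the nf_conntrack_event_cache() inline from nf_conntrack_ecache.h. The fragment below is a minimal, hypothetical sketch of that producer side, not part of this file; the helper and the IPCT_ASSURED event name come from the 3.7 headers, while the wrapper function is invented for illustration.

/* Illustrative sketch (not part of nf_conntrack_ecache.c): queueing an event
 * bit and flushing the cache to the registered notifier. */
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_ecache.h>

static void example_mark_assured(struct nf_conn *ct)
{
	/* Set the IPCT_ASSURED bit in the per-conntrack ecache extension;
	 * nothing reaches userspace yet. */
	nf_conntrack_event_cache(IPCT_ASSURED, ct);

	/* Later, on the packet path with softirqs locally disabled, the
	 * accumulated bits are handed to notify->fcn() in one call. */
	nf_ct_deliver_cached_events(ct);
}
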
int nf_conntrack_register_notifier(struct net *net,
				   struct nf_ct_event_notifier *new)
{
	int ret;
	struct nf_ct_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	if (notify != NULL) {
		ret = -EBUSY;
		goto out_unlock;
	}
	rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new);
	ret = 0;

out_unlock:
	mutex_unlock(&nf_ct_ecache_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);

void nf_conntrack_unregister_notifier(struct net *net,
				      struct nf_ct_event_notifier *new)
{
	struct nf_ct_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	BUG_ON(notify != new);
	RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
	mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);

int nf_ct_expect_register_notifier(struct net *net,
				   struct nf_exp_event_notifier *new)
{
	int ret;
	struct nf_exp_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	if (notify != NULL) {
		ret = -EBUSY;
		goto out_unlock;
	}
	rcu_assign_pointer(net->ct.nf_expect_event_cb, new);
	ret = 0;

out_unlock:
	mutex_unlock(&nf_ct_ecache_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);

void nf_ct_expect_unregister_notifier(struct net *net,
				       struct nf_exp_event_notifier *new)
{
	struct nf_exp_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	BUG_ON(notify != new);
	RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
	mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);

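A usage note on the four registration helpers above: only one conntrack notifier and one expectation notifier can be registered per network namespace, so a second caller gets -EBUSY; in practice ctnetlink is the consumer. The sketch below shows the registration pattern under that assumption; the .fcn callback type matches the 3.7 nf_conntrack_ecache.h, while the module, function, and variable names are hypothetical.

/* Hypothetical notifier module sketch; only the API names come from the
 * kernel, the rest is illustrative. */
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_ecache.h>

static int example_ct_event(unsigned int events, struct nf_ct_event *item)
{
	/* item->ct is the conntrack entry, events is the IPCT_* bitmask.
	 * Returning a negative value makes the core fold the events into
	 * e->missed and retry on a later delivery. */
	return 0;
}

static struct nf_ct_event_notifier example_notifier = {
	.fcn	= example_ct_event,
};

static int __init example_init(void)
{
	/* -EBUSY here means another notifier (e.g. ctnetlink) already owns
	 * the per-netns slot. */
	return nf_conntrack_register_notifier(&init_net, &example_notifier);
}

static void __exit example_exit(void)
{
	nf_conntrack_unregister_notifier(&init_net, &example_notifier);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
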
#define NF_CT_EVENTS_DEFAULT 1
static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;
static int nf_ct_events_retry_timeout __read_mostly = 15*HZ;

#ifdef CONFIG_SYSCTL
static struct ctl_table event_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_events",
		.data		= &init_net.ct.sysctl_events,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "nf_conntrack_events_retry_timeout",
		.data		= &init_net.ct.sysctl_events_retry_timeout,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{}
};
#endif /* CONFIG_SYSCTL */

static struct nf_ct_ext_type event_extend __read_mostly = {
	.len	= sizeof(struct nf_conntrack_ecache),
	.align	= __alignof__(struct nf_conntrack_ecache),
	.id	= NF_CT_EXT_ECACHE,
};
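The event_extend registration is what makes struct nf_conntrack_ecache available as a per-conntrack extension; each conntrack then has the extension attached at allocation time (in nf_conntrack_core.c in the real tree). Below is a hedged, hypothetical sketch of that attach step using the nf_ct_ecache_ext_add() helper declared in nf_conntrack_ecache.h; the caller and the mask choice are illustrative only.

/* Illustrative sketch: attaching the event-cache extension to a conntrack.
 * Only the helper name comes from the kernel headers. */
#include <linux/gfp.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_ecache.h>

static void example_attach_ecache(struct nf_conn *ct)
{
	struct nf_conntrack_ecache *e;

	/* Zero masks let the helper fall back to the defaults governed by
	 * the nf_conntrack_events sysctl; GFP_ATOMIC because conntracks are
	 * allocated on the packet path. */
	e = nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
	if (e == NULL)
		return;	/* no extension: events for this conntrack are dropped */
}
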
#ifdef CONFIG_SYSCTL
static int nf_conntrack_event_init_sysctl(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
			GFP_KERNEL);
	if (!table)
		goto out;

	table[0].data = &net->ct.sysctl_events;
	table[1].data = &net->ct.sysctl_events_retry_timeout;

	net->ct.event_sysctl_header =
		register_net_sysctl(net, "net/netfilter", table);
	if (!net->ct.event_sysctl_header) {
		printk(KERN_ERR "nf_ct_event: can't register to sysctl.\n");
		goto out_register;
	}
	return 0;

out_register:
	kfree(table);
out:
	return -ENOMEM;
}

static void nf_conntrack_event_fini_sysctl(struct net *net)
{
	struct ctl_table *table;

	table = net->ct.event_sysctl_header->ctl_table_arg;
	unregister_net_sysctl_table(net->ct.event_sysctl_header);
	kfree(table);
}
#else
static int nf_conntrack_event_init_sysctl(struct net *net)
{
	return 0;
}

static void nf_conntrack_event_fini_sysctl(struct net *net)
{
}
#endif /* CONFIG_SYSCTL */

int nf_conntrack_ecache_init(struct net *net)
{
	int ret;

	net->ct.sysctl_events = nf_ct_events;
	net->ct.sysctl_events_retry_timeout = nf_ct_events_retry_timeout;

	if (net_eq(net, &init_net)) {
		ret = nf_ct_extend_register(&event_extend);
		if (ret < 0) {
			printk(KERN_ERR "nf_ct_event: Unable to register "
					"event extension.\n");
			goto out_extend_register;
		}
	}

	ret = nf_conntrack_event_init_sysctl(net);
	if (ret < 0)
		goto out_sysctl;

	return 0;

out_sysctl:
	if (net_eq(net, &init_net))
		nf_ct_extend_unregister(&event_extend);
out_extend_register:
	return ret;
}

void nf_conntrack_ecache_fini(struct net *net)
{
	nf_conntrack_event_fini_sysctl(net);
	if (net_eq(net, &init_net))
		nf_ct_extend_unregister(&event_extend);
}
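Finally, a hedged sketch of how the per-namespace init/fini pair above would be driven: in the 3.7 tree the conntrack core calls these from its own per-net setup, and the pernet_operations wrapper below is a simplified, hypothetical stand-in for that caller.

/* Hypothetical per-netns wrapper around nf_conntrack_ecache_init/fini. */
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_ecache.h>

static int example_pernet_init(struct net *net)
{
	/* Registers the ecache extension (init_net only) and the sysctls. */
	return nf_conntrack_ecache_init(net);
}

static void example_pernet_exit(struct net *net)
{
	/* Must mirror the init above on namespace teardown. */
	nf_conntrack_ecache_fini(net);
}

static struct pernet_operations example_net_ops = {
	.init	= example_pernet_init,
	.exit	= example_pernet_exit,
};

static int __init example_setup(void)
{
	return register_pernet_subsys(&example_net_ops);
}

static void __exit example_teardown(void)
{
	unregister_pernet_subsys(&example_net_ops);
}

module_init(example_setup);
module_exit(example_teardown);
MODULE_LICENSE("GPL");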