nf_conntrack_l3proto_ipv4_compat.c (Linux kernel 3.7.1)
/* ip_conntrack proc compat - based on ip_conntrack_standalone.c
 *
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/percpu.h>
#include <linux/security.h>
#include <net/net_namespace.h>

#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <linux/rculist_nulls.h>
#include <linux/export.h>

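/*
 * This file provides the legacy /proc interface of the IPv4 connection
 * tracker: /proc/net/ip_conntrack, /proc/net/ip_conntrack_expect and
 * /proc/net/stat/ip_conntrack, each backed by one of the seq_file
 * iterators below.
 */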
struct ct_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
{
        struct net *net = seq_file_net(seq);
        struct ct_iter_state *st = seq->private;
        struct hlist_nulls_node *n;

        for (st->bucket = 0;
             st->bucket < net->ct.htable_size;
             st->bucket++) {
                n = rcu_dereference(
                        hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
                if (!is_a_nulls(n))
                        return n;
        }
        return NULL;
}

static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
                                            struct hlist_nulls_node *head)
{
        struct net *net = seq_file_net(seq);
        struct ct_iter_state *st = seq->private;

        head = rcu_dereference(hlist_nulls_next_rcu(head));
        while (is_a_nulls(head)) {
                if (likely(get_nulls_value(head) == st->bucket)) {
                        if (++st->bucket >= net->ct.htable_size)
                                return NULL;
                }
                head = rcu_dereference(
                        hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
        }
        return head;
}
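/*
 * The conntrack hash chains are hlist_nulls lists: the nulls value that
 * terminates a chain encodes the bucket number.  In ct_get_next() above,
 * a mismatch between get_nulls_value(head) and st->bucket means the entry
 * was recycled onto another chain under RCU, so the current bucket is
 * re-read instead of being skipped.
 */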

static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_nulls_node *head = ct_get_first(seq);

        if (head)
                while (pos && (head = ct_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_get_idx(seq, *pos);
}

static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_get_next(s, v);
}

static void ct_seq_stop(struct seq_file *s, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}
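/*
 * Each table walk runs between ct_seq_start() and ct_seq_stop(), i.e. under
 * rcu_read_lock(); the __acquires()/__releases() annotations only document
 * that locking context for sparse.
 */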

#ifdef CONFIG_NF_CONNTRACK_SECMARK
static int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
{
        int ret;
        u32 len;
        char *secctx;

        ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
        if (ret)
                return 0;

        ret = seq_printf(s, "secctx=%s ", secctx);

        security_release_secctx(secctx, len);
        return ret;
}
#else
static inline int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
{
        return 0;
}
#endif

static int ct_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_tuple_hash *hash = v;
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
        const struct nf_conntrack_l3proto *l3proto;
        const struct nf_conntrack_l4proto *l4proto;
        int ret = 0;

        NF_CT_ASSERT(ct);
        if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
                return 0;


        /* we only want to print DIR_ORIGINAL */
        if (NF_CT_DIRECTION(hash))
                goto release;
        if (nf_ct_l3num(ct) != AF_INET)
                goto release;

        l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
        NF_CT_ASSERT(l3proto);
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        NF_CT_ASSERT(l4proto);

        ret = -ENOSPC;
        if (seq_printf(s, "%-8s %u %ld ",
                       l4proto->name, nf_ct_protonum(ct),
                       timer_pending(&ct->timeout)
                       ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
                goto release;

        if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct))
                goto release;

        if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                        l3proto, l4proto))
                goto release;

        if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
                goto release;

        if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
                if (seq_printf(s, "[UNREPLIED] "))
                        goto release;

        if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                        l3proto, l4proto))
                goto release;

        if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
                goto release;

        if (test_bit(IPS_ASSURED_BIT, &ct->status))
                if (seq_printf(s, "[ASSURED] "))
                        goto release;

#ifdef CONFIG_NF_CONNTRACK_MARK
        if (seq_printf(s, "mark=%u ", ct->mark))
                goto release;
#endif

        if (ct_show_secctx(s, ct))
                goto release;

        if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
                goto release;
        ret = 0;
release:
        nf_ct_put(ct);
        return ret;
}
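/*
 * A /proc/net/ip_conntrack line produced by ct_seq_show() looks roughly like
 * the following (wrapped here for readability); the tuple and state fields
 * come from the l3/l4 print callbacks, so the values are purely illustrative:
 *
 *   tcp      6 431999 ESTABLISHED src=192.168.1.2 dst=10.0.0.1 sport=45182
 *       dport=80 src=10.0.0.1 dst=192.168.1.2 sport=80 dport=45182
 *       [ASSURED] mark=0 use=1
 */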

static const struct seq_operations ct_seq_ops = {
        .start = ct_seq_start,
        .next = ct_seq_next,
        .stop = ct_seq_stop,
        .show = ct_seq_show
};

static int ct_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &ct_seq_ops,
                            sizeof(struct ct_iter_state));
}

static const struct file_operations ct_file_ops = {
        .owner = THIS_MODULE,
        .open = ct_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release_net,
};
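/*
 * seq_open_net() allocates the per-reader iterator state (struct
 * ct_iter_state here, struct ct_expect_iter_state below) and initialises
 * the struct seq_net_private at its head, which is what the seq_file_net()
 * calls in the iterators above rely on.
 */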

/* expects */
struct ct_expect_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;
        struct hlist_node *n;

        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
                n = rcu_dereference(
                        hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
                if (n)
                        return n;
        }
        return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                                             struct hlist_node *head)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;

        head = rcu_dereference(hlist_next_rcu(head));
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
                head = rcu_dereference(
                        hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
        }
        return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_expect_get_first(seq);

        if (head)
                while (pos && (head = ct_expect_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

static int exp_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_expect *exp;
        const struct hlist_node *n = v;

        exp = hlist_entry(n, struct nf_conntrack_expect, hnode);

        if (exp->tuple.src.l3num != AF_INET)
                return 0;

        if (exp->timeout.function)
                seq_printf(s, "%ld ", timer_pending(&exp->timeout)
                           ? (long)(exp->timeout.expires - jiffies)/HZ : 0);
        else
                seq_printf(s, "- ");

        seq_printf(s, "proto=%u ", exp->tuple.dst.protonum);

        print_tuple(s, &exp->tuple,
                    __nf_ct_l3proto_find(exp->tuple.src.l3num),
                    __nf_ct_l4proto_find(exp->tuple.src.l3num,
                                         exp->tuple.dst.protonum));
        return seq_putc(s, '\n');
}
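/*
 * A /proc/net/ip_conntrack_expect line produced by exp_seq_show() looks
 * roughly like the following (the tuple fields again depend on the
 * print_tuple() callbacks, values illustrative):
 *
 *   299 proto=6 src=192.168.1.2 dst=10.0.0.1 sport=0 dport=51413
 */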

static const struct seq_operations exp_seq_ops = {
        .start = exp_seq_start,
        .next = exp_seq_next,
        .stop = exp_seq_stop,
        .show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &exp_seq_ops,
                            sizeof(struct ct_expect_iter_state));
}

static const struct file_operations ip_exp_file_ops = {
        .owner = THIS_MODULE,
        .open = exp_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release_net,
};

static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct net *net = seq_file_net(seq);
        int cpu;

        if (*pos == 0)
                return SEQ_START_TOKEN;

        for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
                return per_cpu_ptr(net->ct.stat, cpu);
        }

        return NULL;
}

static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct net *net = seq_file_net(seq);
        int cpu;

        for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
                return per_cpu_ptr(net->ct.stat, cpu);
        }

        return NULL;
}

static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int ct_cpu_seq_show(struct seq_file *seq, void *v)
{
        struct net *net = seq_file_net(seq);
        unsigned int nr_conntracks = atomic_read(&net->ct.count);
        const struct ip_conntrack_stat *st = v;

        if (v == SEQ_START_TOKEN) {
                seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart\n");
                return 0;
        }

        seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
                        "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
                   nr_conntracks,
                   st->searched,
                   st->found,
                   st->new,
                   st->invalid,
                   st->ignore,
                   st->delete,
                   st->delete_list,
                   st->insert,
                   st->insert_failed,
                   st->drop,
                   st->early_drop,
                   st->error,

                   st->expect_new,
                   st->expect_create,
                   st->expect_delete,
                   st->search_restart
                );
        return 0;
}
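/*
 * /proc/net/stat/ip_conntrack therefore shows the header line above followed
 * by one line of hexadecimal counters per possible CPU; only the leading
 * "entries" column is the global conntrack count, the remaining columns are
 * per-CPU statistics.
 */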

static const struct seq_operations ct_cpu_seq_ops = {
        .start = ct_cpu_seq_start,
        .next = ct_cpu_seq_next,
        .stop = ct_cpu_seq_stop,
        .show = ct_cpu_seq_show,
};

static int ct_cpu_seq_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &ct_cpu_seq_ops,
                            sizeof(struct seq_net_private));
}

static const struct file_operations ct_cpu_seq_fops = {
        .owner = THIS_MODULE,
        .open = ct_cpu_seq_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release_net,
};

static int __net_init ip_conntrack_net_init(struct net *net)
{
        struct proc_dir_entry *proc, *proc_exp, *proc_stat;

        proc = proc_net_fops_create(net, "ip_conntrack", 0440, &ct_file_ops);
        if (!proc)
                goto err1;

        proc_exp = proc_net_fops_create(net, "ip_conntrack_expect", 0440,
                                        &ip_exp_file_ops);
        if (!proc_exp)
                goto err2;

        proc_stat = proc_create("ip_conntrack", S_IRUGO,
                                net->proc_net_stat, &ct_cpu_seq_fops);
        if (!proc_stat)
                goto err3;
        return 0;

err3:
        proc_net_remove(net, "ip_conntrack_expect");
err2:
        proc_net_remove(net, "ip_conntrack");
err1:
        return -ENOMEM;
}

static void __net_exit ip_conntrack_net_exit(struct net *net)
{
        remove_proc_entry("ip_conntrack", net->proc_net_stat);
        proc_net_remove(net, "ip_conntrack_expect");
        proc_net_remove(net, "ip_conntrack");
}

static struct pernet_operations ip_conntrack_net_ops = {
        .init = ip_conntrack_net_init,
        .exit = ip_conntrack_net_exit,
};
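/*
 * register_pernet_subsys() makes the three proc entries come and go with
 * each network namespace; the two entry points below are called when IPv4
 * connection-tracking support is set up and torn down.
 */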

int __init nf_conntrack_ipv4_compat_init(void)
{
        return register_pernet_subsys(&ip_conntrack_net_ops);
}

void nf_conntrack_ipv4_compat_fini(void)
{
        unregister_pernet_subsys(&ip_conntrack_net_ops);
}