Linux Kernel 3.7.1
nfnetlink_queue_core.c
/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 * (C) 2007 by Patrick McHardy <kaber@trash.net>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/netfilter/nf_queue.h>
#include <net/netfilter/nfnetlink_queue.h>

#include <linux/atomic.h>

#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024
42 
44  struct hlist_node hlist; /* global list of queues */
45  struct rcu_head rcu;
46 
48  unsigned int queue_maxlen;
49  unsigned int copy_range;
50  unsigned int queue_dropped;
51  unsigned int queue_user_dropped;
52 
53 
54  u_int16_t queue_num; /* number of this queue */
56  u_int32_t flags; /* Set using NFQA_CFG_FLAGS */
57 /*
58  * Following fields are dirtied for each queued packet,
59  * keep them in same cache line if possible.
60  */
62  unsigned int queue_total;
63  unsigned int id_sequence; /* 'sequence' of pkt ids */
64  struct list_head queue_list; /* packets in queue */
65 };
66 
67 typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);
68 
69 static DEFINE_SPINLOCK(instances_lock);
70 
71 #define INSTANCE_BUCKETS 16
72 static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly;
73 
74 static inline u_int8_t instance_hashfn(u_int16_t queue_num)
75 {
76  return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
77 }
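
/*
 * Worked example (editorial note, not in the upstream source):
 * instance_hashfn() folds the high byte of the queue number into the low
 * bits and reduces modulo the bucket count. For queue_num 0x0102:
 * ((0x0102 >> 8) | 0x0102) % 16 = 0x0103 % 16 = 3, so that instance is
 * chained into instance_table[3].
 */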

static struct nfqnl_instance *
instance_lookup(u_int16_t queue_num)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct nfqnl_instance *inst;

	head = &instance_table[instance_hashfn(queue_num)];
	hlist_for_each_entry_rcu(inst, pos, head, hlist) {
		if (inst->queue_num == queue_num)
			return inst;
	}
	return NULL;
}

static struct nfqnl_instance *
instance_create(u_int16_t queue_num, int portid)
{
	struct nfqnl_instance *inst;
	unsigned int h;
	int err;

	spin_lock(&instances_lock);
	if (instance_lookup(queue_num)) {
		err = -EEXIST;
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst) {
		err = -ENOMEM;
		goto out_unlock;
	}

	inst->queue_num = queue_num;
	inst->peer_portid = portid;
	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
	inst->copy_range = 0xfffff;
	inst->copy_mode = NFQNL_COPY_NONE;
	spin_lock_init(&inst->lock);
	INIT_LIST_HEAD(&inst->queue_list);

	if (!try_module_get(THIS_MODULE)) {
		err = -EAGAIN;
		goto out_free;
	}

	h = instance_hashfn(queue_num);
	hlist_add_head_rcu(&inst->hlist, &instance_table[h]);

	spin_unlock(&instances_lock);

	return inst;

out_free:
	kfree(inst);
out_unlock:
	spin_unlock(&instances_lock);
	return ERR_PTR(err);
}
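
/*
 * Usage sketch (editorial, assuming libnetfilter_queue and an iptables
 * rule such as "-j NFQUEUE --queue-num 0"; error handling omitted):
 * nfq_create_queue() sends NFQNL_CFG_CMD_BIND, which lands in
 * nfqnl_recv_config() and ends up here in instance_create().
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *	#include <sys/socket.h>
 *	#include <arpa/inet.h>
 *	#include <linux/netfilter.h>
 *	#include <libnetfilter_queue/libnetfilter_queue.h>
 *
 *	// Accept every packet after noting its id.
 *	static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
 *		      struct nfq_data *nfa, void *data)
 *	{
 *		struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);
 *		uint32_t id = ph ? ntohl(ph->packet_id) : 0;
 *		printf("packet id=%u\n", id);
 *		return nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
 *	}
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		int fd, rv;
 *		struct nfq_handle *h = nfq_open();
 *
 *		nfq_unbind_pf(h, AF_INET);	// NFQNL_CFG_CMD_PF_UNBIND
 *		nfq_bind_pf(h, AF_INET);	// NFQNL_CFG_CMD_PF_BIND
 *		// NFQNL_CFG_CMD_BIND: kernel allocates the nfqnl_instance
 *		struct nfq_q_handle *qh = nfq_create_queue(h, 0, &cb, NULL);
 *		nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);
 *
 *		fd = nfq_fd(h);
 *		while ((rv = recv(fd, buf, sizeof(buf), 0)) >= 0)
 *			nfq_handle_packet(h, buf, rv);
 *		nfq_destroy_queue(qh);
 *		nfq_close(h);
 *		return 0;
 *	}
 */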

static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
			unsigned long data);

static void
instance_destroy_rcu(struct rcu_head *head)
{
	struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
						   rcu);

	nfqnl_flush(inst, NULL, 0);
	kfree(inst);
	module_put(THIS_MODULE);
}

static void
__instance_destroy(struct nfqnl_instance *inst)
{
	hlist_del_rcu(&inst->hlist);
	call_rcu(&inst->rcu, instance_destroy_rcu);
}

static void
instance_destroy(struct nfqnl_instance *inst)
{
	spin_lock(&instances_lock);
	__instance_destroy(inst);
	spin_unlock(&instances_lock);
}

static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
	list_add_tail(&entry->list, &queue->queue_list);
	queue->queue_total++;
}

static void
__dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
	list_del(&entry->list);
	queue->queue_total--;
}

static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
	struct nf_queue_entry *entry = NULL, *i;

	spin_lock_bh(&queue->lock);

	list_for_each_entry(i, &queue->queue_list, list) {
		if (i->id == id) {
			entry = i;
			break;
		}
	}

	if (entry)
		__dequeue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);

	return entry;
}

static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
	struct nf_queue_entry *entry, *next;

	spin_lock_bh(&queue->lock);
	list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
		if (!cmpfn || cmpfn(entry, data)) {
			list_del(&entry->list);
			queue->queue_total--;
			nf_reinject(entry, NF_DROP);
		}
	}
	spin_unlock_bh(&queue->lock);
}

static struct sk_buff *
nfqnl_build_packet_message(struct nfqnl_instance *queue,
			   struct nf_queue_entry *entry,
			   __be32 **packet_id_ptr)
{
	sk_buff_data_t old_tail;
	size_t size;
	size_t data_len = 0, cap_len = 0;
	struct sk_buff *skb;
	struct nlattr *nla;
	struct nfqnl_msg_packet_hdr *pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct sk_buff *entskb = entry->skb;
	struct net_device *indev;
	struct net_device *outdev;
	struct nf_conn *ct = NULL;
	enum ip_conntrack_info uninitialized_var(ctinfo);

	size = NLMSG_SPACE(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp))
		+ nla_total_size(sizeof(u_int32_t));	/* cap_len */

	outdev = entry->outdev;

	switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
	case NFQNL_COPY_META:
	case NFQNL_COPY_NONE:
		break;

	case NFQNL_COPY_PACKET:
		if (entskb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(entskb))
			return NULL;

		data_len = ACCESS_ONCE(queue->copy_range);
		if (data_len == 0 || data_len > entskb->len)
			data_len = entskb->len;

		size += nla_total_size(data_len);
		cap_len = entskb->len;
		break;
	}

	if (queue->flags & NFQA_CFG_F_CONNTRACK)
		ct = nfqnl_ct_get(entskb, &size, &ctinfo);

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		return NULL;

	old_tail = skb->tail;
	nlh = nlmsg_put(skb, 0, 0,
			NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
			sizeof(struct nfgenmsg), 0);
	if (!nlh) {
		kfree_skb(skb);
		return NULL;
	}
	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = entry->pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(queue->queue_num);

	nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
	pmsg = nla_data(nla);
	pmsg->hw_protocol = entskb->protocol;
	pmsg->hook = entry->hook;
	*packet_id_ptr = &pmsg->packet_id;

	indev = entry->indev;
	if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
			goto nla_put_failure;
#else
		if (entry->pf == PF_BRIDGE) {
			/* Case 1: indev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
					 htonl(indev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by __nf_queue */
			    nla_put_be32(skb, NFQA_IFINDEX_INDEV,
					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
					 htonl(indev->ifindex)))
				goto nla_put_failure;
			if (entskb->nf_bridge && entskb->nf_bridge->physindev &&
			    nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
					 htonl(entskb->nf_bridge->physindev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
			goto nla_put_failure;
#else
		if (entry->pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
					 htonl(outdev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by __nf_queue */
			    nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			/* Case 2: outdev is bridge group, we need to look for
			 * physical output device (when called from ipv4) */
			if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
					 htonl(outdev->ifindex)))
				goto nla_put_failure;
			if (entskb->nf_bridge && entskb->nf_bridge->physoutdev &&
			    nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
					 htonl(entskb->nf_bridge->physoutdev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (entskb->mark &&
	    nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
		goto nla_put_failure;

	if (indev && entskb->dev &&
	    entskb->mac_header != entskb->network_header) {
		struct nfqnl_msg_packet_hw phw;
		int len = dev_parse_header(entskb, phw.hw_addr);
		if (len) {
			phw.hw_addrlen = htons(len);
			if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
				goto nla_put_failure;
		}
	}

	if (entskb->tstamp.tv64) {
		struct nfqnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(entskb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
			goto nla_put_failure;
	}

	if (data_len) {
		struct nlattr *nla;
		int sz = nla_attr_size(data_len);

		if (skb_tailroom(skb) < nla_total_size(data_len)) {
			printk(KERN_WARNING "nf_queue: no tailroom!\n");
			kfree_skb(skb);
			return NULL;
		}

		nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
		nla->nla_type = NFQA_PAYLOAD;
		nla->nla_len = sz;

		if (skb_copy_bits(entskb, 0, nla_data(nla), data_len))
			BUG();
	}

	if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0)
		goto nla_put_failure;

	if (cap_len > 0 && nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
		goto nla_put_failure;

	nlh->nlmsg_len = skb->tail - old_tail;
	return skb;

nla_put_failure:
	kfree_skb(skb);
	net_err_ratelimited("nf_queue: error creating packet message\n");
	return NULL;
}
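
/*
 * Usage sketch (editorial, assuming libnetfilter_queue): the attributes
 * assembled above surface in the userspace callback through the library's
 * getters, e.g.:
 *
 *	static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
 *		      struct nfq_data *nfa, void *data)
 *	{
 *		struct nfqnl_msg_packet_hdr *ph;	// NFQA_PACKET_HDR
 *		unsigned char *payload;			// NFQA_PAYLOAD
 *		uint32_t mark = nfq_get_nfmark(nfa);	// NFQA_MARK
 *		uint32_t indev = nfq_get_indev(nfa);	// NFQA_IFINDEX_INDEV
 *		int len = nfq_get_payload(nfa, &payload);
 *
 *		ph = nfq_get_msg_packet_hdr(nfa);
 *		return nfq_set_verdict(qh, ntohl(ph->packet_id),
 *				       NF_ACCEPT, 0, NULL);
 *	}
 */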

static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
	struct sk_buff *nskb;
	struct nfqnl_instance *queue;
	int err = -ENOBUFS;
	__be32 *packet_id_ptr;
	int failopen = 0;

	/* rcu_read_lock()ed by nf_hook_slow() */
	queue = instance_lookup(queuenum);
	if (!queue) {
		err = -ESRCH;
		goto err_out;
	}

	if (queue->copy_mode == NFQNL_COPY_NONE) {
		err = -EINVAL;
		goto err_out;
	}

	nskb = nfqnl_build_packet_message(queue, entry, &packet_id_ptr);
	if (nskb == NULL) {
		err = -ENOMEM;
		goto err_out;
	}
	spin_lock_bh(&queue->lock);

	if (!queue->peer_portid) {
		err = -EINVAL;
		goto err_out_free_nskb;
	}
	if (queue->queue_total >= queue->queue_maxlen) {
		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
			failopen = 1;
			err = 0;
		} else {
			queue->queue_dropped++;
			net_warn_ratelimited("nf_queue: full at %d entries, dropping packet(s)\n",
					     queue->queue_total);
		}
		goto err_out_free_nskb;
	}
	entry->id = ++queue->id_sequence;
	*packet_id_ptr = htonl(entry->id);

	/* nfnetlink_unicast will either free the nskb or add it to a socket */
	err = nfnetlink_unicast(nskb, &init_net, queue->peer_portid, MSG_DONTWAIT);
	if (err < 0) {
		queue->queue_user_dropped++;
		goto err_out_unlock;
	}

	__enqueue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);
	return 0;

err_out_free_nskb:
	kfree_skb(nskb);
err_out_unlock:
	spin_unlock_bh(&queue->lock);
	if (failopen)
		nf_reinject(entry, NF_ACCEPT);
err_out:
	return err;
}
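
/*
 * Usage sketch (editorial, assuming a libnetfilter_queue recent enough to
 * provide nfq_set_queue_flags()): the NFQA_CFG_F_FAIL_OPEN branch above
 * makes an over-full queue accept packets instead of dropping them.
 * Userspace opts in with:
 *
 *	nfq_set_queue_maxlen(qh, 4096);
 *	nfq_set_queue_flags(qh, NFQA_CFG_F_FAIL_OPEN, NFQA_CFG_F_FAIL_OPEN);
 */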

static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
{
	struct sk_buff *nskb;

	if (diff < 0) {
		if (pskb_trim(e->skb, data_len))
			return -ENOMEM;
	} else if (diff > 0) {
		if (data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
					       diff, GFP_ATOMIC);
			if (!nskb) {
				printk(KERN_WARNING "nf_queue: OOM "
				       "in mangle, dropping packet\n");
				return -ENOMEM;
			}
			kfree_skb(e->skb);
			e->skb = nskb;
		}
		skb_put(e->skb, diff);
	}
	if (!skb_make_writable(e->skb, data_len))
		return -ENOMEM;
	skb_copy_to_linear_data(e->skb, data, data_len);
	e->skb->ip_summed = CHECKSUM_NONE;
	return 0;
}
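
/*
 * Usage sketch (editorial, assuming libnetfilter_queue): nfqnl_mangle()
 * runs when a verdict carries NFQA_PAYLOAD, i.e. when userspace returns a
 * replacement packet. The kernel sets ip_summed to CHECKSUM_NONE, so
 * userspace must recompute any checksums in the modified packet itself:
 *
 *	// pkt/pktlen hold the already re-checksummed, modified packet
 *	nfq_set_verdict(qh, id, NF_ACCEPT, pktlen, pkt);
 */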

static int
nfqnl_set_mode(struct nfqnl_instance *queue,
	       unsigned char mode, unsigned int range)
{
	int status = 0;

	spin_lock_bh(&queue->lock);
	switch (mode) {
	case NFQNL_COPY_NONE:
	case NFQNL_COPY_META:
		queue->copy_mode = mode;
		queue->copy_range = 0;
		break;

	case NFQNL_COPY_PACKET:
		queue->copy_mode = mode;
		/* We're using struct nlattr which has 16bit nla_len. Note that
		 * nla_len includes the header length. Thus, the maximum packet
		 * length that we support is 65531 bytes. We send truncated
		 * packets if the specified length is larger than that.
		 */
		if (range > 0xffff - NLA_HDRLEN)
			queue->copy_range = 0xffff - NLA_HDRLEN;
		else
			queue->copy_range = range;
		break;

	default:
		status = -EINVAL;

	}
	spin_unlock_bh(&queue->lock);

	return status;
}
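
/*
 * Usage sketch (editorial, assuming libnetfilter_queue): userspace usually
 * requests full payload copies right after binding; a range of 0xffff is
 * silently clamped to 65531 (0xffff - NLA_HDRLEN) by the code above:
 *
 *	nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);
 */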

static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
	if (entry->indev)
		if (entry->indev->ifindex == ifindex)
			return 1;
	if (entry->outdev)
		if (entry->outdev->ifindex == ifindex)
			return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		if (entry->skb->nf_bridge->physindev &&
		    entry->skb->nf_bridge->physindev->ifindex == ifindex)
			return 1;
		if (entry->skb->nf_bridge->physoutdev &&
		    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
			return 1;
	}
#endif
	return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(int ifindex)
{
	int i;

	rcu_read_lock();

	for (i = 0; i < INSTANCE_BUCKETS; i++) {
		struct hlist_node *tmp;
		struct nfqnl_instance *inst;
		struct hlist_head *head = &instance_table[i];

		hlist_for_each_entry_rcu(inst, tmp, head, hlist)
			nfqnl_flush(inst, dev_cmp, ifindex);
	}

	rcu_read_unlock();
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
		    unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	/* Drop any packets associated with the downed device */
	if (event == NETDEV_DOWN)
		nfqnl_dev_drop(dev->ifindex);
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
	.notifier_call	= nfqnl_rcv_dev_event,
};

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
		   unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
		int i;

		/* destroy all instances for this portid */
		spin_lock(&instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *tmp, *t2;
			struct nfqnl_instance *inst;
			struct hlist_head *head = &instance_table[i];

			hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
				if ((n->net == &init_net) &&
				    (n->portid == inst->peer_portid))
					__instance_destroy(inst);
			}
		}
		spin_unlock(&instances_lock);
	}
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
	.notifier_call	= nfqnl_rcv_nl_event,
};

static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
	[NFQA_MARK]		= { .type = NLA_U32 },
	[NFQA_PAYLOAD]		= { .type = NLA_UNSPEC },
	[NFQA_CT]		= { .type = NLA_UNSPEC },
};

static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
	[NFQA_MARK]		= { .type = NLA_U32 },
};

static struct nfqnl_instance *verdict_instance_lookup(u16 queue_num, int nlportid)
{
	struct nfqnl_instance *queue;

	queue = instance_lookup(queue_num);
	if (!queue)
		return ERR_PTR(-ENODEV);

	if (queue->peer_portid != nlportid)
		return ERR_PTR(-EPERM);

	return queue;
}

static struct nfqnl_msg_verdict_hdr*
verdicthdr_get(const struct nlattr * const nfqa[])
{
	struct nfqnl_msg_verdict_hdr *vhdr;
	unsigned int verdict;

	if (!nfqa[NFQA_VERDICT_HDR])
		return NULL;

	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
	verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
	if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
		return NULL;
	return vhdr;
}

static int nfq_id_after(unsigned int id, unsigned int max)
{
	return (int)(id - max) > 0;
}
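
/*
 * Worked example (editorial note): the cast to signed makes the comparison
 * robust against 32-bit wraparound of the packet id sequence. For instance,
 * nfq_id_after(1, 0xffffffff) computes 1 - 0xffffffff = 2 (mod 2^32), and
 * (int)2 > 0, so id 1 is correctly treated as "after" 0xffffffff.
 */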

static int
nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb,
			 const struct nlmsghdr *nlh,
			 const struct nlattr * const nfqa[])
{
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct nf_queue_entry *entry, *tmp;
	unsigned int verdict, maxid;
	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	LIST_HEAD(batch_list);
	u16 queue_num = ntohs(nfmsg->res_id);

	queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).portid);
	if (IS_ERR(queue))
		return PTR_ERR(queue);

	vhdr = verdicthdr_get(nfqa);
	if (!vhdr)
		return -EINVAL;

	verdict = ntohl(vhdr->verdict);
	maxid = ntohl(vhdr->id);

	spin_lock_bh(&queue->lock);

	list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
		if (nfq_id_after(entry->id, maxid))
			break;
		__dequeue_entry(queue, entry);
		list_add_tail(&entry->list, &batch_list);
	}

	spin_unlock_bh(&queue->lock);

	if (list_empty(&batch_list))
		return -ENOENT;

	list_for_each_entry_safe(entry, tmp, &batch_list, list) {
		if (nfqa[NFQA_MARK])
			entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
		nf_reinject(entry, verdict);
	}
	return 0;
}
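
/*
 * Usage sketch (editorial, assuming a libnetfilter_queue with batch
 * support): a single NFQNL_MSG_VERDICT_BATCH message applies one verdict
 * to every queued packet whose id is <= the given maximum, which is what
 * the loop above implements:
 *
 *	// accept everything up to and including packet id `last_id`
 *	nfq_set_verdict_batch(qh, last_id, NF_ACCEPT);
 */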

static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
		   const struct nlmsghdr *nlh,
		   const struct nlattr * const nfqa[])
{
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);

	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	unsigned int verdict;
	struct nf_queue_entry *entry;
	enum ip_conntrack_info uninitialized_var(ctinfo);
	struct nf_conn *ct = NULL;

	queue = instance_lookup(queue_num);
	if (!queue)
		queue = verdict_instance_lookup(queue_num,
						NETLINK_CB(skb).portid);
	if (IS_ERR(queue))
		return PTR_ERR(queue);

	vhdr = verdicthdr_get(nfqa);
	if (!vhdr)
		return -EINVAL;

	verdict = ntohl(vhdr->verdict);

	entry = find_dequeue_entry(queue, ntohl(vhdr->id));
	if (entry == NULL)
		return -ENOENT;

	rcu_read_lock();
	if (nfqa[NFQA_CT] && (queue->flags & NFQA_CFG_F_CONNTRACK))
		ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo);

	if (nfqa[NFQA_PAYLOAD]) {
		u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
		int diff = payload_len - entry->skb->len;

		if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
				 payload_len, entry, diff) < 0)
			verdict = NF_DROP;

		if (ct)
			nfqnl_ct_seq_adjust(skb, ct, ctinfo, diff);
	}
	rcu_read_unlock();

	if (nfqa[NFQA_MARK])
		entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

	nf_reinject(entry, verdict);
	return 0;
}
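
/*
 * Usage sketch (editorial, assuming libnetfilter_queue): the NFQA_MARK
 * attribute handled above lets userspace set the skb mark while issuing
 * the verdict, e.g. to steer the packet in later routing or iptables
 * rules:
 *
 *	nfq_set_verdict2(qh, id, NF_ACCEPT, 0x2a, 0, NULL);	// mark = 42
 */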

static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
		  const struct nlmsghdr *nlh,
		  const struct nlattr * const nfqa[])
{
	return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
	[NFQA_CFG_CMD]		= { .len = sizeof(struct nfqnl_msg_config_cmd) },
	[NFQA_CFG_PARAMS]	= { .len = sizeof(struct nfqnl_msg_config_params) },
};

static const struct nf_queue_handler nfqh = {
	.name	= "nf_queue",
	.outfn	= &nfqnl_enqueue_packet,
};

static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
		  const struct nlmsghdr *nlh,
		  const struct nlattr * const nfqa[])
{
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);
	struct nfqnl_instance *queue;
	struct nfqnl_msg_config_cmd *cmd = NULL;
	int ret = 0;

	if (nfqa[NFQA_CFG_CMD]) {
		cmd = nla_data(nfqa[NFQA_CFG_CMD]);

		/* Commands without queue context - might sleep */
		switch (cmd->command) {
		case NFQNL_CFG_CMD_PF_BIND:
			return nf_register_queue_handler(ntohs(cmd->pf),
							 &nfqh);
		case NFQNL_CFG_CMD_PF_UNBIND:
			return nf_unregister_queue_handler(ntohs(cmd->pf),
							   &nfqh);
		}
	}

	rcu_read_lock();
	queue = instance_lookup(queue_num);
	if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
		ret = -EPERM;
		goto err_out_unlock;
	}

	if (cmd != NULL) {
		switch (cmd->command) {
		case NFQNL_CFG_CMD_BIND:
			if (queue) {
				ret = -EBUSY;
				goto err_out_unlock;
			}
			queue = instance_create(queue_num, NETLINK_CB(skb).portid);
			if (IS_ERR(queue)) {
				ret = PTR_ERR(queue);
				goto err_out_unlock;
			}
			break;
		case NFQNL_CFG_CMD_UNBIND:
			if (!queue) {
				ret = -ENODEV;
				goto err_out_unlock;
			}
			instance_destroy(queue);
			break;
		case NFQNL_CFG_CMD_PF_BIND:
		case NFQNL_CFG_CMD_PF_UNBIND:
			break;
		default:
			ret = -ENOTSUPP;
			break;
		}
	}

	if (nfqa[NFQA_CFG_PARAMS]) {
		struct nfqnl_msg_config_params *params;

		if (!queue) {
			ret = -ENODEV;
			goto err_out_unlock;
		}
		params = nla_data(nfqa[NFQA_CFG_PARAMS]);
		nfqnl_set_mode(queue, params->copy_mode,
			       ntohl(params->copy_range));
	}

	if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
		__be32 *queue_maxlen;

		if (!queue) {
			ret = -ENODEV;
			goto err_out_unlock;
		}
		queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
		spin_lock_bh(&queue->lock);
		queue->queue_maxlen = ntohl(*queue_maxlen);
		spin_unlock_bh(&queue->lock);
	}

	if (nfqa[NFQA_CFG_FLAGS]) {
		__u32 flags, mask;

		if (!queue) {
			ret = -ENODEV;
			goto err_out_unlock;
		}

		if (!nfqa[NFQA_CFG_MASK]) {
			/* A mask is needed to specify which flags are being
			 * changed.
			 */
			ret = -EINVAL;
			goto err_out_unlock;
		}

		flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
		mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));

		if (flags >= NFQA_CFG_F_MAX) {
			ret = -EOPNOTSUPP;
			goto err_out_unlock;
		}

		spin_lock_bh(&queue->lock);
		queue->flags &= ~mask;
		queue->flags |= flags & mask;
		spin_unlock_bh(&queue->lock);
	}

err_out_unlock:
	rcu_read_unlock();
	return ret;
}

static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
	[NFQNL_MSG_PACKET]	= { .call_rcu = nfqnl_recv_unsupp,
				    .attr_count = NFQA_MAX, },
	[NFQNL_MSG_VERDICT]	= { .call_rcu = nfqnl_recv_verdict,
				    .attr_count = NFQA_MAX,
				    .policy = nfqa_verdict_policy },
	[NFQNL_MSG_CONFIG]	= { .call = nfqnl_recv_config,
				    .attr_count = NFQA_CFG_MAX,
				    .policy = nfqa_cfg_policy },
	[NFQNL_MSG_VERDICT_BATCH] = { .call_rcu = nfqnl_recv_verdict_batch,
				    .attr_count = NFQA_MAX,
				    .policy = nfqa_verdict_batch_policy },
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
	.name		= "nf_queue",
	.subsys_id	= NFNL_SUBSYS_QUEUE,
	.cb_count	= NFQNL_MSG_MAX,
	.cb		= nfqnl_cb,
};

#ifdef CONFIG_PROC_FS
struct iter_state {
	unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
	struct iter_state *st = seq->private;

	if (!st)
		return NULL;

	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
		if (!hlist_empty(&instance_table[st->bucket]))
			return instance_table[st->bucket].first;
	}
	return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
	struct iter_state *st = seq->private;

	h = h->next;
	while (!h) {
		if (++st->bucket >= INSTANCE_BUCKETS)
			return NULL;

		h = instance_table[st->bucket].first;
	}
	return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head;
	head = get_first(seq);

	if (head)
		while (pos && (head = get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(instances_lock)
{
	spin_lock(&instances_lock);
	return get_idx(seq, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
	__releases(instances_lock)
{
	spin_unlock(&instances_lock);
}

static int seq_show(struct seq_file *s, void *v)
{
	const struct nfqnl_instance *inst = v;

	return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
			  inst->queue_num,
			  inst->peer_portid, inst->queue_total,
			  inst->copy_mode, inst->copy_range,
			  inst->queue_dropped, inst->queue_user_dropped,
			  inst->id_sequence, 1);
}
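
/*
 * Illustrative note (editorial): each line of
 * /proc/net/netfilter/nfnetlink_queue shows, in order: queue number, peer
 * portid, packets currently queued, copy mode, copy range, packets dropped
 * because the queue was full, packets dropped because delivery to userspace
 * failed, the last packet id handed out, and a literal 1. A hypothetical
 * instance bound to queue 0 in NFQNL_COPY_PACKET mode might read:
 *
 *	    0   4375     0 2 65531     0     0       42  1
 */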

static const struct seq_operations nfqnl_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &nfqnl_seq_ops,
			sizeof(struct iter_state));
}

static const struct file_operations nfqnl_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqnl_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#endif /* PROC_FS */

static int __init nfnetlink_queue_init(void)
{
	int i, status = -ENOMEM;

	for (i = 0; i < INSTANCE_BUCKETS; i++)
		INIT_HLIST_HEAD(&instance_table[i]);

	netlink_register_notifier(&nfqnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfqnl_subsys);
	if (status < 0) {
		printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

#ifdef CONFIG_PROC_FS
	if (!proc_create("nfnetlink_queue", 0440,
			 proc_net_netfilter, &nfqnl_file_ops))
		goto cleanup_subsys;
#endif

	register_netdevice_notifier(&nfqnl_dev_notifier);
	return status;

#ifdef CONFIG_PROC_FS
cleanup_subsys:
	nfnetlink_subsys_unregister(&nfqnl_subsys);
#endif
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
	return status;
}

static void __exit nfnetlink_queue_fini(void)
{
	nf_unregister_queue_handlers(&nfqh);
	unregister_netdevice_notifier(&nfqnl_dev_notifier);
#ifdef CONFIG_PROC_FS
	remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
#endif
	nfnetlink_subsys_unregister(&nfqnl_subsys);
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);

	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);