#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/netfilter/nf_queue.h>
#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif
#define NFQNL_QMAX_DEFAULT 1024

#define INSTANCE_BUCKETS 16
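/*
 * Each userspace listener binds to a 16-bit queue number; the per-queue state
 * lives in a struct nfqnl_instance kept in a small hash table keyed by queue
 * number.  Writers take instances_lock, readers walk the chains under RCU.
 * A minimal sketch of the expected declarations follows; the exact field set
 * of struct nfqnl_instance and the hash function are assumptions here:
 */
struct nfqnl_instance {
	struct hlist_node hlist;		/* chained in instance_table[] */
	struct rcu_head rcu;

	int peer_portid;			/* netlink portid of the listener */
	unsigned int queue_maxlen;		/* drop once queue_total reaches this */
	unsigned int copy_range;
	unsigned int queue_dropped;
	unsigned int queue_user_dropped;

	u_int16_t queue_num;			/* number of this queue */
	u_int8_t copy_mode;
	u_int32_t flags;			/* NFQA_CFG_F_* bits */

	spinlock_t lock;			/* protects the fields below */
	unsigned int queue_total;
	unsigned int id_sequence;		/* packet id sequence counter */
	struct list_head queue_list;		/* packets awaiting a verdict */
};

static DEFINE_SPINLOCK(instances_lock);

static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly;

/* any cheap spread of the queue number over the buckets will do */
static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
	return ((u_int8_t)queue_num) % INSTANCE_BUCKETS;
}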
	/* instance_lookup(): RCU-protected lookup by queue number */
	head = &instance_table[instance_hashfn(queue_num)];
	hlist_for_each_entry_rcu(inst, pos, head, hlist) {
		if (inst->queue_num == queue_num)
			return inst;
	}
static struct nfqnl_instance *
instance_create(u_int16_t queue_num, int portid)
	spin_lock(&instances_lock);
	if (instance_lookup(queue_num)) {
		err = -EEXIST;
		goto out_unlock;
	}
	h = instance_hashfn(queue_num);
	hlist_add_head_rcu(&inst->hlist, &instance_table[h]);
	spin_unlock(&instances_lock);
	return inst;

out_unlock:
	spin_unlock(&instances_lock);
	return ERR_PTR(err);
static void
instance_destroy_rcu(struct rcu_head *head)
{
	struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance, rcu);

	nfqnl_flush(inst, NULL, 0);	/* drop anything still awaiting a verdict */
	kfree(inst);
}
	hlist_del_rcu(&inst->hlist);
	spin_lock(&instances_lock);
	__instance_destroy(inst);
	spin_unlock(&instances_lock);
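/*
 * Teardown is two-phase: __instance_destroy() unlinks the instance from its
 * hash chain (the hlist_del_rcu() above) and defers the actual free through
 * call_rcu(), so lockless readers that found the instance via
 * instance_lookup() never dereference freed memory.  A minimal sketch of that
 * pattern, assuming the rcu field sketched earlier:
 *
 *	static void __instance_destroy(struct nfqnl_instance *inst)
 *	{
 *		hlist_del_rcu(&inst->hlist);
 *		call_rcu(&inst->rcu, instance_destroy_rcu);
 *	}
 */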
	/* find_dequeue_entry(): unlink one pending entry under queue->lock */
	spin_lock_bh(&queue->lock);
	__dequeue_entry(queue, entry);
	spin_unlock_bh(&queue->lock);
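/*
 * nfqnl_flush() walks everything still waiting for a verdict and reinjects
 * matching entries with NF_DROP; a NULL cmpfn (as used on instance teardown)
 * means "drop everything".
 */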
	spin_lock_bh(&queue->lock);
	list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
		if (!cmpfn || cmpfn(entry, data)) {
			list_del(&entry->list);
			queue->queue_total--;
			nf_reinject(entry, NF_DROP);
		}
	}
	spin_unlock_bh(&queue->lock);
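/*
 * nfqnl_build_packet_message() turns a queued nf_queue_entry into an
 * NFQNL_MSG_PACKET netlink message for the listener: packet header (id, hook,
 * hw protocol), input/output ifindexes (plus the bridge physical ports when
 * bridge netfilter is compiled in), the link-layer address and, depending on
 * the configured copy mode and copy range, up to data_len bytes of payload in
 * NFQA_PAYLOAD.
 */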
#ifdef CONFIG_BRIDGE_NETFILTER
		+ nla_total_size(sizeof(u_int32_t))	/* bridged in/out ifindex */
		+ nla_total_size(sizeof(u_int32_t))
#endif

		if (data_len == 0 || data_len > entskb->len)
			data_len = entskb->len;

		size += nla_total_size(data_len);
		cap_len = entskb->len;

	old_tail = skb->tail;
	nlh = nlmsg_put(skb, 0, 0,
			NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
			sizeof(struct nfgenmsg), 0);
	nfmsg = nlmsg_data(nlh);
	pmsg = nla_data(nla);
	indev = entry->indev;
#ifndef CONFIG_BRIDGE_NETFILTER
	if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
		goto nla_put_failure;
#else
	if (entry->pf == PF_BRIDGE) {
		/* indev is the physical port: also report its bridge device */
		if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV, htonl(indev->ifindex)) ||
		    nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
			goto nla_put_failure;
	} else {
		/* indev is the bridge: report the physical port when known */
		if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
			goto nla_put_failure;
		if (entskb->nf_bridge && entskb->nf_bridge->physindev &&
		    nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV, htonl(entskb->nf_bridge->physindev->ifindex)))
			goto nla_put_failure;
	}
#endif
#ifndef CONFIG_BRIDGE_NETFILTER
	if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
		goto nla_put_failure;
#else
	if (entry->pf == PF_BRIDGE) {
		if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV, htonl(outdev->ifindex)) ||
		    nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
			goto nla_put_failure;
	} else {
		if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
			goto nla_put_failure;
		if (entskb->nf_bridge && entskb->nf_bridge->physoutdev &&
		    nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV, htonl(entskb->nf_bridge->physoutdev->ifindex)))
			goto nla_put_failure;
	}
#endif
		goto nla_put_failure;
	if (indev && entskb->dev &&
	    entskb->mac_header != entskb->network_header) {
		struct nfqnl_msg_packet_hw phw;
		int len = dev_parse_header(entskb, phw.hw_addr);
		if (len) {
			phw.hw_addrlen = htons(len);
			if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
				goto nla_put_failure;
		}
	}
		goto nla_put_failure;
	if (data_len) {
		struct nlattr *nla;
		int sz = nla_attr_size(data_len);
		if (skb_tailroom(skb) < nla_total_size(data_len)) {
			printk(KERN_WARNING "nf_queue: no tailroom!\n");
			goto nlmsg_failure;
		}
		nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
		nla->nla_type = NFQA_PAYLOAD;
		nla->nla_len = sz;
		if (skb_copy_bits(entskb, 0, nla_data(nla), data_len))
			goto nla_put_failure;
	}
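/*
 * nfqnl_enqueue_packet() is the nf_queue handler (registered as .outfn
 * below): it looks up the instance for the target queue number, builds the
 * netlink message, assigns the packet id under queue->lock, delivers the
 * message to the listener's portid and, only if delivery succeeded, links the
 * entry into queue_list to wait for a verdict.
 */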
static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
	queue = instance_lookup(queuenum);
	if (!queue)
		return -ESRCH;

	nskb = nfqnl_build_packet_message(queue, entry, &packet_id_ptr);
	if (nskb == NULL)
		return -ENOMEM;

	spin_lock_bh(&queue->lock);

	if (!queue->peer_portid)
		goto err_out_free_nskb;
	if (queue->queue_total >= queue->queue_maxlen) {
		queue->queue_dropped++;
		goto err_out_free_nskb;
	}

	entry->id = ++queue->id_sequence;
	*packet_id_ptr = htonl(entry->id);

	__enqueue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);
	return 0;

err_out_free_nskb:
	kfree_skb(nskb);
	spin_unlock_bh(&queue->lock);
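/*
 * When a verdict carries an NFQA_PAYLOAD attribute, nfqnl_mangle() replaces
 * the queued packet's data with the userspace-supplied buffer: shrinking
 * trims the skb, growing re-expands it (reallocating when there is not enough
 * tailroom) before the new contents are copied over the old ones.
 */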
static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
{
	struct sk_buff *nskb;

	if (diff < 0) {
		if (pskb_trim(e->skb, data_len))
			return -ENOMEM;
	} else if (diff > 0) {
		if (data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
					       diff, GFP_ATOMIC);
			if (!nskb) {
				printk(KERN_WARNING "nf_queue: OOM "
				       "in mangle, dropping packet\n");
				return -ENOMEM;
			}
			kfree_skb(e->skb);
			e->skb = nskb;
		}
		skb_put(e->skb, diff);
	}
	if (!skb_make_writable(e->skb, data_len))
		return -ENOMEM;
	skb_copy_to_linear_data(e->skb, data, data_len);
	return 0;
}
	spin_lock_bh(&queue->lock);
	/* nfqnl_set_mode(): copy_mode/copy_range are switched here under the lock */
	spin_unlock_bh(&queue->lock);
static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
	if (entry->indev)
		if (entry->indev->ifindex == ifindex)
			return 1;
	if (entry->outdev)
		if (entry->outdev->ifindex == ifindex)
			return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		if (entry->skb->nf_bridge->physindev &&
		    entry->skb->nf_bridge->physindev->ifindex == ifindex)
			return 1;
		if (entry->skb->nf_bridge->physoutdev &&
		    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
			return 1;
	}
#endif
	return 0;
}
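/*
 * nfqnl_dev_drop(): when a network device goes away, every queued entry that
 * still references it (directly, or through the bridge physdevs matched by
 * dev_cmp() above) is flushed with an NF_DROP verdict.
 */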
static void
nfqnl_dev_drop(int ifindex)
{
	rcu_read_lock();
	for (i = 0; i < INSTANCE_BUCKETS; i++)
		hlist_for_each_entry_rcu(inst, tmp, &instance_table[i], hlist)
			nfqnl_flush(inst, dev_cmp, ifindex);
	rcu_read_unlock();
}
#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;
	if (event == NETDEV_DOWN)
		nfqnl_dev_drop(dev->ifindex);

	.notifier_call = nfqnl_rcv_dev_event,
	/* the netlink socket went away: destroy every instance it had bound */
	spin_lock(&instances_lock);
	__instance_destroy(inst);
	spin_unlock(&instances_lock);

	.notifier_call = nfqnl_rcv_nl_event,
static struct nfqnl_instance *verdict_instance_lookup(u16 queue_num, int nlportid)
{
	struct nfqnl_instance *queue;

	queue = instance_lookup(queue_num);
	if (!queue)
		return ERR_PTR(-ENODEV);
	/* only the socket that bound the queue may deliver verdicts for it */
	if (queue->peer_portid != nlportid)
		return ERR_PTR(-EPERM);
	return queue;
}
static struct nfqnl_msg_verdict_hdr *
verdicthdr_get(const struct nlattr * const nfqa[])
{
	if (!nfqa[NFQA_VERDICT_HDR])
		return NULL;
	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
	return vhdr;
}
static int nfq_id_after(unsigned int id, unsigned int max)
{
	return (int)(id - max) > 0;
}
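/*
 * nfq_id_after() is a wraparound-safe "id > max" for the packet id sequence:
 * the unsigned difference is reinterpreted as signed, so ids that are ahead
 * of max by less than 2^31 compare as greater even across a 32-bit wrap.
 * For example, with max == 0xfffffffe and id == 5 the difference is 7, so id
 * counts as newer.  The batch verdict handler below relies on this to accept
 * every queued entry whose id is not after the id given in the verdict.
 */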
static int
nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb,
			 const struct nlmsghdr *nlh,
			 const struct nlattr * const nfqa[])
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct nf_queue_entry *entry, *tmp;
	unsigned int verdict, maxid;
	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	u16 queue_num = ntohs(nfmsg->res_id);
	LIST_HEAD(batch_list);

	queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).portid);
	if (IS_ERR(queue))
		return PTR_ERR(queue);

	vhdr = verdicthdr_get(nfqa);
	if (!vhdr)
		return -EINVAL;
	verdict = ntohl(vhdr->verdict);
	maxid = ntohl(vhdr->id);

	spin_lock_bh(&queue->lock);
	list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
		if (nfq_id_after(entry->id, maxid))
			break;
		__dequeue_entry(queue, entry);
		list_add_tail(&entry->list, &batch_list);
	}
	spin_unlock_bh(&queue->lock);

	if (list_empty(&batch_list))
		return -ENOENT;
	list_for_each_entry_safe(entry, tmp, &batch_list, list) {
		if (nfqa[NFQA_MARK])
			entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
		nf_reinject(entry, verdict);
	}
	return 0;
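/*
 * nfqnl_recv_verdict() handles a verdict for a single packet id: the entry is
 * unlinked from the queue, optionally rewritten from NFQA_PAYLOAD and
 * re-marked from NFQA_MARK, and then handed back to the stack via
 * nf_reinject() with the requested verdict (NF_ACCEPT, NF_DROP, ...).
 */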
static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
		   const struct nlmsghdr *nlh,
		   const struct nlattr * const nfqa[])
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);
	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	struct nf_queue_entry *entry;
	unsigned int verdict;

	queue = instance_lookup(queue_num);
	if (!queue)
		queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).portid);
	if (IS_ERR(queue))
		return PTR_ERR(queue);

	vhdr = verdicthdr_get(nfqa);
	if (!vhdr)
		return -EINVAL;
	verdict = ntohl(vhdr->verdict);

	entry = find_dequeue_entry(queue, ntohl(vhdr->id));
	if (entry == NULL)
		return -ENOENT;

	if (nfqa[NFQA_PAYLOAD]) {
		u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
		int diff = payload_len - entry->skb->len;

		if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
				 payload_len, entry, diff) < 0)
			verdict = NF_DROP;
	}

	if (nfqa[NFQA_MARK])
		entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
	nf_reinject(entry, verdict);
	return 0;
static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
		  const struct nlmsghdr *nlh,
		  const struct nlattr * const nfqa[])
{
	return -ENOTSUPP;
}

static const struct nf_queue_handler nfqh = {
	.outfn	= &nfqnl_enqueue_packet,
};
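/*
 * NFQNL_MSG_CONFIG drives queue lifetime and parameters: NFQNL_CFG_CMD_BIND
 * creates an instance owned by the sender's netlink portid,
 * NFQNL_CFG_CMD_UNBIND destroys it, and NFQA_CFG_PARAMS,
 * NFQA_CFG_QUEUE_MAXLEN and NFQA_CFG_FLAGS/NFQA_CFG_MASK adjust the copy
 * mode, the queue length limit and the per-queue flag bits.
 */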
static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
		  const struct nlmsghdr *nlh,
		  const struct nlattr * const nfqa[])
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);

	cmd = nla_data(nfqa[NFQA_CFG_CMD]);
	queue = instance_lookup(queue_num);
	switch (cmd->command) {
	case NFQNL_CFG_CMD_BIND:
		queue = instance_create(queue_num, NETLINK_CB(skb).portid);
		if (IS_ERR(queue))
			ret = PTR_ERR(queue);
		break;
	case NFQNL_CFG_CMD_UNBIND:
		instance_destroy(queue);
		break;
	}

	if (nfqa[NFQA_CFG_PARAMS]) {
		params = nla_data(nfqa[NFQA_CFG_PARAMS]);
		nfqnl_set_mode(queue, params->copy_mode, ntohl(params->copy_range));
	}

	if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
		queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
		spin_lock_bh(&queue->lock);
		queue->queue_maxlen = ntohl(*queue_maxlen);
		spin_unlock_bh(&queue->lock);
	}
	if (nfqa[NFQA_CFG_FLAGS]) {
		__u32 flags, mask;

		flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
		mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));

		/* only the bits covered by the mask are changed */
		spin_lock_bh(&queue->lock);
		queue->flags &= ~mask;
		queue->flags |= flags & mask;
		spin_unlock_bh(&queue->lock);
	}
	/* nfnl callbacks: verdict, config and batch verdict, each with its nla policy */
				    .policy = nfqa_verdict_policy },
				    .policy = nfqa_cfg_policy },
				    .policy = nfqa_verdict_batch_policy },
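/*
 * The callback table above is registered with the nfnetlink core as the
 * NFNL_SUBSYS_QUEUE subsystem.  A sketch of the expected registration record,
 * following the usual nfnetlink pattern (the struct name and field values are
 * assumptions here):
 *
 *	static const struct nfnetlink_subsystem nfqnl_subsys = {
 *		.name		= "nf_queue",
 *		.subsys_id	= NFNL_SUBSYS_QUEUE,
 *		.cb_count	= NFQNL_MSG_MAX,
 *		.cb		= nfqnl_cb,
 *	};
 */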
#ifdef CONFIG_PROC_FS

	/* get_first(): return the head of the first non-empty bucket */
	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
		if (!hlist_empty(&instance_table[st->bucket]))
			return instance_table[st->bucket].first;
	}

	/* get_next(): advance within the bucket, then to the next non-empty one */
	struct iter_state *st = seq->private;

	if (++st->bucket >= INSTANCE_BUCKETS)
		return NULL;
	h = instance_table[st->bucket].first;

	/* get_idx(): skip pos entries from the first instance */
	head = get_first(seq);
	while (pos && (head = get_next(seq, head)))
		pos--;
static void *seq_start(struct seq_file *seq, loff_t *pos)
{
	spin_lock(&instances_lock);
	return get_idx(seq, *pos);
}
static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return get_next(s, v);
}
static void seq_stop(struct seq_file *s, void *v)
{
	spin_unlock(&instances_lock);
}
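/*
 * seq_show() emits one line per instance in /proc/net/netfilter/nfnetlink_queue:
 * queue number, peer portid, packets currently queued, copy mode, copy range,
 * packets dropped because the queue was full, packets dropped because the
 * netlink send to userspace failed, the last packet id handed out, and a
 * constant use count of 1.
 */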
static int seq_show(struct seq_file *s, void *v)
{
	const struct nfqnl_instance *inst = v;

	return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
			  inst->queue_num,
			  inst->peer_portid, inst->queue_total,
			  inst->copy_mode, inst->copy_range,
			  inst->queue_dropped, inst->queue_user_dropped,
			  inst->id_sequence, 1);
}
	return seq_open_private(file, &nfqnl_seq_ops,
				sizeof(struct iter_state));
static int __init nfnetlink_queue_init(void)
{
	netlink_register_notifier(&nfqnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfqnl_subsys);
	if (status < 0)
		goto cleanup_netlink_notifier;

#ifdef CONFIG_PROC_FS
	if (!proc_create("nfnetlink_queue", 0440,
			 proc_net_netfilter, &nfqnl_file_ops))
		goto cleanup_subsys;
#endif
	return status;

#ifdef CONFIG_PROC_FS
cleanup_subsys:
	nfnetlink_subsys_unregister(&nfqnl_subsys);
#endif
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
static void __exit nfnetlink_queue_fini(void)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
#endif
}
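/*
 * The exit path also unregisters the queue handler, the netdevice and netlink
 * notifiers and the nfnetlink subsystem, and waits for outstanding call_rcu()
 * callbacks (rcu_barrier()) before the module text can go away;
 * module_init()/module_exit() wire the two functions above into the module
 * lifecycle.
 */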