#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/kernel.h>
#include <linux/module.h>
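/* Per-fragment state (its offset within the datagram and a pointer back to
 * the original skb) lives in skb->cb while the fragment sits in a reassembly
 * queue; the macro below gives typed access to it.
 */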
#define NFCT_FRAG6_CB(skb)	((struct nf_ct_frag6_skb_cb *)((skb)->cb))
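/* Sysctl knobs exposed under /proc/sys/net/netfilter/ (built only with
 * CONFIG_SYSCTL). The template table below points at init_net's counters;
 * per-namespace copies are made at registration time.
 */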
static struct ctl_table nf_ct_frag6_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_frag6_timeout",
		.data		= &init_net.nf_frag.frags.timeout,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_frag6_low_thresh",
		.data		= &init_net.nf_frag.frags.low_thresh,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "nf_conntrack_frag6_high_thresh",
		.data		= &init_net.nf_frag.frags.high_thresh,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
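/* Register the table for one namespace. Every namespace except init_net gets
 * a kmemdup'd copy whose .data pointers are rewritten to that namespace's own
 * counters, so e.g. `sysctl net.netfilter.nf_conntrack_frag6_timeout` acts
 * per netns.
 */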
static int nf_ct_frag6_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = nf_ct_frag6_sysctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
				GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		/* Each copied entry must point at the field it names,
		 * inside this namespace's own nf_frag state.
		 */
		table[0].data = &net->nf_frag.frags.timeout;
		table[1].data = &net->nf_frag.frags.low_thresh;
		table[2].data = &net->nf_frag.frags.high_thresh;
	}

	hdr = register_net_sysctl(net, "net/netfilter", table);
	if (hdr == NULL)
		goto err_reg;

	net->nf_frag.sysctl.frags_hdr = hdr;
	return 0;
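/* Tear-down counterpart: unregister the header and free the per-netns copy. */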
static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->nf_frag.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->nf_frag.sysctl.frags_hdr);
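/* Stubs used when the kernel is built without CONFIG_SYSCTL. */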
static int nf_ct_frag6_sysctl_register(struct net *net)
{
	return 0;
}

static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
{
}
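/* Timer callback: a queue expired before all of its fragments arrived. */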
static void nf_ct_frag6_expire(unsigned long data)
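/* Queue one fragment into its reassembly queue. Returns a negative value if
 * the fragment is malformed or cannot be inserted.
 */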
static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
			     const struct frag_hdr *fhdr, int nhoff)
	payload_len = ntohs(ipv6_hdr(skb)->payload_len);

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (payload_len -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
		const unsigned char *nh = skb_network_header(skb);
	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len)) {
			pr_debug("already received last fragment\n");
			goto err;
		}
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		if (end & 0x7) {
			pr_debug("end of fragment not rounded to 8 bytes.\n");
			return -1;
		}
		if (end > fq->q.len) {
			if (fq->q.last_in & INET_FRAG_LAST_IN) {
				pr_debug("last packet already reached.\n");
				goto err;
			}
			fq->q.len = end;
		}
	}

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data)) {
		pr_debug("queue: message is too short.\n");
		goto err;
	}
	if (pskb_trim_rcsum(skb, end - offset)) {
		pr_debug("Can't trim\n");
		goto err;
	}
	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.
	 */
	prev = fq->q.fragments_tail;
	if (!prev || NFCT_FRAG6_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (NFCT_FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	NFCT_FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	fq->q.meat += skb->len;
	if (payload_len > fq->q.max_size)
		fq->q.max_size = payload_len;

	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
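/* All fragments are in: splice them into one skb, drop the fragment header
 * and fix up the length and checksum fields.
 */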
	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN) {
		pr_debug("payload len is too large.\n");
		goto out_oversize;
	}
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) {
		pr_debug("skb is cloned but can't expand head");
		goto out_oom;
	}
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (clone == NULL)
			goto out_oom;

		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
	/* We have to remove the fragment header from the datagram and to
	 * relocate the header in order to calculate the ICV correctly. */
	skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0];

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	ipv6_hdr(head)->payload_len = htons(payload_len);
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);
	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;
	/* Chain the original skbs (saved in NFCT_FRAG6_CB()->orig) back together. */
	fp = skb_shinfo(head)->frag_list;

	for (; fp; fp = fp->next) {
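/* Walk the IPv6 extension-header chain to find the fragment header and the
 * header immediately preceding it.
 */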
static int
find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
{
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	const int netoff = skb_network_offset(skb);
			hdrlen = (hdr.hdrlen + 2) << 2;

		nexthdr = hdr.nexthdr;

	*prevhoff = prev_nhoff;
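/* Hook entry point: clone the incoming skb, feed the clone into the queue
 * and, once the datagram is complete, return the reassembled skb to the
 * caller (NULL while fragments are still outstanding).
 */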
	struct net *net = skb_dst(skb) ? dev_net(skb_dst(skb)->dev)
				       : dev_net(skb->dev);
	/* Jumbo payload inhibits frag. header */
	if (ipv6_hdr(skb)->payload_len == 0) {
	if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
		return skb;
	if (!pskb_may_pull(clone, fhoff + sizeof(*fhdr))) {
		pr_debug("message is too short.\n");
		goto ret_orig;
	}
	skb_set_transport_header(clone, fhoff);
	hdr = ipv6_hdr(clone);
	fhdr = (struct frag_hdr *)skb_transport_header(clone);
	if (fq == NULL) {
		pr_debug("Can't find and can't create new queue\n");
		goto ret_orig;
	}
	spin_lock_bh(&fq->q.lock);

	if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) {
		spin_unlock_bh(&fq->q.lock);
		pr_debug("Can't insert skb to queue\n");
		inet_frag_put(&fq->q, &nf_frags);
		goto ret_orig;
	}

	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		ret_skb = nf_ct_frag6_reasm(fq, dev);
		if (ret_skb == NULL)
			pr_debug("Can't reassemble fragmented packets\n");
	}
	spin_unlock_bh(&fq->q.lock);

	inet_frag_put(&fq->q, &nf_frags);
	return ret_skb;
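/* Push each original skb (saved via NFCT_FRAG6_CB()->orig) back through the
 * netfilter hooks with the reassembled skb attached as nfct_reasm.
 */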
	unsigned int ret = 0;

	nf_conntrack_put_reasm(s->nfct_reasm);
	nf_conntrack_get_reasm(skb);

	nf_conntrack_put_reasm(skb);
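/* Per-namespace init/exit, wired up as pernet operations below. */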
static int nf_ct_net_init(struct net *net)
{
	return nf_ct_frag6_sysctl_register(net);
}

static void nf_ct_net_exit(struct net *net)
{
	nf_ct_frags6_sysctl_unregister(net);
}

static struct pernet_operations nf_ct_net_ops = {
	.init = nf_ct_net_init,
	.exit = nf_ct_net_exit,
};
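/* Module init fills in the inet_frags descriptor (nf_frags) used by the
 * shared fragment-queue engine.
 */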
	nf_frags.hashfn = nf_hashfn;
	nf_frags.destructor = NULL;
	nf_frags.skb_free = nf_skb_free;
	nf_frags.frag_expire = nf_ct_frag6_expire;
	nf_frags.secret_interval = 10 * 60 * HZ;