19 #include <asm/uaccess.h>
20 #include <linux/types.h>
21 #include <linux/sched.h>
22 #include <linux/errno.h>
25 #include <linux/kernel.h>
26 #include <linux/fcntl.h>
27 #include <linux/stat.h>
28 #include <linux/socket.h>
30 #include <linux/netdevice.h>
35 #include <linux/slab.h>
42 #include <linux/if_arp.h>
49 #include <linux/mroute6.h>
52 #include <linux/netfilter_ipv6.h>
53 #include <linux/export.h>
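/*
 * IPv6 multicast routing (ip6mr): maintains per-namespace mr6_table
 * structures holding the multicast interface (mif) array and the multicast
 * forwarding cache (MFC), exports state through /proc and rtnetlink, and
 * forwards packets on behalf of a user-space routing daemon that drives the
 * MRT6_* socket options on a raw ICMPv6 socket.
 */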
71 #ifdef CONFIG_IPV6_PIMSM_V2
72 int mroute_reg_vif_num;
94 #define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)
110 static void ip6mr_free_table(struct mr6_table *mrt);
118 static int ip6mr_rtm_dumproute(struct sk_buff *skb,
120 static void mroute_clean_tables(struct mr6_table *mrt);
121 static void ipmr_expire_process(unsigned long arg);
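/*
 * With CONFIG_IPV6_MROUTE_MULTIPLE_TABLES each namespace may hold several
 * mr6_tables chained on net->ipv6.mr6_tables; ip6mr_fib_lookup() selects one
 * through the fib-rules framework (the ip6mr_rule_* ops below).
 */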
123 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
124 #define ip6mr_for_each_table(mrt, net) \
125 list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
138 static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
146 flowi6_to_flowi(flp6), 0, &arg);
178 static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
212 .action = ip6mr_rule_action,
213 .match = ip6mr_rule_match,
214 .configure = ip6mr_rule_configure,
215 .compare = ip6mr_rule_compare,
217 .fill = ip6mr_rule_fill,
219 .policy = ip6mr_rule_policy,
223 static int __net_init ip6mr_rules_init(struct net *net)
233 INIT_LIST_HEAD(&net->ipv6.mr6_tables);
245 net->ipv6.mr6_rules_ops = ops;
255 static void __net_exit ip6mr_rules_exit(struct net *net)
261 ip6mr_free_table(mrt);
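/*
 * Without multiple-table support there is exactly one mr6_table per
 * namespace, net->ipv6.mrt6, and the lookup helpers below collapse to
 * trivial accessors.
 */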
266 #define ip6mr_for_each_table(mrt, net) \
267 for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
269 static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
271 return net->ipv6.mrt6;
274 static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
277 *mrt = net->ipv6.mrt6;
281 static int __net_init ip6mr_rules_init(struct net *net)
284 return net->ipv6.mrt6 ? 0 : -ENOMEM;
287 static void __net_exit ip6mr_rules_exit(struct net *net)
289 ip6mr_free_table(net->ipv6.mrt6);
293 static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
298 mrt = ip6mr_get_table(net, id);
317 #ifdef CONFIG_IPV6_PIMSM_V2
318 mrt->mroute_reg_vif_num = -1;
320 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
321 list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
326 static void ip6mr_free_table(struct mr6_table *mrt)
329 mroute_clean_tables(mrt);
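/*
 * /proc reporting: seq_file iterators walk the mif array and the MFC hash
 * lines (plus the unresolved queue) behind /proc/net/ip6_mr_vif and
 * /proc/net/ip6_mr_cache.
 */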
333 #ifdef CONFIG_PROC_FS
335 struct ipmr_mfc_iter {
343 static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
344 struct ipmr_mfc_iter *it, loff_t pos)
350 for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
358 spin_lock_bh(&mfc_unres_lock);
359 it->cache = &mrt->mfc6_unres_queue;
363 spin_unlock_bh(&mfc_unres_lock);
379 static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
380 struct ipmr_vif_iter *iter,
385 for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
394 static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
397 struct ipmr_vif_iter *iter = seq->private;
398 struct net *net = seq_file_net(seq);
408 return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
412 static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
414 struct ipmr_vif_iter *iter = seq->private;
415 struct net *net = seq_file_net(seq);
420 return ip6mr_vif_seq_idx(net, iter, 0);
422 while (++iter->ct < mrt->maxvif) {
430 static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
436 static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
438 struct ipmr_vif_iter *iter = seq->private;
443 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
446 const char *name = vif->dev ? vif->dev->name : "none";
449 "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
459 .start = ip6mr_vif_seq_start,
460 .next = ip6mr_vif_seq_next,
461 .stop = ip6mr_vif_seq_stop,
462 .show = ip6mr_vif_seq_show,
468 sizeof(struct ipmr_vif_iter));
473 .open = ip6mr_vif_open,
479 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
481 struct ipmr_mfc_iter *it = seq->private;
482 struct net *net = seq_file_net(seq);
490 return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
494 static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
497 struct ipmr_mfc_iter *it = seq->private;
498 struct net *net = seq_file_net(seq);
504 return ipmr_mfc_seq_idx(net, seq->private, 0);
506 if (mfc->list.next != it->cache)
514 while (++it->ct < MFC6_LINES) {
516 if (list_empty(it->cache))
526 spin_lock_bh(&mfc_unres_lock);
527 if (!list_empty(it->cache))
531 spin_unlock_bh(&mfc_unres_lock);
537 static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
539 struct ipmr_mfc_iter *it = seq->private;
543 spin_unlock_bh(&mfc_unres_lock);
548 static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
556 "Iif Pkts Bytes Wrong Oifs\n");
559 const struct ipmr_mfc_iter *it = seq->private;
572 n < mfc->mfc_un.res.maxvif; n++) {
583 seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
591 .start = ipmr_mfc_seq_start,
592 .next = ipmr_mfc_seq_next,
593 .stop = ipmr_mfc_seq_stop,
594 .show = ipmr_mfc_seq_show,
597 static int ipmr_mfc_open(struct inode *inode, struct file *file)
600 sizeof(struct ipmr_mfc_iter));
605 .open = ipmr_mfc_open,
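/*
 * PIM-SM support: the CONFIG_IPV6_PIMSM_V2 receive path below validates an
 * incoming PIM REGISTER message, strips the outer headers and re-injects the
 * encapsulated IPv6 packet on the register vif (reg_dev) via skb_tunnel_rx().
 */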
612 #ifdef CONFIG_IPV6_PIMSM_V2
619 struct net *net = dev_net(skb->dev);
622 .flowi6_iif = skb->dev->ifindex,
623 .flowi6_mark = skb->mark,
627 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
630 pim = (struct pimreghdr *)skb_transport_header(skb);
640 encap = (struct ipv6hdr *)(skb_transport_header(skb) +
643 if (!ipv6_addr_is_multicast(&encap->daddr) ||
648 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
650 reg_vif_num = mrt->mroute_reg_vif_num;
653 if (reg_vif_num >= 0)
664 skb_reset_network_header(skb);
669 skb_tunnel_rx(skb, reg_dev);
680 static const struct inet6_protocol pim6_protocol = {
689 struct net *net = dev_net(dev);
694 .flowi6_mark = skb->mark,
698 err = ip6mr_fib_lookup(net, &fl6, &mrt);
706 dev->stats.tx_packets++;
717 static void reg_vif_setup(struct net_device *dev)
741 dev_net_set(dev, net);
760 unregister_netdevice(dev);
775 if (vifi < 0 || vifi >= mrt->maxvif)
789 #ifdef CONFIG_IPV6_PIMSM_V2
790 if (vifi == mrt->mroute_reg_vif_num)
791 mrt->mroute_reg_vif_num = -1;
794 if (vifi + 1 == mrt->maxvif) {
796 for (tmp = vifi - 1; tmp >= 0; tmp--) {
807 in6_dev = __in6_dev_get(dev);
809 in6_dev->cnf.mc_forwarding--;
818 static inline void ip6mr_cache_free(struct mfc6_cache *c)
835 if (ipv6_hdr(skb)->version == 0) {
852 static void ipmr_do_expire_process(struct mr6_table *mrt)
855 unsigned long expires = 10 * HZ;
862 if (interval < expires)
868 ip6mr_destroy_unres(mrt, c);
875 static void ipmr_expire_process(unsigned long arg)
879 if (!spin_trylock(&mfc_unres_lock)) {
885 ipmr_do_expire_process(mrt);
887 spin_unlock(&mfc_unres_lock);
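/*
 * ip6mr_update_thresholds(): copy the per-mif TTL thresholds supplied by the
 * daemon into a resolved cache entry and recompute its minvif/maxvif span.
 */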
901 for (vifi = 0; vifi < mrt->maxvif; vifi++) {
903 ttls[vifi] && ttls[vifi] < 255) {
904 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
913 static int mif6_add(struct net *net, struct mr6_table *mrt,
914 struct mif6ctl *vifc, int mrtsock)
927 #ifdef CONFIG_IPV6_PIMSM_V2
933 if (mrt->mroute_reg_vif_num >= 0)
935 dev = ip6mr_reg_vif(net, mrt);
940 unregister_netdevice(dev);
960 in6_dev = __in6_dev_get(dev);
962 in6_dev->cnf.mc_forwarding++;
983 #ifdef CONFIG_IPV6_PIMSM_V2
985 mrt->mroute_reg_vif_num = vifi;
987 if (vifi + 1 > mrt->maxvif)
1011 static struct mfc6_cache *ip6mr_cache_alloc(void)
1020 static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
1034 static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
1043 while((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
1044 if (ipv6_hdr(skb)->version == 0) {
1047 if (__ip6mr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
1048 nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
1057 ip6_mr_forward(net, mrt, skb, c);
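/*
 * ip6mr_cache_report(): copy the offending packet's headers into an mrt6msg
 * (cache-miss or whole-packet report) and queue it on the daemon's mroute6
 * control socket so user space can install the missing route.
 */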
1075 #ifdef CONFIG_IPV6_PIMSM_V2
1091 #ifdef CONFIG_IPV6_PIMSM_V2
1098 skb_push(skb, -skb_network_offset(pkt));
1101 skb_reset_transport_header(skb);
1102 msg = (struct mrt6msg *)skb_transport_header(skb);
1105 msg->im6_mif = mrt->mroute_reg_vif_num;
1107 msg->im6_src = ipv6_hdr(pkt)->saddr;
1108 msg->im6_dst = ipv6_hdr(pkt)->daddr;
1119 skb_reset_network_header(skb);
1120 skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
1126 skb_reset_transport_header(skb);
1127 msg = (struct mrt6msg *)skb_transport_header(skb);
1133 msg->im6_src = ipv6_hdr(pkt)->saddr;
1134 msg->im6_dst = ipv6_hdr(pkt)->daddr;
1136 skb_dst_set(skb, dst_clone(skb_dst(pkt)));
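/*
 * ip6mr_cache_unresolved(): find or create an unresolved (S,G) entry, report
 * the cache miss to the daemon, queue the packet on the entry's unresolved
 * list and arm the expiry timer that drops it if no route ever arrives.
 */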
1168 spin_lock_bh(&mfc_unres_lock);
1183 (c = ip6mr_cache_alloc_unres()) == NULL) {
1184 spin_unlock_bh(&mfc_unres_lock);
1205 spin_unlock_bh(&mfc_unres_lock);
1207 ip6mr_cache_free(c);
1215 ipmr_do_expire_process(mrt);
1229 spin_unlock_bh(&mfc_unres_lock);
1251 ip6mr_cache_free(c);
1262 struct net *net = dev_net(dev);
1273 for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1275 mif6_delete(mrt, ct, &list);
1284 .notifier_call = ip6mr_device_event
1291 static int __net_init ip6mr_net_init(struct net *net)
1295 err = ip6mr_rules_init(net);
1299 #ifdef CONFIG_PROC_FS
1304 goto proc_cache_fail;
1309 #ifdef CONFIG_PROC_FS
1313 ip6mr_rules_exit(net);
1319 static void __net_exit ip6mr_net_exit(struct net *net)
1321 #ifdef CONFIG_PROC_FS
1325 ip6mr_rules_exit(net);
1329 .init = ip6mr_net_init,
1330 .exit = ip6mr_net_exit,
1346 goto reg_pernet_fail;
1350 goto reg_notif_fail;
1351 #ifdef CONFIG_IPV6_PIMSM_V2
1353 pr_err("%s: can't add PIM protocol\n", __func__);
1355 goto add_proto_fail;
1359 ip6mr_rtm_dumproute, NULL);
1361 #ifdef CONFIG_IPV6_PIMSM_V2
1379 static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
1380 struct mf6cctl *mfc, int mrtsock)
1392 for (i = 0; i < MAXMIFS; i++) {
1411 ip6mr_update_thresholds(mrt, c, ttls);
1421 c = ip6mr_cache_alloc();
1428 ip6mr_update_thresholds(mrt, c, ttls);
1441 spin_lock_bh(&mfc_unres_lock);
1453 spin_unlock_bh(&mfc_unres_lock);
1456 ip6mr_cache_resolve(net, mrt, uc, c);
1457 ip6mr_cache_free(uc);
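/*
 * mroute_clean_tables(): called when the routing socket shuts down; deletes
 * every non-static mif and MFC entry and flushes the unresolved queue.
 */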
1466 static void mroute_clean_tables(struct mr6_table *mrt)
1475 for (i = 0; i < mrt->maxvif; i++) {
1477 mif6_delete(mrt, i, &list);
1492 ip6mr_cache_free(c);
1497 spin_lock_bh(&mfc_unres_lock);
1500 ip6mr_destroy_unres(mrt, c);
1502 spin_unlock_bh(&mfc_unres_lock);
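/*
 * ip6mr_sk_init()/ip6mr_sk_done(): register (or unregister) the daemon's
 * socket as mrt->mroute6_sk and adjust devconf_all->mc_forwarding so the
 * rest of the stack knows multicast forwarding is active.
 */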
1509 struct net *net = sock_net(sk);
1515 net->ipv6.devconf_all->mc_forwarding++;
1529 struct net *net = sock_net(sk);
1537 net->ipv6.devconf_all->mc_forwarding--;
1540 mroute_clean_tables(mrt);
1555 .flowi6_oif = skb->dev->ifindex,
1556 .flowi6_mark = skb->mark,
1559 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
1578 struct net *net = sock_net(sk);
1581 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1595 if (optlen < sizeof(int))
1598 return ip6mr_sk_init(mrt, sk);
1604 if (optlen < sizeof(vif))
1611 ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
1616 if (optlen < sizeof(mifi_t))
1621 ret = mif6_delete(mrt, mifi, NULL);
1631 if (optlen < sizeof(mfc))
1637 ret = ip6mr_mfc_delete(mrt, &mfc);
1639 ret = ip6mr_mfc_add(net, mrt, &mfc, sk == mrt->mroute6_sk);
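/*
 * Illustrative sketch (not part of this file): a user-space daemon typically
 * drives the MRT6_* options handled above on a raw ICMPv6 socket. The values
 * below (fd, "eth0", mif and parent numbers) are hypothetical, and the
 * mf6cc_origin/mf6cc_mcastgrp addresses would still need to be filled in:
 *
 *	int one = 1;
 *	struct mif6ctl mif = { .mif6c_mifi = 0,
 *			       .mif6c_pifi = if_nametoindex("eth0") };
 *	struct mf6cctl mfc = { .mf6cc_parent = 0 };
 *
 *	setsockopt(fd, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
 *	setsockopt(fd, IPPROTO_IPV6, MRT6_ADD_MIF, &mif, sizeof(mif));
 *	setsockopt(fd, IPPROTO_IPV6, MRT6_ADD_MFC, &mfc, sizeof(mfc));
 */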
1655 #ifdef CONFIG_IPV6_PIMSM_V2
1673 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
1678 if (optlen != sizeof(u32))
1687 if (!ip6mr_new_table(net, v))
1689 raw6_sk(sk)->ip6mr_table = v;
1712 struct net *net = sock_net(sk);
1715 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1723 #ifdef CONFIG_IPV6_PIMSM_V2
1738 olr = min_t(int, olr, sizeof(int));
1759 struct net *net = sock_net(sk);
1762 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1792 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1810 #ifdef CONFIG_COMPAT
1811 struct compat_sioc_sg_req6 {
1819 struct compat_sioc_mif_req6 {
1827 int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1829 struct compat_sioc_sg_req6 sr;
1830 struct compat_sioc_mif_req6 vr;
1833 struct net *net = sock_net(sk);
1836 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1844 if (vr.mifi >= mrt->maxvif)
1866 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1885 static inline int ip6mr_forward2_finish(struct sk_buff *skb)
1891 return dst_output(skb);
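/*
 * ip6mr_forward2(): emit one copy of the packet on a single mif. A register
 * vif turns the packet into a whole-packet report for the daemon; a normal
 * vif gets a routed copy with the hop limit decremented, passed through the
 * NF_INET_FORWARD hook to ip6mr_forward2_finish() above.
 */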
1898 static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
1910 #ifdef CONFIG_IPV6_PIMSM_V2
1914 vif->dev->stats.tx_bytes += skb->len;
1915 vif->dev->stats.tx_packets++;
1921 ipv6h = ipv6_hdr(skb);
1924 .flowi6_oif = vif->link,
1925 .daddr = ipv6h->daddr,
1935 skb_dst_set(skb, dst);
1958 ipv6h = ipv6_hdr(skb);
1964 ip6mr_forward2_finish);
1975 for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
1982 static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
1999 true_vifi = ip6mr_find_vif(mrt, skb->dev);
2008 cache->mfc_un.res.ttls[true_vifi] < 255) &&
2028 ip6mr_forward2(net, mrt, skb2, cache, psend);
2034 ip6mr_forward2(net, mrt, skb, cache, psend);
2051 struct net *net = dev_net(skb->dev);
2054 .flowi6_iif = skb->dev->ifindex,
2055 .flowi6_mark = skb->mark,
2059 err = ip6mr_fib_lookup(net, &fl6, &mrt);
2066 cache = ip6mr_cache_find(mrt,
2067 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
2072 if (cache == NULL) {
2075 vif = ip6mr_find_vif(mrt, skb->dev);
2077 int err = ip6mr_cache_unresolved(mrt, vif, skb);
2087 ip6_mr_forward(net, mrt, skb, cache);
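/*
 * __ip6mr_fill_mroute(): append the input interface (RTA_IIF) and an
 * RTA_MULTIPATH attribute listing every output mif (with its TTL threshold)
 * for a cache entry; shared by the rtnetlink dump path and ip6mr_get_route().
 */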
2100 u8 *b = skb_tail_pointer(skb);
2113 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
2116 goto rtattr_failure;
2125 mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
2147 cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
2161 if (dev == NULL || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
2173 skb_reset_transport_header(skb2);
2176 skb_reset_network_header(skb2);
2178 iph = ipv6_hdr(skb2);
2188 iph->daddr = rt->rt6i_dst.addr;
2190 err = ip6mr_cache_unresolved(mrt, vif, skb2);
2199 err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
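/*
 * ip6mr_fill_mroute()/ip6mr_rtm_dumproute(): build one RTM_NEWROUTE message
 * per MFC entry and walk every table, hash line and entry when user space
 * dumps the multicast routing table over rtnetlink.
 */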
2214 rtm = nlmsg_data(nlh);
2221 goto nla_put_failure;
2228 goto nla_put_failure;
2229 if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0)
2230 goto nla_put_failure;
2232 return nlmsg_end(skb, nlh);
2235 nlmsg_cancel(skb, nlh);
2241 struct net *net = sock_net(skb->sk);
2244 unsigned int t = 0, s_t;
2245 unsigned int h = 0, s_h;
2246 unsigned int e = 0, s_e;
2262 if (ip6mr_fill_mroute(mrt, skb,