41 #define pr_fmt(fmt) "IPv6: " fmt
43 #include <linux/errno.h>
44 #include <linux/types.h>
45 #include <linux/kernel.h>
46 #include <linux/socket.h>
48 #include <linux/net.h>
49 #include <linux/in6.h>
50 #include <linux/netdevice.h>
52 #include <linux/if_arp.h>
58 #include <linux/slab.h>
60 #include <linux/sysctl.h>
62 #include <linux/capability.h>
65 #include <linux/string.h>
66 #include <linux/hash.h>
82 #include <linux/if_tunnel.h>
83 #include <linux/rtnetlink.h>
85 #ifdef CONFIG_IPV6_PRIVACY
86 #include <linux/random.h>
90 #include <asm/unaligned.h>
94 #include <linux/export.h>
100 #define ADBG(x) printk x
105 #define INFINITY_LIFE_TIME 0xFFFFFFFF
107 static inline u32 cstamp_delta(unsigned long cstamp)
112 #define ADDRCONF_TIMER_FUZZ_MINUS (HZ > 50 ? HZ/50 : 1)
113 #define ADDRCONF_TIMER_FUZZ (HZ / 4)
114 #define ADDRCONF_TIMER_FUZZ_MAX (HZ)
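/*
 * The fuzz values above jitter and round the address verification timer
 * (addr_chk_timer -> addrconf_verify) so the periodic lifetime checks do
 * not all fire on exactly the same jiffy.
 */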
117 static void addrconf_sysctl_register(struct inet6_dev *idev);
118 static void addrconf_sysctl_unregister(struct inet6_dev *idev);
120 static inline void addrconf_sysctl_register(struct inet6_dev *idev)
124 static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
129 #ifdef CONFIG_IPV6_PRIVACY
132 static void ipv6_regen_rndid(unsigned long data);
144 static void addrconf_verify(unsigned long);
146 static DEFINE_TIMER(addr_chk_timer, addrconf_verify, 0, 0);
149 static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
150 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
153 unsigned long event);
156 static void addrconf_dad_start(struct inet6_ifaddr *ifp);
157 static void addrconf_dad_timer(unsigned long data);
158 static void addrconf_dad_completed(struct inet6_ifaddr *ifp);
160 static void addrconf_rs_timer(unsigned long data);
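/*
 * The declarations above cover the three periodic activities of this file:
 * router solicitations (addrconf_rs_timer), duplicate address detection
 * (addrconf_dad_*), and lifetime/privacy-address maintenance
 * (addrconf_verify, driven by addr_chk_timer).
 */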
176 .accept_redirects = 1,
178 .force_mld_version = 0,
183 #ifdef CONFIG_IPV6_PRIVACY
191 .accept_ra_defrtr = 1,
192 .accept_ra_pinfo = 1,
193 #ifdef CONFIG_IPV6_ROUTER_PREF
194 .accept_ra_rtr_pref = 1,
195 .rtr_probe_interval = 60 * HZ,
196 #ifdef CONFIG_IPV6_ROUTE_INFO
197 .accept_ra_rt_info_max_plen = 0,
201 .accept_source_route = 0,
206 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
211 .accept_redirects = 1,
217 #ifdef CONFIG_IPV6_PRIVACY
225 .accept_ra_defrtr = 1,
226 .accept_ra_pinfo = 1,
227 #ifdef CONFIG_IPV6_ROUTER_PREF
228 .accept_ra_rtr_pref = 1,
229 .rtr_probe_interval = 60 * HZ,
230 #ifdef CONFIG_IPV6_ROUTE_INFO
231 .accept_ra_rt_info_max_plen = 0,
235 .accept_source_route = 0,
247 static inline bool addrconf_qdisc_ok(const struct net_device *dev)
249 return !qdisc_tx_is_noop(dev);
253 static inline int addrconf_is_prefix_route(const struct rt6_info *rt)
258 static void addrconf_del_timer(struct inet6_ifaddr *ifp)
270 static void addrconf_mod_timer(struct inet6_ifaddr *ifp,
279 ifp->timer.function = addrconf_dad_timer;
282 ifp->timer.function = addrconf_rs_timer;
299 if (!idev->stats.icmpv6dev)
303 if (!idev->stats.icmpv6msgdev)
316 static void snmp6_free_dev(struct inet6_dev *idev)
332 #ifdef NET_REFCNT_DEBUG
333 pr_debug("%s: %s\n", __func__, dev ? dev->name : "NIL");
337 pr_warn("Freeing alive inet6 device %p\n", idev);
340 snmp6_free_dev(idev);
363 memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
364 ndev->cnf.mtu6 = dev->mtu;
371 if (ndev->cnf.forwarding)
376 if (snmp6_alloc_dev(ndev) < 0) {
378 "%s: cannot allocate memory for statistics; dev=%s.\n",
379 __func__, dev->name));
388 "%s: cannot create /proc/net/dev_snmp6/%s\n",
389 __func__, dev->name));
402 ndev->cnf.accept_dad = -1;
404 #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
407 ndev->cnf.rtr_solicits = 0;
411 #ifdef CONFIG_IPV6_PRIVACY
412 INIT_LIST_HEAD(&ndev->tempaddr_list);
413 setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev);
419 ndev->cnf.use_tempaddr = -1;
422 ipv6_regen_rndid((unsigned long)ndev);
426 if (netif_running(dev) && addrconf_qdisc_ok(dev))
431 addrconf_sysctl_register(ndev);
451 idev = __in6_dev_get(dev);
453 idev = ipv6_add_dev(dev);
464 static void dev_forward_change(struct inet6_dev *idev)
472 if (idev->cnf.forwarding)
475 if (idev->cnf.forwarding)
484 if (idev->cnf.forwarding)
485 addrconf_join_anycast(ifa);
487 addrconf_leave_anycast(ifa);
492 static void addrconf_forward_change(struct net *net, __s32 newf)
498 idev = __in6_dev_get(dev);
500 int changed = (!idev->cnf.forwarding) ^ (!newf);
501 idev->cnf.forwarding = newf;
503 dev_forward_change(idev);
508 static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
514 return restart_syscall();
516 net = (struct net *)table->extra2;
520 if (p == &net->ipv6.devconf_dflt->forwarding) {
525 if (p == &net->ipv6.devconf_all->forwarding) {
526 net->ipv6.devconf_dflt->forwarding = newf;
527 addrconf_forward_change(net, newf);
528 } else if ((!newf) ^ (!old))
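/*
 * Writing to conf/all/forwarding propagates the new value to the "default"
 * template and to every device via addrconf_forward_change(), while writes
 * to conf/default or a single device only affect that entry;
 * dev_forward_change() then joins or leaves the anycast groups as needed.
 */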
543 #ifdef NET_REFCNT_DEBUG
547 in6_dev_put(ifp->idev);
550 pr_notice("Timer is still running, when freeing ifa=%p\n", ifp);
553 pr_warn("Freeing alive inet6 address %p\n", ifp);
565 int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
574 if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
596 int addr_type = ipv6_addr_type(addr);
610 if (idev->cnf.disable_ipv6) {
615 spin_lock(&addrconf_hash_lock);
618 if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) {
619 ADBG(("ipv6_add_addr: already assigned\n"));
627 ADBG(("ipv6_add_addr: malloc failed\n"));
658 hash = inet6_addr_hash(addr);
660 hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);
661 spin_unlock(&addrconf_hash_lock);
665 ipv6_link_dev_addr(idev, ifa);
667 #ifdef CONFIG_IPV6_PRIVACY
669 list_add(&ifa->tmp_list, &idev->tempaddr_list);
677 rcu_read_unlock_bh();
688 spin_unlock(&addrconf_hash_lock);
699 int deleted = 0, onlink = 0;
700 unsigned long expires = jiffies;
710 spin_lock_bh(&addrconf_hash_lock);
712 spin_unlock_bh(&addrconf_hash_lock);
715 #ifdef CONFIG_IPV6_PRIVACY
719 in6_ifa_put(ifp->ifpub);
736 if (ipv6_prefix_equal(&ifa->addr, &ifp->addr,
743 unsigned long lifetime;
748 spin_lock(&ifa->lock);
750 lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ);
758 expires = ifa->tstamp + lifetime * HZ;
759 spin_unlock(&ifa->lock);
766 addrconf_del_timer(ifp);
790 struct net *net = dev_net(ifp->idev->dev);
794 fl6.flowi6_oif = ifp->idev->dev->ifindex;
799 if (rt != net->ipv6.ip6_null_entry &&
800 addrconf_is_prefix_route(rt)) {
805 rt6_set_expires(rt, expires);
817 #ifdef CONFIG_IPV6_PRIVACY
822 unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_tstamp, age;
823 unsigned long regen_advance;
832 spin_lock_bh(&ift->lock);
833 memcpy(&addr.s6_addr[8], &ift->addr.s6_addr[8], 8);
834 spin_unlock_bh(&ift->lock);
841 if (idev->cnf.use_tempaddr <= 0) {
843 pr_info("%s: use_tempaddr is disabled\n", __func__);
848 spin_lock_bh(&ifp->lock);
849 if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
850 idev->cnf.use_tempaddr = -1;
851 spin_unlock_bh(&ifp->lock);
853 pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
861 __ipv6_try_regen_rndid(idev, tmpaddr);
862 memcpy(&addr.s6_addr[8], idev->rndid, 8);
866 idev->cnf.temp_valid_lft + age);
869 idev->cnf.temp_prefered_lft + age -
870 idev->cnf.max_desync_factor);
872 max_addresses = idev->cnf.max_addresses;
874 spin_unlock_bh(&ifp->lock);
876 regen_advance = idev->cnf.regen_max_retry *
877 idev->cnf.dad_transmits *
886 if (tmp_prefered_lft <= regen_advance) {
898 ift = !max_addresses ||
899 ipv6_count_addresses(idev) < max_addresses ?
900 ipv6_add_addr(idev, &addr, tmp_plen,
903 if (!ift || IS_ERR(ift)) {
906 pr_info("%s: retry temporary address regeneration\n", __func__);
912 spin_lock_bh(&ift->lock);
918 spin_unlock_bh(&ift->lock);
920 addrconf_dad_start(ift);
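/*
 * ipv6_create_tempaddr() implements the privacy extensions (RFC 3041/4941):
 * the public address's prefix is combined with the per-device random
 * identifier (idev->rndid), the temporary valid/preferred lifetimes are
 * capped by temp_valid_lft / temp_prefered_lft minus the desync factor,
 * and the new address is put through DAD like any other.
 */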
936 #ifdef CONFIG_IPV6_MIP6
941 #ifdef CONFIG_IPV6_PRIVACY
942 IPV6_SADDR_RULE_PRIVACY,
966 static inline int ipv6_saddr_preferred(int type)
973 static int ipv6_get_saddr_eval(struct net *net,
980 if (i <= score->rule) {
989 ret = !!test_bit(i, score->scorebits);
1001 ret = ipv6_addr_equal(&score->ifa->addr, dst->addr);
1025 ret = __ipv6_addr_src_scope(score->addr_type);
1026 if (ret >= dst->scope)
1034 ret = ipv6_saddr_preferred(score->addr_type) ||
1037 #ifdef CONFIG_IPV6_MIP6
1038 case IPV6_SADDR_RULE_HOA:
1049 dst->ifindex == score->ifa->idev->dev->ifindex);
1055 score->ifa->idev->dev->ifindex) == dst->label;
1057 #ifdef CONFIG_IPV6_PRIVACY
1058 case IPV6_SADDR_RULE_PRIVACY:
1065 score->ifa->idev->cnf.use_tempaddr >= 2;
1074 ret = !(ipv6_addr_orchid(&score->ifa->addr) ^
1075 ipv6_addr_orchid(dst->addr));
1079 ret = ipv6_addr_diff(&score->ifa->addr, dst->addr);
1080 if (ret > score->ifa->prefix_len)
1081 ret = score->ifa->prefix_len;
1100 *score = &scores[0], *hiscore = &scores[1];
1108 dst.scope = __ipv6_addr_src_scope(dst_type);
1113 hiscore->ifa = NULL;
1137 idev = __in6_dev_get(dev);
1162 score->addr_type & IPV6_ADDR_MULTICAST)) {
1164 "ADDRCONF: unspecified / multicast address "
1165 "assigned as unicast address on %s",
1174 int minihiscore, miniscore;
1176 minihiscore = ipv6_get_saddr_eval(net, hiscore, &dst, i);
1177 miniscore = ipv6_get_saddr_eval(net, score, &dst, i);
1179 if (minihiscore > miniscore) {
1193 } else if (minihiscore < miniscore) {
1195 in6_ifa_put(hiscore->ifa);
1197 in6_ifa_hold(score->ifa);
1199 swap(hiscore, score);
1202 score->ifa = hiscore->ifa;
1216 *saddr = hiscore->ifa->addr;
1217 in6_ifa_put(hiscore->ifa);
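/*
 * ipv6_dev_get_saddr() scores every candidate source address against the
 * destination one rule at a time (ipv6_get_saddr_eval), in the order given
 * by the IPV6_SADDR_RULE_* enum; the first rule that yields a strictly
 * better score decides, mirroring the RFC 3484 source selection algorithm.
 */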
1223 unsigned char banned_flags)
1229 idev = __in6_dev_get(dev);
1236 !(ifp->flags & banned_flags)) {
1248 static int ipv6_count_addresses(struct inet6_dev *idev)
1265 unsigned int hash = inet6_addr_hash(addr);
1268 hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
1269 if (!net_eq(dev_net(ifp->idev->dev), net))
1271 if (ipv6_addr_equal(&ifp->addr, addr) &&
1273 (dev == NULL || ifp->idev->dev == dev ||
1275 rcu_read_unlock_bh();
1280 rcu_read_unlock_bh();
1285 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1288 unsigned int hash = inet6_addr_hash(addr);
1293 if (!net_eq(dev_net(ifp->idev->dev), net))
1295 if (ipv6_addr_equal(&ifp->addr, addr)) {
1296 if (dev == NULL || ifp->idev->dev == dev)
1311 idev = __in6_dev_get(dev);
1315 onlink = ipv6_prefix_equal(addr, &ifa->addr,
1331 unsigned int hash = inet6_addr_hash(addr);
1335 hlist_for_each_entry_rcu_bh(ifp, node, &inet6_addr_lst[hash], addr_lst) {
1336 if (!net_eq(dev_net(ifp->idev->dev), net))
1338 if (ipv6_addr_equal(&ifp->addr, addr)) {
1339 if (dev == NULL || ifp->idev->dev == dev ||
1347 rcu_read_unlock_bh();
1354 static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
1357 spin_lock_bh(&ifp->lock);
1358 addrconf_del_timer(ifp);
1362 spin_unlock_bh(&ifp->lock);
1364 ipv6_ifa_notify(0, ifp);
1366 #ifdef CONFIG_IPV6_PRIVACY
1369 spin_lock_bh(&ifp->lock);
1372 in6_ifa_hold(ifpub);
1373 spin_unlock_bh(&ifp->lock);
1374 ipv6_create_tempaddr(ifpub, ifp);
1377 spin_unlock_bh(&ifp->lock);
1403 if (addrconf_dad_end(ifp)) {
1411 if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) {
1414 addr.s6_addr32[0] = htonl(0xfe800000);
1415 addr.s6_addr32[1] = 0;
1417 if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
1418 ipv6_addr_equal(&ifp->addr, &addr)) {
1420 idev->cnf.disable_ipv6 = 1;
1422 pr_info("%s: IPv6 being disabled!\n",
1423 ifp->idev->dev->name);
1427 addrconf_dad_stop(ifp, 1);
1439 addrconf_addr_solict_mult(addr, &maddr);
1450 addrconf_addr_solict_mult(addr, &maddr);
1454 static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
1460 if (ipv6_addr_any(&addr))
1465 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
1471 if (ipv6_addr_any(&addr))
1497 eui[3] = (dev->dev_id >> 8) & 0xFF;
1498 eui[4] = dev->dev_id & 0xFF;
1507 static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
1515 static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
1525 static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
1534 static int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
1538 eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) ||
1539 ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) ||
1540 ipv4_is_private_172(addr) || ipv4_is_test_192(addr) ||
1541 ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) ||
1542 ipv4_is_test_198(addr) || ipv4_is_multicast(addr) ||
1543 ipv4_is_lbcast(addr)) ? 0x00 : 0x02;
1547 memcpy(eui + 4, &addr, 4);
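/*
 * For ISATAP-style interface identifiers the embedded IPv4 address decides
 * the universal/local bit: globally routable addresses get 0x02 (universal),
 * private and special-use ranges get 0x00, and the IPv4 address itself is
 * copied into the last four bytes of the identifier (RFC 5214).
 */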
1551 static int addrconf_ifid_sit(u8 *eui, struct net_device *dev)
1558 static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
1563 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
1565 switch (dev->type) {
1568 return addrconf_ifid_eui48(eui, dev);
1570 return addrconf_ifid_arcnet(eui, dev);
1572 return addrconf_ifid_infiniband(eui, dev);
1574 return addrconf_ifid_sit(eui, dev);
1576 return addrconf_ifid_gre(eui, dev);
1578 return addrconf_ifid_eui64(eui, dev);
1583 static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
1600 #ifdef CONFIG_IPV6_PRIVACY
1602 static void __ipv6_regen_rndid(struct inet6_dev *idev)
1606 idev->rndid[0] &= ~0x02;
1619 if (idev->rndid[0] == 0xfd &&
1620 (idev->rndid[1]&idev->rndid[2]&idev->rndid[3]&idev->rndid[4]&idev->rndid[5]&idev->rndid[6]) == 0xff &&
1621 (idev->rndid[7]&0x80))
1623 if ((idev->rndid[0]|idev->rndid[1]) == 0) {
1624 if (idev->rndid[2] == 0x5e && idev->rndid[3] == 0xfe)
1626 if ((idev->rndid[2]|idev->rndid[3]|idev->rndid[4]|idev->rndid[5]|idev->rndid[6]|idev->rndid[7]) == 0x00)
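/*
 * __ipv6_regen_rndid() rejects random identifiers that collide with reserved
 * values: the reserved subnet anycast block (fdff:...:ff80-ff), the ISATAP
 * pattern ::0200:5efe, and the all-zero subnet-router anycast identifier,
 * regenerating until the identifier is safe to use.
 */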
1631 static void ipv6_regen_rndid(unsigned long data)
1634 unsigned long expires;
1642 __ipv6_regen_rndid(idev);
1645 idev->cnf.temp_prefered_lft * HZ -
1646 idev->cnf.regen_max_retry * idev->cnf.dad_transmits * idev->nd_parms->retrans_time -
1647 idev->cnf.max_desync_factor * HZ;
1649 pr_warn("%s: too short regeneration interval; timer disabled for %s\n",
1650 __func__, idev->dev->name);
1654 if (!mod_timer(&idev->regen_timer, expires))
1659 rcu_read_unlock_bh();
1663 static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
1665 if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
1666 __ipv6_regen_rndid(idev);
1676 unsigned long expires, u32 flags)
1682 .fc_expires = expires,
1685 .fc_nlinfo.nl_net = dev_net(dev),
1695 #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
1704 static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
1721 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1726 if ((noflags != 0) && ((rt->rt6i_flags & flags) != 0))
1739 static void addrconf_add_mroute(struct net_device *dev)
1747 .fc_nlinfo.nl_net = dev_net(dev),
1750 ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
1755 #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
1756 static void sit_route_add(struct net_device *dev)
1764 .fc_nlinfo.nl_net = dev_net(dev),
1778 idev = ipv6_find_idev(dev);
1782 if (idev->cnf.disable_ipv6)
1787 addrconf_add_mroute(dev);
1799 struct net *net = dev_net(dev);
1804 ADBG(("addrconf: prefix option too short\n"));
1812 addr_type = ipv6_addr_type(&pinfo->prefix);
1820 if (prefered_lft > valid_lft) {
1825 in6_dev = in6_dev_get(dev);
1827 if (in6_dev == NULL) {
1839 if (pinfo->onlink) {
1841 unsigned long rt_expires;
1849 rt_expires = addrconf_timeout_fixup(valid_lft, HZ);
1851 rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ);
1853 if (addrconf_finite_timeout(rt_expires))
1856 rt = addrconf_get_prefix_route(&pinfo->prefix,
1864 if (valid_lft == 0) {
1867 } else if (addrconf_finite_timeout(rt_expires)) {
1869 rt6_set_expires(rt, jiffies + rt_expires);
1871 rt6_clean_expires(rt);
1873 } else if (valid_lft) {
1876 if (addrconf_finite_timeout(rt_expires)) {
1882 dev, expires, flags);
1890 if (pinfo->autoconf && in6_dev->cnf.autoconf) {
1893 int create = 0, update_lft = 0;
1897 if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
1898 ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
1899 in6_dev_put(in6_dev);
1906 in6_dev_put(in6_dev);
1913 if (ifp == NULL && valid_lft) {
1914 int max_addresses = in6_dev->cnf.max_addresses;
1917 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1918 if (in6_dev->cnf.optimistic_dad &&
1919 !net->ipv6.devconf_all->forwarding && sllao)
1926 if (!max_addresses ||
1927 ipv6_count_addresses(in6_dev) < max_addresses)
1928 ifp = ipv6_add_addr(in6_dev, &addr, pinfo->prefix_len,
1929 addr_type&IPV6_ADDR_SCOPE_MASK,
1932 if (!ifp || IS_ERR(ifp)) {
1933 in6_dev_put(in6_dev);
1937 update_lft = create = 1;
1939 addrconf_dad_start(ifp);
1945 #ifdef CONFIG_IPV6_PRIVACY
1951 spin_lock(&ifp->lock);
1957 if (!update_lft && stored_lft) {
1959 valid_lft > stored_lft)
1986 valid_lft = stored_lft;
1991 if (valid_lft < prefered_lft)
2003 spin_unlock(&ifp->lock);
2006 ipv6_ifa_notify(0, ifp);
2008 spin_unlock(&ifp->lock);
2010 #ifdef CONFIG_IPV6_PRIVACY
2015 int age, max_valid, max_prefered;
2017 if (ifp != ift->ifpub)
2033 max_valid = in6_dev->cnf.temp_valid_lft - age;
2037 max_prefered = in6_dev->cnf.temp_prefered_lft -
2038 in6_dev->cnf.max_desync_factor -
2040 if (max_prefered < 0)
2043 if (valid_lft > max_valid)
2044 valid_lft = max_valid;
2046 if (prefered_lft > max_prefered)
2047 prefered_lft = max_prefered;
2049 spin_lock(&ift->lock);
2054 if (prefered_lft > 0)
2057 spin_unlock(&ift->lock);
2059 ipv6_ifa_notify(0, ift);
2062 if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) {
2071 ipv6_create_tempaddr(ifp, NULL);
2081 in6_dev_put(in6_dev);
2107 #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
2117 memset(&p, 0, sizeof(p));
2153 static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *pfx,
2154 unsigned int plen, __u8 ifa_flags, __u32 prefered_lft,
2171 if (!valid_lft || prefered_lft > valid_lft)
2178 idev = addrconf_add_dev(dev);
2180 return PTR_ERR(idev);
2182 scope = ipv6_addr_scope(pfx);
2184 timeout = addrconf_timeout_fixup(valid_lft, HZ);
2185 if (addrconf_finite_timeout(timeout)) {
2187 valid_lft = timeout;
2195 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
2196 if (addrconf_finite_timeout(timeout)) {
2199 prefered_lft = timeout;
2202 ifp = ipv6_add_addr(idev, pfx, plen, scope, ifa_flags);
2205 spin_lock_bh(&ifp->lock);
2209 spin_unlock_bh(&ifp->lock);
2218 addrconf_dad_start(ifp);
2224 return PTR_ERR(ifp);
2227 static int inet6_addr_del(struct net *net, int ifindex, const struct in6_addr *pfx,
2241 if ((idev = __in6_dev_get(dev)) == NULL)
2247 ipv6_addr_equal(pfx, &ifp->addr)) {
2257 addrconf_ifdown(idev->dev, 1);
2304 int plen, int scope)
2310 spin_lock_bh(&ifp->lock);
2312 spin_unlock_bh(&ifp->lock);
2318 #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
2319 static void sit_add_v4_addrs(struct inet6_dev *idev)
2323 struct net *net = dev_net(idev->dev);
2329 memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);
2332 addr.s6_addr32[0] = htonl(0xfe800000);
2338 if (addr.s6_addr32[3]) {
2339 add_addr(idev, &addr, 128, scope);
2344 struct in_device *in_dev = __in_dev_get_rtnl(dev);
2346 struct in_ifaddr *ifa;
2350 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
2353 addr.s6_addr32[3] = ifa->ifa_local;
2367 add_addr(idev, &addr, plen, flag);
2374 static void init_loopback(struct net_device *dev)
2382 if ((idev = ipv6_find_idev(dev)) == NULL) {
2383 pr_debug("%s: add_dev failed\n", __func__);
2387 add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
2390 static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
2395 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
2396 if (idev->cnf.optimistic_dad &&
2397 !dev_net(idev->dev)->ipv6.devconf_all->forwarding)
2402 ifp = ipv6_add_addr(idev, addr, 64, IFA_LINK, addr_flags);
2405 addrconf_dad_start(ifp);
2410 static void addrconf_dev_config(struct net_device *dev)
2426 idev = addrconf_add_dev(dev);
2431 addr.s6_addr32[0] = htonl(0xFE800000);
2433 if (ipv6_generate_eui64(addr.s6_addr + 8, dev) == 0)
2434 addrconf_add_linklocal(idev, &addr);
2437 #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
2438 static void addrconf_sit_config(struct net_device *dev)
2450 if ((idev = ipv6_find_idev(dev)) == NULL) {
2451 pr_debug("%s: add_dev failed\n", __func__);
2458 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
2459 addrconf_prefix_route(&addr, 64, dev, 0, 0);
2460 if (!ipv6_generate_eui64(addr.s6_addr + 8, dev))
2461 addrconf_add_linklocal(idev, &addr);
2465 sit_add_v4_addrs(idev);
2468 addrconf_add_mroute(dev);
2474 #if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE)
2475 static void addrconf_gre_config(struct net_device *dev)
2484 if ((idev = ipv6_find_idev(dev)) == NULL) {
2485 pr_debug("%s: add_dev failed\n", __func__);
2489 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
2490 addrconf_prefix_route(&addr, 64, dev, 0, 0);
2492 if (!ipv6_generate_eui64(addr.s6_addr + 8, dev))
2493 addrconf_add_linklocal(idev, &addr);
2503 addrconf_add_linklocal(idev, &lladdr);
2509 static void ip6_tnl_add_linklocal(struct inet6_dev *idev)
2512 struct net *net = dev_net(idev->dev);
2515 if (idev->dev->iflink &&
2517 if (!ipv6_inherit_linklocal(idev, link_dev))
2522 if (!ipv6_inherit_linklocal(idev, link_dev))
2525 pr_debug("init ip6-ip6: add_linklocal failed\n");
2533 static void addrconf_ip6_tnl_config(struct net_device *dev)
2539 idev = addrconf_add_dev(dev);
2541 pr_debug("init ip6-ip6: add_dev failed\n");
2544 ip6_tnl_add_linklocal(idev);
2551 struct inet6_dev *idev = __in6_dev_get(dev);
2552 int run_pending = 0;
2558 idev = ipv6_add_dev(dev);
2560 return notifier_from_errno(-ENOMEM);
2570 if (!addrconf_qdisc_ok(dev)) {
2572 pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
2578 idev = ipv6_add_dev(dev);
2585 if (!addrconf_qdisc_ok(dev)) {
2597 pr_info("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n",
2603 switch (dev->type) {
2604 #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
2606 addrconf_sit_config(dev);
2609 #if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE)
2611 addrconf_gre_config(dev);
2615 addrconf_ip6_tnl_config(dev);
2622 addrconf_dev_config(dev);
2628 addrconf_dad_run(idev);
2635 if (idev->cnf.mtu6 != dev->mtu &&
2638 idev->cnf.mtu6 = dev->mtu;
2648 addrconf_ifdown(dev, 1);
2655 idev->cnf.mtu6 = dev->mtu;
2660 idev = ipv6_add_dev(dev);
2681 addrconf_sysctl_unregister(idev);
2682 addrconf_sysctl_register(idev);
2685 return notifier_from_errno(err);
2691 addrconf_type_change(dev, event);
2702 .notifier_call = addrconf_notify,
2705 static void addrconf_type_change(struct net_device *dev, unsigned long event)
2710 idev = __in6_dev_get(dev);
2718 static int addrconf_ifdown(struct net_device *dev, int how)
2720 struct net *net = dev_net(dev);
2730 idev = __in6_dev_get(dev);
2754 spin_lock_bh(&addrconf_hash_lock);
2756 hlist_for_each_entry_rcu(ifa, n, h, addr_lst) {
2757 if (ifa->idev == idev) {
2758 hlist_del_init_rcu(&ifa->addr_lst);
2759 addrconf_del_timer(ifa);
2763 spin_unlock_bh(&addrconf_hash_lock);
2772 #ifdef CONFIG_IPV6_PRIVACY
2773 if (how && del_timer(&idev->regen_timer))
2777 while (!list_empty(&idev->tempaddr_list)) {
2782 spin_lock_bh(&ifa->lock);
2785 in6_ifa_put(ifa->ifpub);
2788 spin_unlock_bh(&ifa->lock);
2797 addrconf_del_timer(ifa);
2829 addrconf_sysctl_unregister(idev);
2837 static void addrconf_rs_timer(unsigned long data)
2846 if (idev->cnf.forwarding)
2853 spin_lock(&ifp->lock);
2854 if (ifp->probes++ < idev->cnf.rtr_solicits) {
2856 addrconf_mod_timer(ifp, AC_RS,
2857 (ifp->probes == idev->cnf.rtr_solicits) ?
2858 idev->cnf.rtr_solicit_delay :
2859 idev->cnf.rtr_solicit_interval);
2860 spin_unlock(&ifp->lock);
2864 spin_unlock(&ifp->lock);
2869 pr_debug("%s: no IPv6 routers present\n", idev->dev->name);
2880 static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
2882 unsigned long rand_num;
2888 rand_num = net_random() % (idev->cnf.rtr_solicit_delay ? : 1);
2891 addrconf_mod_timer(ifp, AC_DAD, rand_num);
2894 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
2904 spin_lock(&ifp->lock);
2909 idev->cnf.accept_dad < 1 ||
2913 spin_unlock(&ifp->lock);
2916 addrconf_dad_completed(ifp);
2921 spin_unlock(&ifp->lock);
2929 addrconf_dad_stop(ifp, 0);
2940 addrconf_dad_kick(ifp);
2942 spin_unlock(&ifp->lock);
2946 static void addrconf_dad_timer(unsigned long data)
2952 if (!ifp->probes && addrconf_dad_end(ifp))
2961 spin_lock(&ifp->lock);
2963 spin_unlock(&ifp->lock);
2974 spin_unlock(&ifp->lock);
2977 addrconf_dad_completed(ifp);
2983 addrconf_mod_timer(ifp, AC_DAD, ifp->idev->nd_parms->retrans_time);
2984 spin_unlock(&ifp->lock);
2988 addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
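/*
 * Duplicate Address Detection (RFC 4862): addrconf_dad_start() schedules the
 * first probe after a random delay, addrconf_dad_timer() retransmits a
 * Neighbor Solicitation to the solicited-node multicast group every
 * retrans_time until the configured number of probes has been sent, and only
 * then is the address marked usable via addrconf_dad_completed().
 */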
2994 static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
3008 if (((ifp->idev->cnf.accept_ra == 1 && !ifp->idev->cnf.forwarding) ||
3009 ifp->idev->cnf.accept_ra == 2) &&
3010 ifp->idev->cnf.rtr_solicits > 0 &&
3020 spin_lock_bh(&ifp->lock);
3023 addrconf_mod_timer(ifp, AC_RS, ifp->idev->cnf.rtr_solicit_interval);
3024 spin_unlock_bh(&ifp->lock);
3028 static void addrconf_dad_run(struct inet6_dev *idev)
3034 spin_lock(&ifp->lock);
3037 addrconf_dad_kick(ifp);
3038 spin_unlock(&ifp->lock);
3043 #ifdef CONFIG_PROC_FS
3044 struct if6_iter_state {
3053 struct if6_iter_state *state = seq->private;
3054 struct net *net = seq_file_net(seq);
3065 hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket],
3067 if (!net_eq(dev_net(ifa->idev->dev), net))
3088 struct if6_iter_state *state = seq->private;
3089 struct net *net = seq_file_net(seq);
3092 hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) {
3093 if (!net_eq(dev_net(ifa->idev->dev), net))
3099 while (++state->bucket < IN6_ADDR_HSIZE) {
3101 hlist_for_each_entry_rcu_bh(ifa, n,
3102 &inet6_addr_lst[state->bucket], addr_lst) {
3103 if (!net_eq(dev_net(ifa->idev->dev), net))
3113 static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
3117 return if6_get_first(seq, *pos);
3120 static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3124 ifa = if6_get_next(seq, v);
3129 static void if6_seq_stop(struct seq_file *seq, void *v)
3132 rcu_read_unlock_bh();
3135 static int if6_seq_show(struct seq_file *seq, void *v)
3138 seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
3140 ifp->idev->dev->ifindex,
3144 ifp->idev->dev->name);
3149 .start = if6_seq_start,
3150 .next = if6_seq_next,
3151 .show = if6_seq_show,
3152 .stop = if6_seq_stop,
3158 sizeof(struct if6_iter_state));
3163 .open = if6_seq_open,
3169 static int __net_init if6_proc_net_init(struct net *net)
3176 static void __net_exit if6_proc_net_exit(struct net *net)
3182 .init = if6_proc_net_init,
3183 .exit = if6_proc_net_exit,
3186 int __init if6_proc_init(void)
3191 void if6_proc_exit(void)
3197 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
3199 int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
3204 unsigned int hash = inet6_addr_hash(addr);
3207 hlist_for_each_entry_rcu_bh(ifp, n, &inet6_addr_lst[hash], addr_lst) {
3208 if (!net_eq(dev_net(ifp->idev->dev), net))
3210 if (ipv6_addr_equal(&ifp->addr, addr) &&
3216 rcu_read_unlock_bh();
3225 static void addrconf_verify(unsigned long foo)
3227 unsigned long now, next, next_sec, next_sched;
3233 spin_lock(&addrconf_verify_lock);
3241 hlist_for_each_entry_rcu_bh(ifp, node,
3242 &inet6_addr_lst[i], addr_lst) {
3248 spin_lock(&ifp->lock);
3254 spin_unlock(&ifp->lock);
3259 spin_unlock(&ifp->lock);
3273 spin_unlock(&ifp->lock);
3278 ipv6_ifa_notify(0, ifp);
3282 #ifdef CONFIG_IPV6_PRIVACY
3285 unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
3286 ifp->idev->cnf.dad_transmits *
3287 ifp->idev->nd_parms->retrans_time / HZ;
3293 if (!ifp->regen_count && ifpub) {
3296 in6_ifa_hold(ifpub);
3297 spin_unlock(&ifp->lock);
3299 spin_lock(&ifpub->lock);
3300 ifpub->regen_count = 0;
3301 spin_unlock(&ifpub->lock);
3302 ipv6_create_tempaddr(ifpub, ifp);
3309 spin_unlock(&ifp->lock);
3315 spin_unlock(&ifp->lock);
3325 next_sched = next_sec;
3331 ADBG((KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
3332 now, next, next_sec, next_sched));
3334 addr_chk_timer.expires = next_sched;
3336 spin_unlock(&addrconf_verify_lock);
3337 rcu_read_unlock_bh();
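/*
 * addrconf_verify() walks the whole address hash, expiring addresses whose
 * valid lifetime has passed, deprecating those past their preferred
 * lifetime, and regenerating a replacement temporary address regen_advance
 * seconds before a privacy address would expire; it then re-arms
 * addr_chk_timer, rounded and jittered with the ADDRCONF_TIMER_FUZZ
 * constants.
 */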
3345 pfx = nla_data(addr);
3348 if (pfx && nla_memcmp(local, pfx, sizeof(*pfx)))
3351 pfx = nla_data(local);
3366 struct net *net = sock_net(skb->sk);
3372 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
3376 ifm = nlmsg_data(nlh);
3384 static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags,
3385 u32 prefered_lft, u32 valid_lft)
3389 unsigned long timeout;
3391 if (!valid_lft || (prefered_lft > valid_lft))
3394 timeout = addrconf_timeout_fixup(valid_lft, HZ);
3395 if (addrconf_finite_timeout(timeout)) {
3397 valid_lft = timeout;
3405 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
3406 if (addrconf_finite_timeout(timeout)) {
3409 prefered_lft = timeout;
3412 spin_lock_bh(&ifp->lock);
3418 spin_unlock_bh(&ifp->lock);
3420 ipv6_ifa_notify(0, ifp);
3430 inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
3432 struct net *net = sock_net(skb->sk);
3442 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
3446 ifm = nlmsg_data(nlh);
3454 ci = nla_data(tb[IFA_CACHEINFO]);
3475 return inet6_addr_add(net, ifm->ifa_index, pfx,
3477 preferred_lft, valid_lft);
3484 err = inet6_addr_modify(ifa, ifa_flags, preferred_lft, valid_lft);
3491 static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u8 flags,
3492 u8 scope, int ifindex)
3496 ifm = nlmsg_data(nlh);
3504 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
3509 ci.cstamp = cstamp_delta(cstamp);
3510 ci.tstamp = cstamp_delta(tstamp);
3512 ci.ifa_valid = valid;
3514 return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
3517 static inline int rt_scope(int ifa_scope)
3529 static inline int inet6_ifaddr_msgsize(void)
3532 + nla_total_size(16)
3537 u32 portid, u32 seq, int event, unsigned int flags)
3542 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
3547 ifa->idev->dev->ifindex);
3554 if (preferred > tval)
3571 put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0) {
3572 nlmsg_cancel(skb, nlh);
3576 return nlmsg_end(skb, nlh);
3579 static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
3580 u32 portid, u32 seq, int event, u16 flags)
3584 int ifindex = ifmca->idev->dev->ifindex;
3589 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
3597 nlmsg_cancel(skb, nlh);
3601 return nlmsg_end(skb, nlh);
3604 static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
3605 u32 portid, u32 seq, int event, unsigned int flags)
3609 int ifindex = ifaca->aca_idev->dev->ifindex;
3614 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
3622 nlmsg_cancel(skb, nlh);
3626 return nlmsg_end(skb, nlh);
3638 int s_ip_idx, int *p_ip_idx)
3643 int ip_idx = *p_ip_idx;
3652 if (++ip_idx < s_ip_idx)
3654 err = inet6_fill_ifaddr(skb, ifa,
3666 for (ifmca = idev->mc_list; ifmca;
3667 ifmca = ifmca->next, ip_idx++) {
3668 if (ip_idx < s_ip_idx)
3670 err = inet6_fill_ifmcaddr(skb, ifmca,
3681 for (ifaca = idev->ac_list; ifaca;
3682 ifaca = ifaca->aca_next, ip_idx++) {
3683 if (ip_idx < s_ip_idx)
3685 err = inet6_fill_ifacaddr(skb, ifaca,
3705 struct net *net = sock_net(skb->sk);
3708 int s_idx, s_ip_idx;
3715 s_idx = idx = cb->args[1];
3716 s_ip_idx = ip_idx = cb->args[2];
3722 hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
3725 if (h > s_h || idx > s_idx)
3728 idev = __in6_dev_get(dev);
3732 if (in6_dump_addrs(idev, skb, cb, type,
3733 s_ip_idx, &ip_idx) <= 0)
3743 cb->args[2] = ip_idx;
3752 return inet6_dump_addr(skb, cb, type);
3759 return inet6_dump_addr(skb, cb, type);
3767 return inet6_dump_addr(skb, cb, type);
3770 static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3773 struct net *net = sock_net(in_skb->sk);
3782 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
3792 ifm = nlmsg_data(nlh);
3802 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
3808 err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).portid,
3823 static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
3826 struct net *net = dev_net(ifa->idev->dev);
3829 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
3833 err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0);
3847 static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
3866 #ifdef CONFIG_IPV6_PRIVACY
3876 #ifdef CONFIG_IPV6_ROUTER_PREF
3880 #ifdef CONFIG_IPV6_ROUTE_INFO
3886 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
3889 #ifdef CONFIG_IPV6_MROUTE
3897 static inline size_t inet6_ifla6_size(void)
3899 return nla_total_size(4)
3906 static inline size_t inet6_if_nlmsg_size(void)
3913 + nla_total_size(inet6_ifla6_size());
3917 int items, int bytes)
3920 int pad = bytes - sizeof(u64) * items;
3925 for (i = 1; i < items; i++)
3928 memset(&stats[items], 0, pad);
3931 static inline void __snmp6_fill_stats64(u64 *stats, void __percpu **mib,
3932 int items, int bytes, size_t syncpoff)
3935 int pad = bytes - sizeof(u64) * items;
3940 for (i = 1; i < items; i++)
3941 put_unaligned(snmp_fold_field64(mib, i, syncpoff), &stats[i]);
3943 memset(&stats[items], 0, pad);
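/*
 * The two helpers above fold per-cpu SNMP mibs into a flat u64 array for
 * netlink export; any trailing pad bytes of the attribute are zeroed so no
 * uninitialized kernel memory leaks to userspace.
 */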
3946 static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
3951 __snmp6_fill_stats64(stats, (void __percpu **)idev->stats.ipv6,
3960 static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev)
3966 goto nla_put_failure;
3968 ci.tstamp = cstamp_delta(idev->tstamp);
3972 goto nla_put_failure;
3975 goto nla_put_failure;
3976 ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
3982 goto nla_put_failure;
3987 goto nla_put_failure;
3996 static size_t inet6_get_link_af_size(const struct net_device *dev)
3998 if (!__in6_dev_get(dev))
4001 return inet6_ifla6_size();
4004 static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
4006 struct inet6_dev *idev = __in6_dev_get(dev);
4011 if (inet6_fill_ifla6_attrs(skb, idev) < 0)
4018 u32 portid, u32 seq, int event, unsigned int flags)
4025 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
4029 hdr = nlmsg_data(nlh);
4043 goto nla_put_failure;
4045 if (protoinfo == NULL)
4046 goto nla_put_failure;
4048 if (inet6_fill_ifla6_attrs(skb, idev) < 0)
4049 goto nla_put_failure;
4051 nla_nest_end(skb, protoinfo);
4052 return nlmsg_end(skb, nlh);
4055 nlmsg_cancel(skb, nlh);
4061 struct net *net = sock_net(skb->sk);
4070 s_idx = cb->args[1];
4076 hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
4079 idev = __in6_dev_get(dev);
4082 if (inet6_fill_ifinfo(skb, idev,
4102 struct net *net = dev_net(idev->dev);
4105 skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
4109 err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
4123 static inline size_t inet6_prefix_nlmsg_size(void)
4126 + nla_total_size(sizeof(struct in6_addr))
4132 int event, unsigned int flags)
4138 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
4142 pmsg = nlmsg_data(nlh);
4153 if (pinfo->autoconf)
4157 goto nla_put_failure;
4161 goto nla_put_failure;
4162 return nlmsg_end(skb, nlh);
4165 nlmsg_cancel(skb, nlh);
4169 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
4173 struct net *net = dev_net(idev->dev);
4176 skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
4180 err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
4194 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
4206 if (!(ifp->rt->rt6i_node))
4208 if (ifp->idev->cnf.forwarding)
4209 addrconf_join_anycast(ifp);
4212 if (ifp->idev->cnf.forwarding)
4213 addrconf_leave_anycast(ifp);
4215 dst_hold(&ifp->rt->dst);
4218 dst_free(&ifp->rt->dst);
4223 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
4227 __ipv6_ifa_notify(event, ifp);
4228 rcu_read_unlock_bh();
4231 #ifdef CONFIG_SYSCTL
4235 void __user *buffer, size_t *lenp, loff_t *ppos)
4237 int *valp = ctl->data;
4253 ret = addrconf_fixup_forwarding(ctl, valp, val);
4259 static void dev_disable_change(struct inet6_dev *idev)
4261 if (!idev || !idev->dev)
4264 if (idev->cnf.disable_ipv6)
4270 static void addrconf_disable_change(struct net *net, __s32 newf)
4277 idev = __in6_dev_get(dev);
4279 int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
4280 idev->cnf.disable_ipv6 = newf;
4282 dev_disable_change(idev);
4288 static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
4294 return restart_syscall();
4296 net = (struct net *)table->extra2;
4300 if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
4305 if (p == &net->ipv6.devconf_all->disable_ipv6) {
4306 net->ipv6.devconf_dflt->disable_ipv6 = newf;
4307 addrconf_disable_change(net, newf);
4308 } else if ((!newf) ^ (!old))
4316 int addrconf_sysctl_disable(ctl_table *ctl, int write,
4317 void __user *buffer, size_t *lenp, loff_t *ppos)
4319 int *valp = ctl->data;
4335 ret = addrconf_disable_ipv6(ctl, valp, val);
4341 static struct addrconf_sysctl_table
4345 } addrconf_sysctl __read_mostly = {
4346 .sysctl_header = NULL,
4349 .procname = "forwarding",
4351 .maxlen = sizeof(int),
4356 .procname = "hop_limit",
4358 .maxlen = sizeof(int),
4365 .maxlen = sizeof(int),
4370 .procname = "accept_ra",
4372 .maxlen = sizeof(int),
4377 .procname = "accept_redirects",
4379 .maxlen = sizeof(int),
4384 .procname = "autoconf",
4386 .maxlen = sizeof(int),
4391 .procname = "dad_transmits",
4393 .maxlen = sizeof(int),
4398 .procname = "router_solicitations",
4400 .maxlen = sizeof(int),
4405 .procname = "router_solicitation_interval",
4407 .maxlen = sizeof(int),
4412 .procname = "router_solicitation_delay",
4414 .maxlen = sizeof(int),
4419 .procname = "force_mld_version",
4421 .maxlen = sizeof(int),
4425 #ifdef CONFIG_IPV6_PRIVACY
4427 .procname = "use_tempaddr",
4429 .maxlen = sizeof(int),
4434 .procname = "temp_valid_lft",
4436 .maxlen = sizeof(int),
4441 .procname = "temp_prefered_lft",
4443 .maxlen = sizeof(int),
4448 .procname = "regen_max_retry",
4450 .maxlen = sizeof(int),
4455 .procname = "max_desync_factor",
4457 .maxlen = sizeof(int),
4463 .procname = "max_addresses",
4465 .maxlen = sizeof(int),
4470 .procname = "accept_ra_defrtr",
4472 .maxlen = sizeof(int),
4477 .procname = "accept_ra_pinfo",
4479 .maxlen = sizeof(int),
4483 #ifdef CONFIG_IPV6_ROUTER_PREF
4485 .procname = "accept_ra_rtr_pref",
4487 .maxlen = sizeof(int),
4492 .procname = "router_probe_interval",
4494 .maxlen = sizeof(int),
4498 #ifdef CONFIG_IPV6_ROUTE_INFO
4500 .procname = "accept_ra_rt_info_max_plen",
4502 .maxlen = sizeof(int),
4509 .procname = "proxy_ndp",
4511 .maxlen = sizeof(int),
4516 .procname = "accept_source_route",
4518 .maxlen = sizeof(int),
4522 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
4524 .procname = "optimistic_dad",
4526 .maxlen = sizeof(int),
4532 #ifdef CONFIG_IPV6_MROUTE
4534 .procname = "mc_forwarding",
4536 .maxlen = sizeof(int),
4542 .procname = "disable_ipv6",
4544 .maxlen = sizeof(int),
4549 .procname = "accept_dad",
4551 .maxlen = sizeof(int),
4556 .procname = "force_tllao",
4558 .maxlen = sizeof(int),
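/*
 * One copy of this template is registered per device (plus "all" and
 * "default") under /proc/sys/net/ipv6/conf/; __addrconf_sysctl_register()
 * below rebases each ctl_table's data pointer from the global ipv6_devconf
 * onto the per-device ipv6_devconf instance before registration.
 */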
4568 static int __addrconf_sysctl_register(struct net *net, char *dev_name,
4572 struct addrconf_sysctl_table *t;
4579 for (i = 0; t->addrconf_vars[i].data; i++) {
4580 t->addrconf_vars[i].data += (char *)p - (char *)&ipv6_devconf;
4581 t->addrconf_vars[i].extra1 = idev;
4582 t->addrconf_vars[i].extra2 = net;
4588 if (t->sysctl_header == NULL)
4600 static void __addrconf_sysctl_unregister(struct ipv6_devconf *p)
4602 struct addrconf_sysctl_table *t;
4613 static void addrconf_sysctl_register(struct inet6_dev *idev)
4616 &ndisc_ifinfo_sysctl_change);
4617 __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
4621 static void addrconf_sysctl_unregister(struct inet6_dev *idev)
4623 __addrconf_sysctl_unregister(&idev->cnf);
4630 static int __net_init addrconf_init_net(struct net *net)
4637 dflt = &ipv6_devconf_dflt;
4646 goto err_alloc_dflt;
4653 net->ipv6.devconf_all = all;
4654 net->ipv6.devconf_dflt = dflt;
4656 #ifdef CONFIG_SYSCTL
4657 err = __addrconf_sysctl_register(net, "all", NULL, all);
4661 err = __addrconf_sysctl_register(net, "default", NULL, dflt);
4667 #ifdef CONFIG_SYSCTL
4669 __addrconf_sysctl_unregister(all);
4679 static void __net_exit addrconf_exit_net(struct net *net)
4681 #ifdef CONFIG_SYSCTL
4682 __addrconf_sysctl_unregister(net->ipv6.devconf_dflt);
4683 __addrconf_sysctl_unregister(net->ipv6.devconf_all);
4686 kfree(net->ipv6.devconf_dflt);
4687 kfree(net->ipv6.devconf_all);
4692 .init = addrconf_init_net,
4693 .exit = addrconf_exit_net,
4714 .fill_link_af = inet6_fill_link_af,
4715 .get_link_af_size = inet6_get_link_af_size,
4728 pr_crit("%s: cannot initialize default policy table: %d\n",
4756 if (!ipv6_add_dev(init_net.loopback_dev))
4782 inet6_dump_ifaddr, NULL);
4784 inet6_dump_ifmcaddr, NULL);
4786 inet6_dump_ifacaddr, NULL);
4818 if (__in6_dev_get(dev) == NULL)
4820 addrconf_ifdown(dev, 1);
4822 addrconf_ifdown(init_net.loopback_dev, 2);
4827 spin_lock_bh(&addrconf_hash_lock);
4829 WARN_ON(!hlist_empty(&inet6_addr_lst[i]));
4830 spin_unlock_bh(&addrconf_hash_lock);