#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <linux/export.h>
#include <asm/errno.h>
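/*
 * All DECnet-over-Ethernet MAC addresses share the HIORD prefix
 * AA-00-04-00; the remaining two bytes carry the 16-bit node address.
 * That is why the receive paths below compare only the first four
 * bytes against dn_hiord_addr.
 */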
static unsigned char dn_hiord_addr[6] = {0xAA, 0x00, 0x04, 0x00, 0x00, 0x00};
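/*
 * Route cache tuning, all in jiffies: the minimum and maximum delay
 * before a scheduled flush actually runs, and the lifetime of a path
 * MTU learned via dn_dst_update_pmtu().
 */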
static const int dn_rt_min_delay = 2 * HZ;
static const int dn_rt_max_delay = 10 * HZ;
static const int dn_rt_mtu_expires = 10 * 60 * HZ;

static unsigned long dn_rt_deadline;
static int dn_dst_gc(struct dst_ops *ops);
static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
static unsigned int dn_dst_mtu(const struct dst_entry *dst);
static void dn_dst_destroy(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
static int dn_route_input(struct sk_buff *);
static void dn_run_flush(unsigned long dummy);

static unsigned int dn_rt_hash_mask;

static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush, 0, 0);
static struct dst_ops dn_dst_ops = {
	.check = dn_dst_check,
	.default_advmss = dn_dst_default_advmss,
	.destroy = dn_dst_destroy,
	.ifdown = dn_dst_ifdown,
	.negative_advice = dn_dst_negative_advice,
	.link_failure = dn_dst_link_failure,
	.update_pmtu = dn_dst_update_pmtu,
	.redirect = dn_dst_redirect,
	.neigh_lookup = dn_dst_neigh_lookup,
		neigh_release(rt->n);
	dst_destroy_metrics_generic(dst);

	if (n && n->dev == dev) {
		n->dev = dev_net(dev)->loopback_dev;
	return dn_rt_hash_mask & (unsigned int)tmp;

static inline void dnrt_free(struct dn_route *rt)

static inline void dnrt_drop(struct dn_route *rt)
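/*
 * Timer callback: walk every hash chain and unlink entries that are
 * unreferenced and have been idle for longer than 'expire' (two
 * minutes).  The per-bucket spinlock serialises against other writers;
 * readers traverse the chains under RCU.
 */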
static void dn_dst_check_expire(unsigned long dummy)

	unsigned long expire = 120 * HZ;

	for (i = 0; i <= dn_rt_hash_mask; i++) {
		rtp = &dn_rt_hash_table[i].chain;

		spin_lock(&dn_rt_hash_table[i].lock);
				lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
			    (now - rt->dst.lastuse) < expire) {
				rtp = &rt->dst.dn_next;
			*rtp = rt->dst.dn_next;
		spin_unlock(&dn_rt_hash_table[i].lock);

		if ((jiffies - now) > 0)
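/*
 * Garbage collector invoked by the dst core under memory pressure.
 * Same chain walk as dn_dst_check_expire(), but with a much shorter
 * idle threshold of ten seconds.
 */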
static int dn_dst_gc(struct dst_ops *ops)

	unsigned long expire = 10 * HZ;

	for (i = 0; i <= dn_rt_hash_mask; i++) {
		spin_lock_bh(&dn_rt_hash_table[i].lock);
		rtp = &dn_rt_hash_table[i].chain;
				lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
			    (now - rt->dst.lastuse) < expire) {
				rtp = &rt->dst.dn_next;
			*rtp = rt->dst.dn_next;
		spin_unlock_bh(&dn_rt_hash_table[i].lock);
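/*
 * Path MTU update: only ever lower the cached MTU, never below the
 * DECnet minimum, skip locked metrics, and let the learned value
 * expire after dn_rt_mtu_expires.
 */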
	if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) {
		if (!(dst_metric_locked(dst, RTAX_MTU))) {
			dst_set_expires(dst, dn_rt_mtu_expires);
		if (!existing_mss || existing_mss > mss)
static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,

static void dn_dst_link_failure(struct sk_buff *skb)

static inline int compare_keys(struct flowidn *fl1, struct flowidn *fl2)

		(fl1->flowidn_mark ^ fl2->flowidn_mark) |
		(fl1->flowidn_scope ^ fl2->flowidn_scope) |
		(fl1->flowidn_oif ^ fl2->flowidn_oif) |
		(fl1->flowidn_iif ^ fl2->flowidn_iif)) == 0;
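/*
 * Insert a route into its hash chain.  If an entry with identical flow
 * keys is already present, that entry is moved to the front of the
 * chain and reused, and the newly built route is dropped.
 */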
	spin_lock_bh(&dn_rt_hash_table[hash].lock);
			lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) {
		if (compare_keys(&rth->fld, &rt->fld)) {
			*rthp = rth->dst.dn_next;
					   dn_rt_hash_table[hash].chain);

			dst_use(&rth->dst, now);
			spin_unlock_bh(&dn_rt_hash_table[hash].lock);

		rthp = &rth->dst.dn_next;

	dst_use(&rt->dst, now);
	spin_unlock_bh(&dn_rt_hash_table[hash].lock);
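/* Flush timer callback: empty every hash chain in one pass. */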
static void dn_run_flush(unsigned long dummy)

	for (i = 0; i <= dn_rt_hash_mask; i++) {
		spin_lock_bh(&dn_rt_hash_table[i].lock);

			goto nothing_to_declare;

		for (; rt; rt = next) {
		spin_unlock_bh(&dn_rt_hash_table[i].lock);
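/*
 * Schedule a cache flush.  The delay is clamped between
 * dn_rt_min_delay and dn_rt_max_delay against the running deadline in
 * dn_rt_deadline, so repeated requests cannot postpone a flush
 * indefinitely.
 */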
		delay = dn_rt_min_delay;

	spin_lock_bh(&dn_rt_flush_lock);

	if (del_timer(&dn_rt_flush_timer) && delay > 0 && dn_rt_deadline) {
		long tmo = (long)(dn_rt_deadline - now);

		if (user_mode && tmo < dn_rt_max_delay - dn_rt_min_delay)
		spin_unlock_bh(&dn_rt_flush_lock);

	if (dn_rt_deadline == 0)
		dn_rt_deadline = now + dn_rt_max_delay;

	dn_rt_flush_timer.expires = now + delay;
	spin_unlock_bh(&dn_rt_flush_lock);
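/*
 * Return paths for packets we cannot deliver: the source and
 * destination of the short or long routing header are swapped and the
 * packet is handed back towards its sender via dn_rt_finish_output().
 */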
static int dn_return_short(struct sk_buff *skb)

	dn_rt_finish_output(skb, NULL, NULL);

static int dn_return_long(struct sk_buff *skb)

	char padlen = (*ptr & ~DN_RT_F_PF);

	dn_rt_finish_output(skb, dst_addr, src_addr);
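/*
 * Receive a routed data packet: attach a route and pass the packet up
 * via dst_input(); if no route can be found, endnode packets are
 * returned to the sender in the matching (short or long) header
 * format.
 */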
static int dn_route_rx_packet(struct sk_buff *skb)

	if ((err = dn_route_input(skb)) == 0)
		return dst_input(skb);

		char *devname = skb->dev ? skb->dev->name : "???";

			"DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",

		return dn_return_short(skb);
		return dn_return_long(skb);
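/*
 * Parse the long-format routing header (20 bytes plus at least one
 * byte of NSP payload, hence the 21-byte pull).  Source and
 * destination arrive as Ethernet-style addresses carrying the HIORD
 * prefix and are converted with dn_eth2dn().
 */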
static int dn_route_rx_long(struct sk_buff *skb)

	unsigned char *ptr = skb->data;

	if (!pskb_may_pull(skb, 21))

	skb_reset_transport_header(skb);

	cb->dst = dn_eth2dn(ptr);
	if (memcmp(ptr, dn_hiord_addr, 4) != 0)

	cb->src = dn_eth2dn(ptr);
	if (memcmp(ptr, dn_hiord_addr, 4) != 0)
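/*
 * Parse the short-format routing header: 6 bytes covering flags, the
 * 16-bit destination and source addresses, and a hop count in the low
 * six bits of the final byte.
 */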
static int dn_route_rx_short(struct sk_buff *skb)

	unsigned char *ptr = skb->data;

	if (!pskb_may_pull(skb, 6))

	skb_reset_transport_header(skb);

	cb->hops = *ptr & 0x3f;
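/*
 * Control-packet handlers: unsupported packet types are quietly
 * discarded, while point-to-point hello messages are handed on to the
 * device layer.
 */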
static int dn_route_discard(struct sk_buff *skb)

static int dn_route_ptp_hello(struct sk_buff *skb)

	unsigned char flags = 0;
	unsigned char padlen = 0;

	if (!net_eq(dev_net(dev), &init_net))

	if (!pskb_may_pull(skb, 3))

		padlen = flags & ~DN_RT_F_PF;
		if (!pskb_may_pull(skb, padlen + 1))

	skb_reset_network_header(skb);

		       "dn_route_rcv: got 0x%02x from %s [%d %d %d]\n",
		       (int)flags, (dev) ? dev->name : "???", len, skb->len,

	switch (flags & DN_RT_CNTL_MSK) {

			return dn_route_rx_long(skb);
			return dn_route_rx_short(skb);
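/*
 * Transmit paths.  dn_output() handles locally generated packets and
 * dn_forward() handles transit packets; both reach the neighbour layer
 * through dn_to_neigh_output().  The forwarding path prepends a long
 * or short routing header depending on the device parameters.
 */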
static int dn_to_neigh_output(struct sk_buff *skb)

static int dn_output(struct sk_buff *skb)

static int dn_forward(struct sk_buff *skb)

#ifdef CONFIG_NETFILTER

	rt = (struct dn_route *)skb_dst(skb);
	header_len = dn_db->use_long ? 21 : 6;
static int dn_rt_bug(struct sk_buff *skb)

static unsigned int dn_dst_default_advmss(const struct dst_entry *dst)

static unsigned int dn_dst_mtu(const struct dst_entry *dst)

	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;

	unsigned int mss_metric;

			if (mss_metric > mss)

		ret = dn_match_addr(daddr, ifa->ifa_local);
		if (ret > best_match)

	return (daddr & ~mask) | res->fi->fib_nh->nh_gw;
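/*
 * Slow-path output route resolution: validate any requested interface
 * and source address, consult the FIB, pick a source via
 * dnet_select_source() when none was given, then allocate a cache
 * entry and publish it with dn_insert_route().
 */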
static int dn_route_output_slow(struct dst_entry **pprt,
				const struct flowidn *oldflp, int try_hard)

		.saddr = oldflp->saddr,
		.flowidn_mark = oldflp->flowidn_mark,
		.flowidn_oif = oldflp->flowidn_oif,

	unsigned int flags = 0;

		       "dn_route_output_slow: dst=%04x src=%04x mark=%d"
		       oldflp->flowidn_oif);

	if (oldflp->flowidn_oif) {
			if (dn_dev_islocal(dev_out, oldflp->saddr))
				if (!dn_dev_islocal(dev, oldflp->saddr))
				    !dn_dev_islocal(dev, oldflp->daddr))
		if (dev_out == NULL)
		fld.saddr = dnet_select_source(dev_out, 0,

		       "dn_route_output_slow: initial checks complete."
		       " dst=%04x src=%04x oif=%d try_hard=%d\n",
		       fld.flowidn_oif, try_hard);
		if ((oldflp->flowidn_oif &&
		    (neigh->dev->ifindex != oldflp->flowidn_oif)) ||
		    (!dn_dev_islocal(neigh->dev,
			neigh_release(neigh);

			if (dn_dev_islocal(neigh->dev, fld.daddr)) {
				dev_out = neigh->dev;

	if (dev_out == NULL)

	if (dev_out == NULL)

		if (dn_dev_islocal(dev_out, fld.daddr)) {

		neigh = neigh_clone(dn_db->router);
			gateway = ((struct dn_neigh *)neigh)->addr;
			gateway = fld.daddr;
		if (fld.saddr == 0) {
			fld.saddr = dnet_select_source(dev_out, gateway,
		fld.flowidn_oif = dev_out->ifindex;

	fld.flowidn_oif = dev_out->ifindex;

	if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0)

	fld.flowidn_oif = dev_out->ifindex;

	rt->fld.flowidn_oif = oldflp->flowidn_oif;
	rt->fld.flowidn_iif = 0;
	rt->fld.flowidn_mark = oldflp->flowidn_mark;

	rt->dst.output = dn_output;
	rt->dst.input = dn_rt_bug;

	err = dn_rt_set_next_hop(rt, &res);

	dn_insert_route(rt, hash, (struct dn_route **)pprt);

	neigh_release(neigh);
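/*
 * Fast-path output lookup: scan the hash chain for a cached route
 * matching the flow's addresses, mark and output interface, falling
 * back to dn_route_output_slow() on a miss.  The keyed wrappers below
 * additionally pass the result through xfrm_lookup() when a protocol
 * is set.
 */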
static int __dn_route_output_key(struct dst_entry **pprt,
				 const struct flowidn *flp, int flags)

		if ((flp->daddr == rt->fld.daddr) &&
		    (flp->flowidn_mark == rt->fld.flowidn_mark) &&
		    dn_is_output_route(rt) &&
		    (rt->fld.flowidn_oif == flp->flowidn_oif)) {
			dst_use(&rt->dst, jiffies);
			rcu_read_unlock_bh();
	rcu_read_unlock_bh();

	return dn_route_output_slow(pprt, flp, flags);

static int dn_route_output_key(struct dst_entry **pprt, struct flowidn *flp,
			       int flags)

	err = __dn_route_output_key(pprt, flp, flags);
	if (err == 0 && flp->flowidn_proto) {
					 flowidn_to_flowi(flp), NULL, 0);
		if (IS_ERR(*pprt)) {
			err = PTR_ERR(*pprt);

	err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
	if (err == 0 && fl->flowidn_proto) {
				   flowidn_to_flowi(fl), sk, 0);
		if (IS_ERR(*pprt)) {
			err = PTR_ERR(*pprt);
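/*
 * Slow-path input route resolution: classify the packet as local
 * delivery or forwarding, apply any NAT mapping from the FIB rules,
 * refuse to forward when forwarding is disabled on the inbound device,
 * then build and cache the route.
 */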
static int dn_route_input_slow(struct sk_buff *skb)

		.flowidn_mark = skb->mark,
		.flowidn_iif = skb->dev->ifindex,

	if (dn_dev_islocal(in_dev, cb->src))

		if (!dn_dev_islocal(in_dev, cb->dst))

		if (out_dev == NULL) {

		src_map = fld.saddr;

			fld.daddr = dn_fib_rules_map_destination(fld.daddr, &res);

		gateway = fld.daddr;

		fld.saddr = src_map;

		if (dn_db->parms.forwarding == 0)

		if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0)

		if (out_dev == in_dev && !(flags & RTCF_NAT))

		neigh = neigh_clone(dn_db->router);
			gateway = ((struct dn_neigh *)neigh)->addr;

	rt->fld.flowidn_oif = 0;
	rt->fld.flowidn_mark = fld.flowidn_mark;

	rt->dst.output = dn_rt_bug;
		rt->dst.input = dn_forward;
		rt->dst.output = dn_output;
		rt->dst.dev = in_dev;

	err = dn_rt_set_next_hop(rt, &res);

	dn_insert_route(rt, hash, &rt);
	skb_dst_set(skb, &rt->dst);

	neigh_release(neigh);
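/*
 * Fast-path input lookup, keyed on source, destination, mark and
 * inbound interface; a miss falls through to dn_route_input_slow().
 */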
static int dn_route_input(struct sk_buff *skb)

		if ((rt->fld.saddr == cb->src) &&
		    (rt->fld.daddr == cb->dst) &&
		    (rt->fld.flowidn_oif == 0) &&
		    (rt->fld.flowidn_mark == skb->mark) &&
		    (rt->fld.flowidn_iif == cb->iif)) {
			dst_use(&rt->dst, jiffies);
			skb_dst_set(skb, (struct dst_entry *)rt);

	return dn_route_input_slow(skb);
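/*
 * Fill a netlink rtmsg describing a cached route; on any nla_put
 * failure the partially built message is cancelled.
 */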
static int dn_rt_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
			   int event, int nowait, unsigned int flags)

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);

	r = nlmsg_data(nlh);

	if (rt->fld.saddr) {
		if (nla_put_le16(skb, RTA_SRC, rt->fld.saddr) < 0)
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex) < 0)

	if (dn_is_input_route(rt) &&
	    nla_put_u32(skb, RTA_IIF, rt->fld.flowidn_iif) < 0)

	return nlmsg_end(skb, nlh);

	nlmsg_cancel(skb, nlh);
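/*
 * RTM_GETROUTE handler: build a dummy skb, resolve the requested flow
 * through the input or output path as appropriate, and report the
 * resulting route back to the caller.
 */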
static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			     void *arg)

	struct net *net = sock_net(in_skb->sk);

	struct rtmsg *rtm = nlmsg_data(nlh);

	memset(&fld, 0, sizeof(fld));

	skb_reset_mac_header(skb);

	if (fld.flowidn_iif) {

		err = dn_route_input(skb);

		rt = (struct dn_route *)skb_dst(skb);
		if (!err && rt->dst.error)
			err = rt->dst.error;

		fld.flowidn_oif = oif;
		err = dn_route_output_key((struct dst_entry **)&rt, &fld, 0);

	skb_dst_set(skb, &rt->dst);
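/*
 * RTM_GETROUTE dump: walk every hash bucket under rcu_read_lock_bh()
 * and emit one rtmsg per cached route, resuming from the position
 * saved in cb->args.
 */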
	struct net *net = sock_net(skb->sk);

	if (nlmsg_len(cb->nlh) < sizeof(struct rtmsg))

	rtm = nlmsg_data(cb->nlh);

	s_idx = idx = cb->args[1];
	for (h = 0; h <= dn_rt_hash_mask; h++) {
			skb_dst_set(skb, dst_clone(&rt->dst));
				rcu_read_unlock_bh();
		rcu_read_unlock_bh();
#ifdef CONFIG_PROC_FS
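/*
 * /proc/net interface: a seq_file iterator that walks the hash buckets
 * from the top down, holding rcu_read_lock_bh() across each chain.
 */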
struct dn_rt_cache_iter_state {

	struct dn_rt_cache_iter_state *s = seq->private;

	for (s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
	rcu_read_unlock_bh();

	struct dn_rt_cache_iter_state *s = seq->private;

	rcu_read_unlock_bh();
	if (--s->bucket < 0)

static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)

	struct dn_route *rt = dn_rt_cache_get_first(seq);

	while (*pos && (rt = dn_rt_cache_get_next(seq, rt)))
	return *pos ? NULL : rt;

static void *dn_rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)

	struct dn_route *rt = dn_rt_cache_get_next(seq, v);

static void dn_rt_cache_seq_stop(struct seq_file *seq, void *v)

	rcu_read_unlock_bh();

static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)

	seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
		   rt->dst.dev ? rt->dst.dev->name : "*",

	.start = dn_rt_cache_seq_start,
	.next  = dn_rt_cache_seq_next,
	.stop  = dn_rt_cache_seq_stop,
	.show  = dn_rt_cache_seq_show,

			sizeof(struct dn_rt_cache_iter_state));

	.open = dn_rt_cache_seq_open,
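/*
 * Initialisation: size the hash table from available memory, rounding
 * the bucket count to a power of two so dn_rt_hash_mask works as a
 * mask, and retrying with a smaller order if the allocation fails.
 */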
	dst_entries_init(&dn_dst_ops);

	setup_timer(&dn_route_timer, dn_dst_check_expire, 0);

	for (order = 0; (1UL << order) < goal; order++)

	dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /

	while (dn_rt_hash_mask & (dn_rt_hash_mask - 1))

	} while (dn_rt_hash_table == NULL && --order > 0);

	if (!dn_rt_hash_table)
		panic("Failed to allocate DECnet route cache hash table\n");

	       "DECnet: Routing cache hash table of %u buckets, %ldKbytes\n",

	for (i = 0; i <= dn_rt_hash_mask; i++) {

	dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);

#ifdef CONFIG_DECNET_ROUTER

	dst_entries_destroy(&dn_dst_ops);