11 #include <linux/list.h>
12 #include <linux/random.h>
13 #include <linux/slab.h>
15 #include <linux/string.h>
/* Initial path-table allocation order: tables start with
 * 2^INIT_PATHS_SIZE_ORDER hash buckets. */
#define INIT_PATHS_SIZE_ORDER 2

/* NOTE(review): presumably the desired mean hash-chain length used when
 * deciding whether to grow the table — confirm against the grow logic,
 * which is not fully visible in this chunk. */
#define MEAN_CHAIN_LEN 2
/*
 * MPATH_EXPIRED - has this mesh path outlived its expiry time?
 *
 * True when the path is marked active, jiffies has passed @mpath->exp_time,
 * and the path is not flagged MESH_PATH_FIXED (fixed paths never expire).
 *
 * Fix: the macro parameter was used unparenthesized; with an argument such
 * as `cond ? a : b` the `->` would bind incorrectly.  Every use of @mpath
 * is now fully parenthesized (CERT PRE01-C).  Note @mpath is still
 * evaluated three times, so callers must not pass expressions with side
 * effects.
 */
#define MPATH_EXPIRED(mpath) (((mpath)->flags & MESH_PATH_ACTIVE) && \
			      time_after(jiffies, (mpath)->exp_time) && \
			      !((mpath)->flags & MESH_PATH_FIXED))
54 static inline struct mesh_table *resize_dereference_mesh_paths(
void)
57 lockdep_is_held(&pathtbl_resize_lock));
60 static inline struct mesh_table *resize_dereference_mpp_paths(
void)
63 lockdep_is_held(&pathtbl_resize_lock));
/*
 * for_each_mesh_entry - walk every node in a mesh path table.
 *
 * Iterates bucket index @i from 0 through @tbl->hash_mask and traverses
 * each bucket's hlist with hlist_for_each_entry_rcu(), so the caller must
 * hold the RCU read lock (or an equivalent update-side lock).
 *
 * Fix: the @tbl parameter was used unparenthesized; a pointer-cast or
 * conditional argument would parse incorrectly.  Both uses are now
 * parenthesized (CERT PRE01-C).  @i is a caller-supplied lvalue that the
 * loop writes to, and @tbl is evaluated on every iteration — pass only
 * side-effect-free expressions.
 */
#define for_each_mesh_entry(tbl, p, node, i) \
	for (i = 0; i <= (tbl)->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, p, &(tbl)->hash_buckets[i], list)
114 static void __mesh_table_free(
struct mesh_table *tbl)
121 static void mesh_table_free(
struct mesh_table *tbl,
bool free_leafs)
141 hlist_del(&gate->
list);
148 __mesh_table_free(tbl);
151 static int mesh_table_grow(
struct mesh_table *oldtbl,
177 for (i = 0; i <= newtbl->
hash_mask; i++) {
188 return jhash_2words(*(
u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
217 spin_unlock_irqrestore(&mpath->
frame_queue.lock, flags);
278 static void mesh_path_move_to_queue(
struct mesh_path *gate_mpath,
286 BUG_ON(gate_mpath == from_mpath);
289 __skb_queue_head_init(&failq);
292 skb_queue_splice_init(&from_mpath->
frame_queue, &failq);
293 spin_unlock_irqrestore(&from_mpath->
frame_queue.lock, flags);
295 skb_queue_walk_safe(&failq, fskb, tmp) {
306 prepare_for_gate(skb, gate_mpath->
dst, gate_mpath);
312 __skb_unlink(fskb, &failq);
316 mpath_dbg(gate_mpath->
sdata,
"Mpath queue for gate %pM has %d frames\n",
323 skb_queue_splice(&failq, &from_mpath->
frame_queue);
324 spin_unlock_irqrestore(&from_mpath->
frame_queue.lock, flags);
336 bucket = &tbl->
hash_buckets[mesh_table_hash(dst, sdata, tbl)];
337 hlist_for_each_entry_rcu(node, n, bucket,
list) {
339 if (mpath->
sdata == sdata &&
340 ether_addr_equal(dst, mpath->
dst)) {
390 if (sdata && node->
mpath->sdata != sdata)
394 spin_lock_bh(&node->
mpath->state_lock);
396 spin_unlock_bh(&node->
mpath->state_lock);
420 if (gate->
mpath == mpath) {
432 mpath->
sdata->u.mesh.num_gates++;
439 "Mesh path: Recorded new gate: %pM. %d known gates\n",
440 mpath->
dst, mpath->
sdata->u.mesh.num_gates);
462 if (gate->mpath == mpath) {
464 hlist_del_rcu(&gate->
list);
467 mpath->sdata->u.mesh.num_gates--;
468 mpath->is_gate =
false;
470 "Mesh path: Deleted gate: %pM. %d known gates\n",
471 mpath->dst, mpath->sdata->u.mesh.num_gates);
484 return sdata->
u.
mesh.num_gates;
509 if (ether_addr_equal(dst, sdata->
vif.addr))
513 if (is_multicast_ether_addr(dst))
533 new_mpath->
flags = 0;
535 new_node->
mpath = new_mpath;
536 new_mpath->
timer.data = (
unsigned long) new_mpath;
542 tbl = resize_dereference_mesh_paths();
544 hash_idx = mesh_table_hash(dst, sdata, tbl);
552 if (mpath->
sdata == sdata &&
553 ether_addr_equal(dst, mpath->
dst))
557 hlist_add_head_rcu(&new_node->
list, bucket);
583 static void mesh_table_free_rcu(
struct rcu_head *rcu)
587 mesh_table_free(tbl,
false);
595 oldtbl = resize_dereference_mesh_paths();
596 newtbl = mesh_table_alloc(oldtbl->
size_order + 1);
599 if (mesh_table_grow(oldtbl, newtbl) < 0) {
600 __mesh_table_free(newtbl);
616 oldtbl = resize_dereference_mpp_paths();
617 newtbl = mesh_table_alloc(oldtbl->
size_order + 1);
620 if (mesh_table_grow(oldtbl, newtbl) < 0) {
621 __mesh_table_free(newtbl);
644 if (ether_addr_equal(dst, sdata->
vif.addr))
648 if (is_multicast_ether_addr(dst))
664 new_mpath->
flags = 0;
666 new_node->
mpath = new_mpath;
671 tbl = resize_dereference_mpp_paths();
673 hash_idx = mesh_table_hash(dst, sdata, tbl);
681 if (mpath->
sdata == sdata &&
682 ether_addr_equal(dst, mpath->
dst))
686 hlist_add_head_rcu(&new_node->
list, bucket);
721 static const u8 bcast[
ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
742 reason, bcast, sdata);
748 static void mesh_path_node_reclaim(
struct rcu_head *
rp)
767 mesh_gate_del(tbl, mpath);
768 hlist_del_rcu(&node->
list);
795 tbl = resize_dereference_mesh_paths();
800 __mesh_path_del(tbl, node);
808 static void table_flush_by_iface(
struct mesh_table *tbl,
816 WARN_ON(!rcu_read_lock_held());
819 if (mpath->
sdata != sdata)
822 __mesh_path_del(tbl, node);
841 tbl = resize_dereference_mesh_paths();
842 table_flush_by_iface(tbl, sdata);
843 tbl = resize_dereference_mpp_paths();
844 table_flush_by_iface(tbl, sdata);
868 tbl = resize_dereference_mesh_paths();
869 hash_idx = mesh_table_hash(addr, sdata, tbl);
875 if (mpath->
sdata == sdata &&
876 ether_addr_equal(addr, mpath->
dst)) {
877 __mesh_path_del(tbl, node);
901 ieee80211_add_pending_skbs(mpath->
sdata->local,
933 hlist_for_each_entry_rcu(gate, n, known_gates,
list) {
934 if (gate->
mpath->sdata != sdata)
939 mesh_path_move_to_queue(gate->
mpath, from_mpath, copy);
940 from_mpath = gate->
mpath;
944 "Not forwarding %p (flags %#x)\n",
949 hlist_for_each_entry_rcu(gate, n, known_gates,
list)
950 if (gate->
mpath->sdata == sdata) {
970 sdata->
u.
mesh.mshstats.dropped_frames_no_route++;
1005 mesh_path_activate(mpath);
1010 static void mesh_path_node_free(
struct hlist_node *p,
bool free_leafs)
1014 mpath = node->
mpath;
1030 if (new_node ==
NULL)
1034 mpath = node->
mpath;
1036 hash_idx = mesh_table_hash(mpath->
dst, mpath->
sdata, newtbl);
1037 hlist_add_head(&new_node->
list,
1050 tbl_path->
free_node = &mesh_path_node_free;
1051 tbl_path->
copy_node = &mesh_path_node_copy;
1066 tbl_mpp->
free_node = &mesh_path_node_free;
1067 tbl_mpp->
copy_node = &mesh_path_node_copy;
1083 mesh_table_free(tbl_mpp,
true);
1085 mesh_table_free(tbl_path,
true);
1100 if (node->
mpath->sdata != sdata)
1102 mpath = node->
mpath;