Linux Kernel 3.7.1
mesh_pathtbl.c

/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author: Luis Carlos Cobo <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

/* Initially there will be 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER   2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN          2

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
                              time_after(jiffies, mpath->exp_time) && \
                              !(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
        struct hlist_node list;
        struct rcu_head rcu;
        /* This indirection allows two different tables to point to the same
         * mesh_path structure, useful when resizing
         */
        struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups). Adding or removing nodes requires we take the read
 * lock or we risk operating on an old table. The write lock is only needed
 * when modifying the number of buckets in a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

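/*
 * Note that pathtbl_resize_lock inverts the usual rwlock roles: mutators
 * (path add/delete) take it for *reading*, since many of them may safely
 * run in parallel against the same table, while the resize path takes it
 * for *writing*, since it must be the only context swapping the table
 * pointer. A minimal stand-alone sketch of the same pattern follows;
 * struct demo_table, demo_insert() and demo_grow() are illustrative
 * names, not part of this file.
 */
#if 0   /* illustrative sketch only, not compiled */
struct demo_table {
        struct hlist_head *buckets;
};

static DEFINE_RWLOCK(demo_resize_lock);
static struct demo_table __rcu *demo_table;

/* Many inserters may run concurrently: they "read" the table pointer,
 * even though they modify the table contents under per-bucket locks. */
static void demo_insert(struct hlist_node *n)
{
        struct demo_table *tbl;

        read_lock_bh(&demo_resize_lock);
        tbl = rcu_dereference_protected(demo_table,
                        lockdep_is_held(&demo_resize_lock));
        hlist_add_head_rcu(n, &tbl->buckets[0]);
        read_unlock_bh(&demo_resize_lock);
}

/* Exactly one resizer at a time: it "writes" the table pointer. */
static void demo_grow(struct demo_table *newtbl)
{
        write_lock_bh(&demo_resize_lock);
        rcu_assign_pointer(demo_table, newtbl);
        write_unlock_bh(&demo_resize_lock);
}
#endif
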
static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
        return rcu_dereference_protected(mesh_paths,
                lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
        return rcu_dereference_protected(mpp_paths,
                lockdep_is_held(&pathtbl_resize_lock));
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 * for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, p, node, i) \
        for (i = 0; i <= tbl->hash_mask; i++) \
                hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)

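/*
 * Because for_each_mesh_entry() expands "tbl" twice, the callers below
 * always dereference the RCU pointer into a local variable first and only
 * then iterate. A sketch of the intended calling pattern follows;
 * demo_walk_paths() is illustrative, not part of this file.
 */
#if 0   /* illustrative sketch only, not compiled */
static void demo_walk_paths(void)
{
        struct mesh_table *tbl;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);      /* dereference exactly once */
        for_each_mesh_entry(tbl, p, node, i) {
                /* node->mpath stays valid for the whole RCU read section */
        }
        rcu_read_unlock();
}
#endif
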
static struct mesh_table *mesh_table_alloc(int size_order)
{
        int i;
        struct mesh_table *newtbl;

        newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
        if (!newtbl)
                return NULL;

        newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
                        (1 << size_order), GFP_ATOMIC);

        if (!newtbl->hash_buckets) {
                kfree(newtbl);
                return NULL;
        }

        newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
                        (1 << size_order), GFP_ATOMIC);
        if (!newtbl->hashwlock) {
                kfree(newtbl->hash_buckets);
                kfree(newtbl);
                return NULL;
        }

        newtbl->size_order = size_order;
        newtbl->hash_mask = (1 << size_order) - 1;
        atomic_set(&newtbl->entries, 0);
        get_random_bytes(&newtbl->hash_rnd,
                        sizeof(newtbl->hash_rnd));
        for (i = 0; i <= newtbl->hash_mask; i++)
                spin_lock_init(&newtbl->hashwlock[i]);
        spin_lock_init(&newtbl->gates_lock);

        return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
        kfree(tbl->hash_buckets);
        kfree(tbl->hashwlock);
        kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
        struct hlist_head *mesh_hash;
        struct hlist_node *p, *q;
        struct mpath_node *gate;
        int i;

        mesh_hash = tbl->hash_buckets;
        for (i = 0; i <= tbl->hash_mask; i++) {
                spin_lock_bh(&tbl->hashwlock[i]);
                hlist_for_each_safe(p, q, &mesh_hash[i]) {
                        tbl->free_node(p, free_leafs);
                        atomic_dec(&tbl->entries);
                }
                spin_unlock_bh(&tbl->hashwlock[i]);
        }
        if (free_leafs) {
                spin_lock_bh(&tbl->gates_lock);
                hlist_for_each_entry_safe(gate, p, q,
                                          tbl->known_gates, list) {
                        hlist_del(&gate->list);
                        kfree(gate);
                }
                kfree(tbl->known_gates);
                spin_unlock_bh(&tbl->gates_lock);
        }

        __mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
                           struct mesh_table *newtbl)
{
        struct hlist_head *oldhash;
        struct hlist_node *p, *q;
        int i;

        if (atomic_read(&oldtbl->entries)
                        < oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
                return -EAGAIN;

        newtbl->free_node = oldtbl->free_node;
        newtbl->mean_chain_len = oldtbl->mean_chain_len;
        newtbl->copy_node = oldtbl->copy_node;
        newtbl->known_gates = oldtbl->known_gates;
        atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

        oldhash = oldtbl->hash_buckets;
        for (i = 0; i <= oldtbl->hash_mask; i++)
                hlist_for_each(p, &oldhash[i])
                        if (oldtbl->copy_node(p, newtbl) < 0)
                                goto errcopy;

        return 0;

errcopy:
        for (i = 0; i <= newtbl->hash_mask; i++) {
                hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
                        oldtbl->free_node(p, 0);
        }
        return -ENOMEM;
}

static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
                           struct mesh_table *tbl)
{
        /* Use last four bytes of hw addr and interface index as hash index */
        return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
                & tbl->hash_mask;
}

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
        unsigned long flags;

        rcu_assign_pointer(mpath->next_hop, sta);

        spin_lock_irqsave(&mpath->frame_queue.lock, flags);
        skb_queue_walk(&mpath->frame_queue, skb) {
                hdr = (struct ieee80211_hdr *) skb->data;
                memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
                memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
        }

        spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
                             struct mesh_path *gate_mpath)
{
        struct ieee80211_hdr *hdr;
        struct ieee80211s_hdr *mshdr;
        int mesh_hdrlen, hdrlen;
        char *next_hop;

        hdr = (struct ieee80211_hdr *) skb->data;
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
        mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

        if (!(mshdr->flags & MESH_FLAGS_AE)) {
                /* size of the fixed part of the mesh header */
                mesh_hdrlen = 6;

                /* make room for the two extended addresses */
                skb_push(skb, 2 * ETH_ALEN);
                memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

                hdr = (struct ieee80211_hdr *) skb->data;

                /* we preserve the previous mesh header and only add
                 * the new addresses */
                mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
                mshdr->flags = MESH_FLAGS_AE_A5_A6;
                memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
                memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
        }

        /* update next hop */
        hdr = (struct ieee80211_hdr *) skb->data;
        rcu_read_lock();
        next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
        memcpy(hdr->addr1, next_hop, ETH_ALEN);
        rcu_read_unlock();
        memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
        memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
                                    struct mesh_path *from_mpath,
                                    bool copy)
{
        struct sk_buff *skb, *fskb, *tmp;
        struct sk_buff_head failq;
        unsigned long flags;

        BUG_ON(gate_mpath == from_mpath);
        BUG_ON(!gate_mpath->next_hop);

        __skb_queue_head_init(&failq);

        spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
        skb_queue_splice_init(&from_mpath->frame_queue, &failq);
        spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

        skb_queue_walk_safe(&failq, fskb, tmp) {
                if (skb_queue_len(&gate_mpath->frame_queue) >=
                                  MESH_FRAME_QUEUE_LEN) {
                        mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
                        break;
                }

                skb = skb_copy(fskb, GFP_ATOMIC);
                if (WARN_ON(!skb))
                        break;

                prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
                skb_queue_tail(&gate_mpath->frame_queue, skb);

                if (copy)
                        continue;

                __skb_unlink(fskb, &failq);
                kfree_skb(fskb);
        }

        mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
                  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

        if (!copy)
                return;

        spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
        skb_queue_splice(&failq, &from_mpath->frame_queue);
        spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
                                      struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct hlist_node *n;
        struct hlist_head *bucket;
        struct mpath_node *node;

        bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
        hlist_for_each_entry_rcu(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    ether_addr_equal(dst, mpath->dst)) {
                        if (MPATH_EXPIRED(mpath)) {
                                spin_lock_bh(&mpath->state_lock);
                                mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&mpath->state_lock);
                        }
                        return mpath;
                }
        }
        return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}

struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}

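/*
 * Both lookup helpers return a pointer that is only guaranteed to stay
 * valid inside the caller's RCU read-side section. A sketch of a typical
 * caller follows; demo_use_path() is illustrative, not part of this file.
 */
#if 0   /* illustrative sketch only, not compiled */
static void demo_use_path(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;

        rcu_read_lock();
        mpath = mesh_path_lookup(dst, sdata);
        if (mpath && (mpath->flags & MESH_PATH_ACTIVE)) {
                /* a frame may be forwarded via mpath->next_hop here */
        }
        rcu_read_unlock();      /* mpath must not be used past this point */
}
#endif
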
/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx,
                                          struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl = rcu_dereference(mesh_paths);
        struct mpath_node *node;
        struct hlist_node *p;
        int i;
        int j = 0;

        for_each_mesh_entry(tbl, p, node, i) {
                if (sdata && node->mpath->sdata != sdata)
                        continue;
                if (j++ == idx) {
                        if (MPATH_EXPIRED(node->mpath)) {
                                spin_lock_bh(&node->mpath->state_lock);
                                node->mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&node->mpath->state_lock);
                        }
                        return node->mpath;
                }
        }

        return NULL;
}

/**
 * mesh_path_add_gate - add the given mpath to a mesh gate to our path table
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
        struct mesh_table *tbl;
        struct mpath_node *gate, *new_gate;
        struct hlist_node *n;
        int err;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);

        hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
                if (gate->mpath == mpath) {
                        err = -EEXIST;
                        goto err_rcu;
                }

        new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_gate) {
                err = -ENOMEM;
                goto err_rcu;
        }

        mpath->is_gate = true;
        mpath->sdata->u.mesh.num_gates++;
        new_gate->mpath = mpath;
        spin_lock_bh(&tbl->gates_lock);
        hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
        spin_unlock_bh(&tbl->gates_lock);
        rcu_read_unlock();
        mpath_dbg(mpath->sdata,
                  "Mesh path: Recorded new gate: %pM. %d known gates\n",
                  mpath->dst, mpath->sdata->u.mesh.num_gates);
        return 0;
err_rcu:
        rcu_read_unlock();
        return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Returns: 0 on success
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
        struct mpath_node *gate;
        struct hlist_node *p, *q;

        hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
                if (gate->mpath == mpath) {
                        spin_lock_bh(&tbl->gates_lock);
                        hlist_del_rcu(&gate->list);
                        kfree_rcu(gate, rcu);
                        spin_unlock_bh(&tbl->gates_lock);
                        mpath->sdata->u.mesh.num_gates--;
                        mpath->is_gate = false;
                        mpath_dbg(mpath->sdata,
                                  "Mesh path: Deleted gate: %pM. %d known gates\n",
                                  mpath->dst, mpath->sdata->u.mesh.num_gates);
                        break;
                }

        return 0;
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
        return sdata->u.mesh.num_gates;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;
        struct mesh_table *tbl;
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int grow = 0;
        int err = 0;
        u32 hash_idx;

        if (ether_addr_equal(dst, sdata->vif.addr))
                /* never add ourselves as neighbours */
                return -ENOTSUPP;

        if (is_multicast_ether_addr(dst))
                return -ENOTSUPP;

        if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
                return -ENOSPC;

        err = -ENOMEM;
        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
        if (!new_mpath)
                goto err_path_alloc;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_node)
                goto err_node_alloc;

        read_lock_bh(&pathtbl_resize_lock);
        memcpy(new_mpath->dst, dst, ETH_ALEN);
        eth_broadcast_addr(new_mpath->rann_snd_addr);
        new_mpath->is_root = false;
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
        skb_queue_head_init(&new_mpath->frame_queue);
        new_node->mpath = new_mpath;
        new_mpath->timer.data = (unsigned long) new_mpath;
        new_mpath->timer.function = mesh_path_timer;
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);
        init_timer(&new_mpath->timer);

        tbl = resize_dereference_mesh_paths();

        hash_idx = mesh_table_hash(dst, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock(&tbl->hashwlock[hash_idx]);

        err = -EEXIST;
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    ether_addr_equal(dst, mpath->dst))
                        goto err_exists;
        }

        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&tbl->entries) >=
            tbl->mean_chain_len * (tbl->hash_mask + 1))
                grow = 1;

        mesh_paths_generation++;

        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        if (grow) {
                set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
                ieee80211_queue_work(&local->hw, &sdata->work);
        }
        return 0;

err_exists:
        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        kfree(new_node);
err_node_alloc:
        kfree(new_mpath);
err_path_alloc:
        atomic_dec(&sdata->u.mesh.mpaths);
        return err;
}

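/*
 * A sketch of how a caller creates a path on demand and then resolves it;
 * demo_ensure_path() is illustrative, and the commented-out discovery
 * call stands in for the HWMP resolution code in mesh_hwmp.c.
 */
#if 0   /* illustrative sketch only, not compiled */
static int demo_ensure_path(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        int err;

        rcu_read_lock();
        mpath = mesh_path_lookup(dst, sdata);
        if (!mpath) {
                rcu_read_unlock();
                err = mesh_path_add(dst, sdata);
                if (err && err != -EEXIST)      /* -EEXIST means we raced */
                        return err;             /* -ENOTSUPP, -ENOSPC or -ENOMEM */
                rcu_read_lock();
                mpath = mesh_path_lookup(dst, sdata);
        }
        if (mpath && !(mpath->flags & MESH_PATH_ACTIVE)) {
                /* start HWMP discovery for this path here */
        }
        rcu_read_unlock();
        return mpath ? 0 : -ENOENT;
}
#endif
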
static void mesh_table_free_rcu(struct rcu_head *rcu)
{
        struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

        mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
        struct mesh_table *oldtbl, *newtbl;

        write_lock_bh(&pathtbl_resize_lock);
        oldtbl = resize_dereference_mesh_paths();
        newtbl = mesh_table_alloc(oldtbl->size_order + 1);
        if (!newtbl)
                goto out;
        if (mesh_table_grow(oldtbl, newtbl) < 0) {
                __mesh_table_free(newtbl);
                goto out;
        }
        rcu_assign_pointer(mesh_paths, newtbl);

        call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
        write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
        struct mesh_table *oldtbl, *newtbl;

        write_lock_bh(&pathtbl_resize_lock);
        oldtbl = resize_dereference_mpp_paths();
        newtbl = mesh_table_alloc(oldtbl->size_order + 1);
        if (!newtbl)
                goto out;
        if (mesh_table_grow(oldtbl, newtbl) < 0) {
                __mesh_table_free(newtbl);
                goto out;
        }
        rcu_assign_pointer(mpp_paths, newtbl);
        call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
        write_unlock_bh(&pathtbl_resize_lock);
}

int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;
        struct mesh_table *tbl;
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int grow = 0;
        int err = 0;
        u32 hash_idx;

        if (ether_addr_equal(dst, sdata->vif.addr))
                /* never add ourselves as neighbours */
                return -ENOTSUPP;

        if (is_multicast_ether_addr(dst))
                return -ENOTSUPP;

        err = -ENOMEM;
        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
        if (!new_mpath)
                goto err_path_alloc;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_node)
                goto err_node_alloc;

        read_lock_bh(&pathtbl_resize_lock);
        memcpy(new_mpath->dst, dst, ETH_ALEN);
        memcpy(new_mpath->mpp, mpp, ETH_ALEN);
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
        skb_queue_head_init(&new_mpath->frame_queue);
        new_node->mpath = new_mpath;
        init_timer(&new_mpath->timer);
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);

        tbl = resize_dereference_mpp_paths();

        hash_idx = mesh_table_hash(dst, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock(&tbl->hashwlock[hash_idx]);

        err = -EEXIST;
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    ether_addr_equal(dst, mpath->dst))
                        goto err_exists;
        }

        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&tbl->entries) >=
            tbl->mean_chain_len * (tbl->hash_mask + 1))
                grow = 1;

        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        if (grow) {
                set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
                ieee80211_queue_work(&local->hw, &sdata->work);
        }
        return 0;

err_exists:
        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        kfree(new_node);
err_node_alloc:
        kfree(new_mpath);
err_path_alloc:
        return err;
}

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
        struct mesh_table *tbl;
        static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        int i;
        __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
                if (rcu_dereference(mpath->next_hop) == sta &&
                    mpath->flags & MESH_PATH_ACTIVE &&
                    !(mpath->flags & MESH_PATH_FIXED)) {
                        spin_lock_bh(&mpath->state_lock);
                        mpath->flags &= ~MESH_PATH_ACTIVE;
                        ++mpath->sn;
                        spin_unlock_bh(&mpath->state_lock);
                        mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
                                           mpath->dst, cpu_to_le32(mpath->sn),
                                           reason, bcast, sdata);
                }
        }
        rcu_read_unlock();
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
        struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
        struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

        del_timer_sync(&node->mpath->timer);
        atomic_dec(&sdata->u.mesh.mpaths);
        kfree(node->mpath);
        kfree(node);
}

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
        struct mesh_path *mpath;
        mpath = node->mpath;
        spin_lock(&mpath->state_lock);
        mpath->flags |= MESH_PATH_RESOLVING;
        if (mpath->is_gate)
                mesh_gate_del(tbl, mpath);
        hlist_del_rcu(&node->list);
        call_rcu(&node->rcu, mesh_path_node_reclaim);
        spin_unlock(&mpath->state_lock);
        atomic_dec(&tbl->entries);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one
 * allowing path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        rcu_read_lock();
        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
                if (rcu_dereference(mpath->next_hop) == sta) {
                        spin_lock(&tbl->hashwlock[i]);
                        __mesh_path_del(tbl, node);
                        spin_unlock(&tbl->hashwlock[i]);
                }
        }
        read_unlock_bh(&pathtbl_resize_lock);
        rcu_read_unlock();
}

static void table_flush_by_iface(struct mesh_table *tbl,
                                 struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        WARN_ON(!rcu_read_lock_held());
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
                if (mpath->sdata != sdata)
                        continue;
                spin_lock_bh(&tbl->hashwlock[i]);
                __mesh_path_del(tbl, node);
                spin_unlock_bh(&tbl->hashwlock[i]);
        }
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths as well as mesh portal paths.
 *
 * @sdata: interface data to match
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;

        rcu_read_lock();
        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        table_flush_by_iface(tbl, sdata);
        tbl = resize_dereference_mpp_paths();
        table_flush_by_iface(tbl, sdata);
        read_unlock_bh(&pathtbl_resize_lock);
        rcu_read_unlock();
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int hash_idx;
        int err = 0;

        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        hash_idx = mesh_table_hash(addr, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock(&tbl->hashwlock[hash_idx]);
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    ether_addr_equal(addr, mpath->dst)) {
                        __mesh_path_del(tbl, node);
                        goto enddel;
                }
        }

        err = -ENXIO;
enddel:
        mesh_paths_generation++;
        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
        if (mpath->flags & MESH_PATH_ACTIVE)
                ieee80211_add_pending_skbs(mpath->sdata->local,
                                           &mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames are
 * copied from each gate to the next.  After frames are copied, the mpath
 * queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
        struct ieee80211_sub_if_data *sdata = mpath->sdata;
        struct hlist_node *n;
        struct mesh_table *tbl;
        struct mesh_path *from_mpath = mpath;
        struct mpath_node *gate = NULL;
        bool copy = false;
        struct hlist_head *known_gates;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        known_gates = tbl->known_gates;
        rcu_read_unlock();

        if (!known_gates)
                return -EHOSTUNREACH;

        hlist_for_each_entry_rcu(gate, n, known_gates, list) {
                if (gate->mpath->sdata != sdata)
                        continue;

                if (gate->mpath->flags & MESH_PATH_ACTIVE) {
                        mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
                        mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
                        from_mpath = gate->mpath;
                        copy = true;
                } else {
                        mpath_dbg(sdata,
                                  "Not forwarding %p (flags %#x)\n",
                                  gate->mpath, gate->mpath->flags);
                }
        }

        hlist_for_each_entry_rcu(gate, n, known_gates, list)
                if (gate->mpath->sdata == sdata) {
                        mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
                        mesh_path_tx_pending(gate->mpath);
                }

        return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
                             struct ieee80211_sub_if_data *sdata)
{
        kfree_skb(skb);
        sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
                mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
        spin_lock_bh(&mpath->state_lock);
        mesh_path_assign_nexthop(mpath, next_hop);
        mpath->sn = 0xffff;
        mpath->metric = 0;
        mpath->hop_count = 0;
        mpath->exp_time = 0;
        mpath->flags |= MESH_PATH_FIXED;
        mesh_path_activate(mpath);
        spin_unlock_bh(&mpath->state_lock);
        mesh_path_tx_pending(mpath);
}

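/*
 * Fixed paths are how a static next hop gets pinned from the
 * configuration path. A sketch of the expected call sequence follows;
 * demo_pin_next_hop() is illustrative, not part of this file.
 */
#if 0   /* illustrative sketch only, not compiled */
static int demo_pin_next_hop(u8 *dst, struct sta_info *next_hop,
                             struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        int err = 0;

        rcu_read_lock();
        mpath = mesh_path_lookup(dst, sdata);
        if (mpath)
                mesh_path_fix_nexthop(mpath, next_hop); /* sets MESH_PATH_FIXED */
        else
                err = -ENOENT;
        rcu_read_unlock();
        return err;
}
#endif
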
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
        struct mesh_path *mpath;
        struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
        mpath = node->mpath;
        hlist_del_rcu(p);
        if (free_leafs) {
                del_timer_sync(&mpath->timer);
                kfree(mpath);
        }
        kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
        struct mesh_path *mpath;
        struct mpath_node *node, *new_node;
        u32 hash_idx;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (new_node == NULL)
                return -ENOMEM;

        node = hlist_entry(p, struct mpath_node, list);
        mpath = node->mpath;
        new_node->mpath = mpath;
        hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
        hlist_add_head(&new_node->list,
                &newtbl->hash_buckets[hash_idx]);
        return 0;
}

int mesh_pathtbl_init(void)
{
        struct mesh_table *tbl_path, *tbl_mpp;
        int ret;

        tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!tbl_path)
                return -ENOMEM;
        tbl_path->free_node = &mesh_path_node_free;
        tbl_path->copy_node = &mesh_path_node_copy;
        tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
        tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
        if (!tbl_path->known_gates) {
                ret = -ENOMEM;
                goto free_path;
        }
        INIT_HLIST_HEAD(tbl_path->known_gates);

        tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!tbl_mpp) {
                ret = -ENOMEM;
                goto free_path;
        }
        tbl_mpp->free_node = &mesh_path_node_free;
        tbl_mpp->copy_node = &mesh_path_node_copy;
        tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
        tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
        if (!tbl_mpp->known_gates) {
                ret = -ENOMEM;
                goto free_mpp;
        }
        INIT_HLIST_HEAD(tbl_mpp->known_gates);

        /* Need no locking since this is during init */
        RCU_INIT_POINTER(mesh_paths, tbl_path);
        RCU_INIT_POINTER(mpp_paths, tbl_mpp);

        return 0;

free_mpp:
        mesh_table_free(tbl_mpp, true);
free_path:
        mesh_table_free(tbl_path, true);
        return ret;
}

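/*
 * mesh_pathtbl_init() pairs with mesh_pathtbl_unregister() below. A sketch
 * of how an init path might bracket the table lifetime; demo_init() and
 * demo_exit() are illustrative wrappers, not part of this file.
 */
#if 0   /* illustrative sketch only, not compiled */
static int __init demo_init(void)
{
        int err;

        err = mesh_pathtbl_init();      /* allocates mesh_paths and mpp_paths */
        if (err)
                return err;             /* -ENOMEM: nothing to clean up */
        /* ... register the rest of the mesh machinery ... */
        return 0;
}

static void __exit demo_exit(void)
{
        mesh_pathtbl_unregister();      /* frees both tables and their leafs */
}
#endif
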
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        for_each_mesh_entry(tbl, p, node, i) {
                if (node->mpath->sdata != sdata)
                        continue;
                mpath = node->mpath;
                if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
                    (!(mpath->flags & MESH_PATH_FIXED)) &&
                     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
                        mesh_path_del(mpath->dst, mpath->sdata);
        }
        rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
        /* no need for locking during exit path */
        mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
        mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}