Linux Kernel  3.7.1
translation-table.c
1 /* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2  *
3  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of version 2 of the GNU General Public
7  * License as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17  * 02110-1301, USA
18  */
19 
20 #include "main.h"
21 #include "translation-table.h"
22 #include "soft-interface.h"
23 #include "hard-interface.h"
24 #include "send.h"
25 #include "hash.h"
26 #include "originator.h"
27 #include "routing.h"
28 #include "bridge_loop_avoidance.h"
29 
30 #include <linux/crc16.h>
31 
32 static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
33  struct batadv_orig_node *orig_node);
34 static void batadv_tt_purge(struct work_struct *work);
35 static void
36 batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry);
37 static void batadv_tt_global_del(struct batadv_priv *bat_priv,
38  struct batadv_orig_node *orig_node,
39  const unsigned char *addr,
40  const char *message, bool roaming);
41 
42 /* returns 1 if they are the same mac addr */
43 static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
44 {
45  const void *data1 = container_of(node, struct batadv_tt_common_entry,
46  hash_entry);
47 
48  return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
49 }
50 
51 static void batadv_tt_start_timer(struct batadv_priv *bat_priv)
52 {
53  INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
54  queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
55  msecs_to_jiffies(5000));
56 }
57 
58 static struct batadv_tt_common_entry *
59 batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data)
60 {
61  struct hlist_head *head;
62  struct hlist_node *node;
63  struct batadv_tt_common_entry *tt_common_entry;
64  struct batadv_tt_common_entry *tt_common_entry_tmp = NULL;
65  uint32_t index;
66 
67  if (!hash)
68  return NULL;
69 
70  index = batadv_choose_orig(data, hash->size);
71  head = &hash->table[index];
72 
73  rcu_read_lock();
74  hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
75  if (!batadv_compare_eth(tt_common_entry, data))
76  continue;
77 
78  if (!atomic_inc_not_zero(&tt_common_entry->refcount))
79  continue;
80 
81  tt_common_entry_tmp = tt_common_entry;
82  break;
83  }
84  rcu_read_unlock();
85 
86  return tt_common_entry_tmp;
87 }
88 
89 static struct batadv_tt_local_entry *
90 batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data)
91 {
92  struct batadv_tt_common_entry *tt_common_entry;
93  struct batadv_tt_local_entry *tt_local_entry = NULL;
94 
95  tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, data);
96  if (tt_common_entry)
97  tt_local_entry = container_of(tt_common_entry,
98  struct batadv_tt_local_entry,
99  common);
100  return tt_local_entry;
101 }
102 
103 static struct batadv_tt_global_entry *
104 batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data)
105 {
106  struct batadv_tt_common_entry *tt_common_entry;
107  struct batadv_tt_global_entry *tt_global_entry = NULL;
108 
109  tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, data);
110  if (tt_common_entry)
111  tt_global_entry = container_of(tt_common_entry,
112  struct batadv_tt_global_entry,
113  common);
114  return tt_global_entry;
115 
116 }
117 
118 static void
119 batadv_tt_local_entry_free_ref(struct batadv_tt_local_entry *tt_local_entry)
120 {
121  if (atomic_dec_and_test(&tt_local_entry->common.refcount))
122  kfree_rcu(tt_local_entry, common.rcu);
123 }
124 
125 static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
126 {
127  struct batadv_tt_common_entry *tt_common_entry;
128  struct batadv_tt_global_entry *tt_global_entry;
129 
130  tt_common_entry = container_of(rcu, struct batadv_tt_common_entry, rcu);
131  tt_global_entry = container_of(tt_common_entry,
132  struct batadv_tt_global_entry, common);
133 
134  kfree(tt_global_entry);
135 }
136 
137 static void
138 batadv_tt_global_entry_free_ref(struct batadv_tt_global_entry *tt_global_entry)
139 {
140  if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
141  batadv_tt_global_del_orig_list(tt_global_entry);
142  call_rcu(&tt_global_entry->common.rcu,
143  batadv_tt_global_entry_free_rcu);
144  }
145 }
146 
147 static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
148 {
149  struct batadv_tt_orig_list_entry *orig_entry;
150 
151  orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
152  batadv_orig_node_free_ref(orig_entry->orig_node);
153  kfree(orig_entry);
154 }
155 
156 static void
157 batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
158 {
159  if (!atomic_dec_and_test(&orig_entry->refcount))
160  return;
161  /* to avoid race conditions, immediately decrease the tt counter */
162  atomic_dec(&orig_entry->orig_node->tt_size);
163  call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
164 }
165 
166 static void batadv_tt_local_event(struct batadv_priv *bat_priv,
167  const uint8_t *addr, uint8_t flags)
168 {
169  struct batadv_tt_change_node *tt_change_node, *entry, *safe;
170  bool event_removed = false;
171  bool del_op_requested, del_op_entry;
172 
173  tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
174 
175  if (!tt_change_node)
176  return;
177 
178  tt_change_node->change.flags = flags;
179  memcpy(tt_change_node->change.addr, addr, ETH_ALEN);
180 
181  del_op_requested = flags & BATADV_TT_CLIENT_DEL;
182 
183  /* check for ADD+DEL or DEL+ADD events */
184  spin_lock_bh(&bat_priv->tt.changes_list_lock);
185  list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
186  list) {
187  if (!batadv_compare_eth(entry->change.addr, addr))
188  continue;
189 
190  /* DEL+ADD in the same orig interval have no effect and can be
191  * removed to avoid silly behaviour on the receiver side. The
192  * other way around (ADD+DEL) can happen in case of roaming of
193  * a client still in the NEW state. Roaming of NEW clients is
194  * now possible due to automatic recognition of "temporary"
195  * clients
196  */
197  del_op_entry = entry->change.flags & BATADV_TT_CLIENT_DEL;
198  if (!del_op_requested && del_op_entry)
199  goto del;
200  if (del_op_requested && !del_op_entry)
201  goto del;
202  continue;
203 del:
204  list_del(&entry->list);
205  kfree(entry);
206  kfree(tt_change_node);
207  event_removed = true;
208  goto unlock;
209  }
210 
211  /* track the change in the OGM interval list */
212  list_add_tail(&tt_change_node->list, &bat_priv->tt.changes_list);
213 
214 unlock:
215  spin_unlock_bh(&bat_priv->tt.changes_list_lock);
216 
217  if (event_removed)
218  atomic_dec(&bat_priv->tt.local_changes);
219  else
220  atomic_inc(&bat_priv->tt.local_changes);
221 }
222 
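The compaction rule in the comment above boils down to: two queued events for the same client cancel out exactly when one of them is a deletion and the other is not. A standalone sketch of that check (illustration only, not part of this file; the helper name is hypothetical):

static bool tt_changes_cancel_out(uint8_t new_flags, uint8_t old_flags)
{
	bool new_is_del = new_flags & BATADV_TT_CLIENT_DEL;
	bool old_is_del = old_flags & BATADV_TT_CLIENT_DEL;

	/* ADD followed by DEL (or DEL followed by ADD) within one
	 * originator interval leaves the table unchanged, so both
	 * events can be dropped before they reach the OGM
	 */
	return new_is_del != old_is_del;
}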
223 int batadv_tt_len(int changes_num)
224 {
225  return changes_num * sizeof(struct batadv_tt_change);
226 }
227 
228 static int batadv_tt_local_init(struct batadv_priv *bat_priv)
229 {
230  if (bat_priv->tt.local_hash)
231  return 0;
232 
233  bat_priv->tt.local_hash = batadv_hash_new(1024);
234 
235  if (!bat_priv->tt.local_hash)
236  return -ENOMEM;
237 
238  return 0;
239 }
240 
241 void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
242  int ifindex)
243 {
244  struct batadv_priv *bat_priv = netdev_priv(soft_iface);
245  struct batadv_tt_local_entry *tt_local_entry = NULL;
246  struct batadv_tt_global_entry *tt_global_entry = NULL;
247  struct hlist_head *head;
248  struct hlist_node *node;
249  struct batadv_tt_orig_list_entry *orig_entry;
250  int hash_added;
251 
252  tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
253 
254  if (tt_local_entry) {
255  tt_local_entry->last_seen = jiffies;
256  /* possibly unset the BATADV_TT_CLIENT_PENDING flag */
257  tt_local_entry->common.flags &= ~BATADV_TT_CLIENT_PENDING;
258  goto out;
259  }
260 
261  tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
262  if (!tt_local_entry)
263  goto out;
264 
265  batadv_dbg(BATADV_DBG_TT, bat_priv,
266  "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
267  (uint8_t)atomic_read(&bat_priv->tt.vn));
268 
269  memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
270  tt_local_entry->common.flags = BATADV_NO_FLAGS;
271  if (batadv_is_wifi_iface(ifindex))
272  tt_local_entry->common.flags |= BATADV_TT_CLIENT_WIFI;
273  atomic_set(&tt_local_entry->common.refcount, 2);
274  tt_local_entry->last_seen = jiffies;
275  tt_local_entry->common.added_at = tt_local_entry->last_seen;
276 
277  /* the batman interface mac address should never be purged */
278  if (batadv_compare_eth(addr, soft_iface->dev_addr))
279  tt_local_entry->common.flags |= BATADV_TT_CLIENT_NOPURGE;
280 
281  /* The local entry has to be marked as NEW to avoid sending it in
282  * a full table response going out before the next ttvn increment
283  * (consistency check)
284  */
285  tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW;
286 
287  hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
288  batadv_choose_orig,
289  &tt_local_entry->common,
290  &tt_local_entry->common.hash_entry);
291 
292  if (unlikely(hash_added != 0)) {
293  /* remove the reference for the hash */
294  batadv_tt_local_entry_free_ref(tt_local_entry);
295  goto out;
296  }
297 
298  batadv_tt_local_event(bat_priv, addr, tt_local_entry->common.flags);
299 
300  /* remove address from global hash if present */
301  tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
302 
303  /* Check whether this is a roaming event */
304  if (tt_global_entry) {
305  /* These nodes are probably going to update their tt tables */
306  head = &tt_global_entry->orig_list;
307  rcu_read_lock();
308  hlist_for_each_entry_rcu(orig_entry, node, head, list) {
309  orig_entry->orig_node->tt_poss_change = true;
310 
311  batadv_send_roam_adv(bat_priv,
312  tt_global_entry->common.addr,
313  orig_entry->orig_node);
314  }
315  rcu_read_unlock();
316  /* The global entry has to be marked as ROAMING and
317  * has to be kept for consistency purpose
318  */
319  tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
320  tt_global_entry->roam_at = jiffies;
321  }
322 out:
323  if (tt_local_entry)
324  batadv_tt_local_entry_free_ref(tt_local_entry);
325  if (tt_global_entry)
326  batadv_tt_global_entry_free_ref(tt_global_entry);
327 }
328 
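batadv_tt_local_add() is the hook through which the soft interface registers every client MAC that appears locally. A minimal caller sketch (illustration only; the wrapper name is hypothetical, see soft-interface.c for the real call sites):

static void example_register_local_client(struct net_device *soft_iface,
					  const struct ethhdr *ethhdr,
					  int ifindex)
{
	/* announce the source MAC via TT starting from the next
	 * originator interval
	 */
	batadv_tt_local_add(soft_iface, ethhdr->h_source, ifindex);
}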
329 static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff,
330  int *packet_buff_len,
331  int min_packet_len,
332  int new_packet_len)
333 {
334  unsigned char *new_buff;
335 
336  new_buff = kmalloc(new_packet_len, GFP_ATOMIC);
337 
338  /* keep old buffer if kmalloc should fail */
339  if (new_buff) {
340  memcpy(new_buff, *packet_buff, min_packet_len);
341  kfree(*packet_buff);
342  *packet_buff = new_buff;
343  *packet_buff_len = new_packet_len;
344  }
345 }
346 
347 static void batadv_tt_prepare_packet_buff(struct batadv_priv *bat_priv,
348  unsigned char **packet_buff,
349  int *packet_buff_len,
350  int min_packet_len)
351 {
352  struct batadv_hard_iface *primary_if;
353  int req_len;
354 
355  primary_if = batadv_primary_if_get_selected(bat_priv);
356 
357  req_len = min_packet_len;
358  req_len += batadv_tt_len(atomic_read(&bat_priv->tt.local_changes));
359 
360  /* if we have too many changes for one packet don't send any
361  * and wait for the tt table request which will be fragmented
362  */
363  if ((!primary_if) || (req_len > primary_if->soft_iface->mtu))
364  req_len = min_packet_len;
365 
366  batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
367  min_packet_len, req_len);
368 
369  if (primary_if)
370  batadv_hardif_free_ref(primary_if);
371 }
372 
373 static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
374  unsigned char **packet_buff,
375  int *packet_buff_len,
376  int min_packet_len)
377 {
378  struct batadv_tt_change_node *entry, *safe;
379  int count = 0, tot_changes = 0, new_len;
380  unsigned char *tt_buff;
381 
382  batadv_tt_prepare_packet_buff(bat_priv, packet_buff,
383  packet_buff_len, min_packet_len);
384 
385  new_len = *packet_buff_len - min_packet_len;
386  tt_buff = *packet_buff + min_packet_len;
387 
388  if (new_len > 0)
389  tot_changes = new_len / batadv_tt_len(1);
390 
391  spin_lock_bh(&bat_priv->tt.changes_list_lock);
392  atomic_set(&bat_priv->tt.local_changes, 0);
393 
394  list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
395  list) {
396  if (count < tot_changes) {
397  memcpy(tt_buff + batadv_tt_len(count),
398  &entry->change, sizeof(struct batadv_tt_change));
399  count++;
400  }
401  list_del(&entry->list);
402  kfree(entry);
403  }
404  spin_unlock_bh(&bat_priv->tt.changes_list_lock);
405 
406  /* Keep the buffer for possible tt_request */
407  spin_lock_bh(&bat_priv->tt.last_changeset_lock);
408  kfree(bat_priv->tt.last_changeset);
409  bat_priv->tt.last_changeset_len = 0;
410  bat_priv->tt.last_changeset = NULL;
411  /* check whether this new OGM has no changes due to size problems */
412  if (new_len > 0) {
413  /* if kmalloc() fails we will reply with the full table
414  * instead of providing the diff
415  */
416  bat_priv->tt.last_changeset = kmalloc(new_len, GFP_ATOMIC);
417  if (bat_priv->tt.last_changeset) {
418  memcpy(bat_priv->tt.last_changeset, tt_buff, new_len);
419  bat_priv->tt.last_changeset_len = new_len;
420  }
421  }
422  spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
423 
424  return count;
425 }
426 
427 int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
428 {
429  struct net_device *net_dev = (struct net_device *)seq->private;
430  struct batadv_priv *bat_priv = netdev_priv(net_dev);
431  struct batadv_hashtable *hash = bat_priv->tt.local_hash;
432  struct batadv_tt_common_entry *tt_common_entry;
433  struct batadv_hard_iface *primary_if;
434  struct hlist_node *node;
435  struct hlist_head *head;
436  uint32_t i;
437  int ret = 0;
438 
439  primary_if = batadv_primary_if_get_selected(bat_priv);
440  if (!primary_if) {
441  ret = seq_printf(seq,
442  "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
443  net_dev->name);
444  goto out;
445  }
446 
447  if (primary_if->if_status != BATADV_IF_ACTIVE) {
448  ret = seq_printf(seq,
449  "BATMAN mesh %s disabled - primary interface not active\n",
450  net_dev->name);
451  goto out;
452  }
453 
454  seq_printf(seq,
455  "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
456  net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn));
457 
458  for (i = 0; i < hash->size; i++) {
459  head = &hash->table[i];
460 
461  rcu_read_lock();
462  hlist_for_each_entry_rcu(tt_common_entry, node,
463  head, hash_entry) {
464  seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
465  tt_common_entry->addr,
466  (tt_common_entry->flags &
467  BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
468  (tt_common_entry->flags &
469  BATADV_TT_CLIENT_NOPURGE ? 'P' : '.'),
470  (tt_common_entry->flags &
471  BATADV_TT_CLIENT_NEW ? 'N' : '.'),
472  (tt_common_entry->flags &
473  BATADV_TT_CLIENT_PENDING ? 'X' : '.'),
474  (tt_common_entry->flags &
475  BATADV_TT_CLIENT_WIFI ? 'W' : '.'));
476  }
477  rcu_read_unlock();
478  }
479 out:
480  if (primary_if)
481  batadv_hardif_free_ref(primary_if);
482  return ret;
483 }
484 
485 static void
486 batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
487  struct batadv_tt_local_entry *tt_local_entry,
488  uint16_t flags, const char *message)
489 {
490  batadv_tt_local_event(bat_priv, tt_local_entry->common.addr,
491  tt_local_entry->common.flags | flags);
492 
493  /* The local client has to be marked as "pending to be removed" but has
494  * to be kept in the table in order to send it in a full table
495  * response issued before the next ttvn increment (consistency check)
496  */
497  tt_local_entry->common.flags |= BATADV_TT_CLIENT_PENDING;
498 
499  batadv_dbg(BATADV_DBG_TT, bat_priv,
500  "Local tt entry (%pM) pending to be removed: %s\n",
501  tt_local_entry->common.addr, message);
502 }
503 
504 void batadv_tt_local_remove(struct batadv_priv *bat_priv, const uint8_t *addr,
505  const char *message, bool roaming)
506 {
507  struct batadv_tt_local_entry *tt_local_entry = NULL;
508  uint16_t flags;
509 
510  tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
511  if (!tt_local_entry)
512  goto out;
513 
514  flags = BATADV_TT_CLIENT_DEL;
515  if (roaming)
516  flags |= BATADV_TT_CLIENT_ROAM;
517 
518  batadv_tt_local_set_pending(bat_priv, tt_local_entry, flags, message);
519 out:
520  if (tt_local_entry)
521  batadv_tt_local_entry_free_ref(tt_local_entry);
522 }
523 
524 static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
525  struct hlist_head *head)
526 {
527  struct batadv_tt_local_entry *tt_local_entry;
528  struct batadv_tt_common_entry *tt_common_entry;
529  struct hlist_node *node, *node_tmp;
530 
531  hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
532  hash_entry) {
533  tt_local_entry = container_of(tt_common_entry,
534  struct batadv_tt_local_entry,
535  common);
536  if (tt_local_entry->common.flags & BATADV_TT_CLIENT_NOPURGE)
537  continue;
538 
539  /* entry already marked for deletion */
540  if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
541  continue;
542 
543  if (!batadv_has_timed_out(tt_local_entry->last_seen,
544  BATADV_TT_LOCAL_TIMEOUT))
545  continue;
546 
547  batadv_tt_local_set_pending(bat_priv, tt_local_entry,
548  BATADV_TT_CLIENT_DEL, "timed out");
549  }
550 }
551 
552 static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
553 {
554  struct batadv_hashtable *hash = bat_priv->tt.local_hash;
555  struct hlist_head *head;
556  spinlock_t *list_lock; /* protects write access to the hash lists */
557  uint32_t i;
558 
559  for (i = 0; i < hash->size; i++) {
560  head = &hash->table[i];
561  list_lock = &hash->list_locks[i];
562 
563  spin_lock_bh(list_lock);
564  batadv_tt_local_purge_list(bat_priv, head);
565  spin_unlock_bh(list_lock);
566  }
567 
568 }
569 
570 static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
571 {
572  struct batadv_hashtable *hash;
573  spinlock_t *list_lock; /* protects write access to the hash lists */
574  struct batadv_tt_common_entry *tt_common_entry;
575  struct batadv_tt_local_entry *tt_local;
576  struct hlist_node *node, *node_tmp;
577  struct hlist_head *head;
578  uint32_t i;
579 
580  if (!bat_priv->tt.local_hash)
581  return;
582 
583  hash = bat_priv->tt.local_hash;
584 
585  for (i = 0; i < hash->size; i++) {
586  head = &hash->table[i];
587  list_lock = &hash->list_locks[i];
588 
589  spin_lock_bh(list_lock);
590  hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
591  head, hash_entry) {
592  hlist_del_rcu(node);
593  tt_local = container_of(tt_common_entry,
594  struct batadv_tt_local_entry,
595  common);
596  batadv_tt_local_entry_free_ref(tt_local);
597  }
598  spin_unlock_bh(list_lock);
599  }
600 
601  batadv_hash_destroy(hash);
602 
603  bat_priv->tt.local_hash = NULL;
604 }
605 
606 static int batadv_tt_global_init(struct batadv_priv *bat_priv)
607 {
608  if (bat_priv->tt.global_hash)
609  return 0;
610 
611  bat_priv->tt.global_hash = batadv_hash_new(1024);
612 
613  if (!bat_priv->tt.global_hash)
614  return -ENOMEM;
615 
616  return 0;
617 }
618 
619 static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
620 {
621  struct batadv_tt_change_node *entry, *safe;
622 
623  spin_lock_bh(&bat_priv->tt.changes_list_lock);
624 
625  list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
626  list) {
627  list_del(&entry->list);
628  kfree(entry);
629  }
630 
631  atomic_set(&bat_priv->tt.local_changes, 0);
632  spin_unlock_bh(&bat_priv->tt.changes_list_lock);
633 }
634 
635 /* retrieves the orig_tt_list_entry belonging to orig_node from the
636  * batadv_tt_global_entry list
637  *
638  * returns it with an increased refcounter, NULL if not found
639  */
640 static struct batadv_tt_orig_list_entry *
641 batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
642  const struct batadv_orig_node *orig_node)
643 {
644  struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL;
645  const struct hlist_head *head;
646  struct hlist_node *node;
647 
648  rcu_read_lock();
649  head = &entry->orig_list;
650  hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
651  if (tmp_orig_entry->orig_node != orig_node)
652  continue;
653  if (!atomic_inc_not_zero(&tmp_orig_entry->refcount))
654  continue;
655 
656  orig_entry = tmp_orig_entry;
657  break;
658  }
659  rcu_read_unlock();
660 
661  return orig_entry;
662 }
663 
664 /* find out if an orig_node is already in the list of a tt_global_entry.
665  * returns true if found, false otherwise
666  */
667 static bool
668 batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
669  const struct batadv_orig_node *orig_node)
670 {
671  struct batadv_tt_orig_list_entry *orig_entry;
672  bool found = false;
673 
674  orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node);
675  if (orig_entry) {
676  found = true;
677  batadv_tt_orig_list_entry_free_ref(orig_entry);
678  }
679 
680  return found;
681 }
682 
683 static void
684 batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
685  struct batadv_orig_node *orig_node, int ttvn)
686 {
687  struct batadv_tt_orig_list_entry *orig_entry;
688 
689  orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
690  if (orig_entry) {
691  /* refresh the ttvn: the current value could be a bogus one that
692  * was added during a "temporary client detection"
693  */
694  orig_entry->ttvn = ttvn;
695  goto out;
696  }
697 
698  orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
699  if (!orig_entry)
700  goto out;
701 
702  INIT_HLIST_NODE(&orig_entry->list);
703  atomic_inc(&orig_node->refcount);
704  atomic_inc(&orig_node->tt_size);
705  orig_entry->orig_node = orig_node;
706  orig_entry->ttvn = ttvn;
707  atomic_set(&orig_entry->refcount, 2);
708 
709  spin_lock_bh(&tt_global->list_lock);
710  hlist_add_head_rcu(&orig_entry->list,
711  &tt_global->orig_list);
712  spin_unlock_bh(&tt_global->list_lock);
713 out:
714  if (orig_entry)
715  batadv_tt_orig_list_entry_free_ref(orig_entry);
716 }
717 
718 /* caller must hold orig_node refcount */
719 int batadv_tt_global_add(struct batadv_priv *bat_priv,
720  struct batadv_orig_node *orig_node,
721  const unsigned char *tt_addr, uint8_t flags,
722  uint8_t ttvn)
723 {
724  struct batadv_tt_global_entry *tt_global_entry = NULL;
725  int ret = 0;
726  int hash_added;
727  struct batadv_tt_common_entry *common;
728 
729  tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr);
730 
731  if (!tt_global_entry) {
732  tt_global_entry = kzalloc(sizeof(*tt_global_entry), GFP_ATOMIC);
733  if (!tt_global_entry)
734  goto out;
735 
736  common = &tt_global_entry->common;
737  memcpy(common->addr, tt_addr, ETH_ALEN);
738 
739  common->flags = flags;
740  tt_global_entry->roam_at = 0;
741  atomic_set(&common->refcount, 2);
742  common->added_at = jiffies;
743 
744  INIT_HLIST_HEAD(&tt_global_entry->orig_list);
745  spin_lock_init(&tt_global_entry->list_lock);
746 
747  hash_added = batadv_hash_add(bat_priv->tt.global_hash,
748  batadv_compare_tt,
749  batadv_choose_orig, common,
750  &common->hash_entry);
751 
752  if (unlikely(hash_added != 0)) {
753  /* remove the reference for the hash */
754  batadv_tt_global_entry_free_ref(tt_global_entry);
755  goto out_remove;
756  }
757  } else {
758  /* If there is already a global entry, we can use this one for
759  * our processing.
760  * But if we are trying to add a temporary client we can exit
761  * directly because the temporary information should never
762  * override any already known client state (whatever it is)
763  */
764  if (flags & BATADV_TT_CLIENT_TEMP)
765  goto out;
766 
767  /* if the client was temporary added before receiving the first
768  * OGM announcing it, we have to clear the TEMP flag
769  */
770  tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_TEMP;
771 
772  /* the change can carry possible "attribute" flags like the
773  * TT_CLIENT_WIFI, therefore they have to be copied in the
774  * client entry
775  */
776  tt_global_entry->common.flags |= flags;
777 
778  /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
779  * one originator left in the list and we previously received a
780  * delete + roaming change for this originator.
781  *
782  * We should first delete the old originator before adding the
783  * new one.
784  */
785  if (tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM) {
786  batadv_tt_global_del_orig_list(tt_global_entry);
787  tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM;
788  tt_global_entry->roam_at = 0;
789  }
790  }
791  /* add the new orig_entry (if needed) or update it */
792  batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
793 
794  batadv_dbg(BATADV_DBG_TT, bat_priv,
795  "Creating new global tt entry: %pM (via %pM)\n",
796  tt_global_entry->common.addr, orig_node->orig);
797 
798 out_remove:
799  /* remove address from local hash if present */
800  batadv_tt_local_remove(bat_priv, tt_global_entry->common.addr,
801  "global tt received",
802  flags & BATADV_TT_CLIENT_ROAM);
803  ret = 1;
804 out:
805  if (tt_global_entry)
806  batadv_tt_global_entry_free_ref(tt_global_entry);
807  return ret;
808 }
809 
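A short usage sketch for batadv_tt_global_add() (illustration only; the caller name is hypothetical): the function returns 0 when the entry could not be stored and 1 on success, which is how _batadv_tt_update_changes() later in this file decides whether to abort an update.

static void example_store_announced_client(struct batadv_priv *bat_priv,
					   struct batadv_orig_node *orig_node,
					   const struct batadv_tt_change *change,
					   uint8_t ttvn)
{
	/* a zero return means the global entry could not be created */
	if (!batadv_tt_global_add(bat_priv, orig_node, change->addr,
				  change->flags, ttvn))
		pr_debug("could not store global tt entry\n");
}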
810 /* print all orig nodes who announce the address for this global entry.
811  * it is assumed that the caller holds rcu_read_lock();
812  */
813 static void
814 batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
815  struct seq_file *seq)
816 {
817  struct hlist_head *head;
818  struct hlist_node *node;
819  struct batadv_tt_orig_list_entry *orig_entry;
820  struct batadv_tt_common_entry *tt_common_entry;
821  uint16_t flags;
822  uint8_t last_ttvn;
823 
824  tt_common_entry = &tt_global_entry->common;
825 
826  head = &tt_global_entry->orig_list;
827 
828  hlist_for_each_entry_rcu(orig_entry, node, head, list) {
829  flags = tt_common_entry->flags;
830  last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
831  seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c%c]\n",
832  tt_global_entry->common.addr, orig_entry->ttvn,
833  orig_entry->orig_node->orig, last_ttvn,
834  (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
835  (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
836  (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
837  }
838 }
839 
840 int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
841 {
842  struct net_device *net_dev = (struct net_device *)seq->private;
843  struct batadv_priv *bat_priv = netdev_priv(net_dev);
844  struct batadv_hashtable *hash = bat_priv->tt.global_hash;
845  struct batadv_tt_common_entry *tt_common_entry;
846  struct batadv_tt_global_entry *tt_global;
847  struct batadv_hard_iface *primary_if;
848  struct hlist_node *node;
849  struct hlist_head *head;
850  uint32_t i;
851  int ret = 0;
852 
853  primary_if = batadv_primary_if_get_selected(bat_priv);
854  if (!primary_if) {
855  ret = seq_printf(seq,
856  "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
857  net_dev->name);
858  goto out;
859  }
860 
861  if (primary_if->if_status != BATADV_IF_ACTIVE) {
862  ret = seq_printf(seq,
863  "BATMAN mesh %s disabled - primary interface not active\n",
864  net_dev->name);
865  goto out;
866  }
867 
868  seq_printf(seq,
869  "Globally announced TT entries received via the mesh %s\n",
870  net_dev->name);
871  seq_printf(seq, " %-13s %s %-15s %s %s\n",
872  "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");
873 
874  for (i = 0; i < hash->size; i++) {
875  head = &hash->table[i];
876 
877  rcu_read_lock();
878  hlist_for_each_entry_rcu(tt_common_entry, node,
879  head, hash_entry) {
880  tt_global = container_of(tt_common_entry,
881  struct batadv_tt_global_entry,
882  common);
883  batadv_tt_global_print_entry(tt_global, seq);
884  }
885  rcu_read_unlock();
886  }
887 out:
888  if (primary_if)
889  batadv_hardif_free_ref(primary_if);
890  return ret;
891 }
892 
893 /* deletes the orig list of a tt_global_entry */
894 static void
895 batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
896 {
897  struct hlist_head *head;
898  struct hlist_node *node, *safe;
899  struct batadv_tt_orig_list_entry *orig_entry;
900 
901  spin_lock_bh(&tt_global_entry->list_lock);
902  head = &tt_global_entry->orig_list;
903  hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
904  hlist_del_rcu(node);
905  batadv_tt_orig_list_entry_free_ref(orig_entry);
906  }
907  spin_unlock_bh(&tt_global_entry->list_lock);
908 
909 }
910 
911 static void
912 batadv_tt_global_del_orig_entry(struct batadv_priv *bat_priv,
913  struct batadv_tt_global_entry *tt_global_entry,
914  struct batadv_orig_node *orig_node,
915  const char *message)
916 {
917  struct hlist_head *head;
918  struct hlist_node *node, *safe;
919  struct batadv_tt_orig_list_entry *orig_entry;
920 
921  spin_lock_bh(&tt_global_entry->list_lock);
922  head = &tt_global_entry->orig_list;
923  hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
924  if (orig_entry->orig_node == orig_node) {
925  batadv_dbg(BATADV_DBG_TT, bat_priv,
926  "Deleting %pM from global tt entry %pM: %s\n",
927  orig_node->orig,
928  tt_global_entry->common.addr, message);
929  hlist_del_rcu(node);
930  batadv_tt_orig_list_entry_free_ref(orig_entry);
931  }
932  }
933  spin_unlock_bh(&tt_global_entry->list_lock);
934 }
935 
936 static void
937 batadv_tt_global_del_struct(struct batadv_priv *bat_priv,
938  struct batadv_tt_global_entry *tt_global_entry,
939  const char *message)
940 {
941  batadv_dbg(BATADV_DBG_TT, bat_priv,
942  "Deleting global tt entry %pM: %s\n",
943  tt_global_entry->common.addr, message);
944 
945  batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
946  batadv_choose_orig, tt_global_entry->common.addr);
947  batadv_tt_global_entry_free_ref(tt_global_entry);
948 
949 }
950 
951 /* If the client is to be deleted, we check if it is the last originator entry
952  * within the tt_global entry. If yes, we set the BATADV_TT_CLIENT_ROAM flag and the
953  * timer, otherwise we simply remove the originator scheduled for deletion.
954  */
955 static void
956 batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
957  struct batadv_tt_global_entry *tt_global_entry,
958  struct batadv_orig_node *orig_node,
959  const char *message)
960 {
961  bool last_entry = true;
962  struct hlist_head *head;
963  struct hlist_node *node;
964  struct batadv_tt_orig_list_entry *orig_entry;
965 
966  /* no local entry exists, case 1:
967  * Check if this is the last one or if other entries exist.
968  */
969 
970  rcu_read_lock();
971  head = &tt_global_entry->orig_list;
972  hlist_for_each_entry_rcu(orig_entry, node, head, list) {
973  if (orig_entry->orig_node != orig_node) {
974  last_entry = false;
975  break;
976  }
977  }
978  rcu_read_unlock();
979 
980  if (last_entry) {
981  /* it's the last one, mark for roaming. */
982  tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
983  tt_global_entry->roam_at = jiffies;
984  } else
985  /* there is another entry, we can simply delete this
986  * one and can still use the other one.
987  */
988  batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
989  orig_node, message);
990 }
991 
992 
993 
994 static void batadv_tt_global_del(struct batadv_priv *bat_priv,
995  struct batadv_orig_node *orig_node,
996  const unsigned char *addr,
997  const char *message, bool roaming)
998 {
999  struct batadv_tt_global_entry *tt_global_entry = NULL;
1000  struct batadv_tt_local_entry *local_entry = NULL;
1001 
1002  tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
1003  if (!tt_global_entry)
1004  goto out;
1005 
1006  if (!roaming) {
1007  batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
1008  orig_node, message);
1009 
1010  if (hlist_empty(&tt_global_entry->orig_list))
1011  batadv_tt_global_del_struct(bat_priv, tt_global_entry,
1012  message);
1013 
1014  goto out;
1015  }
1016 
1017  /* if we are deleting a global entry due to a roam
1018  * event, there are two possibilities:
1019  * 1) the client roamed from node A to node B => if there
1020  * is only one originator left for this client, we mark
1021  * it with BATADV_TT_CLIENT_ROAM, we start a timer and we
1022  * wait for node B to claim it. In case of timeout
1023  * the entry is purged.
1024  *
1025  * If there are other originators left, we directly delete
1026  * the originator.
1027  * 2) the client roamed to us => we can directly delete
1028  * the global entry, since it is useless now.
1029  */
1030  local_entry = batadv_tt_local_hash_find(bat_priv,
1031  tt_global_entry->common.addr);
1032  if (local_entry) {
1033  /* local entry exists, case 2: client roamed to us. */
1034  batadv_tt_global_del_orig_list(tt_global_entry);
1035  batadv_tt_global_del_struct(bat_priv, tt_global_entry, message);
1036  } else
1037  /* no local entry exists, case 1: check for roaming */
1038  batadv_tt_global_del_roaming(bat_priv, tt_global_entry,
1039  orig_node, message);
1040 
1041 
1042 out:
1043  if (tt_global_entry)
1044  batadv_tt_global_entry_free_ref(tt_global_entry);
1045  if (local_entry)
1046  batadv_tt_local_entry_free_ref(local_entry);
1047 }
1048 
1049 void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
1050  struct batadv_orig_node *orig_node,
1051  const char *message)
1052 {
1053  struct batadv_tt_global_entry *tt_global;
1054  struct batadv_tt_common_entry *tt_common_entry;
1055  uint32_t i;
1056  struct batadv_hashtable *hash = bat_priv->tt.global_hash;
1057  struct hlist_node *node, *safe;
1058  struct hlist_head *head;
1059  spinlock_t *list_lock; /* protects write access to the hash lists */
1060 
1061  if (!hash)
1062  return;
1063 
1064  for (i = 0; i < hash->size; i++) {
1065  head = &hash->table[i];
1066  list_lock = &hash->list_locks[i];
1067 
1068  spin_lock_bh(list_lock);
1069  hlist_for_each_entry_safe(tt_common_entry, node, safe,
1070  head, hash_entry) {
1071  tt_global = container_of(tt_common_entry,
1072  struct batadv_tt_global_entry,
1073  common);
1074 
1075  batadv_tt_global_del_orig_entry(bat_priv, tt_global,
1076  orig_node, message);
1077 
1078  if (hlist_empty(&tt_global->orig_list)) {
1079  batadv_dbg(BATADV_DBG_TT, bat_priv,
1080  "Deleting global tt entry %pM: %s\n",
1081  tt_global->common.addr, message);
1082  hlist_del_rcu(node);
1083  batadv_tt_global_entry_free_ref(tt_global);
1084  }
1085  }
1086  spin_unlock_bh(list_lock);
1087  }
1088  orig_node->tt_initialised = false;
1089 }
1090 
1091 static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
1092  char **msg)
1093 {
1094  bool purge = false;
1095  unsigned long roam_timeout = BATADV_TT_CLIENT_ROAM_TIMEOUT;
1096  unsigned long temp_timeout = BATADV_TT_CLIENT_TEMP_TIMEOUT;
1097 
1098  if ((tt_global->common.flags & BATADV_TT_CLIENT_ROAM) &&
1099  batadv_has_timed_out(tt_global->roam_at, roam_timeout)) {
1100  purge = true;
1101  *msg = "Roaming timeout\n";
1102  }
1103 
1104  if ((tt_global->common.flags & BATADV_TT_CLIENT_TEMP) &&
1105  batadv_has_timed_out(tt_global->common.added_at, temp_timeout)) {
1106  purge = true;
1107  *msg = "Temporary client timeout\n";
1108  }
1109 
1110  return purge;
1111 }
1112 
1113 static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
1114 {
1115  struct batadv_hashtable *hash = bat_priv->tt.global_hash;
1116  struct hlist_head *head;
1117  struct hlist_node *node, *node_tmp;
1118  spinlock_t *list_lock; /* protects write access to the hash lists */
1119  uint32_t i;
1120  char *msg = NULL;
1121  struct batadv_tt_common_entry *tt_common;
1122  struct batadv_tt_global_entry *tt_global;
1123 
1124  for (i = 0; i < hash->size; i++) {
1125  head = &hash->table[i];
1126  list_lock = &hash->list_locks[i];
1127 
1128  spin_lock_bh(list_lock);
1129  hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
1130  hash_entry) {
1131  tt_global = container_of(tt_common,
1132  struct batadv_tt_global_entry,
1133  common);
1134 
1135  if (!batadv_tt_global_to_purge(tt_global, &msg))
1136  continue;
1137 
1138  batadv_dbg(BATADV_DBG_TT, bat_priv,
1139  "Deleting global tt entry (%pM): %s\n",
1140  tt_global->common.addr, msg);
1141 
1142  hlist_del_rcu(node);
1143 
1144  batadv_tt_global_entry_free_ref(tt_global);
1145  }
1146  spin_unlock_bh(list_lock);
1147  }
1148 }
1149 
1150 static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
1151 {
1152  struct batadv_hashtable *hash;
1153  spinlock_t *list_lock; /* protects write access to the hash lists */
1154  struct batadv_tt_common_entry *tt_common_entry;
1155  struct batadv_tt_global_entry *tt_global;
1156  struct hlist_node *node, *node_tmp;
1157  struct hlist_head *head;
1158  uint32_t i;
1159 
1160  if (!bat_priv->tt.global_hash)
1161  return;
1162 
1163  hash = bat_priv->tt.global_hash;
1164 
1165  for (i = 0; i < hash->size; i++) {
1166  head = &hash->table[i];
1167  list_lock = &hash->list_locks[i];
1168 
1169  spin_lock_bh(list_lock);
1170  hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
1171  head, hash_entry) {
1172  hlist_del_rcu(node);
1173  tt_global = container_of(tt_common_entry,
1174  struct batadv_tt_global_entry,
1175  common);
1176  batadv_tt_global_entry_free_ref(tt_global);
1177  }
1178  spin_unlock_bh(list_lock);
1179  }
1180 
1181  batadv_hash_destroy(hash);
1182 
1183  bat_priv->tt.global_hash = NULL;
1184 }
1185 
1186 static bool
1187 _batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry,
1188  struct batadv_tt_global_entry *tt_global_entry)
1189 {
1190  bool ret = false;
1191 
1192  if (tt_local_entry->common.flags & BATADV_TT_CLIENT_WIFI &&
1193  tt_global_entry->common.flags & BATADV_TT_CLIENT_WIFI)
1194  ret = true;
1195 
1196  return ret;
1197 }
1198 
1199 struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
1200  const uint8_t *src,
1201  const uint8_t *addr)
1202 {
1203  struct batadv_tt_local_entry *tt_local_entry = NULL;
1204  struct batadv_tt_global_entry *tt_global_entry = NULL;
1205  struct batadv_orig_node *orig_node = NULL;
1206  struct batadv_neigh_node *router = NULL;
1207  struct hlist_head *head;
1208  struct hlist_node *node;
1209  struct batadv_tt_orig_list_entry *orig_entry;
1210  int best_tq;
1211 
1212  if (src && atomic_read(&bat_priv->ap_isolation)) {
1213  tt_local_entry = batadv_tt_local_hash_find(bat_priv, src);
1214  if (!tt_local_entry)
1215  goto out;
1216  }
1217 
1218  tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
1219  if (!tt_global_entry)
1220  goto out;
1221 
1222  /* check whether the clients should not communicate due to AP
1223  * isolation
1224  */
1225  if (tt_local_entry &&
1226  _batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
1227  goto out;
1228 
1229  best_tq = 0;
1230 
1231  rcu_read_lock();
1232  head = &tt_global_entry->orig_list;
1233  hlist_for_each_entry_rcu(orig_entry, node, head, list) {
1234  router = batadv_orig_node_get_router(orig_entry->orig_node);
1235  if (!router)
1236  continue;
1237 
1238  if (router->tq_avg > best_tq) {
1239  orig_node = orig_entry->orig_node;
1240  best_tq = router->tq_avg;
1241  }
1242  batadv_neigh_node_free_ref(router);
1243  }
1244  /* found anything? */
1245  if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
1246  orig_node = NULL;
1247  rcu_read_unlock();
1248 out:
1249  if (tt_global_entry)
1250  batadv_tt_global_entry_free_ref(tt_global_entry);
1251  if (tt_local_entry)
1252  batadv_tt_local_entry_free_ref(tt_local_entry);
1253 
1254  return orig_node;
1255 }
1256 
1257 /* Calculates the checksum of the local table of a given orig_node */
1258 static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
1259  struct batadv_orig_node *orig_node)
1260 {
1261  uint16_t total = 0, total_one;
1262  struct batadv_hashtable *hash = bat_priv->tt.global_hash;
1263  struct batadv_tt_common_entry *tt_common;
1264  struct batadv_tt_global_entry *tt_global;
1265  struct hlist_node *node;
1266  struct hlist_head *head;
1267  uint32_t i;
1268  int j;
1269 
1270  for (i = 0; i < hash->size; i++) {
1271  head = &hash->table[i];
1272 
1273  rcu_read_lock();
1274  hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
1275  tt_global = container_of(tt_common,
1276  struct batadv_tt_global_entry,
1277  common);
1278  /* Roaming clients are in the global table for
1279  * consistency only. They don't have to be
1280  * taken into account while computing the
1281  * global crc
1282  */
1283  if (tt_common->flags & BATADV_TT_CLIENT_ROAM)
1284  continue;
1285  /* Temporary clients have not been announced yet, so
1286  * they have to be skipped while computing the global
1287  * crc
1288  */
1289  if (tt_common->flags & BATADV_TT_CLIENT_TEMP)
1290  continue;
1291 
1292  /* find out if this global entry is announced by this
1293  * originator
1294  */
1295  if (!batadv_tt_global_entry_has_orig(tt_global,
1296  orig_node))
1297  continue;
1298 
1299  total_one = 0;
1300  for (j = 0; j < ETH_ALEN; j++)
1301  total_one = crc16_byte(total_one,
1302  tt_common->addr[j]);
1303  total ^= total_one;
1304  }
1305  rcu_read_unlock();
1306  }
1307 
1308  return total;
1309 }
1310 
1311 /* Calculates the checksum of the local table */
1312 static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
1313 {
1314  uint16_t total = 0, total_one;
1315  struct batadv_hashtable *hash = bat_priv->tt.local_hash;
1316  struct batadv_tt_common_entry *tt_common;
1317  struct hlist_node *node;
1318  struct hlist_head *head;
1319  uint32_t i;
1320  int j;
1321 
1322  for (i = 0; i < hash->size; i++) {
1323  head = &hash->table[i];
1324 
1325  rcu_read_lock();
1326  hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
1327  /* not yet committed clients must not be taken into
1328  * account while computing the CRC
1329  */
1330  if (tt_common->flags & BATADV_TT_CLIENT_NEW)
1331  continue;
1332  total_one = 0;
1333  for (j = 0; j < ETH_ALEN; j++)
1334  total_one = crc16_byte(total_one,
1335  tt_common->addr[j]);
1336  total ^= total_one;
1337  }
1338  rcu_read_unlock();
1339  }
1340 
1341  return total;
1342 }
1343 
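Both CRC helpers fold a per-client CRC16 into the running total with XOR, so the result is independent of the order in which the hash buckets are visited. The per-entry step in isolation (illustration only, hypothetical helper name):

static uint16_t tt_crc_one_client(const uint8_t *addr)
{
	uint16_t crc = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		crc = crc16_byte(crc, addr[i]);

	return crc;
}

/* per table: total ^= tt_crc_one_client(tt_common->addr) for every client
 * that is actually announced (the local CRC skips NEW clients, the global
 * CRC skips ROAM and TEMP clients, as shown above)
 */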
1344 static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
1345 {
1346  struct batadv_tt_req_node *node, *safe;
1347 
1348  spin_lock_bh(&bat_priv->tt.req_list_lock);
1349 
1350  list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
1351  list_del(&node->list);
1352  kfree(node);
1353  }
1354 
1355  spin_unlock_bh(&bat_priv->tt.req_list_lock);
1356 }
1357 
1358 static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
1359  struct batadv_orig_node *orig_node,
1360  const unsigned char *tt_buff,
1361  uint8_t tt_num_changes)
1362 {
1363  uint16_t tt_buff_len = batadv_tt_len(tt_num_changes);
1364 
1365  /* Replace the old buffer only if I received something in the
1366  * last OGM (the OGM could carry no changes)
1367  */
1368  spin_lock_bh(&orig_node->tt_buff_lock);
1369  if (tt_buff_len > 0) {
1370  kfree(orig_node->tt_buff);
1371  orig_node->tt_buff_len = 0;
1372  orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
1373  if (orig_node->tt_buff) {
1374  memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
1375  orig_node->tt_buff_len = tt_buff_len;
1376  }
1377  }
1378  spin_unlock_bh(&orig_node->tt_buff_lock);
1379 }
1380 
1381 static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
1382 {
1383  struct batadv_tt_req_node *node, *safe;
1384 
1385  spin_lock_bh(&bat_priv->tt.req_list_lock);
1386  list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
1387  if (batadv_has_timed_out(node->issued_at,
1388  BATADV_TT_REQUEST_TIMEOUT)) {
1389  list_del(&node->list);
1390  kfree(node);
1391  }
1392  }
1393  spin_unlock_bh(&bat_priv->tt.req_list_lock);
1394 }
1395 
1396 /* returns the pointer to the new tt_req_node struct if no request
1397  * has already been issued for this orig_node, NULL otherwise
1398  */
1399 static struct batadv_tt_req_node *
1400 batadv_new_tt_req_node(struct batadv_priv *bat_priv,
1401  struct batadv_orig_node *orig_node)
1402 {
1403  struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
1404 
1405  spin_lock_bh(&bat_priv->tt.req_list_lock);
1406  list_for_each_entry(tt_req_node_tmp, &bat_priv->tt.req_list, list) {
1407  if (batadv_compare_eth(tt_req_node_tmp, orig_node) &&
1408  !batadv_has_timed_out(tt_req_node_tmp->issued_at,
1409  BATADV_TT_REQUEST_TIMEOUT))
1410  goto unlock;
1411  }
1412 
1413  tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
1414  if (!tt_req_node)
1415  goto unlock;
1416 
1417  memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
1418  tt_req_node->issued_at = jiffies;
1419 
1420  list_add(&tt_req_node->list, &bat_priv->tt.req_list);
1421 unlock:
1422  spin_unlock_bh(&bat_priv->tt.req_list_lock);
1423  return tt_req_node;
1424 }
1425 
1426 /* data_ptr is useless here, but has to be kept to respect the prototype */
1427 static int batadv_tt_local_valid_entry(const void *entry_ptr,
1428  const void *data_ptr)
1429 {
1430  const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
1431 
1432  if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW)
1433  return 0;
1434  return 1;
1435 }
1436 
1437 static int batadv_tt_global_valid(const void *entry_ptr,
1438  const void *data_ptr)
1439 {
1440  const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
1441  const struct batadv_tt_global_entry *tt_global_entry;
1442  const struct batadv_orig_node *orig_node = data_ptr;
1443 
1444  if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM ||
1445  tt_common_entry->flags & BATADV_TT_CLIENT_TEMP)
1446  return 0;
1447 
1448  tt_global_entry = container_of(tt_common_entry,
1449  struct batadv_tt_global_entry,
1450  common);
1451 
1452  return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node);
1453 }
1454 
1455 static struct sk_buff *
1456 batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
1457  struct batadv_hashtable *hash,
1458  struct batadv_hard_iface *primary_if,
1459  int (*valid_cb)(const void *, const void *),
1460  void *cb_data)
1461 {
1462  struct batadv_tt_common_entry *tt_common_entry;
1463  struct batadv_tt_query_packet *tt_response;
1464  struct batadv_tt_change *tt_change;
1465  struct hlist_node *node;
1466  struct hlist_head *head;
1467  struct sk_buff *skb = NULL;
1468  uint16_t tt_tot, tt_count;
1469  ssize_t tt_query_size = sizeof(struct batadv_tt_query_packet);
1470  uint32_t i;
1471  size_t len;
1472 
1473  if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
1474  tt_len = primary_if->soft_iface->mtu - tt_query_size;
1475  tt_len -= tt_len % sizeof(struct batadv_tt_change);
1476  }
1477  tt_tot = tt_len / sizeof(struct batadv_tt_change);
1478 
1479  len = tt_query_size + tt_len;
1480  skb = dev_alloc_skb(len + ETH_HLEN);
1481  if (!skb)
1482  goto out;
1483 
1484  skb_reserve(skb, ETH_HLEN);
1485  tt_response = (struct batadv_tt_query_packet *)skb_put(skb, len);
1486  tt_response->ttvn = ttvn;
1487 
1488  tt_change = (struct batadv_tt_change *)(skb->data + tt_query_size);
1489  tt_count = 0;
1490 
1491  rcu_read_lock();
1492  for (i = 0; i < hash->size; i++) {
1493  head = &hash->table[i];
1494 
1495  hlist_for_each_entry_rcu(tt_common_entry, node,
1496  head, hash_entry) {
1497  if (tt_count == tt_tot)
1498  break;
1499 
1500  if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
1501  continue;
1502 
1503  memcpy(tt_change->addr, tt_common_entry->addr,
1504  ETH_ALEN);
1505  tt_change->flags = tt_common_entry->flags;
1506 
1507  tt_count++;
1508  tt_change++;
1509  }
1510  }
1511  rcu_read_unlock();
1512 
1513  /* store in the message the number of entries we have successfully
1514  * copied
1515  */
1516  tt_response->tt_data = htons(tt_count);
1517 
1518 out:
1519  return skb;
1520 }
1521 
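batadv_tt_response_fill_table() clamps tt_len so that the query header plus the entries still fit into the primary interface MTU, rounding down to a whole number of tt_change records. The same arithmetic in isolation (illustration only, hypothetical helper name):

static uint16_t example_max_tt_entries(unsigned int mtu)
{
	size_t hdr_len = sizeof(struct batadv_tt_query_packet);
	size_t change_len = sizeof(struct batadv_tt_change);

	if (mtu <= hdr_len)
		return 0;

	/* number of whole tt_change records that still fit */
	return (mtu - hdr_len) / change_len;
}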
1522 static int batadv_send_tt_request(struct batadv_priv *bat_priv,
1523  struct batadv_orig_node *dst_orig_node,
1524  uint8_t ttvn, uint16_t tt_crc,
1525  bool full_table)
1526 {
1527  struct sk_buff *skb = NULL;
1528  struct batadv_tt_query_packet *tt_request;
1529  struct batadv_neigh_node *neigh_node = NULL;
1530  struct batadv_hard_iface *primary_if;
1531  struct batadv_tt_req_node *tt_req_node = NULL;
1532  int ret = 1;
1533  size_t tt_req_len;
1534 
1535  primary_if = batadv_primary_if_get_selected(bat_priv);
1536  if (!primary_if)
1537  goto out;
1538 
1539  /* The new tt_req will be issued only if I'm not waiting for a
1540  * reply from the same orig_node yet
1541  */
1542  tt_req_node = batadv_new_tt_req_node(bat_priv, dst_orig_node);
1543  if (!tt_req_node)
1544  goto out;
1545 
1546  skb = dev_alloc_skb(sizeof(*tt_request) + ETH_HLEN);
1547  if (!skb)
1548  goto out;
1549 
1550  skb_reserve(skb, ETH_HLEN);
1551 
1552  tt_req_len = sizeof(*tt_request);
1553  tt_request = (struct batadv_tt_query_packet *)skb_put(skb, tt_req_len);
1554 
1555  tt_request->header.packet_type = BATADV_TT_QUERY;
1556  tt_request->header.version = BATADV_COMPAT_VERSION;
1557  memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1558  memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
1559  tt_request->header.ttl = BATADV_TTL;
1560  tt_request->ttvn = ttvn;
1561  tt_request->tt_data = htons(tt_crc);
1562  tt_request->flags = BATADV_TT_REQUEST;
1563 
1564  if (full_table)
1565  tt_request->flags |= BATADV_TT_FULL_TABLE;
1566 
1567  neigh_node = batadv_orig_node_get_router(dst_orig_node);
1568  if (!neigh_node)
1569  goto out;
1570 
1571  batadv_dbg(BATADV_DBG_TT, bat_priv,
1572  "Sending TT_REQUEST to %pM via %pM [%c]\n",
1573  dst_orig_node->orig, neigh_node->addr,
1574  (full_table ? 'F' : '.'));
1575 
1576  batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_TX);
1577 
1578  batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1579  ret = 0;
1580 
1581 out:
1582  if (neigh_node)
1583  batadv_neigh_node_free_ref(neigh_node);
1584  if (primary_if)
1585  batadv_hardif_free_ref(primary_if);
1586  if (ret)
1587  kfree_skb(skb);
1588  if (ret && tt_req_node) {
1589  spin_lock_bh(&bat_priv->tt.req_list_lock);
1590  list_del(&tt_req_node->list);
1591  spin_unlock_bh(&bat_priv->tt.req_list_lock);
1592  kfree(tt_req_node);
1593  }
1594  return ret;
1595 }
1596 
1597 static bool
1598 batadv_send_other_tt_response(struct batadv_priv *bat_priv,
1599  struct batadv_tt_query_packet *tt_request)
1600 {
1601  struct batadv_orig_node *req_dst_orig_node = NULL;
1602  struct batadv_orig_node *res_dst_orig_node = NULL;
1603  struct batadv_neigh_node *neigh_node = NULL;
1604  struct batadv_hard_iface *primary_if = NULL;
1605  uint8_t orig_ttvn, req_ttvn, ttvn;
1606  int ret = false;
1607  unsigned char *tt_buff;
1608  bool full_table;
1609  uint16_t tt_len, tt_tot;
1610  struct sk_buff *skb = NULL;
1611  struct batadv_tt_query_packet *tt_response;
1612  uint8_t *packet_pos;
1613  size_t len;
1614 
1615  batadv_dbg(BATADV_DBG_TT, bat_priv,
1616  "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
1617  tt_request->src, tt_request->ttvn, tt_request->dst,
1618  (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
1619 
1620  /* Let's get the orig node of the REAL destination */
1621  req_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->dst);
1622  if (!req_dst_orig_node)
1623  goto out;
1624 
1625  res_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
1626  if (!res_dst_orig_node)
1627  goto out;
1628 
1629  neigh_node = batadv_orig_node_get_router(res_dst_orig_node);
1630  if (!neigh_node)
1631  goto out;
1632 
1633  primary_if = batadv_primary_if_get_selected(bat_priv);
1634  if (!primary_if)
1635  goto out;
1636 
1637  orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1638  req_ttvn = tt_request->ttvn;
1639 
1640  /* I don't have the requested data */
1641  if (orig_ttvn != req_ttvn ||
1642  tt_request->tt_data != htons(req_dst_orig_node->tt_crc))
1643  goto out;
1644 
1645  /* If the full table has been explicitly requested */
1646  if (tt_request->flags & BATADV_TT_FULL_TABLE ||
1647  !req_dst_orig_node->tt_buff)
1648  full_table = true;
1649  else
1650  full_table = false;
1651 
1652  /* In this version, fragmentation is not implemented, so
1653  * I'll send only one packet with as many TT entries as possible
1654  */
1655  if (!full_table) {
1656  spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
1657  tt_len = req_dst_orig_node->tt_buff_len;
1658  tt_tot = tt_len / sizeof(struct batadv_tt_change);
1659 
1660  len = sizeof(*tt_response) + tt_len;
1661  skb = dev_alloc_skb(len + ETH_HLEN);
1662  if (!skb)
1663  goto unlock;
1664 
1665  skb_reserve(skb, ETH_HLEN);
1666  packet_pos = skb_put(skb, len);
1667  tt_response = (struct batadv_tt_query_packet *)packet_pos;
1668  tt_response->ttvn = req_ttvn;
1669  tt_response->tt_data = htons(tt_tot);
1670 
1671  tt_buff = skb->data + sizeof(*tt_response);
1672  /* Copy the last orig_node's OGM buffer */
1673  memcpy(tt_buff, req_dst_orig_node->tt_buff,
1674  req_dst_orig_node->tt_buff_len);
1675 
1676  spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
1677  } else {
1678  tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size);
1679  tt_len *= sizeof(struct batadv_tt_change);
1680  ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1681 
1682  skb = batadv_tt_response_fill_table(tt_len, ttvn,
1683  bat_priv->tt.global_hash,
1684  primary_if,
1685  batadv_tt_global_valid,
1686  req_dst_orig_node);
1687  if (!skb)
1688  goto out;
1689 
1690  tt_response = (struct batadv_tt_query_packet *)skb->data;
1691  }
1692 
1693  tt_response->header.packet_type = BATADV_TT_QUERY;
1694  tt_response->header.version = BATADV_COMPAT_VERSION;
1695  tt_response->header.ttl = BATADV_TTL;
1696  memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
1697  memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1698  tt_response->flags = BATADV_TT_RESPONSE;
1699 
1700  if (full_table)
1701  tt_response->flags |= BATADV_TT_FULL_TABLE;
1702 
1703  batadv_dbg(BATADV_DBG_TT, bat_priv,
1704  "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
1705  res_dst_orig_node->orig, neigh_node->addr,
1706  req_dst_orig_node->orig, req_ttvn);
1707 
1708  batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
1709 
1710  batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1711  ret = true;
1712  goto out;
1713 
1714 unlock:
1715  spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
1716 
1717 out:
1718  if (res_dst_orig_node)
1719  batadv_orig_node_free_ref(res_dst_orig_node);
1720  if (req_dst_orig_node)
1721  batadv_orig_node_free_ref(req_dst_orig_node);
1722  if (neigh_node)
1723  batadv_neigh_node_free_ref(neigh_node);
1724  if (primary_if)
1725  batadv_hardif_free_ref(primary_if);
1726  if (!ret)
1727  kfree_skb(skb);
1728  return ret;
1729 
1730 }
1731 
1732 static bool
1733 batadv_send_my_tt_response(struct batadv_priv *bat_priv,
1734  struct batadv_tt_query_packet *tt_request)
1735 {
1736  struct batadv_orig_node *orig_node = NULL;
1737  struct batadv_neigh_node *neigh_node = NULL;
1738  struct batadv_hard_iface *primary_if = NULL;
1739  uint8_t my_ttvn, req_ttvn, ttvn;
1740  int ret = false;
1741  unsigned char *tt_buff;
1742  bool full_table;
1743  uint16_t tt_len, tt_tot;
1744  struct sk_buff *skb = NULL;
1745  struct batadv_tt_query_packet *tt_response;
1746  uint8_t *packet_pos;
1747  size_t len;
1748 
1749  batadv_dbg(BATADV_DBG_TT, bat_priv,
1750  "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
1751  tt_request->src, tt_request->ttvn,
1752  (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
1753 
1754 
1755  my_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
1756  req_ttvn = tt_request->ttvn;
1757 
1758  orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
1759  if (!orig_node)
1760  goto out;
1761 
1762  neigh_node = batadv_orig_node_get_router(orig_node);
1763  if (!neigh_node)
1764  goto out;
1765 
1766  primary_if = batadv_primary_if_get_selected(bat_priv);
1767  if (!primary_if)
1768  goto out;
1769 
1770  /* If the full table has been explicitly requested or the gap
1771  * is too big send the whole local translation table
1772  */
1773  if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
1774  !bat_priv->tt.last_changeset)
1775  full_table = true;
1776  else
1777  full_table = false;
1778 
1779  /* In this version, fragmentation is not implemented, so
1780  * I'll send only one packet with as many TT entries as possible
1781  */
1782  if (!full_table) {
1783  spin_lock_bh(&bat_priv->tt.last_changeset_lock);
1784  tt_len = bat_priv->tt.last_changeset_len;
1785  tt_tot = tt_len / sizeof(struct batadv_tt_change);
1786 
1787  len = sizeof(*tt_response) + tt_len;
1788  skb = dev_alloc_skb(len + ETH_HLEN);
1789  if (!skb)
1790  goto unlock;
1791 
1792  skb_reserve(skb, ETH_HLEN);
1793  packet_pos = skb_put(skb, len);
1794  tt_response = (struct batadv_tt_query_packet *)packet_pos;
1795  tt_response->ttvn = req_ttvn;
1796  tt_response->tt_data = htons(tt_tot);
1797 
1798  tt_buff = skb->data + sizeof(*tt_response);
1799  memcpy(tt_buff, bat_priv->tt.last_changeset,
1800  bat_priv->tt.last_changeset_len);
1801  spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
1802  } else {
1803  tt_len = (uint16_t)atomic_read(&bat_priv->tt.local_entry_num);
1804  tt_len *= sizeof(struct batadv_tt_change);
1805  ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
1806 
1807  skb = batadv_tt_response_fill_table(tt_len, ttvn,
1808  bat_priv->tt.local_hash,
1809  primary_if,
1810  batadv_tt_local_valid_entry,
1811  NULL);
1812  if (!skb)
1813  goto out;
1814 
1815  tt_response = (struct batadv_tt_query_packet *)skb->data;
1816  }
1817 
1818  tt_response->header.packet_type = BATADV_TT_QUERY;
1819  tt_response->header.version = BATADV_COMPAT_VERSION;
1820  tt_response->header.ttl = BATADV_TTL;
1821  memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1822  memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1823  tt_response->flags = BATADV_TT_RESPONSE;
1824 
1825  if (full_table)
1826  tt_response->flags |= BATADV_TT_FULL_TABLE;
1827 
1828  batadv_dbg(BATADV_DBG_TT, bat_priv,
1829  "Sending TT_RESPONSE to %pM via %pM [%c]\n",
1830  orig_node->orig, neigh_node->addr,
1831  (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
1832 
1833  batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
1834 
1835  batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1836  ret = true;
1837  goto out;
1838 
1839 unlock:
1840  spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
1841 out:
1842  if (orig_node)
1843  batadv_orig_node_free_ref(orig_node);
1844  if (neigh_node)
1845  batadv_neigh_node_free_ref(neigh_node);
1846  if (primary_if)
1847  batadv_hardif_free_ref(primary_if);
1848  if (!ret)
1849  kfree_skb(skb);
1850  /* This packet was for me, so it doesn't need to be re-routed */
1851  return true;
1852 }
1853 
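/* Entry point for incoming TT_REQUESTs: requests originated by backbone
 * gateways are ignored, requests addressed to this node are answered
 * locally and all others are answered on behalf of the requested
 * originator.
 */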
1854 bool batadv_send_tt_response(struct batadv_priv *bat_priv,
1855  struct batadv_tt_query_packet *tt_request)
1856 {
1857  if (batadv_is_my_mac(tt_request->dst)) {
1858  /* don't answer backbone gws! */
1859  if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))
1860  return true;
1861 
1862  return batadv_send_my_tt_response(bat_priv, tt_request);
1863  } else {
1864  return batadv_send_other_tt_response(bat_priv, tt_request);
1865  }
1866 }
1867 
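/* Apply a list of tt_change entries to the global table of orig_node:
 * entries flagged with BATADV_TT_CLIENT_DEL are removed, all others are
 * added. orig_node->tt_initialised is only set once every change has been
 * applied successfully.
 */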
1868 static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
1869  struct batadv_orig_node *orig_node,
1870  struct batadv_tt_change *tt_change,
1871  uint16_t tt_num_changes, uint8_t ttvn)
1872 {
1873  int i;
1874  int roams;
1875 
1876  for (i = 0; i < tt_num_changes; i++) {
1877  if ((tt_change + i)->flags & BATADV_TT_CLIENT_DEL) {
1878  roams = (tt_change + i)->flags & BATADV_TT_CLIENT_ROAM;
1879  batadv_tt_global_del(bat_priv, orig_node,
1880  (tt_change + i)->addr,
1881  "tt removed by changes",
1882  roams);
1883  } else {
1884  if (!batadv_tt_global_add(bat_priv, orig_node,
1885  (tt_change + i)->addr,
1886  (tt_change + i)->flags, ttvn))
1887  /* If a problem occurs while storing a
1888  * global_entry, we stop the update
1889  * procedure without committing the
1890  * ttvn change. This avoids sending
1891  * corrupted data in a later tt_request
1892  */
1893  return;
1894  }
1895  }
1896  orig_node->tt_initialised = true;
1897 }
1898 
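/* Replace the global entries announced by the originator that sent this
 * full-table TT_RESPONSE: purge its old entries, apply the attached
 * changes, drop any buffered diff and record the advertised ttvn.
 */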
1899 static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
1900  struct batadv_tt_query_packet *tt_response)
1901 {
1902  struct batadv_orig_node *orig_node = NULL;
1903 
1904  orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
1905  if (!orig_node)
1906  goto out;
1907 
1908  /* Purge the old table first... */
1909  batadv_tt_global_del_orig(bat_priv, orig_node, "Received full table");
1910 
1911  _batadv_tt_update_changes(bat_priv, orig_node,
1912  (struct batadv_tt_change *)(tt_response + 1),
1913  ntohs(tt_response->tt_data),
1914  tt_response->ttvn);
1915 
1916  spin_lock_bh(&orig_node->tt_buff_lock);
1917  kfree(orig_node->tt_buff);
1918  orig_node->tt_buff_len = 0;
1919  orig_node->tt_buff = NULL;
1920  spin_unlock_bh(&orig_node->tt_buff_lock);
1921 
1922  atomic_set(&orig_node->last_ttvn, tt_response->ttvn);
1923 
1924 out:
1925  if (orig_node)
1926  batadv_orig_node_free_ref(orig_node);
1927 }
1928 
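/* Apply a partial diff received for orig_node, store a copy of it in
 * orig_node->tt_buff so that TT_REQUESTs can later be answered on its
 * behalf, and update the last known ttvn.
 */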
1929 static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
1930  struct batadv_orig_node *orig_node,
1931  uint16_t tt_num_changes, uint8_t ttvn,
1932  struct batadv_tt_change *tt_change)
1933 {
1934  _batadv_tt_update_changes(bat_priv, orig_node, tt_change,
1935  tt_num_changes, ttvn);
1936 
1937  batadv_tt_save_orig_buffer(bat_priv, orig_node,
1938  (unsigned char *)tt_change, tt_num_changes);
1939  atomic_set(&orig_node->last_ttvn, ttvn);
1940 }
1941 
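/* Return true if addr is a local client served by this node and not
 * marked as pending deletion.
 */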
1942 bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr)
1943 {
1944  struct batadv_tt_local_entry *tt_local_entry = NULL;
1945  bool ret = false;
1946 
1947  tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
1948  if (!tt_local_entry)
1949  goto out;
1950  /* Check if the client has been logically deleted (but is kept for
1951  * consistency purposes)
1952  */
1953  if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
1954  goto out;
1955  ret = true;
1956 out:
1957  if (tt_local_entry)
1958  batadv_tt_local_entry_free_ref(tt_local_entry);
1959  return ret;
1960 }
1961 
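/* Process an incoming TT_RESPONSE: apply the carried full table or diff,
 * remove the matching entry from the pending request list, recompute the
 * originator's global CRC and mark the roaming phase as over. Responses
 * coming from backbone gateways are ignored.
 */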
1962 void batadv_handle_tt_response(struct batadv_priv *bat_priv,
1963  struct batadv_tt_query_packet *tt_response)
1964 {
1965  struct batadv_tt_req_node *node, *safe;
1966  struct batadv_orig_node *orig_node = NULL;
1967  struct batadv_tt_change *tt_change;
1968 
1969  batadv_dbg(BATADV_DBG_TT, bat_priv,
1970  "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
1971  tt_response->src, tt_response->ttvn,
1972  ntohs(tt_response->tt_data),
1973  (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
1974 
1975  /* we should have never asked a backbone gw */
1976  if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_response->src))
1977  goto out;
1978 
1979  orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
1980  if (!orig_node)
1981  goto out;
1982 
1983  if (tt_response->flags & BATADV_TT_FULL_TABLE) {
1984  batadv_tt_fill_gtable(bat_priv, tt_response);
1985  } else {
1986  tt_change = (struct batadv_tt_change *)(tt_response + 1);
1987  batadv_tt_update_changes(bat_priv, orig_node,
1988  ntohs(tt_response->tt_data),
1989  tt_response->ttvn, tt_change);
1990  }
1991 
1992  /* Delete the tt_req_node from pending tt_requests list */
1993  spin_lock_bh(&bat_priv->tt.req_list_lock);
1994  list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
1995  if (!batadv_compare_eth(node->addr, tt_response->src))
1996  continue;
1997  list_del(&node->list);
1998  kfree(node);
1999  }
2000  spin_unlock_bh(&bat_priv->tt.req_list_lock);
2001 
2002  /* Recalculate the CRC for this orig_node and store it */
2003  orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
2004  /* Roaming phase is over: tables are in sync again. I can
2005  * unset the flag
2006  */
2007  orig_node->tt_poss_change = false;
2008 out:
2009  if (orig_node)
2010  batadv_orig_node_free_ref(orig_node);
2011 }
2012 
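/* Initialise the local and global translation tables and start the
 * periodic purge timer. Returns 1 on success, a negative value otherwise.
 */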
2013 int batadv_tt_init(struct batadv_priv *bat_priv)
2014 {
2015  int ret;
2016 
2017  ret = batadv_tt_local_init(bat_priv);
2018  if (ret < 0)
2019  return ret;
2020 
2021  ret = batadv_tt_global_init(bat_priv);
2022  if (ret < 0)
2023  return ret;
2024 
2025  batadv_tt_start_timer(bat_priv);
2026 
2027  return 1;
2028 }
2029 
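/* Free all entries queued in the roaming list. */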
2030 static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv)
2031 {
2032  struct batadv_tt_roam_node *node, *safe;
2033 
2034  spin_lock_bh(&bat_priv->tt.roam_list_lock);
2035 
2036  list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
2037  list_del(&node->list);
2038  kfree(node);
2039  }
2040 
2041  spin_unlock_bh(&bat_priv->tt.roam_list_lock);
2042 }
2043 
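/* Remove every roaming list entry that has timed out. */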
2044 static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
2045 {
2046  struct batadv_tt_roam_node *node, *safe;
2047 
2048  spin_lock_bh(&bat_priv->tt.roam_list_lock);
2049  list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
2050  if (!batadv_has_timed_out(node->first_time,
2051  BATADV_ROAMING_MAX_TIME))
2052  continue;
2053 
2054  list_del(&node->list);
2055  kfree(node);
2056  }
2057  spin_unlock_bh(&bat_priv->tt.roam_list_lock);
2058 }
2059 
2060 /* This function checks whether the client has already reached the
2061  * maximum number of possible roaming phases. If so, the ROAMING_ADV
2062  * will not be sent.
2063  *
2064  * returns true if the ROAMING_ADV can be sent, false otherwise
2065  */
2066 static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
2067  uint8_t *client)
2068 {
2069  struct batadv_tt_roam_node *tt_roam_node;
2070  bool ret = false;
2071 
2072  spin_lock_bh(&bat_priv->tt.roam_list_lock);
2073  /* A new roaming entry for this client is created only if none exists
2074  * yet; otherwise its counter decides whether another ROAMING_ADV is allowed
2075  */
2076  list_for_each_entry(tt_roam_node, &bat_priv->tt.roam_list, list) {
2077  if (!batadv_compare_eth(tt_roam_node->addr, client))
2078  continue;
2079 
2080  if (batadv_has_timed_out(tt_roam_node->first_time,
2081  BATADV_ROAMING_MAX_TIME))
2082  continue;
2083 
2084  if (!batadv_atomic_dec_not_zero(&tt_roam_node->counter))
2085  /* Sorry, you roamed too many times! */
2086  goto unlock;
2087  ret = true;
2088  break;
2089  }
2090 
2091  if (!ret) {
2092  tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
2093  if (!tt_roam_node)
2094  goto unlock;
2095 
2096  tt_roam_node->first_time = jiffies;
2097  atomic_set(&tt_roam_node->counter,
2098  BATADV_ROAMING_MAX_COUNT - 1);
2099  memcpy(tt_roam_node->addr, client, ETH_ALEN);
2100 
2101  list_add(&tt_roam_node->list, &bat_priv->tt.roam_list);
2102  ret = true;
2103  }
2104 
2105 unlock:
2106  spin_unlock_bh(&bat_priv->tt.roam_list_lock);
2107  return ret;
2108 }
2109 
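/* Inform the former serving originator that the given client has roamed
 * to this node by sending it a ROAMING_ADV, unless the client has already
 * roamed too often in the recent past.
 */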
2110 static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
2111  struct batadv_orig_node *orig_node)
2112 {
2113  struct batadv_neigh_node *neigh_node = NULL;
2114  struct sk_buff *skb = NULL;
2115  struct batadv_roam_adv_packet *roam_adv_packet;
2116  int ret = 1;
2117  struct batadv_hard_iface *primary_if;
2118  size_t len = sizeof(*roam_adv_packet);
2119 
2120  /* before going on we have to check whether the client has
2121  * already roamed to us too many times
2122  */
2123  if (!batadv_tt_check_roam_count(bat_priv, client))
2124  goto out;
2125 
2126  skb = dev_alloc_skb(sizeof(*roam_adv_packet) + ETH_HLEN);
2127  if (!skb)
2128  goto out;
2129 
2130  skb_reserve(skb, ETH_HLEN);
2131 
2132  roam_adv_packet = (struct batadv_roam_adv_packet *)skb_put(skb, len);
2133 
2134  roam_adv_packet->header.packet_type = BATADV_ROAM_ADV;
2135  roam_adv_packet->header.version = BATADV_COMPAT_VERSION;
2136  roam_adv_packet->header.ttl = BATADV_TTL;
2137  roam_adv_packet->reserved = 0;
2138  primary_if = batadv_primary_if_get_selected(bat_priv);
2139  if (!primary_if)
2140  goto out;
2141  memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
2142  batadv_hardif_free_ref(primary_if);
2143  memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
2144  memcpy(roam_adv_packet->client, client, ETH_ALEN);
2145 
2146  neigh_node = batadv_orig_node_get_router(orig_node);
2147  if (!neigh_node)
2148  goto out;
2149 
2150  batadv_dbg(BATADV_DBG_TT, bat_priv,
2151  "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
2152  orig_node->orig, client, neigh_node->addr);
2153 
2154  batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX);
2155 
2156  batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
2157  ret = 0;
2158 
2159 out:
2160  if (neigh_node)
2161  batadv_neigh_node_free_ref(neigh_node);
2162  if (ret)
2163  kfree_skb(skb);
2164  return;
2165 }
2166 
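/* Periodic worker: purge outdated local and global entries, pending
 * tt_requests and stale roaming entries, then re-arm the purge timer.
 */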
2167 static void batadv_tt_purge(struct work_struct *work)
2168 {
2169  struct delayed_work *delayed_work;
2170  struct batadv_priv_tt *priv_tt;
2171  struct batadv_priv *bat_priv;
2172 
2173  delayed_work = container_of(work, struct delayed_work, work);
2174  priv_tt = container_of(delayed_work, struct batadv_priv_tt, work);
2175  bat_priv = container_of(priv_tt, struct batadv_priv, tt);
2176 
2177  batadv_tt_local_purge(bat_priv);
2178  batadv_tt_global_purge(bat_priv);
2179  batadv_tt_req_purge(bat_priv);
2180  batadv_tt_roam_purge(bat_priv);
2181 
2182  batadv_tt_start_timer(bat_priv);
2183 }
2184 
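/* Stop the purge worker and release all translation table resources held
 * by this bat_priv.
 */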
2185 void batadv_tt_free(struct batadv_priv *bat_priv)
2186 {
2187  cancel_delayed_work_sync(&bat_priv->tt.work);
2188 
2189  batadv_tt_local_table_free(bat_priv);
2190  batadv_tt_global_table_free(bat_priv);
2191  batadv_tt_req_list_free(bat_priv);
2192  batadv_tt_changes_list_free(bat_priv);
2193  batadv_tt_roam_list_free(bat_priv);
2194 
2195  kfree(bat_priv->tt.last_changeset);
2196 }
2197 
2198 /* This function enables or disables the specified flags for all the entries
2199  * in the given hash table and returns the number of modified entries
2200  */
2201 static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash,
2202  uint16_t flags, bool enable)
2203 {
2204  uint32_t i;
2205  uint16_t changed_num = 0;
2206  struct hlist_head *head;
2207  struct hlist_node *node;
2208  struct batadv_tt_common_entry *tt_common_entry;
2209 
2210  if (!hash)
2211  goto out;
2212 
2213  for (i = 0; i < hash->size; i++) {
2214  head = &hash->table[i];
2215 
2216  rcu_read_lock();
2217  hlist_for_each_entry_rcu(tt_common_entry, node,
2218  head, hash_entry) {
2219  if (enable) {
2220  if ((tt_common_entry->flags & flags) == flags)
2221  continue;
2222  tt_common_entry->flags |= flags;
2223  } else {
2224  if (!(tt_common_entry->flags & flags))
2225  continue;
2226  tt_common_entry->flags &= ~flags;
2227  }
2228  changed_num++;
2229  }
2230  rcu_read_unlock();
2231  }
2232 out:
2233  return changed_num;
2234 }
2235 
2236 /* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
2237 static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
2238 {
2239  struct batadv_hashtable *hash = bat_priv->tt.local_hash;
2240  struct batadv_tt_common_entry *tt_common;
2241  struct batadv_tt_local_entry *tt_local;
2242  struct hlist_node *node, *node_tmp;
2243  struct hlist_head *head;
2244  spinlock_t *list_lock; /* protects write access to the hash lists */
2245  uint32_t i;
2246 
2247  if (!hash)
2248  return;
2249 
2250  for (i = 0; i < hash->size; i++) {
2251  head = &hash->table[i];
2252  list_lock = &hash->list_locks[i];
2253 
2254  spin_lock_bh(list_lock);
2255  hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
2256  hash_entry) {
2257  if (!(tt_common->flags & BATADV_TT_CLIENT_PENDING))
2258  continue;
2259 
2260  batadv_dbg(BATADV_DBG_TT, bat_priv,
2261  "Deleting local tt entry (%pM): pending\n",
2262  tt_common->addr);
2263 
2264  atomic_dec(&bat_priv->tt.local_entry_num);
2265  hlist_del_rcu(node);
2266  tt_local = container_of(tt_common,
2267  struct batadv_tt_local_entry,
2268  common);
2269  batadv_tt_local_entry_free_ref(tt_local);
2270  }
2271  spin_unlock_bh(list_lock);
2272  }
2273 
2274 }
2275 
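/* Commit the pending local table changes: clear the NEW flag on all
 * entries, purge clients pending deletion, recompute the local CRC,
 * increase the ttvn and fill packet_buff with the resulting change set.
 * Returns -ENOENT if no local change is pending.
 */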
2276 static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
2277  unsigned char **packet_buff,
2278  int *packet_buff_len, int packet_min_len)
2279 {
2280  uint16_t changed_num = 0;
2281 
2282  if (atomic_read(&bat_priv->tt.local_changes) < 1)
2283  return -ENOENT;
2284 
2285  changed_num = batadv_tt_set_flags(bat_priv->tt.local_hash,
2286  BATADV_TT_CLIENT_NEW, false);
2287 
2288  /* all reset entries have to be counted as local entries */
2289  atomic_add(changed_num, &bat_priv->tt.local_entry_num);
2290  batadv_tt_local_purge_pending_clients(bat_priv);
2291  bat_priv->tt.local_crc = batadv_tt_local_crc(bat_priv);
2292 
2293  /* Increment the TTVN only once per OGM interval */
2294  atomic_inc(&bat_priv->tt.vn);
2295  batadv_dbg(BATADV_DBG_TT, bat_priv,
2296  "Local changes committed, updating to ttvn %u\n",
2297  (uint8_t)atomic_read(&bat_priv->tt.vn));
2298  bat_priv->tt.poss_change = false;
2299 
2300  /* reset the sending counter */
2301  atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
2302 
2303  return batadv_tt_changes_fill_buff(bat_priv, packet_buff,
2304  packet_buff_len, packet_min_len);
2305 }
2306 
2307 /* the caller must ensure that (hard_iface == primary_if) holds when calling this function */
2308 int batadv_tt_append_diff(struct batadv_priv *bat_priv,
2309  unsigned char **packet_buff, int *packet_buff_len,
2310  int packet_min_len)
2311 {
2312  int tt_num_changes;
2313 
2314  /* if at least one change happened */
2315  tt_num_changes = batadv_tt_commit_changes(bat_priv, packet_buff,
2316  packet_buff_len,
2317  packet_min_len);
2318 
2319  /* if the changes have been sent often enough */
2320  if ((tt_num_changes < 0) &&
2321  (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))) {
2322  batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
2323  packet_min_len, packet_min_len);
2324  tt_num_changes = 0;
2325  }
2326 
2327  return tt_num_changes;
2328 }
2329 
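/* Return true if, with AP isolation enabled, traffic from the (global)
 * source src towards the (local) destination dst has to be dropped.
 */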
2330 bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
2331  uint8_t *dst)
2332 {
2333  struct batadv_tt_local_entry *tt_local_entry = NULL;
2334  struct batadv_tt_global_entry *tt_global_entry = NULL;
2335  bool ret = false;
2336 
2337  if (!atomic_read(&bat_priv->ap_isolation))
2338  goto out;
2339 
2340  tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst);
2341  if (!tt_local_entry)
2342  goto out;
2343 
2344  tt_global_entry = batadv_tt_global_hash_find(bat_priv, src);
2345  if (!tt_global_entry)
2346  goto out;
2347 
2348  if (!_batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
2349  goto out;
2350 
2351  ret = true;
2352 
2353 out:
2354  if (tt_global_entry)
2355  batadv_tt_global_entry_free_ref(tt_global_entry);
2356  if (tt_local_entry)
2357  batadv_tt_local_entry_free_ref(tt_local_entry);
2358  return ret;
2359 }
2360 
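/* Process the translation table metadata carried by an OGM received from
 * orig_node: apply the attached diff when the ttvn advanced by exactly one,
 * and fall back to a TT_REQUEST whenever the ttvn or the CRC reveals an
 * inconsistency.
 */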
2361 void batadv_tt_update_orig(struct batadv_priv *bat_priv,
2362  struct batadv_orig_node *orig_node,
2363  const unsigned char *tt_buff, uint8_t tt_num_changes,
2364  uint8_t ttvn, uint16_t tt_crc)
2365 {
2366  uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
2367  bool full_table = true;
2368  struct batadv_tt_change *tt_change;
2369 
2370  /* don't care about a backbone gateway's updates. */
2371  if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
2372  return;
2373 
2374  /* if the orig table is not initialised and the first diff is in the OGM,
2375  * or the ttvn increased by exactly one, we can apply the attached changes
2376  */
2377  if ((!orig_node->tt_initialised && ttvn == 1) ||
2378  ttvn - orig_ttvn == 1) {
2379  /* the OGM did not contain the changes, either because of their size
2380  * or because they have already been sent BATADV_TT_OGM_APPEND_MAX
2381  * times.
2382  * In this case send a tt request
2383  */
2384  if (!tt_num_changes) {
2385  full_table = false;
2386  goto request_table;
2387  }
2388 
2389  tt_change = (struct batadv_tt_change *)tt_buff;
2390  batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes,
2391  ttvn, tt_change);
2392 
2393  /* Even if we received the precomputed crc with the OGM, we
2394  * prefer to recompute it to spot any possible inconsistency
2395  * in the global table
2396  */
2397  orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
2398 
2399  /* The ttvn alone is not enough to guarantee consistency
2400  * because a single value could represent different states
2401  * (due to the wrap around). Thus a node has to check whether
2402  * the resulting table (after applying the changes) is still
2403  * consistent or not. E.g. a node could disconnect while its
2404  * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
2405  * checking the CRC value is mandatory to detect the
2406  * inconsistency
2407  */
2408  if (orig_node->tt_crc != tt_crc)
2409  goto request_table;
2410 
2411  /* Roaming phase is over: tables are in sync again. I can
2412  * unset the flag
2413  */
2414  orig_node->tt_poss_change = false;
2415  } else {
2416  /* if we missed more than one change or our tables are not
2417  * in sync anymore -> request fresh tt data
2418  */
2419  if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
2420  orig_node->tt_crc != tt_crc) {
2421 request_table:
2422  batadv_dbg(BATADV_DBG_TT, bat_priv,
2423  "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
2424  orig_node->orig, ttvn, orig_ttvn, tt_crc,
2425  orig_node->tt_crc, tt_num_changes);
2426  batadv_send_tt_request(bat_priv, orig_node, ttvn,
2427  tt_crc, full_table);
2428  return;
2429  }
2430  }
2431 }
2432 
2433 /* returns true if we know that the client has moved from its old
2434  * originator to another one. Its entry is still kept for consistency
2435  * purposes
2436  */
2437 bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
2438  uint8_t *addr)
2439 {
2440  struct batadv_tt_global_entry *tt_global_entry;
2441  bool ret = false;
2442 
2443  tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
2444  if (!tt_global_entry)
2445  goto out;
2446 
2447  ret = tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM;
2448  batadv_tt_global_entry_free_ref(tt_global_entry);
2449 out:
2450  return ret;
2451 }
2452 
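/* Add a temporary global entry for client addr seen behind orig_node;
 * nothing is added if the originator is a backbone gateway.
 */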
2453 bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
2454  struct batadv_orig_node *orig_node,
2455  const unsigned char *addr)
2456 {
2457  bool ret = false;
2458 
2459  /* if the originator is a backbone node (meaning it belongs to the same
2460  * LAN as this node) the temporary client must not be added because to
2461  * reach such a destination the node must use the LAN instead of the mesh
2462  */
2463  if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
2464  goto out;
2465 
2466  if (!batadv_tt_global_add(bat_priv, orig_node, addr,
2467  BATADV_TT_CLIENT_TEMP,
2468  atomic_read(&orig_node->last_ttvn)))
2469  goto out;
2470 
2471  batadv_dbg(BATADV_DBG_TT, bat_priv,
2472  "Added temporary global client (addr: %pM orig: %pM)\n",
2473  addr, orig_node->orig);
2474  ret = true;
2475 out:
2476  return ret;
2477 }