/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "send.h"
#include "hash.h"
#include "originator.h"
#include "routing.h"

#include <linux/crc16.h>

static void _tt_global_del(struct bat_priv *bat_priv,
			   struct tt_global_entry *tt_global_entry,
			   const char *message);
static void tt_purge(struct work_struct *work);

/* returns 1 if they are the same mac addr */
static int compare_ltt(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct tt_local_entry,
					 hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

/* returns 1 if they are the same mac addr */
static int compare_gtt(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct tt_global_entry,
					 hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

static void tt_start_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge);
	queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work,
			   msecs_to_jiffies(5000));
}

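/*
 * Hash lookup helpers: both finders below walk the RCU-protected bucket that
 * choose_orig() selects for the given mac address and return the matching
 * entry with its refcount already increased, or NULL. The caller is expected
 * to drop that reference with the corresponding *_free_ref() helper.
 */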
static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
						 const void *data)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_local_entry *tt_local_entry, *tt_local_entry_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	index = choose_orig(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) {
		if (!compare_eth(tt_local_entry, data))
			continue;

		if (!atomic_inc_not_zero(&tt_local_entry->refcount))
			continue;

		tt_local_entry_tmp = tt_local_entry;
		break;
	}
	rcu_read_unlock();

	return tt_local_entry_tmp;
}

static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
						   const void *data)
{
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_global_entry *tt_global_entry;
	struct tt_global_entry *tt_global_entry_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	index = choose_orig(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) {
		if (!compare_eth(tt_global_entry, data))
			continue;

		if (!atomic_inc_not_zero(&tt_global_entry->refcount))
			continue;

		tt_global_entry_tmp = tt_global_entry;
		break;
	}
	rcu_read_unlock();

	return tt_global_entry_tmp;
}

static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
{
	unsigned long deadline;

	deadline = starting_time + msecs_to_jiffies(timeout);
	return time_after(jiffies, deadline);
}

static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
{
	if (atomic_dec_and_test(&tt_local_entry->refcount))
		kfree_rcu(tt_local_entry, rcu);
}

static void tt_global_entry_free_rcu(struct rcu_head *rcu)
{
	struct tt_global_entry *tt_global_entry;

	tt_global_entry = container_of(rcu, struct tt_global_entry, rcu);

	if (tt_global_entry->orig_node)
		orig_node_free_ref(tt_global_entry->orig_node);

	kfree(tt_global_entry);
}

static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
{
	if (atomic_dec_and_test(&tt_global_entry->refcount))
		call_rcu(&tt_global_entry->rcu, tt_global_entry_free_rcu);
}

static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
			   uint8_t flags)
{
	struct tt_change_node *tt_change_node;

	tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
	if (!tt_change_node)
		return;

	tt_change_node->change.flags = flags;
	memcpy(tt_change_node->change.addr, addr, ETH_ALEN);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	/* track the change in the OGM interval list */
	list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
	atomic_inc(&bat_priv->tt_local_changes);
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	/* reset the OGM append counter */
	atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
}

int tt_len(int changes_num)
{
	return changes_num * sizeof(struct tt_change);
}

static int tt_local_init(struct bat_priv *bat_priv)
{
	if (bat_priv->tt_local_hash)
		return 1;

	bat_priv->tt_local_hash = hash_new(1024);

	if (!bat_priv->tt_local_hash)
		return 0;

	return 1;
}

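/*
 * tt_local_add - learn a client seen on the soft-interface. If the address
 * is already known locally only its last_seen time is refreshed; otherwise a
 * new entry is created, announced as a TT change and, if the same address is
 * still present in the global table, a roaming advertisement is sent towards
 * the originator that previously served it.
 */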
void tt_local_add(struct net_device *soft_iface, const uint8_t *addr)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);

	if (tt_local_entry) {
		tt_local_entry->last_seen = jiffies;
		goto out;
	}

	tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
	if (!tt_local_entry)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Creating new local tt entry: %pM (ttvn: %d)\n", addr,
		(uint8_t)atomic_read(&bat_priv->ttvn));

	memcpy(tt_local_entry->addr, addr, ETH_ALEN);
	tt_local_entry->last_seen = jiffies;
	tt_local_entry->flags = NO_FLAGS;
	atomic_set(&tt_local_entry->refcount, 2);

	/* the batman interface mac address should never be purged */
	if (compare_eth(addr, soft_iface->dev_addr))
		tt_local_entry->flags |= TT_CLIENT_NOPURGE;

	tt_local_event(bat_priv, addr, tt_local_entry->flags);

	/* The local entry has to be marked as NEW to avoid sending it in
	 * a full table response going out before the next ttvn increment
	 * (consistency check) */
	tt_local_entry->flags |= TT_CLIENT_NEW;

	hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig,
		 tt_local_entry, &tt_local_entry->hash_entry);

	/* remove address from global hash if present */
	tt_global_entry = tt_global_hash_find(bat_priv, addr);

	/* Check whether it is a roaming client */
	if (tt_global_entry) {
		/* This node is probably going to update its tt table */
		tt_global_entry->orig_node->tt_poss_change = true;
		/* The global entry has to be marked as PENDING and has to be
		 * kept for consistency purposes */
		tt_global_entry->flags |= TT_CLIENT_PENDING;
		send_roam_adv(bat_priv, tt_global_entry->addr,
			      tt_global_entry->orig_node);
	}
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
}

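/*
 * tt_changes_fill_buffer - copy as many queued tt_change entries as fit into
 * the OGM buffer and keep a private copy of that buffer, so a later
 * TT_REQUEST for the same ttvn can be answered with the diff instead of the
 * full table.
 */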
int tt_changes_fill_buffer(struct bat_priv *bat_priv,
			   unsigned char *buff, int buff_len)
{
	int count = 0, tot_changes = 0;
	struct tt_change_node *entry, *safe;

	if (buff_len > 0)
		tot_changes = buff_len / tt_len(1);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	atomic_set(&bat_priv->tt_local_changes, 0);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		if (count < tot_changes) {
			memcpy(buff + tt_len(count),
			       &entry->change, sizeof(struct tt_change));
			count++;
		}
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	/* Keep the buffer for possible tt_request */
	spin_lock_bh(&bat_priv->tt_buff_lock);
	kfree(bat_priv->tt_buff);
	bat_priv->tt_buff_len = 0;
	bat_priv->tt_buff = NULL;
	/* We check whether this new OGM has no changes due to size
	 * problems */
	if (buff_len > 0) {
		/* if kmalloc() fails we will reply with the full table
		 * instead of providing the diff */
		bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC);
		if (bat_priv->tt_buff) {
			memcpy(bat_priv->tt_buff, buff, buff_len);
			bat_priv->tt_buff_len = buff_len;
		}
	}
	spin_unlock_bh(&bat_priv->tt_buff_lock);

	return count;
}

int tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	size_t buf_size, pos;
	char *buff;
	int i, ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq, "Locally retrieved addresses (from %s) "
		   "announced via TT (TTVN: %u):\n",
		   net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));

	buf_size = 1;
	/* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		__hlist_for_each_rcu(node, head)
			buf_size += 21;
		rcu_read_unlock();
	}

	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		ret = -ENOMEM;
		goto out;
	}

	buff[0] = '\0';
	pos = 0;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_local_entry, node,
					 head, hash_entry) {
			pos += snprintf(buff + pos, 22, " * %pM\n",
					tt_local_entry->addr);
		}
		rcu_read_unlock();
	}

	seq_printf(seq, "%s", buff);
	kfree(buff);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}

static void tt_local_set_pending(struct bat_priv *bat_priv,
				 struct tt_local_entry *tt_local_entry,
				 uint16_t flags)
{
	tt_local_event(bat_priv, tt_local_entry->addr,
		       tt_local_entry->flags | flags);

	/* The local client has to be marked as "pending to be removed" but
	 * has to be kept in the table in order to send it in a full table
	 * response issued before the next ttvn increment (consistency
	 * check) */
	tt_local_entry->flags |= TT_CLIENT_PENDING;
}

void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
		     const char *message, bool roaming)
{
	struct tt_local_entry *tt_local_entry = NULL;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);
	if (!tt_local_entry)
		goto out;

	tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
			     (roaming ? TT_CLIENT_ROAM : NO_FLAGS));

	bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) pending to be removed: "
		"%s\n", tt_local_entry->addr, message);
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
}

static void tt_local_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	int i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
					  head, hash_entry) {
			if (tt_local_entry->flags & TT_CLIENT_NOPURGE)
				continue;

			/* entry already marked for deletion */
			if (tt_local_entry->flags & TT_CLIENT_PENDING)
				continue;

			if (!is_out_of_time(tt_local_entry->last_seen,
					    TT_LOCAL_TIMEOUT * 1000))
				continue;

			tt_local_set_pending(bat_priv, tt_local_entry,
					     TT_CLIENT_DEL);
			bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) "
				"pending to be removed: timed out\n",
				tt_local_entry->addr);
		}
		spin_unlock_bh(list_lock);
	}
}

static void tt_local_table_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	int i;

	if (!bat_priv->tt_local_hash)
		return;

	hash = bat_priv->tt_local_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_local_entry_free_ref(tt_local_entry);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);

	bat_priv->tt_local_hash = NULL;
}

static int tt_global_init(struct bat_priv *bat_priv)
{
	if (bat_priv->tt_global_hash)
		return 1;

	bat_priv->tt_global_hash = hash_new(1024);

	if (!bat_priv->tt_global_hash)
		return 0;

	return 1;
}

static void tt_changes_list_free(struct bat_priv *bat_priv)
{
	struct tt_change_node *entry, *safe;

	spin_lock_bh(&bat_priv->tt_changes_list_lock);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		list_del(&entry->list);
		kfree(entry);
	}

	atomic_set(&bat_priv->tt_local_changes, 0);
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
}

/* caller must hold orig_node refcount */
int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
		  const unsigned char *tt_addr, uint8_t ttvn, bool roaming)
{
	struct tt_global_entry *tt_global_entry;
	struct orig_node *orig_node_tmp;
	int ret = 0;

	tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);

	if (!tt_global_entry) {
		tt_global_entry = kmalloc(sizeof(*tt_global_entry),
					  GFP_ATOMIC);
		if (!tt_global_entry)
			goto out;

		memcpy(tt_global_entry->addr, tt_addr, ETH_ALEN);
		/* Assign the new orig_node */
		atomic_inc(&orig_node->refcount);
		tt_global_entry->orig_node = orig_node;
		tt_global_entry->ttvn = ttvn;
		tt_global_entry->flags = NO_FLAGS;
		tt_global_entry->roam_at = 0;
		atomic_set(&tt_global_entry->refcount, 2);

		hash_add(bat_priv->tt_global_hash, compare_gtt,
			 choose_orig, tt_global_entry,
			 &tt_global_entry->hash_entry);
		atomic_inc(&orig_node->tt_size);
	} else {
		if (tt_global_entry->orig_node != orig_node) {
			atomic_dec(&tt_global_entry->orig_node->tt_size);
			orig_node_tmp = tt_global_entry->orig_node;
			atomic_inc(&orig_node->refcount);
			tt_global_entry->orig_node = orig_node;
			orig_node_free_ref(orig_node_tmp);
			atomic_inc(&orig_node->tt_size);
		}
		tt_global_entry->ttvn = ttvn;
		tt_global_entry->flags = NO_FLAGS;
		tt_global_entry->roam_at = 0;
	}

	bat_dbg(DBG_TT, bat_priv,
		"Creating new global tt entry: %pM (via %pM)\n",
		tt_global_entry->addr, orig_node->orig);

	/* remove address from local hash if present */
	tt_local_remove(bat_priv, tt_global_entry->addr,
			"global tt received", roaming);
	ret = 1;
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	return ret;
}

int tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_global_entry *tt_global_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	size_t buf_size, pos;
	char *buff;
	int i, ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - please "
				 "specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Globally announced TT entries received via the mesh %s\n",
		   net_dev->name);
	seq_printf(seq, "       %-13s %s       %-15s %s\n",
		   "Client", "(TTVN)", "Originator", "(Curr TTVN)");

	buf_size = 1;
	/* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via
	 * xx:xx:xx:xx:xx:xx (cur_ttvn)\n" */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		__hlist_for_each_rcu(node, head)
			buf_size += 59;
		rcu_read_unlock();
	}

	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		ret = -ENOMEM;
		goto out;
	}

	buff[0] = '\0';
	pos = 0;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_global_entry, node,
					 head, hash_entry) {
			pos += snprintf(buff + pos, 61,
					" * %pM  (%3u) via %pM     (%3u)\n",
					tt_global_entry->addr,
					tt_global_entry->ttvn,
					tt_global_entry->orig_node->orig,
					(uint8_t) atomic_read(
						&tt_global_entry->orig_node->
						last_ttvn));
		}
		rcu_read_unlock();
	}

	seq_printf(seq, "%s", buff);
	kfree(buff);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}

static void _tt_global_del(struct bat_priv *bat_priv,
			   struct tt_global_entry *tt_global_entry,
			   const char *message)
{
	if (!tt_global_entry)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Deleting global tt entry %pM (via %pM): %s\n",
		tt_global_entry->addr, tt_global_entry->orig_node->orig,
		message);

	atomic_dec(&tt_global_entry->orig_node->tt_size);

	hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig,
		    tt_global_entry->addr);
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
}

void tt_global_del(struct bat_priv *bat_priv,
		   struct orig_node *orig_node, const unsigned char *addr,
		   const char *message, bool roaming)
{
	struct tt_global_entry *tt_global_entry = NULL;

	tt_global_entry = tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	if (tt_global_entry->orig_node == orig_node) {
		if (roaming) {
			tt_global_entry->flags |= TT_CLIENT_ROAM;
			tt_global_entry->roam_at = jiffies;
			goto out;
		}
		_tt_global_del(bat_priv, tt_global_entry, message);
	}
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
}

void tt_global_del_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node, const char *message)
{
	struct tt_global_entry *tt_global_entry;
	int i;
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct hlist_node *node, *safe;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_global_entry, node, safe,
					  head, hash_entry) {
			if (tt_global_entry->orig_node == orig_node) {
				bat_dbg(DBG_TT, bat_priv,
					"Deleting global tt entry %pM "
					"(via %pM): originator time out\n",
					tt_global_entry->addr,
					tt_global_entry->orig_node->orig);
				hlist_del_rcu(node);
				tt_global_entry_free_ref(tt_global_entry);
			}
		}
		spin_unlock_bh(list_lock);
	}

	atomic_set(&orig_node->tt_size, 0);
}

static void tt_global_roam_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	int i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
					  head, hash_entry) {
			if (!(tt_global_entry->flags & TT_CLIENT_ROAM))
				continue;
			if (!is_out_of_time(tt_global_entry->roam_at,
					    TT_CLIENT_ROAM_TIMEOUT * 1000))
				continue;

			bat_dbg(DBG_TT, bat_priv, "Deleting global "
				"tt entry (%pM): Roaming timeout\n",
				tt_global_entry->addr);
			atomic_dec(&tt_global_entry->orig_node->tt_size);
			hlist_del_rcu(node);
			tt_global_entry_free_ref(tt_global_entry);
		}
		spin_unlock_bh(list_lock);
	}
}

static void tt_global_table_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	int i;

	if (!bat_priv->tt_global_hash)
		return;

	hash = bat_priv->tt_global_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_global_entry_free_ref(tt_global_entry);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);

	bat_priv->tt_global_hash = NULL;
}

struct orig_node *transtable_search(struct bat_priv *bat_priv,
				    const uint8_t *addr)
{
	struct tt_global_entry *tt_global_entry;
	struct orig_node *orig_node = NULL;

	tt_global_entry = tt_global_hash_find(bat_priv, addr);

	if (!tt_global_entry)
		goto out;

	/* A global client marked as PENDING has already moved from that
	 * originator */
	if (tt_global_entry->flags & TT_CLIENT_PENDING)
		goto free_tt;

	if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
		goto free_tt;

	orig_node = tt_global_entry->orig_node;

free_tt:
	tt_global_entry_free_ref(tt_global_entry);
out:
	return orig_node;
}

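/*
 * CRC consistency check: each table checksum below is the XOR of a per-client
 * CRC16, computed byte by byte over the client mac address. A receiver can
 * therefore compare the crc advertised for a given ttvn with the value it
 * recomputes locally and issue a TT_REQUEST on mismatch.
 */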
/* Calculates the checksum of the local table of a given orig_node
 * (i.e. of the entries announced by it in our global table) */
uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
	uint16_t total = 0, total_one;
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node;
	struct hlist_head *head;
	int i, j;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_global_entry, node,
					 head, hash_entry) {
			if (compare_eth(tt_global_entry->orig_node,
					orig_node)) {
				/* Roaming clients are in the global table for
				 * consistency only. They don't have to be
				 * taken into account while computing the
				 * global crc */
				if (tt_global_entry->flags & TT_CLIENT_ROAM)
					continue;
				total_one = 0;
				for (j = 0; j < ETH_ALEN; j++)
					total_one = crc16_byte(total_one,
						tt_global_entry->addr[j]);
				total ^= total_one;
			}
		}
		rcu_read_unlock();
	}

	return total;
}

/* Calculates the checksum of the local table */
uint16_t tt_local_crc(struct bat_priv *bat_priv)
{
	uint16_t total = 0, total_one;
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node;
	struct hlist_head *head;
	int i, j;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_local_entry, node,
					 head, hash_entry) {
			/* not yet committed clients must not be taken into
			 * account while computing the CRC */
			if (tt_local_entry->flags & TT_CLIENT_NEW)
				continue;
			total_one = 0;
			for (j = 0; j < ETH_ALEN; j++)
				total_one = crc16_byte(total_one,
						       tt_local_entry->addr[j]);
			total ^= total_one;
		}
		rcu_read_unlock();
	}

	return total;
}

static void tt_req_list_free(struct bat_priv *bat_priv)
{
	struct tt_req_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_req_list_lock);

	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		list_del(&node->list);
		kfree(node);
	}

	spin_unlock_bh(&bat_priv->tt_req_list_lock);
}

void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
			 const unsigned char *tt_buff, uint8_t tt_num_changes)
{
	uint16_t tt_buff_len = tt_len(tt_num_changes);

	/* Replace the old buffer only if I received something in the
	 * last OGM (the OGM could carry no changes) */
	spin_lock_bh(&orig_node->tt_buff_lock);
	if (tt_buff_len > 0) {
		kfree(orig_node->tt_buff);
		orig_node->tt_buff_len = 0;
		orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
		if (orig_node->tt_buff) {
			memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
			orig_node->tt_buff_len = tt_buff_len;
		}
	}
	spin_unlock_bh(&orig_node->tt_buff_lock);
}

static void tt_req_purge(struct bat_priv *bat_priv)
{
	struct tt_req_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		if (is_out_of_time(node->issued_at,
				   TT_REQUEST_TIMEOUT * 1000)) {
			list_del(&node->list);
			kfree(node);
		}
	}
	spin_unlock_bh(&bat_priv->tt_req_list_lock);
}

/* returns the pointer to the new tt_req_node struct if no request
 * has already been issued for this orig_node, NULL otherwise */
static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
					   struct orig_node *orig_node)
{
	struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;

	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
		if (compare_eth(tt_req_node_tmp, orig_node) &&
		    !is_out_of_time(tt_req_node_tmp->issued_at,
				    TT_REQUEST_TIMEOUT * 1000))
			goto unlock;
	}

	tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
	if (!tt_req_node)
		goto unlock;

	memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
	tt_req_node->issued_at = jiffies;

	list_add(&tt_req_node->list, &bat_priv->tt_req_list);
unlock:
	spin_unlock_bh(&bat_priv->tt_req_list_lock);
	return tt_req_node;
}

/* data_ptr is useless here, but has to be kept to respect the prototype */
static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
{
	const struct tt_local_entry *tt_local_entry = entry_ptr;

	if (tt_local_entry->flags & TT_CLIENT_NEW)
		return 0;
	return 1;
}

static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
{
	const struct tt_global_entry *tt_global_entry = entry_ptr;
	const struct orig_node *orig_node = data_ptr;

	if (tt_global_entry->flags & TT_CLIENT_ROAM)
		return 0;

	return (tt_global_entry->orig_node == orig_node);
}

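/*
 * tt_response_fill_table - build a TT_RESPONSE skb containing up to tt_len
 * bytes worth of tt_change records taken from the given hash. Entries are
 * filtered through valid_cb (tt_local_valid_entry/tt_global_valid_entry
 * above) and the number of records actually copied is stored in tt_data.
 */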
static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
					      struct hashtable_t *hash,
					      struct hard_iface *primary_if,
					      int (*valid_cb)(const void *,
							      const void *),
					      void *cb_data)
{
	struct tt_local_entry *tt_local_entry;
	struct tt_query_packet *tt_response;
	struct tt_change *tt_change;
	struct hlist_node *node;
	struct hlist_head *head;
	struct sk_buff *skb = NULL;
	uint16_t tt_tot, tt_count;
	ssize_t tt_query_size = sizeof(struct tt_query_packet);
	int i;

	if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
		tt_len = primary_if->soft_iface->mtu - tt_query_size;
		tt_len -= tt_len % sizeof(struct tt_change);
	}
	tt_tot = tt_len / sizeof(struct tt_change);

	skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);
	tt_response = (struct tt_query_packet *)skb_put(skb,
						    tt_query_size + tt_len);
	tt_response->ttvn = ttvn;

	tt_change = (struct tt_change *)(skb->data + tt_query_size);
	tt_count = 0;

	rcu_read_lock();
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(tt_local_entry, node,
					 head, hash_entry) {
			if (tt_count == tt_tot)
				break;

			if ((valid_cb) && (!valid_cb(tt_local_entry, cb_data)))
				continue;

			memcpy(tt_change->addr, tt_local_entry->addr, ETH_ALEN);
			tt_change->flags = NO_FLAGS;

			tt_count++;
			tt_change++;
		}
	}
	rcu_read_unlock();

	/* store in the message the number of entries we have successfully
	 * copied */
	tt_response->tt_data = htons(tt_count);

out:
	return skb;
}

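/*
 * send_tt_request - ask dst_orig_node for its translation table. The request
 * carries the ttvn and crc we currently hold so the recipient can decide
 * whether a diff is sufficient; full_table forces a complete dump. Only one
 * outstanding request per originator is kept in tt_req_list.
 */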
int send_tt_request(struct bat_priv *bat_priv, struct orig_node *dst_orig_node,
		    uint8_t ttvn, uint16_t tt_crc, bool full_table)
{
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_request;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if;
	struct tt_req_node *tt_req_node = NULL;
	int ret = 1;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet */
	tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
	if (!tt_req_node)
		goto out;

	skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	tt_request = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet));

	tt_request->packet_type = BAT_TT_QUERY;
	tt_request->version = COMPAT_VERSION;
	memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
	tt_request->ttl = TTL;
	tt_request->ttvn = ttvn;
	tt_request->tt_data = tt_crc;
	tt_request->flags = TT_REQUEST;

	if (full_table)
		tt_request->flags |= TT_FULL_TABLE;

	neigh_node = orig_node_get_router(dst_orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv, "Sending TT_REQUEST to %pM via %pM "
		"[%c]\n", dst_orig_node->orig, neigh_node->addr,
		(full_table ? 'F' : '.'));

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (ret)
		kfree_skb(skb);
	if (ret && tt_req_node) {
		spin_lock_bh(&bat_priv->tt_req_list_lock);
		list_del(&tt_req_node->list);
		spin_unlock_bh(&bat_priv->tt_req_list_lock);
		kfree(tt_req_node);
	}
	return ret;
}

static bool send_other_tt_response(struct bat_priv *bat_priv,
				   struct tt_query_packet *tt_request)
{
	struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t orig_ttvn, req_ttvn, ttvn;
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for "
		"ttvn: %u (%pM) [%c]\n", tt_request->src,
		tt_request->ttvn, tt_request->dst,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

	/* Let's get the orig node of the REAL destination */
	req_dst_orig_node = get_orig_node(bat_priv, tt_request->dst);
	if (!req_dst_orig_node)
		goto out;

	res_dst_orig_node = get_orig_node(bat_priv, tt_request->src);
	if (!res_dst_orig_node)
		goto out;

	neigh_node = orig_node_get_router(res_dst_orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
	req_ttvn = tt_request->ttvn;

	/* I don't have the requested data */
	if (orig_ttvn != req_ttvn ||
	    tt_request->tt_data != req_dst_orig_node->tt_crc)
		goto out;

	/* If the full table has explicitly been requested */
	if (tt_request->flags & TT_FULL_TABLE ||
	    !req_dst_orig_node->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, then
	 * I'll send only one packet with as much TT entries as I can */
	if (!full_table) {
		spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
		tt_len = req_dst_orig_node->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		/* Copy the last orig_node's OGM buffer */
		memcpy(tt_buff, req_dst_orig_node->tt_buff,
		       req_dst_orig_node->tt_buff_len);

		spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
	} else {
		tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
						sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_global_hash,
					     primary_if, tt_global_valid_entry,
					     req_dst_orig_node);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->packet_type = BAT_TT_QUERY;
	tt_response->version = COMPAT_VERSION;
	tt_response->ttl = TTL;
	memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
		res_dst_orig_node->orig, neigh_node->addr,
		req_dst_orig_node->orig, req_ttvn);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);

out:
	if (res_dst_orig_node)
		orig_node_free_ref(res_dst_orig_node);
	if (req_dst_orig_node)
		orig_node_free_ref(req_dst_orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	return ret;
}

static bool send_my_tt_response(struct bat_priv *bat_priv,
				struct tt_query_packet *tt_request)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t my_ttvn, req_ttvn, ttvn;
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for "
		"ttvn: %u (me) [%c]\n", tt_request->src,
		tt_request->ttvn,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

	my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
	req_ttvn = tt_request->ttvn;

	orig_node = get_orig_node(bat_priv, tt_request->src);
	if (!orig_node)
		goto out;

	neigh_node = orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* If the full table has been explicitly requested or the gap
	 * is too big send the whole local translation table */
	if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
	    !bat_priv->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, then
	 * I'll send only one packet with as much TT entries as I can */
	if (!full_table) {
		spin_lock_bh(&bat_priv->tt_buff_lock);
		tt_len = bat_priv->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		memcpy(tt_buff, bat_priv->tt_buff,
		       bat_priv->tt_buff_len);
		spin_unlock_bh(&bat_priv->tt_buff_lock);
	} else {
		tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
						sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_local_hash,
					     primary_if, tt_local_valid_entry,
					     NULL);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->packet_type = BAT_TT_QUERY;
	tt_response->version = COMPAT_VERSION;
	tt_response->ttl = TTL;
	memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE to %pM via %pM [%c]\n",
		orig_node->orig, neigh_node->addr,
		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&bat_priv->tt_buff_lock);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	/* This packet was for me, so it doesn't need to be re-routed */
	return true;
}

bool send_tt_response(struct bat_priv *bat_priv,
		      struct tt_query_packet *tt_request)
{
	if (is_my_mac(tt_request->dst))
		return send_my_tt_response(bat_priv, tt_request);
	else
		return send_other_tt_response(bat_priv, tt_request);
}

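/*
 * _tt_update_changes - apply a list of tt_change records received from an
 * originator: TT_CLIENT_DEL entries remove global clients (honouring the
 * roaming flag), all other entries are added with the advertised ttvn.
 */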
static void _tt_update_changes(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       struct tt_change *tt_change,
			       uint16_t tt_num_changes, uint8_t ttvn)
{
	int i;

	for (i = 0; i < tt_num_changes; i++) {
		if ((tt_change + i)->flags & TT_CLIENT_DEL)
			tt_global_del(bat_priv, orig_node,
				      (tt_change + i)->addr,
				      "tt removed by changes",
				      (tt_change + i)->flags & TT_CLIENT_ROAM);
		else
			if (!tt_global_add(bat_priv, orig_node,
					   (tt_change + i)->addr, ttvn, false))
				/* If we fail to store a global entry, stop
				 * the updating procedure without committing
				 * the ttvn change: this avoids sending
				 * corrupted data in a later tt_request */
				return;
	}
}

static void tt_fill_gtable(struct bat_priv *bat_priv,
			   struct tt_query_packet *tt_response)
{
	struct orig_node *orig_node = NULL;

	orig_node = orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	/* Purge the old table first.. */
	tt_global_del_orig(bat_priv, orig_node, "Received full table");

	_tt_update_changes(bat_priv, orig_node,
			   (struct tt_change *)(tt_response + 1),
			   tt_response->tt_data, tt_response->ttvn);

	spin_lock_bh(&orig_node->tt_buff_lock);
	kfree(orig_node->tt_buff);
	orig_node->tt_buff_len = 0;
	orig_node->tt_buff = NULL;
	spin_unlock_bh(&orig_node->tt_buff_lock);

	atomic_set(&orig_node->last_ttvn, tt_response->ttvn);

out:
	if (orig_node)
		orig_node_free_ref(orig_node);
}

void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node,
		       uint16_t tt_num_changes, uint8_t ttvn,
		       struct tt_change *tt_change)
{
	_tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
			   ttvn);

	tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
			    tt_num_changes);
	atomic_set(&orig_node->last_ttvn, ttvn);
}

bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
{
	struct tt_local_entry *tt_local_entry = NULL;
	bool ret = false;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);
	if (!tt_local_entry)
		goto out;
	/* Check if the client has been logically deleted (but is kept for
	 * consistency purposes) */
	if (tt_local_entry->flags & TT_CLIENT_PENDING)
		goto out;
	ret = true;
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	return ret;
}

void handle_tt_response(struct bat_priv *bat_priv,
			struct tt_query_packet *tt_response)
{
	struct tt_req_node *node, *safe;
	struct orig_node *orig_node = NULL;

	bat_dbg(DBG_TT, bat_priv, "Received TT_RESPONSE from %pM for "
		"ttvn %d t_size: %d [%c]\n",
		tt_response->src, tt_response->ttvn,
		tt_response->tt_data,
		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	orig_node = orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	if (tt_response->flags & TT_FULL_TABLE)
		tt_fill_gtable(bat_priv, tt_response);
	else
		tt_update_changes(bat_priv, orig_node, tt_response->tt_data,
				  tt_response->ttvn,
				  (struct tt_change *)(tt_response + 1));

	/* Delete the tt_req_node from pending tt_requests list */
	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		if (!compare_eth(node->addr, tt_response->src))
			continue;
		list_del(&node->list);
		kfree(node);
	}
	spin_unlock_bh(&bat_priv->tt_req_list_lock);

	/* Recalculate the CRC for this orig_node and store it */
	orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
	/* Roaming phase is over: tables are in sync again. I can
	 * unset the flag */
	orig_node->tt_poss_change = false;
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
}

int tt_init(struct bat_priv *bat_priv)
{
	if (!tt_local_init(bat_priv))
		return 0;

	if (!tt_global_init(bat_priv))
		return 0;

	tt_start_timer(bat_priv);

	return 1;
}

static void tt_roam_list_free(struct bat_priv *bat_priv)
{
	struct tt_roam_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);

	list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
		list_del(&node->list);
		kfree(node);
	}

	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
}

static void tt_roam_purge(struct bat_priv *bat_priv)
{
	struct tt_roam_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
		if (!is_out_of_time(node->first_time,
				    ROAMING_MAX_TIME * 1000))
			continue;

		list_del(&node->list);
		kfree(node);
	}
	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
}

/* This function checks whether the client already reached the
 * maximum number of possible roaming phases. In this case the ROAMING_ADV
 * will not be sent.
 *
 * returns true if the ROAMING_ADV can be sent, false otherwise */
static bool tt_check_roam_count(struct bat_priv *bat_priv,
				uint8_t *client)
{
	struct tt_roam_node *tt_roam_node;
	bool ret = false;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);
	/* Check whether the client is already in the roaming list and, if
	 * so, whether it is still allowed to roam */
	list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
		if (!compare_eth(tt_roam_node->addr, client))
			continue;

		if (is_out_of_time(tt_roam_node->first_time,
				   ROAMING_MAX_TIME * 1000))
			continue;

		if (!atomic_dec_not_zero(&tt_roam_node->counter))
			/* Sorry, you roamed too many times! */
			goto unlock;
		ret = true;
		break;
	}

	if (!ret) {
		tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
		if (!tt_roam_node)
			goto unlock;

		tt_roam_node->first_time = jiffies;
		atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
		memcpy(tt_roam_node->addr, client, ETH_ALEN);

		list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
		ret = true;
	}

unlock:
	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
	return ret;
}

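/*
 * send_roam_adv - inform the originator that previously served the given
 * client that the client has roamed to us, so it can keep forwarding traffic
 * correctly until the next ttvn bump. tt_check_roam_count() rate-limits
 * these advertisements per client.
 */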
void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
		   struct orig_node *orig_node)
{
	struct neigh_node *neigh_node = NULL;
	struct sk_buff *skb = NULL;
	struct roam_adv_packet *roam_adv_packet;
	int ret = 1;
	struct hard_iface *primary_if;

	/* before going on we have to check whether the client has
	 * already roamed to us too many times */
	if (!tt_check_roam_count(bat_priv, client))
		goto out;

	skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
					sizeof(struct roam_adv_packet));

	roam_adv_packet->packet_type = BAT_ROAM_ADV;
	roam_adv_packet->version = COMPAT_VERSION;
	roam_adv_packet->ttl = TTL;
	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;
	memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	hardif_free_ref(primary_if);
	memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
	memcpy(roam_adv_packet->client, client, ETH_ALEN);

	neigh_node = orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
		orig_node->orig, client, neigh_node->addr);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (ret)
		kfree_skb(skb);
	return;
}

static void tt_purge(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, tt_work);

	tt_local_purge(bat_priv);
	tt_global_roam_purge(bat_priv);
	tt_req_purge(bat_priv);
	tt_roam_purge(bat_priv);

	tt_start_timer(bat_priv);
}

void tt_free(struct bat_priv *bat_priv)
{
	cancel_delayed_work_sync(&bat_priv->tt_work);

	tt_local_table_free(bat_priv);
	tt_global_table_free(bat_priv);
	tt_req_list_free(bat_priv);
	tt_changes_list_free(bat_priv);
	tt_roam_list_free(bat_priv);

	kfree(bat_priv->tt_buff);
}

/* This function will reset the specified flags from all the entries in
 * the given hash table and will increment num_local_tt for each involved
 * entry */
static void tt_local_reset_flags(struct bat_priv *bat_priv, uint16_t flags)
{
	int i;
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_local_entry *tt_local_entry;

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_local_entry, node,
					 head, hash_entry) {
			if (!(tt_local_entry->flags & flags))
				continue;
			tt_local_entry->flags &= ~flags;
			atomic_inc(&bat_priv->num_local_tt);
		}
		rcu_read_unlock();
	}
}

/* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	int i;

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
					  head, hash_entry) {
			if (!(tt_local_entry->flags & TT_CLIENT_PENDING))
				continue;

			bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry "
				"(%pM): pending\n", tt_local_entry->addr);

			atomic_dec(&bat_priv->num_local_tt);
			hlist_del_rcu(node);
			tt_local_entry_free_ref(tt_local_entry);
		}
		spin_unlock_bh(list_lock);
	}
}

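/*
 * tt_commit_changes - called once per OGM interval: newly learned clients
 * lose their TT_CLIENT_NEW mark, clients pending deletion are purged and the
 * local ttvn is incremented so the committed changes become visible to the
 * mesh.
 */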
void tt_commit_changes(struct bat_priv *bat_priv)
{
	tt_local_reset_flags(bat_priv, TT_CLIENT_NEW);
	tt_local_purge_pending_clients(bat_priv);

	/* Increment the TTVN only once per OGM interval */
	atomic_inc(&bat_priv->ttvn);
	bat_priv->tt_poss_change = false;
}