/* Copyright (C) 2009-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
21 #include "distributed-arp-table.h"
22 #include "originator.h"
24 #include "translation-table.h"
26 #include "gateway_client.h"
27 #include "hard-interface.h"
29 #include "soft-interface.h"
30 #include "bridge_loop_avoidance.h"
31 #include "network-coding.h"

static struct lock_class_key batadv_orig_hash_lock_class_key;

static void batadv_purge_orig(struct work_struct *work);

/* returns 1 if they are the same originator */
static int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct batadv_orig_node,
					 hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}
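
/**
 * batadv_originator_init - allocate the originator hash and schedule the
 * periodic originator purge work
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Returns 0 on success or a negative error code in case of failure.
 */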
int batadv_originator_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 0;

	bat_priv->orig_hash = batadv_hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	batadv_hash_set_lock_class(bat_priv->orig_hash,
				   &batadv_orig_hash_lock_class_key);

	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));

	return 0;

err:
	return -ENOMEM;
}
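
/**
 * batadv_neigh_node_free_ref - decrement the neighbor refcounter and free the
 * neigh node via kfree_rcu once the counter reaches zero
 * @neigh_node: the neigh node to free
 */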
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		kfree_rcu(neigh_node, rcu);
}

/* increases the refcounter of a found router */
struct batadv_neigh_node *
batadv_orig_node_get_router(struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *router;

	rcu_read_lock();
	router = rcu_dereference(orig_node->router);

	/* do not hand out a router whose refcount already dropped to zero */
	if (router && !atomic_inc_not_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}
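
/**
 * batadv_neigh_node_new - allocate and initialise a new neighbor object
 * @hard_iface: the interface the neighbor was detected on
 * @neigh_addr: the mac address of the new neighbor
 * @seqno: the first sequence number seen from this neighbor (used here only
 * for the debug output)
 *
 * Returns the new neigh node holding an extra reference for the caller, or
 * NULL if the allocation failed.
 */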
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
		      const uint8_t *neigh_addr, uint32_t seqno)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_neigh_node *neigh_node;

	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		goto out;

	INIT_HLIST_NODE(&neigh_node->list);

	memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
	spin_lock_init(&neigh_node->lq_update_lock);

	/* extra reference for return */
	atomic_set(&neigh_node->refcount, 2);

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new neighbor %pM, initial seqno %d\n",
		   neigh_addr, seqno);

out:
	return neigh_node;
}
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
	struct batadv_orig_node *orig_node;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all bonding members ... */
	list_for_each_entry_safe(neigh_node, tmp_neigh_node,
				 &orig_node->bond_list, bonding_list) {
		list_del_rcu(&neigh_node->bonding_list);
		batadv_neigh_node_free_ref(neigh_node);
	}

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		batadv_neigh_node_free_ref(neigh_node);
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* Free nc_nodes */
	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

	batadv_frag_list_free(&orig_node->frag_list);
	batadv_tt_global_del_orig(orig_node->bat_priv, orig_node,
				  "originator timed out");

	kfree(orig_node->tt_buff);
	kfree(orig_node->bcast_own);
	kfree(orig_node->bcast_own_sum);
	kfree(orig_node);
}

/**
 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
 * schedule an rcu callback for freeing it
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}

/**
 * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
 * possibly free it (without rcu callback)
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		batadv_orig_node_free_rcu(&orig_node->rcu);
}
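
/**
 * batadv_originator_free - cancel the purge work and release every originator
 * stored in the hash before destroying the hash itself
 * @bat_priv: the bat priv with all the soft interface information
 */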
void batadv_originator_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(&orig_node->hash_entry);
			batadv_orig_node_free_ref(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);
}

/* this function finds or creates an originator entry for the given
 * address if it does not exist
 */
struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
					      const uint8_t *addr)
{
	struct batadv_orig_node *orig_node;
	int size;
	int hash_added;
	unsigned long reset_time;

	orig_node = batadv_orig_hash_find(bat_priv, addr);
	if (orig_node)
		return orig_node;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_LIST_HEAD(&orig_node->bond_list);
	spin_lock_init(&orig_node->ogm_cnt_lock);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);

	batadv_nc_init_orig(orig_node);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->tt_initialised = false;
	orig_node->bat_priv = bat_priv;
	memcpy(orig_node->orig, addr, ETH_ALEN);
	batadv_dat_init_orig_node_addr(orig_node);
	orig_node->router = NULL;
	orig_node->tt_crc = 0;
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	atomic_set(&orig_node->tt_size, 0);
	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_node->bcast_seqno_reset = reset_time;
	orig_node->batman_seqno_reset = reset_time;

	atomic_set(&orig_node->bond_candidates, 0);

	size = bat_priv->num_ifaces * sizeof(unsigned long) * BATADV_NUM_WORDS;

	orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
	if (!orig_node->bcast_own)
		goto free_orig_node;

	size = bat_priv->num_ifaces * sizeof(uint8_t);
	orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);

	INIT_LIST_HEAD(&orig_node->frag_list);
	orig_node->last_frag_packet = 0;

	if (!orig_node->bcast_own_sum)
		goto free_bcast_own;

	hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
				     batadv_choose_orig, orig_node,
				     &orig_node->hash_entry);
	if (hash_added != 0)
		goto free_bcast_own_sum;

	return orig_node;

free_bcast_own_sum:
	kfree(orig_node->bcast_own_sum);
free_bcast_own:
	kfree(orig_node->bcast_own);
free_orig_node:
	kfree(orig_node);
	return NULL;
}
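
/**
 * batadv_purge_orig_neighbors - remove timed out or deactivated neighbors of
 * an originator and remember the best remaining one
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: the originator whose neighbor list is purged
 * @best_neigh_node: buffer for the surviving neighbor with the highest tq_avg
 * (NULL if none is left)
 *
 * Returns true if at least one neighbor was purged, false otherwise.
 */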
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
			    struct batadv_orig_node *orig_node,
			    struct batadv_neigh_node **best_neigh_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	bool neigh_purged = false;
	unsigned long last_seen;
	struct batadv_hard_iface *if_incoming;

	*best_neigh_node = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		last_seen = neigh_node->last_seen;
		if_incoming = neigh_node->if_incoming;

		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
					   orig_node->orig, neigh_node->addr,
					   if_incoming->net_dev->name);
			else
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
					   orig_node->orig, neigh_node->addr,
					   jiffies_to_msecs(last_seen));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			batadv_bonding_candidate_del(orig_node, neigh_node);
			batadv_neigh_node_free_ref(neigh_node);
		} else {
			if ((!*best_neigh_node) ||
			    (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
				*best_neigh_node = neigh_node;
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}
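
/**
 * batadv_purge_orig_node - check whether an originator has to be removed
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: the originator to check
 *
 * Purges stale neighbors and updates the route if the best neighbor changed.
 * Returns true if the originator itself timed out and should be deleted from
 * the hash, false otherwise.
 */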
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *best_neigh_node;

	if (batadv_has_timed_out(orig_node->last_seen,
				 2 * BATADV_PURGE_TIMEOUT)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Originator timeout: originator %pM, last_seen %u\n",
			   orig_node->orig,
			   jiffies_to_msecs(orig_node->last_seen));
		return true;
	} else {
		if (batadv_purge_orig_neighbors(bat_priv, orig_node,
						&best_neigh_node))
			batadv_update_route(bat_priv, orig_node,
					    best_neigh_node);
	}

	return false;
}
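
/**
 * _batadv_purge_orig - walk the originator hash and purge outdated entries
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Removes timed out originators (and their gateway entries), frees expired
 * fragment buffers and finally triggers the gateway purge and re-election.
 */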
static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			if (batadv_purge_orig_node(bat_priv, orig_node)) {
				if (orig_node->gw_flags)
					batadv_gw_node_delete(bat_priv,
							      orig_node);
				hlist_del_rcu(&orig_node->hash_entry);
				batadv_orig_node_free_ref(orig_node);
				continue;
			}

			if (batadv_has_timed_out(orig_node->last_frag_packet,
						 BATADV_FRAG_TIMEOUT))
				batadv_frag_list_free(&orig_node->frag_list);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_gw_node_purge(bat_priv);
	batadv_gw_election(bat_priv);
}
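
/**
 * batadv_purge_orig - periodic work wrapping _batadv_purge_orig
 * @work: the work_struct embedded in bat_priv->orig_work
 *
 * Runs the purge and re-arms itself after BATADV_ORIG_WORK_PERIOD.
 */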
static void batadv_purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
	_batadv_purge_orig(bat_priv);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}

void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
	_batadv_purge_orig(bat_priv);
}
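
/**
 * batadv_orig_seq_print_text - print the originator table to the given
 * seq_file: one line per originator with its best router and the list of
 * potential nexthops
 * @seq: the seq_file to write to
 * @offset: unused seq_file position argument
 */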
int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *primary_if;
	struct batadv_orig_node *orig_node;
	struct batadv_neigh_node *neigh_node, *neigh_node_tmp;
	int batman_count = 0;
	int last_seen_secs;
	int last_seen_msecs;
	unsigned long last_seen_jiffies;
	uint32_t i;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		goto out;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name);
	seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
		   "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
		   "Nexthop", "outgoingIF", "Potential nexthops");

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			neigh_node = batadv_orig_node_get_router(orig_node);
			if (!neigh_node)
				continue;

			if (neigh_node->tq_avg == 0)
				goto next;

			last_seen_jiffies = jiffies - orig_node->last_seen;
			last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
			last_seen_secs = last_seen_msecs / 1000;
			last_seen_msecs = last_seen_msecs % 1000;

			seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
				   orig_node->orig, last_seen_secs,
				   last_seen_msecs, neigh_node->tq_avg,
				   neigh_node->addr,
				   neigh_node->if_incoming->net_dev->name);

			hlist_for_each_entry_rcu(neigh_node_tmp,
						 &orig_node->neigh_list, list) {
				seq_printf(seq, " %pM (%3i)",
					   neigh_node_tmp->addr,
					   neigh_node_tmp->tq_avg);
			}

			seq_puts(seq, "\n");
			batman_count++;

next:
			batadv_neigh_node_free_ref(neigh_node);
		}
		rcu_read_unlock();
	}

	if (batman_count == 0)
		seq_puts(seq, "No batman nodes in range ...\n");

out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return 0;
}
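
/**
 * batadv_orig_node_add_if - grow the per interface broadcast counters of an
 * originator to make room for a newly added hard interface
 * @orig_node: the originator to resize
 * @max_if_num: the new number of interfaces
 *
 * Returns 0 on success or -ENOMEM if a reallocation failed.
 */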
static int batadv_orig_node_add_if(struct batadv_orig_node *orig_node,
				   int max_if_num)
{
	void *data_ptr;
	size_t data_size, old_size;

	data_size = max_if_num * sizeof(unsigned long) * BATADV_NUM_WORDS;
	old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS;
	data_ptr = kmalloc(data_size, GFP_ATOMIC);
	if (!data_ptr)
		return -ENOMEM;

	memcpy(data_ptr, orig_node->bcast_own, old_size);
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr)
		return -ENOMEM;

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       (max_if_num - 1) * sizeof(uint8_t));
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}
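
/**
 * batadv_orig_hash_add_if - resize the counters of all originators after a
 * hard interface was added to the soft interface
 * @hard_iface: the newly added hard interface
 * @max_if_num: the new total number of interfaces
 *
 * Returns 0 on success or -ENOMEM if one of the originators could not be
 * resized.
 */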
int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = batadv_orig_node_add_if(orig_node, max_if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}
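
/**
 * batadv_orig_node_del_if - shrink the per interface broadcast counters of an
 * originator after a hard interface was removed
 * @orig_node: the originator to resize
 * @max_if_num: the remaining number of interfaces
 * @del_if_num: the index of the removed interface
 *
 * Returns 0 on success or -ENOMEM if a reallocation failed.
 */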
static int batadv_orig_node_del_if(struct batadv_orig_node *orig_node,
				   int max_if_num, int del_if_num)
{
	void *data_ptr = NULL;
	int chunk_size;

	/* last interface was removed */
	if (max_if_num == 0)
		goto free_bcast_own;

	chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
	data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
	if (!data_ptr)
		return -ENOMEM;

	/* copy first part */
	memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);

	/* copy second part */
	memcpy((char *)data_ptr + del_if_num * chunk_size,
	       orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
	       (max_if_num - del_if_num) * chunk_size);

free_bcast_own:
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	if (max_if_num == 0)
		goto free_own_sum;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr)
		return -ENOMEM;

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       del_if_num * sizeof(uint8_t));

	memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
	       orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
	       (max_if_num - del_if_num) * sizeof(uint8_t));

free_own_sum:
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}
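
/**
 * batadv_orig_hash_del_if - resize the counters of all originators and
 * renumber the remaining hard interfaces after one interface was removed
 * @hard_iface: the hard interface being removed
 * @max_if_num: the remaining number of interfaces
 *
 * Returns 0 on success or -ENOMEM if one of the originators could not be
 * resized.
 */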
int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *hard_iface_tmp;
	struct batadv_orig_node *orig_node;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = batadv_orig_node_del_if(orig_node, max_if_num,
						      hard_iface->if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}