/*
 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

/* increase the reference counter for this originator */

#include "main.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "unicast.h"
#include "soft-interface.h"

static void purge_orig(struct work_struct *work);

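/* schedule the periodic originator purge; the work item re-arms itself
 * roughly once per second (1 * HZ) */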
static void start_purge_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
	queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
}

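/* allocate the originator hash table (1024 buckets) and start the
 * periodic purge timer */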
int originator_init(struct bat_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 1;

	bat_priv->orig_hash = hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	start_purge_timer(bat_priv);
	return 1;

err:
	return 0;
}

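/* neigh_node objects are reference counted; the final put defers the
 * actual kfree() to an RCU callback so that lockless readers traversing
 * the neighbor lists never see freed memory */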
static void neigh_node_free_rcu(struct rcu_head *rcu)
{
	struct neigh_node *neigh_node;

	neigh_node = container_of(rcu, struct neigh_node, rcu);
	kfree(neigh_node);
}

void neigh_node_free_ref(struct neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		call_rcu(&neigh_node->rcu, neigh_node_free_rcu);
}

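/* allocate a new last-hop neighbor and link it into the originator's
 * neigh_list under neigh_list_lock; the refcount starts at 2 so the
 * caller keeps its own reference to the returned object */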
struct neigh_node *create_neighbor(struct orig_node *orig_node,
				   struct orig_node *orig_neigh_node,
				   uint8_t *neigh,
				   struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node;

	bat_dbg(DBG_BATMAN, bat_priv,
		"Creating new last-hop neighbor of originator\n");

	neigh_node = kzalloc(sizeof(struct neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		return NULL;

	INIT_HLIST_NODE(&neigh_node->list);
	INIT_LIST_HEAD(&neigh_node->bonding_list);

	memcpy(neigh_node->addr, neigh, ETH_ALEN);
	neigh_node->orig_node = orig_neigh_node;
	neigh_node->if_incoming = if_incoming;

	/* extra reference for return */
	atomic_set(&neigh_node->refcount, 2);

	spin_lock_bh(&orig_node->neigh_list_lock);
	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
	spin_unlock_bh(&orig_node->neigh_list_lock);

	return neigh_node;
}

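/* RCU callback that tears down an orig_node: it drops all bonding and
 * neighbor references, releases fragment and global translation-table
 * state and finally frees the per-interface OGM counters */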
static void orig_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node, *node_tmp;
	struct neigh_node *neigh_node, *tmp_neigh_node;
	struct orig_node *orig_node;

	orig_node = container_of(rcu, struct orig_node, rcu);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all bonding members ... */
	list_for_each_entry_safe(neigh_node, tmp_neigh_node,
				 &orig_node->bond_list, bonding_list) {
		list_del_rcu(&neigh_node->bonding_list);
		neigh_node_free_ref(neigh_node);
	}

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		neigh_node_free_ref(neigh_node);
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	frag_list_free(&orig_node->frag_list);
	hna_global_del_orig(orig_node->bat_priv, orig_node,
			    "originator timed out");

	kfree(orig_node->bcast_own);
	kfree(orig_node->bcast_own_sum);
	kfree(orig_node);
}

void orig_node_free_ref(struct orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		call_rcu(&orig_node->rcu, orig_node_free_rcu);
}

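/* tear down the whole originator hash on shutdown: stop the purge worker,
 * then drop one reference per hash entry under the per-bucket list lock */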
void originator_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct orig_node *orig_node;
	int i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			orig_node_free_ref(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);
}

/* this function finds or creates an originator entry for the given
 * address if it does not exist */
struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
{
	struct orig_node *orig_node;
	int size;
	int hash_added;

	orig_node = orig_hash_find(bat_priv, addr);
	if (orig_node)
		return orig_node;

	bat_dbg(DBG_BATMAN, bat_priv,
		"Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(struct orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_LIST_HEAD(&orig_node->bond_list);
	spin_lock_init(&orig_node->ogm_cnt_lock);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->bat_priv = bat_priv;
	memcpy(orig_node->orig, addr, ETH_ALEN);
	orig_node->router = NULL;
	orig_node->hna_buff = NULL;
	orig_node->bcast_seqno_reset = jiffies - 1
					- msecs_to_jiffies(RESET_PROTECTION_MS);
	orig_node->batman_seqno_reset = jiffies - 1
					- msecs_to_jiffies(RESET_PROTECTION_MS);

	atomic_set(&orig_node->bond_candidates, 0);

	size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS;

	orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
	if (!orig_node->bcast_own)
		goto free_orig_node;

	size = bat_priv->num_ifaces * sizeof(uint8_t);
	orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);

	INIT_LIST_HEAD(&orig_node->frag_list);
	orig_node->last_frag_packet = 0;

	if (!orig_node->bcast_own_sum)
		goto free_bcast_own;

	hash_added = hash_add(bat_priv->orig_hash, compare_orig,
			      choose_orig, orig_node, &orig_node->hash_entry);
	if (hash_added < 0)
		goto free_bcast_own_sum;

	return orig_node;
free_bcast_own_sum:
	kfree(orig_node->bcast_own_sum);
free_bcast_own:
	kfree(orig_node->bcast_own);
free_orig_node:
	kfree(orig_node);
	return NULL;
}

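/* remove neighbors that timed out or whose incoming interface went away;
 * while scanning, remember the best remaining neighbor (highest tq_avg)
 * so the caller can re-route through it */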
static bool purge_orig_neighbors(struct bat_priv *bat_priv,
				 struct orig_node *orig_node,
				 struct neigh_node **best_neigh_node)
{
	struct hlist_node *node, *node_tmp;
	struct neigh_node *neigh_node;
	bool neigh_purged = false;

	*best_neigh_node = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {

		if ((time_after(jiffies,
			neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
		    (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
		    (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
		    (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {

			if ((neigh_node->if_incoming->if_status ==
							IF_INACTIVE) ||
			    (neigh_node->if_incoming->if_status ==
							IF_NOT_IN_USE) ||
			    (neigh_node->if_incoming->if_status ==
							IF_TO_BE_REMOVED))
				bat_dbg(DBG_BATMAN, bat_priv,
					"neighbor purge: originator %pM, "
					"neighbor: %pM, iface: %s\n",
					orig_node->orig, neigh_node->addr,
					neigh_node->if_incoming->net_dev->name);
			else
				bat_dbg(DBG_BATMAN, bat_priv,
					"neighbor timeout: originator %pM, "
					"neighbor: %pM, last_valid: %lu\n",
					orig_node->orig, neigh_node->addr,
					(neigh_node->last_valid / HZ));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			bonding_candidate_del(orig_node, neigh_node);
			neigh_node_free_ref(neigh_node);
		} else {
			if ((!*best_neigh_node) ||
			    (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
				*best_neigh_node = neigh_node;
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

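/* returns true if the originator itself timed out and should be deleted;
 * otherwise purges its stale neighbors and updates the route if needed */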
static bool purge_orig_node(struct bat_priv *bat_priv,
			    struct orig_node *orig_node)
{
	struct neigh_node *best_neigh_node;

	if (time_after(jiffies,
		orig_node->last_valid + 2 * PURGE_TIMEOUT * HZ)) {

		bat_dbg(DBG_BATMAN, bat_priv,
			"Originator timeout: originator %pM, last_valid %lu\n",
			orig_node->orig, (orig_node->last_valid / HZ));
		return true;
	} else {
		if (purge_orig_neighbors(bat_priv, orig_node,
					 &best_neigh_node)) {
			update_routes(bat_priv, orig_node,
				      best_neigh_node,
				      orig_node->hna_buff,
				      orig_node->hna_buff_len);
		}
	}

	return false;
}

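/* walk every hash bucket, delete originators that purge_orig_node() marked
 * as expired, drop stale fragment lists, then let the gateway and
 * soft-interface code do their own housekeeping */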
static void _purge_orig(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct orig_node *orig_node;
	int i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node, node_tmp,
					  head, hash_entry) {
			if (purge_orig_node(bat_priv, orig_node)) {
				if (orig_node->gw_flags)
					gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(node);
				orig_node_free_ref(orig_node);
				continue;
			}

			if (time_after(jiffies, orig_node->last_frag_packet +
						msecs_to_jiffies(FRAG_TIMEOUT)))
				frag_list_free(&orig_node->frag_list);
		}
		spin_unlock_bh(list_lock);
	}

	gw_node_purge(bat_priv);
	gw_election(bat_priv);

	softif_neigh_purge(bat_priv);
}

static void purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, orig_work);

	_purge_orig(bat_priv);
	start_purge_timer(bat_priv);
}

void purge_orig_ref(struct bat_priv *bat_priv)
{
	_purge_orig(bat_priv);
}

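/* seq_file output: one line per originator with last-seen time, the current
 * nexthop and its TQ value, plus the list of potential nexthops */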
int orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	struct orig_node *orig_node;
	struct neigh_node *neigh_node;
	int batman_count = 0;
	int last_seen_secs;
	int last_seen_msecs;
	int i;

	if ((!bat_priv->primary_if) ||
	    (bat_priv->primary_if->if_status != IF_ACTIVE)) {
		if (!bat_priv->primary_if)
			return seq_printf(seq, "BATMAN mesh %s disabled - "
				   "please specify interfaces to enable it\n",
				   net_dev->name);

		return seq_printf(seq, "BATMAN mesh %s "
				  "disabled - primary interface not active\n",
				  net_dev->name);
	}

	seq_printf(seq, "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n",
		   SOURCE_VERSION, REVISION_VERSION_STR,
		   bat_priv->primary_if->net_dev->name,
		   bat_priv->primary_if->net_dev->dev_addr, net_dev->name);
	seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
		   "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
		   "outgoingIF", "Potential nexthops");

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			if (!orig_node->router)
				continue;

			if (orig_node->router->tq_avg == 0)
				continue;

			last_seen_secs = jiffies_to_msecs(jiffies -
						orig_node->last_valid) / 1000;
			last_seen_msecs = jiffies_to_msecs(jiffies -
						orig_node->last_valid) % 1000;

			neigh_node = orig_node->router;
			seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
				   orig_node->orig, last_seen_secs,
				   last_seen_msecs, neigh_node->tq_avg,
				   neigh_node->addr,
				   neigh_node->if_incoming->net_dev->name);

			hlist_for_each_entry_rcu(neigh_node, node_tmp,
						 &orig_node->neigh_list, list) {
				seq_printf(seq, " %pM (%3i)", neigh_node->addr,
					   neigh_node->tq_avg);
			}

			seq_printf(seq, "\n");
			batman_count++;
		}
		rcu_read_unlock();
	}

	if (batman_count == 0)
		seq_printf(seq, "No batman nodes in range ...\n");

	return 0;
}

static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
{
	void *data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS,
			   GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	memcpy(data_ptr, orig_node->bcast_own,
	       (max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS);
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       (max_if_num - 1) * sizeof(uint8_t));
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

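/* a new hard interface was registered: resize the OGM bookkeeping of every
 * originator under its ogm_cnt_lock */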
int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	int i, ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = orig_node_add_if(orig_node, max_if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -1)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}

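/* shrink bcast_own and bcast_own_sum again, copying the chunks before and
 * after the slot of the interface that is being removed */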
static int orig_node_del_if(struct orig_node *orig_node,
			    int max_if_num, int del_if_num)
{
	void *data_ptr = NULL;
	int chunk_size;

	/* last interface was removed */
	if (max_if_num == 0)
		goto free_bcast_own;

	chunk_size = sizeof(unsigned long) * NUM_WORDS;
	data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	/* copy first part */
	memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);

	/* copy second part */
	memcpy(data_ptr + del_if_num * chunk_size,
	       orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
	       (max_if_num - del_if_num) * chunk_size);

free_bcast_own:
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	if (max_if_num == 0)
		goto free_own_sum;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       del_if_num * sizeof(uint8_t));

	memcpy(data_ptr + del_if_num * sizeof(uint8_t),
	       orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
	       (max_if_num - del_if_num) * sizeof(uint8_t));

free_own_sum:
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

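/* a hard interface is going away: shrink every originator's OGM bookkeeping
 * and renumber the remaining interfaces attached to the same soft interface */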
int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct hard_iface *hard_iface_tmp;
	struct orig_node *orig_node;
	int i, ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = orig_node_del_if(orig_node, max_if_num,
					       hard_iface->if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -1)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &hardif_list, list) {
		if (hard_iface_tmp->if_status == IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}