/* net/batman-adv/originator.c */
/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "main.h"
#include "distributed-arp-table.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "soft-interface.h"
#include "bridge_loop_avoidance.h"
#include "network-coding.h"
#include "fragmentation.h"
/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;

static void batadv_purge_orig(struct work_struct *work);
/* returns 1 if they are the same originator */
int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
        const void *data1 = container_of(node, struct batadv_orig_node,
                                         hash_entry);

        return batadv_compare_eth(data1, data2);
}
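/* Note: batadv_compare_eth() compares the first ETH_ALEN bytes behind both
 * pointers, so passing the orig_node itself as data1 relies on the originator
 * MAC address being the first member of struct batadv_orig_node.
 */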
/**
 * batadv_orig_node_vlan_get - get an orig_node_vlan object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns the vlan object identified by vid and belonging to orig_node or
 * NULL if it does not exist.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
                          unsigned short vid)
{
        struct batadv_orig_node_vlan *vlan = NULL, *tmp;

        rcu_read_lock();
        list_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
                if (tmp->vid != vid)
                        continue;

                if (!atomic_inc_not_zero(&tmp->refcount))
                        continue;

                vlan = tmp;

                break;
        }
        rcu_read_unlock();

        return vlan;
}
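/* Illustrative caller pattern (a sketch, not part of this file):
 *
 *	struct batadv_orig_node_vlan *vlan;
 *
 *	vlan = batadv_orig_node_vlan_get(orig_node, vid);
 *	if (!vlan)
 *		return;
 *	... use the per-VLAN data ...
 *	batadv_orig_node_vlan_free_ref(vlan);
 *
 * The reference taken via atomic_inc_not_zero() above must always be dropped
 * with batadv_orig_node_vlan_free_ref() once the object is no longer needed.
 */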
/**
 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
 *  object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns NULL in case of failure or the vlan object identified by vid and
 * belonging to orig_node otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
                          unsigned short vid)
{
        struct batadv_orig_node_vlan *vlan;

        spin_lock_bh(&orig_node->vlan_list_lock);

        /* first look if an object for this vid already exists */
        vlan = batadv_orig_node_vlan_get(orig_node, vid);
        if (vlan)
                goto out;

        vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
        if (!vlan)
                goto out;

        atomic_set(&vlan->refcount, 2);
        vlan->vid = vid;

        list_add_rcu(&vlan->list, &orig_node->vlan_list);

out:
        spin_unlock_bh(&orig_node->vlan_list_lock);

        return vlan;
}
/**
 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
 *  the originator-vlan object
 * @orig_vlan: the originator-vlan object to release
 */
void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
{
        if (atomic_dec_and_test(&orig_vlan->refcount))
                kfree_rcu(orig_vlan, rcu);
}
int batadv_originator_init(struct batadv_priv *bat_priv)
{
        if (bat_priv->orig_hash)
                return 0;

        bat_priv->orig_hash = batadv_hash_new(1024);

        if (!bat_priv->orig_hash)
                goto err;

        batadv_hash_set_lock_class(bat_priv->orig_hash,
                                   &batadv_orig_hash_lock_class_key);

        INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
        queue_delayed_work(batadv_event_workqueue,
                           &bat_priv->orig_work,
                           msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));

        return 0;

err:
        return -ENOMEM;
}
/**
 * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
 * @rcu: rcu pointer of the neigh_ifinfo object
 */
static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
{
        struct batadv_neigh_ifinfo *neigh_ifinfo;

        neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);

        if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
                batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);

        kfree(neigh_ifinfo);
}
/**
 * batadv_neigh_ifinfo_free_ref_now - decrement the refcounter and possibly
 *  free the neigh_ifinfo (without rcu callback)
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
static void
batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
        if (atomic_dec_and_test(&neigh_ifinfo->refcount))
                batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
}
/**
 * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
 *  the neigh_ifinfo
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
        if (atomic_dec_and_test(&neigh_ifinfo->refcount))
                call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
}
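/* Two release helpers exist for each of these refcounted objects: the
 * *_free_ref() variants defer the actual kfree() to an RCU grace period via
 * call_rcu(), while the *_free_ref_now() variants free immediately and are
 * only safe where no RCU readers can still hold the object, e.g. from within
 * another RCU callback such as batadv_neigh_node_free_rcu() below.
 */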
/**
 * batadv_neigh_node_free_rcu - free the neigh_node
 * @rcu: rcu pointer of the neigh_node
 */
static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
{
        struct hlist_node *node_tmp;
        struct batadv_neigh_node *neigh_node;
        struct batadv_neigh_ifinfo *neigh_ifinfo;

        neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);

        hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
                                  &neigh_node->ifinfo_list, list) {
                batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
        }

        batadv_hardif_free_ref_now(neigh_node->if_incoming);

        kfree(neigh_node);
}
/**
 * batadv_neigh_node_free_ref_now - decrement the neighbor's refcounter
 *  and possibly free it (without rcu callback)
 * @neigh_node: the neighbor to free
 */
static void
batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
{
        if (atomic_dec_and_test(&neigh_node->refcount))
                batadv_neigh_node_free_rcu(&neigh_node->rcu);
}
/**
 * batadv_neigh_node_free_ref - decrement the neighbor's refcounter
 *  and possibly free it
 * @neigh_node: the neighbor to free
 */
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
        if (atomic_dec_and_test(&neigh_node->refcount))
                call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
}
/**
 * batadv_orig_router_get - get the router to the originator depending on iface
 * @orig_node: the orig node for the router
 * @if_outgoing: the interface where the payload packet has been received or
 *  the OGM should be sent to
 *
 * Returns the neighbor which should be the router for this orig_node/iface.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_node *
batadv_orig_router_get(struct batadv_orig_node *orig_node,
                       const struct batadv_hard_iface *if_outgoing)
{
        struct batadv_orig_ifinfo *orig_ifinfo;
        struct batadv_neigh_node *router = NULL;

        rcu_read_lock();
        hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
                if (orig_ifinfo->if_outgoing != if_outgoing)
                        continue;

                router = rcu_dereference(orig_ifinfo->router);
                break;
        }

        if (router && !atomic_inc_not_zero(&router->refcount))
                router = NULL;

        rcu_read_unlock();
        return router;
}
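/* The refcount of the selected router is raised with atomic_inc_not_zero()
 * while still inside the RCU read side critical section: if the count has
 * already dropped to zero the object is about to be freed and must not be
 * handed out. The caller releases the reference with
 * batadv_neigh_node_free_ref().
 */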
/**
 * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns the requested orig_ifinfo or NULL if not found.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
                       struct batadv_hard_iface *if_outgoing)
{
        struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;

        rcu_read_lock();
        hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
                                 list) {
                if (tmp->if_outgoing != if_outgoing)
                        continue;

                if (!atomic_inc_not_zero(&tmp->refcount))
                        continue;

                orig_ifinfo = tmp;
                break;
        }
        rcu_read_unlock();

        return orig_ifinfo;
}
/**
 * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the orig_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
                       struct batadv_hard_iface *if_outgoing)
{
        struct batadv_orig_ifinfo *orig_ifinfo = NULL;
        unsigned long reset_time;

        spin_lock_bh(&orig_node->neigh_list_lock);

        orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
        if (orig_ifinfo)
                goto out;

        orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
        if (!orig_ifinfo)
                goto out;

        if (if_outgoing != BATADV_IF_DEFAULT &&
            !atomic_inc_not_zero(&if_outgoing->refcount)) {
                kfree(orig_ifinfo);
                orig_ifinfo = NULL;
                goto out;
        }

        reset_time = jiffies - 1;
        reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
        orig_ifinfo->batman_seqno_reset = reset_time;
        orig_ifinfo->if_outgoing = if_outgoing;
        INIT_HLIST_NODE(&orig_ifinfo->list);
        atomic_set(&orig_ifinfo->refcount, 2);
        hlist_add_head_rcu(&orig_ifinfo->list,
                           &orig_node->ifinfo_list);
out:
        spin_unlock_bh(&orig_node->neigh_list_lock);
        return orig_ifinfo;
}
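/* A freshly allocated orig_ifinfo starts with a refcount of 2: one reference
 * is owned by orig_node->ifinfo_list, the other is the one returned to the
 * caller (see "returned with refcounter increased by 1" above).
 */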
/**
 * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * The object is returned with refcounter increased by 1.
 *
 * Returns the requested neigh_ifinfo or NULL if not found
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
                        struct batadv_hard_iface *if_outgoing)
{
        struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
                                   *tmp_neigh_ifinfo;

        rcu_read_lock();
        hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
                                 list) {
                if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
                        continue;

                if (!atomic_inc_not_zero(&tmp_neigh_ifinfo->refcount))
                        continue;

                neigh_ifinfo = tmp_neigh_ifinfo;
                break;
        }
        rcu_read_unlock();

        return neigh_ifinfo;
}
/**
 * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the neigh_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
                        struct batadv_hard_iface *if_outgoing)
{
        struct batadv_neigh_ifinfo *neigh_ifinfo;

        spin_lock_bh(&neigh->ifinfo_lock);

        neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
        if (neigh_ifinfo)
                goto out;

        neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
        if (!neigh_ifinfo)
                goto out;

        if (if_outgoing && !atomic_inc_not_zero(&if_outgoing->refcount)) {
                kfree(neigh_ifinfo);
                neigh_ifinfo = NULL;
                goto out;
        }

        INIT_HLIST_NODE(&neigh_ifinfo->list);
        atomic_set(&neigh_ifinfo->refcount, 2);
        neigh_ifinfo->if_outgoing = if_outgoing;

        hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);

out:
        spin_unlock_bh(&neigh->ifinfo_lock);

        return neigh_ifinfo;
}
/**
 * batadv_neigh_node_new - create and init a new neigh_node object
 * @hard_iface: the interface where the neighbour is connected to
 * @neigh_addr: the mac address of the neighbour interface
 * @orig_node: originator object representing the neighbour
 *
 * Allocates a new neigh_node object and initialises all the generic fields.
 * Returns the new object or NULL on failure.
 */
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
                      const uint8_t *neigh_addr,
                      struct batadv_orig_node *orig_node)
{
        struct batadv_neigh_node *neigh_node;

        neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
        if (!neigh_node)
                goto out;

        INIT_HLIST_NODE(&neigh_node->list);
        INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
        spin_lock_init(&neigh_node->ifinfo_lock);

        memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
        neigh_node->if_incoming = hard_iface;
        neigh_node->orig_node = orig_node;

        /* extra reference for return */
        atomic_set(&neigh_node->refcount, 2);

out:
        return neigh_node;
}
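/* The new neighbor is deliberately not linked into orig_node->neigh_list
 * here; the caller (typically the routing algorithm) is expected to finish
 * the algorithm specific initialisation and add it to the list itself. The
 * initial refcount of 2 covers the reference returned to the caller plus the
 * one that list will eventually hold.
 */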
/**
 * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
 * @rcu: rcu pointer of the orig_ifinfo object
 */
static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
{
        struct batadv_orig_ifinfo *orig_ifinfo;

        orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);

        if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
                batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);

        kfree(orig_ifinfo);
}
/**
 * batadv_orig_ifinfo_free_ref_now - decrement the refcounter and possibly
 *  free the orig_ifinfo (without rcu callback)
 * @orig_ifinfo: the orig_ifinfo object to release
 */
static void
batadv_orig_ifinfo_free_ref_now(struct batadv_orig_ifinfo *orig_ifinfo)
{
        if (atomic_dec_and_test(&orig_ifinfo->refcount))
                batadv_orig_ifinfo_free_rcu(&orig_ifinfo->rcu);
}
/**
 * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
 *  the orig_ifinfo
 * @orig_ifinfo: the orig_ifinfo object to release
 */
void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
{
        if (atomic_dec_and_test(&orig_ifinfo->refcount))
                call_rcu(&orig_ifinfo->rcu, batadv_orig_ifinfo_free_rcu);
}
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
        struct hlist_node *node_tmp;
        struct batadv_neigh_node *neigh_node;
        struct batadv_orig_node *orig_node;
        struct batadv_orig_ifinfo *orig_ifinfo;

        orig_node = container_of(rcu, struct batadv_orig_node, rcu);

        spin_lock_bh(&orig_node->neigh_list_lock);

        /* for all neighbors towards this originator ... */
        hlist_for_each_entry_safe(neigh_node, node_tmp,
                                  &orig_node->neigh_list, list) {
                hlist_del_rcu(&neigh_node->list);
                batadv_neigh_node_free_ref_now(neigh_node);
        }

        hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
                                  &orig_node->ifinfo_list, list) {
                hlist_del_rcu(&orig_ifinfo->list);
                batadv_orig_ifinfo_free_ref_now(orig_ifinfo);
        }
        spin_unlock_bh(&orig_node->neigh_list_lock);

        /* Free nc_nodes */
        batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

        batadv_frag_purge_orig(orig_node, NULL);

        batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1,
                                  "originator timed out");

        if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
                orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);

        kfree(orig_node->tt_buff);
        kfree(orig_node);
}
/**
 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
 *  schedule an rcu callback for freeing it
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
        if (atomic_dec_and_test(&orig_node->refcount))
                call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}
/**
 * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
 *  possibly free it (without rcu callback)
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
{
        if (atomic_dec_and_test(&orig_node->refcount))
                batadv_orig_node_free_rcu(&orig_node->rcu);
}
void batadv_originator_free(struct batadv_priv *bat_priv)
{
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* spinlock to protect write access */
        struct batadv_orig_node *orig_node;
        uint32_t i;

        if (!hash)
                return;

        cancel_delayed_work_sync(&bat_priv->orig_work);

        bat_priv->orig_hash = NULL;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];

                spin_lock_bh(list_lock);
                hlist_for_each_entry_safe(orig_node, node_tmp,
                                          head, hash_entry) {
                        hlist_del_rcu(&orig_node->hash_entry);
                        batadv_orig_node_free_ref(orig_node);
                }
                spin_unlock_bh(list_lock);
        }

        batadv_hash_destroy(hash);
}
/**
 * batadv_orig_node_new - creates a new orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the mac address of the originator
 *
 * Creates a new originator object and initialises all the generic fields.
 * The new object is not added to the originator list.
 * Returns the newly created object or NULL on failure.
 */
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
                                              const uint8_t *addr)
{
        struct batadv_orig_node *orig_node;
        struct batadv_orig_node_vlan *vlan;
        unsigned long reset_time;
        int i;

        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                   "Creating new originator: %pM\n", addr);

        orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
        if (!orig_node)
                return NULL;

        INIT_HLIST_HEAD(&orig_node->neigh_list);
        INIT_LIST_HEAD(&orig_node->vlan_list);
        INIT_HLIST_HEAD(&orig_node->ifinfo_list);
        spin_lock_init(&orig_node->bcast_seqno_lock);
        spin_lock_init(&orig_node->neigh_list_lock);
        spin_lock_init(&orig_node->tt_buff_lock);
        spin_lock_init(&orig_node->tt_lock);
        spin_lock_init(&orig_node->vlan_list_lock);

        batadv_nc_init_orig(orig_node);

        /* extra reference for return */
        atomic_set(&orig_node->refcount, 2);

        orig_node->tt_initialised = false;
        orig_node->bat_priv = bat_priv;
        memcpy(orig_node->orig, addr, ETH_ALEN);
        batadv_dat_init_orig_node_addr(orig_node);
        atomic_set(&orig_node->last_ttvn, 0);
        orig_node->tt_buff = NULL;
        orig_node->tt_buff_len = 0;
        reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
        orig_node->bcast_seqno_reset = reset_time;

        /* create a vlan object for the "untagged" LAN */
        vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
        if (!vlan)
                goto free_orig_node;
        /* batadv_orig_node_vlan_new() increases the refcounter.
         * Immediately release vlan since it is not needed anymore in this
         * context
         */
        batadv_orig_node_vlan_free_ref(vlan);

        for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
                INIT_HLIST_HEAD(&orig_node->fragments[i].head);
                spin_lock_init(&orig_node->fragments[i].lock);
                orig_node->fragments[i].size = 0;
        }

        return orig_node;
free_orig_node:
        kfree(orig_node);
        return NULL;
}
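/* Illustrative usage sketch (not part of this file): a routing algorithm
 * would typically wrap this helper, fill in its private fields and then
 * insert the node into bat_priv->orig_hash:
 *
 *	orig_node = batadv_orig_node_new(bat_priv, addr);
 *	if (!orig_node)
 *		return NULL;
 *	... algorithm specific initialisation ...
 *	... insert orig_node into bat_priv->orig_hash ...
 *
 * The reference handed back by batadv_orig_node_new() must be released with
 * batadv_orig_node_free_ref() once the caller is done with the node.
 */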
/**
 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if any ifinfo entry was purged, false otherwise.
 */
static bool
batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
                         struct batadv_orig_node *orig_node)
{
        struct batadv_orig_ifinfo *orig_ifinfo;
        struct batadv_hard_iface *if_outgoing;
        struct hlist_node *node_tmp;
        bool ifinfo_purged = false;

        spin_lock_bh(&orig_node->neigh_list_lock);

        /* for all ifinfo objects for this originator */
        hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
                                  &orig_node->ifinfo_list, list) {
                if_outgoing = orig_ifinfo->if_outgoing;

                /* always keep the default interface */
                if (if_outgoing == BATADV_IF_DEFAULT)
                        continue;

                /* don't purge if the interface is not (going) down */
                if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
                    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
                    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
                        continue;

                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "router/ifinfo purge: originator %pM, iface: %s\n",
                           orig_node->orig, if_outgoing->net_dev->name);

                ifinfo_purged = true;

                hlist_del_rcu(&orig_ifinfo->list);
                batadv_orig_ifinfo_free_ref(orig_ifinfo);
                if (orig_node->last_bonding_candidate == orig_ifinfo) {
                        orig_node->last_bonding_candidate = NULL;
                        batadv_orig_ifinfo_free_ref(orig_ifinfo);
                }
        }

        spin_unlock_bh(&orig_node->neigh_list_lock);

        return ifinfo_purged;
}
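/* In batadv_purge_orig_ifinfo() above, the first batadv_orig_ifinfo_free_ref()
 * drops the reference held by orig_node->ifinfo_list; the second call, issued
 * only when the purged entry was also cached as last_bonding_candidate, drops
 * the additional reference held through that pointer.
 */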
/**
 * batadv_purge_orig_neighbors - purges neighbors from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if any neighbor was purged, false otherwise
 */
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
                            struct batadv_orig_node *orig_node)
{
        struct hlist_node *node_tmp;
        struct batadv_neigh_node *neigh_node;
        bool neigh_purged = false;
        unsigned long last_seen;
        struct batadv_hard_iface *if_incoming;

        spin_lock_bh(&orig_node->neigh_list_lock);

        /* for all neighbors towards this originator ... */
        hlist_for_each_entry_safe(neigh_node, node_tmp,
                                  &orig_node->neigh_list, list) {
                last_seen = neigh_node->last_seen;
                if_incoming = neigh_node->if_incoming;

                if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
                    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
                    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
                    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
                        if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
                            (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
                            (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
                                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                                           "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
                                           orig_node->orig, neigh_node->addr,
                                           if_incoming->net_dev->name);
                        else
                                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                                           "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
                                           orig_node->orig, neigh_node->addr,
                                           jiffies_to_msecs(last_seen));

                        neigh_purged = true;

                        hlist_del_rcu(&neigh_node->list);
                        batadv_neigh_node_free_ref(neigh_node);
                }
        }

        spin_unlock_bh(&orig_node->neigh_list_lock);
        return neigh_purged;
}
/**
 * batadv_find_best_neighbor - finds the best neighbor after purging
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 * @if_outgoing: the interface for which the metric should be compared
 *
 * Returns the current best neighbor, with refcount increased.
 */
static struct batadv_neigh_node *
batadv_find_best_neighbor(struct batadv_priv *bat_priv,
                          struct batadv_orig_node *orig_node,
                          struct batadv_hard_iface *if_outgoing)
{
        struct batadv_neigh_node *best = NULL, *neigh;
        struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;

        rcu_read_lock();
        hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
                if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
                                                best, if_outgoing) <= 0))
                        continue;

                if (!atomic_inc_not_zero(&neigh->refcount))
                        continue;

                if (best)
                        batadv_neigh_node_free_ref(best);

                best = neigh;
        }
        rcu_read_unlock();

        return best;
}
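/* Reference counting in the loop above: a neighbor only becomes the new best
 * candidate if its refcount could be raised via atomic_inc_not_zero(); the
 * reference on the previously selected best is dropped again each time, so
 * only the reference on the returned neighbor survives the loop.
 */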
/**
 * batadv_purge_orig_node - purges obsolete information from an orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * This function checks if the orig_node or substructures of it have become
 * obsolete, and purges this information if that's the case.
 *
 * Returns true if the orig_node is to be removed, false otherwise.
 */
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
                                   struct batadv_orig_node *orig_node)
{
        struct batadv_neigh_node *best_neigh_node;
        struct batadv_hard_iface *hard_iface;
        bool changed;

        if (batadv_has_timed_out(orig_node->last_seen,
                                 2 * BATADV_PURGE_TIMEOUT)) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Originator timeout: originator %pM, last_seen %u\n",
                           orig_node->orig,
                           jiffies_to_msecs(orig_node->last_seen));
                return true;
        }

        changed = batadv_purge_orig_ifinfo(bat_priv, orig_node);
        changed = changed || batadv_purge_orig_neighbors(bat_priv, orig_node);

        if (!changed)
                return false;

        /* first for the default interface ... */
        best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
                                                    BATADV_IF_DEFAULT);
        batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
                            best_neigh_node);
        if (best_neigh_node)
                batadv_neigh_node_free_ref(best_neigh_node);

        /* ... then for all other interfaces. */
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
                if (hard_iface->if_status != BATADV_IF_ACTIVE)
                        continue;

                if (hard_iface->soft_iface != bat_priv->soft_iface)
                        continue;

                best_neigh_node = batadv_find_best_neighbor(bat_priv,
                                                            orig_node,
                                                            hard_iface);
                batadv_update_route(bat_priv, orig_node, hard_iface,
                                    best_neigh_node);
                if (best_neigh_node)
                        batadv_neigh_node_free_ref(best_neigh_node);
        }
        rcu_read_unlock();

        return false;
}
static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* spinlock to protect write access */
        struct batadv_orig_node *orig_node;
        uint32_t i;

        if (!hash)
                return;

        /* for all origins... */
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];

                spin_lock_bh(list_lock);
                hlist_for_each_entry_safe(orig_node, node_tmp,
                                          head, hash_entry) {
                        if (batadv_purge_orig_node(bat_priv, orig_node)) {
                                batadv_gw_node_delete(bat_priv, orig_node);
                                hlist_del_rcu(&orig_node->hash_entry);
                                batadv_orig_node_free_ref(orig_node);
                                continue;
                        }

                        batadv_frag_purge_orig(orig_node,
                                               batadv_frag_check_entry);
                }
                spin_unlock_bh(list_lock);
        }

        batadv_gw_node_purge(bat_priv);
        batadv_gw_election(bat_priv);
}
static void batadv_purge_orig(struct work_struct *work)
{
        struct delayed_work *delayed_work;
        struct batadv_priv *bat_priv;

        delayed_work = container_of(work, struct delayed_work, work);
        bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
        _batadv_purge_orig(bat_priv);
        queue_delayed_work(batadv_event_workqueue,
                           &bat_priv->orig_work,
                           msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}
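/* batadv_purge_orig() re-arms itself via queue_delayed_work() above, so once
 * batadv_originator_init() has scheduled it, the purge runs every
 * BATADV_ORIG_WORK_PERIOD milliseconds until batadv_originator_free() stops
 * it with cancel_delayed_work_sync().
 */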
void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
        _batadv_purge_orig(bat_priv);
}
int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
        struct batadv_hard_iface *primary_if;

        primary_if = batadv_seq_print_text_primary_if_get(seq);
        if (!primary_if)
                return 0;

        seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
                   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
                   primary_if->net_dev->dev_addr, net_dev->name,
                   bat_priv->bat_algo_ops->name);

        batadv_hardif_free_ref(primary_if);

        if (!bat_priv->bat_algo_ops->bat_orig_print) {
                seq_puts(seq,
                         "No printing function for this routing protocol\n");
                return 0;
        }

        bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
                                               BATADV_IF_DEFAULT);

        return 0;
}
/**
 * batadv_orig_hardif_seq_print_text - writes originator information for a
 *  specific outgoing interface
 * @seq: debugfs table seq_file struct
 * @offset: not used
 *
 * Returns 0
 */
int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
{
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_hard_iface *hard_iface;
        struct batadv_priv *bat_priv;

        hard_iface = batadv_hardif_get_by_netdev(net_dev);

        if (!hard_iface || !hard_iface->soft_iface) {
                seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
                goto out;
        }

        bat_priv = netdev_priv(hard_iface->soft_iface);
        if (!bat_priv->bat_algo_ops->bat_orig_print) {
                seq_puts(seq,
                         "No printing function for this routing protocol\n");
                goto out;
        }

        if (hard_iface->if_status != BATADV_IF_ACTIVE) {
                seq_puts(seq, "Interface not active\n");
                goto out;
        }

        seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
                   BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
                   hard_iface->net_dev->dev_addr,
                   hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);

        bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);

out:
        /* hard_iface may be NULL when the netdev lookup above failed */
        if (hard_iface)
                batadv_hardif_free_ref(hard_iface);
        return 0;
}
int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
                            int max_if_num)
{
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_head *head;
        struct batadv_orig_node *orig_node;
        uint32_t i;
        int ret;

        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
         * if_num
         */
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
                        ret = 0;
                        if (bao->bat_orig_add_if)
                                ret = bao->bat_orig_add_if(orig_node,
                                                           max_if_num);
                        if (ret == -ENOMEM)
                                goto err;
                }
                rcu_read_unlock();
        }

        return 0;

err:
        rcu_read_unlock();
        return -ENOMEM;
}
int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
                            int max_if_num)
{
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_head *head;
        struct batadv_hard_iface *hard_iface_tmp;
        struct batadv_orig_node *orig_node;
        struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
        uint32_t i;
        int ret;

        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
         * if_num
         */
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
                        ret = 0;
                        if (bao->bat_orig_del_if)
                                ret = bao->bat_orig_del_if(orig_node,
                                                           max_if_num,
                                                           hard_iface->if_num);
                        if (ret == -ENOMEM)
                                goto err;
                }
                rcu_read_unlock();
        }

        /* renumber remaining batman interfaces _inside_ of orig_hash_lock */
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
                if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
                        continue;

                if (hard_iface == hard_iface_tmp)
                        continue;

                if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
                        continue;

                if (hard_iface_tmp->if_num > hard_iface->if_num)
                        hard_iface_tmp->if_num--;
        }
        rcu_read_unlock();

        hard_iface->if_num = -1;
        return 0;

err:
        rcu_read_unlock();
        return -ENOMEM;
}