batman-adv: form groups in the bridge loop avoidance
linux-2.6.git: net/batman-adv/bridge_loop_avoidance.c (blob 1cf18ac44ba933741bd232a67840cafee686c596)
1 /*
2 * Copyright (C) 2011 B.A.T.M.A.N. contributors:
4 * Simon Wunderlich
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
22 #include "main.h"
23 #include "hash.h"
24 #include "hard-interface.h"
25 #include "originator.h"
26 #include "bridge_loop_avoidance.h"
27 #include "translation-table.h"
28 #include "send.h"
30 #include <linux/etherdevice.h>
31 #include <linux/crc16.h>
32 #include <linux/if_arp.h>
33 #include <net/arp.h>
34 #include <linux/if_vlan.h>
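/* Pseudo MAC used for ANNOUNCE frames: bla_send_announce() appends the
 * backbone gateway's claim CRC (in network byte order) to these four bytes,
 * and handle_announce() recognises announcements by the same prefix.
 */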
36 static const uint8_t announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
38 static void bla_periodic_work(struct work_struct *work);
39 static void bla_send_announce(struct bat_priv *bat_priv,
40 struct backbone_gw *backbone_gw);
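/* Both hash functions below fold the first ETH_ALEN + sizeof(short) bytes of
 * an entry (the MAC address followed by the VLAN id) through a Jenkins-style
 * one-at-a-time hash. Like the compare_*() helpers, this relies on the
 * address and vid being the first members of struct claim and
 * struct backbone_gw.
 */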
42 /* return the index of the claim */
43 static inline uint32_t choose_claim(const void *data, uint32_t size)
45 const unsigned char *key = data;
46 uint32_t hash = 0;
47 size_t i;
49 for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
50 hash += key[i];
51 hash += (hash << 10);
52 hash ^= (hash >> 6);
55 hash += (hash << 3);
56 hash ^= (hash >> 11);
57 hash += (hash << 15);
59 return hash % size;
62 /* return the index of the backbone gateway */
63 static inline uint32_t choose_backbone_gw(const void *data, uint32_t size)
65 const unsigned char *key = data;
66 uint32_t hash = 0;
67 size_t i;
69 for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
70 hash += key[i];
71 hash += (hash << 10);
72 hash ^= (hash >> 6);
75 hash += (hash << 3);
76 hash ^= (hash >> 11);
77 hash += (hash << 15);
79 return hash % size;
83 /* compares address and vid of two backbone gws */
84 static int compare_backbone_gw(const struct hlist_node *node, const void *data2)
86 const void *data1 = container_of(node, struct backbone_gw,
87 hash_entry);
89 return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
92 /* compares address and vid of two claims */
93 static int compare_claim(const struct hlist_node *node, const void *data2)
95 const void *data1 = container_of(node, struct claim,
96 hash_entry);
98 return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
101 /* free a backbone gw */
102 static void backbone_gw_free_ref(struct backbone_gw *backbone_gw)
104 if (atomic_dec_and_test(&backbone_gw->refcount))
105 kfree_rcu(backbone_gw, rcu);
108 /* finally deinitialize the claim */
109 static void claim_free_rcu(struct rcu_head *rcu)
111 struct claim *claim;
113 claim = container_of(rcu, struct claim, rcu);
115 backbone_gw_free_ref(claim->backbone_gw);
116 kfree(claim);
119 /* free a claim, call claim_free_rcu if it's the last reference */
120 static void claim_free_ref(struct claim *claim)
122 if (atomic_dec_and_test(&claim->refcount))
123 call_rcu(&claim->rcu, claim_free_rcu);
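/* Freeing is deferred via call_rcu() so that readers still walking the claim
 * hash under rcu_read_lock() can finish; claim_free_rcu() then also drops the
 * reference the claim held on its backbone gateway.
 */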
127 * @bat_priv: the bat priv with all the soft interface information
128 * @data: search data (may be local/static data)
130 * looks for a claim in the hash, and returns it if found
131 * or NULL otherwise.
133 static struct claim *claim_hash_find(struct bat_priv *bat_priv,
134 struct claim *data)
136 struct hashtable_t *hash = bat_priv->claim_hash;
137 struct hlist_head *head;
138 struct hlist_node *node;
139 struct claim *claim;
140 struct claim *claim_tmp = NULL;
141 int index;
143 if (!hash)
144 return NULL;
146 index = choose_claim(data, hash->size);
147 head = &hash->table[index];
149 rcu_read_lock();
150 hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
151 if (!compare_claim(&claim->hash_entry, data))
152 continue;
154 if (!atomic_inc_not_zero(&claim->refcount))
155 continue;
157 claim_tmp = claim;
158 break;
160 rcu_read_unlock();
162 return claim_tmp;
166 * @bat_priv: the bat priv with all the soft interface information
167 * @addr: the address of the originator
168 * @vid: the VLAN ID
170 * looks for a claim in the hash, and returns it if found
171 * or NULL otherwise.
173 static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
174 uint8_t *addr, short vid)
176 struct hashtable_t *hash = bat_priv->backbone_hash;
177 struct hlist_head *head;
178 struct hlist_node *node;
179 struct backbone_gw search_entry, *backbone_gw;
180 struct backbone_gw *backbone_gw_tmp = NULL;
181 int index;
183 if (!hash)
184 return NULL;
186 memcpy(search_entry.orig, addr, ETH_ALEN);
187 search_entry.vid = vid;
189 index = choose_backbone_gw(&search_entry, hash->size);
190 head = &hash->table[index];
192 rcu_read_lock();
193 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
194 if (!compare_backbone_gw(&backbone_gw->hash_entry,
195 &search_entry))
196 continue;
198 if (!atomic_inc_not_zero(&backbone_gw->refcount))
199 continue;
201 backbone_gw_tmp = backbone_gw;
202 break;
204 rcu_read_unlock();
206 return backbone_gw_tmp;
209 /* delete all claims for a backbone */
210 static void bla_del_backbone_claims(struct backbone_gw *backbone_gw)
212 struct hashtable_t *hash;
213 struct hlist_node *node, *node_tmp;
214 struct hlist_head *head;
215 struct claim *claim;
216 int i;
217 spinlock_t *list_lock; /* protects write access to the hash lists */
219 hash = backbone_gw->bat_priv->claim_hash;
220 if (!hash)
221 return;
223 for (i = 0; i < hash->size; i++) {
224 head = &hash->table[i];
225 list_lock = &hash->list_locks[i];
227 spin_lock_bh(list_lock);
228 hlist_for_each_entry_safe(claim, node, node_tmp,
229 head, hash_entry) {
231 if (claim->backbone_gw != backbone_gw)
232 continue;
234 claim_free_ref(claim);
235 hlist_del_rcu(node);
237 spin_unlock_bh(list_lock);
240 /* all claims gone, initialize CRC */
241 backbone_gw->crc = BLA_CRC_INIT;
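/* With all claims of this gateway removed, the CRC over its claimed clients
 * is simply reset to its initial value rather than xor-ing every client
 * address out individually.
 */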
245 * @bat_priv: the bat priv with all the soft interface information
246 * @orig: the mac address to be announced within the claim
247 * @vid: the VLAN ID
248 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
250 * sends a claim frame according to the provided info.
252 static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
253 short vid, int claimtype)
255 struct sk_buff *skb;
256 struct ethhdr *ethhdr;
257 struct hard_iface *primary_if;
258 struct net_device *soft_iface;
259 uint8_t *hw_src;
260 struct bla_claim_dst local_claim_dest;
261 uint32_t zeroip = 0;
263 primary_if = primary_if_get_selected(bat_priv);
264 if (!primary_if)
265 return;
267 memcpy(&local_claim_dest, &bat_priv->claim_dest,
268 sizeof(local_claim_dest));
269 local_claim_dest.type = claimtype;
271 soft_iface = primary_if->soft_iface;
273 skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
274 /* IP DST: 0.0.0.0 */
275 zeroip,
276 primary_if->soft_iface,
277 /* IP SRC: 0.0.0.0 */
278 zeroip,
279 /* Ethernet DST: Broadcast */
280 NULL,
281 /* Ethernet SRC/HW SRC: originator mac */
282 primary_if->net_dev->dev_addr,
283 /* HW DST: FF:43:05:XX:YY:YY
284 * with XX = claim type
285 * and YY:YY = group id
287 (uint8_t *)&local_claim_dest);
289 if (!skb)
290 goto out;
292 ethhdr = (struct ethhdr *)skb->data;
293 hw_src = (uint8_t *)ethhdr +
294 sizeof(struct ethhdr) +
295 sizeof(struct arphdr);
297 /* now we pretend that the client would have sent this ... */
298 switch (claimtype) {
299 case CLAIM_TYPE_ADD:
300 /* normal claim frame
301 * set Ethernet SRC to the client's mac
303 memcpy(ethhdr->h_source, mac, ETH_ALEN);
304 bat_dbg(DBG_BLA, bat_priv,
305 "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
306 break;
307 case CLAIM_TYPE_DEL:
308 /* unclaim frame
309 * set HW SRC to the client's mac
311 memcpy(hw_src, mac, ETH_ALEN);
312 bat_dbg(DBG_BLA, bat_priv,
313 "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, vid);
314 break;
315 case CLAIM_TYPE_ANNOUNCE:
316 /* announcement frame
317 * set HW SRC to the special mac containing the crc
319 memcpy(hw_src, mac, ETH_ALEN);
320 bat_dbg(DBG_BLA, bat_priv,
321 "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
322 ethhdr->h_source, vid);
323 break;
324 case CLAIM_TYPE_REQUEST:
325 /* request frame
326 * set HW SRC to the special mac containing the crc
328 memcpy(hw_src, mac, ETH_ALEN);
329 memcpy(ethhdr->h_dest, mac, ETH_ALEN);
330 bat_dbg(DBG_BLA, bat_priv,
331 "bla_send_claim(): REQUEST of %pM to %pMon vid %d\n",
332 ethhdr->h_source, ethhdr->h_dest, vid);
333 break;
337 if (vid != -1)
338 skb = vlan_insert_tag(skb, vid);
340 skb_reset_mac_header(skb);
341 skb->protocol = eth_type_trans(skb, soft_iface);
342 bat_priv->stats.rx_packets++;
343 bat_priv->stats.rx_bytes += skb->len + sizeof(struct ethhdr);
344 soft_iface->last_rx = jiffies;
346 netif_rx(skb);
347 out:
348 if (primary_if)
349 hardif_free_ref(primary_if);
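/* Wire format of a claim frame, as built above: an ARP reply with both IP
 * addresses set to 0.0.0.0 and a target hardware address of
 * FF:43:05:<type>:<group>. Depending on the claim type, the client MAC (or
 * the CRC pseudo address for announcements) is placed in the Ethernet SRC,
 * the ARP sender hardware address and/or the Ethernet DST.
 */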
353 * @bat_priv: the bat priv with all the soft interface information
354 * @orig: the mac address of the originator
355 * @vid: the VLAN ID
357 * searches for the backbone gw or creates a new one if it could not
358 * be found.
360 static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
361 uint8_t *orig, short vid)
363 struct backbone_gw *entry;
364 struct orig_node *orig_node;
365 int hash_added;
367 entry = backbone_hash_find(bat_priv, orig, vid);
369 if (entry)
370 return entry;
372 bat_dbg(DBG_BLA, bat_priv,
373 "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
374 orig, vid);
376 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
377 if (!entry)
378 return NULL;
380 entry->vid = vid;
381 entry->lasttime = jiffies;
382 entry->crc = BLA_CRC_INIT;
383 entry->bat_priv = bat_priv;
384 atomic_set(&entry->request_sent, 0);
385 memcpy(entry->orig, orig, ETH_ALEN);
387 /* one for the hash, one for returning */
388 atomic_set(&entry->refcount, 2);
390 hash_added = hash_add(bat_priv->backbone_hash, compare_backbone_gw,
391 choose_backbone_gw, entry, &entry->hash_entry);
393 if (unlikely(hash_added != 0)) {
394 /* hash failed, free the structure */
395 kfree(entry);
396 return NULL;
399 /* this is a gateway now, remove any tt entries */
400 orig_node = orig_hash_find(bat_priv, orig);
401 if (orig_node) {
402 tt_global_del_orig(bat_priv, orig_node,
403 "became a backbone gateway");
404 orig_node_free_ref(orig_node);
406 return entry;
409 /* update or add our own backbone gw to make sure we also announce
410 * ourselves on the VLANs where we receive other backbone gws
412 static void bla_update_own_backbone_gw(struct bat_priv *bat_priv,
413 struct hard_iface *primary_if,
414 short vid)
416 struct backbone_gw *backbone_gw;
418 backbone_gw = bla_get_backbone_gw(bat_priv,
419 primary_if->net_dev->dev_addr, vid);
420 if (unlikely(!backbone_gw))
421 return;
423 backbone_gw->lasttime = jiffies;
424 backbone_gw_free_ref(backbone_gw);
428 * @bat_priv: the bat priv with all the soft interface information
429 * @vid: the vid the request was received on
431 * Repeat all of our own claims, and finally send an ANNOUNCE frame
432 * so the requester can check again whether the CRC is correct now.
434 static void bla_answer_request(struct bat_priv *bat_priv,
435 struct hard_iface *primary_if, short vid)
437 struct hlist_node *node;
438 struct hlist_head *head;
439 struct hashtable_t *hash;
440 struct claim *claim;
441 struct backbone_gw *backbone_gw;
442 int i;
444 bat_dbg(DBG_BLA, bat_priv,
445 "bla_answer_request(): received a claim request, send all of our own claims again\n");
447 backbone_gw = backbone_hash_find(bat_priv,
448 primary_if->net_dev->dev_addr, vid);
449 if (!backbone_gw)
450 return;
452 hash = bat_priv->claim_hash;
453 for (i = 0; i < hash->size; i++) {
454 head = &hash->table[i];
456 rcu_read_lock();
457 hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
458 /* only own claims are interesting */
459 if (claim->backbone_gw != backbone_gw)
460 continue;
462 bla_send_claim(bat_priv, claim->addr, claim->vid,
463 CLAIM_TYPE_ADD);
465 rcu_read_unlock();
468 /* finally, send an announcement frame */
469 bla_send_announce(bat_priv, backbone_gw);
470 backbone_gw_free_ref(backbone_gw);
474 * @backbone_gw: the backbone gateway with which we are out of sync
476 * When the crc is wrong, ask the backbone gateway for a full table update.
477 * After the request, it will repeat all of its own claims and finally
478 * send an announcement claim with which we can check again.
480 static void bla_send_request(struct backbone_gw *backbone_gw)
482 /* first, remove all old entries */
483 bla_del_backbone_claims(backbone_gw);
485 bat_dbg(DBG_BLA, backbone_gw->bat_priv,
486 "Sending REQUEST to %pM\n",
487 backbone_gw->orig);
489 /* send request */
490 bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
491 backbone_gw->vid, CLAIM_TYPE_REQUEST);
493 /* no local broadcasts should be sent or received, for now. */
494 if (!atomic_read(&backbone_gw->request_sent)) {
495 atomic_inc(&backbone_gw->bat_priv->bla_num_requests);
496 atomic_set(&backbone_gw->request_sent, 1);
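/* As long as request_sent is set, bla_num_requests stays incremented and
 * bla_rx()/bla_tx() drop multicast frames, so no loops can form while the
 * claim table is being rebuilt. handle_announce() re-enables traffic once
 * the CRCs match again.
 */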
501 * @bat_priv: the bat priv with all the soft interface information
502 * @backbone_gw: our backbone gateway which should be announced
504 * This function sends an announcement. It is called from multiple
505 * places.
507 static void bla_send_announce(struct bat_priv *bat_priv,
508 struct backbone_gw *backbone_gw)
510 uint8_t mac[ETH_ALEN];
511 uint16_t crc;
513 memcpy(mac, announce_mac, 4);
514 crc = htons(backbone_gw->crc);
515 memcpy(&mac[4], (uint8_t *)&crc, 2);
517 bla_send_claim(bat_priv, mac, backbone_gw->vid, CLAIM_TYPE_ANNOUNCE);
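/* Example: a claim CRC of 0x1234 is announced with the HW SRC
 * 43:05:43:05:12:34; the first four bytes are announce_mac, the last two
 * carry the CRC as written by htons() above.
 */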
522 * @bat_priv: the bat priv with all the soft interface information
523 * @mac: the mac address of the claim
524 * @vid: the VLAN ID of the frame
525 * @backbone_gw: the backbone gateway which claims it
527 * Adds a claim in the claim hash.
529 static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
530 const short vid, struct backbone_gw *backbone_gw)
532 struct claim *claim;
533 struct claim search_claim;
534 int hash_added;
536 memcpy(search_claim.addr, mac, ETH_ALEN);
537 search_claim.vid = vid;
538 claim = claim_hash_find(bat_priv, &search_claim);
540 /* create a new claim entry if it does not exist yet. */
541 if (!claim) {
542 claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
543 if (!claim)
544 return;
546 memcpy(claim->addr, mac, ETH_ALEN);
547 claim->vid = vid;
548 claim->lasttime = jiffies;
549 claim->backbone_gw = backbone_gw;
551 atomic_set(&claim->refcount, 2);
552 bat_dbg(DBG_BLA, bat_priv,
553 "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
554 mac, vid);
555 hash_added = hash_add(bat_priv->claim_hash, compare_claim,
556 choose_claim, claim, &claim->hash_entry);
558 if (unlikely(hash_added != 0)) {
559 /* only local changes happened. */
560 kfree(claim);
561 return;
563 } else {
564 claim->lasttime = jiffies;
565 if (claim->backbone_gw == backbone_gw)
566 /* no need to register a new backbone */
567 goto claim_free_ref;
569 bat_dbg(DBG_BLA, bat_priv,
570 "bla_add_claim(): changing ownership for %pM, vid %d\n",
571 mac, vid);
573 claim->backbone_gw->crc ^=
574 crc16(0, claim->addr, ETH_ALEN);
575 backbone_gw_free_ref(claim->backbone_gw);
578 /* set (new) backbone gw */
579 atomic_inc(&backbone_gw->refcount);
580 claim->backbone_gw = backbone_gw;
582 backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
583 backbone_gw->lasttime = jiffies;
585 claim_free_ref:
586 claim_free_ref(claim);
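/* The per-gateway CRC is kept as an xor of crc16() over every claimed client
 * MAC: xor-ing a client in when it is claimed and out again when the claim
 * is dropped (or changes owner, as above) leaves a checksum over the current
 * set of clients, independent of the order of operations.
 */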
589 /* Delete a claim from the claim hash which has the
590 * given mac address and vid.
592 static void bla_del_claim(struct bat_priv *bat_priv, const uint8_t *mac,
593 const short vid)
595 struct claim search_claim, *claim;
597 memcpy(search_claim.addr, mac, ETH_ALEN);
598 search_claim.vid = vid;
599 claim = claim_hash_find(bat_priv, &search_claim);
600 if (!claim)
601 return;
603 bat_dbg(DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", mac, vid);
605 hash_remove(bat_priv->claim_hash, compare_claim, choose_claim, claim);
606 claim_free_ref(claim); /* reference from the hash is gone */
608 claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
610 /* don't need the reference from hash_find() anymore */
611 claim_free_ref(claim);
614 /* check for ANNOUNCE frame, return 1 if handled */
615 static int handle_announce(struct bat_priv *bat_priv,
616 uint8_t *an_addr, uint8_t *backbone_addr, short vid)
618 struct backbone_gw *backbone_gw;
619 uint16_t crc;
621 if (memcmp(an_addr, announce_mac, 4) != 0)
622 return 0;
624 backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
626 if (unlikely(!backbone_gw))
627 return 1;
630 /* handle as ANNOUNCE frame */
631 backbone_gw->lasttime = jiffies;
632 crc = ntohs(*((uint16_t *)(&an_addr[4])));
634 bat_dbg(DBG_BLA, bat_priv,
635 "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
636 vid, backbone_gw->orig, crc);
638 if (backbone_gw->crc != crc) {
639 bat_dbg(DBG_BLA, backbone_gw->bat_priv,
640 "handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n",
641 backbone_gw->orig, backbone_gw->vid, backbone_gw->crc,
642 crc);
644 bla_send_request(backbone_gw);
645 } else {
646 /* if we have sent a request and the crc was OK,
647 * we can allow traffic again.
649 if (atomic_read(&backbone_gw->request_sent)) {
650 atomic_dec(&backbone_gw->bat_priv->bla_num_requests);
651 atomic_set(&backbone_gw->request_sent, 0);
655 backbone_gw_free_ref(backbone_gw);
656 return 1;
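/* Resync sequence on a CRC mismatch: bla_send_request() wipes our copy of
 * the peer's claims and sends a REQUEST, the peer then repeats each of its
 * claims (see bla_answer_request()) and finishes with another ANNOUNCE,
 * which lets us compare the CRCs again and lift the broadcast block.
 */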
659 /* check for REQUEST frame, return 1 if handled */
660 static int handle_request(struct bat_priv *bat_priv,
661 struct hard_iface *primary_if,
662 uint8_t *backbone_addr,
663 struct ethhdr *ethhdr, short vid)
665 /* check for REQUEST frame */
666 if (!compare_eth(backbone_addr, ethhdr->h_dest))
667 return 0;
669 /* sanity check, this should not happen on a normal switch,
670 * we ignore it in this case.
672 if (!compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
673 return 1;
675 bat_dbg(DBG_BLA, bat_priv,
676 "handle_request(): REQUEST vid %d (sent by %pM)...\n",
677 vid, ethhdr->h_source);
679 bla_answer_request(bat_priv, primary_if, vid);
680 return 1;
683 /* check for UNCLAIM frame, return 1 if handled */
684 static int handle_unclaim(struct bat_priv *bat_priv,
685 struct hard_iface *primary_if,
686 uint8_t *backbone_addr,
687 uint8_t *claim_addr, short vid)
689 struct backbone_gw *backbone_gw;
691 /* unclaim in any case if it is our own */
692 if (primary_if && compare_eth(backbone_addr,
693 primary_if->net_dev->dev_addr))
694 bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_DEL);
696 backbone_gw = backbone_hash_find(bat_priv, backbone_addr, vid);
698 if (!backbone_gw)
699 return 1;
701 /* this must be an UNCLAIM frame */
702 bat_dbg(DBG_BLA, bat_priv,
703 "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
704 claim_addr, vid, backbone_gw->orig);
706 bla_del_claim(bat_priv, claim_addr, vid);
707 backbone_gw_free_ref(backbone_gw);
708 return 1;
711 /* check for CLAIM frame, return 1 if handled */
712 static int handle_claim(struct bat_priv *bat_priv,
713 struct hard_iface *primary_if, uint8_t *backbone_addr,
714 uint8_t *claim_addr, short vid)
716 struct backbone_gw *backbone_gw;
718 /* register the gateway if not yet available, and add the claim. */
720 backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
722 if (unlikely(!backbone_gw))
723 return 1;
725 /* this must be a CLAIM frame */
726 bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
727 if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
728 bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_ADD);
730 /* TODO: we could call something like tt_local_del() here. */
732 backbone_gw_free_ref(backbone_gw);
733 return 1;
737 * @bat_priv: the bat priv with all the soft interface information
738 * @primary_if: the selected primary interface
739 * @hw_src: the Hardware source in the ARP Header
740 * @hw_dst: the Hardware destination in the ARP Header
741 * @ethhdr: pointer to the Ethernet header of the claim frame
743 * checks if it is a claim packet and if it is in the same group.
744 * This function also applies the group ID of the sender
745 * if it is in the same mesh.
747 * returns:
748 * 2 - if it is a claim packet and on the same group
749 * 1 - if it is a claim packet from another group
750 * 0 - if it is not a claim packet
752 static int check_claim_group(struct bat_priv *bat_priv,
753 struct hard_iface *primary_if,
754 uint8_t *hw_src, uint8_t *hw_dst,
755 struct ethhdr *ethhdr)
757 uint8_t *backbone_addr;
758 struct orig_node *orig_node;
759 struct bla_claim_dst *bla_dst, *bla_dst_own;
761 bla_dst = (struct bla_claim_dst *)hw_dst;
762 bla_dst_own = &bat_priv->claim_dest;
764 /* check if it is a claim packet in general */
765 if (memcmp(bla_dst->magic, bla_dst_own->magic,
766 sizeof(bla_dst->magic)) != 0)
767 return 0;
769 /* for claim frames, the backbone address is in hw_src;
770 * for announcements, requests and unclaims it is the Ethernet source
772 switch (bla_dst->type) {
773 case CLAIM_TYPE_ADD:
774 backbone_addr = hw_src;
775 break;
776 case CLAIM_TYPE_REQUEST:
777 case CLAIM_TYPE_ANNOUNCE:
778 case CLAIM_TYPE_DEL:
779 backbone_addr = ethhdr->h_source;
780 break;
781 default:
782 return 0;
785 /* don't accept claim frames from ourselves */
786 if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
787 return 0;
789 /* if it's already the same group, it is fine. */
790 if (bla_dst->group == bla_dst_own->group)
791 return 2;
793 /* let's see if this originator is in our mesh */
794 orig_node = orig_hash_find(bat_priv, backbone_addr);
796 /* don't accept claims from gateways that are not in
797 * the same mesh or group.
799 if (!orig_node)
800 return 1;
802 /* if our mesh friend's group id is larger, adopt it for ourselves. */
803 if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
804 bat_dbg(DBG_BLA, bat_priv,
805 "taking other backbones claim group: %04x\n",
806 ntohs(bla_dst->group));
807 bla_dst_own->group = bla_dst->group;
810 orig_node_free_ref(orig_node);
812 return 2;
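/* The group id is a crc16() over the primary MAC address (see bla_init() and
 * bla_update_orig_address()). When meshes with different ids meet, every
 * node adopts the numerically larger one, so all backbone gateways
 * eventually agree on a single claim group.
 */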
817 * @bat_priv: the bat priv with all the soft interface information
818 * @skb: the frame to be checked
820 * Check if this is a claim frame, and process it accordingly.
822 * returns 1 if it was a claim frame, otherwise return 0 to
823 * tell the caller that it can use the frame on its own.
825 static int bla_process_claim(struct bat_priv *bat_priv,
826 struct hard_iface *primary_if,
827 struct sk_buff *skb)
829 struct ethhdr *ethhdr;
830 struct vlan_ethhdr *vhdr;
831 struct arphdr *arphdr;
832 uint8_t *hw_src, *hw_dst;
833 struct bla_claim_dst *bla_dst;
834 uint16_t proto;
835 int headlen;
836 short vid = -1;
837 int ret;
839 ethhdr = (struct ethhdr *)skb_mac_header(skb);
841 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
842 vhdr = (struct vlan_ethhdr *)ethhdr;
843 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
844 proto = ntohs(vhdr->h_vlan_encapsulated_proto);
845 headlen = sizeof(*vhdr);
846 } else {
847 proto = ntohs(ethhdr->h_proto);
848 headlen = sizeof(*ethhdr);
851 if (proto != ETH_P_ARP)
852 return 0; /* not a claim frame */
854 /* this must be an ARP frame. check if it is a claim. */
856 if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
857 return 0;
859 /* pskb_may_pull() may have modified the pointers, get ethhdr again */
860 ethhdr = (struct ethhdr *)skb_mac_header(skb);
861 arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);
863 /* Check whether the ARP frame carries valid
864 * IP information
867 if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
868 return 0;
869 if (arphdr->ar_pro != htons(ETH_P_IP))
870 return 0;
871 if (arphdr->ar_hln != ETH_ALEN)
872 return 0;
873 if (arphdr->ar_pln != 4)
874 return 0;
876 hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
877 hw_dst = hw_src + ETH_ALEN + 4;
878 bla_dst = (struct bla_claim_dst *)hw_dst;
880 /* check if it is a claim frame. */
881 ret = check_claim_group(bat_priv, primary_if, hw_src, hw_dst, ethhdr);
882 if (ret == 1)
883 bat_dbg(DBG_BLA, bat_priv,
884 "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
885 ethhdr->h_source, vid, hw_src, hw_dst);
887 if (ret < 2)
888 return ret;
890 /* become a backbone gw ourselves on this vlan if we haven't done so yet */
891 bla_update_own_backbone_gw(bat_priv, primary_if, vid);
893 /* check for the different types of claim frames ... */
894 switch (bla_dst->type) {
895 case CLAIM_TYPE_ADD:
896 if (handle_claim(bat_priv, primary_if, hw_src,
897 ethhdr->h_source, vid))
898 return 1;
899 break;
900 case CLAIM_TYPE_DEL:
901 if (handle_unclaim(bat_priv, primary_if,
902 ethhdr->h_source, hw_src, vid))
903 return 1;
904 break;
906 case CLAIM_TYPE_ANNOUNCE:
907 if (handle_announce(bat_priv, hw_src, ethhdr->h_source, vid))
908 return 1;
909 break;
910 case CLAIM_TYPE_REQUEST:
911 if (handle_request(bat_priv, primary_if, hw_src, ethhdr, vid))
912 return 1;
913 break;
916 bat_dbg(DBG_BLA, bat_priv,
917 "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
918 ethhdr->h_source, vid, hw_src, hw_dst);
919 return 1;
922 /* Check when we last heard from other nodes, and remove them in case of
923 * a time out, or clean all backbone gws if now is set.
925 static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now)
927 struct backbone_gw *backbone_gw;
928 struct hlist_node *node, *node_tmp;
929 struct hlist_head *head;
930 struct hashtable_t *hash;
931 spinlock_t *list_lock; /* protects write access to the hash lists */
932 int i;
934 hash = bat_priv->backbone_hash;
935 if (!hash)
936 return;
938 for (i = 0; i < hash->size; i++) {
939 head = &hash->table[i];
940 list_lock = &hash->list_locks[i];
942 spin_lock_bh(list_lock);
943 hlist_for_each_entry_safe(backbone_gw, node, node_tmp,
944 head, hash_entry) {
945 if (now)
946 goto purge_now;
947 if (!has_timed_out(backbone_gw->lasttime,
948 BLA_BACKBONE_TIMEOUT))
949 continue;
951 bat_dbg(DBG_BLA, backbone_gw->bat_priv,
952 "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
953 backbone_gw->orig);
955 purge_now:
956 /* don't wait for the pending request anymore */
957 if (atomic_read(&backbone_gw->request_sent))
958 atomic_dec(&bat_priv->bla_num_requests);
960 bla_del_backbone_claims(backbone_gw);
962 hlist_del_rcu(node);
963 backbone_gw_free_ref(backbone_gw);
965 spin_unlock_bh(list_lock);
970 * @bat_priv: the bat priv with all the soft interface information
971 * @primary_if: the selected primary interface, may be NULL if now is set
972 * @now: whether the whole hash shall be wiped now
974 * Check when we last heard from our own claims, and remove them in case of
975 * a time out, or clean all claims if now is set
977 static void bla_purge_claims(struct bat_priv *bat_priv,
978 struct hard_iface *primary_if, int now)
980 struct claim *claim;
981 struct hlist_node *node;
982 struct hlist_head *head;
983 struct hashtable_t *hash;
984 int i;
986 hash = bat_priv->claim_hash;
987 if (!hash)
988 return;
990 for (i = 0; i < hash->size; i++) {
991 head = &hash->table[i];
993 rcu_read_lock();
994 hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
995 if (now)
996 goto purge_now;
997 if (!compare_eth(claim->backbone_gw->orig,
998 primary_if->net_dev->dev_addr))
999 continue;
1000 if (!has_timed_out(claim->lasttime,
1001 BLA_CLAIM_TIMEOUT))
1002 continue;
1004 bat_dbg(DBG_BLA, bat_priv,
1005 "bla_purge_claims(): %pM, vid %d, time out\n",
1006 claim->addr, claim->vid);
1008 purge_now:
1009 handle_unclaim(bat_priv, primary_if,
1010 claim->backbone_gw->orig,
1011 claim->addr, claim->vid);
1013 rcu_read_unlock();
1018 * @bat_priv: the bat priv with all the soft interface information
1019 * @primary_if: the new selected primary_if
1020 * @oldif: the old primary interface, may be NULL
1022 * Update the backbone gateways when the own orig address changes.
1025 void bla_update_orig_address(struct bat_priv *bat_priv,
1026 struct hard_iface *primary_if,
1027 struct hard_iface *oldif)
1029 struct backbone_gw *backbone_gw;
1030 struct hlist_node *node;
1031 struct hlist_head *head;
1032 struct hashtable_t *hash;
1033 int i;
1035 /* reset bridge loop avoidance group id */
1036 bat_priv->claim_dest.group =
1037 htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
1039 if (!oldif) {
1040 bla_purge_claims(bat_priv, NULL, 1);
1041 bla_purge_backbone_gw(bat_priv, 1);
1042 return;
1045 hash = bat_priv->backbone_hash;
1046 if (!hash)
1047 return;
1049 for (i = 0; i < hash->size; i++) {
1050 head = &hash->table[i];
1052 rcu_read_lock();
1053 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
1054 /* own orig still holds the old value. */
1055 if (!compare_eth(backbone_gw->orig,
1056 oldif->net_dev->dev_addr))
1057 continue;
1059 memcpy(backbone_gw->orig,
1060 primary_if->net_dev->dev_addr, ETH_ALEN);
1061 /* send an announce frame so others will ask for our
1062 * claims and update their tables.
1064 bla_send_announce(bat_priv, backbone_gw);
1066 rcu_read_unlock();
1072 /* (re)start the timer */
1073 static void bla_start_timer(struct bat_priv *bat_priv)
1075 INIT_DELAYED_WORK(&bat_priv->bla_work, bla_periodic_work);
1076 queue_delayed_work(bat_event_workqueue, &bat_priv->bla_work,
1077 msecs_to_jiffies(BLA_PERIOD_LENGTH));
1080 /* periodic work to do:
1081 * * purge structures when they are too old
1082 * * send announcements
1084 static void bla_periodic_work(struct work_struct *work)
1086 struct delayed_work *delayed_work =
1087 container_of(work, struct delayed_work, work);
1088 struct bat_priv *bat_priv =
1089 container_of(delayed_work, struct bat_priv, bla_work);
1090 struct hlist_node *node;
1091 struct hlist_head *head;
1092 struct backbone_gw *backbone_gw;
1093 struct hashtable_t *hash;
1094 struct hard_iface *primary_if;
1095 int i;
1097 primary_if = primary_if_get_selected(bat_priv);
1098 if (!primary_if)
1099 goto out;
1101 bla_purge_claims(bat_priv, primary_if, 0);
1102 bla_purge_backbone_gw(bat_priv, 0);
1104 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1105 goto out;
1107 hash = bat_priv->backbone_hash;
1108 if (!hash)
1109 goto out;
1111 for (i = 0; i < hash->size; i++) {
1112 head = &hash->table[i];
1114 rcu_read_lock();
1115 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
1116 if (!compare_eth(backbone_gw->orig,
1117 primary_if->net_dev->dev_addr))
1118 continue;
1120 backbone_gw->lasttime = jiffies;
1122 bla_send_announce(bat_priv, backbone_gw);
1124 rcu_read_unlock();
1126 out:
1127 if (primary_if)
1128 hardif_free_ref(primary_if);
1130 bla_start_timer(bat_priv);
1133 /* initialize all bla structures */
1134 int bla_init(struct bat_priv *bat_priv)
1136 int i;
1137 uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
1138 struct hard_iface *primary_if;
1140 bat_dbg(DBG_BLA, bat_priv, "bla hash registering\n");
1142 /* setting claim destination address */
1143 memcpy(&bat_priv->claim_dest.magic, claim_dest, 3);
1144 bat_priv->claim_dest.type = 0;
1145 primary_if = primary_if_get_selected(bat_priv);
1146 if (primary_if) {
1147 bat_priv->claim_dest.group =
1148 htons(crc16(0, primary_if->net_dev->dev_addr,
1149 ETH_ALEN));
1150 hardif_free_ref(primary_if);
1151 } else {
1152 bat_priv->claim_dest.group = 0; /* will be set later */
1155 /* initialize the duplicate list */
1156 for (i = 0; i < DUPLIST_SIZE; i++)
1157 bat_priv->bcast_duplist[i].entrytime =
1158 jiffies - msecs_to_jiffies(DUPLIST_TIMEOUT);
1159 bat_priv->bcast_duplist_curr = 0;
1161 if (bat_priv->claim_hash)
1162 return 1;
1164 bat_priv->claim_hash = hash_new(128);
1165 bat_priv->backbone_hash = hash_new(32);
1167 if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
1168 return -1;
1170 bat_dbg(DBG_BLA, bat_priv, "bla hashes initialized\n");
1172 bla_start_timer(bat_priv);
1173 return 1;
1177 * @bat_priv: the bat priv with all the soft interface information
1178 * @bcast_packet: the broadcast packet to check
1179 * @hdr_size: maximum length of the frame
1181 * check if it is on our broadcast list. Another gateway might
1182 * have sent the same packet because it is connected to the same backbone,
1183 * so we have to remove this duplicate.
1185 * This is performed by checking the CRC, which will tell us
1186 * with a good chance that it is the same packet. If it is furthermore
1187 * sent by another host, drop it. We allow equal packets from
1188 * the same host however as this might be intended.
1192 int bla_check_bcast_duplist(struct bat_priv *bat_priv,
1193 struct bcast_packet *bcast_packet,
1194 int hdr_size)
1196 int i, length, curr;
1197 uint8_t *content;
1198 uint16_t crc;
1199 struct bcast_duplist_entry *entry;
1201 length = hdr_size - sizeof(*bcast_packet);
1202 content = (uint8_t *)bcast_packet;
1203 content += sizeof(*bcast_packet);
1205 /* calculate the crc ... */
1206 crc = crc16(0, content, length);
1208 for (i = 0 ; i < DUPLIST_SIZE; i++) {
1209 curr = (bat_priv->bcast_duplist_curr + i) % DUPLIST_SIZE;
1210 entry = &bat_priv->bcast_duplist[curr];
1212 /* we can stop searching if the entry is too old;
1213 * later entries will be even older
1215 if (has_timed_out(entry->entrytime, DUPLIST_TIMEOUT))
1216 break;
1218 if (entry->crc != crc)
1219 continue;
1221 if (compare_eth(entry->orig, bcast_packet->orig))
1222 continue;
1224 /* this entry seems to match: same crc, not too old,
1225 * and from another gw. therefore return 1 to forbid it.
1227 return 1;
1229 /* not found, add a new entry (overwrite the oldest entry) */
1230 curr = (bat_priv->bcast_duplist_curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE;
1231 entry = &bat_priv->bcast_duplist[curr];
1232 entry->crc = crc;
1233 entry->entrytime = jiffies;
1234 memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
1235 bat_priv->bcast_duplist_curr = curr;
1237 /* allow it, it's the first occurrence. */
1238 return 0;
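/* bcast_duplist is a small ring buffer of DUPLIST_SIZE (crc, originator,
 * timestamp) entries. The crc16() over the broadcast payload identifies a
 * packet with good probability; a matching entry from a different originator
 * that has not yet timed out is treated as the same frame re-injected by
 * another backbone gateway and is therefore dropped.
 */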
1244 * @bat_priv: the bat priv with all the soft interface information
1245 * @orig: originator mac address
1247 * check if the originator is a gateway for any VLAN ID.
1249 * returns 1 if it is found, 0 otherwise
1253 int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
1255 struct hashtable_t *hash = bat_priv->backbone_hash;
1256 struct hlist_head *head;
1257 struct hlist_node *node;
1258 struct backbone_gw *backbone_gw;
1259 int i;
1261 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1262 return 0;
1264 if (!hash)
1265 return 0;
1267 for (i = 0; i < hash->size; i++) {
1268 head = &hash->table[i];
1270 rcu_read_lock();
1271 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
1272 if (compare_eth(backbone_gw->orig, orig)) {
1273 rcu_read_unlock();
1274 return 1;
1277 rcu_read_unlock();
1280 return 0;
1285 * @skb: the frame to be checked
1286 * @orig_node: the orig_node of the frame
1287 * @hdr_size: length of the encapsulating batman-adv header
1289 * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
1290 * if the orig_node is also a gateway on the soft interface, otherwise it
1291 * returns 0.
1294 int bla_is_backbone_gw(struct sk_buff *skb,
1295 struct orig_node *orig_node, int hdr_size)
1297 struct ethhdr *ethhdr;
1298 struct vlan_ethhdr *vhdr;
1299 struct backbone_gw *backbone_gw;
1300 short vid = -1;
1302 if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
1303 return 0;
1305 /* first, find out the vid. */
1306 if (!pskb_may_pull(skb, hdr_size + sizeof(struct ethhdr)))
1307 return 0;
1309 ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size);
1311 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
1312 if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
1313 return 0;
1315 vhdr = (struct vlan_ethhdr *)(((uint8_t *)skb->data) +
1316 hdr_size);
1317 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
1320 /* see if this originator is a backbone gw for this VLAN */
1322 backbone_gw = backbone_hash_find(orig_node->bat_priv,
1323 orig_node->orig, vid);
1324 if (!backbone_gw)
1325 return 0;
1327 backbone_gw_free_ref(backbone_gw);
1328 return 1;
1331 /* free all bla structures (for softinterface free or module unload) */
1332 void bla_free(struct bat_priv *bat_priv)
1334 struct hard_iface *primary_if;
1336 cancel_delayed_work_sync(&bat_priv->bla_work);
1337 primary_if = primary_if_get_selected(bat_priv);
1339 if (bat_priv->claim_hash) {
1340 bla_purge_claims(bat_priv, primary_if, 1);
1341 hash_destroy(bat_priv->claim_hash);
1342 bat_priv->claim_hash = NULL;
1344 if (bat_priv->backbone_hash) {
1345 bla_purge_backbone_gw(bat_priv, 1);
1346 hash_destroy(bat_priv->backbone_hash);
1347 bat_priv->backbone_hash = NULL;
1349 if (primary_if)
1350 hardif_free_ref(primary_if);
1354 * @bat_priv: the bat priv with all the soft interface information
1355 * @skb: the frame to be checked
1356 * @vid: the VLAN ID of the frame
1358 * bla_rx() checks if:
1359 * * we have to race for a claim
1360 * * the frame is allowed on the LAN
1362 * in these cases, the skb is handled further by this function and
1363 * 1 is returned; otherwise 0 is returned and the caller shall
1364 * process the skb further.
1367 int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
1369 struct ethhdr *ethhdr;
1370 struct claim search_claim, *claim = NULL;
1371 struct hard_iface *primary_if;
1372 int ret;
1374 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1376 primary_if = primary_if_get_selected(bat_priv);
1377 if (!primary_if)
1378 goto handled;
1380 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1381 goto allow;
1384 if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
1385 /* don't allow broadcasts while requests are in flight */
1386 if (is_multicast_ether_addr(ethhdr->h_dest))
1387 goto handled;
1389 memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
1390 search_claim.vid = vid;
1391 claim = claim_hash_find(bat_priv, &search_claim);
1393 if (!claim) {
1394 /* possible optimization: race for a claim */
1395 /* No claim exists yet, claim it for us!
1397 handle_claim(bat_priv, primary_if,
1398 primary_if->net_dev->dev_addr,
1399 ethhdr->h_source, vid);
1400 goto allow;
1403 /* if it is our own claim ... */
1404 if (compare_eth(claim->backbone_gw->orig,
1405 primary_if->net_dev->dev_addr)) {
1406 /* ... allow it in any case */
1407 claim->lasttime = jiffies;
1408 goto allow;
1411 /* if it is a broadcast ... */
1412 if (is_multicast_ether_addr(ethhdr->h_dest)) {
1413 /* ... drop it. the responsible gateway is in charge. */
1414 goto handled;
1415 } else {
1416 /* seems the client considers us as its best gateway.
1417 * send a claim and update the claim table
1418 * immediately.
1420 handle_claim(bat_priv, primary_if,
1421 primary_if->net_dev->dev_addr,
1422 ethhdr->h_source, vid);
1423 goto allow;
1425 allow:
1426 bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1427 ret = 0;
1428 goto out;
1430 handled:
1431 kfree_skb(skb);
1432 ret = 1;
1434 out:
1435 if (primary_if)
1436 hardif_free_ref(primary_if);
1437 if (claim)
1438 claim_free_ref(claim);
1439 return ret;
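/* bla_rx() in short: unclaimed clients are claimed for ourselves, frames of
 * clients we already claim are always allowed, broadcasts from clients
 * claimed by another gateway are dropped (that gateway delivers them), and a
 * unicast frame from such a client re-claims it, since the client apparently
 * picked us as its gateway.
 */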
1443 * @bat_priv: the bat priv with all the soft interface information
1444 * @skb: the frame to be checked
1445 * @vid: the VLAN ID of the frame
1447 * bla_tx checks if:
1448 * * a claim was received which has to be processed
1449 * * the frame is allowed on the mesh
1451 * in these cases, the skb is handled further by this function and
1452 * 1 is returned; otherwise 0 is returned and the caller shall
1453 * process the skb further.
1456 int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
1458 struct ethhdr *ethhdr;
1459 struct claim search_claim, *claim = NULL;
1460 struct hard_iface *primary_if;
1461 int ret = 0;
1463 primary_if = primary_if_get_selected(bat_priv);
1464 if (!primary_if)
1465 goto out;
1467 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1468 goto allow;
1470 /* in VLAN case, the mac header might not be set. */
1471 skb_reset_mac_header(skb);
1473 if (bla_process_claim(bat_priv, primary_if, skb))
1474 goto handled;
1476 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1478 if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
1479 /* don't allow broadcasts while requests are in flight */
1480 if (is_multicast_ether_addr(ethhdr->h_dest))
1481 goto handled;
1483 memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
1484 search_claim.vid = vid;
1486 claim = claim_hash_find(bat_priv, &search_claim);
1488 /* if no claim exists, allow it. */
1489 if (!claim)
1490 goto allow;
1492 /* check if we are responsible. */
1493 if (compare_eth(claim->backbone_gw->orig,
1494 primary_if->net_dev->dev_addr)) {
1495 /* if yes, the client has roamed and we have
1496 * to unclaim it.
1498 handle_unclaim(bat_priv, primary_if,
1499 primary_if->net_dev->dev_addr,
1500 ethhdr->h_source, vid);
1501 goto allow;
1504 /* check if it is a multicast/broadcast frame */
1505 if (is_multicast_ether_addr(ethhdr->h_dest)) {
1506 /* drop it. the responsible gateway has forwarded it into
1507 * the backbone network.
1509 goto handled;
1510 } else {
1511 /* we must allow it. at least if we are
1512 * responsible for the DESTINATION.
1514 goto allow;
1516 allow:
1517 bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1518 ret = 0;
1519 goto out;
1520 handled:
1521 ret = 1;
1522 out:
1523 if (primary_if)
1524 hardif_free_ref(primary_if);
1525 if (claim)
1526 claim_free_ref(claim);
1527 return ret;
1530 int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1532 struct net_device *net_dev = (struct net_device *)seq->private;
1533 struct bat_priv *bat_priv = netdev_priv(net_dev);
1534 struct hashtable_t *hash = bat_priv->claim_hash;
1535 struct claim *claim;
1536 struct hard_iface *primary_if;
1537 struct hlist_node *node;
1538 struct hlist_head *head;
1539 uint32_t i;
1540 bool is_own;
1541 int ret = 0;
1543 primary_if = primary_if_get_selected(bat_priv);
1544 if (!primary_if) {
1545 ret = seq_printf(seq,
1546 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
1547 net_dev->name);
1548 goto out;
1551 if (primary_if->if_status != IF_ACTIVE) {
1552 ret = seq_printf(seq,
1553 "BATMAN mesh %s disabled - primary interface not active\n",
1554 net_dev->name);
1555 goto out;
1558 seq_printf(seq,
1559 "Claims announced for the mesh %s (orig %pM, group id %04x)\n",
1560 net_dev->name, primary_if->net_dev->dev_addr,
1561 ntohs(bat_priv->claim_dest.group));
1562 seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n",
1563 "Client", "VID", "Originator", "CRC");
1564 for (i = 0; i < hash->size; i++) {
1565 head = &hash->table[i];
1567 rcu_read_lock();
1568 hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
1569 is_own = compare_eth(claim->backbone_gw->orig,
1570 primary_if->net_dev->dev_addr);
1571 seq_printf(seq, " * %pM on % 5d by %pM [%c] (%04x)\n",
1572 claim->addr, claim->vid,
1573 claim->backbone_gw->orig,
1574 (is_own ? 'x' : ' '),
1575 claim->backbone_gw->crc);
1577 rcu_read_unlock();
1579 out:
1580 if (primary_if)
1581 hardif_free_ref(primary_if);
1582 return ret;