batman-adv: add broadcast duplicate check
net/batman-adv/bridge_loop_avoidance.c

/*
 * Copyright (C) 2011 B.A.T.M.A.N. contributors:
 *
 * Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "hash.h"
#include "hard-interface.h"
#include "originator.h"
#include "bridge_loop_avoidance.h"
#include "translation-table.h"
#include "send.h"

#include <linux/etherdevice.h>
#include <linux/crc16.h>
#include <linux/if_arp.h>
#include <net/arp.h>
#include <linux/if_vlan.h>
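
/* How the claim checksum works (see bla_add_claim(), bla_del_claim(),
 * bla_send_announce() and handle_announce() below): every backbone
 * gateway keeps a per-VLAN CRC over the clients it has claimed - adding
 * a claim XORs crc16() of the client MAC into backbone_gw->crc,
 * removing it XORs it out again.  ANNOUNCE frames carry this checksum;
 * a receiver whose local view of the sender's claims yields a different
 * CRC answers with a REQUEST, upon which the sender repeats all of its
 * claims and finishes with another ANNOUNCE.
 */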

static const uint8_t claim_dest[6] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
static const uint8_t announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
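
/* claim_dest is the hw destination prefix of claim frames: byte 3 is
 * overwritten with the claim type in bla_send_claim() (see the
 * "FF:43:05:XX:00:00" comment there).  announce_mac forms the first
 * four bytes of the hw source of ANNOUNCE frames; bla_send_announce()
 * appends the two CRC bytes in network byte order.
 */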

static void bla_periodic_work(struct work_struct *work);
static void bla_send_announce(struct bat_priv *bat_priv,
                              struct backbone_gw *backbone_gw);
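
/* Both index functions below hash the {MAC address, VLAN ID} key with
 * Bob Jenkins' one-at-a-time hash, so a stored entry and a search key
 * built from the same address and vid always land in the same bucket.
 */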

/* return the index of the claim */
static inline uint32_t choose_claim(const void *data, uint32_t size)
{
        const unsigned char *key = data;
        uint32_t hash = 0;
        size_t i;

        for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
                hash += key[i];
                hash += (hash << 10);
                hash ^= (hash >> 6);
        }

        hash += (hash << 3);
        hash ^= (hash >> 11);
        hash += (hash << 15);

        return hash % size;
}

/* return the index of the backbone gateway */
static inline uint32_t choose_backbone_gw(const void *data, uint32_t size)
{
        const unsigned char *key = data;
        uint32_t hash = 0;
        size_t i;

        for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
                hash += key[i];
                hash += (hash << 10);
                hash ^= (hash >> 6);
        }

        hash += (hash << 3);
        hash ^= (hash >> 11);
        hash += (hash << 15);

        return hash % size;
}

/* compares address and vid of two backbone gws */
static int compare_backbone_gw(const struct hlist_node *node, const void *data2)
{
        const void *data1 = container_of(node, struct backbone_gw,
                                         hash_entry);

        return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
}

/* compares address and vid of two claims */
static int compare_claim(const struct hlist_node *node, const void *data2)
{
        const void *data1 = container_of(node, struct claim,
                                         hash_entry);

        return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
}

/* free a backbone gw */
static void backbone_gw_free_ref(struct backbone_gw *backbone_gw)
{
        if (atomic_dec_and_test(&backbone_gw->refcount))
                kfree_rcu(backbone_gw, rcu);
}

/* finally deinitialize the claim */
static void claim_free_rcu(struct rcu_head *rcu)
{
        struct claim *claim;

        claim = container_of(rcu, struct claim, rcu);

        backbone_gw_free_ref(claim->backbone_gw);
        kfree(claim);
}

/* free a claim, call claim_free_rcu if it's the last reference */
static void claim_free_ref(struct claim *claim)
{
        if (atomic_dec_and_test(&claim->refcount))
                call_rcu(&claim->rcu, claim_free_rcu);
}

/**
 * claim_hash_find
 * @bat_priv: the bat priv with all the soft interface information
 * @data: search data (may be local/static data)
 *
 * looks for a claim in the hash, and returns it if found
 * or NULL otherwise.
 */
static struct claim *claim_hash_find(struct bat_priv *bat_priv,
                                     struct claim *data)
{
        struct hashtable_t *hash = bat_priv->claim_hash;
        struct hlist_head *head;
        struct hlist_node *node;
        struct claim *claim;
        struct claim *claim_tmp = NULL;
        int index;

        if (!hash)
                return NULL;

        index = choose_claim(data, hash->size);
        head = &hash->table[index];

        rcu_read_lock();
        hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
                if (!compare_claim(&claim->hash_entry, data))
                        continue;

                if (!atomic_inc_not_zero(&claim->refcount))
                        continue;

                claim_tmp = claim;
                break;
        }
        rcu_read_unlock();

        return claim_tmp;
}

/**
 * backbone_hash_find
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address of the originator
 * @vid: the VLAN ID
 *
 * looks for a backbone gateway in the hash, and returns it if found
 * or NULL otherwise.
 */
static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
                                              uint8_t *addr, short vid)
{
        struct hashtable_t *hash = bat_priv->backbone_hash;
        struct hlist_head *head;
        struct hlist_node *node;
        struct backbone_gw search_entry, *backbone_gw;
        struct backbone_gw *backbone_gw_tmp = NULL;
        int index;

        if (!hash)
                return NULL;

        memcpy(search_entry.orig, addr, ETH_ALEN);
        search_entry.vid = vid;

        index = choose_backbone_gw(&search_entry, hash->size);
        head = &hash->table[index];

        rcu_read_lock();
        hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
                if (!compare_backbone_gw(&backbone_gw->hash_entry,
                                         &search_entry))
                        continue;

                if (!atomic_inc_not_zero(&backbone_gw->refcount))
                        continue;

                backbone_gw_tmp = backbone_gw;
                break;
        }
        rcu_read_unlock();

        return backbone_gw_tmp;
}

/* delete all claims for a backbone */
static void bla_del_backbone_claims(struct backbone_gw *backbone_gw)
{
        struct hashtable_t *hash;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
        struct claim *claim;
        int i;
        spinlock_t *list_lock;  /* protects write access to the hash lists */

        hash = backbone_gw->bat_priv->claim_hash;
        if (!hash)
                return;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];

                spin_lock_bh(list_lock);
                hlist_for_each_entry_safe(claim, node, node_tmp,
                                          head, hash_entry) {
                        if (claim->backbone_gw != backbone_gw)
                                continue;

                        claim_free_ref(claim);
                        hlist_del_rcu(node);
                }
                spin_unlock_bh(list_lock);
        }

        /* all claims gone, initialize CRC */
        backbone_gw->crc = BLA_CRC_INIT;
}

/**
 * bla_send_claim
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address to be announced within the claim
 * @vid: the VLAN ID
 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
 *
 * sends a claim frame according to the provided info.
 */
static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
                           short vid, int claimtype)
{
        struct sk_buff *skb;
        struct ethhdr *ethhdr;
        struct hard_iface *primary_if;
        struct net_device *soft_iface;
        uint8_t *hw_src;
        struct bla_claim_dst local_claim_dest;
        uint32_t zeroip = 0;

        primary_if = primary_if_get_selected(bat_priv);
        if (!primary_if)
                return;

        memcpy(&local_claim_dest, claim_dest, sizeof(local_claim_dest));
        local_claim_dest.type = claimtype;

        soft_iface = primary_if->soft_iface;

        skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
                         /* IP DST: 0.0.0.0 */
                         zeroip,
                         primary_if->soft_iface,
                         /* IP SRC: 0.0.0.0 */
                         zeroip,
                         /* Ethernet DST: Broadcast */
                         NULL,
                         /* Ethernet SRC/HW SRC: originator mac */
                         primary_if->net_dev->dev_addr,
                         /* HW DST: FF:43:05:XX:00:00
                          * with XX = claim type
                          */
                         (uint8_t *)&local_claim_dest);

        if (!skb)
                goto out;

        ethhdr = (struct ethhdr *)skb->data;
        hw_src = (uint8_t *)ethhdr +
                 sizeof(struct ethhdr) +
                 sizeof(struct arphdr);

        /* now we pretend that the client would have sent this ... */
        switch (claimtype) {
        case CLAIM_TYPE_ADD:
                /* normal claim frame
                 * set Ethernet SRC to the clients mac
                 */
                memcpy(ethhdr->h_source, mac, ETH_ALEN);
                bat_dbg(DBG_BLA, bat_priv,
                        "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
                break;
        case CLAIM_TYPE_DEL:
                /* unclaim frame
                 * set HW SRC to the clients mac
                 */
                memcpy(hw_src, mac, ETH_ALEN);
                bat_dbg(DBG_BLA, bat_priv,
                        "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, vid);
                break;
        case CLAIM_TYPE_ANNOUNCE:
                /* announcement frame
                 * set HW SRC to the special mac containing the crc
                 */
                memcpy(hw_src, mac, ETH_ALEN);
                bat_dbg(DBG_BLA, bat_priv,
                        "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
                        ethhdr->h_source, vid);
                break;
        case CLAIM_TYPE_REQUEST:
                /* request frame
                 * set HW SRC and Ethernet DST to the mac of the backbone
                 * gateway we request the claim table from
                 */
                memcpy(hw_src, mac, ETH_ALEN);
                memcpy(ethhdr->h_dest, mac, ETH_ALEN);
                bat_dbg(DBG_BLA, bat_priv,
                        "bla_send_claim(): REQUEST of %pM to %pM on vid %d\n",
                        ethhdr->h_source, ethhdr->h_dest, vid);
                break;
        }

        if (vid != -1)
                skb = vlan_insert_tag(skb, vid);

        skb_reset_mac_header(skb);
        skb->protocol = eth_type_trans(skb, soft_iface);
        bat_priv->stats.rx_packets++;
        bat_priv->stats.rx_bytes += skb->len + sizeof(struct ethhdr);
        soft_iface->last_rx = jiffies;

        netif_rx(skb);
out:
        if (primary_if)
                hardif_free_ref(primary_if);
}

/**
 * bla_get_backbone_gw
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the mac address of the originator
 * @vid: the VLAN ID
 *
 * searches for the backbone gw or creates a new one if it could not
 * be found.
 */
static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
                                               uint8_t *orig, short vid)
{
        struct backbone_gw *entry;
        struct orig_node *orig_node;
        int hash_added;

        entry = backbone_hash_find(bat_priv, orig, vid);

        if (entry)
                return entry;

        bat_dbg(DBG_BLA, bat_priv,
                "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
                orig, vid);

        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry)
                return NULL;

        entry->vid = vid;
        entry->lasttime = jiffies;
        entry->crc = BLA_CRC_INIT;
        entry->bat_priv = bat_priv;
        atomic_set(&entry->request_sent, 0);
        memcpy(entry->orig, orig, ETH_ALEN);

        /* one for the hash, one for returning */
        atomic_set(&entry->refcount, 2);

        hash_added = hash_add(bat_priv->backbone_hash, compare_backbone_gw,
                              choose_backbone_gw, entry, &entry->hash_entry);

        if (unlikely(hash_added != 0)) {
                /* hash failed, free the structure */
                kfree(entry);
                return NULL;
        }

        /* this is a gateway now, remove any tt entries */
        orig_node = orig_hash_find(bat_priv, orig);
        if (orig_node) {
                tt_global_del_orig(bat_priv, orig_node,
                                   "became a backbone gateway");
                orig_node_free_ref(orig_node);
        }

        return entry;
}

/* update or add the own backbone gw to make sure we announce
 * where we receive other backbone gws
 */
static void bla_update_own_backbone_gw(struct bat_priv *bat_priv,
                                       struct hard_iface *primary_if,
                                       short vid)
{
        struct backbone_gw *backbone_gw;

        backbone_gw = bla_get_backbone_gw(bat_priv,
                                          primary_if->net_dev->dev_addr, vid);
        if (unlikely(!backbone_gw))
                return;

        backbone_gw->lasttime = jiffies;
        backbone_gw_free_ref(backbone_gw);
}

/**
 * bla_answer_request
 * @bat_priv: the bat priv with all the soft interface information
 * @vid: the vid where the request came on
 *
 * Repeat all of our own claims, and finally send an ANNOUNCE frame
 * to allow the requester another check if the CRC is correct now.
 */
static void bla_answer_request(struct bat_priv *bat_priv,
                               struct hard_iface *primary_if, short vid)
{
        struct hlist_node *node;
        struct hlist_head *head;
        struct hashtable_t *hash;
        struct claim *claim;
        struct backbone_gw *backbone_gw;
        int i;

        bat_dbg(DBG_BLA, bat_priv,
                "bla_answer_request(): received a claim request, send all of our own claims again\n");

        backbone_gw = backbone_hash_find(bat_priv,
                                         primary_if->net_dev->dev_addr, vid);
        if (!backbone_gw)
                return;

        hash = bat_priv->claim_hash;
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
                        /* only own claims are interesting */
                        if (claim->backbone_gw != backbone_gw)
                                continue;

                        bla_send_claim(bat_priv, claim->addr, claim->vid,
                                       CLAIM_TYPE_ADD);
                }
                rcu_read_unlock();
        }

        /* finally, send an announcement frame */
        bla_send_announce(bat_priv, backbone_gw);
        backbone_gw_free_ref(backbone_gw);
}

/**
 * bla_send_request
 * @backbone_gw: the backbone gateway from whom we are out of sync
 *
 * When the crc is wrong, ask the backbone gateway for a full table update.
 * After the request, it will repeat all of its own claims and finally
 * send an announcement claim with which we can check again.
 */
static void bla_send_request(struct backbone_gw *backbone_gw)
{
        /* first, remove all old entries */
        bla_del_backbone_claims(backbone_gw);

        bat_dbg(DBG_BLA, backbone_gw->bat_priv,
                "Sending REQUEST to %pM\n",
                backbone_gw->orig);

        /* send request */
        bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
                       backbone_gw->vid, CLAIM_TYPE_REQUEST);

        /* no local broadcasts should be sent or received, for now. */
        if (!atomic_read(&backbone_gw->request_sent)) {
                atomic_inc(&backbone_gw->bat_priv->bla_num_requests);
                atomic_set(&backbone_gw->request_sent, 1);
        }
}

/**
 * bla_send_announce
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: our backbone gateway which should be announced
 *
 * This function sends an announcement. It is called from multiple
 * places.
 */
static void bla_send_announce(struct bat_priv *bat_priv,
                              struct backbone_gw *backbone_gw)
{
        uint8_t mac[ETH_ALEN];
        uint16_t crc;

        memcpy(mac, announce_mac, 4);
        crc = htons(backbone_gw->crc);
        memcpy(&mac[4], (uint8_t *)&crc, 2);

        bla_send_claim(bat_priv, mac, backbone_gw->vid, CLAIM_TYPE_ANNOUNCE);
}

/**
 * bla_add_claim
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address of the claim
 * @vid: the VLAN ID of the frame
 * @backbone_gw: the backbone gateway which claims it
 *
 * Adds a claim in the claim hash.
 */
static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
                          const short vid, struct backbone_gw *backbone_gw)
{
        struct claim *claim;
        struct claim search_claim;
        int hash_added;

        memcpy(search_claim.addr, mac, ETH_ALEN);
        search_claim.vid = vid;
        claim = claim_hash_find(bat_priv, &search_claim);

        /* create a new claim entry if it does not exist yet. */
        if (!claim) {
                claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
                if (!claim)
                        return;

                memcpy(claim->addr, mac, ETH_ALEN);
                claim->vid = vid;
                claim->lasttime = jiffies;
                claim->backbone_gw = backbone_gw;

                atomic_set(&claim->refcount, 2);
                bat_dbg(DBG_BLA, bat_priv,
                        "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
                        mac, vid);
                hash_added = hash_add(bat_priv->claim_hash, compare_claim,
                                      choose_claim, claim, &claim->hash_entry);

                if (unlikely(hash_added != 0)) {
                        /* only local changes happened. */
                        kfree(claim);
                        return;
                }
        } else {
                claim->lasttime = jiffies;
                if (claim->backbone_gw == backbone_gw)
                        /* no need to register a new backbone */
                        goto claim_free_ref;

                bat_dbg(DBG_BLA, bat_priv,
                        "bla_add_claim(): changing ownership for %pM, vid %d\n",
                        mac, vid);

                claim->backbone_gw->crc ^=
                        crc16(0, claim->addr, ETH_ALEN);
                backbone_gw_free_ref(claim->backbone_gw);
        }
        /* set (new) backbone gw */
        atomic_inc(&backbone_gw->refcount);
        claim->backbone_gw = backbone_gw;

        backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
        backbone_gw->lasttime = jiffies;

claim_free_ref:
        claim_free_ref(claim);
}

/* Delete a claim from the claim hash which has the
 * given mac address and vid.
 */
static void bla_del_claim(struct bat_priv *bat_priv, const uint8_t *mac,
                          const short vid)
{
        struct claim search_claim, *claim;

        memcpy(search_claim.addr, mac, ETH_ALEN);
        search_claim.vid = vid;
        claim = claim_hash_find(bat_priv, &search_claim);
        if (!claim)
                return;

        bat_dbg(DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", mac, vid);

        hash_remove(bat_priv->claim_hash, compare_claim, choose_claim, claim);
        claim_free_ref(claim); /* reference from the hash is gone */

        claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);

        /* don't need the reference from hash_find() anymore */
        claim_free_ref(claim);
}

/* check for ANNOUNCE frame, return 1 if handled */
static int handle_announce(struct bat_priv *bat_priv,
                           uint8_t *an_addr, uint8_t *backbone_addr, short vid)
{
        struct backbone_gw *backbone_gw;
        uint16_t crc;

        if (memcmp(an_addr, announce_mac, 4) != 0)
                return 0;

        backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);

        if (unlikely(!backbone_gw))
                return 1;

        /* handle as ANNOUNCE frame */
        backbone_gw->lasttime = jiffies;
        crc = ntohs(*((uint16_t *)(&an_addr[4])));

        bat_dbg(DBG_BLA, bat_priv,
                "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
                vid, backbone_gw->orig, crc);

        if (backbone_gw->crc != crc) {
                bat_dbg(DBG_BLA, backbone_gw->bat_priv,
                        "handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n",
                        backbone_gw->orig, backbone_gw->vid, backbone_gw->crc,
                        crc);

                bla_send_request(backbone_gw);
        } else {
                /* if we have sent a request and the crc was OK,
                 * we can allow traffic again.
                 */
                if (atomic_read(&backbone_gw->request_sent)) {
                        atomic_dec(&backbone_gw->bat_priv->bla_num_requests);
                        atomic_set(&backbone_gw->request_sent, 0);
                }
        }

        backbone_gw_free_ref(backbone_gw);
        return 1;
}

/* check for REQUEST frame, return 1 if handled */
static int handle_request(struct bat_priv *bat_priv,
                          struct hard_iface *primary_if,
                          uint8_t *backbone_addr,
                          struct ethhdr *ethhdr, short vid)
{
        /* check for REQUEST frame */
        if (!compare_eth(backbone_addr, ethhdr->h_dest))
                return 0;

        /* sanity check, this should not happen on a normal switch,
         * we ignore it in this case.
         */
        if (!compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
                return 1;

        bat_dbg(DBG_BLA, bat_priv,
                "handle_request(): REQUEST vid %d (sent by %pM)...\n",
                vid, ethhdr->h_source);

        bla_answer_request(bat_priv, primary_if, vid);
        return 1;
}

/* check for UNCLAIM frame, return 1 if handled */
static int handle_unclaim(struct bat_priv *bat_priv,
                          struct hard_iface *primary_if,
                          uint8_t *backbone_addr,
                          uint8_t *claim_addr, short vid)
{
        struct backbone_gw *backbone_gw;

        /* unclaim in any case if it is our own */
        if (primary_if && compare_eth(backbone_addr,
                                      primary_if->net_dev->dev_addr))
                bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_DEL);

        backbone_gw = backbone_hash_find(bat_priv, backbone_addr, vid);

        if (!backbone_gw)
                return 1;

        /* this must be an UNCLAIM frame */
        bat_dbg(DBG_BLA, bat_priv,
                "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
                claim_addr, vid, backbone_gw->orig);

        bla_del_claim(bat_priv, claim_addr, vid);
        backbone_gw_free_ref(backbone_gw);
        return 1;
}

/* check for CLAIM frame, return 1 if handled */
static int handle_claim(struct bat_priv *bat_priv,
                        struct hard_iface *primary_if, uint8_t *backbone_addr,
                        uint8_t *claim_addr, short vid)
{
        struct backbone_gw *backbone_gw;

        /* register the gateway if not yet available, and add the claim. */

        backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);

        if (unlikely(!backbone_gw))
                return 1;

        /* this must be a CLAIM frame */
        bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
        if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
                bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_ADD);

        /* TODO: we could call something like tt_local_del() here. */

        backbone_gw_free_ref(backbone_gw);
        return 1;
}

/**
 * bla_process_claim
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 *
 * Check if this is a claim frame, and process it accordingly.
 *
 * returns 1 if it was a claim frame, otherwise return 0 to
 * tell the callee that it can use the frame on its own.
 */
static int bla_process_claim(struct bat_priv *bat_priv,
                             struct hard_iface *primary_if,
                             struct sk_buff *skb)
{
        struct ethhdr *ethhdr;
        struct vlan_ethhdr *vhdr;
        struct arphdr *arphdr;
        uint8_t *hw_src, *hw_dst;
        struct bla_claim_dst *bla_dst;
        uint16_t proto;
        int headlen;
        short vid = -1;

        ethhdr = (struct ethhdr *)skb_mac_header(skb);

        if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
                vhdr = (struct vlan_ethhdr *)ethhdr;
                vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
                proto = ntohs(vhdr->h_vlan_encapsulated_proto);
                headlen = sizeof(*vhdr);
        } else {
                proto = ntohs(ethhdr->h_proto);
                headlen = sizeof(*ethhdr);
        }

        if (proto != ETH_P_ARP)
                return 0; /* not a claim frame */

        /* this must be an ARP frame. check if it is a claim. */

        if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
                return 0;

        /* pskb_may_pull() may have modified the pointers, get ethhdr again */
        ethhdr = (struct ethhdr *)skb_mac_header(skb);
        arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);

        /* Check whether the ARP frame carries a valid
         * IP information
         */
        if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
                return 0;
        if (arphdr->ar_pro != htons(ETH_P_IP))
                return 0;
        if (arphdr->ar_hln != ETH_ALEN)
                return 0;
        if (arphdr->ar_pln != 4)
                return 0;

        hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
        hw_dst = hw_src + ETH_ALEN + 4;
        bla_dst = (struct bla_claim_dst *)hw_dst;

        /* check if it is a claim frame. */
        if (memcmp(hw_dst, claim_dest, 3) != 0)
                return 0;

        /* become a backbone gw ourselves on this vlan if not happened yet */
        bla_update_own_backbone_gw(bat_priv, primary_if, vid);

        /* check for the different types of claim frames ... */
        switch (bla_dst->type) {
        case CLAIM_TYPE_ADD:
                if (handle_claim(bat_priv, primary_if, hw_src,
                                 ethhdr->h_source, vid))
                        return 1;
                break;
        case CLAIM_TYPE_DEL:
                if (handle_unclaim(bat_priv, primary_if,
                                   ethhdr->h_source, hw_src, vid))
                        return 1;
                break;
        case CLAIM_TYPE_ANNOUNCE:
                if (handle_announce(bat_priv, hw_src, ethhdr->h_source, vid))
                        return 1;
                break;
        case CLAIM_TYPE_REQUEST:
                if (handle_request(bat_priv, primary_if, hw_src, ethhdr, vid))
                        return 1;
                break;
        }

        bat_dbg(DBG_BLA, bat_priv,
                "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
                ethhdr->h_source, vid, hw_src, hw_dst);
        return 1;
}
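
/* Note: bla_process_claim() above is invoked from bla_tx() further down,
 * i.e. claim frames are intercepted while a frame is being handed
 * towards the mesh and are reported back to the caller as handled.
 */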

/* Check when we last heard from other nodes, and remove them in case of
 * a time out, or clean all backbone gws if now is set.
 */
static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now)
{
        struct backbone_gw *backbone_gw;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
        struct hashtable_t *hash;
        spinlock_t *list_lock;  /* protects write access to the hash lists */
        int i;

        hash = bat_priv->backbone_hash;
        if (!hash)
                return;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];

                spin_lock_bh(list_lock);
                hlist_for_each_entry_safe(backbone_gw, node, node_tmp,
                                          head, hash_entry) {
                        if (now)
                                goto purge_now;
                        if (!has_timed_out(backbone_gw->lasttime,
                                           BLA_BACKBONE_TIMEOUT))
                                continue;

                        bat_dbg(DBG_BLA, backbone_gw->bat_priv,
                                "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
                                backbone_gw->orig);

purge_now:
                        /* don't wait for the pending request anymore */
                        if (atomic_read(&backbone_gw->request_sent))
                                atomic_dec(&bat_priv->bla_num_requests);

                        bla_del_backbone_claims(backbone_gw);

                        hlist_del_rcu(node);
                        backbone_gw_free_ref(backbone_gw);
                }
                spin_unlock_bh(list_lock);
        }
}

/**
 * bla_purge_claims
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface, may be NULL if now is set
 * @now: whether the whole hash shall be wiped now
 *
 * Check when we last heard from our own claims, and remove them in case of
 * a time out, or clean all claims if now is set.
 */
static void bla_purge_claims(struct bat_priv *bat_priv,
                             struct hard_iface *primary_if, int now)
{
        struct claim *claim;
        struct hlist_node *node;
        struct hlist_head *head;
        struct hashtable_t *hash;
        int i;

        hash = bat_priv->claim_hash;
        if (!hash)
                return;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
                        if (now)
                                goto purge_now;
                        if (!compare_eth(claim->backbone_gw->orig,
                                         primary_if->net_dev->dev_addr))
                                continue;
                        if (!has_timed_out(claim->lasttime,
                                           BLA_CLAIM_TIMEOUT))
                                continue;

                        bat_dbg(DBG_BLA, bat_priv,
                                "bla_purge_claims(): %pM, vid %d, time out\n",
                                claim->addr, claim->vid);

purge_now:
                        handle_unclaim(bat_priv, primary_if,
                                       claim->backbone_gw->orig,
                                       claim->addr, claim->vid);
                }
                rcu_read_unlock();
        }
}

/**
 * bla_update_orig_address
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the new selected primary_if
 * @oldif: the old primary interface, may be NULL
 *
 * Update the backbone gateways when the own orig address changes.
 */
void bla_update_orig_address(struct bat_priv *bat_priv,
                             struct hard_iface *primary_if,
                             struct hard_iface *oldif)
{
        struct backbone_gw *backbone_gw;
        struct hlist_node *node;
        struct hlist_head *head;
        struct hashtable_t *hash;
        int i;

        if (!oldif) {
                bla_purge_claims(bat_priv, NULL, 1);
                bla_purge_backbone_gw(bat_priv, 1);
                return;
        }

        hash = bat_priv->backbone_hash;
        if (!hash)
                return;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
                        /* own orig still holds the old value. */
                        if (!compare_eth(backbone_gw->orig,
                                         oldif->net_dev->dev_addr))
                                continue;

                        memcpy(backbone_gw->orig,
                               primary_if->net_dev->dev_addr, ETH_ALEN);
                        /* send an announce frame so others will ask for our
                         * claims and update their tables.
                         */
                        bla_send_announce(bat_priv, backbone_gw);
                }
                rcu_read_unlock();
        }
}

/* (re)start the timer */
static void bla_start_timer(struct bat_priv *bat_priv)
{
        INIT_DELAYED_WORK(&bat_priv->bla_work, bla_periodic_work);
        queue_delayed_work(bat_event_workqueue, &bat_priv->bla_work,
                           msecs_to_jiffies(BLA_PERIOD_LENGTH));
}

/* periodic work to do:
 *  * purge structures when they are too old
 *  * send announcements
 */
static void bla_periodic_work(struct work_struct *work)
{
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
        struct bat_priv *bat_priv =
                container_of(delayed_work, struct bat_priv, bla_work);
        struct hlist_node *node;
        struct hlist_head *head;
        struct backbone_gw *backbone_gw;
        struct hashtable_t *hash;
        struct hard_iface *primary_if;
        int i;

        primary_if = primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out;

        bla_purge_claims(bat_priv, primary_if, 0);
        bla_purge_backbone_gw(bat_priv, 0);

        if (!atomic_read(&bat_priv->bridge_loop_avoidance))
                goto out;

        hash = bat_priv->backbone_hash;
        if (!hash)
                goto out;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
                        if (!compare_eth(backbone_gw->orig,
                                         primary_if->net_dev->dev_addr))
                                continue;

                        backbone_gw->lasttime = jiffies;

                        bla_send_announce(bat_priv, backbone_gw);
                }
                rcu_read_unlock();
        }
out:
        if (primary_if)
                hardif_free_ref(primary_if);

        bla_start_timer(bat_priv);
}

/* initialize all bla structures */
int bla_init(struct bat_priv *bat_priv)
{
        int i;

        bat_dbg(DBG_BLA, bat_priv, "bla hash registering\n");

        /* initialize the duplicate list */
        for (i = 0; i < DUPLIST_SIZE; i++)
                bat_priv->bcast_duplist[i].entrytime =
                        jiffies - msecs_to_jiffies(DUPLIST_TIMEOUT);
        bat_priv->bcast_duplist_curr = 0;

        if (bat_priv->claim_hash)
                return 1;

        bat_priv->claim_hash = hash_new(128);
        bat_priv->backbone_hash = hash_new(32);

        if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
                return -1;

        bat_dbg(DBG_BLA, bat_priv, "bla hashes initialized\n");

        bla_start_timer(bat_priv);
        return 1;
}

/**
 * bla_check_bcast_duplist
 * @bat_priv: the bat priv with all the soft interface information
 * @bcast_packet: the broadcast packet to check
 * @hdr_size: maximum length of the frame
 *
 * check if it is on our broadcast list. Another gateway might
 * have sent the same packet because it is connected to the same backbone,
 * so we have to remove this duplicate.
 *
 * This is performed by checking the CRC, which will tell us
 * with a good chance that it is the same packet. If it is furthermore
 * sent by another host, drop it. We allow equal packets from
 * the same host however as this might be intended.
 */
int bla_check_bcast_duplist(struct bat_priv *bat_priv,
                            struct bcast_packet *bcast_packet,
                            int hdr_size)
{
        int i, length, curr;
        uint8_t *content;
        uint16_t crc;
        struct bcast_duplist_entry *entry;

        length = hdr_size - sizeof(*bcast_packet);
        content = (uint8_t *)bcast_packet;
        content += sizeof(*bcast_packet);

        /* calculate the crc ... */
        crc = crc16(0, content, length);

        for (i = 0; i < DUPLIST_SIZE; i++) {
                curr = (bat_priv->bcast_duplist_curr + i) % DUPLIST_SIZE;
                entry = &bat_priv->bcast_duplist[curr];

                /* we can stop searching if the entry is too old;
                 * later entries will be even older
                 */
                if (has_timed_out(entry->entrytime, DUPLIST_TIMEOUT))
                        break;

                if (entry->crc != crc)
                        continue;

                if (compare_eth(entry->orig, bcast_packet->orig))
                        continue;

                /* this entry seems to match: same crc, not too old,
                 * and from another gw. therefore return 1 to forbid it.
                 */
                return 1;
        }
        /* not found, add a new entry (overwrite the oldest entry) */
        curr = (bat_priv->bcast_duplist_curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE;
        entry = &bat_priv->bcast_duplist[curr];
        entry->crc = crc;
        entry->entrytime = jiffies;
        memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
        bat_priv->bcast_duplist_curr = curr;

        /* allow it, it's the first occurrence. */
        return 0;
}
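
/* Usage sketch (the caller is an assumption, e.g. a broadcast receive
 * handler outside this file, invoked before re-broadcasting a frame):
 *
 *      if (bla_check_bcast_duplist(bat_priv, bcast_packet, packet_len))
 *              goto out;       (another gateway already delivered it)
 *
 * where packet_len would be the length of the received broadcast packet
 * including its header.
 */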

/**
 * bla_is_backbone_gw_orig
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: originator mac address
 *
 * check if the originator is a gateway for any VLAN ID.
 *
 * returns 1 if it is found, 0 otherwise
 */
int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
{
        struct hashtable_t *hash = bat_priv->backbone_hash;
        struct hlist_head *head;
        struct hlist_node *node;
        struct backbone_gw *backbone_gw;
        int i;

        if (!atomic_read(&bat_priv->bridge_loop_avoidance))
                return 0;

        if (!hash)
                return 0;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
                        if (compare_eth(backbone_gw->orig, orig)) {
                                rcu_read_unlock();
                                return 1;
                        }
                }
                rcu_read_unlock();
        }

        return 0;
}

/**
 * bla_is_backbone_gw
 * @skb: the frame to be checked
 * @orig_node: the orig_node of the frame
 * @hdr_size: maximum length of the frame
 *
 * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
 * if the orig_node is also a gateway on the soft interface, otherwise it
 * returns 0.
 */
int bla_is_backbone_gw(struct sk_buff *skb,
                       struct orig_node *orig_node, int hdr_size)
{
        struct ethhdr *ethhdr;
        struct vlan_ethhdr *vhdr;
        struct backbone_gw *backbone_gw;
        short vid = -1;

        if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
                return 0;

        /* first, find out the vid. */
        if (!pskb_may_pull(skb, hdr_size + sizeof(struct ethhdr)))
                return 0;

        ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size);

        if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
                if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
                        return 0;

                vhdr = (struct vlan_ethhdr *)(((uint8_t *)skb->data) +
                                              hdr_size);
                vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
        }

        /* see if this originator is a backbone gw for this VLAN */

        backbone_gw = backbone_hash_find(orig_node->bat_priv,
                                         orig_node->orig, vid);
        if (!backbone_gw)
                return 0;

        backbone_gw_free_ref(backbone_gw);
        return 1;
}

/* free all bla structures (for softinterface free or module unload) */
void bla_free(struct bat_priv *bat_priv)
{
        struct hard_iface *primary_if;

        cancel_delayed_work_sync(&bat_priv->bla_work);
        primary_if = primary_if_get_selected(bat_priv);

        if (bat_priv->claim_hash) {
                bla_purge_claims(bat_priv, primary_if, 1);
                hash_destroy(bat_priv->claim_hash);
                bat_priv->claim_hash = NULL;
        }
        if (bat_priv->backbone_hash) {
                bla_purge_backbone_gw(bat_priv, 1);
                hash_destroy(bat_priv->backbone_hash);
                bat_priv->backbone_hash = NULL;
        }
        if (primary_if)
                hardif_free_ref(primary_if);
}
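
/* The two hooks below tie bridge loop avoidance into the data path.
 * bla_rx() is expected to be called for frames leaving the mesh towards
 * the local LAN/soft interface; the actual call site lives outside this
 * file.
 */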

/**
 * bla_rx
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 *
 * bla_rx avoidance checks if:
 *  * we have to race for a claim
 *  * if the frame is allowed on the LAN
 *
 * in these cases, the skb is further handled by this function and
 * returns 1, otherwise it returns 0 and the caller shall further
 * process the skb.
 */
int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
{
        struct ethhdr *ethhdr;
        struct claim search_claim, *claim = NULL;
        struct hard_iface *primary_if;
        int ret;

        ethhdr = (struct ethhdr *)skb_mac_header(skb);

        primary_if = primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto handled;

        if (!atomic_read(&bat_priv->bridge_loop_avoidance))
                goto allow;

        if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
                /* don't allow broadcasts while requests are in flight */
                if (is_multicast_ether_addr(ethhdr->h_dest))
                        goto handled;

        memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
        search_claim.vid = vid;
        claim = claim_hash_find(bat_priv, &search_claim);

        if (!claim) {
                /* possible optimization: race for a claim */

                /* No claim exists yet, claim it for us! */
                handle_claim(bat_priv, primary_if,
                             primary_if->net_dev->dev_addr,
                             ethhdr->h_source, vid);
                goto allow;
        }

        /* if it is our own claim ... */
        if (compare_eth(claim->backbone_gw->orig,
                        primary_if->net_dev->dev_addr)) {
                /* ... allow it in any case */
                claim->lasttime = jiffies;
                goto allow;
        }

        /* if it is a broadcast ... */
        if (is_multicast_ether_addr(ethhdr->h_dest)) {
                /* ... drop it. the responsible gateway is in charge. */
                goto handled;
        } else {
                /* seems the client considers us as its best gateway.
                 * send a claim and update the claim table
                 * immediately.
                 */
                handle_claim(bat_priv, primary_if,
                             primary_if->net_dev->dev_addr,
                             ethhdr->h_source, vid);
                goto allow;
        }
allow:
        bla_update_own_backbone_gw(bat_priv, primary_if, vid);
        ret = 0;
        goto out;

handled:
        kfree_skb(skb);
        ret = 1;

out:
        if (primary_if)
                hardif_free_ref(primary_if);
        if (claim)
                claim_free_ref(claim);
        return ret;
}
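
/* Counterpart of bla_rx(): bla_tx() is expected to be called for frames
 * entering the mesh from the local LAN/soft interface, before they are
 * actually transmitted; again, the call site is outside this file.
 */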

/**
 * bla_tx
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 *
 * bla_tx checks if:
 *  * a claim was received which has to be processed
 *  * the frame is allowed on the mesh
 *
 * in these cases, the skb is further handled by this function and
 * returns 1, otherwise it returns 0 and the caller shall further
 * process the skb.
 */
int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
{
        struct ethhdr *ethhdr;
        struct claim search_claim, *claim = NULL;
        struct hard_iface *primary_if;
        int ret = 0;

        primary_if = primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out;

        if (!atomic_read(&bat_priv->bridge_loop_avoidance))
                goto allow;

        /* in VLAN case, the mac header might not be set. */
        skb_reset_mac_header(skb);

        if (bla_process_claim(bat_priv, primary_if, skb))
                goto handled;

        ethhdr = (struct ethhdr *)skb_mac_header(skb);

        if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
                /* don't allow broadcasts while requests are in flight */
                if (is_multicast_ether_addr(ethhdr->h_dest))
                        goto handled;

        memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
        search_claim.vid = vid;

        claim = claim_hash_find(bat_priv, &search_claim);

        /* if no claim exists, allow it. */
        if (!claim)
                goto allow;

        /* check if we are responsible. */
        if (compare_eth(claim->backbone_gw->orig,
                        primary_if->net_dev->dev_addr)) {
                /* if yes, the client has roamed and we have
                 * to unclaim it.
                 */
                handle_unclaim(bat_priv, primary_if,
                               primary_if->net_dev->dev_addr,
                               ethhdr->h_source, vid);
                goto allow;
        }

        /* check if it is a multicast/broadcast frame */
        if (is_multicast_ether_addr(ethhdr->h_dest)) {
                /* drop it. the responsible gateway has forwarded it into
                 * the backbone network.
                 */
                goto handled;
        } else {
                /* we must allow it. at least if we are
                 * responsible for the DESTINATION.
                 */
                goto allow;
        }
allow:
        bla_update_own_backbone_gw(bat_priv, primary_if, vid);
        ret = 0;
        goto out;
handled:
        ret = 1;
out:
        if (primary_if)
                hardif_free_ref(primary_if);
        if (claim)
                claim_free_ref(claim);
        return ret;
}

int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
{
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hashtable_t *hash = bat_priv->claim_hash;
        struct claim *claim;
        struct hard_iface *primary_if;
        struct hlist_node *node;
        struct hlist_head *head;
        uint32_t i;
        bool is_own;
        int ret = 0;

        primary_if = primary_if_get_selected(bat_priv);
        if (!primary_if) {
                ret = seq_printf(seq,
                                 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
                                 net_dev->name);
                goto out;
        }

        if (primary_if->if_status != IF_ACTIVE) {
                ret = seq_printf(seq,
                                 "BATMAN mesh %s disabled - primary interface not active\n",
                                 net_dev->name);
                goto out;
        }

        seq_printf(seq, "Claims announced for the mesh %s (orig %pM)\n",
                   net_dev->name, primary_if->net_dev->dev_addr);
        seq_printf(seq, "   %-17s    %-5s    %-17s [o] (%-4s)\n",
                   "Client", "VID", "Originator", "CRC");
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
                        is_own = compare_eth(claim->backbone_gw->orig,
                                             primary_if->net_dev->dev_addr);
                        seq_printf(seq, " * %pM on % 5d by %pM [%c] (%04x)\n",
                                   claim->addr, claim->vid,
                                   claim->backbone_gw->orig,
                                   (is_own ? 'x' : ' '),
                                   claim->backbone_gw->crc);
                }
                rcu_read_unlock();
        }
out:
        if (primary_if)
                hardif_free_ref(primary_if);
        return ret;
}