net/batman-adv/vis.c
/*
 * Copyright (C) 2008-2011 B.A.T.M.A.N. contributors:
 *
 * Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */
#include "main.h"
#include "send.h"
#include "translation-table.h"
#include "vis.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "hash.h"
#include "originator.h"

#define MAX_VIS_PACKET_SIZE 1000
/* Returns the smallest signed integer in two's complement with the sizeof x */
#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
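/* Illustration: for a uint8_t this evaluates to 1u << 7 == 128, for a
 * uint16_t to 1u << 15 == 32768, i.e. the magnitude of the most negative
 * value representable in a signed integer of the same width. */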
/* Checks if a sequence number x is a predecessor/successor of y.
 * They handle overflows/underflows and can correctly check for a
 * predecessor/successor unless the variable sequence number has grown by
 * more than 2**(bitwidth(x)-1)-1.
 * This means that for a uint8_t with the maximum value 255, it would think:
 *  - when adding nothing - it is neither a predecessor nor a successor
 *  - before adding more than 127 to the starting value - it is a predecessor,
 *  - when adding 128 - it is neither a predecessor nor a successor,
 *  - after adding more than 127 to the starting value - it is a successor */
#define seq_before(x, y) ({typeof(x) _dummy = (x - y); \
			_dummy > smallest_signed_int(_dummy); })
#define seq_after(x, y) seq_before(y, x)
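/* Worked example with uint8_t sequence numbers: for x == 250 and y == 2
 * (y reached 2 by wrapping around past 255), (x - y) truncates to 248,
 * which is greater than 128, so seq_before(250, 2) and, equivalently,
 * seq_after(2, 250) both evaluate to true. */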

static void start_vis_timer(struct bat_priv *bat_priv);

/* free the info */
static void free_info(struct kref *ref)
{
	struct vis_info *info = container_of(ref, struct vis_info, refcount);
	struct bat_priv *bat_priv = info->bat_priv;
	struct recvlist_node *entry, *tmp;

	list_del_init(&info->send_list);
	spin_lock_bh(&bat_priv->vis_list_lock);
	list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&bat_priv->vis_list_lock);
	kfree_skb(info->skb_packet);
	kfree(info);
}

/* Compare two vis packets, used by the hashing algorithm */
static int vis_info_cmp(void *data1, void *data2)
{
	struct vis_info *d1, *d2;
	struct vis_packet *p1, *p2;

	d1 = data1;
	d2 = data2;
	p1 = (struct vis_packet *)d1->skb_packet->data;
	p2 = (struct vis_packet *)d2->skb_packet->data;
	return compare_orig(p1->vis_orig, p2->vis_orig);
}

/* hash function to choose an entry in a hash table of given size */
/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
static int vis_info_choose(void *data, int size)
{
	struct vis_info *vis_info = data;
	struct vis_packet *packet;
	unsigned char *key;
	uint32_t hash = 0;
	size_t i;

	packet = (struct vis_packet *)vis_info->skb_packet->data;
	key = packet->vis_orig;
	for (i = 0; i < ETH_ALEN; i++) {
		hash += key[i];
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}

	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash % size;
}

/* insert interface to the list of interfaces of one originator, if it
 * does not already exist in the list */
static void vis_data_insert_interface(const uint8_t *interface,
				      struct hlist_head *if_list,
				      bool primary)
{
	struct if_list_entry *entry;
	struct hlist_node *pos;

	hlist_for_each_entry(entry, pos, if_list, list) {
		if (compare_orig(entry->addr, (void *)interface))
			return;
	}

	/* it's a new address, add it to the list */
	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return;
	memcpy(entry->addr, interface, ETH_ALEN);
	entry->primary = primary;
	hlist_add_head(&entry->list, if_list);
}

static ssize_t vis_data_read_prim_sec(char *buff, struct hlist_head *if_list)
{
	struct if_list_entry *entry;
	struct hlist_node *pos;
	size_t len = 0;

	hlist_for_each_entry(entry, pos, if_list, list) {
		if (entry->primary)
			len += sprintf(buff + len, "PRIMARY, ");
		else
			len += sprintf(buff + len, "SEC %pM, ", entry->addr);
	}

	return len;
}
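
/* Sizing note: vis_data_count_prim_sec() below mirrors the strings printed
 * above - "PRIMARY, " is 9 bytes, and "SEC " plus a 17 byte MAC string plus
 * ", " is 23 bytes. */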
static size_t vis_data_count_prim_sec(struct hlist_head *if_list)
{
	struct if_list_entry *entry;
	struct hlist_node *pos;
	size_t count = 0;

	hlist_for_each_entry(entry, pos, if_list, list) {
		if (entry->primary)
			count += 9;
		else
			count += 23;
	}

	return count;
}

/* read an entry */
static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
				   uint8_t *src, bool primary)
{
	/* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
	if (primary && entry->quality == 0)
		return sprintf(buff, "HNA %pM, ", entry->dest);
	else if (compare_orig(entry->src, src))
		return sprintf(buff, "TQ %pM %d, ", entry->dest,
			       entry->quality);

	return 0;
}
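
/* Note: the 26 byte worst case above ("HNA " + MAC + ", " == 4+17+2,
 * "TQ " + MAC + " " + up to 3 quality digits + ", " == 3+17+1+3+2) is the
 * per-entry estimate used by vis_seq_print_text() when sizing its output
 * buffer, plus 18 bytes ("%pM," == 17+1) per interface line prefix. */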

int vis_seq_print_text(struct seq_file *seq, void *offset)
{
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	struct vis_info *info;
	struct vis_packet *packet;
	struct vis_info_entry *entries;
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->vis_hash;
	HLIST_HEAD(vis_if_list);
	struct if_list_entry *entry;
	struct hlist_node *pos, *n;
	int i, j;
	int vis_server = atomic_read(&bat_priv->vis_mode);
	size_t buff_pos, buf_size;
	char *buff;
	int compare;

	if ((!bat_priv->primary_if) ||
	    (vis_server == VIS_TYPE_CLIENT_UPDATE))
		return 0;

	buf_size = 1;
	/* Estimate length */
	spin_lock_bh(&bat_priv->vis_hash_lock);
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(bucket, walk, head, hlist) {
			info = bucket->data;
			packet = (struct vis_packet *)info->skb_packet->data;
			entries = (struct vis_info_entry *)
				((char *)packet + sizeof(struct vis_packet));

			for (j = 0; j < packet->entries; j++) {
				if (entries[j].quality == 0)
					continue;
				compare =
				 compare_orig(entries[j].src, packet->vis_orig);
				vis_data_insert_interface(entries[j].src,
							  &vis_if_list,
							  compare);
			}

			hlist_for_each_entry(entry, pos, &vis_if_list, list) {
				buf_size += 18 + 26 * packet->entries;

				/* add primary/secondary records */
				if (compare_orig(entry->addr, packet->vis_orig))
					buf_size +=
					  vis_data_count_prim_sec(&vis_if_list);

				buf_size += 1;
			}

			hlist_for_each_entry_safe(entry, pos, n, &vis_if_list,
						  list) {
				hlist_del(&entry->list);
				kfree(entry);
			}
		}
	}

	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		spin_unlock_bh(&bat_priv->vis_hash_lock);
		return -ENOMEM;
	}
	buff[0] = '\0';
	buff_pos = 0;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(bucket, walk, head, hlist) {
			info = bucket->data;
			packet = (struct vis_packet *)info->skb_packet->data;
			entries = (struct vis_info_entry *)
				((char *)packet + sizeof(struct vis_packet));

			for (j = 0; j < packet->entries; j++) {
				if (entries[j].quality == 0)
					continue;
				compare =
				 compare_orig(entries[j].src, packet->vis_orig);
				vis_data_insert_interface(entries[j].src,
							  &vis_if_list,
							  compare);
			}

			hlist_for_each_entry(entry, pos, &vis_if_list, list) {
				buff_pos += sprintf(buff + buff_pos, "%pM,",
						    entry->addr);

				for (j = 0; j < packet->entries; j++)
					buff_pos += vis_data_read_entry(
							buff + buff_pos,
							&entries[j],
							entry->addr,
							entry->primary);

				/* add primary/secondary records */
				if (compare_orig(entry->addr, packet->vis_orig))
					buff_pos +=
					 vis_data_read_prim_sec(buff + buff_pos,
								&vis_if_list);

				buff_pos += sprintf(buff + buff_pos, "\n");
			}

			hlist_for_each_entry_safe(entry, pos, n, &vis_if_list,
						  list) {
				hlist_del(&entry->list);
				kfree(entry);
			}
		}
	}

	spin_unlock_bh(&bat_priv->vis_hash_lock);

	seq_printf(seq, "%s", buff);
	kfree(buff);

	return 0;
}

/* add the info packet to the send list, if it was not
 * already linked in. */
static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info)
{
	if (list_empty(&info->send_list)) {
		kref_get(&info->refcount);
		list_add_tail(&info->send_list, &bat_priv->vis_send_list);
	}
}

/* delete the info packet from the send list, if it was
 * linked in. */
static void send_list_del(struct vis_info *info)
{
	if (!list_empty(&info->send_list)) {
		list_del_init(&info->send_list);
		kref_put(&info->refcount, free_info);
	}
}

/* tries to add one entry to the receive list. */
static void recv_list_add(struct bat_priv *bat_priv,
			  struct list_head *recv_list, char *mac)
{
	struct recvlist_node *entry;

	entry = kmalloc(sizeof(struct recvlist_node), GFP_ATOMIC);
	if (!entry)
		return;

	memcpy(entry->mac, mac, ETH_ALEN);
	spin_lock_bh(&bat_priv->vis_list_lock);
	list_add_tail(&entry->list, recv_list);
	spin_unlock_bh(&bat_priv->vis_list_lock);
}

/* returns 1 if this mac is in the recv_list */
static int recv_list_is_in(struct bat_priv *bat_priv,
			   struct list_head *recv_list, char *mac)
{
	struct recvlist_node *entry;

	spin_lock_bh(&bat_priv->vis_list_lock);
	list_for_each_entry(entry, recv_list, list) {
		if (memcmp(entry->mac, mac, ETH_ALEN) == 0) {
			spin_unlock_bh(&bat_priv->vis_list_lock);
			return 1;
		}
	}
	spin_unlock_bh(&bat_priv->vis_list_lock);
	return 0;
}

/* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old,
 * broken.. ). vis hash must be locked outside. is_new is set when the packet
 * is newer than old entries in the hash. */
static struct vis_info *add_packet(struct bat_priv *bat_priv,
				   struct vis_packet *vis_packet,
				   int vis_info_len, int *is_new,
				   int make_broadcast)
{
	struct vis_info *info, *old_info;
	struct vis_packet *search_packet, *old_packet;
	struct vis_info search_elem;
	struct vis_packet *packet;
	int hash_added;

	*is_new = 0;
	/* sanity check */
	if (!bat_priv->vis_hash)
		return NULL;

	/* see if the packet is already in vis_hash */
	search_elem.skb_packet = dev_alloc_skb(sizeof(struct vis_packet));
	if (!search_elem.skb_packet)
		return NULL;
	search_packet = (struct vis_packet *)skb_put(search_elem.skb_packet,
						     sizeof(struct vis_packet));

	memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
	old_info = hash_find(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
			     &search_elem);
	kfree_skb(search_elem.skb_packet);

	if (old_info) {
		old_packet = (struct vis_packet *)old_info->skb_packet->data;
		if (!seq_after(ntohl(vis_packet->seqno),
			       ntohl(old_packet->seqno))) {
			if (old_packet->seqno == vis_packet->seqno) {
				recv_list_add(bat_priv, &old_info->recv_list,
					      vis_packet->sender_orig);
				return old_info;
			} else {
				/* newer packet is already in hash. */
				return NULL;
			}
		}
		/* remove old entry */
		hash_remove(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
			    old_info);
		send_list_del(old_info);
		kref_put(&old_info->refcount, free_info);
	}

	info = kmalloc(sizeof(struct vis_info), GFP_ATOMIC);
	if (!info)
		return NULL;

	info->skb_packet = dev_alloc_skb(sizeof(struct vis_packet) +
					 vis_info_len + sizeof(struct ethhdr));
	if (!info->skb_packet) {
		kfree(info);
		return NULL;
	}
	skb_reserve(info->skb_packet, sizeof(struct ethhdr));
	packet = (struct vis_packet *)skb_put(info->skb_packet,
					      sizeof(struct vis_packet) +
					      vis_info_len);

	kref_init(&info->refcount);
	INIT_LIST_HEAD(&info->send_list);
	INIT_LIST_HEAD(&info->recv_list);
	info->first_seen = jiffies;
	info->bat_priv = bat_priv;
	memcpy(packet, vis_packet, sizeof(struct vis_packet) + vis_info_len);

	/* initialize and add new packet. */
	*is_new = 1;

	/* Make it a broadcast packet, if required */
	if (make_broadcast)
		memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);

	/* repair if entries is longer than packet. */
	if (packet->entries * sizeof(struct vis_info_entry) > vis_info_len)
		packet->entries = vis_info_len / sizeof(struct vis_info_entry);

	recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);

	/* try to add it */
	hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
			      info);
	if (hash_added < 0) {
		/* did not work (for some reason) */
		kref_put(&info->refcount, free_info);
		info = NULL;
	}

	return info;
}
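
/* Reference counting note: a vis_info returned by add_packet() carries the
 * single reference owned by the vis hash (kref_init() above, or the
 * already-hashed entry); send_list_add() takes an extra reference for as
 * long as the packet is queued on the send list. */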

/* handle the server sync packet, forward if needed. */
void receive_server_sync_packet(struct bat_priv *bat_priv,
				struct vis_packet *vis_packet,
				int vis_info_len)
{
	struct vis_info *info;
	int is_new, make_broadcast;
	int vis_server = atomic_read(&bat_priv->vis_mode);

	make_broadcast = (vis_server == VIS_TYPE_SERVER_SYNC);

	spin_lock_bh(&bat_priv->vis_hash_lock);
	info = add_packet(bat_priv, vis_packet, vis_info_len,
			  &is_new, make_broadcast);
	if (!info)
		goto end;

	/* only if we are server ourselves and packet is newer than the one in
	 * hash.*/
	if (vis_server == VIS_TYPE_SERVER_SYNC && is_new)
		send_list_add(bat_priv, info);
end:
	spin_unlock_bh(&bat_priv->vis_hash_lock);
}

/* handle an incoming client update packet and schedule forward if needed. */
void receive_client_update_packet(struct bat_priv *bat_priv,
				  struct vis_packet *vis_packet,
				  int vis_info_len)
{
	struct vis_info *info;
	struct vis_packet *packet;
	int is_new;
	int vis_server = atomic_read(&bat_priv->vis_mode);
	int are_target = 0;

	/* clients shall not broadcast. */
	if (is_broadcast_ether_addr(vis_packet->target_orig))
		return;

	/* Are we the target for this VIS packet? */
	if (vis_server == VIS_TYPE_SERVER_SYNC &&
	    is_my_mac(vis_packet->target_orig))
		are_target = 1;

	spin_lock_bh(&bat_priv->vis_hash_lock);
	info = add_packet(bat_priv, vis_packet, vis_info_len,
			  &is_new, are_target);

	if (!info)
		goto end;
	/* note that outdated packets will be dropped at this point. */

	packet = (struct vis_packet *)info->skb_packet->data;

	/* send only if we're the target server or ... */
	if (are_target && is_new) {
		packet->vis_type = VIS_TYPE_SERVER_SYNC;	/* upgrade! */
		send_list_add(bat_priv, info);

		/* ... we're not the recipient (and thus need to forward). */
	} else if (!is_my_mac(packet->target_orig)) {
		send_list_add(bat_priv, info);
	}

end:
	spin_unlock_bh(&bat_priv->vis_hash_lock);
}

/* Walk the originators and find the VIS server with the best tq. Set the
 * packet address to its address and return the best_tq.
 *
 * Must be called with the originator hash locked */
static int find_best_vis_server(struct bat_priv *bat_priv,
				struct vis_info *info)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	struct orig_node *orig_node;
	struct vis_packet *packet;
	int best_tq = -1, i;

	packet = (struct vis_packet *)info->skb_packet->data;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(bucket, walk, head, hlist) {
			orig_node = bucket->data;
			if ((orig_node) && (orig_node->router) &&
			    (orig_node->flags & VIS_SERVER) &&
			    (orig_node->router->tq_avg > best_tq)) {
				best_tq = orig_node->router->tq_avg;
				memcpy(packet->target_orig, orig_node->orig,
				       ETH_ALEN);
			}
		}
	}

	return best_tq;
}

/* Return true if the vis packet is full. */
static bool vis_packet_full(struct vis_info *info)
{
	struct vis_packet *packet;
	packet = (struct vis_packet *)info->skb_packet->data;

	if (MAX_VIS_PACKET_SIZE / sizeof(struct vis_info_entry)
	    < packet->entries + 1)
		return true;
	return false;
}

/* generates a packet of own vis data,
 * returns 0 on success, -1 if no packet could be generated */
static int generate_vis_packet(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	struct orig_node *orig_node;
	struct neigh_node *neigh_node;
	struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info;
	struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
	struct vis_info_entry *entry;
	struct hna_local_entry *hna_local_entry;
	int best_tq = -1, i;

	info->first_seen = jiffies;
	packet->vis_type = atomic_read(&bat_priv->vis_mode);

	spin_lock_bh(&bat_priv->orig_hash_lock);
	memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
	packet->ttl = TTL;
	packet->seqno = htonl(ntohl(packet->seqno) + 1);
	packet->entries = 0;
	skb_trim(info->skb_packet, sizeof(struct vis_packet));

	if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) {
		best_tq = find_best_vis_server(bat_priv, info);

		if (best_tq < 0) {
			spin_unlock_bh(&bat_priv->orig_hash_lock);
			return -1;
		}
	}

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(bucket, walk, head, hlist) {
			orig_node = bucket->data;
			neigh_node = orig_node->router;

			if (!neigh_node)
				continue;

			if (!compare_orig(neigh_node->addr, orig_node->orig))
				continue;

			if (neigh_node->if_incoming->if_status != IF_ACTIVE)
				continue;

			if (neigh_node->tq_avg < 1)
				continue;

			/* fill one entry into buffer. */
			entry = (struct vis_info_entry *)
				skb_put(info->skb_packet, sizeof(*entry));
			memcpy(entry->src,
			       neigh_node->if_incoming->net_dev->dev_addr,
			       ETH_ALEN);
			memcpy(entry->dest, orig_node->orig, ETH_ALEN);
			entry->quality = neigh_node->tq_avg;
			packet->entries++;

			if (vis_packet_full(info)) {
				spin_unlock_bh(&bat_priv->orig_hash_lock);
				return 0;
			}
		}
	}

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	hash = bat_priv->hna_local_hash;

	spin_lock_bh(&bat_priv->hna_lhash_lock);
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(bucket, walk, head, hlist) {
			hna_local_entry = bucket->data;
			entry = (struct vis_info_entry *)
					skb_put(info->skb_packet,
						sizeof(*entry));
			memset(entry->src, 0, ETH_ALEN);
			memcpy(entry->dest, hna_local_entry->addr, ETH_ALEN);
			entry->quality = 0; /* 0 means HNA */
			packet->entries++;

			if (vis_packet_full(info)) {
				spin_unlock_bh(&bat_priv->hna_lhash_lock);
				return 0;
			}
		}
	}

	spin_unlock_bh(&bat_priv->hna_lhash_lock);
	return 0;
}

/* free old vis packets. Must be called with the vis_hash_lock
 * held */
static void purge_vis_packets(struct bat_priv *bat_priv)
{
	int i;
	struct hashtable_t *hash = bat_priv->vis_hash;
	struct hlist_node *walk, *safe;
	struct hlist_head *head;
	struct element_t *bucket;
	struct vis_info *info;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
			info = bucket->data;

			/* never purge own data. */
			if (info == bat_priv->my_vis_info)
				continue;

			if (time_after(jiffies,
				       info->first_seen + VIS_TIMEOUT * HZ)) {
				hlist_del(walk);
				kfree(bucket);
				send_list_del(info);
				kref_put(&info->refcount, free_info);
			}
		}
	}
}

static void broadcast_vis_packet(struct bat_priv *bat_priv,
				 struct vis_info *info)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	struct orig_node *orig_node;
	struct vis_packet *packet;
	struct sk_buff *skb;
	struct batman_if *batman_if;
	uint8_t dstaddr[ETH_ALEN];
	int i;

	spin_lock_bh(&bat_priv->orig_hash_lock);
	packet = (struct vis_packet *)info->skb_packet->data;

	/* send to all routers in range. */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(bucket, walk, head, hlist) {
			orig_node = bucket->data;

			/* if it's a vis server and reachable, send it. */
			if ((!orig_node) || (!orig_node->router))
				continue;
			if (!(orig_node->flags & VIS_SERVER))
				continue;
			/* don't send it if we already received the packet from
			 * this node. */
			if (recv_list_is_in(bat_priv, &info->recv_list,
					    orig_node->orig))
				continue;

			memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
			/* copy the data needed for the transmit, then drop the
			 * lock while sending */
			batman_if = orig_node->router->if_incoming;
			memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
			spin_unlock_bh(&bat_priv->orig_hash_lock);

			skb = skb_clone(info->skb_packet, GFP_ATOMIC);
			if (skb)
				send_skb_packet(skb, batman_if, dstaddr);

			spin_lock_bh(&bat_priv->orig_hash_lock);
		}
	}

	spin_unlock_bh(&bat_priv->orig_hash_lock);
}

static void unicast_vis_packet(struct bat_priv *bat_priv,
			       struct vis_info *info)
{
	struct orig_node *orig_node;
	struct sk_buff *skb;
	struct vis_packet *packet;
	struct batman_if *batman_if;
	uint8_t dstaddr[ETH_ALEN];

	spin_lock_bh(&bat_priv->orig_hash_lock);
	packet = (struct vis_packet *)info->skb_packet->data;
	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
						   compare_orig, choose_orig,
						   packet->target_orig));

	if ((!orig_node) || (!orig_node->router))
		goto out;

	/* don't lock while sending the packets ... we therefore
	 * copy the required data before sending */
	batman_if = orig_node->router->if_incoming;
	memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
	spin_unlock_bh(&bat_priv->orig_hash_lock);

	skb = skb_clone(info->skb_packet, GFP_ATOMIC);
	if (skb)
		send_skb_packet(skb, batman_if, dstaddr);

	return;

out:
	spin_unlock_bh(&bat_priv->orig_hash_lock);
}

/* only send one vis packet. called from send_vis_packets() */
static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
{
	struct vis_packet *packet;

	packet = (struct vis_packet *)info->skb_packet->data;
	if (packet->ttl < 2) {
		pr_debug("Error - can't send vis packet: ttl exceeded\n");
		return;
	}

	memcpy(packet->sender_orig, bat_priv->primary_if->net_dev->dev_addr,
	       ETH_ALEN);
	packet->ttl--;

	if (is_broadcast_ether_addr(packet->target_orig))
		broadcast_vis_packet(bat_priv, info);
	else
		unicast_vis_packet(bat_priv, info);
	packet->ttl++; /* restore TTL */
}

/* called from timer; send (and maybe generate) vis packet. */
static void send_vis_packets(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, vis_work);
	struct vis_info *info;

	spin_lock_bh(&bat_priv->vis_hash_lock);
	purge_vis_packets(bat_priv);

	if (generate_vis_packet(bat_priv) == 0) {
		/* schedule if generation was successful */
		send_list_add(bat_priv, bat_priv->my_vis_info);
	}

	while (!list_empty(&bat_priv->vis_send_list)) {
		info = list_first_entry(&bat_priv->vis_send_list,
					typeof(*info), send_list);

		kref_get(&info->refcount);
		spin_unlock_bh(&bat_priv->vis_hash_lock);

		if (bat_priv->primary_if)
			send_vis_packet(bat_priv, info);

		spin_lock_bh(&bat_priv->vis_hash_lock);
		send_list_del(info);
		kref_put(&info->refcount, free_info);
	}

	spin_unlock_bh(&bat_priv->vis_hash_lock);
	start_vis_timer(bat_priv);
}

/* init the vis server. this may only be called when if_list is already
 * initialized (e.g. bat0 is initialized, interfaces have been added) */
int vis_init(struct bat_priv *bat_priv)
{
	struct vis_packet *packet;
	int hash_added;

	if (bat_priv->vis_hash)
		return 1;

	spin_lock_bh(&bat_priv->vis_hash_lock);

	bat_priv->vis_hash = hash_new(256);
	if (!bat_priv->vis_hash) {
		pr_err("Can't initialize vis_hash\n");
		goto err;
	}

	bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
	if (!bat_priv->my_vis_info) {
		pr_err("Can't initialize vis packet\n");
		goto err;
	}

	bat_priv->my_vis_info->skb_packet = dev_alloc_skb(
						sizeof(struct vis_packet) +
						MAX_VIS_PACKET_SIZE +
						sizeof(struct ethhdr));
	if (!bat_priv->my_vis_info->skb_packet)
		goto free_info;

	skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr));
	packet = (struct vis_packet *)skb_put(
					bat_priv->my_vis_info->skb_packet,
					sizeof(struct vis_packet));

	/* prefill the vis info */
	bat_priv->my_vis_info->first_seen = jiffies -
						msecs_to_jiffies(VIS_INTERVAL);
	INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list);
	INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
	kref_init(&bat_priv->my_vis_info->refcount);
	bat_priv->my_vis_info->bat_priv = bat_priv;
	packet->version = COMPAT_VERSION;
	packet->packet_type = BAT_VIS;
	packet->ttl = TTL;
	packet->seqno = 0;
	packet->entries = 0;

	INIT_LIST_HEAD(&bat_priv->vis_send_list);

	hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
			      bat_priv->my_vis_info);
	if (hash_added < 0) {
		pr_err("Can't add own vis packet into hash\n");
		/* not in hash, need to remove it manually. */
		kref_put(&bat_priv->my_vis_info->refcount, free_info);
		goto err;
	}

	spin_unlock_bh(&bat_priv->vis_hash_lock);
	start_vis_timer(bat_priv);
	return 1;

free_info:
	kfree(bat_priv->my_vis_info);
	bat_priv->my_vis_info = NULL;
err:
	spin_unlock_bh(&bat_priv->vis_hash_lock);
	vis_quit(bat_priv);
	return 0;
}

/* Decrease the reference count on a hash item info */
static void free_info_ref(void *data, void *arg)
{
	struct vis_info *info = data;

	send_list_del(info);
	kref_put(&info->refcount, free_info);
}

/* shutdown vis-server */
void vis_quit(struct bat_priv *bat_priv)
{
	if (!bat_priv->vis_hash)
		return;

	cancel_delayed_work_sync(&bat_priv->vis_work);

	spin_lock_bh(&bat_priv->vis_hash_lock);
	/* properly remove, kill timers ... */
	hash_delete(bat_priv->vis_hash, free_info_ref, NULL);
	bat_priv->vis_hash = NULL;
	bat_priv->my_vis_info = NULL;
	spin_unlock_bh(&bat_priv->vis_hash_lock);
}

/* schedule packets for (re)transmission */
static void start_vis_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->vis_work, send_vis_packets);
	queue_delayed_work(bat_event_workqueue, &bat_priv->vis_work,
			   msecs_to_jiffies(VIS_INTERVAL));
}