/*
 * Copyright (C) 2008-2010 B.A.T.M.A.N. contributors:
 *
 * Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */
22 #include "main.h"
23 #include "send.h"
24 #include "translation-table.h"
25 #include "vis.h"
26 #include "soft-interface.h"
27 #include "hard-interface.h"
28 #include "hash.h"
29 #include "originator.h"
31 #define MAX_VIS_PACKET_SIZE 1000

/* Returns the smallest signed integer in two's complement with the sizeof x */
#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))

/* Checks if a sequence number x is a predecessor/successor of y.
 * These macros handle overflows/underflows and can correctly check for a
 * predecessor/successor unless the variable sequence number has grown by
 * more than 2**(bitwidth(x)-1)-1.
 * This means that for a uint8_t with the maximum value 255, it would think:
 *  - when adding nothing - it is neither a predecessor nor a successor
 *  - before adding more than 127 to the starting value - it is a predecessor,
 *  - when adding 128 - it is neither a predecessor nor a successor,
 *  - after adding more than 127 to the starting value - it is a successor */
#define seq_before(x, y) ({typeof(x) _dummy = (x - y); \
			_dummy > smallest_signed_int(_dummy); })
#define seq_after(x, y) seq_before(y, x)
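
/* Worked example, assuming uint8_t sequence numbers: seq_before(250, 2)
 * computes (uint8_t)(250 - 2) == 248, which is greater than 128, so 250 is
 * treated as a predecessor of 2 across the wrap-around; seq_after(2, 250)
 * is true for the same reason. */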

static void start_vis_timer(struct bat_priv *bat_priv);

/* free the info */
static void free_info(struct kref *ref)
{
	struct vis_info *info = container_of(ref, struct vis_info, refcount);
	struct bat_priv *bat_priv = info->bat_priv;
	struct recvlist_node *entry, *tmp;

	list_del_init(&info->send_list);
	spin_lock_bh(&bat_priv->vis_list_lock);
	list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	spin_unlock_bh(&bat_priv->vis_list_lock);
	kfree_skb(info->skb_packet);
}

/* Compare two vis packets, used by the hashing algorithm */
static int vis_info_cmp(void *data1, void *data2)
{
	struct vis_info *d1, *d2;
	struct vis_packet *p1, *p2;
	d1 = data1;
	d2 = data2;
	p1 = (struct vis_packet *)d1->skb_packet->data;
	p2 = (struct vis_packet *)d2->skb_packet->data;
	return compare_orig(p1->vis_orig, p2->vis_orig);
}

/* hash function to choose an entry in a hash table of given size */
/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
static int vis_info_choose(void *data, int size)
{
	struct vis_info *vis_info = data;
	struct vis_packet *packet;
	unsigned char *key;
	uint32_t hash = 0;
	size_t i;

	packet = (struct vis_packet *)vis_info->skb_packet->data;
	key = packet->vis_orig;
	for (i = 0; i < ETH_ALEN; i++) {
		hash += key[i];
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}

	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash % size;
}

/* insert interface to the list of interfaces of one originator, if it
 * does not already exist in the list */
static void vis_data_insert_interface(const uint8_t *interface,
				      struct hlist_head *if_list,
				      bool primary)
{
	struct if_list_entry *entry;
	struct hlist_node *pos;

	hlist_for_each_entry(entry, pos, if_list, list) {
		if (compare_orig(entry->addr, (void *)interface))
			return;
	}

	/* it's a new address, add it to the list */
	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return;
	memcpy(entry->addr, interface, ETH_ALEN);
	entry->primary = primary;
	hlist_add_head(&entry->list, if_list);
}

static ssize_t vis_data_read_prim_sec(char *buff, struct hlist_head *if_list)
{
	struct if_list_entry *entry;
	struct hlist_node *pos;
	size_t len = 0;

	hlist_for_each_entry(entry, pos, if_list, list) {
		if (entry->primary)
			len += sprintf(buff + len, "PRIMARY, ");
		else
			len += sprintf(buff + len, "SEC %pM, ", entry->addr);
	}

	return len;
}
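
/* worst-case output of vis_data_read_prim_sec(): "PRIMARY, " is 9 bytes and
 * "SEC xx:xx:xx:xx:xx:xx, " is 23 bytes - these are the per-entry sizes
 * accounted for below. */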
static size_t vis_data_count_prim_sec(struct hlist_head *if_list)
{
	struct if_list_entry *entry;
	struct hlist_node *pos;
	size_t count = 0;

	hlist_for_each_entry(entry, pos, if_list, list) {
		if (entry->primary)
			count += 9;
		else
			count += 23;
	}

	return count;
}

/* read an entry */
static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
				   uint8_t *src, bool primary)
{
	/* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
	if (primary && entry->quality == 0)
		return sprintf(buff, "HNA %pM, ", entry->dest);
	else if (compare_orig(entry->src, src))
		return sprintf(buff, "TQ %pM %d, ", entry->dest,
			       entry->quality);

	return 0;
}

int vis_seq_print_text(struct seq_file *seq, void *offset)
{
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	struct vis_info *info;
	struct vis_packet *packet;
	struct vis_info_entry *entries;
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->vis_hash;
	HLIST_HEAD(vis_if_list);
	struct if_list_entry *entry;
	struct hlist_node *pos, *n;
	int i, j;
	int vis_server = atomic_read(&bat_priv->vis_mode);
	size_t buff_pos, buf_size;
	char *buff;
	int compare;

	if ((!bat_priv->primary_if) ||
	    (vis_server == VIS_TYPE_CLIENT_UPDATE))
		return 0;
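
	/* the output is built in two passes: the first pass only computes an
	 * upper bound for the buffer size (a 17 byte MAC plus a comma per
	 * interface line, at most 26 bytes per entry as noted in
	 * vis_data_read_entry(), plus the primary/secondary records), the
	 * second pass fills the buffer. */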
	buf_size = 1;
	/* Estimate length */
	spin_lock_bh(&bat_priv->vis_hash_lock);
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(bucket, walk, head, hlist) {
			info = bucket->data;
			packet = (struct vis_packet *)info->skb_packet->data;
			entries = (struct vis_info_entry *)
				((char *)packet + sizeof(struct vis_packet));

			for (j = 0; j < packet->entries; j++) {
				if (entries[j].quality == 0)
					continue;
				compare =
				 compare_orig(entries[j].src, packet->vis_orig);
				vis_data_insert_interface(entries[j].src,
							  &vis_if_list,
							  compare);
			}

			hlist_for_each_entry(entry, pos, &vis_if_list, list) {
				buf_size += 18 + 26 * packet->entries;

				/* add primary/secondary records */
				if (compare_orig(entry->addr, packet->vis_orig))
					buf_size +=
					  vis_data_count_prim_sec(&vis_if_list);

				buf_size += 1;
			}

			hlist_for_each_entry_safe(entry, pos, n, &vis_if_list,
						  list) {
				hlist_del(&entry->list);
				kfree(entry);
			}
		}
	}

	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		spin_unlock_bh(&bat_priv->vis_hash_lock);
		return -ENOMEM;
	}
	buff[0] = '\0';
	buff_pos = 0;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(bucket, walk, head, hlist) {
			info = bucket->data;
			packet = (struct vis_packet *)info->skb_packet->data;
			entries = (struct vis_info_entry *)
				((char *)packet + sizeof(struct vis_packet));

			for (j = 0; j < packet->entries; j++) {
				if (entries[j].quality == 0)
					continue;
				compare =
				 compare_orig(entries[j].src, packet->vis_orig);
				vis_data_insert_interface(entries[j].src,
							  &vis_if_list,
							  compare);
			}

			hlist_for_each_entry(entry, pos, &vis_if_list, list) {
				buff_pos += sprintf(buff + buff_pos, "%pM,",
						    entry->addr);

				/* use j here: reusing the outer loop counter i
				 * would clobber the hash table iteration */
				for (j = 0; j < packet->entries; j++)
					buff_pos += vis_data_read_entry(
							buff + buff_pos,
							&entries[j],
							entry->addr,
							entry->primary);

				/* add primary/secondary records */
				if (compare_orig(entry->addr, packet->vis_orig))
					buff_pos +=
					 vis_data_read_prim_sec(buff + buff_pos,
								&vis_if_list);

				buff_pos += sprintf(buff + buff_pos, "\n");
			}

			hlist_for_each_entry_safe(entry, pos, n, &vis_if_list,
						  list) {
				hlist_del(&entry->list);
				kfree(entry);
			}
		}
	}

	spin_unlock_bh(&bat_priv->vis_hash_lock);

	seq_printf(seq, "%s", buff);
	kfree(buff);

	return 0;
}

/* add the info packet to the send list, if it was not
 * already linked in. */
static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info)
{
	if (list_empty(&info->send_list)) {
		kref_get(&info->refcount);
		list_add_tail(&info->send_list, &bat_priv->vis_send_list);
	}
}

/* delete the info packet from the send list, if it was
 * linked in. */
static void send_list_del(struct vis_info *info)
{
	if (!list_empty(&info->send_list)) {
		list_del_init(&info->send_list);
		kref_put(&info->refcount, free_info);
	}
}

/* tries to add one entry to the receive list. */
static void recv_list_add(struct bat_priv *bat_priv,
			  struct list_head *recv_list, char *mac)
{
	struct recvlist_node *entry;

	entry = kmalloc(sizeof(struct recvlist_node), GFP_ATOMIC);
	if (!entry)
		return;

	memcpy(entry->mac, mac, ETH_ALEN);
	spin_lock_bh(&bat_priv->vis_list_lock);
	list_add_tail(&entry->list, recv_list);
	spin_unlock_bh(&bat_priv->vis_list_lock);
}

/* returns 1 if this mac is in the recv_list */
static int recv_list_is_in(struct bat_priv *bat_priv,
			   struct list_head *recv_list, char *mac)
{
	struct recvlist_node *entry;

	spin_lock_bh(&bat_priv->vis_list_lock);
	list_for_each_entry(entry, recv_list, list) {
		if (memcmp(entry->mac, mac, ETH_ALEN) == 0) {
			spin_unlock_bh(&bat_priv->vis_list_lock);
			return 1;
		}
	}
	spin_unlock_bh(&bat_priv->vis_list_lock);
	return 0;
}

/* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old,
 * broken.. ). vis hash must be locked outside. is_new is set when the packet
 * is newer than old entries in the hash. */
static struct vis_info *add_packet(struct bat_priv *bat_priv,
				   struct vis_packet *vis_packet,
				   int vis_info_len, int *is_new,
				   int make_broadcast)
{
	struct vis_info *info, *old_info;
	struct vis_packet *search_packet, *old_packet;
	struct vis_info search_elem;
	struct vis_packet *packet;
	int hash_added;

	*is_new = 0;
	/* sanity check */
	if (!bat_priv->vis_hash)
		return NULL;

	/* see if the packet is already in vis_hash */
	search_elem.skb_packet = dev_alloc_skb(sizeof(struct vis_packet));
	if (!search_elem.skb_packet)
		return NULL;
	search_packet = (struct vis_packet *)skb_put(search_elem.skb_packet,
						     sizeof(struct vis_packet));

	memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
	old_info = hash_find(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
			     &search_elem);
	kfree_skb(search_elem.skb_packet);
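
	/* if an entry for this originator already exists, the wrap-around
	 * safe seq_after() decides whether the received packet supersedes
	 * the stored one; an identical sequence number only extends the
	 * receive list of the stored entry. */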
	if (old_info) {
		old_packet = (struct vis_packet *)old_info->skb_packet->data;
		if (!seq_after(ntohl(vis_packet->seqno),
			       ntohl(old_packet->seqno))) {
			if (old_packet->seqno == vis_packet->seqno) {
				recv_list_add(bat_priv, &old_info->recv_list,
					      vis_packet->sender_orig);
				return old_info;
			} else {
				/* newer packet is already in hash. */
				return NULL;
			}
		}
		/* remove old entry */
		hash_remove(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
			    old_info);
		send_list_del(old_info);
		kref_put(&old_info->refcount, free_info);
	}

	info = kmalloc(sizeof(struct vis_info), GFP_ATOMIC);
	if (!info)
		return NULL;

	info->skb_packet = dev_alloc_skb(sizeof(struct vis_packet) +
					 vis_info_len + sizeof(struct ethhdr));
	if (!info->skb_packet) {
		kfree(info);
		return NULL;
	}
	skb_reserve(info->skb_packet, sizeof(struct ethhdr));
	packet = (struct vis_packet *)skb_put(info->skb_packet,
					      sizeof(struct vis_packet) +
					      vis_info_len);

	kref_init(&info->refcount);
	INIT_LIST_HEAD(&info->send_list);
	INIT_LIST_HEAD(&info->recv_list);
	info->first_seen = jiffies;
	info->bat_priv = bat_priv;
	memcpy(packet, vis_packet, sizeof(struct vis_packet) + vis_info_len);

	/* initialize and add new packet. */
	*is_new = 1;

	/* Make it a broadcast packet, if required */
	if (make_broadcast)
		memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);

	/* repair if entries is longer than packet. */
	if (packet->entries * sizeof(struct vis_info_entry) > vis_info_len)
		packet->entries = vis_info_len / sizeof(struct vis_info_entry);

	recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);

	/* try to add it */
	hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
			      info);
	if (hash_added < 0) {
		/* did not work (for some reason); drop the reference on the
		 * freshly allocated info instead of the (possibly NULL)
		 * old_info */
		kref_put(&info->refcount, free_info);
		info = NULL;
	}

	return info;
}

/* handle the server sync packet, forward if needed. */
void receive_server_sync_packet(struct bat_priv *bat_priv,
				struct vis_packet *vis_packet,
				int vis_info_len)
{
	struct vis_info *info;
	int is_new, make_broadcast;
	int vis_server = atomic_read(&bat_priv->vis_mode);

	make_broadcast = (vis_server == VIS_TYPE_SERVER_SYNC);

	spin_lock_bh(&bat_priv->vis_hash_lock);
	info = add_packet(bat_priv, vis_packet, vis_info_len,
			  &is_new, make_broadcast);
	if (!info)
		goto end;

	/* only if we are server ourselves and packet is newer than the one in
	 * hash. */
	if (vis_server == VIS_TYPE_SERVER_SYNC && is_new)
		send_list_add(bat_priv, info);
end:
	spin_unlock_bh(&bat_priv->vis_hash_lock);
}

/* handle an incoming client update packet and schedule forward if needed. */
void receive_client_update_packet(struct bat_priv *bat_priv,
				  struct vis_packet *vis_packet,
				  int vis_info_len)
{
	struct vis_info *info;
	struct vis_packet *packet;
	int is_new;
	int vis_server = atomic_read(&bat_priv->vis_mode);
	int are_target = 0;

	/* clients shall not broadcast. */
	if (is_broadcast_ether_addr(vis_packet->target_orig))
		return;

	/* Are we the target for this VIS packet? */
	if (vis_server == VIS_TYPE_SERVER_SYNC &&
	    is_my_mac(vis_packet->target_orig))
		are_target = 1;

	spin_lock_bh(&bat_priv->vis_hash_lock);
	info = add_packet(bat_priv, vis_packet, vis_info_len,
			  &is_new, are_target);

	if (!info)
		goto end;
	/* note that outdated packets will be dropped at this point. */

	packet = (struct vis_packet *)info->skb_packet->data;

	/* send only if we're the target server or ... */
	if (are_target && is_new) {
		packet->vis_type = VIS_TYPE_SERVER_SYNC;	/* upgrade! */
		send_list_add(bat_priv, info);

		/* ... we're not the recipient (and thus need to forward). */
	} else if (!is_my_mac(packet->target_orig)) {
		send_list_add(bat_priv, info);
	}

end:
	spin_unlock_bh(&bat_priv->vis_hash_lock);
}

/* Walk the originators and find the VIS server with the best tq. Set the
 * packet address to its address and return the best_tq.
 *
 * Must be called with the originator hash locked */
static int find_best_vis_server(struct bat_priv *bat_priv,
				struct vis_info *info)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	struct orig_node *orig_node;
	struct vis_packet *packet;
	int best_tq = -1, i;

	packet = (struct vis_packet *)info->skb_packet->data;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(bucket, walk, head, hlist) {
			orig_node = bucket->data;
			if ((orig_node) && (orig_node->router) &&
			    (orig_node->flags & VIS_SERVER) &&
			    (orig_node->router->tq_avg > best_tq)) {
				best_tq = orig_node->router->tq_avg;
				memcpy(packet->target_orig, orig_node->orig,
				       ETH_ALEN);
			}
		}
	}

	return best_tq;
}

/* Return true if the vis packet is full. */
static bool vis_packet_full(struct vis_info *info)
{
	struct vis_packet *packet;
	packet = (struct vis_packet *)info->skb_packet->data;

	if (MAX_VIS_PACKET_SIZE / sizeof(struct vis_info_entry)
	    < packet->entries + 1)
		return true;
	return false;
}

/* generates a packet of own vis data,
 * returns 0 on success, -1 if no packet could be generated */
static int generate_vis_packet(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	struct orig_node *orig_node;
	struct neigh_node *neigh_node;
	struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info;
	struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
	struct vis_info_entry *entry;
	struct hna_local_entry *hna_local_entry;
	int best_tq = -1, i;

	info->first_seen = jiffies;
	packet->vis_type = atomic_read(&bat_priv->vis_mode);

	spin_lock_bh(&bat_priv->orig_hash_lock);
	memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
	packet->ttl = TTL;
	packet->seqno = htonl(ntohl(packet->seqno) + 1);
	packet->entries = 0;
	skb_trim(info->skb_packet, sizeof(struct vis_packet));

	if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) {
		best_tq = find_best_vis_server(bat_priv, info);

		if (best_tq < 0) {
			spin_unlock_bh(&bat_priv->orig_hash_lock);
			return -1;
		}
	}

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(bucket, walk, head, hlist) {
			orig_node = bucket->data;
			neigh_node = orig_node->router;

			if (!neigh_node)
				continue;

			if (!compare_orig(neigh_node->addr, orig_node->orig))
				continue;

			if (neigh_node->if_incoming->if_status != IF_ACTIVE)
				continue;

			if (neigh_node->tq_avg < 1)
				continue;

			/* fill one entry into buffer. */
			entry = (struct vis_info_entry *)
				skb_put(info->skb_packet, sizeof(*entry));
			memcpy(entry->src,
			       neigh_node->if_incoming->net_dev->dev_addr,
			       ETH_ALEN);
			memcpy(entry->dest, orig_node->orig, ETH_ALEN);
			entry->quality = neigh_node->tq_avg;
			packet->entries++;

			if (vis_packet_full(info)) {
				spin_unlock_bh(&bat_priv->orig_hash_lock);
				return 0;
			}
		}
	}

	spin_unlock_bh(&bat_priv->orig_hash_lock);
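
	/* append the locally announced (HNA) addresses; entries with
	 * quality 0 denote announced hosts rather than mesh links. */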
	hash = bat_priv->hna_local_hash;

	spin_lock_bh(&bat_priv->hna_lhash_lock);
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(bucket, walk, head, hlist) {
			hna_local_entry = bucket->data;
			entry = (struct vis_info_entry *)
					skb_put(info->skb_packet,
						sizeof(*entry));
			memset(entry->src, 0, ETH_ALEN);
			memcpy(entry->dest, hna_local_entry->addr, ETH_ALEN);
			entry->quality = 0; /* 0 means HNA */
			packet->entries++;

			if (vis_packet_full(info)) {
				spin_unlock_bh(&bat_priv->hna_lhash_lock);
				return 0;
			}
		}
	}

	spin_unlock_bh(&bat_priv->hna_lhash_lock);
	return 0;
}

/* free old vis packets. Must be called with the vis_hash_lock
 * held */
static void purge_vis_packets(struct bat_priv *bat_priv)
{
	int i;
	struct hashtable_t *hash = bat_priv->vis_hash;
	struct hlist_node *walk, *safe;
	struct hlist_head *head;
	struct element_t *bucket;
	struct vis_info *info;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
			info = bucket->data;

			/* never purge own data. */
			if (info == bat_priv->my_vis_info)
				continue;

			if (time_after(jiffies,
				       info->first_seen + VIS_TIMEOUT * HZ)) {
				hlist_del(walk);
				kfree(bucket);
				send_list_del(info);
				kref_put(&info->refcount, free_info);
			}
		}
	}
}

static void broadcast_vis_packet(struct bat_priv *bat_priv,
				 struct vis_info *info)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	struct orig_node *orig_node;
	struct vis_packet *packet;
	struct sk_buff *skb;
	struct batman_if *batman_if;
	uint8_t dstaddr[ETH_ALEN];
	int i;

	spin_lock_bh(&bat_priv->orig_hash_lock);
	packet = (struct vis_packet *)info->skb_packet->data;

	/* send to all routers in range. */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(bucket, walk, head, hlist) {
			orig_node = bucket->data;

			/* if it's a vis server and reachable, send it. */
			if ((!orig_node) || (!orig_node->router))
				continue;
			if (!(orig_node->flags & VIS_SERVER))
				continue;
			/* don't send it if we already received the packet from
			 * this node. */
			if (recv_list_is_in(bat_priv, &info->recv_list,
					    orig_node->orig))
				continue;
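
			/* copy the next-hop address and outgoing interface
			 * while still holding orig_hash_lock, then drop the
			 * lock for the actual transmit */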
			memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
			batman_if = orig_node->router->if_incoming;
			memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
			spin_unlock_bh(&bat_priv->orig_hash_lock);

			skb = skb_clone(info->skb_packet, GFP_ATOMIC);
			if (skb)
				send_skb_packet(skb, batman_if, dstaddr);

			spin_lock_bh(&bat_priv->orig_hash_lock);
		}
	}

	spin_unlock_bh(&bat_priv->orig_hash_lock);
}

static void unicast_vis_packet(struct bat_priv *bat_priv,
			       struct vis_info *info)
{
	struct orig_node *orig_node;
	struct sk_buff *skb;
	struct vis_packet *packet;
	struct batman_if *batman_if;
	uint8_t dstaddr[ETH_ALEN];

	spin_lock_bh(&bat_priv->orig_hash_lock);
	packet = (struct vis_packet *)info->skb_packet->data;
	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
						   compare_orig, choose_orig,
						   packet->target_orig));

	if ((!orig_node) || (!orig_node->router))
		goto out;

	/* don't lock while sending the packets ... we therefore
	 * copy the required data before sending */
	batman_if = orig_node->router->if_incoming;
	memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
	spin_unlock_bh(&bat_priv->orig_hash_lock);

	skb = skb_clone(info->skb_packet, GFP_ATOMIC);
	if (skb)
		send_skb_packet(skb, batman_if, dstaddr);

	return;

out:
	spin_unlock_bh(&bat_priv->orig_hash_lock);
}

/* only send one vis packet. called from send_vis_packets() */
static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
{
	struct vis_packet *packet;

	packet = (struct vis_packet *)info->skb_packet->data;
	if (packet->ttl < 2) {
		pr_debug("Error - can't send vis packet: ttl exceeded\n");
		return;
	}

	memcpy(packet->sender_orig, bat_priv->primary_if->net_dev->dev_addr,
	       ETH_ALEN);
	packet->ttl--;

	if (is_broadcast_ether_addr(packet->target_orig))
		broadcast_vis_packet(bat_priv, info);
	else
		unicast_vis_packet(bat_priv, info);
	packet->ttl++; /* restore TTL */
}

/* called from timer; send (and maybe generate) vis packet. */
static void send_vis_packets(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, vis_work);
	struct vis_info *info, *temp;

	spin_lock_bh(&bat_priv->vis_hash_lock);
	purge_vis_packets(bat_priv);

	if (generate_vis_packet(bat_priv) == 0) {
		/* schedule if generation was successful */
		send_list_add(bat_priv, bat_priv->my_vis_info);
	}
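
	/* walk the send list; the extra reference taken on each info keeps it
	 * alive while vis_hash_lock is dropped for the actual transmission */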
	list_for_each_entry_safe(info, temp, &bat_priv->vis_send_list,
				 send_list) {
		kref_get(&info->refcount);
		spin_unlock_bh(&bat_priv->vis_hash_lock);

		if (bat_priv->primary_if)
			send_vis_packet(bat_priv, info);

		spin_lock_bh(&bat_priv->vis_hash_lock);
		send_list_del(info);
		kref_put(&info->refcount, free_info);
	}
	spin_unlock_bh(&bat_priv->vis_hash_lock);
	start_vis_timer(bat_priv);
}

/* init the vis server. this may only be called when if_list is already
 * initialized (e.g. bat0 is initialized, interfaces have been added) */
int vis_init(struct bat_priv *bat_priv)
{
	struct vis_packet *packet;
	int hash_added;

	if (bat_priv->vis_hash)
		return 1;

	spin_lock_bh(&bat_priv->vis_hash_lock);

	bat_priv->vis_hash = hash_new(256);
	if (!bat_priv->vis_hash) {
		pr_err("Can't initialize vis_hash\n");
		goto err;
	}

	bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
	if (!bat_priv->my_vis_info) {
		pr_err("Can't initialize vis packet\n");
		goto err;
	}

	bat_priv->my_vis_info->skb_packet = dev_alloc_skb(
						sizeof(struct vis_packet) +
						MAX_VIS_PACKET_SIZE +
						sizeof(struct ethhdr));
	if (!bat_priv->my_vis_info->skb_packet)
		goto free_info;

	skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr));
	packet = (struct vis_packet *)skb_put(
					bat_priv->my_vis_info->skb_packet,
					sizeof(struct vis_packet));

	/* prefill the vis info */
	bat_priv->my_vis_info->first_seen = jiffies -
						msecs_to_jiffies(VIS_INTERVAL);
	INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list);
	INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
	kref_init(&bat_priv->my_vis_info->refcount);
	bat_priv->my_vis_info->bat_priv = bat_priv;
	packet->version = COMPAT_VERSION;
	packet->packet_type = BAT_VIS;
	packet->ttl = TTL;
	packet->seqno = 0;
	packet->entries = 0;

	INIT_LIST_HEAD(&bat_priv->vis_send_list);

	hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
			      bat_priv->my_vis_info);
	if (hash_added < 0) {
		pr_err("Can't add own vis packet into hash\n");
		/* not in hash, need to remove it manually. */
		kref_put(&bat_priv->my_vis_info->refcount, free_info);
		goto err;
	}

	spin_unlock_bh(&bat_priv->vis_hash_lock);
	start_vis_timer(bat_priv);
	return 1;

free_info:
	kfree(bat_priv->my_vis_info);
	bat_priv->my_vis_info = NULL;
err:
	spin_unlock_bh(&bat_priv->vis_hash_lock);
	vis_quit(bat_priv);
	return 0;
}

/* Decrease the reference count on a hash item info */
static void free_info_ref(void *data, void *arg)
{
	struct vis_info *info = data;

	send_list_del(info);
	kref_put(&info->refcount, free_info);
}

/* shutdown vis-server */
void vis_quit(struct bat_priv *bat_priv)
{
	if (!bat_priv->vis_hash)
		return;

	cancel_delayed_work_sync(&bat_priv->vis_work);

	spin_lock_bh(&bat_priv->vis_hash_lock);
	/* properly remove, kill timers ... */
	hash_delete(bat_priv->vis_hash, free_info_ref, NULL);
	bat_priv->vis_hash = NULL;
	bat_priv->my_vis_info = NULL;
	spin_unlock_bh(&bat_priv->vis_hash_lock);
}

/* schedule packets for (re)transmission */
static void start_vis_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->vis_work, send_vis_packets);
	queue_delayed_work(bat_event_workqueue, &bat_priv->vis_work,
			   msecs_to_jiffies(VIS_INTERVAL));
}