/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */
#include "main.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hash.h"
#include "originator.h"
/* forward declarations for helpers used before their definitions below */
static void hna_local_purge(struct work_struct *work);
static void _hna_global_del_orig(struct bat_priv *bat_priv,
				 struct hna_global_entry *hna_global_entry,
				 char *message);
33 /* returns 1 if they are the same mac addr */
34 static int compare_lhna(struct hlist_node
*node
, void *data2
)
36 void *data1
= container_of(node
, struct hna_local_entry
, hash_entry
);
38 return (memcmp(data1
, data2
, ETH_ALEN
) == 0 ? 1 : 0);
41 /* returns 1 if they are the same mac addr */
42 static int compare_ghna(struct hlist_node
*node
, void *data2
)
44 void *data1
= container_of(node
, struct hna_global_entry
, hash_entry
);
46 return (memcmp(data1
, data2
, ETH_ALEN
) == 0 ? 1 : 0);
49 static void hna_local_start_timer(struct bat_priv
*bat_priv
)
51 INIT_DELAYED_WORK(&bat_priv
->hna_work
, hna_local_purge
);
52 queue_delayed_work(bat_event_workqueue
, &bat_priv
->hna_work
, 10 * HZ
);
55 static struct hna_local_entry
*hna_local_hash_find(struct bat_priv
*bat_priv
,
58 struct hashtable_t
*hash
= bat_priv
->hna_local_hash
;
59 struct hlist_head
*head
;
60 struct hlist_node
*node
;
61 struct hna_local_entry
*hna_local_entry
, *hna_local_entry_tmp
= NULL
;
67 index
= choose_orig(data
, hash
->size
);
68 head
= &hash
->table
[index
];
71 hlist_for_each_entry_rcu(hna_local_entry
, node
, head
, hash_entry
) {
72 if (!compare_eth(hna_local_entry
, data
))
75 hna_local_entry_tmp
= hna_local_entry
;
80 return hna_local_entry_tmp
;
83 static struct hna_global_entry
*hna_global_hash_find(struct bat_priv
*bat_priv
,
86 struct hashtable_t
*hash
= bat_priv
->hna_global_hash
;
87 struct hlist_head
*head
;
88 struct hlist_node
*node
;
89 struct hna_global_entry
*hna_global_entry
;
90 struct hna_global_entry
*hna_global_entry_tmp
= NULL
;
96 index
= choose_orig(data
, hash
->size
);
97 head
= &hash
->table
[index
];
100 hlist_for_each_entry_rcu(hna_global_entry
, node
, head
, hash_entry
) {
101 if (!compare_eth(hna_global_entry
, data
))
104 hna_global_entry_tmp
= hna_global_entry
;
109 return hna_global_entry_tmp
;
112 int hna_local_init(struct bat_priv
*bat_priv
)
114 if (bat_priv
->hna_local_hash
)
117 bat_priv
->hna_local_hash
= hash_new(1024);
119 if (!bat_priv
->hna_local_hash
)
122 atomic_set(&bat_priv
->hna_local_changed
, 0);
123 hna_local_start_timer(bat_priv
);
128 void hna_local_add(struct net_device
*soft_iface
, uint8_t *addr
)
130 struct bat_priv
*bat_priv
= netdev_priv(soft_iface
);
131 struct hna_local_entry
*hna_local_entry
;
132 struct hna_global_entry
*hna_global_entry
;
135 spin_lock_bh(&bat_priv
->hna_lhash_lock
);
136 hna_local_entry
= hna_local_hash_find(bat_priv
, addr
);
137 spin_unlock_bh(&bat_priv
->hna_lhash_lock
);
139 if (hna_local_entry
) {
140 hna_local_entry
->last_seen
= jiffies
;
144 /* only announce as many hosts as possible in the batman-packet and
145 space in batman_packet->num_hna That also should give a limit to
147 required_bytes
= (bat_priv
->num_local_hna
+ 1) * ETH_ALEN
;
148 required_bytes
+= BAT_PACKET_LEN
;
150 if ((required_bytes
> ETH_DATA_LEN
) ||
151 (atomic_read(&bat_priv
->aggregated_ogms
) &&
152 required_bytes
> MAX_AGGREGATION_BYTES
) ||
153 (bat_priv
->num_local_hna
+ 1 > 255)) {
154 bat_dbg(DBG_ROUTES
, bat_priv
,
155 "Can't add new local hna entry (%pM): "
156 "number of local hna entries exceeds packet size\n",
161 bat_dbg(DBG_ROUTES
, bat_priv
,
162 "Creating new local hna entry: %pM\n", addr
);
164 hna_local_entry
= kmalloc(sizeof(struct hna_local_entry
), GFP_ATOMIC
);
165 if (!hna_local_entry
)
168 memcpy(hna_local_entry
->addr
, addr
, ETH_ALEN
);
169 hna_local_entry
->last_seen
= jiffies
;
171 /* the batman interface mac address should never be purged */
172 if (compare_eth(addr
, soft_iface
->dev_addr
))
173 hna_local_entry
->never_purge
= 1;
175 hna_local_entry
->never_purge
= 0;
177 spin_lock_bh(&bat_priv
->hna_lhash_lock
);
179 hash_add(bat_priv
->hna_local_hash
, compare_lhna
, choose_orig
,
180 hna_local_entry
, &hna_local_entry
->hash_entry
);
181 bat_priv
->num_local_hna
++;
182 atomic_set(&bat_priv
->hna_local_changed
, 1);
184 spin_unlock_bh(&bat_priv
->hna_lhash_lock
);
186 /* remove address from global hash if present */
187 spin_lock_bh(&bat_priv
->hna_ghash_lock
);
189 hna_global_entry
= hna_global_hash_find(bat_priv
, addr
);
191 if (hna_global_entry
)
192 _hna_global_del_orig(bat_priv
, hna_global_entry
,
193 "local hna received");
195 spin_unlock_bh(&bat_priv
->hna_ghash_lock
);
198 int hna_local_fill_buffer(struct bat_priv
*bat_priv
,
199 unsigned char *buff
, int buff_len
)
201 struct hashtable_t
*hash
= bat_priv
->hna_local_hash
;
202 struct hna_local_entry
*hna_local_entry
;
203 struct hlist_node
*node
;
204 struct hlist_head
*head
;
207 spin_lock_bh(&bat_priv
->hna_lhash_lock
);
209 for (i
= 0; i
< hash
->size
; i
++) {
210 head
= &hash
->table
[i
];
213 hlist_for_each_entry_rcu(hna_local_entry
, node
,
215 if (buff_len
< (count
+ 1) * ETH_ALEN
)
218 memcpy(buff
+ (count
* ETH_ALEN
), hna_local_entry
->addr
,
226 /* if we did not get all new local hnas see you next time ;-) */
227 if (count
== bat_priv
->num_local_hna
)
228 atomic_set(&bat_priv
->hna_local_changed
, 0);
230 spin_unlock_bh(&bat_priv
->hna_lhash_lock
);
234 int hna_local_seq_print_text(struct seq_file
*seq
, void *offset
)
236 struct net_device
*net_dev
= (struct net_device
*)seq
->private;
237 struct bat_priv
*bat_priv
= netdev_priv(net_dev
);
238 struct hashtable_t
*hash
= bat_priv
->hna_local_hash
;
239 struct hna_local_entry
*hna_local_entry
;
240 struct hlist_node
*node
;
241 struct hlist_head
*head
;
242 size_t buf_size
, pos
;
246 if (!bat_priv
->primary_if
) {
247 return seq_printf(seq
, "BATMAN mesh %s disabled - "
248 "please specify interfaces to enable it\n",
252 seq_printf(seq
, "Locally retrieved addresses (from %s) "
253 "announced via HNA:\n",
256 spin_lock_bh(&bat_priv
->hna_lhash_lock
);
259 /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
260 for (i
= 0; i
< hash
->size
; i
++) {
261 head
= &hash
->table
[i
];
264 __hlist_for_each_rcu(node
, head
)
269 buff
= kmalloc(buf_size
, GFP_ATOMIC
);
271 spin_unlock_bh(&bat_priv
->hna_lhash_lock
);
278 for (i
= 0; i
< hash
->size
; i
++) {
279 head
= &hash
->table
[i
];
282 hlist_for_each_entry_rcu(hna_local_entry
, node
,
284 pos
+= snprintf(buff
+ pos
, 22, " * %pM\n",
285 hna_local_entry
->addr
);
290 spin_unlock_bh(&bat_priv
->hna_lhash_lock
);
292 seq_printf(seq
, "%s", buff
);
297 static void _hna_local_del(struct hlist_node
*node
, void *arg
)
299 struct bat_priv
*bat_priv
= (struct bat_priv
*)arg
;
300 void *data
= container_of(node
, struct hna_local_entry
, hash_entry
);
303 bat_priv
->num_local_hna
--;
304 atomic_set(&bat_priv
->hna_local_changed
, 1);
307 static void hna_local_del(struct bat_priv
*bat_priv
,
308 struct hna_local_entry
*hna_local_entry
,
311 bat_dbg(DBG_ROUTES
, bat_priv
, "Deleting local hna entry (%pM): %s\n",
312 hna_local_entry
->addr
, message
);
314 hash_remove(bat_priv
->hna_local_hash
, compare_lhna
, choose_orig
,
315 hna_local_entry
->addr
);
316 _hna_local_del(&hna_local_entry
->hash_entry
, bat_priv
);
319 void hna_local_remove(struct bat_priv
*bat_priv
,
320 uint8_t *addr
, char *message
)
322 struct hna_local_entry
*hna_local_entry
;
324 spin_lock_bh(&bat_priv
->hna_lhash_lock
);
326 hna_local_entry
= hna_local_hash_find(bat_priv
, addr
);
329 hna_local_del(bat_priv
, hna_local_entry
, message
);
331 spin_unlock_bh(&bat_priv
->hna_lhash_lock
);
334 static void hna_local_purge(struct work_struct
*work
)
336 struct delayed_work
*delayed_work
=
337 container_of(work
, struct delayed_work
, work
);
338 struct bat_priv
*bat_priv
=
339 container_of(delayed_work
, struct bat_priv
, hna_work
);
340 struct hashtable_t
*hash
= bat_priv
->hna_local_hash
;
341 struct hna_local_entry
*hna_local_entry
;
342 struct hlist_node
*node
, *node_tmp
;
343 struct hlist_head
*head
;
344 unsigned long timeout
;
347 spin_lock_bh(&bat_priv
->hna_lhash_lock
);
349 for (i
= 0; i
< hash
->size
; i
++) {
350 head
= &hash
->table
[i
];
352 hlist_for_each_entry_safe(hna_local_entry
, node
, node_tmp
,
354 if (hna_local_entry
->never_purge
)
357 timeout
= hna_local_entry
->last_seen
;
358 timeout
+= LOCAL_HNA_TIMEOUT
* HZ
;
360 if (time_before(jiffies
, timeout
))
363 hna_local_del(bat_priv
, hna_local_entry
,
364 "address timed out");
368 spin_unlock_bh(&bat_priv
->hna_lhash_lock
);
369 hna_local_start_timer(bat_priv
);
372 void hna_local_free(struct bat_priv
*bat_priv
)
374 if (!bat_priv
->hna_local_hash
)
377 cancel_delayed_work_sync(&bat_priv
->hna_work
);
378 hash_delete(bat_priv
->hna_local_hash
, _hna_local_del
, bat_priv
);
379 bat_priv
->hna_local_hash
= NULL
;
382 int hna_global_init(struct bat_priv
*bat_priv
)
384 if (bat_priv
->hna_global_hash
)
387 bat_priv
->hna_global_hash
= hash_new(1024);
389 if (!bat_priv
->hna_global_hash
)
395 void hna_global_add_orig(struct bat_priv
*bat_priv
,
396 struct orig_node
*orig_node
,
397 unsigned char *hna_buff
, int hna_buff_len
)
399 struct hna_global_entry
*hna_global_entry
;
400 struct hna_local_entry
*hna_local_entry
;
401 int hna_buff_count
= 0;
402 unsigned char *hna_ptr
;
404 while ((hna_buff_count
+ 1) * ETH_ALEN
<= hna_buff_len
) {
405 spin_lock_bh(&bat_priv
->hna_ghash_lock
);
407 hna_ptr
= hna_buff
+ (hna_buff_count
* ETH_ALEN
);
408 hna_global_entry
= hna_global_hash_find(bat_priv
, hna_ptr
);
410 if (!hna_global_entry
) {
411 spin_unlock_bh(&bat_priv
->hna_ghash_lock
);
414 kmalloc(sizeof(struct hna_global_entry
),
417 if (!hna_global_entry
)
420 memcpy(hna_global_entry
->addr
, hna_ptr
, ETH_ALEN
);
422 bat_dbg(DBG_ROUTES
, bat_priv
,
423 "Creating new global hna entry: "
425 hna_global_entry
->addr
, orig_node
->orig
);
427 spin_lock_bh(&bat_priv
->hna_ghash_lock
);
428 hash_add(bat_priv
->hna_global_hash
, compare_ghna
,
429 choose_orig
, hna_global_entry
,
430 &hna_global_entry
->hash_entry
);
434 hna_global_entry
->orig_node
= orig_node
;
435 spin_unlock_bh(&bat_priv
->hna_ghash_lock
);
437 /* remove address from local hash if present */
438 spin_lock_bh(&bat_priv
->hna_lhash_lock
);
440 hna_ptr
= hna_buff
+ (hna_buff_count
* ETH_ALEN
);
441 hna_local_entry
= hna_local_hash_find(bat_priv
, hna_ptr
);
444 hna_local_del(bat_priv
, hna_local_entry
,
445 "global hna received");
447 spin_unlock_bh(&bat_priv
->hna_lhash_lock
);
452 /* initialize, and overwrite if malloc succeeds */
453 orig_node
->hna_buff
= NULL
;
454 orig_node
->hna_buff_len
= 0;
456 if (hna_buff_len
> 0) {
457 orig_node
->hna_buff
= kmalloc(hna_buff_len
, GFP_ATOMIC
);
458 if (orig_node
->hna_buff
) {
459 memcpy(orig_node
->hna_buff
, hna_buff
, hna_buff_len
);
460 orig_node
->hna_buff_len
= hna_buff_len
;
465 int hna_global_seq_print_text(struct seq_file
*seq
, void *offset
)
467 struct net_device
*net_dev
= (struct net_device
*)seq
->private;
468 struct bat_priv
*bat_priv
= netdev_priv(net_dev
);
469 struct hashtable_t
*hash
= bat_priv
->hna_global_hash
;
470 struct hna_global_entry
*hna_global_entry
;
471 struct hlist_node
*node
;
472 struct hlist_head
*head
;
473 size_t buf_size
, pos
;
477 if (!bat_priv
->primary_if
) {
478 return seq_printf(seq
, "BATMAN mesh %s disabled - "
479 "please specify interfaces to enable it\n",
483 seq_printf(seq
, "Globally announced HNAs received via the mesh %s\n",
486 spin_lock_bh(&bat_priv
->hna_ghash_lock
);
489 /* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
490 for (i
= 0; i
< hash
->size
; i
++) {
491 head
= &hash
->table
[i
];
494 __hlist_for_each_rcu(node
, head
)
499 buff
= kmalloc(buf_size
, GFP_ATOMIC
);
501 spin_unlock_bh(&bat_priv
->hna_ghash_lock
);
507 for (i
= 0; i
< hash
->size
; i
++) {
508 head
= &hash
->table
[i
];
511 hlist_for_each_entry_rcu(hna_global_entry
, node
,
513 pos
+= snprintf(buff
+ pos
, 44,
515 hna_global_entry
->addr
,
516 hna_global_entry
->orig_node
->orig
);
521 spin_unlock_bh(&bat_priv
->hna_ghash_lock
);
523 seq_printf(seq
, "%s", buff
);
528 static void _hna_global_del_orig(struct bat_priv
*bat_priv
,
529 struct hna_global_entry
*hna_global_entry
,
532 bat_dbg(DBG_ROUTES
, bat_priv
,
533 "Deleting global hna entry %pM (via %pM): %s\n",
534 hna_global_entry
->addr
, hna_global_entry
->orig_node
->orig
,
537 hash_remove(bat_priv
->hna_global_hash
, compare_ghna
, choose_orig
,
538 hna_global_entry
->addr
);
539 kfree(hna_global_entry
);
542 void hna_global_del_orig(struct bat_priv
*bat_priv
,
543 struct orig_node
*orig_node
, char *message
)
545 struct hna_global_entry
*hna_global_entry
;
546 int hna_buff_count
= 0;
547 unsigned char *hna_ptr
;
549 if (orig_node
->hna_buff_len
== 0)
552 spin_lock_bh(&bat_priv
->hna_ghash_lock
);
554 while ((hna_buff_count
+ 1) * ETH_ALEN
<= orig_node
->hna_buff_len
) {
555 hna_ptr
= orig_node
->hna_buff
+ (hna_buff_count
* ETH_ALEN
);
556 hna_global_entry
= hna_global_hash_find(bat_priv
, hna_ptr
);
558 if ((hna_global_entry
) &&
559 (hna_global_entry
->orig_node
== orig_node
))
560 _hna_global_del_orig(bat_priv
, hna_global_entry
,
566 spin_unlock_bh(&bat_priv
->hna_ghash_lock
);
568 orig_node
->hna_buff_len
= 0;
569 kfree(orig_node
->hna_buff
);
570 orig_node
->hna_buff
= NULL
;
573 static void hna_global_del(struct hlist_node
*node
, void *arg
)
575 void *data
= container_of(node
, struct hna_global_entry
, hash_entry
);
580 void hna_global_free(struct bat_priv
*bat_priv
)
582 if (!bat_priv
->hna_global_hash
)
585 hash_delete(bat_priv
->hna_global_hash
, hna_global_del
, NULL
);
586 bat_priv
->hna_global_hash
= NULL
;
589 struct orig_node
*transtable_search(struct bat_priv
*bat_priv
, uint8_t *addr
)
591 struct hna_global_entry
*hna_global_entry
;
592 struct orig_node
*orig_node
= NULL
;
594 spin_lock_bh(&bat_priv
->hna_ghash_lock
);
595 hna_global_entry
= hna_global_hash_find(bat_priv
, addr
);
597 if (!hna_global_entry
)
600 if (!atomic_inc_not_zero(&hna_global_entry
->orig_node
->refcount
))
603 orig_node
= hna_global_entry
->orig_node
;
606 spin_unlock_bh(&bat_priv
->hna_ghash_lock
);