/*
 * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

23 #include "translation-table.h"
24 #include "soft-interface.h"
27 #include "originator.h"
static void hna_local_purge(struct work_struct *work);
static void _hna_global_del_orig(struct bat_priv *bat_priv,
				 struct hna_global_entry *hna_global_entry,
				 char *message);

static void hna_local_start_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge);
	queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ);
}

int hna_local_init(struct bat_priv *bat_priv)
{
	if (bat_priv->hna_local_hash)
		return 1;

	bat_priv->hna_local_hash = hash_new(1024);

	if (!bat_priv->hna_local_hash)
		return 0;

	atomic_set(&bat_priv->hna_local_changed, 0);
	hna_local_start_timer(bat_priv);

	return 1;
}

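/* hna_local_add() - announce a MAC address reachable through this node.
 * Refreshes the timestamp if the address is already known; otherwise a new
 * entry is created, provided the resulting HNA list still fits into a single
 * batman packet (and, with aggregation enabled, into one aggregate). A newly
 * added local address is also removed from the global table.
 */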
void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct hna_local_entry *hna_local_entry;
	struct hna_global_entry *hna_global_entry;
	int required_bytes;

	spin_lock_bh(&bat_priv->hna_lhash_lock);
	hna_local_entry =
		((struct hna_local_entry *)hash_find(bat_priv->hna_local_hash,
						     compare_orig, choose_orig,
						     addr));
	spin_unlock_bh(&bat_priv->hna_lhash_lock);

	if (hna_local_entry) {
		hna_local_entry->last_seen = jiffies;
		return;
	}

	/* announce only as many hosts as fit into the batman packet and
	 * batman_packet->num_hna; this also puts a limit on MAC flooding */
	required_bytes = (bat_priv->num_local_hna + 1) * ETH_ALEN;
	required_bytes += BAT_PACKET_LEN;

	if ((required_bytes > ETH_DATA_LEN) ||
	    (atomic_read(&bat_priv->aggregated_ogms) &&
	     required_bytes > MAX_AGGREGATION_BYTES) ||
	    (bat_priv->num_local_hna + 1 > 255)) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Can't add new local hna entry (%pM): "
			"number of local hna entries exceeds packet size\n",
			addr);
		return;
	}

	bat_dbg(DBG_ROUTES, bat_priv,
		"Creating new local hna entry: %pM\n", addr);

	hna_local_entry = kmalloc(sizeof(struct hna_local_entry), GFP_ATOMIC);
	if (!hna_local_entry)
		return;

	memcpy(hna_local_entry->addr, addr, ETH_ALEN);
	hna_local_entry->last_seen = jiffies;

	/* the batman interface mac address should never be purged */
	if (compare_orig(addr, soft_iface->dev_addr))
		hna_local_entry->never_purge = 1;
	else
		hna_local_entry->never_purge = 0;

	spin_lock_bh(&bat_priv->hna_lhash_lock);

	hash_add(bat_priv->hna_local_hash, compare_orig, choose_orig,
		 hna_local_entry);
	bat_priv->num_local_hna++;
	atomic_set(&bat_priv->hna_local_changed, 1);

	spin_unlock_bh(&bat_priv->hna_lhash_lock);

	/* remove address from global hash if present */
	spin_lock_bh(&bat_priv->hna_ghash_lock);

	hna_global_entry = ((struct hna_global_entry *)
			    hash_find(bat_priv->hna_global_hash,
				      compare_orig, choose_orig, addr));

	if (hna_global_entry)
		_hna_global_del_orig(bat_priv, hna_global_entry,
				     "local hna received");

	spin_unlock_bh(&bat_priv->hna_ghash_lock);
}

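/* hna_local_fill_buffer() - copy the local HNA addresses into the packet
 * buffer and return the number of entries written. The "changed" flag is
 * only cleared when every local entry fit into the buffer.
 */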
int hna_local_fill_buffer(struct bat_priv *bat_priv,
			  unsigned char *buff, int buff_len)
{
	struct hashtable_t *hash = bat_priv->hna_local_hash;
	struct hna_local_entry *hna_local_entry;
	struct element_t *bucket;
	int i;
	struct hlist_node *walk;
	struct hlist_head *head;
	int count = 0;

	spin_lock_bh(&bat_priv->hna_lhash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(bucket, walk, head, hlist) {

			if (buff_len < (count + 1) * ETH_ALEN)
				break;

			hna_local_entry = bucket->data;
			memcpy(buff + (count * ETH_ALEN), hna_local_entry->addr,
			       ETH_ALEN);

			count++;
		}
	}

	/* if we did not get all new local hnas see you next time ;-) */
	if (count == bat_priv->num_local_hna)
		atomic_set(&bat_priv->hna_local_changed, 0);

	spin_unlock_bh(&bat_priv->hna_lhash_lock);
	return count;
}

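/* hna_local_seq_print_text() - seq_file output of the local HNA table,
 * one " * xx:xx:xx:xx:xx:xx" line per announced address.
 */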
int hna_local_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->hna_local_hash;
	struct hna_local_entry *hna_local_entry;
	int i;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	size_t buf_size, pos;
	char *buff;

	if (!bat_priv->primary_if) {
		return seq_printf(seq, "BATMAN mesh %s disabled - "
				  "please specify interfaces to enable it\n",
				  net_dev->name);
	}

	seq_printf(seq, "Locally retrieved addresses (from %s) "
		   "announced via HNA:\n",
		   net_dev->name);

	spin_lock_bh(&bat_priv->hna_lhash_lock);

	buf_size = 1;
	/* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each(walk, head)
			buf_size += 21;
	}

	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		spin_unlock_bh(&bat_priv->hna_lhash_lock);
		return -ENOMEM;
	}
	buff[0] = '\0';
	pos = 0;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(bucket, walk, head, hlist) {
			hna_local_entry = bucket->data;

			pos += snprintf(buff + pos, 22, " * %pM\n",
					hna_local_entry->addr);
		}
	}

	spin_unlock_bh(&bat_priv->hna_lhash_lock);

	seq_printf(seq, "%s", buff);
	kfree(buff);
	return 0;
}

static void _hna_local_del(void *data, void *arg)
{
	struct bat_priv *bat_priv = (struct bat_priv *)arg;

	kfree(data);
	bat_priv->num_local_hna--;
	atomic_set(&bat_priv->hna_local_changed, 1);
}

static void hna_local_del(struct bat_priv *bat_priv,
			  struct hna_local_entry *hna_local_entry,
			  char *message)
{
	bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
		hna_local_entry->addr, message);

	hash_remove(bat_priv->hna_local_hash, compare_orig, choose_orig,
		    hna_local_entry->addr);
	_hna_local_del(hna_local_entry, bat_priv);
}

void hna_local_remove(struct bat_priv *bat_priv,
		      uint8_t *addr, char *message)
{
	struct hna_local_entry *hna_local_entry;

	spin_lock_bh(&bat_priv->hna_lhash_lock);

	hna_local_entry = (struct hna_local_entry *)
		hash_find(bat_priv->hna_local_hash, compare_orig, choose_orig,
			  addr);

	if (hna_local_entry)
		hna_local_del(bat_priv, hna_local_entry, message);

	spin_unlock_bh(&bat_priv->hna_lhash_lock);
}

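/* hna_local_purge() - delayed work callback that drops local entries which
 * have not been seen for LOCAL_HNA_TIMEOUT seconds (the soft-interface MAC
 * is never purged) and re-arms the purge timer.
 */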
static void hna_local_purge(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, hna_work);
	struct hashtable_t *hash = bat_priv->hna_local_hash;
	struct hna_local_entry *hna_local_entry;
	int i;
	struct hlist_node *walk, *safe;
	struct hlist_head *head;
	struct element_t *bucket;
	unsigned long timeout;

	spin_lock_bh(&bat_priv->hna_lhash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
			hna_local_entry = bucket->data;

			timeout = hna_local_entry->last_seen;
			timeout += LOCAL_HNA_TIMEOUT * HZ;

			if ((!hna_local_entry->never_purge) &&
			    time_after(jiffies, timeout))
				hna_local_del(bat_priv, hna_local_entry,
					      "address timed out");
		}
	}

	spin_unlock_bh(&bat_priv->hna_lhash_lock);
	hna_local_start_timer(bat_priv);
}

void hna_local_free(struct bat_priv *bat_priv)
{
	if (!bat_priv->hna_local_hash)
		return;

	cancel_delayed_work_sync(&bat_priv->hna_work);
	hash_delete(bat_priv->hna_local_hash, _hna_local_del, bat_priv);
	bat_priv->hna_local_hash = NULL;
}

int hna_global_init(struct bat_priv *bat_priv)
{
	if (bat_priv->hna_global_hash)
		return 1;

	bat_priv->hna_global_hash = hash_new(1024);

	if (!bat_priv->hna_global_hash)
		return 0;

	return 1;
}

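/* hna_global_add_orig() - parse the HNA buffer received from an originator,
 * add each announced MAC address to the global table, drop colliding local
 * entries and keep a copy of the buffer in the orig_node.
 */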
void hna_global_add_orig(struct bat_priv *bat_priv,
			 struct orig_node *orig_node,
			 unsigned char *hna_buff, int hna_buff_len)
{
	struct hna_global_entry *hna_global_entry;
	struct hna_local_entry *hna_local_entry;
	int hna_buff_count = 0;
	unsigned char *hna_ptr;

	while ((hna_buff_count + 1) * ETH_ALEN <= hna_buff_len) {
		spin_lock_bh(&bat_priv->hna_ghash_lock);

		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
		hna_global_entry = (struct hna_global_entry *)
			hash_find(bat_priv->hna_global_hash, compare_orig,
				  choose_orig, hna_ptr);

		if (!hna_global_entry) {
			spin_unlock_bh(&bat_priv->hna_ghash_lock);

			hna_global_entry =
				kmalloc(sizeof(struct hna_global_entry),
					GFP_ATOMIC);

			if (!hna_global_entry)
				break;

			memcpy(hna_global_entry->addr, hna_ptr, ETH_ALEN);

			bat_dbg(DBG_ROUTES, bat_priv,
				"Creating new global hna entry: "
				"%pM (via %pM)\n",
				hna_global_entry->addr, orig_node->orig);

			spin_lock_bh(&bat_priv->hna_ghash_lock);
			hash_add(bat_priv->hna_global_hash, compare_orig,
				 choose_orig, hna_global_entry);
		}

		hna_global_entry->orig_node = orig_node;
		spin_unlock_bh(&bat_priv->hna_ghash_lock);

		/* remove address from local hash if present */
		spin_lock_bh(&bat_priv->hna_lhash_lock);

		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
		hna_local_entry = (struct hna_local_entry *)
			hash_find(bat_priv->hna_local_hash, compare_orig,
				  choose_orig, hna_ptr);

		if (hna_local_entry)
			hna_local_del(bat_priv, hna_local_entry,
				      "global hna received");

		spin_unlock_bh(&bat_priv->hna_lhash_lock);

		hna_buff_count++;
	}

	/* initialize, and overwrite if malloc succeeds */
	orig_node->hna_buff = NULL;
	orig_node->hna_buff_len = 0;

	if (hna_buff_len > 0) {
		orig_node->hna_buff = kmalloc(hna_buff_len, GFP_ATOMIC);
		if (orig_node->hna_buff) {
			memcpy(orig_node->hna_buff, hna_buff, hna_buff_len);
			orig_node->hna_buff_len = hna_buff_len;
		}
	}
}

int hna_global_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->hna_global_hash;
	struct hna_global_entry *hna_global_entry;
	int i;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	size_t buf_size, pos;
	char *buff;

	if (!bat_priv->primary_if) {
		return seq_printf(seq, "BATMAN mesh %s disabled - "
				  "please specify interfaces to enable it\n",
				  net_dev->name);
	}

	seq_printf(seq, "Globally announced HNAs received via the mesh %s\n",
		   net_dev->name);

	spin_lock_bh(&bat_priv->hna_ghash_lock);

	buf_size = 1;
	/* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n" */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each(walk, head)
			buf_size += 43;
	}

	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		spin_unlock_bh(&bat_priv->hna_ghash_lock);
		return -ENOMEM;
	}
	buff[0] = '\0';
	pos = 0;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(bucket, walk, head, hlist) {
			hna_global_entry = bucket->data;

			pos += snprintf(buff + pos, 44,
					" * %pM via %pM\n",
					hna_global_entry->addr,
					hna_global_entry->orig_node->orig);
		}
	}

	spin_unlock_bh(&bat_priv->hna_ghash_lock);

	seq_printf(seq, "%s", buff);
	kfree(buff);
	return 0;
}

static void _hna_global_del_orig(struct bat_priv *bat_priv,
				 struct hna_global_entry *hna_global_entry,
				 char *message)
{
	bat_dbg(DBG_ROUTES, bat_priv,
		"Deleting global hna entry %pM (via %pM): %s\n",
		hna_global_entry->addr, hna_global_entry->orig_node->orig,
		message);

	hash_remove(bat_priv->hna_global_hash, compare_orig, choose_orig,
		    hna_global_entry->addr);
	kfree(hna_global_entry);
}

void hna_global_del_orig(struct bat_priv *bat_priv,
			 struct orig_node *orig_node, char *message)
{
	struct hna_global_entry *hna_global_entry;
	int hna_buff_count = 0;
	unsigned char *hna_ptr;

	if (orig_node->hna_buff_len == 0)
		return;

	spin_lock_bh(&bat_priv->hna_ghash_lock);

	while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
		hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
		hna_global_entry = (struct hna_global_entry *)
			hash_find(bat_priv->hna_global_hash, compare_orig,
				  choose_orig, hna_ptr);

		if ((hna_global_entry) &&
		    (hna_global_entry->orig_node == orig_node))
			_hna_global_del_orig(bat_priv, hna_global_entry,
					     message);

		hna_buff_count++;
	}

	spin_unlock_bh(&bat_priv->hna_ghash_lock);

	orig_node->hna_buff_len = 0;
	kfree(orig_node->hna_buff);
	orig_node->hna_buff = NULL;
}

/* free callback used by hash_delete() for global entries */
static void hna_global_del(void *data, void *arg)
{
	kfree(data);
}

void hna_global_free(struct bat_priv *bat_priv)
{
	if (!bat_priv->hna_global_hash)
		return;

	hash_delete(bat_priv->hna_global_hash, hna_global_del, NULL);
	bat_priv->hna_global_hash = NULL;
}

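/* transtable_search() - look up which originator announced the given MAC
 * address; returns NULL if the address is not in the global table.
 */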
struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
{
	struct hna_global_entry *hna_global_entry;

	spin_lock_bh(&bat_priv->hna_ghash_lock);
	hna_global_entry = (struct hna_global_entry *)
		hash_find(bat_priv->hna_global_hash,
			  compare_orig, choose_orig, addr);
	spin_unlock_bh(&bat_priv->hna_ghash_lock);

	if (!hna_global_entry)
		return NULL;

	return hna_global_entry->orig_node;
}