/* drivers/staging/batman-adv/translation-table.c */
/*
 * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
22 #include "main.h"
23 #include "translation-table.h"
24 #include "soft-interface.h"
25 #include "types.h"
26 #include "hash.h"
28 struct hashtable_t *hna_local_hash;
29 static struct hashtable_t *hna_global_hash;
30 atomic_t hna_local_changed;
32 DEFINE_SPINLOCK(hna_local_hash_lock);
33 static DEFINE_SPINLOCK(hna_global_hash_lock);
35 static void hna_local_purge(struct work_struct *work);
36 static DECLARE_DELAYED_WORK(hna_local_purge_wq, hna_local_purge);
37 static void _hna_global_del_orig(struct hna_global_entry *hna_global_entry,
38 char *message);
40 static void hna_local_start_timer(void)
42 queue_delayed_work(bat_event_workqueue, &hna_local_purge_wq, 10 * HZ);
45 int hna_local_init(void)
47 if (hna_local_hash)
48 return 1;
50 hna_local_hash = hash_new(128, compare_orig, choose_orig);
52 if (!hna_local_hash)
53 return 0;
55 atomic_set(&hna_local_changed, 0);
56 hna_local_start_timer();
58 return 1;
61 void hna_local_add(uint8_t *addr)
63 /* FIXME: each orig_node->batman_if will be attached to a softif */
64 struct bat_priv *bat_priv = netdev_priv(soft_device);
65 struct hna_local_entry *hna_local_entry;
66 struct hna_global_entry *hna_global_entry;
67 struct hashtable_t *swaphash;
68 unsigned long flags;
70 spin_lock_irqsave(&hna_local_hash_lock, flags);
71 hna_local_entry =
72 ((struct hna_local_entry *)hash_find(hna_local_hash, addr));
73 spin_unlock_irqrestore(&hna_local_hash_lock, flags);
75 if (hna_local_entry != NULL) {
76 hna_local_entry->last_seen = jiffies;
77 return;
80 /* only announce as many hosts as possible in the batman-packet and
81 space in batman_packet->num_hna That also should give a limit to
82 MAC-flooding. */
83 if ((num_hna + 1 > (ETH_DATA_LEN - BAT_PACKET_LEN) / ETH_ALEN) ||
84 (num_hna + 1 > 255)) {
85 bat_dbg(DBG_ROUTES, bat_priv,
86 "Can't add new local hna entry (%pM): "
87 "number of local hna entries exceeds packet size\n",
88 addr);
89 return;
92 bat_dbg(DBG_ROUTES, bat_priv,
93 "Creating new local hna entry: %pM\n", addr);
95 hna_local_entry = kmalloc(sizeof(struct hna_local_entry), GFP_ATOMIC);
96 if (!hna_local_entry)
97 return;
99 memcpy(hna_local_entry->addr, addr, ETH_ALEN);
100 hna_local_entry->last_seen = jiffies;
102 /* the batman interface mac address should never be purged */
103 if (compare_orig(addr, soft_device->dev_addr))
104 hna_local_entry->never_purge = 1;
105 else
106 hna_local_entry->never_purge = 0;
108 spin_lock_irqsave(&hna_local_hash_lock, flags);
110 hash_add(hna_local_hash, hna_local_entry);
111 num_hna++;
112 atomic_set(&hna_local_changed, 1);
114 if (hna_local_hash->elements * 4 > hna_local_hash->size) {
115 swaphash = hash_resize(hna_local_hash,
116 hna_local_hash->size * 2);
118 if (swaphash == NULL)
119 pr_err("Couldn't resize local hna hash table\n");
120 else
121 hna_local_hash = swaphash;
124 spin_unlock_irqrestore(&hna_local_hash_lock, flags);
126 /* remove address from global hash if present */
127 spin_lock_irqsave(&hna_global_hash_lock, flags);
129 hna_global_entry =
130 ((struct hna_global_entry *)hash_find(hna_global_hash, addr));
132 if (hna_global_entry != NULL)
133 _hna_global_del_orig(hna_global_entry, "local hna received");
135 spin_unlock_irqrestore(&hna_global_hash_lock, flags);
138 int hna_local_fill_buffer(unsigned char *buff, int buff_len)
140 struct hna_local_entry *hna_local_entry;
141 HASHIT(hashit);
142 int i = 0;
143 unsigned long flags;
145 spin_lock_irqsave(&hna_local_hash_lock, flags);
147 while (hash_iterate(hna_local_hash, &hashit)) {
149 if (buff_len < (i + 1) * ETH_ALEN)
150 break;
152 hna_local_entry = hashit.bucket->data;
153 memcpy(buff + (i * ETH_ALEN), hna_local_entry->addr, ETH_ALEN);
155 i++;
158 /* if we did not get all new local hnas see you next time ;-) */
159 if (i == num_hna)
160 atomic_set(&hna_local_changed, 0);
162 spin_unlock_irqrestore(&hna_local_hash_lock, flags);
164 return i;
167 int hna_local_seq_print_text(struct seq_file *seq, void *offset)
169 struct net_device *net_dev = (struct net_device *)seq->private;
170 struct bat_priv *bat_priv = netdev_priv(net_dev);
171 struct hna_local_entry *hna_local_entry;
172 HASHIT(hashit);
173 HASHIT(hashit_count);
174 unsigned long flags;
175 size_t buf_size, pos;
176 char *buff;
178 if (!bat_priv->primary_if) {
179 return seq_printf(seq, "BATMAN mesh %s disabled - "
180 "please specify interfaces to enable it\n",
181 net_dev->name);
184 seq_printf(seq, "Locally retrieved addresses (from %s) "
185 "announced via HNA:\n",
186 net_dev->name);
188 spin_lock_irqsave(&hna_local_hash_lock, flags);
190 buf_size = 1;
191 /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
192 while (hash_iterate(hna_local_hash, &hashit_count))
193 buf_size += 21;
195 buff = kmalloc(buf_size, GFP_ATOMIC);
196 if (!buff) {
197 spin_unlock_irqrestore(&hna_local_hash_lock, flags);
198 return -ENOMEM;
200 buff[0] = '\0';
201 pos = 0;
203 while (hash_iterate(hna_local_hash, &hashit)) {
204 hna_local_entry = hashit.bucket->data;
206 pos += snprintf(buff + pos, 22, " * %pM\n",
207 hna_local_entry->addr);
210 spin_unlock_irqrestore(&hna_local_hash_lock, flags);
212 seq_printf(seq, "%s", buff);
213 kfree(buff);
214 return 0;
217 static void _hna_local_del(void *data)
219 kfree(data);
220 num_hna--;
221 atomic_set(&hna_local_changed, 1);
224 static void hna_local_del(struct hna_local_entry *hna_local_entry,
225 char *message)
227 /* FIXME: each orig_node->batman_if will be attached to a softif */
228 struct bat_priv *bat_priv = netdev_priv(soft_device);
229 bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
230 hna_local_entry->addr, message);
232 hash_remove(hna_local_hash, hna_local_entry->addr);
233 _hna_local_del(hna_local_entry);
236 void hna_local_remove(uint8_t *addr, char *message)
238 struct hna_local_entry *hna_local_entry;
239 unsigned long flags;
241 spin_lock_irqsave(&hna_local_hash_lock, flags);
243 hna_local_entry = (struct hna_local_entry *)
244 hash_find(hna_local_hash, addr);
245 if (hna_local_entry)
246 hna_local_del(hna_local_entry, message);
248 spin_unlock_irqrestore(&hna_local_hash_lock, flags);
251 static void hna_local_purge(struct work_struct *work)
253 struct hna_local_entry *hna_local_entry;
254 HASHIT(hashit);
255 unsigned long flags;
256 unsigned long timeout;
258 spin_lock_irqsave(&hna_local_hash_lock, flags);
260 while (hash_iterate(hna_local_hash, &hashit)) {
261 hna_local_entry = hashit.bucket->data;
263 timeout = hna_local_entry->last_seen + LOCAL_HNA_TIMEOUT * HZ;
264 if ((!hna_local_entry->never_purge) &&
265 time_after(jiffies, timeout))
266 hna_local_del(hna_local_entry, "address timed out");
269 spin_unlock_irqrestore(&hna_local_hash_lock, flags);
270 hna_local_start_timer();
273 void hna_local_free(void)
275 if (!hna_local_hash)
276 return;
278 cancel_delayed_work_sync(&hna_local_purge_wq);
279 hash_delete(hna_local_hash, _hna_local_del);
280 hna_local_hash = NULL;
283 int hna_global_init(void)
285 if (hna_global_hash)
286 return 1;
288 hna_global_hash = hash_new(128, compare_orig, choose_orig);
290 if (!hna_global_hash)
291 return 0;
293 return 1;
296 void hna_global_add_orig(struct orig_node *orig_node,
297 unsigned char *hna_buff, int hna_buff_len)
299 /* FIXME: each orig_node->batman_if will be attached to a softif */
300 struct bat_priv *bat_priv = netdev_priv(soft_device);
301 struct hna_global_entry *hna_global_entry;
302 struct hna_local_entry *hna_local_entry;
303 struct hashtable_t *swaphash;
304 int hna_buff_count = 0;
305 unsigned long flags;
306 unsigned char *hna_ptr;
308 while ((hna_buff_count + 1) * ETH_ALEN <= hna_buff_len) {
309 spin_lock_irqsave(&hna_global_hash_lock, flags);
311 hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
312 hna_global_entry = (struct hna_global_entry *)
313 hash_find(hna_global_hash, hna_ptr);
315 if (hna_global_entry == NULL) {
316 spin_unlock_irqrestore(&hna_global_hash_lock, flags);
318 hna_global_entry =
319 kmalloc(sizeof(struct hna_global_entry),
320 GFP_ATOMIC);
322 if (!hna_global_entry)
323 break;
325 memcpy(hna_global_entry->addr, hna_ptr, ETH_ALEN);
327 bat_dbg(DBG_ROUTES, bat_priv,
328 "Creating new global hna entry: "
329 "%pM (via %pM)\n",
330 hna_global_entry->addr, orig_node->orig);
332 spin_lock_irqsave(&hna_global_hash_lock, flags);
333 hash_add(hna_global_hash, hna_global_entry);
337 hna_global_entry->orig_node = orig_node;
338 spin_unlock_irqrestore(&hna_global_hash_lock, flags);
340 /* remove address from local hash if present */
341 spin_lock_irqsave(&hna_local_hash_lock, flags);
343 hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
344 hna_local_entry = (struct hna_local_entry *)
345 hash_find(hna_local_hash, hna_ptr);
347 if (hna_local_entry != NULL)
348 hna_local_del(hna_local_entry, "global hna received");
350 spin_unlock_irqrestore(&hna_local_hash_lock, flags);
352 hna_buff_count++;
355 /* initialize, and overwrite if malloc succeeds */
356 orig_node->hna_buff = NULL;
357 orig_node->hna_buff_len = 0;
359 if (hna_buff_len > 0) {
360 orig_node->hna_buff = kmalloc(hna_buff_len, GFP_ATOMIC);
361 if (orig_node->hna_buff) {
362 memcpy(orig_node->hna_buff, hna_buff, hna_buff_len);
363 orig_node->hna_buff_len = hna_buff_len;
367 spin_lock_irqsave(&hna_global_hash_lock, flags);
369 if (hna_global_hash->elements * 4 > hna_global_hash->size) {
370 swaphash = hash_resize(hna_global_hash,
371 hna_global_hash->size * 2);
373 if (swaphash == NULL)
374 pr_err("Couldn't resize global hna hash table\n");
375 else
376 hna_global_hash = swaphash;
379 spin_unlock_irqrestore(&hna_global_hash_lock, flags);
382 int hna_global_seq_print_text(struct seq_file *seq, void *offset)
384 struct net_device *net_dev = (struct net_device *)seq->private;
385 struct bat_priv *bat_priv = netdev_priv(net_dev);
386 struct hna_global_entry *hna_global_entry;
387 HASHIT(hashit);
388 HASHIT(hashit_count);
389 unsigned long flags;
390 size_t buf_size, pos;
391 char *buff;
393 if (!bat_priv->primary_if) {
394 return seq_printf(seq, "BATMAN mesh %s disabled - "
395 "please specify interfaces to enable it\n",
396 net_dev->name);
399 seq_printf(seq, "Globally announced HNAs received via the mesh %s\n",
400 net_dev->name);
402 spin_lock_irqsave(&hna_global_hash_lock, flags);
404 buf_size = 1;
405 /* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
406 while (hash_iterate(hna_global_hash, &hashit_count))
407 buf_size += 43;
409 buff = kmalloc(buf_size, GFP_ATOMIC);
410 if (!buff) {
411 spin_unlock_irqrestore(&hna_global_hash_lock, flags);
412 return -ENOMEM;
414 buff[0] = '\0';
415 pos = 0;
417 while (hash_iterate(hna_global_hash, &hashit)) {
418 hna_global_entry = hashit.bucket->data;
420 pos += snprintf(buff + pos, 44,
421 " * %pM via %pM\n", hna_global_entry->addr,
422 hna_global_entry->orig_node->orig);
425 spin_unlock_irqrestore(&hna_global_hash_lock, flags);
427 seq_printf(seq, "%s", buff);
428 kfree(buff);
429 return 0;
432 static void _hna_global_del_orig(struct hna_global_entry *hna_global_entry,
433 char *message)
435 /* FIXME: each orig_node->batman_if will be attached to a softif */
436 struct bat_priv *bat_priv = netdev_priv(soft_device);
437 bat_dbg(DBG_ROUTES, bat_priv,
438 "Deleting global hna entry %pM (via %pM): %s\n",
439 hna_global_entry->addr, hna_global_entry->orig_node->orig,
440 message);
442 hash_remove(hna_global_hash, hna_global_entry->addr);
443 kfree(hna_global_entry);
446 void hna_global_del_orig(struct orig_node *orig_node, char *message)
448 struct hna_global_entry *hna_global_entry;
449 int hna_buff_count = 0;
450 unsigned long flags;
451 unsigned char *hna_ptr;
453 if (orig_node->hna_buff_len == 0)
454 return;
456 spin_lock_irqsave(&hna_global_hash_lock, flags);
458 while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
459 hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
460 hna_global_entry = (struct hna_global_entry *)
461 hash_find(hna_global_hash, hna_ptr);
463 if ((hna_global_entry != NULL) &&
464 (hna_global_entry->orig_node == orig_node))
465 _hna_global_del_orig(hna_global_entry, message);
467 hna_buff_count++;
470 spin_unlock_irqrestore(&hna_global_hash_lock, flags);
472 orig_node->hna_buff_len = 0;
473 kfree(orig_node->hna_buff);
474 orig_node->hna_buff = NULL;
/* hash_delete() callback: free one global entry */
static void hna_global_del(void *data)
{
	kfree(data);
}
482 void hna_global_free(void)
484 if (!hna_global_hash)
485 return;
487 hash_delete(hna_global_hash, hna_global_del);
488 hna_global_hash = NULL;
491 struct orig_node *transtable_search(uint8_t *addr)
493 struct hna_global_entry *hna_global_entry;
494 unsigned long flags;
496 spin_lock_irqsave(&hna_global_hash_lock, flags);
497 hna_global_entry = (struct hna_global_entry *)
498 hash_find(hna_global_hash, addr);
499 spin_unlock_irqrestore(&hna_global_hash_lock, flags);
501 if (hna_global_entry == NULL)
502 return NULL;
504 return hna_global_entry->orig_node;