net/batman-adv/translation-table.c

/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "hash.h"
#include "originator.h"

static void tt_local_purge(struct work_struct *work);
static void _tt_global_del_orig(struct bat_priv *bat_priv,
				struct tt_global_entry *tt_global_entry,
				char *message);

/* returns 1 if they are the same mac addr */
static int compare_ltt(struct hlist_node *node, void *data2)
{
	void *data1 = container_of(node, struct tt_local_entry, hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

/* returns 1 if they are the same mac addr */
static int compare_gtt(struct hlist_node *node, void *data2)
{
	void *data1 = container_of(node, struct tt_global_entry, hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

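/* schedule tt_local_purge() to run in 10 seconds; the purge handler
 * calls this function again when it finishes in order to re-arm itself
 */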
static void tt_local_start_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->tt_work, tt_local_purge);
	queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work, 10 * HZ);
}

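/* look up the local translation table entry for the MAC address in 'data';
 * returns NULL if the address is not announced locally
 */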
static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
						 void *data)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_local_entry *tt_local_entry, *tt_local_entry_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	index = choose_orig(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) {
		if (!compare_eth(tt_local_entry, data))
			continue;

		tt_local_entry_tmp = tt_local_entry;
		break;
	}
	rcu_read_unlock();

	return tt_local_entry_tmp;
}

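/* look up the global translation table entry for the MAC address in 'data';
 * returns NULL if no originator announces this address
 */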
static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
						   void *data)
{
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_global_entry *tt_global_entry;
	struct tt_global_entry *tt_global_entry_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	index = choose_orig(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) {
		if (!compare_eth(tt_global_entry, data))
			continue;

		tt_global_entry_tmp = tt_global_entry;
		break;
	}
	rcu_read_unlock();

	return tt_global_entry_tmp;
}

int tt_local_init(struct bat_priv *bat_priv)
{
	if (bat_priv->tt_local_hash)
		return 1;

	bat_priv->tt_local_hash = hash_new(1024);

	if (!bat_priv->tt_local_hash)
		return 0;

	atomic_set(&bat_priv->tt_local_changed, 0);
	tt_local_start_timer(bat_priv);

	return 1;
}

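/* register a MAC address seen on the soft-interface as a local client:
 * refresh last_seen for already known entries, otherwise create a new entry
 * (as long as it still fits into a batman packet) and drop a matching
 * global entry if one exists
 */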
void tt_local_add(struct net_device *soft_iface, uint8_t *addr)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct tt_local_entry *tt_local_entry;
	struct tt_global_entry *tt_global_entry;
	int required_bytes;

	spin_lock_bh(&bat_priv->tt_lhash_lock);
	tt_local_entry = tt_local_hash_find(bat_priv, addr);
	spin_unlock_bh(&bat_priv->tt_lhash_lock);

	if (tt_local_entry) {
		tt_local_entry->last_seen = jiffies;
		return;
	}

	/* only announce as many hosts as fit into the batman-packet and
	   into batman_packet->num_tt; that should also put a limit on
	   MAC-flooding */
	required_bytes = (bat_priv->num_local_tt + 1) * ETH_ALEN;
	required_bytes += BAT_PACKET_LEN;

	if ((required_bytes > ETH_DATA_LEN) ||
	    (atomic_read(&bat_priv->aggregated_ogms) &&
	     required_bytes > MAX_AGGREGATION_BYTES) ||
	    (bat_priv->num_local_tt + 1 > 255)) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Can't add new local tt entry (%pM): "
			"number of local tt entries exceeds packet size\n",
			addr);
		return;
	}

	bat_dbg(DBG_ROUTES, bat_priv,
		"Creating new local tt entry: %pM\n", addr);

	tt_local_entry = kmalloc(sizeof(struct tt_local_entry), GFP_ATOMIC);
	if (!tt_local_entry)
		return;

	memcpy(tt_local_entry->addr, addr, ETH_ALEN);
	tt_local_entry->last_seen = jiffies;

	/* the batman interface mac address should never be purged */
	if (compare_eth(addr, soft_iface->dev_addr))
		tt_local_entry->never_purge = 1;
	else
		tt_local_entry->never_purge = 0;

	spin_lock_bh(&bat_priv->tt_lhash_lock);

	hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig,
		 tt_local_entry, &tt_local_entry->hash_entry);
	bat_priv->num_local_tt++;
	atomic_set(&bat_priv->tt_local_changed, 1);

	spin_unlock_bh(&bat_priv->tt_lhash_lock);

	/* remove address from global hash if present */
	spin_lock_bh(&bat_priv->tt_ghash_lock);

	tt_global_entry = tt_global_hash_find(bat_priv, addr);

	if (tt_global_entry)
		_tt_global_del_orig(bat_priv, tt_global_entry,
				    "local tt received");

	spin_unlock_bh(&bat_priv->tt_ghash_lock);
}

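/* copy as many local client addresses as fit into 'buff' (at most buff_len
 * bytes) for transmission with the next OGM; returns the number of addresses
 * written and clears the "changed" flag once every entry could be announced
 */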
int tt_local_fill_buffer(struct bat_priv *bat_priv,
			 unsigned char *buff, int buff_len)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node;
	struct hlist_head *head;
	int i, count = 0;

	spin_lock_bh(&bat_priv->tt_lhash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_local_entry, node,
					 head, hash_entry) {
			if (buff_len < (count + 1) * ETH_ALEN)
				break;

			memcpy(buff + (count * ETH_ALEN), tt_local_entry->addr,
			       ETH_ALEN);

			count++;
		}
		rcu_read_unlock();
	}

	/* if we did not get all new local tts see you next time ;-) */
	if (count == bat_priv->num_local_tt)
		atomic_set(&bat_priv->tt_local_changed, 0);

	spin_unlock_bh(&bat_priv->tt_lhash_lock);
	return count;
}

int tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	size_t buf_size, pos;
	char *buff;
	int i, ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq, "Locally retrieved addresses (from %s) "
		   "announced via TT:\n",
		   net_dev->name);

	spin_lock_bh(&bat_priv->tt_lhash_lock);

	buf_size = 1;
	/* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		__hlist_for_each_rcu(node, head)
			buf_size += 21;
		rcu_read_unlock();
	}

	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		spin_unlock_bh(&bat_priv->tt_lhash_lock);
		ret = -ENOMEM;
		goto out;
	}

	buff[0] = '\0';
	pos = 0;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_local_entry, node,
					 head, hash_entry) {
			pos += snprintf(buff + pos, 22, " * %pM\n",
					tt_local_entry->addr);
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->tt_lhash_lock);

	seq_printf(seq, "%s", buff);
	kfree(buff);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}

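/* free a local entry and update the entry count and "changed" flag; used
 * directly by tt_local_del() and as the free callback of hash_delete()
 */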
static void _tt_local_del(struct hlist_node *node, void *arg)
{
	struct bat_priv *bat_priv = (struct bat_priv *)arg;
	void *data = container_of(node, struct tt_local_entry, hash_entry);

	kfree(data);
	bat_priv->num_local_tt--;
	atomic_set(&bat_priv->tt_local_changed, 1);
}

static void tt_local_del(struct bat_priv *bat_priv,
			 struct tt_local_entry *tt_local_entry,
			 char *message)
{
	bat_dbg(DBG_ROUTES, bat_priv, "Deleting local tt entry (%pM): %s\n",
		tt_local_entry->addr, message);

	hash_remove(bat_priv->tt_local_hash, compare_ltt, choose_orig,
		    tt_local_entry->addr);
	_tt_local_del(&tt_local_entry->hash_entry, bat_priv);
}

void tt_local_remove(struct bat_priv *bat_priv,
		     uint8_t *addr, char *message)
{
	struct tt_local_entry *tt_local_entry;

	spin_lock_bh(&bat_priv->tt_lhash_lock);

	tt_local_entry = tt_local_hash_find(bat_priv, addr);

	if (tt_local_entry)
		tt_local_del(bat_priv, tt_local_entry, message);

	spin_unlock_bh(&bat_priv->tt_lhash_lock);
}

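/* delayed work callback: remove local entries that have not been seen for
 * TT_LOCAL_TIMEOUT seconds (except never_purge entries) and re-arm the timer
 */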
static void tt_local_purge(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, tt_work);
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	unsigned long timeout;
	int i;

	spin_lock_bh(&bat_priv->tt_lhash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
					  head, hash_entry) {
			if (tt_local_entry->never_purge)
				continue;

			timeout = tt_local_entry->last_seen;
			timeout += TT_LOCAL_TIMEOUT * HZ;

			if (time_before(jiffies, timeout))
				continue;

			tt_local_del(bat_priv, tt_local_entry,
				     "address timed out");
		}
	}

	spin_unlock_bh(&bat_priv->tt_lhash_lock);
	tt_local_start_timer(bat_priv);
}

void tt_local_free(struct bat_priv *bat_priv)
{
	if (!bat_priv->tt_local_hash)
		return;

	cancel_delayed_work_sync(&bat_priv->tt_work);
	hash_delete(bat_priv->tt_local_hash, _tt_local_del, bat_priv);
	bat_priv->tt_local_hash = NULL;
}

int tt_global_init(struct bat_priv *bat_priv)
{
	if (bat_priv->tt_global_hash)
		return 1;

	bat_priv->tt_global_hash = hash_new(1024);

	if (!bat_priv->tt_global_hash)
		return 0;

	return 1;
}

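/* process the TT announcement buffer received from orig_node: create (or
 * re-point) a global entry for every announced MAC address, drop matching
 * local entries and keep a copy of the buffer in orig_node for later cleanup
 */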
void tt_global_add_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node,
			unsigned char *tt_buff, int tt_buff_len)
{
	struct tt_global_entry *tt_global_entry;
	struct tt_local_entry *tt_local_entry;
	int tt_buff_count = 0;
	unsigned char *tt_ptr;

	while ((tt_buff_count + 1) * ETH_ALEN <= tt_buff_len) {
		spin_lock_bh(&bat_priv->tt_ghash_lock);

		tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN);
		tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr);

		if (!tt_global_entry) {
			spin_unlock_bh(&bat_priv->tt_ghash_lock);

			tt_global_entry =
				kmalloc(sizeof(struct tt_global_entry),
					GFP_ATOMIC);

			if (!tt_global_entry)
				break;

			memcpy(tt_global_entry->addr, tt_ptr, ETH_ALEN);

			bat_dbg(DBG_ROUTES, bat_priv,
				"Creating new global tt entry: "
				"%pM (via %pM)\n",
				tt_global_entry->addr, orig_node->orig);

			spin_lock_bh(&bat_priv->tt_ghash_lock);
			hash_add(bat_priv->tt_global_hash, compare_gtt,
				 choose_orig, tt_global_entry,
				 &tt_global_entry->hash_entry);
		}

		tt_global_entry->orig_node = orig_node;
		spin_unlock_bh(&bat_priv->tt_ghash_lock);

		/* remove address from local hash if present */
		spin_lock_bh(&bat_priv->tt_lhash_lock);

		tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN);
		tt_local_entry = tt_local_hash_find(bat_priv, tt_ptr);

		if (tt_local_entry)
			tt_local_del(bat_priv, tt_local_entry,
				     "global tt received");

		spin_unlock_bh(&bat_priv->tt_lhash_lock);

		tt_buff_count++;
	}

	/* initialize, and overwrite if malloc succeeds */
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;

	if (tt_buff_len > 0) {
		orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
		if (orig_node->tt_buff) {
			memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
			orig_node->tt_buff_len = tt_buff_len;
		}
	}
}

int tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_global_entry *tt_global_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	size_t buf_size, pos;
	char *buff;
	int i, ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - please "
				 "specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Globally announced TT entries received via the mesh %s\n",
		   net_dev->name);

	spin_lock_bh(&bat_priv->tt_ghash_lock);

	buf_size = 1;
	/* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		__hlist_for_each_rcu(node, head)
			buf_size += 43;
		rcu_read_unlock();
	}

	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		spin_unlock_bh(&bat_priv->tt_ghash_lock);
		ret = -ENOMEM;
		goto out;
	}

	buff[0] = '\0';
	pos = 0;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_global_entry, node,
					 head, hash_entry) {
			pos += snprintf(buff + pos, 44,
					" * %pM via %pM\n",
					tt_global_entry->addr,
					tt_global_entry->orig_node->orig);
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->tt_ghash_lock);

	seq_printf(seq, "%s", buff);
	kfree(buff);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}

static void _tt_global_del_orig(struct bat_priv *bat_priv,
				struct tt_global_entry *tt_global_entry,
				char *message)
{
	bat_dbg(DBG_ROUTES, bat_priv,
		"Deleting global tt entry %pM (via %pM): %s\n",
		tt_global_entry->addr, tt_global_entry->orig_node->orig,
		message);

	hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig,
		    tt_global_entry->addr);
	kfree(tt_global_entry);
}

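/* remove every global entry that orig_node announced in its last TT buffer
 * and release the buffer copy stored in orig_node
 */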
void tt_global_del_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node, char *message)
{
	struct tt_global_entry *tt_global_entry;
	int tt_buff_count = 0;
	unsigned char *tt_ptr;

	if (orig_node->tt_buff_len == 0)
		return;

	spin_lock_bh(&bat_priv->tt_ghash_lock);

	while ((tt_buff_count + 1) * ETH_ALEN <= orig_node->tt_buff_len) {
		tt_ptr = orig_node->tt_buff + (tt_buff_count * ETH_ALEN);
		tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr);

		if ((tt_global_entry) &&
		    (tt_global_entry->orig_node == orig_node))
			_tt_global_del_orig(bat_priv, tt_global_entry,
					    message);

		tt_buff_count++;
	}

	spin_unlock_bh(&bat_priv->tt_ghash_lock);

	orig_node->tt_buff_len = 0;
	kfree(orig_node->tt_buff);
	orig_node->tt_buff = NULL;
}

static void tt_global_del(struct hlist_node *node, void *arg)
{
	void *data = container_of(node, struct tt_global_entry, hash_entry);

	kfree(data);
}

void tt_global_free(struct bat_priv *bat_priv)
{
	if (!bat_priv->tt_global_hash)
		return;

	hash_delete(bat_priv->tt_global_hash, tt_global_del, NULL);
	bat_priv->tt_global_hash = NULL;
}

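/* find the originator that announced the given MAC address; a reference on
 * the returned orig_node is taken and must be released by the caller
 */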
struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
{
	struct tt_global_entry *tt_global_entry;
	struct orig_node *orig_node = NULL;

	spin_lock_bh(&bat_priv->tt_ghash_lock);
	tt_global_entry = tt_global_hash_find(bat_priv, addr);

	if (!tt_global_entry)
		goto out;

	if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
		goto out;

	orig_node = tt_global_entry->orig_node;

out:
	spin_unlock_bh(&bat_priv->tt_ghash_lock);
	return orig_node;
}