/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;
int mpp_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups). Adding or removing nodes requires we take the read
 * lock or we risk operating on an old table. The write lock is only needed
 * when modifying the number of buckets in a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 * for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list)
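
/*
 * Typical usage (illustrative sketch, not part of the original file):
 * dereference the table once, then walk it inside an RCU read section,
 * as the iteration users below do:
 *
 *	rcu_read_lock();
 *	tbl = rcu_dereference(mesh_paths);
 *	for_each_mesh_entry(tbl, node, i) {
 *		mpath = node->mpath;
 *		...
 *	}
 *	rcu_read_unlock();
 */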

static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);

	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries, 0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, q,
					  tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}

static u32 mesh_table_hash(const u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex,
			    tbl->hash_rnd) & tbl->hash_mask;
}
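
/*
 * Worked example (illustrative only): for addr == 00:11:22:33:44:55 the
 * 32-bit word fed to jhash_2words() is built from bytes 22:33:44:55, so a
 * given destination always lands in the same bucket of a given table, while
 * hash_rnd (freshly randomized in mesh_table_alloc() for every new table)
 * makes bucket placement differ from one table generation to the next.
 */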

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath. The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue. When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
		    MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst)) {
			if (mpath_expired(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}

struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}
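
/*
 * Example caller (illustrative only) -- both lookups must run inside an RCU
 * read section, and the returned pointer is only valid within it:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(sdata, dst);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		sta = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 */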

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (mpath_expired(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

/**
 * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the proxy path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	struct mesh_table *tbl = rcu_dereference(mpp_paths);
	struct mpath_node *node;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx)
			return node->mpath;
	}

	return NULL;
}

/**
 * mesh_path_add_gate - add the given mpath to the list of known mesh gates
 * @mpath: gate path to add to the table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);

	hlist_for_each_entry_rcu(gate, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}
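
/*
 * Caller sketch (illustrative only): when a root announcement advertising
 * gate capability is accepted, e.g. in the HWMP code, the path to the root
 * can be flagged roughly like this:
 *
 *	mpath = mesh_path_lookup(sdata, root_addr);
 *	if (mpath)
 *		mesh_path_add_gate(mpath);
 */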

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *q;

	hlist_for_each_entry_safe(gate, q, tbl->known_gates, list) {
		if (gate->mpath != mpath)
			continue;
		spin_lock_bh(&tbl->gates_lock);
		hlist_del_rcu(&gate->list);
		kfree_rcu(gate, rcu);
		spin_unlock_bh(&tbl->gates_lock);
		mpath->sdata->u.mesh.num_gates--;
		mpath->is_gate = false;
		mpath_dbg(mpath->sdata,
			  "Mesh path: Deleted gate: %pM. %d known gates\n",
			  mpath->dst, mpath->sdata->u.mesh.num_gates);
		break;
	}
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: the new (or already existing) mesh path on success, or an
 * ERR_PTR() on failure
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	int grow = 0;
	int err;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-ENOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-ENOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto found;
	}

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	mpath = new_mpath;
found:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return mpath;

err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return ERR_PTR(err);
}
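
/*
 * Example caller (illustrative only) -- on failure the return value is an
 * ERR_PTR(), never NULL:
 *
 *	mpath = mesh_path_add(sdata, dst);
 *	if (IS_ERR(mpath))
 *		return PTR_ERR(mpath);
 *
 * Both the bucket lock and the resize lock are already dropped by the time
 * the caller sees mpath.
 */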

static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}
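
/*
 * Dispatch sketch (illustrative only): both grow functions run from the
 * interface work item after mesh_path_add()/mpp_path_add() set the
 * corresponding work flag, roughly:
 *
 *	if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags))
 *		mesh_mpath_table_grow();
 *	if (test_and_clear_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags))
 *		mesh_mpp_table_grow();
 */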

int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);

	mpp_paths_generation++;

	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}
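
/*
 * Caller sketch (illustrative only): proxy entries are typically learned on
 * the mesh rx path when a frame carries a 6-address header, roughly:
 *
 *	rcu_read_lock();
 *	mppath = mpp_path_lookup(sdata, proxied_addr);
 *	if (!mppath)
 *		mpp_path_add(sdata, proxied_addr, mpp_addr);
 *	rcu_read_unlock();
 */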

/**
 * mesh_plink_broken - deactivates paths and sends PERR when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (rcu_access_pointer(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
				sdata->u.mesh.mshcfg.element_ttl,
				mpath->dst, mpath->sn,
				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
		}
	}
	rcu_read_unlock();
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;

	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in an RCU read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (rcu_access_pointer(mpath->next_hop) == sta) {
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal paths.
 *
 * @sdata: interface data to match
 *
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(addr, mpath->dst)) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue. If there is more than one gate, the frames are
 * copied from each gate to the next. After frames are copied, the mpath queues
 * are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding %p (flags %#x)\n",
				  gate->mpath, gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
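
/*
 * Caller sketch (illustrative only): when path discovery to mpath->dst has
 * failed, pending frames can be handed to the known gates instead of being
 * dropped, roughly as the HWMP discovery timeout does:
 *
 *	if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
 *		if (mesh_path_send_to_gates(mpath))
 *			mesh_path_flush_pending(mpath);
 *	} else {
 *		mesh_path_flush_pending(mpath);
 *	}
 */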

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself, so it must NOT be
 * called with that lock already held
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
		       &newtbl->hash_buckets[hash_idx]);
	return 0;
}

int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);

	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->sdata, mpath->dst);
	}
	rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}