/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"
/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2
#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))
struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};
static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;
/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups).  Adding or removing nodes requires we take the read
 * lock or we risk operating on an old table.  The write lock is only needed
 * when modifying the number of buckets in a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);
static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}
static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}
/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *	for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, p, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
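/*
 * Illustrative usage sketch (not part of the original file): a reader
 * first loads the table pointer into a local variable, precisely because
 * "tbl" is expanded twice by the macro above:
 *
 *	rcu_read_lock();
 *	tbl = rcu_dereference(mesh_paths);
 *	for_each_mesh_entry(tbl, p, node, i) {
 *		mpath = node->mpath;
 *		... inspect mpath under RCU ...
 *	}
 *	rcu_read_unlock();
 */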
static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);

	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries,  0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}
static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}
static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, p, q,
					  tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}
static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}
static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
			& tbl->hash_mask;
}
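/*
 * Worked example (illustrative, not from the original file): for a
 * destination of 00:11:22:33:44:55, the u32 loaded from (addr + 2)
 * covers the last four octets 22:33:44:55; jhash_2words() mixes that
 * word with the interface index and the per-table random seed, and the
 * final mask folds the hash into a valid bucket index.
 */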
/**
 *
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}
/**
 *
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	BUG_ON(gate_mpath == from_mpath);
	BUG_ON(!gate_mpath->next_hop);

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
				  MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}
static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst)) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}
/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}
struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}
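/*
 * Hedged usage sketch (not from the original file): a caller must keep
 * the RCU read section open for as long as it uses the returned path:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		... use mpath ...
 *	rcu_read_unlock();
 */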
/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}
/**
 * mesh_path_add_gate - add the given mpath to a mesh gate to our path table
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	struct hlist_node *n;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);

	hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	rcu_read_unlock();
	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	return 0;
err_rcu:
	rcu_read_unlock();
	return err;
}
/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Returns: 0 on success
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *p, *q;

	hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			spin_lock_bh(&tbl->gates_lock);
			hlist_del_rcu(&gate->list);
			kfree_rcu(gate, rcu);
			spin_unlock_bh(&tbl->gates_lock);
			mpath->sdata->u.mesh.num_gates--;
			mpath->is_gate = false;
			mpath_dbg(mpath->sdata,
				  "Mesh path: Deleted gate: %pM. %d known gates\n",
				  mpath->dst, mpath->sdata->u.mesh.num_gates);
			break;
		}

	return 0;
}
/**
 * mesh_gate_num - number of gates known to this interface
 *
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}
/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}
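/*
 * Hedged usage sketch (not from the original file): a typical caller
 * creates the path, then re-looks it up under RCU before resolving it:
 *
 *	if (mesh_path_add(dst, sdata) == 0) {
 *		rcu_read_lock();
 *		mpath = mesh_path_lookup(dst, sdata);
 *		if (mpath)
 *			... start path discovery on mpath ...
 *		rcu_read_unlock();
 *	}
 */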
static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}
void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}
void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}
/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;
	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
					   mpath->dst, cpu_to_le32(mpath->sn),
					   reason, bcast, sdata);
		}
	}
	rcu_read_unlock();
}
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}
/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;

	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}
/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta) {
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}
static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}
/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal paths.
 *
 * @sdata: interface data to match
 *
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}
/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(addr, mpath->dst)) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}
/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}
/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames
 * are copied from each gate to the next.  After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct hlist_node *n;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, n, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding %p (flags %#x)\n",
				  gate->mpath, gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, n, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}
/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(skb, mpath->sdata);
}
/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must be called holding mpath->state_lock
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}
static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}
int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);


	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}
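/*
 * Hedged pairing sketch (not from the original file): the subsystem is
 * expected to call mesh_pathtbl_init() once at registration time and
 * mesh_pathtbl_unregister() on teardown:
 *
 *	if (mesh_pathtbl_init())
 *		return -ENOMEM;
 *	...
 *	mesh_pathtbl_unregister();
 */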
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}
void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}