/*
 * Copyright (c) 2008 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "mesh.h"
/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2
#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))
struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table *mesh_paths;
/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. When reading the table (i.e. doing lookups) we are well protected
 * by RCU.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);
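/*
 * A minimal sketch of the intended locking discipline (hypothetical callers,
 * not code from this file):
 *
 *	lookup:		rcu_read_lock() only
 *	add/delete:	read_lock(&pathtbl_resize_lock), then the per-bucket
 *			mesh_paths->hashwlock[idx] spinlock
 *	grow:		write_lock(&pathtbl_resize_lock), publish the new
 *			table with rcu_assign_pointer(), synchronize_rcu(),
 *			then free the old table
 */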
/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	rcu_assign_pointer(mpath->next_hop, sta);
}
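/*
 * Typical (hypothetical) caller sketch, following the documented locking:
 *
 *	spin_lock_bh(&mpath->state_lock);
 *	mesh_path_assign_nexthop(mpath, sta);
 *	spin_unlock_bh(&mpath->state_lock);
 *
 * rcu_assign_pointer() above inserts the write barrier that lets lockless
 * readers dereference mpath->next_hop safely under RCU.
 */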
/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @dev: local interface
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mesh_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, dev, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->dev == dev &&
				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}
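/*
 * Note the double MPATH_EXPIRED() test above: the first check runs lockless
 * under RCU and may race with a concurrent update, so the ACTIVE flag is
 * only cleared after re-checking with mpath->state_lock held.
 */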
/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @dev: local interface, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev)
{
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (dev && node->mpath->dev != dev)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				if (MPATH_EXPIRED(node->mpath))
					node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}
/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 *
 * @dst: destination address of the path (ETH_ALEN length)
 * @dev: local interface
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, dev->dev_addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
	if (!new_node)
		goto err_node_alloc;

	read_lock(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->dev = dev;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);
	hash_idx = mesh_table_hash(dst, dev, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock(&mesh_paths->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
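	/*
	 * Grow heuristic: resize once the table averages more than
	 * mean_chain_len entries per bucket. The actual grow happens
	 * below, after the bucket lock and the resize read lock are
	 * dropped.
	 */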
	if (atomic_inc_return(&mesh_paths->entries) >=
		mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
		grow = 1;

	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	if (grow) {
		struct mesh_table *oldtbl, *newtbl;

		write_lock(&pathtbl_resize_lock);
		oldtbl = mesh_paths;
		newtbl = mesh_table_grow(mesh_paths);
		if (!newtbl) {
			write_unlock(&pathtbl_resize_lock);
			return 0;
		}
		rcu_assign_pointer(mesh_paths, newtbl);
		write_unlock(&pathtbl_resize_lock);
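		/*
		 * Wait out all readers still walking the old table, then
		 * free it. free_leafs is false: the new table references
		 * the same mesh_path leaves through its own mpath_node
		 * wrappers (see mesh_path_node_copy()).
		 */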
		synchronize_rcu();
		mesh_table_free(oldtbl, false);
	}
	return 0;
err_exists:
	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.sta.mpaths);
	return err;
}
/**
 * mesh_plink_broken - deactivate paths and send PERR when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct net_device *dev = sta->sdata->dev;
	int i;

	rcu_read_lock();
	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if (mpath->next_hop == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->dsn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(mpath->dst,
					cpu_to_le32(mpath->dsn),
					dev->broadcast, dev);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}
/**
 * mesh_path_flush_by_nexthop - delete mesh paths whose next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->next_hop == sta)
			mesh_path_del(mpath->dst, mpath->dev);
	}
}
void mesh_path_flush(struct net_device *dev)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->dev == dev)
			mesh_path_del(mpath->dst, mpath->dev);
	}
}
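/*
 * RCU callback for mesh_path_del(): runs once no reader can still hold a
 * reference to the unlinked node, so the timer can be stopped and both the
 * node and its mesh_path leaf freed safely.
 */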
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata =
		IEEE80211_DEV_TO_SUB_IF(node->mpath->dev);

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.sta.mpaths);
	kfree(node->mpath);
	kfree(node);
}
/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @dev: local interface
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct net_device *dev)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock(&pathtbl_resize_lock);
	hash_idx = mesh_table_hash(addr, dev, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock(&mesh_paths->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->dev == dev &&
				memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
			spin_lock_bh(&mpath->state_lock);
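			/*
			 * Presumably flagged RESOLVING so that concurrent
			 * RCU readers that still hold the path treat it as
			 * unresolved and queue frames rather than use a
			 * stale next hop; the memory is freed only after a
			 * grace period, in mesh_path_node_reclaim().
			 */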
			mpath->flags |= MESH_PATH_RESOLVING;
			hlist_del_rcu(&node->list);
			call_rcu(&node->rcu, mesh_path_node_reclaim);
			atomic_dec(&mesh_paths->entries);
			spin_unlock_bh(&mpath->state_lock);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	return err;
}
/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) &&
			(mpath->flags & MESH_PATH_ACTIVE))
		dev_queue_xmit(skb);
}
/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @dev: network device the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor.
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct mesh_path *mpath;
	u32 dsn = 0;
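	/*
	 * addr4 carries the mesh source address in a 4-address frame; if it
	 * is not ours, we were forwarding on behalf of another MP, so the
	 * failure is reported back to the precursor (addr2, the transmitter)
	 * with a PERR carrying the destination's dsn.
	 */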
	if (memcmp(hdr->addr4, dev->dev_addr, ETH_ALEN) != 0) {
		u8 *ra, *da;

		da = hdr->addr3;
		ra = hdr->addr2;
		mpath = mesh_path_lookup(da, dev);
		if (mpath)
			dsn = ++mpath->dsn;
		mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, dev);
	}

	kfree_skb(skb);
	sdata->u.sta.mshstats.dropped_frames_no_route++;
}
/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) &&
			(mpath->flags & MESH_PATH_ACTIVE))
		mesh_path_discard_frame(skb, mpath->dev);
}
/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself, so it must be
 * called without that lock held
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->dsn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}
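/*
 * Once MESH_PATH_FIXED is set, the path is skipped both by MPATH_EXPIRED()
 * and by the deactivation loop in mesh_plink_broken(), so a fixed next hop
 * survives expiry and broken-link handling until the flag is cleared.
 */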
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs)
		kfree(mpath);
	kfree(node);
}
static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->dev, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}
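/*
 * Only the mpath_node wrapper is duplicated when copying into a grown
 * table; the mesh_path leaf itself is shared between the old and new
 * tables, which is exactly the indirection struct mpath_node provides.
 */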
int mesh_pathtbl_init(void)
{
	mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!mesh_paths)
		return -ENOMEM;
	mesh_paths->free_node = &mesh_path_node_free;
	mesh_paths->copy_node = &mesh_path_node_copy;
	mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;
	return 0;
}
void mesh_path_expire(struct net_device *dev)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	read_lock(&pathtbl_resize_lock);
	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (node->mpath->dev != dev)
			continue;
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies,
		     mpath->exp_time + MESH_PATH_EXPIRE)) {
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_del(mpath->dst, mpath->dev);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	read_unlock(&pathtbl_resize_lock);
}
void mesh_pathtbl_unregister(void)
{
	mesh_table_free(mesh_paths, true);
}