/*
 * Copyright (c) 2008 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include "mesh.h"

#define TEST_FRAME_LEN	8192
#define MAX_METRIC	0xffffffff
#define ARITH_SHIFT	8

/* Number of frames buffered per destination for unresolved destinations */
#define MESH_FRAME_QUEUE_LEN	10
#define MAX_PREQ_QUEUE_LEN	64
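
/*
 * When a destination's frame queue already holds MESH_FRAME_QUEUE_LEN frames,
 * the oldest queued frame is discarded before a new one is queued (see
 * mesh_nexthop_lookup()), and a PREQ request is dropped once the PREQ queue
 * holds MAX_PREQ_QUEUE_LEN entries (see mesh_queue_preq()).
 */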

/* Destination only */
#define MP_F_DO	0x1
/* Reply and forward */
#define MP_F_RF	0x2

static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
{
	if (ae)
		offset += 6;
	return get_unaligned_le32(preq_elem + offset);
}
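
/*
 * When the Address Extension (AE) bit is set in an element's flags field the
 * element carries one extra 6-byte (ETH_ALEN) address, so every field behind
 * it sits 6 bytes further into the element; u32_field_get() above and the
 * conditional offsets below account for that.
 */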

/* HWMP IE processing macros */
#define AE_F			(1<<6)
#define AE_F_SET(x)		(*x & AE_F)
#define PREQ_IE_FLAGS(x)	(*(x))
#define PREQ_IE_HOPCOUNT(x)	(*(x + 1))
#define PREQ_IE_TTL(x)		(*(x + 2))
#define PREQ_IE_PREQ_ID(x)	u32_field_get(x, 3, 0)
#define PREQ_IE_ORIG_ADDR(x)	(x + 7)
#define PREQ_IE_ORIG_DSN(x)	u32_field_get(x, 13, 0)
#define PREQ_IE_LIFETIME(x)	u32_field_get(x, 17, AE_F_SET(x))
#define PREQ_IE_METRIC(x)	u32_field_get(x, 21, AE_F_SET(x))
#define PREQ_IE_DST_F(x)	(*(AE_F_SET(x) ? x + 32 : x + 26))
#define PREQ_IE_DST_ADDR(x)	(AE_F_SET(x) ? x + 33 : x + 27)
#define PREQ_IE_DST_DSN(x)	u32_field_get(x, 33, AE_F_SET(x))


#define PREP_IE_FLAGS(x)	PREQ_IE_FLAGS(x)
#define PREP_IE_HOPCOUNT(x)	PREQ_IE_HOPCOUNT(x)
#define PREP_IE_TTL(x)		PREQ_IE_TTL(x)
#define PREP_IE_ORIG_ADDR(x)	(x + 3)
#define PREP_IE_ORIG_DSN(x)	u32_field_get(x, 9, 0)
#define PREP_IE_LIFETIME(x)	u32_field_get(x, 13, AE_F_SET(x))
#define PREP_IE_METRIC(x)	u32_field_get(x, 17, AE_F_SET(x))
#define PREP_IE_DST_ADDR(x)	(AE_F_SET(x) ? x + 27 : x + 21)
#define PREP_IE_DST_DSN(x)	u32_field_get(x, 27, AE_F_SET(x))

#define PERR_IE_DST_ADDR(x)	(x + 2)
#define PERR_IE_DST_DSN(x)	u32_field_get(x, 8, 0)

#define TU_TO_EXP_TIME(x) (jiffies + msecs_to_jiffies(x * 1024 / 1000))
#define MSEC_TO_TU(x) (x*1000/1024)
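
/*
 * A TU (time unit) is 1024 microseconds; the two macros above convert
 * between TUs, milliseconds and jiffies accordingly.
 */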

#define DSN_GT(x, y) ((long) (y) - (long) (x) < 0)
#define DSN_LT(x, y) ((long) (x) - (long) (y) < 0)
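
/*
 * DSN_GT()/DSN_LT() compare destination sequence numbers using signed
 * subtraction, so the result stays correct when the 32-bit sequence number
 * wraps around (the same idiom as time_after()/time_before()).
 */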

#define net_traversal_jiffies(s) \
	msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
#define default_lifetime(s) \
	MSEC_TO_TU(s->u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout)
#define min_preq_int_jiff(s) \
	(msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval))
#define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries)
#define disc_timeout_jiff(s) \
	msecs_to_jiffies(s->u.mesh.mshcfg.min_discovery_timeout)

enum mpath_frame_type {
	MPATH_PREQ = 0,
	MPATH_PREP,
	MPATH_PERR
};

static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
		u8 *orig_addr, __le32 orig_dsn, u8 dst_flags, u8 *dst,
		__le32 dst_dsn, u8 *da, u8 hop_count, u8 ttl, __le32 lifetime,
		__le32 metric, __le32 preq_id, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
	struct ieee80211_mgmt *mgmt;
	u8 *pos;
	int ie_len;

	if (!skb)
		return -1;
	skb_reserve(skb, local->hw.extra_tx_headroom);
	/* 25 is the size of the common mgmt part (24) plus the size of the
	 * common action part (1)
	 */
	mgmt = (struct ieee80211_mgmt *)
		skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
	memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
	/* BSSID is left zeroed, wildcard value */
	mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
	mgmt->u.action.u.mesh_action.action_code = action;

	switch (action) {
	case MPATH_PREQ:
		ie_len = 37;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREQ;
		break;
	case MPATH_PREP:
		ie_len = 31;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREP;
		break;
	default:
		kfree_skb(skb);
		return -ENOTSUPP;
		break;
	}
	*pos++ = ie_len;
	*pos++ = flags;
	*pos++ = hop_count;
	*pos++ = ttl;
	if (action == MPATH_PREQ) {
		memcpy(pos, &preq_id, 4);
		pos += 4;
	}
	memcpy(pos, orig_addr, ETH_ALEN);
	pos += ETH_ALEN;
	memcpy(pos, &orig_dsn, 4);
	pos += 4;
	memcpy(pos, &lifetime, 4);
	pos += 4;
	memcpy(pos, &metric, 4);
	pos += 4;
	if (action == MPATH_PREQ) {
		/* destination count */
		*pos++ = 1;
		*pos++ = dst_flags;
	}
	memcpy(pos, dst, ETH_ALEN);
	pos += ETH_ALEN;
	memcpy(pos, &dst_dsn, 4);

	ieee80211_tx_skb(sdata, skb, 0);
	return 0;
}

/**
 * mesh_path_error_tx - Sends a PERR mesh management frame
 *
 * @dst: broken destination
 * @dst_dsn: dsn of the broken destination
 * @ra: node this frame is addressed to
 */
int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra,
		       struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
	struct ieee80211_mgmt *mgmt;
	u8 *pos;
	int ie_len;

	if (!skb)
		return -1;
	skb_reserve(skb, local->hw.extra_tx_headroom);
	/* 25 is the size of the common mgmt part (24) plus the size of the
	 * common action part (1)
	 */
	mgmt = (struct ieee80211_mgmt *)
		skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
	memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, ra, ETH_ALEN);
	memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
	/* BSSID is left zeroed, wildcard value */
	mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
	mgmt->u.action.u.mesh_action.action_code = MPATH_PERR;
	ie_len = 12;
	pos = skb_put(skb, 2 + ie_len);
	*pos++ = WLAN_EID_PERR;
	*pos++ = ie_len;
	/* mode flags, reserved */
	*pos++ = 0;
	/* number of destinations */
	*pos++ = 1;
	memcpy(pos, dst, ETH_ALEN);
	pos += ETH_ALEN;
	memcpy(pos, &dst_dsn, 4);

	ieee80211_tx_skb(sdata, skb, 0);
	return 0;
}
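
/*
 * Airtime link metric. The computation below follows the 802.11s airtime
 * metric in the form
 *
 *	metric ~= (O + Bt / r) * 1 / (1 - ef)
 *
 * where O is a device-dependent overhead (device_constant), Bt the test frame
 * size (TEST_FRAME_LEN), r the rate of the last transmission and ef the frame
 * error rate estimated from sta->fail_avg. All terms are scaled by
 * 2^ARITH_SHIFT so the division can be done in integer arithmetic; links with
 * fail_avg >= 100 or an MCS (802.11n) last rate are reported as MAX_METRIC.
 */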
static u32 airtime_link_metric_get(struct ieee80211_local *local,
				   struct sta_info *sta)
{
	struct ieee80211_supported_band *sband;
	/* This should be adjusted for each device */
	int device_constant = 1 << ARITH_SHIFT;
	int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT;
	int s_unit = 1 << ARITH_SHIFT;
	int rate, err;
	u32 tx_time, estimated_retx;
	u64 result;

	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];

	if (sta->fail_avg >= 100)
		return MAX_METRIC;

	if (sta->last_tx_rate.flags & IEEE80211_TX_RC_MCS)
		return MAX_METRIC;

	err = (sta->fail_avg << ARITH_SHIFT) / 100;

	/* bitrate is in units of 100 Kbps, while we need rate in units of
	 * 1Mbps. This will be corrected on tx_time computation.
	 */
	rate = sband->bitrates[sta->last_tx_rate.idx].bitrate;
	tx_time = (device_constant + 10 * test_frame_len / rate);
	estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
	result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT);
	return (u32)result;
}

/**
 * hwmp_route_info_get - Update routing info to originator and transmitter
 *
 * @sdata: local mesh subif
 * @mgmt: mesh management frame
 * @hwmp_ie: hwmp information element (PREP or PREQ)
 *
 * This function updates the path routing information to the originator and the
 * transmitter of a HWMP PREQ or PREP frame.
 *
 * Returns: metric to frame originator or 0 if the frame should not be further
 * processed
 *
 * Notes: this function is the only place (besides user-provided info) where
 * path routing information is updated.
 */
static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
			       struct ieee80211_mgmt *mgmt,
			       u8 *hwmp_ie)
{
	struct ieee80211_local *local = sdata->local;
	struct mesh_path *mpath;
	struct sta_info *sta;
	bool fresh_info;
	u8 *orig_addr, *ta;
	u32 orig_dsn, orig_metric;
	unsigned long orig_lifetime, exp_time;
	u32 last_hop_metric, new_metric;
	bool process = true;
	u8 action = mgmt->u.action.u.mesh_action.action_code;

	rcu_read_lock();
	sta = sta_info_get(local, mgmt->sa);
	if (!sta) {
		rcu_read_unlock();
		return 0;
	}

	last_hop_metric = airtime_link_metric_get(local, sta);
	/* Update and check originator routing info */
	fresh_info = true;

	switch (action) {
	case MPATH_PREQ:
		orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie);
		orig_dsn = PREQ_IE_ORIG_DSN(hwmp_ie);
		orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie);
		orig_metric = PREQ_IE_METRIC(hwmp_ie);
		break;
	case MPATH_PREP:
		/* Originator here refers to the MP that was the destination in
		 * the Path Request. The draft refers to that MP as the
		 * destination address, even though usually it is the origin of
		 * the PREP frame. We deviate from the nomenclature in the
		 * draft so that we can easily use a single function to gather
		 * path information from both PREQ and PREP frames.
		 */
		orig_addr = PREP_IE_ORIG_ADDR(hwmp_ie);
		orig_dsn = PREP_IE_ORIG_DSN(hwmp_ie);
		orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
		orig_metric = PREP_IE_METRIC(hwmp_ie);
		break;
	default:
		rcu_read_unlock();
		return 0;
	}
	new_metric = orig_metric + last_hop_metric;
	if (new_metric < orig_metric)
		new_metric = MAX_METRIC;
	exp_time = TU_TO_EXP_TIME(orig_lifetime);

	if (memcmp(orig_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) {
		/* This MP is the originator, we are not interested in this
		 * frame, except for updating transmitter's path info.
		 */
		process = false;
		fresh_info = false;
	} else {
		mpath = mesh_path_lookup(orig_addr, sdata);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if (mpath->flags & MESH_PATH_FIXED)
				fresh_info = false;
			else if ((mpath->flags & MESH_PATH_ACTIVE) &&
			    (mpath->flags & MESH_PATH_DSN_VALID)) {
				if (DSN_GT(mpath->dsn, orig_dsn) ||
				    (mpath->dsn == orig_dsn &&
				     action == MPATH_PREQ &&
				     new_metric > mpath->metric)) {
					process = false;
					fresh_info = false;
				}
			}
		} else {
			mesh_path_add(orig_addr, sdata);
			mpath = mesh_path_lookup(orig_addr, sdata);
			if (!mpath) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			mesh_path_assign_nexthop(mpath, sta);
			mpath->flags |= MESH_PATH_DSN_VALID;
			mpath->metric = new_metric;
			mpath->dsn = orig_dsn;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ? mpath->exp_time : exp_time;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_tx_pending(mpath);
			/* draft says preq_id should be saved too, but there
			 * does not seem to be any use for it, skipping by now
			 */
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	/* Update and check transmitter routing info */
	ta = mgmt->sa;
	if (memcmp(orig_addr, ta, ETH_ALEN) == 0)
		fresh_info = false;
	else {
		fresh_info = true;

		mpath = mesh_path_lookup(ta, sdata);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if ((mpath->flags & MESH_PATH_FIXED) ||
				((mpath->flags & MESH_PATH_ACTIVE) &&
					(last_hop_metric > mpath->metric)))
				fresh_info = false;
		} else {
			mesh_path_add(ta, sdata);
			mpath = mesh_path_lookup(ta, sdata);
			if (!mpath) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			mesh_path_assign_nexthop(mpath, sta);
			mpath->flags &= ~MESH_PATH_DSN_VALID;
			mpath->metric = last_hop_metric;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ? mpath->exp_time : exp_time;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_tx_pending(mpath);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	rcu_read_unlock();

	return process ? new_metric : 0;
}

static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt,
				    u8 *preq_elem, u32 metric)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_path *mpath;
	u8 *dst_addr, *orig_addr;
	u8 dst_flags, ttl;
	u32 orig_dsn, dst_dsn, lifetime;
	bool reply = false;
	bool forward = true;

	/* Update destination DSN, if present */
	dst_addr = PREQ_IE_DST_ADDR(preq_elem);
	orig_addr = PREQ_IE_ORIG_ADDR(preq_elem);
	dst_dsn = PREQ_IE_DST_DSN(preq_elem);
	orig_dsn = PREQ_IE_ORIG_DSN(preq_elem);
	dst_flags = PREQ_IE_DST_F(preq_elem);

	if (memcmp(dst_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) {
		forward = false;
		reply = true;
		metric = 0;
		if (time_after(jiffies, ifmsh->last_dsn_update +
					net_traversal_jiffies(sdata)) ||
		    time_before(jiffies, ifmsh->last_dsn_update)) {
			dst_dsn = ++ifmsh->dsn;
			ifmsh->last_dsn_update = jiffies;
		}
	} else {
		rcu_read_lock();
		mpath = mesh_path_lookup(dst_addr, sdata);
		if (mpath) {
			if ((!(mpath->flags & MESH_PATH_DSN_VALID)) ||
					DSN_LT(mpath->dsn, dst_dsn)) {
				mpath->dsn = dst_dsn;
				mpath->flags |= MESH_PATH_DSN_VALID;
			} else if ((!(dst_flags & MP_F_DO)) &&
					(mpath->flags & MESH_PATH_ACTIVE)) {
				reply = true;
				metric = mpath->metric;
				dst_dsn = mpath->dsn;
				if (dst_flags & MP_F_RF)
					dst_flags |= MP_F_DO;
				else
					forward = false;
			}
		}
		rcu_read_unlock();
	}

	if (reply) {
		lifetime = PREQ_IE_LIFETIME(preq_elem);
		ttl = ifmsh->mshcfg.dot11MeshTTL;
		if (ttl != 0)
			mesh_path_sel_frame_tx(MPATH_PREP, 0, dst_addr,
				cpu_to_le32(dst_dsn), 0, orig_addr,
				cpu_to_le32(orig_dsn), mgmt->sa, 0, ttl,
				cpu_to_le32(lifetime), cpu_to_le32(metric),
				0, sdata);
		else
			ifmsh->mshstats.dropped_frames_ttl++;
	}

	if (forward) {
		u32 preq_id;
		u8 hopcount, flags;

		ttl = PREQ_IE_TTL(preq_elem);
		lifetime = PREQ_IE_LIFETIME(preq_elem);
		if (ttl <= 1) {
			ifmsh->mshstats.dropped_frames_ttl++;
			return;
		}
		--ttl;
		flags = PREQ_IE_FLAGS(preq_elem);
		preq_id = PREQ_IE_PREQ_ID(preq_elem);
		hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
		mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
				cpu_to_le32(orig_dsn), dst_flags, dst_addr,
				cpu_to_le32(dst_dsn), sdata->dev->broadcast,
				hopcount, ttl, cpu_to_le32(lifetime),
				cpu_to_le32(metric), cpu_to_le32(preq_id),
				sdata);
		ifmsh->mshstats.fwded_frames++;
	}
}

static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt,
				    u8 *prep_elem, u32 metric)
{
	struct mesh_path *mpath;
	u8 *dst_addr, *orig_addr;
	u8 ttl, hopcount, flags;
	u8 next_hop[ETH_ALEN];
	u32 dst_dsn, orig_dsn, lifetime;

	/* Note that we deviate from the draft nomenclature and denominate
	 * destination what the draft refers to as originator. So in this
	 * function destination refers to the final destination of the PREP,
	 * which corresponds with the originator of the PREQ which this PREP
	 * replies to.
	 */
	dst_addr = PREP_IE_DST_ADDR(prep_elem);
	if (memcmp(dst_addr, sdata->dev->dev_addr, ETH_ALEN) == 0)
		/* destination, no forwarding required */
		return;

	ttl = PREP_IE_TTL(prep_elem);
	if (ttl <= 1) {
		sdata->u.mesh.mshstats.dropped_frames_ttl++;
		return;
	}

	rcu_read_lock();
	mpath = mesh_path_lookup(dst_addr, sdata);
	if (mpath)
		spin_lock_bh(&mpath->state_lock);
	else
		goto fail;
	if (!(mpath->flags & MESH_PATH_ACTIVE)) {
		spin_unlock_bh(&mpath->state_lock);
		goto fail;
	}
	memcpy(next_hop, mpath->next_hop->sta.addr, ETH_ALEN);
	spin_unlock_bh(&mpath->state_lock);
	--ttl;
	flags = PREP_IE_FLAGS(prep_elem);
	lifetime = PREP_IE_LIFETIME(prep_elem);
	hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
	orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
	dst_dsn = PREP_IE_DST_DSN(prep_elem);
	orig_dsn = PREP_IE_ORIG_DSN(prep_elem);

	mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr,
		cpu_to_le32(orig_dsn), 0, dst_addr,
		cpu_to_le32(dst_dsn), mpath->next_hop->sta.addr, hopcount, ttl,
		cpu_to_le32(lifetime), cpu_to_le32(metric),
		0, sdata);
	rcu_read_unlock();
	sdata->u.mesh.mshstats.fwded_frames++;
	return;

fail:
	rcu_read_unlock();
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
	return;
}

static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt, u8 *perr_elem)
{
	struct mesh_path *mpath;
	u8 *ta, *dst_addr;
	u32 dst_dsn;

	ta = mgmt->sa;
	dst_addr = PERR_IE_DST_ADDR(perr_elem);
	dst_dsn = PERR_IE_DST_DSN(perr_elem);
	rcu_read_lock();
	mpath = mesh_path_lookup(dst_addr, sdata);
	if (mpath) {
		spin_lock_bh(&mpath->state_lock);
		if (mpath->flags & MESH_PATH_ACTIVE &&
		    memcmp(ta, mpath->next_hop->sta.addr, ETH_ALEN) == 0 &&
		    (!(mpath->flags & MESH_PATH_DSN_VALID) ||
		    DSN_GT(dst_dsn, mpath->dsn))) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			mpath->dsn = dst_dsn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(dst_addr, cpu_to_le32(dst_dsn),
					   sdata->dev->broadcast, sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}

void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
			    struct ieee80211_mgmt *mgmt,
			    size_t len)
{
	struct ieee802_11_elems elems;
	size_t baselen;
	u32 last_hop_metric;

	/* need action_code */
	if (len < IEEE80211_MIN_ACTION_SIZE + 1)
		return;

	baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
	ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
			len - baselen, &elems);

	switch (mgmt->u.action.u.mesh_action.action_code) {
	case MPATH_PREQ:
		if (!elems.preq || elems.preq_len != 37)
			/* Right now we support just 1 destination and no AE */
			return;
		last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.preq);
		if (!last_hop_metric)
			return;
		hwmp_preq_frame_process(sdata, mgmt, elems.preq, last_hop_metric);
		break;
	case MPATH_PREP:
		if (!elems.prep || elems.prep_len != 31)
			/* Right now we support no AE */
			return;
		last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.prep);
		if (!last_hop_metric)
			return;
		hwmp_prep_frame_process(sdata, mgmt, elems.prep, last_hop_metric);
		break;
	case MPATH_PERR:
		if (!elems.perr || elems.perr_len != 12)
			/* Right now we support only one destination per PERR */
			return;
		hwmp_perr_frame_process(sdata, mgmt, elems.perr);
	default:
		return;
	}
}

/**
 * mesh_queue_preq - queue a PREQ to a given destination
 *
 * @mpath: mesh path to discover
 * @flags: special attributes of the PREQ to be sent
 *
 * Locking: the function must be called from within a rcu read lock block.
 */
static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_preq_queue *preq_node;

	preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_KERNEL);
	if (!preq_node) {
		printk(KERN_DEBUG "Mesh HWMP: could not allocate PREQ node\n");
		return;
	}

	spin_lock(&ifmsh->mesh_preq_queue_lock);
	if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
		spin_unlock(&ifmsh->mesh_preq_queue_lock);
		kfree(preq_node);
		if (printk_ratelimit())
			printk(KERN_DEBUG "Mesh HWMP: PREQ node queue full\n");
		return;
	}

	memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
	preq_node->flags = flags;

	list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
	++ifmsh->preq_queue_len;
	spin_unlock(&ifmsh->mesh_preq_queue_lock);

	if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
		queue_work(sdata->local->hw.workqueue, &ifmsh->work);

	else if (time_before(jiffies, ifmsh->last_preq)) {
		/* avoid long wait if did not send preqs for a long time
		 * and jiffies wrapped around
		 */
		ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
		queue_work(sdata->local->hw.workqueue, &ifmsh->work);
	} else
		mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
						min_preq_int_jiff(sdata));
}

/**
 * mesh_path_start_discovery - launch a path discovery from the PREQ queue
 *
 * @sdata: local mesh subif
 */
void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_preq_queue *preq_node;
	struct mesh_path *mpath;
	u8 ttl, dst_flags;
	u32 lifetime;

	spin_lock(&ifmsh->mesh_preq_queue_lock);
	if (!ifmsh->preq_queue_len ||
		time_before(jiffies, ifmsh->last_preq +
				min_preq_int_jiff(sdata))) {
		spin_unlock(&ifmsh->mesh_preq_queue_lock);
		return;
	}

	preq_node = list_first_entry(&ifmsh->preq_queue.list,
			struct mesh_preq_queue, list);
	list_del(&preq_node->list);
	--ifmsh->preq_queue_len;
	spin_unlock(&ifmsh->mesh_preq_queue_lock);

	rcu_read_lock();
	mpath = mesh_path_lookup(preq_node->dst, sdata);
	if (!mpath)
		goto enddiscovery;

	spin_lock_bh(&mpath->state_lock);
	if (preq_node->flags & PREQ_Q_F_START) {
		if (mpath->flags & MESH_PATH_RESOLVING) {
			spin_unlock_bh(&mpath->state_lock);
			goto enddiscovery;
		} else {
			mpath->flags &= ~MESH_PATH_RESOLVED;
			mpath->flags |= MESH_PATH_RESOLVING;
			mpath->discovery_retries = 0;
			mpath->discovery_timeout = disc_timeout_jiff(sdata);
		}
	} else if (!(mpath->flags & MESH_PATH_RESOLVING) ||
			mpath->flags & MESH_PATH_RESOLVED) {
		mpath->flags &= ~MESH_PATH_RESOLVING;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

	ifmsh->last_preq = jiffies;

	if (time_after(jiffies, ifmsh->last_dsn_update +
				net_traversal_jiffies(sdata)) ||
	    time_before(jiffies, ifmsh->last_dsn_update)) {
		++ifmsh->dsn;
		sdata->u.mesh.last_dsn_update = jiffies;
	}
	lifetime = default_lifetime(sdata);
	ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
	if (ttl == 0) {
		sdata->u.mesh.mshstats.dropped_frames_ttl++;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

	if (preq_node->flags & PREQ_Q_F_REFRESH)
		dst_flags = MP_F_DO;
	else
		dst_flags = MP_F_RF;

	spin_unlock_bh(&mpath->state_lock);
	mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->dev->dev_addr,
			cpu_to_le32(ifmsh->dsn), dst_flags, mpath->dst,
			cpu_to_le32(mpath->dsn), sdata->dev->broadcast, 0,
			ttl, cpu_to_le32(lifetime), 0,
			cpu_to_le32(ifmsh->preq_id++), sdata);
	mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);

enddiscovery:
	rcu_read_unlock();
	kfree(preq_node);
}

/**
 * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame
 *
 * @skb: 802.11 frame to be sent
 * @sdata: network subif the frame will be sent through
 *
 * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is
 * found, the function will start a path discovery and queue the frame so it is
 * sent when the path is resolved. This means the caller must not free the skb
 * in this case.
 */
int mesh_nexthop_lookup(struct sk_buff *skb,
			struct ieee80211_sub_if_data *sdata)
{
	struct sk_buff *skb_to_free = NULL;
	struct mesh_path *mpath;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	u8 *dst_addr = hdr->addr3;
	int err = 0;

	rcu_read_lock();
	mpath = mesh_path_lookup(dst_addr, sdata);

	if (!mpath) {
		mesh_path_add(dst_addr, sdata);
		mpath = mesh_path_lookup(dst_addr, sdata);
		if (!mpath) {
			dev_kfree_skb(skb);
			sdata->u.mesh.mshstats.dropped_frames_no_route++;
			err = -ENOSPC;
			goto endlookup;
		}
	}

	if (mpath->flags & MESH_PATH_ACTIVE) {
		if (time_after(jiffies, mpath->exp_time -
			msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time))
				&& !memcmp(sdata->dev->dev_addr, hdr->addr4,
					   ETH_ALEN)
				&& !(mpath->flags & MESH_PATH_RESOLVING)
				&& !(mpath->flags & MESH_PATH_FIXED)) {
			mesh_queue_preq(mpath,
					PREQ_Q_F_START | PREQ_Q_F_REFRESH);
		}
		memcpy(hdr->addr1, mpath->next_hop->sta.addr,
				ETH_ALEN);
	} else {
		if (!(mpath->flags & MESH_PATH_RESOLVING)) {
			/* Start discovery only if it is not running yet */
			mesh_queue_preq(mpath, PREQ_Q_F_START);
		}

		if (skb_queue_len(&mpath->frame_queue) >=
				MESH_FRAME_QUEUE_LEN) {
			skb_to_free = mpath->frame_queue.next;
			skb_unlink(skb_to_free, &mpath->frame_queue);
		}

		skb_queue_tail(&mpath->frame_queue, skb);
		if (skb_to_free)
			mesh_path_discard_frame(skb_to_free, sdata);
		err = -ENOENT;
	}

endlookup:
	rcu_read_unlock();
	return err;
}

void mesh_path_timer(unsigned long data)
{
	struct ieee80211_sub_if_data *sdata;
	struct mesh_path *mpath;

	rcu_read_lock();
	mpath = (struct mesh_path *) data;
	mpath = rcu_dereference(mpath);
	if (!mpath)
		goto endmpathtimer;
	spin_lock_bh(&mpath->state_lock);
	sdata = mpath->sdata;
	if (mpath->flags & MESH_PATH_RESOLVED ||
			(!(mpath->flags & MESH_PATH_RESOLVING)))
		mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
	else if (mpath->discovery_retries < max_preq_retries(sdata)) {
		++mpath->discovery_retries;
		mpath->discovery_timeout *= 2;
		mesh_queue_preq(mpath, 0);
	} else {
		mpath->flags = 0;
		mpath->exp_time = jiffies;
		mesh_path_flush_pending(mpath);
	}

	spin_unlock_bh(&mpath->state_lock);
endmpathtimer:
	rcu_read_unlock();
}