net/mac80211/mesh_hwmp.c

/*
 * Copyright (c) 2008 open80211s Ltd.
 * Author: Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include "mesh.h"

#define TEST_FRAME_LEN	8192
#define MAX_METRIC	0xffffffff
#define ARITH_SHIFT	8

/* Number of frames buffered per destination for unresolved destinations */
#define MESH_FRAME_QUEUE_LEN	10
#define MAX_PREQ_QUEUE_LEN	64

/* Destination only */
#define MP_F_DO	0x1
/* Reply and forward */
#define MP_F_RF	0x2

static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
{
	if (ae)
		offset += 6;
	return get_unaligned_le32(preq_elem + offset);
}

/* HWMP IE processing macros */
#define AE_F			(1<<6)
#define AE_F_SET(x)		(*x & AE_F)
#define PREQ_IE_FLAGS(x)	(*(x))
#define PREQ_IE_HOPCOUNT(x)	(*(x + 1))
#define PREQ_IE_TTL(x)		(*(x + 2))
#define PREQ_IE_PREQ_ID(x)	u32_field_get(x, 3, 0)
#define PREQ_IE_ORIG_ADDR(x)	(x + 7)
#define PREQ_IE_ORIG_DSN(x)	u32_field_get(x, 13, 0)
#define PREQ_IE_LIFETIME(x)	u32_field_get(x, 17, AE_F_SET(x))
#define PREQ_IE_METRIC(x)	u32_field_get(x, 21, AE_F_SET(x))
#define PREQ_IE_DST_F(x)	(*(AE_F_SET(x) ? x + 32 : x + 26))
#define PREQ_IE_DST_ADDR(x)	(AE_F_SET(x) ? x + 33 : x + 27)
#define PREQ_IE_DST_DSN(x)	u32_field_get(x, 33, AE_F_SET(x))

#define PREP_IE_FLAGS(x)	PREQ_IE_FLAGS(x)
#define PREP_IE_HOPCOUNT(x)	PREQ_IE_HOPCOUNT(x)
#define PREP_IE_TTL(x)		PREQ_IE_TTL(x)
#define PREP_IE_ORIG_ADDR(x)	(x + 3)
#define PREP_IE_ORIG_DSN(x)	u32_field_get(x, 9, 0)
#define PREP_IE_LIFETIME(x)	u32_field_get(x, 13, AE_F_SET(x))
#define PREP_IE_METRIC(x)	u32_field_get(x, 17, AE_F_SET(x))
#define PREP_IE_DST_ADDR(x)	(AE_F_SET(x) ? x + 27 : x + 21)
#define PREP_IE_DST_DSN(x)	u32_field_get(x, 27, AE_F_SET(x))

#define PERR_IE_DST_ADDR(x)	(x + 2)
#define PERR_IE_DST_DSN(x)	u32_field_get(x, 8, 0)

#define MSEC_TO_TU(x) (x*1000/1024)
#define DSN_GT(x, y) ((long) (y) - (long) (x) < 0)
#define DSN_LT(x, y) ((long) (x) - (long) (y) < 0)
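
/*
 * DSN_GT() and DSN_LT() compare the 32-bit destination sequence numbers
 * using the sign of their difference, so the comparison is meant to keep
 * working after the counter wraps around.  MSEC_TO_TU() converts
 * milliseconds to 802.11 time units of 1024 microseconds.
 */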

#define net_traversal_jiffies(s) \
	msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
#define default_lifetime(s) \
	MSEC_TO_TU(s->u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout)
#define min_preq_int_jiff(s) \
	(msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval))
#define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries)
#define disc_timeout_jiff(s) \
	msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout)

enum mpath_frame_type {
	MPATH_PREQ = 0,
	MPATH_PREP,
	MPATH_PERR
};
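
/*
 * mesh_path_sel_frame_tx() below serializes a PREQ or PREP element as:
 * flags, hop count and TTL (1 octet each), the 4-octet PREQ ID (PREQ only),
 * originator address (6), originator DSN (4), lifetime (4), metric (4), a
 * destination count and per-destination flags (1 octet each, PREQ only),
 * and finally the destination address (6) and destination DSN (4).  That
 * accounts for the element lengths of 37 (PREQ) and 31 (PREP) used below.
 */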
static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
		u8 *orig_addr, __le32 orig_dsn, u8 dst_flags, u8 *dst,
		__le32 dst_dsn, u8 *da, u8 hop_count, u8 ttl, __le32 lifetime,
		__le32 metric, __le32 preq_id, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
	struct ieee80211_mgmt *mgmt;
	u8 *pos;
	int ie_len;

	if (!skb)
		return -1;
	skb_reserve(skb, local->hw.extra_tx_headroom);
	/* 25 is the size of the common mgmt part (24) plus the size of the
	 * common action part (1)
	 */
	mgmt = (struct ieee80211_mgmt *)
		skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
	memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
	/* BSSID is left zeroed, wildcard value */
	mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
	mgmt->u.action.u.mesh_action.action_code = action;

	switch (action) {
	case MPATH_PREQ:
		ie_len = 37;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREQ;
		break;
	case MPATH_PREP:
		ie_len = 31;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREP;
		break;
	default:
		kfree_skb(skb);
		return -ENOTSUPP;
	}
	*pos++ = ie_len;
	*pos++ = flags;
	*pos++ = hop_count;
	*pos++ = ttl;
	if (action == MPATH_PREQ) {
		memcpy(pos, &preq_id, 4);
		pos += 4;
	}
	memcpy(pos, orig_addr, ETH_ALEN);
	pos += ETH_ALEN;
	memcpy(pos, &orig_dsn, 4);
	pos += 4;
	memcpy(pos, &lifetime, 4);
	pos += 4;
	memcpy(pos, &metric, 4);
	pos += 4;
	if (action == MPATH_PREQ) {
		/* destination count */
		*pos++ = 1;
		*pos++ = dst_flags;
	}
	memcpy(pos, dst, ETH_ALEN);
	pos += ETH_ALEN;
	memcpy(pos, &dst_dsn, 4);

	ieee80211_tx_skb(sdata, skb, 1);
	return 0;
}

/**
 * mesh_path_error_tx - Sends a PERR mesh management frame
 *
 * @dst: broken destination
 * @dst_dsn: dsn of the broken destination
 * @ra: node this frame is addressed to
 */
int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra,
		struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
	struct ieee80211_mgmt *mgmt;
	u8 *pos;
	int ie_len;

	if (!skb)
		return -1;
	skb_reserve(skb, local->hw.extra_tx_headroom);
	/* 25 is the size of the common mgmt part (24) plus the size of the
	 * common action part (1)
	 */
	mgmt = (struct ieee80211_mgmt *)
		skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
	memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, ra, ETH_ALEN);
	memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
	/* BSSID is left zeroed, wildcard value */
	mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
	mgmt->u.action.u.mesh_action.action_code = MPATH_PERR;
	ie_len = 12;
	pos = skb_put(skb, 2 + ie_len);
	*pos++ = WLAN_EID_PERR;
	*pos++ = ie_len;
	/* mode flags, reserved */
	*pos++ = 0;
	/* number of destinations */
	*pos++ = 1;
	memcpy(pos, dst, ETH_ALEN);
	pos += ETH_ALEN;
	memcpy(pos, &dst_dsn, 4);

	ieee80211_tx_skb(sdata, skb, 1);
	return 0;
}
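
/*
 * The 12-octet PERR element built above carries mode flags (1), a
 * destination count (1), the destination address (6) and the destination
 * DSN (4); only a single broken destination is reported per frame here.
 */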

static u32 airtime_link_metric_get(struct ieee80211_local *local,
				   struct sta_info *sta)
{
	struct ieee80211_supported_band *sband;
	/* This should be adjusted for each device */
	int device_constant = 1 << ARITH_SHIFT;
	int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT;
	int s_unit = 1 << ARITH_SHIFT;
	int rate, err;
	u32 tx_time, estimated_retx;
	u64 result;

	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];

	if (sta->fail_avg >= 100)
		return MAX_METRIC;

	if (sta->last_tx_rate.flags & IEEE80211_TX_RC_MCS)
		return MAX_METRIC;

	err = (sta->fail_avg << ARITH_SHIFT) / 100;

	/* bitrate is in units of 100 Kbps, while we need rate in units of
	 * 1Mbps. This will be corrected on tx_time computation.
	 */
	rate = sband->bitrates[sta->last_tx_rate.idx].bitrate;
	tx_time = (device_constant + 10 * test_frame_len / rate);
	estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
	result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT);
	return (u32)result;
}
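
/*
 * The arithmetic above is a fixed-point (ARITH_SHIFT fractional bits)
 * evaluation of the 802.11s airtime metric ca = (O + Bt/r) / (1 - ef):
 * tx_time is the per-device overhead plus the time to transmit a
 * TEST_FRAME_LEN frame at the last TX rate (the factor 10 converts the
 * 100 kbps bitrate units to Mbps), estimated_retx approximates 1 / (1 - ef)
 * with ef derived from fail_avg, and the final shift removes both sets of
 * fractional bits.
 */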

/**
 * hwmp_route_info_get - Update routing info to originator and transmitter
 *
 * @sdata: local mesh subif
 * @mgmt: mesh management frame
 * @hwmp_ie: hwmp information element (PREP or PREQ)
 *
 * This function updates the path routing information to the originator and the
 * transmitter of a HWMP PREQ or PREP frame.
 *
 * Returns: metric to frame originator or 0 if the frame should not be further
 * processed
 *
 * Notes: this function is the only place (besides user-provided info) where
 * path routing information is updated.
 */
static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
			       struct ieee80211_mgmt *mgmt,
			       u8 *hwmp_ie)
{
	struct ieee80211_local *local = sdata->local;
	struct mesh_path *mpath;
	struct sta_info *sta;
	bool fresh_info;
	u8 *orig_addr, *ta;
	u32 orig_dsn, orig_metric;
	unsigned long orig_lifetime, exp_time;
	u32 last_hop_metric, new_metric;
	bool process = true;
	u8 action = mgmt->u.action.u.mesh_action.action_code;

	rcu_read_lock();
	sta = sta_info_get(local, mgmt->sa);
	if (!sta) {
		rcu_read_unlock();
		return 0;
	}

	last_hop_metric = airtime_link_metric_get(local, sta);
	/* Update and check originator routing info */
	fresh_info = true;

	switch (action) {
	case MPATH_PREQ:
		orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie);
		orig_dsn = PREQ_IE_ORIG_DSN(hwmp_ie);
		orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie);
		orig_metric = PREQ_IE_METRIC(hwmp_ie);
		break;
	case MPATH_PREP:
		/* Originator here refers to the MP that was the destination in
		 * the Path Request. The draft refers to that MP as the
		 * destination address, even though usually it is the origin of
		 * the PREP frame. We divert from the nomenclature in the draft
		 * so that we can easily use a single function to gather path
		 * information from both PREQ and PREP frames.
		 */
		orig_addr = PREP_IE_ORIG_ADDR(hwmp_ie);
		orig_dsn = PREP_IE_ORIG_DSN(hwmp_ie);
		orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
		orig_metric = PREP_IE_METRIC(hwmp_ie);
		break;
	default:
		rcu_read_unlock();
		return 0;
	}
	new_metric = orig_metric + last_hop_metric;
	if (new_metric < orig_metric)
		new_metric = MAX_METRIC;
	exp_time = TU_TO_EXP_TIME(orig_lifetime);

	if (memcmp(orig_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) {
		/* This MP is the originator, we are not interested in this
		 * frame, except for updating transmitter's path info.
		 */
		process = false;
		fresh_info = false;
	} else {
		mpath = mesh_path_lookup(orig_addr, sdata);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if (mpath->flags & MESH_PATH_FIXED)
				fresh_info = false;
			else if ((mpath->flags & MESH_PATH_ACTIVE) &&
			    (mpath->flags & MESH_PATH_DSN_VALID)) {
				if (DSN_GT(mpath->dsn, orig_dsn) ||
				    (mpath->dsn == orig_dsn &&
				     action == MPATH_PREQ &&
				     new_metric > mpath->metric)) {
					process = false;
					fresh_info = false;
				}
			}
		} else {
			mesh_path_add(orig_addr, sdata);
			mpath = mesh_path_lookup(orig_addr, sdata);
			if (!mpath) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			mesh_path_assign_nexthop(mpath, sta);
			mpath->flags |= MESH_PATH_DSN_VALID;
			mpath->metric = new_metric;
			mpath->dsn = orig_dsn;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ? mpath->exp_time : exp_time;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_tx_pending(mpath);
			/* draft says preq_id should be saved too, but there
			 * does not seem to be any use for it, skipping by now
			 */
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	/* Update and check transmitter routing info */
	ta = mgmt->sa;
	if (memcmp(orig_addr, ta, ETH_ALEN) == 0)
		fresh_info = false;
	else {
		fresh_info = true;

		mpath = mesh_path_lookup(ta, sdata);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if ((mpath->flags & MESH_PATH_FIXED) ||
			    ((mpath->flags & MESH_PATH_ACTIVE) &&
			    (last_hop_metric > mpath->metric)))
				fresh_info = false;
		} else {
			mesh_path_add(ta, sdata);
			mpath = mesh_path_lookup(ta, sdata);
			if (!mpath) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			mesh_path_assign_nexthop(mpath, sta);
			mpath->flags &= ~MESH_PATH_DSN_VALID;
			mpath->metric = last_hop_metric;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ? mpath->exp_time : exp_time;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_tx_pending(mpath);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	rcu_read_unlock();

	return process ? new_metric : 0;
}
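
/*
 * Freshness rules applied above: originator info from a PREQ or PREP is
 * ignored for fixed paths, and for active paths with a valid DSN it is
 * ignored when the advertised DSN is older, or equal with a worse metric
 * (PREQ case).  Transmitter (last hop) info is ignored for fixed paths and
 * for active paths whose stored metric is already better than the new
 * last-hop metric.
 */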

static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt,
				    u8 *preq_elem, u32 metric)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_path *mpath;
	u8 *dst_addr, *orig_addr;
	u8 dst_flags, ttl;
	u32 orig_dsn, dst_dsn, lifetime;
	bool reply = false;
	bool forward = true;

	/* Update destination DSN, if present */
	dst_addr = PREQ_IE_DST_ADDR(preq_elem);
	orig_addr = PREQ_IE_ORIG_ADDR(preq_elem);
	dst_dsn = PREQ_IE_DST_DSN(preq_elem);
	orig_dsn = PREQ_IE_ORIG_DSN(preq_elem);
	dst_flags = PREQ_IE_DST_F(preq_elem);

	if (memcmp(dst_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) {
		forward = false;
		reply = true;
		metric = 0;
		if (time_after(jiffies, ifmsh->last_dsn_update +
					net_traversal_jiffies(sdata)) ||
		    time_before(jiffies, ifmsh->last_dsn_update)) {
			dst_dsn = ++ifmsh->dsn;
			ifmsh->last_dsn_update = jiffies;
		}
	} else {
		rcu_read_lock();
		mpath = mesh_path_lookup(dst_addr, sdata);
		if (mpath) {
			if ((!(mpath->flags & MESH_PATH_DSN_VALID)) ||
					DSN_LT(mpath->dsn, dst_dsn)) {
				mpath->dsn = dst_dsn;
				mpath->flags |= MESH_PATH_DSN_VALID;
			} else if ((!(dst_flags & MP_F_DO)) &&
					(mpath->flags & MESH_PATH_ACTIVE)) {
				reply = true;
				metric = mpath->metric;
				dst_dsn = mpath->dsn;
				if (dst_flags & MP_F_RF)
					dst_flags |= MP_F_DO;
				else
					forward = false;
			}
		}
		rcu_read_unlock();
	}

	if (reply) {
		lifetime = PREQ_IE_LIFETIME(preq_elem);
		ttl = ifmsh->mshcfg.dot11MeshTTL;
		if (ttl != 0)
			mesh_path_sel_frame_tx(MPATH_PREP, 0, dst_addr,
				cpu_to_le32(dst_dsn), 0, orig_addr,
				cpu_to_le32(orig_dsn), mgmt->sa, 0, ttl,
				cpu_to_le32(lifetime), cpu_to_le32(metric),
				0, sdata);
		else
			ifmsh->mshstats.dropped_frames_ttl++;
	}

	if (forward) {
		u32 preq_id;
		u8 hopcount, flags;

		ttl = PREQ_IE_TTL(preq_elem);
		lifetime = PREQ_IE_LIFETIME(preq_elem);
		if (ttl <= 1) {
			ifmsh->mshstats.dropped_frames_ttl++;
			return;
		}
		--ttl;
		flags = PREQ_IE_FLAGS(preq_elem);
		preq_id = PREQ_IE_PREQ_ID(preq_elem);
		hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
		mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
				cpu_to_le32(orig_dsn), dst_flags, dst_addr,
				cpu_to_le32(dst_dsn), sdata->dev->broadcast,
				hopcount, ttl, cpu_to_le32(lifetime),
				cpu_to_le32(metric), cpu_to_le32(preq_id),
				sdata);
		ifmsh->mshstats.fwded_frames++;
	}
}

static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt,
				    u8 *prep_elem, u32 metric)
{
	struct mesh_path *mpath;
	u8 *dst_addr, *orig_addr;
	u8 ttl, hopcount, flags;
	u8 next_hop[ETH_ALEN];
	u32 dst_dsn, orig_dsn, lifetime;

	/* Note that we divert from the draft nomenclature and denominate
	 * destination to what the draft refers to as originator. So in this
	 * function destination refers to the final destination of the PREP,
	 * which corresponds with the originator of the PREQ which this PREP
	 * replies to.
	 */
	dst_addr = PREP_IE_DST_ADDR(prep_elem);
	if (memcmp(dst_addr, sdata->dev->dev_addr, ETH_ALEN) == 0)
		/* destination, no forwarding required */
		return;

	ttl = PREP_IE_TTL(prep_elem);
	if (ttl <= 1) {
		sdata->u.mesh.mshstats.dropped_frames_ttl++;
		return;
	}

	rcu_read_lock();
	mpath = mesh_path_lookup(dst_addr, sdata);
	if (mpath)
		spin_lock_bh(&mpath->state_lock);
	else
		goto fail;
	if (!(mpath->flags & MESH_PATH_ACTIVE)) {
		spin_unlock_bh(&mpath->state_lock);
		goto fail;
	}
	memcpy(next_hop, mpath->next_hop->sta.addr, ETH_ALEN);
	spin_unlock_bh(&mpath->state_lock);
	--ttl;
	flags = PREP_IE_FLAGS(prep_elem);
	lifetime = PREP_IE_LIFETIME(prep_elem);
	hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
	orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
	dst_dsn = PREP_IE_DST_DSN(prep_elem);
	orig_dsn = PREP_IE_ORIG_DSN(prep_elem);

	mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr,
		cpu_to_le32(orig_dsn), 0, dst_addr,
		cpu_to_le32(dst_dsn), mpath->next_hop->sta.addr, hopcount, ttl,
		cpu_to_le32(lifetime), cpu_to_le32(metric),
		0, sdata);
	rcu_read_unlock();
	sdata->u.mesh.mshstats.fwded_frames++;
	return;

fail:
	rcu_read_unlock();
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
	return;
}

static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt, u8 *perr_elem)
{
	struct mesh_path *mpath;
	u8 *ta, *dst_addr;
	u32 dst_dsn;

	ta = mgmt->sa;
	dst_addr = PERR_IE_DST_ADDR(perr_elem);
	dst_dsn = PERR_IE_DST_DSN(perr_elem);
	rcu_read_lock();
	mpath = mesh_path_lookup(dst_addr, sdata);
	if (mpath) {
		spin_lock_bh(&mpath->state_lock);
		if (mpath->flags & MESH_PATH_ACTIVE &&
		    memcmp(ta, mpath->next_hop->sta.addr, ETH_ALEN) == 0 &&
		    (!(mpath->flags & MESH_PATH_DSN_VALID) ||
		     DSN_GT(dst_dsn, mpath->dsn))) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			mpath->dsn = dst_dsn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(dst_addr, cpu_to_le32(dst_dsn),
					   sdata->dev->broadcast, sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}

void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
			    struct ieee80211_mgmt *mgmt,
			    size_t len)
{
	struct ieee802_11_elems elems;
	size_t baselen;
	u32 last_hop_metric;

	/* need action_code */
	if (len < IEEE80211_MIN_ACTION_SIZE + 1)
		return;

	baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
	ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
			len - baselen, &elems);

	switch (mgmt->u.action.u.mesh_action.action_code) {
	case MPATH_PREQ:
		if (!elems.preq || elems.preq_len != 37)
			/* Right now we support just 1 destination and no AE */
			return;
		last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.preq);
		if (!last_hop_metric)
			return;
		hwmp_preq_frame_process(sdata, mgmt, elems.preq, last_hop_metric);
		break;
	case MPATH_PREP:
		if (!elems.prep || elems.prep_len != 31)
			/* Right now we support no AE */
			return;
		last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.prep);
		if (!last_hop_metric)
			return;
		hwmp_prep_frame_process(sdata, mgmt, elems.prep, last_hop_metric);
		break;
	case MPATH_PERR:
		if (!elems.perr || elems.perr_len != 12)
			/* Right now we support only one destination per PERR */
			return;
		hwmp_perr_frame_process(sdata, mgmt, elems.perr);
	default:
		return;
	}
}

/**
 * mesh_queue_preq - queue a PREQ to a given destination
 *
 * @mpath: mesh path to discover
 * @flags: special attributes of the PREQ to be sent
 *
 * Locking: the function must be called from within a rcu read lock block.
 */
static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_preq_queue *preq_node;

	preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
	if (!preq_node) {
		printk(KERN_DEBUG "Mesh HWMP: could not allocate PREQ node\n");
		return;
	}

	spin_lock(&ifmsh->mesh_preq_queue_lock);
	if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
		spin_unlock(&ifmsh->mesh_preq_queue_lock);
		kfree(preq_node);
		if (printk_ratelimit())
			printk(KERN_DEBUG "Mesh HWMP: PREQ node queue full\n");
		return;
	}

	memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
	preq_node->flags = flags;

	list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
	++ifmsh->preq_queue_len;
	spin_unlock(&ifmsh->mesh_preq_queue_lock);

	if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
		queue_work(sdata->local->hw.workqueue, &ifmsh->work);
	else if (time_before(jiffies, ifmsh->last_preq)) {
		/* avoid long wait if did not send preqs for a long time
		 * and jiffies wrapped around
		 */
		ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
		queue_work(sdata->local->hw.workqueue, &ifmsh->work);
	} else
		mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
						min_preq_int_jiff(sdata));
}
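
/*
 * Queued PREQ nodes are drained by mesh_path_start_discovery() below.  The
 * work item is kicked immediately only if dot11MeshHWMPpreqMinInterval has
 * already elapsed since the last PREQ (or jiffies wrapped); otherwise
 * ifmsh->mesh_path_timer is armed for the moment the interval expires.
 */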

/**
 * mesh_path_start_discovery - launch a path discovery from the PREQ queue
 *
 * @sdata: local mesh subif
 */
void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_preq_queue *preq_node;
	struct mesh_path *mpath;
	u8 ttl, dst_flags;
	u32 lifetime;

	spin_lock(&ifmsh->mesh_preq_queue_lock);
	if (!ifmsh->preq_queue_len ||
		time_before(jiffies, ifmsh->last_preq +
				min_preq_int_jiff(sdata))) {
		spin_unlock(&ifmsh->mesh_preq_queue_lock);
		return;
	}

	preq_node = list_first_entry(&ifmsh->preq_queue.list,
			struct mesh_preq_queue, list);
	list_del(&preq_node->list);
	--ifmsh->preq_queue_len;
	spin_unlock(&ifmsh->mesh_preq_queue_lock);

	rcu_read_lock();
	mpath = mesh_path_lookup(preq_node->dst, sdata);
	if (!mpath)
		goto enddiscovery;

	spin_lock_bh(&mpath->state_lock);
	if (preq_node->flags & PREQ_Q_F_START) {
		if (mpath->flags & MESH_PATH_RESOLVING) {
			spin_unlock_bh(&mpath->state_lock);
			goto enddiscovery;
		} else {
			mpath->flags &= ~MESH_PATH_RESOLVED;
			mpath->flags |= MESH_PATH_RESOLVING;
			mpath->discovery_retries = 0;
			mpath->discovery_timeout = disc_timeout_jiff(sdata);
		}
	} else if (!(mpath->flags & MESH_PATH_RESOLVING) ||
			mpath->flags & MESH_PATH_RESOLVED) {
		mpath->flags &= ~MESH_PATH_RESOLVING;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

	ifmsh->last_preq = jiffies;

	if (time_after(jiffies, ifmsh->last_dsn_update +
				net_traversal_jiffies(sdata)) ||
	    time_before(jiffies, ifmsh->last_dsn_update)) {
		++ifmsh->dsn;
		sdata->u.mesh.last_dsn_update = jiffies;
	}
	lifetime = default_lifetime(sdata);
	ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
	if (ttl == 0) {
		sdata->u.mesh.mshstats.dropped_frames_ttl++;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

	if (preq_node->flags & PREQ_Q_F_REFRESH)
		dst_flags = MP_F_DO;
	else
		dst_flags = MP_F_RF;

	spin_unlock_bh(&mpath->state_lock);
	mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->dev->dev_addr,
			cpu_to_le32(ifmsh->dsn), dst_flags, mpath->dst,
			cpu_to_le32(mpath->dsn), sdata->dev->broadcast, 0,
			ttl, cpu_to_le32(lifetime), 0,
			cpu_to_le32(ifmsh->preq_id++), sdata);
	mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);

enddiscovery:
	rcu_read_unlock();
	kfree(preq_node);
}

/**
 * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame
 *
 * @skb: 802.11 frame to be sent
 * @sdata: network subif the frame will be sent through
 *
 * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is
 * found, the function will start a path discovery and queue the frame so it is
 * sent when the path is resolved. This means the caller must not free the skb
 * in this case.
 */
int mesh_nexthop_lookup(struct sk_buff *skb,
			struct ieee80211_sub_if_data *sdata)
{
	struct sk_buff *skb_to_free = NULL;
	struct mesh_path *mpath;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	u8 *dst_addr = hdr->addr3;
	int err = 0;

	rcu_read_lock();
	mpath = mesh_path_lookup(dst_addr, sdata);

	if (!mpath) {
		mesh_path_add(dst_addr, sdata);
		mpath = mesh_path_lookup(dst_addr, sdata);
		if (!mpath) {
			dev_kfree_skb(skb);
			sdata->u.mesh.mshstats.dropped_frames_no_route++;
			err = -ENOSPC;
			goto endlookup;
		}
	}

	if (mpath->flags & MESH_PATH_ACTIVE) {
		if (time_after(jiffies, mpath->exp_time -
			msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time))
				&& !memcmp(sdata->dev->dev_addr, hdr->addr4,
					   ETH_ALEN)
				&& !(mpath->flags & MESH_PATH_RESOLVING)
				&& !(mpath->flags & MESH_PATH_FIXED)) {
			mesh_queue_preq(mpath,
					PREQ_Q_F_START | PREQ_Q_F_REFRESH);
		}
		memcpy(hdr->addr1, mpath->next_hop->sta.addr,
				ETH_ALEN);
	} else {
		if (!(mpath->flags & MESH_PATH_RESOLVING)) {
			/* Start discovery only if it is not running yet */
			mesh_queue_preq(mpath, PREQ_Q_F_START);
		}

		if (skb_queue_len(&mpath->frame_queue) >=
				MESH_FRAME_QUEUE_LEN) {
			skb_to_free = mpath->frame_queue.next;
			skb_unlink(skb_to_free, &mpath->frame_queue);
		}

		skb_queue_tail(&mpath->frame_queue, skb);
		if (skb_to_free)
			mesh_path_discard_frame(skb_to_free, sdata);
		err = -ENOENT;
	}

endlookup:
	rcu_read_unlock();
	return err;
}

void mesh_path_timer(unsigned long data)
{
	struct ieee80211_sub_if_data *sdata;
	struct mesh_path *mpath;

	rcu_read_lock();
	mpath = (struct mesh_path *) data;
	mpath = rcu_dereference(mpath);
	if (!mpath)
		goto endmpathtimer;
	sdata = mpath->sdata;

	if (sdata->local->quiescing) {
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&mpath->state_lock);
	if (mpath->flags & MESH_PATH_RESOLVED ||
			(!(mpath->flags & MESH_PATH_RESOLVING)))
		mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
	else if (mpath->discovery_retries < max_preq_retries(sdata)) {
		++mpath->discovery_retries;
		mpath->discovery_timeout *= 2;
		mesh_queue_preq(mpath, 0);
	} else {
		mpath->flags = 0;
		mpath->exp_time = jiffies;
		mesh_path_flush_pending(mpath);
	}

	spin_unlock_bh(&mpath->state_lock);
endmpathtimer:
	rcu_read_unlock();
}