net/batman-adv/gateway_client.c

/*
 * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "bat_sysfs.h"
#include "gateway_client.h"
#include "gateway_common.h"
#include "hard-interface.h"
#include "originator.h"
#include "translation-table.h"
#include "routing.h"
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>

/* This is the offset of the options field in a dhcp packet starting at
 * the beginning of the dhcp header */
#define DHCP_OPTIONS_OFFSET 240
#define DHCP_REQUEST 3

static void gw_node_free_ref(struct gw_node *gw_node)
{
	if (atomic_dec_and_test(&gw_node->refcount))
		kfree_rcu(gw_node, rcu);
}

static struct gw_node *gw_get_selected_gw_node(struct bat_priv *bat_priv)
{
	struct gw_node *gw_node;

	rcu_read_lock();
	gw_node = rcu_dereference(bat_priv->curr_gw);
	if (!gw_node)
		goto out;

	if (!atomic_inc_not_zero(&gw_node->refcount))
		gw_node = NULL;

out:
	rcu_read_unlock();
	return gw_node;
}

struct orig_node *gw_get_selected_orig(struct bat_priv *bat_priv)
{
	struct gw_node *gw_node;
	struct orig_node *orig_node = NULL;

	gw_node = gw_get_selected_gw_node(bat_priv);
	if (!gw_node)
		goto out;

	rcu_read_lock();
	orig_node = gw_node->orig_node;
	if (!orig_node)
		goto unlock;

	if (!atomic_inc_not_zero(&orig_node->refcount))
		orig_node = NULL;

unlock:
	rcu_read_unlock();
out:
	if (gw_node)
		gw_node_free_ref(gw_node);
	return orig_node;
}
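
/* Exchange the currently selected gateway: take a reference on the new
 * gw_node (if any), publish it via RCU under gw_list_lock and drop the
 * reference held by the previous selection. */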
static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
{
	struct gw_node *curr_gw_node;

	spin_lock_bh(&bat_priv->gw_list_lock);

	if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
		new_gw_node = NULL;

	curr_gw_node = rcu_dereference_protected(bat_priv->curr_gw, 1);
	rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);

	if (curr_gw_node)
		gw_node_free_ref(curr_gw_node);

	spin_unlock_bh(&bat_priv->gw_list_lock);
}

void gw_deselect(struct bat_priv *bat_priv)
{
	atomic_set(&bat_priv->gw_reselect, 1);
}
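
/* Walk the gateway list under RCU and return the best candidate (with its
 * refcount increased) according to the configured gw_sel_class: class 1
 * weighs the advertised downlink bandwidth against the TQ towards the
 * gateway, all other classes simply pick the highest TQ. */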
static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv)
{
	struct neigh_node *router;
	struct hlist_node *node;
	struct gw_node *gw_node, *curr_gw = NULL;
	uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
	uint8_t max_tq = 0;
	int down, up;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
		if (gw_node->deleted)
			continue;

		router = orig_node_get_router(gw_node->orig_node);
		if (!router)
			continue;

		if (!atomic_inc_not_zero(&gw_node->refcount))
			goto next;

		switch (atomic_read(&bat_priv->gw_sel_class)) {
		case 1: /* fast connection */
			gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags,
					     &down, &up);

			tmp_gw_factor = (router->tq_avg * router->tq_avg *
					 down * 100 * 100) /
					(TQ_LOCAL_WINDOW_SIZE *
					 TQ_LOCAL_WINDOW_SIZE * 64);

			if ((tmp_gw_factor > max_gw_factor) ||
			    ((tmp_gw_factor == max_gw_factor) &&
			     (router->tq_avg > max_tq))) {
				if (curr_gw)
					gw_node_free_ref(curr_gw);
				curr_gw = gw_node;
				atomic_inc(&curr_gw->refcount);
			}
			break;

		default: /**
			  * 2:  stable connection (use best statistic)
			  * 3:  fast-switch (use best statistic but change as
			  *     soon as a better gateway appears)
			  * XX: late-switch (use best statistic but change as
			  *     soon as a better gateway appears which has
			  *     $routing_class more tq points)
			  **/
			if (router->tq_avg > max_tq) {
				if (curr_gw)
					gw_node_free_ref(curr_gw);
				curr_gw = gw_node;
				atomic_inc(&curr_gw->refcount);
			}
			break;
		}

		if (router->tq_avg > max_tq)
			max_tq = router->tq_avg;

		if (tmp_gw_factor > max_gw_factor)
			max_gw_factor = tmp_gw_factor;

		gw_node_free_ref(gw_node);

next:
		neigh_node_free_ref(router);
	}
	rcu_read_unlock();

	return curr_gw;
}
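
/* Re-run the gateway election when a reselection has been requested: pick
 * the best gateway, report additions, changes and removals via debug log
 * and uevent, and make it the new selection. Only active in GW_MODE_CLIENT. */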
void gw_election(struct bat_priv *bat_priv)
{
	struct gw_node *curr_gw = NULL, *next_gw = NULL;
	struct neigh_node *router = NULL;
	char gw_addr[18] = { '\0' };

	/**
	 * The batman daemon checks here if we already passed a full originator
	 * cycle in order to make sure we don't choose the first gateway we
	 * hear about. This check is based on the daemon's uptime which we
	 * don't have.
	 **/
	if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
		goto out;

	if (!atomic_dec_not_zero(&bat_priv->gw_reselect))
		goto out;

	curr_gw = gw_get_selected_gw_node(bat_priv);

	next_gw = gw_get_best_gw_node(bat_priv);

	if (curr_gw == next_gw)
		goto out;

	if (next_gw) {
		sprintf(gw_addr, "%pM", next_gw->orig_node->orig);

		router = orig_node_get_router(next_gw->orig_node);
		if (!router) {
			gw_deselect(bat_priv);
			goto out;
		}
	}

	if ((curr_gw) && (!next_gw)) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Removing selected gateway - no gateway in range\n");
		throw_uevent(bat_priv, UEV_GW, UEV_DEL, NULL);
	} else if ((!curr_gw) && (next_gw)) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Adding route to gateway %pM (gw_flags: %i, tq: %i)\n",
			next_gw->orig_node->orig, next_gw->orig_node->gw_flags,
			router->tq_avg);
		throw_uevent(bat_priv, UEV_GW, UEV_ADD, gw_addr);
	} else {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Changing route to gateway %pM (gw_flags: %i, tq: %i)\n",
			next_gw->orig_node->orig, next_gw->orig_node->gw_flags,
			router->tq_avg);
		throw_uevent(bat_priv, UEV_GW, UEV_CHANGE, gw_addr);
	}

	gw_select(bat_priv, next_gw);

out:
	if (curr_gw)
		gw_node_free_ref(curr_gw);
	if (next_gw)
		gw_node_free_ref(next_gw);
	if (router)
		neigh_node_free_ref(router);
}
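
/* Compare the TQ towards the given gateway originator with the TQ towards
 * the currently selected gateway; trigger a reselection if the new one is
 * better (for selection classes above 3 it has to be better by at least
 * gw_sel_class TQ points) or if no usable gateway is currently selected. */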
void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
	struct orig_node *curr_gw_orig;
	struct neigh_node *router_gw = NULL, *router_orig = NULL;
	uint8_t gw_tq_avg, orig_tq_avg;

	curr_gw_orig = gw_get_selected_orig(bat_priv);
	if (!curr_gw_orig)
		goto deselect;

	router_gw = orig_node_get_router(curr_gw_orig);
	if (!router_gw)
		goto deselect;

	/* this node already is the gateway */
	if (curr_gw_orig == orig_node)
		goto out;

	router_orig = orig_node_get_router(orig_node);
	if (!router_orig)
		goto out;

	gw_tq_avg = router_gw->tq_avg;
	orig_tq_avg = router_orig->tq_avg;

	/* the TQ value has to be better */
	if (orig_tq_avg < gw_tq_avg)
		goto out;

	/**
	 * if the routing class is greater than 3 the value tells us how much
	 * greater the TQ value of the new gateway must be
	 **/
	if ((atomic_read(&bat_priv->gw_sel_class) > 3) &&
	    (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class)))
		goto out;

	bat_dbg(DBG_BATMAN, bat_priv,
		"Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n",
		gw_tq_avg, orig_tq_avg);

deselect:
	gw_deselect(bat_priv);
out:
	if (curr_gw_orig)
		orig_node_free_ref(curr_gw_orig);
	if (router_gw)
		neigh_node_free_ref(router_gw);
	if (router_orig)
		neigh_node_free_ref(router_orig);

	return;
}
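
/* Allocate a gw_node for the given gateway originator and add it to the
 * gateway list under gw_list_lock. */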
static void gw_node_add(struct bat_priv *bat_priv,
			struct orig_node *orig_node, uint8_t new_gwflags)
{
	struct gw_node *gw_node;
	int down, up;

	gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
	if (!gw_node)
		return;

	INIT_HLIST_NODE(&gw_node->list);
	gw_node->orig_node = orig_node;
	atomic_set(&gw_node->refcount, 1);

	spin_lock_bh(&bat_priv->gw_list_lock);
	hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
	spin_unlock_bh(&bat_priv->gw_list_lock);

	gw_bandwidth_to_kbit(new_gwflags, &down, &up);
	bat_dbg(DBG_BATMAN, bat_priv,
		"Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n",
		orig_node->orig, new_gwflags,
		(down > 2048 ? down / 1024 : down),
		(down > 2048 ? "MBit" : "KBit"),
		(up > 2048 ? up / 1024 : up),
		(up > 2048 ? "MBit" : "KBit"));
}
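
/* Update the gateway list when an originator's gateway flags change: refresh
 * an existing entry, mark it as deleted when the flags are cleared (forcing a
 * reselection if it was the selected gateway), or add a new entry otherwise. */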
void gw_node_update(struct bat_priv *bat_priv,
		    struct orig_node *orig_node, uint8_t new_gwflags)
{
	struct hlist_node *node;
	struct gw_node *gw_node, *curr_gw;

	/**
	 * Note: We don't need a NULL check here, since curr_gw never gets
	 * dereferenced. If curr_gw is NULL we also should not exit as we may
	 * have this gateway in our list (duplication check!) even though we
	 * have no currently selected gateway.
	 **/
	curr_gw = gw_get_selected_gw_node(bat_priv);

	rcu_read_lock();
	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
		if (gw_node->orig_node != orig_node)
			continue;

		bat_dbg(DBG_BATMAN, bat_priv,
			"Gateway class of originator %pM changed from %i to %i\n",
			orig_node->orig, gw_node->orig_node->gw_flags,
			new_gwflags);

		gw_node->deleted = 0;

		if (new_gwflags == NO_FLAGS) {
			gw_node->deleted = jiffies;
			bat_dbg(DBG_BATMAN, bat_priv,
				"Gateway %pM removed from gateway list\n",
				orig_node->orig);

			if (gw_node == curr_gw)
				goto deselect;
		}

		goto unlock;
	}

	if (new_gwflags == NO_FLAGS)
		goto unlock;

	gw_node_add(bat_priv, orig_node, new_gwflags);
	goto unlock;

deselect:
	gw_deselect(bat_priv);
unlock:
	rcu_read_unlock();

	if (curr_gw)
		gw_node_free_ref(curr_gw);
}

void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
	gw_node_update(bat_priv, orig_node, 0);
}
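
/* Remove gateway entries that have been marked as deleted for more than
 * 2 * PURGE_TIMEOUT, or all entries once the mesh is no longer active, and
 * deselect the current gateway if it gets purged. */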
void gw_node_purge(struct bat_priv *bat_priv)
{
	struct gw_node *gw_node, *curr_gw;
	struct hlist_node *node, *node_tmp;
	unsigned long timeout = msecs_to_jiffies(2 * PURGE_TIMEOUT);
	int do_deselect = 0;

	curr_gw = gw_get_selected_gw_node(bat_priv);

	spin_lock_bh(&bat_priv->gw_list_lock);

	hlist_for_each_entry_safe(gw_node, node, node_tmp,
				  &bat_priv->gw_list, list) {
		if (((!gw_node->deleted) ||
		     (time_before(jiffies, gw_node->deleted + timeout))) &&
		    atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)
			continue;

		if (curr_gw == gw_node)
			do_deselect = 1;

		hlist_del_rcu(&gw_node->list);
		gw_node_free_ref(gw_node);
	}

	spin_unlock_bh(&bat_priv->gw_list_lock);

	/* gw_deselect() needs to acquire the gw_list_lock */
	if (do_deselect)
		gw_deselect(bat_priv);

	if (curr_gw)
		gw_node_free_ref(curr_gw);
}

/**
 * fails if orig_node has no router
 */
static int _write_buffer_text(struct bat_priv *bat_priv, struct seq_file *seq,
			      const struct gw_node *gw_node)
{
	struct gw_node *curr_gw;
	struct neigh_node *router;
	int down, up, ret = -1;

	gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);

	router = orig_node_get_router(gw_node->orig_node);
	if (!router)
		goto out;

	curr_gw = gw_get_selected_gw_node(bat_priv);

	ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
			 (curr_gw == gw_node ? "=>" : "  "),
			 gw_node->orig_node->orig,
			 router->tq_avg, router->addr,
			 router->if_incoming->net_dev->name,
			 gw_node->orig_node->gw_flags,
			 (down > 2048 ? down / 1024 : down),
			 (down > 2048 ? "MBit" : "KBit"),
			 (up > 2048 ? up / 1024 : up),
			 (up > 2048 ? "MBit" : "KBit"));

	neigh_node_free_ref(router);
	if (curr_gw)
		gw_node_free_ref(curr_gw);
out:
	return ret;
}

int gw_client_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hard_iface *primary_if;
	struct gw_node *gw_node;
	struct hlist_node *node;
	int gw_count = 0, ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "      %-12s (%s/%i) %17s [%10s]: gw_class ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
		   "Gateway", "#", TQ_MAX_VALUE, "Nexthop", "outgoingIF",
		   SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name);

	rcu_read_lock();
	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
		if (gw_node->deleted)
			continue;

		/* fails if orig_node has no router */
		if (_write_buffer_text(bat_priv, seq, gw_node) < 0)
			continue;

		gw_count++;
	}
	rcu_read_unlock();

	if (gw_count == 0)
		seq_printf(seq, "No gateways in range ...\n");

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}
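
/* Walk the DHCP option list of the (linearized) packet and check whether
 * option 53 (message type) announces a DHCP_REQUEST. */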
static bool is_type_dhcprequest(struct sk_buff *skb, int header_len)
{
	int ret = false;
	unsigned char *p;
	int pkt_len;

	if (skb_linearize(skb) < 0)
		goto out;

	pkt_len = skb_headlen(skb);

	if (pkt_len < header_len + DHCP_OPTIONS_OFFSET + 1)
		goto out;

	p = skb->data + header_len + DHCP_OPTIONS_OFFSET;
	pkt_len -= header_len + DHCP_OPTIONS_OFFSET + 1;

	/* Access the dhcp option lists. Each entry is made up by:
	 * - octet 1: option type
	 * - octet 2: option data len (only if type != 255 and 0)
	 * - octet 3: option data */
	while (*p != 255 && !ret) {
		/* p now points to the first octet: option type */
		if (*p == 53) {
			/* type 53 is the message type option.
			 * Jump the len octet and go to the data octet */
			if (pkt_len < 2)
				goto out;
			p += 2;

			/* check if the message type is what we need */
			if (*p == DHCP_REQUEST)
				ret = true;
			break;
		} else if (*p == 0) {
			/* option type 0 (padding), just go forward */
			if (pkt_len < 1)
				goto out;
			pkt_len--;
			p++;
		} else {
			/* This is any other option. So we get the length... */
			if (pkt_len < 1)
				goto out;
			pkt_len--;
			p++;

			/* ...and then we jump over the data */
			if (pkt_len < 1 + (*p))
				goto out;
			pkt_len -= 1 + (*p);
			p += 1 + (*p);
		}
	}
out:
	return ret;
}
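
/* Check whether the skb carries a DHCP packet (UDP towards port 67 for IPv4
 * or 547 for IPv6, optionally behind a VLAN tag) and advance *header_len
 * past the parsed headers. */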
bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
{
	struct ethhdr *ethhdr;
	struct iphdr *iphdr;
	struct ipv6hdr *ipv6hdr;
	struct udphdr *udphdr;

	/* check for ethernet header */
	if (!pskb_may_pull(skb, *header_len + ETH_HLEN))
		return false;
	ethhdr = (struct ethhdr *)skb->data;
	*header_len += ETH_HLEN;

	/* check for initial vlan header */
	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
		if (!pskb_may_pull(skb, *header_len + VLAN_HLEN))
			return false;
		ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
		*header_len += VLAN_HLEN;
	}

	/* check for ip header */
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr)))
			return false;
		iphdr = (struct iphdr *)(skb->data + *header_len);
		*header_len += iphdr->ihl * 4;

		/* check for udp header */
		if (iphdr->protocol != IPPROTO_UDP)
			return false;

		break;
	case ETH_P_IPV6:
		if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr)))
			return false;
		ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len);
		*header_len += sizeof(*ipv6hdr);

		/* check for udp header */
		if (ipv6hdr->nexthdr != IPPROTO_UDP)
			return false;

		break;
	default:
		return false;
	}

	if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
		return false;
	udphdr = (struct udphdr *)(skb->data + *header_len);
	*header_len += sizeof(*udphdr);

	/* check for bootp port */
	if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
	    (ntohs(udphdr->dest) != 67))
		return false;

	if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) &&
	    (ntohs(udphdr->dest) != 547))
		return false;

	return true;
}
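
/* For a unicast DHCP request addressed to a gateway other than the selected
 * one, check whether the TQ towards that gateway is worse (by more than
 * GW_THRESHOLD) than the TQ towards the gateway this node would use itself. */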
bool gw_out_of_range(struct bat_priv *bat_priv,
		     struct sk_buff *skb, struct ethhdr *ethhdr)
{
	struct neigh_node *neigh_curr = NULL, *neigh_old = NULL;
	struct orig_node *orig_dst_node = NULL;
	struct gw_node *curr_gw = NULL;
	bool ret, out_of_range = false;
	unsigned int header_len = 0;
	uint8_t curr_tq_avg;

	ret = gw_is_dhcp_target(skb, &header_len);
	if (!ret)
		goto out;

	orig_dst_node = transtable_search(bat_priv, ethhdr->h_source,
					  ethhdr->h_dest);
	if (!orig_dst_node)
		goto out;

	if (!orig_dst_node->gw_flags)
		goto out;

	ret = is_type_dhcprequest(skb, header_len);
	if (!ret)
		goto out;

	switch (atomic_read(&bat_priv->gw_mode)) {
	case GW_MODE_SERVER:
		/* If we are a GW then we are our best GW. We can artificially
		 * set the tq towards ourself as the maximum value */
		curr_tq_avg = TQ_MAX_VALUE;
		break;
	case GW_MODE_CLIENT:
		curr_gw = gw_get_selected_gw_node(bat_priv);
		if (!curr_gw)
			goto out;

		/* packet is going to our gateway */
		if (curr_gw->orig_node == orig_dst_node)
			goto out;

		/* If the dhcp packet has been sent to a different gw,
		 * we have to evaluate whether the old gw is still
		 * reliable enough */
		neigh_curr = find_router(bat_priv, curr_gw->orig_node, NULL);
		if (!neigh_curr)
			goto out;

		curr_tq_avg = neigh_curr->tq_avg;
		break;
	case GW_MODE_OFF:
	default:
		goto out;
	}

	neigh_old = find_router(bat_priv, orig_dst_node, NULL);
	if (!neigh_old)
		goto out;

	if (curr_tq_avg - neigh_old->tq_avg > GW_THRESHOLD)
		out_of_range = true;

out:
	if (orig_dst_node)
		orig_node_free_ref(orig_dst_node);
	if (curr_gw)
		gw_node_free_ref(curr_gw);
	if (neigh_old)
		neigh_node_free_ref(neigh_old);
	if (neigh_curr)
		neigh_node_free_ref(neigh_curr);
	return out_of_range;
}