net/batman-adv/gateway_client.c
/*
 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */
#include "main.h"
#include "bat_sysfs.h"
#include "gateway_client.h"
#include "gateway_common.h"
#include "hard-interface.h"
#include "originator.h"
#include "routing.h"
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>
/* This is the offset of the options field in a dhcp packet starting at
 * the beginning of the dhcp header */
#define DHCP_OPTIONS_OFFSET 240
#define DHCP_REQUEST 3
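/* per RFC 2131 the fixed dhcp/bootp header is 236 bytes long and is
 * followed by the 4 byte magic cookie, hence the options start at
 * offset 240; a message type option (53) carrying the value 3 denotes
 * a DHCPREQUEST */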
static void gw_node_free_ref(struct gw_node *gw_node)
{
        if (atomic_dec_and_test(&gw_node->refcount))
                kfree_rcu(gw_node, rcu);
}
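/* return the currently selected gateway with its refcount increased, or
 * NULL if no gateway is selected or its refcount already dropped to zero */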
static struct gw_node *gw_get_selected_gw_node(struct bat_priv *bat_priv)
{
        struct gw_node *gw_node;

        rcu_read_lock();
        gw_node = rcu_dereference(bat_priv->curr_gw);
        if (!gw_node)
                goto out;

        if (!atomic_inc_not_zero(&gw_node->refcount))
                gw_node = NULL;

out:
        rcu_read_unlock();
        return gw_node;
}
struct orig_node *gw_get_selected_orig(struct bat_priv *bat_priv)
{
        struct gw_node *gw_node;
        struct orig_node *orig_node = NULL;

        gw_node = gw_get_selected_gw_node(bat_priv);
        if (!gw_node)
                goto out;

        rcu_read_lock();
        orig_node = gw_node->orig_node;
        if (!orig_node)
                goto unlock;

        if (!atomic_inc_not_zero(&orig_node->refcount))
                orig_node = NULL;

unlock:
        rcu_read_unlock();
out:
        if (gw_node)
                gw_node_free_ref(gw_node);
        return orig_node;
}
static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
{
        struct gw_node *curr_gw_node;

        spin_lock_bh(&bat_priv->gw_list_lock);

        if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
                new_gw_node = NULL;

        curr_gw_node = rcu_dereference_protected(bat_priv->curr_gw, 1);
        rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);

        if (curr_gw_node)
                gw_node_free_ref(curr_gw_node);

        spin_unlock_bh(&bat_priv->gw_list_lock);
}
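/* does not switch the gateway right away - it merely flags the current
 * selection for re-evaluation the next time gw_election() consumes the
 * gw_reselect counter */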
void gw_deselect(struct bat_priv *bat_priv)
{
        atomic_set(&bat_priv->gw_reselect, 1);
}
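/* pick the best gateway from the gateway list: gw_sel_class 1 weighs the
 * squared tq_avg of the gateway's router against its advertised downlink
 * bandwidth, all other classes simply choose the highest tq_avg */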
static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv)
{
        struct neigh_node *router;
        struct hlist_node *node;
        struct gw_node *gw_node, *curr_gw = NULL;
        uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
        uint8_t max_tq = 0;
        int down, up;

        rcu_read_lock();
        hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
                if (gw_node->deleted)
                        continue;

                router = orig_node_get_router(gw_node->orig_node);
                if (!router)
                        continue;

                if (!atomic_inc_not_zero(&gw_node->refcount))
                        goto next;

                switch (atomic_read(&bat_priv->gw_sel_class)) {
                case 1: /* fast connection */
                        gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags,
                                             &down, &up);

                        tmp_gw_factor = (router->tq_avg * router->tq_avg *
                                         down * 100 * 100) /
                                         (TQ_LOCAL_WINDOW_SIZE *
                                          TQ_LOCAL_WINDOW_SIZE * 64);

                        if ((tmp_gw_factor > max_gw_factor) ||
                            ((tmp_gw_factor == max_gw_factor) &&
                             (router->tq_avg > max_tq))) {
                                if (curr_gw)
                                        gw_node_free_ref(curr_gw);
                                curr_gw = gw_node;
                                atomic_inc(&curr_gw->refcount);
                        }
                        break;

                default: /**
                          * 2:  stable connection (use best statistic)
                          * 3:  fast-switch (use best statistic but change as
                          *     soon as a better gateway appears)
                          * XX: late-switch (use best statistic but change as
                          *     soon as a better gateway appears which has
                          *     $routing_class more tq points)
                          **/
                        if (router->tq_avg > max_tq) {
                                if (curr_gw)
                                        gw_node_free_ref(curr_gw);
                                curr_gw = gw_node;
                                atomic_inc(&curr_gw->refcount);
                        }
                        break;
                }

                if (router->tq_avg > max_tq)
                        max_tq = router->tq_avg;

                if (tmp_gw_factor > max_gw_factor)
                        max_gw_factor = tmp_gw_factor;

                gw_node_free_ref(gw_node);

next:
                neigh_node_free_ref(router);
        }
        rcu_read_unlock();

        return curr_gw;
}
void gw_election(struct bat_priv *bat_priv)
{
        struct gw_node *curr_gw = NULL, *next_gw = NULL;
        struct neigh_node *router = NULL;
        char gw_addr[18] = { '\0' };

        /**
         * The batman daemon checks here if we already passed a full originator
         * cycle in order to make sure we don't choose the first gateway we
         * hear about. This check is based on the daemon's uptime which we
         * don't have.
         **/
        if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
                goto out;

        if (!atomic_dec_not_zero(&bat_priv->gw_reselect))
                goto out;

        curr_gw = gw_get_selected_gw_node(bat_priv);

        next_gw = gw_get_best_gw_node(bat_priv);

        if (curr_gw == next_gw)
                goto out;

        if (next_gw) {
                sprintf(gw_addr, "%pM", next_gw->orig_node->orig);

                router = orig_node_get_router(next_gw->orig_node);
                if (!router) {
                        gw_deselect(bat_priv);
                        goto out;
                }
        }

        if ((curr_gw) && (!next_gw)) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Removing selected gateway - no gateway in range\n");
                throw_uevent(bat_priv, UEV_GW, UEV_DEL, NULL);
        } else if ((!curr_gw) && (next_gw)) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n",
                        next_gw->orig_node->orig,
                        next_gw->orig_node->gw_flags,
                        router->tq_avg);
                throw_uevent(bat_priv, UEV_GW, UEV_ADD, gw_addr);
        } else {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Changing route to gateway %pM "
                        "(gw_flags: %i, tq: %i)\n",
                        next_gw->orig_node->orig,
                        next_gw->orig_node->gw_flags,
                        router->tq_avg);
                throw_uevent(bat_priv, UEV_GW, UEV_CHANGE, gw_addr);
        }

        gw_select(bat_priv, next_gw);

out:
        if (curr_gw)
                gw_node_free_ref(curr_gw);
        if (next_gw)
                gw_node_free_ref(next_gw);
        if (router)
                neigh_node_free_ref(router);
}
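/* compare the path quality towards the announced gateway (orig_node) with
 * the currently selected one and trigger a re-election when it is better,
 * honoring the gw_sel_class hysteresis applied below */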
void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
        struct orig_node *curr_gw_orig;
        struct neigh_node *router_gw = NULL, *router_orig = NULL;
        uint8_t gw_tq_avg, orig_tq_avg;

        curr_gw_orig = gw_get_selected_orig(bat_priv);
        if (!curr_gw_orig)
                goto deselect;

        router_gw = orig_node_get_router(curr_gw_orig);
        if (!router_gw)
                goto deselect;

        /* this node already is the gateway */
        if (curr_gw_orig == orig_node)
                goto out;

        router_orig = orig_node_get_router(orig_node);
        if (!router_orig)
                goto out;

        gw_tq_avg = router_gw->tq_avg;
        orig_tq_avg = router_orig->tq_avg;

        /* the TQ value has to be better */
        if (orig_tq_avg < gw_tq_avg)
                goto out;

        /**
         * if the routing class is greater than 3 the value tells us how much
         * greater the TQ value of the new gateway must be
         **/
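        /* e.g. with gw_sel_class set to 5 the new gateway's tq_avg has to
         * exceed that of the currently selected gateway by at least 5 points */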
        if ((atomic_read(&bat_priv->gw_sel_class) > 3) &&
            (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class)))
                goto out;

        bat_dbg(DBG_BATMAN, bat_priv,
                "Restarting gateway selection: better gateway found (tq curr: "
                "%i, tq new: %i)\n",
                gw_tq_avg, orig_tq_avg);

deselect:
        gw_deselect(bat_priv);
out:
        if (curr_gw_orig)
                orig_node_free_ref(curr_gw_orig);
        if (router_gw)
                neigh_node_free_ref(router_gw);
        if (router_orig)
                neigh_node_free_ref(router_orig);

        return;
}
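/* allocate a gw_node for this originator and add it to the gateway list;
 * gw_bandwidth_to_kbit() decodes the advertised gw_flags into down-/upload
 * bandwidth in kbit/s, printed as MBit once the value exceeds 2048 kbit */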
static void gw_node_add(struct bat_priv *bat_priv,
                        struct orig_node *orig_node, uint8_t new_gwflags)
{
        struct gw_node *gw_node;
        int down, up;

        gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
        if (!gw_node)
                return;

        INIT_HLIST_NODE(&gw_node->list);
        gw_node->orig_node = orig_node;
        atomic_set(&gw_node->refcount, 1);

        spin_lock_bh(&bat_priv->gw_list_lock);
        hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
        spin_unlock_bh(&bat_priv->gw_list_lock);

        gw_bandwidth_to_kbit(new_gwflags, &down, &up);
        bat_dbg(DBG_BATMAN, bat_priv,
                "Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n",
                orig_node->orig, new_gwflags,
                (down > 2048 ? down / 1024 : down),
                (down > 2048 ? "MBit" : "KBit"),
                (up > 2048 ? up / 1024 : up),
                (up > 2048 ? "MBit" : "KBit"));
}
void gw_node_update(struct bat_priv *bat_priv,
                    struct orig_node *orig_node, uint8_t new_gwflags)
{
        struct hlist_node *node;
        struct gw_node *gw_node, *curr_gw;

        /**
         * Note: We don't need a NULL check here, since curr_gw never gets
         * dereferenced. If curr_gw is NULL we also should not exit as we may
         * have this gateway in our list (duplication check!) even though we
         * have no currently selected gateway.
         **/
        curr_gw = gw_get_selected_gw_node(bat_priv);

        rcu_read_lock();
        hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
                if (gw_node->orig_node != orig_node)
                        continue;

                bat_dbg(DBG_BATMAN, bat_priv,
                        "Gateway class of originator %pM changed from "
                        "%i to %i\n",
                        orig_node->orig, gw_node->orig_node->gw_flags,
                        new_gwflags);

                gw_node->deleted = 0;

                if (new_gwflags == NO_FLAGS) {
                        gw_node->deleted = jiffies;
                        bat_dbg(DBG_BATMAN, bat_priv,
                                "Gateway %pM removed from gateway list\n",
                                orig_node->orig);

                        if (gw_node == curr_gw)
                                goto deselect;
                }

                goto unlock;
        }

        if (new_gwflags == NO_FLAGS)
                goto unlock;

        gw_node_add(bat_priv, orig_node, new_gwflags);
        goto unlock;

deselect:
        gw_deselect(bat_priv);
unlock:
        rcu_read_unlock();

        if (curr_gw)
                gw_node_free_ref(curr_gw);
}
void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
        gw_node_update(bat_priv, orig_node, 0);
}
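/* remove gateway nodes that have been marked as deleted for longer than
 * 2 * PURGE_TIMEOUT, or all of them once the mesh is no longer active */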
void gw_node_purge(struct bat_priv *bat_priv)
{
        struct gw_node *gw_node, *curr_gw;
        struct hlist_node *node, *node_tmp;
        unsigned long timeout = 2 * PURGE_TIMEOUT * HZ;
        int do_deselect = 0;

        curr_gw = gw_get_selected_gw_node(bat_priv);

        spin_lock_bh(&bat_priv->gw_list_lock);

        hlist_for_each_entry_safe(gw_node, node, node_tmp,
                                  &bat_priv->gw_list, list) {
                if (((!gw_node->deleted) ||
                     (time_before(jiffies, gw_node->deleted + timeout))) &&
                    atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)
                        continue;

                if (curr_gw == gw_node)
                        do_deselect = 1;

                hlist_del_rcu(&gw_node->list);
                gw_node_free_ref(gw_node);
        }

        spin_unlock_bh(&bat_priv->gw_list_lock);

        /* gw_deselect() is deferred until the gw_list_lock has been released */
        if (do_deselect)
                gw_deselect(bat_priv);

        if (curr_gw)
                gw_node_free_ref(curr_gw);
}
/**
 * fails if orig_node has no router
 */
static int _write_buffer_text(struct bat_priv *bat_priv, struct seq_file *seq,
                              const struct gw_node *gw_node)
{
        struct gw_node *curr_gw;
        struct neigh_node *router;
        int down, up, ret = -1;

        gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);

        router = orig_node_get_router(gw_node->orig_node);
        if (!router)
                goto out;

        curr_gw = gw_get_selected_gw_node(bat_priv);

        ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
                         (curr_gw == gw_node ? "=>" : "  "),
                         gw_node->orig_node->orig,
                         router->tq_avg, router->addr,
                         router->if_incoming->net_dev->name,
                         gw_node->orig_node->gw_flags,
                         (down > 2048 ? down / 1024 : down),
                         (down > 2048 ? "MBit" : "KBit"),
                         (up > 2048 ? up / 1024 : up),
                         (up > 2048 ? "MBit" : "KBit"));

        neigh_node_free_ref(router);
        if (curr_gw)
                gw_node_free_ref(curr_gw);
out:
        return ret;
}
int gw_client_seq_print_text(struct seq_file *seq, void *offset)
{
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hard_iface *primary_if;
        struct gw_node *gw_node;
        struct hlist_node *node;
        int gw_count = 0, ret = 0;

        primary_if = primary_if_get_selected(bat_priv);
        if (!primary_if) {
                ret = seq_printf(seq, "BATMAN mesh %s disabled - please "
                                 "specify interfaces to enable it\n",
                                 net_dev->name);
                goto out;
        }

        if (primary_if->if_status != IF_ACTIVE) {
                ret = seq_printf(seq, "BATMAN mesh %s disabled - "
                                 "primary interface not active\n",
                                 net_dev->name);
                goto out;
        }

        seq_printf(seq, " %-12s (%s/%i) %17s [%10s]: gw_class ... "
                   "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
                   "Gateway", "#", TQ_MAX_VALUE, "Nexthop",
                   "outgoingIF", SOURCE_VERSION, primary_if->net_dev->name,
                   primary_if->net_dev->dev_addr, net_dev->name);

        rcu_read_lock();
        hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
                if (gw_node->deleted)
                        continue;

                /* fails if orig_node has no router */
                if (_write_buffer_text(bat_priv, seq, gw_node) < 0)
                        continue;

                gw_count++;
        }
        rcu_read_unlock();

        if (gw_count == 0)
                seq_printf(seq, "No gateways in range ...\n");

out:
        if (primary_if)
                hardif_free_ref(primary_if);
        return ret;
}
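/* walk the DHCP option list of the (linearized) skb and return true if it
 * carries a message type option (53) whose value is DHCP_REQUEST */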
static bool is_type_dhcprequest(struct sk_buff *skb, int header_len)
{
        int ret = false;
        unsigned char *p;
        int pkt_len;

        if (skb_linearize(skb) < 0)
                goto out;

        pkt_len = skb_headlen(skb);

        if (pkt_len < header_len + DHCP_OPTIONS_OFFSET + 1)
                goto out;

        p = skb->data + header_len + DHCP_OPTIONS_OFFSET;
        pkt_len -= header_len + DHCP_OPTIONS_OFFSET + 1;

        /* Access the dhcp option list. Each entry is made up of:
         * - octet 1: option type
         * - octet 2: option data len (only if type is neither 255 nor 0)
         * - octet 3: option data */
        while (*p != 255 && !ret) {
                /* p now points to the first octet: option type */
                if (*p == 53) {
                        /* type 53 is the message type option.
                         * Jump the len octet and go to the data octet */
                        if (pkt_len < 2)
                                goto out;
                        p += 2;

                        /* check if the message type is what we need */
                        if (*p == DHCP_REQUEST)
                                ret = true;
                        break;
                } else if (*p == 0) {
                        /* option type 0 (padding), just go forward */
                        if (pkt_len < 1)
                                goto out;
                        pkt_len--;
                        p++;
                } else {
                        /* This is any other option. So we get the length... */
                        if (pkt_len < 1)
                                goto out;
                        pkt_len--;
                        p++;

                        /* ...and then we jump over the data */
                        if (pkt_len < *p)
                                goto out;
                        pkt_len -= *p;
                        p += (*p);
                }
        }
out:
        return ret;
}
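/* classify an outgoing frame for gateway handling: returns 0 if it is not
 * a DHCP client request (or gw_mode is off), -1 if this node acts as the
 * gateway server itself or if a DHCPREQUEST may still be served by the
 * previously used gateway (old_gw), and 1 otherwise */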
int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb,
                 struct orig_node *old_gw)
{
        struct ethhdr *ethhdr;
        struct iphdr *iphdr;
        struct ipv6hdr *ipv6hdr;
        struct udphdr *udphdr;
        struct gw_node *curr_gw;
        struct neigh_node *neigh_curr = NULL, *neigh_old = NULL;
        unsigned int header_len = 0;
        int ret = 1;

        if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF)
                return 0;

        /* check for ethernet header */
        if (!pskb_may_pull(skb, header_len + ETH_HLEN))
                return 0;
        ethhdr = (struct ethhdr *)skb->data;
        header_len += ETH_HLEN;

        /* check for initial vlan header */
        if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
                if (!pskb_may_pull(skb, header_len + VLAN_HLEN))
                        return 0;
                ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
                header_len += VLAN_HLEN;
        }

        /* check for ip header */
        switch (ntohs(ethhdr->h_proto)) {
        case ETH_P_IP:
                if (!pskb_may_pull(skb, header_len + sizeof(*iphdr)))
                        return 0;
                iphdr = (struct iphdr *)(skb->data + header_len);
                header_len += iphdr->ihl * 4;

                /* check for udp header */
                if (iphdr->protocol != IPPROTO_UDP)
                        return 0;

                break;
        case ETH_P_IPV6:
                if (!pskb_may_pull(skb, header_len + sizeof(*ipv6hdr)))
                        return 0;
                ipv6hdr = (struct ipv6hdr *)(skb->data + header_len);
                header_len += sizeof(*ipv6hdr);

                /* check for udp header */
                if (ipv6hdr->nexthdr != IPPROTO_UDP)
                        return 0;

                break;
        default:
                return 0;
        }

        if (!pskb_may_pull(skb, header_len + sizeof(*udphdr)))
                return 0;
        udphdr = (struct udphdr *)(skb->data + header_len);
        header_len += sizeof(*udphdr);

        /* check for the bootp/dhcp server port (67 for IPv4, 547 for IPv6) */
        if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
            (ntohs(udphdr->dest) != 67))
                return 0;

        if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) &&
            (ntohs(udphdr->dest) != 547))
                return 0;

        if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)
                return -1;

        curr_gw = gw_get_selected_gw_node(bat_priv);
        if (!curr_gw)
                return 0;

        /* If old_gw != NULL then this packet is unicast.
         * So, at this point we have to check the message type: if it is a
         * DHCPREQUEST we have to decide whether to drop it or not */
        if (old_gw && curr_gw->orig_node != old_gw) {
                if (is_type_dhcprequest(skb, header_len)) {
                        /* If the dhcp packet has been sent to a different gw,
                         * we have to evaluate whether the old gw is still
                         * reliable enough */
                        neigh_curr = find_router(bat_priv, curr_gw->orig_node,
                                                 NULL);
                        neigh_old = find_router(bat_priv, old_gw, NULL);
                        if (!neigh_curr || !neigh_old)
                                goto free_neigh;
                        if (neigh_curr->tq_avg - neigh_old->tq_avg <
                                                                GW_THRESHOLD)
                                ret = -1;
                }
        }
free_neigh:
        if (neigh_old)
                neigh_node_free_ref(neigh_old);
        if (neigh_curr)
                neigh_node_free_ref(neigh_curr);
        if (curr_gw)
                gw_node_free_ref(curr_gw);

        return ret;
}