net/batman-adv/gateway_client.c
/*
 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "gateway_client.h"
#include "gateway_common.h"
#include "hard-interface.h"
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>

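/* gw_node objects are reference counted; dropping the last reference
 * defers the kfree() to an RCU grace period via call_rcu(), so concurrent
 * rcu_read_lock() readers never dereference freed memory.
 */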
static void gw_node_free_rcu(struct rcu_head *rcu)
{
	struct gw_node *gw_node;

	gw_node = container_of(rcu, struct gw_node, rcu);
	kfree(gw_node);
}

static void gw_node_free_ref(struct gw_node *gw_node)
{
	if (atomic_dec_and_test(&gw_node->refcount))
		call_rcu(&gw_node->rcu, gw_node_free_rcu);
}

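/* return the orig_node of the currently selected gateway (or NULL) with
 * its refcount increased - the caller is responsible for releasing the
 * reference again
 */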
void *gw_get_selected(struct bat_priv *bat_priv)
{
	struct gw_node *curr_gateway_tmp;
	struct orig_node *orig_node = NULL;

	rcu_read_lock();
	curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
	if (!curr_gateway_tmp)
		goto out;

	orig_node = curr_gateway_tmp->orig_node;
	if (!orig_node)
		goto out;

	if (!atomic_inc_not_zero(&orig_node->refcount))
		orig_node = NULL;

out:
	rcu_read_unlock();
	return orig_node;
}

void gw_deselect(struct bat_priv *bat_priv)
{
	struct gw_node *gw_node;

	spin_lock_bh(&bat_priv->gw_list_lock);
	gw_node = rcu_dereference(bat_priv->curr_gw);
	rcu_assign_pointer(bat_priv->curr_gw, NULL);
	spin_unlock_bh(&bat_priv->gw_list_lock);

	if (gw_node)
		gw_node_free_ref(gw_node);
}

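/* make new_gw_node the selected gateway: take a reference on it, swap the
 * curr_gw pointer under gw_list_lock and drop the reference held for the
 * previous selection
 */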
static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
{
	struct gw_node *curr_gw_node;

	if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
		new_gw_node = NULL;

	spin_lock_bh(&bat_priv->gw_list_lock);
	curr_gw_node = rcu_dereference(bat_priv->curr_gw);
	rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
	spin_unlock_bh(&bat_priv->gw_list_lock);

	if (curr_gw_node)
		gw_node_free_ref(curr_gw_node);
}

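/* pick a gateway from the candidate list, but only while running in
 * GW_MODE_CLIENT and while no gateway is selected yet: gw_sel_class 1
 * weighs link quality against the advertised downlink bandwidth, all
 * other classes select on the best TQ alone
 */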
void gw_election(struct bat_priv *bat_priv)
{
	struct hlist_node *node;
	struct gw_node *gw_node, *curr_gw, *curr_gw_tmp = NULL;
	uint8_t max_tq = 0;
	uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
	int down, up;

	/*
	 * The batman daemon checks here if we already passed a full originator
	 * cycle in order to make sure we don't choose the first gateway we
	 * hear about. This check is based on the daemon's uptime which we
	 * don't have.
	 */
	if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
		return;

	rcu_read_lock();
	curr_gw = rcu_dereference(bat_priv->curr_gw);
	if (curr_gw) {
		rcu_read_unlock();
		return;
	}

	if (hlist_empty(&bat_priv->gw_list)) {

		if (curr_gw) {
			rcu_read_unlock();
			bat_dbg(DBG_BATMAN, bat_priv,
				"Removing selected gateway - "
				"no gateway in range\n");
			gw_deselect(bat_priv);
		} else
			rcu_read_unlock();

		return;
	}

	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
		if (!gw_node->orig_node->router)
			continue;

		if (gw_node->deleted)
			continue;

		switch (atomic_read(&bat_priv->gw_sel_class)) {
		case 1: /* fast connection */
			gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags,
					     &down, &up);

			tmp_gw_factor = (gw_node->orig_node->router->tq_avg *
					 gw_node->orig_node->router->tq_avg *
					 down * 100 * 100) /
					 (TQ_LOCAL_WINDOW_SIZE *
					  TQ_LOCAL_WINDOW_SIZE * 64);

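			/* gateways are ranked by tq_avg^2 times the
			 * advertised downlink bandwidth, normalised by
			 * TQ_LOCAL_WINDOW_SIZE^2 * 64; ties are broken by
			 * the plain TQ value below
			 */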
			if ((tmp_gw_factor > max_gw_factor) ||
			    ((tmp_gw_factor == max_gw_factor) &&
			     (gw_node->orig_node->router->tq_avg > max_tq)))
				curr_gw_tmp = gw_node;
			break;

		default: /**
			  * 2:  stable connection (use best statistic)
			  * 3:  fast-switch (use best statistic but change as
			  *     soon as a better gateway appears)
			  * XX: late-switch (use best statistic but change as
			  *     soon as a better gateway appears which has
			  *     $routing_class more tq points)
			  **/
			if (gw_node->orig_node->router->tq_avg > max_tq)
				curr_gw_tmp = gw_node;
			break;
		}

		if (gw_node->orig_node->router->tq_avg > max_tq)
			max_tq = gw_node->orig_node->router->tq_avg;

		if (tmp_gw_factor > max_gw_factor)
			max_gw_factor = tmp_gw_factor;
	}

	if (curr_gw != curr_gw_tmp) {
		if ((curr_gw) && (!curr_gw_tmp))
			bat_dbg(DBG_BATMAN, bat_priv,
				"Removing selected gateway - "
				"no gateway in range\n");
		else if ((!curr_gw) && (curr_gw_tmp))
			bat_dbg(DBG_BATMAN, bat_priv,
				"Adding route to gateway %pM "
				"(gw_flags: %i, tq: %i)\n",
				curr_gw_tmp->orig_node->orig,
				curr_gw_tmp->orig_node->gw_flags,
				curr_gw_tmp->orig_node->router->tq_avg);
		else
			bat_dbg(DBG_BATMAN, bat_priv,
				"Changing route to gateway %pM "
				"(gw_flags: %i, tq: %i)\n",
				curr_gw_tmp->orig_node->orig,
				curr_gw_tmp->orig_node->gw_flags,
				curr_gw_tmp->orig_node->router->tq_avg);

		gw_select(bat_priv, curr_gw_tmp);
	}

	rcu_read_unlock();
}

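/* deselect the current gateway if orig_node would be the better choice,
 * so that the next gw_election() run can switch over to it
 */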
void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
	struct gw_node *curr_gateway_tmp;
	uint8_t gw_tq_avg, orig_tq_avg;

	rcu_read_lock();
	curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
	if (!curr_gateway_tmp)
		goto out_rcu;

	if (!curr_gateway_tmp->orig_node)
		goto deselect_rcu;

	if (!curr_gateway_tmp->orig_node->router)
		goto deselect_rcu;

	/* this node already is the gateway */
	if (curr_gateway_tmp->orig_node == orig_node)
		goto out_rcu;

	if (!orig_node->router)
		goto out_rcu;

	gw_tq_avg = curr_gateway_tmp->orig_node->router->tq_avg;
	rcu_read_unlock();

	orig_tq_avg = orig_node->router->tq_avg;

	/* the TQ value has to be better */
	if (orig_tq_avg < gw_tq_avg)
		goto out;

	/*
	 * if the routing class is greater than 3 the value tells us how much
	 * greater the TQ value of the new gateway must be
	 */
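	/* e.g. with gw_sel_class == 10 the current gateway is kept unless
	 * the candidate's TQ is at least 10 points higher (illustrative
	 * value, not taken from the original code)
	 */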
	if ((atomic_read(&bat_priv->gw_sel_class) > 3) &&
	    (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class)))
		goto out;

	bat_dbg(DBG_BATMAN, bat_priv,
		"Restarting gateway selection: better gateway found (tq curr: "
		"%i, tq new: %i)\n",
		gw_tq_avg, orig_tq_avg);
	goto deselect;

out_rcu:
	rcu_read_unlock();
	goto out;
deselect_rcu:
	rcu_read_unlock();
deselect:
	gw_deselect(bat_priv);
out:
	return;
}

static void gw_node_add(struct bat_priv *bat_priv,
			struct orig_node *orig_node, uint8_t new_gwflags)
{
	struct gw_node *gw_node;
	int down, up;

	gw_node = kmalloc(sizeof(struct gw_node), GFP_ATOMIC);
	if (!gw_node)
		return;

	memset(gw_node, 0, sizeof(struct gw_node));
	INIT_HLIST_NODE(&gw_node->list);
	gw_node->orig_node = orig_node;
	atomic_set(&gw_node->refcount, 1);

	spin_lock_bh(&bat_priv->gw_list_lock);
	hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
	spin_unlock_bh(&bat_priv->gw_list_lock);

	gw_bandwidth_to_kbit(new_gwflags, &down, &up);
	bat_dbg(DBG_BATMAN, bat_priv,
		"Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n",
		orig_node->orig, new_gwflags,
		(down > 2048 ? down / 1024 : down),
		(down > 2048 ? "MBit" : "KBit"),
		(up > 2048 ? up / 1024 : up),
		(up > 2048 ? "MBit" : "KBit"));
}

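/* update the gateway entry for orig_node to the gateway class announced
 * in new_gwflags: existing entries are refreshed or marked deleted when
 * the class drops to 0, unknown gateways are added via gw_node_add()
 */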
void gw_node_update(struct bat_priv *bat_priv,
		    struct orig_node *orig_node, uint8_t new_gwflags)
{
	struct hlist_node *node;
	struct gw_node *gw_node;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
		if (gw_node->orig_node != orig_node)
			continue;

		bat_dbg(DBG_BATMAN, bat_priv,
			"Gateway class of originator %pM changed from "
			"%i to %i\n",
			orig_node->orig, gw_node->orig_node->gw_flags,
			new_gwflags);

		gw_node->deleted = 0;

		if (new_gwflags == 0) {
			gw_node->deleted = jiffies;
			bat_dbg(DBG_BATMAN, bat_priv,
				"Gateway %pM removed from gateway list\n",
				orig_node->orig);

			if (gw_node == rcu_dereference(bat_priv->curr_gw)) {
				rcu_read_unlock();
				gw_deselect(bat_priv);
				return;
			}
		}

		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	if (new_gwflags == 0)
		return;

	gw_node_add(bat_priv, orig_node, new_gwflags);
}

void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
	return gw_node_update(bat_priv, orig_node, 0);
}

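/* remove gateway entries that have been marked deleted for more than
 * 2 * PURGE_TIMEOUT seconds, or all of them once the mesh is no longer
 * MESH_ACTIVE
 */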
void gw_node_purge(struct bat_priv *bat_priv)
{
	struct gw_node *gw_node;
	struct hlist_node *node, *node_tmp;
	unsigned long timeout = 2 * PURGE_TIMEOUT * HZ;

	spin_lock_bh(&bat_priv->gw_list_lock);

	hlist_for_each_entry_safe(gw_node, node, node_tmp,
				  &bat_priv->gw_list, list) {
		if (((!gw_node->deleted) ||
		     (time_before(jiffies, gw_node->deleted + timeout))) &&
		    atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)
			continue;

		if (rcu_dereference(bat_priv->curr_gw) == gw_node)
			gw_deselect(bat_priv);

		hlist_del_rcu(&gw_node->list);
		gw_node_free_ref(gw_node);
	}

	spin_unlock_bh(&bat_priv->gw_list_lock);
}

static int _write_buffer_text(struct bat_priv *bat_priv,
			      struct seq_file *seq, struct gw_node *gw_node)
{
	struct gw_node *curr_gw;
	int down, up, ret;

	gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);

	rcu_read_lock();
	curr_gw = rcu_dereference(bat_priv->curr_gw);

	ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
			 (curr_gw == gw_node ? "=>" : "  "),
			 gw_node->orig_node->orig,
			 gw_node->orig_node->router->tq_avg,
			 gw_node->orig_node->router->addr,
			 gw_node->orig_node->router->if_incoming->net_dev->name,
			 gw_node->orig_node->gw_flags,
			 (down > 2048 ? down / 1024 : down),
			 (down > 2048 ? "MBit" : "KBit"),
			 (up > 2048 ? up / 1024 : up),
			 (up > 2048 ? "MBit" : "KBit"));

	rcu_read_unlock();
	return ret;
}

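/* seq_file handler printing the list of known gateways, one line per
 * candidate via _write_buffer_text() (presumably backing the gateway
 * list debugfs entry)
 */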
int gw_client_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct gw_node *gw_node;
	struct hlist_node *node;
	int gw_count = 0;

	if (!bat_priv->primary_if) {

		return seq_printf(seq, "BATMAN mesh %s disabled - please "
				  "specify interfaces to enable it\n",
				  net_dev->name);
	}

	if (bat_priv->primary_if->if_status != IF_ACTIVE) {

		return seq_printf(seq, "BATMAN mesh %s disabled - "
				  "primary interface not active\n",
				  net_dev->name);
	}

	seq_printf(seq, "      %-12s (%s/%i) %17s [%10s]: gw_class ... "
		   "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n",
		   "Gateway", "#", TQ_MAX_VALUE, "Nexthop",
		   "outgoingIF", SOURCE_VERSION, REVISION_VERSION_STR,
		   bat_priv->primary_if->net_dev->name,
		   bat_priv->primary_if->net_dev->dev_addr, net_dev->name);

	rcu_read_lock();
	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
		if (gw_node->deleted)
			continue;

		if (!gw_node->orig_node->router)
			continue;

		_write_buffer_text(bat_priv, seq, gw_node);
		gw_count++;
	}
	rcu_read_unlock();

	if (gw_count == 0)
		seq_printf(seq, "No gateways in range ...\n");

	return 0;
}

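/* classify an outgoing frame for gateway handling: returns 0 if it is not
 * a DHCPv4 (UDP port 67) or DHCPv6 (UDP port 547) client request or no
 * gateway is available, -1 if this node acts as a gateway server itself,
 * and 1 when a gateway is currently selected
 */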
int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
{
	struct ethhdr *ethhdr;
	struct iphdr *iphdr;
	struct ipv6hdr *ipv6hdr;
	struct udphdr *udphdr;
	unsigned int header_len = 0;

	if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF)
		return 0;

	/* check for ethernet header */
	if (!pskb_may_pull(skb, header_len + ETH_HLEN))
		return 0;
	ethhdr = (struct ethhdr *)skb->data;
	header_len += ETH_HLEN;

	/* check for initial vlan header */
	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
		if (!pskb_may_pull(skb, header_len + VLAN_HLEN))
			return 0;
		ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
		header_len += VLAN_HLEN;
	}

	/* check for ip header */
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		if (!pskb_may_pull(skb, header_len + sizeof(struct iphdr)))
			return 0;
		iphdr = (struct iphdr *)(skb->data + header_len);
		header_len += iphdr->ihl * 4;

		/* check for udp header */
		if (iphdr->protocol != IPPROTO_UDP)
			return 0;

		break;
	case ETH_P_IPV6:
		if (!pskb_may_pull(skb, header_len + sizeof(struct ipv6hdr)))
			return 0;
		ipv6hdr = (struct ipv6hdr *)(skb->data + header_len);
		header_len += sizeof(struct ipv6hdr);

		/* check for udp header */
		if (ipv6hdr->nexthdr != IPPROTO_UDP)
			return 0;

		break;
	default:
		return 0;
	}

	if (!pskb_may_pull(skb, header_len + sizeof(struct udphdr)))
		return 0;
	udphdr = (struct udphdr *)(skb->data + header_len);
	header_len += sizeof(struct udphdr);

	/* check for bootp port */
	if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
	    (ntohs(udphdr->dest) != 67))
		return 0;

	if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) &&
	    (ntohs(udphdr->dest) != 547))
		return 0;

	if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)
		return -1;

	rcu_read_lock();
	if (!rcu_dereference(bat_priv->curr_gw)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	return 1;
}