Staging: batman-adv: Limit queue lengths for batman and broadcast packets
drivers/staging/batman-adv/send.c
/*
 * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */
22 #include "main.h"
23 #include "send.h"
24 #include "routing.h"
25 #include "translation-table.h"
26 #include "soft-interface.h"
27 #include "hard-interface.h"
28 #include "types.h"
29 #include "vis.h"
30 #include "aggregation.h"

/* apply hop penalty for a normal link */
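/* worked example: with the values defined in main.h at the time of
 * writing (TQ_MAX_VALUE 255, TQ_HOP_PENALTY 10), an incoming tq of 255
 * leaves this function as (255 * 245) / 255 = 245 on every hop */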
static uint8_t hop_penalty(const uint8_t tq)
{
	return (tq * (TQ_MAX_VALUE - TQ_HOP_PENALTY)) / (TQ_MAX_VALUE);
}

/* when do we schedule our own packet to be sent */
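/* random32() % (2 * JITTER) yields 0..2*JITTER-1, so the next own OGM is
 * scheduled orig_interval +/- JITTER milliseconds from now; the trailing
 * "* HZ / 1000" converts the millisecond value to jiffies */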
static unsigned long own_send_time(struct bat_priv *bat_priv)
{
	return jiffies +
		(((atomic_read(&bat_priv->orig_interval) - JITTER +
		   (random32() % (2 * JITTER))) * HZ) / 1000);
}

/* when do we schedule a forwarded packet to be sent */
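/* forwarded OGMs only get a short random delay of 0..JITTER/2-1
 * milliseconds, again converted to jiffies via HZ / 1000 */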
static unsigned long forward_send_time(struct bat_priv *bat_priv)
{
	return jiffies + (((random32() % (JITTER/2)) * HZ) / 1000);
}

/* send out an already prepared packet to the given address via the
 * specified batman interface */
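/* the skb is consumed in every case: on success it is handed to
 * dev_queue_xmit(), on any error it is freed here and NET_XMIT_DROP
 * is returned */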
int send_skb_packet(struct sk_buff *skb,
		    struct batman_if *batman_if,
		    uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (batman_if->if_status != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!batman_if->net_dev))
		goto send_skb_err;

	if (!(batman_if->net_dev->flags & IFF_UP)) {
		printk(KERN_WARNING
		       "batman-adv: Interface %s is not up - can't send packet via that interface!\n",
		       batman_if->dev);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (my_skb_push(skb, sizeof(struct ethhdr)) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = batman_if->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */
	return dev_queue_xmit(skb);

send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/* sends a raw packet. */
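/* the given buffer is copied into a freshly allocated skb and handed to
 * send_skb_packet(); if the allocation fails the packet is silently
 * dropped */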
void send_raw_packet(unsigned char *pack_buff, int pack_buff_len,
		     struct batman_if *batman_if, uint8_t *dst_addr)
{
	struct sk_buff *skb;
	char *data;

	skb = dev_alloc_skb(pack_buff_len + sizeof(struct ethhdr));
	if (!skb)
		return;
	data = skb_put(skb, pack_buff_len + sizeof(struct ethhdr));
	memcpy(data + sizeof(struct ethhdr), pack_buff, pack_buff_len);
	/* pull back to the batman "network header" */
	skb_pull(skb, sizeof(struct ethhdr));
	send_skb_packet(skb, batman_if, dst_addr);
}

/* Send a packet to a given interface */
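/* walks all OGMs aggregated in packet_buff, adjusts the DIRECTLINK flag
 * of each one for this particular interface, logs them and finally
 * transmits the whole aggregate as a single broadcast */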
static void send_packet_to_if(struct forw_packet *forw_packet,
			      struct batman_if *batman_if)
{
	char *fwd_str;
	uint8_t packet_num;
	int16_t buff_pos;
	struct batman_packet *batman_packet;

	if (batman_if->if_status != IF_ACTIVE)
		return;

	packet_num = 0;
	buff_pos = 0;
	batman_packet = (struct batman_packet *)
		(forw_packet->packet_buff);

	/* adjust all flags and log packets */
	while (aggregated_packet(buff_pos,
				 forw_packet->packet_len,
				 batman_packet->num_hna)) {

		/* we might have aggregated direct link packets with an
		 * ordinary base packet */
		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
		    (forw_packet->if_incoming == batman_if))
			batman_packet->flags |= DIRECTLINK;
		else
			batman_packet->flags &= ~DIRECTLINK;

		fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
							    "Sending own" :
							    "Forwarding"));
		bat_dbg(DBG_BATMAN,
			"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d, IDF %s) on interface %s [%s]\n",
			fwd_str,
			(packet_num > 0 ? "aggregated " : ""),
			batman_packet->orig, ntohs(batman_packet->seqno),
			batman_packet->tq, batman_packet->ttl,
			(batman_packet->flags & DIRECTLINK ?
			 "on" : "off"),
			batman_if->dev, batman_if->addr_str);

		buff_pos += sizeof(struct batman_packet) +
			(batman_packet->num_hna * ETH_ALEN);
		packet_num++;
		batman_packet = (struct batman_packet *)
			(forw_packet->packet_buff + buff_pos);
	}

	send_raw_packet(forw_packet->packet_buff,
			forw_packet->packet_len,
			batman_if, broadcastAddr);
}

/* send a batman packet */
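/* two special cases are sent on the incoming interface only: packets with
 * DIRECTLINK set and a TTL of 1, and own OGMs of non-primary interfaces
 * (if_num > 0); everything else goes out on all interfaces */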
static void send_packet(struct forw_packet *forw_packet)
{
	struct batman_if *batman_if;
	struct batman_packet *batman_packet =
		(struct batman_packet *)(forw_packet->packet_buff);
	unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	if (!forw_packet->if_incoming) {
		printk(KERN_ERR "batman-adv: Error - can't forward packet: incoming iface not specified\n");
		return;
	}

	if (forw_packet->if_incoming->if_status != IF_ACTIVE)
		return;

	/* multihomed peer assumed */
	/* non-primary OGMs are only broadcasted on their interface */
	if ((directlink && (batman_packet->ttl == 1)) ||
	    (forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {

		/* FIXME: what about aggregated packets ? */
		bat_dbg(DBG_BATMAN,
			"%s packet (originator %pM, seqno %d, TTL %d) on interface %s [%s]\n",
			(forw_packet->own ? "Sending own" : "Forwarding"),
			batman_packet->orig, ntohs(batman_packet->seqno),
			batman_packet->ttl, forw_packet->if_incoming->dev,
			forw_packet->if_incoming->addr_str);

		send_raw_packet(forw_packet->packet_buff,
				forw_packet->packet_len,
				forw_packet->if_incoming,
				broadcastAddr);
		return;
	}

	/* broadcast on every interface */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list)
		send_packet_to_if(forw_packet, batman_if);
	rcu_read_unlock();
}
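
/* re-allocate the interface's OGM buffer so that it can carry the current
 * local HNA table; the old buffer is kept if the allocation fails */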
static void rebuild_batman_packet(struct batman_if *batman_if)
{
	int new_len;
	unsigned char *new_buff;
	struct batman_packet *batman_packet;

	new_len = sizeof(struct batman_packet) + (num_hna * ETH_ALEN);
	new_buff = kmalloc(new_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (new_buff) {
		memcpy(new_buff, batman_if->packet_buff,
		       sizeof(struct batman_packet));
		batman_packet = (struct batman_packet *)new_buff;

		batman_packet->num_hna = hna_local_fill_buffer(
			new_buff + sizeof(struct batman_packet),
			new_len - sizeof(struct batman_packet));

		kfree(batman_if->packet_buff);
		batman_if->packet_buff = new_buff;
		batman_if->packet_len = new_len;
	}
}
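
/* prepare the interface's own OGM (sequence number, VIS_SERVER flag,
 * HNA table if it changed) and queue it for transmission at
 * own_send_time() */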
void schedule_own_packet(struct batman_if *batman_if)
{
	/* FIXME: each batman_if will be attached to a softif */
	struct bat_priv *bat_priv = netdev_priv(soft_device);
	unsigned long send_time;
	struct batman_packet *batman_packet;
	int vis_server;

	if ((batman_if->if_status == IF_NOT_IN_USE) ||
	    (batman_if->if_status == IF_TO_BE_REMOVED))
		return;

	vis_server = atomic_read(&bat_priv->vis_mode);

	/**
	 * the interface gets activated here to avoid race conditions
	 * between the moment the interface is activated in
	 * hardif_activate_interface() (where the originator mac is set)
	 * and outdated packets (especially ones with uninitialized mac
	 * addresses) still sitting in the packet queue
	 */
	if (batman_if->if_status == IF_TO_BE_ACTIVATED)
		batman_if->if_status = IF_ACTIVE;

	/* if local hna has changed and interface is a primary interface */
	if ((atomic_read(&hna_local_changed)) &&
	    (batman_if == bat_priv->primary_if))
		rebuild_batman_packet(batman_if);

	/**
	 * NOTE: packet_buff might just have been re-allocated in
	 * rebuild_batman_packet()
	 */
	batman_packet = (struct batman_packet *)batman_if->packet_buff;

	/* change sequence number to network order */
	batman_packet->seqno = htons((uint16_t)atomic_read(&batman_if->seqno));

	if (vis_server == VIS_TYPE_SERVER_SYNC)
		batman_packet->flags = VIS_SERVER;
	else
		batman_packet->flags &= ~VIS_SERVER;

	/* could be read by receive_bat_packet() */
	atomic_inc(&batman_if->seqno);

	slide_own_bcast_window(batman_if);
	send_time = own_send_time(bat_priv);
	add_bat_packet_to_list(bat_priv,
			       batman_if->packet_buff,
			       batman_if->packet_len,
			       batman_if, 1, send_time);
}
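
/* queue a received OGM for rebroadcast: decrease the TTL, record
 * prev_sender and make sure the rebroadcast tq reflects our best ranking
 * neighbor before applying the hop penalty and handing the packet to
 * add_bat_packet_to_list() */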
void schedule_forward_packet(struct orig_node *orig_node,
			     struct ethhdr *ethhdr,
			     struct batman_packet *batman_packet,
			     uint8_t directlink, int hna_buff_len,
			     struct batman_if *if_incoming)
{
	/* FIXME: each batman_if will be attached to a softif */
	struct bat_priv *bat_priv = netdev_priv(soft_device);
	unsigned char in_tq, in_ttl, tq_avg = 0;
	unsigned long send_time;

	if (batman_packet->ttl <= 1) {
		bat_dbg(DBG_BATMAN, "ttl exceeded\n");
		return;
	}

	in_tq = batman_packet->tq;
	in_ttl = batman_packet->ttl;

	batman_packet->ttl--;
	memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

	/* rebroadcast tq of our best ranking neighbor to ensure the
	 * rebroadcast of our best tq value */
	if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {

		/* rebroadcast ogm of best ranking neighbor as is */
		if (!compare_orig(orig_node->router->addr, ethhdr->h_source)) {
			batman_packet->tq = orig_node->router->tq_avg;

			if (orig_node->router->last_ttl)
				batman_packet->ttl =
					orig_node->router->last_ttl - 1;
		}

		tq_avg = orig_node->router->tq_avg;
	}

	/* apply hop penalty */
	batman_packet->tq = hop_penalty(batman_packet->tq);

	bat_dbg(DBG_BATMAN, "Forwarding packet: tq_orig: %i, tq_avg: %i, tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
		in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
		batman_packet->ttl);

	batman_packet->seqno = htons(batman_packet->seqno);

	if (directlink)
		batman_packet->flags |= DIRECTLINK;
	else
		batman_packet->flags &= ~DIRECTLINK;

	send_time = forward_send_time(bat_priv);
	add_bat_packet_to_list(bat_priv,
			       (unsigned char *)batman_packet,
			       sizeof(struct batman_packet) + hna_buff_len,
			       if_incoming, 0, send_time);
}
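
/* release the skb and/or the raw buffer attached to a queued packet */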
static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	kfree(forw_packet->packet_buff);
	kfree(forw_packet);
}
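
/* enqueue the packet on forw_bcast_list and arm a delayed work item that
 * fires send_time jiffies from now */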
static void _add_bcast_packet_to_list(struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	unsigned long flags;
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_irqsave(&forw_bcast_list_lock, flags);
	hlist_add_head(&forw_packet->list, &forw_bcast_list);
	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)
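/* atomic_dec_not_zero() grabs one slot from a queue counter unless the
 * counter has already hit zero - this is how the queue length limits for
 * bcast_queue_left and batman_queue_left are enforced */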

/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct sk_buff *skb)
{
	struct forw_packet *forw_packet;

	if (!atomic_dec_not_zero(&bcast_queue_left)) {
		bat_dbg(DBG_BATMAN, "bcast packet queue full\n");
		goto out;
	}

	forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	skb = skb_copy(skb, GFP_ATOMIC);
	if (!skb)
		goto packet_free;

	skb_reset_mac_header(skb);

	forw_packet->skb = skb;
	forw_packet->packet_buff = NULL;

	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(forw_packet, 1);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bcast_queue_left);
out:
	return NETDEV_TX_BUSY;
}
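
/* work queue callback: broadcast a copy of the queued skb on every
 * interface, then re-arm the timer (roughly 5 ms later) until three
 * copies have gone out; afterwards free the packet and return its slot
 * to bcast_queue_left */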
void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batman_if *batman_if;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	unsigned long flags;
	struct sk_buff *skb1;

	spin_lock_irqsave(&forw_bcast_list_lock, flags);
	hlist_del(&forw_packet->list);
	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		/* send a copy of the saved skb */
		skb1 = skb_copy(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			send_skb_packet(skb1,
					batman_if, broadcastAddr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send and we are not shutting
	 * down */
	if ((forw_packet->num_packets < 3) &&
	    (atomic_read(&module_state) != MODULE_DEACTIVATING))
		_add_bcast_packet_to_list(forw_packet, ((5 * HZ) / 1000));
	else {
		forw_packet_free(forw_packet);
		atomic_inc(&bcast_queue_left);
	}
}
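
/* work queue callback: transmit a queued OGM; own OGMs immediately
 * schedule their successor so the queue never runs dry, forwarded OGMs
 * release their batman_queue_left slot instead */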
void send_outstanding_bat_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	unsigned long flags;

	spin_lock_irqsave(&forw_bat_list_lock, flags);
	hlist_del(&forw_packet->list);
	spin_unlock_irqrestore(&forw_bat_list_lock, flags);

	send_packet(forw_packet);

	/**
	 * we have to have at least one packet in the queue
	 * to determine the queue's wake up time unless we are
	 * shutting down
	 */
	if ((forw_packet->own) &&
	    (atomic_read(&module_state) != MODULE_DEACTIVATING))
		schedule_own_packet(forw_packet->if_incoming);

	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&batman_queue_left);

	forw_packet_free(forw_packet);
}
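
/* cancel the pending transmission of all queued packets, or only of those
 * that came in on the given interface if batman_if is set */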
void purge_outstanding_packets(struct batman_if *batman_if)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	unsigned long flags;

	if (batman_if)
		bat_dbg(DBG_BATMAN, "purge_outstanding_packets(): %s\n",
			batman_if->dev);
	else
		bat_dbg(DBG_BATMAN, "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_irqsave(&forw_bcast_list_lock, flags);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &forw_bcast_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((batman_if) &&
		    (forw_packet->if_incoming != batman_if))
			continue;

		spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

		/**
		 * send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_irqsave(&forw_bcast_list_lock, flags);
	}
	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

	/* free batman packet list */
	spin_lock_irqsave(&forw_bat_list_lock, flags);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &forw_bat_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((batman_if) &&
		    (forw_packet->if_incoming != batman_if))
			continue;

		spin_unlock_irqrestore(&forw_bat_list_lock, flags);

		/**
		 * send_outstanding_bat_packet() will lock the list to
		 * delete the item from the list
		 */
		cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_irqsave(&forw_bat_list_lock, flags);
	}
	spin_unlock_irqrestore(&forw_bat_list_lock, flags);
}