Staging: batman-adv: receive packets directly using skbs
/*
 * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */
#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "types.h"
#include "vis.h"
#include "aggregation.h"

#include "compat.h"
/* apply hop penalty for a normal link */
static uint8_t hop_penalty(const uint8_t tq)
{
	return (tq * (TQ_MAX_VALUE - TQ_HOP_PENALTY)) / (TQ_MAX_VALUE);
}
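
/* For illustration (assuming the defaults of this era of the code,
 * TQ_MAX_VALUE = 255 and TQ_HOP_PENALTY = 10): hop_penalty(200)
 * yields 200 * 245 / 255 = 192, so the TQ metric decays a little on
 * every hop the OGM travels. */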
/* when do we schedule our own packet to be sent */
static unsigned long own_send_time(void)
{
	return jiffies +
		(((atomic_read(&originator_interval) - JITTER +
		   (random32() % (2 * JITTER))) * HZ) / 1000);
}
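
/* The resulting send time is spread uniformly over the window
 * [originator_interval - JITTER, originator_interval + JITTER) ms,
 * so that neighbouring nodes do not emit their OGMs in lockstep. */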
/* when do we schedule a forwarded packet to be sent */
static unsigned long forward_send_time(void)
{
	unsigned long send_time = jiffies; /* Starting now plus... */

	if (atomic_read(&aggregation_enabled))
		send_time += (((MAX_AGGREGATION_MS - (JITTER/2) +
				(random32() % JITTER)) * HZ) / 1000);
	else
		send_time += (((random32() % (JITTER/2)) * HZ) / 1000);

	return send_time;
}
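
/* With aggregation enabled, forwarded OGMs are deliberately held back
 * for roughly MAX_AGGREGATION_MS (plus/minus half the jitter) to give
 * further OGMs a chance to be merged into the same frame; without
 * aggregation, only a small random delay is added to de-synchronize
 * the rebroadcasts of neighbouring nodes. */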
/* send out an already prepared packet to the given address via the
 * specified batman interface */
int send_skb_packet(struct sk_buff *skb,
		    struct batman_if *batman_if,
		    uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (batman_if->if_active != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!batman_if->net_dev))
		goto send_skb_err;

	if (!(batman_if->net_dev->flags & IFF_UP)) {
		printk(KERN_WARNING
		       "batman-adv: Interface %s is not up - can't send packet via that interface!\n",
		       batman_if->dev);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (my_skb_push(skb, sizeof(struct ethhdr)) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = batman_if->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping it drops the packet and returns
	 * NET_XMIT_DROP (which is > 0). This will not be treated as an
	 * error. */
	return dev_queue_xmit(skb);

send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
/* sends a raw packet: wraps the given buffer in a freshly allocated
 * skb. Room for the ethernet header is reserved and pulled off again
 * right away, so that send_skb_packet() can push the header back in
 * without triggering a reallocation. */
void send_raw_packet(unsigned char *pack_buff, int pack_buff_len,
		     struct batman_if *batman_if, uint8_t *dst_addr)
{
	struct sk_buff *skb;
	char *data;

	skb = dev_alloc_skb(pack_buff_len + sizeof(struct ethhdr));
	if (!skb)
		return;

	data = skb_put(skb, pack_buff_len + sizeof(struct ethhdr));
	memcpy(data + sizeof(struct ethhdr), pack_buff, pack_buff_len);

	/* pull back to the batman "network header" */
	skb_pull(skb, sizeof(struct ethhdr));

	send_skb_packet(skb, batman_if, dst_addr);
}
/* Send a packet to a given interface */
static void send_packet_to_if(struct forw_packet *forw_packet,
			      struct batman_if *batman_if)
{
	char *fwd_str;
	uint8_t packet_num;
	int16_t buff_pos;
	struct batman_packet *batman_packet;
	char orig_str[ETH_STR_LEN];

	if (batman_if->if_active != IF_ACTIVE)
		return;

	packet_num = buff_pos = 0;
	batman_packet = (struct batman_packet *)
		(forw_packet->packet_buff);
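
	/* An aggregated packet_buff is simply a sequence of OGMs: each
	 * struct batman_packet is immediately followed by its num_hna
	 * HNA announcements of ETH_ALEN bytes each; buff_pos walks this
	 * sequence packet by packet. */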
	/* adjust all flags and log packets */
	while (aggregated_packet(buff_pos,
				 forw_packet->packet_len,
				 batman_packet->num_hna)) {

		/* we might have aggregated direct link packets with an
		 * ordinary base packet */
		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
		    (forw_packet->if_incoming == batman_if))
			batman_packet->flags |= DIRECTLINK;
		else
			batman_packet->flags &= ~DIRECTLINK;

		addr_to_string(orig_str, batman_packet->orig);
		fwd_str = (packet_num > 0 ? "Forwarding" :
			   (forw_packet->own ? "Sending own" : "Forwarding"));
		bat_dbg(DBG_BATMAN,
			"%s %spacket (originator %s, seqno %d, TQ %d, TTL %d, IDF %s) on interface %s [%s]\n",
			fwd_str,
			(packet_num > 0 ? "aggregated " : ""),
			orig_str, ntohs(batman_packet->seqno),
			batman_packet->tq, batman_packet->ttl,
			(batman_packet->flags & DIRECTLINK ?
			 "on" : "off"),
			batman_if->dev, batman_if->addr_str);

		buff_pos += sizeof(struct batman_packet) +
			(batman_packet->num_hna * ETH_ALEN);
		packet_num++;
		batman_packet = (struct batman_packet *)
			(forw_packet->packet_buff + buff_pos);
	}

	send_raw_packet(forw_packet->packet_buff,
			forw_packet->packet_len,
			batman_if, broadcastAddr);
}
/* send a batman packet */
static void send_packet(struct forw_packet *forw_packet)
{
	struct batman_if *batman_if;
	struct batman_packet *batman_packet =
		(struct batman_packet *)(forw_packet->packet_buff);
	char orig_str[ETH_STR_LEN];
	unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	if (!forw_packet->if_incoming) {
		printk(KERN_ERR "batman-adv: Error - can't forward packet: incoming iface not specified\n");
		return;
	}

	if (forw_packet->if_incoming->if_active != IF_ACTIVE)
		return;

	addr_to_string(orig_str, batman_packet->orig);

	/* multihomed peer assumed:
	 * non-primary OGMs are only broadcast on their interface */
	if ((directlink && (batman_packet->ttl == 1)) ||
	    (forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {

		/* FIXME: what about aggregated packets? */
		bat_dbg(DBG_BATMAN,
			"%s packet (originator %s, seqno %d, TTL %d) on interface %s [%s]\n",
			(forw_packet->own ? "Sending own" : "Forwarding"),
			orig_str, ntohs(batman_packet->seqno),
			batman_packet->ttl, forw_packet->if_incoming->dev,
			forw_packet->if_incoming->addr_str);

		send_raw_packet(forw_packet->packet_buff,
				forw_packet->packet_len,
				forw_packet->if_incoming,
				broadcastAddr);
		return;
	}

	/* broadcast on every interface */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list)
		send_packet_to_if(forw_packet, batman_if);
	rcu_read_unlock();
}
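
/* Re-size the interface's OGM buffer after the local HNA table has
 * changed: the new buffer holds the batman_packet header plus one
 * ETH_ALEN-sized entry per announced HNA. */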
static void rebuild_batman_packet(struct batman_if *batman_if)
{
	int new_len;
	unsigned char *new_buff;
	struct batman_packet *batman_packet;

	new_len = sizeof(struct batman_packet) + (num_hna * ETH_ALEN);
	new_buff = kmalloc(new_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (new_buff) {
		memcpy(new_buff, batman_if->packet_buff,
		       sizeof(struct batman_packet));
		batman_packet = (struct batman_packet *)new_buff;

		batman_packet->num_hna = hna_local_fill_buffer(
			new_buff + sizeof(struct batman_packet),
			new_len - sizeof(struct batman_packet));

		kfree(batman_if->packet_buff);
		batman_if->packet_buff = new_buff;
		batman_if->packet_len = new_len;
	}
}
void schedule_own_packet(struct batman_if *batman_if)
{
	unsigned long send_time;
	struct batman_packet *batman_packet;

	/* the interface is only marked active here, rather than in
	 * hardif_activate_interface() where the originator mac is set,
	 * to avoid a race in which outdated packets (especially ones
	 * with uninitialized mac addresses) still sitting in the packet
	 * queue would be sent out */
	if (batman_if->if_active == IF_TO_BE_ACTIVATED)
		batman_if->if_active = IF_ACTIVE;

	/* if the local hna has changed and this is a primary interface */
	if ((atomic_read(&hna_local_changed)) && (batman_if->if_num == 0))
		rebuild_batman_packet(batman_if);

	/* NOTE: packet_buff might just have been re-allocated in
	 * rebuild_batman_packet() */
	batman_packet = (struct batman_packet *)batman_if->packet_buff;

	/* change sequence number to network order */
	batman_packet->seqno = htons((uint16_t)atomic_read(&batman_if->seqno));

	if (is_vis_server())
		batman_packet->flags = VIS_SERVER;
	else
		batman_packet->flags = 0;

	/* the seqno could be read concurrently by receive_bat_packet() */
	atomic_inc(&batman_if->seqno);

	slide_own_bcast_window(batman_if);
	send_time = own_send_time();
	add_bat_packet_to_list(batman_if->packet_buff,
			       batman_if->packet_len, batman_if, 1, send_time);
}
void schedule_forward_packet(struct orig_node *orig_node,
			     struct ethhdr *ethhdr,
			     struct batman_packet *batman_packet,
			     uint8_t directlink, int hna_buff_len,
			     struct batman_if *if_incoming)
{
	unsigned char in_tq, in_ttl, tq_avg = 0;
	unsigned long send_time;

	if (batman_packet->ttl <= 1) {
		bat_dbg(DBG_BATMAN, "ttl exceeded\n");
		return;
	}

	in_tq = batman_packet->tq;
	in_ttl = batman_packet->ttl;

	batman_packet->ttl--;
	memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

	/* rebroadcast the tq of our best ranking neighbor to ensure the
	 * rebroadcast of our best tq value */
	if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {

		/* rebroadcast ogm of best ranking neighbor as is */
		if (!compare_orig(orig_node->router->addr, ethhdr->h_source)) {
			batman_packet->tq = orig_node->router->tq_avg;

			if (orig_node->router->last_ttl)
				batman_packet->ttl =
					orig_node->router->last_ttl - 1;
		}

		tq_avg = orig_node->router->tq_avg;
	}

	/* apply hop penalty */
	batman_packet->tq = hop_penalty(batman_packet->tq);

	bat_dbg(DBG_BATMAN, "Forwarding packet: tq_orig: %i, tq_avg: %i, tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
		in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
		batman_packet->ttl);
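
	/* the seqno was converted to host byte order on the receive
	 * path; switch it back to network order before the packet goes
	 * out again */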
	batman_packet->seqno = htons(batman_packet->seqno);

	if (directlink)
		batman_packet->flags |= DIRECTLINK;
	else
		batman_packet->flags &= ~DIRECTLINK;

	send_time = forward_send_time();
	add_bat_packet_to_list((unsigned char *)batman_packet,
			       sizeof(struct batman_packet) + hna_buff_len,
			       if_incoming, 0, send_time);
}
static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	kfree(forw_packet->packet_buff);
	kfree(forw_packet);
}
static void _add_bcast_packet_to_list(struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	unsigned long flags;

	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_irqsave(&forw_bcast_list_lock, flags);
	hlist_add_head(&forw_packet->list, &forw_bcast_list);
	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

	/* start timer for this packet; send_time is a delay in jiffies */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}
void add_bcast_packet_to_list(struct sk_buff *skb)
{
	struct forw_packet *forw_packet;

	forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		return;

	/* keep a private copy: the caller may free or reuse its skb */
	skb = skb_copy(skb, GFP_ATOMIC);
	if (!skb) {
		kfree(forw_packet);
		return;
	}

	skb_reset_mac_header(skb);

	forw_packet->skb = skb;
	forw_packet->packet_buff = NULL;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(forw_packet, 1);
}
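
/* Broadcasts are not sent just once: each queued bcast is transmitted
 * on every interface and then re-queued with a small delay (5 ms worth
 * of jiffies) until it has been sent three times, to offset losses on
 * flaky links. */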
void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batman_if *batman_if;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	unsigned long flags;
	struct sk_buff *skb1;

	spin_lock_irqsave(&forw_bcast_list_lock, flags);
	hlist_del(&forw_packet->list);
	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		/* send a copy of the saved skb */
		skb1 = skb_copy(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			send_skb_packet(skb1, batman_if, broadcastAddr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send and we are not
	 * shutting down */
	if ((forw_packet->num_packets < 3) &&
	    (atomic_read(&module_state) != MODULE_DEACTIVATING))
		_add_bcast_packet_to_list(forw_packet, ((5 * HZ) / 1000));
	else
		forw_packet_free(forw_packet);
}
void send_outstanding_bat_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	unsigned long flags;

	spin_lock_irqsave(&forw_bat_list_lock, flags);
	hlist_del(&forw_packet->list);
	spin_unlock_irqrestore(&forw_bat_list_lock, flags);

	send_packet(forw_packet);

	/* we have to have at least one packet in the queue
	 * to determine the queue's wake up time unless we are
	 * shutting down */
	if ((forw_packet->own) &&
	    (atomic_read(&module_state) != MODULE_DEACTIVATING))
		schedule_own_packet(forw_packet->if_incoming);

	forw_packet_free(forw_packet);
}
void purge_outstanding_packets(void)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	unsigned long flags;

	bat_dbg(DBG_BATMAN, "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_irqsave(&forw_bcast_list_lock, flags);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &forw_bcast_list, list) {

		spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

		/* the lock is dropped because cancel_delayed_work_sync()
		 * may sleep; send_outstanding_bcast_packet() will take
		 * the lock itself to delete the item from the list */
		cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_irqsave(&forw_bcast_list_lock, flags);
	}
	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);

	/* free batman packet list */
	spin_lock_irqsave(&forw_bat_list_lock, flags);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &forw_bat_list, list) {

		spin_unlock_irqrestore(&forw_bat_list_lock, flags);

		/* same as above: send_outstanding_bat_packet() will lock
		 * the list to delete the item from it */
		cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_irqsave(&forw_bat_list_lock, flags);
	}
	spin_unlock_irqrestore(&forw_bat_list_lock, flags);
}