/*
 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "gateway_common.h"
#include "originator.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface */
int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
		    const uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (my_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */
	return dev_queue_xmit(skb);

send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
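
/* resize the packet buffer of the given interface. only the OGM header is
 * carried over into the new buffer; the old buffer and length are kept
 * untouched if the allocation fails */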
static void realloc_packet_buffer(struct hard_iface *hard_iface,
				  int new_len)
{
	unsigned char *new_buff;

	new_buff = kmalloc(new_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (new_buff) {
		memcpy(new_buff, hard_iface->packet_buff,
		       BATMAN_OGM_HLEN);

		kfree(hard_iface->packet_buff);
		hard_iface->packet_buff = new_buff;
		hard_iface->packet_len = new_len;
	}
}
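
/* enlarge the packet buffer to hold the pending local translation table
 * changes and append them behind the OGM header. returns the result of
 * tt_changes_fill_buffer() for the appended changes */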
/* when calling this function (hard_iface == primary_if) has to be true */
static int prepare_packet_buffer(struct bat_priv *bat_priv,
				 struct hard_iface *hard_iface)
{
	int new_len;

	new_len = BATMAN_OGM_HLEN +
		  tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));

	/* if we have too many changes for one packet don't send any
	 * and wait for the tt table request which will be fragmented */
	if (new_len > hard_iface->soft_iface->mtu)
		new_len = BATMAN_OGM_HLEN;

	realloc_packet_buffer(hard_iface, new_len);

	atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));

	/* reset the sending counter */
	atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);

	return tt_changes_fill_buffer(bat_priv,
				      hard_iface->packet_buff + BATMAN_OGM_HLEN,
				      hard_iface->packet_len - BATMAN_OGM_HLEN);
}
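
/* shrink the packet buffer back to the bare OGM header once the appended tt
 * changes have been sent often enough (see schedule_bat_ogm()) */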
static int reset_packet_buffer(struct bat_priv *bat_priv,
			       struct hard_iface *hard_iface)
{
	realloc_packet_buffer(hard_iface, BATMAN_OGM_HLEN);
	return 0;
}
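
/* prepare the next OGM for the given interface: on the primary interface the
 * pending tt changes are committed and packed into the packet buffer before
 * the OGM is handed to the routing algorithm via the bat_ogm_schedule() hook */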
void schedule_bat_ogm(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hard_iface *primary_if;
	int tt_num_changes = -1;

	if ((hard_iface->if_status == IF_NOT_IN_USE) ||
	    (hard_iface->if_status == IF_TO_BE_REMOVED))
		return;

	/**
	 * the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
		hard_iface->if_status = IF_ACTIVE;

	primary_if = primary_if_get_selected(bat_priv);

	if (hard_iface == primary_if) {
		/* if at least one change happened */
		if (atomic_read(&bat_priv->tt_local_changes) > 0) {
			tt_commit_changes(bat_priv);
			tt_num_changes = prepare_packet_buffer(bat_priv,
							       hard_iface);
		}

		/* if the changes have been sent often enough */
		if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
			tt_num_changes = reset_packet_buffer(bat_priv,
							     hard_iface);
	}

	if (primary_if)
		hardif_free_ref(primary_if);

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface, tt_num_changes);
}
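
/* free a forw_packet, dropping its skb and the interface reference it holds */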
static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}
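
/* enqueue the broadcast packet under the bcast list lock and arm its delayed
 * work so that send_outstanding_bcast_packet() fires after send_time jiffies */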
static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
				      struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and setup timers. broadcast packets
 * are sent multiple times to increase probability for being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct bat_priv *bat_priv,
			     const struct sk_buff *skb, unsigned long delay)
{
	struct hard_iface *primary_if = NULL;
	struct forw_packet *forw_packet;
	struct bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct bcast_packet *)newskb->data;
	bcast_packet->header.ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}
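
/* a minimal usage sketch for add_bcast_packet_to_list() (not from this file;
 * the caller and the delay value are assumptions for illustration only):
 * a tx path flooding a frame could do
 *
 *	if (add_bcast_packet_to_list(bat_priv, skb, 1) == NETDEV_TX_OK)
 *		consume_skb(skb);
 *
 * because the queue works on a private copy and never consumes the caller's
 * skb.
 */

/* delayed work handler for queued broadcasts: take the packet off the list,
 * clone and resend it on every hard interface attached to this soft interface
 * and re-arm the work until the packet has been broadcast three times */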
static void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct hard_iface *hard_iface;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct sk_buff *skb1;
	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			send_skb_packet(skb1, hard_iface, broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_add_bcast_packet_to_list(bat_priv, forw_packet,
					  msecs_to_jiffies(5));
		return;
	}

out:
	forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}
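
/* delayed work handler for queued OGMs: unlink the packet, emit it via the
 * routing algorithm's bat_ogm_emit() hook and, for our own OGMs, schedule the
 * next one right away so the queue's wake up time stays known */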
void send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct bat_priv *bat_priv;

	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/**
	 * we have to have at least one packet in the queue
	 * to determine the queue's wake up time unless we are
	 * shutting down
	 */
	if (forw_packet->own)
		schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	forw_packet_free(forw_packet);
}
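
/* cancel all pending broadcast and OGM packets - either all of them (when
 * hard_iface is NULL) or only those queued on the given interface */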
void purge_outstanding_packets(struct bat_priv *bat_priv,
			       const struct hard_iface *hard_iface)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	bool pending;

	if (hard_iface)
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets(): %s\n",
			hard_iface->net_dev->name);
	else
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/**
		 * send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/**
		 * send_outstanding_bat_ogm_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}