/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#include "main.h"
#include "unicast.h"
#include "send.h"
#include "soft-interface.h"
#include "gateway_client.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "hard-interface.h"
static struct sk_buff *
batadv_frag_merge_packet(struct list_head *head,
			 struct batadv_frag_packet_list_entry *tfp,
			 struct sk_buff *skb)
{
	struct batadv_unicast_frag_packet *up;
	struct sk_buff *tmp_skb;
	struct batadv_unicast_packet *unicast_packet;
	int hdr_len = sizeof(*unicast_packet);
	int uni_diff = sizeof(*up) - hdr_len;
	uint8_t *packet_pos;

	up = (struct batadv_unicast_frag_packet *)skb->data;
	/* set skb to the first part and tmp_skb to the second part */
	if (up->flags & BATADV_UNI_FRAG_HEAD) {
		tmp_skb = tfp->skb;
	} else {
		tmp_skb = skb;
		skb = tfp->skb;
	}

	if (skb_linearize(skb) < 0 || skb_linearize(tmp_skb) < 0)
		goto err;

	skb_pull(tmp_skb, sizeof(*up));
	if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0)
		goto err;

	/* move free entry to end */
	tfp->skb = NULL;
	tfp->seqno = 0;
	list_move_tail(&tfp->list, head);

	memcpy(skb_put(skb, tmp_skb->len), tmp_skb->data, tmp_skb->len);
	kfree_skb(tmp_skb);

	memmove(skb->data + uni_diff, skb->data, hdr_len);
	packet_pos = skb_pull(skb, uni_diff);
	unicast_packet = (struct batadv_unicast_packet *)packet_pos;
	unicast_packet->header.packet_type = BATADV_UNICAST;

	return skb;

err:
	/* free buffered skb, skb will be freed later */
	kfree_skb(tfp->skb);
	return NULL;
}
static void batadv_frag_create_entry(struct list_head *head,
				     struct sk_buff *skb)
{
	struct batadv_frag_packet_list_entry *tfp;
	struct batadv_unicast_frag_packet *up;

	up = (struct batadv_unicast_frag_packet *)skb->data;

	/* free and oldest packets stand at the end */
	tfp = list_entry((head)->prev, typeof(*tfp), list);
	kfree_skb(tfp->skb);

	tfp->seqno = ntohs(up->seqno);
	tfp->skb = skb;
	list_move(&tfp->list, head);
	return;
}
static int batadv_frag_create_buffer(struct list_head *head)
{
	int i;
	struct batadv_frag_packet_list_entry *tfp;

	for (i = 0; i < BATADV_FRAG_BUFFER_SIZE; i++) {
		tfp = kmalloc(sizeof(*tfp), GFP_ATOMIC);
		if (!tfp) {
			batadv_frag_list_free(head);
			return -ENOMEM;
		}
		tfp->skb = NULL;
		tfp->seqno = 0;
		INIT_LIST_HEAD(&tfp->list);
		list_add(&tfp->list, head);
	}

	return 0;
}
static struct batadv_frag_packet_list_entry *
batadv_frag_search_packet(struct list_head *head,
			  const struct batadv_unicast_frag_packet *up)
{
	struct batadv_frag_packet_list_entry *tfp;
	struct batadv_unicast_frag_packet *tmp_up = NULL;
	bool is_head_tmp, is_head;
	uint16_t search_seqno;

	if (up->flags & BATADV_UNI_FRAG_HEAD)
		search_seqno = ntohs(up->seqno)+1;
	else
		search_seqno = ntohs(up->seqno)-1;

	is_head = up->flags & BATADV_UNI_FRAG_HEAD;

	list_for_each_entry(tfp, head, list) {
		if (!tfp->skb)
			continue;

		if (tfp->seqno == ntohs(up->seqno))
			goto mov_tail;

		tmp_up = (struct batadv_unicast_frag_packet *)tfp->skb->data;

		if (tfp->seqno == search_seqno) {
			is_head_tmp = tmp_up->flags & BATADV_UNI_FRAG_HEAD;
			if (is_head_tmp != is_head)
				return tfp;
			else
				goto mov_tail;
		}
	}
	return NULL;

mov_tail:
	list_move_tail(&tfp->list, head);
	return NULL;
}
void batadv_frag_list_free(struct list_head *head)
{
	struct batadv_frag_packet_list_entry *pf, *tmp_pf;

	if (!list_empty(head)) {
		list_for_each_entry_safe(pf, tmp_pf, head, list) {
			kfree_skb(pf->skb);
			list_del(&pf->list);
			kfree(pf);
		}
	}
	return;
}
/* frag_reassemble_skb():
 * returns NET_RX_DROP if the operation failed - skb is left intact
 * returns NET_RX_SUCCESS if the fragment was buffered (skb_new will be NULL)
 * or the skb could be reassembled (skb_new will point to the new packet and
 * skb was freed)
 */
int batadv_frag_reassemble_skb(struct sk_buff *skb,
			       struct batadv_priv *bat_priv,
			       struct sk_buff **new_skb)
{
	struct batadv_orig_node *orig_node;
	struct batadv_frag_packet_list_entry *tmp_frag_entry;
	int ret = NET_RX_DROP;
	struct batadv_unicast_frag_packet *unicast_packet;

	unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
	*new_skb = NULL;

	orig_node = batadv_orig_hash_find(bat_priv, unicast_packet->orig);
	if (!orig_node)
		goto out;

	orig_node->last_frag_packet = jiffies;

	if (list_empty(&orig_node->frag_list) &&
	    batadv_frag_create_buffer(&orig_node->frag_list)) {
		pr_debug("couldn't create frag buffer\n");
		goto out;
	}

	tmp_frag_entry = batadv_frag_search_packet(&orig_node->frag_list,
						   unicast_packet);

	if (!tmp_frag_entry) {
		batadv_frag_create_entry(&orig_node->frag_list, skb);
		ret = NET_RX_SUCCESS;
		goto out;
	}

	*new_skb = batadv_frag_merge_packet(&orig_node->frag_list,
					    tmp_frag_entry, skb);
	/* if not, merge failed */
	if (*new_skb)
		ret = NET_RX_SUCCESS;

out:
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
	return ret;
}
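
/* Usage sketch (not part of the original file): a hedged illustration of how
 * a receive-path caller is expected to consume batadv_frag_reassemble_skb()
 * given the return semantics documented above. The surrounding handler and
 * its control flow are assumptions of this sketch, not code from this module.
 *
 *	struct sk_buff *new_skb = NULL;
 *
 *	if (batadv_frag_reassemble_skb(skb, bat_priv, &new_skb) == NET_RX_DROP)
 *		return NET_RX_DROP;	// failed, skb left intact for the caller
 *	if (!new_skb)
 *		return NET_RX_SUCCESS;	// fragment buffered, wait for its peer
 *	skb = new_skb;			// both halves merged, keep processing
 */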
int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
			 struct batadv_hard_iface *hard_iface,
			 const uint8_t dstaddr[])
{
	struct batadv_unicast_packet tmp_uc, *unicast_packet;
	struct batadv_hard_iface *primary_if;
	struct sk_buff *frag_skb;
	struct batadv_unicast_frag_packet *frag1, *frag2;
	int uc_hdr_len = sizeof(*unicast_packet);
	int ucf_hdr_len = sizeof(*frag1);
	int data_len = skb->len - uc_hdr_len;
	int large_tail = 0, ret = NET_RX_DROP;
	uint16_t seqno;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto dropped;

	frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
	if (!frag_skb)
		goto dropped;

	skb_reserve(frag_skb, ucf_hdr_len);

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
	skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len);

	if (batadv_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
	    batadv_skb_head_push(frag_skb, ucf_hdr_len) < 0)
		goto drop_frag;

	frag1 = (struct batadv_unicast_frag_packet *)skb->data;
	frag2 = (struct batadv_unicast_frag_packet *)frag_skb->data;

	memcpy(frag1, &tmp_uc, sizeof(tmp_uc));

	frag1->header.ttl--;
	frag1->header.version = BATADV_COMPAT_VERSION;
	frag1->header.packet_type = BATADV_UNICAST_FRAG;

	memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(frag2, frag1, sizeof(*frag2));

	if (data_len & 1)
		large_tail = BATADV_UNI_FRAG_LARGETAIL;

	frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
	frag2->flags = large_tail;

	seqno = atomic_add_return(2, &hard_iface->frag_seqno);
	frag1->seqno = htons(seqno - 1);
	frag2->seqno = htons(seqno);

	batadv_send_skb_packet(skb, hard_iface, dstaddr);
	batadv_send_skb_packet(frag_skb, hard_iface, dstaddr);
	ret = NET_RX_SUCCESS;
	goto out;

drop_frag:
	kfree_skb(frag_skb);
dropped:
	kfree_skb(skb);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}
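
/* Worked size example (illustrative numbers, not taken from this file): with
 * data_len = 1403 payload bytes, skb_split() above keeps
 * data_len / 2 = 701 payload bytes in the head fragment and moves
 * data_len - data_len / 2 = 702 bytes into frag_skb. Because data_len is odd,
 * BATADV_UNI_FRAG_LARGETAIL is set on both fragments to record that the tail
 * half is one byte larger than the head half.
 */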
/**
 * batadv_unicast_push_and_fill_skb - extends the buffer and initializes the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Returns false if the buffer extension was not possible or true otherwise
 */
static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
					     struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->header.version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->header.packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->header.ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}
/**
 * batadv_unicast_prepare_skb - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Returns false if the payload could not be encapsulated or true otherwise
 */
static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
				       struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_unicast_push_and_fill_skb(skb, uni_size, orig_node);
}
/**
 * batadv_unicast_4addr_prepare_skb - encapsulate an skb with a unicast4addr
 *  header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 * @packet_subtype: the batman 4addr packet subtype to use
 *
 * Returns false if the payload could not be encapsulated or true otherwise
 */
bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
				      struct sk_buff *skb,
				      struct batadv_orig_node *orig,
				      int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *unicast_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* pull the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the unicast_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_unicast_push_and_fill_skb(skb,
					      sizeof(*unicast_4addr_packet),
					      orig))
		goto out;

	unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	unicast_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
	memcpy(unicast_4addr_packet->src, primary_if->net_dev->dev_addr,
	       ETH_ALEN);
	unicast_4addr_packet->subtype = packet_subtype;
	unicast_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}
/**
 * batadv_unicast_generic_send_skb - send an skb as unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the batman packet subtype. It is ignored if packet_type is
 *  not BATADV_UNICAST_4ADDR
 *
 * Returns 1 in case of error or 0 otherwise
 */
int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
				    struct sk_buff *skb, int packet_type,
				    int packet_subtype)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_unicast_packet *unicast_packet;
	struct batadv_orig_node *orig_node;
	struct batadv_neigh_node *neigh_node;
	int data_len = skb->len;
	int ret = NET_RX_DROP;
	unsigned int dev_mtu;

	/* get routing information */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		orig_node = batadv_gw_get_selected_orig(bat_priv);
		if (orig_node)
			goto find_router;
	}

	/* check for tt host - increases orig_node refcount.
	 * returns NULL in case of AP isolation
	 */
	orig_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
					     ethhdr->h_dest);

find_router:
	/* find_router():
	 *  - if orig_node is NULL it returns NULL
	 *  - increases neigh_nodes refcount if found.
	 */
	neigh_node = batadv_find_router(bat_priv, orig_node, NULL);
	if (!neigh_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		batadv_unicast_prepare_skb(skb, orig_node);
		break;
	case BATADV_UNICAST_4ADDR:
		batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
						 packet_subtype);
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct route
	 * for this client. The destination will receive this packet and will
	 * try to reroute it because the ttvn contained in the header is less
	 * than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	dev_mtu = neigh_node->if_incoming->net_dev->mtu;
	/* fragmentation mechanism only works for UNICAST (now) */
	if (packet_type == BATADV_UNICAST &&
	    atomic_read(&bat_priv->fragmentation) &&
	    data_len + sizeof(*unicast_packet) > dev_mtu) {
		/* send frag skb decreases ttl */
		unicast_packet->header.ttl++;
		ret = batadv_frag_send_skb(skb, bat_priv,
					   neigh_node->if_incoming,
					   neigh_node->addr);
		goto out;
	}

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = 0;

out:
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
	if (ret == NET_RX_DROP)
		kfree_skb(skb);
	return ret;
}
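
/* Convenience-wrapper sketch (an assumption, kept as a comment so the file
 * still builds): callers typically reach this function through thin inline
 * helpers that fix the packet type, roughly as below. The helper name and its
 * location in a header are assumptions of this sketch, not verified here.
 *
 *	static inline int batadv_unicast_send_skb(struct batadv_priv *bat_priv,
 *						  struct sk_buff *skb)
 *	{
 *		return batadv_unicast_generic_send_skb(bat_priv, skb,
 *						       BATADV_UNICAST, 0);
 *	}
 */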