/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include "main.h"
#include "bat_sysfs.h"
#include "bat_debugfs.h"
#include "routing.h"
#include "send.h"
#include "originator.h"
#include "soft-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "hard-interface.h"
#include "gateway_client.h"
#include "bridge_loop_avoidance.h"
#include "vis.h"
#include "hash.h"
#include "bat_algo.h"

/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked
 */
struct list_head hardif_list;
static int (*recv_packet_handler[256])(struct sk_buff *, struct hard_iface *);
char bat_routing_algo[20] = "BATMAN IV";
static struct hlist_head bat_algo_list;

unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

struct workqueue_struct *bat_event_workqueue;

static void recv_handler_init(void);

static int __init batman_init(void)
{
	INIT_LIST_HEAD(&hardif_list);
	INIT_HLIST_HEAD(&bat_algo_list);

	recv_handler_init();

	bat_iv_init();

	/* the name should not be longer than 10 chars - see
	 * http://lwn.net/Articles/23634/ */
	bat_event_workqueue = create_singlethread_workqueue("bat_events");

	if (!bat_event_workqueue)
		return -ENOMEM;

	bat_socket_init();
	debugfs_init();

	register_netdevice_notifier(&hard_if_notifier);

	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
		SOURCE_VERSION, COMPAT_VERSION);

	return 0;
}

static void __exit batman_exit(void)
{
	debugfs_destroy();
	unregister_netdevice_notifier(&hard_if_notifier);
	hardif_remove_interfaces();

	flush_workqueue(bat_event_workqueue);
	destroy_workqueue(bat_event_workqueue);
	bat_event_workqueue = NULL;

	rcu_barrier();
}

int mesh_init(struct net_device *soft_iface)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->tt_changes_list_lock);
	spin_lock_init(&bat_priv->tt_req_list_lock);
	spin_lock_init(&bat_priv->tt_roam_list_lock);
	spin_lock_init(&bat_priv->tt_buff_lock);
	spin_lock_init(&bat_priv->gw_list_lock);
	spin_lock_init(&bat_priv->vis_hash_lock);
	spin_lock_init(&bat_priv->vis_list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw_list);
	INIT_LIST_HEAD(&bat_priv->tt_changes_list);
	INIT_LIST_HEAD(&bat_priv->tt_req_list);
	INIT_LIST_HEAD(&bat_priv->tt_roam_list);

	if (originator_init(bat_priv) < 1)
		goto err;

	if (tt_init(bat_priv) < 1)
		goto err;

	tt_local_add(soft_iface, soft_iface->dev_addr, NULL_IFINDEX);

	if (vis_init(bat_priv) < 1)
		goto err;

	if (bla_init(bat_priv) < 1)
		goto err;

	atomic_set(&bat_priv->gw_reselect, 0);
	atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);
	goto end;

err:
	mesh_free(soft_iface);
	return -1;

end:
	return 0;
}

void mesh_free(struct net_device *soft_iface)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	atomic_set(&bat_priv->mesh_state, MESH_DEACTIVATING);

	purge_outstanding_packets(bat_priv, NULL);

	vis_quit(bat_priv);

	gw_node_purge(bat_priv);
	originator_free(bat_priv);

	tt_free(bat_priv);

	bla_free(bat_priv);

	atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
}

void inc_module_count(void)
{
	try_module_get(THIS_MODULE);
}

void dec_module_count(void)
{
	module_put(THIS_MODULE);
}

int is_my_mac(const uint8_t *addr)
{
	const struct hard_iface *hard_iface;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->if_status != IF_ACTIVE)
			continue;

		if (compare_eth(hard_iface->net_dev->dev_addr, addr)) {
			rcu_read_unlock();
			return 1;
		}
	}
	rcu_read_unlock();
	return 0;
}

static int recv_unhandled_packet(struct sk_buff *skb,
				 struct hard_iface *recv_if)
{
	return NET_RX_DROP;
}

/* incoming packets with the batman ethertype received on any active hard
 * interface
 */
int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
		    struct packet_type *ptype, struct net_device *orig_dev)
{
	struct bat_priv *bat_priv;
	struct batman_ogm_packet *batman_ogm_packet;
	struct hard_iface *hard_iface;
	uint8_t idx;
	int ret;

	hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != IF_ACTIVE)
		goto err_free;

	batman_ogm_packet = (struct batman_ogm_packet *)skb->data;

	if (batman_ogm_packet->header.version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_ogm_packet->header.version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb.
	 */
	idx = batman_ogm_packet->header.packet_type;
	ret = (*recv_packet_handler[idx])(skb, hard_iface);

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}

static void recv_handler_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(recv_packet_handler); i++)
		recv_packet_handler[i] = recv_unhandled_packet;

	/* batman icmp packet */
	recv_packet_handler[BAT_ICMP] = recv_icmp_packet;
	/* unicast packet */
	recv_packet_handler[BAT_UNICAST] = recv_unicast_packet;
	/* fragmented unicast packet */
	recv_packet_handler[BAT_UNICAST_FRAG] = recv_ucast_frag_packet;
	/* broadcast packet */
	recv_packet_handler[BAT_BCAST] = recv_bcast_packet;
	/* vis packet */
	recv_packet_handler[BAT_VIS] = recv_vis_packet;
	/* Translation table query (request or response) */
	recv_packet_handler[BAT_TT_QUERY] = recv_tt_query;
	/* Roaming advertisement */
	recv_packet_handler[BAT_ROAM_ADV] = recv_roam_adv;
}

int recv_handler_register(uint8_t packet_type,
			  int (*recv_handler)(struct sk_buff *,
					      struct hard_iface *))
{
	if (recv_packet_handler[packet_type] != &recv_unhandled_packet)
		return -EBUSY;

	recv_packet_handler[packet_type] = recv_handler;
	return 0;
}

void recv_handler_unregister(uint8_t packet_type)
{
	recv_packet_handler[packet_type] = recv_unhandled_packet;
}
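
/* Usage sketch (illustrative only; BAT_MY_TYPE and my_recv_packet are
 * hypothetical names, not defined anywhere in batman-adv): a component
 * claims an unused packet type and provides a handler with the same
 * signature as recv_unhandled_packet():
 *
 *	static int my_recv_packet(struct sk_buff *skb,
 *				  struct hard_iface *recv_if)
 *	{
 *		... consume or reuse the skb ...
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	ret = recv_handler_register(BAT_MY_TYPE, my_recv_packet);
 *
 * Registration fails with -EBUSY if the packet type is already claimed;
 * recv_handler_unregister(BAT_MY_TYPE) later resets the slot to
 * recv_unhandled_packet.
 */
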
static struct bat_algo_ops *bat_algo_get(char *name)
{
	struct bat_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
	struct hlist_node *node;

	hlist_for_each_entry(bat_algo_ops_tmp, node, &bat_algo_list, list) {
		if (strcmp(bat_algo_ops_tmp->name, name) != 0)
			continue;

		bat_algo_ops = bat_algo_ops_tmp;
		break;
	}

	return bat_algo_ops;
}

int bat_algo_register(struct bat_algo_ops *bat_algo_ops)
{
	struct bat_algo_ops *bat_algo_ops_tmp;
	int ret;

	bat_algo_ops_tmp = bat_algo_get(bat_algo_ops->name);
	if (bat_algo_ops_tmp) {
		pr_info("Trying to register already registered routing algorithm: %s\n",
			bat_algo_ops->name);
		ret = -EEXIST;
		goto out;
	}

	/* all algorithms must implement all ops (for now) */
	if (!bat_algo_ops->bat_iface_enable ||
	    !bat_algo_ops->bat_iface_disable ||
	    !bat_algo_ops->bat_iface_update_mac ||
	    !bat_algo_ops->bat_primary_iface_set ||
	    !bat_algo_ops->bat_ogm_schedule ||
	    !bat_algo_ops->bat_ogm_emit) {
		pr_info("Routing algo '%s' does not implement required ops\n",
			bat_algo_ops->name);
		ret = -EINVAL;
		goto out;
	}

	INIT_HLIST_NODE(&bat_algo_ops->list);
	hlist_add_head(&bat_algo_ops->list, &bat_algo_list);
	ret = 0;

out:
	return ret;
}
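
/* Registration sketch (illustrative; "my_algo_ops" and the my_* callbacks
 * are hypothetical): a routing algorithm fills in every mandatory callback
 * and registers itself before any mesh interface is created, e.g.:
 *
 *	static struct bat_algo_ops my_algo_ops = {
 *		.name = "MY ALGO",
 *		.bat_iface_enable = my_iface_enable,
 *		.bat_iface_disable = my_iface_disable,
 *		.bat_iface_update_mac = my_iface_update_mac,
 *		.bat_primary_iface_set = my_primary_iface_set,
 *		.bat_ogm_schedule = my_ogm_schedule,
 *		.bat_ogm_emit = my_ogm_emit,
 *	};
 *
 *	ret = bat_algo_register(&my_algo_ops);
 *
 * Duplicate names fail with -EEXIST, missing callbacks with -EINVAL.
 */
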
int bat_algo_select(struct bat_priv *bat_priv, char *name)
{
	struct bat_algo_ops *bat_algo_ops;
	int ret = -EINVAL;

	bat_algo_ops = bat_algo_get(name);
	if (!bat_algo_ops)
		goto out;

	bat_priv->bat_algo_ops = bat_algo_ops;
	ret = 0;

out:
	return ret;
}

int bat_algo_seq_print_text(struct seq_file *seq, void *offset)
{
	struct bat_algo_ops *bat_algo_ops;
	struct hlist_node *node;

	seq_printf(seq, "Available routing algorithms:\n");

	hlist_for_each_entry(bat_algo_ops, node, &bat_algo_list, list) {
		seq_printf(seq, "%s\n", bat_algo_ops->name);
	}

	return 0;
}

static int param_set_ra(const char *val, const struct kernel_param *kp)
{
	struct bat_algo_ops *bat_algo_ops;

	bat_algo_ops = bat_algo_get((char *)val);
	if (!bat_algo_ops) {
		pr_err("Routing algorithm '%s' is not supported\n", val);
		return -EINVAL;
	}

	return param_set_copystring(val, kp);
}

static const struct kernel_param_ops param_ops_ra = {
	.set = param_set_ra,
	.get = param_get_string,
};

static struct kparam_string __param_string_ra = {
	.maxlen = sizeof(bat_routing_algo),
	.string = bat_routing_algo,
};

module_param_cb(routing_algo, &param_ops_ra, &__param_string_ra, 0644);
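
/* Load-time usage (illustrative): the default routing algorithm can be
 * picked via this module parameter, e.g.
 *
 *	modprobe batman-adv routing_algo="BATMAN IV"
 *
 * param_set_ra() rejects any name for which no bat_algo_ops has been
 * registered.
 */
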
module_init(batman_init);
module_exit(batman_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(DRIVER_DEVICE);
MODULE_VERSION(SOURCE_VERSION);