/*
 * Connection oriented routing
 * Copyright (C) 2007-2023 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>

#include "cor.h"

static struct notifier_block cor_netdev_notify;
__u8 cor_netdev_notify_registered;

__u8 cor_pack_registered;

static DEFINE_SPINLOCK(cor_devs_lock);
static LIST_HEAD(cor_devs);
static LIST_HEAD(cor_devs_waitexit);

static void cor_dev_queue_waitexit(struct work_struct *work);
DECLARE_WORK(cor_dev_waitexit_work, cor_dev_queue_waitexit);
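
/*
 * cor_devs holds one cor_dev per network device that cor is attached to.
 * Dying entries are moved to cor_devs_waitexit and reaped from
 * cor_dev_waitexit_work, because their final cleanup stops a kthread and
 * may therefore sleep.
 */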
#ifdef DEBUG_QOS_SLOWSEND
static DEFINE_SPINLOCK(slowsend_lock);
static unsigned long cor_last_send;

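/*
 * Debug-only rate limiter: _cor_dev_queue_xmit() lets at most one or two
 * packets per jiffy through and drops the rest, presumably to make queue
 * behavior observable at low speed. Compiled only with DEBUG_QOS_SLOWSEND.
 */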
int _cor_dev_queue_xmit(struct sk_buff *skb, int caller)
{
	int allowsend = 0;
	unsigned long jiffies_tmp;

	spin_lock_bh(&slowsend_lock);
	jiffies_tmp = jiffies;
	if (cor_last_send != jiffies_tmp) {
		if (cor_last_send + 1 == jiffies_tmp)
			cor_last_send = jiffies_tmp;
		else
			cor_last_send = jiffies_tmp - 1;
		allowsend = 1;
	}
	spin_unlock_bh(&slowsend_lock);

	/* printk(KERN_ERR "cor_dev_queue_xmit %d, %d\n", caller, allowsend); */
	if (allowsend)
		return dev_queue_xmit(skb);

	kfree_skb(skb);
	return NET_XMIT_DROP;
}
#endif
/*__u64 get_bufspace_used(void);

static void print_conn_bufstats(struct cor_neighbor *nb)
{
	/ * not threadsafe, but this is only for debugging... * /
	__u64 totalsize = 0;
	__u64 read_remaining = 0;
	__u32 numconns;
	struct list_head *lh;
	unsigned long iflags;

	spin_lock_irqsave(&nb->conns_waiting.lock, iflags);

	lh = nb->conns_waiting.lh.next;
	while (lh != &nb->conns_waiting.lh) {
		struct cor_conn *cn = container_of(lh, struct cor_conn,
				trgt.out.rb.lh);

		totalsize += cn->data_buf.datasize;
		read_remaining += cn->data_buf.read_remaining;
		lh = lh->next;
	}

	lh = nb->conns_waiting.lh_nextpass.next;
	while (lh != &nb->conns_waiting.lh_nextpass) {
		struct cor_conn *cn = container_of(lh, struct cor_conn,
				trgt.out.rb.lh);

		totalsize += cn->data_buf.datasize;
		read_remaining += cn->data_buf.read_remaining;
		lh = lh->next;
	}

	numconns = nb->conns_waiting.cnt;

	spin_unlock_irqrestore(&nb->conns_waiting.lock, iflags);

	printk(KERN_ERR "conn %llu %llu %u\n", totalsize, read_remaining,
			numconns);
}
*/
struct sk_buff *cor_create_packet(struct cor_neighbor *nb, int size,
		gfp_t alloc_flags)
{
	struct sk_buff *ret;

	ret = alloc_skb(size + LL_RESERVED_SPACE(nb->dev) +
			nb->dev->needed_tailroom, alloc_flags);
	if (unlikely(ret == 0))
		return 0;

	ret->protocol = htons(ETH_P_COR);
	ret->dev = nb->dev;

	skb_reserve(ret, LL_RESERVED_SPACE(nb->dev));
	if (unlikely(dev_hard_header(ret, nb->dev, ETH_P_COR, nb->mac,
			nb->dev->dev_addr, ret->len) < 0)) {
		kfree_skb(ret);
		return 0;
	}
	skb_reset_network_header(ret);

	return ret;
}
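
/*
 * Wire format of a conndata packet as built below: 1 byte packet type
 * with the flush and windowused flags or'ed in, then 4 bytes conn_id and
 * 4 bytes seqno (serialized via cor_put_u32()), followed by the payload.
 * This is where the "size + 9" headroom comes from.
 */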
struct sk_buff *cor_create_packet_conndata(struct cor_neighbor *nb, int size,
		gfp_t alloc_flags, __u32 conn_id, __u32 seqno,
		__u8 windowused, __u8 flush)
{
	struct sk_buff *ret;
	char *dest;

	ret = cor_create_packet(nb, size + 9, alloc_flags);
	if (unlikely(ret == 0))
		return 0;

	dest = skb_put(ret, 9);

	BUG_ON((windowused & (~PACKET_TYPE_CONNDATA_FLAGS_WINDOWUSED)) != 0);

	dest[0] = PACKET_TYPE_CONNDATA |
			(flush == 0 ? 0 : PACKET_TYPE_CONNDATA_FLAGS_FLUSH) |
			windowused;
	dest += 1;

	cor_put_u32(dest, conn_id);
	dest += 4;
	cor_put_u32(dest, seqno);
	dest += 4;

	return ret;
}
static void cor_rcv_conndata(struct sk_buff *skb, __u8 windowused, __u8 flush)
{
	struct cor_neighbor *nb = cor_get_neigh_by_mac(skb);

	__u32 conn_id;
	__u32 seqno;

	char *connid_p;
	char *seqno_p;

	if (unlikely(nb == 0))
		goto drop;

	connid_p = cor_pull_skb(skb, 4);
	if (unlikely(connid_p == 0))
		goto drop;

	seqno_p = cor_pull_skb(skb, 4);
	if (unlikely(seqno_p == 0))
		goto drop;

	conn_id = cor_parse_u32(connid_p);
	seqno = cor_parse_u32(seqno_p);

	/* get_random_bytes(&rand, 1);
	... */

	if (unlikely(skb->len <= 0))
		goto drop;

	cor_conn_rcv(nb, skb, 0, 0, conn_id, seqno, windowused, flush);

	if (0) {
drop:
		kfree_skb(skb);
	}

	if (likely(nb != 0))
		cor_nb_kref_put(nb, "stack");
}
static void cor_rcv_cmsg(struct sk_buff *skb, int ackneeded)
{
	struct cor_neighbor *nb = cor_get_neigh_by_mac(skb);

	if (unlikely(nb == 0)) {
		kfree_skb(skb);
		return;
	}

	cor_kernel_packet(nb, skb, ackneeded);
	cor_nb_kref_put(nb, "stack");
}
static int cor_rcv(struct sk_buff *skb, struct net_device *dev,
		struct packet_type *pt, struct net_device *orig_dev)
{
	__u8 packet_type;
	char *packet_type_p;

	if (skb->pkt_type == PACKET_OTHERHOST ||
			unlikely(skb->pkt_type == PACKET_LOOPBACK))
		goto drop;

	packet_type_p = cor_pull_skb(skb, 1);

	if (unlikely(packet_type_p == 0))
		goto drop;

	packet_type = *packet_type_p;

	if (unlikely(packet_type == PACKET_TYPE_ANNOUNCE)) {
		cor_rcv_announce(skb);
		return NET_RX_SUCCESS;
	} else if (packet_type == PACKET_TYPE_CMSG_NOACK) {
		cor_rcv_cmsg(skb, ACK_NEEDED_NO);
		return NET_RX_SUCCESS;
	} else if (packet_type == PACKET_TYPE_CMSG_ACKSLOW) {
		cor_rcv_cmsg(skb, ACK_NEEDED_SLOW);
		return NET_RX_SUCCESS;
	} else if (packet_type == PACKET_TYPE_CMSG_ACKFAST) {
		cor_rcv_cmsg(skb, ACK_NEEDED_FAST);
		return NET_RX_SUCCESS;
	} else if (likely((packet_type & (~PACKET_TYPE_CONNDATA_FLAGS)) ==
			PACKET_TYPE_CONNDATA)) {
		__u8 flush = 0;
		__u8 windowused;

		if ((packet_type & PACKET_TYPE_CONNDATA_FLAGS_FLUSH) != 0)
			flush = 1;
		windowused = (packet_type &
				PACKET_TYPE_CONNDATA_FLAGS_WINDOWUSED);
		cor_rcv_conndata(skb, windowused, flush);
		return NET_RX_SUCCESS;
	}

drop:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
void cor_dev_free(struct kref *ref)
{
	struct cor_dev *cd = container_of(ref, struct cor_dev, ref);

	BUG_ON(cd->dev == 0);
	dev_put(cd->dev);
	cd->dev = 0;

	kfree(cd);
}
static struct cor_dev *_cor_dev_get(struct net_device *dev)
{
	struct list_head *curr = cor_devs.next;

	while (curr != (&cor_devs)) {
		struct cor_dev *cd = container_of(curr, struct cor_dev,
				dev_list);

		BUG_ON(cd->dev == 0);
		if (cd->dev == dev) {
			kref_get(&cd->ref);
			return cd;
		}

		curr = curr->next;
	}

	return 0;
}
struct cor_dev *cor_dev_get(struct net_device *dev)
{
	struct cor_dev *ret = 0;

	spin_lock_bh(&cor_devs_lock);
	ret = _cor_dev_get(dev);
	spin_unlock_bh(&cor_devs_lock);

	return ret;
}
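
/*
 * kthread_stop() and put_task_struct() may sleep, so stopping the
 * qos_resume thread of a dying cor_dev is deferred to this work item
 * rather than being done from cor_dev_destroy() under cor_devs_lock.
 */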
static void cor_dev_queue_waitexit(struct work_struct *work)
{
	spin_lock_bh(&cor_devs_lock);
	while (!list_empty(&cor_devs_waitexit)) {
		struct cor_dev *cd = container_of(cor_devs_waitexit.next,
				struct cor_dev, dev_list);

		list_del(&cd->dev_list);

		spin_unlock_bh(&cor_devs_lock);

		kthread_stop(cd->send_queue.qos_resume_thread);
		put_task_struct(cd->send_queue.qos_resume_thread);
		kref_put(&cd->ref, cor_dev_free);

		spin_lock_bh(&cor_devs_lock);
	}
	spin_unlock_bh(&cor_devs_lock);
}
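
/*
 * Tear down the cor_dev of a single net_device, or of all devices if
 * dev == 0. Entries are unlinked from cor_devs and, after their send
 * queue is destroyed, parked on cor_devs_waitexit so that
 * cor_dev_queue_waitexit() can stop the qos_resume thread and drop the
 * final reference.
 */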
void cor_dev_destroy(struct net_device *dev)
{
	while (1) {
		struct cor_dev *cd;

		spin_lock_bh(&cor_devs_lock);

		if (dev == 0) {
			if (list_empty(&cor_devs)) {
				cd = 0;
			} else {
				cd = container_of(cor_devs.next,
						struct cor_dev, dev_list);
				kref_get(&cd->ref);
			}
		} else {
			cd = _cor_dev_get(dev);
		}

		if (cd == 0) {
			spin_unlock_bh(&cor_devs_lock);
			break;
		}

		list_del(&cd->dev_list);
		kref_put(&cd->ref, cor_kreffree_bug);

		spin_unlock_bh(&cor_devs_lock);

		cor_dev_queue_destroy(cd);

		spin_lock_bh(&cor_devs_lock);
		list_add(&cd->dev_list, &cor_devs_waitexit);
		kref_get(&cd->ref);
		spin_unlock_bh(&cor_devs_lock);

		schedule_work(&cor_dev_waitexit_work);

		kref_put(&cd->ref, cor_dev_free);

		if (dev != 0)
			break;
	}
}
static int cor_dev_create(struct net_device *dev)
{
	struct cor_dev *cd = kmalloc(sizeof(struct cor_dev), GFP_KERNEL);

	if (unlikely(cd == 0)) {
		printk(KERN_ERR "cor: unable to allocate memory for cor_dev, not enabling device\n");
		return 1;
	}

	memset(cd, 0, sizeof(struct cor_dev));

	kref_init(&cd->ref);

	cd->dev = dev;
	dev_hold(dev);

	if (cor_dev_queue_init(cd) != 0) {
		dev_put(cd->dev);
		kfree(cd);
		return 1;
	}

	spin_lock_bh(&cor_devs_lock);
	list_add(&cd->dev_list, &cor_devs);
	spin_unlock_bh(&cor_devs_lock);

	return 0;
}
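
/*
 * Netdevice notifier: NETDEV_UP creates the cor_dev and, when not
 * running in client mode, starts broadcasting announce packets;
 * NETDEV_DOWN reverses both and resets all neighbors on the interface;
 * NETDEV_CHANGEMTU triggers resending the rcvmtu control messages.
 */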
int cor_netdev_notify_func(struct notifier_block *not, unsigned long event,
		void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int rc;

	switch (event) {
	case NETDEV_UP:
		if (dev->flags & IFF_LOOPBACK)
			break;

		rc = cor_dev_create(dev);
		if (rc != 0)
			break;

		if (cor_is_clientmode() == 0)
			cor_announce_send_start(dev, dev->broadcast,
					ANNOUNCE_TYPE_BROADCAST);
		break;
	case NETDEV_DOWN:
		printk(KERN_ERR "down 1\n");

		printk(KERN_ERR "down 2\n");
		cor_announce_send_stop(dev, 0, ANNOUNCE_TYPE_BROADCAST);
		printk(KERN_ERR "down 3\n");

		cor_reset_neighbors(dev);
		printk(KERN_ERR "down 4\n");

		cor_dev_destroy(dev);
		printk(KERN_ERR "down 5\n");
		break;
	case NETDEV_CHANGEMTU:
		cor_resend_rcvmtu(dev);
		break;
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
	case NETDEV_CHANGEADDR:
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_DONE;
}
static struct packet_type cor_ptype = {
	.type = htons(ETH_P_COR),
	.dev = 0,
	.func = cor_rcv
};
void cor_dev_down(void)
{
	if (cor_pack_registered != 0) {
		cor_pack_registered = 0;
		dev_remove_pack(&cor_ptype);
	}

	if (cor_netdev_notify_registered != 0) {
		if (unregister_netdevice_notifier(&cor_netdev_notify) != 0)
			printk(KERN_WARNING "warning: cor_dev_down: unregister_netdevice_notifier failed\n");
		cor_netdev_notify_registered = 0;
	}
}
int cor_dev_up(void)
{
	BUG_ON(cor_netdev_notify_registered != 0);
	if (register_netdevice_notifier(&cor_netdev_notify) != 0)
		return 1;
	cor_netdev_notify_registered = 1;

	BUG_ON(cor_pack_registered != 0);
	dev_add_pack(&cor_ptype);
	cor_pack_registered = 1;

	return 0;
}
int __init cor_dev_init(void)
{
	memset(&cor_netdev_notify, 0, sizeof(cor_netdev_notify));
	cor_netdev_notify.notifier_call = cor_netdev_notify_func;

	return 0;
}
void __exit cor_dev_exit1(void)
{
	flush_work(&cor_dev_waitexit_work);
}
MODULE_LICENSE("GPL");