/**
 * Connection oriented routing
 * Copyright (C) 2007-2023 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "cor.h"

static struct notifier_block cor_netdev_notify;
__u8 cor_netdev_notify_registered;

__u8 cor_pack_registered;

static DEFINE_SPINLOCK(cor_devs_lock);
static LIST_HEAD(cor_devs);
static LIST_HEAD(cor_devs_waitexit);

static void cor_dev_queue_waitexit(struct work_struct *work);
DECLARE_WORK(cor_dev_waitexit_work, cor_dev_queue_waitexit);

#ifdef DEBUG_QOS_SLOWSEND
static DEFINE_SPINLOCK(slowsend_lock);
static unsigned long cor_last_send;
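
/* Debug helper: with DEBUG_QOS_SLOWSEND enabled, throttle transmission to
 * roughly one packet per jiffy so queueing behaviour can be observed; packets
 * over that budget are freed and reported as dropped.
 */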
int _cor_dev_queue_xmit(struct sk_buff *skb, int caller)
{
	int allowsend = 0;
	unsigned long jiffies_tmp;

	spin_lock_bh(&slowsend_lock);
	jiffies_tmp = jiffies;
	if (cor_last_send != jiffies_tmp) {
		if (cor_last_send + 1 == jiffies_tmp)
			cor_last_send = jiffies_tmp;
		else
			cor_last_send = jiffies_tmp - 1;
		allowsend = 1;
	}
	spin_unlock_bh(&slowsend_lock);

	/* printk(KERN_ERR "cor_dev_queue_xmit %d, %d\n", caller, allowsend); */
	if (allowsend) {
		return dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}
}
#endif

/*__u64 get_bufspace_used(void);

static void print_conn_bufstats(struct cor_neighbor *nb)
{
	/ * not threadsafe, but this is only for debugging... * /
	__u64 totalsize = 0;
	__u64 read_remaining = 0;
	__u32 numconns = 0;
	struct list_head *lh;
	unsigned long iflags;

	spin_lock_irqsave(&nb->conns_waiting.lock, iflags);

	lh = nb->conns_waiting.lh.next;
	while (lh != &nb->conns_waiting.lh) {
		struct cor_conn *cn = container_of(lh, struct cor_conn,
				trgt.out.rb.lh);
		totalsize += cn->data_buf.datasize;
		read_remaining += cn->data_buf.read_remaining;
		lh = lh->next;
	}

	lh = nb->conns_waiting.lh_nextpass.next;
	while (lh != &nb->conns_waiting.lh_nextpass) {
		struct cor_conn *cn = container_of(lh, struct cor_conn,
				trgt.out.rb.lh);
		totalsize += cn->data_buf.datasize;
		read_remaining += cn->data_buf.read_remaining;
		lh = lh->next;
	}

	numconns = nb->conns_waiting.cnt;

	spin_unlock_irqrestore(&nb->conns_waiting.lock, iflags);

	printk(KERN_ERR "conn %llu %llu %u\n", totalsize, read_remaining,
			numconns);
} */
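
/* Allocate an skb for transmission to the neighbor nb: reserve link-layer
 * headroom and tailroom for nb->dev, set the ETH_P_COR protocol and build
 * the hardware header addressed to nb->mac.
 */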
struct sk_buff *cor_create_packet(struct cor_neighbor *nb, int size,
		gfp_t alloc_flags)
{
	struct sk_buff *ret;

	ret = alloc_skb(size + LL_RESERVED_SPACE(nb->dev) +
			nb->dev->needed_tailroom, alloc_flags);
	if (unlikely(ret == 0))
		return 0;

	ret->protocol = htons(ETH_P_COR);
	ret->dev = nb->dev;

	skb_reserve(ret, LL_RESERVED_SPACE(nb->dev));
	if (unlikely(dev_hard_header(ret, nb->dev, ETH_P_COR, nb->mac,
			nb->dev->dev_addr, ret->len) < 0)) {
		/* free the skb instead of leaking it on header errors */
		kfree_skb(ret);
		return 0;
	}
	skb_reset_network_header(ret);

	return ret;
}
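
/* Like cor_create_packet(), but additionally reserve and fill the 9 byte
 * conndata header: one type/flags byte (PACKET_TYPE_CONNDATA plus the flush
 * and windowused flags), the 4 byte conn_id and the 4 byte seqno.
 */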
struct sk_buff *cor_create_packet_conndata(struct cor_neighbor *nb, int size,
		gfp_t alloc_flags, __u32 conn_id, __u32 seqno,
		__u8 windowused, __u8 flush)
{
	struct sk_buff *ret;
	char *dest;

	ret = cor_create_packet(nb, size + 9, alloc_flags);
	if (unlikely(ret == 0))
		return 0;

	dest = skb_put(ret, 9);
	BUG_ON(dest == 0);

	BUG_ON((windowused & (~PACKET_TYPE_CONNDATA_FLAGS_WINDOWUSED)) != 0);

	dest[0] = PACKET_TYPE_CONNDATA |
			(flush == 0 ? 0 : PACKET_TYPE_CONNDATA_FLAGS_FLUSH) |
			windowused;
	dest += 1;

	cor_put_u32(dest, conn_id);
	dest += 4;
	cor_put_u32(dest, seqno);
	dest += 4;

	return ret;
}
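
/* Receive path for conndata packets: look up the neighbor by the source MAC,
 * pull the 4 byte conn_id and seqno from the skb and hand the remaining
 * payload to cor_conn_rcv(); malformed or empty packets are dropped.
 */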
static void cor_rcv_conndata(struct sk_buff *skb, __u8 windowused, __u8 flush)
{
	struct cor_neighbor *nb = cor_get_neigh_by_mac(skb);

	__u32 conn_id;
	__u32 seqno;

	char *connid_p;
	char *seqno_p;

	/* __u8 rand; */

	if (unlikely(nb == 0))
		goto drop;

	connid_p = cor_pull_skb(skb, 4);
	if (unlikely(connid_p == 0))
		goto drop;

	seqno_p = cor_pull_skb(skb, 4);
	if (unlikely(seqno_p == 0))
		goto drop;

	conn_id = cor_parse_u32(connid_p);
	seqno = cor_parse_u32(seqno_p);

	/* get_random_bytes(&rand, 1);
	if (rand < 64)
		goto drop; */

	if (unlikely(skb->len <= 0))
		goto drop;

	cor_conn_rcv(nb, skb, 0, 0, conn_id, seqno, windowused, flush);

	if (0) {
drop:
		kfree_skb(skb);
	}

	if (nb != 0) {
		cor_nb_kref_put(nb, "stack");
	}
}
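
/* Receive path for control messages: look up the neighbor by the source MAC
 * and pass the skb to cor_kernel_packet() together with the requested ack
 * urgency; packets from unknown neighbors are dropped.
 */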
static void cor_rcv_cmsg(struct sk_buff *skb, int ackneeded)
{
	struct cor_neighbor *nb = cor_get_neigh_by_mac(skb);

	if (unlikely(nb == 0)) {
		kfree_skb(skb);
	} else {
		cor_kernel_packet(nb, skb, ackneeded);
		cor_nb_kref_put(nb, "stack");
	}
}
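
/* Packet type handler for ETH_P_COR: the first byte of the payload selects
 * announce, control message (with the desired ack behaviour) or conndata
 * handling; packets with an unknown type byte are silently dropped.
 */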
static int cor_rcv(struct sk_buff *skb, struct net_device *dev,
		struct packet_type *pt, struct net_device *orig_dev)
{
	__u8 packet_type;
	char *packet_type_p;

	if (skb->pkt_type == PACKET_OTHERHOST ||
			unlikely(skb->pkt_type == PACKET_LOOPBACK))
		goto drop;

	packet_type_p = cor_pull_skb(skb, 1);

	if (unlikely(packet_type_p == 0))
		goto drop;

	packet_type = *packet_type_p;

	if (unlikely(packet_type == PACKET_TYPE_ANNOUNCE)) {
		cor_rcv_announce(skb);
		return NET_RX_SUCCESS;
	} else if (packet_type == PACKET_TYPE_CMSG_NOACK) {
		cor_rcv_cmsg(skb, ACK_NEEDED_NO);
		return NET_RX_SUCCESS;
	} else if (packet_type == PACKET_TYPE_CMSG_ACKSLOW) {
		cor_rcv_cmsg(skb, ACK_NEEDED_SLOW);
		return NET_RX_SUCCESS;
	} else if (packet_type == PACKET_TYPE_CMSG_ACKFAST) {
		cor_rcv_cmsg(skb, ACK_NEEDED_FAST);
		return NET_RX_SUCCESS;
	} else if (likely((packet_type & (~PACKET_TYPE_CONNDATA_FLAGS)) ==
			PACKET_TYPE_CONNDATA)) {
		__u8 flush = 0;
		__u8 windowused;

		if ((packet_type & PACKET_TYPE_CONNDATA_FLAGS_FLUSH) != 0)
			flush = 1;
		windowused = (packet_type &
				PACKET_TYPE_CONNDATA_FLAGS_WINDOWUSED);
		cor_rcv_conndata(skb, windowused, flush);
		return NET_RX_SUCCESS;
	} else {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
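
/* kref release callback for struct cor_dev: drop the reference on the
 * underlying net_device and free the cor_dev itself.
 */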
void cor_dev_free(struct kref *ref)
{
	struct cor_dev *cd = container_of(ref, struct cor_dev, ref);

	BUG_ON(cd->dev == 0);
	dev_put(cd->dev);
	cd->dev = 0;

	kfree(cd);
}
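
/* Find the cor_dev belonging to a net_device in the cor_devs list and return
 * it with an additional reference, or 0 if there is none. _cor_dev_get()
 * expects cor_devs_lock to be held; cor_dev_get() takes the lock itself.
 */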
static struct cor_dev *_cor_dev_get(struct net_device *dev)
{
	struct list_head *curr = cor_devs.next;

	while (curr != (&cor_devs)) {
		struct cor_dev *cd = container_of(curr, struct cor_dev,
				dev_list);
		BUG_ON(cd->dev == 0);
		if (cd->dev == dev) {
			kref_get(&cd->ref);
			return cd;
		}
		curr = curr->next;
	}
	return 0;
}

struct cor_dev *cor_dev_get(struct net_device *dev)
{
	struct cor_dev *ret = 0;

	spin_lock_bh(&cor_devs_lock);
	ret = _cor_dev_get(dev);
	spin_unlock_bh(&cor_devs_lock);

	return ret;
}
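
/* Work function behind cor_dev_waitexit_work: for every cor_dev parked on
 * cor_devs_waitexit, stop its qos_resume kthread outside of cor_devs_lock
 * and drop the reference held for the waitexit list.
 */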
static void cor_dev_queue_waitexit(struct work_struct *work)
{
	spin_lock_bh(&cor_devs_lock);
	while (!list_empty(&cor_devs_waitexit)) {
		struct cor_dev *cd = container_of(cor_devs_waitexit.next,
				struct cor_dev, dev_list);
		list_del(&cd->dev_list);

		spin_unlock_bh(&cor_devs_lock);

		kthread_stop(cd->send_queue.qos_resume_thread);
		put_task_struct(cd->send_queue.qos_resume_thread);
		kref_put(&cd->ref, cor_dev_free);

		spin_lock_bh(&cor_devs_lock);
	}
	spin_unlock_bh(&cor_devs_lock);
}
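
/* Detach the cor_dev for one net_device (or all of them when dev == 0) from
 * cor_devs, destroy its send queue and hand it over to cor_dev_waitexit_work,
 * which stops the qos_resume thread and releases the device.
 */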
void cor_dev_destroy(struct net_device *dev)
{
	int rc = 1;

	while (1) {
		struct cor_dev *cd;

		spin_lock_bh(&cor_devs_lock);

		if (dev == 0) {
			if (list_empty(&cor_devs)) {
				cd = 0;
			} else {
				cd = container_of(cor_devs.next, struct cor_dev,
						dev_list);
			}
		} else {
			cd = _cor_dev_get(dev);
		}

		if (cd == 0) {
			spin_unlock_bh(&cor_devs_lock);
			break;
		}

		list_del(&cd->dev_list);
		kref_put(&cd->ref, cor_kreffree_bug);

		spin_unlock_bh(&cor_devs_lock);

		rc = 0;

		cor_dev_queue_destroy(cd);

		spin_lock_bh(&cor_devs_lock);
		list_add(&cd->dev_list, &cor_devs_waitexit);
		kref_get(&cd->ref);
		spin_unlock_bh(&cor_devs_lock);

		schedule_work(&cor_dev_waitexit_work);

		kref_put(&cd->ref, cor_dev_free);
	}
}
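
/* Allocate and initialise a cor_dev for a newly usable net_device: take a
 * reference on the device, set up its send queue and link it into cor_devs.
 * Returns 0 on success and 1 on failure.
 */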
static int cor_dev_create(struct net_device *dev)
{
	struct cor_dev *cd = kmalloc(sizeof(struct cor_dev), GFP_KERNEL);

	BUG_ON(dev == 0);

	if (cd == 0) {
		printk(KERN_ERR "cor: unable to allocate memory for cor_dev, not enabling device\n");
		return 1;
	}

	memset(cd, 0, sizeof(struct cor_dev));

	kref_init(&cd->ref);

	cd->dev = dev;
	dev_hold(dev);

	if (cor_dev_queue_init(cd) != 0) {
		dev_put(cd->dev);
		cd->dev = 0;

		kfree(cd);

		return 1;
	}

	spin_lock_bh(&cor_devs_lock);
	list_add(&cd->dev_list, &cor_devs);
	spin_unlock_bh(&cor_devs_lock);

	return 0;
}
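
/* Netdevice notifier: NETDEV_UP creates the cor_dev and (unless running in
 * client mode) starts broadcasting announcements, NETDEV_DOWN stops
 * announcements, resets the neighbors and destroys the cor_dev, and
 * NETDEV_CHANGEMTU triggers resending of the receive MTU. The "down N"
 * printk/udelay sequence appears to be temporary debugging output.
 */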
int cor_netdev_notify_func(struct notifier_block *not, unsigned long event,
		void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int rc;

	BUG_ON(dev == 0);

	switch (event) {
	case NETDEV_UP:
		if (dev->flags & IFF_LOOPBACK)
			break;

		rc = cor_dev_create(dev);
		if (rc == 1)
			return 1;
		if (cor_is_clientmode() == 0)
			cor_announce_send_start(dev, dev->broadcast,
					ANNOUNCE_TYPE_BROADCAST);
		break;
	case NETDEV_DOWN:
		printk(KERN_ERR "down 1\n");
		udelay(100);
		printk(KERN_ERR "down 2\n");
		udelay(100);
		cor_announce_send_stop(dev, 0, ANNOUNCE_TYPE_BROADCAST);
		printk(KERN_ERR "down 3\n");
		udelay(100);
		cor_reset_neighbors(dev);
		printk(KERN_ERR "down 4\n");
		udelay(100);
		cor_dev_destroy(dev);
		printk(KERN_ERR "down 5\n");
		udelay(100);
		break;
	case NETDEV_CHANGEMTU:
		cor_resend_rcvmtu(dev);
		break;
	case NETDEV_REBOOT:
	case NETDEV_CHANGE:
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
	case NETDEV_CHANGEADDR:
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
		break;
	default:
		return 1;
	}

	return 0;
}

static struct packet_type cor_ptype = {
	.type = htons(ETH_P_COR),
	.dev = 0,
	.func = cor_rcv
};
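
/* cor_dev_up()/cor_dev_down() register and unregister the ETH_P_COR packet
 * handler and the netdevice notifier that drives cor_dev creation and
 * destruction.
 */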
void cor_dev_down(void)
{
	if (cor_pack_registered != 0) {
		cor_pack_registered = 0;
		dev_remove_pack(&cor_ptype);
	}

	if (cor_netdev_notify_registered != 0) {
		if (unregister_netdevice_notifier(&cor_netdev_notify) != 0) {
			printk(KERN_WARNING "warning: cor_dev_down: unregister_netdevice_notifier failed\n");
			BUG();
		}
		cor_netdev_notify_registered = 0;
	}
}

int cor_dev_up(void)
{
	BUG_ON(cor_netdev_notify_registered != 0);
	if (register_netdevice_notifier(&cor_netdev_notify) != 0)
		return 1;
	cor_netdev_notify_registered = 1;

	BUG_ON(cor_pack_registered != 0);
	dev_add_pack(&cor_ptype);
	cor_pack_registered = 1;

	return 0;
}

int __init cor_dev_init(void)
{
	memset(&cor_netdev_notify, 0, sizeof(cor_netdev_notify));
	cor_netdev_notify.notifier_call = cor_netdev_notify_func;

	return 0;
}

void __exit cor_dev_exit1(void)
{
	flush_work(&cor_dev_waitexit_work);
}

MODULE_LICENSE("GPL");