use list instead of heap for queueing conn_data
net/cor/neighbor.c
1 /**
2 * Connection oriented routing
3 * Copyright (C) 2007-2019 Michael Blizek
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
21 #include <linux/delay.h>
22 #include "cor.h"
24 /**
25 * Broadcast data format:
26 * version [2]
27 * is 0, may be increased if the protocol changes
28 * min_version [2]
29 * is 0, must be increased if a future version of the protocol is incompatible
 30  * with the current version
31 * [data]
33 * Data format of the announce packet "data" field:
 34  * {command [2] commandlength [2] commanddata [commandlength]} [...]
37 /* Commands */
39 /* NEIGHCMD_VERSION: version[2] minversion[2] */
40 #define NEIGHCMD_VERSION 1
42 /* NEIGHCMD_ADDR: addrlen[2] addr[addrlen] */
43 #define NEIGHCMD_ADDR 2
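/*
 * Illustrative sketch of walking the announce "data" field described above:
 * a sequence of {command [2]} {commandlength [2]} {commanddata [commandlength]}
 * items.  It mirrors the core loop of parse_announce_cmds() further down
 * (leaving out that function's handling of trailing zero padding) and is only
 * meant as a reading aid; it is not called anywhere.
 */
static int example_walk_announce_cmds(char *msg, __u32 len)
{
	while (len >= 4) {
		__u16 cmd = parse_u16(msg);
		__u16 cmdlen = parse_u16(msg + 2);

		msg += 4;
		len -= 4;

		if (cmdlen > len)
			return 1; /* truncated command, reject the packet */

		/* cmd is NEIGHCMD_VERSION, NEIGHCMD_ADDR or unknown here */
		msg += cmdlen;
		len -= cmdlen;
	}

	return 0;
}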
46 struct neighbor_discdata{
47 struct list_head lh;
48 unsigned long jiffies_created;
50 __be32 sessionid;
52 struct net_device *dev;
53 char mac[MAX_ADDR_LEN];
55 __u8 nb_allocated;
57 __u8 rcvd_version;
58 __u8 rcvd_addr;
60 __u16 version;
61 __u16 minversion;
63 char *addr;
64 __u16 addrlen;
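/*
 * One neighbor_discdata (above) exists per (sessionid, dev, mac) currently
 * being discovered: announce commands may arrive split across several
 * packets, so version and address are collected here until both rcvd_version
 * and rcvd_addr are set and add_neighbor() can be called.  Entries live on
 * nb_dd_list and expire after NEIGHBOR_DISCOVERY_TIMEOUT_SEC (see
 * findoralloc_neighbor_discdata()).
 */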
67 static atomic_t packets_in_workqueue = ATOMIC_INIT(0);
69 static DEFINE_MUTEX(announce_rcv_lock);
70 static DEFINE_SPINLOCK(announce_snd_lock);
71 static DEFINE_SPINLOCK(neighbor_list_lock);
73 static DEFINE_MUTEX(neigh_up_lock);
75 static DEFINE_SPINLOCK(local_addr_lock);
76 static char *local_addr;
77 static __u32 local_addrlen;
78 static __be32 local_addr_sessionid;
80 static LIST_HEAD(nb_dd_list); /* protected by announce_rcv_lock */
81 static __u32 num_nb_dd = 0;
82 static struct kmem_cache *nb_dd_slab;
84 static LIST_HEAD(nb_list);
85 static struct kmem_cache *nb_slab;
86 static atomic_t num_neighs;
88 static LIST_HEAD(announce_out_list);
90 static struct notifier_block netdev_notify;
91 __u8 netdev_notify_registered = 0;
94 void neighbor_free(struct kref *ref)
96 struct neighbor *nb = container_of(ref, struct neighbor, ref);
97 /* printk(KERN_ERR "neighbor free"); */
98 BUG_ON(nb->nb_list.next != LIST_POISON1);
99 BUG_ON(nb->nb_list.prev != LIST_POISON2);
100 if (nb->addr != 0)
101 kfree(nb->addr);
102 nb->addr = 0;
103 if (nb->dev != 0)
104 dev_put(nb->dev);
105 nb->dev = 0;
106 if (nb->queue != 0)
107 kref_put(&(nb->queue->ref), free_qos);
108 nb->queue = 0;
109 kmem_cache_free(nb_slab, nb);
110 atomic_dec(&num_neighs);
113 static void stall_timer(struct work_struct *work);
115 static struct neighbor *alloc_neighbor(gfp_t allocflags)
117 struct neighbor *nb;
118 __u64 seqno;
120 if (atomic_inc_return(&num_neighs) >= MAX_NEIGHBORS) {
121 atomic_dec(&num_neighs);
122 return 0;
125 nb = kmem_cache_alloc(nb_slab, allocflags);
126 if (unlikely(nb == 0))
127 return 0;
129 memset(nb, 0, sizeof(struct neighbor));
131 kref_init(&(nb->ref));
132 atomic_set(&(nb->sessionid_rcv_needed), 1);
133 atomic_set(&(nb->sessionid_snd_needed), 1);
134 timer_setup(&(nb->cmsg_timer), controlmsg_timerfunc, 0);
135 tasklet_init(&(nb->cmsg_task), controlmsg_taskfunc, (unsigned long) nb);
136 atomic_set(&(nb->cmsg_task_scheduled), 0);
137 atomic_set(&(nb->cmsg_timer_running), 0);
138 spin_lock_init(&(nb->cmsg_lock));
139 spin_lock_init(&(nb->send_cmsg_lock));
140 INIT_LIST_HEAD(&(nb->cmsg_queue_pong));
141 INIT_LIST_HEAD(&(nb->cmsg_queue_ack));
142 INIT_LIST_HEAD(&(nb->cmsg_queue_ackconn));
143 INIT_LIST_HEAD(&(nb->cmsg_queue_conndata));
144 INIT_LIST_HEAD(&(nb->cmsg_queue_other));
145 nb->last_ping_time = jiffies;
146 nb->cmsg_interval = 1000000;
147 atomic_set(&(nb->latency_retrans_us), PING_GUESSLATENCY_MS*1000);
148 atomic_set(&(nb->latency_advertised_us), PING_GUESSLATENCY_MS*1000);
149 atomic_set(&(nb->max_remote_ack_delay_us), 1000000);
150 atomic_set(&(nb->max_remote_ackconn_delay_us), 1000000);
151 atomic_set(&(nb->max_remote_other_delay_us), 1000000);
152 spin_lock_init(&(nb->state_lock));
153 INIT_DELAYED_WORK(&(nb->stalltimeout_timer), stall_timer);
154 spin_lock_init(&(nb->connid_lock));
155 spin_lock_init(&(nb->connid_reuse_lock));
156 INIT_LIST_HEAD(&(nb->connid_reuse_list));
157 spin_lock_init(&(nb->kp_retransmits_lock));
158 get_random_bytes((char *) &seqno, sizeof(seqno));
159 atomic64_set(&(nb->kpacket_seqno), seqno);
160 atomic64_set(&(nb->priority_sum), 0);
161 spin_lock_init(&(nb->conn_list_lock));
162 INIT_LIST_HEAD(&(nb->rcv_conn_list));
163 INIT_LIST_HEAD(&(nb->stalledconn_list));
164 spin_lock_init(&(nb->stalledconn_lock));
165 INIT_WORK(&(nb->stalledconn_work), resume_nbstalled_conns);
166 INIT_LIST_HEAD(&(nb->rcv_conn_list));
167 spin_lock_init(&(nb->retrans_lock));
168 INIT_LIST_HEAD(&(nb->retrans_list));
169 INIT_LIST_HEAD(&(nb->retrans_list_conn));
171 return nb;
174 int is_from_nb(struct sk_buff *skb, struct neighbor *nb)
176 int rc;
178 char source_hw[MAX_ADDR_LEN];
179 memset(source_hw, 0, MAX_ADDR_LEN);
180 if (skb->dev->header_ops != 0 &&
181 skb->dev->header_ops->parse != 0)
182 skb->dev->header_ops->parse(skb, source_hw);
184 rc = (skb->dev == nb->dev && memcmp(nb->mac, source_hw,
185 MAX_ADDR_LEN) == 0);
186 return rc;
189 static struct neighbor *_get_neigh_by_mac(struct net_device *dev,
190 char *source_hw)
192 struct list_head *currlh;
193 struct neighbor *ret = 0;
195 spin_lock_bh(&(neighbor_list_lock));
197 currlh = nb_list.next;
199 while (currlh != &nb_list) {
200 struct neighbor *curr = container_of(currlh, struct neighbor,
201 nb_list);
203 if (curr->dev == dev && memcmp(curr->mac, source_hw,
204 MAX_ADDR_LEN) == 0) {
205 ret = curr;
206 kref_get(&(ret->ref));
207 break;
210 currlh = currlh->next;
213 spin_unlock_bh(&(neighbor_list_lock));
215 return ret;
218 struct neighbor *get_neigh_by_mac(struct sk_buff *skb)
220 char source_hw[MAX_ADDR_LEN];
221 memset(source_hw, 0, MAX_ADDR_LEN);
222 if (skb->dev->header_ops != 0 &&
223 skb->dev->header_ops->parse != 0)
224 skb->dev->header_ops->parse(skb, source_hw);
226 return _get_neigh_by_mac(skb->dev, source_hw);
229 struct neighbor *find_neigh(char *addr, __u16 addrlen)
231 struct list_head *currlh;
232 struct neighbor *ret = 0;
234 if (addr == 0 || addrlen == 0)
235 return 0;
237 spin_lock_bh(&(neighbor_list_lock));
239 currlh = nb_list.next;
241 while (currlh != &nb_list) {
242 struct neighbor *curr = container_of(currlh, struct neighbor,
243 nb_list);
245 if (curr->addr != 0 && curr->addrlen != 0 &&
246 curr->addrlen == addrlen &&
247 memcmp(curr->addr, addr, addrlen) == 0) {
248 ret = curr;
249 kref_get(&(ret->ref));
251 goto out;
254 currlh = currlh->next;
257 out:
258 spin_unlock_bh(&(neighbor_list_lock));
260 return ret;
263 //TODO throughput field
264 __u32 generate_neigh_list(char *buf, __u32 buflen)
266 struct list_head *currlh;
268 __u32 cnt = 0;
270 __u32 buf_offset = 4;
272 int rc;
 275  * The variable-length header (the row count) needs to be generated after the
 276  * data. This is done by reserving the maximum space it could take. If it
 277  * ends up being smaller, the data is moved so that there is no gap.
280 BUG_ON(buf == 0);
281 BUG_ON(buflen < buf_offset);
283 /* num_fields */
284 rc = encode_len(buf + buf_offset, buflen - buf_offset, 2);
285 BUG_ON(rc <= 0);
286 buf_offset += rc;
288 /* addr field */
289 BUG_ON(buflen < buf_offset + 2);
290 put_u16(buf + buf_offset, LIST_NEIGH_FIELD_ADDR);
291 buf_offset += 2;
293 rc = encode_len(buf + buf_offset, buflen - buf_offset, 0);
294 BUG_ON(rc <= 0);
295 buf_offset += rc;
297 /* latency field */
298 BUG_ON(buflen < buf_offset + 2);
299 put_u16(buf + buf_offset, LIST_NEIGH_FIELD_LATENCY);
300 buf_offset += 2;
302 rc = encode_len(buf + buf_offset, buflen - buf_offset, 1);
303 BUG_ON(rc <= 0);
304 buf_offset += rc;
306 spin_lock_bh(&(neighbor_list_lock));
308 currlh = nb_list.next;
310 while (currlh != &nb_list) {
311 struct neighbor *curr = container_of(currlh, struct neighbor,
312 nb_list);
313 int state;
315 state = get_neigh_state(curr);
317 if (state != NEIGHBOR_STATE_ACTIVE)
318 goto cont;
320 BUG_ON((curr->addr == 0) != (curr->addrlen == 0));
321 if (curr->addr == 0 || curr->addrlen == 0)
322 goto cont;
324 if (unlikely(buflen < buf_offset + 4 + curr->addrlen + 1))
325 break;
327 /* fieldlen */
328 rc = encode_len(buf + buf_offset, buflen - buf_offset,
329 curr->addrlen);
330 BUG_ON(rc <= 0);
331 buf_offset += rc;
333 BUG_ON(curr->addrlen > buflen - buf_offset);
334 memcpy(buf + buf_offset, curr->addr, curr->addrlen); /* addr */
335 buf_offset += curr->addrlen;
337 buf[buf_offset] = enc_log_64_11(atomic_read(
338 &(curr->latency_advertised_us)));
339 buf_offset += 1;
341 BUG_ON(buf_offset > buflen);
343 cnt++;
345 cont:
346 currlh = currlh->next;
349 spin_unlock_bh(&(neighbor_list_lock));
351 rc = encode_len(buf, 4, cnt);
352 BUG_ON(rc <= 0);
353 BUG_ON(rc > 4);
355 if (likely(rc < 4))
356 memmove(buf + ((__u32) rc), buf+4, buf_offset);
358 return buf_offset - 4 + ((__u32) rc);
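/*
 * Illustrative sketch of the "reserve the maximum header size, write the
 * data, then encode the real value and close the gap" pattern used by
 * generate_neigh_list() above.  The 1-or-4 byte length encoding below is a
 * hypothetical stand-in; the real on-wire encoding is whatever encode_len()
 * produces.  Bounds checks are omitted and nothing calls these helpers.
 */
static int example_encode_len(char *buf, __u32 val)
{
	if (val < 128) {
		buf[0] = (char) val;
		return 1;
	}

	buf[0] = (char) (0x80 | ((val >> 24) & 0x7f));
	buf[1] = (char) ((val >> 16) & 0xff);
	buf[2] = (char) ((val >> 8) & 0xff);
	buf[3] = (char) (val & 0xff);
	return 4;
}

static __u32 example_build_list(char *buf, char *rows, __u32 rowlen,
		__u32 rowcnt)
{
	__u32 buf_offset = 4; /* reserve worst-case space for the row count */
	int rc;

	memcpy(buf + buf_offset, rows, rowlen * rowcnt); /* data first */
	buf_offset += rowlen * rowcnt;

	rc = example_encode_len(buf, rowcnt); /* now the real row count */
	if (rc < 4) /* smaller than reserved: move the data back */
		memmove(buf + rc, buf + 4, buf_offset - 4);

	return buf_offset - 4 + ((__u32) rc); /* bytes actually used */
}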
361 static void reset_all_conns(struct neighbor *nb)
363 while (1) {
364 unsigned long iflags;
365 struct conn *src_in;
366 int rc;
368 spin_lock_irqsave(&(nb->conn_list_lock), iflags);
370 if (list_empty(&(nb->rcv_conn_list))) {
371 spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);
372 break;
375 src_in = container_of(nb->rcv_conn_list.next, struct conn,
376 source.in.nb_list);
377 kref_get(&(src_in->ref));
379 spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);
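/*
 * Lock both connection directions; the client-side conn's rcv_lock is
 * always taken first so the pair is locked in a consistent order.
 */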
381 if (src_in->is_client) {
382 spin_lock_bh(&(src_in->rcv_lock));
383 spin_lock_bh(&(src_in->reversedir->rcv_lock));
384 } else {
385 spin_lock_bh(&(src_in->reversedir->rcv_lock));
386 spin_lock_bh(&(src_in->rcv_lock));
389 if (unlikely(unlikely(src_in->sourcetype != SOURCE_IN) ||
390 unlikely(src_in->source.in.nb != nb))) {
391 rc = 1;
392 goto unlock;
395 rc = send_reset_conn(nb, src_in->reversedir->target.out.conn_id,
398 if (unlikely(rc != 0))
399 goto unlock;
401 if (src_in->reversedir->isreset == 0)
402 src_in->reversedir->isreset = 1;
404 unlock:
405 if (src_in->is_client) {
406 spin_unlock_bh(&(src_in->rcv_lock));
407 spin_unlock_bh(&(src_in->reversedir->rcv_lock));
408 } else {
409 spin_unlock_bh(&(src_in->reversedir->rcv_lock));
410 spin_unlock_bh(&(src_in->rcv_lock));
413 if (rc == 0) {
414 reset_conn(src_in);
415 kref_put(&(src_in->ref), free_conn);
416 } else {
417 kref_put(&(src_in->ref), free_conn);
418 kref_get(&(nb->ref));
419 schedule_delayed_work(&(nb->stalltimeout_timer), HZ);
420 break;
425 static void reset_neighbor(struct neighbor *nb)
427 int removenblist;
428 unsigned long iflags;
430 spin_lock_irqsave(&(nb->state_lock), iflags);
431 removenblist = (nb->state != NEIGHBOR_STATE_KILLED);
432 nb->state = NEIGHBOR_STATE_KILLED;
433 spin_unlock_irqrestore(&(nb->state_lock), iflags);
435 /* if (removenblist)
436 printk(KERN_ERR "reset_neighbor"); */
438 reset_all_conns(nb);
439 #warning todo empty cmsg queues
441 if (removenblist) {
442 spin_lock_bh(&neighbor_list_lock);
443 list_del(&(nb->nb_list));
444 spin_unlock_bh(&neighbor_list_lock);
446 kref_put(&(nb->ref), neighbor_free); /* nb_list */
 450 #warning todo neighbor does not get reset???
451 #warning todo reset neighbor if stuck in initial state
452 static void reset_neighbors(struct net_device *dev)
454 struct list_head *currlh;
456 restart:
457 spin_lock_bh(&neighbor_list_lock);
459 currlh = nb_list.next;
461 while (currlh != &nb_list) {
462 unsigned long iflags;
463 struct neighbor *currnb = container_of(currlh, struct neighbor,
464 nb_list);
465 __u8 state;
467 if (dev != 0 && currnb->dev != dev)
468 goto cont;
470 spin_lock_irqsave(&(currnb->state_lock), iflags);
471 state = currnb->state;
472 spin_unlock_irqrestore(&(currnb->state_lock), iflags);
474 if (state != NEIGHBOR_STATE_KILLED) {
475 spin_unlock_bh(&neighbor_list_lock);
476 reset_neighbor(currnb);
477 goto restart;
480 cont:
481 currlh = currlh->next;
484 spin_unlock_bh(&neighbor_list_lock);
487 static void stall_timer(struct work_struct *work)
489 struct neighbor *nb = container_of(to_delayed_work(work),
490 struct neighbor, stalltimeout_timer);
492 int stall_time_ms;
493 __u8 nbstate;
495 unsigned long iflags;
497 spin_lock_irqsave(&(nb->state_lock), iflags);
498 nbstate = nb->state;
499 if (unlikely(nbstate != NEIGHBOR_STATE_STALLED))
500 nb->str_timer_pending = 0;
502 spin_unlock_irqrestore(&(nb->state_lock), iflags);
504 if (unlikely(nbstate == NEIGHBOR_STATE_ACTIVE))
505 goto kref;
507 stall_time_ms = jiffies_to_msecs(jiffies -
508 nb->state_time.last_roundtrip);
510 if (nbstate == NEIGHBOR_STATE_STALLED &&
511 stall_time_ms < NB_KILL_TIME_MS) {
512 schedule_delayed_work(&(nb->stalltimeout_timer),
513 msecs_to_jiffies(NB_KILL_TIME_MS -
514 stall_time_ms));
515 return;
518 reset_neighbor(nb);
520 kref:
521 kref_put(&(nb->ref), neighbor_free); /* stall_timer */
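/*
 * Neighbor state machine (see also ping_resp() and stall_timer()):
 *   INITIAL -> ACTIVE   after PING_SUCCESS_CNT_INIT successful pings and at
 *                       least INITIAL_TIME_MS spent in the initial state
 *   ACTIVE  -> STALLED  when no roundtrip completed for NB_STALL_TIME_MS
 *                       while enough pings are in transit (checked here)
 *   STALLED -> ACTIVE   after PING_SUCCESS_CNT_STALLED successful pings
 *   STALLED -> KILLED   by stall_timer() once the stall has lasted
 *                       NB_KILL_TIME_MS; reset_neighbor() then resets all
 *                       conns and removes the neighbor from nb_list
 */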
524 int get_neigh_state(struct neighbor *nb)
526 int ret;
527 unsigned long iflags;
528 int stall_time_ms;
530 BUG_ON(nb == 0);
532 spin_lock_irqsave(&(nb->state_lock), iflags);
534 stall_time_ms = jiffies_to_msecs(jiffies -
535 nb->state_time.last_roundtrip);
537 if (likely(nb->state == NEIGHBOR_STATE_ACTIVE) &&
538 unlikely(stall_time_ms > NB_STALL_TIME_MS) && (
539 nb->ping_intransit >= NB_STALL_MINPINGS ||
540 nb->ping_intransit >= PING_COOKIES_PER_NEIGH)) {
541 nb->state = NEIGHBOR_STATE_STALLED;
542 nb->ping_success = 0;
543 if (nb->str_timer_pending == 0) {
544 nb->str_timer_pending = 1;
545 kref_get(&(nb->ref));
547 schedule_delayed_work(&(nb->stalltimeout_timer),
548 msecs_to_jiffies(NB_KILL_TIME_MS -
549 stall_time_ms));
552 /* printk(KERN_ERR "switched to stalled"); */
553 BUG_ON(nb->ping_intransit > PING_COOKIES_PER_NEIGH);
556 ret = nb->state;
558 spin_unlock_irqrestore(&(nb->state_lock), iflags);
560 return ret;
563 static struct ping_cookie *find_cookie(struct neighbor *nb, __u32 cookie)
565 int i;
567 for(i=0;i<PING_COOKIES_PER_NEIGH;i++) {
568 if (nb->cookies[i].cookie == cookie)
569 return &(nb->cookies[i]);
571 return 0;
574 static void reset_cookie(struct neighbor *nb, struct ping_cookie *c)
576 if (c->cookie == 0)
577 return;
579 if (nb->cookie_unsent != c->cookie)
580 nb->ping_intransit--;
582 c->cookie = 0;
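/*
 * Integer square root via Newton-Raphson: starting from y = 65536, each
 * step computes y = (y + x/y) / 2; the 20 fixed iterations are enough to
 * converge across the 64 bit input range.  Used for the latency standard
 * deviation below.
 */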
585 static __u32 sqrt(__u64 x)
587 int i;
588 __u64 y = 65536;
590 if (unlikely(x <= 1))
591 return 0;
593 for (i=0;i<20;i++) {
594 y = y/2 + div64_u64(x/2, y);
595 if (unlikely(y == 0))
596 y = 1;
599 if (unlikely(y > U32_MAX))
600 y = U32_MAX;
602 return (__u32) y;
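/*
 * Latency smoothing: normally an exponential moving average with weight
 * 1/16 (newlatency = (15 * old + sample) / 16).  While the neighbor is
 * still in NEIGHBOR_STATE_INITIAL and fewer than 16 pings have succeeded,
 * a plain running average over the samples seen so far is used instead, so
 * the first measurements are not dominated by the initial guess.  Samples
 * are passed in nanoseconds; the returned value is in microseconds.
 */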
605 static __u32 calc_newlatency(struct neighbor *nb_statelocked,
606 __u32 oldlatency_us, __s64 newlatency_ns)
608 __s64 oldlatency = oldlatency_us * 1000L;
609 __s64 newlatency;
611 if (unlikely(unlikely(nb_statelocked->state == NEIGHBOR_STATE_INITIAL)&&
612 nb_statelocked->ping_success < 16))
613 newlatency = div64_s64(
614 oldlatency * nb_statelocked->ping_success +
615 newlatency_ns,
616 nb_statelocked->ping_success + 1);
617 else
618 newlatency = (oldlatency * 15 + newlatency_ns) / 16;
620 newlatency = div_s64(newlatency + 500, 1000);
622 if (unlikely(newlatency < 0))
623 newlatency = 0;
624 if (unlikely(newlatency > U32_MAX))
625 newlatency = U32_MAX;
627 return (__u32) newlatency;
630 static void update_nb_latency(struct neighbor *nb_statelocked,
631 struct ping_cookie *c, __u32 respdelay)
633 ktime_t now = ktime_get();
635 __s64 pinglatency_retrans_ns = ktime_to_ns(now) -
636 ktime_to_ns(c->time_sent) - respdelay * 1000LL;
637 __s64 pinglatency_advertised_ns = ktime_to_ns(now) -
638 ktime_to_ns(c->time_created) - respdelay * 1000LL;
640 __u32 oldlatency_retrans_us =
641 atomic_read(&(nb_statelocked->latency_retrans_us));
643 __u32 newlatency_retrans_us = calc_newlatency(nb_statelocked,
644 oldlatency_retrans_us, pinglatency_retrans_ns);
646 atomic_set(&(nb_statelocked->latency_retrans_us),
647 newlatency_retrans_us);
649 if (unlikely(unlikely(nb_statelocked->state == NEIGHBOR_STATE_INITIAL)&&
650 nb_statelocked->ping_success < 16)) {
651 nb_statelocked->latency_variance_retrans_us =
652 ((__u64) newlatency_retrans_us) *
653 newlatency_retrans_us;
654 atomic_set(&(nb_statelocked->latency_stddev_retrans_us), sqrt(
655 nb_statelocked->latency_variance_retrans_us));
656 } else if (pinglatency_retrans_ns > oldlatency_retrans_us *
657 ((__s64) 1000)) {
658 __s64 newdiff = div_s64(pinglatency_retrans_ns -
659 oldlatency_retrans_us * ((__s64) 1000), 1000);
660 __u32 newdiff32 = (__u32) (unlikely(newdiff >= U32_MAX) ?
661 U32_MAX : newdiff);
662 __u64 newvar = ((__u64) newdiff32) * newdiff32;
664 __u64 oldval = nb_statelocked->latency_variance_retrans_us;
666 if (unlikely(unlikely(newvar > (1LL << 55)) || unlikely(
667 oldval > (1LL << 55)))) {
668 nb_statelocked->latency_variance_retrans_us =
669 (oldval / 16) * 15 + newvar/16;
670 } else {
671 nb_statelocked->latency_variance_retrans_us =
672 (oldval * 15 + newvar) / 16;
675 atomic_set(&(nb_statelocked->latency_stddev_retrans_us), sqrt(
676 nb_statelocked->latency_variance_retrans_us));
679 atomic_set(&(nb_statelocked->latency_advertised_us),
680 calc_newlatency(nb_statelocked,
681 atomic_read(&(nb_statelocked->latency_advertised_us)),
682 pinglatency_advertised_ns));
684 nb_statelocked->last_roundtrip_end = now;
687 void ping_resp(struct neighbor *nb, __u32 cookie, __u32 respdelay)
689 unsigned long iflags;
691 struct ping_cookie *c;
692 int i;
693 int stalledresume = 0;
695 int call_connidreuse = 0;
697 if (unlikely(cookie == 0))
698 return;
700 spin_lock_irqsave(&(nb->state_lock), iflags);
702 c = find_cookie(nb, cookie);
704 if (unlikely(c == 0))
705 goto out;
707 atomic_set(&(nb->sessionid_snd_needed), 0);
709 call_connidreuse = ktime_before_eq(nb->last_roundtrip_end,
710 c->time_created);
712 update_nb_latency(nb, c, respdelay);
714 nb->ping_success++;
716 reset_cookie(nb, c);
718 for(i=0;i<PING_COOKIES_PER_NEIGH;i++) {
719 if (nb->cookies[i].cookie != 0 && ktime_before(
720 nb->cookies[i].time_created, c->time_created)) {
721 nb->cookies[i].pongs++;
722 if (nb->cookies[i].pongs >= PING_PONGLIMIT) {
723 reset_cookie(nb, &(nb->cookies[i]));
728 if (unlikely(nb->state == NEIGHBOR_STATE_INITIAL ||
729 nb->state == NEIGHBOR_STATE_STALLED)) {
730 call_connidreuse = 0;
732 if (nb->state == NEIGHBOR_STATE_INITIAL) {
733 __u64 jiffies64 = get_jiffies_64();
734 if (nb->state_time.last_state_change == 0)
735 nb->state_time.last_state_change = jiffies64;
736 if (jiffies64 <= (nb->state_time.last_state_change +
737 msecs_to_jiffies(INITIAL_TIME_MS)))
738 goto out;
741 if ((nb->state == NEIGHBOR_STATE_INITIAL &&
742 nb->ping_success >= PING_SUCCESS_CNT_INIT) || (
743 nb->state == NEIGHBOR_STATE_STALLED &&
744 nb->ping_success >= PING_SUCCESS_CNT_STALLED)) {
745 stalledresume =
746 (nb->state == NEIGHBOR_STATE_STALLED);
747 nb->state = NEIGHBOR_STATE_ACTIVE;
748 /* printk(KERN_ERR "changed to active"); */
752 if (likely(nb->state == NEIGHBOR_STATE_ACTIVE) ||
753 nb->state == NEIGHBOR_STATE_STALLED)
754 nb->state_time.last_roundtrip = c->jiffies_sent;
756 out:
757 spin_unlock_irqrestore(&(nb->state_lock), iflags);
759 if (call_connidreuse)
760 connid_used_pingsuccess(nb);
762 if (unlikely(stalledresume)) {
763 spin_lock_bh(&(nb->retrans_lock));
764 reschedule_conn_retrans_timer(nb);
765 spin_unlock_bh(&(nb->retrans_lock));
767 spin_lock_bh(&(nb->stalledconn_lock));
768 if (nb->stalledconn_work_scheduled == 0) {
 769                         kref_get(&(nb->ref));
770 schedule_work(&(nb->stalledconn_work));
771 nb->stalledconn_work_scheduled = 1;
773 spin_unlock_bh(&(nb->stalledconn_lock));
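/*
 * Ping cookie lifecycle: add_ping_req() picks a free slot in nb->cookies
 * (recycling one at random if all are in use, or reusing a previously
 * reserved but unsent cookie), assigns the next nonzero cookie value and
 * counts the ping as in transit.  ping_sent() clears cookie_unsent once the
 * packet has really left.  unadd_ping_req() rolls the reservation back if
 * the packet could not be sent; with congested != 0 the cookie value is
 * remembered in cookie_unsent so the next attempt reuses it.  ping_resp()
 * above consumes the cookie and feeds the measured roundtrip into the
 * latency estimate.
 */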
777 __u32 add_ping_req(struct neighbor *nb, unsigned long *last_ping_time,
778 ktime_t now)
780 unsigned long iflags;
781 struct ping_cookie *c;
782 __u32 i;
784 __u32 cookie;
786 spin_lock_irqsave(&(nb->state_lock), iflags);
788 if (nb->cookie_unsent != 0) {
789 c = find_cookie(nb, nb->cookie_unsent);
790 if (c != 0)
791 goto unsent;
792 c = 0;
793 nb->cookie_unsent = 0;
796 c = find_cookie(nb, 0);
797 if (c != 0)
798 goto found;
800 get_random_bytes((char *) &i, sizeof(i));
801 i = (i % PING_COOKIES_PER_NEIGH);
802 c = &(nb->cookies[i]);
803 reset_cookie(nb, c);
805 found:
806 nb->lastcookie++;
807 if (unlikely(nb->lastcookie == 0))
808 nb->lastcookie++;
809 c->cookie = nb->lastcookie;
810 c->time_created = now;
812 unsent:
813 c->pongs = 0;
814 c->time_sent = now;
815 c->jiffies_sent = jiffies;
816 cookie = c->cookie;
818 nb->ping_intransit++;
820 *last_ping_time = nb->last_ping_time;
821 nb->last_ping_time = c->jiffies_sent;
823 spin_unlock_irqrestore(&(nb->state_lock), iflags);
825 BUG_ON(cookie == 0);
827 return cookie;
830 void ping_sent(struct neighbor *nb, __u32 cookie)
832 unsigned long iflags;
834 BUG_ON(cookie == 0);
836 spin_lock_irqsave(&(nb->state_lock), iflags);
838 if (nb->cookie_unsent == cookie)
839 nb->cookie_unsent = 0;
841 spin_unlock_irqrestore(&(nb->state_lock), iflags);
844 void unadd_ping_req(struct neighbor *nb, __u32 cookie,
845 unsigned long last_ping_time, int congested)
847 unsigned long iflags;
849 struct ping_cookie *c;
851 BUG_ON(cookie == 0);
853 spin_lock_irqsave(&(nb->state_lock), iflags);
855 if (congested) {
856 BUG_ON(nb->cookie_unsent != 0 && nb->cookie_unsent != cookie);
857 nb->cookie_unsent = cookie;
860 c = find_cookie(nb, cookie);
861 if (likely(c != 0)) {
862 if (congested == 0)
863 c->cookie = 0;
864 nb->ping_intransit--;
867 nb->last_ping_time = last_ping_time;
869 spin_unlock_irqrestore(&(nb->state_lock), iflags);
872 static int get_ping_forcetime(struct neighbor *nb)
874 unsigned long iflags;
875 int fast;
876 int idle;
878 if (unlikely(get_neigh_state(nb) != NEIGHBOR_STATE_ACTIVE))
879 return PING_FORCETIME_MS;
881 spin_lock_irqsave(&(nb->state_lock), iflags);
882 fast = ((nb->ping_success < PING_ACTIVE_FASTINITIAL_COUNT) ||
883 (nb->ping_intransit > 0));
884 spin_unlock_irqrestore(&(nb->state_lock), iflags);
886 if (fast)
887 return PING_FORCETIME_ACTIVE_FAST_MS;
889 spin_lock_irqsave(&(nb->conn_list_lock), iflags);
890 idle = list_empty(&(nb->rcv_conn_list));
891 spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);
893 if (idle)
894 return PING_FORCETIME_ACTIVEIDLE_MS;
895 else
896 return PING_FORCETIME_ACTIVE_MS;
899 static __u32 get_ping_mindelay(struct neighbor *nb_statelocked)
901 __u32 latency_us = ((__u32) atomic_read(
902 &(nb_statelocked->latency_advertised_us)));
903 __u32 max_remote_other_delay_us = ((__u32) atomic_read(
904 &(nb_statelocked->max_remote_other_delay_us)));
905 __u32 mindelay_ms;
907 #warning todo millisecs???
908 if (latency_us < PING_GUESSLATENCY_MS * 1000)
909 latency_us = PING_GUESSLATENCY_MS * 1000;
911 if (unlikely(nb_statelocked->state != NEIGHBOR_STATE_ACTIVE))
912 mindelay_ms = latency_us/1000;
913 else
914 mindelay_ms = ((latency_us/2 +
915 max_remote_other_delay_us/2)/500);
917 if (likely(nb_statelocked->ping_intransit < PING_COOKIES_THROTTLESTART))
918 return mindelay_ms;
920 mindelay_ms = mindelay_ms * (1 + 9 * (nb_statelocked->ping_intransit *
921 nb_statelocked->ping_intransit /
922 (PING_COOKIES_PER_NEIGH * PING_COOKIES_PER_NEIGH)));
924 return mindelay_ms;
928 * Check whether we want to send a ping now:
929 * 0... Do not send ping.
930 * 1... Send ping now, but only if it can be merged with other messages. This
931 * can happen way before the time requested by get_next_ping_time().
932 * 2... Send ping now, even if a packet has to be created just for the ping
933 * alone.
935 int time_to_send_ping(struct neighbor *nb)
937 unsigned long iflags;
938 int rc = TIMETOSENDPING_YES;
940 __u32 ms_since_last_ping;
942 __u32 forcetime = get_ping_forcetime(nb);
943 __u32 mindelay;
945 spin_lock_irqsave(&(nb->state_lock), iflags);
947 ms_since_last_ping = jiffies_to_msecs(jiffies - nb->last_ping_time);
949 mindelay = get_ping_mindelay(nb);
951 if (forcetime < (mindelay * 3))
952 forcetime = mindelay * 3;
953 else if (forcetime > (mindelay * 3))
954 mindelay = forcetime/3;
956 if (ms_since_last_ping < mindelay || ms_since_last_ping < (forcetime/4))
957 rc = TIMETOSENDPING_NO;
958 else if (ms_since_last_ping >= forcetime)
959 rc = TIMETOSENDPING_FORCE;
961 spin_unlock_irqrestore(&(nb->state_lock), iflags);
963 return rc;
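/*
 * Worked example with illustrative numbers (the real values come from
 * get_ping_mindelay() and get_ping_forcetime()): with mindelay = 20ms and
 * forcetime = 1000ms, forcetime already exceeds 3 * mindelay, so mindelay
 * is raised to forcetime / 3 = 333ms.  Up to 333ms after the last ping the
 * result is TIMETOSENDPING_NO, between 333ms and 1000ms it is
 * TIMETOSENDPING_YES (merge only), and from 1000ms on it is
 * TIMETOSENDPING_FORCE.
 */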
966 unsigned long get_next_ping_time(struct neighbor *nb)
968 unsigned long iflags;
970 __u32 forcetime = get_ping_forcetime(nb);
971 __u32 mindelay;
973 spin_lock_irqsave(&(nb->state_lock), iflags);
974 mindelay = get_ping_mindelay(nb);
975 spin_unlock_irqrestore(&(nb->state_lock), iflags);
977 if (forcetime < (mindelay * 3))
978 forcetime = mindelay * 3;
980 return nb->last_ping_time + msecs_to_jiffies(forcetime);
983 static void add_neighbor(struct neighbor_discdata *nb_dd)
985 struct neighbor *nb;
986 struct list_head *currlh;
988 nb = alloc_neighbor(GFP_KERNEL);
989 if (unlikely(nb == 0))
990 return;
992 nb->queue = get_queue(nb_dd->dev);
993 if (nb->queue == 0) {
994 kmem_cache_free(nb_slab, nb);
995 atomic_dec(&num_neighs);
996 return;
999 dev_hold(nb_dd->dev);
1000 nb->dev = nb_dd->dev;
1002 memcpy(nb->mac, nb_dd->mac, MAX_ADDR_LEN);
1004 nb->addr = nb_dd->addr;
1005 nb->addrlen = nb_dd->addrlen;
1007 nb_dd->nb_allocated = 1;
1008 nb_dd->addr = 0;
1009 nb_dd->addrlen = 0;
1011 spin_lock_bh(&neighbor_list_lock);
1013 currlh = nb_list.next;
1015 BUG_ON((nb->addr == 0) != (nb->addrlen == 0));
1017         if (is_clientmode() && (nb->addr == 0 || nb->addrlen == 0))
1018 goto already_present;
1020 while (currlh != &nb_list) {
1021 struct neighbor *curr = container_of(currlh, struct neighbor,
1022 nb_list);
1024 BUG_ON((curr->addr == 0) != (curr->addrlen == 0));
1026 if (curr->dev == nb->dev &&
1027 memcmp(curr->mac, nb->mac, MAX_ADDR_LEN) == 0)
1028 goto already_present;
1030 if (curr->addr != 0 && curr->addrlen != 0 &&
1031 nb->addr != 0 && nb->addrlen != 0 &&
1032 curr->addrlen == nb->addrlen &&
1033                                 memcmp(curr->addr, nb->addr, curr->addrlen) == 0)
1034 goto already_present;
1036 currlh = currlh->next;
1039 /* printk(KERN_ERR "add_neigh"); */
1041 spin_lock_bh(&(local_addr_lock));
1042 nb->sessionid = local_addr_sessionid ^ nb_dd->sessionid;
1043 spin_unlock_bh(&(local_addr_lock));
1045 timer_setup(&(nb->retrans_timer), retransmit_timerfunc, 0);
1046 tasklet_init(&(nb->retrans_task), retransmit_taskfunc,
1047 (unsigned long) nb);
1049 timer_setup(&(nb->retrans_timer_conn), retransmit_conn_timerfunc, 0);
1050 tasklet_init(&(nb->retrans_task_conn), retransmit_conn_taskfunc,
1051 (unsigned long) nb);
1053 spin_lock_bh(&(nb->cmsg_lock));
1054 nb->last_ping_time = jiffies;
1055 nb->cmsg_interval = 1000000;
1056 schedule_controlmsg_timer(nb);
1057 spin_unlock_bh(&(nb->cmsg_lock));
1059 list_add_tail(&(nb->nb_list), &nb_list);
1061 if (0) {
1062 already_present:
1063 kmem_cache_free(nb_slab, nb);
1064 atomic_dec(&num_neighs);
1067 spin_unlock_bh(&neighbor_list_lock);
1070 static int parse_announce_version(struct neighbor_discdata *nb_dd,
1071 __u16 cmd, char *cmddata, __u16 len)
1073 __u16 version;
1074 __u16 minversion;
1076 if (unlikely(len < 4))
1077 return 1;
1079 version = parse_u16(cmddata);
1080 cmddata += 2;
1081 len -= 2;
1082 minversion = parse_u16(cmddata);
1083 cmddata += 2;
1084 len -= 2;
1086 if (minversion != 0)
1087 return 1;
1089 if (nb_dd->rcvd_version != 0) {
1090 if (nb_dd->version != version ||
1091 nb_dd->minversion != minversion)
1092 return 1;
1093 } else {
1094 nb_dd->version = version;
1095 nb_dd->minversion = minversion;
1096 nb_dd->rcvd_version = 1;
1099 return 0;
1102 static int parse_announce_addaddr(struct neighbor_discdata *nb_dd,
1103 __u16 cmd, char *cmddata, __u16 len)
1105 __u16 addrlen;
1106 char *addr;
1108 BUG_ON(cmd != NEIGHCMD_ADDR);
1109 BUG_ON((nb_dd->addr == 0) != (nb_dd->addrlen == 0));
1110 BUG_ON(nb_dd->rcvd_addr == 0 && nb_dd->addr != 0);
1112 if (len < 2)
1113 return 1;
1115 addrlen = parse_u16(cmddata);
1116 cmddata += 2;
1117 len -= 2;
1119 if (len < addrlen)
1120 return 1;
1122 addr = cmddata;
1123 cmddata += addrlen;
1124 len -= addrlen;
1126 if (nb_dd->rcvd_addr != 0) {
1127 if (nb_dd->addrlen != addrlen)
1128 return 1;
1129 if (addrlen != 0 && memcmp(nb_dd->addr, addr, addrlen) != 0)
1130 return 1;
1131 } else {
1132 if (addrlen != 0) {
1133 nb_dd->addr = kmalloc(addrlen, GFP_KERNEL);
1134 if (unlikely(nb_dd->addr == 0))
1135 return 1;
1137 memcpy(nb_dd->addr, addr, addrlen);
1139 nb_dd->addrlen = addrlen;
1141 nb_dd->rcvd_addr = 1;
1144 return 0;
1147 static int parse_announce_cmd(struct neighbor_discdata *nb_dd,
1148 __u16 cmd, char *cmddata, __u16 len)
1150 if (cmd == NEIGHCMD_VERSION) {
1151 return parse_announce_version(nb_dd, cmd, cmddata, len);
1152 } else if (cmd == NEIGHCMD_ADDR) {
1153 return parse_announce_addaddr(nb_dd, cmd, cmddata, len);
1154 } else {
1155 return 1;
1159 static int parse_announce_cmds(struct neighbor_discdata *nb_dd,
1160 char *msg, __u32 len)
1162 __u32 zeros = 0;
1163 while (zeros < len) {
1164 if (msg[len-zeros-1] != 0)
1165 break;
1166 zeros++;
1169 while (len >= 4 && len > zeros) {
1170 __u16 cmd;
1171 __u16 cmdlen;
1173 cmd = parse_u16(msg);
1174 msg += 2;
1175 len -= 2;
1176 cmdlen = parse_u16(msg);
1177 msg += 2;
1178 len -= 2;
1180 if (cmdlen > len)
1181 return 1;
1183 if (parse_announce_cmd(nb_dd, cmd, msg, cmdlen) != 0)
1184 return 1;
1186 msg += cmdlen;
1187 len -= cmdlen;
1190 if (len != 0 && len < zeros)
1191 return 1;
1193 return 0;
1196 static void neighbor_discdata_free(struct neighbor_discdata *nb_dd)
1198 list_del(&(nb_dd->lh));
1200 BUG_ON(nb_dd->dev == 0);
1201 dev_put(nb_dd->dev);
1203 if (nb_dd->addr != 0) {
1204 kfree(nb_dd->addr);
1205 nb_dd->addr = 0;
1206 nb_dd->addrlen = 0;
1209 kmem_cache_free(nb_dd_slab, nb_dd);
1211 BUG_ON(num_nb_dd == 0);
1212 num_nb_dd--;
1215 static void announce_send_start(struct net_device *dev, char *mac, int type);
1217 static struct neighbor_discdata *findoralloc_neighbor_discdata(
1218 struct net_device *dev, char *source_hw, __be32 sessionid)
1220 unsigned long jiffies_tmp = jiffies;
1221 struct list_head *currlh;
1223 __u32 neighs;
1224 struct neighbor_discdata *nb_dd;
1226 currlh = nb_dd_list.next;
1227 while (currlh != &nb_dd_list) {
1228 struct neighbor_discdata *curr = container_of(currlh,
1229 struct neighbor_discdata, lh);
1231 currlh = currlh->next;
1233 if (time_after(jiffies_tmp, curr->jiffies_created +
1234 HZ * NEIGHBOR_DISCOVERY_TIMEOUT_SEC)) {
1235 neighbor_discdata_free(curr);
1236 continue;
1239 if (curr->sessionid == sessionid && curr->dev == dev &&
1240 memcmp(curr->mac, source_hw, MAX_ADDR_LEN) == 0)
1241 return curr;
1244 neighs = atomic_read(&num_neighs);
1245         if (neighs + num_nb_dd < neighs || neighs + num_nb_dd >= MAX_NEIGHBORS)
1246 return 0;
1247 num_nb_dd++;
1249 nb_dd = kmem_cache_alloc(nb_dd_slab, GFP_KERNEL);
1250 if (unlikely(nb_dd == 0))
1251 return 0;
1253 memset(nb_dd, 0, sizeof(struct neighbor_discdata));
1255 nb_dd->sessionid = sessionid;
1257 dev_hold(dev);
1258 nb_dd->dev = dev;
1260 memcpy(nb_dd->mac, source_hw, MAX_ADDR_LEN);
1262 list_add_tail(&(nb_dd->lh), &nb_dd_list);
1263 nb_dd->jiffies_created = jiffies_tmp;
1265 if (is_clientmode())
1266 announce_send_start(dev, source_hw, ANNOUNCE_TYPE_UNICAST);
1268 return nb_dd;
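/*
 * Receive path for announces: rcv_announce() queues the packet,
 * _rcv_announce() extracts the payload (discarding announces from already
 * known neighbors) and calls parse_announce(), which looks up or creates
 * the neighbor_discdata for this sessionid/dev/mac via
 * findoralloc_neighbor_discdata() above and feeds the command stream into
 * parse_announce_cmds().  Once both the version and the address command
 * have been seen, add_neighbor() creates the neighbor and the discovery
 * data is freed.
 */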
1271 static void parse_announce(struct net_device *dev, char *source_hw,
1272 char *msg, __u32 len)
1274 __be32 sessionid;
1275 struct neighbor_discdata *nb_dd;
1278 if (unlikely(len < 4))
1279 return;
1281 sessionid = parse_be32(msg);
1282 msg += 4;
1283 len -= 4;
1285 nb_dd = findoralloc_neighbor_discdata(dev, source_hw, sessionid);
1286 if (unlikely(nb_dd == 0))
1287 return;
1289 if (parse_announce_cmds(nb_dd, msg, len) != 0)
1290 goto error;
1292 if (nb_dd->rcvd_version != 0 && nb_dd->rcvd_addr != 0) {
1293 add_neighbor(nb_dd);
1295 error:
1296 neighbor_discdata_free(nb_dd);
1300 static void _rcv_announce(struct work_struct *work)
1302 struct skb_procstate *ps = container_of(work,
1303 struct skb_procstate, funcstate.announce1.work);
1304 struct sk_buff *skb = skb_from_pstate(ps);
1306 char source_hw[MAX_ADDR_LEN];
1308 struct neighbor *nb;
1310 char *msg;
1311 __u16 len;
1313 memset(source_hw, 0, MAX_ADDR_LEN);
1314 if (skb->dev->header_ops != 0 &&
1315 skb->dev->header_ops->parse != 0)
1316 skb->dev->header_ops->parse(skb, source_hw);
1318 nb = _get_neigh_by_mac(skb->dev, source_hw);
1319 if (nb != 0) {
1320 kref_put(&(nb->ref), neighbor_free);
1321 nb = 0;
1322 goto discard;
1325 if (unlikely(skb->len > 65535 || skb->len < 0))
1326 goto discard;
1327 len = (__u16) skb->len;
1329 msg = cor_pull_skb(skb, len);
1330 if (msg == 0)
1331 goto discard;
1333 mutex_lock(&(announce_rcv_lock));
1334 parse_announce(skb->dev, source_hw, msg, len);
1335 mutex_unlock(&(announce_rcv_lock));
1337 discard:
1338 kfree_skb(skb);
1340 atomic_dec(&packets_in_workqueue);
1343 int rcv_announce(struct sk_buff *skb)
1345 struct skb_procstate *ps = skb_pstate(skb);
1346 long queuelen;
1348 queuelen = atomic_inc_return(&packets_in_workqueue);
1350 BUG_ON(queuelen <= 0);
1352 if (queuelen > MAX_PACKETS_IN_RCVQUEUE) {
1353 atomic_dec(&packets_in_workqueue);
1354 kfree_skb(skb);
1355 return NET_RX_SUCCESS;
1358 INIT_WORK(&(ps->funcstate.announce1.work), _rcv_announce);
1359 schedule_work(&(ps->funcstate.announce1.work));
1360 return NET_RX_SUCCESS;
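/*
 * Layout of the announce payload built by __send_announce() below
 * (lengths in bytes; 1 + 4 + 8 + 6 + local_addrlen in total):
 *   PACKET_TYPE_ANNOUNCE                                 [1]
 *   sessionid                                            [4]
 *   NEIGHCMD_VERSION, len=4, version, minversion         [2+2+2+2]
 *   NEIGHCMD_ADDR, len=2+addrlen, addrlen, addr          [2+2+2+addrlen]
 */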
1363 static int __send_announce(struct announce_data *ann)
1365 __u32 len;
1366 __u32 offset = 0;
1368 __u32 local_addrlen_tmp;
1370 char *msg = 0;
1372 struct sk_buff *skb;
1373 __u32 headroom;
1375 headroom = LL_RESERVED_SPACE(ann->dev) +
1376 ann->dev->needed_tailroom;
1378 spin_lock_bh(&(local_addr_lock));
1380 retry:
1381 BUG_ON(local_addrlen > 64);
1383 local_addrlen_tmp = local_addrlen;
1385 spin_unlock_bh(&(local_addr_lock));
1387 len = 1 + 4 + 8 + 6 + local_addrlen_tmp;
1389 BUG_ON(len > 1024);
1391 skb = alloc_skb(headroom + len, GFP_ATOMIC);
1392 if (unlikely(skb == 0))
1393 return 0;
1395 skb->protocol = htons(ETH_P_COR);
1396 skb->dev = ann->dev;
1397 skb_reserve(skb, headroom);
1399 #warning net_device locking? (other places too)
1400         if (unlikely(dev_hard_header(skb, ann->dev, ETH_P_COR,
1401 ann->mac, ann->dev->dev_addr, skb->len) < 0))
1402 goto out_err;
1404 skb_reset_network_header(skb);
1406 msg = skb_put(skb, len);
1407 if (unlikely(msg == 0))
1408 goto out_err;
1410 spin_lock_bh(&(local_addr_lock));
1412 if (unlikely(local_addrlen != local_addrlen_tmp)) {
1413 kfree_skb(skb);
1414 skb = 0;
1415 msg = 0;
1416 goto retry;
1419 msg[0] = PACKET_TYPE_ANNOUNCE;
1420 offset++;
1422 put_be32(msg + offset, local_addr_sessionid); /* sessionid */
1423 offset += 4;
1425 put_u16(msg + offset, NEIGHCMD_VERSION); /* command */
1426 offset += 2;
1427 put_u16(msg + offset, 4); /* command length */
1428 offset += 2;
1429 put_u16(msg + offset, 0); /* version */
1430 offset += 2;
1431 put_u16(msg + offset, 0); /* minversion */
1432 offset += 2;
1434 put_u16(msg + offset, NEIGHCMD_ADDR); /* command */
1435 offset += 2;
1436 put_u16(msg + offset, 2 + local_addrlen); /* command length*/
1437 offset += 2;
1438 put_u16(msg + offset, local_addrlen); /* addrlen */
1439 offset += 2;
1440 if (local_addrlen != 0) {
1441 memcpy(msg + offset, local_addr, local_addrlen); /* addr */
1442 offset += local_addrlen;
1445 spin_unlock_bh(&(local_addr_lock));
1447 BUG_ON(offset != len);
1449 return cor_dev_queue_xmit(skb, QOS_CALLER_ANNOUNCE);
1451 if (0) {
1452 out_err:
1453 kfree_skb(skb);
1454 return 0;
1458 void announce_data_free(struct kref *ref)
1460 struct announce_data *ann = container_of(ref, struct announce_data,
1461 ref);
1462 kfree(ann);
1465 int _send_announce(struct announce_data *ann, int fromqos)
1467 int reschedule = 0;
1468 int rc = 0;
1470 spin_lock_bh(&(announce_snd_lock));
1472 if (unlikely(ann->dev == 0))
1473 goto out;
1475 reschedule = 1;
1477 #warning todo reactivate may_send_announce + set rc
1478 /* if (fromqos == 0 && may_send_announce(ann->dev) == 0)
1479 rc = 1;
1480 else */
1481 /*rc = */__send_announce(ann);
1483 if (rc == 0 && ann->type != ANNOUNCE_TYPE_BROADCAST) {
1484 ann->sndcnt++;
1485 reschedule = (ann->sndcnt < ANNOUNCE_SEND_UNICAST_MAXCNT ?
1486 1 : 0);
1488 if (reschedule == 0) {
1489 dev_put(ann->dev);
1490 ann->dev = 0;
1492 list_del(&(ann->lh));
1493 kref_put(&(ann->ref), kreffree_bug);
1497 out:
1498 spin_unlock_bh(&(announce_snd_lock));
1500 if (unlikely(reschedule == 0)) {
1501 } else if (rc != 0) {
1502 if (fromqos == 0) {
1503 struct qos_queue *q = get_queue(ann->dev);
1504 if (q != 0) {
1505 qos_enqueue(q, &(ann->rb), QOS_CALLER_ANNOUNCE);
1506 kref_put(&(q->ref), free_qos);
1509 } else {
1510 kref_get(&(ann->ref));
1511 schedule_delayed_work(&(ann->announce_work), msecs_to_jiffies(
1512 ANNOUNCE_SEND_PACKETINTELVAL_MS));
1515 if (fromqos == 0)
1516 kref_put(&(ann->ref), announce_data_free);
1518 return rc == 0 ? QOS_RESUME_DONE : QOS_RESUME_CONG_NOPROGRESS;
1521 static void send_announce(struct work_struct *work)
1523 struct announce_data *ann = container_of(to_delayed_work(work),
1524 struct announce_data, announce_work);
1525 _send_announce(ann, 0);
1528 static void announce_send_start(struct net_device *dev, char *mac, int type)
1530 struct announce_data *ann;
1532 ann = kmalloc(sizeof(struct announce_data), GFP_KERNEL);
1534 if (unlikely(ann == 0)) {
1535 printk(KERN_ERR "cor cannot allocate memory for sending "
1536 "announces");
1537 return;
1540 memset(ann, 0, sizeof(struct announce_data));
1542 kref_init(&(ann->ref));
1544 dev_hold(dev);
1545 ann->dev = dev;
1546 memcpy(ann->mac, mac, MAX_ADDR_LEN);
1547 ann->type = type;
1549 spin_lock_bh(&(announce_snd_lock));
1550 list_add_tail(&(ann->lh), &announce_out_list);
1551 spin_unlock_bh(&(announce_snd_lock));
1553 INIT_DELAYED_WORK(&(ann->announce_work), send_announce);
1554 kref_get(&(ann->ref));
1555 schedule_delayed_work(&(ann->announce_work), 1);
1558 void announce_send_stop(struct net_device *dev, char *mac, int type)
1560 struct list_head *lh = announce_out_list.next;
1562 spin_lock_bh(&(announce_snd_lock));
1564 while (lh != &announce_out_list) {
1565 struct announce_data *ann = container_of(lh,
1566 struct announce_data, lh);
1568 lh = lh->next;
1571 if (dev != 0 && (ann->dev != dev || (
1572 type != ANNOUNCE_TYPE_BROADCAST && (
1573 ann->type != type ||
1574 memcmp(ann->mac, mac, MAX_ADDR_LEN) != 0))))
1575 continue;
1577 dev_put(ann->dev);
1578 ann->dev = 0;
1580 list_del(&(ann->lh));
1581 kref_put(&(ann->ref), kreffree_bug);
1584 spin_unlock_bh(&(announce_snd_lock));
1587 int netdev_notify_func(struct notifier_block *not, unsigned long event,
1588 void *ptr)
1590 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1591 int rc;
1593 switch(event){
1594 case NETDEV_UP:
1595 if (dev->flags & IFF_LOOPBACK)
1596 break;
1598 BUG_ON(dev == 0);
1599 rc = create_queue(dev);
1600 if (rc == 1)
1601 return 1;
1602 if (is_clientmode() == 0)
1603 announce_send_start(dev, dev->broadcast,
1604 ANNOUNCE_TYPE_BROADCAST);
1605 break;
1606 case NETDEV_DOWN:
1607 printk(KERN_ERR "down 1");
1608 udelay(100);
1609 BUG_ON(dev == 0);
1610 printk(KERN_ERR "down 2");
1611 udelay(100);
1612 announce_send_stop(dev, 0, ANNOUNCE_TYPE_BROADCAST);
1613 printk(KERN_ERR "down 3");
1614 udelay(100);
1615 reset_neighbors(dev);
1616 printk(KERN_ERR "down 4");
1617 udelay(100);
1618 destroy_queue(dev);
1619 printk(KERN_ERR "down 5");
1620 udelay(100);
1621 break;
1622 case NETDEV_REBOOT:
1623 case NETDEV_CHANGE:
1624 case NETDEV_REGISTER:
1625 case NETDEV_UNREGISTER:
1626 case NETDEV_CHANGEMTU:
1627 case NETDEV_CHANGEADDR:
1628 case NETDEV_GOING_DOWN:
1629 case NETDEV_CHANGENAME:
1630 case NETDEV_FEAT_CHANGE:
1631 case NETDEV_BONDING_FAILOVER:
1632 break;
1633 default:
1634 return 1;
1637 return 0;
1640 void _cor_neighbor_down(void)
1642 cor_rcv_down();
1644 spin_lock_bh(&(local_addr_lock));
1645 if (local_addr != 0) {
1646 kfree(local_addr);
1647 local_addr = 0;
1649 local_addrlen = 0;
1650 spin_unlock_bh(&(local_addr_lock));
1652 reset_neighbors(0);
1653 reset_neighbors(0);
1654 destroy_queue(0);
1656 announce_send_stop(0, 0, ANNOUNCE_TYPE_BROADCAST);
1658 if (netdev_notify_registered != 0 &&
1659 unregister_netdevice_notifier(&netdev_notify) != 0) {
1660 printk(KERN_WARNING "warning: cor_neighbor_down: "
1661 "unregister_netdevice_notifier failed");
1662 BUG();
1664 netdev_notify_registered = 0;
1667 void cor_neighbor_down(void)
1669 mutex_lock(&(neigh_up_lock));
1670 _cor_neighbor_down();
1671 mutex_unlock(&(neigh_up_lock));
1674 int cor_neighbor_up(char *addr2, __u32 addrlen2)
1676 int rc = 0;
1678 char *addr2_copy = kmalloc(addrlen2, GFP_KERNEL);
1679 if (unlikely(addr2_copy == 0))
1680 return 1;
1682 memcpy(addr2_copy, addr2, addrlen2);
1684 mutex_lock(&(neigh_up_lock));
1686 _cor_neighbor_down();
1688 spin_lock_bh(&(local_addr_lock));
1690 BUG_ON(local_addr != 0);
1691 BUG_ON(local_addrlen != 0);
1693 local_addr = addr2_copy;
1694 addr2_copy = 0;
1695 local_addrlen = addrlen2;
1696 get_random_bytes((char *) &local_addr_sessionid,
1697 sizeof(local_addr_sessionid));
1699 spin_unlock_bh(&(local_addr_lock));
1701 BUG_ON(netdev_notify_registered != 0);
1703 if (register_netdevice_notifier(&netdev_notify) != 0)
1704 goto out_err2;
1706 netdev_notify_registered = 1;
1708 cor_rcv_up();
1710 if (0) {
1711 out_err2:
1712 spin_lock_bh(&(local_addr_lock));
1713 kfree(local_addr);
1714 local_addr = 0;
1715 local_addrlen = 0;
1716 spin_unlock_bh(&(local_addr_lock));
1717 rc = 1;
1720 mutex_unlock(&(neigh_up_lock));
1722 return rc;
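/*
 * A node is in client mode when no local address has been configured via
 * cor_neighbor_up().  Client mode nodes do not broadcast announces on
 * NETDEV_UP (see netdev_notify_func()); instead they answer received
 * broadcasts with unicast announces (see findoralloc_neighbor_discdata()),
 * and add_neighbor() drops neighbors that announced no address when
 * running in client mode.
 */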
1725 int is_clientmode(void)
1727 int rc;
1728 spin_lock_bh(&(local_addr_lock));
1729 rc = (local_addrlen == 0 ? 1 : 0);
1730 spin_unlock_bh(&(local_addr_lock));
1731 return rc;
1734 int __init cor_neighbor_init(void)
1736 nb_slab = kmem_cache_create("cor_neighbor", sizeof(struct neighbor), 8,
1737 0, 0);
1738 if (unlikely(nb_slab == 0))
1739 return -ENOMEM;
1741 nb_dd_slab = kmem_cache_create("cor_neighbor_discoverydata",
1742 sizeof(struct neighbor_discdata), 8, 0, 0);
1743 if (unlikely(nb_dd_slab == 0))
1744 return -ENOMEM;
1746 atomic_set(&num_neighs, 0);
1748 memset(&netdev_notify, 0, sizeof(netdev_notify));
1749 netdev_notify.notifier_call = netdev_notify_func;
1751 return 0;
1754 MODULE_LICENSE("GPL");