 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
/* Protects cor_announce_out_list and the cor_announce_data entries on it. */
static DEFINE_SPINLOCK(cor_announce_snd_lock);

/* All pending outgoing announces; entries are linked via ann->lh. */
static LIST_HEAD(cor_announce_out_list);
/*
 * ___cor_send_announce - hand a fully built announce skb to the qos layer.
 *
 * Resolves the qos queue of the skb's outgoing device, queues the frame
 * with QOS_CALLER_ANNOUNCE, then drops the queue reference.  *sent is
 * presumably set when rc != NET_XMIT_DROP -- the assignment itself is not
 * visible in this extract.
 *
 * NOTE(review): this extract is fragmentary; the function's opening brace,
 * the no-queue error path leading to the early NET_XMIT_SUCCESS return,
 * the *sent update and the final return are missing from view.
 */
26 static int ___cor_send_announce(struct sk_buff
*skb
, int *sent
)
/* look up the qos queue for the device the skb will leave on */
29 struct cor_qos_queue
*q
= cor_get_queue(skb
->dev
);
/* early-out path (condition not visible here): report success */
34 return NET_XMIT_SUCCESS
;
/* enqueue the announce frame for transmission */
37 rc
= cor_dev_queue_xmit(skb
, q
, QOS_CALLER_ANNOUNCE
);
/* release the reference taken by cor_get_queue() */
38 kref_put(&(q
->ref
), cor_free_qos
);
/* anything other than an explicit drop counts as delivered */
39 if (rc
!= NET_XMIT_DROP
)
/*
 * __cor_send_announce - build and transmit one announce packet for ann->dev.
 *
 * Allocates an skb sized for the link-layer header plus the announce
 * payload, fills in the PACKET_TYPE_ANNOUNCE message (sessionid,
 * ANNCMD_VERSION, ANNCMD_ADDR with the local address) and passes it to
 * ___cor_send_announce().
 *
 * The local address fields are protected by cor_local_addr_lock; the
 * length is snapshotted before the (sleepless but unlocked) skb setup and
 * re-checked afterwards in case the address changed in between.
 *
 * NOTE(review): fragmentary extract -- local declarations (len, offset,
 * msg, skb, headroom, rc), the braces, the error/cleanup paths for
 * dev_hard_header()/skb_put() failure and for the addrlen-changed retry
 * are missing from view.
 */
45 static int __cor_send_announce(struct cor_announce_data
*ann
, int *sent
)
50 __u32 local_addrlen_tmp
;
/* reserve room for the device's link-layer header and tail */
57 headroom
= LL_RESERVED_SPACE(ann
->dev
) +
58 ann
->dev
->needed_tailroom
;
60 spin_lock_bh(&cor_local_addr_lock
);
/* announce format caps the advertised address at 64 bytes */
63 BUG_ON(cor_local_addrlen
> 64);
/* snapshot the length so the skb can be sized outside the lock */
65 local_addrlen_tmp
= cor_local_addrlen
;
67 spin_unlock_bh(&cor_local_addr_lock
);
/* 1 type + 4 sessionid + 8 version cmd + 6 addr cmd hdr + addr bytes */
69 len
= 1 + 4 + 8 + 6 + local_addrlen_tmp
;
73 skb
= alloc_skb(headroom
+ len
, GFP_ATOMIC
);
/* allocation failure: silently skip this round (best effort) */
74 if (unlikely(skb
== 0))
75 return NET_XMIT_SUCCESS
;
77 skb
->protocol
= htons(ETH_P_COR
);
79 skb_reserve(skb
, headroom
);
81 #warning net_device locking? (other places too)
/* build the link-layer header towards ann->mac */
82 if(unlikely(dev_hard_header(skb
, ann
->dev
, ETH_P_COR
,
83 ann
->mac
, ann
->dev
->dev_addr
, skb
->len
) < 0))
86 skb_reset_network_header(skb
);
/* reserve the payload area we computed above */
88 msg
= skb_put(skb
, len
);
89 if (unlikely(msg
== 0))
/* re-take the lock; the address may have changed since the snapshot */
92 spin_lock_bh(&cor_local_addr_lock
);
94 if (unlikely(cor_local_addrlen
!= local_addrlen_tmp
)) {
101 msg
[0] = PACKET_TYPE_ANNOUNCE
;
104 cor_put_be32(msg
+ offset
, cor_local_addr_sessionid
); /* sessionid */
107 cor_put_u16(msg
+ offset
, ANNCMD_VERSION
); /* command */
109 cor_put_u16(msg
+ offset
, 4); /* command length */
111 cor_put_u16(msg
+ offset
, 0); /* version */
113 cor_put_u16(msg
+ offset
, 0); /* minversion */
116 cor_put_u16(msg
+ offset
, ANNCMD_ADDR
); /* command */
118 cor_put_u16(msg
+ offset
, 2 + cor_local_addrlen
); /* command length */
120 cor_put_u16(msg
+ offset
, cor_local_addrlen
); /* addrlen */
122 if (cor_local_addrlen
!= 0) {
124 memcpy(msg
+ offset
, cor_local_addr
, cor_local_addrlen
);
125 offset
+= cor_local_addrlen
;
128 spin_unlock_bh(&cor_local_addr_lock
);
/* the payload must exactly fill the reserved area */
130 BUG_ON(offset
!= len
);
132 return ___cor_send_announce(skb
, sent
);
/* error path (label not visible in this extract): treat as success */
137 return NET_XMIT_SUCCESS
;
/*
 * cor_announce_data_free - kref release callback for cor_announce_data.
 *
 * Called when the last reference to an announce entry is dropped
 * (see kref_put(..., cor_announce_data_free) in _cor_send_announce()).
 *
 * NOTE(review): fragmentary extract -- the body after container_of()
 * (presumably freeing ann and any held resources) is missing from view.
 */
141 void cor_announce_data_free(struct kref
*ref
)
143 struct cor_announce_data
*ann
= container_of(ref
,
144 struct cor_announce_data
, ref
);
/*
 * _cor_send_announce - try to send one announce and decide what happens next.
 *
 * Under cor_announce_snd_lock: skips sending when the device is gone or
 * not configured, defers when called outside the qos path and fastsend is
 * not currently allowed, otherwise builds/sends via __cor_send_announce().
 * Unicast (non-broadcast) announces are retired from the list once their
 * send count reaches ANNOUNCE_SEND_UNICAST_MAXCNT.  After dropping the
 * lock: a retired entry loses its final reference; a dropped send is
 * re-queued on the device's qos queue for a congestion retry; otherwise
 * the delayed work is rescheduled for the next interval.
 *
 * Returns QOS_RESUME_CONG when the send was not successful, else
 * QOS_RESUME_DONE.
 *
 * NOTE(review): fragmentary extract -- locals (rc, reschedule), braces,
 * goto labels and the right-hand side of the reschedule expression are
 * missing from view.
 */
148 int _cor_send_announce(struct cor_announce_data
*ann
, int fromqos
, int *sent
)
153 spin_lock_bh(&(cor_announce_snd_lock
));
/* device already gone: nothing to send, count as success */
155 if (unlikely(ann
->dev
== 0)) {
156 rc
= NET_XMIT_SUCCESS
;
/* unconfigured device: skip this round without error */
160 if (cor_is_device_configurated(ann
->dev
) == 0)
161 rc
= NET_XMIT_SUCCESS
;
/* outside the qos path and qos says "not now": defer */
162 else if (fromqos
== 0 &&
163 cor_qos_fastsend_allowed_announce(ann
->dev
) == 0)
166 rc
= __cor_send_announce(ann
, sent
);
/* unicast announces stop after a bounded number of sends */
168 if (rc
!= NET_XMIT_DROP
&& ann
->type
!= ANNOUNCE_TYPE_BROADCAST
) {
170 reschedule
= (ann
->sndcnt
< ANNOUNCE_SEND_UNICAST_MAXCNT
?
/* done with this entry: unlink it from the outgoing list */
173 if (reschedule
== 0) {
177 list_del(&(ann
->lh
));
/* drop the list's reference; must not be the last one here */
178 kref_put(&(ann
->ref
), cor_kreffree_bug
);
185 spin_unlock_bh(&(cor_announce_snd_lock
));
/* outside the lock: release, requeue on congestion, or reschedule */
187 if (unlikely(reschedule
== 0)) {
188 kref_put(&(ann
->ref
), cor_announce_data_free
);
189 } else if (rc
== NET_XMIT_DROP
) {
/* congestion: let the qos queue resume us when there is room */
191 struct cor_qos_queue
*q
= cor_get_queue(ann
->dev
);
193 cor_qos_enqueue(q
, &(ann
->rb
), ns_to_ktime(0),
194 QOS_CALLER_ANNOUNCE
);
195 kref_put(&(q
->ref
), cor_free_qos
);
/* normal case: try again after the configured interval */
199 schedule_delayed_work(&(ann
->announce_work
), msecs_to_jiffies(
200 ANNOUNCE_SEND_PACKETINTELVAL_MS
));
203 if (rc
!= NET_XMIT_SUCCESS
)
204 return QOS_RESUME_CONG
;
206 return QOS_RESUME_DONE
;
/*
 * cor_send_announce - delayed-work entry point for periodic announces.
 *
 * Recovers the owning cor_announce_data from the work item and forwards
 * to _cor_send_announce() with fromqos == 0.
 *
 * NOTE(review): fragmentary extract -- the declaration of the local
 * 'sent' variable (and the function braces) are missing from view.
 */
209 static void cor_send_announce(struct work_struct
*work
)
211 struct cor_announce_data
*ann
= container_of(to_delayed_work(work
),
212 struct cor_announce_data
, announce_work
);
214 _cor_send_announce(ann
, 0, &sent
);
/*
 * cor_announce_send_start - begin announcing on a device.
 *
 * Allocates and zero-initializes a cor_announce_data, records the target
 * mac, links it onto cor_announce_out_list under cor_announce_snd_lock,
 * takes an extra reference for the delayed work and kicks off the first
 * send after one jiffy.
 *
 * NOTE(review): fragmentary extract -- assignments of ann->dev/ann->type
 * (and presumably a dev_hold), the printk continuation line and the
 * function braces are missing from view.
 */
217 void cor_announce_send_start(struct net_device
*dev
, char *mac
, int type
)
219 struct cor_announce_data
*ann
;
221 ann
= kmalloc(sizeof(struct cor_announce_data
), GFP_KERNEL
);
/* allocation failure: log and give up (announces are best effort) */
223 if (unlikely(ann
== 0)) {
224 printk(KERN_ERR
"cor cannot allocate memory for sending "
229 memset(ann
, 0, sizeof(struct cor_announce_data
));
/* initial reference is owned by cor_announce_out_list */
231 kref_init(&(ann
->ref
));
235 memcpy(ann
->mac
, mac
, MAX_ADDR_LEN
);
238 spin_lock_bh(&(cor_announce_snd_lock
));
239 list_add_tail(&(ann
->lh
), &cor_announce_out_list
);
240 spin_unlock_bh(&(cor_announce_snd_lock
));
242 INIT_DELAYED_WORK(&(ann
->announce_work
), cor_send_announce
);
/* second reference is owned by the scheduled work item */
243 kref_get(&(ann
->ref
));
244 schedule_delayed_work(&(ann
->announce_work
), 1);
/*
 * cor_announce_send_stop - cancel matching announce entries.
 *
 * Walks cor_announce_out_list under cor_announce_snd_lock, skips entries
 * that do not match the given device (and, for non-broadcast types, mac),
 * and unlinks the matching ones, dropping the list's reference.
 *
 * NOTE(review): fragmentary extract -- the loop-advance of lh, parts of
 * the match condition (type comparison), any per-entry cleanup such as a
 * dev_put, and the function braces are missing from view.
 */
247 void cor_announce_send_stop(struct net_device
*dev
, char *mac
, int type
)
249 struct list_head
*lh
;
251 spin_lock_bh(&(cor_announce_snd_lock
));
253 lh
= cor_announce_out_list
.next
;
/* iterate all pending outgoing announces */
254 while (lh
!= &cor_announce_out_list
) {
255 struct cor_announce_data
*ann
= container_of(lh
,
256 struct cor_announce_data
, lh
);
/* dev == 0 matches everything; otherwise require same dev (and
 * same mac unless the entry is a broadcast announce) */
261 if (dev
!= 0 && (ann
->dev
!= dev
|| (
262 type
!= ANNOUNCE_TYPE_BROADCAST
&& (
264 memcmp(ann
->mac
, mac
, MAX_ADDR_LEN
) != 0))))
/* matching entry: unlink and drop the list's reference */
270 list_del(&(ann
->lh
));
/* must not be the final reference while the work may still run */
271 kref_put(&(ann
->ref
), cor_kreffree_bug
);
274 spin_unlock_bh(&(cor_announce_snd_lock
));
277 MODULE_LICENSE("GPL");