/*
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
/*
 * Protects cor_announce_out_list: every list_add_tail()/list_del() on the
 * announce output list below is performed under this lock (taken with
 * spin_lock_bh, since the senders run in process and BH context).
 */
static DEFINE_SPINLOCK(cor_announce_snd_lock);

/*
 * All announce transmissions currently scheduled; entries are
 * cor_announce_data objects linked through their ->lh member and hold one
 * kref on behalf of the list (dropped with cor_kreffree_bug on removal).
 */
static LIST_HEAD(cor_announce_out_list);
/*
 * ___cor_send_announce() - hand a fully built announce skb to the qos queue
 * of the outgoing device and report the NET_XMIT_* result.
 *
 * NOTE(review): this extract is incomplete — the original lines between the
 * numbered fragments below (the opening brace, the declaration of rc, the
 * q == 0 early-return path and the function tail that presumably sets *sent)
 * are missing from this view, and each logical line is split across several
 * physical lines by the extraction. Only comments were added here; the code
 * bytes are untouched. Confirm the elided logic against the full source.
 */
21 static int ___cor_send_announce(struct sk_buff
*skb
, int *sent
)
/* look up the qos queue that belongs to the skb's outgoing device;
 * cor_get_queue() returns a referenced queue (released below) */
24 struct cor_qos_queue
*q
= cor_get_queue(skb
->dev
);
/* presumably the q == 0 bail-out — reported as success so the caller
 * does not retry; TODO confirm, the guarding condition is elided */
29 return NET_XMIT_SUCCESS
;
/* submit the announce frame through the device qos queue */
32 rc
= cor_dev_queue_xmit(skb
, q
, QOS_CALLER_ANNOUNCE
);
/* drop the queue reference taken by cor_get_queue() above */
33 kref_put(&(q
->ref
), cor_free_qos
);
/* anything but NET_XMIT_DROP counts as transmitted; the consequent
 * (presumably *sent = 1) falls on a missing line */
34 if (rc
!= NET_XMIT_DROP
)
/*
 * __cor_send_announce() - build one announce packet for ann->dev and pass it
 * to ___cor_send_announce().
 *
 * The packet is: a PACKET_TYPE_ANNOUNCE byte, a be32 session id, an
 * ANNCMD_VERSION command (version/minversion both 0), then either an
 * ANNCMD_NOADDR or an ANNCMD_ADDR command depending on whether a local
 * address is configured.
 *
 * NOTE(review): this extract is incomplete — the local declarations
 * (headroom, has_addr, addr, len, skb, msg, offset), the offset increments
 * between the cor_put_* calls, the has_addr branch structure and the error
 * paths (e.g. after dev_hard_header() or skb_put() failure) fall on original
 * lines missing from this view. Only comments were added; code bytes are
 * untouched.
 */
40 static int __cor_send_announce(struct cor_announce_data
*ann
, int *sent
)
/* space the device's link layer needs before/after the payload */
53 headroom
= LL_RESERVED_SPACE(ann
->dev
) +
54 ann
->dev
->needed_tailroom
;
/* snapshot the configured local address under its lock */
56 spin_lock_bh(&cor_local_addr_lock
);
57 has_addr
= cor_local_has_addr
;
58 addr
= cor_local_addr
;
59 spin_unlock_bh(&cor_local_addr_lock
);
/* worst-case payload: type(1) + sessionid(4) + version cmd(8) +
 * cmd header(4) + addr payload(8) — presumably the NOADDR variant is
 * shorter; TODO confirm how the shorter case relates to len */
64 len
= 1 + 4 + 8 + 4 + 8;
66 skb
= alloc_skb(headroom
+ len
, GFP_ATOMIC
);
/* allocation failure is swallowed: report success so the caller
 * reschedules instead of treating it as congestion */
67 if (unlikely(skb
== 0))
68 return NET_XMIT_SUCCESS
;
70 skb
->protocol
= htons(ETH_P_COR
);
72 skb_reserve(skb
, headroom
);
74 #warning net_device locking? (other places too)
/* build the link-layer header towards ann->mac; the failure branch
 * body is on missing lines (presumably kfree_skb + return) */
75 if (unlikely(dev_hard_header(skb
, ann
->dev
, ETH_P_COR
,
76 ann
->mac
, ann
->dev
->dev_addr
, skb
->len
) < 0))
79 skb_reset_network_header(skb
);
81 msg
= skb_put(skb
, len
);
82 if (unlikely(msg
== 0))
/* packet type byte, then the commands; offset advances between the
 * cor_put_* calls on lines elided from this extract */
85 msg
[0] = PACKET_TYPE_ANNOUNCE
;
88 cor_put_be32(msg
+ offset
, cor_local_addr_sessionid
); /* sessionid */
91 cor_put_u16(msg
+ offset
, ANNCMD_VERSION
); /* command */
93 cor_put_u16(msg
+ offset
, 4); /* command length */
95 cor_put_u16(msg
+ offset
, 0); /* version */
97 cor_put_u16(msg
+ offset
, 0); /* minversion */
/* no local address configured — announce ANNCMD_NOADDR */
101 cor_put_u16(msg
+ offset
, ANNCMD_NOADDR
); /* command */
103 cor_put_u16(msg
+ offset
, 0); /* command length */
/* local address configured — announce it with ANNCMD_ADDR */
106 cor_put_u16(msg
+ offset
, ANNCMD_ADDR
); /* command */
108 cor_put_u16(msg
+ offset
, 8); /* command length */
110 cor_put_be64(msg
+ offset
, addr
); /* addr */
/* compile-time check that the bytes written match the allocation */
114 BUILD_BUG_ON(offset
!= len
);
116 return ___cor_send_announce(skb
, sent
);
121 return NET_XMIT_SUCCESS
;
/*
 * cor_announce_data_free() - kref release callback for cor_announce_data
 * (used as the final kref_put() target in _cor_send_announce()).
 *
 * NOTE(review): the body that actually releases the object (presumably
 * kfree(ann), possibly preceded by a dev reference drop) falls on original
 * lines missing from this extract — confirm against the full source. Only
 * comments were added here; code bytes are untouched.
 */
125 void cor_announce_data_free(struct kref
*ref
)
/* recover the containing object from its embedded kref */
127 struct cor_announce_data
*ann
= container_of(ref
,
128 struct cor_announce_data
, ref
);
/*
 * _cor_send_announce() - try to transmit one announce for *ann and decide
 * whether the entry is rescheduled, retried through the qos queue, or
 * retired.
 *
 * @fromqos: non-zero when called from the qos resume path (skips the
 *           fastsend-allowance check).
 * @sent:    out parameter forwarded to __cor_send_announce().
 *
 * Returns QOS_RESUME_CONG when the transmit result was not
 * NET_XMIT_SUCCESS, QOS_RESUME_DONE otherwise.
 *
 * NOTE(review): this extract is incomplete — the declarations of rc and
 * reschedule, several branch bodies, the second half of the reschedule
 * ternary (presumably incrementing/testing ann->sndcnt) and assorted goto
 * labels fall on original lines missing from this view. Only comments were
 * added; code bytes are untouched.
 */
132 int _cor_send_announce(struct cor_announce_data
*ann
, int fromqos
, int *sent
)
/* ann->dev and the out-list membership are protected by this lock */
137 spin_lock_bh(&(cor_announce_snd_lock
));
/* device already gone — nothing to send, treat as success */
139 if (unlikely(ann
->dev
== 0)) {
140 rc
= NET_XMIT_SUCCESS
;
/* unconfigured device: skip sending this round */
144 if (cor_is_device_configurated(ann
->dev
) == 0)
145 rc
= NET_XMIT_SUCCESS
;
/* outside the qos path, respect the fastsend budget of the device;
 * the branch body is on missing lines */
146 else if (fromqos
== 0 &&
147 cor_qos_fastsend_allowed_announce(ann
->dev
) == 0)
150 rc
= __cor_send_announce(ann
, sent
);
/* unicast announces are re-sent only a bounded number of times */
152 if (rc
!= NET_XMIT_DROP
&& ann
->type
!= ANNOUNCE_TYPE_BROADCAST
) {
/* ternary continuation (and presumably the sndcnt update) is on
 * missing lines — TODO confirm */
154 reschedule
= (ann
->sndcnt
< ANNOUNCE_SEND_UNICAST_MAXCNT
?
/* finished: unlink from the out list and drop the list's kref */
157 if (reschedule
== 0) {
161 list_del(&(ann
->lh
));
162 kref_put(&(ann
->ref
), cor_kreffree_bug
);
169 spin_unlock_bh(&(cor_announce_snd_lock
));
/* outside the lock: either free the entry, queue a qos retry, or
 * reschedule the delayed work */
171 if (unlikely(reschedule
== 0)) {
172 kref_put(&(ann
->ref
), cor_announce_data_free
);
/* transmit was dropped: let the qos queue resume us later */
173 } else if (rc
== NET_XMIT_DROP
) {
175 struct cor_qos_queue
*q
= cor_get_queue(ann
->dev
);
177 cor_qos_enqueue(q
, &(ann
->rb
), 0,
179 QOS_CALLER_ANNOUNCE
, 0);
180 kref_put(&(q
->ref
), cor_free_qos
);
/* normal case: send the next announce after the packet interval */
184 schedule_delayed_work(&(ann
->announce_work
), msecs_to_jiffies(
185 ANNOUNCE_SEND_PACKETINTELVAL_MS
));
188 if (rc
!= NET_XMIT_SUCCESS
)
189 return QOS_RESUME_CONG
;
191 return QOS_RESUME_DONE
;
194 static void cor_send_announce(struct work_struct
*work
)
196 struct cor_announce_data
*ann
= container_of(to_delayed_work(work
),
197 struct cor_announce_data
, announce_work
);
199 _cor_send_announce(ann
, 0, &sent
);
/*
 * cor_announce_send_start() - begin periodically announcing to @mac on @dev.
 *
 * Allocates and zero-initializes a cor_announce_data entry, links it onto
 * cor_announce_out_list (the list holds one kref), then takes a second kref
 * for the delayed work and schedules the first transmission almost
 * immediately (1 jiffy).
 *
 * NOTE(review): this extract is incomplete — the opening brace, the
 * early-return after the allocation failure printk, and original lines
 * 216-218 (presumably storing dev/type into *ann and taking a device
 * reference) are missing from this view. The @type parameter is not used on
 * any visible line. Only comments were added; code bytes are untouched.
 */
202 void cor_announce_send_start(struct net_device
*dev
, char *mac
, int type
)
204 struct cor_announce_data
*ann
;
/* GFP_KERNEL: this path may sleep */
206 ann
= kmalloc(sizeof(struct cor_announce_data
), GFP_KERNEL
);
/* allocation failure: log and bail out (return is on a missing line) */
208 if (unlikely(ann
== 0)) {
209 printk(KERN_ERR
"cor cannot allocate memory for sending announces\n");
213 memset(ann
, 0, sizeof(struct cor_announce_data
));
/* first reference: owned by the out list below */
215 kref_init(&(ann
->ref
));
219 memcpy(ann
->mac
, mac
, MAX_ADDR_LEN
);
222 spin_lock_bh(&(cor_announce_snd_lock
));
223 list_add_tail(&(ann
->lh
), &cor_announce_out_list
);
224 spin_unlock_bh(&(cor_announce_snd_lock
));
226 INIT_DELAYED_WORK(&(ann
->announce_work
), cor_send_announce
);
/* second reference: owned by the scheduled delayed work */
227 kref_get(&(ann
->ref
));
228 schedule_delayed_work(&(ann
->announce_work
), 1);
/*
 * cor_announce_send_stop() - remove matching entries from the announce
 * output list.
 *
 * Walks cor_announce_out_list under cor_announce_snd_lock. An entry is
 * skipped when @dev is non-NULL and the entry is for a different device, or
 * (for non-broadcast stops) a different mac; matching entries are unlinked
 * and the list's kref is dropped. With @dev == NULL every entry matches.
 *
 * NOTE(review): this extract is incomplete — the list-advance statement
 * (presumably "lh = lh->next;" before the match test), the skip/continue
 * body and any per-entry cleanup (e.g. releasing ann->dev) fall on original
 * lines missing from this view. Only comments were added; code bytes are
 * untouched.
 */
231 void cor_announce_send_stop(struct net_device
*dev
, char *mac
, int type
)
233 struct list_head
*lh
;
235 spin_lock_bh(&(cor_announce_snd_lock
));
237 lh
= cor_announce_out_list
.next
;
238 while (lh
!= &cor_announce_out_list
) {
239 struct cor_announce_data
*ann
= container_of(lh
,
240 struct cor_announce_data
, lh
);
/* skip entries for other devices, or (non-broadcast) other macs;
 * the continue is on a missing line */
245 if (dev
!= 0 && (ann
->dev
!= dev
|| (
246 type
!= ANNOUNCE_TYPE_BROADCAST
&& (
248 memcmp(ann
->mac
, mac
, MAX_ADDR_LEN
) != 0))))
/* matching entry: unlink and drop the reference held by the list */
254 list_del(&(ann
->lh
));
255 kref_put(&(ann
->ref
), cor_kreffree_bug
);
258 spin_unlock_bh(&(cor_announce_snd_lock
));
261 MODULE_LICENSE("GPL");