/* net/cor/neigh_ann_snd.c - sending of neighbor announcement packets */
/**
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include "cor.h"
/* Protects cor_announce_out_list and the dev/sndcnt fields of the queued
 * cor_announce_data entries (taken in _cor_send_announce(),
 * cor_announce_send_start() and cor_announce_send_stop()). */
static DEFINE_SPINLOCK(cor_announce_snd_lock);

/* All cor_announce_data entries with a pending (re)send. */
static LIST_HEAD(cor_announce_out_list);
26 static int ___cor_send_announce(struct sk_buff *skb, int *sent)
28 int rc;
29 struct cor_qos_queue *q = cor_get_queue(skb->dev);
31 if (q == 0) {
32 kfree_skb(skb);
33 *sent = 1;
34 return NET_XMIT_SUCCESS;
37 rc = cor_dev_queue_xmit(skb, q, QOS_CALLER_ANNOUNCE);
38 kref_put(&(q->ref), cor_free_qos);
39 if (rc != NET_XMIT_DROP)
40 *sent = 1;
42 return rc;
/* Build one announcement frame for ann->dev and pass it on to
 * ___cor_send_announce().  Payload layout (matches the len computation
 * and the BUG_ON(offset != len) below):
 *   1 byte   PACKET_TYPE_ANNOUNCE
 *   4 bytes  sessionid (big endian)
 *   8 bytes  ANNCMD_VERSION command (cmd, cmdlen, version, minversion)
 *   6 bytes  ANNCMD_ADDR command header (cmd, cmdlen, addrlen)
 *   n bytes  local address
 * Returns a NET_XMIT_* code; allocation failure is reported as
 * NET_XMIT_SUCCESS (best effort - announces are resent anyway). */
static int __cor_send_announce(struct cor_announce_data *ann, int *sent)
{
	__u32 len;
	__u32 offset = 0;

	__u32 local_addrlen_tmp;

	char *msg = 0;

	struct sk_buff *skb;
	__u32 headroom;

	headroom = LL_RESERVED_SPACE(ann->dev) +
			ann->dev->needed_tailroom;

	spin_lock_bh(&cor_local_addr_lock);

retry:
	BUG_ON(cor_local_addrlen > 64);

	/* snapshot the address length; the lock is dropped while the skb
	 * is allocated, so the snapshot is re-checked further below */
	local_addrlen_tmp = cor_local_addrlen;

	spin_unlock_bh(&cor_local_addr_lock);

	/* type + sessionid + version command + addr command header + addr */
	len = 1 + 4 + 8 + 6 + local_addrlen_tmp;

	BUG_ON(len > 1024);

	skb = alloc_skb(headroom + len, GFP_ATOMIC);
	if (unlikely(skb == 0))
		return NET_XMIT_SUCCESS;

	skb->protocol = htons(ETH_P_COR);
	skb->dev = ann->dev;
	skb_reserve(skb, headroom);

#warning net_device locking? (other places too)
	if (unlikely(dev_hard_header(skb, ann->dev, ETH_P_COR,
			ann->mac, ann->dev->dev_addr, skb->len) < 0))
		goto out_err;

	skb_reset_network_header(skb);

	msg = skb_put(skb, len);
	if (unlikely(msg == 0))
		goto out_err;

	spin_lock_bh(&cor_local_addr_lock);

	/* the local address may have changed while the lock was dropped;
	 * if its length no longer matches the allocated skb, free it and
	 * start over (the lock stays held across this goto - the retry
	 * path unlocks after taking a new snapshot) */
	if (unlikely(cor_local_addrlen != local_addrlen_tmp)) {
		kfree_skb(skb);
		skb = 0;
		msg = 0;
		goto retry;
	}

	msg[0] = PACKET_TYPE_ANNOUNCE;
	offset++;

	cor_put_be32(msg + offset, cor_local_addr_sessionid); /* sessionid */
	offset += 4;

	cor_put_u16(msg + offset, ANNCMD_VERSION); /* command */
	offset += 2;
	cor_put_u16(msg + offset, 4); /* command length */
	offset += 2;
	cor_put_u16(msg + offset, 0); /* version */
	offset += 2;
	cor_put_u16(msg + offset, 0); /* minversion */
	offset += 2;

	cor_put_u16(msg + offset, ANNCMD_ADDR); /* command */
	offset += 2;
	cor_put_u16(msg + offset, 2 + cor_local_addrlen); /* command length */
	offset += 2;
	cor_put_u16(msg + offset, cor_local_addrlen); /* addrlen */
	offset += 2;
	if (cor_local_addrlen != 0) {
		/* addr */
		memcpy(msg + offset, cor_local_addr, cor_local_addrlen);
		offset += cor_local_addrlen;
	}

	spin_unlock_bh(&cor_local_addr_lock);

	BUG_ON(offset != len);

	return ___cor_send_announce(skb, sent);

	/* error path kept out of the fall-through flow ("if (0)" idiom) */
	if (0) {
out_err:
		kfree_skb(skb);
		return NET_XMIT_SUCCESS;
	}
}
/* kref release callback: frees a cor_announce_data once the last
 * reference (list entry or delayed work) has been dropped. */
void cor_announce_data_free(struct kref *ref)
{
	struct cor_announce_data *ann = container_of(ref,
			struct cor_announce_data, ref);

	kfree(ann);
}
/* Attempt one announcement transmission for *ann.  Called from the
 * delayed work (fromqos == 0) or from the qos resume path (fromqos != 0).
 *
 * Returns QOS_RESUME_CONG when the attempt was dropped and has to be
 * retried, QOS_RESUME_DONE otherwise.  *sent is set by
 * ___cor_send_announce() if a packet actually went out. */
int _cor_send_announce(struct cor_announce_data *ann, int fromqos, int *sent)
{
	int reschedule = 0;
	int rc = 0;

	spin_lock_bh(&(cor_announce_snd_lock));

	/* dev == 0 means the entry was already detached by
	 * cor_announce_send_stop(); nothing left to do */
	if (unlikely(ann->dev == 0)) {
		rc = NET_XMIT_SUCCESS;
		goto out;
	}

	if (cor_is_device_configurated(ann->dev) == 0)
		rc = NET_XMIT_SUCCESS; /* device not ready - skip this round */
	else if (fromqos == 0 &&
			cor_qos_fastsend_allowed_announce(ann->dev) == 0)
		rc = NET_XMIT_DROP; /* fast send denied - defer to qos below */
	else
		rc = __cor_send_announce(ann, sent);

	if (rc != NET_XMIT_DROP && ann->type != ANNOUNCE_TYPE_BROADCAST) {
		/* unicast announces are sent a bounded number of times */
		ann->sndcnt++;
		reschedule = (ann->sndcnt < ANNOUNCE_SEND_UNICAST_MAXCNT ?
				1 : 0);

		if (reschedule == 0) {
			/* finished: detach from the device and the list;
			 * the list reference must not be the last one
			 * (the work still holds one, see cor_kreffree_bug) */
			dev_put(ann->dev);
			ann->dev = 0;

			list_del(&(ann->lh));
			kref_put(&(ann->ref), cor_kreffree_bug);
		}
	} else {
		/* broadcast (or dropped) announces keep being resent */
		reschedule = 1;
	}

out:
	spin_unlock_bh(&(cor_announce_snd_lock));

	if (unlikely(reschedule == 0)) {
		/* drop the delayed-work reference; this may free *ann */
		kref_put(&(ann->ref), cor_announce_data_free);
	} else if (rc == NET_XMIT_DROP) {
		if (fromqos == 0) {
			/* dropped on the fast path: enqueue for qos resume */
			struct cor_qos_queue *q = cor_get_queue(ann->dev);
			if (q != 0) {
				cor_qos_enqueue(q, &(ann->rb), ns_to_ktime(0),
						QOS_CALLER_ANNOUNCE);
				kref_put(&(q->ref), cor_free_qos);
			}
		}
	} else {
		schedule_delayed_work(&(ann->announce_work), msecs_to_jiffies(
				ANNOUNCE_SEND_PACKETINTELVAL_MS));
	}

	if (rc != NET_XMIT_SUCCESS)
		return QOS_RESUME_CONG;
	return QOS_RESUME_DONE;
}
209 static void cor_send_announce(struct work_struct *work)
211 struct cor_announce_data *ann = container_of(to_delayed_work(work),
212 struct cor_announce_data, announce_work);
213 int sent = 0;
214 _cor_send_announce(ann, 0, &sent);
217 void cor_announce_send_start(struct net_device *dev, char *mac, int type)
219 struct cor_announce_data *ann;
221 ann = kmalloc(sizeof(struct cor_announce_data), GFP_KERNEL);
223 if (unlikely(ann == 0)) {
224 printk(KERN_ERR "cor cannot allocate memory for sending "
225 "announces");
226 return;
229 memset(ann, 0, sizeof(struct cor_announce_data));
231 kref_init(&(ann->ref));
233 dev_hold(dev);
234 ann->dev = dev;
235 memcpy(ann->mac, mac, MAX_ADDR_LEN);
236 ann->type = type;
238 spin_lock_bh(&(cor_announce_snd_lock));
239 list_add_tail(&(ann->lh), &cor_announce_out_list);
240 spin_unlock_bh(&(cor_announce_snd_lock));
242 INIT_DELAYED_WORK(&(ann->announce_work), cor_send_announce);
243 kref_get(&(ann->ref));
244 schedule_delayed_work(&(ann->announce_work), 1);
/* Stop pending announcements.  With dev != 0 only entries on that device
 * are removed, and - unless type is ANNOUNCE_TYPE_BROADCAST - only those
 * that also match type and mac.  With dev == 0 every entry is removed. */
void cor_announce_send_stop(struct net_device *dev, char *mac, int type)
{
	struct list_head *lh;

	spin_lock_bh(&(cor_announce_snd_lock));

	lh = cor_announce_out_list.next;
	while (lh != &cor_announce_out_list) {
		struct cor_announce_data *ann = container_of(lh,
				struct cor_announce_data, lh);

		/* advance before a possible list_del() below */
		lh = lh->next;

		if (dev != 0 && (ann->dev != dev || (
				type != ANNOUNCE_TYPE_BROADCAST && (
				ann->type != type ||
				memcmp(ann->mac, mac, MAX_ADDR_LEN) != 0))))
			continue;

		dev_put(ann->dev);
		ann->dev = 0;

		list_del(&(ann->lh));
		/* the delayed work still holds a reference, so this put
		 * must not be the final one (hence cor_kreffree_bug) */
		kref_put(&(ann->ref), cor_kreffree_bug);
	}

	spin_unlock_bh(&(cor_announce_snd_lock));
}
277 MODULE_LICENSE("GPL");