hashtable bugfix, neighbor init bugfix, create_packet bugfix
[cor_2_6_31.git] net/cor/snd.c
/*
 * Connection oriented routing
 * Copyright (C) 2007-2008 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/gfp.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

#include "cor.h"

static struct htable retransmits;

static atomic_t kpacket_seqno = ATOMIC_INIT(1);
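
/* Called once the last reference to a queued retransmit skb is gone:
 * releases the conn reference taken in send_packet() and frees the skb. */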
static void free_skb(struct ref_counter *cnt)
{
	struct skb_procstate *ps = container_of(cnt, struct skb_procstate,
			funcstate.retransmit_queue.refs);
	struct sk_buff *skb = skb_from_pstate(ps);

	ref_counter_decr(&(ps->rconn->refs));
	kfree_skb(skb);
}

static struct ref_counter_def skb_refcnt = {
	.free = free_skb
};
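
/* Lookup key for the retransmits htable: a queued packet is identified by
 * its conn_id/seqno pair and the neighbor it was sent to. */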
struct retransmit_matchparam {
	__u32 conn_id;
	__u32 seqno;
	struct neighbor *nb;
};

static __u32 rm_to_key(struct retransmit_matchparam *rm)
{
	return rm->conn_id ^ rm->seqno;
}
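
/* The qdisc enqueue/dequeue hooks below are commented out in this version;
 * they sketch a credit-based scheduler for outgoing conns. */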
/* static struct sk_buff * cor_dequeue(struct Qdisc *sch)
{
	struct sk_buff *ret;

	struct cor_sched_data *q = qdisc_priv(sch);

	struct list_head *ln = q->conn_list.next;
	struct conn *best = 0;

	__u64 currcost_limit = 0;
	__u64 currcost = 0;

	spin_lock(&(q->lock));

	if (!(skb_queue_empty(&(q->requeue_queue)))) {
		ret = __skb_dequeue(&(q->requeue_queue));
		goto out;
	}

	while (&(q->conn_list) != ln) {
		__u32 max1, max2, maxcost;
		struct conn *curr = (struct conn *)
				(((char *) ln) - offsetof(struct conn,
				target.out.queue_list));

		BUG_ON(TARGET_OUT != curr->targettype);

		max1 = (256 * ((__u64)curr->credits)) /
				((__u64)curr->bytes_queued + curr->avg_rate);

		max2 = (256 * ((__u64)curr->credits +
				curr->credit_sender - curr->credit_recp)) /
				((__u64)curr->bytes_queued + 2*curr->avg_rate);

		maxcost = max((__u32) 0, min((max1), (max2)));

		if (maxcost > currcost_limit) {
			currcost = currcost_limit;
			currcost_limit = maxcost;
			best = curr;
		}

		ln = ln->next;
	}

	best->credits -= currcost;

	ret = __skb_dequeue(&(best->target.out.queue));

	if (skb_queue_empty(&(best->target.out.queue))) {
		list_del(&(best->target.out.queue_list));
		best->target.out.qdisc_active = 0;
	}

out:
	spin_unlock(&(q->lock));

	if (likely(0 != ret)) {
		sch->qstats.backlog -= ret->len;
		sch->q.qlen--;
	}

	return ret;
}

static int cor_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct cor_sched_data *q = qdisc_priv(sch);
	struct conn *rconn;

	rconn = skb_pstate(skb)->rconn;

	BUG_ON(TARGET_OUT != rconn->targettype);

	spin_lock(&(rconn->target.out.qdisc_lock));

	__skb_queue_tail(&(rconn->target.out.queue), skb);

	if (unlikely(0 == rconn->target.out.qdisc_active)) {
		spin_lock(&(q->lock));
		list_add(&(rconn->target.out.queue_list), &(q->conn_list));
		rconn->target.out.qdisc_active = 1;
		spin_unlock(&(q->lock));
	}

	spin_unlock(&(rconn->target.out.qdisc_lock));

	sch->bstats.bytes += skb->len;
	sch->bstats.packets++;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
} */
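
/* Transmit a clone of the skb so that the original can stay queued for
 * retransmission. */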
static void cor_xmit(struct sk_buff *skb)
{
	struct sk_buff *skb2;

	BUG_ON(skb == 0);

	skb2 = skb_clone(skb, __GFP_DMA | GFP_KERNEL);

	if (skb2 == 0) {
		printk(KERN_WARNING "cor_xmit: cannot clone skb, "
				"allocation failure?\n");
		return;
	}

	dev_queue_xmit(skb2);
}
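
/* Retransmit timer of a neighbor: resend every queued packet whose timeout
 * has expired and requeue it at the tail with a fresh timeout; re-arm the
 * timer for the first packet that has not expired yet. */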
void retransmit_timerfunc(unsigned long arg)
{
	unsigned long iflags;

	struct neighbor *nb = (struct neighbor *) arg;
	struct sk_buff *skb = 0;
	struct skb_procstate *ps = 0;
	unsigned long timeout;

	while (1) {
		spin_lock_irqsave( &(nb->retrans_lock), iflags );
		skb = __skb_dequeue(&(nb->retrans_list));

		if (0 == skb)
			goto out;

		ps = skb_pstate(skb);
		timeout = ps->funcstate.retransmit_queue.timeout;

		/* not expired yet ==> put it back and wait for the timeout */
		if (time_after(timeout, jiffies)) {
			__skb_queue_head(&(nb->retrans_list), skb);
			goto modtimer;
		}

		ps->funcstate.retransmit_queue.timeout = jiffies + nb->latency;
		__skb_queue_tail(&(nb->retrans_list), skb);

		spin_unlock_irqrestore( &(nb->retrans_lock), iflags );
		cor_xmit(skb);
	}

modtimer:
	mod_timer(&(nb->retrans_timer), timeout);

out:
	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );
}
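
/* Build a data packet for the given neighbor: space for the link-layer
 * header plus a 17-byte COR header (1 byte packet type, then keyid, keyseq,
 * conn_id and seqno as 32-bit values) in front of "size" payload bytes. */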
static struct sk_buff *create_packet(struct neighbor *nb, int size,
		gfp_t alloc_flags, __u32 keyid, __u32 keyseq, __u32 conn_id,
		__u32 seqno)
{
	struct net_device *dev = nb->dev;
	struct sk_buff *ret;
	struct skb_procstate *ps;
	char *dest;

	ret = alloc_skb(size + 17 + LL_ALLOCATED_SPACE(dev), alloc_flags);
	if (unlikely(0 == ret))
		return 0;

	ret->protocol = htons(ETH_P_COR);

	ps = skb_pstate(ret);
	ps->funcstate.retransmit_queue.conn_id = conn_id;
	ps->funcstate.retransmit_queue.seqno = seqno;

	/* reserve the headroom once, before dev_hard_header() pushes the
	 * link-layer header */
	skb_reserve(ret, LL_RESERVED_SPACE(dev));
	if (unlikely(dev_hard_header(ret, dev, ETH_P_COR, nb->mac,
			dev->dev_addr, ret->len) < 0)) {
		kfree_skb(ret);
		return 0;
	}
	skb_reset_network_header(ret);

	dest = skb_put(ret, 17);
	BUG_ON(0 == dest);

	dest[0] = PACKET_TYPE_DATA;
	dest += 1;

	put_u32(dest, keyid, 1);
	dest += 4;
	put_u32(dest, keyseq, 1);
	dest += 4;
	put_u32(dest, conn_id, 1);
	dest += 4;
	put_u32(dest, seqno, 1);
	dest += 4;

	return ret;
}
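
/* Sequence numbers on a conn count bytes, so the next packet starts at
 * seqno + size. */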
struct sk_buff *create_packet_conn(struct conn *target, int size,
		gfp_t alloc_flags)
{
	__u32 connid = target->target.out.conn_id;
	__u32 seqno;

	seqno = target->target.out.seqno;
	target->target.out.seqno += size;

	atomic_inc(&(target->target.out.inflight_packets));
	return create_packet(target->target.out.nb, size, alloc_flags, 0, 0,
			connid, seqno);
}

struct sk_buff *create_packet_kernel(struct neighbor *nb, int size,
		gfp_t alloc_flags)
{
	__u32 seqno = 0;

	/* seqno 0 is skipped, keep drawing until we get a nonzero one */
	while (seqno == 0) {
		seqno = atomic_add_return(1, &kpacket_seqno);
	}

	return create_packet(nb, size, alloc_flags, 0, 0, 0, seqno);
}
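
/* Send conn data without queueing it for retransmission; retransmit
 * handling is still todo (see the #warning below). */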
void send_conn_flushdata(struct conn *rconn, char *data, __u32 datalen)
{
	__u32 seqno;

	seqno = rconn->target.out.seqno;
	rconn->target.out.seqno += datalen;

#warning todo retransmit
	send_conndata(rconn->target.out.nb, rconn->target.out.conn_id, seqno,
			data, data, datalen);
}
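
/* Queue the skb for retransmission (retransmits htable + the neighbor's
 * retrans_list), arm the retransmit timer if the list was empty, then
 * transmit. */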
void send_packet(struct sk_buff *skb, struct neighbor *nb)
{
	unsigned long iflags;

	struct skb_procstate *ps = skb_pstate(skb);
	struct retransmit_matchparam rm;
	int first;

	BUG_ON(ps->rconn->targettype != TARGET_OUT);

	rm.conn_id = ps->funcstate.retransmit_queue.conn_id;
	rm.seqno = ps->funcstate.retransmit_queue.seqno;
	rm.nb = nb;

	ps->funcstate.retransmit_queue.timeout = jiffies + nb->latency;
	ref_counter_init(&(ps->funcstate.retransmit_queue.refs), &skb_refcnt);

	htable_insert(&retransmits, (char *) skb, rm_to_key(&rm));

	spin_lock_irqsave( &(nb->retrans_lock), iflags );

	first = unlikely(skb_queue_empty(&(nb->retrans_list)));
	__skb_queue_tail(&(nb->retrans_list), skb);

	if (first) {
		mod_timer(&(nb->retrans_timer),
				ps->funcstate.retransmit_queue.timeout);
	}

	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );

	ref_counter_incr(&(ps->rconn->refs));

	cor_xmit(skb);

	ref_counter_incr(&(ps->funcstate.retransmit_queue.refs));
}
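
/* An ack for conn_id/seqno arrived: drop the packet from the retransmits
 * htable, unlink it from the neighbor's retrans_list and, once no packets
 * are in flight any more, flush the conn's source buffer. */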
void ack_received(struct neighbor *nb, __u32 conn_id, __u32 seqno,
		int nack)
{
	unsigned long iflags;

	struct sk_buff *skb = 0;
	struct skb_procstate *ps;
	struct retransmit_matchparam rm;

	int first = 0;

	int ret;

	rm.conn_id = conn_id;
	rm.seqno = seqno;
	rm.nb = nb;

	skb = (struct sk_buff *) htable_get(&retransmits, rm_to_key(&rm), &rm);

	if (0 == skb)
		return;

	ps = skb_pstate(skb);

	ret = htable_delete(&retransmits, (char *) skb, rm_to_key(&rm), &rm);
	if (ret) {
		/* somebody else has already deleted it in the meantime */
		return;
	}

	spin_lock_irqsave( &(nb->retrans_lock), iflags );

	if (unlikely(nb->retrans_list.next == skb))
		first = 1;
	skb->next->prev = skb->prev;
	skb->prev->next = skb->next;

	if (first) {
		if (unlikely(skb_queue_empty(&(nb->retrans_list)))) {
			mod_timer(&(nb->retrans_timer), jiffies + nb->latency);
		}
	}

	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );

	if (atomic_dec_and_test(&(ps->rconn->target.out.inflight_packets))) {
		if (ps->rconn->sourcetype == SOURCE_IN) {
			mutex_lock(&(ps->rconn->source.in.rcv_lock));
			flush_rbuf(ps->rconn);
			mutex_unlock(&(ps->rconn->source.in.rcv_lock));
		} else if (ps->rconn->sourcetype == SOURCE_SOCK) {
			mutex_lock(&(ps->rconn->source.sock.lock));
			cor_flush_sockbuf(ps->rconn);
			mutex_unlock(&(ps->rconn->source.sock.lock));
		} else {
			BUG();
		}
	}
}
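
/* htable match function: compare the conn_id, seqno and neighbor of a
 * queued skb against a struct retransmit_matchparam. */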
static int matches_skb_connid_seqno(void *htentry, void *searcheditem)
{
	struct sk_buff *skb = (struct sk_buff *) htentry;
	struct skb_procstate *ps = skb_pstate(skb);
	struct retransmit_matchparam *rm = (struct retransmit_matchparam *)
			searcheditem;

	return rm->conn_id == ps->funcstate.retransmit_queue.conn_id &&
			rm->seqno == ps->funcstate.retransmit_queue.seqno &&
			rm->nb == ps->rconn->target.out.nb;
}
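
/* The htable entry and the ref_counter live inside the skb's control
 * buffer (skb->cb), so both offsets are computed relative to it. */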
static inline __u32 retransmit_entryoffset(void)
{
	return offsetof(struct sk_buff, cb) + offsetof(struct skb_procstate,
			funcstate.retransmit_queue.htab_entry);
}

static inline __u32 retransmit_refsoffset(void)
{
	return offsetof(struct sk_buff, cb) + offsetof(struct skb_procstate,
			funcstate.retransmit_queue.refs);
}

int __init cor_snd_init(void)
{
	htable_init(&retransmits, matches_skb_connid_seqno,
			retransmit_entryoffset(), retransmit_refsoffset());

	return 0;
}

MODULE_LICENSE("GPL");