/*
 * Connection oriented routing
 * Copyright (C) 2007-2008 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include <linux/gfp.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
static struct htable retransmits;

static atomic_t kpacket_seqno = ATOMIC_INIT(1);
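
/*
 * Packets sent to a neighbor are kept around until they are acked:
 * send_packet() puts the skb into the "retransmits" hash table (keyed by
 * conn_id/seqno) and onto the neighbor's retrans_list, and ack_received()
 * removes it again.  kpacket_seqno hands out sequence numbers for
 * kernel-generated packets (see create_packet_kernel()).
 */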
static void free_skb(struct ref_counter *cnt)
{
	struct skb_procstate *ps = container_of(cnt, struct skb_procstate,
			funcstate.retransmit_queue.refs);
	struct sk_buff *skb = skb_from_pstate(ps);

	/* the skb held a reference on its conn while it was unacked */
	ref_counter_decr(&(ps->rconn->refs));

	kfree_skb(skb);
}

static struct ref_counter_def skb_refcnt = {
	.free = free_skb
};
struct retransmit_matchparam {
	__u32 conn_id;
	__u32 seqno;
	struct neighbor *nb;
};

static __u32 rm_to_key(struct retransmit_matchparam *rm)
{
	return rm->conn_id ^ rm->seqno;
}
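
/*
 * The hash key is only conn_id XOR seqno, so different
 * (conn_id, seqno, neighbor) tuples can land in the same bucket; the exact
 * comparison is done by matches_skb_connid_seqno() further below.
 */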
/* static struct sk_buff * cor_dequeue(struct Qdisc *sch)
{
	struct sk_buff *ret = 0;
	struct cor_sched_data *q = qdisc_priv(sch);
	struct list_head *ln = q->conn_list.next;
	struct conn *best = 0;
	__u64 currcost_limit = 0;
	__u64 currcost = 0;

	spin_lock(&(q->lock));

	if (!(skb_queue_empty(&(q->requeue_queue)))) {
		ret = __skb_dequeue(&(q->requeue_queue));
		goto out;
	}

	while (&(q->conn_list) != ln) {
		__u32 max1, max2, maxcost;
		struct conn *curr = (struct conn *)
				(((char *) ln) - offsetof(struct conn,
				target.out.queue_list));

		BUG_ON(TARGET_OUT != curr->targettype);

		max1 = (256 * ((__u64)curr->credits)) /
				((__u64)curr->bytes_queued + curr->avg_rate);
		max2 = (256 * ((__u64)curr->credits +
				curr->credit_sender - curr->credit_recp)) /
				((__u64)curr->bytes_queued + 2*curr->avg_rate);
		maxcost = max((__u32) 0, min((max1), (max2)));

		if (maxcost > currcost_limit) {
			currcost = currcost_limit;
			currcost_limit = maxcost;
			best = curr;
		}

		ln = ln->next;
	}

	if (best == 0)
		goto out;

	best->credits -= currcost;

	ret = __skb_dequeue(&(best->target.out.queue));

	if (skb_queue_empty(&(best->target.out.queue))) {
		list_del(&(best->target.out.queue_list));
		best->target.out.qdisc_active = 0;
	}

out:
	spin_unlock(&(q->lock));

	if (likely(0 != ret)) {
		sch->qstats.backlog -= ret->len;
	}

	return ret;
}

static int cor_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct cor_sched_data *q = qdisc_priv(sch);
	struct conn *rconn;

	rconn = skb_pstate(skb)->rconn;

	BUG_ON(TARGET_OUT != rconn->targettype);

	spin_lock(&(rconn->target.out.qdisc_lock));

	__skb_queue_tail(&(rconn->target.out.queue), skb);

	if (unlikely(0 == rconn->target.out.qdisc_active)) {
		spin_lock(&(q->lock));
		list_add(&(rconn->target.out.queue_list), &(q->conn_list));
		rconn->target.out.qdisc_active = 1;
		spin_unlock(&(q->lock));
	}

	spin_unlock(&(rconn->target.out.qdisc_lock));

	sch->bstats.bytes += skb->len;
	sch->bstats.packets++;

	return NET_XMIT_SUCCESS;
} */
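
/*
 * The qdisc hooks above are commented out.  They appear to sketch a simple
 * credit-based scheduler: for every queued connection it computes
 *     max1 = 256 * credits / (bytes_queued + avg_rate)
 *     max2 = 256 * (credits + credit_sender - credit_recp) /
 *            (bytes_queued + 2*avg_rate)
 * dequeues from the connection with the largest min(max1, max2) and charges
 * it the previous maximum seen (a second-price style cost).
 */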
static void cor_xmit(struct sk_buff *skb)
{
	struct sk_buff *skb2;

	skb2 = skb_clone(skb, __GFP_DMA | GFP_KERNEL);

	if (unlikely(skb2 == 0)) {
		printk(KERN_WARNING "cor_xmit: cannot clone skb, "
				"allocation failure?\n");
		return;
	}

	dev_queue_xmit(skb2);
}
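
/*
 * cor_xmit() clones the skb before handing it to dev_queue_xmit(),
 * presumably because the original has to stay queued on the neighbor's
 * retrans_list until the corresponding ack arrives; only the clone is given
 * to the device and may be freed by it.
 */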
void retransmit_timerfunc(unsigned long arg)
{
	unsigned long iflags;

	struct neighbor *nb = (struct neighbor *) arg;
	struct sk_buff *skb = 0;
	struct skb_procstate *ps = 0;
	unsigned long timeout;

	while (1) {
		spin_lock_irqsave( &(nb->retrans_lock), iflags );

		skb = __skb_dequeue(&(nb->retrans_list));
		if (skb == 0) {
			spin_unlock_irqrestore( &(nb->retrans_lock), iflags );
			return;
		}

		ps = skb_pstate(skb);
		timeout = ps->funcstate.retransmit_queue.timeout;

		if (time_before(jiffies, timeout)) {
			/* not due yet: put it back at the head and re-arm
			 * the timer for its timeout below */
			__skb_queue_head(&(nb->retrans_list), skb);
			break;
		}

		/* due for retransmission: push the timeout into the future,
		 * move the skb to the tail and resend it outside the lock */
		ps->funcstate.retransmit_queue.timeout = jiffies + nb->latency;
		__skb_queue_tail(&(nb->retrans_list), skb);

		spin_unlock_irqrestore( &(nb->retrans_lock), iflags );

		cor_xmit(skb);
	}

	mod_timer(&(nb->retrans_timer), timeout);

	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );
}
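
/*
 * create_packet() below builds the 17 byte cor packet header in front of
 * the payload:
 *
 *   offset  0   1 byte   PACKET_TYPE_DATA
 *   offset  1   4 bytes  keyid
 *   offset  5   4 bytes  keyseq
 *   offset  9   4 bytes  conn_id
 *   offset 13   4 bytes  seqno
 *
 * conn_id and seqno are also stored in the skb's procstate so that the
 * packet can later be matched against incoming acks.
 */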
static struct sk_buff *create_packet(struct neighbor *nb, int size,
		gfp_t alloc_flags, __u32 keyid, __u32 keyseq, __u32 conn_id,
		__u32 seqno)
{
	struct net_device *dev = nb->dev;
	struct sk_buff *ret;
	struct skb_procstate *ps;
	char *dest;

	ret = alloc_skb(size + 17 + LL_ALLOCATED_SPACE(dev), alloc_flags);
	if (unlikely(0 == ret))
		return 0;

	ret->protocol = htons(ETH_P_COR);

	ps = skb_pstate(ret);
	ps->funcstate.retransmit_queue.conn_id = conn_id;
	ps->funcstate.retransmit_queue.seqno = seqno;

	skb_reserve(ret, LL_RESERVED_SPACE(dev));
	if(unlikely(dev_hard_header(ret, dev, ETH_P_COR, nb->mac,
			dev->dev_addr, ret->len) < 0))
		return 0;
	skb_reserve(ret, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(ret);

	dest = skb_put(ret, 17);

	dest[0] = PACKET_TYPE_DATA;
	dest += 1;

	put_u32(dest, keyid, 1);
	dest += 4;
	put_u32(dest, keyseq, 1);
	dest += 4;
	put_u32(dest, conn_id, 1);
	dest += 4;
	put_u32(dest, seqno, 1);
	dest += 4;

	return ret;
}
struct sk_buff *create_packet_conn(struct conn *target, int size,
		gfp_t alloc_flags)
{
	__u32 connid = target->target.out.conn_id;
	__u32 seqno;

	seqno = target->target.out.seqno;
	target->target.out.seqno += size;

	atomic_inc(&(target->target.out.inflight_packets));
	return create_packet(target->target.out.nb, size, alloc_flags, 0, 0,
			connid, seqno);
}
struct sk_buff *create_packet_kernel(struct neighbor *nb, int size,
		gfp_t alloc_flags)
{
	__u32 seqno = atomic_add_return(1, &kpacket_seqno);

	return create_packet(nb, size, alloc_flags, 0, 0, 0, seqno);
}
void send_conn_flushdata(struct conn *rconn, char *data, __u32 datalen)
{
	__u32 seqno;

	seqno = rconn->target.out.seqno;
	rconn->target.out.seqno += datalen;

#warning todo retransmit
	send_conndata(rconn->target.out.nb, rconn->target.out.conn_id, seqno,
			data, data, datalen);
}
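
/*
 * send_conn_flushdata() does not yet go through the retransmit machinery
 * (hence the #warning above); send_packet() below is the path that
 * registers an skb in the retransmit hash table and on the neighbor's
 * retransmit queue.
 */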
void send_packet(struct sk_buff *skb, struct neighbor *nb)
{
	unsigned long iflags;
	int first;

	struct skb_procstate *ps = skb_pstate(skb);
	struct retransmit_matchparam rm;

	BUG_ON(ps->rconn->targettype != TARGET_OUT);

	rm.conn_id = ps->funcstate.retransmit_queue.conn_id;
	rm.seqno = ps->funcstate.retransmit_queue.seqno;
	rm.nb = nb;

	ps->funcstate.retransmit_queue.timeout = jiffies + nb->latency;
	ref_counter_init(&(ps->funcstate.retransmit_queue.refs), &skb_refcnt);

	htable_insert(&retransmits, (char *) skb, rm_to_key(&rm));

	spin_lock_irqsave( &(nb->retrans_lock), iflags );

	first = unlikely(skb_queue_empty(&(nb->retrans_list)));
	__skb_queue_tail(&(nb->retrans_list), skb);

	if (first)
		mod_timer(&(nb->retrans_timer),
				ps->funcstate.retransmit_queue.timeout);

	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );

	ref_counter_incr(&(ps->rconn->refs));

	ref_counter_incr(&(ps->funcstate.retransmit_queue.refs));
}
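
/*
 * ack_received() undoes what send_packet() set up: it looks the packet up
 * by (conn_id, seqno, neighbor), drops it from the hash table and from the
 * neighbor's retrans_list, and, once no packets of the connection are in
 * flight any more, flushes the sender's receive or socket buffer so that
 * more data can be sent.
 */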
void ack_received(struct neighbor *nb, __u32 conn_id, __u32 seqno,
		/* ... */)
{
	unsigned long iflags;
	int ret;

	struct sk_buff *skb = 0;
	struct skb_procstate *ps;
	struct retransmit_matchparam rm;

	rm.conn_id = conn_id;
	rm.seqno = seqno;
	rm.nb = nb;

	skb = (struct sk_buff *) htable_get(&retransmits, rm_to_key(&rm), &rm);
	if (skb == 0)
		return;

	ps = skb_pstate(skb);

	ret = htable_delete(&retransmits, (char *) skb, rm_to_key(&rm), &rm);
	if (ret != 0) {
		/* somebody else has already deleted it in the meantime */
		return;
	}

	spin_lock_irqsave( &(nb->retrans_lock), iflags );

	if (unlikely(nb->retrans_list.next == skb))
		/* ... */;
	skb->next->prev = skb->prev;
	skb->prev->next = skb->next;

	if (unlikely(skb_queue_empty(&(nb->retrans_list)))) {
		mod_timer(&(nb->retrans_timer), jiffies + nb->latency);
	}

	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );

	if (atomic_dec_and_test(&(ps->rconn->target.out.inflight_packets))) {
		if (ps->rconn->sourcetype == SOURCE_IN) {
			mutex_lock(&(ps->rconn->source.in.rcv_lock));
			flush_rbuf(ps->rconn);
			mutex_unlock(&(ps->rconn->source.in.rcv_lock));
		} else if (ps->rconn->sourcetype == SOURCE_SOCK) {
			mutex_lock(&(ps->rconn->source.sock.lock));
			cor_flush_sockbuf(ps->rconn);
			mutex_unlock(&(ps->rconn->source.sock.lock));
		}
	}
}
static int matches_skb_connid_seqno(void *htentry, void *searcheditem)
{
	struct sk_buff *skb = (struct sk_buff *) htentry;
	struct skb_procstate *ps = skb_pstate(skb);
	struct retransmit_matchparam *rm = (struct retransmit_matchparam *)
			searcheditem;

	return rm->conn_id == ps->funcstate.retransmit_queue.conn_id &&
			rm->seqno == ps->funcstate.retransmit_queue.seqno &&
			rm->nb == ps->rconn->target.out.nb;
}
static inline __u32 retransmit_entryoffset(void)
{
	return offsetof(struct sk_buff, cb) + offsetof(struct skb_procstate,
			funcstate.retransmit_queue.htab_entry);
}

static inline __u32 retransmit_refsoffset(void)
{
	return offsetof(struct sk_buff, cb) + offsetof(struct skb_procstate,
			funcstate.retransmit_queue.refs);
}
int __init cor_snd_init(void)
{
	htable_init(&retransmits, matches_skb_connid_seqno,
			retransmit_entryoffset(), retransmit_refsoffset());

	return 0;
}
MODULE_LICENSE("GPL");