2 * Connection oriented routing
3 * Copyright (C) 2007-2008 Michael Blizek
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 #include <linux/mutex.h>
/* Serializes access to the openports bind list (see open_port/connect_port). */
25 DEFINE_MUTEX(cor_bindnodes
);
/* Held across conn teardown in free_conn(). */
26 DEFINE_MUTEX(conn_free
);
/* Serializes connection-id generation in connid_alloc(). */
28 DEFINE_MUTEX(connid_gen
);
/* Slab cache for struct conn objects. */
37 struct kmem_cache
*conn_slab
;
/* Hash table mapping receive connection ids to struct conn entries. */
39 struct htable connid_table
;
/* Slab caches for port bind nodes and connection listeners. */
41 struct kmem_cache
*bindnode_slab
;
42 struct kmem_cache
*connlistener_slab
;
/*
 * Drop one reference from *cnt under cnt->lock (irqsave).
 * NOTE(review): the extraction lost several original lines here (45-48,
 * 50, 52, 54, 56-57, 59-60), including the actual decrement and whatever
 * runs when refs reaches 0 — confirm against the full source.
 */
44 void ref_counter_decr(struct ref_counter
*cnt
)
/* A counter must always have its ops/def table set. */
49 BUG_ON(0 == cnt
->def
);
51 spin_lock_irqsave(&(cnt
->lock
), iflags
);
/* refs hitting 0 is the (unlikely) teardown path. */
53 if (unlikely(cnt
->refs
== 0)) {
55 local_irq_restore(iflags
);
58 spin_unlock_irqrestore(&(cnt
->lock
), iflags
);
/*
 * Try to take a reference on *cnt; the likely path requires the counter
 * to still be active (cnt->is_active).
 * NOTE(review): the lines between the visible ones (62-65, 68-70, 72-75)
 * were lost in extraction, including the increment itself and the return
 * values — presumably returns 0 on success, nonzero if inactive; confirm.
 */
61 int ref_counter_incr(struct ref_counter
*cnt
)
66 spin_lock_irqsave(&(cnt
->lock
), iflags
);
67 if (likely(cnt
->is_active
)) {
71 spin_unlock_irqrestore(&(cnt
->lock
), iflags
);
/*
 * Initialize a ref_counter with its ops table *def.
 * Only the spinlock init is visible here; the assignments of def, the
 * initial refcount and is_active were lost in extraction (original
 * lines 77-80, 82-86) — confirm against the full source.
 */
76 void ref_counter_init(struct ref_counter
*cnt
, struct ref_counter_def
*def
)
81 spin_lock_init(&(cnt
->lock
));
/*
 * Size of the per-cell header, rounded up to pointer alignment.
 * NOTE(review): the tail of the expression (original line 91+) was lost
 * in extraction — presumably it multiplies the quotient back by
 * sizeof(void *) to complete the round-up; confirm.
 */
88 static inline int hdr_size(void)
90 return ((sizeof(struct cell_hdr
) + sizeof(void *) - 1) / sizeof(void *)
/* Number of element slots that fit into one hash-table cell of
 * cell_size bytes, after subtracting the (aligned) cell header. */
static inline int elements_per_cell(int cell_size)
{
	int payload = cell_size - hdr_size();

	return payload / sizeof(void *);
}
99 static inline struct cell_hdr
*cell_addr(struct htable
*ht
, __u32 id
)
101 int idx
= (id
%ht
->htable_size
) / (elements_per_cell(ht
->cell_size
));
102 return (struct cell_hdr
*) (((char *)ht
->htable
) + ht
->cell_size
* idx
);
/*
 * Address of the element slot for id within its cell: cell base +
 * aligned header + slot index * pointer size.  The slot index is the
 * element index modulo the per-cell capacity (cell_addr() used the
 * quotient).
 * NOTE(review): the "return (char **)" line itself (original 108) was
 * lost in extraction.
 */
105 static inline char **element_addr(struct htable
*ht
, __u32 id
)
107 int idx
= (id
%ht
->htable_size
) % (elements_per_cell(ht
->cell_size
));
109 ( ((char *)cell_addr(ht
, id
)) +
110 hdr_size() + idx
*sizeof(void *) );
114 static inline char **next_element(struct htable
*ht
, char *element
)
116 return (char **)(element
+ ht
->entry_offset
);
119 static inline struct ref_counter
*element_refcnt(struct htable
*ht
, char *element
)
121 return (struct ref_counter
*)(element
+ ht
->ref_counter_offset
);
125 static inline void unlock_element(struct htable
*ht
, __u32 key
)
127 struct cell_hdr
*hdr
= cell_addr(ht
, key
);
128 spin_unlock( &(hdr
->lock
) );
/*
 * Find the slot for key with the cell lock held on return (callers
 * release it via unlock_element()); walks the chain via next_element()
 * looking for ht->matches(*element, searcheditem).
 * NOTE(review): the loop structure and the return statement (original
 * lines ~141-144, 146-153) were lost in extraction — confirm.
 */
132 static char **get_element_nounlock(struct htable
*ht
, __u32 key
,
135 struct cell_hdr
*hdr
= cell_addr(ht
, key
);
136 char **element
= element_addr(ht
, key
);
138 BUG_ON(0 == element
);
/* Plain spin_lock: callers disable local irqs around this. */
140 spin_lock( &(hdr
->lock
) );
145 if (searcheditem
!= 0 && ht
->matches(*element
, searcheditem
))
148 element
= next_element(ht
, *element
);
/*
 * Look up key/searcheditem and return the entry with its refcounter
 * incremented, all under the cell lock with local irqs disabled.
 * NOTE(review): the declaration of "element", the not-found check and
 * the return statement were lost in extraction — confirm the not-found
 * (0) path against the full source.
 */
154 char *htable_get(struct htable
*ht
, __u32 key
, void *searcheditem
)
156 unsigned long iflags
;
159 local_irq_save(iflags
);
160 element
= *(get_element_nounlock(ht
, key
, searcheditem
));
/* Take a reference on behalf of the caller before dropping the lock. */
162 ref_counter_incr(element_refcnt(ht
, element
));
163 unlock_element(ht
, key
);
164 local_irq_restore(iflags
);
/*
 * Unlink oldelement (found via key/searcheditem) from its cell chain
 * and drop the table's reference on it.
 * NOTE(review): the signature continuation, the walk that locates the
 * matching slot, the actual unlink store and the return value were
 * lost in extraction — confirm details against the full source.
 */
169 int htable_delete(struct htable
*ht
, char *oldelement
, __u32 key
,
172 unsigned long iflags
;
177 local_irq_save(iflags
);
179 element
= get_element_nounlock(ht
, key
, searcheditem
);
180 BUG_ON(0 == element
);
183 /* key not in table */
188 next
= next_element(ht
, *element
);
/* Drop the reference the table held on the removed entry. */
189 ref_counter_decr(element_refcnt(ht
, *element
));
193 unlock_element(ht
, key
);
194 local_irq_restore(iflags
);
/*
 * Insert newelement into key's chain, taking a table reference on it;
 * runs under the cell lock with local irqs disabled.
 * NOTE(review): the declarations of element/next and the store linking
 * the old chain behind the new entry (original lines 202-204, 210,
 * 213-214) were lost in extraction.
 */
199 void htable_insert(struct htable
*ht
, char *newelement
, __u32 key
)
201 unsigned long iflags
;
205 local_irq_save(iflags
);
/* searcheditem == 0: locate the head slot rather than a specific item. */
207 element
= get_element_nounlock(ht
, key
, 0);
208 BUG_ON(0 == element
);
209 ref_counter_incr(element_refcnt(ht
, newelement
));
211 *element
= newelement
;
212 next
= next_element(ht
, *element
);
215 unlock_element(ht
, key
);
216 local_irq_restore(iflags
);
/*
 * Allocate one page for the table, initialize each cell's spinlock and
 * fill in the match callback and the entry/refcounter offsets.
 * NOTE(review): no visible line checks the kmalloc() result (the lines
 * right after it, original 228-229, were lost in extraction) — verify
 * the full source handles allocation failure.
 */
220 void htable_init(struct htable
*ht
, int (*matches
)(void *htentry
,
221 void *searcheditem
), __u32 entry_offset
, __u32 ref_counter_offset
)
227 ht
->htable
= kmalloc(PAGE_SIZE
, GFP_KERNEL
);
230 num_cells
= PAGE_SIZE
/ht
->cell_size
;
/* Give every cell header an initialized lock. */
232 for (j
=0;j
<num_cells
;j
++) {
233 struct cell_hdr
*hdr
= (struct cell_hdr
*)
234 ( ((char *) ht
->htable
) + j
* ht
->cell_size
);
235 spin_lock_init(&(hdr
->lock
));
/* Total number of element slots across all cells. */
238 ht
->htable_size
= num_cells
* elements_per_cell(ht
->cell_size
);
239 ht
->num_elements
= 0;
241 ht
->matches
= matches
;
242 ht
->entry_offset
= entry_offset
;
243 ht
->ref_counter_offset
= ref_counter_offset
;
246 struct conn
*get_conn(__u32 conn_id
)
248 return (struct conn
*) htable_get(&connid_table
, conn_id
, &conn_id
);
/*
 * Pick a random, unused receive connection id for sconn and insert it
 * into connid_table, serialized by connid_gen.
 * NOTE(review): the retry loop around get_random_bytes/get_conn and
 * the early-exit paths (including which branch the first mutex_unlock
 * at original line 276 belongs to) were lost in extraction — confirm.
 */
251 static int connid_alloc(struct conn
*sconn
)
256 BUG_ON(sconn
->sourcetype
!= SOURCE_IN
);
258 mutex_lock(&connid_gen
);
263 get_random_bytes((char *) &conn_id
, sizeof(conn_id
));
/* Collision check: a non-0 result means the id is already taken... */
268 tmp
= get_conn(conn_id
);
/* ...and the reference get_conn() took must be dropped again. */
270 ref_counter_decr(&(tmp
->refs
));
276 mutex_unlock(&connid_gen
);
281 sconn
->source
.in
.conn_id
= conn_id
;
282 htable_insert(&connid_table
, (char *) sconn
, conn_id
);
283 mutex_unlock(&connid_gen
);
/*
 * Release callback for a conn's refcounter (see conn_refcnt): unlink
 * SOURCE_IN conns from connid_table, free the data buffer, detach the
 * reverse direction and return the conn to its slab.  Must only run
 * on a reset conn (BUG_ON otherwise).
 * NOTE(review): a few lines (original 288, 290, 292, 298-299, 301-304,
 * 307, 310) were lost in extraction.
 */
287 static void free_conn(struct ref_counter
*cnt
)
289 struct conn
*conn
= container_of(cnt
, struct conn
, refs
);
291 BUG_ON(conn
->isreset
== 0);
293 mutex_lock(&conn_free
);
294 if (conn
->sourcetype
== SOURCE_IN
) {
295 htable_delete(&connid_table
, (char *) conn
,
296 conn
->source
.in
.conn_id
,
297 &(conn
->source
.in
.conn_id
));
300 databuf_free(&(conn
->buf
));
/* Break the back-pointer so the peer does not reference freed memory. */
305 if (conn
->reversedir
!= 0)
306 conn
->reversedir
->reversedir
= 0;
308 kmem_cache_free(conn_slab
, conn
);
309 mutex_unlock(&conn_free
);
/* Refcounter ops table used by alloc_conn() for both conn directions;
 * the initializer body (presumably .free = free_conn) was lost in
 * extraction — confirm against the full source. */
312 static struct ref_counter_def conn_refcnt
= {
317 * rconn ==> the connection we received the command from
318 * ==> init rconn->target.out + rconn->reversedir->source.in
320 * rc == 1 ==> connid allocation failed
/*
 * Turn rconn (currently TARGET_UNCONNECTED) into an outgoing
 * connection to neighbor nb, and its reverse direction into the
 * corresponding SOURCE_IN connection.  Per the comment above, returns
 * rc == 1 when connid allocation fails.
 * NOTE(review): the error path after connid_alloc() and the return
 * statements were lost in extraction — confirm.
 */
322 int conn_init_out(struct conn
*rconn
, struct neighbor
*nb
)
324 struct conn
*sconn
= rconn
->reversedir
;
326 BUG_ON(TARGET_UNCONNECTED
!= rconn
->targettype
);
328 BUG_ON(SOURCE_NONE
!= sconn
->sourcetype
);
330 memset(&(rconn
->target
.out
), 0, sizeof(rconn
->target
.out
));
331 memset(&(sconn
->source
.in
), 0, sizeof(sconn
->source
.in
));
333 rconn
->targettype
= TARGET_OUT
;
334 sconn
->sourcetype
= SOURCE_IN
;
336 rconn
->target
.out
.nb
= nb
;
337 sconn
->source
.in
.nb
= nb
;
339 skb_queue_head_init(&(sconn
->source
.in
.reorder_queue
));
342 * connid_alloc has to be called last, because packets may be received
343 * immediately after its execution
345 if (connid_alloc(sconn
)) {
349 mutex_lock(&(nb
->conn_list_lock
));
350 list_add_tail(&(sconn
->source
.in
.nb_list
), &(nb
->rcv_conn_list
));
351 list_add_tail(&(rconn
->target
.out
.nb_list
), &(nb
->snd_conn_list
));
352 mutex_unlock(&(nb
->conn_list_lock
));
355 #warning this should not be refs, they should be freed via free_conn automatically, if possible (race?)
356 ref_counter_incr(&(rconn
->refs
));
357 ref_counter_incr(&(sconn
->refs
));
/*
 * Mark conn's source side as a local socket: zero source.sock and
 * initialize its wait queue.  (A few lines between the visible ones
 * were lost in extraction — presumably just braces and possibly a
 * BUG_ON; confirm.)
 */
362 void conn_init_sock_source(struct conn
*conn
)
366 conn
->sourcetype
= SOURCE_SOCK
;
368 memset(&(conn
->source
.sock
), 0, sizeof(conn
->source
.sock
));
370 init_waitqueue_head(&(conn
->source
.sock
.wait
));
/*
 * Mark conn's target side as a local socket and init its wait queue.
 * NOTE(review): no memset of target.sock is visible here (unlike
 * conn_init_sock_source), but some lines were lost in extraction —
 * confirm whether the asymmetry is real.
 */
373 void conn_init_sock_target(struct conn
*conn
)
376 conn
->targettype
= TARGET_SOCK
;
377 init_waitqueue_head(&(conn
->target
.sock
.wait
));
/*
 * Allocate a conn pair (both directions) from conn_slab, zero them,
 * link them via reversedir, and initialize refcounters, rcv locks,
 * sockstate and data buffers.
 * NOTE(review): the goto targets/returns were lost in extraction —
 * presumably returns rconn on success and 0 on allocation failure,
 * freeing rconn when the second allocation fails (visible at original
 * line 414); confirm.
 */
380 struct conn
* alloc_conn(gfp_t allocflags
)
382 struct conn
*rconn
= 0;
383 struct conn
*sconn
= 0;
385 rconn
= kmem_cache_alloc(conn_slab
, allocflags
);
386 if (unlikely(0 == rconn
))
389 sconn
= kmem_cache_alloc(conn_slab
, allocflags
);
390 if (unlikely(0 == sconn
))
393 memset(rconn
, 0, sizeof(struct conn
));
394 memset(sconn
, 0, sizeof(struct conn
));
/* Each direction points at the other. */
396 rconn
->reversedir
= sconn
;
397 sconn
->reversedir
= rconn
;
399 ref_counter_init(&(rconn
->refs
), &conn_refcnt
);
400 ref_counter_init(&(sconn
->refs
), &conn_refcnt
);
402 mutex_init(&(rconn
->rcv_lock
));
403 mutex_init(&(sconn
->rcv_lock
));
405 rconn
->sockstate
= SOCKSTATE_CONN
;
406 sconn
->sockstate
= SOCKSTATE_CONN
;
408 databuf_init(&(rconn
->buf
));
409 databuf_init(&(sconn
->buf
));
/* Error path: second allocation failed, release the first conn. */
414 kmem_cache_free(conn_slab
, rconn
);
/*
 * Linear scan of the openports bind list for a node bound to port;
 * returns its owning connlistener.  Callers hold cor_bindnodes.
 * NOTE(review): the loop tail and the not-found return were lost in
 * extraction — presumably returns 0 when the port is unbound; confirm.
 */
419 static struct connlistener
*get_connlistener(__be64 port
)
421 struct list_head
*curr
= openports
.next
;
423 while (curr
!= &openports
) {
/* Hand-rolled container_of: bindnode embeds its list head at lh. */
424 struct bindnode
*currnode
= ((struct bindnode
*)
425 (((char *)curr
) - offsetof(struct bindnode
, lh
)));
426 if (currnode
->port
== port
) {
427 BUG_ON(currnode
->owner
== 0);
428 return currnode
->owner
;
/* Close a listening port.  The entire body (original lines 438-440)
 * was lost in extraction — presumably it unlinks the bindnode from
 * openports and frees it and the listener; confirm against the full
 * source. */
437 void close_port(struct connlistener
*listener
)
/*
 * Bind port and create a listener for it, under cor_bindnodes.
 * NOTE(review): the error paths (port already bound, allocation
 * failures, the returns — original lines around 450-452, 455,
 * 470-477) were lost in extraction; in particular no visible line
 * checks the two kmem_cache_alloc() results before the memsets —
 * confirm the full source handles failure.
 */
442 struct connlistener
*open_port(__be64 port
)
445 struct bindnode
*bn
= 0;
446 struct connlistener
*listener
= 0;
448 mutex_lock(&cor_bindnodes
);
/* Refuse double-binding the same port. */
449 if (get_connlistener(port
) != 0)
453 bn
= kmem_cache_alloc(bindnode_slab
, GFP_KERNEL
);
454 listener
= kmem_cache_alloc(connlistener_slab
, GFP_KERNEL
);
456 memset(bn
, 0, sizeof(struct bindnode
));
457 memset(listener
, 0, sizeof(struct connlistener
));
459 bn
->owner
= listener
;
462 /* refcounter is not actually used */
463 listener
->sockstate
= SOCKSTATE_LISTENER
;
465 mutex_init(&(listener
->lock
));
466 INIT_LIST_HEAD(&(listener
->conn_queue
));
467 init_waitqueue_head(&(listener
->wait
));
469 list_add_tail((struct list_head
*) &(bn
->lh
), &openports
);
472 mutex_unlock(&cor_bindnodes
);
479 * rc == 2 port not open
480 * rc == 3 listener queue full
/*
 * Hand rconn to the local listener bound to port: queue its reverse
 * direction on the listener's conn_queue and wake any waiter.
 * Return codes per the comment above: 2 == port not open, 3 == queue
 * full.  NOTE(review): several lines (the port-not-open branch, the
 * queue_maxlen <= 0 handling, the returns) were lost in extraction.
 */
482 int connect_port(struct conn
*rconn
, __be64 port
)
485 struct connlistener
*listener
;
488 mutex_lock(&cor_bindnodes
);
490 listener
= get_connlistener(port
);
496 mutex_lock(&(listener
->lock
));
498 if (listener
->queue_len
>= listener
->queue_maxlen
) {
/* queue_maxlen <= 0 appears to be a special case — confirm. */
499 if (listener
->queue_maxlen
<= 0)
/* Queued conns must stay referenced until accepted. */
507 if (ref_counter_incr(&(rconn
->reversedir
->refs
)))
510 conn_init_sock_target(rconn
);
511 conn_init_sock_source(rconn
->reversedir
);
513 list_add_tail(&(rconn
->reversedir
->source
.sock
.cl_list
),
514 &(listener
->conn_queue
));
515 listener
->queue_len
++;
516 wake_up_interruptible(&(listener
->wait
));
519 mutex_unlock(&(listener
->lock
));
522 mutex_unlock(&cor_bindnodes
);
528 * rc == 2 addrtype not found
529 * rc == 3 addr not found
530 * rc == 4 ==> connid allocation failed
/*
 * Connect rconn to the neighbor matching the given address; return
 * codes per the comment above (2/3 == address lookup failure, 4 ==
 * connid allocation failed).
 * NOTE(review): the nb == 0 check and the return statements were lost
 * in extraction; note the reference on nb is taken only after
 * conn_init_out() succeeds.
 */
532 int connect_neigh(struct conn
*rconn
,
533 __u16 addrtypelen
, __u8
*addrtype
,
534 __u16 addrlen
, __u8
*addr
)
536 struct neighbor
*nb
= find_neigh(addrtypelen
, addrtype
, addrlen
, addr
);
539 if (conn_init_out(rconn
, nb
))
542 ref_counter_incr(&(nb
->refs
));
543 send_connect_nb(rconn
);
/*
 * Mark both directions of the connection as reset (free_conn BUG_ONs
 * on conns that were not).
 * NOTE(review): the store to conn->isreset itself (original line 550)
 * was lost in extraction; only the reversedir store is visible.
 */
548 void reset_conn(struct conn
*conn
)
551 conn
->reversedir
->isreset
= 1;
556 static int matches_connid_in(void *htentry
, void *searcheditem
)
558 struct conn
*conn
= (struct conn
*) htentry
;
559 __u32 conn_id
= *((__u32
*) searcheditem
);
561 BUG_ON(conn
->sourcetype
!= SOURCE_IN
);
563 return conn
->source
.in
.conn_id
== conn_id
;
/*
 * Module init: dump struct sizes, create the slab caches, set up the
 * connid hash table and run cor_neighbor_init().
 * NOTE(review): sizeof yields size_t, so the "%d" printk specifiers
 * below should be "%zu" (or the values cast) — as written this is a
 * format/argument mismatch on 64-bit builds.  Several lines (the
 * declarations of c and rc, error handling, the return) were lost in
 * extraction.
 */
566 static int __init
cor_common_init(void)
572 printk(KERN_ERR
"sizeof conn: %d", sizeof(c
));
573 printk(KERN_ERR
" conn.source: %d", sizeof(c
.source
));
574 printk(KERN_ERR
" conn.target: %d", sizeof(c
.target
));
575 printk(KERN_ERR
" conn.target.out: %d", sizeof(c
.target
.out
));
576 printk(KERN_ERR
" conn.target.buf: %d", sizeof(c
.buf
));
578 printk(KERN_ERR
" mutex: %d", sizeof(struct mutex
));
579 printk(KERN_ERR
" spinlock: %d", sizeof(spinlock_t
));
580 printk(KERN_ERR
" kref: %d", sizeof(struct kref
));
/* NOTE(review): no visible line checks the kmem_cache_create results
 * — confirm error handling in the full source. */
583 conn_slab
= kmem_cache_create("cor_conn", sizeof(struct conn
), 8, 0, 0);
584 htable_init(&connid_table
, matches_connid_in
,
585 offsetof(struct conn
, source
.in
.htab_entry
),
586 offsetof(struct conn
, refs
));
588 bindnode_slab
= kmem_cache_create("cor_bindnode",
589 sizeof(struct bindnode
), 8, 0, 0);
590 connlistener_slab
= kmem_cache_create("cor_connlistener",
591 sizeof(struct connlistener
), 8, 0, 0);
599 rc
= cor_neighbor_init();
/* Register cor_common_init as the module entry point. */
610 module_init(cor_common_init
);
611 MODULE_LICENSE("GPL");