struct conn size optimisation
[cor_2_6_31.git] / net / cor / common.c
blobe8debb4cf0551a9d051720403b0afc7837e4e495
1 /*
2 * Connection oriented routing
3 * Copyright (C) 2007-2008 Michael Blizek
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
21 #include <linux/mutex.h>
23 #include "cor.h"
/* Protects openports (the list of bound ports) */
DEFINE_MUTEX(cor_bindnodes);
/* Serialises connection teardown in free_conn() */
DEFINE_MUTEX(conn_free);

/* Serialises connection id allocation in connid_alloc() */
DEFINE_MUTEX(connid_gen);

/* All currently bound ports (struct bindnode.lh); guarded by cor_bindnodes */
LIST_HEAD(openports);
/*
 * Header placed at the start of every hashtable cell; its lock guards
 * all element slots stored in that cell.
 */
struct cell_hdr{
	spinlock_t lock;
};

/* slab cache for struct conn (always allocated in reversedir pairs) */
struct kmem_cache *conn_slab;

/* conn_id -> struct conn lookup table for SOURCE_IN connections */
struct htable connid_table;

struct kmem_cache *bindnode_slab;
struct kmem_cache *connlistener_slab;
/*
 * Drop one reference from cnt; when the count reaches zero the object is
 * destroyed via cnt->def->free().
 *
 * NOTE(review): on the free path the spinlock is deliberately *not*
 * unlocked -- the lock is embedded in the object that free() just
 * released, so only the irq state saved by spin_lock_irqsave() is
 * restored.  This is only safe if no other CPU can still be spinning on
 * cnt->lock at that point; presumably the is_active handshake in
 * ref_counter_incr() is meant to guarantee that -- verify.
 */
void ref_counter_decr(struct ref_counter *cnt)
{
	unsigned long iflags;

	BUG_ON(0 == cnt);
	BUG_ON(0 == cnt->def);

	spin_lock_irqsave(&(cnt->lock), iflags);
	cnt->refs--;
	if (unlikely(cnt->refs == 0)) {
		cnt->def->free(cnt);
		local_irq_restore(iflags);
		return;
	}
	spin_unlock_irqrestore(&(cnt->lock), iflags);
}
61 int ref_counter_incr(struct ref_counter *cnt)
63 unsigned long iflags;
64 int ret = 1;
66 spin_lock_irqsave(&(cnt->lock), iflags);
67 if (likely(cnt->is_active)) {
68 ret = 0;
69 cnt->refs++;
71 spin_unlock_irqrestore(&(cnt->lock), iflags);
73 return ret;
76 void ref_counter_init(struct ref_counter *cnt, struct ref_counter_def *def)
78 BUG_ON(0 == cnt);
79 BUG_ON(0 == def);
81 spin_lock_init(&(cnt->lock));
82 cnt->refs = 1;
83 cnt->is_active = 1;
84 cnt->def = def;
88 static inline int hdr_size(void)
90 return ((sizeof(struct cell_hdr) + sizeof(void *) - 1) / sizeof(void *)
91 ) * sizeof(void *);
/* Number of element slots that fit into one cell after its header. */
static inline int elements_per_cell(int cell_size)
{
	int payload = cell_size - hdr_size();

	return payload / sizeof(void *);
}
/*
 * Map an id to the header of the cell that holds its bucket.  Buckets
 * are numbered id % htable_size across all cells; dividing by the
 * per-cell capacity selects the cell.
 */
static inline struct cell_hdr *cell_addr(struct htable *ht, __u32 id)
{
	int idx = (id%ht->htable_size) / (elements_per_cell(ht->cell_size));
	return (struct cell_hdr *) (((char *)ht->htable) + ht->cell_size * idx);
}
/*
 * Address of the bucket slot for id: the cell header is skipped
 * (hdr_size()) and the slot index within the cell is the remainder of
 * the bucket number by the per-cell capacity.
 */
static inline char **element_addr(struct htable *ht, __u32 id)
{
	int idx = (id%ht->htable_size) % (elements_per_cell(ht->cell_size));
	return (char **)
			( ((char *)cell_addr(ht, id)) +
			hdr_size() + idx*sizeof(void *) );
}
114 static inline char **next_element(struct htable *ht, char *element)
116 return (char **)(element + ht->entry_offset);
119 static inline struct ref_counter *element_refcnt(struct htable *ht, char *element)
121 return (struct ref_counter *)(element + ht->ref_counter_offset);
125 static inline void unlock_element(struct htable *ht, __u32 key)
127 struct cell_hdr *hdr = cell_addr(ht, key);
128 spin_unlock( &(hdr->lock) );
/*
 * Walk the chain of key's bucket and return the slot that either points
 * to the element matching searcheditem (per ht->matches) or, if
 * searcheditem is 0 or no match exists, the empty slot at the end of the
 * chain.
 *
 * Returns with the cell spinlock HELD; the caller must call
 * unlock_element(ht, key) when done (irqs must already be disabled --
 * callers use local_irq_save()).
 */
static char **get_element_nounlock(struct htable *ht, __u32 key,
		void *searcheditem)
{
	struct cell_hdr *hdr = cell_addr(ht, key);
	char **element = element_addr(ht, key);

	BUG_ON(0 == element);

	spin_lock( &(hdr->lock) );

	while (1) {
		if (*element == 0)
			break;
		if (searcheditem != 0 && ht->matches(*element, searcheditem))
			break;
		element = next_element(ht, *element);
	}

	return element;
}
/*
 * Look up searcheditem under key.  Returns the element with an extra
 * reference taken (caller must drop it via ref_counter_decr), or 0 if
 * not found.
 */
char *htable_get(struct htable *ht, __u32 key, void *searcheditem)
{
	unsigned long iflags;
	char *element;

	local_irq_save(iflags);
	element = *(get_element_nounlock(ht, key, searcheditem));
	if (element != 0)
		ref_counter_incr(element_refcnt(ht, element));
	unlock_element(ht, key);
	local_irq_restore(iflags);

	return element;
}
169 int htable_delete(struct htable *ht, char *oldelement, __u32 key,
170 void *searcheditem)
172 unsigned long iflags;
173 char **element;
174 char **next;
175 int rc = 0;
177 local_irq_save(iflags);
179 element = get_element_nounlock(ht, key, searcheditem);
180 BUG_ON(0 == element);
182 if (0 == *element) {
183 /* key not in table */
184 rc = 1;
185 goto out;
188 next = next_element(ht, *element);
189 ref_counter_decr(element_refcnt(ht, *element));
190 *element = *next;
192 out:
193 unlock_element(ht, key);
194 local_irq_restore(iflags);
196 return rc;
199 void htable_insert(struct htable *ht, char *newelement, __u32 key)
201 unsigned long iflags;
202 char **element;
203 char **next;
205 local_irq_save(iflags);
207 element = get_element_nounlock(ht, key, 0);
208 BUG_ON(0 == element);
209 ref_counter_incr(element_refcnt(ht, newelement));
211 *element = newelement;
212 next = next_element(ht, *element);
213 *next = 0;
215 unlock_element(ht, key);
216 local_irq_restore(iflags);
220 void htable_init(struct htable *ht, int (*matches)(void *htentry,
221 void *searcheditem), __u32 entry_offset, __u32 ref_counter_offset)
223 int num_cells;
224 int j;
226 BUG_ON(0 == ht);
227 ht->htable = kmalloc(PAGE_SIZE, GFP_KERNEL);
228 ht->cell_size = 256;
230 num_cells = PAGE_SIZE/ht->cell_size;
232 for (j=0;j<num_cells;j++) {
233 struct cell_hdr *hdr = (struct cell_hdr *)
234 ( ((char *) ht->htable) + j * ht->cell_size );
235 spin_lock_init(&(hdr->lock));
238 ht->htable_size = num_cells * elements_per_cell(ht->cell_size);
239 ht->num_elements = 0;
241 ht->matches = matches;
242 ht->entry_offset = entry_offset;
243 ht->ref_counter_offset = ref_counter_offset;
246 struct conn *get_conn(__u32 conn_id)
248 return (struct conn *) htable_get(&connid_table, conn_id, &conn_id);
/*
 * Pick a random, non-zero, currently unused connection id for sconn and
 * publish sconn in connid_table under it.  Gives up after 16 collisions.
 *
 * Returns 0 on success, 1 if no free id was found.
 */
static int connid_alloc(struct conn *sconn)
{
	__u32 conn_id;
	int i;

	BUG_ON(sconn->sourcetype != SOURCE_IN);

	mutex_lock(&connid_gen);
	for(i=0;i<16;i++) {
		struct conn *tmp;

		conn_id = 0;
		get_random_bytes((char *) &conn_id, sizeof(conn_id));

		/* 0 is reserved as "no connection" */
		if (conn_id == 0)
			continue;

		tmp = get_conn(conn_id);
		if (tmp != 0) {
			/* id already taken; drop the lookup reference */
			ref_counter_decr(&(tmp->refs));
			continue;
		}
		goto found;
	}
	mutex_unlock(&connid_gen);

	return 1;

found:
	sconn->source.in.conn_id = conn_id;
	htable_insert(&connid_table, (char *) sconn, conn_id);
	mutex_unlock(&connid_gen);
	return 0;
}
/*
 * Destructor invoked by the ref counter when the last reference to a
 * conn is dropped (see conn_refcnt).  The conn must already have been
 * reset.  Unpublishes SOURCE_IN conns from connid_table, releases the
 * data buffer, unlinks the reverse direction and frees the slab object.
 */
static void free_conn(struct ref_counter *cnt)
{
	struct conn *conn = container_of(cnt, struct conn, refs);

	BUG_ON(conn->isreset == 0);

	mutex_lock(&conn_free);
	if (conn->sourcetype == SOURCE_IN) {
		htable_delete(&connid_table, (char *) conn,
				conn->source.in.conn_id,
				&(conn->source.in.conn_id));
	}

	databuf_free(&(conn->buf));

	if (conn->reversedir != 0)
		conn->reversedir->reversedir = 0;

	kmem_cache_free(conn_slab, conn);
	mutex_unlock(&conn_free);
}
/* Destructor table wired into every conn's refs in alloc_conn(). */
static struct ref_counter_def conn_refcnt = {
	.free = free_conn
};
317 * rconn ==> the connection we received the commend from
318 * ==> init rconn->target.out + rconn->reversedir->source.in
319 * rc == 0 ==> ok
320 * rc == 1 ==> connid allocation failed
322 int conn_init_out(struct conn *rconn, struct neighbor *nb)
324 struct conn *sconn = rconn->reversedir;
326 BUG_ON(TARGET_UNCONNECTED != rconn->targettype);
327 BUG_ON(0 == sconn);
328 BUG_ON(SOURCE_NONE != sconn->sourcetype);
330 memset(&(rconn->target.out), 0, sizeof(rconn->target.out));
331 memset(&(sconn->source.in), 0, sizeof(sconn->source.in));
333 rconn->targettype = TARGET_OUT;
334 sconn->sourcetype = SOURCE_IN;
336 rconn->target.out.nb = nb;
337 sconn->source.in.nb = nb;
339 skb_queue_head_init(&(sconn->source.in.reorder_queue));
342 * connid_alloc has to be called last, because packets may be received
343 * immediately after its execution
345 if (connid_alloc(sconn)) {
346 return 1;
349 mutex_lock(&(nb->conn_list_lock));
350 list_add_tail(&(sconn->source.in.nb_list), &(nb->rcv_conn_list));
351 list_add_tail(&(rconn->target.out.nb_list), &(nb->snd_conn_list));
352 mutex_unlock(&(nb->conn_list_lock));
354 /* neighbor lists */
355 #warning this should not be refs, they should be freed via free_conn automatically, if possible (race?)
356 ref_counter_incr(&(rconn->refs));
357 ref_counter_incr(&(sconn->refs));
359 return 0;
362 void conn_init_sock_source(struct conn *conn)
364 BUG_ON(conn == 0);
366 conn->sourcetype = SOURCE_SOCK;
368 memset(&(conn->source.sock), 0, sizeof(conn->source.sock));
370 init_waitqueue_head(&(conn->source.sock.wait));
373 void conn_init_sock_target(struct conn *conn)
375 BUG_ON(conn == 0);
376 conn->targettype = TARGET_SOCK;
377 init_waitqueue_head(&(conn->target.sock.wait));
380 struct conn* alloc_conn(gfp_t allocflags)
382 struct conn *rconn = 0;
383 struct conn *sconn = 0;
385 rconn = kmem_cache_alloc(conn_slab, allocflags);
386 if (unlikely(0 == rconn))
387 goto out_err0;
389 sconn = kmem_cache_alloc(conn_slab, allocflags);
390 if (unlikely(0 == sconn))
391 goto out_err1;
393 memset(rconn, 0, sizeof(struct conn));
394 memset(sconn, 0, sizeof(struct conn));
396 rconn->reversedir = sconn;
397 sconn->reversedir = rconn;
399 ref_counter_init(&(rconn->refs), &conn_refcnt);
400 ref_counter_init(&(sconn->refs), &conn_refcnt);
402 mutex_init(&(rconn->rcv_lock));
403 mutex_init(&(sconn->rcv_lock));
405 rconn->sockstate = SOCKSTATE_CONN;
406 sconn->sockstate = SOCKSTATE_CONN;
408 databuf_init(&(rconn->buf));
409 databuf_init(&(sconn->buf));
411 return rconn;
413 out_err1:
414 kmem_cache_free(conn_slab, rconn);
415 out_err0:
416 return 0;
419 static struct connlistener *get_connlistener(__be64 port)
421 struct list_head *curr = openports.next;
423 while (curr != &openports) {
424 struct bindnode *currnode = ((struct bindnode *)
425 (((char *)curr) - offsetof(struct bindnode, lh)));
426 if (currnode->port == port) {
427 BUG_ON(currnode->owner == 0);
428 return currnode->owner;
431 curr = curr->next;
434 return 0;
/* Unbind listener's port and tear it down.  Not implemented yet. */
void close_port(struct connlistener *listener)
{
#warning todo
}
442 struct connlistener *open_port(__be64 port)
445 struct bindnode *bn = 0;
446 struct connlistener *listener = 0;
448 mutex_lock(&cor_bindnodes);
449 if (get_connlistener(port) != 0)
450 goto out;
453 bn = kmem_cache_alloc(bindnode_slab, GFP_KERNEL);
454 listener = kmem_cache_alloc(connlistener_slab, GFP_KERNEL);
456 memset(bn, 0, sizeof(struct bindnode));
457 memset(listener, 0, sizeof(struct connlistener));
459 bn->owner = listener;
460 bn->port = port;
462 /* refcounter is not actually used */
463 listener->sockstate = SOCKSTATE_LISTENER;
464 listener->bn = bn;
465 mutex_init(&(listener->lock));
466 INIT_LIST_HEAD(&(listener->conn_queue));
467 init_waitqueue_head(&(listener->wait));
469 list_add_tail((struct list_head *) &(bn->lh), &openports);
471 out:
472 mutex_unlock(&cor_bindnodes);
474 return listener;
478 * rc == 0 connected
479 * rc == 2 port not open
480 * rc == 3 listener queue full
/*
 * Hand rconn to the listener bound to port and wake an accepter.
 *
 * rc == 0  connected (queued on the listener)
 * rc == 2  port not open (or listener not accepting, queue_maxlen <= 0)
 * rc == 3  listener queue full
 */
int connect_port(struct conn *rconn, __be64 port)
{
	struct connlistener *listener;
	int rc = 0;

	mutex_lock(&cor_bindnodes);

	listener = get_connlistener(port);
	if (listener == 0) {
		rc = 2;
		goto out;
	}

	mutex_lock(&(listener->lock));

	if (listener->queue_len >= listener->queue_maxlen) {
		/* maxlen <= 0 means the listener accepts nothing at all */
		if (listener->queue_maxlen <= 0)
			rc = 2;
		else
			rc = 3;
		goto out2;
	}

	/* reference for the listener's queue */
	if (ref_counter_incr(&(rconn->reversedir->refs)))
		BUG();

	conn_init_sock_target(rconn);
	conn_init_sock_source(rconn->reversedir);

	list_add_tail(&(rconn->reversedir->source.sock.cl_list),
			&(listener->conn_queue));
	listener->queue_len++;
	wake_up_interruptible(&(listener->wait));

out2:
	mutex_unlock(&(listener->lock));

out:
	mutex_unlock(&cor_bindnodes);
	return rc;
}
527 * rc == 0 connected
528 * rc == 2 addrtype not found
529 * rc == 3 addr not found
530 * rc == 4 ==> connid allocation failed
int connect_neigh(struct conn *rconn,
		__u16 addrtypelen, __u8 *addrtype,
		__u16 addrlen, __u8 *addr)
{
	struct neighbor *nb = find_neigh(addrtypelen, addrtype, addrlen, addr);
	if (0 == nb)
		return 3;
	if (conn_init_out(rconn, nb))
		/*
		 * NOTE(review): if find_neigh() returned nb with a reference
		 * taken, that reference leaks on this path -- verify against
		 * find_neigh()'s contract.
		 */
		return 4;

	ref_counter_incr(&(nb->refs));
	send_connect_nb(rconn);

	return 0;
}
/*
 * Mark both directions of a connection as reset (required before
 * free_conn may run).  Actual teardown/notification is not implemented
 * yet.
 */
void reset_conn(struct conn *conn)
{
	conn->isreset = 1;
	conn->reversedir->isreset = 1;

#warning todo
}
556 static int matches_connid_in(void *htentry, void *searcheditem)
558 struct conn *conn = (struct conn *) htentry;
559 __u32 conn_id = *((__u32 *) searcheditem);
561 BUG_ON(conn->sourcetype != SOURCE_IN);
563 return conn->source.in.conn_id == conn_id;
566 static int __init cor_common_init(void)
568 int rc;
570 struct conn c;
572 printk(KERN_ERR "sizeof conn: %d", sizeof(c));
573 printk(KERN_ERR " conn.source: %d", sizeof(c.source));
574 printk(KERN_ERR " conn.target: %d", sizeof(c.target));
575 printk(KERN_ERR " conn.target.out: %d", sizeof(c.target.out));
576 printk(KERN_ERR " conn.target.buf: %d", sizeof(c.buf));
578 printk(KERN_ERR " mutex: %d", sizeof(struct mutex));
579 printk(KERN_ERR " spinlock: %d", sizeof(spinlock_t));
580 printk(KERN_ERR " kref: %d", sizeof(struct kref));
583 conn_slab = kmem_cache_create("cor_conn", sizeof(struct conn), 8, 0, 0);
584 htable_init(&connid_table, matches_connid_in,
585 offsetof(struct conn, source.in.htab_entry),
586 offsetof(struct conn, refs));
588 bindnode_slab = kmem_cache_create("cor_bindnode",
589 sizeof(struct bindnode), 8, 0, 0);
590 connlistener_slab = kmem_cache_create("cor_connlistener",
591 sizeof(struct connlistener), 8, 0, 0);
593 forward_init();
595 rc = cor_snd_init();
596 if (rc != 0)
597 return rc;
599 rc = cor_neighbor_init();
600 if (rc != 0)
601 return rc;
603 rc = cor_rcv_init();
604 if (rc != 0)
605 return rc;
607 return 0;
/* Module entry point and license (GPL required for the symbols used) */
module_init(cor_common_init);
MODULE_LICENSE("GPL");