/*
 * Connection oriented routing
 * Copyright (C) 2007-2008 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <asm/uaccess.h>

#include "cor.h" /* project-local header (struct conn, struct connlistener, ...); name assumed */

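/*
 * sock->sk is used to store either a struct connlistener * (bound, listening
 * sockets) or a struct conn * (connected sockets).  Both structures
 * apparently carry a sockstate field at the same offset, which is what the
 * two helpers below check before the pointer is interpreted either way.
 */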
static int check_connlistener_state(struct connlistener *cl)
{
	if (likely(cl != 0 && cl->sockstate == SOCKSTATE_LISTENER))
		return 0;

	return -EINVAL;
}

static int check_conn_state(struct conn *conn)
{
	if (likely(conn != 0 && conn->sockstate == SOCKSTATE_CONN))
		return 0;

	return -EINVAL;
}

int cor_socket_release(struct socket *sock)
{
	struct connlistener *cl = (struct connlistener *) sock->sk;
	struct conn *rconn = (struct conn *) sock->sk;

	if (cl->sockstate == SOCKSTATE_LISTENER) {
		/* ... */
	} else if (rconn->sockstate == SOCKSTATE_CONN) {
		/* ... */
	}

	return 0;
}

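/*
 * Binding attaches a listener to a cor port: the generic sockaddr is
 * reinterpreted as struct cor_sockaddr and open_port() presumably allocates
 * the connlistener that is then stored in sock->sk.
 */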
int cor_socket_bind(struct socket *sock, struct sockaddr *myaddr,
		int sockaddr_len)
{
	struct connlistener *listener;
	struct cor_sockaddr *addr = (struct cor_sockaddr *) myaddr;

	if (sockaddr_len < sizeof(struct cor_sockaddr))
		return -EINVAL;

	if (addr->type != SOCKADDRTYPE_PORT)
		return -EINVAL;

	listener = open_port(addr->addr.port);
	if (listener == 0)
		return -EADDRINUSE;

	sock->sk = (struct sock *) listener;

	return 0;
}

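/*
 * connect() does not contact the network here; it only allocates a conn,
 * initialises it as a socket source with its reverse direction as a socket
 * target, and marks the socket SS_CONNECTED.
 */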
int cor_socket_connect(struct socket *sock, struct sockaddr *vaddr,
		int sockaddr_len, int flags)
{
	struct conn *conn_rcv;

	conn_rcv = alloc_conn(GFP_KERNEL);
	if (unlikely(conn_rcv == 0))
		return -ENOMEM;

	conn_init_sock_source(conn_rcv);
	conn_init_sock_target(conn_rcv->reversedir);

	sock->sk = (struct sock *) conn_rcv;
	sock->state = SS_CONNECTED;

	return 0;
}

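/*
 * Wait predicate for cor_socket_accept(): it has to take the listener lock
 * itself because wait_event_interruptible() re-evaluates it unlocked.
 */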
static int cor_rdytoaccept(struct connlistener *cl)
{
	int rc;

	mutex_lock(&(cl->lock));
	rc = (list_empty(&(cl->conn_queue)) == 0);
	mutex_unlock(&(cl->lock));

	return rc;
}

const struct proto_ops cor_proto_ops;

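/*
 * accept() blocks until a conn is queued on the listener, detaches the first
 * entry of conn_queue and hands it to the new socket together with the
 * cor_proto_ops table declared above.
 */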
int cor_socket_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct connlistener *cl = (struct connlistener *) sock->sk;

	int rc = check_connlistener_state(cl);

	struct conn *newconn;

	if (unlikely(rc != 0))
		return rc;

	mutex_lock(&(cl->lock));

	if (unlikely(cl->queue_maxlen <= 0)) {
		mutex_unlock(&(cl->lock));
		return -EINVAL;
	}

	while (list_empty(&(cl->conn_queue))) {
		mutex_unlock(&(cl->lock));
		if (wait_event_interruptible(cl->wait, cor_rdytoaccept(cl))) {
			return -ERESTARTSYS;
		}
		mutex_lock(&(cl->lock));
	}

	newconn = container_of(cl->conn_queue.next, struct conn,
			source.sock.cl_list);

	BUG_ON(newconn->sourcetype != SOURCE_SOCK);

	list_del(cl->conn_queue.next);

	mutex_unlock(&(cl->lock));

	newsock->ops = &cor_proto_ops;
	newsock->sk = (struct sock *) newconn;
	newsock->state = SS_CONNECTED;

	return 0;
}

int cor_socket_listen(struct socket *sock, int len)
{
	struct connlistener *cl = (struct connlistener *) sock->sk;

	int rc = check_connlistener_state(cl);

	if (unlikely(rc != 0))
		return rc;

	mutex_lock(&(cl->lock));
	cl->queue_maxlen = len;
	mutex_unlock(&(cl->lock));

	return 0;
}

int cor_socket_shutdown(struct socket *sock, int flags)
{
	return 0;
}

int cor_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -ENOIOCTLCMD;
}

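/*
 * TARGET_SOCK path: data written to one cor socket is pushed into the ring
 * buffer of the receiving conn.  cor_readytowriterbuf() is the wait predicate
 * used when that buffer is full and the sender is blocking.
 */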
static int cor_readytowriterbuf(struct conn *rconn)
{
	int rc;

	mutex_lock(&(rconn->target.sock.lock));
	rc = ringbuffer_maypush(&(rconn->target.sock.rbuf));
	mutex_unlock(&(rconn->target.sock.lock));

	return rc;
}

static int cor_sendmsg_socket(struct conn *rconn, __user char *msg, int len,
		int blocking)
{
	int ret = 0;

	BUG_ON(rconn->targettype != TARGET_SOCK);

	do {
		mutex_lock(&(rconn->target.sock.lock));
		ret = ringbuffer_put(&(rconn->target.sock.rbuf), msg, len, 1);
		mutex_unlock(&(rconn->target.sock.lock));

		if (ret == 0 && !blocking) {
			ret = -EAGAIN;
		} else if (ret == 0 && blocking) {
			if (wait_event_interruptible(rconn->source.sock.wait,
					cor_readytowriterbuf(rconn)))
				return -ERESTARTSYS;
		}

		wake_up_interruptible(&(rconn->target.sock.wait));
	} while (blocking && ret <= 0);

	return ret;
}

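/*
 * cor_flush_sockbuf() deals with a partially filled outgoing skb: if nothing
 * is in flight, the bytes already written to it are copied out, the skb is
 * dropped, the sequence number is rewound by one mss and the data is handed
 * to send_conn_flushdata(), presumably to be resent as a smaller packet.
 */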
void cor_flush_sockbuf(struct conn *rconn)
{
	int len;
	char *buf;

	if (rconn->target.out.skb_written == 0)
		return;

	if (atomic_read(&(rconn->target.out.inflight_packets)) != 0)
		return;

	len = rconn->target.out.skb_written;
	buf = kmalloc(len, GFP_KERNEL);
	if (unlikely(buf == 0))
		return;

	memcpy(buf, rconn->target.out.skb->tail - len, len);

	rconn->target.out.seqno -= rconn->target.out.skb_mss;
	kfree_skb(rconn->target.out.skb);
	rconn->target.out.skb = 0;
	rconn->target.out.skb_written = 0;

	send_conn_flushdata(rconn, buf, len);
}

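/*
 * TARGET_OUT path: user data is packed into mss-sized skbs and a packet is
 * handed to send_packet() once it has no tailroom left; cor_flush_sockbuf()
 * above pushes out whatever remains.
 */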
static int cor_sendmsg_out(struct conn *rconn, __user char *msg, int len, int blocking)
{
	int targetmss = mss(rconn->target.out.nb);
	int copy;
	int rc;

	BUG_ON(rconn->targettype != TARGET_OUT);

	if (rconn->target.out.skb == 0) {
		struct sk_buff *skb = create_packet_conn(rconn, targetmss,
				GFP_KERNEL); /* gfp argument assumed */

		if (unlikely(skb == 0))
			return -ENOMEM;

		rconn->target.out.skb = skb;
		rconn->target.out.skb_written = 0;
		rconn->target.out.skb_mss = targetmss;
	}

	copy = skb_tailroom(rconn->target.out.skb);
	if (copy > len)
		copy = len;

	rc = skb_add_data(rconn->target.out.skb, msg, copy);
	if (unlikely(rc != 0))
		return -EFAULT;

	rconn->target.out.skb_written += copy;

	/* push the packet out once it is full */
	if (skb_tailroom(rconn->target.out.skb) == 0) {
		send_packet(rconn->target.out.skb, rconn->target.out.nb);
		rconn->target.out.skb = 0;
		rconn->target.out.skb_written = 0;
	}

	return copy;
}

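/*
 * sendmsg() walks the iovec and dispatches on the target type of the conn:
 * TARGET_UNCONNECTED hands single bytes to what is presumably the connection
 * setup parser, TARGET_SOCK copies into the peer's ring buffer and TARGET_OUT
 * packetises towards a neighbor.  Any other target type fails with
 * -ECONNRESET.
 */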
int cor_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t total_len)
{
	struct conn *rconn = (struct conn *) sock->sk;

	int copied = 0;
	int iovidx = 0;
	int iovread = 0;

	int rc = check_conn_state(rconn);

	if (unlikely(rc != 0))
		return rc;

#warning todo nonblocking io
	mutex_lock(&(rconn->source.sock.lock));

	while (iovidx < msg->msg_iovlen) {
		struct iovec *iov = msg->msg_iov + iovidx;
		__user char *userbuf = iov->iov_base + iovread;
		int len = iov->iov_len - iovread;

		int copy = -ECONNRESET;

		if (rconn->targettype == TARGET_UNCONNECTED) {
			char buf;
			long cpy = copy_from_user(&buf, userbuf, 1);

			if (unlikely(cpy != 0)) {
				copy = -EFAULT;
			} else {
#warning todo check if receive buffer is empty, otherwise there may be a deadlock
				/* ... */
				data.type = TYPE_BUF;
				data.data.buf.buf = &buf;
				data.data.buf.len = 1;
				/* ... */
				copy = 1 - data.data.buf.len;
			}
		} else if (rconn->targettype == TARGET_SOCK) {
			copy = cor_sendmsg_socket(rconn, userbuf, len,
					1); /* blocking; see #warning above */
		} else if (rconn->targettype == TARGET_OUT) {
			copy = cor_sendmsg_out(rconn, userbuf, len,
					1);
		}

		if (copy < 0) {
			if (copied == 0)
				copied = copy;
			break;
		}

		copied += copy;
		iovread += copy;

		if (iov->iov_len == iovread) {
			iovidx++;
			iovread = 0;
		}
	}

	mutex_unlock(&(rconn->source.sock.lock));

	return copied;
}

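/*
 * Receive side: data for this socket arrives on the reverse conn, either in
 * its ring buffer (written by a local SOURCE_SOCK sender) or as queued skbs
 * (presumably data coming in from the network).  cor_readytoread() is the
 * wait predicate covering both cases.
 */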
static int cor_readytoread(struct conn *sconn)
{
	int rc = 0;

	mutex_lock(&(sconn->target.sock.lock));

	if (sconn->target.sock.queue.next != 0)
		rc = 1;
	else if (ringbuffer_maypull(&(sconn->target.sock.rbuf)))
		rc = 1;

	mutex_unlock(&(sconn->target.sock.lock));

	return rc;
}

static int cor_recvmsg_skb(struct conn *sconn, __user char *msg,
		unsigned int len)
{
	int rc;

	struct sk_buff *skb = skb_dequeue(&(sconn->target.sock.queue));

	len = min(len, skb->len);
	rc = copy_to_user(msg, skb->data, len);

	cor_pull_skb(skb, len);

	if (skb->len == 0)
		kfree_skb(skb);
	else
		skb_queue_head(&(sconn->target.sock.queue), skb);

	return rc == 0 ? 0 : -EFAULT;
}

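/*
 * recvmsg() drains the ring buffer first, then the skb queue, and blocks via
 * cor_readytoread() unless MSG_DONTWAIT is set.  After the lock is dropped a
 * sender blocked in cor_sendmsg_socket() is woken, since ring buffer space
 * may have been freed.
 */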
int cor_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t total_len, int flags)
{
	struct conn *rconn = (struct conn *) sock->sk;
	struct conn *sconn = rconn->reversedir;

	int copied = 0;
	int iovidx = 0;
	int iovread = 0;

	int rc = check_conn_state(rconn);

	if (unlikely(rc != 0))
		return rc;

	mutex_lock(&(sconn->target.sock.lock));

	while (iovidx < msg->msg_iovlen) {
		struct iovec *iov = msg->msg_iov + iovidx;
		__user char *msg = iov->iov_base + iovread; /* shadows msghdr *msg */
		unsigned int len = iov->iov_len - iovread;

		rc = 0;

		if (ringbuffer_maypull(&(sconn->target.sock.rbuf)))
			rc = ringbuffer_pull(&(sconn->target.sock.rbuf), msg, len, 1);
		else if (sconn->target.sock.queue.next != 0)
			rc = cor_recvmsg_skb(sconn, msg, len);

		/* ... */

		if (rc < 0 && copied == 0) {
			copied = rc;
			break;
		}

		/* ... */

		if (flags & MSG_DONTWAIT) {
			if (copied == 0)
				copied = -EAGAIN;
			break;
		}

		mutex_unlock(&(sconn->target.sock.lock));
		if (wait_event_interruptible(sconn->target.sock.wait,
				cor_readytoread(sconn)))
			copied = -ERESTARTSYS;
		mutex_lock(&(sconn->target.sock.lock));

		if (copied != 0)
			break;
	}

	mutex_unlock(&(sconn->target.sock.lock));

	/* freed ring buffer space may unblock a local sender */
	if (sconn->sourcetype == SOURCE_SOCK)
		wake_up_interruptible(&(sconn->source.sock.wait));

	return copied;
}

const struct proto_ops cor_proto_ops = {
	.family = PF_COR, /* protocol family constant; name assumed */
	.owner = THIS_MODULE,
	.release = cor_socket_release,
	.bind = cor_socket_bind,
	.connect = cor_socket_connect,
	.accept = cor_socket_accept,
	.listen = cor_socket_listen,
	.shutdown = cor_socket_shutdown,
	.ioctl = cor_ioctl,
	.sendmsg = cor_sendmsg,
	.recvmsg = cor_recvmsg
};

int cor_createsock(struct net *net, struct socket *sock, int protocol)
{
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;
	sock->ops = &cor_proto_ops;

	return 0;
}

static struct net_proto_family cor_net_proto_family = {
	.family = PF_COR,
	.create = cor_createsock,
	.owner = THIS_MODULE
};

static int __init cor_sock_init(void)
{
	sock_register(&cor_net_proto_family);
	return 0;
}

module_init(cor_sock_init);

MODULE_LICENSE("GPL");

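/*
 * Rough userspace sketch of how the listener side of this family is used,
 * going by the proto_ops above.  PF_COR and the exact layout of struct
 * cor_sockaddr are assumptions inferred from cor_socket_bind(), not
 * definitions from this file:
 *
 *	int s = socket(PF_COR, SOCK_STREAM, 0);	// protocol 0
 *
 *	struct cor_sockaddr addr;
 *	memset(&addr, 0, sizeof(addr));
 *	addr.type = SOCKADDRTYPE_PORT;
 *	addr.addr.port = 1234;
 *
 *	bind(s, (struct sockaddr *) &addr, sizeof(addr));
 *	listen(s, 16);				// sets cl->queue_maxlen
 *	int c = accept(s, NULL, NULL);		// blocks on cl->conn_queue
 *
 * On the connecting side, connect() only allocates the conn pair; the
 * connection is then apparently set up by data written through cor_sendmsg()
 * while the target is still TARGET_UNCONNECTED.
 */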