response sending
net/cor/sock.c [cor_2_6_31.git]
/*
 * Connection oriented routing
 * Copyright (C) 2007-2008 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
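/*
 * This file implements the userspace-facing socket API of COR: the
 * PF_COR proto_ops (bind/listen/accept/connect/sendmsg/recvmsg and
 * friends) plus the net_proto_family hook that creates the sockets.
 */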
#include <net/sock.h>
#include <linux/net.h>
#include <asm/uaccess.h>

#include "cor.h"
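/*
 * sock->sk carries either a struct connlistener or a struct conn;
 * the common sockstate member tells them apart.  These helpers
 * validate the cast before it is relied upon.
 */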
static int check_connlistener_state(struct connlistener *cl)
{
        if (likely(cl != 0 && cl->sockstate == SOCKSTATE_LISTENER))
                return 0;

        return 1;
}
static int check_conn_state(struct conn *conn)
{
        if (likely(conn != 0 && conn->sockstate == SOCKSTATE_CONN))
                return 0;

        return 1;
}
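/*
 * release() tears down whatever the socket references: a listener
 * gives its port back, a connection is reset.  Any other state is
 * a bug.
 */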
int cor_socket_release(struct socket *sock)
{
        struct connlistener *cl = (struct connlistener *) sock->sk;
        struct conn *rconn = (struct conn *) sock->sk;

        if (sock->sk == 0)
                return 0;

        if (cl->sockstate == SOCKSTATE_LISTENER) {
                close_port(cl);
        } else if (rconn->sockstate == SOCKSTATE_CONN) {
                reset_conn(rconn);
        } else {
                BUG();
        }

        return 0;
}
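/*
 * bind() only supports port addresses.  open_port() returns the
 * listener representing the bound port, which is stored in sock->sk
 * in place of a struct sock.
 */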
int cor_socket_bind(struct socket *sock, struct sockaddr *myaddr,
                int sockaddr_len)
{
        struct connlistener *listener;
        struct cor_sockaddr *addr = (struct cor_sockaddr *) myaddr;

        if (sock->sk != 0)
                return -EINVAL;

        if (sockaddr_len < sizeof(struct cor_sockaddr))
                return -EINVAL;

        if (addr->type != SOCKADDRTYPE_PORT)
                return -EINVAL;

        listener = open_port(addr->addr.port);

        if (listener == 0)
                return -EADDRINUSE;

        sock->sk = (struct sock *) listener;

        return 0;
}
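/*
 * connect() ignores the passed address: it just allocates a fresh
 * conn pair.  Connection setup appears to happen in-band via the
 * data written afterwards (see the TARGET_UNCONNECTED case in
 * cor_sendmsg()).
 */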
int cor_socket_connect(struct socket *sock, struct sockaddr *vaddr,
                int sockaddr_len, int flags)
{
        struct conn *conn_rcv;

        if (sock->sk != 0)
                return -EISCONN;

        conn_rcv = alloc_conn(GFP_KERNEL);

        if (0 == conn_rcv)
                return -ENOMEM;

        conn_init_sock_source(conn_rcv);
        conn_init_sock_target(conn_rcv->reversedir);

        sock->sk = (struct sock *) conn_rcv;
        sock->state = SS_CONNECTED;

        return 0;
}
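/*
 * accept() blocks until an incoming conn is queued on the listener,
 * then hands it to the new socket.  cor_rdytoaccept() is the wakeup
 * condition; the queue is rechecked under the lock after every wait.
 */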
static int cor_rdytoaccept(struct connlistener *cl)
{
        int rc;
        mutex_lock(&(cl->lock));
        rc = (list_empty(&(cl->conn_queue)) == 0);
        mutex_unlock(&(cl->lock));
        return rc;
}
const struct proto_ops cor_proto_ops;
int cor_socket_accept(struct socket *sock, struct socket *newsock, int flags)
{
        struct connlistener *cl = (struct connlistener *) sock->sk;
        int rc = check_connlistener_state(cl);
        struct conn *newconn;

        if (unlikely(rc))
                return -EINVAL;

        mutex_lock(&(cl->lock));

        if (unlikely(cl->queue_maxlen <= 0)) {
                mutex_unlock(&(cl->lock));
                return -EINVAL;
        }

        while (list_empty(&(cl->conn_queue))) {
                mutex_unlock(&(cl->lock));
                if (wait_event_interruptible(cl->wait, cor_rdytoaccept(cl))) {
                        return -ERESTARTSYS;
                }
                mutex_lock(&(cl->lock));
        }

        newconn = container_of(cl->conn_queue.next, struct conn,
                        source.sock.cl_list);

        BUG_ON(newconn->sourcetype != SOURCE_SOCK);

        list_del(cl->conn_queue.next);

        cl->queue_len--;

        mutex_unlock(&(cl->lock));

        newsock->ops = &cor_proto_ops;
        newsock->sk = (struct sock *) newconn;
        newsock->state = SS_CONNECTED;

        return 0;
}
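/*
 * listen() only records the backlog; the conn_queue itself is filled
 * elsewhere, by the code that delivers incoming connections to the
 * listener.
 */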
int cor_socket_listen(struct socket *sock, int len)
{
        struct connlistener *cl = (struct connlistener *) sock->sk;
        int rc = check_connlistener_state(cl);

        if (unlikely(rc))
                return -EOPNOTSUPP;

        mutex_lock(&(cl->lock));
        cl->queue_maxlen = len;
        mutex_unlock(&(cl->lock));

        return 0;
}
int cor_socket_shutdown(struct socket *sock, int flags)
{
        return -ENOTSUPP;
}

int cor_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
        return -ENOIOCTLCMD;
}
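/*
 * Send path for conns whose target is another local socket: data goes
 * through a ring buffer.  A blocking sender waits until the reader
 * has made room; cor_readytowriterbuf() is the wait condition.
 */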
static int cor_readytowriterbuf(struct conn *rconn)
{
        int rc = 0;
        mutex_lock(&(rconn->target.sock.lock));
        rc = ringbuffer_maypush(&(rconn->target.sock.rbuf));
        mutex_unlock(&(rconn->target.sock.lock));
        return rc;
}
static int cor_sendmsg_socket(struct conn *rconn, __user char *msg, int len,
                int blocking)
{
        int ret;

        do {
                BUG_ON(rconn->targettype != TARGET_SOCK);
                mutex_lock(&(rconn->target.sock.lock));
                ret = ringbuffer_put(&(rconn->target.sock.rbuf), msg, len, 1);
                mutex_unlock(&(rconn->target.sock.lock));

                if (ret < 0)
                        return ret;

                if (ret == 0 && !blocking) {
                        ret = -EAGAIN;
                } else if (ret == 0 && blocking) {
                        /*
                         * Return directly on a signal; merely setting
                         * ret and looping would busy-spin, because the
                         * wait returns immediately while the signal is
                         * still pending.
                         */
                        if (wait_event_interruptible(rconn->source.sock.wait,
                                        cor_readytowriterbuf(rconn)))
                                return -ERESTARTSYS;
                } else {
                        wake_up_interruptible(&(rconn->target.sock.wait));
                }
        } while (blocking && ret <= 0);

        return ret;
}
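/*
 * Flush a partially filled outgoing skb: the pending bytes are copied
 * out, the skb is dropped and the sequence number rewound, and the
 * data is resent through the flush path instead.
 */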
void cor_flush_sockbuf(struct conn *rconn)
{
        char *buf;
        __u32 len;

        if (rconn->target.out.skb_written == 0)
                return;

        if (atomic_read(&(rconn->target.out.inflight_packets)) != 0)
                return;

        len = rconn->target.out.skb_written;
        buf = kmalloc(len, GFP_KERNEL);
        /* on allocation failure keep the data in the skb; the flush
         * can be retried later */
        if (buf == 0)
                return;
        memcpy(buf, rconn->target.out.skb->tail - len, len);

        rconn->target.out.seqno -= rconn->target.out.skb_mss;
        kfree_skb(rconn->target.out.skb);
        rconn->target.out.skb = 0;
        rconn->target.out.skb_written = 0;

        send_conn_flushdata(rconn, buf, len);
}
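/*
 * Send path for conns routed to a neighbor: data is packed into skbs
 * sized to the target's MSS.  A full skb is sent immediately; a
 * partial one is kept in rconn->target.out.skb until more data
 * arrives or cor_flush_sockbuf() pushes it out.
 */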
static int cor_sendmsg_out(struct conn *rconn, __user char *msg, int len,
                int blocking)
{
        int send = 1;
        int targetmss = mss(rconn->target.out.nb);
        int copy;
        int rc;

        BUG_ON(rconn->targettype != TARGET_OUT);

        if (rconn->target.out.skb == 0) {
                struct sk_buff *skb = create_packet_conn(rconn, targetmss,
                                GFP_KERNEL);
                if (skb == 0)
                        return -ENOMEM;
                rconn->target.out.skb = skb;
                rconn->target.out.skb_written = 0;
                rconn->target.out.skb_mss = targetmss;
        }

        copy = skb_tailroom(rconn->target.out.skb);
        if (len < copy) {
                copy = len;
                send = 0;
        }

        rc = skb_add_data(rconn->target.out.skb, msg, copy);

        /*
         * skb_add_data() returns -EFAULT when the copy from userspace
         * faults; pass that to the caller instead of BUGing on a
         * negative value.
         */
        if (rc != 0)
                return rc;

        rconn->target.out.skb_written += copy;

        if (send) {
                send_packet(rconn->target.out.skb, rconn->target.out.nb);
                rconn->target.out.skb = 0;
                rconn->target.out.skb_written = 0;
        }

        return copy;
}
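/*
 * sendmsg() walks the iovec and dispatches on the conn's target type:
 * unconnected conns feed the data one byte at a time into the command
 * parser (parse()), connected ones go through the socket or out paths
 * above.  Blocking is only allowed before any byte has been accepted,
 * so a short write is returned instead of sleeping midway.
 */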
int cor_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
                size_t total_len)
{
        int copied = 0;
        struct conn *rconn = (struct conn *) sock->sk;
        int iovidx = 0;
        int iovread = 0;
        int rc = check_conn_state(rconn);

        if (unlikely(rc))
                return -EBADF;

#warning todo nonblocking io
        mutex_lock(&(rconn->source.sock.lock));

        while (iovidx < msg->msg_iovlen) {
                struct iovec *iov = msg->msg_iov + iovidx;
                __user char *userbuf = iov->iov_base + iovread;
                int len = iov->iov_len - iovread;
                int copy = -ECONNRESET;

                if (rconn->targettype == TARGET_UNCONNECTED) {
                        struct data data;
                        char buf[1];
                        long cpy = copy_from_user(buf, userbuf, 1);
                        if (cpy != 0) {
                                copy = -EFAULT;
                                goto out;
                        }
#warning todo check if receive buffer is empty, otherwise there may be a deadlock
                        data.type = TYPE_BUF;
                        data.data.buf.buf = buf;
                        data.data.buf.len = 1;
                        parse(&data, rconn);
                        copy = 1 - data.data.buf.len;
                } else if (rconn->targettype == TARGET_SOCK) {
                        copy = cor_sendmsg_socket(rconn, userbuf, len,
                                        copied == 0);
                } else if (rconn->targettype == TARGET_OUT) {
                        copy = cor_sendmsg_out(rconn, userbuf, len,
                                        copied == 0);
                } else {
                        BUG();
                }

out:
                if (copy < 0) {
                        if (copied == 0)
                                copied = copy;
                        break;
                }

                copied += copy;
                iovread += copy;

                if (iov->iov_len == iovread) {
                        iovidx++;
                        iovread = 0;
                }
        }

        mutex_unlock(&(rconn->source.sock.lock));

        return copied;
}
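/*
 * Receive path.  Incoming data for a socket conn can sit in the ring
 * buffer or in a queue of skbs; cor_readytoread() checks both, and
 * cor_recvmsg() drains the ring buffer first.
 */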
static int cor_readytoread(struct conn *sconn)
{
        int rc = 0;
        mutex_lock(&(sconn->target.sock.lock));

        if (sconn->target.sock.queue.next != 0)
                rc = 1;
        else if (ringbuffer_maypull(&(sconn->target.sock.rbuf)))
                rc = 1;

        mutex_unlock(&(sconn->target.sock.lock));

        return rc;
}
static int cor_recvmsg_skb(struct conn *sconn, __user char *msg,
                unsigned int len)
{
        int rc;
        struct sk_buff *skb = skb_dequeue(&(sconn->target.sock.queue));

        BUG_ON(skb == 0);

        len = min(len, skb->len);
        rc = copy_to_user(msg, skb->data, len);

        if (rc == 0)
                cor_pull_skb(skb, len);

        if (skb->len != 0)
                skb_queue_head(&(sconn->target.sock.queue), skb);

        /*
         * copy_to_user() returns the number of bytes it could not
         * copy; a fault is a plain -EFAULT, not a BUG.  On success
         * the caller expects the number of bytes consumed.
         */
        return rc == 0 ? len : -EFAULT;
}
int cor_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
                size_t total_len, int flags)
{
        struct conn *rconn = (struct conn *) sock->sk;
        struct conn *sconn = rconn->reversedir;
        /* must be signed, copied also carries error codes */
        ssize_t copied = 0;
        int iovidx = 0;
        int iovread = 0;
        int rc = check_conn_state(rconn);

        if (unlikely(rc))
                return -EBADF;

        BUG_ON(sconn == 0);

        mutex_lock(&(sconn->target.sock.lock));

        while (iovidx < msg->msg_iovlen) {
                int rc2;

                struct iovec *iov = msg->msg_iov + iovidx;
                __user char *userbuf = iov->iov_base + iovread;
                unsigned int len = iov->iov_len - iovread;

                if (len == 0) {
                        iovidx++;
                        iovread = 0;
                        continue;
                }

                if (ringbuffer_maypull(&(sconn->target.sock.rbuf)))
                        rc2 = ringbuffer_pull(&(sconn->target.sock.rbuf),
                                        userbuf, len, 1);
                else if (sconn->target.sock.queue.next != 0)
                        rc2 = cor_recvmsg_skb(sconn, userbuf, len);
                else
                        rc2 = 0;

                if (rc2 < 0) {
                        if (copied == 0)
                                copied = rc2;
                        break;
                }

                if (rc2 > 0) {
                        /* advance within the iovec, or a short read
                         * would overwrite the same user buffer */
                        copied += rc2;
                        iovread += rc2;
                        if (iov->iov_len == iovread) {
                                iovidx++;
                                iovread = 0;
                        }
                        continue;
                }

                /* no data left; return what we already have */
                if (copied != 0)
                        break;

                if (flags & MSG_DONTWAIT) {
                        copied = -EAGAIN;
                        break;
                }

                mutex_unlock(&(sconn->target.sock.lock));
                if (wait_event_interruptible(sconn->target.sock.wait,
                                cor_readytoread(sconn))) {
                        mutex_lock(&(sconn->target.sock.lock));
                        copied = -ERESTARTSYS;
                        break;
                }
                mutex_lock(&(sconn->target.sock.lock));
        }

        mutex_unlock(&(sconn->target.sock.lock));

        if (sconn->sourcetype == SOURCE_SOCK)
                wake_up_interruptible(&(sconn->target.sock.wait));

        return copied;
}
const struct proto_ops cor_proto_ops = {
        .family = PF_COR,
        .owner = THIS_MODULE,
        .release = cor_socket_release,
        .bind = cor_socket_bind,
        .connect = cor_socket_connect,
        .accept = cor_socket_accept,
        .listen = cor_socket_listen,
        .shutdown = cor_socket_shutdown,
        .ioctl = cor_ioctl,
        .sendmsg = cor_sendmsg,
        .recvmsg = cor_recvmsg
};

/* unimplemented proto_ops:
socketpair
getname
poll
compat_ioctl
setsockopt
getsockopt
compat_setsockopt
compat_getsockopt
mmap
sendpage
splice_read */
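/*
 * For illustration only: a minimal sketch of how a userspace program
 * might drive this API, assuming the PF_COR constant and the struct
 * cor_sockaddr layout (type + port, as used by cor_socket_bind()
 * above) are exported to userspace by a cor header.  Not part of
 * this module.
 *
 *      int lfd = socket(PF_COR, SOCK_STREAM, 0);
 *      struct cor_sockaddr addr;
 *      addr.type = SOCKADDRTYPE_PORT;
 *      addr.addr.port = 1234;
 *      bind(lfd, (struct sockaddr *) &addr, sizeof(addr));
 *      listen(lfd, 8);
 *      int cfd = accept(lfd, 0, 0);    // blocks in cor_socket_accept()
 *      char buf[128];
 *      ssize_t n = recv(cfd, buf, sizeof(buf), 0);
 */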
int cor_createsock(struct net *net, struct socket *sock, int protocol)
{
        if (0 != protocol)
                return -EPROTONOSUPPORT;

        sock->state = SS_UNCONNECTED;
        sock->ops = &cor_proto_ops;

        return 0;
}
static struct net_proto_family cor_net_proto_family = {
        .family = PF_COR,
        .create = cor_createsock,
        .owner = THIS_MODULE
};

static int __init cor_sock_init(void)
{
        /* propagate registration failures instead of ignoring them */
        return sock_register(&cor_net_proto_family);
}

module_init(cor_sock_init);

MODULE_LICENSE("GPL");