hashtable bugfix, neighbor init bugfix, create_packet bugfix
[cor_2_6_31.git] / net / cor / sock.c
blobf23e5a4f164981985a6fad5a6d20af29a391d00c
1 /*
2 * Connection oriented routing
3 * Copyright (C) 2007-2008 Michael Blizek
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
21 #include <net/sock.h>
22 #include <linux/net.h>
23 #include <asm/uaccess.h>
25 #include "cor.h"
27 static int check_connlistener_state(struct connlistener *cl)
29 if (likely(cl != 0 && cl->sockstate == SOCKSTATE_LISTENER))
30 return 0;
32 return 1;
35 static int check_conn_state(struct conn *conn)
37 if (likely(conn != 0 && conn->sockstate == SOCKSTATE_CONN))
38 return 0;
40 return 1;
43 int cor_socket_release(struct socket *sock)
45 struct connlistener *cl = (struct connlistener *) sock->sk;
46 struct conn *rconn = (struct conn *) sock->sk;
48 if (sock->sk == 0)
49 return 0;
51 if (cl->sockstate == SOCKSTATE_LISTENER) {
52 close_port(cl);
53 } else if (rconn->sockstate == SOCKSTATE_CONN) {
54 reset_conn(rconn);
55 } else {
56 BUG();
59 return 0;
62 int cor_socket_bind(struct socket *sock, struct sockaddr *myaddr,
63 int sockaddr_len)
65 struct connlistener *listener;
66 struct cor_sockaddr *addr = (struct cor_sockaddr *) myaddr;
68 if (sock->sk != 0)
69 return -EINVAL;
71 if (sockaddr_len < sizeof(struct cor_sockaddr))
72 return -EINVAL;
74 if (addr->type != SOCKADDRTYPE_PORT)
75 return -EINVAL;
77 listener = open_port(addr->addr.port);
79 if (listener == 0)
80 return -EADDRINUSE;
82 sock->sk = (struct sock *) listener;
84 return 0;
87 int cor_socket_connect(struct socket *sock, struct sockaddr *vaddr,
88 int sockaddr_len, int flags)
90 struct conn *conn_rcv;
92 if (sock->sk != 0)
93 return -EISCONN;
95 conn_rcv = alloc_conn(GFP_KERNEL);
97 if (0 == conn_rcv)
98 return -ENOMEM;
100 conn_init_sock_source(conn_rcv);
101 conn_init_sock_target(conn_rcv->reversedir);
103 sock->sk = (struct sock *) conn_rcv;
104 sock->state = SS_CONNECTED;
106 return 0;
109 static int cor_rdytoaccept(struct connlistener *cl)
111 int rc;
112 mutex_lock(&(cl->lock));
113 rc = (list_empty(&(cl->conn_queue)) == 0);
114 mutex_unlock(&(cl->lock));
115 return rc;
118 const struct proto_ops cor_proto_ops;
/*
 * Accept a queued incoming connection on a listener socket.
 *
 * Blocks (interruptibly) until the listener's conn_queue is non-empty,
 * then detaches the first queued conn and attaches it to newsock.
 *
 * Returns 0 on success, -EINVAL if the socket is not a listener or has
 * no backlog configured, -ERESTARTSYS if interrupted by a signal.
 */
int cor_socket_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct connlistener *cl = (struct connlistener *) sock->sk;

	int rc = check_connlistener_state(cl);

	struct conn *newconn;

	if (unlikely(rc))
		return -EINVAL;

	mutex_lock(&(cl->lock));

	/* listen() was never called (or called with backlog <= 0) */
	if (unlikely(cl->queue_maxlen <= 0)) {
		mutex_unlock(&(cl->lock));
		return -EINVAL;
	}

	/* drop the lock while sleeping; cor_rdytoaccept() retakes it to
	 * test the queue, and we retest under the lock after waking
	 * because another accept()er may have raced us to the conn */
	while (list_empty(&(cl->conn_queue))) {
		mutex_unlock(&(cl->lock));
		if (wait_event_interruptible(cl->wait, cor_rdytoaccept(cl))) {
			return -ERESTARTSYS;
		}
		mutex_lock(&(cl->lock));
	}

	newconn = container_of(cl->conn_queue.next, struct conn,
			source.sock.cl_list);

	BUG_ON(newconn->sourcetype != SOURCE_SOCK);

	list_del(cl->conn_queue.next);

	cl->queue_len--;

	mutex_unlock(&(cl->lock));

	newsock->ops = &cor_proto_ops;
	newsock->sk = (struct sock *) newconn;
	newsock->state = SS_CONNECTED;

	return 0;
}
164 int cor_socket_listen(struct socket *sock, int len)
166 struct connlistener *cl = (struct connlistener *) sock->sk;
168 int rc = check_connlistener_state(cl);
170 if (unlikely(rc))
171 return -EOPNOTSUPP;
173 mutex_lock(&(cl->lock));
174 cl->queue_maxlen = len;
175 mutex_unlock(&(cl->lock));
177 return 0;
180 int cor_socket_shutdown(struct socket *sock, int flags)
182 return -ENOTSUPP;
/* no ioctls are implemented; -ENOIOCTLCMD makes the socket layer fall
 * back to its default handling */
int cor_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -ENOIOCTLCMD;
}
190 static int cor_readytowriterbuf(struct conn *rconn)
192 int rc = 0;
193 mutex_lock(&(rconn->target.sock.lock));
194 rc = ringbuffer_maypush(&(rconn->target.sock.rbuf));
195 mutex_unlock(&(rconn->target.sock.lock));
196 return rc;
199 static int cor_sendmsg_socket(struct conn *rconn, __user char *msg, int len,
200 int blocking)
202 int ret;
204 do {
205 BUG_ON(rconn->targettype != TARGET_SOCK);
207 mutex_lock(&(rconn->target.sock.lock));
208 ret = ringbuffer_put(&(rconn->target.sock.rbuf), msg, len, 1);
209 mutex_unlock(&(rconn->target.sock.lock));
211 if (ret < 0)
212 return ret;
214 if (ret == 0 && !blocking) {
215 ret = -EAGAIN;
216 } else if (ret == 0 && blocking) {
217 if (wait_event_interruptible(rconn->source.sock.wait,
218 cor_readytowriterbuf(rconn)))
219 ret = -ERESTARTSYS;
220 } else {
221 wake_up_interruptible(&(rconn->target.sock.wait));
223 } while (blocking && ret <= 0);
225 return ret;
228 void cor_flush_sockbuf(struct conn *rconn)
230 char *buf;
231 __u32 len;
233 if (rconn->target.out.skb_written == 0)
234 return;
236 if (atomic_read(&(rconn->target.out.inflight_packets)) != 0)
237 return;
239 len = rconn->target.out.skb_written;
240 buf = kmalloc(len, GFP_KERNEL);
242 memcpy(buf, rconn->target.out.skb->tail - len, len);
244 rconn->target.out.seqno -= rconn->target.out.skb_mss;
245 kfree_skb(rconn->target.out.skb);
246 rconn->target.out.skb = 0;
247 rconn->target.out.skb_written = 0;
249 send_conn_flushdata(rconn, buf, len);
252 static int cor_sendmsg_out(struct conn *rconn, __user char *msg, int len, int blocking)
254 int send = 1;
255 int targetmss = mss(rconn->target.out.nb);
256 int copy;
257 int rc;
259 BUG_ON(rconn->targettype != TARGET_OUT);
260 printk(KERN_ERR "out1");
261 if (rconn->target.out.skb == 0) {
262 struct sk_buff *skb = create_packet_conn(rconn, targetmss,
263 GFP_KERNEL);
264 if (skb == 0)
265 return -ENOMEM;
266 rconn->target.out.skb = skb;
267 rconn->target.out.skb_written = 0;
268 rconn->target.out.skb_mss = targetmss;
271 copy = skb_tailroom(rconn->target.out.skb);
272 if (len < copy) {
273 copy = len;
274 send = 0;
277 rc = skb_add_data(rconn->target.out.skb, msg, copy);
278 BUG_ON(0 > rc);
279 if (rc != 0)
280 return rc;
281 rconn->target.out.skb_written += copy;
283 if (send) {
284 send_packet(rconn->target.out.skb, rconn->target.out.nb);
285 rconn->target.out.skb = 0;
286 rconn->target.out.skb_written = 0;
289 return copy;
/*
 * proto_ops sendmsg handler: walk the iovec and dispatch each chunk to
 * the handler matching the connection's current target type.
 *
 * TARGET_UNCONNECTED feeds data one byte at a time into the routing
 * parser (parse() may switch the targettype mid-stream); TARGET_SOCK
 * and TARGET_OUT pass the chunk to their dedicated helpers.  Only the
 * first chunk may block (copied == 0) so that partial progress is
 * reported instead of sleeping.
 *
 * Returns the number of bytes sent, or a negative error if nothing was
 * sent before the error occurred.
 */
int cor_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t total_len)
{
	int copied = 0;

	struct conn *rconn = (struct conn *) sock->sk;

	int iovidx = 0;
	int iovread = 0;

	int rc = check_conn_state(rconn);

	if (unlikely(rc))
		return -EBADF;

#warning todo nonblocking io
	mutex_lock(&(rconn->source.sock.lock));

	while (iovidx < msg->msg_iovlen) {
		struct iovec *iov = msg->msg_iov + iovidx;
		__user char *userbuf = iov->iov_base + iovread;
		int len = iov->iov_len - iovread;

		int copy = -ECONNRESET;
		if (rconn->targettype == TARGET_UNCONNECTED) {
			struct data data;

			/* single byte at a time: parse() consumes the
			 * connect header incrementally and may change
			 * targettype for the rest of the iovec */
			char buf[1];
			long cpy = copy_from_user(&buf, userbuf, 1);
			if (cpy != 0) {
				copy = -EFAULT;
				goto out;
			}

#warning todo check if receive buffer is empty, otherwise there may be a deadlock
			data.type = TYPE_BUF;
			data.data.buf.buf = buf;
			data.data.buf.len = 1;
			parse(&data, rconn);
			/* parse() decrements buf.len for each consumed byte */
			copy = 1 - data.data.buf.len;
		} else if (rconn->targettype == TARGET_SOCK) {
			copy = cor_sendmsg_socket(rconn, userbuf, len,
					copied == 0);
		} else if (rconn->targettype == TARGET_OUT) {
			copy = cor_sendmsg_out(rconn, userbuf, len,
					copied == 0);
		} else {
			BUG();
		}

out:
		if (copy < 0) {
			/* report the error only if nothing was sent yet */
			if (copied == 0)
				copied = copy;
			break;
		}

		copied += copy;
		iovread += copy;

		if (iov->iov_len == iovread) {
			iovidx++;
			iovread = 0;
		}
	}
	mutex_unlock(&(rconn->source.sock.lock));

	return copied;
}
363 static int cor_readytoread(struct conn *sconn)
365 int rc = 0;
366 mutex_lock(&(sconn->target.sock.lock));
368 if (!skb_queue_empty(&(sconn->target.sock.queue)))
369 rc = 1;
370 else if (ringbuffer_maypull(&(sconn->target.sock.rbuf)))
371 rc = 1;
373 mutex_unlock(&(sconn->target.sock.lock));
375 return rc;
378 static int cor_recvmsg_skb(struct conn *sconn, __user char *msg,
379 unsigned int len)
381 int rc;
382 struct sk_buff *skb = skb_dequeue(&(sconn->target.sock.queue));
384 BUG_ON(skb == 0);
386 len = min(len, skb->len);
387 rc = copy_to_user(msg, skb->data, len);
389 if (rc == 0)
390 cor_pull_skb(skb, len);
392 if (skb->len != 0)
393 skb_queue_head(&(sconn->target.sock.queue), skb);
395 BUG_ON(0 < rc);
397 return rc == 0 ? 0 : -EFAULT;
400 int cor_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
401 size_t total_len, int flags)
403 struct conn *rconn = (struct conn *) sock->sk;
404 struct conn *sconn = rconn->reversedir;
405 size_t copied = 0;
407 int iovidx = 0;
408 int iovread = 0;
410 int rc = check_conn_state(rconn);
412 if (unlikely(rc))
413 return -EBADF;
415 BUG_ON(sconn == 0);
417 mutex_lock(&(sconn->target.sock.lock));
419 while (iovidx < msg->msg_iovlen) {
420 int rc;
422 struct iovec *iov = msg->msg_iov + iovidx;
423 __user char *msg = iov->iov_base + iovread;
424 unsigned int len = iov->iov_len - iovread;
426 if (len == 0) {
427 iovidx++;
428 iovread = 0;
429 continue;
432 if (ringbuffer_maypull(&(sconn->target.sock.rbuf)))
433 rc = ringbuffer_pull(&(sconn->target.sock.rbuf), msg, len, 1);
434 else if (!skb_queue_empty(&(sconn->target.sock.queue)))
435 rc = cor_recvmsg_skb(sconn, msg, len);
436 else
437 rc = 0;
439 if (rc < 0 && copied == 0) {
440 copied = rc;
441 break;
444 if (rc > 0) {
445 copied += rc;
446 iovread += rc;
447 continue;
450 if (copied > 0)
451 break;
453 if (flags & MSG_DONTWAIT) {
454 copied = -EAGAIN;
455 break;
458 if (sconn->sourcetype == SOURCE_NONE) {
459 copied = -EPIPE;
460 break;
463 mutex_unlock(&(sconn->target.sock.lock));
465 if (wait_event_interruptible(sconn->target.sock.wait,
466 cor_readytoread(sconn)))
467 copied = -ERESTARTSYS;
468 mutex_lock(&(sconn->target.sock.lock));
470 if (copied != 0)
471 break;
474 mutex_unlock(&(sconn->target.sock.lock));
476 if (sconn->sourcetype == SOURCE_SOCK)
477 wake_up_interruptible(&(sconn->target.sock.wait));
479 return copied;
/* ops table installed on every cor socket (bound, connected and
 * accepted); operations listed in the trailing comment are not yet
 * implemented and left NULL */
const struct proto_ops cor_proto_ops = {
	.family = PF_COR,
	.owner = THIS_MODULE,
	.release = cor_socket_release,
	.bind = cor_socket_bind,
	.connect = cor_socket_connect,
	.accept = cor_socket_accept,
	.listen = cor_socket_listen,
	.shutdown = cor_socket_shutdown,
	.ioctl = cor_ioctl,
	.sendmsg = cor_sendmsg,
	.recvmsg = cor_recvmsg
};

/*socketpair
getname
poll
compat_ioctl
setsockopt
getsockopt
compat_setsockopt
compat_getsockopt
mmap
sendpage
splice_read*/
508 int cor_createsock(struct net *net, struct socket *sock, int protocol)
510 if (0 != protocol)
511 return -EPROTONOSUPPORT;
513 sock->state = SS_UNCONNECTED;
514 sock->ops = &cor_proto_ops;
516 return 0;
/* registration record for the PF_COR address family */
static struct net_proto_family cor_net_proto_family = {
	.family = PF_COR,
	.create = cor_createsock,
	.owner = THIS_MODULE
};
525 static int __init cor_sock_init(void)
527 sock_register(&cor_net_proto_family);
528 return 0;
531 module_init(cor_sock_init);
533 MODULE_LICENSE("GPL");