2 * Main inter process networking (virtual distributed ethernet) module
3 * (part of the View-OS project: wiki.virtualsquare.org)
5 * Copyright (C) 2007 Renzo Davoli (renzo@cs.unibo.it)
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * Due to this file being licensed under the GPL there is controversy over
13 * whether this permits you to write a module that #includes this file
14 * without placing your module under the GPL. Please consult a lawyer for
15 * advice before doing this.
17 * WARNING: THIS CODE IS ALREADY EXPERIMENTAL
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/socket.h>
24 #include <linux/poll.h>
26 #include <linux/list.h>
27 #include <linux/mount.h>
28 #include <linux/version.h>
31 #include <net/af_ipn.h>
34 #include "ipn_netdev.h"
35 #include "ipn_msgbuf.h"
37 MODULE_LICENSE("GPL");
38 MODULE_AUTHOR("VIEW-OS TEAM");
39 MODULE_DESCRIPTION("IPN Kernel Module");
41 #define IPN_MAX_PROTO 4
43 /*extension of RCV_SHUTDOWN defined in include/net/sock.h
44 * when the bit is set recv fails */
45 /* NO_OOB: do not send OOB */
46 #define RCV_SHUTDOWN_NO_OOB 4
47 /* EXTENDED MASK including OOB */
48 #define SHUTDOWN_XMASK (SHUTDOWN_MASK | RCV_SHUTDOWN_NO_OOB)
49 /* if XRCV_SHUTDOWN is all set recv fails */
50 #define XRCV_SHUTDOWN (RCV_SHUTDOWN | RCV_SHUTDOWN_NO_OOB)
52 /* Network table and hash */
53 struct hlist_head ipn_network_table
[IPN_HASH_SIZE
+ 1];
54 /* not needed. Now protected by ipn_glob_mutex
56 * DEFINE_SPINLOCK(ipn_table_lock);
58 static struct kmem_cache
*ipn_network_cache
;
59 static struct kmem_cache
*ipn_node_cache
;
60 static struct kmem_cache
*ipn_msgitem_cache
;
61 static DECLARE_MUTEX(ipn_glob_mutex
);
63 /* Protocol 1: HUB/Broadcast default protocol. Function Prototypes */
64 static int ipn_bcast_newport(struct ipn_node
*newport
);
65 static int ipn_bcast_handlemsg(struct ipn_node
*from
,
66 struct msgpool_item
*msgitem
);
68 /* default protocol IPN_BROADCAST (0) */
69 static struct ipn_protocol ipn_bcast
= {
71 .ipn_p_newport
=ipn_bcast_newport
,
72 .ipn_p_handlemsg
=ipn_bcast_handlemsg
};
74 static struct ipn_protocol
*ipn_protocol_table
[IPN_MAX_PROTO
]={&ipn_bcast
};
76 /* Socket call function prototypes */
77 static int ipn_release(struct socket
*);
78 static int ipn_bind(struct socket
*, struct sockaddr
*, int);
79 static int ipn_connect(struct socket
*, struct sockaddr
*,
80 int addr_len
, int flags
);
81 static int ipn_getname(struct socket
*, struct sockaddr
*, int *, int);
82 static unsigned int ipn_poll(struct file
*, struct socket
*, poll_table
*);
83 static int ipn_ioctl(struct socket
*, unsigned int, unsigned long);
84 static int ipn_shutdown(struct socket
*, int);
85 static int ipn_sendmsg(struct kiocb
*, struct socket
*,
86 struct msghdr
*, size_t);
87 static int ipn_recvmsg(struct kiocb
*, struct socket
*,
88 struct msghdr
*, size_t, int);
89 static int ipn_setsockopt(struct socket
*sock
, int level
, int optname
,
90 char __user
*optval
, int optlen
);
91 static int ipn_getsockopt(struct socket
*sock
, int level
, int optname
,
92 char __user
*optval
, int __user
*optlen
);
94 /* Network table Management
95 * inode->ipn_network hash table
96 * LOCKING: MUTEX ipn_glob_mutex must be LOCKED*/
97 static inline void ipn_insert_network(struct hlist_head
*list
, struct ipn_network
*ipnn
)
99 /* *IPNTL* spin_lock(&ipn_table_lock); */
100 hlist_add_head(&ipnn
->hnode
, list
);
101 /* *IPNTL* spin_unlock(&ipn_table_lock); */
104 static inline void ipn_remove_network(struct ipn_network
*ipnn
)
106 /* *IPNTL* spin_lock(&ipn_table_lock); */
107 hlist_del(&ipnn
->hnode
);
108 /* *IPNTL* spin_unlock(&ipn_table_lock); */
111 static struct ipn_network
*ipn_find_network_byinode(struct inode
*i
)
113 struct ipn_network
*ipnn
;
114 struct hlist_node
*node
;
116 /* *IPNTL* spin_lock(&ipn_table_lock);*/
117 hlist_for_each_entry(ipnn
, node
,
118 &ipn_network_table
[i
->i_ino
& (IPN_HASH_SIZE
- 1)], hnode
) {
119 struct dentry
*dentry
= ipnn
->dentry
;
121 if(ipnn
->refcnt
> 0 && dentry
&& dentry
->d_inode
== i
)
126 /* *IPNTL* spin_unlock(&ipn_table_lock); */
130 /* msgpool management
131 * msgpool_item are ipn_network dependent (each net has its own MTU)
132 * for each message sent there is one msgpool_item and many struct msgitem
133 * one for each receipient.
134 * msgitem are connected to the node's msgqueue or oobmsgqueue.
135 * when a message is delivered to a process the msgitem is deleted and
136 * the count of the msgpool_item is decreased.
137 * msgpool_item elements gets deleted automatically when count is 0*/
140 struct list_head list
;
141 struct msgpool_item
*msg
;
144 /* alloc a fresh msgpool item. count is set to 1.
147 * for each receipient
148 * enqueue messages to the process (using msgitem), ipn_msgpool_hold
150 * The message can be delivered concurrently. init count to 1 guarantees
151 * that it survives at least until is has been enqueued to all
153 static struct msgpool_item
*_ipn_msgpool_alloc(struct ipn_network
*ipnn
)
155 struct msgpool_item
*new;
156 if ((new=kmem_cache_alloc(ipnn
->msgpool_cache
,GFP_KERNEL
)) != NULL
) {
157 atomic_set(&new->count
,1);
158 atomic_inc(&ipnn
->msgpool_nelem
);
163 struct msgpool_item
*ipn_msgpool_alloc(struct ipn_network
*ipnn
,int leaky
)
165 if (leaky
&& (ipnn
->flags
& IPN_FLAG_LOSSLESS
) &&
166 atomic_read(&ipnn
->msgpool_nelem
) < ipnn
->msgpool_size
)
169 return _ipn_msgpool_alloc(ipnn
);
172 /* If the service il LOSSLESS, this msgpool call waits for an
173 * available msgpool item */
174 static struct msgpool_item
*ipn_msgpool_alloc_locking(struct ipn_network
*ipnn
)
176 if (ipnn
->flags
& IPN_FLAG_LOSSLESS
) {
177 while (atomic_read(&ipnn
->msgpool_nelem
) >= ipnn
->msgpool_size
) {
178 if (wait_event_interruptible_exclusive(ipnn
->send_wait
,
179 atomic_read(&ipnn
->msgpool_nelem
) < ipnn
->msgpool_size
))
183 return _ipn_msgpool_alloc(ipnn
);
186 static inline void ipn_msgpool_hold(struct msgpool_item
*msg
)
188 atomic_inc(&msg
->count
);
191 /* decrease count and delete msgpool_item if count == 0 */
192 void ipn_msgpool_put(struct msgpool_item
*old
,
193 struct ipn_network
*ipnn
)
195 if (atomic_dec_and_test(&old
->count
)) {
196 kmem_cache_free(ipnn
->msgpool_cache
,old
);
197 atomic_dec(&ipnn
->msgpool_nelem
);
198 if (ipnn
->flags
& IPN_FLAG_LOSSLESS
) /* this could be done anyway */
199 wake_up_interruptible(&ipnn
->send_wait
);
204 static const struct proto_ops ipn_ops
= {
206 .owner
= THIS_MODULE
,
207 .release
= ipn_release
,
209 .connect
= ipn_connect
,
210 .socketpair
= sock_no_socketpair
,
211 .accept
= sock_no_accept
,
212 .getname
= ipn_getname
,
215 .listen
= sock_no_listen
,
216 .shutdown
= ipn_shutdown
,
217 .setsockopt
= ipn_setsockopt
,
218 .getsockopt
= ipn_getsockopt
,
219 .sendmsg
= ipn_sendmsg
,
220 .recvmsg
= ipn_recvmsg
,
221 .mmap
= sock_no_mmap
,
222 .sendpage
= sock_no_sendpage
,
225 static struct proto ipn_proto
= {
227 .owner
= THIS_MODULE
,
228 .obj_size
= sizeof(struct ipn_sock
),
232 * ipn_node is a separate structure, pointed by ipn_sock -> node
233 * when a node is "persistent", ipn_node survives while ipn_sock gets released*/
234 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
235 static int ipn_create(struct socket
*sock
, int protocol
)
237 static int ipn_create(struct net
*net
,struct socket
*sock
, int protocol
)
240 struct ipn_sock
*ipn_sk
;
241 struct ipn_node
*ipn_node
;
243 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
246 if (net
!= &init_net
)
247 return -EAFNOSUPPORT
;
250 if (sock
->type
!= SOCK_RAW
)
255 protocol
=IPN_BROADCAST
-1;
256 if (protocol
< 0 || protocol
>= IPN_MAX_PROTO
||
257 ipn_protocol_table
[protocol
] == NULL
)
258 return -EPROTONOSUPPORT
;
259 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
260 ipn_sk
= (struct ipn_sock
*) sk_alloc(PF_IPN
, GFP_KERNEL
, &ipn_proto
, 1);
262 ipn_sk
= (struct ipn_sock
*) sk_alloc(net
, PF_IPN
, GFP_KERNEL
, &ipn_proto
);
267 ipn_sk
->node
=ipn_node
=kmem_cache_alloc(ipn_node_cache
,GFP_KERNEL
);
269 sock_put((struct sock
*) ipn_sk
);
272 sock_init_data(sock
,(struct sock
*) ipn_sk
);
273 sock
->state
= SS_UNCONNECTED
;
274 sock
->ops
= &ipn_ops
;
275 sock
->sk
=(struct sock
*)ipn_sk
;
276 INIT_LIST_HEAD(&ipn_node
->nodelist
);
277 ipn_node
->protocol
=protocol
;
278 ipn_node
->flags
=IPN_NODEFLAG_INUSE
;
279 ipn_node
->shutdown
=RCV_SHUTDOWN_NO_OOB
;
280 ipn_node
->descr
[0]=0;
281 ipn_node
->portno
=IPN_PORTNO_ANY
;
284 ipn_node
->proto_private
=NULL
;
285 ipn_node
->totmsgcount
=0;
286 ipn_node
->oobmsgcount
=0;
287 spin_lock_init(&ipn_node
->msglock
);
288 INIT_LIST_HEAD(&ipn_node
->msgqueue
);
289 INIT_LIST_HEAD(&ipn_node
->oobmsgqueue
);
291 init_waitqueue_head(&ipn_node
->read_wait
);
296 /* update # of readers and # of writers counters for an ipn network.
297 * This function sends oob messages to nodes requesting the service */
298 /* LOCKING ipnn_mutex is locked */
299 static void ipn_net_update_counters(struct ipn_network
*ipnn
,
300 int chg_readers
, int chg_writers
) {
301 ipnn
->numreaders
+= chg_readers
;
302 ipnn
->numwriters
+= chg_writers
;
303 if (ipnn
->mtu
>= sizeof(struct numnode_oob
))
305 struct msgpool_item
*ipn_msg
=_ipn_msgpool_alloc(ipnn
);
307 struct numnode_oob
*oob_msg
=(struct numnode_oob
*)(ipn_msg
->data
);
308 struct ipn_node
*ipn_node
;
309 ipn_msg
->len
=sizeof(struct numnode_oob
);
310 oob_msg
->level
=IPN_ANY
;
311 oob_msg
->tag
=IPN_OOB_NUMNODE_TAG
;
312 oob_msg
->numreaders
=ipnn
->numreaders
;
313 oob_msg
->numwriters
=ipnn
->numwriters
;
314 list_for_each_entry(ipn_node
, &ipnn
->connectqueue
, nodelist
) {
315 if (ipn_node
->flags
& IPN_NODEFLAG_OOB_NUMNODES
)
316 ipn_proto_oobsendmsg(ipn_node
,ipn_msg
);
318 ipn_msgpool_put(ipn_msg
,ipnn
);
323 /* flush pending messages (for close and shutdown RCV) */
324 /* LOCKING: ipnn_mutex is locked */
325 static void ipn_flush_recvqueue(struct ipn_node
*ipn_node
)
327 struct ipn_network
*ipnn
=ipn_node
->ipn
;
328 spin_lock(&ipn_node
->msglock
);
329 while (!list_empty(&ipn_node
->msgqueue
)) {
330 struct msgitem
*msgitem
=
331 list_first_entry(&ipn_node
->msgqueue
, struct msgitem
, list
);
332 list_del(&msgitem
->list
);
333 ipn_node
->totmsgcount
--;
334 ipn_msgpool_put(msgitem
->msg
,ipnn
);
335 kmem_cache_free(ipn_msgitem_cache
,msgitem
);
337 spin_unlock(&ipn_node
->msglock
);
340 /* flush pending oob messages (for socket close) */
341 /* LOCKING: ipnn_mutex is locked */
342 static void ipn_flush_oobrecvqueue(struct ipn_node
*ipn_node
)
344 struct ipn_network
*ipnn
=ipn_node
->ipn
;
345 spin_lock(&ipn_node
->msglock
);
346 while (!list_empty(&ipn_node
->oobmsgqueue
)) {
347 struct msgitem
*msgitem
=
348 list_first_entry(&ipn_node
->oobmsgqueue
, struct msgitem
, list
);
349 list_del(&msgitem
->list
);
350 ipn_node
->totmsgcount
--;
351 ipn_node
->oobmsgcount
--;
352 ipn_msgpool_put(msgitem
->msg
,ipnn
);
353 kmem_cache_free(ipn_msgitem_cache
,msgitem
);
355 spin_unlock(&ipn_node
->msglock
);
358 /* Terminate node. The node is "logically" terminated. */
359 /* LOCKING: ipn_glob_lock must be locked here */
360 static int ipn_terminate_node(struct ipn_node
*ipn_node
)
362 struct ipn_network
*ipnn
=ipn_node
->ipn
;
364 if (down_interruptible(&ipnn
->ipnn_mutex
))
366 if (ipn_node
->portno
>= 0) {
367 ipn_protocol_table
[ipnn
->protocol
]->ipn_p_predelport(ipn_node
);
368 ipnn
->connport
[ipn_node
->portno
]=NULL
;
370 list_del(&ipn_node
->nodelist
);
371 ipn_flush_recvqueue(ipn_node
);
372 ipn_flush_oobrecvqueue(ipn_node
);
373 if (ipn_node
->portno
>= 0)
374 ipn_protocol_table
[ipnn
->protocol
]->ipn_p_delport(ipn_node
);
376 ipn_net_update_counters(ipnn
,
377 (ipn_node
->shutdown
& RCV_SHUTDOWN
)?0:-1,
378 (ipn_node
->shutdown
& SEND_SHUTDOWN
)?0:-1);
379 ipn_node
->shutdown
= SHUTDOWN_XMASK
;
380 up(&ipnn
->ipnn_mutex
);
382 ipn_netdev_close(ipn_node
);
383 /* No more network elements */
385 if (ipnn
->refcnt
== 0)
387 ipn_protocol_table
[ipnn
->protocol
]->ipn_p_delnet(ipnn
);
388 ipn_remove_network(ipnn
);
389 ipn_protocol_table
[ipnn
->protocol
]->refcnt
--;
394 if (ipnn
->msgpool_cache
)
395 /*kmem_cache_destroy(ipnn->msgpool_cache);*/
396 ipn_msgbuf_put(ipnn
->msgpool_cache
);
398 kfree(ipnn
->connport
);
399 kmem_cache_free(ipn_network_cache
, ipnn
);
400 module_put(THIS_MODULE
);
404 kfree(ipn_node
->pbp
);
410 /* release of a socket */
411 static int ipn_release (struct socket
*sock
)
413 struct ipn_sock
*ipn_sk
=(struct ipn_sock
*)sock
->sk
;
414 struct ipn_node
*ipn_node
=ipn_sk
->node
;
416 if (down_interruptible(&ipn_glob_mutex
))
418 if (ipn_node
->flags
& IPN_NODEFLAG_PERSIST
) {
419 ipn_node
->flags
&= ~IPN_NODEFLAG_INUSE
;
423 rv
=ipn_terminate_node(ipn_node
);
427 kmem_cache_free(ipn_node_cache
,ipn_node
);
431 sock_put((struct sock
*) ipn_sk
);
435 /* _set persist, change the persistence of a node,
436 * when persistence gets cleared and the node is no longer used
437 * the node is terminated and freed.
438 * ipn_glob_mutex must be locked */
439 static int _ipn_setpersist(struct ipn_node
*ipn_node
, int persist
)
443 ipn_node
->flags
|= IPN_NODEFLAG_PERSIST
;
445 ipn_node
->flags
&= ~IPN_NODEFLAG_PERSIST
;
446 if (!(ipn_node
->flags
& IPN_NODEFLAG_INUSE
)) {
447 rv
=ipn_terminate_node(ipn_node
);
449 kmem_cache_free(ipn_node_cache
,ipn_node
);
456 * lock ipn_glob_mutex and call __ipn_setpersist above */
457 static int ipn_setpersist(struct ipn_node
*ipn_node
, int persist
)
460 if (ipn_node
->dev
== NULL
)
462 if (down_interruptible(&ipn_glob_mutex
))
464 rv
=_ipn_setpersist(ipn_node
,persist
);
469 /* several network parameters can be set by setsockopt prior to bind */
470 /* struct pre_bind_parms is a temporary stucture connected to ipn_node->pbp
471 * to keep the parameter values. */
472 struct pre_bind_parms
{
473 unsigned short maxports
;
474 unsigned short flags
;
475 unsigned short msgpoolsize
;
480 /* STD_PARMS: BITS_PER_LONG nodes, no flags, BITS_PER_BYTE pending msgs,
481 * Ethernet + VLAN MTU*/
482 #define STD_BIND_PARMS {BITS_PER_LONG, 0, BITS_PER_BYTE, 1514, 0x777};
484 static int ipn_mkname(struct sockaddr_un
* sunaddr
, int len
)
486 if (len
<= sizeof(short) || len
> sizeof(*sunaddr
))
488 if (!sunaddr
|| sunaddr
->sun_family
!= AF_IPN
)
491 * This may look like an off by one error but it is a bit more
492 * subtle. 108 is the longest valid AF_IPN path for a binding.
493 * sun_path[108] doesnt as such exist. However in kernel space
494 * we are guaranteed that it is a valid memory location in our
495 * kernel address buffer.
497 ((char *)sunaddr
)[len
]=0;
498 len
= strlen(sunaddr
->sun_path
)+1+sizeof(short);
503 static int ipn_bind(struct socket
*sock
, struct sockaddr
*uaddr
, int addr_len
)
505 struct sockaddr_un
*sunaddr
=(struct sockaddr_un
*)uaddr
;
506 struct ipn_node
*ipn_node
=((struct ipn_sock
*)sock
->sk
)->node
;
508 struct ipn_network
*ipnn
;
509 struct dentry
* dentry
= NULL
;
511 struct pre_bind_parms parms
=STD_BIND_PARMS
;
513 //printk("IPN bind\n");
515 if (down_interruptible(&ipn_glob_mutex
))
517 if (sock
->state
!= SS_UNCONNECTED
||
518 ipn_node
->ipn
!= NULL
) {
523 if (ipn_node
->protocol
>= 0 &&
524 (ipn_node
->protocol
>= IPN_MAX_PROTO
||
525 ipn_protocol_table
[ipn_node
->protocol
] == NULL
)) {
526 err
= -EPROTONOSUPPORT
;
530 addr_len
= ipn_mkname(sunaddr
, addr_len
);
536 /* check if there is already an ipn-network socket with that name */
537 err
= path_lookup(sunaddr
->sun_path
, LOOKUP_FOLLOW
, &nd
);
538 if (err
) { /* it does not exist, NEW IPN socket! */
540 /* Is it everything okay with the parent? */
541 err
= path_lookup(sunaddr
->sun_path
, LOOKUP_PARENT
, &nd
);
543 goto out_mknod_parent
;
544 /* Do I have the permission to create a file? */
545 dentry
= lookup_create(&nd
, 0);
546 err
= PTR_ERR(dentry
);
548 goto out_mknod_unlock
;
550 * All right, let's create it.
553 mode
= ipn_node
->pbp
->mode
;
555 mode
= SOCK_INODE(sock
)->i_mode
;
556 mode
= S_IFSOCK
| (mode
& ~current
->fs
->umask
);
558 err
= vfs_mknod(nd
.dentry
->d_inode
, dentry
, nd
.mnt
, mode
, 0);
560 err
= vfs_mknod(nd
.dentry
->d_inode
, dentry
, mode
, 0);
564 mutex_unlock(&nd
.dentry
->d_inode
->i_mutex
);
567 /* create a new ipn_network item */
569 parms
=*ipn_node
->pbp
;
570 ipnn
=kmem_cache_zalloc(ipn_network_cache
,GFP_KERNEL
);
573 goto out_mknod_dput_ipnn
;
575 ipnn
->connport
=kzalloc(parms
.maxports
* sizeof(struct ipn_node
*),GFP_KERNEL
);
576 if (!ipnn
->connport
) {
578 goto out_mknod_dput_ipnn2
;
581 /* module refcnt is incremented for each network, thus
582 * rmmod is forbidden if there are persistent node */
583 if (!try_module_get(THIS_MODULE
)) {
585 goto out_mknod_dput_ipnn2
;
587 memcpy(&ipnn
->sunaddr
,sunaddr
,addr_len
);
589 /*ipnn->msgpool_cache=kmem_cache_create(ipnn->sunaddr.sun_path,sizeof(struct msgpool_item)+ipnn->mtu,0,0,NULL);*/
590 ipnn
->msgpool_cache
=ipn_msgbuf_get(ipnn
->mtu
);
591 if (!ipnn
->msgpool_cache
) {
593 goto out_mknod_dput_putmodule
;
595 INIT_LIST_HEAD(&ipnn
->unconnectqueue
);
596 INIT_LIST_HEAD(&ipnn
->connectqueue
);
598 ipnn
->dentry
=nd
.dentry
;
600 init_MUTEX(&ipnn
->ipnn_mutex
);
601 ipnn
->sunaddr_len
=addr_len
;
602 ipnn
->protocol
=ipn_node
->protocol
;
603 if (ipnn
->protocol
< 0) ipnn
->protocol
= 0;
604 ipn_protocol_table
[ipnn
->protocol
]->refcnt
++;
605 ipnn
->flags
=parms
.flags
;
608 ipnn
->maxports
=parms
.maxports
;
609 atomic_set(&ipnn
->msgpool_nelem
,0);
610 ipnn
->msgpool_size
=parms
.msgpoolsize
;
611 ipnn
->proto_private
=NULL
;
612 init_waitqueue_head(&ipnn
->send_wait
);
613 err
=ipn_protocol_table
[ipnn
->protocol
]->ipn_p_newnet(ipnn
);
615 goto out_mknod_dput_putmodule
;
616 ipn_insert_network(&ipn_network_table
[nd
.dentry
->d_inode
->i_ino
& (IPN_HASH_SIZE
-1)],ipnn
);
618 /* join an existing network */
619 if (parms
.flags
& IPN_FLAG_EXCL
) {
623 err
= vfs_permission(&nd
, MAY_EXEC
);
627 if (!S_ISSOCK(nd
.dentry
->d_inode
->i_mode
))
629 ipnn
=ipn_find_network_byinode(nd
.dentry
->d_inode
);
630 if (!ipnn
|| (ipnn
->flags
& IPN_FLAG_TERMINATED
) ||
631 (ipnn
->flags
& IPN_FLAG_EXCL
))
633 list_add_tail(&ipn_node
->nodelist
,&ipnn
->unconnectqueue
);
637 kfree(ipn_node
->pbp
);
641 ipn_node
->flags
|= IPN_NODEFLAG_BOUND
;
651 out_mknod_dput_putmodule
:
652 module_put(THIS_MODULE
);
653 out_mknod_dput_ipnn2
:
654 kfree(ipnn
->connport
);
656 kmem_cache_free(ipn_network_cache
,ipnn
);
660 mutex_unlock(&nd
.dentry
->d_inode
->i_mutex
);
670 static int ipn_connect(struct socket
*sock
, struct sockaddr
*addr
,
671 int addr_len
, int flags
){
672 struct sockaddr_un
*sunaddr
=(struct sockaddr_un
*)addr
;
673 struct ipn_node
*ipn_node
=((struct ipn_sock
*)sock
->sk
)->node
;
675 struct ipn_network
*ipnn
,*previousipnn
;
679 /* the socket cannot be connected twice */
680 if (sock
->state
!= SS_UNCONNECTED
)
683 if (down_interruptible(&ipn_glob_mutex
))
686 if ((previousipnn
=ipn_node
->ipn
) == NULL
) { /* unbound */
687 unsigned char mustshutdown
=0;
688 err
= ipn_mkname(sunaddr
, addr_len
);
692 err
= path_lookup(sunaddr
->sun_path
, LOOKUP_FOLLOW
, &nd
);
695 err
= vfs_permission(&nd
, MAY_READ
);
697 if (err
== -EACCES
|| err
== -EROFS
)
698 mustshutdown
|=RCV_SHUTDOWN
;
702 err
= vfs_permission(&nd
, MAY_WRITE
);
705 mustshutdown
|=SEND_SHUTDOWN
;
709 mustshutdown
|= ipn_node
->shutdown
;
710 /* if the combination of shutdown and permissions leaves
711 * no abilities, connect returns EACCES */
712 if (mustshutdown
== SHUTDOWN_XMASK
) {
717 ipn_node
->shutdown
=mustshutdown
;
719 if (!S_ISSOCK(nd
.dentry
->d_inode
->i_mode
)) {
723 ipnn
=ipn_find_network_byinode(nd
.dentry
->d_inode
);
724 if (!ipnn
|| (ipnn
->flags
& IPN_FLAG_TERMINATED
)) {
728 if (ipn_node
->protocol
== IPN_ANY
)
729 ipn_node
->protocol
=ipnn
->protocol
;
730 else if (ipnn
->protocol
!= ipn_node
->protocol
) {
739 if (down_interruptible(&ipnn
->ipnn_mutex
)) {
743 portno
= ipn_protocol_table
[ipnn
->protocol
]->ipn_p_newport(ipn_node
);
744 if (portno
>= 0 && portno
<ipnn
->maxports
) {
745 sock
->state
= SS_CONNECTED
;
746 ipn_node
->portno
=portno
;
747 ipnn
->connport
[portno
]=ipn_node
;
748 if (!(ipn_node
->flags
& IPN_NODEFLAG_BOUND
)) {
750 list_del(&ipn_node
->nodelist
);
752 list_add_tail(&ipn_node
->nodelist
,&ipnn
->connectqueue
);
753 ipn_net_update_counters(ipnn
,
754 (ipn_node
->shutdown
& RCV_SHUTDOWN
)?0:1,
755 (ipn_node
->shutdown
& SEND_SHUTDOWN
)?0:1);
757 ipn_node
->ipn
=previousipnn
; /* undo changes on ipn_node->ipn */
760 up(&ipnn
->ipnn_mutex
);
771 static int ipn_getname(struct socket
*sock
, struct sockaddr
*uaddr
,
772 int *uaddr_len
, int peer
) {
773 struct ipn_node
*ipn_node
=((struct ipn_sock
*)sock
->sk
)->node
;
774 struct ipn_network
*ipnn
=ipn_node
->ipn
;
775 struct sockaddr_un
*sunaddr
=(struct sockaddr_un
*)uaddr
;
778 if (down_interruptible(&ipn_glob_mutex
))
781 *uaddr_len
= ipnn
->sunaddr_len
;
782 memcpy(sunaddr
,&ipnn
->sunaddr
,*uaddr_len
);
790 static unsigned int ipn_poll(struct file
*file
, struct socket
*sock
,
792 struct ipn_node
*ipn_node
=((struct ipn_sock
*)sock
->sk
)->node
;
793 struct ipn_network
*ipnn
=ipn_node
->ipn
;
797 poll_wait(file
,&ipn_node
->read_wait
,wait
);
798 if (ipnn
->flags
& IPN_FLAG_LOSSLESS
)
799 poll_wait(file
,&ipnn
->send_wait
,wait
);
800 /* POLLIN if recv succeeds,
801 * POLL{PRI,RDNORM} if there are {oob,non-oob} messages */
802 if (ipn_node
->totmsgcount
> 0) mask
|= POLLIN
;
803 if (!(list_empty(&ipn_node
->msgqueue
))) mask
|= POLLRDNORM
;
804 if (!(list_empty(&ipn_node
->oobmsgqueue
))) mask
|= POLLPRI
;
805 if ((!(ipnn
->flags
& IPN_FLAG_LOSSLESS
)) |
806 (atomic_read(&ipnn
->msgpool_nelem
) < ipnn
->msgpool_size
))
807 mask
|= POLLOUT
| POLLWRNORM
;
812 /* connect netdev (from ioctl). connect a bound socket to a
813 * network device TAP or GRAB */
814 static int ipn_connect_netdev(struct socket
*sock
,struct ifreq
*ifr
)
817 struct ipn_node
*ipn_node
=((struct ipn_sock
*)sock
->sk
)->node
;
818 struct ipn_network
*ipnn
=ipn_node
->ipn
;
819 if (!capable(CAP_NET_ADMIN
))
821 if (sock
->state
!= SS_UNCONNECTED
)
824 return -ENOTCONN
; /* Maybe we need a different error for "NOT BOUND" */
825 if (down_interruptible(&ipn_glob_mutex
))
827 if (down_interruptible(&ipnn
->ipnn_mutex
)) {
831 ipn_node
->dev
=ipn_netdev_alloc(ipn_node
->net
,ifr
->ifr_flags
,ifr
->ifr_name
,&err
);
834 portno
= ipn_protocol_table
[ipnn
->protocol
]->ipn_p_newport(ipn_node
);
835 if (portno
>= 0 && portno
<ipnn
->maxports
) {
836 sock
->state
= SS_CONNECTED
;
837 ipn_node
->portno
=portno
;
838 ipn_node
->flags
|= ifr
->ifr_flags
& IPN_NODEFLAG_DEVMASK
;
839 ipnn
->connport
[portno
]=ipn_node
;
840 err
=ipn_netdev_activate(ipn_node
);
842 sock
->state
= SS_UNCONNECTED
;
843 ipn_protocol_table
[ipnn
->protocol
]->ipn_p_delport(ipn_node
);
845 ipn_node
->portno
= -1;
846 ipn_node
->flags
&= ~IPN_NODEFLAG_DEVMASK
;
847 ipnn
->connport
[portno
]=NULL
;
849 ipn_protocol_table
[ipnn
->protocol
]->ipn_p_postnewport(ipn_node
);
850 list_del(&ipn_node
->nodelist
);
851 list_add_tail(&ipn_node
->nodelist
,&ipnn
->connectqueue
);
854 ipn_netdev_close(ipn_node
);
860 up(&ipnn
->ipnn_mutex
);
865 /* join a netdev, a socket gets connected to a persistent node
866 * not connected to another socket */
867 static int ipn_join_netdev(struct socket
*sock
,struct ifreq
*ifr
)
870 struct net_device
*dev
;
871 struct ipn_node
*ipn_node
=((struct ipn_sock
*)sock
->sk
)->node
;
872 struct ipn_node
*ipn_joined
;
873 struct ipn_network
*ipnn
=ipn_node
->ipn
;
874 if (sock
->state
!= SS_UNCONNECTED
)
876 if (down_interruptible(&ipn_glob_mutex
))
878 if (down_interruptible(&ipnn
->ipnn_mutex
)) {
882 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
883 dev
=__dev_get_by_name(ifr
->ifr_name
);
885 dev
=__dev_get_by_name(ipn_node
->net
,ifr
->ifr_name
);
888 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
889 dev
=__dev_get_by_index(ifr
->ifr_ifindex
);
891 dev
=__dev_get_by_index(ipn_node
->net
,ifr
->ifr_ifindex
);
893 if (dev
&& (ipn_joined
=ipn_netdev2node(dev
)) != NULL
) { /* the interface does exist */
895 for (i
=0;i
<ipnn
->maxports
&& ipn_joined
!= ipnn
->connport
[i
] ;i
++)
897 if (i
< ipnn
->maxports
) { /* found */
898 /* ipn_joined is substituted to ipn_node */
899 ((struct ipn_sock
*)sock
->sk
)->node
=ipn_joined
;
900 ipn_joined
->flags
|= IPN_NODEFLAG_INUSE
;
902 kmem_cache_free(ipn_node_cache
,ipn_node
);
907 up(&ipnn
->ipnn_mutex
);
912 /* set persistence of a node looking for it by interface name
913 * (it is for sysadm, to close network interfaces)*/
914 static int ipn_setpersist_netdev(struct ifreq
*ifr
, int value
)
916 struct net_device
*dev
;
917 struct ipn_node
*ipn_node
;
919 if (!capable(CAP_NET_ADMIN
))
921 if (down_interruptible(&ipn_glob_mutex
))
923 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
924 dev
=__dev_get_by_name(ifr
->ifr_name
);
926 dev
=__dev_get_by_name(&init_net
,ifr
->ifr_name
);
929 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
930 dev
=__dev_get_by_index(ifr
->ifr_ifindex
);
932 dev
=__dev_get_by_index(&init_net
,ifr
->ifr_ifindex
);
934 if (dev
&& (ipn_node
=ipn_netdev2node(dev
)) != NULL
)
935 _ipn_setpersist(ipn_node
,value
);
943 static int ipn_ioctl(struct socket
*sock
, unsigned int cmd
, unsigned long arg
) {
944 struct ipn_node
*ipn_node
=((struct ipn_sock
*)sock
->sk
)->node
;
945 struct ipn_network
*ipnn
=ipn_node
->ipn
;
946 void __user
* argp
= (void __user
*)arg
;
949 if (ipn_node
->shutdown
== SHUTDOWN_XMASK
)
956 case IPN_SETPERSIST_NETDEV
:
957 case IPN_CLRPERSIST_NETDEV
:
958 case IPN_CONN_NETDEV
:
959 case IPN_JOIN_NETDEV
:
961 if (copy_from_user(&ifr
, argp
, sizeof ifr
))
963 ifr
.ifr_name
[IFNAMSIZ
-1] = '\0';
966 /* actions for unconnected and unbound sockets */
968 case IPN_SETPERSIST_NETDEV
:
969 return ipn_setpersist_netdev(&ifr
,1);
970 case IPN_CLRPERSIST_NETDEV
:
971 return ipn_setpersist_netdev(&ifr
,0);
973 if (capable(CAP_NET_ADMIN
))
975 if (ipn_node
->dev
&& (ipn_node
->flags
&IPN_NODEFLAG_TAP
))
976 return dev_set_mac_address(ipn_node
->dev
, &ifr
.ifr_hwaddr
);
978 return -EADDRNOTAVAIL
;
980 if (ipnn
== NULL
|| (ipnn
->flags
& IPN_FLAG_TERMINATED
))
982 /* actions for connected or bound sockets */
984 case IPN_CONN_NETDEV
:
985 return ipn_connect_netdev(sock
,&ifr
);
986 case IPN_JOIN_NETDEV
:
987 return ipn_join_netdev(sock
,&ifr
);
989 return ipn_setpersist(ipn_node
,arg
);
993 if (down_interruptible(&ipnn
->ipnn_mutex
))
995 rv
=ipn_protocol_table
[ipn_node
->protocol
]->ipn_p_ioctl(ipn_node
,cmd
,arg
);
996 up(&ipnn
->ipnn_mutex
);
1003 /* shutdown: close socket for input or for output.
1004 * shutdown can be called prior to connect and it is not reversible */
1005 static int ipn_shutdown(struct socket
*sock
, int mode
) {
1006 struct ipn_node
*ipn_node
=((struct ipn_sock
*)sock
->sk
)->node
;
1007 struct ipn_network
*ipnn
=ipn_node
->ipn
;
1008 int oldshutdown
=ipn_node
->shutdown
;
1009 mode
= (mode
+1)&(RCV_SHUTDOWN
|SEND_SHUTDOWN
);
1011 ipn_node
->shutdown
|= mode
;
1014 if (down_interruptible(&ipnn
->ipnn_mutex
)) {
1015 ipn_node
->shutdown
= oldshutdown
;
1016 return -ERESTARTSYS
;
1018 oldshutdown
=ipn_node
->shutdown
-oldshutdown
;
1019 if (sock
->state
== SS_CONNECTED
&& oldshutdown
) {
1020 ipn_net_update_counters(ipnn
,
1021 (ipn_node
->shutdown
& RCV_SHUTDOWN
)?0:-1,
1022 (ipn_node
->shutdown
& SEND_SHUTDOWN
)?0:-1);
1025 /* if recv channel has been shut down, flush the recv queue */
1026 if ((ipn_node
->shutdown
& RCV_SHUTDOWN
))
1027 ipn_flush_recvqueue(ipn_node
);
1028 up(&ipnn
->ipnn_mutex
);
1033 /* injectmsg: a new message is entering the ipn network.
1034 * injectmsg gets called by send and by the grab/tap node */
1035 int ipn_proto_injectmsg(struct ipn_node
*from
, struct msgpool_item
*msg
)
1037 struct ipn_network
*ipnn
=from
->ipn
;
1039 if (down_interruptible(&ipnn
->ipnn_mutex
))
1042 ipn_protocol_table
[ipnn
->protocol
]->ipn_p_handlemsg(from
, msg
);
1043 up(&ipnn
->ipnn_mutex
);
1049 static int ipn_sendmsg(struct kiocb
*kiocb
, struct socket
*sock
,
1050 struct msghdr
*msg
, size_t len
) {
1051 struct ipn_node
*ipn_node
=((struct ipn_sock
*)sock
->sk
)->node
;
1052 struct ipn_network
*ipnn
=ipn_node
->ipn
;
1053 struct msgpool_item
*newmsg
;
1056 if (unlikely(sock
->state
!= SS_CONNECTED
))
1058 if (unlikely(ipn_node
->shutdown
& SEND_SHUTDOWN
)) {
1059 if (ipn_node
->shutdown
== SHUTDOWN_XMASK
)
1064 if (len
> ipnn
->mtu
)
1066 newmsg
=ipn_msgpool_alloc_locking(ipnn
);
1070 err
=memcpy_fromiovec(newmsg
->data
, msg
->msg_iov
, len
);
1072 ipn_proto_injectmsg(ipn_node
, newmsg
);
1073 ipn_msgpool_put(newmsg
,ipnn
);
1077 /* enqueue an oob message. "to" is the destination */
1078 void ipn_proto_oobsendmsg(struct ipn_node
*to
, struct msgpool_item
*msg
)
1081 if (!to
->dev
) { /* no oob to netdev */
1082 struct msgitem
*msgitem
;
1083 struct ipn_network
*ipnn
=to
->ipn
;
1084 spin_lock(&to
->msglock
);
1085 if ((to
->shutdown
& RCV_SHUTDOWN_NO_OOB
) == 0 &&
1086 (ipnn
->flags
& IPN_FLAG_LOSSLESS
||
1087 to
->oobmsgcount
< ipnn
->msgpool_size
)) {
1088 if ((msgitem
=kmem_cache_alloc(ipn_msgitem_cache
,GFP_KERNEL
))!=NULL
) {
1092 list_add_tail(&msgitem
->list
, &to
->oobmsgqueue
);
1093 ipn_msgpool_hold(msg
);
1096 spin_unlock(&to
->msglock
);
1097 wake_up_interruptible(&to
->read_wait
);
1102 /* ipn_proto_sendmsg is called by protocol implementation to enqueue a
1103 * for a destination (to).*/
1104 void ipn_proto_sendmsg(struct ipn_node
*to
, struct msgpool_item
*msg
)
1108 ipn_netdev_sendmsg(to
,msg
);
1111 struct msgitem
*msgitem
;
1112 struct ipn_network
*ipnn
=to
->ipn
;
1113 spin_lock(&to
->msglock
);
1114 if ((ipnn
->flags
& IPN_FLAG_LOSSLESS
||
1115 to
->totmsgcount
< ipnn
->msgpool_size
) &&
1116 (to
->shutdown
& RCV_SHUTDOWN
)==0) {
1117 if ((msgitem
=kmem_cache_alloc(ipn_msgitem_cache
,GFP_KERNEL
))!=NULL
) {
1120 list_add_tail(&msgitem
->list
, &to
->msgqueue
);
1121 ipn_msgpool_hold(msg
);
1124 spin_unlock(&to
->msglock
);
1125 wake_up_interruptible(&to
->read_wait
);
1131 static int ipn_recvmsg(struct kiocb
*kiocb
, struct socket
*sock
,
1132 struct msghdr
*msg
, size_t len
, int flags
) {
1133 struct ipn_node
*ipn_node
=((struct ipn_sock
*)sock
->sk
)->node
;
1134 struct ipn_network
*ipnn
=ipn_node
->ipn
;
1135 struct msgitem
*msgitem
;
1136 struct msgpool_item
*currmsg
;
1138 if (unlikely(sock
->state
!= SS_CONNECTED
))
1141 if (unlikely((ipn_node
->shutdown
& XRCV_SHUTDOWN
) == XRCV_SHUTDOWN
)) {
1142 if (ipn_node
->shutdown
== SHUTDOWN_XMASK
) /*EOF, nothing can be read*/
1145 return -EPIPE
; /*trying to read on a write only node */
1148 /* wait for a message */
1149 spin_lock(&ipn_node
->msglock
);
1150 while (ipn_node
->totmsgcount
== 0) {
1151 spin_unlock(&ipn_node
->msglock
);
1152 if (wait_event_interruptible(ipn_node
->read_wait
,
1153 !(ipn_node
->totmsgcount
== 0)))
1154 return -ERESTARTSYS
;
1155 spin_lock(&ipn_node
->msglock
);
1157 /* oob gets delivered first. oob are rare */
1158 if (likely(list_empty(&ipn_node
->oobmsgqueue
)))
1159 msgitem
=list_first_entry(&ipn_node
->msgqueue
, struct msgitem
, list
);
1161 msgitem
=list_first_entry(&ipn_node
->oobmsgqueue
, struct msgitem
, list
);
1162 msg
->msg_flags
|= MSG_OOB
;
1163 ipn_node
->oobmsgcount
--;
1165 list_del(&msgitem
->list
);
1166 ipn_node
->totmsgcount
--;
1167 spin_unlock(&ipn_node
->msglock
);
1168 currmsg
=msgitem
->msg
;
1169 if (currmsg
->len
< len
)
1171 memcpy_toiovec(msg
->msg_iov
, currmsg
->data
, len
);
1172 ipn_msgpool_put(currmsg
,ipnn
);
1173 kmem_cache_free(ipn_msgitem_cache
,msgitem
);
1178 /* resize a network: change the # of communication ports (connport) */
1179 static int ipn_netresize(struct ipn_network
*ipnn
,int newsize
)
1182 struct ipn_node
**newconnport
;
1183 struct ipn_node
**oldconnport
;
1185 if (down_interruptible(&ipnn
->ipnn_mutex
))
1186 return -ERESTARTSYS
;
1187 oldsize
=ipnn
->maxports
;
1188 if (newsize
== oldsize
) {
1189 up(&ipnn
->ipnn_mutex
);
1193 /* shrink a network. all the ports we are going to eliminate
1194 * must be unused! */
1195 if (newsize
< oldsize
) {
1197 for (i
=newsize
; i
<oldsize
; i
++)
1198 if (ipnn
->connport
[i
]) {
1199 up(&ipnn
->ipnn_mutex
);
1204 oldconnport
=ipnn
->connport
;
1205 /* allocate the new connport array and copy the old one */
1206 newconnport
=kzalloc(newsize
* sizeof(struct ipn_node
*),GFP_KERNEL
);
1208 up(&ipnn
->ipnn_mutex
);
1211 memcpy(newconnport
,oldconnport
,min
* sizeof(struct ipn_node
*));
1212 ipnn
->connport
=newconnport
;
1213 ipnn
->maxports
=newsize
;
1214 /* notify the protocol that the netowrk has been resized */
1215 err
=ipn_protocol_table
[ipnn
->protocol
]->ipn_p_resizenet(ipnn
,oldsize
,newsize
);
1217 /* roll back if the resize operation failed for the protocol */
1218 ipnn
->connport
=oldconnport
;
1219 ipnn
->maxports
=oldsize
;
1222 /* successful mission, network resized */
1224 up(&ipnn
->ipnn_mutex
);
1228 /* IPN SETSOCKOPT */
1229 static int ipn_setsockopt(struct socket
*sock
, int level
, int optname
,
1230 char __user
*optval
, int optlen
) {
1231 struct ipn_node
*ipn_node
=((struct ipn_sock
*)sock
->sk
)->node
;
1232 struct ipn_network
*ipnn
=ipn_node
->ipn
;
1234 if (ipn_node
->shutdown
== SHUTDOWN_XMASK
)
1236 if (level
!= 0 && level
!= ipn_node
->protocol
+1)
1237 return -EPROTONOSUPPORT
;
1239 /* protocol specific sockopt */
1242 if (down_interruptible(&ipnn
->ipnn_mutex
))
1243 return -ERESTARTSYS
;
1244 rv
=ipn_protocol_table
[ipn_node
->protocol
]->ipn_p_setsockopt(ipn_node
,optname
,optval
,optlen
);
1245 up(&ipnn
->ipnn_mutex
);
1250 if (optname
== IPN_SO_DESCR
) {
1251 if (optlen
> IPN_DESCRLEN
)
1254 memset(ipn_node
->descr
,0,IPN_DESCRLEN
);
1255 if (copy_from_user(ipn_node
->descr
,optval
,optlen
))
1256 ipn_node
->descr
[0]=0;
1258 ipn_node
->descr
[optlen
-1]=0;
1262 if (optlen
< sizeof(int))
1264 else if ((optname
& IPN_SO_PREBIND
) && (ipnn
!= NULL
))
1268 get_user(val
, (int __user
*) optval
);
1269 if ((optname
& IPN_SO_PREBIND
) && !ipn_node
->pbp
) {
1270 struct pre_bind_parms std
=STD_BIND_PARMS
;
1271 ipn_node
->pbp
=kzalloc(sizeof(struct pre_bind_parms
),GFP_KERNEL
);
1274 *(ipn_node
->pbp
)=std
;
1278 if (sock
->state
== SS_UNCONNECTED
)
1279 ipn_node
->portno
=val
;
1283 case IPN_SO_CHANGE_NUMNODES
:
1284 if ((ipn_node
->flags
& IPN_NODEFLAG_BOUND
)!=0) {
1288 return ipn_netresize(ipnn
,val
);
1292 case IPN_SO_WANT_OOB_NUMNODES
:
1294 ipn_node
->flags
|= IPN_NODEFLAG_OOB_NUMNODES
;
1296 ipn_node
->flags
&= ~IPN_NODEFLAG_OOB_NUMNODES
;
1298 case IPN_SO_HANDLE_OOB
:
1300 ipn_node
->shutdown
&= ~RCV_SHUTDOWN_NO_OOB
;
1302 ipn_node
->shutdown
|= RCV_SHUTDOWN_NO_OOB
;
1308 ipn_node
->pbp
->mtu
=val
;
1310 case IPN_SO_NUMNODES
:
1314 ipn_node
->pbp
->maxports
=val
;
1316 case IPN_SO_MSGPOOLSIZE
:
1320 ipn_node
->pbp
->msgpoolsize
=val
;
1323 ipn_node
->pbp
->flags
=val
;
1326 ipn_node
->pbp
->mode
=val
;
1335 /* IPN GETSOCKOPT */
1336 static int ipn_getsockopt(struct socket
*sock
, int level
, int optname
,
1337 char __user
*optval
, int __user
*optlen
) {
1338 struct ipn_node
*ipn_node
=((struct ipn_sock
*)sock
->sk
)->node
;
1339 struct ipn_network
*ipnn
=ipn_node
->ipn
;
1342 if (ipn_node
->shutdown
== SHUTDOWN_XMASK
)
1344 if (level
!= 0 && level
!= ipn_node
->protocol
+1)
1345 return -EPROTONOSUPPORT
;
1349 /* protocol specific sockopt */
1350 if (down_interruptible(&ipnn
->ipnn_mutex
))
1351 return -ERESTARTSYS
;
1352 rv
=ipn_protocol_table
[ipn_node
->protocol
]->ipn_p_getsockopt(ipn_node
,optname
,optval
,optlen
);
1353 up(&ipnn
->ipnn_mutex
);
1358 if (get_user(len
, optlen
))
1360 if (optname
== IPN_SO_DESCR
) {
1361 if (len
< IPN_DESCRLEN
)
1364 if (len
> IPN_DESCRLEN
)
1366 if(put_user(len
, optlen
))
1368 if(copy_to_user(optval
,ipn_node
->descr
,len
))
1376 val
=ipn_node
->portno
;
1381 else if (ipn_node
->pbp
)
1382 val
=ipn_node
->pbp
->mtu
;
1384 case IPN_SO_NUMNODES
:
1387 else if (ipn_node
->pbp
)
1388 val
=ipn_node
->pbp
->maxports
;
1390 case IPN_SO_MSGPOOLSIZE
:
1392 val
=ipnn
->msgpool_size
;
1393 else if (ipn_node
->pbp
)
1394 val
=ipn_node
->pbp
->msgpoolsize
;
1399 else if (ipn_node
->pbp
)
1400 val
=ipn_node
->pbp
->flags
;
1405 else if (ipn_node
->pbp
)
1406 val
=ipn_node
->pbp
->mode
;
1412 if (len
< sizeof(int))
1416 if(put_user(len
, optlen
))
1418 if(copy_to_user(optval
,&val
,len
))
1427 /* BROADCAST/HUB implementation */
1429 static int ipn_bcast_newport(struct ipn_node
*newport
) {
1430 struct ipn_network
*ipnn
=newport
->ipn
;
1432 for (i
=0;i
<ipnn
->maxports
;i
++) {
1433 if (ipnn
->connport
[i
] == NULL
)
1439 static int ipn_bcast_handlemsg(struct ipn_node
*from
,
1440 struct msgpool_item
*msgitem
){
1441 struct ipn_network
*ipnn
=from
->ipn
;
1443 struct ipn_node
*ipn_node
;
1444 list_for_each_entry(ipn_node
, &ipnn
->connectqueue
, nodelist
) {
1445 if (ipn_node
!= from
)
1446 ipn_proto_sendmsg(ipn_node
,msgitem
);
1451 static void ipn_null_delport(struct ipn_node
*oldport
) {}
1452 static void ipn_null_postnewport(struct ipn_node
*newport
) {}
1453 static void ipn_null_predelport(struct ipn_node
*oldport
) {}
1454 static int ipn_null_newnet(struct ipn_network
*newnet
) {return 0;}
1455 static int ipn_null_resizenet(struct ipn_network
*net
,int oldsize
,int newsize
) {
1457 static void ipn_null_delnet(struct ipn_network
*oldnet
) {}
1458 static int ipn_null_setsockopt(struct ipn_node
*port
,int optname
,
1459 char __user
*optval
, int optlen
) {return -EOPNOTSUPP
;}
1460 static int ipn_null_getsockopt(struct ipn_node
*port
,int optname
,
1461 char __user
*optval
, int *optlen
) {return -EOPNOTSUPP
;}
1462 static int ipn_null_ioctl(struct ipn_node
*port
,unsigned int request
,
1463 unsigned long arg
) {return -EOPNOTSUPP
;}
1465 /* Protocol Registration/deregisteration */
1467 void ipn_init_protocol(struct ipn_protocol
*p
)
1469 if (p
->ipn_p_delport
== NULL
) p
->ipn_p_delport
=ipn_null_delport
;
1470 if (p
->ipn_p_postnewport
== NULL
) p
->ipn_p_postnewport
=ipn_null_postnewport
;
1471 if (p
->ipn_p_predelport
== NULL
) p
->ipn_p_predelport
=ipn_null_predelport
;
1472 if (p
->ipn_p_newnet
== NULL
) p
->ipn_p_newnet
=ipn_null_newnet
;
1473 if (p
->ipn_p_resizenet
== NULL
) p
->ipn_p_resizenet
=ipn_null_resizenet
;
1474 if (p
->ipn_p_delnet
== NULL
) p
->ipn_p_delnet
=ipn_null_delnet
;
1475 if (p
->ipn_p_setsockopt
== NULL
) p
->ipn_p_setsockopt
=ipn_null_setsockopt
;
1476 if (p
->ipn_p_getsockopt
== NULL
) p
->ipn_p_getsockopt
=ipn_null_getsockopt
;
1477 if (p
->ipn_p_ioctl
== NULL
) p
->ipn_p_ioctl
=ipn_null_ioctl
;
1480 int ipn_proto_register(int protocol
,struct ipn_protocol
*ipn_service
)
1483 if (ipn_service
->ipn_p_newport
== NULL
||
1484 ipn_service
->ipn_p_handlemsg
== NULL
)
1486 ipn_init_protocol(ipn_service
);
1487 if (down_interruptible(&ipn_glob_mutex
))
1488 return -ERESTARTSYS
;
1489 if (protocol
> 1 && protocol
<= IPN_MAX_PROTO
) {
1491 if (ipn_protocol_table
[protocol
])
1494 ipn_service
->refcnt
=0;
1495 ipn_protocol_table
[protocol
]=ipn_service
;
1496 printk(KERN_INFO
"IPN: Registered protocol %d\n",protocol
+1);
1500 up(&ipn_glob_mutex
);
1504 int ipn_proto_deregister(int protocol
)
1507 if (down_interruptible(&ipn_glob_mutex
))
1508 return -ERESTARTSYS
;
1509 if (protocol
> 1 && protocol
<= IPN_MAX_PROTO
) {
1511 if (ipn_protocol_table
[protocol
]) {
1512 if (ipn_protocol_table
[protocol
]->refcnt
== 0) {
1513 ipn_protocol_table
[protocol
]=NULL
;
1514 printk(KERN_INFO
"IPN: Unregistered protocol %d\n",protocol
+1);
1521 up(&ipn_glob_mutex
);
1526 /* Module constructor/destructor */
1527 static struct net_proto_family ipn_family_ops
= {
1529 .create
= ipn_create
,
1530 .owner
= THIS_MODULE
,
1533 /* IPN constructor */
1534 static int ipn_init(void)
1538 ipn_init_protocol(&ipn_bcast
);
1539 ipn_network_cache
=kmem_cache_create("ipn_network",sizeof(struct ipn_network
),0,0,NULL
);
1540 if (!ipn_network_cache
) {
1541 printk(KERN_CRIT
"%s: Cannot create ipn_network SLAB cache!\n",
1547 ipn_node_cache
=kmem_cache_create("ipn_node",sizeof(struct ipn_node
),0,0,NULL
);
1548 if (!ipn_node_cache
) {
1549 printk(KERN_CRIT
"%s: Cannot create ipn_node SLAB cache!\n",
1555 ipn_msgitem_cache
=kmem_cache_create("ipn_msgitem",sizeof(struct msgitem
),0,0,NULL
);
1556 if (!ipn_msgitem_cache
) {
1557 printk(KERN_CRIT
"%s: Cannot create ipn_msgitem SLAB cache!\n",
1563 rc
=ipn_msgbuf_init();
1565 printk(KERN_CRIT
"%s: Cannot create ipn_msgbuf SLAB cache\n",
1567 goto out_net_node_msg
;
1570 rc
=proto_register(&ipn_proto
,1);
1572 printk(KERN_CRIT
"%s: Cannot register the protocol!\n",
1574 goto out_net_node_msg_msgbuf
;
1577 sock_register(&ipn_family_ops
);
1579 printk(KERN_INFO
"IPN: Virtual Square Project, University of Bologna 2007\n");
1582 out_net_node_msg_msgbuf
:
1585 kmem_cache_destroy(ipn_msgitem_cache
);
1587 kmem_cache_destroy(ipn_node_cache
);
1589 kmem_cache_destroy(ipn_network_cache
);
1594 /* IPN destructor */
1595 static void ipn_exit(void)
1598 if (ipn_msgitem_cache
)
1599 kmem_cache_destroy(ipn_msgitem_cache
);
1601 kmem_cache_destroy(ipn_node_cache
);
1602 if (ipn_network_cache
)
1603 kmem_cache_destroy(ipn_network_cache
);
1605 sock_unregister(PF_IPN
);
1606 proto_unregister(&ipn_proto
);
1607 printk(KERN_INFO
"IPN removed\n");
module_init(ipn_init);
module_exit(ipn_exit);

/* Symbols exported (GPL-only) for out-of-tree protocol plug-in modules. */
EXPORT_SYMBOL_GPL(ipn_proto_register);
EXPORT_SYMBOL_GPL(ipn_proto_deregister);
EXPORT_SYMBOL_GPL(ipn_proto_sendmsg);
EXPORT_SYMBOL_GPL(ipn_proto_oobsendmsg);
EXPORT_SYMBOL_GPL(ipn_msgpool_alloc);
EXPORT_SYMBOL_GPL(ipn_msgpool_put);