net/decnet/af_decnet.c
2 /*
3 * DECnet An implementation of the DECnet protocol suite for the LINUX
4 * operating system. DECnet is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
7 * DECnet Socket Layer Interface
9 * Authors: Eduardo Marcelo Serrat <emserrat@geocities.com>
10 * Patrick Caulfield <patrick@pandh.demon.co.uk>
12 * Changes:
13 * Steve Whitehouse: Copied from Eduardo Serrat and Patrick Caulfield's
14 * version of the code. Original copyright preserved
15 * below.
16 * Steve Whitehouse: Some bug fixes, cleaning up some code to make it
17 * compatible with my routing layer.
18 * Steve Whitehouse: Merging changes from Eduardo Serrat and Patrick
19 * Caulfield.
20 * Steve Whitehouse: Further bug fixes, checking module code still works
21 * with new routing layer.
22 * Steve Whitehouse: Additional set/get_sockopt() calls.
23 * Steve Whitehouse: Fixed TIOCINQ ioctl to be same as Eduardo's new
24 * code.
25 * Steve Whitehouse: recvmsg() changed to try and behave in a POSIX like
 *                     way. Didn't manage it entirely, but it's better.
27 * Steve Whitehouse: ditto for sendmsg().
28 * Steve Whitehouse: A selection of bug fixes to various things.
29 * Steve Whitehouse: Added TIOCOUTQ ioctl.
30 * Steve Whitehouse: Fixes to username2sockaddr & sockaddr2username.
31 * Steve Whitehouse: Fixes to connect() error returns.
32 * Patrick Caulfield: Fixes to delayed acceptance logic.
33 * David S. Miller: New socket locking
34 * Steve Whitehouse: Socket list hashing/locking
35 * Arnaldo C. Melo: use capable, not suser
36 * Steve Whitehouse: Removed unused code. Fix to use sk->allocation
37 * when required.
38 * Patrick Caulfield: /proc/net/decnet now has object name/number
39 * Steve Whitehouse: Fixed local port allocation, hashed sk list
40 * Matthew Wilcox: Fixes for dn_ioctl()
41 * Steve Whitehouse: New connect/accept logic to allow timeouts and
42 * prepare for sendpage etc.
46 /******************************************************************************
47 (c) 1995-1998 E.M. Serrat emserrat@geocities.com
49 This program is free software; you can redistribute it and/or modify
50 it under the terms of the GNU General Public License as published by
51 the Free Software Foundation; either version 2 of the License, or
52 any later version.
54 This program is distributed in the hope that it will be useful,
55 but WITHOUT ANY WARRANTY; without even the implied warranty of
56 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
57 GNU General Public License for more details.
59 HISTORY:
61 Version Kernel Date Author/Comments
62 ------- ------ ---- ---------------
63 Version 0.0.1 2.0.30 01-dic-97 Eduardo Marcelo Serrat
64 (emserrat@geocities.com)
66 First Development of DECnet Socket La-
67 yer for Linux. Only supports outgoing
68 connections.
70 Version 0.0.2 2.1.105 20-jun-98 Patrick J. Caulfield
71 (patrick@pandh.demon.co.uk)
73 Port to new kernel development version.
75 Version 0.0.3 2.1.106 25-jun-98 Eduardo Marcelo Serrat
76 (emserrat@geocities.com)
78 Added support for incoming connections
79 so we can start developing server apps
80 on Linux.
82 Module Support
83 Version 0.0.4 2.1.109 21-jul-98 Eduardo Marcelo Serrat
84 (emserrat@geocities.com)
86 Added support for X11R6.4. Now we can
87 use DECnet transport for X on Linux!!!
89 Version 0.0.5 2.1.110 01-aug-98 Eduardo Marcelo Serrat
90 (emserrat@geocities.com)
91 Removed bugs on flow control
92 Removed bugs on incoming accessdata
93 order
95 Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat
96 dn_recvmsg fixes
98 Patrick J. Caulfield
99 dn_bind fixes
100 *******************************************************************************/
102 #include <linux/module.h>
103 #include <linux/errno.h>
104 #include <linux/types.h>
105 #include <linux/slab.h>
106 #include <linux/socket.h>
107 #include <linux/in.h>
108 #include <linux/kernel.h>
109 #include <linux/sched.h>
110 #include <linux/timer.h>
111 #include <linux/string.h>
112 #include <linux/sockios.h>
113 #include <linux/net.h>
114 #include <linux/netdevice.h>
115 #include <linux/inet.h>
116 #include <linux/route.h>
117 #include <linux/netfilter.h>
118 #include <linux/seq_file.h>
119 #include <net/sock.h>
120 #include <net/tcp_states.h>
121 #include <net/flow.h>
122 #include <asm/system.h>
123 #include <asm/ioctls.h>
124 #include <linux/capability.h>
125 #include <linux/mm.h>
126 #include <linux/interrupt.h>
127 #include <linux/proc_fs.h>
128 #include <linux/stat.h>
129 #include <linux/init.h>
130 #include <linux/poll.h>
131 #include <net/neighbour.h>
132 #include <net/dst.h>
133 #include <net/dn.h>
134 #include <net/dn_nsp.h>
135 #include <net/dn_dev.h>
136 #include <net/dn_route.h>
137 #include <net/dn_fib.h>
138 #include <net/dn_neigh.h>
140 struct dn_sock {
141 struct sock sk;
142 struct dn_scp scp;
145 static void dn_keepalive(struct sock *sk);
147 #define DN_SK_HASH_SHIFT 8
148 #define DN_SK_HASH_SIZE (1 << DN_SK_HASH_SHIFT)
149 #define DN_SK_HASH_MASK (DN_SK_HASH_SIZE - 1)
152 static const struct proto_ops dn_proto_ops;
153 static DEFINE_RWLOCK(dn_hash_lock);
154 static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
155 static struct hlist_head dn_wild_sk;
156 static atomic_t decnet_memory_allocated;
158 static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen, int flags);
159 static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags);
161 static struct hlist_head *dn_find_list(struct sock *sk)
163 struct dn_scp *scp = DN_SK(sk);
165 if (scp->addr.sdn_flags & SDF_WILD)
166 return hlist_empty(&dn_wild_sk) ? &dn_wild_sk : NULL;
168 return &dn_sk_hash[scp->addrloc & DN_SK_HASH_MASK];
172 * Valid ports are those greater than zero and not already in use.
174 static int check_port(__le16 port)
176 struct sock *sk;
177 struct hlist_node *node;
179 if (port == 0)
180 return -1;
182 sk_for_each(sk, node, &dn_sk_hash[port & DN_SK_HASH_MASK]) {
183 struct dn_scp *scp = DN_SK(sk);
184 if (scp->addrloc == port)
185 return -1;
187 return 0;
190 static unsigned short port_alloc(struct sock *sk)
192 struct dn_scp *scp = DN_SK(sk);
193 static unsigned short port = 0x2000;
194 unsigned short i_port = port;
196 while(check_port(++port) != 0) {
197 if (port == i_port)
198 return 0;
201 scp->addrloc = port;
203 return 1;
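/*
 * For illustration: local ports come from the single static counter above,
 * starting at 0x2000 and pre-incremented before each check, so the first
 * port handed out is normally 0x2001, the next 0x2002, and so on (assuming
 * no collisions).  Allocation only fails if the counter wraps all the way
 * back to where it started, i.e. every candidate port is already in use.
 */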
207 * Since this is only ever called from user
208 * level, we don't need a write_lock() version
209 * of this.
211 static int dn_hash_sock(struct sock *sk)
213 struct dn_scp *scp = DN_SK(sk);
214 struct hlist_head *list;
215 int rv = -EUSERS;
217 BUG_ON(sk_hashed(sk));
219 write_lock_bh(&dn_hash_lock);
221 if (!scp->addrloc && !port_alloc(sk))
222 goto out;
224 rv = -EADDRINUSE;
225 if ((list = dn_find_list(sk)) == NULL)
226 goto out;
228 sk_add_node(sk, list);
229 rv = 0;
230 out:
231 write_unlock_bh(&dn_hash_lock);
232 return rv;
235 static void dn_unhash_sock(struct sock *sk)
237 write_lock(&dn_hash_lock);
238 sk_del_node_init(sk);
239 write_unlock(&dn_hash_lock);
242 static void dn_unhash_sock_bh(struct sock *sk)
244 write_lock_bh(&dn_hash_lock);
245 sk_del_node_init(sk);
246 write_unlock_bh(&dn_hash_lock);
249 static struct hlist_head *listen_hash(struct sockaddr_dn *addr)
251 int i;
252 unsigned hash = addr->sdn_objnum;
254 if (hash == 0) {
255 hash = addr->sdn_objnamel;
256 for(i = 0; i < dn_ntohs(addr->sdn_objnamel); i++) {
257 hash ^= addr->sdn_objname[i];
258 hash ^= (hash << 3);
262 return &dn_sk_hash[hash & DN_SK_HASH_MASK];
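/*
 * Illustration: a listener bound by object number (sdn_objnum != 0) hashes
 * straight on that number, so e.g. object number 25 always lands in bucket
 * 25 & DN_SK_HASH_MASK.  Named objects (sdn_objnum == 0) instead fold the
 * name length and each name byte in with the xor/shift above.
 */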
266 * Called to transform a socket from bound (i.e. with a local address)
267 * into a listening socket (doesn't need a local port number) and rehashes
268 * based upon the object name/number.
270 static void dn_rehash_sock(struct sock *sk)
272 struct hlist_head *list;
273 struct dn_scp *scp = DN_SK(sk);
275 if (scp->addr.sdn_flags & SDF_WILD)
276 return;
278 write_lock_bh(&dn_hash_lock);
279 sk_del_node_init(sk);
280 DN_SK(sk)->addrloc = 0;
281 list = listen_hash(&DN_SK(sk)->addr);
282 sk_add_node(sk, list);
283 write_unlock_bh(&dn_hash_lock);
286 int dn_sockaddr2username(struct sockaddr_dn *sdn, unsigned char *buf, unsigned char type)
288 int len = 2;
290 *buf++ = type;
292 switch(type) {
293 case 0:
294 *buf++ = sdn->sdn_objnum;
295 break;
296 case 1:
297 *buf++ = 0;
298 *buf++ = dn_ntohs(sdn->sdn_objnamel);
299 memcpy(buf, sdn->sdn_objname, dn_ntohs(sdn->sdn_objnamel));
300 len = 3 + dn_ntohs(sdn->sdn_objnamel);
301 break;
302 case 2:
303 memset(buf, 0, 5);
304 buf += 5;
305 *buf++ = dn_ntohs(sdn->sdn_objnamel);
306 memcpy(buf, sdn->sdn_objname, dn_ntohs(sdn->sdn_objnamel));
307 len = 7 + dn_ntohs(sdn->sdn_objnamel);
308 break;
311 return len;
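/*
 * Resulting "username" layouts, as built above:
 *
 *   format 0:  [fmt][objnum]                          -> 2 bytes
 *   format 1:  [fmt][0][namelen][name...]             -> 3 + namelen bytes
 *   format 2:  [fmt][5 zero bytes][namelen][name...]  -> 7 + namelen bytes
 *
 * e.g. a three character object name "FAL" sent as format 1 comes out as
 * 01 00 03 'F' 'A' 'L' (6 bytes).
 */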
315 * On reception of usernames, we handle types 1 and 0 for destination
316 * addresses only. Types 2 and 4 are used for source addresses, but the
317 * UIC, GIC are ignored and they are both treated the same way. Type 3
318 * is never used as I've no idea what its purpose might be or what its
319 * format is.
321 int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *sdn, unsigned char *fmt)
323 unsigned char type;
324 int size = len;
325 int namel = 12;
327 sdn->sdn_objnum = 0;
328 sdn->sdn_objnamel = dn_htons(0);
329 memset(sdn->sdn_objname, 0, DN_MAXOBJL);
331 if (len < 2)
332 return -1;
334 len -= 2;
335 *fmt = *data++;
336 type = *data++;
338 switch(*fmt) {
339 case 0:
340 sdn->sdn_objnum = type;
341 return 2;
342 case 1:
343 namel = 16;
344 break;
345 case 2:
346 len -= 4;
347 data += 4;
348 break;
349 case 4:
350 len -= 8;
351 data += 8;
352 break;
353 default:
354 return -1;
357 len -= 1;
359 if (len < 0)
360 return -1;
362 sdn->sdn_objnamel = dn_htons(*data++);
363 len -= dn_ntohs(sdn->sdn_objnamel);
365 if ((len < 0) || (dn_ntohs(sdn->sdn_objnamel) > namel))
366 return -1;
368 memcpy(sdn->sdn_objname, data, dn_ntohs(sdn->sdn_objnamel));
370 return size - len;
373 struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr)
375 struct hlist_head *list = listen_hash(addr);
376 struct hlist_node *node;
377 struct sock *sk;
379 read_lock(&dn_hash_lock);
380 sk_for_each(sk, node, list) {
381 struct dn_scp *scp = DN_SK(sk);
382 if (sk->sk_state != TCP_LISTEN)
383 continue;
384 if (scp->addr.sdn_objnum) {
385 if (scp->addr.sdn_objnum != addr->sdn_objnum)
386 continue;
387 } else {
388 if (addr->sdn_objnum)
389 continue;
390 if (scp->addr.sdn_objnamel != addr->sdn_objnamel)
391 continue;
392 if (memcmp(scp->addr.sdn_objname, addr->sdn_objname, dn_ntohs(addr->sdn_objnamel)) != 0)
393 continue;
395 sock_hold(sk);
396 read_unlock(&dn_hash_lock);
397 return sk;
400 sk = sk_head(&dn_wild_sk);
401 if (sk) {
402 if (sk->sk_state == TCP_LISTEN)
403 sock_hold(sk);
404 else
405 sk = NULL;
408 read_unlock(&dn_hash_lock);
409 return sk;
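/*
 * Lookup order, as coded above: first a listener bound to the matching
 * object number or object name, then (only if none matched) the single
 * wildcard listener; in both cases only sockets in TCP_LISTEN state count.
 */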
412 struct sock *dn_find_by_skb(struct sk_buff *skb)
414 struct dn_skb_cb *cb = DN_SKB_CB(skb);
415 struct sock *sk;
416 struct hlist_node *node;
417 struct dn_scp *scp;
419 read_lock(&dn_hash_lock);
420 sk_for_each(sk, node, &dn_sk_hash[cb->dst_port & DN_SK_HASH_MASK]) {
421 scp = DN_SK(sk);
422 if (cb->src != dn_saddr2dn(&scp->peer))
423 continue;
424 if (cb->dst_port != scp->addrloc)
425 continue;
426 if (scp->addrrem && (cb->src_port != scp->addrrem))
427 continue;
428 sock_hold(sk);
429 goto found;
431 sk = NULL;
432 found:
433 read_unlock(&dn_hash_lock);
434 return sk;
439 static void dn_destruct(struct sock *sk)
441 struct dn_scp *scp = DN_SK(sk);
443 skb_queue_purge(&scp->data_xmit_queue);
444 skb_queue_purge(&scp->other_xmit_queue);
445 skb_queue_purge(&scp->other_receive_queue);
447 dst_release(xchg(&sk->sk_dst_cache, NULL));
450 static int dn_memory_pressure;
452 static void dn_enter_memory_pressure(void)
454 if (!dn_memory_pressure) {
455 dn_memory_pressure = 1;
459 static struct proto dn_proto = {
460 .name = "NSP",
461 .owner = THIS_MODULE,
462 .enter_memory_pressure = dn_enter_memory_pressure,
463 .memory_pressure = &dn_memory_pressure,
464 .memory_allocated = &decnet_memory_allocated,
465 .sysctl_mem = sysctl_decnet_mem,
466 .sysctl_wmem = sysctl_decnet_wmem,
467 .sysctl_rmem = sysctl_decnet_rmem,
468 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
469 .obj_size = sizeof(struct dn_sock),
472 static struct sock *dn_alloc_sock(struct socket *sock, gfp_t gfp)
474 struct dn_scp *scp;
475 struct sock *sk = sk_alloc(PF_DECnet, gfp, &dn_proto, 1);
477 if (!sk)
478 goto out;
480 if (sock)
481 sock->ops = &dn_proto_ops;
482 sock_init_data(sock, sk);
484 sk->sk_backlog_rcv = dn_nsp_backlog_rcv;
485 sk->sk_destruct = dn_destruct;
486 sk->sk_no_check = 1;
487 sk->sk_family = PF_DECnet;
488 sk->sk_protocol = 0;
489 sk->sk_allocation = gfp;
490 sk->sk_sndbuf = sysctl_decnet_wmem[1];
491 sk->sk_rcvbuf = sysctl_decnet_rmem[1];
493 /* Initialization of DECnet Session Control Port */
494 scp = DN_SK(sk);
495 scp->state = DN_O; /* Open */
496 scp->numdat = 1; /* Next data seg to tx */
497 scp->numoth = 1; /* Next oth data to tx */
498 scp->ackxmt_dat = 0; /* Last data seg ack'ed */
499 scp->ackxmt_oth = 0; /* Last oth data ack'ed */
500 scp->ackrcv_dat = 0; /* Highest data ack recv*/
501 scp->ackrcv_oth = 0; /* Last oth data ack rec*/
502 scp->flowrem_sw = DN_SEND;
503 scp->flowloc_sw = DN_SEND;
504 scp->flowrem_dat = 0;
505 scp->flowrem_oth = 1;
506 scp->flowloc_dat = 0;
507 scp->flowloc_oth = 1;
508 scp->services_rem = 0;
509 scp->services_loc = 1 | NSP_FC_NONE;
510 scp->info_rem = 0;
511 scp->info_loc = 0x03; /* NSP version 4.1 */
512 scp->segsize_rem = 230 - DN_MAX_NSP_DATA_HEADER; /* Default: Updated by remote segsize */
513 scp->nonagle = 0;
514 scp->multi_ireq = 1;
515 scp->accept_mode = ACC_IMMED;
516 scp->addr.sdn_family = AF_DECnet;
517 scp->peer.sdn_family = AF_DECnet;
518 scp->accessdata.acc_accl = 5;
519 memcpy(scp->accessdata.acc_acc, "LINUX", 5);
521 scp->max_window = NSP_MAX_WINDOW;
522 scp->snd_window = NSP_MIN_WINDOW;
523 scp->nsp_srtt = NSP_INITIAL_SRTT;
524 scp->nsp_rttvar = NSP_INITIAL_RTTVAR;
525 scp->nsp_rxtshift = 0;
527 skb_queue_head_init(&scp->data_xmit_queue);
528 skb_queue_head_init(&scp->other_xmit_queue);
529 skb_queue_head_init(&scp->other_receive_queue);
531 scp->persist = 0;
532 scp->persist_fxn = NULL;
533 scp->keepalive = 10 * HZ;
534 scp->keepalive_fxn = dn_keepalive;
536 init_timer(&scp->delack_timer);
537 scp->delack_pending = 0;
538 scp->delack_fxn = dn_nsp_delayed_ack;
540 dn_start_slow_timer(sk);
541 out:
542 return sk;
546 * Keepalive timer.
547 * FIXME: Should respond to SO_KEEPALIVE etc.
549 static void dn_keepalive(struct sock *sk)
551 struct dn_scp *scp = DN_SK(sk);
 * By checking that the other_data transmit queue is empty
 * we double-check that we are not sending too
 * many of these keepalive frames.
558 if (skb_queue_empty(&scp->other_xmit_queue))
559 dn_nsp_send_link(sk, DN_NOCHANGE, 0);
564 * Timer for shutdown/destroyed sockets.
 * When a socket is dead and no packets have been sent for a
 * certain amount of time, it is removed by this
 * routine. It also takes care of sending out DI & DC
568 * frames at correct times.
570 int dn_destroy_timer(struct sock *sk)
572 struct dn_scp *scp = DN_SK(sk);
574 scp->persist = dn_nsp_persist(sk);
576 switch(scp->state) {
577 case DN_DI:
578 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
579 if (scp->nsp_rxtshift >= decnet_di_count)
580 scp->state = DN_CN;
581 return 0;
583 case DN_DR:
584 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
585 if (scp->nsp_rxtshift >= decnet_dr_count)
586 scp->state = DN_DRC;
587 return 0;
589 case DN_DN:
590 if (scp->nsp_rxtshift < decnet_dn_count) {
591 /* printk(KERN_DEBUG "dn_destroy_timer: DN\n"); */
592 dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, GFP_ATOMIC);
593 return 0;
597 scp->persist = (HZ * decnet_time_wait);
599 if (sk->sk_socket)
600 return 0;
602 if ((jiffies - scp->stamp) >= (HZ * decnet_time_wait)) {
603 dn_unhash_sock(sk);
604 sock_put(sk);
605 return 1;
608 return 0;
611 static void dn_destroy_sock(struct sock *sk)
613 struct dn_scp *scp = DN_SK(sk);
615 scp->nsp_rxtshift = 0; /* reset back off */
617 if (sk->sk_socket) {
618 if (sk->sk_socket->state != SS_UNCONNECTED)
619 sk->sk_socket->state = SS_DISCONNECTING;
622 sk->sk_state = TCP_CLOSE;
624 switch(scp->state) {
625 case DN_DN:
626 dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
627 sk->sk_allocation);
628 scp->persist_fxn = dn_destroy_timer;
629 scp->persist = dn_nsp_persist(sk);
630 break;
631 case DN_CR:
632 scp->state = DN_DR;
633 goto disc_reject;
634 case DN_RUN:
635 scp->state = DN_DI;
636 case DN_DI:
637 case DN_DR:
638 disc_reject:
639 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation);
640 case DN_NC:
641 case DN_NR:
642 case DN_RJ:
643 case DN_DIC:
644 case DN_CN:
645 case DN_DRC:
646 case DN_CI:
647 case DN_CD:
648 scp->persist_fxn = dn_destroy_timer;
649 scp->persist = dn_nsp_persist(sk);
650 break;
651 default:
652 printk(KERN_DEBUG "DECnet: dn_destroy_sock passed socket in invalid state\n");
653 case DN_O:
654 dn_stop_slow_timer(sk);
656 dn_unhash_sock_bh(sk);
657 sock_put(sk);
659 break;
663 char *dn_addr2asc(__u16 addr, char *buf)
665 unsigned short node, area;
667 node = addr & 0x03ff;
668 area = addr >> 10;
669 sprintf(buf, "%hd.%hd", area, node);
671 return buf;
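/*
 * Example: the 16-bit address 0x0401 splits into area 1 (top 6 bits) and
 * node 1 (low 10 bits) and is printed as "1.1"; 0x100d gives "4.13".
 */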
676 static int dn_create(struct socket *sock, int protocol)
678 struct sock *sk;
680 switch(sock->type) {
681 case SOCK_SEQPACKET:
682 if (protocol != DNPROTO_NSP)
683 return -EPROTONOSUPPORT;
684 break;
685 case SOCK_STREAM:
686 break;
687 default:
688 return -ESOCKTNOSUPPORT;
692 if ((sk = dn_alloc_sock(sock, GFP_KERNEL)) == NULL)
693 return -ENOBUFS;
695 sk->sk_protocol = protocol;
697 return 0;
701 static int
702 dn_release(struct socket *sock)
704 struct sock *sk = sock->sk;
706 if (sk) {
707 sock_orphan(sk);
708 sock_hold(sk);
709 lock_sock(sk);
710 dn_destroy_sock(sk);
711 release_sock(sk);
712 sock_put(sk);
715 return 0;
718 static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
720 struct sock *sk = sock->sk;
721 struct dn_scp *scp = DN_SK(sk);
722 struct sockaddr_dn *saddr = (struct sockaddr_dn *)uaddr;
723 struct net_device *dev;
724 int rv;
726 if (addr_len != sizeof(struct sockaddr_dn))
727 return -EINVAL;
729 if (saddr->sdn_family != AF_DECnet)
730 return -EINVAL;
732 if (dn_ntohs(saddr->sdn_nodeaddrl) && (dn_ntohs(saddr->sdn_nodeaddrl) != 2))
733 return -EINVAL;
735 if (dn_ntohs(saddr->sdn_objnamel) > DN_MAXOBJL)
736 return -EINVAL;
738 if (saddr->sdn_flags & ~SDF_WILD)
739 return -EINVAL;
741 if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum ||
742 (saddr->sdn_flags & SDF_WILD)))
743 return -EACCES;
745 if (!(saddr->sdn_flags & SDF_WILD)) {
746 if (dn_ntohs(saddr->sdn_nodeaddrl)) {
747 read_lock(&dev_base_lock);
748 for(dev = dev_base; dev; dev = dev->next) {
749 if (!dev->dn_ptr)
750 continue;
751 if (dn_dev_islocal(dev, dn_saddr2dn(saddr)))
752 break;
754 read_unlock(&dev_base_lock);
755 if (dev == NULL)
756 return -EADDRNOTAVAIL;
760 rv = -EINVAL;
761 lock_sock(sk);
762 if (sock_flag(sk, SOCK_ZAPPED)) {
763 memcpy(&scp->addr, saddr, addr_len);
764 sock_reset_flag(sk, SOCK_ZAPPED);
766 rv = dn_hash_sock(sk);
767 if (rv)
768 sock_set_flag(sk, SOCK_ZAPPED);
770 release_sock(sk);
772 return rv;
776 static int dn_auto_bind(struct socket *sock)
778 struct sock *sk = sock->sk;
779 struct dn_scp *scp = DN_SK(sk);
780 int rv;
782 sock_reset_flag(sk, SOCK_ZAPPED);
784 scp->addr.sdn_flags = 0;
785 scp->addr.sdn_objnum = 0;
788 * This stuff is to keep compatibility with Eduardo's
789 * patch. I hope I can dispense with it shortly...
791 if ((scp->accessdata.acc_accl != 0) &&
792 (scp->accessdata.acc_accl <= 12)) {
794 scp->addr.sdn_objnamel = dn_htons(scp->accessdata.acc_accl);
795 memcpy(scp->addr.sdn_objname, scp->accessdata.acc_acc, dn_ntohs(scp->addr.sdn_objnamel));
797 scp->accessdata.acc_accl = 0;
798 memset(scp->accessdata.acc_acc, 0, 40);
800 /* End of compatibility stuff */
802 scp->addr.sdn_add.a_len = dn_htons(2);
803 rv = dn_dev_bind_default((__le16 *)scp->addr.sdn_add.a_addr);
804 if (rv == 0) {
805 rv = dn_hash_sock(sk);
806 if (rv)
807 sock_set_flag(sk, SOCK_ZAPPED);
810 return rv;
813 static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
815 struct dn_scp *scp = DN_SK(sk);
816 DEFINE_WAIT(wait);
817 int err;
819 if (scp->state != DN_CR)
820 return -EINVAL;
822 scp->state = DN_CC;
823 scp->segsize_loc = dst_metric(__sk_dst_get(sk), RTAX_ADVMSS);
824 dn_send_conn_conf(sk, allocation);
826 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
827 for(;;) {
828 release_sock(sk);
829 if (scp->state == DN_CC)
830 *timeo = schedule_timeout(*timeo);
831 lock_sock(sk);
832 err = 0;
833 if (scp->state == DN_RUN)
834 break;
835 err = sock_error(sk);
836 if (err)
837 break;
838 err = sock_intr_errno(*timeo);
839 if (signal_pending(current))
840 break;
841 err = -EAGAIN;
842 if (!*timeo)
843 break;
844 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
846 finish_wait(sk->sk_sleep, &wait);
847 if (err == 0) {
848 sk->sk_socket->state = SS_CONNECTED;
849 } else if (scp->state != DN_CC) {
850 sk->sk_socket->state = SS_UNCONNECTED;
852 return err;
855 static int dn_wait_run(struct sock *sk, long *timeo)
857 struct dn_scp *scp = DN_SK(sk);
858 DEFINE_WAIT(wait);
859 int err = 0;
861 if (scp->state == DN_RUN)
862 goto out;
864 if (!*timeo)
865 return -EALREADY;
867 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
868 for(;;) {
869 release_sock(sk);
870 if (scp->state == DN_CI || scp->state == DN_CC)
871 *timeo = schedule_timeout(*timeo);
872 lock_sock(sk);
873 err = 0;
874 if (scp->state == DN_RUN)
875 break;
876 err = sock_error(sk);
877 if (err)
878 break;
879 err = sock_intr_errno(*timeo);
880 if (signal_pending(current))
881 break;
882 err = -ETIMEDOUT;
883 if (!*timeo)
884 break;
885 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
887 finish_wait(sk->sk_sleep, &wait);
888 out:
889 if (err == 0) {
890 sk->sk_socket->state = SS_CONNECTED;
891 } else if (scp->state != DN_CI && scp->state != DN_CC) {
892 sk->sk_socket->state = SS_UNCONNECTED;
894 return err;
897 static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
899 struct socket *sock = sk->sk_socket;
900 struct dn_scp *scp = DN_SK(sk);
901 int err = -EISCONN;
902 struct flowi fl;
904 if (sock->state == SS_CONNECTED)
905 goto out;
907 if (sock->state == SS_CONNECTING) {
908 err = 0;
909 if (scp->state == DN_RUN) {
910 sock->state = SS_CONNECTED;
911 goto out;
913 err = -ECONNREFUSED;
914 if (scp->state != DN_CI && scp->state != DN_CC) {
915 sock->state = SS_UNCONNECTED;
916 goto out;
918 return dn_wait_run(sk, timeo);
921 err = -EINVAL;
922 if (scp->state != DN_O)
923 goto out;
925 if (addr == NULL || addrlen != sizeof(struct sockaddr_dn))
926 goto out;
927 if (addr->sdn_family != AF_DECnet)
928 goto out;
929 if (addr->sdn_flags & SDF_WILD)
930 goto out;
932 if (sock_flag(sk, SOCK_ZAPPED)) {
933 err = dn_auto_bind(sk->sk_socket);
934 if (err)
935 goto out;
938 memcpy(&scp->peer, addr, sizeof(struct sockaddr_dn));
940 err = -EHOSTUNREACH;
941 memset(&fl, 0, sizeof(fl));
942 fl.oif = sk->sk_bound_dev_if;
943 fl.fld_dst = dn_saddr2dn(&scp->peer);
944 fl.fld_src = dn_saddr2dn(&scp->addr);
945 dn_sk_ports_copy(&fl, scp);
946 fl.proto = DNPROTO_NSP;
947 if (dn_route_output_sock(&sk->sk_dst_cache, &fl, sk, flags) < 0)
948 goto out;
949 sk->sk_route_caps = sk->sk_dst_cache->dev->features;
950 sock->state = SS_CONNECTING;
951 scp->state = DN_CI;
952 scp->segsize_loc = dst_metric(sk->sk_dst_cache, RTAX_ADVMSS);
954 dn_nsp_send_conninit(sk, NSP_CI);
955 err = -EINPROGRESS;
956 if (*timeo) {
957 err = dn_wait_run(sk, timeo);
959 out:
960 return err;
963 static int dn_connect(struct socket *sock, struct sockaddr *uaddr, int addrlen, int flags)
965 struct sockaddr_dn *addr = (struct sockaddr_dn *)uaddr;
966 struct sock *sk = sock->sk;
967 int err;
968 long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
970 lock_sock(sk);
971 err = __dn_connect(sk, addr, addrlen, &timeo, 0);
972 release_sock(sk);
974 return err;
977 static inline int dn_check_state(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
979 struct dn_scp *scp = DN_SK(sk);
981 switch(scp->state) {
982 case DN_RUN:
983 return 0;
984 case DN_CR:
985 return dn_confirm_accept(sk, timeo, sk->sk_allocation);
986 case DN_CI:
987 case DN_CC:
988 return dn_wait_run(sk, timeo);
989 case DN_O:
990 return __dn_connect(sk, addr, addrlen, timeo, flags);
993 return -EINVAL;
997 static void dn_access_copy(struct sk_buff *skb, struct accessdata_dn *acc)
999 unsigned char *ptr = skb->data;
1001 acc->acc_userl = *ptr++;
1002 memcpy(&acc->acc_user, ptr, acc->acc_userl);
1003 ptr += acc->acc_userl;
1005 acc->acc_passl = *ptr++;
1006 memcpy(&acc->acc_pass, ptr, acc->acc_passl);
1007 ptr += acc->acc_passl;
1009 acc->acc_accl = *ptr++;
1010 memcpy(&acc->acc_acc, ptr, acc->acc_accl);
1012 skb_pull(skb, acc->acc_accl + acc->acc_passl + acc->acc_userl + 3);
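/*
 * The access data parsed here is three counted strings laid out back to
 * back: [userl][user...][passl][pass...][accl][acc...], which is why the
 * skb_pull() above removes the three string bodies plus 3 count bytes.
 */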
1016 static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt)
1018 unsigned char *ptr = skb->data;
1020 opt->opt_optl = *ptr++;
1021 opt->opt_status = 0;
1022 memcpy(opt->opt_data, ptr, opt->opt_optl);
1023 skb_pull(skb, dn_ntohs(opt->opt_optl) + 1);
1027 static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
1029 DEFINE_WAIT(wait);
1030 struct sk_buff *skb = NULL;
1031 int err = 0;
1033 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1034 for(;;) {
1035 release_sock(sk);
1036 skb = skb_dequeue(&sk->sk_receive_queue);
1037 if (skb == NULL) {
1038 *timeo = schedule_timeout(*timeo);
1039 skb = skb_dequeue(&sk->sk_receive_queue);
1041 lock_sock(sk);
1042 if (skb != NULL)
1043 break;
1044 err = -EINVAL;
1045 if (sk->sk_state != TCP_LISTEN)
1046 break;
1047 err = sock_intr_errno(*timeo);
1048 if (signal_pending(current))
1049 break;
1050 err = -EAGAIN;
1051 if (!*timeo)
1052 break;
1053 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1055 finish_wait(sk->sk_sleep, &wait);
1057 return skb == NULL ? ERR_PTR(err) : skb;
1060 static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
1062 struct sock *sk = sock->sk, *newsk;
1063 struct sk_buff *skb = NULL;
1064 struct dn_skb_cb *cb;
1065 unsigned char menuver;
1066 int err = 0;
1067 unsigned char type;
1068 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1070 lock_sock(sk);
1072 if (sk->sk_state != TCP_LISTEN || DN_SK(sk)->state != DN_O) {
1073 release_sock(sk);
1074 return -EINVAL;
1077 skb = skb_dequeue(&sk->sk_receive_queue);
1078 if (skb == NULL) {
1079 skb = dn_wait_for_connect(sk, &timeo);
1080 if (IS_ERR(skb)) {
1081 release_sock(sk);
1082 return PTR_ERR(skb);
1086 cb = DN_SKB_CB(skb);
1087 sk->sk_ack_backlog--;
1088 newsk = dn_alloc_sock(newsock, sk->sk_allocation);
1089 if (newsk == NULL) {
1090 release_sock(sk);
1091 kfree_skb(skb);
1092 return -ENOBUFS;
1094 release_sock(sk);
1096 dst_release(xchg(&newsk->sk_dst_cache, skb->dst));
1097 skb->dst = NULL;
1099 DN_SK(newsk)->state = DN_CR;
1100 DN_SK(newsk)->addrrem = cb->src_port;
1101 DN_SK(newsk)->services_rem = cb->services;
1102 DN_SK(newsk)->info_rem = cb->info;
1103 DN_SK(newsk)->segsize_rem = cb->segsize;
1104 DN_SK(newsk)->accept_mode = DN_SK(sk)->accept_mode;
1106 if (DN_SK(newsk)->segsize_rem < 230)
1107 DN_SK(newsk)->segsize_rem = 230;
1109 if ((DN_SK(newsk)->services_rem & NSP_FC_MASK) == NSP_FC_NONE)
1110 DN_SK(newsk)->max_window = decnet_no_fc_max_cwnd;
1112 newsk->sk_state = TCP_LISTEN;
1113 memcpy(&(DN_SK(newsk)->addr), &(DN_SK(sk)->addr), sizeof(struct sockaddr_dn));
1116 * If we are listening on a wild socket, we don't want
1117 * the newly created socket on the wrong hash queue.
1119 DN_SK(newsk)->addr.sdn_flags &= ~SDF_WILD;
1121 skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->addr), &type));
1122 skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->peer), &type));
1123 *(__le16 *)(DN_SK(newsk)->peer.sdn_add.a_addr) = cb->src;
1124 *(__le16 *)(DN_SK(newsk)->addr.sdn_add.a_addr) = cb->dst;
1126 menuver = *skb->data;
1127 skb_pull(skb, 1);
1129 if (menuver & DN_MENUVER_ACC)
1130 dn_access_copy(skb, &(DN_SK(newsk)->accessdata));
1132 if (menuver & DN_MENUVER_USR)
1133 dn_user_copy(skb, &(DN_SK(newsk)->conndata_in));
1135 if (menuver & DN_MENUVER_PRX)
1136 DN_SK(newsk)->peer.sdn_flags |= SDF_PROXY;
1138 if (menuver & DN_MENUVER_UIC)
1139 DN_SK(newsk)->peer.sdn_flags |= SDF_UICPROXY;
1141 kfree_skb(skb);
1143 memcpy(&(DN_SK(newsk)->conndata_out), &(DN_SK(sk)->conndata_out),
1144 sizeof(struct optdata_dn));
1145 memcpy(&(DN_SK(newsk)->discdata_out), &(DN_SK(sk)->discdata_out),
1146 sizeof(struct optdata_dn));
1148 lock_sock(newsk);
1149 err = dn_hash_sock(newsk);
1150 if (err == 0) {
1151 sock_reset_flag(newsk, SOCK_ZAPPED);
1152 dn_send_conn_ack(newsk);
1155 * Here we use sk->sk_allocation since although the conn conf is
1156 * for the newsk, the context is the old socket.
1158 if (DN_SK(newsk)->accept_mode == ACC_IMMED)
1159 err = dn_confirm_accept(newsk, &timeo,
1160 sk->sk_allocation);
1162 release_sock(newsk);
1163 return err;
1167 static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int *uaddr_len,int peer)
1169 struct sockaddr_dn *sa = (struct sockaddr_dn *)uaddr;
1170 struct sock *sk = sock->sk;
1171 struct dn_scp *scp = DN_SK(sk);
1173 *uaddr_len = sizeof(struct sockaddr_dn);
1175 lock_sock(sk);
1177 if (peer) {
1178 if ((sock->state != SS_CONNECTED &&
1179 sock->state != SS_CONNECTING) &&
1180 scp->accept_mode == ACC_IMMED)
1181 return -ENOTCONN;
1183 memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn));
1184 } else {
1185 memcpy(sa, &scp->addr, sizeof(struct sockaddr_dn));
1188 release_sock(sk);
1190 return 0;
1194 static unsigned int dn_poll(struct file *file, struct socket *sock, poll_table *wait)
1196 struct sock *sk = sock->sk;
1197 struct dn_scp *scp = DN_SK(sk);
1198 int mask = datagram_poll(file, sock, wait);
1200 if (!skb_queue_empty(&scp->other_receive_queue))
1201 mask |= POLLRDBAND;
1203 return mask;
1206 static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1208 struct sock *sk = sock->sk;
1209 struct dn_scp *scp = DN_SK(sk);
1210 int err = -EOPNOTSUPP;
1211 long amount = 0;
1212 struct sk_buff *skb;
1213 int val;
1215 switch(cmd)
1217 case SIOCGIFADDR:
1218 case SIOCSIFADDR:
1219 return dn_dev_ioctl(cmd, (void __user *)arg);
1221 case SIOCATMARK:
1222 lock_sock(sk);
1223 val = !skb_queue_empty(&scp->other_receive_queue);
1224 if (scp->state != DN_RUN)
1225 val = -ENOTCONN;
1226 release_sock(sk);
1227 return val;
1229 case TIOCOUTQ:
1230 amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
1231 if (amount < 0)
1232 amount = 0;
1233 err = put_user(amount, (int __user *)arg);
1234 break;
1236 case TIOCINQ:
1237 lock_sock(sk);
1238 if ((skb = skb_peek(&scp->other_receive_queue)) != NULL) {
1239 amount = skb->len;
1240 } else {
1241 struct sk_buff *skb = sk->sk_receive_queue.next;
1242 for(;;) {
1243 if (skb ==
1244 (struct sk_buff *)&sk->sk_receive_queue)
1245 break;
1246 amount += skb->len;
1247 skb = skb->next;
1250 release_sock(sk);
1251 err = put_user(amount, (int __user *)arg);
1252 break;
1254 default:
1255 err = -ENOIOCTLCMD;
1256 break;
1259 return err;
1262 static int dn_listen(struct socket *sock, int backlog)
1264 struct sock *sk = sock->sk;
1265 int err = -EINVAL;
1267 lock_sock(sk);
1269 if (sock_flag(sk, SOCK_ZAPPED))
1270 goto out;
1272 if ((DN_SK(sk)->state != DN_O) || (sk->sk_state == TCP_LISTEN))
1273 goto out;
1275 sk->sk_max_ack_backlog = backlog;
1276 sk->sk_ack_backlog = 0;
1277 sk->sk_state = TCP_LISTEN;
1278 err = 0;
1279 dn_rehash_sock(sk);
1281 out:
1282 release_sock(sk);
1284 return err;
1288 static int dn_shutdown(struct socket *sock, int how)
1290 struct sock *sk = sock->sk;
1291 struct dn_scp *scp = DN_SK(sk);
1292 int err = -ENOTCONN;
1294 lock_sock(sk);
1296 if (sock->state == SS_UNCONNECTED)
1297 goto out;
1299 err = 0;
1300 if (sock->state == SS_DISCONNECTING)
1301 goto out;
1303 err = -EINVAL;
1304 if (scp->state == DN_O)
1305 goto out;
1307 if (how != SHUTDOWN_MASK)
1308 goto out;
1310 sk->sk_shutdown = how;
1311 dn_destroy_sock(sk);
1312 err = 0;
1314 out:
1315 release_sock(sk);
1317 return err;
1320 static int dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1322 struct sock *sk = sock->sk;
1323 int err;
1325 lock_sock(sk);
1326 err = __dn_setsockopt(sock, level, optname, optval, optlen, 0);
1327 release_sock(sk);
1329 return err;
1332 static int __dn_setsockopt(struct socket *sock, int level,int optname, char __user *optval, int optlen, int flags)
1334 struct sock *sk = sock->sk;
1335 struct dn_scp *scp = DN_SK(sk);
1336 long timeo;
1337 union {
1338 struct optdata_dn opt;
1339 struct accessdata_dn acc;
1340 int mode;
1341 unsigned long win;
1342 int val;
1343 unsigned char services;
1344 unsigned char info;
1345 } u;
1346 int err;
1348 if (optlen && !optval)
1349 return -EINVAL;
1351 if (optlen > sizeof(u))
1352 return -EINVAL;
1354 if (copy_from_user(&u, optval, optlen))
1355 return -EFAULT;
1357 switch(optname) {
1358 case DSO_CONDATA:
1359 if (sock->state == SS_CONNECTED)
1360 return -EISCONN;
1361 if ((scp->state != DN_O) && (scp->state != DN_CR))
1362 return -EINVAL;
1364 if (optlen != sizeof(struct optdata_dn))
1365 return -EINVAL;
1367 if (dn_ntohs(u.opt.opt_optl) > 16)
1368 return -EINVAL;
1370 memcpy(&scp->conndata_out, &u.opt, optlen);
1371 break;
1373 case DSO_DISDATA:
1374 if (sock->state != SS_CONNECTED && scp->accept_mode == ACC_IMMED)
1375 return -ENOTCONN;
1377 if (optlen != sizeof(struct optdata_dn))
1378 return -EINVAL;
1380 if (dn_ntohs(u.opt.opt_optl) > 16)
1381 return -EINVAL;
1383 memcpy(&scp->discdata_out, &u.opt, optlen);
1384 break;
1386 case DSO_CONACCESS:
1387 if (sock->state == SS_CONNECTED)
1388 return -EISCONN;
1389 if (scp->state != DN_O)
1390 return -EINVAL;
1392 if (optlen != sizeof(struct accessdata_dn))
1393 return -EINVAL;
1395 if ((u.acc.acc_accl > DN_MAXACCL) ||
1396 (u.acc.acc_passl > DN_MAXACCL) ||
1397 (u.acc.acc_userl > DN_MAXACCL))
1398 return -EINVAL;
1400 memcpy(&scp->accessdata, &u.acc, optlen);
1401 break;
1403 case DSO_ACCEPTMODE:
1404 if (sock->state == SS_CONNECTED)
1405 return -EISCONN;
1406 if (scp->state != DN_O)
1407 return -EINVAL;
1409 if (optlen != sizeof(int))
1410 return -EINVAL;
1412 if ((u.mode != ACC_IMMED) && (u.mode != ACC_DEFER))
1413 return -EINVAL;
1415 scp->accept_mode = (unsigned char)u.mode;
1416 break;
1418 case DSO_CONACCEPT:
1420 if (scp->state != DN_CR)
1421 return -EINVAL;
1422 timeo = sock_rcvtimeo(sk, 0);
1423 err = dn_confirm_accept(sk, &timeo, sk->sk_allocation);
1424 return err;
1426 case DSO_CONREJECT:
1428 if (scp->state != DN_CR)
1429 return -EINVAL;
1431 scp->state = DN_DR;
1432 sk->sk_shutdown = SHUTDOWN_MASK;
1433 dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
1434 break;
1436 default:
1437 #ifdef CONFIG_NETFILTER
1438 return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
1439 #endif
1440 case DSO_LINKINFO:
1441 case DSO_STREAM:
1442 case DSO_SEQPACKET:
1443 return -ENOPROTOOPT;
1445 case DSO_MAXWINDOW:
1446 if (optlen != sizeof(unsigned long))
1447 return -EINVAL;
1448 if (u.win > NSP_MAX_WINDOW)
1449 u.win = NSP_MAX_WINDOW;
1450 if (u.win == 0)
1451 return -EINVAL;
1452 scp->max_window = u.win;
1453 if (scp->snd_window > u.win)
1454 scp->snd_window = u.win;
1455 break;
1457 case DSO_NODELAY:
1458 if (optlen != sizeof(int))
1459 return -EINVAL;
1460 if (scp->nonagle == 2)
1461 return -EINVAL;
1462 scp->nonagle = (u.val == 0) ? 0 : 1;
1463 /* if (scp->nonagle == 1) { Push pending frames } */
1464 break;
1466 case DSO_CORK:
1467 if (optlen != sizeof(int))
1468 return -EINVAL;
1469 if (scp->nonagle == 1)
1470 return -EINVAL;
1471 scp->nonagle = (u.val == 0) ? 0 : 2;
1472 /* if (scp->nonagle == 0) { Push pending frames } */
1473 break;
1475 case DSO_SERVICES:
1476 if (optlen != sizeof(unsigned char))
1477 return -EINVAL;
1478 if ((u.services & ~NSP_FC_MASK) != 0x01)
1479 return -EINVAL;
1480 if ((u.services & NSP_FC_MASK) == NSP_FC_MASK)
1481 return -EINVAL;
1482 scp->services_loc = u.services;
1483 break;
1485 case DSO_INFO:
1486 if (optlen != sizeof(unsigned char))
1487 return -EINVAL;
1488 if (u.info & 0xfc)
1489 return -EINVAL;
1490 scp->info_loc = u.info;
1491 break;
1494 return 0;
1497 static int dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1499 struct sock *sk = sock->sk;
1500 int err;
1502 lock_sock(sk);
1503 err = __dn_getsockopt(sock, level, optname, optval, optlen, 0);
1504 release_sock(sk);
1506 return err;
1509 static int __dn_getsockopt(struct socket *sock, int level,int optname, char __user *optval,int __user *optlen, int flags)
1511 struct sock *sk = sock->sk;
1512 struct dn_scp *scp = DN_SK(sk);
1513 struct linkinfo_dn link;
1514 unsigned int r_len;
1515 void *r_data = NULL;
1516 unsigned int val;
1518 if(get_user(r_len , optlen))
1519 return -EFAULT;
1521 switch(optname) {
1522 case DSO_CONDATA:
1523 if (r_len > sizeof(struct optdata_dn))
1524 r_len = sizeof(struct optdata_dn);
1525 r_data = &scp->conndata_in;
1526 break;
1528 case DSO_DISDATA:
1529 if (r_len > sizeof(struct optdata_dn))
1530 r_len = sizeof(struct optdata_dn);
1531 r_data = &scp->discdata_in;
1532 break;
1534 case DSO_CONACCESS:
1535 if (r_len > sizeof(struct accessdata_dn))
1536 r_len = sizeof(struct accessdata_dn);
1537 r_data = &scp->accessdata;
1538 break;
1540 case DSO_ACCEPTMODE:
1541 if (r_len > sizeof(unsigned char))
1542 r_len = sizeof(unsigned char);
1543 r_data = &scp->accept_mode;
1544 break;
1546 case DSO_LINKINFO:
1547 if (r_len > sizeof(struct linkinfo_dn))
1548 r_len = sizeof(struct linkinfo_dn);
1550 switch(sock->state) {
1551 case SS_CONNECTING:
1552 link.idn_linkstate = LL_CONNECTING;
1553 break;
1554 case SS_DISCONNECTING:
1555 link.idn_linkstate = LL_DISCONNECTING;
1556 break;
1557 case SS_CONNECTED:
1558 link.idn_linkstate = LL_RUNNING;
1559 break;
1560 default:
1561 link.idn_linkstate = LL_INACTIVE;
1564 link.idn_segsize = scp->segsize_rem;
1565 r_data = &link;
1566 break;
1568 default:
1569 #ifdef CONFIG_NETFILTER
1571 int val, len;
1573 if(get_user(len, optlen))
1574 return -EFAULT;
1576 val = nf_getsockopt(sk, PF_DECnet, optname,
1577 optval, &len);
1578 if (val >= 0)
1579 val = put_user(len, optlen);
1580 return val;
1582 #endif
1583 case DSO_STREAM:
1584 case DSO_SEQPACKET:
1585 case DSO_CONACCEPT:
1586 case DSO_CONREJECT:
1587 return -ENOPROTOOPT;
1589 case DSO_MAXWINDOW:
1590 if (r_len > sizeof(unsigned long))
1591 r_len = sizeof(unsigned long);
1592 r_data = &scp->max_window;
1593 break;
1595 case DSO_NODELAY:
1596 if (r_len > sizeof(int))
1597 r_len = sizeof(int);
1598 val = (scp->nonagle == 1);
1599 r_data = &val;
1600 break;
1602 case DSO_CORK:
1603 if (r_len > sizeof(int))
1604 r_len = sizeof(int);
1605 val = (scp->nonagle == 2);
1606 r_data = &val;
1607 break;
1609 case DSO_SERVICES:
1610 if (r_len > sizeof(unsigned char))
1611 r_len = sizeof(unsigned char);
1612 r_data = &scp->services_rem;
1613 break;
1615 case DSO_INFO:
1616 if (r_len > sizeof(unsigned char))
1617 r_len = sizeof(unsigned char);
1618 r_data = &scp->info_rem;
1619 break;
1622 if (r_data) {
1623 if (copy_to_user(optval, r_data, r_len))
1624 return -EFAULT;
1625 if (put_user(r_len, optlen))
1626 return -EFAULT;
1629 return 0;
1633 static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target)
1635 struct sk_buff *skb = q->next;
1636 int len = 0;
1638 if (flags & MSG_OOB)
1639 return !skb_queue_empty(q) ? 1 : 0;
1641 while(skb != (struct sk_buff *)q) {
1642 struct dn_skb_cb *cb = DN_SKB_CB(skb);
1643 len += skb->len;
1645 if (cb->nsp_flags & 0x40) {
1646 /* SOCK_SEQPACKET reads to EOM */
1647 if (sk->sk_type == SOCK_SEQPACKET)
1648 return 1;
1649 /* so does SOCK_STREAM unless WAITALL is specified */
1650 if (!(flags & MSG_WAITALL))
1651 return 1;
1654 /* minimum data length for read exceeded */
1655 if (len >= target)
1656 return 1;
1658 skb = skb->next;
1661 return 0;
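/*
 * 0x40 in nsp_flags is the NSP end-of-message bit (dn_sendmsg below sets
 * 0x20 on the first segment of a message and 0x40 on the last), so a
 * SOCK_SEQPACKET read is "ready" as soon as one complete message is queued.
 */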
1665 static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
1666 struct msghdr *msg, size_t size, int flags)
1668 struct sock *sk = sock->sk;
1669 struct dn_scp *scp = DN_SK(sk);
1670 struct sk_buff_head *queue = &sk->sk_receive_queue;
1671 size_t target = size > 1 ? 1 : 0;
1672 size_t copied = 0;
1673 int rv = 0;
1674 struct sk_buff *skb, *nskb;
1675 struct dn_skb_cb *cb = NULL;
1676 unsigned char eor = 0;
1677 long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1679 lock_sock(sk);
1681 if (sock_flag(sk, SOCK_ZAPPED)) {
1682 rv = -EADDRNOTAVAIL;
1683 goto out;
1686 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1687 rv = 0;
1688 goto out;
1691 rv = dn_check_state(sk, NULL, 0, &timeo, flags);
1692 if (rv)
1693 goto out;
1695 if (flags & ~(MSG_CMSG_COMPAT|MSG_PEEK|MSG_OOB|MSG_WAITALL|MSG_DONTWAIT|MSG_NOSIGNAL)) {
1696 rv = -EOPNOTSUPP;
1697 goto out;
1700 if (flags & MSG_OOB)
1701 queue = &scp->other_receive_queue;
1703 if (flags & MSG_WAITALL)
1704 target = size;
1708 * See if there is data ready to read, sleep if there isn't
1710 for(;;) {
1711 if (sk->sk_err)
1712 goto out;
1714 if (!skb_queue_empty(&scp->other_receive_queue)) {
1715 if (!(flags & MSG_OOB)) {
1716 msg->msg_flags |= MSG_OOB;
1717 if (!scp->other_report) {
1718 scp->other_report = 1;
1719 goto out;
1724 if (scp->state != DN_RUN)
1725 goto out;
1727 if (signal_pending(current)) {
1728 rv = sock_intr_errno(timeo);
1729 goto out;
1732 if (dn_data_ready(sk, queue, flags, target))
1733 break;
1735 if (flags & MSG_DONTWAIT) {
1736 rv = -EWOULDBLOCK;
1737 goto out;
1740 set_bit(SOCK_ASYNC_WAITDATA, &sock->flags);
1741 SOCK_SLEEP_PRE(sk)
1743 if (!dn_data_ready(sk, queue, flags, target))
1744 schedule();
1746 SOCK_SLEEP_POST(sk)
1747 clear_bit(SOCK_ASYNC_WAITDATA, &sock->flags);
1750 for(skb = queue->next; skb != (struct sk_buff *)queue; skb = nskb) {
1751 unsigned int chunk = skb->len;
1752 cb = DN_SKB_CB(skb);
1754 if ((chunk + copied) > size)
1755 chunk = size - copied;
1757 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
1758 rv = -EFAULT;
1759 break;
1761 copied += chunk;
1763 if (!(flags & MSG_PEEK))
1764 skb_pull(skb, chunk);
1766 eor = cb->nsp_flags & 0x40;
1767 nskb = skb->next;
1769 if (skb->len == 0) {
1770 skb_unlink(skb, queue);
1771 kfree_skb(skb);
1773 * N.B. Don't refer to skb or cb after this point
1774 * in loop.
1776 if ((scp->flowloc_sw == DN_DONTSEND) && !dn_congested(sk)) {
1777 scp->flowloc_sw = DN_SEND;
1778 dn_nsp_send_link(sk, DN_SEND, 0);
1782 if (eor) {
1783 if (sk->sk_type == SOCK_SEQPACKET)
1784 break;
1785 if (!(flags & MSG_WAITALL))
1786 break;
1789 if (flags & MSG_OOB)
1790 break;
1792 if (copied >= target)
1793 break;
1796 rv = copied;
1799 if (eor && (sk->sk_type == SOCK_SEQPACKET))
1800 msg->msg_flags |= MSG_EOR;
1802 out:
1803 if (rv == 0)
1804 rv = (flags & MSG_PEEK) ? -sk->sk_err : sock_error(sk);
1806 if ((rv >= 0) && msg->msg_name) {
1807 memcpy(msg->msg_name, &scp->peer, sizeof(struct sockaddr_dn));
1808 msg->msg_namelen = sizeof(struct sockaddr_dn);
1811 release_sock(sk);
1813 return rv;
1817 static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *queue, int flags)
1819 unsigned char fctype = scp->services_rem & NSP_FC_MASK;
1820 if (skb_queue_len(queue) >= scp->snd_window)
1821 return 1;
1822 if (fctype != NSP_FC_NONE) {
1823 if (flags & MSG_OOB) {
1824 if (scp->flowrem_oth == 0)
1825 return 1;
1826 } else {
1827 if (scp->flowrem_dat == 0)
1828 return 1;
1831 return 0;
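/*
 * In other words: the transmit queue is "too long" once it reaches the
 * current send window and, when the peer requested explicit flow control
 * (fctype != NSP_FC_NONE), also once the remote flow-control count for the
 * relevant channel (other-data vs. normal data) has dropped to zero.
 */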
 * The DECnet spec requires that the "routing layer" accept packets which
1836 * are at least 230 bytes in size. This excludes any headers which the NSP
1837 * layer might add, so we always assume that we'll be using the maximal
1838 * length header on data packets. The variation in length is due to the
1839 * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't
1840 * make much practical difference.
1842 unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu)
1844 unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER;
1845 if (dev) {
1846 struct dn_dev *dn_db = dev->dn_ptr;
1847 mtu -= LL_RESERVED_SPACE(dev);
1848 if (dn_db->use_long)
1849 mtu -= 21;
1850 else
1851 mtu -= 6;
1852 mtu -= DN_MAX_NSP_DATA_HEADER;
1853 } else {
1855 * 21 = long header, 16 = guess at MAC header length
1857 mtu -= (21 + DN_MAX_NSP_DATA_HEADER + 16);
1859 if (mtu > mss)
1860 mss = mtu;
1861 return mss;
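/*
 * Rough worked example (assuming an Ethernet-style device using the long
 * DECnet header): mss = mtu - LL_RESERVED_SPACE(dev) - 21
 * - DN_MAX_NSP_DATA_HEADER, so a 1500 byte MTU leaves well over the
 * 230 - DN_MAX_NSP_DATA_HEADER floor that the routing layer must accept.
 */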
1864 static inline unsigned int dn_current_mss(struct sock *sk, int flags)
1866 struct dst_entry *dst = __sk_dst_get(sk);
1867 struct dn_scp *scp = DN_SK(sk);
1868 int mss_now = min_t(int, scp->segsize_loc, scp->segsize_rem);
1870 /* Other data messages are limited to 16 bytes per packet */
1871 if (flags & MSG_OOB)
1872 return 16;
1874 /* This works out the maximum size of segment we can send out */
1875 if (dst) {
1876 u32 mtu = dst_mtu(dst);
1877 mss_now = min_t(int, dn_mss_from_pmtu(dst->dev, mtu), mss_now);
1880 return mss_now;
1884 * N.B. We get the timeout wrong here, but then we always did get it
1885 * wrong before and this is another step along the road to correcting
1886 * it. It ought to get updated each time we pass through the routine,
 * but in practice it probably doesn't matter too much for now.
1889 static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk,
1890 unsigned long datalen, int noblock,
1891 int *errcode)
1893 struct sk_buff *skb = sock_alloc_send_skb(sk, datalen,
1894 noblock, errcode);
1895 if (skb) {
1896 skb->protocol = __constant_htons(ETH_P_DNA_RT);
1897 skb->pkt_type = PACKET_OUTGOING;
1899 return skb;
1902 static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
1903 struct msghdr *msg, size_t size)
1905 struct sock *sk = sock->sk;
1906 struct dn_scp *scp = DN_SK(sk);
1907 size_t mss;
1908 struct sk_buff_head *queue = &scp->data_xmit_queue;
1909 int flags = msg->msg_flags;
1910 int err = 0;
1911 size_t sent = 0;
1912 int addr_len = msg->msg_namelen;
1913 struct sockaddr_dn *addr = (struct sockaddr_dn *)msg->msg_name;
1914 struct sk_buff *skb = NULL;
1915 struct dn_skb_cb *cb;
1916 size_t len;
1917 unsigned char fctype;
1918 long timeo;
1920 if (flags & ~(MSG_TRYHARD|MSG_OOB|MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|MSG_MORE|MSG_CMSG_COMPAT))
1921 return -EOPNOTSUPP;
1923 if (addr_len && (addr_len != sizeof(struct sockaddr_dn)))
1924 return -EINVAL;
1926 lock_sock(sk);
1927 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1929 * The only difference between stream sockets and sequenced packet
1930 * sockets is that the stream sockets always behave as if MSG_EOR
1931 * has been set.
1933 if (sock->type == SOCK_STREAM) {
1934 if (flags & MSG_EOR) {
1935 err = -EINVAL;
1936 goto out;
1938 flags |= MSG_EOR;
1942 err = dn_check_state(sk, addr, addr_len, &timeo, flags);
1943 if (err)
1944 goto out_err;
1946 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1947 err = -EPIPE;
1948 if (!(flags & MSG_NOSIGNAL))
1949 send_sig(SIGPIPE, current, 0);
1950 goto out_err;
1953 if ((flags & MSG_TRYHARD) && sk->sk_dst_cache)
1954 dst_negative_advice(&sk->sk_dst_cache);
1956 mss = scp->segsize_rem;
1957 fctype = scp->services_rem & NSP_FC_MASK;
1959 mss = dn_current_mss(sk, flags);
1961 if (flags & MSG_OOB) {
1962 queue = &scp->other_xmit_queue;
1963 if (size > mss) {
1964 err = -EMSGSIZE;
1965 goto out;
1969 scp->persist_fxn = dn_nsp_xmit_timeout;
1971 while(sent < size) {
1972 err = sock_error(sk);
1973 if (err)
1974 goto out;
1976 if (signal_pending(current)) {
1977 err = sock_intr_errno(timeo);
1978 goto out;
1982 * Calculate size that we wish to send.
1984 len = size - sent;
1986 if (len > mss)
1987 len = mss;
1990 * Wait for queue size to go down below the window
1991 * size.
1993 if (dn_queue_too_long(scp, queue, flags)) {
1994 if (flags & MSG_DONTWAIT) {
1995 err = -EWOULDBLOCK;
1996 goto out;
1999 SOCK_SLEEP_PRE(sk)
2001 if (dn_queue_too_long(scp, queue, flags))
2002 schedule();
2004 SOCK_SLEEP_POST(sk)
2006 continue;
2010 * Get a suitably sized skb.
 * 64 is a bit of a hack really, but it's larger than any
2012 * link-layer headers and has served us well as a good
2013 * guess as to their real length.
2015 skb = dn_alloc_send_pskb(sk, len + 64 + DN_MAX_NSP_DATA_HEADER,
2016 flags & MSG_DONTWAIT, &err);
2018 if (err)
2019 break;
2021 if (!skb)
2022 continue;
2024 cb = DN_SKB_CB(skb);
2026 skb_reserve(skb, 64 + DN_MAX_NSP_DATA_HEADER);
2028 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
2029 err = -EFAULT;
2030 goto out;
2033 if (flags & MSG_OOB) {
2034 cb->nsp_flags = 0x30;
2035 if (fctype != NSP_FC_NONE)
2036 scp->flowrem_oth--;
2037 } else {
2038 cb->nsp_flags = 0x00;
2039 if (scp->seg_total == 0)
2040 cb->nsp_flags |= 0x20;
2042 scp->seg_total += len;
2044 if (((sent + len) == size) && (flags & MSG_EOR)) {
2045 cb->nsp_flags |= 0x40;
2046 scp->seg_total = 0;
2047 if (fctype == NSP_FC_SCMC)
2048 scp->flowrem_dat--;
2050 if (fctype == NSP_FC_SRC)
2051 scp->flowrem_dat--;
2054 sent += len;
2055 dn_nsp_queue_xmit(sk, skb, sk->sk_allocation, flags & MSG_OOB);
2056 skb = NULL;
2058 scp->persist = dn_nsp_persist(sk);
2061 out:
2063 if (skb)
2064 kfree_skb(skb);
2066 release_sock(sk);
2068 return sent ? sent : err;
2070 out_err:
2071 err = sk_stream_error(sk, flags, err);
2072 release_sock(sk);
2073 return err;
2076 static int dn_device_event(struct notifier_block *this, unsigned long event,
2077 void *ptr)
2079 struct net_device *dev = (struct net_device *)ptr;
2081 switch(event) {
2082 case NETDEV_UP:
2083 dn_dev_up(dev);
2084 break;
2085 case NETDEV_DOWN:
2086 dn_dev_down(dev);
2087 break;
2088 default:
2089 break;
2092 return NOTIFY_DONE;
2095 static struct notifier_block dn_dev_notifier = {
2096 .notifier_call = dn_device_event,
2099 extern int dn_route_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
2101 static struct packet_type dn_dix_packet_type = {
2102 .type = __constant_htons(ETH_P_DNA_RT),
2103 .dev = NULL, /* All devices */
2104 .func = dn_route_rcv,
2107 #ifdef CONFIG_PROC_FS
2108 struct dn_iter_state {
2109 int bucket;
2112 static struct sock *dn_socket_get_first(struct seq_file *seq)
2114 struct dn_iter_state *state = seq->private;
2115 struct sock *n = NULL;
2117 for(state->bucket = 0;
2118 state->bucket < DN_SK_HASH_SIZE;
2119 ++state->bucket) {
2120 n = sk_head(&dn_sk_hash[state->bucket]);
2121 if (n)
2122 break;
2125 return n;
2128 static struct sock *dn_socket_get_next(struct seq_file *seq,
2129 struct sock *n)
2131 struct dn_iter_state *state = seq->private;
2133 n = sk_next(n);
2134 try_again:
2135 if (n)
2136 goto out;
2137 if (++state->bucket >= DN_SK_HASH_SIZE)
2138 goto out;
2139 n = sk_head(&dn_sk_hash[state->bucket]);
2140 goto try_again;
2141 out:
2142 return n;
2145 static struct sock *socket_get_idx(struct seq_file *seq, loff_t *pos)
2147 struct sock *sk = dn_socket_get_first(seq);
2149 if (sk) {
2150 while(*pos && (sk = dn_socket_get_next(seq, sk)))
2151 --*pos;
2153 return *pos ? NULL : sk;
2156 static void *dn_socket_get_idx(struct seq_file *seq, loff_t pos)
2158 void *rc;
2159 read_lock_bh(&dn_hash_lock);
2160 rc = socket_get_idx(seq, &pos);
2161 if (!rc) {
2162 read_unlock_bh(&dn_hash_lock);
2164 return rc;
2167 static void *dn_socket_seq_start(struct seq_file *seq, loff_t *pos)
2169 return *pos ? dn_socket_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2172 static void *dn_socket_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2174 void *rc;
2176 if (v == SEQ_START_TOKEN) {
2177 rc = dn_socket_get_idx(seq, 0);
2178 goto out;
2181 rc = dn_socket_get_next(seq, v);
2182 if (rc)
2183 goto out;
2184 read_unlock_bh(&dn_hash_lock);
2185 out:
2186 ++*pos;
2187 return rc;
2190 static void dn_socket_seq_stop(struct seq_file *seq, void *v)
2192 if (v && v != SEQ_START_TOKEN)
2193 read_unlock_bh(&dn_hash_lock);
2196 #define IS_NOT_PRINTABLE(x) ((x) < 32 || (x) > 126)
2198 static void dn_printable_object(struct sockaddr_dn *dn, unsigned char *buf)
2200 int i;
2202 switch (dn_ntohs(dn->sdn_objnamel)) {
2203 case 0:
2204 sprintf(buf, "%d", dn->sdn_objnum);
2205 break;
2206 default:
2207 for (i = 0; i < dn_ntohs(dn->sdn_objnamel); i++) {
2208 buf[i] = dn->sdn_objname[i];
2209 if (IS_NOT_PRINTABLE(buf[i]))
2210 buf[i] = '.';
2212 buf[i] = 0;
2216 static char *dn_state2asc(unsigned char state)
2218 switch(state) {
2219 case DN_O:
2220 return "OPEN";
2221 case DN_CR:
2222 return " CR";
2223 case DN_DR:
2224 return " DR";
2225 case DN_DRC:
2226 return " DRC";
2227 case DN_CC:
2228 return " CC";
2229 case DN_CI:
2230 return " CI";
2231 case DN_NR:
2232 return " NR";
2233 case DN_NC:
2234 return " NC";
2235 case DN_CD:
2236 return " CD";
2237 case DN_RJ:
2238 return " RJ";
2239 case DN_RUN:
2240 return " RUN";
2241 case DN_DI:
2242 return " DI";
2243 case DN_DIC:
2244 return " DIC";
2245 case DN_DN:
2246 return " DN";
2247 case DN_CL:
2248 return " CL";
2249 case DN_CN:
2250 return " CN";
2253 return "????";
2256 static inline void dn_socket_format_entry(struct seq_file *seq, struct sock *sk)
2258 struct dn_scp *scp = DN_SK(sk);
2259 char buf1[DN_ASCBUF_LEN];
2260 char buf2[DN_ASCBUF_LEN];
2261 char local_object[DN_MAXOBJL+3];
2262 char remote_object[DN_MAXOBJL+3];
2264 dn_printable_object(&scp->addr, local_object);
2265 dn_printable_object(&scp->peer, remote_object);
2267 seq_printf(seq,
2268 "%6s/%04X %04d:%04d %04d:%04d %01d %-16s "
2269 "%6s/%04X %04d:%04d %04d:%04d %01d %-16s %4s %s\n",
2270 dn_addr2asc(dn_ntohs(dn_saddr2dn(&scp->addr)), buf1),
2271 scp->addrloc,
2272 scp->numdat,
2273 scp->numoth,
2274 scp->ackxmt_dat,
2275 scp->ackxmt_oth,
2276 scp->flowloc_sw,
2277 local_object,
2278 dn_addr2asc(dn_ntohs(dn_saddr2dn(&scp->peer)), buf2),
2279 scp->addrrem,
2280 scp->numdat_rcv,
2281 scp->numoth_rcv,
2282 scp->ackrcv_dat,
2283 scp->ackrcv_oth,
2284 scp->flowrem_sw,
2285 remote_object,
2286 dn_state2asc(scp->state),
2287 ((scp->accept_mode == ACC_IMMED) ? "IMMED" : "DEFER"));
2290 static int dn_socket_seq_show(struct seq_file *seq, void *v)
2292 if (v == SEQ_START_TOKEN) {
2293 seq_puts(seq, "Local Remote\n");
2294 } else {
2295 dn_socket_format_entry(seq, v);
2297 return 0;
2300 static struct seq_operations dn_socket_seq_ops = {
2301 .start = dn_socket_seq_start,
2302 .next = dn_socket_seq_next,
2303 .stop = dn_socket_seq_stop,
2304 .show = dn_socket_seq_show,
2307 static int dn_socket_seq_open(struct inode *inode, struct file *file)
2309 struct seq_file *seq;
2310 int rc = -ENOMEM;
2311 struct dn_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
2313 if (!s)
2314 goto out;
2316 rc = seq_open(file, &dn_socket_seq_ops);
2317 if (rc)
2318 goto out_kfree;
2320 seq = file->private_data;
2321 seq->private = s;
2322 memset(s, 0, sizeof(*s));
2323 out:
2324 return rc;
2325 out_kfree:
2326 kfree(s);
2327 goto out;
2330 static struct file_operations dn_socket_seq_fops = {
2331 .owner = THIS_MODULE,
2332 .open = dn_socket_seq_open,
2333 .read = seq_read,
2334 .llseek = seq_lseek,
2335 .release = seq_release_private,
2337 #endif
2339 static struct net_proto_family dn_family_ops = {
2340 .family = AF_DECnet,
2341 .create = dn_create,
2342 .owner = THIS_MODULE,
2345 static const struct proto_ops dn_proto_ops = {
2346 .family = AF_DECnet,
2347 .owner = THIS_MODULE,
2348 .release = dn_release,
2349 .bind = dn_bind,
2350 .connect = dn_connect,
2351 .socketpair = sock_no_socketpair,
2352 .accept = dn_accept,
2353 .getname = dn_getname,
2354 .poll = dn_poll,
2355 .ioctl = dn_ioctl,
2356 .listen = dn_listen,
2357 .shutdown = dn_shutdown,
2358 .setsockopt = dn_setsockopt,
2359 .getsockopt = dn_getsockopt,
2360 .sendmsg = dn_sendmsg,
2361 .recvmsg = dn_recvmsg,
2362 .mmap = sock_no_mmap,
2363 .sendpage = sock_no_sendpage,
2366 void dn_register_sysctl(void);
2367 void dn_unregister_sysctl(void);
2369 MODULE_DESCRIPTION("The Linux DECnet Network Protocol");
2370 MODULE_AUTHOR("Linux DECnet Project Team");
2371 MODULE_LICENSE("GPL");
2372 MODULE_ALIAS_NETPROTO(PF_DECnet);
2374 static char banner[] __initdata = KERN_INFO "NET4: DECnet for Linux: V.2.5.68s (C) 1995-2003 Linux DECnet Project Team\n";
2376 static int __init decnet_init(void)
2378 int rc;
2380 printk(banner);
2382 rc = proto_register(&dn_proto, 1);
2383 if (rc != 0)
2384 goto out;
2386 dn_neigh_init();
2387 dn_dev_init();
2388 dn_route_init();
2389 dn_fib_init();
2391 sock_register(&dn_family_ops);
2392 dev_add_pack(&dn_dix_packet_type);
2393 register_netdevice_notifier(&dn_dev_notifier);
2395 proc_net_fops_create("decnet", S_IRUGO, &dn_socket_seq_fops);
2396 dn_register_sysctl();
2397 out:
2398 return rc;
2401 module_init(decnet_init);
2404 * Prevent DECnet module unloading until its fixed properly.
2405 * Requires an audit of the code to check for memory leaks and
2406 * initialisation problems etc.
2408 #if 0
2409 static void __exit decnet_exit(void)
2411 sock_unregister(AF_DECnet);
2412 dev_remove_pack(&dn_dix_packet_type);
2414 dn_unregister_sysctl();
2416 unregister_netdevice_notifier(&dn_dev_notifier);
2418 dn_route_cleanup();
2419 dn_dev_cleanup();
2420 dn_neigh_cleanup();
2421 dn_fib_cleanup();
2423 proc_net_remove("decnet");
2425 proto_unregister(&dn_proto);
2427 module_exit(decnet_exit);
2428 #endif