net/decnet/af_decnet.c
2 /*
3 * DECnet An implementation of the DECnet protocol suite for the LINUX
4 * operating system. DECnet is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
7 * DECnet Socket Layer Interface
9 * Authors: Eduardo Marcelo Serrat <emserrat@geocities.com>
10 * Patrick Caulfield <patrick@pandh.demon.co.uk>
12 * Changes:
13 * Steve Whitehouse: Copied from Eduardo Serrat and Patrick Caulfield's
14 * version of the code. Original copyright preserved
15 * below.
16 * Steve Whitehouse: Some bug fixes, cleaning up some code to make it
17 * compatible with my routing layer.
18 * Steve Whitehouse: Merging changes from Eduardo Serrat and Patrick
19 * Caulfield.
20 * Steve Whitehouse: Further bug fixes, checking module code still works
21 * with new routing layer.
22 * Steve Whitehouse: Additional set/get_sockopt() calls.
23 * Steve Whitehouse: Fixed TIOCINQ ioctl to be same as Eduardo's new
24 * code.
25 * Steve Whitehouse: recvmsg() changed to try to behave in a POSIX-like
26 * way. Didn't manage it entirely, but it's better.
27 * Steve Whitehouse: ditto for sendmsg().
28 * Steve Whitehouse: A selection of bug fixes to various things.
29 * Steve Whitehouse: Added TIOCOUTQ ioctl.
30 * Steve Whitehouse: Fixes to username2sockaddr & sockaddr2username.
31 * Steve Whitehouse: Fixes to connect() error returns.
32 * Patrick Caulfield: Fixes to delayed acceptance logic.
33 * David S. Miller: New socket locking
34 * Steve Whitehouse: Socket list hashing/locking
35 * Arnaldo C. Melo: use capable, not suser
36 * Steve Whitehouse: Removed unused code. Fix to use sk->allocation
37 * when required.
38 * Patrick Caulfield: /proc/net/decnet now has object name/number
39 * Steve Whitehouse: Fixed local port allocation, hashed sk list
40 * Matthew Wilcox: Fixes for dn_ioctl()
41 * Steve Whitehouse: New connect/accept logic to allow timeouts and
42 * prepare for sendpage etc.
46 /******************************************************************************
47 (c) 1995-1998 E.M. Serrat emserrat@geocities.com
49 This program is free software; you can redistribute it and/or modify
50 it under the terms of the GNU General Public License as published by
51 the Free Software Foundation; either version 2 of the License, or
52 any later version.
54 This program is distributed in the hope that it will be useful,
55 but WITHOUT ANY WARRANTY; without even the implied warranty of
56 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
57 GNU General Public License for more details.
59 HISTORY:
61 Version Kernel Date Author/Comments
62 ------- ------ ---- ---------------
63 Version 0.0.1 2.0.30 01-dic-97 Eduardo Marcelo Serrat
64 (emserrat@geocities.com)
66 First Development of DECnet Socket La-
67 yer for Linux. Only supports outgoing
68 connections.
70 Version 0.0.2 2.1.105 20-jun-98 Patrick J. Caulfield
71 (patrick@pandh.demon.co.uk)
73 Port to new kernel development version.
75 Version 0.0.3 2.1.106 25-jun-98 Eduardo Marcelo Serrat
76 (emserrat@geocities.com)
78 Added support for incoming connections
79 so we can start developing server apps
80 on Linux.
82 Module Support
83 Version 0.0.4 2.1.109 21-jul-98 Eduardo Marcelo Serrat
84 (emserrat@geocities.com)
86 Added support for X11R6.4. Now we can
87 use DECnet transport for X on Linux!!!
89 Version 0.0.5 2.1.110 01-aug-98 Eduardo Marcelo Serrat
90 (emserrat@geocities.com)
91 Removed bugs on flow control
92 Removed bugs on incoming accessdata
93 order
95 Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat
96 dn_recvmsg fixes
98 Patrick J. Caulfield
99 dn_bind fixes
100 *******************************************************************************/
102 #include <linux/module.h>
103 #include <linux/errno.h>
104 #include <linux/types.h>
105 #include <linux/slab.h>
106 #include <linux/socket.h>
107 #include <linux/in.h>
108 #include <linux/kernel.h>
109 #include <linux/sched.h>
110 #include <linux/timer.h>
111 #include <linux/string.h>
112 #include <linux/sockios.h>
113 #include <linux/net.h>
114 #include <linux/netdevice.h>
115 #include <linux/inet.h>
116 #include <linux/route.h>
117 #include <linux/netfilter.h>
118 #include <linux/seq_file.h>
119 #include <net/sock.h>
120 #include <net/tcp_states.h>
121 #include <net/flow.h>
122 #include <asm/system.h>
123 #include <asm/ioctls.h>
124 #include <linux/capability.h>
125 #include <linux/mm.h>
126 #include <linux/interrupt.h>
127 #include <linux/proc_fs.h>
128 #include <linux/stat.h>
129 #include <linux/init.h>
130 #include <linux/poll.h>
131 #include <net/neighbour.h>
132 #include <net/dst.h>
133 #include <net/fib_rules.h>
134 #include <net/dn.h>
135 #include <net/dn_nsp.h>
136 #include <net/dn_dev.h>
137 #include <net/dn_route.h>
138 #include <net/dn_fib.h>
139 #include <net/dn_neigh.h>
141 struct dn_sock {
142 struct sock sk;
143 struct dn_scp scp;
146 static void dn_keepalive(struct sock *sk);
148 #define DN_SK_HASH_SHIFT 8
149 #define DN_SK_HASH_SIZE (1 << DN_SK_HASH_SHIFT)
150 #define DN_SK_HASH_MASK (DN_SK_HASH_SIZE - 1)
153 static const struct proto_ops dn_proto_ops;
154 static DEFINE_RWLOCK(dn_hash_lock);
155 static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
156 static struct hlist_head dn_wild_sk;
157 static atomic_t decnet_memory_allocated;
159 static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen, int flags);
160 static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags);
162 static struct hlist_head *dn_find_list(struct sock *sk)
164 struct dn_scp *scp = DN_SK(sk);
166 if (scp->addr.sdn_flags & SDF_WILD)
167 return hlist_empty(&dn_wild_sk) ? &dn_wild_sk : NULL;
169 return &dn_sk_hash[scp->addrloc & DN_SK_HASH_MASK];
173 * Valid ports are those greater than zero and not already in use.
175 static int check_port(__le16 port)
177 struct sock *sk;
178 struct hlist_node *node;
180 if (port == 0)
181 return -1;
183 sk_for_each(sk, node, &dn_sk_hash[port & DN_SK_HASH_MASK]) {
184 struct dn_scp *scp = DN_SK(sk);
185 if (scp->addrloc == port)
186 return -1;
188 return 0;
191 static unsigned short port_alloc(struct sock *sk)
193 struct dn_scp *scp = DN_SK(sk);
194 static unsigned short port = 0x2000;
195 unsigned short i_port = port;
197 while(check_port(++port) != 0) {
198 if (port == i_port)
199 return 0;
202 scp->addrloc = port;
204 return 1;
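/*
 * Note on the two helpers above (a summary of the code, not of any
 * spec): local port numbers come from a single rotating counter
 * starting at 0x2000. port_alloc() keeps advancing it until
 * check_port() reports a free, non-zero value, and only gives up if
 * the counter wraps all the way back to where it started.
 */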
208 * Since this is only ever called from user
209 * level, we don't need a write_lock() version
210 * of this.
212 static int dn_hash_sock(struct sock *sk)
214 struct dn_scp *scp = DN_SK(sk);
215 struct hlist_head *list;
216 int rv = -EUSERS;
218 BUG_ON(sk_hashed(sk));
220 write_lock_bh(&dn_hash_lock);
222 if (!scp->addrloc && !port_alloc(sk))
223 goto out;
225 rv = -EADDRINUSE;
226 if ((list = dn_find_list(sk)) == NULL)
227 goto out;
229 sk_add_node(sk, list);
230 rv = 0;
231 out:
232 write_unlock_bh(&dn_hash_lock);
233 return rv;
236 static void dn_unhash_sock(struct sock *sk)
238 write_lock(&dn_hash_lock);
239 sk_del_node_init(sk);
240 write_unlock(&dn_hash_lock);
243 static void dn_unhash_sock_bh(struct sock *sk)
245 write_lock_bh(&dn_hash_lock);
246 sk_del_node_init(sk);
247 write_unlock_bh(&dn_hash_lock);
250 static struct hlist_head *listen_hash(struct sockaddr_dn *addr)
252 int i;
253 unsigned hash = addr->sdn_objnum;
255 if (hash == 0) {
256 hash = addr->sdn_objnamel;
257 for(i = 0; i < dn_ntohs(addr->sdn_objnamel); i++) {
258 hash ^= addr->sdn_objname[i];
259 hash ^= (hash << 3);
263 return &dn_sk_hash[hash & DN_SK_HASH_MASK];
267 * Called to transform a socket from bound (i.e. with a local address)
268 * into a listening socket (doesn't need a local port number) and rehashes
269 * based upon the object name/number.
271 static void dn_rehash_sock(struct sock *sk)
273 struct hlist_head *list;
274 struct dn_scp *scp = DN_SK(sk);
276 if (scp->addr.sdn_flags & SDF_WILD)
277 return;
279 write_lock_bh(&dn_hash_lock);
280 sk_del_node_init(sk);
281 DN_SK(sk)->addrloc = 0;
282 list = listen_hash(&DN_SK(sk)->addr);
283 sk_add_node(sk, list);
284 write_unlock_bh(&dn_hash_lock);
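/*
 * To summarise the hashing scheme used above: bound sockets hash on
 * their local port number, wildcard sockets live on their own
 * single-entry list, and listening sockets are rehashed by object
 * number, or by a simple xor/shift of the object name when no object
 * number is set.
 */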
287 int dn_sockaddr2username(struct sockaddr_dn *sdn, unsigned char *buf, unsigned char type)
289 int len = 2;
291 *buf++ = type;
293 switch(type) {
294 case 0:
295 *buf++ = sdn->sdn_objnum;
296 break;
297 case 1:
298 *buf++ = 0;
299 *buf++ = dn_ntohs(sdn->sdn_objnamel);
300 memcpy(buf, sdn->sdn_objname, dn_ntohs(sdn->sdn_objnamel));
301 len = 3 + dn_ntohs(sdn->sdn_objnamel);
302 break;
303 case 2:
304 memset(buf, 0, 5);
305 buf += 5;
306 *buf++ = dn_ntohs(sdn->sdn_objnamel);
307 memcpy(buf, sdn->sdn_objname, dn_ntohs(sdn->sdn_objnamel));
308 len = 7 + dn_ntohs(sdn->sdn_objnamel);
309 break;
312 return len;
316 * On reception of usernames, we handle types 1 and 0 for destination
317 * addresses only. Types 2 and 4 are used for source addresses, but the
318 * UIC, GIC are ignored and they are both treated the same way. Type 3
319 * is never used as I've no idea what its purpose might be or what its
320 * format is.
322 int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *sdn, unsigned char *fmt)
324 unsigned char type;
325 int size = len;
326 int namel = 12;
328 sdn->sdn_objnum = 0;
329 sdn->sdn_objnamel = dn_htons(0);
330 memset(sdn->sdn_objname, 0, DN_MAXOBJL);
332 if (len < 2)
333 return -1;
335 len -= 2;
336 *fmt = *data++;
337 type = *data++;
339 switch(*fmt) {
340 case 0:
341 sdn->sdn_objnum = type;
342 return 2;
343 case 1:
344 namel = 16;
345 break;
346 case 2:
347 len -= 4;
348 data += 4;
349 break;
350 case 4:
351 len -= 8;
352 data += 8;
353 break;
354 default:
355 return -1;
358 len -= 1;
360 if (len < 0)
361 return -1;
363 sdn->sdn_objnamel = dn_htons(*data++);
364 len -= dn_ntohs(sdn->sdn_objnamel);
366 if ((len < 0) || (dn_ntohs(sdn->sdn_objnamel) > namel))
367 return -1;
369 memcpy(sdn->sdn_objname, data, dn_ntohs(sdn->sdn_objnamel));
371 return size - len;
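/*
 * A rough sketch of the "username" layouts as handled by the two
 * functions above (read from the code, so treat it as illustrative
 * rather than normative):
 *
 *   format 0:  [fmt=0] [objnum]                              2 bytes
 *   format 1:  [fmt=1] [0x00] [namel] [objname ...]          3 + namel bytes
 *   format 2:  [fmt=2] [5 bytes, zeroed on transmit and
 *              ignored on receive] [namel] [objname ...]     7 + namel bytes
 *   format 4:  [fmt=4] [9 ignored bytes (UIC/GIC)] [namel]
 *              [objname ...]                                11 + namel bytes
 */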
374 struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr)
376 struct hlist_head *list = listen_hash(addr);
377 struct hlist_node *node;
378 struct sock *sk;
380 read_lock(&dn_hash_lock);
381 sk_for_each(sk, node, list) {
382 struct dn_scp *scp = DN_SK(sk);
383 if (sk->sk_state != TCP_LISTEN)
384 continue;
385 if (scp->addr.sdn_objnum) {
386 if (scp->addr.sdn_objnum != addr->sdn_objnum)
387 continue;
388 } else {
389 if (addr->sdn_objnum)
390 continue;
391 if (scp->addr.sdn_objnamel != addr->sdn_objnamel)
392 continue;
393 if (memcmp(scp->addr.sdn_objname, addr->sdn_objname, dn_ntohs(addr->sdn_objnamel)) != 0)
394 continue;
396 sock_hold(sk);
397 read_unlock(&dn_hash_lock);
398 return sk;
401 sk = sk_head(&dn_wild_sk);
402 if (sk) {
403 if (sk->sk_state == TCP_LISTEN)
404 sock_hold(sk);
405 else
406 sk = NULL;
409 read_unlock(&dn_hash_lock);
410 return sk;
413 struct sock *dn_find_by_skb(struct sk_buff *skb)
415 struct dn_skb_cb *cb = DN_SKB_CB(skb);
416 struct sock *sk;
417 struct hlist_node *node;
418 struct dn_scp *scp;
420 read_lock(&dn_hash_lock);
421 sk_for_each(sk, node, &dn_sk_hash[cb->dst_port & DN_SK_HASH_MASK]) {
422 scp = DN_SK(sk);
423 if (cb->src != dn_saddr2dn(&scp->peer))
424 continue;
425 if (cb->dst_port != scp->addrloc)
426 continue;
427 if (scp->addrrem && (cb->src_port != scp->addrrem))
428 continue;
429 sock_hold(sk);
430 goto found;
432 sk = NULL;
433 found:
434 read_unlock(&dn_hash_lock);
435 return sk;
440 static void dn_destruct(struct sock *sk)
442 struct dn_scp *scp = DN_SK(sk);
444 skb_queue_purge(&scp->data_xmit_queue);
445 skb_queue_purge(&scp->other_xmit_queue);
446 skb_queue_purge(&scp->other_receive_queue);
448 dst_release(xchg(&sk->sk_dst_cache, NULL));
451 static int dn_memory_pressure;
453 static void dn_enter_memory_pressure(void)
455 if (!dn_memory_pressure) {
456 dn_memory_pressure = 1;
460 static struct proto dn_proto = {
461 .name = "NSP",
462 .owner = THIS_MODULE,
463 .enter_memory_pressure = dn_enter_memory_pressure,
464 .memory_pressure = &dn_memory_pressure,
465 .memory_allocated = &decnet_memory_allocated,
466 .sysctl_mem = sysctl_decnet_mem,
467 .sysctl_wmem = sysctl_decnet_wmem,
468 .sysctl_rmem = sysctl_decnet_rmem,
469 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
470 .obj_size = sizeof(struct dn_sock),
473 static struct sock *dn_alloc_sock(struct socket *sock, gfp_t gfp)
475 struct dn_scp *scp;
476 struct sock *sk = sk_alloc(PF_DECnet, gfp, &dn_proto, 1);
478 if (!sk)
479 goto out;
481 if (sock)
482 sock->ops = &dn_proto_ops;
483 sock_init_data(sock, sk);
485 sk->sk_backlog_rcv = dn_nsp_backlog_rcv;
486 sk->sk_destruct = dn_destruct;
487 sk->sk_no_check = 1;
488 sk->sk_family = PF_DECnet;
489 sk->sk_protocol = 0;
490 sk->sk_allocation = gfp;
491 sk->sk_sndbuf = sysctl_decnet_wmem[1];
492 sk->sk_rcvbuf = sysctl_decnet_rmem[1];
494 /* Initialization of DECnet Session Control Port */
495 scp = DN_SK(sk);
496 scp->state = DN_O; /* Open */
497 scp->numdat = 1; /* Next data seg to tx */
498 scp->numoth = 1; /* Next oth data to tx */
499 scp->ackxmt_dat = 0; /* Last data seg ack'ed */
500 scp->ackxmt_oth = 0; /* Last oth data ack'ed */
501 scp->ackrcv_dat = 0; /* Highest data ack recv*/
502 scp->ackrcv_oth = 0; /* Last oth data ack rec*/
503 scp->flowrem_sw = DN_SEND;
504 scp->flowloc_sw = DN_SEND;
505 scp->flowrem_dat = 0;
506 scp->flowrem_oth = 1;
507 scp->flowloc_dat = 0;
508 scp->flowloc_oth = 1;
509 scp->services_rem = 0;
510 scp->services_loc = 1 | NSP_FC_NONE;
511 scp->info_rem = 0;
512 scp->info_loc = 0x03; /* NSP version 4.1 */
513 scp->segsize_rem = 230 - DN_MAX_NSP_DATA_HEADER; /* Default: Updated by remote segsize */
514 scp->nonagle = 0;
515 scp->multi_ireq = 1;
516 scp->accept_mode = ACC_IMMED;
517 scp->addr.sdn_family = AF_DECnet;
518 scp->peer.sdn_family = AF_DECnet;
519 scp->accessdata.acc_accl = 5;
520 memcpy(scp->accessdata.acc_acc, "LINUX", 5);
522 scp->max_window = NSP_MAX_WINDOW;
523 scp->snd_window = NSP_MIN_WINDOW;
524 scp->nsp_srtt = NSP_INITIAL_SRTT;
525 scp->nsp_rttvar = NSP_INITIAL_RTTVAR;
526 scp->nsp_rxtshift = 0;
528 skb_queue_head_init(&scp->data_xmit_queue);
529 skb_queue_head_init(&scp->other_xmit_queue);
530 skb_queue_head_init(&scp->other_receive_queue);
532 scp->persist = 0;
533 scp->persist_fxn = NULL;
534 scp->keepalive = 10 * HZ;
535 scp->keepalive_fxn = dn_keepalive;
537 init_timer(&scp->delack_timer);
538 scp->delack_pending = 0;
539 scp->delack_fxn = dn_nsp_delayed_ack;
541 dn_start_slow_timer(sk);
542 out:
543 return sk;
547 * Keepalive timer.
548 * FIXME: Should respond to SO_KEEPALIVE etc.
550 static void dn_keepalive(struct sock *sk)
552 struct dn_scp *scp = DN_SK(sk);
555 * By checking that the other_data transmit queue is empty
556 * we are double-checking that we are not sending too
557 * many of these keepalive frames.
559 if (skb_queue_empty(&scp->other_xmit_queue))
560 dn_nsp_send_link(sk, DN_NOCHANGE, 0);
565 * Timer for shutdown/destroyed sockets.
566 * When socket is dead & no packets have been sent for a
567 * certain amount of time, they are removed by this
568 * routine. Also takes care of sending out DI & DC
569 * frames at correct times.
571 int dn_destroy_timer(struct sock *sk)
573 struct dn_scp *scp = DN_SK(sk);
575 scp->persist = dn_nsp_persist(sk);
577 switch(scp->state) {
578 case DN_DI:
579 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
580 if (scp->nsp_rxtshift >= decnet_di_count)
581 scp->state = DN_CN;
582 return 0;
584 case DN_DR:
585 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
586 if (scp->nsp_rxtshift >= decnet_dr_count)
587 scp->state = DN_DRC;
588 return 0;
590 case DN_DN:
591 if (scp->nsp_rxtshift < decnet_dn_count) {
592 /* printk(KERN_DEBUG "dn_destroy_timer: DN\n"); */
593 dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, GFP_ATOMIC);
594 return 0;
598 scp->persist = (HZ * decnet_time_wait);
600 if (sk->sk_socket)
601 return 0;
603 if ((jiffies - scp->stamp) >= (HZ * decnet_time_wait)) {
604 dn_unhash_sock(sk);
605 sock_put(sk);
606 return 1;
609 return 0;
612 static void dn_destroy_sock(struct sock *sk)
614 struct dn_scp *scp = DN_SK(sk);
616 scp->nsp_rxtshift = 0; /* reset back off */
618 if (sk->sk_socket) {
619 if (sk->sk_socket->state != SS_UNCONNECTED)
620 sk->sk_socket->state = SS_DISCONNECTING;
623 sk->sk_state = TCP_CLOSE;
625 switch(scp->state) {
626 case DN_DN:
627 dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
628 sk->sk_allocation);
629 scp->persist_fxn = dn_destroy_timer;
630 scp->persist = dn_nsp_persist(sk);
631 break;
632 case DN_CR:
633 scp->state = DN_DR;
634 goto disc_reject;
635 case DN_RUN:
636 scp->state = DN_DI;
637 case DN_DI:
638 case DN_DR:
639 disc_reject:
640 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation);
641 case DN_NC:
642 case DN_NR:
643 case DN_RJ:
644 case DN_DIC:
645 case DN_CN:
646 case DN_DRC:
647 case DN_CI:
648 case DN_CD:
649 scp->persist_fxn = dn_destroy_timer;
650 scp->persist = dn_nsp_persist(sk);
651 break;
652 default:
653 printk(KERN_DEBUG "DECnet: dn_destroy_sock passed socket in invalid state\n");
654 case DN_O:
655 dn_stop_slow_timer(sk);
657 dn_unhash_sock_bh(sk);
658 sock_put(sk);
660 break;
664 char *dn_addr2asc(__u16 addr, char *buf)
666 unsigned short node, area;
668 node = addr & 0x03ff;
669 area = addr >> 10;
670 sprintf(buf, "%hd.%hd", area, node);
672 return buf;
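/*
 * For reference: a DECnet address packs the area number into the top
 * six bits and the node number into the low ten, so for example the
 * host-order value 0x0402 is printed by dn_addr2asc() as "1.2".
 */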
677 static int dn_create(struct socket *sock, int protocol)
679 struct sock *sk;
681 switch(sock->type) {
682 case SOCK_SEQPACKET:
683 if (protocol != DNPROTO_NSP)
684 return -EPROTONOSUPPORT;
685 break;
686 case SOCK_STREAM:
687 break;
688 default:
689 return -ESOCKTNOSUPPORT;
693 if ((sk = dn_alloc_sock(sock, GFP_KERNEL)) == NULL)
694 return -ENOBUFS;
696 sk->sk_protocol = protocol;
698 return 0;
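/*
 * For orientation, a minimal user-level sketch of creating a DECnet
 * socket that dn_create() above would accept (assuming the usual
 * userspace definitions of AF_DECnet and DNPROTO_NSP from
 * <sys/socket.h> and <linux/dn.h>; this is illustrative only and not
 * part of the kernel code):
 *
 *	#include <sys/socket.h>
 *	#include <linux/dn.h>
 *
 *	int fd = socket(AF_DECnet, SOCK_SEQPACKET, DNPROTO_NSP);
 *	if (fd < 0)
 *		perror("socket");
 *
 * SOCK_STREAM is also accepted; any other type returns
 * -ESOCKTNOSUPPORT, and SOCK_SEQPACKET with a protocol other than
 * DNPROTO_NSP returns -EPROTONOSUPPORT.
 */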
702 static int
703 dn_release(struct socket *sock)
705 struct sock *sk = sock->sk;
707 if (sk) {
708 sock_orphan(sk);
709 sock_hold(sk);
710 lock_sock(sk);
711 dn_destroy_sock(sk);
712 release_sock(sk);
713 sock_put(sk);
716 return 0;
719 static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
721 struct sock *sk = sock->sk;
722 struct dn_scp *scp = DN_SK(sk);
723 struct sockaddr_dn *saddr = (struct sockaddr_dn *)uaddr;
724 struct net_device *dev;
725 int rv;
727 if (addr_len != sizeof(struct sockaddr_dn))
728 return -EINVAL;
730 if (saddr->sdn_family != AF_DECnet)
731 return -EINVAL;
733 if (dn_ntohs(saddr->sdn_nodeaddrl) && (dn_ntohs(saddr->sdn_nodeaddrl) != 2))
734 return -EINVAL;
736 if (dn_ntohs(saddr->sdn_objnamel) > DN_MAXOBJL)
737 return -EINVAL;
739 if (saddr->sdn_flags & ~SDF_WILD)
740 return -EINVAL;
742 if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum ||
743 (saddr->sdn_flags & SDF_WILD)))
744 return -EACCES;
746 if (!(saddr->sdn_flags & SDF_WILD)) {
747 if (dn_ntohs(saddr->sdn_nodeaddrl)) {
748 read_lock(&dev_base_lock);
749 for(dev = dev_base; dev; dev = dev->next) {
750 if (!dev->dn_ptr)
751 continue;
752 if (dn_dev_islocal(dev, dn_saddr2dn(saddr)))
753 break;
755 read_unlock(&dev_base_lock);
756 if (dev == NULL)
757 return -EADDRNOTAVAIL;
761 rv = -EINVAL;
762 lock_sock(sk);
763 if (sock_flag(sk, SOCK_ZAPPED)) {
764 memcpy(&scp->addr, saddr, addr_len);
765 sock_reset_flag(sk, SOCK_ZAPPED);
767 rv = dn_hash_sock(sk);
768 if (rv)
769 sock_set_flag(sk, SOCK_ZAPPED);
771 release_sock(sk);
773 return rv;
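/*
 * A hedged user-level sketch of the bind() call that the checks above
 * are aimed at: binding to a named object (object number 0, no flags),
 * which needs no special capability. Field names follow struct
 * sockaddr_dn as used in this file, "fd" is a DECnet socket as in the
 * earlier sketch, and the snippet is illustrative only:
 *
 *	struct sockaddr_dn sdn;
 *
 *	memset(&sdn, 0, sizeof(sdn));
 *	sdn.sdn_family   = AF_DECnet;
 *	sdn.sdn_objnum   = 0;
 *	sdn.sdn_objnamel = 5;	   // name length; stored little-endian,
 *				   // cf. the dn_htons() helpers above
 *	memcpy(sdn.sdn_objname, "MYOBJ", 5);
 *	if (bind(fd, (struct sockaddr *)&sdn, sizeof(sdn)) < 0)
 *		perror("bind");
 */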
777 static int dn_auto_bind(struct socket *sock)
779 struct sock *sk = sock->sk;
780 struct dn_scp *scp = DN_SK(sk);
781 int rv;
783 sock_reset_flag(sk, SOCK_ZAPPED);
785 scp->addr.sdn_flags = 0;
786 scp->addr.sdn_objnum = 0;
789 * This stuff is to keep compatibility with Eduardo's
790 * patch. I hope I can dispense with it shortly...
792 if ((scp->accessdata.acc_accl != 0) &&
793 (scp->accessdata.acc_accl <= 12)) {
795 scp->addr.sdn_objnamel = dn_htons(scp->accessdata.acc_accl);
796 memcpy(scp->addr.sdn_objname, scp->accessdata.acc_acc, dn_ntohs(scp->addr.sdn_objnamel));
798 scp->accessdata.acc_accl = 0;
799 memset(scp->accessdata.acc_acc, 0, 40);
801 /* End of compatibility stuff */
803 scp->addr.sdn_add.a_len = dn_htons(2);
804 rv = dn_dev_bind_default((__le16 *)scp->addr.sdn_add.a_addr);
805 if (rv == 0) {
806 rv = dn_hash_sock(sk);
807 if (rv)
808 sock_set_flag(sk, SOCK_ZAPPED);
811 return rv;
814 static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
816 struct dn_scp *scp = DN_SK(sk);
817 DEFINE_WAIT(wait);
818 int err;
820 if (scp->state != DN_CR)
821 return -EINVAL;
823 scp->state = DN_CC;
824 scp->segsize_loc = dst_metric(__sk_dst_get(sk), RTAX_ADVMSS);
825 dn_send_conn_conf(sk, allocation);
827 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
828 for(;;) {
829 release_sock(sk);
830 if (scp->state == DN_CC)
831 *timeo = schedule_timeout(*timeo);
832 lock_sock(sk);
833 err = 0;
834 if (scp->state == DN_RUN)
835 break;
836 err = sock_error(sk);
837 if (err)
838 break;
839 err = sock_intr_errno(*timeo);
840 if (signal_pending(current))
841 break;
842 err = -EAGAIN;
843 if (!*timeo)
844 break;
845 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
847 finish_wait(sk->sk_sleep, &wait);
848 if (err == 0) {
849 sk->sk_socket->state = SS_CONNECTED;
850 } else if (scp->state != DN_CC) {
851 sk->sk_socket->state = SS_UNCONNECTED;
853 return err;
856 static int dn_wait_run(struct sock *sk, long *timeo)
858 struct dn_scp *scp = DN_SK(sk);
859 DEFINE_WAIT(wait);
860 int err = 0;
862 if (scp->state == DN_RUN)
863 goto out;
865 if (!*timeo)
866 return -EALREADY;
868 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
869 for(;;) {
870 release_sock(sk);
871 if (scp->state == DN_CI || scp->state == DN_CC)
872 *timeo = schedule_timeout(*timeo);
873 lock_sock(sk);
874 err = 0;
875 if (scp->state == DN_RUN)
876 break;
877 err = sock_error(sk);
878 if (err)
879 break;
880 err = sock_intr_errno(*timeo);
881 if (signal_pending(current))
882 break;
883 err = -ETIMEDOUT;
884 if (!*timeo)
885 break;
886 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
888 finish_wait(sk->sk_sleep, &wait);
889 out:
890 if (err == 0) {
891 sk->sk_socket->state = SS_CONNECTED;
892 } else if (scp->state != DN_CI && scp->state != DN_CC) {
893 sk->sk_socket->state = SS_UNCONNECTED;
895 return err;
898 static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
900 struct socket *sock = sk->sk_socket;
901 struct dn_scp *scp = DN_SK(sk);
902 int err = -EISCONN;
903 struct flowi fl;
905 if (sock->state == SS_CONNECTED)
906 goto out;
908 if (sock->state == SS_CONNECTING) {
909 err = 0;
910 if (scp->state == DN_RUN) {
911 sock->state = SS_CONNECTED;
912 goto out;
914 err = -ECONNREFUSED;
915 if (scp->state != DN_CI && scp->state != DN_CC) {
916 sock->state = SS_UNCONNECTED;
917 goto out;
919 return dn_wait_run(sk, timeo);
922 err = -EINVAL;
923 if (scp->state != DN_O)
924 goto out;
926 if (addr == NULL || addrlen != sizeof(struct sockaddr_dn))
927 goto out;
928 if (addr->sdn_family != AF_DECnet)
929 goto out;
930 if (addr->sdn_flags & SDF_WILD)
931 goto out;
933 if (sock_flag(sk, SOCK_ZAPPED)) {
934 err = dn_auto_bind(sk->sk_socket);
935 if (err)
936 goto out;
939 memcpy(&scp->peer, addr, sizeof(struct sockaddr_dn));
941 err = -EHOSTUNREACH;
942 memset(&fl, 0, sizeof(fl));
943 fl.oif = sk->sk_bound_dev_if;
944 fl.fld_dst = dn_saddr2dn(&scp->peer);
945 fl.fld_src = dn_saddr2dn(&scp->addr);
946 dn_sk_ports_copy(&fl, scp);
947 fl.proto = DNPROTO_NSP;
948 if (dn_route_output_sock(&sk->sk_dst_cache, &fl, sk, flags) < 0)
949 goto out;
950 sk->sk_route_caps = sk->sk_dst_cache->dev->features;
951 sock->state = SS_CONNECTING;
952 scp->state = DN_CI;
953 scp->segsize_loc = dst_metric(sk->sk_dst_cache, RTAX_ADVMSS);
955 dn_nsp_send_conninit(sk, NSP_CI);
956 err = -EINPROGRESS;
957 if (*timeo) {
958 err = dn_wait_run(sk, timeo);
960 out:
961 return err;
964 static int dn_connect(struct socket *sock, struct sockaddr *uaddr, int addrlen, int flags)
966 struct sockaddr_dn *addr = (struct sockaddr_dn *)uaddr;
967 struct sock *sk = sock->sk;
968 int err;
969 long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
971 lock_sock(sk);
972 err = __dn_connect(sk, addr, addrlen, &timeo, 0);
973 release_sock(sk);
975 return err;
978 static inline int dn_check_state(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
980 struct dn_scp *scp = DN_SK(sk);
982 switch(scp->state) {
983 case DN_RUN:
984 return 0;
985 case DN_CR:
986 return dn_confirm_accept(sk, timeo, sk->sk_allocation);
987 case DN_CI:
988 case DN_CC:
989 return dn_wait_run(sk, timeo);
990 case DN_O:
991 return __dn_connect(sk, addr, addrlen, timeo, flags);
994 return -EINVAL;
998 static void dn_access_copy(struct sk_buff *skb, struct accessdata_dn *acc)
1000 unsigned char *ptr = skb->data;
1002 acc->acc_userl = *ptr++;
1003 memcpy(&acc->acc_user, ptr, acc->acc_userl);
1004 ptr += acc->acc_userl;
1006 acc->acc_passl = *ptr++;
1007 memcpy(&acc->acc_pass, ptr, acc->acc_passl);
1008 ptr += acc->acc_passl;
1010 acc->acc_accl = *ptr++;
1011 memcpy(&acc->acc_acc, ptr, acc->acc_accl);
1013 skb_pull(skb, acc->acc_accl + acc->acc_passl + acc->acc_userl + 3);
1017 static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt)
1019 unsigned char *ptr = skb->data;
1021 opt->opt_optl = *ptr++;
1022 opt->opt_status = 0;
1023 memcpy(opt->opt_data, ptr, opt->opt_optl);
1024 skb_pull(skb, dn_ntohs(opt->opt_optl) + 1);
1028 static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
1030 DEFINE_WAIT(wait);
1031 struct sk_buff *skb = NULL;
1032 int err = 0;
1034 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1035 for(;;) {
1036 release_sock(sk);
1037 skb = skb_dequeue(&sk->sk_receive_queue);
1038 if (skb == NULL) {
1039 *timeo = schedule_timeout(*timeo);
1040 skb = skb_dequeue(&sk->sk_receive_queue);
1042 lock_sock(sk);
1043 if (skb != NULL)
1044 break;
1045 err = -EINVAL;
1046 if (sk->sk_state != TCP_LISTEN)
1047 break;
1048 err = sock_intr_errno(*timeo);
1049 if (signal_pending(current))
1050 break;
1051 err = -EAGAIN;
1052 if (!*timeo)
1053 break;
1054 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1056 finish_wait(sk->sk_sleep, &wait);
1058 return skb == NULL ? ERR_PTR(err) : skb;
1061 static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
1063 struct sock *sk = sock->sk, *newsk;
1064 struct sk_buff *skb = NULL;
1065 struct dn_skb_cb *cb;
1066 unsigned char menuver;
1067 int err = 0;
1068 unsigned char type;
1069 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1071 lock_sock(sk);
1073 if (sk->sk_state != TCP_LISTEN || DN_SK(sk)->state != DN_O) {
1074 release_sock(sk);
1075 return -EINVAL;
1078 skb = skb_dequeue(&sk->sk_receive_queue);
1079 if (skb == NULL) {
1080 skb = dn_wait_for_connect(sk, &timeo);
1081 if (IS_ERR(skb)) {
1082 release_sock(sk);
1083 return PTR_ERR(skb);
1087 cb = DN_SKB_CB(skb);
1088 sk->sk_ack_backlog--;
1089 newsk = dn_alloc_sock(newsock, sk->sk_allocation);
1090 if (newsk == NULL) {
1091 release_sock(sk);
1092 kfree_skb(skb);
1093 return -ENOBUFS;
1095 release_sock(sk);
1097 dst_release(xchg(&newsk->sk_dst_cache, skb->dst));
1098 skb->dst = NULL;
1100 DN_SK(newsk)->state = DN_CR;
1101 DN_SK(newsk)->addrrem = cb->src_port;
1102 DN_SK(newsk)->services_rem = cb->services;
1103 DN_SK(newsk)->info_rem = cb->info;
1104 DN_SK(newsk)->segsize_rem = cb->segsize;
1105 DN_SK(newsk)->accept_mode = DN_SK(sk)->accept_mode;
1107 if (DN_SK(newsk)->segsize_rem < 230)
1108 DN_SK(newsk)->segsize_rem = 230;
1110 if ((DN_SK(newsk)->services_rem & NSP_FC_MASK) == NSP_FC_NONE)
1111 DN_SK(newsk)->max_window = decnet_no_fc_max_cwnd;
1113 newsk->sk_state = TCP_LISTEN;
1114 memcpy(&(DN_SK(newsk)->addr), &(DN_SK(sk)->addr), sizeof(struct sockaddr_dn));
1117 * If we are listening on a wild socket, we don't want
1118 * the newly created socket on the wrong hash queue.
1120 DN_SK(newsk)->addr.sdn_flags &= ~SDF_WILD;
1122 skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->addr), &type));
1123 skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->peer), &type));
1124 *(__le16 *)(DN_SK(newsk)->peer.sdn_add.a_addr) = cb->src;
1125 *(__le16 *)(DN_SK(newsk)->addr.sdn_add.a_addr) = cb->dst;
1127 menuver = *skb->data;
1128 skb_pull(skb, 1);
1130 if (menuver & DN_MENUVER_ACC)
1131 dn_access_copy(skb, &(DN_SK(newsk)->accessdata));
1133 if (menuver & DN_MENUVER_USR)
1134 dn_user_copy(skb, &(DN_SK(newsk)->conndata_in));
1136 if (menuver & DN_MENUVER_PRX)
1137 DN_SK(newsk)->peer.sdn_flags |= SDF_PROXY;
1139 if (menuver & DN_MENUVER_UIC)
1140 DN_SK(newsk)->peer.sdn_flags |= SDF_UICPROXY;
1142 kfree_skb(skb);
1144 memcpy(&(DN_SK(newsk)->conndata_out), &(DN_SK(sk)->conndata_out),
1145 sizeof(struct optdata_dn));
1146 memcpy(&(DN_SK(newsk)->discdata_out), &(DN_SK(sk)->discdata_out),
1147 sizeof(struct optdata_dn));
1149 lock_sock(newsk);
1150 err = dn_hash_sock(newsk);
1151 if (err == 0) {
1152 sock_reset_flag(newsk, SOCK_ZAPPED);
1153 dn_send_conn_ack(newsk);
1156 * Here we use sk->sk_allocation since although the conn conf is
1157 * for the newsk, the context is the old socket.
1159 if (DN_SK(newsk)->accept_mode == ACC_IMMED)
1160 err = dn_confirm_accept(newsk, &timeo,
1161 sk->sk_allocation);
1163 release_sock(newsk);
1164 return err;
1168 static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int *uaddr_len,int peer)
1170 struct sockaddr_dn *sa = (struct sockaddr_dn *)uaddr;
1171 struct sock *sk = sock->sk;
1172 struct dn_scp *scp = DN_SK(sk);
1174 *uaddr_len = sizeof(struct sockaddr_dn);
1176 lock_sock(sk);
1178 if (peer) {
1179 if ((sock->state != SS_CONNECTED &&
1180 sock->state != SS_CONNECTING) &&
1181 scp->accept_mode == ACC_IMMED)
1182 return -ENOTCONN;
1184 memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn));
1185 } else {
1186 memcpy(sa, &scp->addr, sizeof(struct sockaddr_dn));
1189 release_sock(sk);
1191 return 0;
1195 static unsigned int dn_poll(struct file *file, struct socket *sock, poll_table *wait)
1197 struct sock *sk = sock->sk;
1198 struct dn_scp *scp = DN_SK(sk);
1199 int mask = datagram_poll(file, sock, wait);
1201 if (!skb_queue_empty(&scp->other_receive_queue))
1202 mask |= POLLRDBAND;
1204 return mask;
1207 static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1209 struct sock *sk = sock->sk;
1210 struct dn_scp *scp = DN_SK(sk);
1211 int err = -EOPNOTSUPP;
1212 long amount = 0;
1213 struct sk_buff *skb;
1214 int val;
1216 switch(cmd)
1218 case SIOCGIFADDR:
1219 case SIOCSIFADDR:
1220 return dn_dev_ioctl(cmd, (void __user *)arg);
1222 case SIOCATMARK:
1223 lock_sock(sk);
1224 val = !skb_queue_empty(&scp->other_receive_queue);
1225 if (scp->state != DN_RUN)
1226 val = -ENOTCONN;
1227 release_sock(sk);
1228 return val;
1230 case TIOCOUTQ:
1231 amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
1232 if (amount < 0)
1233 amount = 0;
1234 err = put_user(amount, (int __user *)arg);
1235 break;
1237 case TIOCINQ:
1238 lock_sock(sk);
1239 if ((skb = skb_peek(&scp->other_receive_queue)) != NULL) {
1240 amount = skb->len;
1241 } else {
1242 struct sk_buff *skb = sk->sk_receive_queue.next;
1243 for(;;) {
1244 if (skb ==
1245 (struct sk_buff *)&sk->sk_receive_queue)
1246 break;
1247 amount += skb->len;
1248 skb = skb->next;
1251 release_sock(sk);
1252 err = put_user(amount, (int __user *)arg);
1253 break;
1255 default:
1256 err = -ENOIOCTLCMD;
1257 break;
1260 return err;
1263 static int dn_listen(struct socket *sock, int backlog)
1265 struct sock *sk = sock->sk;
1266 int err = -EINVAL;
1268 lock_sock(sk);
1270 if (sock_flag(sk, SOCK_ZAPPED))
1271 goto out;
1273 if ((DN_SK(sk)->state != DN_O) || (sk->sk_state == TCP_LISTEN))
1274 goto out;
1276 sk->sk_max_ack_backlog = backlog;
1277 sk->sk_ack_backlog = 0;
1278 sk->sk_state = TCP_LISTEN;
1279 err = 0;
1280 dn_rehash_sock(sk);
1282 out:
1283 release_sock(sk);
1285 return err;
1289 static int dn_shutdown(struct socket *sock, int how)
1291 struct sock *sk = sock->sk;
1292 struct dn_scp *scp = DN_SK(sk);
1293 int err = -ENOTCONN;
1295 lock_sock(sk);
1297 if (sock->state == SS_UNCONNECTED)
1298 goto out;
1300 err = 0;
1301 if (sock->state == SS_DISCONNECTING)
1302 goto out;
1304 err = -EINVAL;
1305 if (scp->state == DN_O)
1306 goto out;
1308 if (how != SHUTDOWN_MASK)
1309 goto out;
1311 sk->sk_shutdown = how;
1312 dn_destroy_sock(sk);
1313 err = 0;
1315 out:
1316 release_sock(sk);
1318 return err;
1321 static int dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1323 struct sock *sk = sock->sk;
1324 int err;
1326 lock_sock(sk);
1327 err = __dn_setsockopt(sock, level, optname, optval, optlen, 0);
1328 release_sock(sk);
1330 return err;
1333 static int __dn_setsockopt(struct socket *sock, int level,int optname, char __user *optval, int optlen, int flags)
1335 struct sock *sk = sock->sk;
1336 struct dn_scp *scp = DN_SK(sk);
1337 long timeo;
1338 union {
1339 struct optdata_dn opt;
1340 struct accessdata_dn acc;
1341 int mode;
1342 unsigned long win;
1343 int val;
1344 unsigned char services;
1345 unsigned char info;
1346 } u;
1347 int err;
1349 if (optlen && !optval)
1350 return -EINVAL;
1352 if (optlen > sizeof(u))
1353 return -EINVAL;
1355 if (copy_from_user(&u, optval, optlen))
1356 return -EFAULT;
1358 switch(optname) {
1359 case DSO_CONDATA:
1360 if (sock->state == SS_CONNECTED)
1361 return -EISCONN;
1362 if ((scp->state != DN_O) && (scp->state != DN_CR))
1363 return -EINVAL;
1365 if (optlen != sizeof(struct optdata_dn))
1366 return -EINVAL;
1368 if (dn_ntohs(u.opt.opt_optl) > 16)
1369 return -EINVAL;
1371 memcpy(&scp->conndata_out, &u.opt, optlen);
1372 break;
1374 case DSO_DISDATA:
1375 if (sock->state != SS_CONNECTED && scp->accept_mode == ACC_IMMED)
1376 return -ENOTCONN;
1378 if (optlen != sizeof(struct optdata_dn))
1379 return -EINVAL;
1381 if (dn_ntohs(u.opt.opt_optl) > 16)
1382 return -EINVAL;
1384 memcpy(&scp->discdata_out, &u.opt, optlen);
1385 break;
1387 case DSO_CONACCESS:
1388 if (sock->state == SS_CONNECTED)
1389 return -EISCONN;
1390 if (scp->state != DN_O)
1391 return -EINVAL;
1393 if (optlen != sizeof(struct accessdata_dn))
1394 return -EINVAL;
1396 if ((u.acc.acc_accl > DN_MAXACCL) ||
1397 (u.acc.acc_passl > DN_MAXACCL) ||
1398 (u.acc.acc_userl > DN_MAXACCL))
1399 return -EINVAL;
1401 memcpy(&scp->accessdata, &u.acc, optlen);
1402 break;
1404 case DSO_ACCEPTMODE:
1405 if (sock->state == SS_CONNECTED)
1406 return -EISCONN;
1407 if (scp->state != DN_O)
1408 return -EINVAL;
1410 if (optlen != sizeof(int))
1411 return -EINVAL;
1413 if ((u.mode != ACC_IMMED) && (u.mode != ACC_DEFER))
1414 return -EINVAL;
1416 scp->accept_mode = (unsigned char)u.mode;
1417 break;
1419 case DSO_CONACCEPT:
1421 if (scp->state != DN_CR)
1422 return -EINVAL;
1423 timeo = sock_rcvtimeo(sk, 0);
1424 err = dn_confirm_accept(sk, &timeo, sk->sk_allocation);
1425 return err;
1427 case DSO_CONREJECT:
1429 if (scp->state != DN_CR)
1430 return -EINVAL;
1432 scp->state = DN_DR;
1433 sk->sk_shutdown = SHUTDOWN_MASK;
1434 dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
1435 break;
1437 default:
1438 #ifdef CONFIG_NETFILTER
1439 return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
1440 #endif
1441 case DSO_LINKINFO:
1442 case DSO_STREAM:
1443 case DSO_SEQPACKET:
1444 return -ENOPROTOOPT;
1446 case DSO_MAXWINDOW:
1447 if (optlen != sizeof(unsigned long))
1448 return -EINVAL;
1449 if (u.win > NSP_MAX_WINDOW)
1450 u.win = NSP_MAX_WINDOW;
1451 if (u.win == 0)
1452 return -EINVAL;
1453 scp->max_window = u.win;
1454 if (scp->snd_window > u.win)
1455 scp->snd_window = u.win;
1456 break;
1458 case DSO_NODELAY:
1459 if (optlen != sizeof(int))
1460 return -EINVAL;
1461 if (scp->nonagle == 2)
1462 return -EINVAL;
1463 scp->nonagle = (u.val == 0) ? 0 : 1;
1464 /* if (scp->nonagle == 1) { Push pending frames } */
1465 break;
1467 case DSO_CORK:
1468 if (optlen != sizeof(int))
1469 return -EINVAL;
1470 if (scp->nonagle == 1)
1471 return -EINVAL;
1472 scp->nonagle = (u.val == 0) ? 0 : 2;
1473 /* if (scp->nonagle == 0) { Push pending frames } */
1474 break;
1476 case DSO_SERVICES:
1477 if (optlen != sizeof(unsigned char))
1478 return -EINVAL;
1479 if ((u.services & ~NSP_FC_MASK) != 0x01)
1480 return -EINVAL;
1481 if ((u.services & NSP_FC_MASK) == NSP_FC_MASK)
1482 return -EINVAL;
1483 scp->services_loc = u.services;
1484 break;
1486 case DSO_INFO:
1487 if (optlen != sizeof(unsigned char))
1488 return -EINVAL;
1489 if (u.info & 0xfc)
1490 return -EINVAL;
1491 scp->info_loc = u.info;
1492 break;
1495 return 0;
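/*
 * Illustrative only: from user level these DSO_* options are set with
 * setsockopt() at the DECnet level (SOL_DECNET in the userspace
 * headers is the usual assumption here). For example, switching a
 * not-yet-listening socket to deferred accept mode, matching the
 * DSO_ACCEPTMODE checks above:
 *
 *	int mode = ACC_DEFER;
 *
 *	if (setsockopt(fd, SOL_DECNET, DSO_ACCEPTMODE,
 *		       &mode, sizeof(mode)) < 0)
 *		perror("setsockopt");
 */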
1498 static int dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1500 struct sock *sk = sock->sk;
1501 int err;
1503 lock_sock(sk);
1504 err = __dn_getsockopt(sock, level, optname, optval, optlen, 0);
1505 release_sock(sk);
1507 return err;
1510 static int __dn_getsockopt(struct socket *sock, int level,int optname, char __user *optval,int __user *optlen, int flags)
1512 struct sock *sk = sock->sk;
1513 struct dn_scp *scp = DN_SK(sk);
1514 struct linkinfo_dn link;
1515 unsigned int r_len;
1516 void *r_data = NULL;
1517 unsigned int val;
1519 if(get_user(r_len , optlen))
1520 return -EFAULT;
1522 switch(optname) {
1523 case DSO_CONDATA:
1524 if (r_len > sizeof(struct optdata_dn))
1525 r_len = sizeof(struct optdata_dn);
1526 r_data = &scp->conndata_in;
1527 break;
1529 case DSO_DISDATA:
1530 if (r_len > sizeof(struct optdata_dn))
1531 r_len = sizeof(struct optdata_dn);
1532 r_data = &scp->discdata_in;
1533 break;
1535 case DSO_CONACCESS:
1536 if (r_len > sizeof(struct accessdata_dn))
1537 r_len = sizeof(struct accessdata_dn);
1538 r_data = &scp->accessdata;
1539 break;
1541 case DSO_ACCEPTMODE:
1542 if (r_len > sizeof(unsigned char))
1543 r_len = sizeof(unsigned char);
1544 r_data = &scp->accept_mode;
1545 break;
1547 case DSO_LINKINFO:
1548 if (r_len > sizeof(struct linkinfo_dn))
1549 r_len = sizeof(struct linkinfo_dn);
1551 switch(sock->state) {
1552 case SS_CONNECTING:
1553 link.idn_linkstate = LL_CONNECTING;
1554 break;
1555 case SS_DISCONNECTING:
1556 link.idn_linkstate = LL_DISCONNECTING;
1557 break;
1558 case SS_CONNECTED:
1559 link.idn_linkstate = LL_RUNNING;
1560 break;
1561 default:
1562 link.idn_linkstate = LL_INACTIVE;
1565 link.idn_segsize = scp->segsize_rem;
1566 r_data = &link;
1567 break;
1569 default:
1570 #ifdef CONFIG_NETFILTER
1572 int val, len;
1574 if(get_user(len, optlen))
1575 return -EFAULT;
1577 val = nf_getsockopt(sk, PF_DECnet, optname,
1578 optval, &len);
1579 if (val >= 0)
1580 val = put_user(len, optlen);
1581 return val;
1583 #endif
1584 case DSO_STREAM:
1585 case DSO_SEQPACKET:
1586 case DSO_CONACCEPT:
1587 case DSO_CONREJECT:
1588 return -ENOPROTOOPT;
1590 case DSO_MAXWINDOW:
1591 if (r_len > sizeof(unsigned long))
1592 r_len = sizeof(unsigned long);
1593 r_data = &scp->max_window;
1594 break;
1596 case DSO_NODELAY:
1597 if (r_len > sizeof(int))
1598 r_len = sizeof(int);
1599 val = (scp->nonagle == 1);
1600 r_data = &val;
1601 break;
1603 case DSO_CORK:
1604 if (r_len > sizeof(int))
1605 r_len = sizeof(int);
1606 val = (scp->nonagle == 2);
1607 r_data = &val;
1608 break;
1610 case DSO_SERVICES:
1611 if (r_len > sizeof(unsigned char))
1612 r_len = sizeof(unsigned char);
1613 r_data = &scp->services_rem;
1614 break;
1616 case DSO_INFO:
1617 if (r_len > sizeof(unsigned char))
1618 r_len = sizeof(unsigned char);
1619 r_data = &scp->info_rem;
1620 break;
1623 if (r_data) {
1624 if (copy_to_user(optval, r_data, r_len))
1625 return -EFAULT;
1626 if (put_user(r_len, optlen))
1627 return -EFAULT;
1630 return 0;
1634 static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target)
1636 struct sk_buff *skb = q->next;
1637 int len = 0;
1639 if (flags & MSG_OOB)
1640 return !skb_queue_empty(q) ? 1 : 0;
1642 while(skb != (struct sk_buff *)q) {
1643 struct dn_skb_cb *cb = DN_SKB_CB(skb);
1644 len += skb->len;
1646 if (cb->nsp_flags & 0x40) {
1647 /* SOCK_SEQPACKET reads to EOM */
1648 if (sk->sk_type == SOCK_SEQPACKET)
1649 return 1;
1650 /* so does SOCK_STREAM unless WAITALL is specified */
1651 if (!(flags & MSG_WAITALL))
1652 return 1;
1655 /* minimum data length for read exceeded */
1656 if (len >= target)
1657 return 1;
1659 skb = skb->next;
1662 return 0;
1666 static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
1667 struct msghdr *msg, size_t size, int flags)
1669 struct sock *sk = sock->sk;
1670 struct dn_scp *scp = DN_SK(sk);
1671 struct sk_buff_head *queue = &sk->sk_receive_queue;
1672 size_t target = size > 1 ? 1 : 0;
1673 size_t copied = 0;
1674 int rv = 0;
1675 struct sk_buff *skb, *nskb;
1676 struct dn_skb_cb *cb = NULL;
1677 unsigned char eor = 0;
1678 long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1680 lock_sock(sk);
1682 if (sock_flag(sk, SOCK_ZAPPED)) {
1683 rv = -EADDRNOTAVAIL;
1684 goto out;
1687 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1688 rv = 0;
1689 goto out;
1692 rv = dn_check_state(sk, NULL, 0, &timeo, flags);
1693 if (rv)
1694 goto out;
1696 if (flags & ~(MSG_CMSG_COMPAT|MSG_PEEK|MSG_OOB|MSG_WAITALL|MSG_DONTWAIT|MSG_NOSIGNAL)) {
1697 rv = -EOPNOTSUPP;
1698 goto out;
1701 if (flags & MSG_OOB)
1702 queue = &scp->other_receive_queue;
1704 if (flags & MSG_WAITALL)
1705 target = size;
1709 * See if there is data ready to read, sleep if there isn't
1711 for(;;) {
1712 if (sk->sk_err)
1713 goto out;
1715 if (!skb_queue_empty(&scp->other_receive_queue)) {
1716 if (!(flags & MSG_OOB)) {
1717 msg->msg_flags |= MSG_OOB;
1718 if (!scp->other_report) {
1719 scp->other_report = 1;
1720 goto out;
1725 if (scp->state != DN_RUN)
1726 goto out;
1728 if (signal_pending(current)) {
1729 rv = sock_intr_errno(timeo);
1730 goto out;
1733 if (dn_data_ready(sk, queue, flags, target))
1734 break;
1736 if (flags & MSG_DONTWAIT) {
1737 rv = -EWOULDBLOCK;
1738 goto out;
1741 set_bit(SOCK_ASYNC_WAITDATA, &sock->flags);
1742 SOCK_SLEEP_PRE(sk)
1744 if (!dn_data_ready(sk, queue, flags, target))
1745 schedule();
1747 SOCK_SLEEP_POST(sk)
1748 clear_bit(SOCK_ASYNC_WAITDATA, &sock->flags);
1751 for(skb = queue->next; skb != (struct sk_buff *)queue; skb = nskb) {
1752 unsigned int chunk = skb->len;
1753 cb = DN_SKB_CB(skb);
1755 if ((chunk + copied) > size)
1756 chunk = size - copied;
1758 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
1759 rv = -EFAULT;
1760 break;
1762 copied += chunk;
1764 if (!(flags & MSG_PEEK))
1765 skb_pull(skb, chunk);
1767 eor = cb->nsp_flags & 0x40;
1768 nskb = skb->next;
1770 if (skb->len == 0) {
1771 skb_unlink(skb, queue);
1772 kfree_skb(skb);
1774 * N.B. Don't refer to skb or cb after this point
1775 * in loop.
1777 if ((scp->flowloc_sw == DN_DONTSEND) && !dn_congested(sk)) {
1778 scp->flowloc_sw = DN_SEND;
1779 dn_nsp_send_link(sk, DN_SEND, 0);
1783 if (eor) {
1784 if (sk->sk_type == SOCK_SEQPACKET)
1785 break;
1786 if (!(flags & MSG_WAITALL))
1787 break;
1790 if (flags & MSG_OOB)
1791 break;
1793 if (copied >= target)
1794 break;
1797 rv = copied;
1800 if (eor && (sk->sk_type == SOCK_SEQPACKET))
1801 msg->msg_flags |= MSG_EOR;
1803 out:
1804 if (rv == 0)
1805 rv = (flags & MSG_PEEK) ? -sk->sk_err : sock_error(sk);
1807 if ((rv >= 0) && msg->msg_name) {
1808 memcpy(msg->msg_name, &scp->peer, sizeof(struct sockaddr_dn));
1809 msg->msg_namelen = sizeof(struct sockaddr_dn);
1812 release_sock(sk);
1814 return rv;
1818 static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *queue, int flags)
1820 unsigned char fctype = scp->services_rem & NSP_FC_MASK;
1821 if (skb_queue_len(queue) >= scp->snd_window)
1822 return 1;
1823 if (fctype != NSP_FC_NONE) {
1824 if (flags & MSG_OOB) {
1825 if (scp->flowrem_oth == 0)
1826 return 1;
1827 } else {
1828 if (scp->flowrem_dat == 0)
1829 return 1;
1832 return 0;
1836 * The DECnet spec requires that the "routing layer" accepts packets which
1837 * are at least 230 bytes in size. This excludes any headers which the NSP
1838 * layer might add, so we always assume that we'll be using the maximal
1839 * length header on data packets. The variation in length is due to the
1840 * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't
1841 * make much practical difference.
1843 unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu)
1845 unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER;
1846 if (dev) {
1847 struct dn_dev *dn_db = dev->dn_ptr;
1848 mtu -= LL_RESERVED_SPACE(dev);
1849 if (dn_db->use_long)
1850 mtu -= 21;
1851 else
1852 mtu -= 6;
1853 mtu -= DN_MAX_NSP_DATA_HEADER;
1854 } else {
1856 * 21 = long header, 16 = guess at MAC header length
1858 mtu -= (21 + DN_MAX_NSP_DATA_HEADER + 16);
1860 if (mtu > mss)
1861 mss = mtu;
1862 return mss;
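/*
 * Put another way, the result above is
 *
 *	max(230 - DN_MAX_NSP_DATA_HEADER,
 *	    mtu - link-layer headers - DN_MAX_NSP_DATA_HEADER)
 *
 * i.e. the spec-guaranteed 230 byte minimum is only exceeded when the
 * path MTU genuinely allows a larger NSP data segment.
 */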
1865 static inline unsigned int dn_current_mss(struct sock *sk, int flags)
1867 struct dst_entry *dst = __sk_dst_get(sk);
1868 struct dn_scp *scp = DN_SK(sk);
1869 int mss_now = min_t(int, scp->segsize_loc, scp->segsize_rem);
1871 /* Other data messages are limited to 16 bytes per packet */
1872 if (flags & MSG_OOB)
1873 return 16;
1875 /* This works out the maximum size of segment we can send out */
1876 if (dst) {
1877 u32 mtu = dst_mtu(dst);
1878 mss_now = min_t(int, dn_mss_from_pmtu(dst->dev, mtu), mss_now);
1881 return mss_now;
1885 * N.B. We get the timeout wrong here, but then we always did get it
1886 * wrong before and this is another step along the road to correcting
1887 * it. It ought to get updated each time we pass through the routine,
1888 * but in practice it probably doesn't matter too much for now.
1890 static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk,
1891 unsigned long datalen, int noblock,
1892 int *errcode)
1894 struct sk_buff *skb = sock_alloc_send_skb(sk, datalen,
1895 noblock, errcode);
1896 if (skb) {
1897 skb->protocol = __constant_htons(ETH_P_DNA_RT);
1898 skb->pkt_type = PACKET_OUTGOING;
1900 return skb;
1903 static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
1904 struct msghdr *msg, size_t size)
1906 struct sock *sk = sock->sk;
1907 struct dn_scp *scp = DN_SK(sk);
1908 size_t mss;
1909 struct sk_buff_head *queue = &scp->data_xmit_queue;
1910 int flags = msg->msg_flags;
1911 int err = 0;
1912 size_t sent = 0;
1913 int addr_len = msg->msg_namelen;
1914 struct sockaddr_dn *addr = (struct sockaddr_dn *)msg->msg_name;
1915 struct sk_buff *skb = NULL;
1916 struct dn_skb_cb *cb;
1917 size_t len;
1918 unsigned char fctype;
1919 long timeo;
1921 if (flags & ~(MSG_TRYHARD|MSG_OOB|MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|MSG_MORE|MSG_CMSG_COMPAT))
1922 return -EOPNOTSUPP;
1924 if (addr_len && (addr_len != sizeof(struct sockaddr_dn)))
1925 return -EINVAL;
1927 lock_sock(sk);
1928 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1930 * The only difference between stream sockets and sequenced packet
1931 * sockets is that the stream sockets always behave as if MSG_EOR
1932 * has been set.
1934 if (sock->type == SOCK_STREAM) {
1935 if (flags & MSG_EOR) {
1936 err = -EINVAL;
1937 goto out;
1939 flags |= MSG_EOR;
1943 err = dn_check_state(sk, addr, addr_len, &timeo, flags);
1944 if (err)
1945 goto out_err;
1947 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1948 err = -EPIPE;
1949 if (!(flags & MSG_NOSIGNAL))
1950 send_sig(SIGPIPE, current, 0);
1951 goto out_err;
1954 if ((flags & MSG_TRYHARD) && sk->sk_dst_cache)
1955 dst_negative_advice(&sk->sk_dst_cache);
1957 mss = scp->segsize_rem;
1958 fctype = scp->services_rem & NSP_FC_MASK;
1960 mss = dn_current_mss(sk, flags);
1962 if (flags & MSG_OOB) {
1963 queue = &scp->other_xmit_queue;
1964 if (size > mss) {
1965 err = -EMSGSIZE;
1966 goto out;
1970 scp->persist_fxn = dn_nsp_xmit_timeout;
1972 while(sent < size) {
1973 err = sock_error(sk);
1974 if (err)
1975 goto out;
1977 if (signal_pending(current)) {
1978 err = sock_intr_errno(timeo);
1979 goto out;
1983 * Calculate size that we wish to send.
1985 len = size - sent;
1987 if (len > mss)
1988 len = mss;
1991 * Wait for queue size to go down below the window
1992 * size.
1994 if (dn_queue_too_long(scp, queue, flags)) {
1995 if (flags & MSG_DONTWAIT) {
1996 err = -EWOULDBLOCK;
1997 goto out;
2000 SOCK_SLEEP_PRE(sk)
2002 if (dn_queue_too_long(scp, queue, flags))
2003 schedule();
2005 SOCK_SLEEP_POST(sk)
2007 continue;
2011 * Get a suitably sized skb.
2012 * 64 is a bit of a hack really, but it's larger than any
2013 * link-layer headers and has served us well as a good
2014 * guess as to their real length.
2016 skb = dn_alloc_send_pskb(sk, len + 64 + DN_MAX_NSP_DATA_HEADER,
2017 flags & MSG_DONTWAIT, &err);
2019 if (err)
2020 break;
2022 if (!skb)
2023 continue;
2025 cb = DN_SKB_CB(skb);
2027 skb_reserve(skb, 64 + DN_MAX_NSP_DATA_HEADER);
2029 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
2030 err = -EFAULT;
2031 goto out;
2034 if (flags & MSG_OOB) {
2035 cb->nsp_flags = 0x30;
2036 if (fctype != NSP_FC_NONE)
2037 scp->flowrem_oth--;
2038 } else {
2039 cb->nsp_flags = 0x00;
2040 if (scp->seg_total == 0)
2041 cb->nsp_flags |= 0x20;
2043 scp->seg_total += len;
2045 if (((sent + len) == size) && (flags & MSG_EOR)) {
2046 cb->nsp_flags |= 0x40;
2047 scp->seg_total = 0;
2048 if (fctype == NSP_FC_SCMC)
2049 scp->flowrem_dat--;
2051 if (fctype == NSP_FC_SRC)
2052 scp->flowrem_dat--;
2055 sent += len;
2056 dn_nsp_queue_xmit(sk, skb, sk->sk_allocation, flags & MSG_OOB);
2057 skb = NULL;
2059 scp->persist = dn_nsp_persist(sk);
2062 out:
2064 if (skb)
2065 kfree_skb(skb);
2067 release_sock(sk);
2069 return sent ? sent : err;
2071 out_err:
2072 err = sk_stream_error(sk, flags, err);
2073 release_sock(sk);
2074 return err;
2077 static int dn_device_event(struct notifier_block *this, unsigned long event,
2078 void *ptr)
2080 struct net_device *dev = (struct net_device *)ptr;
2082 switch(event) {
2083 case NETDEV_UP:
2084 dn_dev_up(dev);
2085 break;
2086 case NETDEV_DOWN:
2087 dn_dev_down(dev);
2088 break;
2089 default:
2090 break;
2093 return NOTIFY_DONE;
2096 static struct notifier_block dn_dev_notifier = {
2097 .notifier_call = dn_device_event,
2100 extern int dn_route_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
2102 static struct packet_type dn_dix_packet_type = {
2103 .type = __constant_htons(ETH_P_DNA_RT),
2104 .dev = NULL, /* All devices */
2105 .func = dn_route_rcv,
2108 #ifdef CONFIG_PROC_FS
2109 struct dn_iter_state {
2110 int bucket;
2113 static struct sock *dn_socket_get_first(struct seq_file *seq)
2115 struct dn_iter_state *state = seq->private;
2116 struct sock *n = NULL;
2118 for(state->bucket = 0;
2119 state->bucket < DN_SK_HASH_SIZE;
2120 ++state->bucket) {
2121 n = sk_head(&dn_sk_hash[state->bucket]);
2122 if (n)
2123 break;
2126 return n;
2129 static struct sock *dn_socket_get_next(struct seq_file *seq,
2130 struct sock *n)
2132 struct dn_iter_state *state = seq->private;
2134 n = sk_next(n);
2135 try_again:
2136 if (n)
2137 goto out;
2138 if (++state->bucket >= DN_SK_HASH_SIZE)
2139 goto out;
2140 n = sk_head(&dn_sk_hash[state->bucket]);
2141 goto try_again;
2142 out:
2143 return n;
2146 static struct sock *socket_get_idx(struct seq_file *seq, loff_t *pos)
2148 struct sock *sk = dn_socket_get_first(seq);
2150 if (sk) {
2151 while(*pos && (sk = dn_socket_get_next(seq, sk)))
2152 --*pos;
2154 return *pos ? NULL : sk;
2157 static void *dn_socket_get_idx(struct seq_file *seq, loff_t pos)
2159 void *rc;
2160 read_lock_bh(&dn_hash_lock);
2161 rc = socket_get_idx(seq, &pos);
2162 if (!rc) {
2163 read_unlock_bh(&dn_hash_lock);
2165 return rc;
2168 static void *dn_socket_seq_start(struct seq_file *seq, loff_t *pos)
2170 return *pos ? dn_socket_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2173 static void *dn_socket_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2175 void *rc;
2177 if (v == SEQ_START_TOKEN) {
2178 rc = dn_socket_get_idx(seq, 0);
2179 goto out;
2182 rc = dn_socket_get_next(seq, v);
2183 if (rc)
2184 goto out;
2185 read_unlock_bh(&dn_hash_lock);
2186 out:
2187 ++*pos;
2188 return rc;
2191 static void dn_socket_seq_stop(struct seq_file *seq, void *v)
2193 if (v && v != SEQ_START_TOKEN)
2194 read_unlock_bh(&dn_hash_lock);
2197 #define IS_NOT_PRINTABLE(x) ((x) < 32 || (x) > 126)
2199 static void dn_printable_object(struct sockaddr_dn *dn, unsigned char *buf)
2201 int i;
2203 switch (dn_ntohs(dn->sdn_objnamel)) {
2204 case 0:
2205 sprintf(buf, "%d", dn->sdn_objnum);
2206 break;
2207 default:
2208 for (i = 0; i < dn_ntohs(dn->sdn_objnamel); i++) {
2209 buf[i] = dn->sdn_objname[i];
2210 if (IS_NOT_PRINTABLE(buf[i]))
2211 buf[i] = '.';
2213 buf[i] = 0;
2217 static char *dn_state2asc(unsigned char state)
2219 switch(state) {
2220 case DN_O:
2221 return "OPEN";
2222 case DN_CR:
2223 return " CR";
2224 case DN_DR:
2225 return " DR";
2226 case DN_DRC:
2227 return " DRC";
2228 case DN_CC:
2229 return " CC";
2230 case DN_CI:
2231 return " CI";
2232 case DN_NR:
2233 return " NR";
2234 case DN_NC:
2235 return " NC";
2236 case DN_CD:
2237 return " CD";
2238 case DN_RJ:
2239 return " RJ";
2240 case DN_RUN:
2241 return " RUN";
2242 case DN_DI:
2243 return " DI";
2244 case DN_DIC:
2245 return " DIC";
2246 case DN_DN:
2247 return " DN";
2248 case DN_CL:
2249 return " CL";
2250 case DN_CN:
2251 return " CN";
2254 return "????";
2257 static inline void dn_socket_format_entry(struct seq_file *seq, struct sock *sk)
2259 struct dn_scp *scp = DN_SK(sk);
2260 char buf1[DN_ASCBUF_LEN];
2261 char buf2[DN_ASCBUF_LEN];
2262 char local_object[DN_MAXOBJL+3];
2263 char remote_object[DN_MAXOBJL+3];
2265 dn_printable_object(&scp->addr, local_object);
2266 dn_printable_object(&scp->peer, remote_object);
2268 seq_printf(seq,
2269 "%6s/%04X %04d:%04d %04d:%04d %01d %-16s "
2270 "%6s/%04X %04d:%04d %04d:%04d %01d %-16s %4s %s\n",
2271 dn_addr2asc(dn_ntohs(dn_saddr2dn(&scp->addr)), buf1),
2272 scp->addrloc,
2273 scp->numdat,
2274 scp->numoth,
2275 scp->ackxmt_dat,
2276 scp->ackxmt_oth,
2277 scp->flowloc_sw,
2278 local_object,
2279 dn_addr2asc(dn_ntohs(dn_saddr2dn(&scp->peer)), buf2),
2280 scp->addrrem,
2281 scp->numdat_rcv,
2282 scp->numoth_rcv,
2283 scp->ackrcv_dat,
2284 scp->ackrcv_oth,
2285 scp->flowrem_sw,
2286 remote_object,
2287 dn_state2asc(scp->state),
2288 ((scp->accept_mode == ACC_IMMED) ? "IMMED" : "DEFER"));
2291 static int dn_socket_seq_show(struct seq_file *seq, void *v)
2293 if (v == SEQ_START_TOKEN) {
2294 seq_puts(seq, "Local Remote\n");
2295 } else {
2296 dn_socket_format_entry(seq, v);
2298 return 0;
2301 static struct seq_operations dn_socket_seq_ops = {
2302 .start = dn_socket_seq_start,
2303 .next = dn_socket_seq_next,
2304 .stop = dn_socket_seq_stop,
2305 .show = dn_socket_seq_show,
2308 static int dn_socket_seq_open(struct inode *inode, struct file *file)
2310 struct seq_file *seq;
2311 int rc = -ENOMEM;
2312 struct dn_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
2314 if (!s)
2315 goto out;
2317 rc = seq_open(file, &dn_socket_seq_ops);
2318 if (rc)
2319 goto out_kfree;
2321 seq = file->private_data;
2322 seq->private = s;
2323 memset(s, 0, sizeof(*s));
2324 out:
2325 return rc;
2326 out_kfree:
2327 kfree(s);
2328 goto out;
2331 static struct file_operations dn_socket_seq_fops = {
2332 .owner = THIS_MODULE,
2333 .open = dn_socket_seq_open,
2334 .read = seq_read,
2335 .llseek = seq_lseek,
2336 .release = seq_release_private,
2338 #endif
2340 static struct net_proto_family dn_family_ops = {
2341 .family = AF_DECnet,
2342 .create = dn_create,
2343 .owner = THIS_MODULE,
2346 static const struct proto_ops dn_proto_ops = {
2347 .family = AF_DECnet,
2348 .owner = THIS_MODULE,
2349 .release = dn_release,
2350 .bind = dn_bind,
2351 .connect = dn_connect,
2352 .socketpair = sock_no_socketpair,
2353 .accept = dn_accept,
2354 .getname = dn_getname,
2355 .poll = dn_poll,
2356 .ioctl = dn_ioctl,
2357 .listen = dn_listen,
2358 .shutdown = dn_shutdown,
2359 .setsockopt = dn_setsockopt,
2360 .getsockopt = dn_getsockopt,
2361 .sendmsg = dn_sendmsg,
2362 .recvmsg = dn_recvmsg,
2363 .mmap = sock_no_mmap,
2364 .sendpage = sock_no_sendpage,
2367 void dn_register_sysctl(void);
2368 void dn_unregister_sysctl(void);
2370 MODULE_DESCRIPTION("The Linux DECnet Network Protocol");
2371 MODULE_AUTHOR("Linux DECnet Project Team");
2372 MODULE_LICENSE("GPL");
2373 MODULE_ALIAS_NETPROTO(PF_DECnet);
2375 static char banner[] __initdata = KERN_INFO "NET4: DECnet for Linux: V.2.5.68s (C) 1995-2003 Linux DECnet Project Team\n";
2377 static int __init decnet_init(void)
2379 int rc;
2381 printk(banner);
2383 rc = proto_register(&dn_proto, 1);
2384 if (rc != 0)
2385 goto out;
2387 dn_neigh_init();
2388 dn_dev_init();
2389 dn_route_init();
2390 dn_fib_init();
2392 sock_register(&dn_family_ops);
2393 dev_add_pack(&dn_dix_packet_type);
2394 register_netdevice_notifier(&dn_dev_notifier);
2396 proc_net_fops_create("decnet", S_IRUGO, &dn_socket_seq_fops);
2397 dn_register_sysctl();
2398 out:
2399 return rc;
2402 module_init(decnet_init);
2405 * Prevent DECnet module unloading until it's fixed properly.
2406 * Requires an audit of the code to check for memory leaks and
2407 * initialisation problems etc.
2409 #if 0
2410 static void __exit decnet_exit(void)
2412 sock_unregister(AF_DECnet);
2413 dev_remove_pack(&dn_dix_packet_type);
2415 dn_unregister_sysctl();
2417 unregister_netdevice_notifier(&dn_dev_notifier);
2419 dn_route_cleanup();
2420 dn_dev_cleanup();
2421 dn_neigh_cleanup();
2422 dn_fib_cleanup();
2424 proc_net_remove("decnet");
2426 proto_unregister(&dn_proto);
2428 module_exit(decnet_exit);
2429 #endif