release/src-rt-6.x.4708/linux/linux-2.6.36/net/decnet/af_decnet.c (tomato.git, RT-AC56 3.0.0.4.374.37 source tree)
2 /*
3 * DECnet An implementation of the DECnet protocol suite for the LINUX
4 * operating system. DECnet is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
7 * DECnet Socket Layer Interface
9 * Authors: Eduardo Marcelo Serrat <emserrat@geocities.com>
10 * Patrick Caulfield <patrick@pandh.demon.co.uk>
12 * Changes:
13 * Steve Whitehouse: Copied from Eduardo Serrat and Patrick Caulfield's
14 * version of the code. Original copyright preserved
15 * below.
16 * Steve Whitehouse: Some bug fixes, cleaning up some code to make it
17 * compatible with my routing layer.
18 * Steve Whitehouse: Merging changes from Eduardo Serrat and Patrick
19 * Caulfield.
20 * Steve Whitehouse: Further bug fixes, checking module code still works
21 * with new routing layer.
22 * Steve Whitehouse: Additional set/get_sockopt() calls.
23 * Steve Whitehouse: Fixed TIOCINQ ioctl to be same as Eduardo's new
24 * code.
25 * Steve Whitehouse: recvmsg() changed to try and behave in a POSIX like
26 * way. Didn't manage it entirely, but it's better.
27 * Steve Whitehouse: ditto for sendmsg().
28 * Steve Whitehouse: A selection of bug fixes to various things.
29 * Steve Whitehouse: Added TIOCOUTQ ioctl.
30 * Steve Whitehouse: Fixes to username2sockaddr & sockaddr2username.
31 * Steve Whitehouse: Fixes to connect() error returns.
32 * Patrick Caulfield: Fixes to delayed acceptance logic.
33 * David S. Miller: New socket locking
34 * Steve Whitehouse: Socket list hashing/locking
35 * Arnaldo C. Melo: use capable, not suser
36 * Steve Whitehouse: Removed unused code. Fix to use sk->allocation
37 * when required.
38 * Patrick Caulfield: /proc/net/decnet now has object name/number
39 * Steve Whitehouse: Fixed local port allocation, hashed sk list
40 * Matthew Wilcox: Fixes for dn_ioctl()
41 * Steve Whitehouse: New connect/accept logic to allow timeouts and
42 * prepare for sendpage etc.
46 /******************************************************************************
47 (c) 1995-1998 E.M. Serrat emserrat@geocities.com
49 This program is free software; you can redistribute it and/or modify
50 it under the terms of the GNU General Public License as published by
51 the Free Software Foundation; either version 2 of the License, or
52 any later version.
54 This program is distributed in the hope that it will be useful,
55 but WITHOUT ANY WARRANTY; without even the implied warranty of
56 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
57 GNU General Public License for more details.
59 HISTORY:
61 Version Kernel Date Author/Comments
62 ------- ------ ---- ---------------
63 Version 0.0.1 2.0.30 01-dic-97 Eduardo Marcelo Serrat
64 (emserrat@geocities.com)
66 First Development of DECnet Socket La-
67 yer for Linux. Only supports outgoing
68 connections.
70 Version 0.0.2 2.1.105 20-jun-98 Patrick J. Caulfield
71 (patrick@pandh.demon.co.uk)
73 Port to new kernel development version.
75 Version 0.0.3 2.1.106 25-jun-98 Eduardo Marcelo Serrat
76 (emserrat@geocities.com)
78 Added support for incoming connections
79 so we can start developing server apps
80 on Linux.
82 Module Support
83 Version 0.0.4 2.1.109 21-jul-98 Eduardo Marcelo Serrat
84 (emserrat@geocities.com)
86 Added support for X11R6.4. Now we can
87 use DECnet transport for X on Linux!!!
89 Version 0.0.5 2.1.110 01-aug-98 Eduardo Marcelo Serrat
90 (emserrat@geocities.com)
91 Removed bugs on flow control
92 Removed bugs on incoming accessdata
93 order
95 Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat
96 dn_recvmsg fixes
98 Patrick J. Caulfield
99 dn_bind fixes
100 *******************************************************************************/
102 #include <linux/module.h>
103 #include <linux/errno.h>
104 #include <linux/types.h>
105 #include <linux/slab.h>
106 #include <linux/socket.h>
107 #include <linux/in.h>
108 #include <linux/kernel.h>
109 #include <linux/sched.h>
110 #include <linux/timer.h>
111 #include <linux/string.h>
112 #include <linux/sockios.h>
113 #include <linux/net.h>
114 #include <linux/netdevice.h>
115 #include <linux/inet.h>
116 #include <linux/route.h>
117 #include <linux/netfilter.h>
118 #include <linux/seq_file.h>
119 #include <net/sock.h>
120 #include <net/tcp_states.h>
121 #include <net/flow.h>
122 #include <asm/system.h>
123 #include <asm/ioctls.h>
124 #include <linux/capability.h>
125 #include <linux/mm.h>
126 #include <linux/interrupt.h>
127 #include <linux/proc_fs.h>
128 #include <linux/stat.h>
129 #include <linux/init.h>
130 #include <linux/poll.h>
131 #include <net/net_namespace.h>
132 #include <net/neighbour.h>
133 #include <net/dst.h>
134 #include <net/fib_rules.h>
135 #include <net/dn.h>
136 #include <net/dn_nsp.h>
137 #include <net/dn_dev.h>
138 #include <net/dn_route.h>
139 #include <net/dn_fib.h>
140 #include <net/dn_neigh.h>
142 struct dn_sock {
143 struct sock sk;
144 struct dn_scp scp;
147 static void dn_keepalive(struct sock *sk);
149 #define DN_SK_HASH_SHIFT 8
150 #define DN_SK_HASH_SIZE (1 << DN_SK_HASH_SHIFT)
151 #define DN_SK_HASH_MASK (DN_SK_HASH_SIZE - 1)
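/* Editor's note, not in the original source: with DN_SK_HASH_SHIFT == 8 the
 * dn_sk_hash table declared below has 1 << 8 == 256 buckets and the mask is
 * 0xff, so a local port is assigned to a bucket purely by the low eight bits
 * of its host-order value. */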
154 static const struct proto_ops dn_proto_ops;
155 static DEFINE_RWLOCK(dn_hash_lock);
156 static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
157 static struct hlist_head dn_wild_sk;
158 static atomic_t decnet_memory_allocated;
160 static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags);
161 static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags);
163 static struct hlist_head *dn_find_list(struct sock *sk)
165 struct dn_scp *scp = DN_SK(sk);
167 if (scp->addr.sdn_flags & SDF_WILD)
168 return hlist_empty(&dn_wild_sk) ? &dn_wild_sk : NULL;
170 return &dn_sk_hash[le16_to_cpu(scp->addrloc) & DN_SK_HASH_MASK];
174 * Valid ports are those greater than zero and not already in use.
176 static int check_port(__le16 port)
178 struct sock *sk;
179 struct hlist_node *node;
181 if (port == 0)
182 return -1;
184 sk_for_each(sk, node, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) {
185 struct dn_scp *scp = DN_SK(sk);
186 if (scp->addrloc == port)
187 return -1;
189 return 0;
192 static unsigned short port_alloc(struct sock *sk)
194 struct dn_scp *scp = DN_SK(sk);
195 static unsigned short port = 0x2000;
196 unsigned short i_port = port;
198 while(check_port(cpu_to_le16(++port)) != 0) {
199 if (port == i_port)
200 return 0;
203 scp->addrloc = cpu_to_le16(port);
205 return 1;
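/* Editor's note, illustrative only (not in the original source): port_alloc()
 * keeps a single static cursor starting at 0x2000, so the first auto-bound
 * socket typically gets local port 0x2001, the next 0x2002, and so on.  The
 * search wraps through the whole 16 bit range, skipping zero and in-use
 * ports, and only returns 0 (failure) once it arrives back at its starting
 * value. */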
209 * Since this is only ever called from user
210 * level, we don't need a write_lock() version
211 * of this.
213 static int dn_hash_sock(struct sock *sk)
215 struct dn_scp *scp = DN_SK(sk);
216 struct hlist_head *list;
217 int rv = -EUSERS;
219 BUG_ON(sk_hashed(sk));
221 write_lock_bh(&dn_hash_lock);
223 if (!scp->addrloc && !port_alloc(sk))
224 goto out;
226 rv = -EADDRINUSE;
227 if ((list = dn_find_list(sk)) == NULL)
228 goto out;
230 sk_add_node(sk, list);
231 rv = 0;
232 out:
233 write_unlock_bh(&dn_hash_lock);
234 return rv;
237 static void dn_unhash_sock(struct sock *sk)
239 write_lock(&dn_hash_lock);
240 sk_del_node_init(sk);
241 write_unlock(&dn_hash_lock);
244 static void dn_unhash_sock_bh(struct sock *sk)
246 write_lock_bh(&dn_hash_lock);
247 sk_del_node_init(sk);
248 write_unlock_bh(&dn_hash_lock);
251 static struct hlist_head *listen_hash(struct sockaddr_dn *addr)
253 int i;
254 unsigned hash = addr->sdn_objnum;
256 if (hash == 0) {
257 hash = addr->sdn_objnamel;
258 for(i = 0; i < le16_to_cpu(addr->sdn_objnamel); i++) {
259 hash ^= addr->sdn_objname[i];
260 hash ^= (hash << 3);
264 return &dn_sk_hash[hash & DN_SK_HASH_MASK];
268 * Called to transform a socket from bound (i.e. with a local address)
269 * into a listening socket (doesn't need a local port number) and rehashes
270 * based upon the object name/number.
272 static void dn_rehash_sock(struct sock *sk)
274 struct hlist_head *list;
275 struct dn_scp *scp = DN_SK(sk);
277 if (scp->addr.sdn_flags & SDF_WILD)
278 return;
280 write_lock_bh(&dn_hash_lock);
281 sk_del_node_init(sk);
282 DN_SK(sk)->addrloc = 0;
283 list = listen_hash(&DN_SK(sk)->addr);
284 sk_add_node(sk, list);
285 write_unlock_bh(&dn_hash_lock);
288 int dn_sockaddr2username(struct sockaddr_dn *sdn, unsigned char *buf, unsigned char type)
290 int len = 2;
292 *buf++ = type;
294 switch(type) {
295 case 0:
296 *buf++ = sdn->sdn_objnum;
297 break;
298 case 1:
299 *buf++ = 0;
300 *buf++ = le16_to_cpu(sdn->sdn_objnamel);
301 memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
302 len = 3 + le16_to_cpu(sdn->sdn_objnamel);
303 break;
304 case 2:
305 memset(buf, 0, 5);
306 buf += 5;
307 *buf++ = le16_to_cpu(sdn->sdn_objnamel);
308 memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
309 len = 7 + le16_to_cpu(sdn->sdn_objnamel);
310 break;
313 return len;
317 * On reception of usernames, we handle types 1 and 0 for destination
318 * addresses only. Types 2 and 4 are used for source addresses, but the
319 * UIC, GIC are ignored and they are both treated the same way. Type 3
320 * is never used as I've no idea what its purpose might be or what its
321 * format is.
323 int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *sdn, unsigned char *fmt)
325 unsigned char type;
326 int size = len;
327 int namel = 12;
329 sdn->sdn_objnum = 0;
330 sdn->sdn_objnamel = cpu_to_le16(0);
331 memset(sdn->sdn_objname, 0, DN_MAXOBJL);
333 if (len < 2)
334 return -1;
336 len -= 2;
337 *fmt = *data++;
338 type = *data++;
340 switch(*fmt) {
341 case 0:
342 sdn->sdn_objnum = type;
343 return 2;
344 case 1:
345 namel = 16;
346 break;
347 case 2:
348 len -= 4;
349 data += 4;
350 break;
351 case 4:
352 len -= 8;
353 data += 8;
354 break;
355 default:
356 return -1;
359 len -= 1;
361 if (len < 0)
362 return -1;
364 sdn->sdn_objnamel = cpu_to_le16(*data++);
365 len -= le16_to_cpu(sdn->sdn_objnamel);
367 if ((len < 0) || (le16_to_cpu(sdn->sdn_objnamel) > namel))
368 return -1;
370 memcpy(sdn->sdn_objname, data, le16_to_cpu(sdn->sdn_objnamel));
372 return size - len;
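/* Editor's note, worked example (not in the original source): a format 1
 * "username" as built by dn_sockaddr2username() above is laid out as
 *	[fmt = 1][0][objnamel][objname bytes ...]
 * so the six bytes 01 00 03 'F' 'A' 'L' parse here to sdn_objnamel == 3,
 * sdn_objname == "FAL" and a return value of 6 (bytes consumed). */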
375 struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr)
377 struct hlist_head *list = listen_hash(addr);
378 struct hlist_node *node;
379 struct sock *sk;
381 read_lock(&dn_hash_lock);
382 sk_for_each(sk, node, list) {
383 struct dn_scp *scp = DN_SK(sk);
384 if (sk->sk_state != TCP_LISTEN)
385 continue;
386 if (scp->addr.sdn_objnum) {
387 if (scp->addr.sdn_objnum != addr->sdn_objnum)
388 continue;
389 } else {
390 if (addr->sdn_objnum)
391 continue;
392 if (scp->addr.sdn_objnamel != addr->sdn_objnamel)
393 continue;
394 if (memcmp(scp->addr.sdn_objname, addr->sdn_objname, le16_to_cpu(addr->sdn_objnamel)) != 0)
395 continue;
397 sock_hold(sk);
398 read_unlock(&dn_hash_lock);
399 return sk;
402 sk = sk_head(&dn_wild_sk);
403 if (sk) {
404 if (sk->sk_state == TCP_LISTEN)
405 sock_hold(sk);
406 else
407 sk = NULL;
410 read_unlock(&dn_hash_lock);
411 return sk;
414 struct sock *dn_find_by_skb(struct sk_buff *skb)
416 struct dn_skb_cb *cb = DN_SKB_CB(skb);
417 struct sock *sk;
418 struct hlist_node *node;
419 struct dn_scp *scp;
421 read_lock(&dn_hash_lock);
422 sk_for_each(sk, node, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) {
423 scp = DN_SK(sk);
424 if (cb->src != dn_saddr2dn(&scp->peer))
425 continue;
426 if (cb->dst_port != scp->addrloc)
427 continue;
428 if (scp->addrrem && (cb->src_port != scp->addrrem))
429 continue;
430 sock_hold(sk);
431 goto found;
433 sk = NULL;
434 found:
435 read_unlock(&dn_hash_lock);
436 return sk;
441 static void dn_destruct(struct sock *sk)
443 struct dn_scp *scp = DN_SK(sk);
445 skb_queue_purge(&scp->data_xmit_queue);
446 skb_queue_purge(&scp->other_xmit_queue);
447 skb_queue_purge(&scp->other_receive_queue);
449 dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
452 static int dn_memory_pressure;
454 static void dn_enter_memory_pressure(struct sock *sk)
456 if (!dn_memory_pressure) {
457 dn_memory_pressure = 1;
461 static struct proto dn_proto = {
462 .name = "NSP",
463 .owner = THIS_MODULE,
464 .enter_memory_pressure = dn_enter_memory_pressure,
465 .memory_pressure = &dn_memory_pressure,
466 .memory_allocated = &decnet_memory_allocated,
467 .sysctl_mem = sysctl_decnet_mem,
468 .sysctl_wmem = sysctl_decnet_wmem,
469 .sysctl_rmem = sysctl_decnet_rmem,
470 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
471 .obj_size = sizeof(struct dn_sock),
474 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
476 struct dn_scp *scp;
477 struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto);
479 if (!sk)
480 goto out;
482 if (sock)
483 sock->ops = &dn_proto_ops;
484 sock_init_data(sock, sk);
486 sk->sk_backlog_rcv = dn_nsp_backlog_rcv;
487 sk->sk_destruct = dn_destruct;
488 sk->sk_no_check = 1;
489 sk->sk_family = PF_DECnet;
490 sk->sk_protocol = 0;
491 sk->sk_allocation = gfp;
492 sk->sk_sndbuf = sysctl_decnet_wmem[1];
493 sk->sk_rcvbuf = sysctl_decnet_rmem[1];
495 /* Initialization of DECnet Session Control Port */
496 scp = DN_SK(sk);
497 scp->state = DN_O; /* Open */
498 scp->numdat = 1; /* Next data seg to tx */
499 scp->numoth = 1; /* Next oth data to tx */
500 scp->ackxmt_dat = 0; /* Last data seg ack'ed */
501 scp->ackxmt_oth = 0; /* Last oth data ack'ed */
502 scp->ackrcv_dat = 0; /* Highest data ack recv*/
503 scp->ackrcv_oth = 0; /* Last oth data ack rec*/
504 scp->flowrem_sw = DN_SEND;
505 scp->flowloc_sw = DN_SEND;
506 scp->flowrem_dat = 0;
507 scp->flowrem_oth = 1;
508 scp->flowloc_dat = 0;
509 scp->flowloc_oth = 1;
510 scp->services_rem = 0;
511 scp->services_loc = 1 | NSP_FC_NONE;
512 scp->info_rem = 0;
513 scp->info_loc = 0x03; /* NSP version 4.1 */
514 scp->segsize_rem = 230 - DN_MAX_NSP_DATA_HEADER; /* Default: Updated by remote segsize */
515 scp->nonagle = 0;
516 scp->multi_ireq = 1;
517 scp->accept_mode = ACC_IMMED;
518 scp->addr.sdn_family = AF_DECnet;
519 scp->peer.sdn_family = AF_DECnet;
520 scp->accessdata.acc_accl = 5;
521 memcpy(scp->accessdata.acc_acc, "LINUX", 5);
523 scp->max_window = NSP_MAX_WINDOW;
524 scp->snd_window = NSP_MIN_WINDOW;
525 scp->nsp_srtt = NSP_INITIAL_SRTT;
526 scp->nsp_rttvar = NSP_INITIAL_RTTVAR;
527 scp->nsp_rxtshift = 0;
529 skb_queue_head_init(&scp->data_xmit_queue);
530 skb_queue_head_init(&scp->other_xmit_queue);
531 skb_queue_head_init(&scp->other_receive_queue);
533 scp->persist = 0;
534 scp->persist_fxn = NULL;
535 scp->keepalive = 10 * HZ;
536 scp->keepalive_fxn = dn_keepalive;
538 init_timer(&scp->delack_timer);
539 scp->delack_pending = 0;
540 scp->delack_fxn = dn_nsp_delayed_ack;
542 dn_start_slow_timer(sk);
543 out:
544 return sk;
547 static void dn_keepalive(struct sock *sk)
549 struct dn_scp *scp = DN_SK(sk);
552 * By checking that the other_data transmit queue is empty
553 * we are double checking that we are not sending too
554 * many of these keepalive frames.
556 if (skb_queue_empty(&scp->other_xmit_queue))
557 dn_nsp_send_link(sk, DN_NOCHANGE, 0);
562 * Timer for shutdown/destroyed sockets.
563 * When socket is dead & no packets have been sent for a
564 * certain amount of time, they are removed by this
565 * routine. Also takes care of sending out DI & DC
566 * frames at correct times.
568 int dn_destroy_timer(struct sock *sk)
570 struct dn_scp *scp = DN_SK(sk);
572 scp->persist = dn_nsp_persist(sk);
574 switch(scp->state) {
575 case DN_DI:
576 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
577 if (scp->nsp_rxtshift >= decnet_di_count)
578 scp->state = DN_CN;
579 return 0;
581 case DN_DR:
582 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
583 if (scp->nsp_rxtshift >= decnet_dr_count)
584 scp->state = DN_DRC;
585 return 0;
587 case DN_DN:
588 if (scp->nsp_rxtshift < decnet_dn_count) {
589 /* printk(KERN_DEBUG "dn_destroy_timer: DN\n"); */
590 dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, GFP_ATOMIC);
591 return 0;
595 scp->persist = (HZ * decnet_time_wait);
597 if (sk->sk_socket)
598 return 0;
600 if ((jiffies - scp->stamp) >= (HZ * decnet_time_wait)) {
601 dn_unhash_sock(sk);
602 sock_put(sk);
603 return 1;
606 return 0;
609 static void dn_destroy_sock(struct sock *sk)
611 struct dn_scp *scp = DN_SK(sk);
613 scp->nsp_rxtshift = 0; /* reset back off */
615 if (sk->sk_socket) {
616 if (sk->sk_socket->state != SS_UNCONNECTED)
617 sk->sk_socket->state = SS_DISCONNECTING;
620 sk->sk_state = TCP_CLOSE;
622 switch(scp->state) {
623 case DN_DN:
624 dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
625 sk->sk_allocation);
626 scp->persist_fxn = dn_destroy_timer;
627 scp->persist = dn_nsp_persist(sk);
628 break;
629 case DN_CR:
630 scp->state = DN_DR;
631 goto disc_reject;
632 case DN_RUN:
633 scp->state = DN_DI;
634 case DN_DI:
635 case DN_DR:
636 disc_reject:
637 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation);
638 case DN_NC:
639 case DN_NR:
640 case DN_RJ:
641 case DN_DIC:
642 case DN_CN:
643 case DN_DRC:
644 case DN_CI:
645 case DN_CD:
646 scp->persist_fxn = dn_destroy_timer;
647 scp->persist = dn_nsp_persist(sk);
648 break;
649 default:
650 printk(KERN_DEBUG "DECnet: dn_destroy_sock passed socket in invalid state\n");
651 case DN_O:
652 dn_stop_slow_timer(sk);
654 dn_unhash_sock_bh(sk);
655 sock_put(sk);
657 break;
661 char *dn_addr2asc(__u16 addr, char *buf)
663 unsigned short node, area;
665 node = addr & 0x03ff;
666 area = addr >> 10;
667 sprintf(buf, "%hd.%hd", area, node);
669 return buf;
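/* Editor's note, worked example (not in the original source): the 16 bit
 * address packs a 6 bit area above a 10 bit node number, so 0x0401 (area 1,
 * node 1) prints as "1.1" and 0x1003 (area 4, node 3) prints as "4.3". */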
674 static int dn_create(struct net *net, struct socket *sock, int protocol,
675 int kern)
677 struct sock *sk;
679 if (!net_eq(net, &init_net))
680 return -EAFNOSUPPORT;
682 switch(sock->type) {
683 case SOCK_SEQPACKET:
684 if (protocol != DNPROTO_NSP)
685 return -EPROTONOSUPPORT;
686 break;
687 case SOCK_STREAM:
688 break;
689 default:
690 return -ESOCKTNOSUPPORT;
694 if ((sk = dn_alloc_sock(net, sock, GFP_KERNEL)) == NULL)
695 return -ENOBUFS;
697 sk->sk_protocol = protocol;
699 return 0;
703 static int
704 dn_release(struct socket *sock)
706 struct sock *sk = sock->sk;
708 if (sk) {
709 sock_orphan(sk);
710 sock_hold(sk);
711 lock_sock(sk);
712 dn_destroy_sock(sk);
713 release_sock(sk);
714 sock_put(sk);
717 return 0;
720 static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
722 struct sock *sk = sock->sk;
723 struct dn_scp *scp = DN_SK(sk);
724 struct sockaddr_dn *saddr = (struct sockaddr_dn *)uaddr;
725 struct net_device *dev, *ldev;
726 int rv;
728 if (addr_len != sizeof(struct sockaddr_dn))
729 return -EINVAL;
731 if (saddr->sdn_family != AF_DECnet)
732 return -EINVAL;
734 if (le16_to_cpu(saddr->sdn_nodeaddrl) && (le16_to_cpu(saddr->sdn_nodeaddrl) != 2))
735 return -EINVAL;
737 if (le16_to_cpu(saddr->sdn_objnamel) > DN_MAXOBJL)
738 return -EINVAL;
740 if (saddr->sdn_flags & ~SDF_WILD)
741 return -EINVAL;
743 if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum ||
744 (saddr->sdn_flags & SDF_WILD)))
745 return -EACCES;
747 if (!(saddr->sdn_flags & SDF_WILD)) {
748 if (le16_to_cpu(saddr->sdn_nodeaddrl)) {
749 rcu_read_lock();
750 ldev = NULL;
751 for_each_netdev_rcu(&init_net, dev) {
752 if (!dev->dn_ptr)
753 continue;
754 if (dn_dev_islocal(dev, dn_saddr2dn(saddr))) {
755 ldev = dev;
756 break;
759 rcu_read_unlock();
760 if (ldev == NULL)
761 return -EADDRNOTAVAIL;
765 rv = -EINVAL;
766 lock_sock(sk);
767 if (sock_flag(sk, SOCK_ZAPPED)) {
768 memcpy(&scp->addr, saddr, addr_len);
769 sock_reset_flag(sk, SOCK_ZAPPED);
771 rv = dn_hash_sock(sk);
772 if (rv)
773 sock_set_flag(sk, SOCK_ZAPPED);
775 release_sock(sk);
777 return rv;
781 static int dn_auto_bind(struct socket *sock)
783 struct sock *sk = sock->sk;
784 struct dn_scp *scp = DN_SK(sk);
785 int rv;
787 sock_reset_flag(sk, SOCK_ZAPPED);
789 scp->addr.sdn_flags = 0;
790 scp->addr.sdn_objnum = 0;
793 * This stuff is to keep compatibility with Eduardo's
794 * patch. I hope I can dispense with it shortly...
796 if ((scp->accessdata.acc_accl != 0) &&
797 (scp->accessdata.acc_accl <= 12)) {
799 scp->addr.sdn_objnamel = cpu_to_le16(scp->accessdata.acc_accl);
800 memcpy(scp->addr.sdn_objname, scp->accessdata.acc_acc, le16_to_cpu(scp->addr.sdn_objnamel));
802 scp->accessdata.acc_accl = 0;
803 memset(scp->accessdata.acc_acc, 0, 40);
805 /* End of compatibility stuff */
807 scp->addr.sdn_add.a_len = cpu_to_le16(2);
808 rv = dn_dev_bind_default((__le16 *)scp->addr.sdn_add.a_addr);
809 if (rv == 0) {
810 rv = dn_hash_sock(sk);
811 if (rv)
812 sock_set_flag(sk, SOCK_ZAPPED);
815 return rv;
818 static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
820 struct dn_scp *scp = DN_SK(sk);
821 DEFINE_WAIT(wait);
822 int err;
824 if (scp->state != DN_CR)
825 return -EINVAL;
827 scp->state = DN_CC;
828 scp->segsize_loc = dst_metric(__sk_dst_get(sk), RTAX_ADVMSS);
829 dn_send_conn_conf(sk, allocation);
831 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
832 for(;;) {
833 release_sock(sk);
834 if (scp->state == DN_CC)
835 *timeo = schedule_timeout(*timeo);
836 lock_sock(sk);
837 err = 0;
838 if (scp->state == DN_RUN)
839 break;
840 err = sock_error(sk);
841 if (err)
842 break;
843 err = sock_intr_errno(*timeo);
844 if (signal_pending(current))
845 break;
846 err = -EAGAIN;
847 if (!*timeo)
848 break;
849 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
851 finish_wait(sk_sleep(sk), &wait);
852 if (err == 0) {
853 sk->sk_socket->state = SS_CONNECTED;
854 } else if (scp->state != DN_CC) {
855 sk->sk_socket->state = SS_UNCONNECTED;
857 return err;
860 static int dn_wait_run(struct sock *sk, long *timeo)
862 struct dn_scp *scp = DN_SK(sk);
863 DEFINE_WAIT(wait);
864 int err = 0;
866 if (scp->state == DN_RUN)
867 goto out;
869 if (!*timeo)
870 return -EALREADY;
872 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
873 for(;;) {
874 release_sock(sk);
875 if (scp->state == DN_CI || scp->state == DN_CC)
876 *timeo = schedule_timeout(*timeo);
877 lock_sock(sk);
878 err = 0;
879 if (scp->state == DN_RUN)
880 break;
881 err = sock_error(sk);
882 if (err)
883 break;
884 err = sock_intr_errno(*timeo);
885 if (signal_pending(current))
886 break;
887 err = -ETIMEDOUT;
888 if (!*timeo)
889 break;
890 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
892 finish_wait(sk_sleep(sk), &wait);
893 out:
894 if (err == 0) {
895 sk->sk_socket->state = SS_CONNECTED;
896 } else if (scp->state != DN_CI && scp->state != DN_CC) {
897 sk->sk_socket->state = SS_UNCONNECTED;
899 return err;
902 static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
904 struct socket *sock = sk->sk_socket;
905 struct dn_scp *scp = DN_SK(sk);
906 int err = -EISCONN;
907 struct flowi fl;
909 if (sock->state == SS_CONNECTED)
910 goto out;
912 if (sock->state == SS_CONNECTING) {
913 err = 0;
914 if (scp->state == DN_RUN) {
915 sock->state = SS_CONNECTED;
916 goto out;
918 err = -ECONNREFUSED;
919 if (scp->state != DN_CI && scp->state != DN_CC) {
920 sock->state = SS_UNCONNECTED;
921 goto out;
923 return dn_wait_run(sk, timeo);
926 err = -EINVAL;
927 if (scp->state != DN_O)
928 goto out;
930 if (addr == NULL || addrlen != sizeof(struct sockaddr_dn))
931 goto out;
932 if (addr->sdn_family != AF_DECnet)
933 goto out;
934 if (addr->sdn_flags & SDF_WILD)
935 goto out;
937 if (sock_flag(sk, SOCK_ZAPPED)) {
938 err = dn_auto_bind(sk->sk_socket);
939 if (err)
940 goto out;
943 memcpy(&scp->peer, addr, sizeof(struct sockaddr_dn));
945 err = -EHOSTUNREACH;
946 memset(&fl, 0, sizeof(fl));
947 fl.oif = sk->sk_bound_dev_if;
948 fl.fld_dst = dn_saddr2dn(&scp->peer);
949 fl.fld_src = dn_saddr2dn(&scp->addr);
950 dn_sk_ports_copy(&fl, scp);
951 fl.proto = DNPROTO_NSP;
952 if (dn_route_output_sock(&sk->sk_dst_cache, &fl, sk, flags) < 0)
953 goto out;
954 sk->sk_route_caps = sk->sk_dst_cache->dev->features;
955 sock->state = SS_CONNECTING;
956 scp->state = DN_CI;
957 scp->segsize_loc = dst_metric(sk->sk_dst_cache, RTAX_ADVMSS);
959 dn_nsp_send_conninit(sk, NSP_CI);
960 err = -EINPROGRESS;
961 if (*timeo) {
962 err = dn_wait_run(sk, timeo);
964 out:
965 return err;
968 static int dn_connect(struct socket *sock, struct sockaddr *uaddr, int addrlen, int flags)
970 struct sockaddr_dn *addr = (struct sockaddr_dn *)uaddr;
971 struct sock *sk = sock->sk;
972 int err;
973 long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
975 lock_sock(sk);
976 err = __dn_connect(sk, addr, addrlen, &timeo, 0);
977 release_sock(sk);
979 return err;
982 static inline int dn_check_state(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
984 struct dn_scp *scp = DN_SK(sk);
986 switch(scp->state) {
987 case DN_RUN:
988 return 0;
989 case DN_CR:
990 return dn_confirm_accept(sk, timeo, sk->sk_allocation);
991 case DN_CI:
992 case DN_CC:
993 return dn_wait_run(sk, timeo);
994 case DN_O:
995 return __dn_connect(sk, addr, addrlen, timeo, flags);
998 return -EINVAL;
1002 static void dn_access_copy(struct sk_buff *skb, struct accessdata_dn *acc)
1004 unsigned char *ptr = skb->data;
1006 acc->acc_userl = *ptr++;
1007 memcpy(&acc->acc_user, ptr, acc->acc_userl);
1008 ptr += acc->acc_userl;
1010 acc->acc_passl = *ptr++;
1011 memcpy(&acc->acc_pass, ptr, acc->acc_passl);
1012 ptr += acc->acc_passl;
1014 acc->acc_accl = *ptr++;
1015 memcpy(&acc->acc_acc, ptr, acc->acc_accl);
1017 skb_pull(skb, acc->acc_accl + acc->acc_passl + acc->acc_userl + 3);
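/* Editor's note, illustrative only (not in the original source): the access
 * data copied above is three length-prefixed fields packed back to back,
 *	[userl][user ...][passl][pass ...][accl][acc ...]
 * which is why the final skb_pull() removes userl + passl + accl bytes plus
 * the three length bytes themselves. */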
1021 static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt)
1023 unsigned char *ptr = skb->data;
1024 u16 len = *ptr++; /* yes, it's 8bit on the wire */
1026 BUG_ON(len > 16); /* we've checked the contents earlier */
1027 opt->opt_optl = cpu_to_le16(len);
1028 opt->opt_status = 0;
1029 memcpy(opt->opt_data, ptr, len);
1030 skb_pull(skb, len + 1);
1033 static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
1035 DEFINE_WAIT(wait);
1036 struct sk_buff *skb = NULL;
1037 int err = 0;
1039 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1040 for(;;) {
1041 release_sock(sk);
1042 skb = skb_dequeue(&sk->sk_receive_queue);
1043 if (skb == NULL) {
1044 *timeo = schedule_timeout(*timeo);
1045 skb = skb_dequeue(&sk->sk_receive_queue);
1047 lock_sock(sk);
1048 if (skb != NULL)
1049 break;
1050 err = -EINVAL;
1051 if (sk->sk_state != TCP_LISTEN)
1052 break;
1053 err = sock_intr_errno(*timeo);
1054 if (signal_pending(current))
1055 break;
1056 err = -EAGAIN;
1057 if (!*timeo)
1058 break;
1059 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1061 finish_wait(sk_sleep(sk), &wait);
1063 return skb == NULL ? ERR_PTR(err) : skb;
1066 static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
1068 struct sock *sk = sock->sk, *newsk;
1069 struct sk_buff *skb = NULL;
1070 struct dn_skb_cb *cb;
1071 unsigned char menuver;
1072 int err = 0;
1073 unsigned char type;
1074 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1075 struct dst_entry *dst;
1077 lock_sock(sk);
1079 if (sk->sk_state != TCP_LISTEN || DN_SK(sk)->state != DN_O) {
1080 release_sock(sk);
1081 return -EINVAL;
1084 skb = skb_dequeue(&sk->sk_receive_queue);
1085 if (skb == NULL) {
1086 skb = dn_wait_for_connect(sk, &timeo);
1087 if (IS_ERR(skb)) {
1088 release_sock(sk);
1089 return PTR_ERR(skb);
1093 cb = DN_SKB_CB(skb);
1094 sk->sk_ack_backlog--;
1095 newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation);
1096 if (newsk == NULL) {
1097 release_sock(sk);
1098 kfree_skb(skb);
1099 return -ENOBUFS;
1101 release_sock(sk);
1103 dst = skb_dst(skb);
1104 sk_dst_set(newsk, dst);
1105 skb_dst_set(skb, NULL);
1107 DN_SK(newsk)->state = DN_CR;
1108 DN_SK(newsk)->addrrem = cb->src_port;
1109 DN_SK(newsk)->services_rem = cb->services;
1110 DN_SK(newsk)->info_rem = cb->info;
1111 DN_SK(newsk)->segsize_rem = cb->segsize;
1112 DN_SK(newsk)->accept_mode = DN_SK(sk)->accept_mode;
1114 if (DN_SK(newsk)->segsize_rem < 230)
1115 DN_SK(newsk)->segsize_rem = 230;
1117 if ((DN_SK(newsk)->services_rem & NSP_FC_MASK) == NSP_FC_NONE)
1118 DN_SK(newsk)->max_window = decnet_no_fc_max_cwnd;
1120 newsk->sk_state = TCP_LISTEN;
1121 memcpy(&(DN_SK(newsk)->addr), &(DN_SK(sk)->addr), sizeof(struct sockaddr_dn));
1124 * If we are listening on a wild socket, we don't want
1125 * the newly created socket on the wrong hash queue.
1127 DN_SK(newsk)->addr.sdn_flags &= ~SDF_WILD;
1129 skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->addr), &type));
1130 skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->peer), &type));
1131 *(__le16 *)(DN_SK(newsk)->peer.sdn_add.a_addr) = cb->src;
1132 *(__le16 *)(DN_SK(newsk)->addr.sdn_add.a_addr) = cb->dst;
1134 menuver = *skb->data;
1135 skb_pull(skb, 1);
1137 if (menuver & DN_MENUVER_ACC)
1138 dn_access_copy(skb, &(DN_SK(newsk)->accessdata));
1140 if (menuver & DN_MENUVER_USR)
1141 dn_user_copy(skb, &(DN_SK(newsk)->conndata_in));
1143 if (menuver & DN_MENUVER_PRX)
1144 DN_SK(newsk)->peer.sdn_flags |= SDF_PROXY;
1146 if (menuver & DN_MENUVER_UIC)
1147 DN_SK(newsk)->peer.sdn_flags |= SDF_UICPROXY;
1149 kfree_skb(skb);
1151 memcpy(&(DN_SK(newsk)->conndata_out), &(DN_SK(sk)->conndata_out),
1152 sizeof(struct optdata_dn));
1153 memcpy(&(DN_SK(newsk)->discdata_out), &(DN_SK(sk)->discdata_out),
1154 sizeof(struct optdata_dn));
1156 lock_sock(newsk);
1157 err = dn_hash_sock(newsk);
1158 if (err == 0) {
1159 sock_reset_flag(newsk, SOCK_ZAPPED);
1160 dn_send_conn_ack(newsk);
1163 * Here we use sk->sk_allocation since although the conn conf is
1164 * for the newsk, the context is the old socket.
1166 if (DN_SK(newsk)->accept_mode == ACC_IMMED)
1167 err = dn_confirm_accept(newsk, &timeo,
1168 sk->sk_allocation);
1170 release_sock(newsk);
1171 return err;
1175 static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int *uaddr_len,int peer)
1177 struct sockaddr_dn *sa = (struct sockaddr_dn *)uaddr;
1178 struct sock *sk = sock->sk;
1179 struct dn_scp *scp = DN_SK(sk);
1181 *uaddr_len = sizeof(struct sockaddr_dn);
1183 lock_sock(sk);
1185 if (peer) {
1186 if ((sock->state != SS_CONNECTED &&
1187 sock->state != SS_CONNECTING) &&
1188 scp->accept_mode == ACC_IMMED) {
1189 release_sock(sk);
1190 return -ENOTCONN;
1193 memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn));
1194 } else {
1195 memcpy(sa, &scp->addr, sizeof(struct sockaddr_dn));
1198 release_sock(sk);
1200 return 0;
1204 static unsigned int dn_poll(struct file *file, struct socket *sock, poll_table *wait)
1206 struct sock *sk = sock->sk;
1207 struct dn_scp *scp = DN_SK(sk);
1208 int mask = datagram_poll(file, sock, wait);
1210 if (!skb_queue_empty(&scp->other_receive_queue))
1211 mask |= POLLRDBAND;
1213 return mask;
1216 static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1218 struct sock *sk = sock->sk;
1219 struct dn_scp *scp = DN_SK(sk);
1220 int err = -EOPNOTSUPP;
1221 long amount = 0;
1222 struct sk_buff *skb;
1223 int val;
1225 switch(cmd)
1227 case SIOCGIFADDR:
1228 case SIOCSIFADDR:
1229 return dn_dev_ioctl(cmd, (void __user *)arg);
1231 case SIOCATMARK:
1232 lock_sock(sk);
1233 val = !skb_queue_empty(&scp->other_receive_queue);
1234 if (scp->state != DN_RUN)
1235 val = -ENOTCONN;
1236 release_sock(sk);
1237 return val;
1239 case TIOCOUTQ:
1240 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
1241 if (amount < 0)
1242 amount = 0;
1243 err = put_user(amount, (int __user *)arg);
1244 break;
1246 case TIOCINQ:
1247 lock_sock(sk);
1248 skb = skb_peek(&scp->other_receive_queue);
1249 if (skb) {
1250 amount = skb->len;
1251 } else {
1252 skb_queue_walk(&sk->sk_receive_queue, skb)
1253 amount += skb->len;
1255 release_sock(sk);
1256 err = put_user(amount, (int __user *)arg);
1257 break;
1259 default:
1260 err = -ENOIOCTLCMD;
1261 break;
1264 return err;
1267 static int dn_listen(struct socket *sock, int backlog)
1269 struct sock *sk = sock->sk;
1270 int err = -EINVAL;
1272 lock_sock(sk);
1274 if (sock_flag(sk, SOCK_ZAPPED))
1275 goto out;
1277 if ((DN_SK(sk)->state != DN_O) || (sk->sk_state == TCP_LISTEN))
1278 goto out;
1280 sk->sk_max_ack_backlog = backlog;
1281 sk->sk_ack_backlog = 0;
1282 sk->sk_state = TCP_LISTEN;
1283 err = 0;
1284 dn_rehash_sock(sk);
1286 out:
1287 release_sock(sk);
1289 return err;
1293 static int dn_shutdown(struct socket *sock, int how)
1295 struct sock *sk = sock->sk;
1296 struct dn_scp *scp = DN_SK(sk);
1297 int err = -ENOTCONN;
1299 lock_sock(sk);
1301 if (sock->state == SS_UNCONNECTED)
1302 goto out;
1304 err = 0;
1305 if (sock->state == SS_DISCONNECTING)
1306 goto out;
1308 err = -EINVAL;
1309 if (scp->state == DN_O)
1310 goto out;
1312 if (how != SHUTDOWN_MASK)
1313 goto out;
1315 sk->sk_shutdown = how;
1316 dn_destroy_sock(sk);
1317 err = 0;
1319 out:
1320 release_sock(sk);
1322 return err;
1325 static int dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1327 struct sock *sk = sock->sk;
1328 int err;
1330 lock_sock(sk);
1331 err = __dn_setsockopt(sock, level, optname, optval, optlen, 0);
1332 release_sock(sk);
1334 return err;
1337 static int __dn_setsockopt(struct socket *sock, int level,int optname, char __user *optval, unsigned int optlen, int flags)
1339 struct sock *sk = sock->sk;
1340 struct dn_scp *scp = DN_SK(sk);
1341 long timeo;
1342 union {
1343 struct optdata_dn opt;
1344 struct accessdata_dn acc;
1345 int mode;
1346 unsigned long win;
1347 int val;
1348 unsigned char services;
1349 unsigned char info;
1350 } u;
1351 int err;
1353 if (optlen && !optval)
1354 return -EINVAL;
1356 if (optlen > sizeof(u))
1357 return -EINVAL;
1359 if (copy_from_user(&u, optval, optlen))
1360 return -EFAULT;
1362 switch(optname) {
1363 case DSO_CONDATA:
1364 if (sock->state == SS_CONNECTED)
1365 return -EISCONN;
1366 if ((scp->state != DN_O) && (scp->state != DN_CR))
1367 return -EINVAL;
1369 if (optlen != sizeof(struct optdata_dn))
1370 return -EINVAL;
1372 if (le16_to_cpu(u.opt.opt_optl) > 16)
1373 return -EINVAL;
1375 memcpy(&scp->conndata_out, &u.opt, optlen);
1376 break;
1378 case DSO_DISDATA:
1379 if (sock->state != SS_CONNECTED && scp->accept_mode == ACC_IMMED)
1380 return -ENOTCONN;
1382 if (optlen != sizeof(struct optdata_dn))
1383 return -EINVAL;
1385 if (le16_to_cpu(u.opt.opt_optl) > 16)
1386 return -EINVAL;
1388 memcpy(&scp->discdata_out, &u.opt, optlen);
1389 break;
1391 case DSO_CONACCESS:
1392 if (sock->state == SS_CONNECTED)
1393 return -EISCONN;
1394 if (scp->state != DN_O)
1395 return -EINVAL;
1397 if (optlen != sizeof(struct accessdata_dn))
1398 return -EINVAL;
1400 if ((u.acc.acc_accl > DN_MAXACCL) ||
1401 (u.acc.acc_passl > DN_MAXACCL) ||
1402 (u.acc.acc_userl > DN_MAXACCL))
1403 return -EINVAL;
1405 memcpy(&scp->accessdata, &u.acc, optlen);
1406 break;
1408 case DSO_ACCEPTMODE:
1409 if (sock->state == SS_CONNECTED)
1410 return -EISCONN;
1411 if (scp->state != DN_O)
1412 return -EINVAL;
1414 if (optlen != sizeof(int))
1415 return -EINVAL;
1417 if ((u.mode != ACC_IMMED) && (u.mode != ACC_DEFER))
1418 return -EINVAL;
1420 scp->accept_mode = (unsigned char)u.mode;
1421 break;
1423 case DSO_CONACCEPT:
1425 if (scp->state != DN_CR)
1426 return -EINVAL;
1427 timeo = sock_rcvtimeo(sk, 0);
1428 err = dn_confirm_accept(sk, &timeo, sk->sk_allocation);
1429 return err;
1431 case DSO_CONREJECT:
1433 if (scp->state != DN_CR)
1434 return -EINVAL;
1436 scp->state = DN_DR;
1437 sk->sk_shutdown = SHUTDOWN_MASK;
1438 dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
1439 break;
1441 default:
1442 #ifdef CONFIG_NETFILTER
1443 return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
1444 #endif
1445 case DSO_LINKINFO:
1446 case DSO_STREAM:
1447 case DSO_SEQPACKET:
1448 return -ENOPROTOOPT;
1450 case DSO_MAXWINDOW:
1451 if (optlen != sizeof(unsigned long))
1452 return -EINVAL;
1453 if (u.win > NSP_MAX_WINDOW)
1454 u.win = NSP_MAX_WINDOW;
1455 if (u.win == 0)
1456 return -EINVAL;
1457 scp->max_window = u.win;
1458 if (scp->snd_window > u.win)
1459 scp->snd_window = u.win;
1460 break;
1462 case DSO_NODELAY:
1463 if (optlen != sizeof(int))
1464 return -EINVAL;
1465 if (scp->nonagle == 2)
1466 return -EINVAL;
1467 scp->nonagle = (u.val == 0) ? 0 : 1;
1468 /* if (scp->nonagle == 1) { Push pending frames } */
1469 break;
1471 case DSO_CORK:
1472 if (optlen != sizeof(int))
1473 return -EINVAL;
1474 if (scp->nonagle == 1)
1475 return -EINVAL;
1476 scp->nonagle = (u.val == 0) ? 0 : 2;
1477 /* if (scp->nonagle == 0) { Push pending frames } */
1478 break;
1480 case DSO_SERVICES:
1481 if (optlen != sizeof(unsigned char))
1482 return -EINVAL;
1483 if ((u.services & ~NSP_FC_MASK) != 0x01)
1484 return -EINVAL;
1485 if ((u.services & NSP_FC_MASK) == NSP_FC_MASK)
1486 return -EINVAL;
1487 scp->services_loc = u.services;
1488 break;
1490 case DSO_INFO:
1491 if (optlen != sizeof(unsigned char))
1492 return -EINVAL;
1493 if (u.info & 0xfc)
1494 return -EINVAL;
1495 scp->info_loc = u.info;
1496 break;
1499 return 0;
1502 static int dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1504 struct sock *sk = sock->sk;
1505 int err;
1507 lock_sock(sk);
1508 err = __dn_getsockopt(sock, level, optname, optval, optlen, 0);
1509 release_sock(sk);
1511 return err;
1514 static int __dn_getsockopt(struct socket *sock, int level,int optname, char __user *optval,int __user *optlen, int flags)
1516 struct sock *sk = sock->sk;
1517 struct dn_scp *scp = DN_SK(sk);
1518 struct linkinfo_dn link;
1519 unsigned int r_len;
1520 void *r_data = NULL;
1521 unsigned int val;
1523 if(get_user(r_len , optlen))
1524 return -EFAULT;
1526 switch(optname) {
1527 case DSO_CONDATA:
1528 if (r_len > sizeof(struct optdata_dn))
1529 r_len = sizeof(struct optdata_dn);
1530 r_data = &scp->conndata_in;
1531 break;
1533 case DSO_DISDATA:
1534 if (r_len > sizeof(struct optdata_dn))
1535 r_len = sizeof(struct optdata_dn);
1536 r_data = &scp->discdata_in;
1537 break;
1539 case DSO_CONACCESS:
1540 if (r_len > sizeof(struct accessdata_dn))
1541 r_len = sizeof(struct accessdata_dn);
1542 r_data = &scp->accessdata;
1543 break;
1545 case DSO_ACCEPTMODE:
1546 if (r_len > sizeof(unsigned char))
1547 r_len = sizeof(unsigned char);
1548 r_data = &scp->accept_mode;
1549 break;
1551 case DSO_LINKINFO:
1552 if (r_len > sizeof(struct linkinfo_dn))
1553 r_len = sizeof(struct linkinfo_dn);
1555 memset(&link, 0, sizeof(link));
1557 switch(sock->state) {
1558 case SS_CONNECTING:
1559 link.idn_linkstate = LL_CONNECTING;
1560 break;
1561 case SS_DISCONNECTING:
1562 link.idn_linkstate = LL_DISCONNECTING;
1563 break;
1564 case SS_CONNECTED:
1565 link.idn_linkstate = LL_RUNNING;
1566 break;
1567 default:
1568 link.idn_linkstate = LL_INACTIVE;
1571 link.idn_segsize = scp->segsize_rem;
1572 r_data = &link;
1573 break;
1575 default:
1576 #ifdef CONFIG_NETFILTER
1578 int ret, len;
1580 if(get_user(len, optlen))
1581 return -EFAULT;
1583 ret = nf_getsockopt(sk, PF_DECnet, optname,
1584 optval, &len);
1585 if (ret >= 0)
1586 ret = put_user(len, optlen);
1587 return ret;
1589 #endif
1590 case DSO_STREAM:
1591 case DSO_SEQPACKET:
1592 case DSO_CONACCEPT:
1593 case DSO_CONREJECT:
1594 return -ENOPROTOOPT;
1596 case DSO_MAXWINDOW:
1597 if (r_len > sizeof(unsigned long))
1598 r_len = sizeof(unsigned long);
1599 r_data = &scp->max_window;
1600 break;
1602 case DSO_NODELAY:
1603 if (r_len > sizeof(int))
1604 r_len = sizeof(int);
1605 val = (scp->nonagle == 1);
1606 r_data = &val;
1607 break;
1609 case DSO_CORK:
1610 if (r_len > sizeof(int))
1611 r_len = sizeof(int);
1612 val = (scp->nonagle == 2);
1613 r_data = &val;
1614 break;
1616 case DSO_SERVICES:
1617 if (r_len > sizeof(unsigned char))
1618 r_len = sizeof(unsigned char);
1619 r_data = &scp->services_rem;
1620 break;
1622 case DSO_INFO:
1623 if (r_len > sizeof(unsigned char))
1624 r_len = sizeof(unsigned char);
1625 r_data = &scp->info_rem;
1626 break;
1629 if (r_data) {
1630 if (copy_to_user(optval, r_data, r_len))
1631 return -EFAULT;
1632 if (put_user(r_len, optlen))
1633 return -EFAULT;
1636 return 0;
1640 static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target)
1642 struct sk_buff *skb;
1643 int len = 0;
1645 if (flags & MSG_OOB)
1646 return !skb_queue_empty(q) ? 1 : 0;
1648 skb_queue_walk(q, skb) {
1649 struct dn_skb_cb *cb = DN_SKB_CB(skb);
1650 len += skb->len;
1652 if (cb->nsp_flags & 0x40) {
1653 /* SOCK_SEQPACKET reads to EOM */
1654 if (sk->sk_type == SOCK_SEQPACKET)
1655 return 1;
1656 /* so does SOCK_STREAM unless WAITALL is specified */
1657 if (!(flags & MSG_WAITALL))
1658 return 1;
1661 /* minimum data length for read exceeded */
1662 if (len >= target)
1663 return 1;
1666 return 0;
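/* Editor's note, illustrative only (not in the original source): nsp_flags
 * bit 0x40 marks end-of-message, so a SOCK_SEQPACKET socket (or a SOCK_STREAM
 * read without MSG_WAITALL) is reported readable as soon as a segment carrying
 * that flag is queued, while MSG_WAITALL keeps waiting until at least target
 * bytes have accumulated. */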
1670 static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
1671 struct msghdr *msg, size_t size, int flags)
1673 struct sock *sk = sock->sk;
1674 struct dn_scp *scp = DN_SK(sk);
1675 struct sk_buff_head *queue = &sk->sk_receive_queue;
1676 size_t target = size > 1 ? 1 : 0;
1677 size_t copied = 0;
1678 int rv = 0;
1679 struct sk_buff *skb, *n;
1680 struct dn_skb_cb *cb = NULL;
1681 unsigned char eor = 0;
1682 long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1684 lock_sock(sk);
1686 if (sock_flag(sk, SOCK_ZAPPED)) {
1687 rv = -EADDRNOTAVAIL;
1688 goto out;
1691 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1692 rv = 0;
1693 goto out;
1696 rv = dn_check_state(sk, NULL, 0, &timeo, flags);
1697 if (rv)
1698 goto out;
1700 if (flags & ~(MSG_CMSG_COMPAT|MSG_PEEK|MSG_OOB|MSG_WAITALL|MSG_DONTWAIT|MSG_NOSIGNAL)) {
1701 rv = -EOPNOTSUPP;
1702 goto out;
1705 if (flags & MSG_OOB)
1706 queue = &scp->other_receive_queue;
1708 if (flags & MSG_WAITALL)
1709 target = size;
1713 * See if there is data ready to read, sleep if there isn't
1715 for(;;) {
1716 DEFINE_WAIT(wait);
1718 if (sk->sk_err)
1719 goto out;
1721 if (!skb_queue_empty(&scp->other_receive_queue)) {
1722 if (!(flags & MSG_OOB)) {
1723 msg->msg_flags |= MSG_OOB;
1724 if (!scp->other_report) {
1725 scp->other_report = 1;
1726 goto out;
1731 if (scp->state != DN_RUN)
1732 goto out;
1734 if (signal_pending(current)) {
1735 rv = sock_intr_errno(timeo);
1736 goto out;
1739 if (dn_data_ready(sk, queue, flags, target))
1740 break;
1742 if (flags & MSG_DONTWAIT) {
1743 rv = -EWOULDBLOCK;
1744 goto out;
1747 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1748 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1749 sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target));
1750 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1751 finish_wait(sk_sleep(sk), &wait);
1754 skb_queue_walk_safe(queue, skb, n) {
1755 unsigned int chunk = skb->len;
1756 cb = DN_SKB_CB(skb);
1758 if ((chunk + copied) > size)
1759 chunk = size - copied;
1761 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
1762 rv = -EFAULT;
1763 break;
1765 copied += chunk;
1767 if (!(flags & MSG_PEEK))
1768 skb_pull(skb, chunk);
1770 eor = cb->nsp_flags & 0x40;
1772 if (skb->len == 0) {
1773 skb_unlink(skb, queue);
1774 kfree_skb(skb);
1776 * N.B. Don't refer to skb or cb after this point
1777 * in loop.
1779 if ((scp->flowloc_sw == DN_DONTSEND) && !dn_congested(sk)) {
1780 scp->flowloc_sw = DN_SEND;
1781 dn_nsp_send_link(sk, DN_SEND, 0);
1785 if (eor) {
1786 if (sk->sk_type == SOCK_SEQPACKET)
1787 break;
1788 if (!(flags & MSG_WAITALL))
1789 break;
1792 if (flags & MSG_OOB)
1793 break;
1795 if (copied >= target)
1796 break;
1799 rv = copied;
1802 if (eor && (sk->sk_type == SOCK_SEQPACKET))
1803 msg->msg_flags |= MSG_EOR;
1805 out:
1806 if (rv == 0)
1807 rv = (flags & MSG_PEEK) ? -sk->sk_err : sock_error(sk);
1809 if ((rv >= 0) && msg->msg_name) {
1810 memcpy(msg->msg_name, &scp->peer, sizeof(struct sockaddr_dn));
1811 msg->msg_namelen = sizeof(struct sockaddr_dn);
1814 release_sock(sk);
1816 return rv;
1820 static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *queue, int flags)
1822 unsigned char fctype = scp->services_rem & NSP_FC_MASK;
1823 if (skb_queue_len(queue) >= scp->snd_window)
1824 return 1;
1825 if (fctype != NSP_FC_NONE) {
1826 if (flags & MSG_OOB) {
1827 if (scp->flowrem_oth == 0)
1828 return 1;
1829 } else {
1830 if (scp->flowrem_dat == 0)
1831 return 1;
1834 return 0;
1838 * The DECnet spec requires that the "routing layer" accepts packets which
1839 * are at least 230 bytes in size. This excludes any headers which the NSP
1840 * layer might add, so we always assume that we'll be using the maximal
1841 * length header on data packets. The variation in length is due to the
1842 * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't
1843 * make much practical difference.
1845 unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu)
1847 unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER;
1848 if (dev) {
1849 struct dn_dev *dn_db = dev->dn_ptr;
1850 mtu -= LL_RESERVED_SPACE(dev);
1851 if (dn_db->use_long)
1852 mtu -= 21;
1853 else
1854 mtu -= 6;
1855 mtu -= DN_MAX_NSP_DATA_HEADER;
1856 } else {
1858 * 21 = long header, 16 = guess at MAC header length
1860 mtu -= (21 + DN_MAX_NSP_DATA_HEADER + 16);
1862 if (mtu > mss)
1863 mss = mtu;
1864 return mss;
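/* Editor's note, worked example (not in the original source): the 230 byte
 * routing-layer minimum, less the maximal NSP data header, gives the fallback
 * mss above and matches the default segsize_rem set in dn_alloc_sock().  For
 * a typical Ethernet device the PMTU-derived figure (MTU minus the link-layer
 * reserve, minus 21 or 6 bytes of DECnet routing header, minus the NSP data
 * header) is far larger, so the device-derived value normally wins. */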
1867 static inline unsigned int dn_current_mss(struct sock *sk, int flags)
1869 struct dst_entry *dst = __sk_dst_get(sk);
1870 struct dn_scp *scp = DN_SK(sk);
1871 int mss_now = min_t(int, scp->segsize_loc, scp->segsize_rem);
1873 /* Other data messages are limited to 16 bytes per packet */
1874 if (flags & MSG_OOB)
1875 return 16;
1877 /* This works out the maximum size of segment we can send out */
1878 if (dst) {
1879 u32 mtu = dst_mtu(dst);
1880 mss_now = min_t(int, dn_mss_from_pmtu(dst->dev, mtu), mss_now);
1883 return mss_now;
1887 * N.B. We get the timeout wrong here, but then we always did get it
1888 * wrong before and this is another step along the road to correcting
1889 * it. It ought to get updated each time we pass through the routine,
1890 * but in practice it probably doesn't matter too much for now.
1892 static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk,
1893 unsigned long datalen, int noblock,
1894 int *errcode)
1896 struct sk_buff *skb = sock_alloc_send_skb(sk, datalen,
1897 noblock, errcode);
1898 if (skb) {
1899 skb->protocol = htons(ETH_P_DNA_RT);
1900 skb->pkt_type = PACKET_OUTGOING;
1902 return skb;
1905 static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
1906 struct msghdr *msg, size_t size)
1908 struct sock *sk = sock->sk;
1909 struct dn_scp *scp = DN_SK(sk);
1910 size_t mss;
1911 struct sk_buff_head *queue = &scp->data_xmit_queue;
1912 int flags = msg->msg_flags;
1913 int err = 0;
1914 size_t sent = 0;
1915 int addr_len = msg->msg_namelen;
1916 struct sockaddr_dn *addr = (struct sockaddr_dn *)msg->msg_name;
1917 struct sk_buff *skb = NULL;
1918 struct dn_skb_cb *cb;
1919 size_t len;
1920 unsigned char fctype;
1921 long timeo;
1923 if (flags & ~(MSG_TRYHARD|MSG_OOB|MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|MSG_MORE|MSG_CMSG_COMPAT))
1924 return -EOPNOTSUPP;
1926 if (addr_len && (addr_len != sizeof(struct sockaddr_dn)))
1927 return -EINVAL;
1929 lock_sock(sk);
1930 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1932 * The only difference between stream sockets and sequenced packet
1933 * sockets is that the stream sockets always behave as if MSG_EOR
1934 * has been set.
1936 if (sock->type == SOCK_STREAM) {
1937 if (flags & MSG_EOR) {
1938 err = -EINVAL;
1939 goto out;
1941 flags |= MSG_EOR;
1945 err = dn_check_state(sk, addr, addr_len, &timeo, flags);
1946 if (err)
1947 goto out_err;
1949 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1950 err = -EPIPE;
1951 if (!(flags & MSG_NOSIGNAL))
1952 send_sig(SIGPIPE, current, 0);
1953 goto out_err;
1956 if ((flags & MSG_TRYHARD) && sk->sk_dst_cache)
1957 dst_negative_advice(sk);
1959 mss = scp->segsize_rem;
1960 fctype = scp->services_rem & NSP_FC_MASK;
1962 mss = dn_current_mss(sk, flags);
1964 if (flags & MSG_OOB) {
1965 queue = &scp->other_xmit_queue;
1966 if (size > mss) {
1967 err = -EMSGSIZE;
1968 goto out;
1972 scp->persist_fxn = dn_nsp_xmit_timeout;
1974 while(sent < size) {
1975 err = sock_error(sk);
1976 if (err)
1977 goto out;
1979 if (signal_pending(current)) {
1980 err = sock_intr_errno(timeo);
1981 goto out;
1985 * Calculate size that we wish to send.
1987 len = size - sent;
1989 if (len > mss)
1990 len = mss;
1993 * Wait for queue size to go down below the window
1994 * size.
1996 if (dn_queue_too_long(scp, queue, flags)) {
1997 DEFINE_WAIT(wait);
1999 if (flags & MSG_DONTWAIT) {
2000 err = -EWOULDBLOCK;
2001 goto out;
2004 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2005 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2006 sk_wait_event(sk, &timeo,
2007 !dn_queue_too_long(scp, queue, flags));
2008 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2009 finish_wait(sk_sleep(sk), &wait);
2010 continue;
2014 * Get a suitably sized skb.
2015 * 64 is a bit of a hack really, but it's larger than any
2016 * link-layer headers and has served us well as a good
2017 * guess as to their real length.
2019 skb = dn_alloc_send_pskb(sk, len + 64 + DN_MAX_NSP_DATA_HEADER,
2020 flags & MSG_DONTWAIT, &err);
2022 if (err)
2023 break;
2025 if (!skb)
2026 continue;
2028 cb = DN_SKB_CB(skb);
2030 skb_reserve(skb, 64 + DN_MAX_NSP_DATA_HEADER);
2032 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
2033 err = -EFAULT;
2034 goto out;
2037 if (flags & MSG_OOB) {
2038 cb->nsp_flags = 0x30;
2039 if (fctype != NSP_FC_NONE)
2040 scp->flowrem_oth--;
2041 } else {
2042 cb->nsp_flags = 0x00;
2043 if (scp->seg_total == 0)
2044 cb->nsp_flags |= 0x20;
2046 scp->seg_total += len;
2048 if (((sent + len) == size) && (flags & MSG_EOR)) {
2049 cb->nsp_flags |= 0x40;
2050 scp->seg_total = 0;
2051 if (fctype == NSP_FC_SCMC)
2052 scp->flowrem_dat--;
2054 if (fctype == NSP_FC_SRC)
2055 scp->flowrem_dat--;
2058 sent += len;
2059 dn_nsp_queue_xmit(sk, skb, sk->sk_allocation, flags & MSG_OOB);
2060 skb = NULL;
2062 scp->persist = dn_nsp_persist(sk);
2065 out:
2067 kfree_skb(skb);
2069 release_sock(sk);
2071 return sent ? sent : err;
2073 out_err:
2074 err = sk_stream_error(sk, flags, err);
2075 release_sock(sk);
2076 return err;
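/* Editor's note, worked example (not in the original source): dn_sendmsg()
 * above cuts a message into mss-sized segments, flagging the first with 0x20
 * (beginning of message) and the last with 0x40 (end of message).  E.g. a
 * 500 byte send with MSG_EOR (implied for SOCK_STREAM) and an mss of 219
 * would leave as three NSP data segments of 219, 219 and 62 bytes carrying
 * nsp_flags 0x20, 0x00 and 0x40 respectively. */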
2079 static int dn_device_event(struct notifier_block *this, unsigned long event,
2080 void *ptr)
2082 struct net_device *dev = (struct net_device *)ptr;
2084 if (!net_eq(dev_net(dev), &init_net))
2085 return NOTIFY_DONE;
2087 switch(event) {
2088 case NETDEV_UP:
2089 dn_dev_up(dev);
2090 break;
2091 case NETDEV_DOWN:
2092 dn_dev_down(dev);
2093 break;
2094 default:
2095 break;
2098 return NOTIFY_DONE;
2101 static struct notifier_block dn_dev_notifier = {
2102 .notifier_call = dn_device_event,
2105 extern int dn_route_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
2107 static struct packet_type dn_dix_packet_type __read_mostly = {
2108 .type = cpu_to_be16(ETH_P_DNA_RT),
2109 .func = dn_route_rcv,
2112 #ifdef CONFIG_PROC_FS
2113 struct dn_iter_state {
2114 int bucket;
2117 static struct sock *dn_socket_get_first(struct seq_file *seq)
2119 struct dn_iter_state *state = seq->private;
2120 struct sock *n = NULL;
2122 for(state->bucket = 0;
2123 state->bucket < DN_SK_HASH_SIZE;
2124 ++state->bucket) {
2125 n = sk_head(&dn_sk_hash[state->bucket]);
2126 if (n)
2127 break;
2130 return n;
2133 static struct sock *dn_socket_get_next(struct seq_file *seq,
2134 struct sock *n)
2136 struct dn_iter_state *state = seq->private;
2138 n = sk_next(n);
2139 try_again:
2140 if (n)
2141 goto out;
2142 if (++state->bucket >= DN_SK_HASH_SIZE)
2143 goto out;
2144 n = sk_head(&dn_sk_hash[state->bucket]);
2145 goto try_again;
2146 out:
2147 return n;
2150 static struct sock *socket_get_idx(struct seq_file *seq, loff_t *pos)
2152 struct sock *sk = dn_socket_get_first(seq);
2154 if (sk) {
2155 while(*pos && (sk = dn_socket_get_next(seq, sk)))
2156 --*pos;
2158 return *pos ? NULL : sk;
2161 static void *dn_socket_get_idx(struct seq_file *seq, loff_t pos)
2163 void *rc;
2164 read_lock_bh(&dn_hash_lock);
2165 rc = socket_get_idx(seq, &pos);
2166 if (!rc) {
2167 read_unlock_bh(&dn_hash_lock);
2169 return rc;
2172 static void *dn_socket_seq_start(struct seq_file *seq, loff_t *pos)
2174 return *pos ? dn_socket_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2177 static void *dn_socket_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2179 void *rc;
2181 if (v == SEQ_START_TOKEN) {
2182 rc = dn_socket_get_idx(seq, 0);
2183 goto out;
2186 rc = dn_socket_get_next(seq, v);
2187 if (rc)
2188 goto out;
2189 read_unlock_bh(&dn_hash_lock);
2190 out:
2191 ++*pos;
2192 return rc;
2195 static void dn_socket_seq_stop(struct seq_file *seq, void *v)
2197 if (v && v != SEQ_START_TOKEN)
2198 read_unlock_bh(&dn_hash_lock);
2201 #define IS_NOT_PRINTABLE(x) ((x) < 32 || (x) > 126)
2203 static void dn_printable_object(struct sockaddr_dn *dn, unsigned char *buf)
2205 int i;
2207 switch (le16_to_cpu(dn->sdn_objnamel)) {
2208 case 0:
2209 sprintf(buf, "%d", dn->sdn_objnum);
2210 break;
2211 default:
2212 for (i = 0; i < le16_to_cpu(dn->sdn_objnamel); i++) {
2213 buf[i] = dn->sdn_objname[i];
2214 if (IS_NOT_PRINTABLE(buf[i]))
2215 buf[i] = '.';
2217 buf[i] = 0;
2221 static char *dn_state2asc(unsigned char state)
2223 switch(state) {
2224 case DN_O:
2225 return "OPEN";
2226 case DN_CR:
2227 return " CR";
2228 case DN_DR:
2229 return " DR";
2230 case DN_DRC:
2231 return " DRC";
2232 case DN_CC:
2233 return " CC";
2234 case DN_CI:
2235 return " CI";
2236 case DN_NR:
2237 return " NR";
2238 case DN_NC:
2239 return " NC";
2240 case DN_CD:
2241 return " CD";
2242 case DN_RJ:
2243 return " RJ";
2244 case DN_RUN:
2245 return " RUN";
2246 case DN_DI:
2247 return " DI";
2248 case DN_DIC:
2249 return " DIC";
2250 case DN_DN:
2251 return " DN";
2252 case DN_CL:
2253 return " CL";
2254 case DN_CN:
2255 return " CN";
2258 return "????";
2261 static inline void dn_socket_format_entry(struct seq_file *seq, struct sock *sk)
2263 struct dn_scp *scp = DN_SK(sk);
2264 char buf1[DN_ASCBUF_LEN];
2265 char buf2[DN_ASCBUF_LEN];
2266 char local_object[DN_MAXOBJL+3];
2267 char remote_object[DN_MAXOBJL+3];
2269 dn_printable_object(&scp->addr, local_object);
2270 dn_printable_object(&scp->peer, remote_object);
2272 seq_printf(seq,
2273 "%6s/%04X %04d:%04d %04d:%04d %01d %-16s "
2274 "%6s/%04X %04d:%04d %04d:%04d %01d %-16s %4s %s\n",
2275 dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->addr)), buf1),
2276 scp->addrloc,
2277 scp->numdat,
2278 scp->numoth,
2279 scp->ackxmt_dat,
2280 scp->ackxmt_oth,
2281 scp->flowloc_sw,
2282 local_object,
2283 dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->peer)), buf2),
2284 scp->addrrem,
2285 scp->numdat_rcv,
2286 scp->numoth_rcv,
2287 scp->ackrcv_dat,
2288 scp->ackrcv_oth,
2289 scp->flowrem_sw,
2290 remote_object,
2291 dn_state2asc(scp->state),
2292 ((scp->accept_mode == ACC_IMMED) ? "IMMED" : "DEFER"));
2295 static int dn_socket_seq_show(struct seq_file *seq, void *v)
2297 if (v == SEQ_START_TOKEN) {
2298 seq_puts(seq, "Local Remote\n");
2299 } else {
2300 dn_socket_format_entry(seq, v);
2302 return 0;
2305 static const struct seq_operations dn_socket_seq_ops = {
2306 .start = dn_socket_seq_start,
2307 .next = dn_socket_seq_next,
2308 .stop = dn_socket_seq_stop,
2309 .show = dn_socket_seq_show,
2312 static int dn_socket_seq_open(struct inode *inode, struct file *file)
2314 return seq_open_private(file, &dn_socket_seq_ops,
2315 sizeof(struct dn_iter_state));
2318 static const struct file_operations dn_socket_seq_fops = {
2319 .owner = THIS_MODULE,
2320 .open = dn_socket_seq_open,
2321 .read = seq_read,
2322 .llseek = seq_lseek,
2323 .release = seq_release_private,
2325 #endif
2327 static const struct net_proto_family dn_family_ops = {
2328 .family = AF_DECnet,
2329 .create = dn_create,
2330 .owner = THIS_MODULE,
2333 static const struct proto_ops dn_proto_ops = {
2334 .family = AF_DECnet,
2335 .owner = THIS_MODULE,
2336 .release = dn_release,
2337 .bind = dn_bind,
2338 .connect = dn_connect,
2339 .socketpair = sock_no_socketpair,
2340 .accept = dn_accept,
2341 .getname = dn_getname,
2342 .poll = dn_poll,
2343 .ioctl = dn_ioctl,
2344 .listen = dn_listen,
2345 .shutdown = dn_shutdown,
2346 .setsockopt = dn_setsockopt,
2347 .getsockopt = dn_getsockopt,
2348 .sendmsg = dn_sendmsg,
2349 .recvmsg = dn_recvmsg,
2350 .mmap = sock_no_mmap,
2351 .sendpage = sock_no_sendpage,
2354 void dn_register_sysctl(void);
2355 void dn_unregister_sysctl(void);
2357 MODULE_DESCRIPTION("The Linux DECnet Network Protocol");
2358 MODULE_AUTHOR("Linux DECnet Project Team");
2359 MODULE_LICENSE("GPL");
2360 MODULE_ALIAS_NETPROTO(PF_DECnet);
2362 static char banner[] __initdata = KERN_INFO "NET4: DECnet for Linux: V.2.5.68s (C) 1995-2003 Linux DECnet Project Team\n";
2364 static int __init decnet_init(void)
2366 int rc;
2368 printk(banner);
2370 rc = proto_register(&dn_proto, 1);
2371 if (rc != 0)
2372 goto out;
2374 dn_neigh_init();
2375 dn_dev_init();
2376 dn_route_init();
2377 dn_fib_init();
2379 sock_register(&dn_family_ops);
2380 dev_add_pack(&dn_dix_packet_type);
2381 register_netdevice_notifier(&dn_dev_notifier);
2383 proc_net_fops_create(&init_net, "decnet", S_IRUGO, &dn_socket_seq_fops);
2384 dn_register_sysctl();
2385 out:
2386 return rc;
2389 module_init(decnet_init);
2392 * Prevent DECnet module unloading until its fixed properly.
2393 * Requires an audit of the code to check for memory leaks and
2394 * initialisation problems etc.