/*
 * DECnet       An implementation of the DECnet protocol suite for the LINUX
 *              operating system.  DECnet is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              DECnet Socket Layer Interface
 *
 * Authors:     Eduardo Marcelo Serrat <emserrat@geocities.com>
 *              Patrick Caulfield <patrick@pandh.demon.co.uk>
 *
 * Changes:
 *        Steve Whitehouse: Copied from Eduardo Serrat and Patrick Caulfield's
 *                          version of the code. Original copyright preserved
 *                          below.
 *        Steve Whitehouse: Some bug fixes, cleaning up some code to make it
 *                          compatible with my routing layer.
 *        Steve Whitehouse: Merging changes from Eduardo Serrat and Patrick
 *                          Caulfield.
 *        Steve Whitehouse: Further bug fixes, checking module code still works
 *                          with new routing layer.
 *        Steve Whitehouse: Additional set/get_sockopt() calls.
 *        Steve Whitehouse: Fixed TIOCINQ ioctl to be the same as Eduardo's new
 *                          code.
 *        Steve Whitehouse: recvmsg() changed to try and behave in a POSIX-like
 *                          way. Didn't manage it entirely, but it's better.
 *        Steve Whitehouse: ditto for sendmsg().
 *        Steve Whitehouse: A selection of bug fixes to various things.
 *        Steve Whitehouse: Added TIOCOUTQ ioctl.
 *        Steve Whitehouse: Fixes to username2sockaddr & sockaddr2username.
 *        Steve Whitehouse: Fixes to connect() error returns.
 *       Patrick Caulfield: Fixes to delayed acceptance logic.
 *         David S. Miller: New socket locking
 *        Steve Whitehouse: Socket list hashing/locking
 *         Arnaldo C. Melo: use capable, not suser
 *        Steve Whitehouse: Removed unused code. Fix to use sk->allocation
 *                          when required.
 *       Patrick Caulfield: /proc/net/decnet now has object name/number
 *        Steve Whitehouse: Fixed local port allocation, hashed sk list
 *          Matthew Wilcox: Fixes for dn_ioctl()
 *        Steve Whitehouse: New connect/accept logic to allow timeouts and
 *                          prepare for sendpage etc.
 */
/******************************************************************************
    (c) 1995-1998 E.M. Serrat		emserrat@geocities.com

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

HISTORY:

Version        Kernel     Date         Author/Comments
-------        ------     ----         ---------------
Version 0.0.1  2.0.30     01-dec-97    Eduardo Marcelo Serrat
                                       (emserrat@geocities.com)

                                       First development of the DECnet socket
                                       layer for Linux. Only supports outgoing
                                       connections.

Version 0.0.2  2.1.105    20-jun-98    Patrick J. Caulfield
                                       (patrick@pandh.demon.co.uk)

                                       Port to new kernel development version.

Version 0.0.3  2.1.106    25-jun-98    Eduardo Marcelo Serrat
                                       (emserrat@geocities.com)

                                       Added support for incoming connections
                                       so we can start developing server apps
                                       on Linux.

Version 0.0.4  2.1.109    21-jul-98    Eduardo Marcelo Serrat
                                       (emserrat@geocities.com)

                                       Added support for X11R6.4. Now we can
                                       use DECnet transport for X on Linux!!!

Version 0.0.5  2.1.110    01-aug-98    Eduardo Marcelo Serrat
                                       (emserrat@geocities.com)
                                       Removed bugs on flow control.
                                       Removed bugs on incoming accessdata.

Version 0.0.6  2.1.110    07-aug-98    Eduardo Marcelo Serrat

*******************************************************************************/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/route.h>
#include <linux/netfilter.h>
#include <linux/seq_file.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/flow.h>
#include <asm/ioctls.h>
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/fib_rules.h>
#include <net/dn.h>
#include <net/dn_nsp.h>
#include <net/dn_dev.h>
#include <net/dn_route.h>
#include <net/dn_fib.h>
#include <net/dn_neigh.h>
static void dn_keepalive(struct sock *sk);

#define DN_SK_HASH_SHIFT 8
#define DN_SK_HASH_SIZE (1 << DN_SK_HASH_SHIFT)
#define DN_SK_HASH_MASK (DN_SK_HASH_SIZE - 1)
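
/*
 * Illustrative note (added comment, not from the original file): with
 * DN_SK_HASH_SHIFT == 8 there are 256 hash buckets, so a socket whose
 * local port (scp->addrloc) is 0x2001 lands in bucket
 * 0x2001 & DN_SK_HASH_MASK == 0x01.
 */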
static const struct proto_ops dn_proto_ops;
static DEFINE_RWLOCK(dn_hash_lock);
static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
static struct hlist_head dn_wild_sk;
static atomic_long_t decnet_memory_allocated;

static int __dn_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen, int flags);
static int __dn_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen, int flags);
static struct hlist_head *dn_find_list(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	if (scp->addr.sdn_flags & SDF_WILD)
		return hlist_empty(&dn_wild_sk) ? &dn_wild_sk : NULL;

	return &dn_sk_hash[le16_to_cpu(scp->addrloc) & DN_SK_HASH_MASK];
}
/*
 * Valid ports are those greater than zero and not already in use.
 */
static int check_port(__le16 port)
{
	struct sock *sk;
	struct hlist_node *node;

	if (port == 0)
		return -1;

	sk_for_each(sk, node, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) {
		struct dn_scp *scp = DN_SK(sk);

		if (scp->addrloc == port)
			return -1;
	}

	return 0;
}

static unsigned short port_alloc(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);
	static unsigned short port = 0x2000;
	unsigned short i_port = port;

	while (check_port(cpu_to_le16(++port)) != 0) {
		if (port == i_port)
			return 0;
	}

	scp->addrloc = cpu_to_le16(port);

	return 1;
}
/*
 * Since this is only ever called from user
 * level, we don't need a write_lock() version
 * of this.
 */
static int dn_hash_sock(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);
	struct hlist_head *list;
	int rv = -EUSERS;

	BUG_ON(sk_hashed(sk));

	write_lock_bh(&dn_hash_lock);

	if (!scp->addrloc && !port_alloc(sk))
		goto out;

	rv = -EADDRINUSE;
	if ((list = dn_find_list(sk)) == NULL)
		goto out;

	sk_add_node(sk, list);
	rv = 0;
out:
	write_unlock_bh(&dn_hash_lock);
	return rv;
}

static void dn_unhash_sock(struct sock *sk)
{
	write_lock(&dn_hash_lock);
	sk_del_node_init(sk);
	write_unlock(&dn_hash_lock);
}

static void dn_unhash_sock_bh(struct sock *sk)
{
	write_lock_bh(&dn_hash_lock);
	sk_del_node_init(sk);
	write_unlock_bh(&dn_hash_lock);
}
static struct hlist_head *listen_hash(struct sockaddr_dn *addr)
{
	int i;
	unsigned int hash = addr->sdn_objnum;

	if (hash == 0) {
		hash = addr->sdn_objnamel;
		for (i = 0; i < le16_to_cpu(addr->sdn_objnamel); i++) {
			hash ^= addr->sdn_objname[i];
			hash ^= (hash << 3);
		}
	}

	return &dn_sk_hash[hash & DN_SK_HASH_MASK];
}

/*
 * Called to transform a socket from bound (i.e. with a local address)
 * into a listening socket (doesn't need a local port number) and rehashes
 * based upon the object name/number.
 */
static void dn_rehash_sock(struct sock *sk)
{
	struct hlist_head *list;
	struct dn_scp *scp = DN_SK(sk);

	if (scp->addr.sdn_flags & SDF_WILD)
		return;

	write_lock_bh(&dn_hash_lock);
	sk_del_node_init(sk);
	DN_SK(sk)->addrloc = 0;
	list = listen_hash(&DN_SK(sk)->addr);
	sk_add_node(sk, list);
	write_unlock_bh(&dn_hash_lock);
}
int dn_sockaddr2username(struct sockaddr_dn *sdn, unsigned char *buf, unsigned char type)
{
	int len = 2;

	*buf++ = type;

	switch (type) {
	case 0:
		*buf++ = sdn->sdn_objnum;
		break;
	case 1:
		*buf++ = 0;
		*buf++ = le16_to_cpu(sdn->sdn_objnamel);
		memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
		len = 3 + le16_to_cpu(sdn->sdn_objnamel);
		break;
	case 2:
		memset(buf, 0, 5);
		buf += 5;
		*buf++ = le16_to_cpu(sdn->sdn_objnamel);
		memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
		len = 7 + le16_to_cpu(sdn->sdn_objnamel);
		break;
	}

	return len;
}
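
/*
 * Illustrative sketch (added comment, not from the original file): for a
 * format 1 "username" built by dn_sockaddr2username() the buffer laid out
 * above looks like
 *
 *	buf[0] = 1			format/type byte
 *	buf[1] = 0
 *	buf[2] = objnamel		object name length
 *	buf[3..3+objnamel-1]		object name bytes
 *
 * which is why the returned length is 3 + objnamel; format 2 instead
 * prefixes the name with five zero bytes, giving 7 + objnamel.
 */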
/*
 * On reception of usernames, we handle types 1 and 0 for destination
 * addresses only. Types 2 and 4 are used for source addresses, but the
 * UIC, GIC are ignored and they are both treated the same way. Type 3
 * is never used as I've no idea what its purpose might be or what its
 * format is.
 */
int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *sdn, unsigned char *fmt)
{
	unsigned char type;
	int size = len;
	int namel = 12;

	sdn->sdn_objnum = 0;
	sdn->sdn_objnamel = cpu_to_le16(0);
	memset(sdn->sdn_objname, 0, DN_MAXOBJL);

	if (len < 2)
		return -1;

	len -= 2;
	*fmt = *data++;
	type = *data++;

	switch (*fmt) {
	case 0:
		sdn->sdn_objnum = type;
		return 2;
	case 1:
		namel = 16;
		break;
	case 2:
		len  -= 4;
		data += 4;
		break;
	case 4:
		len  -= 8;
		data += 8;
		break;
	default:
		return -1;
	}

	len -= 1;

	if (len < 0)
		return -1;

	sdn->sdn_objnamel = cpu_to_le16(*data++);
	len -= le16_to_cpu(sdn->sdn_objnamel);

	if ((len < 0) || (le16_to_cpu(sdn->sdn_objnamel) > namel))
		return -1;

	memcpy(sdn->sdn_objname, data, le16_to_cpu(sdn->sdn_objnamel));

	return size - len;
}
struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr)
{
	struct hlist_head *list = listen_hash(addr);
	struct hlist_node *node;
	struct sock *sk;

	read_lock(&dn_hash_lock);
	sk_for_each(sk, node, list) {
		struct dn_scp *scp = DN_SK(sk);
		if (sk->sk_state != TCP_LISTEN)
			continue;
		if (scp->addr.sdn_objnum) {
			if (scp->addr.sdn_objnum != addr->sdn_objnum)
				continue;
		} else {
			if (addr->sdn_objnum)
				continue;
			if (scp->addr.sdn_objnamel != addr->sdn_objnamel)
				continue;
			if (memcmp(scp->addr.sdn_objname, addr->sdn_objname, le16_to_cpu(addr->sdn_objnamel)) != 0)
				continue;
		}
		sock_hold(sk);
		read_unlock(&dn_hash_lock);
		return sk;
	}

	sk = sk_head(&dn_wild_sk);
	if (sk) {
		if (sk->sk_state == TCP_LISTEN)
			sock_hold(sk);
		else
			sk = NULL;
	}

	read_unlock(&dn_hash_lock);
	return sk;
}

struct sock *dn_find_by_skb(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct sock *sk;
	struct hlist_node *node;
	struct dn_scp *scp;

	read_lock(&dn_hash_lock);
	sk_for_each(sk, node, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) {
		scp = DN_SK(sk);
		if (cb->src != dn_saddr2dn(&scp->peer))
			continue;
		if (cb->dst_port != scp->addrloc)
			continue;
		if (scp->addrrem && (cb->src_port != scp->addrrem))
			continue;
		sock_hold(sk);
		goto found;
	}
	sk = NULL;
found:
	read_unlock(&dn_hash_lock);
	return sk;
}
static void dn_destruct(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	skb_queue_purge(&scp->data_xmit_queue);
	skb_queue_purge(&scp->other_xmit_queue);
	skb_queue_purge(&scp->other_receive_queue);

	dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
}

static int dn_memory_pressure;

static void dn_enter_memory_pressure(struct sock *sk)
{
	if (!dn_memory_pressure) {
		dn_memory_pressure = 1;
	}
}

static struct proto dn_proto = {
	.owner			= THIS_MODULE,
	.enter_memory_pressure	= dn_enter_memory_pressure,
	.memory_pressure	= &dn_memory_pressure,
	.memory_allocated	= &decnet_memory_allocated,
	.sysctl_mem		= sysctl_decnet_mem,
	.sysctl_wmem		= sysctl_decnet_wmem,
	.sysctl_rmem		= sysctl_decnet_rmem,
	.max_header		= DN_MAX_NSP_DATA_HEADER + 64,
	.obj_size		= sizeof(struct dn_sock),
};
static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
{
	struct dn_scp *scp;
	struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto);

	if (!sk)
		goto out;

	if (sock)
		sock->ops = &dn_proto_ops;
	sock_init_data(sock, sk);

	sk->sk_backlog_rcv = dn_nsp_backlog_rcv;
	sk->sk_destruct    = dn_destruct;
	sk->sk_family      = PF_DECnet;
	sk->sk_allocation  = gfp;
	sk->sk_sndbuf      = sysctl_decnet_wmem[1];
	sk->sk_rcvbuf      = sysctl_decnet_rmem[1];

	/* Initialization of DECnet Session Control Port		*/
	scp = DN_SK(sk);
	scp->state	= DN_O;		/* Open			*/
	scp->numdat	= 1;		/* Next data seg to tx	*/
	scp->numoth	= 1;		/* Next oth data to tx  */
	scp->ackxmt_dat = 0;		/* Last data seg ack'ed */
	scp->ackxmt_oth = 0;		/* Last oth data ack'ed */
	scp->ackrcv_dat = 0;		/* Highest data ack recv*/
	scp->ackrcv_oth = 0;		/* Last oth data ack rec*/
	scp->flowrem_sw = DN_SEND;
	scp->flowloc_sw = DN_SEND;
	scp->flowrem_dat = 0;
	scp->flowrem_oth = 1;
	scp->flowloc_dat = 0;
	scp->flowloc_oth = 1;
	scp->services_rem = 0;
	scp->services_loc = 1 | NSP_FC_NONE;
	scp->info_loc = 0x03; /* NSP version 4.1 */
	scp->segsize_rem = 230 - DN_MAX_NSP_DATA_HEADER; /* Default: Updated by remote segsize */
	scp->accept_mode = ACC_IMMED;
	scp->addr.sdn_family = AF_DECnet;
	scp->peer.sdn_family = AF_DECnet;
	scp->accessdata.acc_accl = 5;
	memcpy(scp->accessdata.acc_acc, "LINUX", 5);

	scp->max_window   = NSP_MAX_WINDOW;
	scp->snd_window   = NSP_MIN_WINDOW;
	scp->nsp_srtt     = NSP_INITIAL_SRTT;
	scp->nsp_rttvar   = NSP_INITIAL_RTTVAR;
	scp->nsp_rxtshift = 0;

	skb_queue_head_init(&scp->data_xmit_queue);
	skb_queue_head_init(&scp->other_xmit_queue);
	skb_queue_head_init(&scp->other_receive_queue);

	scp->persist_fxn = NULL;
	scp->keepalive = 10 * HZ;
	scp->keepalive_fxn = dn_keepalive;

	init_timer(&scp->delack_timer);
	scp->delack_pending = 0;
	scp->delack_fxn = dn_nsp_delayed_ack;

	dn_start_slow_timer(sk);
out:
	return sk;
}
/*
 * FIXME: Should respond to SO_KEEPALIVE etc.
 */
static void dn_keepalive(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	/*
	 * By checking the other_data transmit queue is empty
	 * we are double checking that we are not sending too
	 * many of these keepalive frames.
	 */
	if (skb_queue_empty(&scp->other_xmit_queue))
		dn_nsp_send_link(sk, DN_NOCHANGE, 0);
}
/*
 * Timer for shutdown/destroyed sockets.
 * When socket is dead & no packets have been sent for a
 * certain amount of time, they are removed by this
 * routine. Also takes care of sending out DI & DC
 * frames at correct times.
 */
int dn_destroy_timer(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	scp->persist = dn_nsp_persist(sk);

	switch (scp->state) {
	case DN_DI:
		dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
		if (scp->nsp_rxtshift >= decnet_di_count)
			scp->state = DN_CN;
		return 0;

	case DN_DR:
		dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
		if (scp->nsp_rxtshift >= decnet_dr_count)
			scp->state = DN_DRC;
		return 0;

	case DN_DN:
		if (scp->nsp_rxtshift < decnet_dn_count) {
			/* printk(KERN_DEBUG "dn_destroy_timer: DN\n"); */
			dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
					 GFP_ATOMIC);
			return 0;
		}
	}

	scp->persist = (HZ * decnet_time_wait);

	if (sk->sk_socket)
		return 0;

	if ((jiffies - scp->stamp) >= (HZ * decnet_time_wait)) {
		dn_unhash_sock(sk);
		sock_put(sk);
		return 1;
	}

	return 0;
}
static void dn_destroy_sock(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	scp->nsp_rxtshift = 0; /* reset back off */

	if (sk->sk_socket) {
		if (sk->sk_socket->state != SS_UNCONNECTED)
			sk->sk_socket->state = SS_DISCONNECTING;
	}

	sk->sk_state = TCP_CLOSE;

	switch (scp->state) {
	case DN_DN:
		dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
				 sk->sk_allocation);
		scp->persist_fxn = dn_destroy_timer;
		scp->persist = dn_nsp_persist(sk);
		break;
	case DN_CR:
		scp->state = DN_DR;
		goto disc_reject;
	case DN_RUN:
		scp->state = DN_DI;
	case DN_DI:
	case DN_DR:
disc_reject:
		dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation);
	case DN_NC:
	case DN_NR:
	case DN_RJ:
	case DN_DIC:
	case DN_CN:
	case DN_DRC:
	case DN_CI:
	case DN_CD:
		scp->persist_fxn = dn_destroy_timer;
		scp->persist = dn_nsp_persist(sk);
		break;
	default:
		printk(KERN_DEBUG "DECnet: dn_destroy_sock passed socket in invalid state\n");
	case DN_O:
		dn_stop_slow_timer(sk);

		dn_unhash_sock_bh(sk);
		sock_put(sk);

		break;
	}
}
char *dn_addr2asc(__u16 addr, char *buf)
{
	unsigned short node, area;

	node = addr & 0x03ff;
	area = addr >> 10;
	sprintf(buf, "%hd.%hd", area, node);

	return buf;
}
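
/*
 * Worked example (added comment, not from the original file): the 16 bit
 * DECnet address keeps the node number in the low 10 bits and the area in
 * the remaining high bits, so dn_addr2asc(0x0401, buf) gives area 1,
 * node 1 and prints "1.1".
 */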
static int dn_create(struct net *net, struct socket *sock, int protocol,
		     int kern)
{
	struct sock *sk;

	if (!net_eq(net, &init_net))
		return -EAFNOSUPPORT;

	switch (sock->type) {
	case SOCK_SEQPACKET:
		if (protocol != DNPROTO_NSP)
			return -EPROTONOSUPPORT;
		break;
	case SOCK_STREAM:
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	if ((sk = dn_alloc_sock(net, sock, GFP_KERNEL)) == NULL)
		return -ENOBUFS;

	sk->sk_protocol = protocol;

	return 0;
}
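
/*
 * Illustrative usage sketch (added comment, not from the original file):
 * a user-space program reaches dn_create() with something like
 *
 *	int s = socket(AF_DECnet, SOCK_SEQPACKET, DNPROTO_NSP);
 *
 * SOCK_STREAM with DNPROTO_NSP is accepted as well; any other type or
 * protocol combination is rejected above with -ESOCKTNOSUPPORT or
 * -EPROTONOSUPPORT.
 */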
static int
dn_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (sk) {
		sock_orphan(sk);
		sock_hold(sk);
		lock_sock(sk);
		dn_destroy_sock(sk);
		release_sock(sk);
		sock_put(sk);
	}

	return 0;
}

static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	struct sockaddr_dn *saddr = (struct sockaddr_dn *)uaddr;
	struct net_device *dev, *ldev;
	int rv;

	if (addr_len != sizeof(struct sockaddr_dn))
		return -EINVAL;

	if (saddr->sdn_family != AF_DECnet)
		return -EINVAL;

	if (le16_to_cpu(saddr->sdn_nodeaddrl) && (le16_to_cpu(saddr->sdn_nodeaddrl) != 2))
		return -EINVAL;

	if (le16_to_cpu(saddr->sdn_objnamel) > DN_MAXOBJL)
		return -EINVAL;

	if (saddr->sdn_flags & ~SDF_WILD)
		return -EINVAL;

	if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum ||
	    (saddr->sdn_flags & SDF_WILD)))
		return -EACCES;

	if (!(saddr->sdn_flags & SDF_WILD)) {
		if (le16_to_cpu(saddr->sdn_nodeaddrl)) {
			ldev = NULL;
			rcu_read_lock();
			for_each_netdev_rcu(&init_net, dev) {
				if (!dev->dn_ptr)
					continue;
				if (dn_dev_islocal(dev, dn_saddr2dn(saddr))) {
					ldev = dev;
					break;
				}
			}
			rcu_read_unlock();
			if (ldev == NULL)
				return -EADDRNOTAVAIL;
		}
	}

	rv = -EINVAL;
	lock_sock(sk);
	if (sock_flag(sk, SOCK_ZAPPED)) {
		memcpy(&scp->addr, saddr, addr_len);
		sock_reset_flag(sk, SOCK_ZAPPED);

		rv = dn_hash_sock(sk);
		if (rv)
			sock_set_flag(sk, SOCK_ZAPPED);
	}
	release_sock(sk);

	return rv;
}

static int dn_auto_bind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	int rv;

	sock_reset_flag(sk, SOCK_ZAPPED);

	scp->addr.sdn_flags  = 0;
	scp->addr.sdn_objnum = 0;

	/*
	 * This stuff is to keep compatibility with Eduardo's
	 * patch. I hope I can dispense with it shortly...
	 */
	if ((scp->accessdata.acc_accl != 0) &&
	    (scp->accessdata.acc_accl <= 12)) {

		scp->addr.sdn_objnamel = cpu_to_le16(scp->accessdata.acc_accl);
		memcpy(scp->addr.sdn_objname, scp->accessdata.acc_acc, le16_to_cpu(scp->addr.sdn_objnamel));

		scp->accessdata.acc_accl = 0;
		memset(scp->accessdata.acc_acc, 0, 40);
	}
	/* End of compatibility stuff */

	scp->addr.sdn_add.a_len = cpu_to_le16(2);
	rv = dn_dev_bind_default((__le16 *)scp->addr.sdn_add.a_addr);
	if (rv == 0) {
		rv = dn_hash_sock(sk);
		if (rv)
			sock_set_flag(sk, SOCK_ZAPPED);
	}

	return rv;
}
static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
{
	struct dn_scp *scp = DN_SK(sk);
	DEFINE_WAIT(wait);
	int err;

	if (scp->state != DN_CR)
		return -EINVAL;

	scp->state = DN_CC;
	scp->segsize_loc = dst_metric_advmss(__sk_dst_get(sk));
	dn_send_conn_conf(sk, allocation);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	for (;;) {
		release_sock(sk);
		if (scp->state == DN_CC)
			*timeo = schedule_timeout(*timeo);
		lock_sock(sk);
		err = 0;
		if (scp->state == DN_RUN)
			break;
		err = sock_error(sk);
		if (err)
			break;
		err = sock_intr_errno(*timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!*timeo)
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	}
	finish_wait(sk_sleep(sk), &wait);
	if (err == 0) {
		sk->sk_socket->state = SS_CONNECTED;
	} else if (scp->state != DN_CC) {
		sk->sk_socket->state = SS_UNCONNECTED;
	}
	return err;
}

static int dn_wait_run(struct sock *sk, long *timeo)
{
	struct dn_scp *scp = DN_SK(sk);
	DEFINE_WAIT(wait);
	int err = 0;

	if (scp->state == DN_RUN)
		goto out;

	if (!*timeo)
		return -EALREADY;

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	for (;;) {
		release_sock(sk);
		if (scp->state == DN_CI || scp->state == DN_CC)
			*timeo = schedule_timeout(*timeo);
		lock_sock(sk);
		err = 0;
		if (scp->state == DN_RUN)
			break;
		err = sock_error(sk);
		if (err)
			break;
		err = sock_intr_errno(*timeo);
		if (signal_pending(current))
			break;
		err = -ETIMEDOUT;
		if (!*timeo)
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	}
	finish_wait(sk_sleep(sk), &wait);
out:
	if (err == 0) {
		sk->sk_socket->state = SS_CONNECTED;
	} else if (scp->state != DN_CI && scp->state != DN_CC) {
		sk->sk_socket->state = SS_UNCONNECTED;
	}
	return err;
}
static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
{
	struct socket *sock = sk->sk_socket;
	struct dn_scp *scp = DN_SK(sk);
	int err = -EISCONN;
	struct flowidn fld;

	if (sock->state == SS_CONNECTED)
		goto out;

	if (sock->state == SS_CONNECTING) {
		err = 0;
		if (scp->state == DN_RUN) {
			sock->state = SS_CONNECTED;
			goto out;
		}
		err = -ECONNREFUSED;
		if (scp->state != DN_CI && scp->state != DN_CC) {
			sock->state = SS_UNCONNECTED;
			goto out;
		}
		return dn_wait_run(sk, timeo);
	}

	err = -EINVAL;
	if (scp->state != DN_O)
		goto out;

	if (addr == NULL || addrlen != sizeof(struct sockaddr_dn))
		goto out;
	if (addr->sdn_family != AF_DECnet)
		goto out;
	if (addr->sdn_flags & SDF_WILD)
		goto out;

	if (sock_flag(sk, SOCK_ZAPPED)) {
		err = dn_auto_bind(sk->sk_socket);
		if (err)
			goto out;
	}

	memcpy(&scp->peer, addr, sizeof(struct sockaddr_dn));

	err = -EHOSTUNREACH;
	memset(&fld, 0, sizeof(fld));
	fld.flowidn_oif = sk->sk_bound_dev_if;
	fld.daddr = dn_saddr2dn(&scp->peer);
	fld.saddr = dn_saddr2dn(&scp->addr);
	dn_sk_ports_copy(&fld, scp);
	fld.flowidn_proto = DNPROTO_NSP;
	if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, flags) < 0)
		goto out;
	sk->sk_route_caps = sk->sk_dst_cache->dev->features;
	sock->state = SS_CONNECTING;
	scp->state = DN_CI;
	scp->segsize_loc = dst_metric_advmss(sk->sk_dst_cache);

	dn_nsp_send_conninit(sk, NSP_CI);
	err = -EINPROGRESS;
	if (*timeo) {
		err = dn_wait_run(sk, timeo);
	}
out:
	return err;
}

static int dn_connect(struct socket *sock, struct sockaddr *uaddr, int addrlen, int flags)
{
	struct sockaddr_dn *addr = (struct sockaddr_dn *)uaddr;
	struct sock *sk = sock->sk;
	int err;
	long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	lock_sock(sk);
	err = __dn_connect(sk, addr, addrlen, &timeo, 0);
	release_sock(sk);

	return err;
}
static inline int dn_check_state(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
{
	struct dn_scp *scp = DN_SK(sk);

	switch (scp->state) {
	case DN_RUN:
		return 0;
	case DN_CR:
		return dn_confirm_accept(sk, timeo, sk->sk_allocation);
	case DN_CI:
	case DN_CC:
		return dn_wait_run(sk, timeo);
	case DN_O:
		return __dn_connect(sk, addr, addrlen, timeo, flags);
	}

	return -EINVAL;
}
static void dn_access_copy(struct sk_buff *skb, struct accessdata_dn *acc)
{
	unsigned char *ptr = skb->data;

	acc->acc_userl = *ptr++;
	memcpy(&acc->acc_user, ptr, acc->acc_userl);
	ptr += acc->acc_userl;

	acc->acc_passl = *ptr++;
	memcpy(&acc->acc_pass, ptr, acc->acc_passl);
	ptr += acc->acc_passl;

	acc->acc_accl = *ptr++;
	memcpy(&acc->acc_acc, ptr, acc->acc_accl);

	skb_pull(skb, acc->acc_accl + acc->acc_passl + acc->acc_userl + 3);
}

static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt)
{
	unsigned char *ptr = skb->data;
	u16 len = *ptr++; /* yes, it's 8bit on the wire */

	BUG_ON(len > 16); /* we've checked the contents earlier */
	opt->opt_optl   = cpu_to_le16(len);
	opt->opt_status = 0;
	memcpy(opt->opt_data, ptr, len);
	skb_pull(skb, len + 1);
}
static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
{
	DEFINE_WAIT(wait);
	struct sk_buff *skb = NULL;
	int err = 0;

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	for (;;) {
		release_sock(sk);
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb == NULL) {
			*timeo = schedule_timeout(*timeo);
			skb = skb_dequeue(&sk->sk_receive_queue);
		}
		lock_sock(sk);
		if (skb != NULL)
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(*timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!*timeo)
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	}
	finish_wait(sk_sleep(sk), &wait);

	return skb == NULL ? ERR_PTR(err) : skb;
}
static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *sk = sock->sk, *newsk;
	struct sk_buff *skb = NULL;
	struct dn_skb_cb *cb;
	unsigned char menuver;
	int err = 0;
	unsigned char type;
	long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	struct dst_entry *dst;

	lock_sock(sk);

	if (sk->sk_state != TCP_LISTEN || DN_SK(sk)->state != DN_O) {
		release_sock(sk);
		return -EINVAL;
	}

	skb = skb_dequeue(&sk->sk_receive_queue);
	if (skb == NULL) {
		skb = dn_wait_for_connect(sk, &timeo);
		if (IS_ERR(skb)) {
			release_sock(sk);
			return PTR_ERR(skb);
		}
	}

	cb = DN_SKB_CB(skb);
	sk->sk_ack_backlog--;
	newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation);
	if (newsk == NULL) {
		release_sock(sk);
		kfree_skb(skb);
		return -ENOBUFS;
	}
	release_sock(sk);

	dst = skb_dst(skb);
	sk_dst_set(newsk, dst);
	skb_dst_set(skb, NULL);

	DN_SK(newsk)->state        = DN_CR;
	DN_SK(newsk)->addrrem      = cb->src_port;
	DN_SK(newsk)->services_rem = cb->services;
	DN_SK(newsk)->info_rem     = cb->info;
	DN_SK(newsk)->segsize_rem  = cb->segsize;
	DN_SK(newsk)->accept_mode  = DN_SK(sk)->accept_mode;

	if (DN_SK(newsk)->segsize_rem < 230)
		DN_SK(newsk)->segsize_rem = 230;

	if ((DN_SK(newsk)->services_rem & NSP_FC_MASK) == NSP_FC_NONE)
		DN_SK(newsk)->max_window = decnet_no_fc_max_cwnd;

	newsk->sk_state = TCP_LISTEN;
	memcpy(&(DN_SK(newsk)->addr), &(DN_SK(sk)->addr), sizeof(struct sockaddr_dn));

	/*
	 * If we are listening on a wild socket, we don't want
	 * the newly created socket on the wrong hash queue.
	 */
	DN_SK(newsk)->addr.sdn_flags &= ~SDF_WILD;

	skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->addr), &type));
	skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->peer), &type));
	*(__le16 *)(DN_SK(newsk)->peer.sdn_add.a_addr) = cb->src;
	*(__le16 *)(DN_SK(newsk)->addr.sdn_add.a_addr) = cb->dst;

	menuver = *skb->data;
	skb_pull(skb, 1);

	if (menuver & DN_MENUVER_ACC)
		dn_access_copy(skb, &(DN_SK(newsk)->accessdata));

	if (menuver & DN_MENUVER_USR)
		dn_user_copy(skb, &(DN_SK(newsk)->conndata_in));

	if (menuver & DN_MENUVER_PRX)
		DN_SK(newsk)->peer.sdn_flags |= SDF_PROXY;

	if (menuver & DN_MENUVER_UIC)
		DN_SK(newsk)->peer.sdn_flags |= SDF_UICPROXY;

	kfree_skb(skb);

	memcpy(&(DN_SK(newsk)->conndata_out), &(DN_SK(sk)->conndata_out),
	       sizeof(struct optdata_dn));
	memcpy(&(DN_SK(newsk)->discdata_out), &(DN_SK(sk)->discdata_out),
	       sizeof(struct optdata_dn));

	lock_sock(newsk);
	err = dn_hash_sock(newsk);
	if (err == 0) {
		sock_reset_flag(newsk, SOCK_ZAPPED);
		dn_send_conn_ack(newsk);

		/*
		 * Here we use sk->sk_allocation since although the conn conf is
		 * for the newsk, the context is the old socket.
		 */
		if (DN_SK(newsk)->accept_mode == ACC_IMMED)
			err = dn_confirm_accept(newsk, &timeo,
						sk->sk_allocation);
	}
	release_sock(newsk);
	return err;
}
static int dn_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
{
	struct sockaddr_dn *sa = (struct sockaddr_dn *)uaddr;
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);

	*uaddr_len = sizeof(struct sockaddr_dn);

	lock_sock(sk);

	if (peer) {
		if ((sock->state != SS_CONNECTED &&
		     sock->state != SS_CONNECTING) &&
		    scp->accept_mode == ACC_IMMED) {
			release_sock(sk);
			return -ENOTCONN;
		}

		memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn));
	} else {
		memcpy(sa, &scp->addr, sizeof(struct sockaddr_dn));
	}

	release_sock(sk);

	return 0;
}

static unsigned int dn_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	int mask = datagram_poll(file, sock, wait);

	if (!skb_queue_empty(&scp->other_receive_queue))
		mask |= POLLRDBAND;

	return mask;
}
static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	int err = -EOPNOTSUPP;
	long amount = 0;
	struct sk_buff *skb;
	int val;

	switch (cmd) {
	case SIOCGIFADDR:
	case SIOCSIFADDR:
		return dn_dev_ioctl(cmd, (void __user *)arg);

	case SIOCATMARK:
		lock_sock(sk);
		val = !skb_queue_empty(&scp->other_receive_queue);
		if (scp->state != DN_RUN)
			val = -ENOTCONN;
		release_sock(sk);
		return val;

	case TIOCOUTQ:
		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (amount < 0)
			amount = 0;
		err = put_user(amount, (int __user *)arg);
		break;

	case TIOCINQ:
		lock_sock(sk);
		skb = skb_peek(&scp->other_receive_queue);
		if (skb) {
			amount = skb->len;
		} else {
			skb_queue_walk(&sk->sk_receive_queue, skb)
				amount += skb->len;
		}
		release_sock(sk);
		err = put_user(amount, (int __user *)arg);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}

	return err;
}
static int dn_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = -EINVAL;

	lock_sock(sk);

	if (sock_flag(sk, SOCK_ZAPPED))
		goto out;

	if ((DN_SK(sk)->state != DN_O) || (sk->sk_state == TCP_LISTEN))
		goto out;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog     = 0;
	sk->sk_state           = TCP_LISTEN;
	err                    = 0;
	dn_rehash_sock(sk);

out:
	release_sock(sk);

	return err;
}

static int dn_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	int err = -ENOTCONN;

	lock_sock(sk);

	if (sock->state == SS_UNCONNECTED)
		goto out;

	err = 0;
	if (sock->state == SS_DISCONNECTING)
		goto out;

	err = -EINVAL;
	if (scp->state == DN_O)
		goto out;

	if (how != SHUT_RDWR)
		goto out;

	sk->sk_shutdown = SHUTDOWN_MASK;
	dn_destroy_sock(sk);
	err = 0;

out:
	release_sock(sk);

	return err;
}

static int dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);
	err = __dn_setsockopt(sock, level, optname, optval, optlen, 0);
	release_sock(sk);

	return err;
}
static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	long timeo;
	union {
		struct optdata_dn opt;
		struct accessdata_dn acc;
		int mode;
		unsigned long win;
		int val;
		unsigned char services;
		unsigned char info;
	} u;
	int err;

	if (optlen && !optval)
		return -EINVAL;

	if (optlen > sizeof(u))
		return -EINVAL;

	if (copy_from_user(&u, optval, optlen))
		return -EFAULT;

	switch (optname) {
	case DSO_CONDATA:
		if (sock->state == SS_CONNECTED)
			return -EISCONN;
		if ((scp->state != DN_O) && (scp->state != DN_CR))
			return -EINVAL;

		if (optlen != sizeof(struct optdata_dn))
			return -EINVAL;

		if (le16_to_cpu(u.opt.opt_optl) > 16)
			return -EINVAL;

		memcpy(&scp->conndata_out, &u.opt, optlen);
		break;

	case DSO_DISDATA:
		if (sock->state != SS_CONNECTED &&
		    scp->accept_mode == ACC_IMMED)
			return -ENOTCONN;

		if (optlen != sizeof(struct optdata_dn))
			return -EINVAL;

		if (le16_to_cpu(u.opt.opt_optl) > 16)
			return -EINVAL;

		memcpy(&scp->discdata_out, &u.opt, optlen);
		break;

	case DSO_CONACCESS:
		if (sock->state == SS_CONNECTED)
			return -EISCONN;
		if (scp->state != DN_O)
			return -EINVAL;

		if (optlen != sizeof(struct accessdata_dn))
			return -EINVAL;

		if ((u.acc.acc_accl > DN_MAXACCL) ||
		    (u.acc.acc_passl > DN_MAXACCL) ||
		    (u.acc.acc_userl > DN_MAXACCL))
			return -EINVAL;

		memcpy(&scp->accessdata, &u.acc, optlen);
		break;

	case DSO_ACCEPTMODE:
		if (sock->state == SS_CONNECTED)
			return -EISCONN;
		if (scp->state != DN_O)
			return -EINVAL;

		if (optlen != sizeof(int))
			return -EINVAL;

		if ((u.mode != ACC_IMMED) && (u.mode != ACC_DEFER))
			return -EINVAL;

		scp->accept_mode = (unsigned char)u.mode;
		break;

	case DSO_CONACCEPT:
		if (scp->state != DN_CR)
			return -EINVAL;
		timeo = sock_rcvtimeo(sk, 0);
		err = dn_confirm_accept(sk, &timeo, sk->sk_allocation);
		return err;

	case DSO_CONREJECT:
		if (scp->state != DN_CR)
			return -EINVAL;

		sk->sk_shutdown = SHUTDOWN_MASK;
		dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
		break;

	default:
#ifdef CONFIG_NETFILTER
		return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
#endif
	case DSO_LINKINFO:
	case DSO_STREAM:
	case DSO_SEQPACKET:
		return -ENOPROTOOPT;

	case DSO_MAXWINDOW:
		if (optlen != sizeof(unsigned long))
			return -EINVAL;
		if (u.win > NSP_MAX_WINDOW)
			u.win = NSP_MAX_WINDOW;
		scp->max_window = u.win;
		if (scp->snd_window > u.win)
			scp->snd_window = u.win;
		break;

	case DSO_NODELAY:
		if (optlen != sizeof(int))
			return -EINVAL;
		if (scp->nonagle == 2)
			return -EINVAL;
		scp->nonagle = (u.val == 0) ? 0 : 1;
		/* if (scp->nonagle == 1) { Push pending frames } */
		break;

	case DSO_CORK:
		if (optlen != sizeof(int))
			return -EINVAL;
		if (scp->nonagle == 1)
			return -EINVAL;
		scp->nonagle = (u.val == 0) ? 0 : 2;
		/* if (scp->nonagle == 0) { Push pending frames } */
		break;

	case DSO_SERVICES:
		if (optlen != sizeof(unsigned char))
			return -EINVAL;
		if ((u.services & ~NSP_FC_MASK) != 0x01)
			return -EINVAL;
		if ((u.services & NSP_FC_MASK) == NSP_FC_MASK)
			return -EINVAL;
		scp->services_loc = u.services;
		break;

	case DSO_INFO:
		if (optlen != sizeof(unsigned char))
			return -EINVAL;
		scp->info_loc = u.info;
		break;
	}

	return 0;
}
static int dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);
	err = __dn_getsockopt(sock, level, optname, optval, optlen, 0);
	release_sock(sk);

	return err;
}
static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	struct linkinfo_dn link;
	unsigned int r_len;
	void *r_data = NULL;
	int val;

	if (get_user(r_len, optlen))
		return -EFAULT;

	switch (optname) {
	case DSO_CONDATA:
		if (r_len > sizeof(struct optdata_dn))
			r_len = sizeof(struct optdata_dn);
		r_data = &scp->conndata_in;
		break;

	case DSO_DISDATA:
		if (r_len > sizeof(struct optdata_dn))
			r_len = sizeof(struct optdata_dn);
		r_data = &scp->discdata_in;
		break;

	case DSO_CONACCESS:
		if (r_len > sizeof(struct accessdata_dn))
			r_len = sizeof(struct accessdata_dn);
		r_data = &scp->accessdata;
		break;

	case DSO_ACCEPTMODE:
		if (r_len > sizeof(unsigned char))
			r_len = sizeof(unsigned char);
		r_data = &scp->accept_mode;
		break;

	case DSO_LINKINFO:
		if (r_len > sizeof(struct linkinfo_dn))
			r_len = sizeof(struct linkinfo_dn);

		memset(&link, 0, sizeof(link));

		switch (sock->state) {
		case SS_CONNECTING:
			link.idn_linkstate = LL_CONNECTING;
			break;
		case SS_DISCONNECTING:
			link.idn_linkstate = LL_DISCONNECTING;
			break;
		case SS_CONNECTED:
			link.idn_linkstate = LL_RUNNING;
			break;
		default:
			link.idn_linkstate = LL_INACTIVE;
		}

		link.idn_segsize = scp->segsize_rem;
		r_data = &link;
		break;

	default:
#ifdef CONFIG_NETFILTER
	{
		int ret, len;

		if (get_user(len, optlen))
			return -EFAULT;

		ret = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
		if (ret >= 0)
			ret = put_user(len, optlen);
		return ret;
	}
#endif
	case DSO_STREAM:
	case DSO_SEQPACKET:
	case DSO_CONACCEPT:
	case DSO_CONREJECT:
		return -ENOPROTOOPT;

	case DSO_MAXWINDOW:
		if (r_len > sizeof(unsigned long))
			r_len = sizeof(unsigned long);
		r_data = &scp->max_window;
		break;

	case DSO_NODELAY:
		if (r_len > sizeof(int))
			r_len = sizeof(int);
		val = (scp->nonagle == 1);
		r_data = &val;
		break;

	case DSO_CORK:
		if (r_len > sizeof(int))
			r_len = sizeof(int);
		val = (scp->nonagle == 2);
		r_data = &val;
		break;

	case DSO_SERVICES:
		if (r_len > sizeof(unsigned char))
			r_len = sizeof(unsigned char);
		r_data = &scp->services_rem;
		break;

	case DSO_INFO:
		if (r_len > sizeof(unsigned char))
			r_len = sizeof(unsigned char);
		r_data = &scp->info_rem;
		break;
	}

	if (r_data) {
		if (copy_to_user(optval, r_data, r_len))
			return -EFAULT;
		if (put_user(r_len, optlen))
			return -EFAULT;
	}

	return 0;
}
static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target)
{
	struct sk_buff *skb;
	int len = 0;

	if (flags & MSG_OOB)
		return !skb_queue_empty(q) ? 1 : 0;

	skb_queue_walk(q, skb) {
		struct dn_skb_cb *cb = DN_SKB_CB(skb);
		len += skb->len;

		if (cb->nsp_flags & 0x40) {
			/* SOCK_SEQPACKET reads to EOM */
			if (sk->sk_type == SOCK_SEQPACKET)
				return 1;
			/* so does SOCK_STREAM unless WAITALL is specified */
			if (!(flags & MSG_WAITALL))
				return 1;
		}

		/* minimum data length for read exceeded */
		if (len >= target)
			return 1;
	}

	return 0;
}
static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
	struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	struct sk_buff_head *queue = &sk->sk_receive_queue;
	size_t target = size > 1 ? 1 : 0;
	size_t copied = 0;
	int rv = 0;
	struct sk_buff *skb, *n;
	struct dn_skb_cb *cb = NULL;
	unsigned char eor = 0;
	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	lock_sock(sk);

	if (sock_flag(sk, SOCK_ZAPPED)) {
		rv = -EADDRNOTAVAIL;
		goto out;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		rv = 0;
		goto out;
	}

	rv = dn_check_state(sk, NULL, 0, &timeo, flags);
	if (rv)
		goto out;

	if (flags & ~(MSG_CMSG_COMPAT|MSG_PEEK|MSG_OOB|MSG_WAITALL|MSG_DONTWAIT|MSG_NOSIGNAL)) {
		rv = -EOPNOTSUPP;
		goto out;
	}

	if (flags & MSG_OOB)
		queue = &scp->other_receive_queue;

	if (flags & MSG_WAITALL)
		target = size;

	/*
	 * See if there is data ready to read, sleep if there isn't
	 */
	for (;;) {
		DEFINE_WAIT(wait);

		if (sk->sk_err)
			goto out;

		if (!skb_queue_empty(&scp->other_receive_queue)) {
			if (!(flags & MSG_OOB)) {
				msg->msg_flags |= MSG_OOB;
				if (!scp->other_report) {
					scp->other_report = 1;
					goto out;
				}
			}
		}

		if (scp->state != DN_RUN)
			goto out;

		if (signal_pending(current)) {
			rv = sock_intr_errno(timeo);
			goto out;
		}

		if (dn_data_ready(sk, queue, flags, target))
			break;

		if (flags & MSG_DONTWAIT) {
			rv = -EWOULDBLOCK;
			goto out;
		}

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target));
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		finish_wait(sk_sleep(sk), &wait);
	}

	skb_queue_walk_safe(queue, skb, n) {
		unsigned int chunk = skb->len;
		cb = DN_SKB_CB(skb);

		if ((chunk + copied) > size)
			chunk = size - copied;

		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
			rv = -EFAULT;
			break;
		}
		copied += chunk;

		if (!(flags & MSG_PEEK))
			skb_pull(skb, chunk);

		eor = cb->nsp_flags & 0x40;

		if (skb->len == 0) {
			skb_unlink(skb, queue);
			kfree_skb(skb);
			/*
			 * N.B. Don't refer to skb or cb after this point
			 * in the loop.
			 */
			if ((scp->flowloc_sw == DN_DONTSEND) && !dn_congested(sk)) {
				scp->flowloc_sw = DN_SEND;
				dn_nsp_send_link(sk, DN_SEND, 0);
			}
		}

		if (eor) {
			if (sk->sk_type == SOCK_SEQPACKET)
				break;
			if (!(flags & MSG_WAITALL))
				break;
		}

		if (flags & MSG_OOB)
			break;

		if (copied >= target)
			break;
	}

	rv = copied;

	if (eor && (sk->sk_type == SOCK_SEQPACKET))
		msg->msg_flags |= MSG_EOR;

out:
	if (rv == 0)
		rv = (flags & MSG_PEEK) ? -sk->sk_err : sock_error(sk);

	if ((rv >= 0) && msg->msg_name) {
		memcpy(msg->msg_name, &scp->peer, sizeof(struct sockaddr_dn));
		msg->msg_namelen = sizeof(struct sockaddr_dn);
	}

	release_sock(sk);

	return rv;
}
static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *queue, int flags)
{
	unsigned char fctype = scp->services_rem & NSP_FC_MASK;
	if (skb_queue_len(queue) >= scp->snd_window)
		return 1;
	if (fctype != NSP_FC_NONE) {
		if (flags & MSG_OOB) {
			if (scp->flowrem_oth == 0)
				return 1;
		} else {
			if (scp->flowrem_dat == 0)
				return 1;
		}
	}
	return 0;
}
/*
 * The DECnet spec requires that the "routing layer" accepts packets which
 * are at least 230 bytes in size. This excludes any headers which the NSP
 * layer might add, so we always assume that we'll be using the maximal
 * length header on data packets. The variation in length is due to the
 * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't
 * make much practical difference.
 */
unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu)
{
	unsigned int mss = 230 - DN_MAX_NSP_DATA_HEADER;
	if (dev) {
		struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
		mtu -= LL_RESERVED_SPACE(dev);
		if (dn_db->use_long)
			mtu -= 21;
		else
			mtu -= 6;
		mtu -= DN_MAX_NSP_DATA_HEADER;
	} else {
		/*
		 * 21 = long header, 16 = guess at MAC header length
		 */
		mtu -= (21 + DN_MAX_NSP_DATA_HEADER + 16);
	}
	if (mtu > mss)
		mss = mtu;
	return mss;
}
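
/*
 * Illustrative example (added comment, not from the original file): for a
 * device using the long data packet header and an Ethernet sized MTU of
 * 1500, the usable segment size works out to roughly
 *
 *	1500 - LL_RESERVED_SPACE(dev) - 21 - DN_MAX_NSP_DATA_HEADER
 *
 * which is comfortably above the 230 byte minimum that the spec
 * guarantees.
 */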
static inline unsigned int dn_current_mss(struct sock *sk, int flags)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct dn_scp *scp = DN_SK(sk);
	int mss_now = min_t(int, scp->segsize_loc, scp->segsize_rem);

	/* Other data messages are limited to 16 bytes per packet */
	if (flags & MSG_OOB)
		return 16;

	/* This works out the maximum size of segment we can send out */
	if (dst) {
		u32 mtu = dst_mtu(dst);
		mss_now = min_t(int, dn_mss_from_pmtu(dst->dev, mtu), mss_now);
	}

	return mss_now;
}

/*
 * N.B. We get the timeout wrong here, but then we always did get it
 * wrong before and this is another step along the road to correcting
 * it. It ought to get updated each time we pass through the routine,
 * but in practise it probably doesn't matter too much for now.
 */
static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk,
					unsigned long datalen, int noblock,
					int *errcode)
{
	struct sk_buff *skb = sock_alloc_send_skb(sk, datalen,
						  noblock, errcode);
	if (skb) {
		skb->protocol = htons(ETH_P_DNA_RT);
		skb->pkt_type = PACKET_OUTGOING;
	}
	return skb;
}
*iocb
, struct socket
*sock
,
1908 struct msghdr
*msg
, size_t size
)
1910 struct sock
*sk
= sock
->sk
;
1911 struct dn_scp
*scp
= DN_SK(sk
);
1913 struct sk_buff_head
*queue
= &scp
->data_xmit_queue
;
1914 int flags
= msg
->msg_flags
;
1917 int addr_len
= msg
->msg_namelen
;
1918 struct sockaddr_dn
*addr
= (struct sockaddr_dn
*)msg
->msg_name
;
1919 struct sk_buff
*skb
= NULL
;
1920 struct dn_skb_cb
*cb
;
1922 unsigned char fctype
;
1925 if (flags
& ~(MSG_TRYHARD
|MSG_OOB
|MSG_DONTWAIT
|MSG_EOR
|MSG_NOSIGNAL
|MSG_MORE
|MSG_CMSG_COMPAT
))
1928 if (addr_len
&& (addr_len
!= sizeof(struct sockaddr_dn
)))
1932 timeo
= sock_sndtimeo(sk
, flags
& MSG_DONTWAIT
);
1934 * The only difference between stream sockets and sequenced packet
1935 * sockets is that the stream sockets always behave as if MSG_EOR
1938 if (sock
->type
== SOCK_STREAM
) {
1939 if (flags
& MSG_EOR
) {
1947 err
= dn_check_state(sk
, addr
, addr_len
, &timeo
, flags
);
1951 if (sk
->sk_shutdown
& SEND_SHUTDOWN
) {
1953 if (!(flags
& MSG_NOSIGNAL
))
1954 send_sig(SIGPIPE
, current
, 0);
1958 if ((flags
& MSG_TRYHARD
) && sk
->sk_dst_cache
)
1959 dst_negative_advice(sk
);
1961 mss
= scp
->segsize_rem
;
1962 fctype
= scp
->services_rem
& NSP_FC_MASK
;
1964 mss
= dn_current_mss(sk
, flags
);
1966 if (flags
& MSG_OOB
) {
1967 queue
= &scp
->other_xmit_queue
;
1974 scp
->persist_fxn
= dn_nsp_xmit_timeout
;
1976 while(sent
< size
) {
1977 err
= sock_error(sk
);
1981 if (signal_pending(current
)) {
1982 err
= sock_intr_errno(timeo
);
1987 * Calculate size that we wish to send.
1995 * Wait for queue size to go down below the window
1998 if (dn_queue_too_long(scp
, queue
, flags
)) {
2001 if (flags
& MSG_DONTWAIT
) {
2006 prepare_to_wait(sk_sleep(sk
), &wait
, TASK_INTERRUPTIBLE
);
2007 set_bit(SOCK_ASYNC_WAITDATA
, &sk
->sk_socket
->flags
);
2008 sk_wait_event(sk
, &timeo
,
2009 !dn_queue_too_long(scp
, queue
, flags
));
2010 clear_bit(SOCK_ASYNC_WAITDATA
, &sk
->sk_socket
->flags
);
2011 finish_wait(sk_sleep(sk
), &wait
);
2016 * Get a suitably sized skb.
2017 * 64 is a bit of a hack really, but its larger than any
2018 * link-layer headers and has served us well as a good
2019 * guess as to their real length.
2021 skb
= dn_alloc_send_pskb(sk
, len
+ 64 + DN_MAX_NSP_DATA_HEADER
,
2022 flags
& MSG_DONTWAIT
, &err
);
2030 cb
= DN_SKB_CB(skb
);
2032 skb_reserve(skb
, 64 + DN_MAX_NSP_DATA_HEADER
);
2034 if (memcpy_fromiovec(skb_put(skb
, len
), msg
->msg_iov
, len
)) {
2039 if (flags
& MSG_OOB
) {
2040 cb
->nsp_flags
= 0x30;
2041 if (fctype
!= NSP_FC_NONE
)
2044 cb
->nsp_flags
= 0x00;
2045 if (scp
->seg_total
== 0)
2046 cb
->nsp_flags
|= 0x20;
2048 scp
->seg_total
+= len
;
2050 if (((sent
+ len
) == size
) && (flags
& MSG_EOR
)) {
2051 cb
->nsp_flags
|= 0x40;
2053 if (fctype
== NSP_FC_SCMC
)
2056 if (fctype
== NSP_FC_SRC
)
2061 dn_nsp_queue_xmit(sk
, skb
, sk
->sk_allocation
, flags
& MSG_OOB
);
2064 scp
->persist
= dn_nsp_persist(sk
);
2073 return sent
? sent
: err
;
2076 err
= sk_stream_error(sk
, flags
, err
);
2081 static int dn_device_event(struct notifier_block
*this, unsigned long event
,
2084 struct net_device
*dev
= (struct net_device
*)ptr
;
2086 if (!net_eq(dev_net(dev
), &init_net
))
2103 static struct notifier_block dn_dev_notifier
= {
2104 .notifier_call
= dn_device_event
,
2107 extern int dn_route_rcv(struct sk_buff
*, struct net_device
*, struct packet_type
*, struct net_device
*);
2109 static struct packet_type dn_dix_packet_type __read_mostly
= {
2110 .type
= cpu_to_be16(ETH_P_DNA_RT
),
2111 .func
= dn_route_rcv
,
#ifdef CONFIG_PROC_FS
struct dn_iter_state {
	int bucket;
};

static struct sock *dn_socket_get_first(struct seq_file *seq)
{
	struct dn_iter_state *state = seq->private;
	struct sock *n = NULL;

	for (state->bucket = 0;
	     state->bucket < DN_SK_HASH_SIZE;
	     ++state->bucket) {
		n = sk_head(&dn_sk_hash[state->bucket]);
		if (n)
			break;
	}

	return n;
}

static struct sock *dn_socket_get_next(struct seq_file *seq,
				       struct sock *n)
{
	struct dn_iter_state *state = seq->private;

	n = sk_next(n);
try_again:
	if (n)
		goto out;
	if (++state->bucket >= DN_SK_HASH_SIZE)
		goto out;
	n = sk_head(&dn_sk_hash[state->bucket]);
	goto try_again;
out:
	return n;
}

static struct sock *socket_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct sock *sk = dn_socket_get_first(seq);

	if (sk) {
		while (*pos && (sk = dn_socket_get_next(seq, sk)))
			--*pos;
	}
	return *pos ? NULL : sk;
}

static void *dn_socket_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;

	read_lock_bh(&dn_hash_lock);
	rc = socket_get_idx(seq, &pos);
	if (!rc)
		read_unlock_bh(&dn_hash_lock);
	return rc;
}

static void *dn_socket_seq_start(struct seq_file *seq, loff_t *pos)
{
	return *pos ? dn_socket_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *dn_socket_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = dn_socket_get_idx(seq, 0);
		goto out;
	}

	rc = dn_socket_get_next(seq, v);
	if (rc)
		goto out;
	read_unlock_bh(&dn_hash_lock);
out:
	++*pos;
	return rc;
}

static void dn_socket_seq_stop(struct seq_file *seq, void *v)
{
	if (v && v != SEQ_START_TOKEN)
		read_unlock_bh(&dn_hash_lock);
}
#define IS_NOT_PRINTABLE(x) ((x) < 32 || (x) > 126)

static void dn_printable_object(struct sockaddr_dn *dn, unsigned char *buf)
{
	int i;

	switch (le16_to_cpu(dn->sdn_objnamel)) {
	case 0:
		sprintf(buf, "%d", dn->sdn_objnum);
		break;
	default:
		for (i = 0; i < le16_to_cpu(dn->sdn_objnamel); i++) {
			buf[i] = dn->sdn_objname[i];
			if (IS_NOT_PRINTABLE(buf[i]))
				buf[i] = '.';
		}
		buf[i] = 0;
	}
}

static char *dn_state2asc(unsigned char state)
static inline void dn_socket_format_entry(struct seq_file *seq, struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);
	char buf1[DN_ASCBUF_LEN];
	char buf2[DN_ASCBUF_LEN];
	char local_object[DN_MAXOBJL+3];
	char remote_object[DN_MAXOBJL+3];

	dn_printable_object(&scp->addr, local_object);
	dn_printable_object(&scp->peer, remote_object);

	seq_printf(seq,
		   "%6s/%04X %04d:%04d %04d:%04d %01d %-16s "
		   "%6s/%04X %04d:%04d %04d:%04d %01d %-16s %4s %s\n",
		   dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->addr)), buf1),
		   le16_to_cpu(scp->addrloc),
		   scp->numdat,
		   scp->numoth,
		   scp->ackxmt_dat,
		   scp->ackxmt_oth,
		   scp->flowloc_sw,
		   local_object,
		   dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->peer)), buf2),
		   le16_to_cpu(scp->addrrem),
		   scp->numdat_rcv,
		   scp->numoth_rcv,
		   scp->ackrcv_dat,
		   scp->ackrcv_oth,
		   scp->flowrem_sw,
		   remote_object,
		   dn_state2asc(scp->state),
		   ((scp->accept_mode == ACC_IMMED) ? "IMMED" : "DEFER"));
}

static int dn_socket_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Local                                              Remote\n");
	} else {
		dn_socket_format_entry(seq, v);
	}
	return 0;
}
static const struct seq_operations dn_socket_seq_ops = {
	.start	= dn_socket_seq_start,
	.next	= dn_socket_seq_next,
	.stop	= dn_socket_seq_stop,
	.show	= dn_socket_seq_show,
};

static int dn_socket_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &dn_socket_seq_ops,
				sizeof(struct dn_iter_state));
}

static const struct file_operations dn_socket_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= dn_socket_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif
static const struct net_proto_family dn_family_ops = {
	.family =	AF_DECnet,
	.create =	dn_create,
	.owner	=	THIS_MODULE,
};

static const struct proto_ops dn_proto_ops = {
	.family =	AF_DECnet,
	.owner =	THIS_MODULE,
	.release =	dn_release,
	.bind =		dn_bind,
	.connect =	dn_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	dn_accept,
	.getname =	dn_getname,
	.poll =		dn_poll,
	.ioctl =	dn_ioctl,
	.listen =	dn_listen,
	.shutdown =	dn_shutdown,
	.setsockopt =	dn_setsockopt,
	.getsockopt =	dn_getsockopt,
	.sendmsg =	dn_sendmsg,
	.recvmsg =	dn_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
void dn_register_sysctl(void);
void dn_unregister_sysctl(void);

MODULE_DESCRIPTION("The Linux DECnet Network Protocol");
MODULE_AUTHOR("Linux DECnet Project Team");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_DECnet);

static char banner[] __initdata = KERN_INFO
"NET4: DECnet for Linux: V.2.5.68s (C) 1995-2003 Linux DECnet Project Team\n";
static int __init decnet_init(void)
{
	int rc;

	printk(banner);

	rc = proto_register(&dn_proto, 1);
	if (rc != 0)
		goto out;

	sock_register(&dn_family_ops);
	dev_add_pack(&dn_dix_packet_type);
	register_netdevice_notifier(&dn_dev_notifier);

	proc_net_fops_create(&init_net, "decnet", S_IRUGO, &dn_socket_seq_fops);
	dn_register_sysctl();
out:
	return rc;
}
module_init(decnet_init);
/*
 * Prevent DECnet module unloading until it's fixed properly.
 * Requires an audit of the code to check for memory leaks and
 * initialisation problems etc.
 */
#if 0
static void __exit decnet_exit(void)
{
	sock_unregister(AF_DECnet);
	rtnl_unregister_all(PF_DECnet);
	dev_remove_pack(&dn_dix_packet_type);

	dn_unregister_sysctl();

	unregister_netdevice_notifier(&dn_dev_notifier);

	proc_net_remove(&init_net, "decnet");

	proto_unregister(&dn_proto);

	rcu_barrier_bh(); /* Wait for completion of call_rcu_bh()'s */
}
module_exit(decnet_exit);
#endif