/*
 *	X.25 Packet Layer release 002
 *
 *	This is ALPHA test software. This code may break your machine,
 *	randomly fail to work with new releases, misbehave and/or generally
 *	screw up. It might even work.
 *
 *	This code REQUIRES 2.1.15 or higher
 *
 *	This module:
 *		This module is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	History
 *	X.25 001	Jonathan Naylor	  Started coding.
 *	X.25 002	Jonathan Naylor	  Centralised disconnect handling.
 *					  New timer architecture.
 *	2000-03-11	Henner Eisen	  MSG_EOR handling more POSIX compliant.
 *	2000-03-22	Daniela Squassoni Allowed disabling/enabling of
 *					  facilities negotiation and increased
 *					  the throughput upper limit.
 *	2000-08-27	Arnaldo C. Melo	  s/suser/capable/ + micro cleanups
 *	2000-09-04	Henner Eisen	  Set sock->state in x25_accept().
 *					  Fixed x25_output() related skb leakage.
 *	2000-10-02	Henner Eisen	  Made x25_kick() single threaded per socket.
 *	2000-10-27	Henner Eisen	  MSG_DONTWAIT for fragment allocation.
 *	2000-11-14	Henner Eisen	  Closing datalink from NETDEV_GOING_DOWN
 *	2002-10-06	Arnaldo C. Melo	  Get rid of cli/sti, move proc stuff to
 *					  x25_proc.c, using seq_file
 *	2005-04-02	Shaun Pereira	  Selective sub address matching with
 *					  call user data
 *	2005-04-15	Shaun Pereira	  Fast select with no restriction on
 *					  response
 */
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ctype.h>

#include <net/x25.h>
#include <net/compat.h>
63 int sysctl_x25_restart_request_timeout
= X25_DEFAULT_T20
;
64 int sysctl_x25_call_request_timeout
= X25_DEFAULT_T21
;
65 int sysctl_x25_reset_request_timeout
= X25_DEFAULT_T22
;
66 int sysctl_x25_clear_request_timeout
= X25_DEFAULT_T23
;
67 int sysctl_x25_ack_holdback_timeout
= X25_DEFAULT_T2
;
68 int sysctl_x25_forward
= 0;
71 DEFINE_RWLOCK(x25_list_lock
);
73 static const struct proto_ops x25_proto_ops
;
75 static struct x25_address null_x25_address
= {" "};
78 struct compat_x25_subscrip_struct
{
79 char device
[200-sizeof(compat_ulong_t
)];
80 compat_ulong_t global_facil_mask
;
81 compat_uint_t extended
;
85 int x25_addr_ntoa(unsigned char *p
, struct x25_address
*called_addr
,
86 struct x25_address
*calling_addr
)
88 unsigned int called_len
, calling_len
;
89 char *called
, *calling
;
92 called_len
= (*p
>> 0) & 0x0F;
93 calling_len
= (*p
>> 4) & 0x0F;
95 called
= called_addr
->x25_addr
;
96 calling
= calling_addr
->x25_addr
;
99 for (i
= 0; i
< (called_len
+ calling_len
); i
++) {
100 if (i
< called_len
) {
102 *called
++ = ((*p
>> 0) & 0x0F) + '0';
105 *called
++ = ((*p
>> 4) & 0x0F) + '0';
109 *calling
++ = ((*p
>> 0) & 0x0F) + '0';
112 *calling
++ = ((*p
>> 4) & 0x0F) + '0';
117 *called
= *calling
= '\0';
119 return 1 + (called_len
+ calling_len
+ 1) / 2;
122 int x25_addr_aton(unsigned char *p
, struct x25_address
*called_addr
,
123 struct x25_address
*calling_addr
)
125 unsigned int called_len
, calling_len
;
126 char *called
, *calling
;
129 called
= called_addr
->x25_addr
;
130 calling
= calling_addr
->x25_addr
;
132 called_len
= strlen(called
);
133 calling_len
= strlen(calling
);
135 *p
++ = (calling_len
<< 4) | (called_len
<< 0);
137 for (i
= 0; i
< (called_len
+ calling_len
); i
++) {
138 if (i
< called_len
) {
140 *p
|= (*called
++ - '0') << 0;
144 *p
|= (*called
++ - '0') << 4;
148 *p
|= (*calling
++ - '0') << 0;
152 *p
|= (*calling
++ - '0') << 4;
157 return 1 + (called_len
+ calling_len
+ 1) / 2;
161 * Socket removal during an interrupt is now safe.
163 static void x25_remove_socket(struct sock
*sk
)
165 write_lock_bh(&x25_list_lock
);
166 sk_del_node_init(sk
);
167 write_unlock_bh(&x25_list_lock
);
171 * Kill all bound sockets on a dropped device.
173 static void x25_kill_by_device(struct net_device
*dev
)
176 struct hlist_node
*node
;
178 write_lock_bh(&x25_list_lock
);
180 sk_for_each(s
, node
, &x25_list
)
181 if (x25_sk(s
)->neighbour
&& x25_sk(s
)->neighbour
->dev
== dev
)
182 x25_disconnect(s
, ENETUNREACH
, 0, 0);
184 write_unlock_bh(&x25_list_lock
);
188 * Handle device status changes.
190 static int x25_device_event(struct notifier_block
*this, unsigned long event
,
193 struct net_device
*dev
= ptr
;
194 struct x25_neigh
*nb
;
196 if (!net_eq(dev_net(dev
), &init_net
))
199 if (dev
->type
== ARPHRD_X25
200 #if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
201 || dev
->type
== ARPHRD_ETHER
206 x25_link_device_up(dev
);
208 case NETDEV_GOING_DOWN
:
209 nb
= x25_get_neigh(dev
);
211 x25_terminate_link(nb
);
216 x25_kill_by_device(dev
);
217 x25_route_device_down(dev
);
218 x25_link_device_down(dev
);
227 * Add a socket to the bound sockets list.
229 static void x25_insert_socket(struct sock
*sk
)
231 write_lock_bh(&x25_list_lock
);
232 sk_add_node(sk
, &x25_list
);
233 write_unlock_bh(&x25_list_lock
);
237 * Find a socket that wants to accept the Call Request we just
238 * received. Check the full list for an address/cud match.
239 * If no cuds match return the next_best thing, an address match.
240 * Note: if a listening socket has cud set it must only get calls
243 static struct sock
*x25_find_listener(struct x25_address
*addr
,
247 struct sock
*next_best
;
248 struct hlist_node
*node
;
250 read_lock_bh(&x25_list_lock
);
253 sk_for_each(s
, node
, &x25_list
)
254 if ((!strcmp(addr
->x25_addr
,
255 x25_sk(s
)->source_addr
.x25_addr
) ||
256 !strcmp(addr
->x25_addr
,
257 null_x25_address
.x25_addr
)) &&
258 s
->sk_state
== TCP_LISTEN
) {
260 * Found a listening socket, now check the incoming
261 * call user data vs this sockets call user data
263 if(skb
->len
> 0 && x25_sk(s
)->cudmatchlength
> 0) {
264 if((memcmp(x25_sk(s
)->calluserdata
.cuddata
,
266 x25_sk(s
)->cudmatchlength
)) == 0) {
280 read_unlock_bh(&x25_list_lock
);
285 * Find a connected X.25 socket given my LCI and neighbour.
287 static struct sock
*__x25_find_socket(unsigned int lci
, struct x25_neigh
*nb
)
290 struct hlist_node
*node
;
292 sk_for_each(s
, node
, &x25_list
)
293 if (x25_sk(s
)->lci
== lci
&& x25_sk(s
)->neighbour
== nb
) {
302 struct sock
*x25_find_socket(unsigned int lci
, struct x25_neigh
*nb
)
306 read_lock_bh(&x25_list_lock
);
307 s
= __x25_find_socket(lci
, nb
);
308 read_unlock_bh(&x25_list_lock
);
313 * Find a unique LCI for a given device.
315 static unsigned int x25_new_lci(struct x25_neigh
*nb
)
317 unsigned int lci
= 1;
320 read_lock_bh(&x25_list_lock
);
322 while ((sk
= __x25_find_socket(lci
, nb
)) != NULL
) {
330 read_unlock_bh(&x25_list_lock
);
337 static void __x25_destroy_socket(struct sock
*);
340 * handler for deferred kills.
342 static void x25_destroy_timer(unsigned long data
)
344 x25_destroy_socket_from_timer((struct sock
*)data
);
348 * This is called from user mode and the timers. Thus it protects itself
349 * against interrupt users but doesn't worry about being called during
350 * work. Once it is removed from the queue no interrupt or bottom half
351 * will touch it and we are (fairly 8-) ) safe.
352 * Not static as it's used by the timer
354 static void __x25_destroy_socket(struct sock
*sk
)
358 x25_stop_heartbeat(sk
);
361 x25_remove_socket(sk
);
362 x25_clear_queues(sk
); /* Flush the queues */
364 while ((skb
= skb_dequeue(&sk
->sk_receive_queue
)) != NULL
) {
365 if (skb
->sk
!= sk
) { /* A pending connection */
367 * Queue the unaccepted socket for death
369 sock_set_flag(skb
->sk
, SOCK_DEAD
);
370 x25_start_heartbeat(skb
->sk
);
371 x25_sk(skb
->sk
)->state
= X25_STATE_0
;
377 if (sk_has_allocations(sk
)) {
378 /* Defer: outstanding buffers */
379 sk
->sk_timer
.expires
= jiffies
+ 10 * HZ
;
380 sk
->sk_timer
.function
= x25_destroy_timer
;
381 sk
->sk_timer
.data
= (unsigned long)sk
;
382 add_timer(&sk
->sk_timer
);
384 /* drop last reference so sock_put will free */
389 void x25_destroy_socket_from_timer(struct sock
*sk
)
393 __x25_destroy_socket(sk
);
398 static void x25_destroy_socket(struct sock
*sk
)
402 __x25_destroy_socket(sk
);
408 * Handling for system calls applied via the various interfaces to a
409 * X.25 socket object.
412 static int x25_setsockopt(struct socket
*sock
, int level
, int optname
,
413 char __user
*optval
, unsigned int optlen
)
416 struct sock
*sk
= sock
->sk
;
417 int rc
= -ENOPROTOOPT
;
420 if (level
!= SOL_X25
|| optname
!= X25_QBITINCL
)
424 if (optlen
< sizeof(int))
428 if (get_user(opt
, (int __user
*)optval
))
431 x25_sk(sk
)->qbitincl
= !!opt
;
438 static int x25_getsockopt(struct socket
*sock
, int level
, int optname
,
439 char __user
*optval
, int __user
*optlen
)
441 struct sock
*sk
= sock
->sk
;
442 int val
, len
, rc
= -ENOPROTOOPT
;
445 if (level
!= SOL_X25
|| optname
!= X25_QBITINCL
)
449 if (get_user(len
, optlen
))
452 len
= min_t(unsigned int, len
, sizeof(int));
459 if (put_user(len
, optlen
))
462 val
= x25_sk(sk
)->qbitincl
;
463 rc
= copy_to_user(optval
, &val
, len
) ? -EFAULT
: 0;
469 static int x25_listen(struct socket
*sock
, int backlog
)
471 struct sock
*sk
= sock
->sk
;
472 int rc
= -EOPNOTSUPP
;
475 if (sk
->sk_state
!= TCP_LISTEN
) {
476 memset(&x25_sk(sk
)->dest_addr
, 0, X25_ADDR_LEN
);
477 sk
->sk_max_ack_backlog
= backlog
;
478 sk
->sk_state
= TCP_LISTEN
;
486 static struct proto x25_proto
= {
488 .owner
= THIS_MODULE
,
489 .obj_size
= sizeof(struct x25_sock
),
492 static struct sock
*x25_alloc_socket(struct net
*net
)
494 struct x25_sock
*x25
;
495 struct sock
*sk
= sk_alloc(net
, AF_X25
, GFP_ATOMIC
, &x25_proto
);
500 sock_init_data(NULL
, sk
);
503 skb_queue_head_init(&x25
->ack_queue
);
504 skb_queue_head_init(&x25
->fragment_queue
);
505 skb_queue_head_init(&x25
->interrupt_in_queue
);
506 skb_queue_head_init(&x25
->interrupt_out_queue
);
511 static int x25_create(struct net
*net
, struct socket
*sock
, int protocol
,
515 struct x25_sock
*x25
;
516 int rc
= -EAFNOSUPPORT
;
518 if (!net_eq(net
, &init_net
))
521 rc
= -ESOCKTNOSUPPORT
;
522 if (sock
->type
!= SOCK_SEQPACKET
)
530 if ((sk
= x25_alloc_socket(net
)) == NULL
)
535 sock_init_data(sock
, sk
);
539 sock
->ops
= &x25_proto_ops
;
540 sk
->sk_protocol
= protocol
;
541 sk
->sk_backlog_rcv
= x25_backlog_rcv
;
543 x25
->t21
= sysctl_x25_call_request_timeout
;
544 x25
->t22
= sysctl_x25_reset_request_timeout
;
545 x25
->t23
= sysctl_x25_clear_request_timeout
;
546 x25
->t2
= sysctl_x25_ack_holdback_timeout
;
547 x25
->state
= X25_STATE_0
;
548 x25
->cudmatchlength
= 0;
549 x25
->accptapprv
= X25_DENY_ACCPT_APPRV
; /* normally no cud */
552 x25
->facilities
.winsize_in
= X25_DEFAULT_WINDOW_SIZE
;
553 x25
->facilities
.winsize_out
= X25_DEFAULT_WINDOW_SIZE
;
554 x25
->facilities
.pacsize_in
= X25_DEFAULT_PACKET_SIZE
;
555 x25
->facilities
.pacsize_out
= X25_DEFAULT_PACKET_SIZE
;
556 x25
->facilities
.throughput
= X25_DEFAULT_THROUGHPUT
;
557 x25
->facilities
.reverse
= X25_DEFAULT_REVERSE
;
558 x25
->dte_facilities
.calling_len
= 0;
559 x25
->dte_facilities
.called_len
= 0;
560 memset(x25
->dte_facilities
.called_ae
, '\0',
561 sizeof(x25
->dte_facilities
.called_ae
));
562 memset(x25
->dte_facilities
.calling_ae
, '\0',
563 sizeof(x25
->dte_facilities
.calling_ae
));
570 static struct sock
*x25_make_new(struct sock
*osk
)
572 struct sock
*sk
= NULL
;
573 struct x25_sock
*x25
, *ox25
;
575 if (osk
->sk_type
!= SOCK_SEQPACKET
)
578 if ((sk
= x25_alloc_socket(sock_net(osk
))) == NULL
)
583 sk
->sk_type
= osk
->sk_type
;
584 sk
->sk_priority
= osk
->sk_priority
;
585 sk
->sk_protocol
= osk
->sk_protocol
;
586 sk
->sk_rcvbuf
= osk
->sk_rcvbuf
;
587 sk
->sk_sndbuf
= osk
->sk_sndbuf
;
588 sk
->sk_state
= TCP_ESTABLISHED
;
589 sk
->sk_backlog_rcv
= osk
->sk_backlog_rcv
;
590 sock_copy_flags(sk
, osk
);
593 x25
->t21
= ox25
->t21
;
594 x25
->t22
= ox25
->t22
;
595 x25
->t23
= ox25
->t23
;
597 x25
->facilities
= ox25
->facilities
;
598 x25
->qbitincl
= ox25
->qbitincl
;
599 x25
->dte_facilities
= ox25
->dte_facilities
;
600 x25
->cudmatchlength
= ox25
->cudmatchlength
;
601 x25
->accptapprv
= ox25
->accptapprv
;
608 static int x25_release(struct socket
*sock
)
610 struct sock
*sk
= sock
->sk
;
611 struct x25_sock
*x25
;
619 switch (x25
->state
) {
623 x25_disconnect(sk
, 0, 0, 0);
624 x25_destroy_socket(sk
);
630 x25_clear_queues(sk
);
631 x25_write_internal(sk
, X25_CLEAR_REQUEST
);
632 x25_start_t23timer(sk
);
633 x25
->state
= X25_STATE_2
;
634 sk
->sk_state
= TCP_CLOSE
;
635 sk
->sk_shutdown
|= SEND_SHUTDOWN
;
636 sk
->sk_state_change(sk
);
637 sock_set_flag(sk
, SOCK_DEAD
);
638 sock_set_flag(sk
, SOCK_DESTROY
);
648 static int x25_bind(struct socket
*sock
, struct sockaddr
*uaddr
, int addr_len
)
650 struct sock
*sk
= sock
->sk
;
651 struct sockaddr_x25
*addr
= (struct sockaddr_x25
*)uaddr
;
655 if (!sock_flag(sk
, SOCK_ZAPPED
) ||
656 addr_len
!= sizeof(struct sockaddr_x25
) ||
657 addr
->sx25_family
!= AF_X25
) {
662 len
= strlen(addr
->sx25_addr
.x25_addr
);
663 for (i
= 0; i
< len
; i
++) {
664 if (!isdigit(addr
->sx25_addr
.x25_addr
[i
])) {
670 x25_sk(sk
)->source_addr
= addr
->sx25_addr
;
671 x25_insert_socket(sk
);
672 sock_reset_flag(sk
, SOCK_ZAPPED
);
673 SOCK_DEBUG(sk
, "x25_bind: socket is bound\n");
679 static int x25_wait_for_connection_establishment(struct sock
*sk
)
681 DECLARE_WAITQUEUE(wait
, current
);
684 add_wait_queue_exclusive(sk
->sk_sleep
, &wait
);
686 __set_current_state(TASK_INTERRUPTIBLE
);
688 if (signal_pending(current
))
692 sk
->sk_socket
->state
= SS_UNCONNECTED
;
696 if (sk
->sk_state
!= TCP_ESTABLISHED
) {
703 __set_current_state(TASK_RUNNING
);
704 remove_wait_queue(sk
->sk_sleep
, &wait
);
708 static int x25_connect(struct socket
*sock
, struct sockaddr
*uaddr
,
709 int addr_len
, int flags
)
711 struct sock
*sk
= sock
->sk
;
712 struct x25_sock
*x25
= x25_sk(sk
);
713 struct sockaddr_x25
*addr
= (struct sockaddr_x25
*)uaddr
;
714 struct x25_route
*rt
;
719 if (sk
->sk_state
== TCP_ESTABLISHED
&& sock
->state
== SS_CONNECTING
) {
720 sock
->state
= SS_CONNECTED
;
721 goto out
; /* Connect completed during a ERESTARTSYS event */
725 if (sk
->sk_state
== TCP_CLOSE
&& sock
->state
== SS_CONNECTING
) {
726 sock
->state
= SS_UNCONNECTED
;
730 rc
= -EISCONN
; /* No reconnect on a seqpacket socket */
731 if (sk
->sk_state
== TCP_ESTABLISHED
)
734 sk
->sk_state
= TCP_CLOSE
;
735 sock
->state
= SS_UNCONNECTED
;
738 if (addr_len
!= sizeof(struct sockaddr_x25
) ||
739 addr
->sx25_family
!= AF_X25
)
743 rt
= x25_get_route(&addr
->sx25_addr
);
747 x25
->neighbour
= x25_get_neigh(rt
->dev
);
751 x25_limit_facilities(&x25
->facilities
, x25
->neighbour
);
753 x25
->lci
= x25_new_lci(x25
->neighbour
);
758 if (sock_flag(sk
, SOCK_ZAPPED
)) /* Must bind first - autobinding does not work */
761 if (!strcmp(x25
->source_addr
.x25_addr
, null_x25_address
.x25_addr
))
762 memset(&x25
->source_addr
, '\0', X25_ADDR_LEN
);
764 x25
->dest_addr
= addr
->sx25_addr
;
766 /* Move to connecting socket, start sending Connect Requests */
767 sock
->state
= SS_CONNECTING
;
768 sk
->sk_state
= TCP_SYN_SENT
;
770 x25
->state
= X25_STATE_1
;
772 x25_write_internal(sk
, X25_CALL_REQUEST
);
774 x25_start_heartbeat(sk
);
775 x25_start_t21timer(sk
);
779 if (sk
->sk_state
!= TCP_ESTABLISHED
&& (flags
& O_NONBLOCK
))
782 rc
= x25_wait_for_connection_establishment(sk
);
786 sock
->state
= SS_CONNECTED
;
790 x25_neigh_put(x25
->neighbour
);
799 static int x25_wait_for_data(struct sock
*sk
, long timeout
)
801 DECLARE_WAITQUEUE(wait
, current
);
804 add_wait_queue_exclusive(sk
->sk_sleep
, &wait
);
806 __set_current_state(TASK_INTERRUPTIBLE
);
807 if (sk
->sk_shutdown
& RCV_SHUTDOWN
)
810 if (signal_pending(current
))
816 if (skb_queue_empty(&sk
->sk_receive_queue
)) {
818 timeout
= schedule_timeout(timeout
);
823 __set_current_state(TASK_RUNNING
);
824 remove_wait_queue(sk
->sk_sleep
, &wait
);
828 static int x25_accept(struct socket
*sock
, struct socket
*newsock
, int flags
)
830 struct sock
*sk
= sock
->sk
;
836 if (!sk
|| sk
->sk_state
!= TCP_LISTEN
)
840 if (sk
->sk_type
!= SOCK_SEQPACKET
)
844 rc
= x25_wait_for_data(sk
, sk
->sk_rcvtimeo
);
847 skb
= skb_dequeue(&sk
->sk_receive_queue
);
852 sock_graft(newsk
, newsock
);
854 /* Now attach up the new socket */
857 sk
->sk_ack_backlog
--;
858 newsock
->state
= SS_CONNECTED
;
867 static int x25_getname(struct socket
*sock
, struct sockaddr
*uaddr
,
868 int *uaddr_len
, int peer
)
870 struct sockaddr_x25
*sx25
= (struct sockaddr_x25
*)uaddr
;
871 struct sock
*sk
= sock
->sk
;
872 struct x25_sock
*x25
= x25_sk(sk
);
877 if (sk
->sk_state
!= TCP_ESTABLISHED
) {
881 sx25
->sx25_addr
= x25
->dest_addr
;
883 sx25
->sx25_addr
= x25
->source_addr
;
885 sx25
->sx25_family
= AF_X25
;
886 *uaddr_len
= sizeof(*sx25
);
893 static unsigned int x25_datagram_poll(struct file
*file
, struct socket
*sock
,
899 rc
= datagram_poll(file
, sock
, wait
);
905 int x25_rx_call_request(struct sk_buff
*skb
, struct x25_neigh
*nb
,
910 struct x25_sock
*makex25
;
911 struct x25_address source_addr
, dest_addr
;
912 struct x25_facilities facilities
;
913 struct x25_dte_facilities dte_facilities
;
914 int len
, addr_len
, rc
;
917 * Remove the LCI and frame type.
919 skb_pull(skb
, X25_STD_MIN_LEN
);
922 * Extract the X.25 addresses and convert them to ASCII strings,
925 addr_len
= x25_addr_ntoa(skb
->data
, &source_addr
, &dest_addr
);
926 skb_pull(skb
, addr_len
);
929 * Get the length of the facilities, skip past them for the moment
930 * get the call user data because this is needed to determine
931 * the correct listener
933 len
= skb
->data
[0] + 1;
937 * Find a listener for the particular address/cud pair.
939 sk
= x25_find_listener(&source_addr
,skb
);
942 if (sk
!= NULL
&& sk_acceptq_is_full(sk
)) {
947 * We dont have any listeners for this incoming call.
951 skb_push(skb
, addr_len
+ X25_STD_MIN_LEN
);
952 if (sysctl_x25_forward
&&
953 x25_forward_call(&dest_addr
, nb
, skb
, lci
) > 0)
955 /* Call was forwarded, dont process it any more */
960 /* No listeners, can't forward, clear the call */
961 goto out_clear_request
;
966 * Try to reach a compromise on the requested facilities.
968 len
= x25_negotiate_facilities(skb
, sk
, &facilities
, &dte_facilities
);
973 * current neighbour/link might impose additional limits
974 * on certain facilties
977 x25_limit_facilities(&facilities
, nb
);
980 * Try to create a new socket.
982 make
= x25_make_new(sk
);
987 * Remove the facilities
992 make
->sk_state
= TCP_ESTABLISHED
;
994 makex25
= x25_sk(make
);
996 makex25
->dest_addr
= dest_addr
;
997 makex25
->source_addr
= source_addr
;
998 makex25
->neighbour
= nb
;
999 makex25
->facilities
= facilities
;
1000 makex25
->dte_facilities
= dte_facilities
;
1001 makex25
->vc_facil_mask
= x25_sk(sk
)->vc_facil_mask
;
1002 /* ensure no reverse facil on accept */
1003 makex25
->vc_facil_mask
&= ~X25_MASK_REVERSE
;
1004 /* ensure no calling address extension on accept */
1005 makex25
->vc_facil_mask
&= ~X25_MASK_CALLING_AE
;
1006 makex25
->cudmatchlength
= x25_sk(sk
)->cudmatchlength
;
1008 /* Normally all calls are accepted immediatly */
1009 if(makex25
->accptapprv
& X25_DENY_ACCPT_APPRV
) {
1010 x25_write_internal(make
, X25_CALL_ACCEPTED
);
1011 makex25
->state
= X25_STATE_3
;
1015 * Incoming Call User Data.
1017 skb_copy_from_linear_data(skb
, makex25
->calluserdata
.cuddata
, skb
->len
);
1018 makex25
->calluserdata
.cudlength
= skb
->len
;
1020 sk
->sk_ack_backlog
++;
1022 x25_insert_socket(make
);
1024 skb_queue_head(&sk
->sk_receive_queue
, skb
);
1026 x25_start_heartbeat(make
);
1028 if (!sock_flag(sk
, SOCK_DEAD
))
1029 sk
->sk_data_ready(sk
, skb
->len
);
1038 x25_transmit_clear_request(nb
, lci
, 0x01);
1042 static int x25_sendmsg(struct kiocb
*iocb
, struct socket
*sock
,
1043 struct msghdr
*msg
, size_t len
)
1045 struct sock
*sk
= sock
->sk
;
1046 struct x25_sock
*x25
= x25_sk(sk
);
1047 struct sockaddr_x25
*usx25
= (struct sockaddr_x25
*)msg
->msg_name
;
1048 struct sockaddr_x25 sx25
;
1049 struct sk_buff
*skb
;
1050 unsigned char *asmptr
;
1051 int noblock
= msg
->msg_flags
& MSG_DONTWAIT
;
1053 int qbit
= 0, rc
= -EINVAL
;
1056 if (msg
->msg_flags
& ~(MSG_DONTWAIT
|MSG_OOB
|MSG_EOR
|MSG_CMSG_COMPAT
))
1059 /* we currently don't support segmented records at the user interface */
1060 if (!(msg
->msg_flags
& (MSG_EOR
|MSG_OOB
)))
1063 rc
= -EADDRNOTAVAIL
;
1064 if (sock_flag(sk
, SOCK_ZAPPED
))
1068 if (sk
->sk_shutdown
& SEND_SHUTDOWN
) {
1069 send_sig(SIGPIPE
, current
, 0);
1074 if (!x25
->neighbour
)
1079 if (msg
->msg_namelen
< sizeof(sx25
))
1081 memcpy(&sx25
, usx25
, sizeof(sx25
));
1083 if (strcmp(x25
->dest_addr
.x25_addr
, sx25
.sx25_addr
.x25_addr
))
1086 if (sx25
.sx25_family
!= AF_X25
)
1090 * FIXME 1003.1g - if the socket is like this because
1091 * it has become closed (not started closed) we ought
1092 * to SIGPIPE, EPIPE;
1095 if (sk
->sk_state
!= TCP_ESTABLISHED
)
1098 sx25
.sx25_family
= AF_X25
;
1099 sx25
.sx25_addr
= x25
->dest_addr
;
1102 /* Sanity check the packet size */
1108 SOCK_DEBUG(sk
, "x25_sendmsg: sendto: Addresses built.\n");
1110 /* Build a packet */
1111 SOCK_DEBUG(sk
, "x25_sendmsg: sendto: building packet.\n");
1113 if ((msg
->msg_flags
& MSG_OOB
) && len
> 32)
1116 size
= len
+ X25_MAX_L2_LEN
+ X25_EXT_MIN_LEN
;
1118 skb
= sock_alloc_send_skb(sk
, size
, noblock
, &rc
);
1121 X25_SKB_CB(skb
)->flags
= msg
->msg_flags
;
1123 skb_reserve(skb
, X25_MAX_L2_LEN
+ X25_EXT_MIN_LEN
);
1126 * Put the data on the end
1128 SOCK_DEBUG(sk
, "x25_sendmsg: Copying user data\n");
1130 skb_reset_transport_header(skb
);
1133 rc
= memcpy_fromiovec(skb_transport_header(skb
), msg
->msg_iov
, len
);
1138 * If the Q BIT Include socket option is in force, the first
1139 * byte of the user data is the logical value of the Q Bit.
1141 if (x25
->qbitincl
) {
1142 qbit
= skb
->data
[0];
1147 * Push down the X.25 header
1149 SOCK_DEBUG(sk
, "x25_sendmsg: Building X.25 Header.\n");
1151 if (msg
->msg_flags
& MSG_OOB
) {
1152 if (x25
->neighbour
->extended
) {
1153 asmptr
= skb_push(skb
, X25_STD_MIN_LEN
);
1154 *asmptr
++ = ((x25
->lci
>> 8) & 0x0F) | X25_GFI_EXTSEQ
;
1155 *asmptr
++ = (x25
->lci
>> 0) & 0xFF;
1156 *asmptr
++ = X25_INTERRUPT
;
1158 asmptr
= skb_push(skb
, X25_STD_MIN_LEN
);
1159 *asmptr
++ = ((x25
->lci
>> 8) & 0x0F) | X25_GFI_STDSEQ
;
1160 *asmptr
++ = (x25
->lci
>> 0) & 0xFF;
1161 *asmptr
++ = X25_INTERRUPT
;
1164 if (x25
->neighbour
->extended
) {
1165 /* Build an Extended X.25 header */
1166 asmptr
= skb_push(skb
, X25_EXT_MIN_LEN
);
1167 *asmptr
++ = ((x25
->lci
>> 8) & 0x0F) | X25_GFI_EXTSEQ
;
1168 *asmptr
++ = (x25
->lci
>> 0) & 0xFF;
1169 *asmptr
++ = X25_DATA
;
1170 *asmptr
++ = X25_DATA
;
1172 /* Build an Standard X.25 header */
1173 asmptr
= skb_push(skb
, X25_STD_MIN_LEN
);
1174 *asmptr
++ = ((x25
->lci
>> 8) & 0x0F) | X25_GFI_STDSEQ
;
1175 *asmptr
++ = (x25
->lci
>> 0) & 0xFF;
1176 *asmptr
++ = X25_DATA
;
1180 skb
->data
[0] |= X25_Q_BIT
;
1183 SOCK_DEBUG(sk
, "x25_sendmsg: Built header.\n");
1184 SOCK_DEBUG(sk
, "x25_sendmsg: Transmitting buffer\n");
1187 if (sk
->sk_state
!= TCP_ESTABLISHED
)
1190 if (msg
->msg_flags
& MSG_OOB
)
1191 skb_queue_tail(&x25
->interrupt_out_queue
, skb
);
1193 rc
= x25_output(sk
, skb
);
1197 else if (x25
->qbitincl
)
/*
 *	lock_sock() is currently only used to serialize this x25_kick()
 *	against input-driven x25_kick() calls. It currently only blocks
 *	incoming packets for this socket and does not protect against
 *	any other socket state changes and is not called from anywhere
 *	else. As x25_kick() cannot block and as long as all socket
 *	operations are BKL-wrapped, we don't need to take care about
 *	purging the backlog queue in x25_release().
 *
 *	Using lock_sock() to protect all socket operations entirely
 *	(and making the whole x25 stack SMP aware) unfortunately would
 *	require major changes to {send,recv}msg and skb allocation methods.
 */
1228 static int x25_recvmsg(struct kiocb
*iocb
, struct socket
*sock
,
1229 struct msghdr
*msg
, size_t size
,
1232 struct sock
*sk
= sock
->sk
;
1233 struct x25_sock
*x25
= x25_sk(sk
);
1234 struct sockaddr_x25
*sx25
= (struct sockaddr_x25
*)msg
->msg_name
;
1237 struct sk_buff
*skb
;
1238 unsigned char *asmptr
;
1243 * This works for seqpacket too. The receiver has ordered the queue for
1244 * us! We do one quick check first though
1246 if (sk
->sk_state
!= TCP_ESTABLISHED
)
1249 if (flags
& MSG_OOB
) {
1251 if (sock_flag(sk
, SOCK_URGINLINE
) ||
1252 !skb_peek(&x25
->interrupt_in_queue
))
1255 skb
= skb_dequeue(&x25
->interrupt_in_queue
);
1257 skb_pull(skb
, X25_STD_MIN_LEN
);
1260 * No Q bit information on Interrupt data.
1262 if (x25
->qbitincl
) {
1263 asmptr
= skb_push(skb
, 1);
1267 msg
->msg_flags
|= MSG_OOB
;
1269 /* Now we can treat all alike */
1270 skb
= skb_recv_datagram(sk
, flags
& ~MSG_DONTWAIT
,
1271 flags
& MSG_DONTWAIT
, &rc
);
1275 qbit
= (skb
->data
[0] & X25_Q_BIT
) == X25_Q_BIT
;
1277 skb_pull(skb
, x25
->neighbour
->extended
?
1278 X25_EXT_MIN_LEN
: X25_STD_MIN_LEN
);
1280 if (x25
->qbitincl
) {
1281 asmptr
= skb_push(skb
, 1);
1286 skb_reset_transport_header(skb
);
1289 if (copied
> size
) {
1291 msg
->msg_flags
|= MSG_TRUNC
;
1294 /* Currently, each datagram always contains a complete record */
1295 msg
->msg_flags
|= MSG_EOR
;
1297 rc
= skb_copy_datagram_iovec(skb
, 0, msg
->msg_iov
, copied
);
1299 goto out_free_dgram
;
1302 sx25
->sx25_family
= AF_X25
;
1303 sx25
->sx25_addr
= x25
->dest_addr
;
1306 msg
->msg_namelen
= sizeof(struct sockaddr_x25
);
1313 skb_free_datagram(sk
, skb
);
1320 static int x25_ioctl(struct socket
*sock
, unsigned int cmd
, unsigned long arg
)
1322 struct sock
*sk
= sock
->sk
;
1323 struct x25_sock
*x25
= x25_sk(sk
);
1324 void __user
*argp
= (void __user
*)arg
;
1330 int amount
= sk
->sk_sndbuf
- sk_wmem_alloc_get(sk
);
1334 rc
= put_user(amount
, (unsigned int __user
*)argp
);
1339 struct sk_buff
*skb
;
1342 * These two are safe on a single CPU system as
1343 * only user tasks fiddle here
1345 if ((skb
= skb_peek(&sk
->sk_receive_queue
)) != NULL
)
1347 rc
= put_user(amount
, (unsigned int __user
*)argp
);
1354 rc
= sock_get_timestamp(sk
,
1355 (struct timeval __user
*)argp
);
1360 rc
= sock_get_timestampns(sk
,
1361 (struct timespec __user
*)argp
);
1365 case SIOCGIFDSTADDR
:
1366 case SIOCSIFDSTADDR
:
1367 case SIOCGIFBRDADDR
:
1368 case SIOCSIFBRDADDR
:
1369 case SIOCGIFNETMASK
:
1370 case SIOCSIFNETMASK
:
1378 if (!capable(CAP_NET_ADMIN
))
1380 rc
= x25_route_ioctl(cmd
, argp
);
1382 case SIOCX25GSUBSCRIP
:
1383 rc
= x25_subscr_ioctl(cmd
, argp
);
1385 case SIOCX25SSUBSCRIP
:
1387 if (!capable(CAP_NET_ADMIN
))
1389 rc
= x25_subscr_ioctl(cmd
, argp
);
1391 case SIOCX25GFACILITIES
: {
1392 struct x25_facilities fac
= x25
->facilities
;
1393 rc
= copy_to_user(argp
, &fac
,
1394 sizeof(fac
)) ? -EFAULT
: 0;
1398 case SIOCX25SFACILITIES
: {
1399 struct x25_facilities facilities
;
1401 if (copy_from_user(&facilities
, argp
,
1402 sizeof(facilities
)))
1405 if (sk
->sk_state
!= TCP_LISTEN
&&
1406 sk
->sk_state
!= TCP_CLOSE
)
1408 if (facilities
.pacsize_in
< X25_PS16
||
1409 facilities
.pacsize_in
> X25_PS4096
)
1411 if (facilities
.pacsize_out
< X25_PS16
||
1412 facilities
.pacsize_out
> X25_PS4096
)
1414 if (facilities
.winsize_in
< 1 ||
1415 facilities
.winsize_in
> 127)
1417 if (facilities
.throughput
< 0x03 ||
1418 facilities
.throughput
> 0xDD)
1420 if (facilities
.reverse
&&
1421 (facilities
.reverse
& 0x81) != 0x81)
1423 x25
->facilities
= facilities
;
1428 case SIOCX25GDTEFACILITIES
: {
1429 rc
= copy_to_user(argp
, &x25
->dte_facilities
,
1430 sizeof(x25
->dte_facilities
));
1436 case SIOCX25SDTEFACILITIES
: {
1437 struct x25_dte_facilities dtefacs
;
1439 if (copy_from_user(&dtefacs
, argp
, sizeof(dtefacs
)))
1442 if (sk
->sk_state
!= TCP_LISTEN
&&
1443 sk
->sk_state
!= TCP_CLOSE
)
1445 if (dtefacs
.calling_len
> X25_MAX_AE_LEN
)
1447 if (dtefacs
.calling_ae
== NULL
)
1449 if (dtefacs
.called_len
> X25_MAX_AE_LEN
)
1451 if (dtefacs
.called_ae
== NULL
)
1453 x25
->dte_facilities
= dtefacs
;
1458 case SIOCX25GCALLUSERDATA
: {
1459 struct x25_calluserdata cud
= x25
->calluserdata
;
1460 rc
= copy_to_user(argp
, &cud
,
1461 sizeof(cud
)) ? -EFAULT
: 0;
1465 case SIOCX25SCALLUSERDATA
: {
1466 struct x25_calluserdata calluserdata
;
1469 if (copy_from_user(&calluserdata
, argp
,
1470 sizeof(calluserdata
)))
1473 if (calluserdata
.cudlength
> X25_MAX_CUD_LEN
)
1475 x25
->calluserdata
= calluserdata
;
1480 case SIOCX25GCAUSEDIAG
: {
1481 struct x25_causediag causediag
;
1482 causediag
= x25
->causediag
;
1483 rc
= copy_to_user(argp
, &causediag
,
1484 sizeof(causediag
)) ? -EFAULT
: 0;
1488 case SIOCX25SCAUSEDIAG
: {
1489 struct x25_causediag causediag
;
1491 if (copy_from_user(&causediag
, argp
, sizeof(causediag
)))
1493 x25
->causediag
= causediag
;
1499 case SIOCX25SCUDMATCHLEN
: {
1500 struct x25_subaddr sub_addr
;
1502 if(sk
->sk_state
!= TCP_CLOSE
)
1505 if (copy_from_user(&sub_addr
, argp
,
1509 if(sub_addr
.cudmatchlength
> X25_MAX_CUD_LEN
)
1511 x25
->cudmatchlength
= sub_addr
.cudmatchlength
;
1516 case SIOCX25CALLACCPTAPPRV
: {
1518 if (sk
->sk_state
!= TCP_CLOSE
)
1520 x25
->accptapprv
= X25_ALLOW_ACCPT_APPRV
;
1525 case SIOCX25SENDCALLACCPT
: {
1527 if (sk
->sk_state
!= TCP_ESTABLISHED
)
1529 if (x25
->accptapprv
) /* must call accptapprv above */
1531 x25_write_internal(sk
, X25_CALL_ACCEPTED
);
1532 x25
->state
= X25_STATE_3
;
1546 static const struct net_proto_family x25_family_ops
= {
1548 .create
= x25_create
,
1549 .owner
= THIS_MODULE
,
1552 #ifdef CONFIG_COMPAT
1553 static int compat_x25_subscr_ioctl(unsigned int cmd
,
1554 struct compat_x25_subscrip_struct __user
*x25_subscr32
)
1556 struct compat_x25_subscrip_struct x25_subscr
;
1557 struct x25_neigh
*nb
;
1558 struct net_device
*dev
;
1562 if (copy_from_user(&x25_subscr
, x25_subscr32
, sizeof(*x25_subscr32
)))
1566 dev
= x25_dev_get(x25_subscr
.device
);
1570 nb
= x25_get_neigh(dev
);
1576 if (cmd
== SIOCX25GSUBSCRIP
) {
1577 x25_subscr
.extended
= nb
->extended
;
1578 x25_subscr
.global_facil_mask
= nb
->global_facil_mask
;
1579 rc
= copy_to_user(x25_subscr32
, &x25_subscr
,
1580 sizeof(*x25_subscr32
)) ? -EFAULT
: 0;
1583 if (x25_subscr
.extended
== 0 || x25_subscr
.extended
== 1) {
1585 nb
->extended
= x25_subscr
.extended
;
1586 nb
->global_facil_mask
= x25_subscr
.global_facil_mask
;
1597 static int compat_x25_ioctl(struct socket
*sock
, unsigned int cmd
,
1600 void __user
*argp
= compat_ptr(arg
);
1601 struct sock
*sk
= sock
->sk
;
1603 int rc
= -ENOIOCTLCMD
;
1608 rc
= x25_ioctl(sock
, cmd
, (unsigned long)argp
);
1614 rc
= compat_sock_get_timestamp(sk
,
1615 (struct timeval __user
*)argp
);
1622 rc
= compat_sock_get_timestampns(sk
,
1623 (struct timespec __user
*)argp
);
1628 case SIOCGIFDSTADDR
:
1629 case SIOCSIFDSTADDR
:
1630 case SIOCGIFBRDADDR
:
1631 case SIOCSIFBRDADDR
:
1632 case SIOCGIFNETMASK
:
1633 case SIOCSIFNETMASK
:
1641 if (!capable(CAP_NET_ADMIN
))
1644 rc
= x25_route_ioctl(cmd
, argp
);
1647 case SIOCX25GSUBSCRIP
:
1649 rc
= compat_x25_subscr_ioctl(cmd
, argp
);
1652 case SIOCX25SSUBSCRIP
:
1654 if (!capable(CAP_NET_ADMIN
))
1657 rc
= compat_x25_subscr_ioctl(cmd
, argp
);
1660 case SIOCX25GFACILITIES
:
1661 case SIOCX25SFACILITIES
:
1662 case SIOCX25GDTEFACILITIES
:
1663 case SIOCX25SDTEFACILITIES
:
1664 case SIOCX25GCALLUSERDATA
:
1665 case SIOCX25SCALLUSERDATA
:
1666 case SIOCX25GCAUSEDIAG
:
1667 case SIOCX25SCAUSEDIAG
:
1668 case SIOCX25SCUDMATCHLEN
:
1669 case SIOCX25CALLACCPTAPPRV
:
1670 case SIOCX25SENDCALLACCPT
:
1671 rc
= x25_ioctl(sock
, cmd
, (unsigned long)argp
);
1681 static const struct proto_ops x25_proto_ops
= {
1683 .owner
= THIS_MODULE
,
1684 .release
= x25_release
,
1686 .connect
= x25_connect
,
1687 .socketpair
= sock_no_socketpair
,
1688 .accept
= x25_accept
,
1689 .getname
= x25_getname
,
1690 .poll
= x25_datagram_poll
,
1692 #ifdef CONFIG_COMPAT
1693 .compat_ioctl
= compat_x25_ioctl
,
1695 .listen
= x25_listen
,
1696 .shutdown
= sock_no_shutdown
,
1697 .setsockopt
= x25_setsockopt
,
1698 .getsockopt
= x25_getsockopt
,
1699 .sendmsg
= x25_sendmsg
,
1700 .recvmsg
= x25_recvmsg
,
1701 .mmap
= sock_no_mmap
,
1702 .sendpage
= sock_no_sendpage
,
1705 static struct packet_type x25_packet_type __read_mostly
= {
1706 .type
= cpu_to_be16(ETH_P_X25
),
1707 .func
= x25_lapb_receive_frame
,
1710 static struct notifier_block x25_dev_notifier
= {
1711 .notifier_call
= x25_device_event
,
1714 void x25_kill_by_neigh(struct x25_neigh
*nb
)
1717 struct hlist_node
*node
;
1719 write_lock_bh(&x25_list_lock
);
1721 sk_for_each(s
, node
, &x25_list
)
1722 if (x25_sk(s
)->neighbour
== nb
)
1723 x25_disconnect(s
, ENETUNREACH
, 0, 0);
1725 write_unlock_bh(&x25_list_lock
);
1727 /* Remove any related forwards */
1728 x25_clear_forward_by_dev(nb
->dev
);
1731 static int __init
x25_init(void)
1733 int rc
= proto_register(&x25_proto
, 0);
1738 rc
= sock_register(&x25_family_ops
);
1742 dev_add_pack(&x25_packet_type
);
1744 rc
= register_netdevice_notifier(&x25_dev_notifier
);
1748 printk(KERN_INFO
"X.25 for Linux Version 0.2\n");
1750 x25_register_sysctl();
1751 rc
= x25_proc_init();
1757 unregister_netdevice_notifier(&x25_dev_notifier
);
1759 sock_unregister(AF_X25
);
1761 proto_unregister(&x25_proto
);
1764 module_init(x25_init
);
1766 static void __exit
x25_exit(void)
1772 x25_unregister_sysctl();
1774 unregister_netdevice_notifier(&x25_dev_notifier
);
1776 dev_remove_pack(&x25_packet_type
);
1778 sock_unregister(AF_X25
);
1779 proto_unregister(&x25_proto
);
1781 module_exit(x25_exit
);
1783 MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>");
1784 MODULE_DESCRIPTION("The X.25 Packet Layer network layer protocol");
1785 MODULE_LICENSE("GPL");
1786 MODULE_ALIAS_NETPROTO(PF_X25
);