/*
 *	X.25 Packet Layer release 002
 *
 *	This is ALPHA test software. This code may break your machine,
 *	randomly fail to work with new releases, misbehave and/or generally
 *	screw up. It might even work.
 *
 *	This code REQUIRES 2.1.15 or higher
 *
 *	This module is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	History
 *	X.25 001	Jonathan Naylor		Started coding.
 *	X.25 002	Jonathan Naylor		Centralised disconnect handling.
 *						New timer architecture.
 *	2000-03-11	Henner Eisen		MSG_EOR handling more POSIX compliant.
 *	2000-03-22	Daniela Squassoni	Allowed disabling/enabling of
 *						facilities negotiation and increased
 *						the throughput upper limit.
 *	2000-08-27	Arnaldo C. Melo		s/suser/capable/ + micro cleanups
 *	2000-09-04	Henner Eisen		Set sock->state in x25_accept().
 *						Fixed x25_output() related skb leakage.
 *	2000-10-02	Henner Eisen		Made x25_kick() single threaded per socket.
 *	2000-10-27	Henner Eisen		MSG_DONTWAIT for fragment allocation.
 *	2000-11-14	Henner Eisen		Closing datalink from NETDEV_GOING_DOWN
 *	2002-10-06	Arnaldo C. Melo		Get rid of cli/sti, move proc stuff to
 *						x25_proc.c, using seq_file
 *	2005-04-02	Shaun Pereira		Selective sub address matching with
 *						call user data
 *	2005-04-15	Shaun Pereira		Fast select with no restriction on
 *						response
 */
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/compat.h>

#include <net/x25.h>
#include <net/compat.h>
int sysctl_x25_restart_request_timeout = X25_DEFAULT_T20;
int sysctl_x25_call_request_timeout    = X25_DEFAULT_T21;
int sysctl_x25_reset_request_timeout   = X25_DEFAULT_T22;
int sysctl_x25_clear_request_timeout   = X25_DEFAULT_T23;
int sysctl_x25_ack_holdback_timeout    = X25_DEFAULT_T2;
int sysctl_x25_forward                 = 0;
HLIST_HEAD(x25_list);
DEFINE_RWLOCK(x25_list_lock);

static const struct proto_ops x25_proto_ops;

static struct x25_address null_x25_address = {" "};
struct compat_x25_subscrip_struct {
    char device[200-sizeof(compat_ulong_t)];
    compat_ulong_t global_facil_mask;
    compat_uint_t extended;
};
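/*
 *	Address handling: on the wire an X.121 address block starts with a
 *	length octet (calling address length in the high nibble, called
 *	address length in the low nibble), followed by the digits packed
 *	two per octet as BCD nibbles.  The two helpers below convert between
 *	that format and the NUL-terminated ASCII strings kept in
 *	struct x25_address.
 */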
int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr,
                  struct x25_address *calling_addr)
{
    unsigned int called_len, calling_len;
    char *called, *calling;
    unsigned int i;

    called_len  = (*p >> 0) & 0x0F;
    calling_len = (*p >> 4) & 0x0F;

    called  = called_addr->x25_addr;
    calling = calling_addr->x25_addr;
    p++;

    for (i = 0; i < (called_len + calling_len); i++) {
        if (i < called_len) {
            if (i % 2 != 0) {
                *called++ = ((*p >> 0) & 0x0F) + '0';
                p++;
            } else {
                *called++ = ((*p >> 4) & 0x0F) + '0';
            }
        } else {
            if (i % 2 != 0) {
                *calling++ = ((*p >> 0) & 0x0F) + '0';
                p++;
            } else {
                *calling++ = ((*p >> 4) & 0x0F) + '0';
            }
        }
    }

    *called = *calling = '\0';

    return 1 + (called_len + calling_len + 1) / 2;
}
int x25_addr_aton(unsigned char *p, struct x25_address *called_addr,
                  struct x25_address *calling_addr)
{
    unsigned int called_len, calling_len;
    char *called, *calling;
    int i;

    called  = called_addr->x25_addr;
    calling = calling_addr->x25_addr;

    called_len  = strlen(called);
    calling_len = strlen(calling);

    *p++ = (calling_len << 4) | (called_len << 0);

    for (i = 0; i < (called_len + calling_len); i++) {
        if (i < called_len) {
            if (i % 2 != 0) {
                *p |= (*called++ - '0') << 0;
                p++;
            } else {
                *p = 0x00;
                *p |= (*called++ - '0') << 4;
            }
        } else {
            if (i % 2 != 0) {
                *p |= (*calling++ - '0') << 0;
                p++;
            } else {
                *p = 0x00;
                *p |= (*calling++ - '0') << 4;
            }
        }
    }

    return 1 + (called_len + calling_len + 1) / 2;
}
/*
 *	Socket removal during an interrupt is now safe.
 */
static void x25_remove_socket(struct sock *sk)
{
    write_lock_bh(&x25_list_lock);
    sk_del_node_init(sk);
    write_unlock_bh(&x25_list_lock);
}
/*
 *	Kill all bound sockets on a dropped device.
 */
static void x25_kill_by_device(struct net_device *dev)
{
    struct sock *s;
    struct hlist_node *node;

    write_lock_bh(&x25_list_lock);

    sk_for_each(s, node, &x25_list)
        if (x25_sk(s)->neighbour && x25_sk(s)->neighbour->dev == dev)
            x25_disconnect(s, ENETUNREACH, 0, 0);

    write_unlock_bh(&x25_list_lock);
}
/*
 *	Handle device status changes.
 */
static int x25_device_event(struct notifier_block *this, unsigned long event,
                            void *ptr)
{
    struct net_device *dev = ptr;
    struct x25_neigh *nb;

    if (!net_eq(dev_net(dev), &init_net))
        return NOTIFY_DONE;

    if (dev->type == ARPHRD_X25
#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
     || dev->type == ARPHRD_ETHER
#endif
     ) {
        switch (event) {
        case NETDEV_UP:
            x25_link_device_up(dev);
            break;
        case NETDEV_GOING_DOWN:
            nb = x25_get_neigh(dev);
            if (nb) {
                x25_terminate_link(nb);
                x25_neigh_put(nb);
            }
            break;
        case NETDEV_DOWN:
            x25_kill_by_device(dev);
            x25_route_device_down(dev);
            x25_link_device_down(dev);
            break;
        }
    }

    return NOTIFY_DONE;
}
/*
 *	Add a socket to the bound sockets list.
 */
static void x25_insert_socket(struct sock *sk)
{
    write_lock_bh(&x25_list_lock);
    sk_add_node(sk, &x25_list);
    write_unlock_bh(&x25_list_lock);
}
/*
 *	Find a socket that wants to accept the Call Request we just
 *	received. Check the full list for an address/cud match.
 *	If no cuds match return the next_best thing, an address match.
 *	Note: if a listening socket has cud set it must only get calls
 *	with matching cud.
 */
static struct sock *x25_find_listener(struct x25_address *addr,
                                      struct sk_buff *skb)
{
    struct sock *s;
    struct sock *next_best;
    struct hlist_node *node;

    read_lock_bh(&x25_list_lock);
    next_best = NULL;

    sk_for_each(s, node, &x25_list)
        if ((!strcmp(addr->x25_addr,
                     x25_sk(s)->source_addr.x25_addr) ||
             !strcmp(addr->x25_addr,
                     null_x25_address.x25_addr)) &&
            s->sk_state == TCP_LISTEN) {
            /*
             * Found a listening socket, now check the incoming
             * call user data vs this sockets call user data
             */
            if (skb->len > 0 && x25_sk(s)->cudmatchlength > 0) {
                if ((memcmp(x25_sk(s)->calluserdata.cuddata,
                            skb->data,
                            x25_sk(s)->cudmatchlength)) == 0) {
                    sock_hold(s);
                    goto found;
                }
            } else
                next_best = s;
        }
    if (next_best) {
        s = next_best;
        sock_hold(s);
        goto found;
    }
    s = NULL;
found:
    read_unlock_bh(&x25_list_lock);
    return s;
}
/*
 *	Find a connected X.25 socket given my LCI and neighbour.
 */
static struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb)
{
    struct sock *s;
    struct hlist_node *node;

    sk_for_each(s, node, &x25_list)
        if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb) {
            sock_hold(s);
            goto found;
        }
    s = NULL;
found:
    return s;
}

struct sock *x25_find_socket(unsigned int lci, struct x25_neigh *nb)
{
    struct sock *s;

    read_lock_bh(&x25_list_lock);
    s = __x25_find_socket(lci, nb);
    read_unlock_bh(&x25_list_lock);
    return s;
}
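/*
 *	Logical channel identifiers are twelve bits wide, so the search for a
 *	free LCI below covers 1..4095; 0 is returned when the neighbour has
 *	no free logical channel left.
 */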
/*
 *	Find a unique LCI for a given device.
 */
static unsigned int x25_new_lci(struct x25_neigh *nb)
{
    unsigned int lci = 1;
    struct sock *sk;

    read_lock_bh(&x25_list_lock);

    while ((sk = __x25_find_socket(lci, nb)) != NULL) {
        sock_put(sk);
        if (++lci == 4096) {
            lci = 0;
            break;
        }
    }

    read_unlock_bh(&x25_list_lock);
    return lci;
}
static void __x25_destroy_socket(struct sock *);

/*
 *	handler for deferred kills.
 */
static void x25_destroy_timer(unsigned long data)
{
    x25_destroy_socket_from_timer((struct sock *)data);
}
/*
 *	This is called from user mode and the timers. Thus it protects itself
 *	against interrupt users but doesn't worry about being called during
 *	work. Once it is removed from the queue no interrupt or bottom half
 *	will touch it and we are (fairly 8-) ) safe.
 *	Not static as it's used by the timer
 */
static void __x25_destroy_socket(struct sock *sk)
{
    struct sk_buff *skb;

    x25_stop_heartbeat(sk);
    x25_stop_timer(sk);

    x25_remove_socket(sk);
    x25_clear_queues(sk);		/* Flush the queues */

    while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
        if (skb->sk != sk) {		/* A pending connection */
            /*
             * Queue the unaccepted socket for death
             */
            sock_set_flag(skb->sk, SOCK_DEAD);
            x25_start_heartbeat(skb->sk);
            x25_sk(skb->sk)->state = X25_STATE_0;
        }

        kfree_skb(skb);
    }

    if (sk_has_allocations(sk)) {
        /* Defer: outstanding buffers */
        sk->sk_timer.expires  = jiffies + 10 * HZ;
        sk->sk_timer.function = x25_destroy_timer;
        sk->sk_timer.data     = (unsigned long)sk;
        add_timer(&sk->sk_timer);
    } else {
        /* drop last reference so sock_put will free */
        __sock_put(sk);
    }
}
void x25_destroy_socket_from_timer(struct sock *sk)
{
    sock_hold(sk);
    bh_lock_sock(sk);
    __x25_destroy_socket(sk);
    bh_unlock_sock(sk);
    sock_put(sk);
}
static void x25_destroy_socket(struct sock *sk)
{
    sock_hold(sk);
    lock_sock(sk);
    __x25_destroy_socket(sk);
    release_sock(sk);
    sock_put(sk);
}
/*
 *	Handling for system calls applied via the various interfaces to a
 *	X.25 socket object.
 */
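/*
 *	The only X.25-level socket option is X25_QBITINCL.  When it is set,
 *	the first byte of the user data passed through sendmsg()/recvmsg()
 *	carries the logical value of the Q bit rather than payload.
 */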
static int x25_setsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, int optlen)
{
    int opt;
    struct sock *sk = sock->sk;
    int rc = -ENOPROTOOPT;

    if (level != SOL_X25 || optname != X25_QBITINCL)
        goto out;

    rc = -EINVAL;
    if (optlen < sizeof(int))
        goto out;

    rc = -EFAULT;
    if (get_user(opt, (int __user *)optval))
        goto out;

    x25_sk(sk)->qbitincl = !!opt;
    rc = 0;
out:
    return rc;
}
static int x25_getsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, int __user *optlen)
{
    struct sock *sk = sock->sk;
    int val, len, rc = -ENOPROTOOPT;

    if (level != SOL_X25 || optname != X25_QBITINCL)
        goto out;

    rc = -EFAULT;
    if (get_user(len, optlen))
        goto out;

    len = min_t(unsigned int, len, sizeof(int));

    rc = -EINVAL;
    if (len < 0)
        goto out;

    rc = -EFAULT;
    if (put_user(len, optlen))
        goto out;

    val = x25_sk(sk)->qbitincl;
    rc = copy_to_user(optval, &val, len) ? -EFAULT : 0;
out:
    return rc;
}
static int x25_listen(struct socket *sock, int backlog)
{
    struct sock *sk = sock->sk;
    int rc = -EOPNOTSUPP;

    if (sk->sk_state != TCP_LISTEN) {
        memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
        sk->sk_max_ack_backlog = backlog;
        sk->sk_state           = TCP_LISTEN;
        rc = 0;
    }

    return rc;
}
static struct proto x25_proto = {
    .name     = "X25",
    .owner    = THIS_MODULE,
    .obj_size = sizeof(struct x25_sock),
};
static struct sock *x25_alloc_socket(struct net *net)
{
    struct x25_sock *x25;
    struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto);

    if (!sk)
        goto out;

    sock_init_data(NULL, sk);

    x25 = x25_sk(sk);
    skb_queue_head_init(&x25->ack_queue);
    skb_queue_head_init(&x25->fragment_queue);
    skb_queue_head_init(&x25->interrupt_in_queue);
    skb_queue_head_init(&x25->interrupt_out_queue);
out:
    return sk;
}
static int x25_create(struct net *net, struct socket *sock, int protocol)
{
    struct sock *sk;
    struct x25_sock *x25;
    int rc = -ESOCKTNOSUPPORT;

    if (net != &init_net)
        return -EAFNOSUPPORT;

    if (sock->type != SOCK_SEQPACKET || protocol)
        goto out;

    rc = -ENOMEM;
    if ((sk = x25_alloc_socket(net)) == NULL)
        goto out;

    x25 = x25_sk(sk);

    sock_init_data(sock, sk);

    x25_init_timers(sk);

    sock->ops          = &x25_proto_ops;
    sk->sk_protocol    = protocol;
    sk->sk_backlog_rcv = x25_backlog_rcv;

    x25->t21   = sysctl_x25_call_request_timeout;
    x25->t22   = sysctl_x25_reset_request_timeout;
    x25->t23   = sysctl_x25_clear_request_timeout;
    x25->t2    = sysctl_x25_ack_holdback_timeout;
    x25->state = X25_STATE_0;
    x25->cudmatchlength = 0;
    x25->accptapprv = X25_DENY_ACCPT_APPRV;	/* normally no cud */

    x25->facilities.winsize_in  = X25_DEFAULT_WINDOW_SIZE;
    x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE;
    x25->facilities.pacsize_in  = X25_DEFAULT_PACKET_SIZE;
    x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE;
    x25->facilities.throughput  = X25_DEFAULT_THROUGHPUT;
    x25->facilities.reverse     = X25_DEFAULT_REVERSE;
    x25->dte_facilities.calling_len = 0;
    x25->dte_facilities.called_len  = 0;
    memset(x25->dte_facilities.called_ae, '\0',
           sizeof(x25->dte_facilities.called_ae));
    memset(x25->dte_facilities.calling_ae, '\0',
           sizeof(x25->dte_facilities.calling_ae));

    rc = 0;
out:
    return rc;
}
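/*
 *	x25_make_new() clones a listening socket for an incoming call: the
 *	child inherits the parent's buffer sizes, timers, facilities and
 *	call-user-data match length, and starts life in TCP_ESTABLISHED.
 */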
static struct sock *x25_make_new(struct sock *osk)
{
    struct sock *sk = NULL;
    struct x25_sock *x25, *ox25;

    if (osk->sk_type != SOCK_SEQPACKET)
        goto out;

    if ((sk = x25_alloc_socket(sock_net(osk))) == NULL)
        goto out;

    x25 = x25_sk(sk);

    sk->sk_type        = osk->sk_type;
    sk->sk_priority    = osk->sk_priority;
    sk->sk_protocol    = osk->sk_protocol;
    sk->sk_rcvbuf      = osk->sk_rcvbuf;
    sk->sk_sndbuf      = osk->sk_sndbuf;
    sk->sk_state       = TCP_ESTABLISHED;
    sk->sk_backlog_rcv = osk->sk_backlog_rcv;
    sock_copy_flags(sk, osk);

    ox25 = x25_sk(osk);
    x25->t21            = ox25->t21;
    x25->t22            = ox25->t22;
    x25->t23            = ox25->t23;
    x25->t2             = ox25->t2;
    x25->facilities     = ox25->facilities;
    x25->qbitincl       = ox25->qbitincl;
    x25->dte_facilities = ox25->dte_facilities;
    x25->cudmatchlength = ox25->cudmatchlength;
    x25->accptapprv     = ox25->accptapprv;

    x25_init_timers(sk);
out:
    return sk;
}
static int x25_release(struct socket *sock)
{
    struct sock *sk = sock->sk;
    struct x25_sock *x25;

    if (!sk)
        goto out;

    x25 = x25_sk(sk);

    switch (x25->state) {
        case X25_STATE_0:
        case X25_STATE_2:
            x25_disconnect(sk, 0, 0, 0);
            x25_destroy_socket(sk);
            goto out;

        case X25_STATE_1:
        case X25_STATE_3:
        case X25_STATE_4:
            x25_clear_queues(sk);
            x25_write_internal(sk, X25_CLEAR_REQUEST);
            x25_start_t23timer(sk);
            x25->state = X25_STATE_2;
            sk->sk_state     = TCP_CLOSE;
            sk->sk_shutdown |= SEND_SHUTDOWN;
            sk->sk_state_change(sk);
            sock_set_flag(sk, SOCK_DEAD);
            sock_set_flag(sk, SOCK_DESTROY);
            break;
    }

    sock_orphan(sk);
out:
    return 0;
}
static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
    struct sock *sk = sock->sk;
    struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;

    if (!sock_flag(sk, SOCK_ZAPPED) ||
        addr_len != sizeof(struct sockaddr_x25) ||
        addr->sx25_family != AF_X25)
        return -EINVAL;

    x25_sk(sk)->source_addr = addr->sx25_addr;
    x25_insert_socket(sk);
    sock_reset_flag(sk, SOCK_ZAPPED);
    SOCK_DEBUG(sk, "x25_bind: socket is bound\n");

    return 0;
}
static int x25_wait_for_connection_establishment(struct sock *sk)
{
    DECLARE_WAITQUEUE(wait, current);
    int rc;

    add_wait_queue_exclusive(sk->sk_sleep, &wait);
    for (;;) {
        __set_current_state(TASK_INTERRUPTIBLE);
        rc = -ERESTARTSYS;
        if (signal_pending(current))
            break;
        rc = sock_error(sk);
        if (rc) {
            sk->sk_socket->state = SS_UNCONNECTED;
            break;
        }
        rc = 0;
        if (sk->sk_state != TCP_ESTABLISHED) {
            release_sock(sk);
            schedule();
            lock_sock(sk);
        } else
            break;
    }
    __set_current_state(TASK_RUNNING);
    remove_wait_queue(sk->sk_sleep, &wait);
    return rc;
}
static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
                       int addr_len, int flags)
{
    struct sock *sk = sock->sk;
    struct x25_sock *x25 = x25_sk(sk);
    struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
    struct x25_route *rt;
    int rc = 0;

    lock_sock(sk);
    if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
        sock->state = SS_CONNECTED;
        goto out;	/* Connect completed during a ERESTARTSYS event */
    }

    rc = -ECONNREFUSED;
    if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
        sock->state = SS_UNCONNECTED;
        goto out;
    }

    rc = -EISCONN;	/* No reconnect on a seqpacket socket */
    if (sk->sk_state == TCP_ESTABLISHED)
        goto out;

    sk->sk_state = TCP_CLOSE;
    sock->state  = SS_UNCONNECTED;

    rc = -EINVAL;
    if (addr_len != sizeof(struct sockaddr_x25) ||
        addr->sx25_family != AF_X25)
        goto out;

    rc = -ENETUNREACH;
    rt = x25_get_route(&addr->sx25_addr);
    if (!rt)
        goto out;

    x25->neighbour = x25_get_neigh(rt->dev);
    if (!x25->neighbour)
        goto out_put_route;

    x25_limit_facilities(&x25->facilities, x25->neighbour);

    x25->lci = x25_new_lci(x25->neighbour);
    if (!x25->lci)
        goto out_put_neigh;

    rc = -EINVAL;
    if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
        goto out_put_neigh;

    if (!strcmp(x25->source_addr.x25_addr, null_x25_address.x25_addr))
        memset(&x25->source_addr, '\0', X25_ADDR_LEN);

    x25->dest_addr = addr->sx25_addr;

    /* Move to connecting socket, start sending Connect Requests */
    sock->state  = SS_CONNECTING;
    sk->sk_state = TCP_SYN_SENT;

    x25->state = X25_STATE_1;

    x25_write_internal(sk, X25_CALL_REQUEST);

    x25_start_heartbeat(sk);
    x25_start_t21timer(sk);

    rc = -EINPROGRESS;
    if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
        goto out_put_neigh;

    rc = x25_wait_for_connection_establishment(sk);
    if (rc)
        goto out_put_neigh;

    sock->state = SS_CONNECTED;
    rc = 0;
out_put_neigh:
    if (rc)
        x25_neigh_put(x25->neighbour);
out_put_route:
    x25_put_route(rt);
out:
    release_sock(sk);
    return rc;
}
static int x25_wait_for_data(struct sock *sk, long timeout)
{
    DECLARE_WAITQUEUE(wait, current);
    int rc = 0;

    add_wait_queue_exclusive(sk->sk_sleep, &wait);
    for (;;) {
        __set_current_state(TASK_INTERRUPTIBLE);
        if (sk->sk_shutdown & RCV_SHUTDOWN)
            break;
        rc = -ERESTARTSYS;
        if (signal_pending(current))
            break;
        rc = -EAGAIN;
        if (!timeout)
            break;
        rc = 0;
        if (skb_queue_empty(&sk->sk_receive_queue)) {
            release_sock(sk);
            timeout = schedule_timeout(timeout);
            lock_sock(sk);
        } else
            break;
    }
    __set_current_state(TASK_RUNNING);
    remove_wait_queue(sk->sk_sleep, &wait);
    return rc;
}
static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
{
    struct sock *sk = sock->sk;
    struct sock *newsk;
    struct sk_buff *skb;
    int rc = -EINVAL;

    if (!sk || sk->sk_state != TCP_LISTEN)
        goto out;

    rc = -EOPNOTSUPP;
    if (sk->sk_type != SOCK_SEQPACKET)
        goto out;

    lock_sock(sk);
    rc = x25_wait_for_data(sk, sk->sk_rcvtimeo);
    if (rc)
        goto out2;
    skb = skb_dequeue(&sk->sk_receive_queue);
    rc = -EINVAL;
    if (!skb->sk)
        goto out2;
    newsk = skb->sk;
    sock_graft(newsk, newsock);

    /* Now attach up the new socket */
    skb->sk = NULL;
    kfree_skb(skb);
    sk->sk_ack_backlog--;
    newsock->state = SS_CONNECTED;
    rc = 0;
out2:
    release_sock(sk);
out:
    return rc;
}
static int x25_getname(struct socket *sock, struct sockaddr *uaddr,
                       int *uaddr_len, int peer)
{
    struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)uaddr;
    struct sock *sk = sock->sk;
    struct x25_sock *x25 = x25_sk(sk);

    if (peer) {
        if (sk->sk_state != TCP_ESTABLISHED)
            return -ENOTCONN;
        sx25->sx25_addr = x25->dest_addr;
    } else
        sx25->sx25_addr = x25->source_addr;

    sx25->sx25_family = AF_X25;
    *uaddr_len = sizeof(*sx25);

    return 0;
}
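/*
 *	An incoming Call Request is turned into a new, already established
 *	socket which is queued on the listener's receive queue; x25_accept()
 *	later dequeues it and grafts it onto the accepting struct socket.
 */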
int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
                        unsigned int lci)
{
    struct sock *sk;
    struct sock *make;
    struct x25_sock *makex25;
    struct x25_address source_addr, dest_addr;
    struct x25_facilities facilities;
    struct x25_dte_facilities dte_facilities;
    int len, addr_len, rc;

    /*
     *	Remove the LCI and frame type.
     */
    skb_pull(skb, X25_STD_MIN_LEN);

    /*
     *	Extract the X.25 addresses and convert them to ASCII strings,
     *	and remove them.
     */
    addr_len = x25_addr_ntoa(skb->data, &source_addr, &dest_addr);
    skb_pull(skb, addr_len);

    /*
     *	Get the length of the facilities, skip past them for the moment
     *	get the call user data because this is needed to determine
     *	the correct listener
     */
    len = skb->data[0] + 1;
    skb_pull(skb, len);

    /*
     *	Find a listener for the particular address/cud pair.
     */
    sk = x25_find_listener(&source_addr, skb);
    skb_push(skb, len);

    if (sk != NULL && sk_acceptq_is_full(sk)) {
        goto out_sock_put;
    }

    /*
     *	We dont have any listeners for this incoming call.
     *	Try forwarding it.
     */
    if (sk == NULL) {
        skb_push(skb, addr_len + X25_STD_MIN_LEN);
        if (sysctl_x25_forward &&
            x25_forward_call(&dest_addr, nb, skb, lci) > 0) {
            /* Call was forwarded, dont process it any more */
            kfree_skb(skb);
            rc = 1;
            goto out;
        } else {
            /* No listeners, can't forward, clear the call */
            goto out_clear_request;
        }
    }

    /*
     *	Try to reach a compromise on the requested facilities.
     */
    len = x25_negotiate_facilities(skb, sk, &facilities, &dte_facilities);
    if (len == -1)
        goto out_sock_put;

    /*
     * current neighbour/link might impose additional limits
     * on certain facilties
     */
    x25_limit_facilities(&facilities, nb);

    /*
     *	Try to create a new socket.
     */
    make = x25_make_new(sk);
    if (!make)
        goto out_sock_put;

    /*
     *	Remove the facilities
     */
    skb_pull(skb, len);

    skb->sk = make;
    make->sk_state = TCP_ESTABLISHED;

    makex25 = x25_sk(make);
    makex25->lci            = lci;
    makex25->dest_addr      = dest_addr;
    makex25->source_addr    = source_addr;
    makex25->neighbour      = nb;
    makex25->facilities     = facilities;
    makex25->dte_facilities = dte_facilities;
    makex25->vc_facil_mask  = x25_sk(sk)->vc_facil_mask;
    /* ensure no reverse facil on accept */
    makex25->vc_facil_mask &= ~X25_MASK_REVERSE;
    /* ensure no calling address extension on accept */
    makex25->vc_facil_mask &= ~X25_MASK_CALLING_AE;
    makex25->cudmatchlength = x25_sk(sk)->cudmatchlength;

    /* Normally all calls are accepted immediatly */
    if (makex25->accptapprv & X25_DENY_ACCPT_APPRV) {
        x25_write_internal(make, X25_CALL_ACCEPTED);
        makex25->state = X25_STATE_3;
    }

    /*
     *	Incoming Call User Data.
     */
    skb_copy_from_linear_data(skb, makex25->calluserdata.cuddata, skb->len);
    makex25->calluserdata.cudlength = skb->len;

    sk->sk_ack_backlog++;

    x25_insert_socket(make);

    skb_queue_head(&sk->sk_receive_queue, skb);

    x25_start_heartbeat(make);

    if (!sock_flag(sk, SOCK_DEAD))
        sk->sk_data_ready(sk, skb->len);
    rc = 1;
    sock_put(sk);
out:
    return rc;
out_sock_put:
    sock_put(sk);
out_clear_request:
    rc = 0;
    x25_transmit_clear_request(nb, lci, 0x01);
    goto out;
}
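/*
 *	Every data or interrupt packet built below starts with the General
 *	Format Identifier in the top nibble of the first octet (modulo-8 or
 *	modulo-128 sequencing), the 12-bit logical channel identifier spread
 *	across the first two octets, and then the packet type identifier.
 */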
static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
                       struct msghdr *msg, size_t len)
{
    struct sock *sk = sock->sk;
    struct x25_sock *x25 = x25_sk(sk);
    struct sockaddr_x25 *usx25 = (struct sockaddr_x25 *)msg->msg_name;
    struct sockaddr_x25 sx25;
    struct sk_buff *skb;
    unsigned char *asmptr;
    int noblock = msg->msg_flags & MSG_DONTWAIT;
    size_t size;
    int qbit = 0, rc = -EINVAL;

    if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_OOB|MSG_EOR|MSG_CMSG_COMPAT))
        goto out;

    /* we currently don't support segmented records at the user interface */
    if (!(msg->msg_flags & (MSG_EOR|MSG_OOB)))
        goto out;

    rc = -EADDRNOTAVAIL;
    if (sock_flag(sk, SOCK_ZAPPED))
        goto out;

    rc = -EPIPE;
    if (sk->sk_shutdown & SEND_SHUTDOWN) {
        send_sig(SIGPIPE, current, 0);
        goto out;
    }

    rc = -ENETUNREACH;
    if (!x25->neighbour)
        goto out;

    if (usx25) {
        rc = -EINVAL;
        if (msg->msg_namelen < sizeof(sx25))
            goto out;
        memcpy(&sx25, usx25, sizeof(sx25));
        rc = -EISCONN;
        if (strcmp(x25->dest_addr.x25_addr, sx25.sx25_addr.x25_addr))
            goto out;
        rc = -EINVAL;
        if (sx25.sx25_family != AF_X25)
            goto out;
    } else {
        /*
         *	FIXME 1003.1g - if the socket is like this because
         *	it has become closed (not started closed) we ought
         *	to SIGPIPE, EPIPE;
         */
        rc = -ENOTCONN;
        if (sk->sk_state != TCP_ESTABLISHED)
            goto out;

        sx25.sx25_family = AF_X25;
        sx25.sx25_addr   = x25->dest_addr;
    }

    /* Sanity check the packet size */

    SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n");

    /* Build a packet */
    SOCK_DEBUG(sk, "x25_sendmsg: sendto: building packet.\n");

    if ((msg->msg_flags & MSG_OOB) && len > 32)
        len = 32;

    size = len + X25_MAX_L2_LEN + X25_EXT_MIN_LEN;

    skb = sock_alloc_send_skb(sk, size, noblock, &rc);
    if (!skb)
        goto out;
    X25_SKB_CB(skb)->flags = msg->msg_flags;

    skb_reserve(skb, X25_MAX_L2_LEN + X25_EXT_MIN_LEN);

    /*
     *	Put the data on the end
     */
    SOCK_DEBUG(sk, "x25_sendmsg: Copying user data\n");

    skb_reset_transport_header(skb);
    skb_put(skb, len);

    rc = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
    if (rc)
        goto out_kfree_skb;

    /*
     *	If the Q BIT Include socket option is in force, the first
     *	byte of the user data is the logical value of the Q Bit.
     */
    if (x25->qbitincl) {
        qbit = skb->data[0];
        skb_pull(skb, 1);
    }

    /*
     *	Push down the X.25 header
     */
    SOCK_DEBUG(sk, "x25_sendmsg: Building X.25 Header.\n");

    if (msg->msg_flags & MSG_OOB) {
        if (x25->neighbour->extended) {
            asmptr    = skb_push(skb, X25_STD_MIN_LEN);
            *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ;
            *asmptr++ = (x25->lci >> 0) & 0xFF;
            *asmptr++ = X25_INTERRUPT;
        } else {
            asmptr    = skb_push(skb, X25_STD_MIN_LEN);
            *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ;
            *asmptr++ = (x25->lci >> 0) & 0xFF;
            *asmptr++ = X25_INTERRUPT;
        }
    } else {
        if (x25->neighbour->extended) {
            /* Build an Extended X.25 header */
            asmptr    = skb_push(skb, X25_EXT_MIN_LEN);
            *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ;
            *asmptr++ = (x25->lci >> 0) & 0xFF;
            *asmptr++ = X25_DATA;
            *asmptr++ = X25_DATA;
        } else {
            /* Build an Standard X.25 header */
            asmptr    = skb_push(skb, X25_STD_MIN_LEN);
            *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ;
            *asmptr++ = (x25->lci >> 0) & 0xFF;
            *asmptr++ = X25_DATA;
        }

        if (qbit)
            skb->data[0] |= X25_Q_BIT;
    }

    SOCK_DEBUG(sk, "x25_sendmsg: Built header.\n");
    SOCK_DEBUG(sk, "x25_sendmsg: Transmitting buffer\n");

    rc = -ENOTCONN;
    if (sk->sk_state != TCP_ESTABLISHED)
        goto out_kfree_skb;

    if (msg->msg_flags & MSG_OOB)
        skb_queue_tail(&x25->interrupt_out_queue, skb);
    else {
        rc = x25_output(sk, skb);
        len = rc;
        if (rc < 0)
            kfree_skb(skb);
        else if (x25->qbitincl)
            len++;
    }

    /*
     * lock_sock() is currently only used to serialize this x25_kick()
     * against input-driven x25_kick() calls. It currently only blocks
     * incoming packets for this socket and does not protect against
     * any other socket state changes and is not called from anywhere
     * else. As x25_kick() cannot block and as long as all socket
     * operations are BKL-wrapped, we don't need take to care about
     * purging the backlog queue in x25_release().
     *
     * Using lock_sock() to protect all socket operations entirely
     * (and making the whole x25 stack SMP aware) unfortunately would
     * require major changes to {send,recv}msg and skb allocation methods.
     */
    lock_sock(sk);
    x25_kick(sk);
    release_sock(sk);
    rc = len;
out:
    return rc;
out_kfree_skb:
    kfree_skb(skb);
    goto out;
}
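/*
 *	On receive the network-layer header is stripped again (three octets
 *	with modulo-8 sequencing, four with modulo-128); when X25_QBITINCL
 *	is set, a leading byte carrying the received Q bit value is pushed
 *	back in front of the user data before it is copied out.
 */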
static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
                       struct msghdr *msg, size_t size,
                       int flags)
{
    struct sock *sk = sock->sk;
    struct x25_sock *x25 = x25_sk(sk);
    struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name;
    size_t copied;
    int qbit;
    struct sk_buff *skb;
    unsigned char *asmptr;
    int rc = -ENOTCONN;

    /*
     * This works for seqpacket too. The receiver has ordered the queue for
     * us! We do one quick check first though
     */
    if (sk->sk_state != TCP_ESTABLISHED)
        goto out;

    if (flags & MSG_OOB) {
        rc = -EINVAL;
        if (sock_flag(sk, SOCK_URGINLINE) ||
            !skb_peek(&x25->interrupt_in_queue))
            goto out;

        skb = skb_dequeue(&x25->interrupt_in_queue);

        skb_pull(skb, X25_STD_MIN_LEN);

        /*
         *	No Q bit information on Interrupt data.
         */
        if (x25->qbitincl) {
            asmptr  = skb_push(skb, 1);
            *asmptr = 0x00;
        }

        msg->msg_flags |= MSG_OOB;
    } else {
        /* Now we can treat all alike */
        skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
                                flags & MSG_DONTWAIT, &rc);
        if (!skb)
            goto out;

        qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT;

        skb_pull(skb, x25->neighbour->extended ?
                      X25_EXT_MIN_LEN : X25_STD_MIN_LEN);

        if (x25->qbitincl) {
            asmptr  = skb_push(skb, 1);
            *asmptr = qbit;
        }
    }

    skb_reset_transport_header(skb);
    copied = skb->len;

    if (copied > size) {
        copied = size;
        msg->msg_flags |= MSG_TRUNC;
    }

    /* Currently, each datagram always contains a complete record */
    msg->msg_flags |= MSG_EOR;

    rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
    if (rc)
        goto out_free_dgram;

    if (sx25) {
        sx25->sx25_family = AF_X25;
        sx25->sx25_addr   = x25->dest_addr;
    }

    msg->msg_namelen = sizeof(struct sockaddr_x25);

    rc = copied;
out_free_dgram:
    skb_free_datagram(sk, skb);
out:
    return rc;
}
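/*
 *	Beyond the generic TIOCOUTQ/TIOCINQ and timestamp requests, the socket
 *	ioctls below manage routes and subscriptions (CAP_NET_ADMIN only for
 *	the setters), per-call facilities, call user data, the last received
 *	cause/diagnostic, and the explicit call-accept approval handshake.
 */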
static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
    struct sock *sk = sock->sk;
    struct x25_sock *x25 = x25_sk(sk);
    void __user *argp = (void __user *)arg;
    int rc;

    switch (cmd) {
        case TIOCOUTQ: {
            int amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);

            if (amount < 0)
                amount = 0;
            rc = put_user(amount, (unsigned int __user *)argp);
            break;
        }

        case TIOCINQ: {
            struct sk_buff *skb;
            int amount = 0;
            /*
             * These two are safe on a single CPU system as
             * only user tasks fiddle here
             */
            if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
                amount = skb->len;
            rc = put_user(amount, (unsigned int __user *)argp);
            break;
        }

        case SIOCGSTAMP:
            rc = -EINVAL;
            if (sk)
                rc = sock_get_timestamp(sk,
                        (struct timeval __user *)argp);
            break;
        case SIOCGSTAMPNS:
            rc = -EINVAL;
            if (sk)
                rc = sock_get_timestampns(sk,
                        (struct timespec __user *)argp);
            break;
        case SIOCGIFDSTADDR:
        case SIOCSIFDSTADDR:
        case SIOCGIFBRDADDR:
        case SIOCSIFBRDADDR:
        case SIOCGIFNETMASK:
        case SIOCSIFNETMASK:
            rc = -EINVAL;
            break;
        case SIOCADDRT:
        case SIOCDELRT:
            rc = -EPERM;
            if (!capable(CAP_NET_ADMIN))
                break;
            rc = x25_route_ioctl(cmd, argp);
            break;
        case SIOCX25GSUBSCRIP:
            rc = x25_subscr_ioctl(cmd, argp);
            break;
        case SIOCX25SSUBSCRIP:
            rc = -EPERM;
            if (!capable(CAP_NET_ADMIN))
                break;
            rc = x25_subscr_ioctl(cmd, argp);
            break;
        case SIOCX25GFACILITIES: {
            struct x25_facilities fac = x25->facilities;
            rc = copy_to_user(argp, &fac,
                              sizeof(fac)) ? -EFAULT : 0;
            break;
        }

        case SIOCX25SFACILITIES: {
            struct x25_facilities facilities;
            rc = -EFAULT;
            if (copy_from_user(&facilities, argp,
                               sizeof(facilities)))
                break;
            rc = -EINVAL;
            if (sk->sk_state != TCP_LISTEN &&
                sk->sk_state != TCP_CLOSE)
                break;
            if (facilities.pacsize_in < X25_PS16 ||
                facilities.pacsize_in > X25_PS4096)
                break;
            if (facilities.pacsize_out < X25_PS16 ||
                facilities.pacsize_out > X25_PS4096)
                break;
            if (facilities.winsize_in < 1 ||
                facilities.winsize_in > 127)
                break;
            if (facilities.throughput < 0x03 ||
                facilities.throughput > 0xDD)
                break;
            if (facilities.reverse &&
                (facilities.reverse | 0x81) != 0x81)
                break;
            x25->facilities = facilities;
            rc = 0;
            break;
        }

        case SIOCX25GDTEFACILITIES: {
            rc = copy_to_user(argp, &x25->dte_facilities,
                              sizeof(x25->dte_facilities));
            if (rc)
                rc = -EFAULT;
            break;
        }

        case SIOCX25SDTEFACILITIES: {
            struct x25_dte_facilities dtefacs;
            rc = -EFAULT;
            if (copy_from_user(&dtefacs, argp, sizeof(dtefacs)))
                break;
            rc = -EINVAL;
            if (sk->sk_state != TCP_LISTEN &&
                sk->sk_state != TCP_CLOSE)
                break;
            if (dtefacs.calling_len > X25_MAX_AE_LEN)
                break;
            if (dtefacs.calling_ae == NULL)
                break;
            if (dtefacs.called_len > X25_MAX_AE_LEN)
                break;
            if (dtefacs.called_ae == NULL)
                break;
            x25->dte_facilities = dtefacs;
            rc = 0;
            break;
        }

        case SIOCX25GCALLUSERDATA: {
            struct x25_calluserdata cud = x25->calluserdata;
            rc = copy_to_user(argp, &cud,
                              sizeof(cud)) ? -EFAULT : 0;
            break;
        }

        case SIOCX25SCALLUSERDATA: {
            struct x25_calluserdata calluserdata;

            rc = -EFAULT;
            if (copy_from_user(&calluserdata, argp,
                               sizeof(calluserdata)))
                break;
            rc = -EINVAL;
            if (calluserdata.cudlength > X25_MAX_CUD_LEN)
                break;
            x25->calluserdata = calluserdata;
            rc = 0;
            break;
        }

        case SIOCX25GCAUSEDIAG: {
            struct x25_causediag causediag;
            causediag = x25->causediag;
            rc = copy_to_user(argp, &causediag,
                              sizeof(causediag)) ? -EFAULT : 0;
            break;
        }

        case SIOCX25SCUDMATCHLEN: {
            struct x25_subaddr sub_addr;
            rc = -EINVAL;
            if (sk->sk_state != TCP_CLOSE)
                break;
            rc = -EFAULT;
            if (copy_from_user(&sub_addr, argp,
                               sizeof(sub_addr)))
                break;
            rc = -EINVAL;
            if (sub_addr.cudmatchlength > X25_MAX_CUD_LEN)
                break;
            x25->cudmatchlength = sub_addr.cudmatchlength;
            rc = 0;
            break;
        }

        case SIOCX25CALLACCPTAPPRV: {
            rc = -EINVAL;
            if (sk->sk_state != TCP_CLOSE)
                break;
            x25->accptapprv = X25_ALLOW_ACCPT_APPRV;
            rc = 0;
            break;
        }

        case SIOCX25SENDCALLACCPT: {
            rc = -EINVAL;
            if (sk->sk_state != TCP_ESTABLISHED)
                break;
            if (x25->accptapprv)	/* must call accptapprv above */
                break;
            x25_write_internal(sk, X25_CALL_ACCEPTED);
            x25->state = X25_STATE_3;
            rc = 0;
            break;
        }

        default:
            rc = -ENOIOCTLCMD;
            break;
    }

    return rc;
}
static struct net_proto_family x25_family_ops = {
    .family = AF_X25,
    .create = x25_create,
    .owner  = THIS_MODULE,
};
#ifdef CONFIG_COMPAT
static int compat_x25_subscr_ioctl(unsigned int cmd,
        struct compat_x25_subscrip_struct __user *x25_subscr32)
{
    struct compat_x25_subscrip_struct x25_subscr;
    struct x25_neigh *nb;
    struct net_device *dev;
    int rc = -EINVAL;

    rc = -EFAULT;
    if (copy_from_user(&x25_subscr, x25_subscr32, sizeof(*x25_subscr32)))
        goto out;

    rc = -EINVAL;
    dev = x25_dev_get(x25_subscr.device);
    if (dev == NULL)
        goto out;

    nb = x25_get_neigh(dev);
    if (!nb)
        goto out_dev_put;

    dev_put(dev);

    if (cmd == SIOCX25GSUBSCRIP) {
        x25_subscr.extended = nb->extended;
        x25_subscr.global_facil_mask = nb->global_facil_mask;
        rc = copy_to_user(x25_subscr32, &x25_subscr,
                          sizeof(*x25_subscr32)) ? -EFAULT : 0;
    } else {
        rc = -EINVAL;
        if (x25_subscr.extended == 0 || x25_subscr.extended == 1) {
            rc = 0;
            nb->extended = x25_subscr.extended;
            nb->global_facil_mask = x25_subscr.global_facil_mask;
        }
    }
    x25_neigh_put(nb);
out:
    return rc;
out_dev_put:
    dev_put(dev);
    goto out;
}
static int compat_x25_ioctl(struct socket *sock, unsigned int cmd,
                            unsigned long arg)
{
    void __user *argp = compat_ptr(arg);
    struct sock *sk = sock->sk;

    int rc = -ENOIOCTLCMD;

    switch (cmd) {
    case TIOCOUTQ:
    case TIOCINQ:
        rc = x25_ioctl(sock, cmd, (unsigned long)argp);
        break;
    case SIOCGSTAMP:
        rc = -EINVAL;
        if (sk)
            rc = compat_sock_get_timestamp(sk,
                    (struct timeval __user *)argp);
        break;
    case SIOCGSTAMPNS:
        rc = -EINVAL;
        if (sk)
            rc = compat_sock_get_timestampns(sk,
                    (struct timespec __user *)argp);
        break;
    case SIOCGIFDSTADDR:
    case SIOCSIFDSTADDR:
    case SIOCGIFBRDADDR:
    case SIOCSIFBRDADDR:
    case SIOCGIFNETMASK:
    case SIOCSIFNETMASK:
        rc = -EINVAL;
        break;
    case SIOCADDRT:
    case SIOCDELRT:
        rc = -EPERM;
        if (!capable(CAP_NET_ADMIN))
            break;
        rc = x25_route_ioctl(cmd, argp);
        break;
    case SIOCX25GSUBSCRIP:
        rc = compat_x25_subscr_ioctl(cmd, argp);
        break;
    case SIOCX25SSUBSCRIP:
        rc = -EPERM;
        if (!capable(CAP_NET_ADMIN))
            break;
        rc = compat_x25_subscr_ioctl(cmd, argp);
        break;
    case SIOCX25GFACILITIES:
    case SIOCX25SFACILITIES:
    case SIOCX25GDTEFACILITIES:
    case SIOCX25SDTEFACILITIES:
    case SIOCX25GCALLUSERDATA:
    case SIOCX25SCALLUSERDATA:
    case SIOCX25GCAUSEDIAG:
    case SIOCX25SCUDMATCHLEN:
    case SIOCX25CALLACCPTAPPRV:
    case SIOCX25SENDCALLACCPT:
        rc = x25_ioctl(sock, cmd, (unsigned long)argp);
        break;
    default:
        rc = -ENOIOCTLCMD;
        break;
    }
    return rc;
}
#endif
static const struct proto_ops SOCKOPS_WRAPPED(x25_proto_ops) = {
    .family     = AF_X25,
    .owner      = THIS_MODULE,
    .release    = x25_release,
    .bind       = x25_bind,
    .connect    = x25_connect,
    .socketpair = sock_no_socketpair,
    .accept     = x25_accept,
    .getname    = x25_getname,
    .poll       = datagram_poll,
    .ioctl      = x25_ioctl,
#ifdef CONFIG_COMPAT
    .compat_ioctl = compat_x25_ioctl,
#endif
    .listen     = x25_listen,
    .shutdown   = sock_no_shutdown,
    .setsockopt = x25_setsockopt,
    .getsockopt = x25_getsockopt,
    .sendmsg    = x25_sendmsg,
    .recvmsg    = x25_recvmsg,
    .mmap       = sock_no_mmap,
    .sendpage   = sock_no_sendpage,
};

SOCKOPS_WRAP(x25_proto, AF_X25);
static struct packet_type x25_packet_type __read_mostly = {
    .type = cpu_to_be16(ETH_P_X25),
    .func = x25_lapb_receive_frame,
};

static struct notifier_block x25_dev_notifier = {
    .notifier_call = x25_device_event,
};
void x25_kill_by_neigh(struct x25_neigh *nb)
{
    struct sock *s;
    struct hlist_node *node;

    write_lock_bh(&x25_list_lock);

    sk_for_each(s, node, &x25_list)
        if (x25_sk(s)->neighbour == nb)
            x25_disconnect(s, ENETUNREACH, 0, 0);

    write_unlock_bh(&x25_list_lock);

    /* Remove any related forwards */
    x25_clear_forward_by_dev(nb->dev);
}
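/*
 *	Module init/exit: register the X.25 protocol and socket family, the
 *	ETH_P_X25 packet handler, the netdevice notifier and, when
 *	configured, the sysctl interface; tear them down again on exit.
 */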
static int __init x25_init(void)
{
    int rc = proto_register(&x25_proto, 0);

    if (rc != 0)
        goto out;

    sock_register(&x25_family_ops);

    dev_add_pack(&x25_packet_type);

    register_netdevice_notifier(&x25_dev_notifier);

    printk(KERN_INFO "X.25 for Linux Version 0.2\n");

#ifdef CONFIG_SYSCTL
    x25_register_sysctl();
#endif
out:
    return rc;
}
module_init(x25_init);
static void __exit x25_exit(void)
{
#ifdef CONFIG_SYSCTL
    x25_unregister_sysctl();
#endif
    unregister_netdevice_notifier(&x25_dev_notifier);

    dev_remove_pack(&x25_packet_type);

    sock_unregister(AF_X25);
    proto_unregister(&x25_proto);
}
module_exit(x25_exit);
MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The X.25 Packet Layer network layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_X25);