/*
 *	X.25 Packet Layer release 002
 *
 *	This is ALPHA test software. This code may break your machine,
 *	randomly fail to work with new releases, misbehave and/or generally
 *	screw up. It might even work.
 *
 *	This code REQUIRES 2.1.15 or higher
 *
 *	This module is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	History
 *	X.25 001	Jonathan Naylor	  Started coding.
 *	X.25 002	Jonathan Naylor	  Centralised disconnect handling.
 *					  New timer architecture.
 *	2000-03-11	Henner Eisen	  MSG_EOR handling more POSIX compliant.
 *	2000-03-22	Daniela Squassoni Allowed disabling/enabling of
 *					  facilities negotiation and increased
 *					  the throughput upper limit.
 *	2000-08-27	Arnaldo C. Melo	  s/suser/capable/ + micro cleanups
 *	2000-09-04	Henner Eisen	  Set sock->state in x25_accept().
 *					  Fixed x25_output() related skb leakage.
 *	2000-10-02	Henner Eisen	  Made x25_kick() single threaded per socket.
 *	2000-10-27	Henner Eisen	  MSG_DONTWAIT for fragment allocation.
 *	2000-11-14	Henner Eisen	  Closing datalink from NETDEV_GOING_DOWN
 *	2002-10-06	Arnaldo C. Melo	  Get rid of cli/sti, move proc stuff to
 *					  x25_proc.c, using seq_file
 *	2005-04-02	Shaun Pereira	  Selective sub-address matching with
 *					  call user data
 *	2005-04-15	Shaun Pereira	  Fast select with no restriction on
 *					  response
 */

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/compat.h>

#include <net/x25.h>
#include <net/compat.h>

int sysctl_x25_restart_request_timeout = X25_DEFAULT_T20;
int sysctl_x25_call_request_timeout    = X25_DEFAULT_T21;
int sysctl_x25_reset_request_timeout   = X25_DEFAULT_T22;
int sysctl_x25_clear_request_timeout   = X25_DEFAULT_T23;
int sysctl_x25_ack_holdback_timeout    = X25_DEFAULT_T2;
int sysctl_x25_forward                 = 0;

HLIST_HEAD(x25_list);
DEFINE_RWLOCK(x25_list_lock);

static const struct proto_ops x25_proto_ops;

static struct x25_address null_x25_address = {"               "};

#ifdef CONFIG_COMPAT
struct compat_x25_subscrip_struct {
	char device[200 - sizeof(compat_ulong_t)];
	compat_ulong_t global_facil_mask;
	compat_uint_t extended;
};
#endif

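/*
 *	Unpack the BCD-coded called/calling address block at the start of an
 *	incoming call packet into two ASCII digit strings. The first byte
 *	holds the two address lengths (called in the low nibble, calling in
 *	the high nibble); the return value is the number of bytes the address
 *	block occupies in the packet.
 */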
int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr,
		  struct x25_address *calling_addr)
{
	unsigned int called_len, calling_len;
	char *called, *calling;
	unsigned int i;

	called_len  = (*p >> 0) & 0x0F;
	calling_len = (*p >> 4) & 0x0F;

	called  = called_addr->x25_addr;
	calling = calling_addr->x25_addr;
	p++;

	for (i = 0; i < (called_len + calling_len); i++) {
		if (i < called_len) {
			if (i % 2 != 0) {
				*called++ = ((*p >> 0) & 0x0F) + '0';
				p++;
			} else {
				*called++ = ((*p >> 4) & 0x0F) + '0';
			}
		} else {
			if (i % 2 != 0) {
				*calling++ = ((*p >> 0) & 0x0F) + '0';
				p++;
			} else {
				*calling++ = ((*p >> 4) & 0x0F) + '0';
			}
		}
	}

	*called = *calling = '\0';

	return 1 + (called_len + calling_len + 1) / 2;
}

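/*
 *	The reverse of x25_addr_ntoa(): pack two ASCII address strings into
 *	the on-the-wire BCD address block. Returns the number of bytes
 *	written, including the leading length byte.
 */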
int x25_addr_aton(unsigned char *p, struct x25_address *called_addr,
		  struct x25_address *calling_addr)
{
	unsigned int called_len, calling_len;
	char *called, *calling;
	int i;

	called  = called_addr->x25_addr;
	calling = calling_addr->x25_addr;

	called_len  = strlen(called);
	calling_len = strlen(calling);

	*p++ = (calling_len << 4) | (called_len << 0);

	for (i = 0; i < (called_len + calling_len); i++) {
		if (i < called_len) {
			if (i % 2 != 0) {
				*p |= (*called++ - '0') << 0;
				p++;
			} else {
				*p = 0x00;
				*p |= (*called++ - '0') << 4;
			}
		} else {
			if (i % 2 != 0) {
				*p |= (*calling++ - '0') << 0;
				p++;
			} else {
				*p = 0x00;
				*p |= (*calling++ - '0') << 4;
			}
		}
	}

	return 1 + (called_len + calling_len + 1) / 2;
}

/*
 *	Socket removal during an interrupt is now safe.
 */
static void x25_remove_socket(struct sock *sk)
{
	write_lock_bh(&x25_list_lock);
	sk_del_node_init(sk);
	write_unlock_bh(&x25_list_lock);
}

/*
 *	Kill all bound sockets on a dropped device.
 */
static void x25_kill_by_device(struct net_device *dev)
{
	struct sock *s;
	struct hlist_node *node;

	write_lock_bh(&x25_list_lock);

	sk_for_each(s, node, &x25_list)
		if (x25_sk(s)->neighbour && x25_sk(s)->neighbour->dev == dev)
			x25_disconnect(s, ENETUNREACH, 0, 0);

	write_unlock_bh(&x25_list_lock);
}

/*
 *	Handle device status changes.
 */
static int x25_device_event(struct notifier_block *this, unsigned long event,
			    void *ptr)
{
	struct net_device *dev = ptr;
	struct x25_neigh *nb;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (dev->type == ARPHRD_X25
#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
	 || dev->type == ARPHRD_ETHER
#endif
	 ) {
		switch (event) {
			case NETDEV_UP:
				x25_link_device_up(dev);
				break;
			case NETDEV_GOING_DOWN:
				nb = x25_get_neigh(dev);
				if (nb) {
					x25_terminate_link(nb);
					x25_neigh_put(nb);
				}
				break;
			case NETDEV_DOWN:
				x25_kill_by_device(dev);
				x25_route_device_down(dev);
				x25_link_device_down(dev);
				break;
		}
	}

	return NOTIFY_DONE;
}

/*
 *	Add a socket to the bound sockets list.
 */
static void x25_insert_socket(struct sock *sk)
{
	write_lock_bh(&x25_list_lock);
	sk_add_node(sk, &x25_list);
	write_unlock_bh(&x25_list_lock);
}

/*
 *	Find a socket that wants to accept the Call Request we just
 *	received. Check the full list for an address/cud match.
 *	If no cuds match return the next_best thing, an address match.
 *	Note: if a listening socket has cud set it must only get calls
 *	with matching cud.
 */
static struct sock *x25_find_listener(struct x25_address *addr,
				      struct sk_buff *skb)
{
	struct sock *s;
	struct sock *next_best;
	struct hlist_node *node;

	read_lock_bh(&x25_list_lock);
	next_best = NULL;

	sk_for_each(s, node, &x25_list)
		if ((!strcmp(addr->x25_addr,
			     x25_sk(s)->source_addr.x25_addr) ||
		     !strcmp(addr->x25_addr,
			     null_x25_address.x25_addr)) &&
		     s->sk_state == TCP_LISTEN) {
			/*
			 * Found a listening socket, now check the incoming
			 * call user data vs this socket's call user data
			 */
			if (skb->len > 0 && x25_sk(s)->cudmatchlength > 0) {
				if ((memcmp(x25_sk(s)->calluserdata.cuddata,
					    skb->data,
					    x25_sk(s)->cudmatchlength)) == 0) {
					sock_hold(s);
					goto found;
				}
			} else
				next_best = s;
		}
	if (next_best) {
		s = next_best;
		sock_hold(s);
		goto found;
	}
	s = NULL;
found:
	read_unlock_bh(&x25_list_lock);
	return s;
}

/*
 *	Find a connected X.25 socket given my LCI and neighbour.
 */
static struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb)
{
	struct sock *s;
	struct hlist_node *node;

	sk_for_each(s, node, &x25_list)
		if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb) {
			sock_hold(s);
			goto found;
		}
	s = NULL;
found:
	return s;
}

struct sock *x25_find_socket(unsigned int lci, struct x25_neigh *nb)
{
	struct sock *s;

	read_lock_bh(&x25_list_lock);
	s = __x25_find_socket(lci, nb);
	read_unlock_bh(&x25_list_lock);
	return s;
}

/*
 *	Find a unique LCI for a given device.
 */
static unsigned int x25_new_lci(struct x25_neigh *nb)
{
	unsigned int lci = 1;
	struct sock *sk;

	read_lock_bh(&x25_list_lock);

	while ((sk = __x25_find_socket(lci, nb)) != NULL) {
		sock_put(sk);
		if (++lci == 4096) {
			lci = 0;
			break;
		}
	}

	read_unlock_bh(&x25_list_lock);
	return lci;
}

static void __x25_destroy_socket(struct sock *);

/*
 *	handler for deferred kills.
 */
static void x25_destroy_timer(unsigned long data)
{
	x25_destroy_socket_from_timer((struct sock *)data);
}

/*
 *	This is called from user mode and the timers. Thus it protects itself
 *	against interrupt users but doesn't worry about being called during
 *	work. Once it is removed from the queue no interrupt or bottom half
 *	will touch it and we are (fairly 8-) ) safe.
 *	Not static as it's used by the timer
 */
static void __x25_destroy_socket(struct sock *sk)
{
	struct sk_buff *skb;

	x25_stop_heartbeat(sk);
	x25_stop_timer(sk);

	x25_remove_socket(sk);
	x25_clear_queues(sk);		/* Flush the queues */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (skb->sk != sk) {		/* A pending connection */
			/*
			 * Queue the unaccepted socket for death
			 */
			sock_set_flag(skb->sk, SOCK_DEAD);
			x25_start_heartbeat(skb->sk);
			x25_sk(skb->sk)->state = X25_STATE_0;
		}

		kfree_skb(skb);
	}

	if (sk_has_allocations(sk)) {
		/* Defer: outstanding buffers */
		sk->sk_timer.expires  = jiffies + 10 * HZ;
		sk->sk_timer.function = x25_destroy_timer;
		sk->sk_timer.data     = (unsigned long)sk;
		add_timer(&sk->sk_timer);
	} else {
		/* drop last reference so sock_put will free */
		__sock_put(sk);
	}
}

void x25_destroy_socket_from_timer(struct sock *sk)
{
	sock_hold(sk);
	bh_lock_sock(sk);
	__x25_destroy_socket(sk);
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void x25_destroy_socket(struct sock *sk)
{
	sock_hold(sk);
	lock_sock(sk);
	__x25_destroy_socket(sk);
	release_sock(sk);
	sock_put(sk);
}

/*
 *	Handling for system calls applied via the various interfaces to a
 *	X.25 socket object.
 */

static int x25_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	int opt;
	struct sock *sk = sock->sk;
	int rc = -ENOPROTOOPT;

	if (level != SOL_X25 || optname != X25_QBITINCL)
		goto out;

	rc = -EINVAL;
	if (optlen < sizeof(int))
		goto out;

	rc = -EFAULT;
	if (get_user(opt, (int __user *)optval))
		goto out;

	x25_sk(sk)->qbitincl = !!opt;
	rc = 0;
out:
	return rc;
}

static int x25_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	int val, len, rc = -ENOPROTOOPT;

	if (level != SOL_X25 || optname != X25_QBITINCL)
		goto out;

	rc = -EFAULT;
	if (get_user(len, optlen))
		goto out;

	len = min_t(unsigned int, len, sizeof(int));

	rc = -EINVAL;
	if (len < 0)
		goto out;

	rc = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	val = x25_sk(sk)->qbitincl;
	rc = copy_to_user(optval, &val, len) ? -EFAULT : 0;
out:
	return rc;
}

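/*
 *	Put the socket into the listening state and record the backlog
 *	requested by userspace.
 */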
static int x25_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int rc = -EOPNOTSUPP;

	if (sk->sk_state != TCP_LISTEN) {
		memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
		sk->sk_max_ack_backlog = backlog;
		sk->sk_state           = TCP_LISTEN;
		rc = 0;
	}

	return rc;
}

static struct proto x25_proto = {
	.name	  = "X25",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct x25_sock),
};

static struct sock *x25_alloc_socket(struct net *net)
{
	struct x25_sock *x25;
	struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto);

	if (!sk)
		goto out;

	sock_init_data(NULL, sk);

	x25 = x25_sk(sk);
	skb_queue_head_init(&x25->ack_queue);
	skb_queue_head_init(&x25->fragment_queue);
	skb_queue_head_init(&x25->interrupt_in_queue);
	skb_queue_head_init(&x25->interrupt_out_queue);
out:
	return sk;
}

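/*
 *	Create a new X.25 socket and initialise its timers and default
 *	facilities. Only SOCK_SEQPACKET with protocol 0 is accepted, e.g.
 *	(illustrative userspace call):
 *
 *		int fd = socket(AF_X25, SOCK_SEQPACKET, 0);
 */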
static int x25_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct x25_sock *x25;
	int rc = -ESOCKTNOSUPPORT;

	if (!net_eq(net, &init_net))
		return -EAFNOSUPPORT;

	if (sock->type != SOCK_SEQPACKET || protocol)
		goto out;

	rc = -ENOMEM;
	if ((sk = x25_alloc_socket(net)) == NULL)
		goto out;

	x25 = x25_sk(sk);

	sock_init_data(sock, sk);

	x25_init_timers(sk);

	sock->ops          = &x25_proto_ops;
	sk->sk_protocol    = protocol;
	sk->sk_backlog_rcv = x25_backlog_rcv;

	x25->t21   = sysctl_x25_call_request_timeout;
	x25->t22   = sysctl_x25_reset_request_timeout;
	x25->t23   = sysctl_x25_clear_request_timeout;
	x25->t2    = sysctl_x25_ack_holdback_timeout;
	x25->state = X25_STATE_0;
	x25->cudmatchlength = 0;
	x25->accptapprv = X25_DENY_ACCPT_APPRV;	/* normally no cud on call accept */

	x25->facilities.winsize_in  = X25_DEFAULT_WINDOW_SIZE;
	x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE;
	x25->facilities.pacsize_in  = X25_DEFAULT_PACKET_SIZE;
	x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE;
	x25->facilities.throughput  = X25_DEFAULT_THROUGHPUT;
	x25->facilities.reverse     = X25_DEFAULT_REVERSE;
	x25->dte_facilities.calling_len = 0;
	x25->dte_facilities.called_len  = 0;
	memset(x25->dte_facilities.called_ae, '\0',
	       sizeof(x25->dte_facilities.called_ae));
	memset(x25->dte_facilities.calling_ae, '\0',
	       sizeof(x25->dte_facilities.calling_ae));

	rc = 0;
out:
	return rc;
}

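/*
 *	Clone a listening socket for an incoming call: the new socket
 *	inherits the parent's buffer sizes, timers and facilities.
 */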
static struct sock *x25_make_new(struct sock *osk)
{
	struct sock *sk = NULL;
	struct x25_sock *x25, *ox25;

	if (osk->sk_type != SOCK_SEQPACKET)
		goto out;

	if ((sk = x25_alloc_socket(sock_net(osk))) == NULL)
		goto out;

	x25 = x25_sk(sk);

	sk->sk_type        = osk->sk_type;
	sk->sk_priority    = osk->sk_priority;
	sk->sk_protocol    = osk->sk_protocol;
	sk->sk_rcvbuf      = osk->sk_rcvbuf;
	sk->sk_sndbuf      = osk->sk_sndbuf;
	sk->sk_state       = TCP_ESTABLISHED;
	sk->sk_backlog_rcv = osk->sk_backlog_rcv;
	sock_copy_flags(sk, osk);

	ox25 = x25_sk(osk);
	x25->t21            = ox25->t21;
	x25->t22            = ox25->t22;
	x25->t23            = ox25->t23;
	x25->t2             = ox25->t2;
	x25->facilities     = ox25->facilities;
	x25->qbitincl       = ox25->qbitincl;
	x25->dte_facilities = ox25->dte_facilities;
	x25->cudmatchlength = ox25->cudmatchlength;
	x25->accptapprv     = ox25->accptapprv;

	x25_init_timers(sk);
out:
	return sk;
}

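/*
 *	Close a socket. Depending on the virtual-call state this either
 *	destroys the socket immediately or sends a Clear Request and defers
 *	destruction until the clear is confirmed or T23 expires.
 */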
static int x25_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct x25_sock *x25;

	if (!sk)
		goto out;

	x25 = x25_sk(sk);

	switch (x25->state) {

		case X25_STATE_0:
		case X25_STATE_2:
			x25_disconnect(sk, 0, 0, 0);
			x25_destroy_socket(sk);
			goto out;

		case X25_STATE_1:
		case X25_STATE_3:
		case X25_STATE_4:
			x25_clear_queues(sk);
			x25_write_internal(sk, X25_CLEAR_REQUEST);
			x25_start_t23timer(sk);
			x25->state = X25_STATE_2;
			sk->sk_state     = TCP_CLOSE;
			sk->sk_shutdown |= SEND_SHUTDOWN;
			sk->sk_state_change(sk);
			sock_set_flag(sk, SOCK_DEAD);
			sock_set_flag(sk, SOCK_DESTROY);
			break;
	}

	sock_orphan(sk);
out:
	return 0;
}

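/*
 *	Bind a local X.121 address to the socket, e.g. (illustrative only):
 *
 *		struct sockaddr_x25 a = { .sx25_family = AF_X25 };
 *		strcpy(a.sx25_addr.x25_addr, "23420000012345");
 *		bind(fd, (struct sockaddr *)&a, sizeof(a));
 *
 *	The socket must still be unbound (SOCK_ZAPPED).
 */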
static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
	int rc = 0;

	if (!sock_flag(sk, SOCK_ZAPPED) ||
	    addr_len != sizeof(struct sockaddr_x25) ||
	    addr->sx25_family != AF_X25) {
		rc = -EINVAL;
		goto out;
	}

	x25_sk(sk)->source_addr = addr->sx25_addr;
	x25_insert_socket(sk);
	sock_reset_flag(sk, SOCK_ZAPPED);
	SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
out:
	return rc;
}

static int x25_wait_for_connection_establishment(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int rc;

	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		rc = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		rc = sock_error(sk);
		if (rc) {
			sk->sk_socket->state = SS_UNCONNECTED;
			break;
		}
		rc = 0;
		if (sk->sk_state != TCP_ESTABLISHED) {
			release_sock(sk);
			schedule();
			lock_sock(sk);
		} else
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return rc;
}

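/*
 *	Initiate an outgoing call: allocate a route and logical channel,
 *	send a Call Request and (unless O_NONBLOCK) wait for the call to be
 *	accepted or cleared.
 */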
static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
		       int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct x25_sock *x25 = x25_sk(sk);
	struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
	struct x25_route *rt;
	int rc = 0;

	lock_sock(sk);
	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
		sock->state = SS_CONNECTED;
		goto out; /* Connect completed during a ERESTARTSYS event */
	}

	rc = -ECONNREFUSED;
	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
		sock->state = SS_UNCONNECTED;
		goto out;
	}

	rc = -EISCONN;	/* No reconnect on a seqpacket socket */
	if (sk->sk_state == TCP_ESTABLISHED)
		goto out;

	sk->sk_state = TCP_CLOSE;
	sock->state  = SS_UNCONNECTED;

	rc = -EINVAL;
	if (addr_len != sizeof(struct sockaddr_x25) ||
	    addr->sx25_family != AF_X25)
		goto out;

	rc = -ENETUNREACH;
	rt = x25_get_route(&addr->sx25_addr);
	if (!rt)
		goto out;

	x25->neighbour = x25_get_neigh(rt->dev);
	if (!x25->neighbour)
		goto out_put_route;

	x25_limit_facilities(&x25->facilities, x25->neighbour);

	x25->lci = x25_new_lci(x25->neighbour);
	if (!x25->lci)
		goto out_put_neigh;

	rc = -EINVAL;
	if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
		goto out_put_neigh;

	if (!strcmp(x25->source_addr.x25_addr, null_x25_address.x25_addr))
		memset(&x25->source_addr, '\0', X25_ADDR_LEN);

	x25->dest_addr = addr->sx25_addr;

	/* Move to connecting socket, start sending Connect Requests */
	sock->state  = SS_CONNECTING;
	sk->sk_state = TCP_SYN_SENT;

	x25->state = X25_STATE_1;

	x25_write_internal(sk, X25_CALL_REQUEST);

	x25_start_heartbeat(sk);
	x25_start_t21timer(sk);

	/* Now the loop */
	rc = -EINPROGRESS;
	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
		goto out_put_neigh;

	rc = x25_wait_for_connection_establishment(sk);
	if (rc)
		goto out_put_neigh;

	sock->state = SS_CONNECTED;
	rc = 0;
out_put_neigh:
	if (rc)
		x25_neigh_put(x25->neighbour);
out_put_route:
	x25_route_put(rt);
out:
	release_sock(sk);
	return rc;
}

static int x25_wait_for_data(struct sock *sk, long timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int rc = 0;

	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			break;
		rc = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		rc = -EAGAIN;
		if (!timeout)
			break;
		rc = 0;
		if (skb_queue_empty(&sk->sk_receive_queue)) {
			release_sock(sk);
			timeout = schedule_timeout(timeout);
			lock_sock(sk);
		} else
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return rc;
}

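/*
 *	Accept a completed incoming call: wait for a new connection on the
 *	listening socket's receive queue and graft it onto newsock.
 */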
static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *sk = sock->sk;
	struct sock *newsk;
	struct sk_buff *skb;
	int rc = -EINVAL;

	if (!sk || sk->sk_state != TCP_LISTEN)
		goto out;

	rc = -EOPNOTSUPP;
	if (sk->sk_type != SOCK_SEQPACKET)
		goto out;

	lock_sock(sk);
	rc = x25_wait_for_data(sk, sk->sk_rcvtimeo);
	if (rc)
		goto out2;
	skb = skb_dequeue(&sk->sk_receive_queue);
	rc = -EINVAL;
	if (!skb->sk)
		goto out2;
	newsk = skb->sk;
	sock_graft(newsk, newsock);

	/* Now attach up the new socket */
	skb->sk = NULL;
	kfree_skb(skb);
	sk->sk_ack_backlog--;
	newsock->state = SS_CONNECTED;
	rc = 0;
out2:
	release_sock(sk);
out:
	return rc;
}

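/*
 *	Report the local address, or the remote address of an established
 *	call when peer is requested.
 */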
static int x25_getname(struct socket *sock, struct sockaddr *uaddr,
		       int *uaddr_len, int peer)
{
	struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)uaddr;
	struct sock *sk = sock->sk;
	struct x25_sock *x25 = x25_sk(sk);
	int rc = 0;

	if (peer) {
		if (sk->sk_state != TCP_ESTABLISHED) {
			rc = -ENOTCONN;
			goto out;
		}
		sx25->sx25_addr = x25->dest_addr;
	} else
		sx25->sx25_addr = x25->source_addr;

	sx25->sx25_family = AF_X25;
	*uaddr_len = sizeof(*sx25);

out:
	return rc;
}

static unsigned int x25_datagram_poll(struct file *file, struct socket *sock,
				      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct x25_sock *x25 = x25_sk(sk);
	unsigned int rc = datagram_poll(file, sock, wait);

	if (!skb_queue_empty(&x25->interrupt_in_queue))
		rc |= POLLPRI;

	return rc;
}

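/*
 *	Handle an incoming Call Request: parse addresses and facilities,
 *	locate a matching listener (or forward the call), create the new
 *	socket and either accept the call immediately or leave it pending
 *	for user-controlled call acceptance.
 */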
int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
			unsigned int lci)
{
	struct sock *sk;
	struct sock *make;
	struct x25_sock *makex25;
	struct x25_address source_addr, dest_addr;
	struct x25_facilities facilities;
	struct x25_dte_facilities dte_facilities;
	int len, addr_len, rc;

	/*
	 *	Remove the LCI and frame type.
	 */
	skb_pull(skb, X25_STD_MIN_LEN);

	/*
	 *	Extract the X.25 addresses and convert them to ASCII strings,
	 *	and remove them.
	 */
	addr_len = x25_addr_ntoa(skb->data, &source_addr, &dest_addr);
	skb_pull(skb, addr_len);

	/*
	 *	Get the length of the facilities, skip past them for the moment
	 *	get the call user data because this is needed to determine
	 *	the correct listener
	 */
	len = skb->data[0] + 1;
	skb_pull(skb, len);

	/*
	 *	Find a listener for the particular address/cud pair.
	 */
	sk = x25_find_listener(&source_addr, skb);
	skb_push(skb, len);

	if (sk != NULL && sk_acceptq_is_full(sk))
		goto out_sock_put;

	/*
	 *	We don't have any listeners for this incoming call.
	 *	Try forwarding it.
	 */
	if (sk == NULL) {
		skb_push(skb, addr_len + X25_STD_MIN_LEN);
		if (sysctl_x25_forward &&
		    x25_forward_call(&dest_addr, nb, skb, lci) > 0) {
			/* Call was forwarded, don't process it any more */
			kfree_skb(skb);
			rc = 1;
			goto out;
		} else {
			/* No listeners, can't forward, clear the call */
			goto out_clear_request;
		}
	}

	/*
	 *	Try to reach a compromise on the requested facilities.
	 */
	len = x25_negotiate_facilities(skb, sk, &facilities, &dte_facilities);
	if (len == -1)
		goto out_sock_put;

	/*
	 *	current neighbour/link might impose additional limits
	 *	on certain facilities
	 */
	x25_limit_facilities(&facilities, nb);

	/*
	 *	Try to create a new socket.
	 */
	make = x25_make_new(sk);
	if (!make)
		goto out_sock_put;

	/*
	 *	Remove the facilities
	 */
	skb_pull(skb, len);

	skb->sk        = make;
	make->sk_state = TCP_ESTABLISHED;

	makex25 = x25_sk(make);

	makex25->lci            = lci;
	makex25->dest_addr      = dest_addr;
	makex25->source_addr    = source_addr;
	makex25->neighbour      = nb;
	makex25->facilities     = facilities;
	makex25->dte_facilities = dte_facilities;
	makex25->vc_facil_mask  = x25_sk(sk)->vc_facil_mask;
	/* ensure no reverse facil on accept */
	makex25->vc_facil_mask &= ~X25_MASK_REVERSE;
	/* ensure no calling address extension on accept */
	makex25->vc_facil_mask &= ~X25_MASK_CALLING_AE;
	makex25->cudmatchlength = x25_sk(sk)->cudmatchlength;

	/* Normally all calls are accepted immediately */
	if (makex25->accptapprv & X25_DENY_ACCPT_APPRV) {
		x25_write_internal(make, X25_CALL_ACCEPTED);
		makex25->state = X25_STATE_3;
	}

	/*
	 *	Incoming Call User Data.
	 */
	skb_copy_from_linear_data(skb, makex25->calluserdata.cuddata, skb->len);
	makex25->calluserdata.cudlength = skb->len;

	sk->sk_ack_backlog++;

	x25_insert_socket(make);

	skb_queue_head(&sk->sk_receive_queue, skb);

	x25_start_heartbeat(make);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	rc = 1;
	sock_put(sk);
out:
	return rc;
out_sock_put:
	sock_put(sk);
out_clear_request:
	rc = 0;
	x25_transmit_clear_request(nb, lci, 0x01);
	goto out;
}

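/*
 *	Send one complete record. MSG_OOB maps onto an X.25 Interrupt
 *	packet; with the X25_QBITINCL option the first byte of user data
 *	carries the Q bit value rather than payload.
 */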
static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct x25_sock *x25 = x25_sk(sk);
	struct sockaddr_x25 *usx25 = (struct sockaddr_x25 *)msg->msg_name;
	struct sockaddr_x25 sx25;
	struct sk_buff *skb;
	unsigned char *asmptr;
	int noblock = msg->msg_flags & MSG_DONTWAIT;
	size_t size;
	int qbit = 0, rc = -EINVAL;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_OOB|MSG_EOR|MSG_CMSG_COMPAT))
		goto out;

	/* we currently don't support segmented records at the user interface */
	if (!(msg->msg_flags & (MSG_EOR|MSG_OOB)))
		goto out;

	rc = -EADDRNOTAVAIL;
	if (sock_flag(sk, SOCK_ZAPPED))
		goto out;

	rc = -EPIPE;
	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		send_sig(SIGPIPE, current, 0);
		goto out;
	}

	rc = -ENETUNREACH;
	if (!x25->neighbour)
		goto out;

	if (usx25) {
		rc = -EINVAL;
		if (msg->msg_namelen < sizeof(sx25))
			goto out;
		memcpy(&sx25, usx25, sizeof(sx25));
		rc = -EISCONN;
		if (strcmp(x25->dest_addr.x25_addr, sx25.sx25_addr.x25_addr))
			goto out;
		rc = -EINVAL;
		if (sx25.sx25_family != AF_X25)
			goto out;
	} else {
		/*
		 *	FIXME 1003.1g - if the socket is like this because
		 *	it has become closed (not started closed) we ought
		 *	to SIGPIPE, EPIPE;
		 */
		rc = -ENOTCONN;
		if (sk->sk_state != TCP_ESTABLISHED)
			goto out;

		sx25.sx25_family = AF_X25;
		sx25.sx25_addr   = x25->dest_addr;
	}

	/* Sanity check the packet size */
	if (len > 65535) {
		rc = -EMSGSIZE;
		goto out;
	}

	SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n");

	/* Build a packet */
	SOCK_DEBUG(sk, "x25_sendmsg: sendto: building packet.\n");

	if ((msg->msg_flags & MSG_OOB) && len > 32)
		len = 32;

	size = len + X25_MAX_L2_LEN + X25_EXT_MIN_LEN;

	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	if (!skb)
		goto out;
	X25_SKB_CB(skb)->flags = msg->msg_flags;

	skb_reserve(skb, X25_MAX_L2_LEN + X25_EXT_MIN_LEN);

	/*
	 *	Put the data on the end
	 */
	SOCK_DEBUG(sk, "x25_sendmsg: Copying user data\n");

	skb_reset_transport_header(skb);
	skb_put(skb, len);

	rc = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
	if (rc)
		goto out_kfree_skb;

	/*
	 *	If the Q BIT Include socket option is in force, the first
	 *	byte of the user data is the logical value of the Q Bit.
	 */
	if (x25->qbitincl) {
		qbit = skb->data[0];
		skb_pull(skb, 1);
	}

	/*
	 *	Push down the X.25 header
	 */
	SOCK_DEBUG(sk, "x25_sendmsg: Building X.25 Header.\n");

	if (msg->msg_flags & MSG_OOB) {
		if (x25->neighbour->extended) {
			asmptr    = skb_push(skb, X25_STD_MIN_LEN);
			*asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ;
			*asmptr++ = (x25->lci >> 0) & 0xFF;
			*asmptr++ = X25_INTERRUPT;
		} else {
			asmptr    = skb_push(skb, X25_STD_MIN_LEN);
			*asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ;
			*asmptr++ = (x25->lci >> 0) & 0xFF;
			*asmptr++ = X25_INTERRUPT;
		}
	} else {
		if (x25->neighbour->extended) {
			/* Build an Extended X.25 header */
			asmptr    = skb_push(skb, X25_EXT_MIN_LEN);
			*asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ;
			*asmptr++ = (x25->lci >> 0) & 0xFF;
			*asmptr++ = X25_DATA;
			*asmptr++ = X25_DATA;
		} else {
			/* Build a Standard X.25 header */
			asmptr    = skb_push(skb, X25_STD_MIN_LEN);
			*asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ;
			*asmptr++ = (x25->lci >> 0) & 0xFF;
			*asmptr++ = X25_DATA;
		}

		if (qbit)
			skb->data[0] |= X25_Q_BIT;
	}

	SOCK_DEBUG(sk, "x25_sendmsg: Built header.\n");
	SOCK_DEBUG(sk, "x25_sendmsg: Transmitting buffer\n");

	rc = -ENOTCONN;
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out_kfree_skb;

	if (msg->msg_flags & MSG_OOB)
		skb_queue_tail(&x25->interrupt_out_queue, skb);
	else {
		rc = x25_output(sk, skb);
		len = rc;
		if (rc < 0)
			kfree_skb(skb);
		else if (x25->qbitincl)
			len++;
	}

	/*
	 * lock_sock() is currently only used to serialize this x25_kick()
	 * against input-driven x25_kick() calls. It currently only blocks
	 * incoming packets for this socket and does not protect against
	 * any other socket state changes and is not called from anywhere
	 * else. As x25_kick() cannot block and as long as all socket
	 * operations are BKL-wrapped, we don't need to care about purging
	 * the backlog queue in x25_release().
	 *
	 * Using lock_sock() to protect all socket operations entirely
	 * (and making the whole x25 stack SMP aware) unfortunately would
	 * require major changes to {send,recv}msg and skb allocation methods.
	 */
	lock_sock(sk);
	x25_kick(sk);
	release_sock(sk);
	rc = len;
out:
	return rc;
out_kfree_skb:
	kfree_skb(skb);
	goto out;
}

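/*
 *	Receive one record. MSG_OOB reads from the interrupt queue; with
 *	X25_QBITINCL a leading byte holding the received Q bit is prepended
 *	to the user data.
 */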
static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct x25_sock *x25 = x25_sk(sk);
	struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name;
	size_t copied;
	int qbit;
	struct sk_buff *skb;
	unsigned char *asmptr;
	int rc = -ENOTCONN;

	/*
	 * This works for seqpacket too. The receiver has ordered the queue for
	 * us! We do one quick check first though
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	if (flags & MSG_OOB) {
		rc = -EINVAL;
		if (sock_flag(sk, SOCK_URGINLINE) ||
		    !skb_peek(&x25->interrupt_in_queue))
			goto out;

		skb = skb_dequeue(&x25->interrupt_in_queue);

		skb_pull(skb, X25_STD_MIN_LEN);

		/*
		 *	No Q bit information on Interrupt data.
		 */
		if (x25->qbitincl) {
			asmptr  = skb_push(skb, 1);
			*asmptr = 0x00;
		}

		msg->msg_flags |= MSG_OOB;
	} else {
		/* Now we can treat all alike */
		skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
					flags & MSG_DONTWAIT, &rc);
		if (!skb)
			goto out;

		qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT;

		skb_pull(skb, x25->neighbour->extended ?
			      X25_EXT_MIN_LEN : X25_STD_MIN_LEN);

		if (x25->qbitincl) {
			asmptr  = skb_push(skb, 1);
			*asmptr = qbit;
		}
	}

	skb_reset_transport_header(skb);
	copied = skb->len;

	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	/* Currently, each datagram always contains a complete record */
	msg->msg_flags |= MSG_EOR;

	rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (rc)
		goto out_free_dgram;

	if (sx25) {
		sx25->sx25_family = AF_X25;
		sx25->sx25_addr   = x25->dest_addr;
	}

	msg->msg_namelen = sizeof(struct sockaddr_x25);

	lock_sock(sk);
	x25_check_rbuf(sk);
	release_sock(sk);
	rc = copied;
out_free_dgram:
	skb_free_datagram(sk, skb);
out:
	return rc;
}

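/*
 *	Socket ioctls: queue sizes, timestamps, routing/subscription
 *	management and the X.25-specific facility, call-user-data and
 *	cause/diagnostic get/set operations.
 */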
static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct x25_sock *x25 = x25_sk(sk);
	void __user *argp = (void __user *)arg;
	int rc;

	switch (cmd) {
		case TIOCOUTQ: {
			int amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);

			if (amount < 0)
				amount = 0;
			rc = put_user(amount, (unsigned int __user *)argp);
			break;
		}

		case TIOCINQ: {
			struct sk_buff *skb;
			int amount = 0;
			/*
			 * These two are safe on a single CPU system as
			 * only user tasks fiddle here
			 */
			if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
				amount = skb->len;
			rc = put_user(amount, (unsigned int __user *)argp);
			break;
		}

		case SIOCGSTAMP:
			rc = -EINVAL;
			if (sk)
				rc = sock_get_timestamp(sk,
						(struct timeval __user *)argp);
			break;
		case SIOCGSTAMPNS:
			rc = -EINVAL;
			if (sk)
				rc = sock_get_timestampns(sk,
						(struct timespec __user *)argp);
			break;
		case SIOCGIFADDR:
		case SIOCSIFADDR:
		case SIOCGIFDSTADDR:
		case SIOCSIFDSTADDR:
		case SIOCGIFBRDADDR:
		case SIOCSIFBRDADDR:
		case SIOCGIFNETMASK:
		case SIOCSIFNETMASK:
		case SIOCGIFMETRIC:
		case SIOCSIFMETRIC:
			rc = -EINVAL;
			break;
		case SIOCADDRT:
		case SIOCDELRT:
			rc = -EPERM;
			if (!capable(CAP_NET_ADMIN))
				break;
			rc = x25_route_ioctl(cmd, argp);
			break;
		case SIOCX25GSUBSCRIP:
			rc = x25_subscr_ioctl(cmd, argp);
			break;
		case SIOCX25SSUBSCRIP:
			rc = -EPERM;
			if (!capable(CAP_NET_ADMIN))
				break;
			rc = x25_subscr_ioctl(cmd, argp);
			break;
		case SIOCX25GFACILITIES: {
			struct x25_facilities fac = x25->facilities;
			rc = copy_to_user(argp, &fac,
					  sizeof(fac)) ? -EFAULT : 0;
			break;
		}

		case SIOCX25SFACILITIES: {
			struct x25_facilities facilities;
			rc = -EFAULT;
			if (copy_from_user(&facilities, argp,
					   sizeof(facilities)))
				break;
			rc = -EINVAL;
			if (sk->sk_state != TCP_LISTEN &&
			    sk->sk_state != TCP_CLOSE)
				break;
			if (facilities.pacsize_in < X25_PS16 ||
			    facilities.pacsize_in > X25_PS4096)
				break;
			if (facilities.pacsize_out < X25_PS16 ||
			    facilities.pacsize_out > X25_PS4096)
				break;
			if (facilities.winsize_in < 1 ||
			    facilities.winsize_in > 127)
				break;
			if (facilities.throughput < 0x03 ||
			    facilities.throughput > 0xDD)
				break;
			if (facilities.reverse &&
			    (facilities.reverse & 0x81) != 0x81)
				break;
			x25->facilities = facilities;
			rc = 0;
			break;
		}

		case SIOCX25GDTEFACILITIES: {
			rc = copy_to_user(argp, &x25->dte_facilities,
					  sizeof(x25->dte_facilities));
			if (rc)
				rc = -EFAULT;
			break;
		}

		case SIOCX25SDTEFACILITIES: {
			struct x25_dte_facilities dtefacs;
			rc = -EFAULT;
			if (copy_from_user(&dtefacs, argp, sizeof(dtefacs)))
				break;
			rc = -EINVAL;
			if (sk->sk_state != TCP_LISTEN &&
			    sk->sk_state != TCP_CLOSE)
				break;
			if (dtefacs.calling_len > X25_MAX_AE_LEN)
				break;
			if (dtefacs.calling_ae == NULL)
				break;
			if (dtefacs.called_len > X25_MAX_AE_LEN)
				break;
			if (dtefacs.called_ae == NULL)
				break;
			x25->dte_facilities = dtefacs;
			rc = 0;
			break;
		}

		case SIOCX25GCALLUSERDATA: {
			struct x25_calluserdata cud = x25->calluserdata;
			rc = copy_to_user(argp, &cud,
					  sizeof(cud)) ? -EFAULT : 0;
			break;
		}

		case SIOCX25SCALLUSERDATA: {
			struct x25_calluserdata calluserdata;

			rc = -EFAULT;
			if (copy_from_user(&calluserdata, argp,
					   sizeof(calluserdata)))
				break;
			rc = -EINVAL;
			if (calluserdata.cudlength > X25_MAX_CUD_LEN)
				break;
			x25->calluserdata = calluserdata;
			rc = 0;
			break;
		}

		case SIOCX25GCAUSEDIAG: {
			struct x25_causediag causediag;
			causediag = x25->causediag;
			rc = copy_to_user(argp, &causediag,
					  sizeof(causediag)) ? -EFAULT : 0;
			break;
		}

		case SIOCX25SCAUSEDIAG: {
			struct x25_causediag causediag;
			rc = -EFAULT;
			if (copy_from_user(&causediag, argp, sizeof(causediag)))
				break;
			x25->causediag = causediag;
			rc = 0;
			break;
		}

		case SIOCX25SCUDMATCHLEN: {
			struct x25_subaddr sub_addr;
			rc = -EINVAL;
			if (sk->sk_state != TCP_CLOSE)
				break;
			rc = -EFAULT;
			if (copy_from_user(&sub_addr, argp,
					   sizeof(sub_addr)))
				break;
			rc = -EINVAL;
			if (sub_addr.cudmatchlength > X25_MAX_CUD_LEN)
				break;
			x25->cudmatchlength = sub_addr.cudmatchlength;
			rc = 0;
			break;
		}

		case SIOCX25CALLACCPTAPPRV: {
			rc = -EINVAL;
			if (sk->sk_state != TCP_CLOSE)
				break;
			x25->accptapprv = X25_ALLOW_ACCPT_APPRV;
			rc = 0;
			break;
		}

		case SIOCX25SENDCALLACCPT: {
			rc = -EINVAL;
			if (sk->sk_state != TCP_ESTABLISHED)
				break;
			if (x25->accptapprv)	/* must call accptapprv above */
				break;
			x25_write_internal(sk, X25_CALL_ACCEPTED);
			x25->state = X25_STATE_3;
			rc = 0;
			break;
		}

		default:
			rc = -ENOIOCTLCMD;
			break;
	}

	return rc;
}

static const struct net_proto_family x25_family_ops = {
	.family = AF_X25,
	.create = x25_create,
	.owner	= THIS_MODULE,
};

#ifdef CONFIG_COMPAT
static int compat_x25_subscr_ioctl(unsigned int cmd,
		struct compat_x25_subscrip_struct __user *x25_subscr32)
{
	struct compat_x25_subscrip_struct x25_subscr;
	struct x25_neigh *nb;
	struct net_device *dev;
	int rc = -EINVAL;

	rc = -EFAULT;
	if (copy_from_user(&x25_subscr, x25_subscr32, sizeof(*x25_subscr32)))
		goto out;

	rc = -EINVAL;
	dev = x25_dev_get(x25_subscr.device);
	if (dev == NULL)
		goto out;

	nb = x25_get_neigh(dev);
	if (nb == NULL)
		goto out_dev_put;

	dev_put(dev);

	if (cmd == SIOCX25GSUBSCRIP) {
		x25_subscr.extended	     = nb->extended;
		x25_subscr.global_facil_mask = nb->global_facil_mask;
		rc = copy_to_user(x25_subscr32, &x25_subscr,
				  sizeof(*x25_subscr32)) ? -EFAULT : 0;
	} else {
		rc = -EINVAL;
		if (x25_subscr.extended == 0 || x25_subscr.extended == 1) {
			rc = 0;
			nb->extended	      = x25_subscr.extended;
			nb->global_facil_mask = x25_subscr.global_facil_mask;
		}
	}

	x25_neigh_put(nb);
out:
	return rc;
out_dev_put:
	dev_put(dev);
	goto out;
}

static int compat_x25_ioctl(struct socket *sock, unsigned int cmd,
			    unsigned long arg)
{
	void __user *argp = compat_ptr(arg);
	struct sock *sk = sock->sk;

	int rc = -ENOIOCTLCMD;

	switch (cmd) {
	case TIOCOUTQ:
	case TIOCINQ:
		rc = x25_ioctl(sock, cmd, (unsigned long)argp);
		break;
	case SIOCGSTAMP:
		rc = -EINVAL;
		if (sk)
			rc = compat_sock_get_timestamp(sk,
					(struct timeval __user *)argp);
		break;
	case SIOCGSTAMPNS:
		rc = -EINVAL;
		if (sk)
			rc = compat_sock_get_timestampns(sk,
					(struct timespec __user *)argp);
		break;
	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFMETRIC:
	case SIOCSIFMETRIC:
		rc = -EINVAL;
		break;
	case SIOCADDRT:
	case SIOCDELRT:
		rc = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		rc = x25_route_ioctl(cmd, argp);
		break;
	case SIOCX25GSUBSCRIP:
		rc = compat_x25_subscr_ioctl(cmd, argp);
		break;
	case SIOCX25SSUBSCRIP:
		rc = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		rc = compat_x25_subscr_ioctl(cmd, argp);
		break;
	case SIOCX25GFACILITIES:
	case SIOCX25SFACILITIES:
	case SIOCX25GDTEFACILITIES:
	case SIOCX25SDTEFACILITIES:
	case SIOCX25GCALLUSERDATA:
	case SIOCX25SCALLUSERDATA:
	case SIOCX25GCAUSEDIAG:
	case SIOCX25SCAUSEDIAG:
	case SIOCX25SCUDMATCHLEN:
	case SIOCX25CALLACCPTAPPRV:
	case SIOCX25SENDCALLACCPT:
		rc = x25_ioctl(sock, cmd, (unsigned long)argp);
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
	return rc;
}
#endif

static const struct proto_ops x25_proto_ops = {
	.family		= AF_X25,
	.owner		= THIS_MODULE,
	.release	= x25_release,
	.bind		= x25_bind,
	.connect	= x25_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= x25_accept,
	.getname	= x25_getname,
	.poll		= x25_datagram_poll,
	.ioctl		= x25_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_x25_ioctl,
#endif
	.listen		= x25_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= x25_setsockopt,
	.getsockopt	= x25_getsockopt,
	.sendmsg	= x25_sendmsg,
	.recvmsg	= x25_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

static struct packet_type x25_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_X25),
	.func = x25_lapb_receive_frame,
};

static struct notifier_block x25_dev_notifier = {
	.notifier_call = x25_device_event,
};

void x25_kill_by_neigh(struct x25_neigh *nb)
{
	struct sock *s;
	struct hlist_node *node;

	write_lock_bh(&x25_list_lock);

	sk_for_each(s, node, &x25_list)
		if (x25_sk(s)->neighbour == nb)
			x25_disconnect(s, ENETUNREACH, 0, 0);

	write_unlock_bh(&x25_list_lock);

	/* Remove any related forwards */
	x25_clear_forward_by_dev(nb->dev);
}

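/*
 *	Module initialisation: register the protocol, socket family, packet
 *	type and netdevice notifier, then the sysctl and /proc interfaces.
 */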
static int __init x25_init(void)
{
	int rc = proto_register(&x25_proto, 0);

	if (rc != 0)
		goto out;

	rc = sock_register(&x25_family_ops);
	if (rc != 0)
		goto out_proto;

	dev_add_pack(&x25_packet_type);

	rc = register_netdevice_notifier(&x25_dev_notifier);
	if (rc != 0)
		goto out_sock;

	printk(KERN_INFO "X.25 for Linux Version 0.2\n");

	x25_register_sysctl();
	rc = x25_proc_init();
	if (rc != 0)
		goto out_dev;
out:
	return rc;
out_dev:
	unregister_netdevice_notifier(&x25_dev_notifier);
out_sock:
	sock_unregister(AF_X25);
out_proto:
	proto_unregister(&x25_proto);
	goto out;
}
module_init(x25_init);

static void __exit x25_exit(void)
{
	x25_proc_exit();
	x25_link_free();
	x25_route_free();

	x25_unregister_sysctl();

	unregister_netdevice_notifier(&x25_dev_notifier);

	dev_remove_pack(&x25_packet_type);

	sock_unregister(AF_X25);
	proto_unregister(&x25_proto);
}
module_exit(x25_exit);

MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The X.25 Packet Layer network layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_X25);