/*
 *  linux/net/iucv/af_iucv.c
 *
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright 2006 IBM Corporation
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>
#define CONFIG_IUCV_SOCK_DEBUG 1

#define VERSION "1.0"

static char iucv_userid[80];

static struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.obj_size	= sizeof(struct iucv_sock),
};
/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static struct iucv_sock_list iucv_sk_list = {
	.lock		= RW_LOCK_UNLOCKED,
	.autobind_name	= ATOMIC_INIT(0)
};
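/*
 * Callbacks invoked by the IUCV base layer for this socket family;
 * the handler is registered with iucv_register() in afiucv_init().
 */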
static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone
};
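/*
 * Copy an 8-byte name into the high (bytes 0-7) or low (bytes 8-15)
 * half of the 16-byte IUCV user data used when a path is set up.
 */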
static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}
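/* Connection timer expired: flag the socket with ETIMEDOUT and wake up waiters. */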
static void iucv_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	sk->sk_err = ETIMEDOUT;
	sk->sk_state_change(sk);
}
static void iucv_sock_clear_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
static void iucv_sock_init_timer(struct sock *sk)
{
	init_timer(&sk->sk_timer);
	sk->sk_timer.function = iucv_sock_timeout;
	sk->sk_timer.data     = (unsigned long) sk;
}
static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}
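/* Drop any skbs still queued on the socket when it is destructed. */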
static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;
	unsigned long timeo;

	iucv_sock_clear_timer(sk);
	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
	case IUCV_DISCONN:
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
		}

		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			err = iucv_path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}

	release_sock(sk);
	iucv_sock_kill(sk);
}
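/* Initialise a new socket; parent is the listening socket for accepted connections, NULL otherwise. */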
static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(PF_IUCV, prio, &iucv_proto, 1);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
	iucv_sk(sk)->send_tag = 0;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	iucv_sock_init_timer(sk);

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}
/* Create an IUCV socket */
static int iucv_sock_create(struct socket *sock, int protocol)
{
	struct sock *sk;

	if (sock->type != SOCK_STREAM)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;
	sock->ops = &iucv_sock_ops;

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}
void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}
void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}
void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	list_add_tail(&iucv_sk(sk)->accept_q, &iucv_sk(parent)->accept_q);
	iucv_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}
void iucv_accept_unlink(struct sock *sk)
{
	list_del_init(&iucv_sk(sk)->accept_q);
	iucv_sk(sk)->parent->sk_ack_backlog--;
	iucv_sk(sk)->parent = NULL;
}
struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			return sk;
		}
	}

	return NULL;
}
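/* Sleep until the socket reaches one of the two given states, the timeout expires, or a signal arrives. */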
int iucv_sock_wait_state(struct sock *sk, int state, int state2,
			 unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	add_wait_queue(sk->sk_sleep, &wait);
	while (sk->sk_state != state && sk->sk_state != state2) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		timeo = schedule_timeout(timeo);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return err;
}
/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err;

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}

	/* Bind the socket */
	memcpy(iucv->src_name, sa->siucv_name, 8);

	/* Copy the user id */
	memcpy(iucv->src_user_id, iucv_userid, 8);
	sk->sk_state = IUCV_BOUND;
	err = 0;

done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	return err;
}
/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* Set the userid and name */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	return err;
}
/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	unsigned char user_data[16];
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_type != SOCK_STREAM)
		return -EINVAL;

	iucv = iucv_sk(sk);

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	/* Set the destination information */
	memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv_sk(sk)->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create and connect the IUCV path */
	iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
				     IPRMDATA, GFP_KERNEL);
	err = iucv_path_connect(iucv->path, &af_iucv_handler,
				sa->siucv_user_id, NULL, user_data, sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		return -ECONNREFUSED;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
					   sock_sndtimeo(sk, flags & O_NONBLOCK));
	}

	if (sk->sk_state == IUCV_DISCONN) {
		return -ECONNREFUSED;
	}

	return err;
}
/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;

	if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
		return -EINVAL;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;

	return 0;
}
/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
		memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}
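/*
 * Send path: copy the user data into an skb, tag it with send_tag, queue it
 * on send_skb_q and pass it to iucv_message_send(); the skb is unlinked again
 * in iucv_callback_txdone() once the message completes.
 */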
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	int err;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	if (sk->sk_state == IUCV_CONNECTED) {
		if (!(skb = sock_alloc_send_skb(sk, len,
						msg->msg_flags & MSG_DONTWAIT,
						&err)))
			goto out;

		if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
			err = -EFAULT;
			goto fail;
		}

		txmsg.tag = iucv->send_tag++;
		memcpy(skb->cb, &txmsg.tag, 4);
		skb_queue_tail(&iucv->send_skb_q, skb);
		err = iucv_message_send(iucv->path, &txmsg, 0, 0,
					(void *) skb->data, skb->len);
		if (err) {
			printk(KERN_ERR "AF_IUCV msg limit exceeded\n");
			skb_unlink(skb, &iucv->send_skb_q);
			goto fail;
		}
	} else {
		err = -ENOTCONN;
		goto out;
	}

	return len;

fail:
	kfree_skb(skb);
out:
	return err;
}
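/*
 * Receive path: hand the next skb from the receive queue to the user and then
 * refill the receive queue from backlog_skb_q, which holds skbs that could
 * not be charged to the socket earlier.
 */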
static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int target, copied = 0;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	copied = min_t(unsigned int, skb->len, len);

	cskb = skb;
	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
		skb_queue_head(&sk->sk_receive_queue, skb);
		if (copied == 0)
			return -EFAULT;
		goto done;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {
		skb_pull(skb, copied);

		if (skb->len) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			goto done;
		}

		kfree_skb(skb);

		/* Queue backlog skbs */
		rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv_sk(sk)->backlog_skb_q,
					       rskb);
				break;
			}
			rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		}
	} else
		skb_queue_head(&sk->sk_receive_queue, skb);

done:
	return err ? : copied;
}
static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}
unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	poll_wait(file, sk->sk_sleep, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
		mask |= POLLIN;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;
	u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	switch (sk->sk_state) {
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;

	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;
		err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
					(void *) prmmsg, 8);
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	return err;
}
static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		iucv_path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);

	return 0;
}
/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);

	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);

	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
	if (!nsk) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	path->msglim = IUCV_QUEUELEN_DEFAULT;
	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	return 0;

fail:
	return err;
}
static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len,
			     struct sk_buff_head *fragmented_skb_q)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		memcpy(nskb->data, skb->data + copied, size);
		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(fragmented_skb_q, nskb);
	}

	return 0;
}
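/*
 * A message is pending on the path: pull it into an skb, fragmenting it if it
 * exceeds a quarter of the receive buffer, and queue it (or its fragments)
 * for the socket, falling back to backlog_skb_q when the receive queue is full.
 */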
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb, *fskb;
	struct sk_buff_head fragmented_skb_q;
	int rc;

	skb_queue_head_init(&fragmented_skb_q);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return;

	skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
	if (!skb) {
		iucv_path_sever(path, NULL);
		return;
	}

	if (msg->flags & IPRMDATA) {
		/* Message data is carried in the parameter list. */
		skb->data = NULL;
		skb->len = 0;
	} else {
		rc = iucv_message_receive(path, msg, 0, skb->data,
					  msg->length, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		if (skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, msg->length,
					       &fragmented_skb_q);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				iucv_path_sever(path, NULL);
				return;
			}
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = msg->length;
		}
	}

	/* Queue the fragmented skb */
	fskb = skb_dequeue(&fragmented_skb_q);
	while (fskb) {
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			skb_queue_tail(&iucv->backlog_skb_q, fskb);
		else if (sock_queue_rcv_skb(sk, fskb))
			skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, fskb);
		fskb = skb_dequeue(&fragmented_skb_q);
	}

	/* Queue the original skb if it exists (was not fragmented) */
	if (skb) {
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
		else if (sock_queue_rcv_skb(sk, skb))
			skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
	}
}
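/*
 * A previously sent message has completed: find the matching skb on
 * send_skb_q by its tag, free it, and finish a pending close once the
 * send queue has drained.
 */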
static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);

	do {
		this = list_skb;
		list_skb = list_skb->next;
	} while (memcmp(&msg->tag, this->cb, 4) && list_skb);

	spin_unlock_irqrestore(&list->lock, flags);

	skb_unlink(this, &iucv_sk(sk)->send_skb_q);
	kfree_skb(this);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}
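/* The peer severed the path: mark the socket severed or disconnected and wake up waiters. */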
static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (!list_empty(&iucv_sk(sk)->accept_q))
		sk->sk_state = IUCV_SEVERED;
	else
		sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
}
static struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt
};
static struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};
static int __init afiucv_init(void)
{
	int err;

	if (!MACHINE_IS_VM) {
		printk(KERN_ERR "AF_IUCV connection needs VM as base\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}
	cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
	if (unlikely(err)) {
		printk(KERN_ERR "AF_IUCV needs the VM userid\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}

	err = iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out_iucv;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;
	printk(KERN_INFO "AF_IUCV lowlevel driver initialized\n");
	return 0;

out_proto:
	proto_unregister(&iucv_proto);
out_iucv:
	iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}
static void __exit afiucv_exit(void)
{
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
	iucv_unregister(&af_iucv_handler, 0);

	printk(KERN_INFO "AF_IUCV lowlevel driver unloaded\n");
}
module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);
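
/*
 * Illustrative userspace sketch (not part of this module, kept out of the
 * build): one way an application might open and connect an AF_IUCV stream
 * socket served by this protocol family.  It assumes the struct sockaddr_iucv
 * layout from <net/iucv/af_iucv.h> is visible to userspace; the peer user ID
 * "PEERVM" and application name "APPSRV" are placeholders, blank padded to
 * the 8 bytes expected by iucv_sock_bind()/iucv_sock_connect() above.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <net/iucv/af_iucv.h>	/* AF_IUCV, struct sockaddr_iucv */

static int iucv_connect_example(void)
{
	struct sockaddr_iucv addr;
	int fd;

	fd = socket(AF_IUCV, SOCK_STREAM, 0);
	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.siucv_family = AF_IUCV;
	memcpy(addr.siucv_user_id, "PEERVM  ", 8);	/* peer VM user ID (placeholder) */
	memcpy(addr.siucv_name,    "APPSRV  ", 8);	/* peer application name (placeholder) */

	if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif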