/*
 *  linux/net/iucv/af_iucv.c
 *
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright 2006 IBM Corporation
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>

#define VERSION "1.1"

static char iucv_userid[80];

static struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
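/* Note on the value above: with the PRMDATA length convention implemented
 * by iucv_msg_length() below, byte 7 = 0x01 decodes to a socket data length
 * of 0xff - 0x01 = 0xfe, which is greater than 7; the pattern is therefore
 * treated as a special shutdown notification rather than inline data. */
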
#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))

/* macros to set/get socket control buffer at correct offset */
#define CB_TAG(skb)	((skb)->cb)		/* iucv message tag */
#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag))
#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
#define CB_TRGCLS_LEN	(TRGCLS_SIZE)
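/* Resulting skb->cb layout (derived from the macros above and the 4-byte
 * tag and class fields of struct iucv_message):
 *	bytes 0..3	iucv message tag	(CB_TAG)
 *	bytes 4..7	iucv msg target class	(CB_TRGCLS) */
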
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}
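/* Together the two helpers build the 16-byte IUCV user data:
 * high_nmcpy() fills bytes 0..7 and low_nmcpy() fills bytes 8..15 of @dst. */
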
/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further,
 * if the socket data len is > 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}
static void iucv_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	bh_lock_sock(sk);
	sk->sk_err = ETIMEDOUT;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);

	iucv_sock_kill(sk);
	sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}

/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;
	unsigned long timeo;

	iucv_sock_clear_timer(sk);
	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
	case IUCV_DISCONN:
		err = 0;

		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
		}

	case IUCV_CLOSING:   /* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			err = iucv_path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}

	release_sock(sk);
	iucv_sock_kill(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
	INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
	spin_lock_init(&iucv_sk(sk)->message_q.lock);
	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
	iucv_sk(sk)->send_tag = 0;
	iucv_sk(sk)->flags = 0;
	iucv_sk(sk)->msglimit = IUCV_QUEUELEN_DEFAULT;
	iucv_sk(sk)->path = NULL;
	memset(&iucv_sk(sk)->src_user_id, 0, 32);

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state	= IUCV_OPEN;

	setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent->sk_ack_backlog--;
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

int iucv_sock_wait_state(struct sock *sk, int state, int state2,
			 unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	add_wait_queue(sk->sk_sleep, &wait);
	while (sk->sk_state != state && sk->sk_state != state2) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return err;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err;

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path) {
		err = 0;
		goto done_unlock;
	}

	/* Bind the socket */
	memcpy(iucv->src_name, sa->siucv_name, 8);

	/* Copy the user id */
	memcpy(iucv->src_user_id, iucv_userid, 8);
	sk->sk_state = IUCV_BOUND;
	err = 0;

done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}
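/* For illustration only (not part of the original source): a minimal
 * userspace counterpart of the bind path above, using a hypothetical
 * application name. Note that bind() only evaluates siucv_name here;
 * the source user ID is filled in from the local z/VM guest:
 *
 *	struct sockaddr_iucv addr;
 *	memset(&addr, 0, sizeof(addr));
 *	addr.siucv_family = AF_IUCV;
 *	memcpy(addr.siucv_name, "MYSRV   ", 8);
 *	bind(fd, (struct sockaddr *) &addr, sizeof(addr));
 */
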
/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* Set the userid and name */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
	if (unlikely(err))
		return -EPROTO;

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	return err;
}
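/* Example: the first autobound socket is named "00000001", the next
 * "00000002", and so on; names already in use are skipped by the loop. */
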
/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	unsigned char user_data[16];
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv_sk(sk)->src_name);
	ASCEBC(user_data, sizeof(user_data));

	iucv = iucv_sk(sk);
	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = iucv_path_connect(iucv->path, &af_iucv_handler,
				sa->siucv_user_id, NULL, user_data, sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
		goto done;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
					   sock_sndtimeo(sk, flags & O_NONBLOCK));
	}

	if (sk->sk_state == IUCV_DISCONN) {
		err = -ECONNREFUSED;
	}

	if (err) {
		iucv_path_sever(iucv->path, NULL);
		iucv_path_free(iucv->path);
		iucv->path = NULL;
	}

done:
	release_sock(sk);
	return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
		memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}

/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return iucv_message_send(path, msg, IUCV_IPRMDATA, 0,
				 (void *) prmdata, 8);
}
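/* Example: for skb->len == 3 with data "abc", prmdata becomes
 * { 'a', 'b', 'c', ?, ?, ?, ?, 0xfc } (0xfc = 0xff - 3); bytes 3..6 are
 * uninitialized and ignored by the receiver, which only reads 3 bytes. */
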
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	struct cmsghdr *cmsg;
	int cmsg_done;
	char user_id[9];
	char appl_id[9];
	int err;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	if (sk->sk_state == IUCV_CONNECTED) {
		/* initialize defaults */
		cmsg_done   = 0;	/* check for duplicate headers */
		txmsg.class = 0;

		/* iterate over control messages */
		for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
		     cmsg = CMSG_NXTHDR(msg, cmsg)) {

			if (!CMSG_OK(msg, cmsg)) {
				err = -EINVAL;
				goto out;
			}

			if (cmsg->cmsg_level != SOL_IUCV)
				continue;

			if (cmsg->cmsg_type & cmsg_done) {
				err = -EINVAL;
				goto out;
			}
			cmsg_done |= cmsg->cmsg_type;

			switch (cmsg->cmsg_type) {
			case SCM_IUCV_TRGCLS:
				if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
					err = -EINVAL;
					goto out;
				}

				/* set iucv message target class */
				memcpy(&txmsg.class,
				       (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
				break;

			default:
				err = -EINVAL;
				goto out;
			}
		}

		/* allocate one skb for each iucv message:
		 * this is fine for SOCK_SEQPACKET (unless we want to support
		 * segmented records using the MSG_EOR flag), but
		 * for SOCK_STREAM we might want to improve it in future */
		if (!(skb = sock_alloc_send_skb(sk, len,
						msg->msg_flags & MSG_DONTWAIT,
						&err)))
			goto out;

		if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
			err = -EFAULT;
			goto fail;
		}

		/* increment and save iucv message tag for msg_completion cbk */
		txmsg.tag = iucv->send_tag++;
		memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
		skb_queue_tail(&iucv->send_skb_q, skb);

		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
		    && skb->len <= 7) {
			err = iucv_send_iprm(iucv->path, &txmsg, skb);

			/* on success: there is no message_complete callback
			 * for an IPRMDATA msg; remove skb from send queue */
			if (err == 0) {
				skb_unlink(skb, &iucv->send_skb_q);
				kfree_skb(skb);
			}

			/* this error should never happen since the
			 * IUCV_IPRMDATA path flag is set... sever path */
			if (err == 0x15) {
				iucv_path_sever(iucv->path, NULL);
				skb_unlink(skb, &iucv->send_skb_q);
				err = -EPIPE;
				goto fail;
			}
		} else {
			err = iucv_message_send(iucv->path, &txmsg, 0, 0,
						(void *) skb->data, skb->len);
			if (err) {
				if (err == 3) {
					user_id[8] = 0;
					memcpy(user_id, iucv->dst_user_id, 8);
					appl_id[8] = 0;
					memcpy(appl_id, iucv->dst_name, 8);
					pr_err("Application %s on z/VM guest %s"
					       " exceeds message limit\n",
					       appl_id, user_id);
				}
				skb_unlink(skb, &iucv->send_skb_q);
				err = -EPIPE;
				goto fail;
			}
		}
	} else {
		err = -ENOTCONN;
		goto out;
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}

static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		/* copy target class to control buffer of new skb */
		memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);

		/* copy data fragment */
		memcpy(nskb->data, skb->data + copied, size);

		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}
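/* Example: with sk->sk_rcvbuf == 64KB (quarter = 16KB), a 40KB message is
 * queued as three backlog skbs of 16KB, 16KB and 8KB. */
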
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA,
					  skb->data, len, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		/* we need to fragment iucv messages for SOCK_STREAM only;
		 * for SOCK_SEQPACKET, it is only relevant if we support
		 * record segmentation using MSG_EOR (see also recvmsg()) */
		if (sk->sk_type == SOCK_STREAM &&
		    skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, len);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				iucv_path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = len;
		}
	}

	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}

static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}

static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	rlen   = skb->len;		/* real length of skb */
	copied = min_t(unsigned int, rlen, len);

	cskb = skb;
	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       CB_TRGCLS_LEN, CB_TRGCLS(skb));
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			skb_pull(skb, copied);
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		kfree_skb(skb);

		/* Queue backlog skbs */
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
					       rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv->backlog_skb_q);
			}
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			spin_lock_bh(&iucv->message_q.lock);
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			spin_unlock_bh(&iucv->message_q.lock);
		}
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}

static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	poll_wait(file, sk->sk_sleep, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
		mask |= POLLIN;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;
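
	/* Note: the how++ above maps the shutdown(2) constants SHUT_RD (0),
	 * SHUT_WR (1) and SHUT_RDWR (2) onto the masks RCV_SHUTDOWN (1),
	 * SEND_SHUTDOWN (2) and SHUTDOWN_MASK (3) used throughout this file. */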

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_SEVERED:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;

	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;
		err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
					(void *) iprm_shutdown, 8);
		if (err) {
			switch (err) {
			case 1:
				err = -ENOTCONN;
				break;
			case 2:
				err = -ECONNRESET;
				break;
			default:
				err = -ENOTCONN;
				break;
			}
		}
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
		if (err)
			err = -ENOTCONN;

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		iucv_path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			if (val < 1 || val > (u16)(~0))
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}

static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val, len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
	if (!nsk) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		iucv_message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += iucv_msg_length(msg) + sizeof(struct sk_buff);
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		return;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}

static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		while (list_skb != (struct sk_buff *)list) {
			if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		kfree_skb(this);
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (!list_empty(&iucv_sk(sk)->accept_q))
		sk->sk_state = IUCV_SEVERED;
	else
		sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
}

/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}

static struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};

static struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static int __init afiucv_init(void)
{
	int err;

	if (!MACHINE_IS_VM) {
		pr_err("The af_iucv module cannot be loaded"
		       " without z/VM\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}
	cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
	if (unlikely(err)) {
		WARN_ON(err);
		err = -EPROTONOSUPPORT;
		goto out;
	}

	err = iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out_iucv;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;
	return 0;

out_proto:
	proto_unregister(&iucv_proto);
out_iucv:
	iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}

static void __exit afiucv_exit(void)
{
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
	iucv_unregister(&af_iucv_handler, 0);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);