/*
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright IBM Corp. 2006, 2009
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *		PM functions:
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */
#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>

#define VERSION "1.1"
static char iucv_userid[80];

static struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};
/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))
/* macros to set/get socket control buffer at correct offset */
#define CB_TAG(skb)	((skb)->cb)		/* iucv message tag */
#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag))
#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
#define CB_TRGCLS_LEN	(TRGCLS_SIZE)
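
/*
 * Illustration (not part of the original source): the per-skb control
 * buffer is laid out as
 *
 *	skb->cb:  [ msg tag (CB_TAG_LEN) | msg target class (CB_TRGCLS_LEN) ]
 *	           ^ CB_TAG(skb)           ^ CB_TRGCLS(skb)
 *
 * so the send path stamps the tag with
 *	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
 * and the receive path stores the target class with
 *	memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);
 * (both calls appear further down in this file).
 */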
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);
static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};
static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};
static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}
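
/*
 * Illustration (not part of the original source): IUCV user data is a
 * 16-byte field holding two 8-byte, blank-padded application names.
 * high_nmcpy() fills bytes 0..7, low_nmcpy() fills bytes 8..15; e.g.
 * the connect path builds the field as
 *
 *	high_nmcpy(user_data, sa->siucv_name);		// peer name, 0..7
 *	low_nmcpy(user_data, iucv_sk(sk)->src_name);	// own name,  8..15
 *	ASCEBC(user_data, sizeof(user_data));		// ASCII -> EBCDIC
 */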
static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
	return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
	return;
}
/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:	AFIUCV dummy device
 *
 * Sever all established IUCV communication paths.
 */
static int afiucv_pm_freeze(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;
	struct hlist_node *node;
	int err = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		switch (sk->sk_state) {
		case IUCV_SEVERED:
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_CONNECTED:
			if (iucv->path) {
				err = iucv_path_sever(iucv->path, NULL);
				iucv_path_free(iucv->path);
				iucv->path = NULL;
			}
			break;
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return err;
}
/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	AFIUCV dummy device
 *
 * Socket clean up after freeze.
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
	struct sock *sk;
	struct hlist_node *node;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		switch (sk->sk_state) {
		case IUCV_CONNECTED:
			sk->sk_err = EPIPE;
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
			break;
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}
static struct dev_pm_ops afiucv_pm_ops = {
	.prepare = afiucv_pm_prepare,
	.complete = afiucv_pm_complete,
	.freeze = afiucv_pm_freeze,
	.thaw = afiucv_pm_restore_thaw,
	.restore = afiucv_pm_restore_thaw,
};
static struct device_driver af_iucv_driver = {
	.owner = THIS_MODULE,
	.name = "afiucv",
	.bus  = &iucv_bus,
	.pm   = &afiucv_pm_ops,
};
/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;
/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xff.
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further, if the socket
 * data len is > 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}
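
/*
 * Worked example (not part of the original source): for a 5-byte IPRM
 * payload the sender stores 0xff - 5 = 0xfa in PRMDATA[7], so this
 * function computes datalen = 0xff - 0xfa = 5.  For the special
 * iprm_shutdown message above, PRMDATA[7] is 0x01, giving
 * datalen = 0xff - 0x01 = 0xfe; that is not < 8, so the function
 * returns 8 and the receive path can match the full 8-byte pattern.
 */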
static void iucv_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	bh_lock_sock(sk);
	sk->sk_err = ETIMEDOUT;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);

	sock_put(sk);
}
static void iucv_sock_clear_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}
static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;
	unsigned long timeo;

	iucv_sock_clear_timer(sk);
	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
	case IUCV_DISCONN:
		err = 0;

		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
		}

	case IUCV_CLOSING:   /* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			err = iucv_path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}

	release_sock(sk);
	iucv_sock_kill(sk);
}
static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
	INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
	spin_lock_init(&iucv_sk(sk)->message_q.lock);
	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
	iucv_sk(sk)->send_tag = 0;
	iucv_sk(sk)->flags = 0;
	iucv_sk(sk)->msglimit = IUCV_QUEUELEN_DEFAULT;
	iucv_sk(sk)->path = NULL;
	memset(&iucv_sk(sk)->src_user_id, 0, 32);

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}
/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}
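
/*
 * Usage sketch from user space (illustrative only, error handling
 * omitted):
 *
 *	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *
 * SOCK_SEQPACKET sockets are created the same way; as noted above, both
 * types are currently served by the same iucv_sock_ops.
 */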
void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}
void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}
void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}
void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent->sk_ack_backlog--;
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}
struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}
int iucv_sock_wait_state(struct sock *sk, int state, int state2,
			 unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	add_wait_queue(sk->sk_sleep, &wait);
	while (sk->sk_state != state && sk->sk_state != state2) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return err;
}
/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err;

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}

	/* Bind the socket */
	memcpy(iucv->src_name, sa->siucv_name, 8);

	/* Copy the user id */
	memcpy(iucv->src_user_id, iucv_userid, 8);
	sk->sk_state = IUCV_BOUND;
	err = 0;

done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}
/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* Set the userid and name */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
	if (unlikely(err))
		return -EPROTO;

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	return err;
}
/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	unsigned char user_data[16];
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv_sk(sk)->src_name);
	ASCEBC(user_data, sizeof(user_data));

	iucv = iucv_sk(sk);
	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = iucv_path_connect(iucv->path, &af_iucv_handler,
				sa->siucv_user_id, NULL, user_data, sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
		goto done;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
					   sock_sndtimeo(sk, flags & O_NONBLOCK));
	}

	if (sk->sk_state == IUCV_DISCONN)
		err = -ECONNREFUSED;

	if (err) {
		iucv_path_sever(iucv->path, NULL);
		iucv_path_free(iucv->path);
		iucv->path = NULL;
	}

done:
	release_sock(sk);
	return err;
}
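
/*
 * Usage sketch from user space (illustrative only): addresses are
 * 8-character, blank-padded names; the user id "VMGUEST1" and the
 * application name "LNXSRV  " below are hypothetical:
 *
 *	struct sockaddr_iucv addr;
 *	memset(&addr, 0, sizeof(addr));
 *	addr.siucv_family = AF_IUCV;
 *	memcpy(addr.siucv_user_id, "VMGUEST1", 8);
 *	memcpy(addr.siucv_name, "LNXSRV  ", 8);
 *	connect(fd, (struct sockaddr *) &addr, sizeof(addr));
 *
 * If the socket is still unbound, iucv_sock_autobind() above first
 * assigns a generated eight-digit hex name.
 */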
/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}
/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
		memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}
/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return iucv_message_send(path, msg, IUCV_IPRMDATA, 0,
				 (void *) prmdata, 8);
}
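
/*
 * Illustration (not part of the original source): for skb->data = "HI"
 * (skb->len = 2) the parameter list becomes
 *
 *	prmdata[0..1] = 'H', 'I'
 *	prmdata[2..6] = residual stack bytes (ignored by the receiver)
 *	prmdata[7]    = 0xff - 2 = 0xfd
 *
 * and the receiver recovers the length 2 via iucv_msg_length().
 */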
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	struct cmsghdr *cmsg;
	int cmsg_done;
	char user_id[9];
	char appl_id[9];
	int err;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	if (sk->sk_state == IUCV_CONNECTED) {
		/* initialize defaults */
		cmsg_done   = 0;	/* check for duplicate headers */
		txmsg.class = 0;

		/* iterate over control messages */
		for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
		     cmsg = CMSG_NXTHDR(msg, cmsg)) {

			if (!CMSG_OK(msg, cmsg)) {
				err = -EINVAL;
				goto out;
			}

			if (cmsg->cmsg_level != SOL_IUCV)
				continue;

			if (cmsg->cmsg_type & cmsg_done) {
				err = -EINVAL;
				goto out;
			}
			cmsg_done |= cmsg->cmsg_type;

			switch (cmsg->cmsg_type) {
			case SCM_IUCV_TRGCLS:
				if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
					err = -EINVAL;
					goto out;
				}

				/* set iucv message target class */
				memcpy(&txmsg.class,
				       (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
				break;

			default:
				err = -EINVAL;
				goto out;
			}
		}

		/* allocate one skb for each iucv message:
		 * this is fine for SOCK_SEQPACKET (unless we want to support
		 * segmented records using the MSG_EOR flag), but
		 * for SOCK_STREAM we might want to improve it in future */
		if (!(skb = sock_alloc_send_skb(sk, len,
						msg->msg_flags & MSG_DONTWAIT,
						&err)))
			goto out;

		if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
			err = -EFAULT;
			goto fail;
		}

		/* increment and save iucv message tag for msg_completion cbk */
		txmsg.tag = iucv->send_tag++;
		memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
		skb_queue_tail(&iucv->send_skb_q, skb);

		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
		    && skb->len <= 7) {
			err = iucv_send_iprm(iucv->path, &txmsg, skb);

			/* on success: there is no message_complete callback
			 * for an IPRMDATA msg; remove skb from send queue */
			if (err == 0) {
				skb_unlink(skb, &iucv->send_skb_q);
				kfree_skb(skb);
			}

			/* this error should never happen since the
			 * IUCV_IPRMDATA path flag is set... sever path */
			if (err == 0x15) {
				iucv_path_sever(iucv->path, NULL);
				skb_unlink(skb, &iucv->send_skb_q);
				err = -EPIPE;
				goto fail;
			}
		} else {
			err = iucv_message_send(iucv->path, &txmsg, 0, 0,
						(void *) skb->data, skb->len);
			if (err) {
				if (err == 3) {
					user_id[8] = 0;
					memcpy(user_id, iucv->dst_user_id, 8);
					appl_id[8] = 0;
					memcpy(appl_id, iucv->dst_name, 8);
					pr_err("Application %s on z/VM guest %s"
					       " exceeds message limit\n",
					       appl_id, user_id);
				}
				skb_unlink(skb, &iucv->send_skb_q);
				err = -EPIPE;
				goto fail;
			}
		}
	} else {
		err = -ENOTCONN;
		goto out;
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}
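
/*
 * Usage sketch from user space (illustrative only): a target class is
 * passed as ancillary data; the class value 42 is hypothetical:
 *
 *	char ctl[CMSG_SPACE(sizeof(__u32))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = buflen };
 *	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1,
 *			     .msg_control = ctl,
 *			     .msg_controllen = sizeof(ctl) };
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);
 *	__u32 trgcls = 42;
 *
 *	cm->cmsg_level = SOL_IUCV;
 *	cm->cmsg_type = SCM_IUCV_TRGCLS;
 *	cm->cmsg_len = CMSG_LEN(sizeof(trgcls));
 *	memcpy(CMSG_DATA(cm), &trgcls, sizeof(trgcls));
 *	sendmsg(fd, &mh, 0);
 *
 * Note that cmsg_len must be exactly CMSG_LEN(TRGCLS_SIZE), as checked
 * above.
 */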
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		/* copy target class to control buffer of new skb */
		memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);

		/* copy data fragment */
		memcpy(nskb->data, skb->data + copied, size);

		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA,
					  skb->data, len, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		/* we need to fragment iucv messages for SOCK_STREAM only;
		 * for SOCK_SEQPACKET, it is only relevant if we support
		 * record segmentation using MSG_EOR (see also recvmsg()) */
		if (sk->sk_type == SOCK_STREAM &&
		    skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, len);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				iucv_path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = len;
		}
	}

	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}
static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	rlen   = skb->len;		/* real length of skb */
	copied = min_t(unsigned int, rlen, len);

	cskb = skb;
	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       CB_TRGCLS_LEN, CB_TRGCLS(skb));
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			skb_pull(skb, copied);
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		kfree_skb(skb);

		/* Queue backlog skbs */
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
					       rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv->backlog_skb_q);
			}
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			spin_lock_bh(&iucv->message_q.lock);
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			spin_unlock_bh(&iucv->message_q.lock);
		}
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}
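
/*
 * Usage sketch from user space (illustrative only): the target class of
 * a received message comes back as ancillary data:
 *
 *	for (cm = CMSG_FIRSTHDR(&mh); cm; cm = CMSG_NXTHDR(&mh, cm))
 *		if (cm->cmsg_level == SOL_IUCV &&
 *		    cm->cmsg_type == SCM_IUCV_TRGCLS)
 *			memcpy(&trgcls, CMSG_DATA(cm), sizeof(trgcls));
 *
 * On SOCK_SEQPACKET sockets, MSG_TRUNC in the returned msg_flags marks
 * a truncated record; a caller that passes MSG_TRUNC in flags gets the
 * real record length as the return value (see above).
 */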
static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}
unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	poll_wait(file, sk->sk_sleep, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
		mask |= POLLIN;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_SEVERED:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;

	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;
		err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
					(void *) iprm_shutdown, 8);
		if (err) {
			switch (err) {
			case 1:
				err = -ENOTCONN;
				break;
			case 2:
				err = -ECONNRESET;
				break;
			default:
				err = -ENOTCONN;
				break;
			}
		}
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
		if (err)
			err = -ENOTCONN;

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}
static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		iucv_path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}
1319 static int iucv_sock_setsockopt(struct socket
*sock
, int level
, int optname
,
1320 char __user
*optval
, int optlen
)
1322 struct sock
*sk
= sock
->sk
;
1323 struct iucv_sock
*iucv
= iucv_sk(sk
);
1327 if (level
!= SOL_IUCV
)
1328 return -ENOPROTOOPT
;
1330 if (optlen
< sizeof(int))
1333 if (get_user(val
, (int __user
*) optval
))
1340 case SO_IPRMDATA_MSG
:
1342 iucv
->flags
|= IUCV_IPRMDATA
;
1344 iucv
->flags
&= ~IUCV_IPRMDATA
;
1347 switch (sk
->sk_state
) {
1350 if (val
< 1 || val
> (u16
)(~0))
1353 iucv
->msglimit
= val
;
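
/*
 * Usage sketch from user space (illustrative only): both options take
 * an int; SO_MSGLIMIT is only accepted while the socket is in state
 * IUCV_OPEN or IUCV_BOUND, as checked above:
 *
 *	int val = 1;
 *	setsockopt(fd, SOL_IUCV, SO_IPRMDATA_MSG, &val, sizeof(val));
 *	val = 10;
 *	setsockopt(fd, SOL_IUCV, SO_MSGLIMIT, &val, sizeof(val));
 */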
static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val, len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
	if (!nsk) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0; /* success */
}
static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		iucv_message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += iucv_msg_length(msg) + sizeof(struct sk_buff);
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}
static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		while (list_skb != (struct sk_buff *)list) {
			if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		kfree_skb(this);
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}
static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (!list_empty(&iucv_sk(sk)->accept_q))
		sk->sk_state = IUCV_SEVERED;
	else
		sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
}
/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}
static struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};
static struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};
static int __init afiucv_init(void)
{
	int err;

	if (!MACHINE_IS_VM) {
		pr_err("The af_iucv module cannot be loaded"
		       " without z/VM\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}
	cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
	if (unlikely(err)) {
		err = -EPROTONOSUPPORT;
		goto out;
	}

	err = iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out_iucv;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;
	/* establish dummy device */
	err = driver_register(&af_iucv_driver);
	if (err)
		goto out_sock;
	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!af_iucv_dev) {
		err = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(af_iucv_dev, "af_iucv");
	af_iucv_dev->bus  = &iucv_bus;
	af_iucv_dev->parent = iucv_root;
	af_iucv_dev->release = (void (*)(struct device *))kfree;
	af_iucv_dev->driver = &af_iucv_driver;
	err = device_register(af_iucv_dev);
	if (err)
		goto out_driver;

	return 0;

out_driver:
	driver_unregister(&af_iucv_driver);
out_sock:
	sock_unregister(PF_IUCV);
out_proto:
	proto_unregister(&iucv_proto);
out_iucv:
	iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}
static void __exit afiucv_exit(void)
{
	device_unregister(af_iucv_dev);
	driver_unregister(&af_iucv_driver);
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
	iucv_unregister(&af_iucv_handler, 0);
}
module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);