/*
 * IUCV protocol stack for Linux on zSeries
 *
 * Copyright IBM Corp. 2006, 2009
 *
 * Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>
#define VERSION "1.1"

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
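/*
 * Note (added for clarity): iprm_shutdown carries 0x01 in PRMDATA[7], so
 * iucv_msg_length() computes a raw data length of 0xff - 0x01 = 0xfe and
 * caps the result at 8; the "length > 7" test in iucv_process_message()
 * then classifies the message as a control notification rather than as
 * socket data.
 */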
#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))

/* macros to set/get socket control buffer at correct offset */
#define CB_TAG(skb)	((skb)->cb)		/* iucv message tag */
#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag))
#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
#define CB_TRGCLS_LEN	(TRGCLS_SIZE)
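/*
 * Illustrative sketch (not part of the original source), assuming the tag
 * and class members of struct iucv_message are 4-byte fields: the first
 * eight bytes of skb->cb are laid out as
 *
 *	offset 0..3:	iucv message tag	(CB_TAG)
 *	offset 4..7:	iucv msg target class	(CB_TRGCLS)
 *
 * so storing and reading back a target class looks like:
 *
 *	u32 trgcls = 42;
 *	memcpy(CB_TRGCLS(skb), &trgcls, CB_TRGCLS_LEN);
 *	...
 *	memcpy(&trgcls, CB_TRGCLS(skb), CB_TRGCLS_LEN);
 */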
#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk->sk_sleep, &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})
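/*
 * Usage sketch (mirrors the call in iucv_sock_sendmsg() below, not extra
 * code): callers sleep on an arbitrary condition with the socket locked,
 *
 *	timeo = sock_sndtimeo(sk, noblock);
 *	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
 *
 * The macro re-evaluates the condition after every wakeup and returns 0 on
 * success, -EAGAIN on timeout, sock_intr_errno() if interrupted, or a
 * pending socket error.
 */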
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);
static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};
static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
	return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}
/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:	AFIUCV dummy device
 *
 * Sever all established IUCV communication paths
 */
static int afiucv_pm_freeze(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;
	struct hlist_node *node;
	int err = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		switch (sk->sk_state) {
		case IUCV_SEVERED:
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_CONNECTED:
			if (iucv->path) {
				err = iucv_path_sever(iucv->path, NULL);
				iucv_path_free(iucv->path);
				iucv->path = NULL;
			}
			break;
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return err;
}
/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	AFIUCV dummy device
 *
 * Socket clean up after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;
	struct hlist_node *node;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		switch (sk->sk_state) {
		case IUCV_CONNECTED:
			sk->sk_err = EPIPE;
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
			break;
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}
static const struct dev_pm_ops afiucv_pm_ops = {
	.prepare = afiucv_pm_prepare,
	.complete = afiucv_pm_complete,
	.freeze = afiucv_pm_freeze,
	.thaw = afiucv_pm_restore_thaw,
	.restore = afiucv_pm_restore_thaw,
};

static struct device_driver af_iucv_driver = {
	.owner = THIS_MODULE,
	.name = "afiucv",
	.bus  = &iucv_bus,
	.pm   = &afiucv_pm_ops,
};

/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;
/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further,
 * if the socket data len is > 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}
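/*
 * Worked example (added for illustration): a 5-byte IPRM payload is sent
 * with PRMDATA[7] = 0xff - 5 = 0xfa, so the receiver computes
 * 0xff - 0xfa = 5 and iucv_msg_length() returns 5. For iprm_shutdown,
 * PRMDATA[7] is 0x01, the difference is 0xfe, and the result is capped
 * at 8, which is how a special notification shows up as "len > 7".
 */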
/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}
/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
}
/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (sk_has_sleeper(sk))
		wake_up_interruptible_all(sk->sk_sleep);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	read_unlock(&sk->sk_callback_lock);
}
static void iucv_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	bh_lock_sock(sk);
	sk->sk_err = ETIMEDOUT;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);

	iucv_sock_kill(sk);
	sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}
static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}
/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;
	unsigned long timeo;

	iucv_sock_clear_timer(sk);
	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
	case IUCV_DISCONN:
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			err = iucv_sock_wait(sk,
					iucv_sock_in_state(sk, IUCV_CLOSED, 0),
					timeo);
		}

	case IUCV_CLOSING:	/* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			err = iucv_path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		break;

	default:
		/* nothing to do here */
		break;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}
static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
	INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
	spin_lock_init(&iucv_sk(sk)->message_q.lock);
	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
	iucv_sk(sk)->send_tag = 0;
	iucv_sk(sk)->flags = 0;
	iucv_sk(sk)->msglimit = IUCV_QUEUELEN_DEFAULT;
	iucv_sk(sk)->path = NULL;
	memset(&iucv_sk(sk)->src_user_id, 0, 32);

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}
/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}
void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}
void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}
struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    sk->sk_state == IUCV_DISCONN ||	/* due to PM restore */
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}
/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err;

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}

	/* Bind the socket */
	memcpy(iucv->src_name, sa->siucv_name, 8);

	/* Copy the user id */
	memcpy(iucv->src_user_id, iucv_userid, 8);
	sk->sk_state = IUCV_BOUND;
	err = 0;

done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}
/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* Set the userid and name */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
	if (unlikely(err))
		return -EPROTO;

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	return err;
}
/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	unsigned char user_data[16];
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv_sk(sk)->src_name);
	ASCEBC(user_data, sizeof(user_data));

	iucv = iucv_sk(sk);
	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = iucv_path_connect(iucv->path, &af_iucv_handler,
				sa->siucv_user_id, NULL, user_data, sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
		goto done;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));
	}

	if (sk->sk_state == IUCV_DISCONN)
		err = -ECONNREFUSED;

	if (err) {
		iucv_path_sever(iucv->path, NULL);
		iucv_path_free(iucv->path);
		iucv->path = NULL;
	}

done:
	release_sock(sk);
	return err;
}
/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}
/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
		memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}
/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return iucv_message_send(path, msg, IUCV_IPRMDATA, 0,
				 (void *) prmdata, 8);
}
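/*
 * Illustrative call site (see iucv_sock_sendmsg() below; not extra code):
 * IPRM transfer is attempted only if both the path and the socket have
 * IUCV_IPRMDATA set and the payload fits into the parameter list:
 *
 *	if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
 *	    && skb->len <= 7)
 *		err = iucv_send_iprm(iucv->path, &txmsg, skb);
 */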
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done   = 0;	/* check for duplicate headers */
	txmsg.class = 0;

	/* iterate over control messages */
	for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
	     cmsg = CMSG_NXTHDR(msg, cmsg)) {

		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
			       (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
			break;

		default:
			err = -EINVAL;
			goto out;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	skb = sock_alloc_send_skb(sk, len, noblock, &err);
	if (!skb)
		goto out;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto fail;
	}

	/* wait if outstanding messages for iucv path has reached */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
	skb_queue_tail(&iucv->send_skb_q, skb);

	if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
	    && skb->len <= 7) {
		err = iucv_send_iprm(iucv->path, &txmsg, skb);

		/* on success: there is no message_complete callback
		 * for an IPRMDATA msg; remove skb from send queue */
		if (err == 0) {
			skb_unlink(skb, &iucv->send_skb_q);
			kfree_skb(skb);
		}

		/* this error should never happen since the
		 * IUCV_IPRMDATA path flag is set... sever path */
		if (err == 0x15) {
			iucv_path_sever(iucv->path, NULL);
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}
	} else {
		err = iucv_message_send(iucv->path, &txmsg, 0, 0,
					(void *) skb->data, skb->len);
		if (err) {
			if (err == 3) {
				user_id[8] = 0;
				memcpy(user_id, iucv->dst_user_id, 8);
				appl_id[8] = 0;
				memcpy(appl_id, iucv->dst_name, 8);
				pr_err("Application %s on z/VM guest %s"
				       " exceeds message limit\n",
				       appl_id, user_id);
				err = -EAGAIN;
			} else
				err = -EPIPE;
			skb_unlink(skb, &iucv->send_skb_q);
			goto fail;
		}
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}
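/*
 * Userspace sketch (illustrative only, not part of this module): a sender
 * selects the iucv target class for one message via a SCM_IUCV_TRGCLS
 * ancillary data item, which the cmsg loop above copies into txmsg.class:
 *
 *	char control[CMSG_SPACE(sizeof(unsigned int))];
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = control,
 *			      .msg_controllen = sizeof(control) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *	unsigned int trgcls = 1;
 *
 *	cmsg->cmsg_level = SOL_IUCV;
 *	cmsg->cmsg_type  = SCM_IUCV_TRGCLS;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(trgcls));
 *	memcpy(CMSG_DATA(cmsg), &trgcls, sizeof(trgcls));
 *	sendmsg(sockfd, &msg, 0);
 */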
/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
 *
 * Locking: must be called with message_q.lock held
 */
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		/* copy target class to control buffer of new skb */
		memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);

		/* copy data fragment */
		memcpy(nskb->data, skb->data + copied, size);

		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}
/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA,
					  skb->data, len, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		/* we need to fragment iucv messages for SOCK_STREAM only;
		 * for SOCK_SEQPACKET, it is only relevant if we support
		 * record segmentation using MSG_EOR (see also recvmsg()) */
		if (sk->sk_type == SOCK_STREAM &&
		    skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, len);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				iucv_path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = len;
		}
	}

	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}
/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}
static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	rlen   = skb->len;		/* real length of skb */
	copied = min_t(unsigned int, rlen, len);

	cskb = skb;
	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       CB_TRGCLS_LEN, CB_TRGCLS(skb));
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			skb_pull(skb, copied);
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		kfree_skb(skb);

		/* Queue backlog skbs */
		spin_lock_bh(&iucv->message_q.lock);
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
					       rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv->backlog_skb_q);
			}
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
		}
		spin_unlock_bh(&iucv->message_q.lock);
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return err ? : copied;
}
static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}
unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	sock_poll_wait(file, sk->sk_sleep, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
		mask |= POLLIN;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_SEVERED:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;

	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;

		err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
					(void *) iprm_shutdown, 8);
		if (err) {
			switch (err) {
			case 1:
				err = -ENOTCONN;
				break;
			case 2:
				err = -ECONNRESET;
				break;
			default:
				err = -ENOTCONN;
				break;
			}
		}
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
		if (err)
			err = -ENOTCONN;

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}
static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		iucv_path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}
/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			if (val < 1 || val > (u16)(~0))
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}
static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val, len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
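/*
 * Userspace sketch (illustrative only): SO_MSGLIMIT can only be changed
 * while the socket is still open or bound; once connected, getsockopt()
 * reports the message limit negotiated for the path:
 *
 *	int limit = 10;
 *	setsockopt(sockfd, SOL_IUCV, SO_MSGLIMIT, &limit, sizeof(limit));
 *	...
 *	socklen_t len = sizeof(limit);
 *	getsockopt(sockfd, SOL_IUCV, SO_MSGLIMIT, &limit, &len);
 */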
/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
	if (!nsk) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}
static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		iucv_message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += iucv_msg_length(msg) + sizeof(struct sk_buff);
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}
static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		while (list_skb != (struct sk_buff *)list) {
			if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		if (this) {
			kfree_skb(this);
			/* wake up any process waiting for sending */
			iucv_sock_wake_msglim(sk);
		}
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}
static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (!list_empty(&iucv_sk(sk)->accept_q))
		sk->sk_state = IUCV_SEVERED;
	else
		sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
}
/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}
static const struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};

static const struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};
static int __init afiucv_init(void)
{
	int err;

	if (!MACHINE_IS_VM) {
		pr_err("The af_iucv module cannot be loaded"
		       " without z/VM\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}
	cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
	if (unlikely(err)) {
		WARN_ON(err);
		err = -EPROTONOSUPPORT;
		goto out;
	}

	err = iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out_iucv;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;
	/* establish dummy device */
	err = driver_register(&af_iucv_driver);
	if (err)
		goto out_sock;
	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!af_iucv_dev) {
		err = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(af_iucv_dev, "af_iucv");
	af_iucv_dev->bus = &iucv_bus;
	af_iucv_dev->parent = iucv_root;
	af_iucv_dev->release = (void (*)(struct device *))kfree;
	af_iucv_dev->driver = &af_iucv_driver;
	err = device_register(af_iucv_dev);
	if (err)
		goto out_driver;

	return 0;

out_driver:
	driver_unregister(&af_iucv_driver);
out_sock:
	sock_unregister(PF_IUCV);
out_proto:
	proto_unregister(&iucv_proto);
out_iucv:
	iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}
static void __exit afiucv_exit(void)
{
	device_unregister(af_iucv_dev);
	driver_unregister(&af_iucv_driver);
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
	iucv_unregister(&af_iucv_handler, 0);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);