/*
 * IUCV protocol stack for Linux on zSeries
 *
 * Copyright IBM Corp. 2006, 2009
 *
 * Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 * PM functions:
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>

#define VERSION "1.2"

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

static struct iucv_interface *pr_iucv;

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))

#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})

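/* Usage sketch for the two macros above (illustrative, mirroring how they
 * are invoked later in this file, e.g. in iucv_sock_sendmsg()):
 *
 *	timeo = sock_sndtimeo(sk, noblock);
 *	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
 *
 * The caller sleeps interruptibly until the condition holds, a signal
 * arrives, or the timeout expires; the statement expression yields 0 or a
 * negative error code.
 */
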
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
static void iucv_sever_path(struct sock *, int);

static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *pt, struct net_device *orig_dev);
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
			  struct sk_buff *skb, u8 flags);
static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 *);
static int iucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
static void iucv_callback_connrej(struct iucv_path *, u8 *);
static void iucv_callback_shutdown(struct iucv_path *, u8 *);

static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
	return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}

/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:	AFIUCV dummy device
 *
 * Sever all established IUCV communication paths
 */
static int afiucv_pm_freeze(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		switch (sk->sk_state) {
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_CONNECTED:
			iucv_sever_path(sk, 0);
			break;
		default:
			break;
		}
		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}

/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	AFIUCV dummy device
 *
 * socket clean up after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
	struct sock *sk;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		switch (sk->sk_state) {
		case IUCV_CONNECTED:
			sk->sk_err = EPIPE;
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
			break;
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}

static const struct dev_pm_ops afiucv_pm_ops = {
	.prepare = afiucv_pm_prepare,
	.complete = afiucv_pm_complete,
	.freeze = afiucv_pm_freeze,
	.thaw = afiucv_pm_restore_thaw,
	.restore = afiucv_pm_restore_thaw,
};

static struct device_driver af_iucv_driver = {
	.owner = THIS_MODULE,
	.name = "afiucv",
	.bus  = NULL,
	.pm   = &afiucv_pm_ops,
};

/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xff.
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further,
 * if the socket data len is > 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}

/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}

/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	if (iucv->transport == AF_IUCV_TRANS_IUCV)
		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
	else
		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
			(atomic_read(&iucv->pendings) <= 0));
}

/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
		   struct sk_buff *skb, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sock);
	struct af_iucv_trans_hdr *phs_hdr;
	struct sk_buff *nskb;
	int err, confirm_recv = 0;

	memset(skb->head, 0, ETH_HLEN);
	phs_hdr = skb_push(skb, sizeof(struct af_iucv_trans_hdr));
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));

	phs_hdr->magic = ETH_P_AF_IUCV;
	phs_hdr->version = 1;
	phs_hdr->flags = flags;
	if (flags == AF_IUCV_FLAG_SYN)
		phs_hdr->window = iucv->msglimit;
	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
		confirm_recv = atomic_read(&iucv->msg_recv);
		phs_hdr->window = confirm_recv;
		if (confirm_recv)
			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
	}
	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
	if (imsg)
		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

	skb->dev = iucv->hs_dev;
	if (!skb->dev) {
		err = -ENODEV;
		goto err_free;
	}
	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
		err = -ENETDOWN;
		goto err_free;
	}
	if (skb->len > skb->dev->mtu) {
		if (sock->sk_type == SOCK_SEQPACKET) {
			err = -EMSGSIZE;
			goto err_free;
		}
		skb_trim(skb, skb->dev->mtu);
	}
	skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb) {
		err = -ENOMEM;
		goto err_free;
	}
	skb_queue_tail(&iucv->send_skb_q, nskb);
	err = dev_queue_xmit(skb);
	if (net_xmit_eval(err)) {
		skb_unlink(nskb, &iucv->send_skb_q);
		kfree_skb(nskb);
	} else {
		atomic_sub(confirm_recv, &iucv->msg_recv);
		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
	}
	return net_xmit_eval(err);

err_free:
	kfree_skb(skb);
	return err;
}

static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;

	sk_for_each(sk, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_error_queue);

	sk_mem_reclaim(sk);

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive iucv socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(sk->sk_wmem_queued);
	WARN_ON(sk->sk_forward_alloc);
}

static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}

/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_path *path = iucv->path;

	if (iucv->path) {
		iucv->path = NULL;
		if (with_user_data) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			pr_iucv->path_sever(path, user_data);
		} else
			pr_iucv->path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/* Send controlling flags through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
	int err = 0;
	int blen;
	struct sk_buff *skb;
	u8 shutdown = 0;

	blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		/* controlling flags should be sent anyway */
		shutdown = sk->sk_shutdown;
		sk->sk_shutdown &= RCV_SHUTDOWN;
	}

	skb = sock_alloc_send_skb(sk, blen, 1, &err);
	if (skb) {
		skb_reserve(skb, blen);
		err = afiucv_hs_send(NULL, sk, skb, flags);
	}
	if (shutdown)
		sk->sk_shutdown = shutdown;
	return err;
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned long timeo;
	int err = 0;

	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
	case IUCV_DISCONN:	/* fall through */
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			iucv_sock_wait(sk,
					iucv_sock_in_state(sk, IUCV_CLOSED, 0),
					timeo);
		}

	case IUCV_CLOSING:	/* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

	default:	/* fall through */
		iucv_sever_path(sk, 1);
	}

	if (iucv->hs_dev) {
		dev_put(iucv->hs_dev);
		iucv->hs_dev = NULL;
		sk->sk_bound_dev_if = 0;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent) {
		sk->sk_type = parent->sk_type;
		security_sk_clone(parent, sk);
	}
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern)
{
	struct sock *sk;
	struct iucv_sock *iucv;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
	if (!sk)
		return NULL;
	iucv = iucv_sk(sk);

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv->accept_q);
	spin_lock_init(&iucv->accept_q_lock);
	skb_queue_head_init(&iucv->send_skb_q);
	INIT_LIST_HEAD(&iucv->message_q.list);
	spin_lock_init(&iucv->message_q.lock);
	skb_queue_head_init(&iucv->backlog_skb_q);
	iucv->send_tag = 0;
	atomic_set(&iucv->pendings, 0);
	iucv->flags = 0;
	iucv->msglimit = 0;
	atomic_set(&iucv->msg_sent, 0);
	atomic_set(&iucv->msg_recv, 0);
	iucv->path = NULL;
	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
	memset(&iucv->src_user_id, 0, 32);
	if (pr_iucv)
		iucv->transport = AF_IUCV_TRANS_IUCV;
	else
		iucv->transport = AF_IUCV_TRANS_HIPER;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state	= IUCV_OPEN;

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_DISCONN ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

static void __iucv_auto_name(struct iucv_sock *iucv)
{
	char name[12];

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}
	memcpy(iucv->src_name, name, 8);
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err = 0;
	struct net_device *dev;
	char uid[9];

	/* Verify the input sockaddr */
	if (addr_len < sizeof(struct sockaddr_iucv) ||
	    addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path)
		goto done_unlock;

	/* Bind the socket */
	if (pr_iucv)
		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
			goto vm_bind; /* VM IUCV transport */

	/* try hiper transport */
	memcpy(uid, sa->siucv_user_id, sizeof(uid));
	ASCEBC(uid, 8);
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (!memcmp(dev->perm_addr, uid, 8)) {
			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
			/* Check for uninitialized siucv_name */
			if (strncmp(sa->siucv_name, "        ", 8) == 0)
				__iucv_auto_name(iucv);
			else
				memcpy(iucv->src_name, sa->siucv_name, 8);
			sk->sk_bound_dev_if = dev->ifindex;
			iucv->hs_dev = dev;
			dev_hold(dev);
			sk->sk_state = IUCV_BOUND;
			iucv->transport = AF_IUCV_TRANS_HIPER;
			if (!iucv->msglimit)
				iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
			rcu_read_unlock();
			goto done_unlock;
		}
	}
	rcu_read_unlock();
vm_bind:
	if (pr_iucv) {
		/* use local userid for backward compat */
		memcpy(iucv->src_name, sa->siucv_name, 8);
		memcpy(iucv->src_user_id, iucv_userid, 8);
		sk->sk_state = IUCV_BOUND;
		iucv->transport = AF_IUCV_TRANS_IUCV;
		if (!iucv->msglimit)
			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
		goto done_unlock;
	}
	/* found no dev to bind */
	err = -ENODEV;
done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	int err = 0;

	if (unlikely(!pr_iucv))
		return -EPROTO;

	memcpy(iucv->src_user_id, iucv_userid, 8);

	write_lock_bh(&iucv_sk_list.lock);
	__iucv_auto_name(iucv);
	write_unlock_bh(&iucv_sk_list.lock);

	if (!iucv->msglimit)
		iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

	return err;
}

static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned char user_data[16];
	int err;

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
				    sa->siucv_user_id, NULL, user_data,
				    sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
	}
done:
	return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;

	if (alen < sizeof(struct sockaddr_iucv) || addr->sa_family != AF_IUCV)
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_state == IUCV_OPEN &&
	    iucv->transport == AF_IUCV_TRANS_HIPER)
		return -EBADFD; /* explicit bind required */

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv->dst_name, sa->siucv_name, 8);

	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
	else
		err = afiucv_path_connect(sock, addr);
	if (err)
		goto done;

	if (sk->sk_state != IUCV_CONNECTED)
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
		err = -ECONNREFUSED;

	if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
		iucv_sever_path(sk, 0);

done:
	release_sock(sk);
	return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags, bool kern)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);

	addr->sa_family = AF_IUCV;

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
		memcpy(siucv->siucv_name, iucv->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return sizeof(struct sockaddr_iucv);
}

/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
				 (void *) prmdata, 8);
}

static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			     size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	size_t headroom = 0;
	size_t linear;
	struct sk_buff *skb;
	struct iucv_message txmsg = {0};
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done   = 0;	/* check for duplicate headers */
	txmsg.class = 0;

	/* iterate over control messages */
	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
				(void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

			break;

		default:
			err = -EINVAL;
			goto out;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		headroom = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
		linear = len;
	} else {
		if (len < PAGE_SIZE) {
			linear = len;
		} else {
			/* In nonlinear "classic" iucv skb,
			 * reserve space for iucv_array
			 */
			headroom = sizeof(struct iucv_array) *
				   (MAX_SKB_FRAGS + 1);
			linear = PAGE_SIZE - headroom;
		}
	}
	skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
				   noblock, &err, 0);
	if (!skb)
		goto out;
	if (headroom)
		skb_reserve(skb, headroom);
	skb_put(skb, linear);
	skb->len = len;
	skb->data_len = len - linear;
	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
	if (err)
		goto fail;

	/* wait if outstanding messages for iucv path has reached */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	IUCV_SKB_CB(skb)->tag = txmsg.tag;

	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		atomic_inc(&iucv->msg_sent);
		err = afiucv_hs_send(&txmsg, sk, skb, 0);
		if (err) {
			atomic_dec(&iucv->msg_sent);
			goto out;
		}
	} else { /* Classic VM IUCV transport */
		skb_queue_tail(&iucv->send_skb_q, skb);

		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
		    skb->len <= 7) {
			err = iucv_send_iprm(iucv->path, &txmsg, skb);

			/* on success: there is no message_complete callback */
			/* for an IPRMDATA msg; remove skb from send queue */
			if (err == 0) {
				skb_unlink(skb, &iucv->send_skb_q);
				kfree_skb(skb);
			}

			/* this error should never happen since the	*/
			/* IUCV_IPRMDATA path flag is set... sever path */
			if (err == 0x15) {
				pr_iucv->path_sever(iucv->path, NULL);
				skb_unlink(skb, &iucv->send_skb_q);
				err = -EPIPE;
				goto fail;
			}
		} else if (skb_is_nonlinear(skb)) {
			struct iucv_array *iba = (struct iucv_array *)skb->head;
			int i;

			/* skip iucv_array lying in the headroom */
			iba[0].address = (u32)(addr_t)skb->data;
			iba[0].length = (u32)skb_headlen(skb);
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

				iba[i + 1].address =
					(u32)(addr_t)skb_frag_address(frag);
				iba[i + 1].length = (u32)skb_frag_size(frag);
			}
			err = pr_iucv->message_send(iucv->path, &txmsg,
						    IUCV_IPBUFLST, 0,
						    (void *)iba, skb->len);
		} else { /* non-IPRM Linear skb */
			err = pr_iucv->message_send(iucv->path, &txmsg,
					0, 0, (void *)skb->data, skb->len);
		}
		if (err) {
			if (err == 3) {
				user_id[8] = 0;
				memcpy(user_id, iucv->dst_user_id, 8);
				appl_id[8] = 0;
				memcpy(appl_id, iucv->dst_name, 8);
				pr_err(
		"Application %s on z/VM guest %s exceeds message limit\n",
					appl_id, user_id);
				err = -EAGAIN;
			} else {
				err = -EPIPE;
			}
			skb_unlink(skb, &iucv->send_skb_q);
			goto fail;
		}
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}

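/* Illustrative userspace counterpart for the SCM_IUCV_TRGCLS handling above
 * (assumes SOL_IUCV and SCM_IUCV_TRGCLS from the af_iucv header; iov/fd are
 * set up elsewhere):
 *
 *	char cbuf[CMSG_SPACE(sizeof(uint32_t))];
 *	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1,
 *			     .msg_control = cbuf,
 *			     .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);
 *	uint32_t trgcls = 1;
 *	cm->cmsg_level = SOL_IUCV;
 *	cm->cmsg_type  = SCM_IUCV_TRGCLS;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(trgcls));
 *	memcpy(CMSG_DATA(cm), &trgcls, sizeof(trgcls));
 *	sendmsg(fd, &mh, 0);
 *
 * The value lands in txmsg.class and is delivered back to the receiver via
 * put_cmsg() in iucv_sock_recvmsg().
 */
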
static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
{
	size_t headroom, linear;
	struct sk_buff *skb;
	int err;

	if (len < PAGE_SIZE) {
		headroom = 0;
		linear = len;
	} else {
		headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
		linear = PAGE_SIZE - headroom;
	}
	skb = alloc_skb_with_frags(headroom + linear, len - linear,
				   0, &err, GFP_ATOMIC | GFP_DMA);
	WARN_ONCE(!skb,
		  "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
		  len, err);
	if (skb) {
		if (headroom)
			skb_reserve(skb, headroom);
		skb_put(skb, linear);
		skb->len = len;
		skb->data_len = len - linear;
	}
	return skb;
}

/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	IUCV_SKB_CB(skb)->class = msg->class;

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		if (skb_is_nonlinear(skb)) {
			struct iucv_array *iba = (struct iucv_array *)skb->head;
			int i;

			iba[0].address = (u32)(addr_t)skb->data;
			iba[0].length = (u32)skb_headlen(skb);
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

				iba[i + 1].address =
					(u32)(addr_t)skb_frag_address(frag);
				iba[i + 1].length = (u32)skb_frag_size(frag);
			}
			rc = pr_iucv->message_receive(path, msg,
					      IUCV_IPBUFLST,
					      (void *)iba, len, NULL);
		} else {
			rc = pr_iucv->message_receive(path, msg,
					      msg->flags & IUCV_IPRMDATA,
					      skb->data, len, NULL);
		}
		if (rc) {
			kfree_skb(skb);
			return;
		}
		WARN_ON_ONCE(skb->len != len);
	}

	IUCV_SKB_CB(skb)->offset = 0;
	if (sk_filter(sk, skb)) {
		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
		kfree_skb(skb);
		return;
	}
	if (__sock_queue_rcv_skb(sk, skb))	/* handle rcv queue full */
		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
}

/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}

static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			     size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;
	u32 offset;

	if ((sk->sk_state == IUCV_DISCONN) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	offset = IUCV_SKB_CB(skb)->offset;
	rlen   = skb->len - offset;		/* real length of skb */
	copied = min_t(unsigned int, rlen, len);
	if (!rlen)
		sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;

	cskb = skb;
	if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       sizeof(IUCV_SKB_CB(skb)->class),
		       (void *)&IUCV_SKB_CB(skb)->class);
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			if (copied < rlen) {
				IUCV_SKB_CB(skb)->offset = offset + copied;
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		kfree_skb(skb);
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			atomic_inc(&iucv->msg_recv);
			if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
				WARN_ON(1);
				iucv_sock_close(sk);
				return -EFAULT;
			}
		}

		/* Queue backlog skbs */
		spin_lock_bh(&iucv->message_q.lock);
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			IUCV_SKB_CB(rskb)->offset = 0;
			if (__sock_queue_rcv_skb(sk, rskb)) {
				/* handle rcv queue full */
				skb_queue_head(&iucv->backlog_skb_q,
						rskb);
				break;
			}
			rskb = skb_dequeue(&iucv->backlog_skb_q);
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			if (atomic_read(&iucv->msg_recv) >=
							iucv->msglimit / 2) {
				err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
				if (err) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
			}
		}
		spin_unlock_bh(&iucv->message_q.lock);
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}

static inline __poll_t iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return EPOLLIN | EPOLLRDNORM;
	}

	return 0;
}

__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask = 0;

	sock_poll_wait(file, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= EPOLLIN | EPOLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= EPOLLHUP;

	if (sk->sk_state == IUCV_DISCONN)
		mask |= EPOLLIN;

	if (sock_writeable(sk) && iucv_below_msglim(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_LISTEN:
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;
	default:
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		if (iucv->transport == AF_IUCV_TRANS_IUCV) {
			txmsg.class = 0;
			txmsg.tag = 0;
			err = pr_iucv->message_send(iucv->path, &txmsg,
				IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
			if (err) {
				switch (err) {
				case 1:
					err = -ENOTCONN;
					break;
				case 2:
					err = -ECONNRESET;
					break;
				default:
					err = -ENOTCONN;
					break;
				}
			}
		} else
			iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
	}

	sk->sk_shutdown |= how;
	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
		    iucv->path) {
			err = pr_iucv->path_quiesce(iucv->path, NULL);
			if (err)
				err = -ENOTCONN;
/*			skb_queue_purge(&sk->sk_receive_queue); */
		}
		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			if (val < 1 || val > (u16)(~0))
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}

static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int val;
	int len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	case SO_MSGSIZE:
		if (sk->sk_state == IUCV_OPEN)
			return -EBADFD;
		val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
				sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
				0x7fffffff;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
	if (!nsk) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		iucv_sever_path(nsk, 1);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		pr_iucv->message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += SKB_TRUESIZE(iucv_msg_length(msg));
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}

static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	bh_lock_sock(sk);
	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		while (list_skb != (struct sk_buff *)list) {
			if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		if (this) {
			kfree_skb(this);
			/* wake up any process waiting for sending */
			iucv_sock_wake_msglim(sk);
		}
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
	bh_unlock_sock(sk);
}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (sk->sk_state == IUCV_CLOSED)
		return;

	bh_lock_sock(sk);
	iucv_sever_path(sk, 1);
	sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
}

/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}

/***************** HiperSockets transport callbacks ********************/
static void afiucv_swap_src_dest(struct sk_buff *skb)
{
	struct af_iucv_trans_hdr *trans_hdr =
				(struct af_iucv_trans_hdr *)skb->data;
	char tmpID[8];
	char tmpName[8];

	ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	memcpy(tmpID, trans_hdr->srcUserID, 8);
	memcpy(tmpName, trans_hdr->srcAppName, 8);
	memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
	memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
	memcpy(trans_hdr->destUserID, tmpID, 8);
	memcpy(trans_hdr->destAppName, tmpName, 8);
	skb_push(skb, ETH_HLEN);
	memset(skb->data, 0, ETH_HLEN);
}

/**
 * afiucv_hs_callback_syn - react on received SYN
 */
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
	struct sock *nsk;
	struct iucv_sock *iucv, *niucv;
	struct af_iucv_trans_hdr *trans_hdr;
	int err;

	iucv = iucv_sk(sk);
	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
	if (!iucv) {
		/* no sock - connection refused */
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		goto out;
	}

	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
	bh_lock_sock(sk);
	if ((sk->sk_state != IUCV_LISTEN) ||
	    sk_acceptq_is_full(sk) ||
	    !nsk) {
		/* error on server socket - connection refused */
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		iucv_sock_kill(nsk);
		bh_unlock_sock(sk);
		goto out;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);
	niucv->transport = AF_IUCV_TRANS_HIPER;
	niucv->msglimit = iucv->msglimit;
	if (!trans_hdr->window)
		niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
	else
		niucv->msglimit_peer = trans_hdr->window;
	memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
	memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
	niucv->hs_dev = iucv->hs_dev;
	dev_hold(niucv->hs_dev);
	afiucv_swap_src_dest(skb);
	trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
	trans_hdr->window = niucv->msglimit;
	/* if receiver acks the xmit connection is established */
	err = dev_queue_xmit(skb);
	if (!err) {
		iucv_accept_enqueue(sk, nsk);
		nsk->sk_state = IUCV_CONNECTED;
		sk->sk_data_ready(sk);
	} else
		iucv_sock_kill(nsk);
	bh_unlock_sock(sk);

out:
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_synack() - react on received SYN-ACK
 */
static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct af_iucv_trans_hdr *trans_hdr =
					(struct af_iucv_trans_hdr *)skb->data;

	if (!iucv)
		goto out;
	if (sk->sk_state != IUCV_BOUND)
		goto out;
	bh_lock_sock(sk);
	iucv->msglimit_peer = trans_hdr->window;
	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_synfin() - react on received SYN_FIN
 */
static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv)
		goto out;
	if (sk->sk_state != IUCV_BOUND)
		goto out;
	bh_lock_sock(sk);
	sk->sk_state = IUCV_DISCONN;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_fin() - react on received FIN
 */
static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	/* other end of connection closed */
	if (!iucv)
		goto out;
	bh_lock_sock(sk);
	if (sk->sk_state == IUCV_CONNECTED) {
		sk->sk_state = IUCV_DISCONN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_win() - react on received WIN
 */
static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct af_iucv_trans_hdr *trans_hdr =
					(struct af_iucv_trans_hdr *)skb->data;

	if (!iucv)
		return NET_RX_SUCCESS;

	if (sk->sk_state != IUCV_CONNECTED)
		return NET_RX_SUCCESS;

	atomic_sub(trans_hdr->window, &iucv->msg_sent);
	iucv_sock_wake_msglim(sk);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_rx() - react on received data
 */
static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	/* write stuff from iucv_msg to skb cb */
	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	IUCV_SKB_CB(skb)->offset = 0;
	if (sk_filter(sk, skb)) {
		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	spin_lock(&iucv->message_q.lock);
	if (skb_queue_empty(&iucv->backlog_skb_q)) {
		if (__sock_queue_rcv_skb(sk, skb))
			/* handle rcv queue full */
			skb_queue_tail(&iucv->backlog_skb_q, skb);
	} else
		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
	spin_unlock(&iucv->message_q.lock);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_rcv() - base function for arriving data through HiperSockets
 *                   transport
 *                   called from netif RX softirq
 */
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct iucv_sock *iucv;
	struct af_iucv_trans_hdr *trans_hdr;
	int err = NET_RX_SUCCESS;
	char nullstring[8];

	if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) {
		WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d",
			  (int)skb->len,
			  (int)(ETH_HLEN + sizeof(struct af_iucv_trans_hdr)));
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}
	if (skb_headlen(skb) < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr)))
		if (skb_linearize(skb)) {
			WARN_ONCE(1, "AF_IUCV skb_linearize failed, len=%d",
				  (int)skb->len);
			kfree_skb(skb);
			return NET_RX_SUCCESS;
		}
	skb_pull(skb, ETH_HLEN);
	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
	EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	memset(nullstring, 0, sizeof(nullstring));
	iucv = NULL;
	sk = NULL;
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     nullstring, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		} else {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name,
				     trans_hdr->srcAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     trans_hdr->srcUserID, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		}
	}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		sk = NULL;

	/* no sock
	how should we send with no sock
	1) send without sock no send rc checking?
	2) introduce default sock to handle these cases

	 SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
	 data -> send FIN
	 SYN|ACK, SYN|FIN, FIN -> no action? */

	switch (trans_hdr->flags) {
	case AF_IUCV_FLAG_SYN:
		/* connect request */
		err = afiucv_hs_callback_syn(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
		/* connect request confirmed */
		err = afiucv_hs_callback_synack(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
		/* connect request refused */
		err = afiucv_hs_callback_synfin(sk, skb);
		break;
	case (AF_IUCV_FLAG_FIN):
		/* close request */
		err = afiucv_hs_callback_fin(sk, skb);
		break;
	case (AF_IUCV_FLAG_WIN):
		err = afiucv_hs_callback_win(sk, skb);
		if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
			kfree_skb(skb);
			break;
		}
		/* fall through and receive non-zero length data */
	case (AF_IUCV_FLAG_SHT):
		/* shutdown request */
		/* fall through and receive zero length data */
	case 0:
		/* plain data frame */
		IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
		err = afiucv_hs_callback_rx(sk, skb);
		break;
	default:
		kfree_skb(skb);
	}

	return err;
}

/**
 * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
 *                                 transport
 */
static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
					enum iucv_tx_notify n)
{
	struct sock *isk = skb->sk;
	struct sock *sk = NULL;
	struct iucv_sock *iucv = NULL;
	struct sk_buff_head *list;
	struct sk_buff *list_skb;
	struct sk_buff *nskb;
	unsigned long flags;

	read_lock_irqsave(&iucv_sk_list.lock, flags);
	sk_for_each(sk, &iucv_sk_list.head)
		if (sk == isk) {
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock_irqrestore(&iucv_sk_list.lock, flags);

	if (!iucv || sock_flag(sk, SOCK_ZAPPED))
		return;

	list = &iucv->send_skb_q;
	spin_lock_irqsave(&list->lock, flags);
	if (skb_queue_empty(list))
		goto out_unlock;
	list_skb = list->next;
	nskb = list_skb->next;
	while (list_skb != (struct sk_buff *)list) {
		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
			switch (n) {
			case TX_NOTIFY_OK:
				__skb_unlink(list_skb, list);
				kfree_skb(list_skb);
				iucv_sock_wake_msglim(sk);
				break;
			case TX_NOTIFY_PENDING:
				atomic_inc(&iucv->pendings);
				break;
			case TX_NOTIFY_DELAYED_OK:
				__skb_unlink(list_skb, list);
				atomic_dec(&iucv->pendings);
				if (atomic_read(&iucv->pendings) <= 0)
					iucv_sock_wake_msglim(sk);
				kfree_skb(list_skb);
				break;
			case TX_NOTIFY_UNREACHABLE:
			case TX_NOTIFY_DELAYED_UNREACHABLE:
			case TX_NOTIFY_TPQFULL: /* not yet used */
			case TX_NOTIFY_GENERALERROR:
			case TX_NOTIFY_DELAYED_GENERALERROR:
				__skb_unlink(list_skb, list);
				kfree_skb(list_skb);
				if (sk->sk_state == IUCV_CONNECTED) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
				break;
			}
			break;
		}
		list_skb = nskb;
		nskb = nskb->next;
	}
out_unlock:
	spin_unlock_irqrestore(&list->lock, flags);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}

/*
 * afiucv_netdev_event: handle netdev notifier chain events
 */
static int afiucv_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
	struct sock *sk;
	struct iucv_sock *iucv;

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_GOING_DOWN:
		sk_for_each(sk, &iucv_sk_list.head) {
			iucv = iucv_sk(sk);
			if ((iucv->hs_dev == event_dev) &&
			    (sk->sk_state == IUCV_CONNECTED)) {
				if (event == NETDEV_GOING_DOWN)
					iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
				sk->sk_state = IUCV_DISCONN;
				sk->sk_state_change(sk);
			}
		}
		break;
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block afiucv_netdev_notifier = {
	.notifier_call = afiucv_netdev_event,
};

static const struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};

static const struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static struct packet_type iucv_packet_type = {
	.type = cpu_to_be16(ETH_P_AF_IUCV),
	.func = afiucv_hs_rcv,
};

static int afiucv_iucv_init(void)
{
	int err;

	err = pr_iucv->iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	/* establish dummy device */
	af_iucv_driver.bus = pr_iucv->bus;
	err = driver_register(&af_iucv_driver);
	if (err)
		goto out_iucv;
	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!af_iucv_dev) {
		err = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(af_iucv_dev, "af_iucv");
	af_iucv_dev->bus = pr_iucv->bus;
	af_iucv_dev->parent = pr_iucv->root;
	af_iucv_dev->release = (void (*)(struct device *))kfree;
	af_iucv_dev->driver = &af_iucv_driver;
	err = device_register(af_iucv_dev);
	if (err)
		goto out_iucv_dev;
	return 0;

out_iucv_dev:
	put_device(af_iucv_dev);
out_driver:
	driver_unregister(&af_iucv_driver);
out_iucv:
	pr_iucv->iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}

static int __init afiucv_init(void)
{
	int err;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
		if (unlikely(err)) {
			WARN_ON(err);
			err = -EPROTONOSUPPORT;
			goto out;
		}

		pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
		if (!pr_iucv) {
			printk(KERN_WARNING "iucv_if lookup failed\n");
			memset(&iucv_userid, 0, sizeof(iucv_userid));
		}
	} else {
		memset(&iucv_userid, 0, sizeof(iucv_userid));
		pr_iucv = NULL;
	}

	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;

	if (pr_iucv) {
		err = afiucv_iucv_init();
		if (err)
			goto out_sock;
	}
	register_netdevice_notifier(&afiucv_netdev_notifier);
	dev_add_pack(&iucv_packet_type);
	return 0;

out_sock:
	sock_unregister(PF_IUCV);
out_proto:
	proto_unregister(&iucv_proto);
out:
	if (pr_iucv)
		symbol_put(iucv_if);
	return err;
}

static void __exit afiucv_exit(void)
{
	if (pr_iucv) {
		device_unregister(af_iucv_dev);
		driver_unregister(&af_iucv_driver);
		pr_iucv->iucv_unregister(&af_iucv_handler, 0);
		symbol_put(iucv_if);
	}
	unregister_netdevice_notifier(&afiucv_netdev_notifier);
	dev_remove_pack(&iucv_packet_type);
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);