/*
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright IBM Corp. 2006, 2009
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *  PM functions:
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>

#define VERSION "1.1"

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))

/* macros to set/get socket control buffer at correct offset */
#define CB_TAG(skb)	((skb)->cb)		/* iucv message tag */
#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag))
#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
#define CB_TRGCLS_LEN	(TRGCLS_SIZE)
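
/*
 * For illustration, the skb->cb layout implied by the macros above,
 * assuming the tag and class members of struct iucv_message are each
 * 4 bytes wide:
 *
 *	skb->cb[0..3]	iucv message tag	  (CB_TAG)
 *	skb->cb[4..7]	iucv message target class (CB_TRGCLS)
 */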

#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
	return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}

/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:	AFIUCV dummy device
 *
 * Sever all established IUCV communication paths
 */
static int afiucv_pm_freeze(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;
	struct hlist_node *node;
	int err = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		switch (sk->sk_state) {
		case IUCV_SEVERED:
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_CONNECTED:
			if (iucv->path) {
				err = iucv_path_sever(iucv->path, NULL);
				iucv_path_free(iucv->path);
				iucv->path = NULL;
			}
			break;
		case IUCV_OPEN:
		case IUCV_BOUND:
		case IUCV_LISTEN:
		case IUCV_CLOSED:
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return err;
}

/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	AFIUCV dummy device
 *
 * socket clean up after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;
	struct hlist_node *node;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		switch (sk->sk_state) {
		case IUCV_CONNECTED:
			sk->sk_err = EPIPE;
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
			break;
		case IUCV_DISCONN:
		case IUCV_SEVERED:
		case IUCV_CLOSING:
		case IUCV_LISTEN:
		case IUCV_BOUND:
		case IUCV_OPEN:
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}

static const struct dev_pm_ops afiucv_pm_ops = {
	.prepare = afiucv_pm_prepare,
	.complete = afiucv_pm_complete,
	.freeze = afiucv_pm_freeze,
	.thaw = afiucv_pm_restore_thaw,
	.restore = afiucv_pm_restore_thaw,
};

static struct device_driver af_iucv_driver = {
	.owner = THIS_MODULE,
	.name = "afiucv",
	.bus  = &iucv_bus,
	.pm   = &afiucv_pm_ops,
};

/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further,
 * if the socket data len is > 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}
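
/*
 * Worked example of the convention above (illustrative only): a 5-byte
 * message in the parameter list carries PRMDATA[7] = 0xff - 5 = 0xfa,
 * so iucv_msg_length() computes 0xff - 0xfa = 5. The special shutdown
 * notification iprm_shutdown[] ends in 0x01, i.e. a length value of
 * 0xff - 0x01 = 254 (> 7), which makes this function return 8 and lets
 * iucv_process_message() treat the message as a notification.
 */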

/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}

/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
}

/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

/* Timers */
static void iucv_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	bh_lock_sock(sk);
	sk->sk_err = ETIMEDOUT;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);

	iucv_sock_kill(sk);
	sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}

/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;
	unsigned long timeo;

	iucv_sock_clear_timer(sk);
	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
	case IUCV_DISCONN:
		err = 0;

		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			err = iucv_sock_wait(sk,
					iucv_sock_in_state(sk, IUCV_CLOSED, 0),
					timeo);
		}

	case IUCV_CLOSING:   /* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			err = iucv_path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		break;

	default:
		/* nothing to do here */
		break;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
	INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
	spin_lock_init(&iucv_sk(sk)->message_q.lock);
	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
	iucv_sk(sk)->send_tag = 0;
	iucv_sk(sk)->flags = 0;
	iucv_sk(sk)->msglimit = IUCV_QUEUELEN_DEFAULT;
	iucv_sk(sk)->path = NULL;
	memset(&iucv_sk(sk)->src_user_id, 0, 32);

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}
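
/*
 * A minimal, hypothetical user-space sketch of creating and connecting
 * a socket of this family; the field names come from struct sockaddr_iucv,
 * while the blank-padding of the 8-byte user id/name fields is an
 * assumption of this sketch, not something this function enforces:
 *
 *	struct sockaddr_iucv addr;
 *	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.siucv_family = AF_IUCV;
 *	memcpy(addr.siucv_user_id, "VMUSER1 ", 8);  // peer z/VM guest
 *	memcpy(addr.siucv_name,    "APPNAME ", 8);  // peer application
 *	connect(fd, (struct sockaddr *) &addr, sizeof(addr));
 */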

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    sk->sk_state == IUCV_DISCONN ||	/* due to PM restore */
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err;

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path) {
		err = 0;
		goto done_unlock;
	}

	/* Bind the socket */
	memcpy(iucv->src_name, sa->siucv_name, 8);

	/* Copy the user id */
	memcpy(iucv->src_user_id, iucv_userid, 8);
	sk->sk_state = IUCV_BOUND;
	err = 0;

done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* Set the userid and name */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
	if (unlikely(err))
		return -EPROTO;

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	unsigned char user_data[16];
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv_sk(sk)->src_name);
	ASCEBC(user_data, sizeof(user_data));

	iucv = iucv_sk(sk);
	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = iucv_path_connect(iucv->path, &af_iucv_handler,
				sa->siucv_user_id, NULL, user_data, sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
		goto done;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));
	}

	if (sk->sk_state == IUCV_DISCONN) {
		err = -ECONNREFUSED;
	}

	if (err) {
		iucv_path_sever(iucv->path, NULL);
		iucv_path_free(iucv->path);
		iucv->path = NULL;
	}

done:
	release_sock(sk);
	return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
		memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}

/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return iucv_message_send(path, msg, IUCV_IPRMDATA, 0,
				 (void *) prmdata, 8);
}

static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done   = 0;	/* check for duplicate headers */
	txmsg.class = 0;

	/* iterate over control messages */
	for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
	     cmsg = CMSG_NXTHDR(msg, cmsg)) {

		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
			       (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

			break;

		default:
			err = -EINVAL;
			goto out;
			break;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	skb = sock_alloc_send_skb(sk, len, noblock, &err);
	if (!skb)
		goto out;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto fail;
	}

	/* wait if the number of outstanding messages for the iucv path
	 * has reached the message limit */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
	skb_queue_tail(&iucv->send_skb_q, skb);

	if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
	      && skb->len <= 7) {
		err = iucv_send_iprm(iucv->path, &txmsg, skb);

		/* on success: there is no message_complete callback
		 * for an IPRMDATA msg; remove skb from send queue */
		if (err == 0) {
			skb_unlink(skb, &iucv->send_skb_q);
			kfree_skb(skb);
		}

		/* this error should never happen since the
		 * IUCV_IPRMDATA path flag is set... sever path */
		if (err == 0x15) {
			iucv_path_sever(iucv->path, NULL);
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}
	} else
		err = iucv_message_send(iucv->path, &txmsg, 0, 0,
					(void *) skb->data, skb->len);
	if (err) {
		if (err == 3) {
			user_id[8] = 0;
			memcpy(user_id, iucv->dst_user_id, 8);
			appl_id[8] = 0;
			memcpy(appl_id, iucv->dst_name, 8);
			pr_err("Application %s on z/VM guest %s"
			       " exceeds message limit\n",
			       appl_id, user_id);
			err = -EAGAIN;
		} else
			err = -EPIPE;
		skb_unlink(skb, &iucv->send_skb_q);
		goto fail;
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}
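
/*
 * A hedged user-space sketch of supplying the iucv target class as
 * ancillary data with sendmsg(); the SOL_IUCV/SCM_IUCV_TRGCLS values
 * match the cmsg handling above, while the CMSG_SPACE/CMSG_LEN buffer
 * sizing is the standard cmsg pattern, not something this file defines:
 *
 *	char control[CMSG_SPACE(sizeof(uint32_t))];
 *	struct msghdr msg = { 0 };
 *	struct cmsghdr *cmsg;
 *	uint32_t trgcls = 1;
 *
 *	msg.msg_control = control;
 *	msg.msg_controllen = sizeof(control);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_IUCV;
 *	cmsg->cmsg_type = SCM_IUCV_TRGCLS;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(uint32_t));
 *	memcpy(CMSG_DATA(cmsg), &trgcls, sizeof(uint32_t));
 *	// then set msg.msg_iov/msg_iovlen and call sendmsg(fd, &msg, 0)
 */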

/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
 *
 * Locking: must be called with message_q.lock held
 */
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		/* copy target class to control buffer of new skb */
		memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);

		/* copy data fragment */
		memcpy(nskb->data, skb->data + copied, size);
		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}

/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA,
					  skb->data, len, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		/* we need to fragment iucv messages for SOCK_STREAM only;
		 * for SOCK_SEQPACKET, it is only relevant if we support
		 * record segmentation using MSG_EOR (see also recvmsg()) */
		if (sk->sk_type == SOCK_STREAM &&
		    skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, len);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				iucv_path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = len;
		}
	}

	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}

/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}

static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	rlen   = skb->len;		/* real length of skb */
	copied = min_t(unsigned int, rlen, len);

	cskb = skb;
	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       CB_TRGCLS_LEN, CB_TRGCLS(skb));
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			skb_pull(skb, copied);
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		kfree_skb(skb);

		/* Queue backlog skbs */
		spin_lock_bh(&iucv->message_q.lock);
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
						rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv->backlog_skb_q);
			}
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
		}
		spin_unlock_bh(&iucv->message_q.lock);
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}

static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
		mask |= POLLIN;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_SEVERED:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;

	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;
		err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
					(void *) iprm_shutdown, 8);
		if (err) {
			switch (err) {
			case 1:
				err = -ENOTCONN;
				break;
			case 2:
				err = -ECONNRESET;
				break;
			default:
				err = -ENOTCONN;
				break;
			}
		}
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
		if (err)
			err = -ENOTCONN;

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		iucv_path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			if (val < 1 || val > (u16)(~0))
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}

static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val, len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
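
/*
 * Usage sketch for the two socket options handled above, from the
 * user-space side (illustrative only): SO_IPRMDATA_MSG enables sending
 * small messages in the iucv parameter list, and SO_MSGLIMIT (valid
 * range 1..65535) must be set before the socket is bound/connected:
 *
 *	int one = 1, limit = 128;
 *
 *	setsockopt(fd, SOL_IUCV, SO_IPRMDATA_MSG, &one, sizeof(one));
 *	setsockopt(fd, SOL_IUCV, SO_MSGLIMIT, &limit, sizeof(limit));
 */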

/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
	if (!nsk) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		iucv_message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += iucv_msg_length(msg) + sizeof(struct sk_buff);
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}

static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		while (list_skb != (struct sk_buff *)list) {
			if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		if (this) {
			kfree_skb(this);
			/* wake up any process waiting for sending */
			iucv_sock_wake_msglim(sk);
		}
	}
	BUG_ON(!this);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (!list_empty(&iucv_sk(sk)->accept_q))
		sk->sk_state = IUCV_SEVERED;
	else
		sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
}

/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}

static const struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};

static const struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static int __init afiucv_init(void)
{
	int err;

	if (!MACHINE_IS_VM) {
		pr_err("The af_iucv module cannot be loaded"
		       " without z/VM\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}
	cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
	if (unlikely(err)) {
		WARN_ON(err);
		err = -EPROTONOSUPPORT;
		goto out;
	}

	err = iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out_iucv;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;
	/* establish dummy device */
	err = driver_register(&af_iucv_driver);
	if (err)
		goto out_sock;
	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!af_iucv_dev) {
		err = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(af_iucv_dev, "af_iucv");
	af_iucv_dev->bus = &iucv_bus;
	af_iucv_dev->parent = iucv_root;
	af_iucv_dev->release = (void (*)(struct device *))kfree;
	af_iucv_dev->driver = &af_iucv_driver;
	err = device_register(af_iucv_dev);
	if (err)
		goto out_driver;

	return 0;

out_driver:
	driver_unregister(&af_iucv_driver);
out_sock:
	sock_unregister(PF_IUCV);
out_proto:
	proto_unregister(&iucv_proto);
out_iucv:
	iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}

static void __exit afiucv_exit(void)
{
	device_unregister(af_iucv_dev);
	driver_unregister(&af_iucv_driver);
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
	iucv_unregister(&af_iucv_handler, 0);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);