af_iucv: sync sk shutdown flag if iucv path is quiesced
/*
 *  linux/net/iucv/af_iucv.c
 *
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright 2006 IBM Corporation
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>

#define CONFIG_IUCV_SOCK_DEBUG 1

#define IPRMDATA 0x80
#define VERSION "1.0"

static char iucv_userid[80];
static struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};
static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

/* Timers */
static void iucv_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	bh_lock_sock(sk);
	sk->sk_err = ETIMEDOUT;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);

	iucv_sock_kill(sk);
	sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}

/* Kill socket */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;
	unsigned long timeo;

	iucv_sock_clear_timer(sk);
	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
	case IUCV_DISCONN:
		err = 0;

		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
		}

		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			err = iucv_path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}

	release_sock(sk);
	iucv_sock_kill(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}

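/* Allocate a new socket, initialize the iucv_sock queues and the
 * connection timer, and add the socket to the global socket list.
 */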
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
	INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
	spin_lock_init(&iucv_sk(sk)->message_q.lock);
	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
	iucv_sk(sk)->send_tag = 0;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;

	if (sock->type != SOCK_STREAM)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;
	sock->ops = &iucv_sock_ops;

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent->sk_ack_backlog--;
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

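/* Wait until sk->sk_state reaches one of the two given states or the
 * timeout expires; the socket lock is dropped while sleeping.
 */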
int iucv_sock_wait_state(struct sock *sk, int state, int state2,
			 unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	add_wait_queue(sk->sk_sleep, &wait);
	while (sk->sk_state != state && sk->sk_state != state2) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return err;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err;

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path) {
		err = 0;
		goto done_unlock;
	}

	/* Bind the socket */
	memcpy(iucv->src_name, sa->siucv_name, 8);

	/* Copy the user id */
	memcpy(iucv->src_user_id, iucv_userid, 8);
	sk->sk_state = IUCV_BOUND;
	err = 0;

done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* Set the userid and name */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
	if (unlikely(err))
		return -EPROTO;

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	unsigned char user_data[16];
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_type != SOCK_STREAM)
		return -EINVAL;

	iucv = iucv_sk(sk);

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv_sk(sk)->src_name);
	ASCEBC(user_data, sizeof(user_data));

	iucv = iucv_sk(sk);
	/* Create path. */
	iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
				     IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = iucv_path_connect(iucv->path, &af_iucv_handler,
				sa->siucv_user_id, NULL, user_data, sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
		goto done;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
				sock_sndtimeo(sk, flags & O_NONBLOCK));
	}

	if (sk->sk_state == IUCV_DISCONN) {
		release_sock(sk);
		return -ECONNREFUSED;
	}

	if (err) {
		iucv_path_sever(iucv->path, NULL);
		iucv_path_free(iucv->path);
		iucv->path = NULL;
	}

done:
	release_sock(sk);
	return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

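/* Report the local (or, for getpeername, the remote) IUCV user ID and
 * application name; the unused address fields are cleared.
 */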
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
		memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}

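/* Send one IUCV message per sendmsg call: the user data is copied into
 * a single skb, tagged, queued on send_skb_q and handed to
 * iucv_message_send; transmission requires the IUCV_CONNECTED state.
 */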
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	char user_id[9];
	char appl_id[9];
	int err;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	if (sk->sk_state == IUCV_CONNECTED) {
		if (!(skb = sock_alloc_send_skb(sk, len,
						msg->msg_flags & MSG_DONTWAIT,
						&err)))
			goto out;

		if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
			err = -EFAULT;
			goto fail;
		}

		txmsg.class = 0;
		memcpy(&txmsg.class, skb->data, skb->len >= 4 ? 4 : skb->len);
		txmsg.tag = iucv->send_tag++;
		memcpy(skb->cb, &txmsg.tag, 4);
		skb_queue_tail(&iucv->send_skb_q, skb);
		err = iucv_message_send(iucv->path, &txmsg, 0, 0,
					(void *) skb->data, skb->len);
		if (err) {
			if (err == 3) {
				user_id[8] = 0;
				memcpy(user_id, iucv->dst_user_id, 8);
				appl_id[8] = 0;
				memcpy(appl_id, iucv->dst_name, 8);
				pr_err("Application %s on z/VM guest %s"
				       " exceeds message limit\n",
				       appl_id, user_id);
			}
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}

	} else {
		err = -ENOTCONN;
		goto out;
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}

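/* Split a large received message into skbs of at most sk_rcvbuf/4 bytes
 * and queue them on the backlog queue.
 */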
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		memcpy(nskb->data, skb->data + copied, size);
		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}

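/* Receive the message data (unless it was sent as parameter data),
 * fragmenting oversized messages, and queue the result on the receive
 * queue or, if that fails, on the backlog queue.
 */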
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;

	if (msg->flags & IPRMDATA) {
		skb->data = NULL;
		skb->len = 0;
	} else {
		rc = iucv_message_receive(path, msg, 0, skb->data,
					  msg->length, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		if (skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, msg->length);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				iucv_path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = msg->length;
		}
	}

	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}

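/* Deliver messages that were parked on message_q.list because they
 * could not be processed when the interrupt arrived.
 */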
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(p->msg.length, GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}

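/* Copy a received message to user space; partially read skbs are pushed
 * back to the receive queue, and draining the backlog triggers processing
 * of any deferred messages.
 */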
static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int target, copied = 0;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	copied = min_t(unsigned int, skb->len, len);

	cskb = skb;
	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
		skb_queue_head(&sk->sk_receive_queue, skb);
		if (copied == 0)
			return -EFAULT;
		goto done;
	}

	len -= copied;

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {
		skb_pull(skb, copied);

		if (skb->len) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			goto done;
		}

		kfree_skb(skb);

		/* Queue backlog skbs */
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
						rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv->backlog_skb_q);
			}
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			spin_lock_bh(&iucv->message_q.lock);
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			spin_unlock_bh(&iucv->message_q.lock);
		}

	} else
		skb_queue_head(&sk->sk_receive_queue, skb);

done:
	return err ? : copied;
}

static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	poll_wait(file, sk->sk_sleep, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
		mask |= POLLIN;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

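/* shutdown(2) handler: 'how' is incremented so that SHUT_RD/SHUT_WR/SHUT_RDWR
 * map onto RCV_SHUTDOWN/SEND_SHUTDOWN/SHUTDOWN_MASK.  Shutting down the send
 * direction notifies the peer with a one-byte priority message; shutting down
 * the receive direction quiesces the IUCV path and purges pending data.
 */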
static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;
	u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;

	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;
		err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
					(void *) prmmsg, 8);
		if (err) {
			switch (err) {
			case 1:
				err = -ENOTCONN;
				break;
			case 2:
				err = -ECONNRESET;
				break;
			default:
				err = -ENOTCONN;
				break;
			}
		}
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
		if (err)
			err = -ENOTCONN;

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		iucv_path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
	if (!nsk) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	path->msglim = IUCV_QUEUELEN_DEFAULT;
	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

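/* message_pending callback: deliver an incoming message right away if
 * the backlog is empty and the receive buffer has room; otherwise save
 * it on message_q.list for later processing by recvmsg.
 */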
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return;

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += msg->length + sizeof(struct sk_buff);
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
	if (!skb)
		goto save_message;

	spin_lock(&iucv->message_q.lock);
	iucv_process_message(sk, skb, path, msg);
	spin_unlock(&iucv->message_q.lock);

	return;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		return;
	save_msg->path = path;
	save_msg->msg = *msg;

	spin_lock(&iucv->message_q.lock);
	list_add_tail(&save_msg->list, &iucv->message_q.list);
	spin_unlock(&iucv->message_q.lock);
}

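/* message_complete callback: find the skb on send_skb_q whose tag matches
 * the completed message, unlink and free it, and finish a pending close
 * once the send queue is empty.
 */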
static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		while (list_skb != (struct sk_buff *)list) {
			if (!memcmp(&msg->tag, list_skb->cb, 4)) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		kfree_skb(this);
	}
	BUG_ON(!this);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (!list_empty(&iucv_sk(sk)->accept_q))
		sk->sk_state = IUCV_SEVERED;
	else
		sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
}

/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}

static struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt
};

static struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static int __init afiucv_init(void)
{
	int err;

	if (!MACHINE_IS_VM) {
		pr_err("The af_iucv module cannot be loaded"
		       " without z/VM\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}
	cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
	if (unlikely(err)) {
		WARN_ON(err);
		err = -EPROTONOSUPPORT;
		goto out;
	}

	err = iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out_iucv;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;
	return 0;

out_proto:
	proto_unregister(&iucv_proto);
out_iucv:
	iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}

static void __exit afiucv_exit(void)
{
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
	iucv_unregister(&af_iucv_handler, 0);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);