/*
 *  net/dccp/proto.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/semaphore.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"

DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

atomic_t dccp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo __cacheline_aligned dccp_hashinfo = {
        .lhash_lock  = RW_LOCK_UNLOCKED,
        .lhash_users = ATOMIC_INIT(0),
        .lhash_wait  = __WAIT_QUEUE_HEAD_INITIALIZER(dccp_hashinfo.lhash_wait),
};

EXPORT_SYMBOL_GPL(dccp_hashinfo);

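/*
 * dccp_set_state - move a socket to a new DCCP state
 *
 * Besides switching sk->sk_state, this keeps the CurrEstab/EstabResets
 * MIB counters in sync and, on the transition to DCCP_CLOSED, unhashes
 * the socket so that a closed socket never sits in the lookup tables.
 */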
void dccp_set_state(struct sock *sk, const int state)
{
        const int oldstate = sk->sk_state;

        dccp_pr_debug("%s(%p) %-10.10s -> %s\n",
                      dccp_role(sk), sk,
                      dccp_state_name(oldstate), dccp_state_name(state));
        WARN_ON(state == oldstate);

        switch (state) {
        case DCCP_OPEN:
                if (oldstate != DCCP_OPEN)
                        DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
                break;

        case DCCP_CLOSED:
                if (oldstate == DCCP_CLOSING || oldstate == DCCP_OPEN)
                        DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

                sk->sk_prot->unhash(sk);
                if (inet_csk(sk)->icsk_bind_hash != NULL &&
                    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
                        inet_put_port(&dccp_hashinfo, sk);
                /* fall through */
        default:
                if (oldstate == DCCP_OPEN)
                        DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
        }

        /* Change state AFTER socket is unhashed to avoid closed
         * socket sitting in hash tables.
         */
        sk->sk_state = state;
}

EXPORT_SYMBOL_GPL(dccp_set_state);

void dccp_done(struct sock *sk)
{
        dccp_set_state(sk, DCCP_CLOSED);
        dccp_clear_xmit_timers(sk);

        sk->sk_shutdown = SHUTDOWN_MASK;

        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_state_change(sk);
        else
                inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);

const char *dccp_packet_name(const int type)
{
        static const char *dccp_packet_names[] = {
                [DCCP_PKT_REQUEST]  = "REQUEST",
                [DCCP_PKT_RESPONSE] = "RESPONSE",
                [DCCP_PKT_DATA]     = "DATA",
                [DCCP_PKT_ACK]      = "ACK",
                [DCCP_PKT_DATAACK]  = "DATAACK",
                [DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
                [DCCP_PKT_CLOSE]    = "CLOSE",
                [DCCP_PKT_RESET]    = "RESET",
                [DCCP_PKT_SYNC]     = "SYNC",
                [DCCP_PKT_SYNCACK]  = "SYNCACK",
        };

        if (type >= DCCP_NR_PKT_TYPES)
                return "INVALID";
        else
                return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);

const char *dccp_state_name(const int state)
{
        static char *dccp_state_names[] = {
                [DCCP_OPEN]       = "OPEN",
                [DCCP_REQUESTING] = "REQUESTING",
                [DCCP_PARTOPEN]   = "PARTOPEN",
                [DCCP_LISTEN]     = "LISTEN",
                [DCCP_RESPOND]    = "RESPOND",
                [DCCP_CLOSING]    = "CLOSING",
                [DCCP_TIME_WAIT]  = "TIME_WAIT",
                [DCCP_CLOSED]     = "CLOSED",
        };

        if (state >= DCCP_MAX_STATES)
                return "INVALID STATE!";
        else
                return dccp_state_names[state];
}

EXPORT_SYMBOL_GPL(dccp_state_name);

void dccp_hash(struct sock *sk)
{
        inet_hash(&dccp_hashinfo, sk);
}

EXPORT_SYMBOL_GPL(dccp_hash);

void dccp_unhash(struct sock *sk)
{
        inet_unhash(&dccp_hashinfo, sk);
}

EXPORT_SYMBOL_GPL(dccp_unhash);

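/*
 * dccp_init_sock - initialise a fresh DCCP socket
 *
 * Sets up feature negotiation state, the ack vector and the RX/TX CCID
 * control blocks, then the timers and the default connection parameters.
 * The very first caller is the control socket, which skips feature
 * negotiation and CCID setup entirely.
 */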
int dccp_init_sock(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        static int dccp_ctl_socket_init = 1;

        dccp_options_init(&dp->dccps_options);
        do_gettimeofday(&dp->dccps_epoch);

        /*
         * FIXME: We're hardcoding the CCID, and doing this at this point makes
         * the listening (master) sock get CCID control blocks, which is not
         * necessary, but for now, to not mess with the test userspace apps,
         * let's leave it here; later the real solution is to do this in a
         * setsockopt(CCIDs-I-want/accept). -acme
         */
        if (likely(!dccp_ctl_socket_init)) {
                int rc = dccp_feat_init(sk);

                if (rc)
                        return rc;

                if (dp->dccps_options.dccpo_send_ack_vector) {
                        dp->dccps_hc_rx_ackvec = dccp_ackvec_alloc(GFP_KERNEL);
                        if (dp->dccps_hc_rx_ackvec == NULL)
                                return -ENOMEM;
                }
                dp->dccps_hc_rx_ccid =
                        ccid_hc_rx_new(dp->dccps_options.dccpo_rx_ccid,
                                       sk, GFP_KERNEL);
                dp->dccps_hc_tx_ccid =
                        ccid_hc_tx_new(dp->dccps_options.dccpo_tx_ccid,
                                       sk, GFP_KERNEL);
                if (unlikely(dp->dccps_hc_rx_ccid == NULL ||
                             dp->dccps_hc_tx_ccid == NULL)) {
                        ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
                        ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
                        if (dp->dccps_options.dccpo_send_ack_vector) {
                                dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
                                dp->dccps_hc_rx_ackvec = NULL;
                        }
                        dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
                        return -ENOMEM;
                }
        } else {
                /* control socket doesn't need feature negotiation */
                INIT_LIST_HEAD(&dp->dccps_options.dccpo_pending);
                INIT_LIST_HEAD(&dp->dccps_options.dccpo_conf);
                dccp_ctl_socket_init = 0;
        }

        dccp_init_xmit_timers(sk);
        icsk->icsk_rto = DCCP_TIMEOUT_INIT;
        sk->sk_state = DCCP_CLOSED;
        sk->sk_write_space = dccp_write_space;
        icsk->icsk_sync_mss = dccp_sync_mss;
        dp->dccps_mss_cache = 536;
        dp->dccps_role = DCCP_ROLE_UNDEFINED;
        dp->dccps_service = DCCP_SERVICE_INVALID_VALUE;
        dp->dccps_l_ack_ratio = dp->dccps_r_ack_ratio = 1;

        return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);

int dccp_destroy_sock(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);

        /*
         * DCCP doesn't use sk_write_queue, just sk_send_head
         * for retransmissions
         */
        if (sk->sk_send_head != NULL) {
                kfree_skb(sk->sk_send_head);
                sk->sk_send_head = NULL;
        }

        /* Clean up a referenced DCCP bind bucket. */
        if (inet_csk(sk)->icsk_bind_hash != NULL)
                inet_put_port(&dccp_hashinfo, sk);

        kfree(dp->dccps_service_list);
        dp->dccps_service_list = NULL;

        if (dp->dccps_options.dccpo_send_ack_vector) {
                dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
                dp->dccps_hc_rx_ackvec = NULL;
        }
        ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
        ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
        dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;

        /* clean up feature negotiation state */
        dccp_feat_clean(sk);

        return 0;
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);

static inline int dccp_listen_start(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);

        dp->dccps_role = DCCP_ROLE_LISTEN;
        /*
         * Apps need to use setsockopt(DCCP_SOCKOPT_SERVICE)
         * before calling listen()
         */
        if (dccp_service_not_initialized(sk))
                return -EPROTO;
        return inet_csk_listen_start(sk, TCP_SYNQ_HSIZE);
}

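/*
 * dccp_disconnect - abort the connection and reset the socket for reuse
 *
 * Modelled on tcp_disconnect(): flushes queued data and timers, drops
 * the cached route and clears the bound address unless the user has
 * explicitly locked it with bind().
 */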
int dccp_disconnect(struct sock *sk, int flags)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
        int err = 0;
        const int old_state = sk->sk_state;

        if (old_state != DCCP_CLOSED)
                dccp_set_state(sk, DCCP_CLOSED);

        /* ABORT function of RFC793 */
        if (old_state == DCCP_LISTEN) {
                inet_csk_listen_stop(sk);
        /* FIXME: do the active reset thing */
        } else if (old_state == DCCP_REQUESTING)
                sk->sk_err = ECONNRESET;

        dccp_clear_xmit_timers(sk);
        __skb_queue_purge(&sk->sk_receive_queue);
        if (sk->sk_send_head != NULL) {
                __kfree_skb(sk->sk_send_head);
                sk->sk_send_head = NULL;
        }

        inet->dport = 0;

        if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
                inet_reset_saddr(sk);

        sk->sk_shutdown = 0;
        sock_reset_flag(sk, SOCK_DONE);

        icsk->icsk_backoff = 0;
        inet_csk_delack_init(sk);
        __sk_dst_reset(sk);

        BUG_TRAP(!inet->num || icsk->icsk_bind_hash);

        sk->sk_error_report(sk);
        return err;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);

/*
 * Wait for a DCCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
unsigned int dccp_poll(struct file *file, struct socket *sock,
                       poll_table *wait)
{
        unsigned int mask;
        struct sock *sk = sock->sk;

        poll_wait(file, sk->sk_sleep, wait);
        if (sk->sk_state == DCCP_LISTEN)
                return inet_csk_listen_poll(sk);

        /* Socket is not locked. We are protected from async events
         * by poll logic and correct handling of state changes
         * made by other threads is impossible in any case.
         */

        mask = 0;
        if (sk->sk_err)
                mask = POLLERR;

        if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
                mask |= POLLHUP;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLIN | POLLRDNORM;

        /* Connected? */
        if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
                if (atomic_read(&sk->sk_rmem_alloc) > 0)
                        mask |= POLLIN | POLLRDNORM;

                if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
                        if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
                                mask |= POLLOUT | POLLWRNORM;
                        } else {  /* send SIGIO later */
                                set_bit(SOCK_ASYNC_NOSPACE,
                                        &sk->sk_socket->flags);
                                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

                                /* Race breaker. If space is freed after
                                 * wspace test but before the flags are set,
                                 * IO signal will be lost.
                                 */
                                if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
                                        mask |= POLLOUT | POLLWRNORM;
                        }
                }
        }
        return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);

int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
        dccp_pr_debug("entry\n");
        return -ENOIOCTLCMD;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);

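/*
 * Illustrative only: from userspace, the buffer handed to
 * setsockopt(DCCP_SOCKOPT_SERVICE) is the primary service code followed
 * by any additional service codes, all __be32 in network byte order,
 * e.g. (a sketch, not part of this file; service codes are made up):
 *
 *      const __be32 codes[] = { htonl(42), htonl(43) };
 *      setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
 *                 codes, sizeof(codes));
 *
 * dccp_setsockopt_service() below stores the first word in
 * dccps_service and copies the remainder into dccps_service_list.
 */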
static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
                                   char __user *optval, int optlen)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct dccp_service_list *sl = NULL;

        if (service == DCCP_SERVICE_INVALID_VALUE ||
            optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
                return -EINVAL;

        if (optlen > sizeof(service)) {
                sl = kmalloc(optlen, GFP_KERNEL);
                if (sl == NULL)
                        return -ENOMEM;

                sl->dccpsl_nr = optlen / sizeof(u32) - 1;
                if (copy_from_user(sl->dccpsl_list,
                                   optval + sizeof(service),
                                   optlen - sizeof(service)) ||
                    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
                        kfree(sl);
                        return -EFAULT;
                }
        }

        lock_sock(sk);
        dp->dccps_service = service;

        kfree(dp->dccps_service_list);

        dp->dccps_service_list = sl;
        release_sock(sk);
        return 0;
}

/* Byte 1 is the feature number; the rest is the preference list. */
static int dccp_setsockopt_change(struct sock *sk, int type,
                                  struct dccp_so_feat __user *optval)
{
        struct dccp_so_feat opt;
        u8 *val;
        int rc;

        if (copy_from_user(&opt, optval, sizeof(opt)))
                return -EFAULT;

        val = kmalloc(opt.dccpsf_len, GFP_KERNEL);
        if (!val)
                return -ENOMEM;

        if (copy_from_user(val, opt.dccpsf_val, opt.dccpsf_len)) {
                rc = -EFAULT;
                goto out_free_val;
        }

        rc = dccp_feat_change(sk, type, opt.dccpsf_feat, val, opt.dccpsf_len,
                              GFP_KERNEL);
        if (rc)
                goto out_free_val;

out:
        return rc;

out_free_val:
        kfree(val);
        goto out;
}

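/*
 * Illustrative only: a userspace caller might request a change of a
 * local feature like this (a sketch; the feature number and preference
 * value are just examples):
 *
 *      __u8 prefs[] = { 2 };
 *      struct dccp_so_feat opt = {
 *              .dccpsf_feat = DCCPF_ACK_RATIO,
 *              .dccpsf_val  = prefs,
 *              .dccpsf_len  = sizeof(prefs),
 *      };
 *      setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_CHANGE_L, &opt, sizeof(opt));
 */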
int dccp_setsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int optlen)
{
        struct dccp_sock *dp;
        int err;
        int val;

        if (level != SOL_DCCP)
                return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
                                                             optname, optval,
                                                             optlen);

        if (optlen < sizeof(int))
                return -EINVAL;

        if (get_user(val, (int __user *)optval))
                return -EFAULT;

        if (optname == DCCP_SOCKOPT_SERVICE)
                return dccp_setsockopt_service(sk, val, optval, optlen);

        lock_sock(sk);
        dp = dccp_sk(sk);
        err = 0;

        switch (optname) {
        case DCCP_SOCKOPT_PACKET_SIZE:
                dp->dccps_packet_size = val;
                break;

        case DCCP_SOCKOPT_CHANGE_L:
                if (optlen != sizeof(struct dccp_so_feat))
                        err = -EINVAL;
                else
                        err = dccp_setsockopt_change(sk, DCCPO_CHANGE_L,
                                                     (struct dccp_so_feat __user *)
                                                     optval);
                break;

        case DCCP_SOCKOPT_CHANGE_R:
                if (optlen != sizeof(struct dccp_so_feat))
                        err = -EINVAL;
                else
                        err = dccp_setsockopt_change(sk, DCCPO_CHANGE_R,
                                                     (struct dccp_so_feat __user *)
                                                     optval);
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

        release_sock(sk);
        return err;
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);

static int dccp_getsockopt_service(struct sock *sk, int len,
                                   __be32 __user *optval,
                                   int __user *optlen)
{
        const struct dccp_sock *dp = dccp_sk(sk);
        const struct dccp_service_list *sl;
        int err = -ENOENT, slen = 0, total_len = sizeof(u32);

        lock_sock(sk);
        if (dccp_service_not_initialized(sk))
                goto out;

        if ((sl = dp->dccps_service_list) != NULL) {
                slen = sl->dccpsl_nr * sizeof(u32);
                total_len += slen;
        }

        err = -EINVAL;
        if (total_len > len)
                goto out;

        err = 0;
        if (put_user(total_len, optlen) ||
            put_user(dp->dccps_service, optval) ||
            (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
                err = -EFAULT;
out:
        release_sock(sk);
        return err;
}

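/*
 * Option names 128..191 are reserved for the RX CCID and 192..255 for
 * the TX CCID; dccp_getsockopt() below routes those ranges straight to
 * ccid_hc_rx_getsockopt()/ccid_hc_tx_getsockopt().
 */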
int dccp_getsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int __user *optlen)
{
        struct dccp_sock *dp;
        int val, len;

        if (level != SOL_DCCP)
                return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
                                                             optname, optval,
                                                             optlen);
        if (get_user(len, optlen))
                return -EFAULT;

        if (len < sizeof(int))
                return -EINVAL;

        dp = dccp_sk(sk);

        switch (optname) {
        case DCCP_SOCKOPT_PACKET_SIZE:
                val = dp->dccps_packet_size;
                len = sizeof(dp->dccps_packet_size);
                break;
        case DCCP_SOCKOPT_SERVICE:
                return dccp_getsockopt_service(sk, len,
                                               (__be32 __user *)optval, optlen);
        case 128 ... 191:
                return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
                                             len, (u32 __user *)optval, optlen);
        case 192 ... 255:
                return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
                                             len, (u32 __user *)optval, optlen);
        default:
                return -ENOPROTOOPT;
        }

        if (put_user(len, optlen) || copy_to_user(optval, &val, len))
                return -EFAULT;

        return 0;
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);

int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 size_t len)
{
        const struct dccp_sock *dp = dccp_sk(sk);
        const int flags = msg->msg_flags;
        const int noblock = flags & MSG_DONTWAIT;
        struct sk_buff *skb;
        int rc, size;
        long timeo;

        if (len > dp->dccps_mss_cache)
                return -EMSGSIZE;

        lock_sock(sk);
        timeo = sock_sndtimeo(sk, noblock);

        /*
         * We have to use sk_stream_wait_connect here to set sk_write_pending,
         * so that the trick in dccp_rcv_request_sent_state_process works.
         */
        /* Wait for a connection to finish. */
        if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN | DCCPF_CLOSING))
                if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
                        goto out_release;

        size = sk->sk_prot->max_header + len;
        release_sock(sk);
        skb = sock_alloc_send_skb(sk, size, noblock, &rc);
        lock_sock(sk);
        if (skb == NULL)
                goto out_release;

        skb_reserve(skb, sk->sk_prot->max_header);
        rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
        if (rc != 0)
                goto out_discard;

        rc = dccp_write_xmit(sk, skb, &timeo);
        /*
         * XXX we don't use sk_write_queue, so just discard the packet.
         *     Current plan however is to _use_ sk_write_queue with
         *     an algorithm similar to tcp_sendmsg, where the main difference
         *     is that in DCCP we have to respect packet boundaries, so
         *     no coalescing of skbs.
         *
         *     This bug was _quickly_ found & fixed by just looking at an OSTRA
         *     generated callgraph 8) -acme
         */
out_release:
        release_sock(sk);
        return rc ? : len;
out_discard:
        kfree_skb(skb);
        goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);

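/*
 * dccp_recvmsg - receive exactly one DCCP datagram
 *
 * Unlike TCP, packet boundaries are preserved: each call consumes one
 * queued packet (unless MSG_PEEK is given), and a buffer shorter than
 * the packet gets the head of the payload with MSG_TRUNC set.  RESET
 * and CLOSE packets on the queue are treated like a FIN and end the
 * read with a return value of 0.
 */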
int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 size_t len, int nonblock, int flags, int *addr_len)
{
        const struct dccp_hdr *dh;
        long timeo;

        lock_sock(sk);

        if (sk->sk_state == DCCP_LISTEN) {
                len = -ENOTCONN;
                goto out;
        }

        timeo = sock_rcvtimeo(sk, nonblock);

        do {
                struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

                if (skb == NULL)
                        goto verify_sock_status;

                dh = dccp_hdr(skb);

                if (dh->dccph_type == DCCP_PKT_DATA ||
                    dh->dccph_type == DCCP_PKT_DATAACK)
                        goto found_ok_skb;

                if (dh->dccph_type == DCCP_PKT_RESET ||
                    dh->dccph_type == DCCP_PKT_CLOSE) {
                        dccp_pr_debug("found fin ok!\n");
                        len = 0;
                        goto found_fin_ok;
                }
                dccp_pr_debug("packet_type=%s\n",
                              dccp_packet_name(dh->dccph_type));
                sk_eat_skb(sk, skb);
verify_sock_status:
                if (sock_flag(sk, SOCK_DONE)) {
                        len = 0;
                        break;
                }

                if (sk->sk_err) {
                        len = sock_error(sk);
                        break;
                }

                if (sk->sk_shutdown & RCV_SHUTDOWN) {
                        len = 0;
                        break;
                }

                if (sk->sk_state == DCCP_CLOSED) {
                        if (!sock_flag(sk, SOCK_DONE)) {
                                /* This occurs when user tries to read
                                 * from never connected socket.
                                 */
                                len = -ENOTCONN;
                                break;
                        }
                        len = 0;
                        break;
                }

                if (!timeo) {
                        len = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        len = sock_intr_errno(timeo);
                        break;
                }

                sk_wait_data(sk, &timeo);
                continue;
found_ok_skb:
                if (len > skb->len)
                        len = skb->len;
                else if (len < skb->len)
                        msg->msg_flags |= MSG_TRUNC;

                if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
                        /* Exception. Bailout! */
                        len = -EFAULT;
                        break;
                }
found_fin_ok:
                if (!(flags & MSG_PEEK))
                        sk_eat_skb(sk, skb);
                break;
        } while (1);
out:
        release_sock(sk);
        return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);

int inet_dccp_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        unsigned char old_state;
        int err;

        lock_sock(sk);

        err = -EINVAL;
        if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
                goto out;

        old_state = sk->sk_state;
        if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
                goto out;

        /* Really, if the socket is already in listen state
         * we can only allow the backlog to be adjusted.
         */
        if (old_state != DCCP_LISTEN) {
                /*
                 * FIXME: here it probably should be sk->sk_prot->listen_start
                 * see tcp_listen_start
                 */
                err = dccp_listen_start(sk);
                if (err)
                        goto out;
        }
        sk->sk_max_ack_backlog = backlog;
        err = 0;

out:
        release_sock(sk);
        return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);

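/*
 * Per-state transition table for close(): each entry encodes the next
 * state in the low bits (DCCP_STATE_MASK) and may OR in DCCP_ACTION_FIN
 * when a CLOSE/CLOSEREQ has to be sent.  E.g. DCCP_OPEN maps to
 * DCCP_CLOSING | DCCP_ACTION_FIN: move to CLOSING and tell the caller
 * to transmit a CLOSE.
 */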
static const unsigned char dccp_new_state[] = {
        /* current state:   new state:      action: */
        [0]               = DCCP_CLOSED,
        [DCCP_OPEN]       = DCCP_CLOSING | DCCP_ACTION_FIN,
        [DCCP_REQUESTING] = DCCP_CLOSED,
        [DCCP_PARTOPEN]   = DCCP_CLOSING | DCCP_ACTION_FIN,
        [DCCP_LISTEN]     = DCCP_CLOSED,
        [DCCP_RESPOND]    = DCCP_CLOSED,
        [DCCP_CLOSING]    = DCCP_CLOSED,
        [DCCP_TIME_WAIT]  = DCCP_CLOSED,
        [DCCP_CLOSED]     = DCCP_CLOSED,
};

static int dccp_close_state(struct sock *sk)
{
        const int next = dccp_new_state[sk->sk_state];
        const int ns = next & DCCP_STATE_MASK;

        if (ns != sk->sk_state)
                dccp_set_state(sk, ns);

        return next & DCCP_ACTION_FIN;
}

void dccp_close(struct sock *sk, long timeout)
{
        struct sk_buff *skb;

        lock_sock(sk);

        sk->sk_shutdown = SHUTDOWN_MASK;

        if (sk->sk_state == DCCP_LISTEN) {
                dccp_set_state(sk, DCCP_CLOSED);

                /* Special case. */
                inet_csk_listen_stop(sk);

                goto adjudge_to_death;
        }

        /*
         * We need to flush the recv. buffs.  We do this only on the
         * descriptor close, not protocol-sourced closes, because the
         * reader process may not have drained the data yet!
         */
        /* FIXME: check for unread data */
        while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                __kfree_skb(skb);
        }

        if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
                /* Check zero linger _after_ checking for unread data. */
                sk->sk_prot->disconnect(sk, 0);
        } else if (dccp_close_state(sk)) {
                dccp_send_close(sk, 1);
        }

        sk_stream_wait_close(sk, timeout);

adjudge_to_death:
        /*
         * It is the last release_sock in its life. It will remove backlog.
         */
        release_sock(sk);

        /*
         * Now socket is owned by kernel and we acquire BH lock
         * to finish close. No need to check for user refs.
         */
        local_bh_disable();
        bh_lock_sock(sk);
        BUG_TRAP(!sock_owned_by_user(sk));

        sock_hold(sk);
        sock_orphan(sk);

        /*
         * The last release_sock may have processed the CLOSE or RESET
         * packet moving sock to CLOSED state, if not we have to fire
         * the CLOSE/CLOSEREQ retransmission timer, see "8.3. Termination"
         * in draft-ietf-dccp-spec-11. -acme
         */
        if (sk->sk_state == DCCP_CLOSING) {
                /* FIXME: should start at 2 * RTT */
                /* Timer for repeating the CLOSE/CLOSEREQ until an answer. */
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          inet_csk(sk)->icsk_rto,
                                          DCCP_RTO_MAX);
#if 0
                /* Yeah, we should use sk->sk_prot->orphan_count, etc */
                dccp_set_state(sk, DCCP_CLOSED);
#endif
        }

        atomic_inc(sk->sk_prot->orphan_count);
        if (sk->sk_state == DCCP_CLOSED)
                inet_csk_destroy_sock(sk);

        /* Otherwise, socket is reprieved until protocol close. */

        bh_unlock_sock(sk);
        local_bh_enable();
        sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);

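/* Not implemented yet: the stub below only logs that it was called. */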
void dccp_shutdown(struct sock *sk, int how)
{
        dccp_pr_debug("entry\n");
}

EXPORT_SYMBOL_GPL(dccp_shutdown);

static int __init dccp_mib_init(void)
{
        int rc = -ENOMEM;

        dccp_statistics[0] = alloc_percpu(struct dccp_mib);
        if (dccp_statistics[0] == NULL)
                goto out;

        dccp_statistics[1] = alloc_percpu(struct dccp_mib);
        if (dccp_statistics[1] == NULL)
                goto out_free_one;

        rc = 0;
out:
        return rc;
out_free_one:
        free_percpu(dccp_statistics[0]);
        dccp_statistics[0] = NULL;
        goto out;
}

static void dccp_mib_exit(void)
{
        free_percpu(dccp_statistics[0]);
        free_percpu(dccp_statistics[1]);
        dccp_statistics[0] = dccp_statistics[1] = NULL;
}

static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
int dccp_debug;
module_param(dccp_debug, int, 0444);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif

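/*
 * Module init: size the established hash from available memory (or the
 * thash_entries parameter), round it down to a power of two, then
 * allocate the ehash/bhash tables and the bind bucket cache before
 * bringing up the MIBs, ack vectors and sysctls.
 */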
static int __init dccp_init(void)
{
        unsigned long goal;
        int ehash_order, bhash_order, i;
        int rc = -ENOBUFS;

        dccp_hashinfo.bind_bucket_cachep =
                kmem_cache_create("dccp_bind_bucket",
                                  sizeof(struct inet_bind_bucket), 0,
                                  SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (!dccp_hashinfo.bind_bucket_cachep)
                goto out;

        /*
         * Size and allocate the main established and bind bucket
         * hash tables.
         *
         * The methodology is similar to that of the buffer cache.
         */
        if (num_physpages >= (128 * 1024))
                goal = num_physpages >> (21 - PAGE_SHIFT);
        else
                goal = num_physpages >> (23 - PAGE_SHIFT);

        if (thash_entries)
                goal = (thash_entries *
                        sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
        for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
                ;
        do {
                dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE /
                                        sizeof(struct inet_ehash_bucket);
                dccp_hashinfo.ehash_size >>= 1;
                while (dccp_hashinfo.ehash_size &
                       (dccp_hashinfo.ehash_size - 1))
                        dccp_hashinfo.ehash_size--;
                dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
                        __get_free_pages(GFP_ATOMIC, ehash_order);
        } while (!dccp_hashinfo.ehash && --ehash_order > 0);

        if (!dccp_hashinfo.ehash) {
                printk(KERN_CRIT "Failed to allocate DCCP "
                                 "established hash table\n");
                goto out_free_bind_bucket_cachep;
        }

        for (i = 0; i < (dccp_hashinfo.ehash_size << 1); i++) {
                rwlock_init(&dccp_hashinfo.ehash[i].lock);
                INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain);
        }

        bhash_order = ehash_order;

        do {
                dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
                                        sizeof(struct inet_bind_hashbucket);
                if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
                    bhash_order > 0)
                        continue;
                dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
                        __get_free_pages(GFP_ATOMIC, bhash_order);
        } while (!dccp_hashinfo.bhash && --bhash_order >= 0);

        if (!dccp_hashinfo.bhash) {
                printk(KERN_CRIT "Failed to allocate DCCP bind hash table\n");
                goto out_free_dccp_ehash;
        }

        for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
                spin_lock_init(&dccp_hashinfo.bhash[i].lock);
                INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
        }

        rc = dccp_mib_init();
        if (rc)
                goto out_free_dccp_bhash;

        rc = dccp_ackvec_init();
        if (rc)
                goto out_free_dccp_mib;

        rc = dccp_sysctl_init();
        if (rc)
                goto out_ackvec_exit;
out:
        return rc;
out_ackvec_exit:
        dccp_ackvec_exit();
out_free_dccp_mib:
        dccp_mib_exit();
out_free_dccp_bhash:
        free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
        dccp_hashinfo.bhash = NULL;
out_free_dccp_ehash:
        free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
        dccp_hashinfo.ehash = NULL;
out_free_bind_bucket_cachep:
        kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
        dccp_hashinfo.bind_bucket_cachep = NULL;
        goto out;
}

static void __exit dccp_fini(void)
{
        dccp_mib_exit();
        free_pages((unsigned long)dccp_hashinfo.bhash,
                   get_order(dccp_hashinfo.bhash_size *
                             sizeof(struct inet_bind_hashbucket)));
        free_pages((unsigned long)dccp_hashinfo.ehash,
                   get_order(dccp_hashinfo.ehash_size *
                             sizeof(struct inet_ehash_bucket)));
        kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
        dccp_ackvec_exit();
        dccp_sysctl_exit();
}

module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");