/*
 *  net/dccp/proto.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License version 2 as
 *	published by the Free Software Foundation.
 */
#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/ioctls.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"
DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

atomic_t dccp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo dccp_hashinfo;
EXPORT_SYMBOL_GPL(dccp_hashinfo);

/* the maximum queue length for tx in packets. 0 is no limit */
int sysctl_dccp_tx_qlen __read_mostly = 5;
void dccp_set_state(struct sock *sk, const int state)
{
	const int oldstate = sk->sk_state;

	dccp_pr_debug("%s(%p) %s --> %s\n", dccp_role(sk), sk,
		      dccp_state_name(oldstate), dccp_state_name(state));
	WARN_ON(state == oldstate);

	switch (state) {
	case DCCP_OPEN:
		if (oldstate != DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
		break;

	case DCCP_CLOSED:
		if (oldstate == DCCP_OPEN || oldstate == DCCP_ACTIVE_CLOSEREQ ||
		    oldstate == DCCP_CLOSING)
			DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash != NULL &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(sk);
		/* fall through */
	default:
		if (oldstate == DCCP_OPEN)
			DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;
}

EXPORT_SYMBOL_GPL(dccp_set_state);
static void dccp_finish_passive_close(struct sock *sk)
{
	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
		/* Node (client or server) has received Close packet. */
		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_set_state(sk, DCCP_CLOSED);
		break;
	case DCCP_PASSIVE_CLOSEREQ:
		/*
		 * Client received CloseReq. We set the `active' flag so that
		 * dccp_send_close() retransmits the Close as per RFC 4340, 8.3.
		 */
		dccp_send_close(sk, 1);
		dccp_set_state(sk, DCCP_CLOSING);
	}
}
void dccp_done(struct sock *sk)
{
	dccp_set_state(sk, DCCP_CLOSED);
	dccp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);
const char *dccp_packet_name(const int type)
{
	static const char *dccp_packet_names[] = {
		[DCCP_PKT_REQUEST]  = "REQUEST",
		[DCCP_PKT_RESPONSE] = "RESPONSE",
		[DCCP_PKT_DATA]	    = "DATA",
		[DCCP_PKT_ACK]	    = "ACK",
		[DCCP_PKT_DATAACK]  = "DATAACK",
		[DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
		[DCCP_PKT_CLOSE]    = "CLOSE",
		[DCCP_PKT_RESET]    = "RESET",
		[DCCP_PKT_SYNC]	    = "SYNC",
		[DCCP_PKT_SYNCACK]  = "SYNCACK",
	};

	if (type >= DCCP_NR_PKT_TYPES)
		return "INVALID";
	else
		return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);
const char *dccp_state_name(const int state)
{
	static char *dccp_state_names[] = {
		[DCCP_OPEN]		= "OPEN",
		[DCCP_REQUESTING]	= "REQUESTING",
		[DCCP_PARTOPEN]		= "PARTOPEN",
		[DCCP_LISTEN]		= "LISTEN",
		[DCCP_RESPOND]		= "RESPOND",
		[DCCP_CLOSING]		= "CLOSING",
		[DCCP_ACTIVE_CLOSEREQ]	= "CLOSEREQ",
		[DCCP_PASSIVE_CLOSE]	= "PASSIVE_CLOSE",
		[DCCP_PASSIVE_CLOSEREQ]	= "PASSIVE_CLOSEREQ",
		[DCCP_TIME_WAIT]	= "TIME_WAIT",
		[DCCP_CLOSED]		= "CLOSED",
	};

	if (state >= DCCP_MAX_STATES)
		return "INVALID STATE!";
	else
		return dccp_state_names[state];
}

EXPORT_SYMBOL_GPL(dccp_state_name);
int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_minisock *dmsk = dccp_msk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	dccp_minisock_init(&dp->dccps_minisock);

	icsk->icsk_rto		= DCCP_TIMEOUT_INIT;
	icsk->icsk_syn_retries	= sysctl_dccp_request_retries;
	sk->sk_state		= DCCP_CLOSED;
	sk->sk_write_space	= dccp_write_space;
	icsk->icsk_sync_mss	= dccp_sync_mss;
	dp->dccps_mss_cache	= 536;
	dp->dccps_rate_last	= jiffies;
	dp->dccps_role		= DCCP_ROLE_UNDEFINED;
	dp->dccps_service	= DCCP_SERVICE_CODE_IS_ABSENT;
	dp->dccps_l_ack_ratio	= dp->dccps_r_ack_ratio = 1;

	dccp_init_xmit_timers(sk);

	INIT_LIST_HEAD(&dp->dccps_featneg);
	/*
	 * FIXME: We're hardcoding the CCID, and doing this at this point makes
	 * the listening (master) sock get CCID control blocks, which is not
	 * necessary, but for now, to not mess with the test userspace apps,
	 * lets leave it here, later the real solution is to do this in a
	 * setsockopt(CCIDs-I-want/accept). -acme
	 */
	if (likely(ctl_sock_initialized)) {
		int rc = dccp_feat_init(sk);

		if (rc)
			return rc;

		if (dmsk->dccpms_send_ack_vector) {
			dp->dccps_hc_rx_ackvec = dccp_ackvec_alloc(GFP_KERNEL);
			if (dp->dccps_hc_rx_ackvec == NULL)
				return -ENOMEM;
		}
		dp->dccps_hc_rx_ccid = ccid_hc_rx_new(dmsk->dccpms_rx_ccid,
						      sk, GFP_KERNEL);
		dp->dccps_hc_tx_ccid = ccid_hc_tx_new(dmsk->dccpms_tx_ccid,
						      sk, GFP_KERNEL);
		if (unlikely(dp->dccps_hc_rx_ccid == NULL ||
			     dp->dccps_hc_tx_ccid == NULL)) {
			ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
			ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
			if (dmsk->dccpms_send_ack_vector) {
				dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
				dp->dccps_hc_rx_ackvec = NULL;
			}
			dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
			return -ENOMEM;
		}
	} else {
		/* control socket doesn't need feat nego */
		INIT_LIST_HEAD(&dmsk->dccpms_pending);
		INIT_LIST_HEAD(&dmsk->dccpms_conf);
	}

	return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);
void dccp_destroy_sock(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_minisock *dmsk = dccp_msk(sk);

	/*
	 * DCCP doesn't use sk_write_queue, just sk_send_head
	 * for retransmissions
	 */
	if (sk->sk_send_head != NULL) {
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	/* Clean up a referenced DCCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash != NULL)
		inet_put_port(sk);

	kfree(dp->dccps_service_list);
	dp->dccps_service_list = NULL;

	if (dmsk->dccpms_send_ack_vector) {
		dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
		dp->dccps_hc_rx_ackvec = NULL;
	}
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
	dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;

	/* clean up feature negotiation state */
	dccp_feat_list_purge(&dp->dccps_featneg);
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);
static inline int dccp_listen_start(struct sock *sk, int backlog)
{
	struct dccp_sock *dp = dccp_sk(sk);

	dp->dccps_role = DCCP_ROLE_LISTEN;
	/* do not start to listen if feature negotiation setup fails */
	if (dccp_feat_finalise_settings(dp))
		return -EPROTO;
	return inet_csk_listen_start(sk, backlog);
}

static inline int dccp_need_reset(int state)
{
	return state != DCCP_CLOSED && state != DCCP_LISTEN &&
	       state != DCCP_REQUESTING;
}
int dccp_disconnect(struct sock *sk, int flags)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = 0;
	const int old_state = sk->sk_state;

	if (old_state != DCCP_CLOSED)
		dccp_set_state(sk, DCCP_CLOSED);

	/*
	 * This corresponds to the ABORT function of RFC793, sec. 3.8
	 * TCP uses a RST segment, DCCP a Reset packet with Code 2, "Aborted".
	 */
	if (old_state == DCCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (dccp_need_reset(old_state)) {
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		sk->sk_err = ECONNRESET;
	} else if (old_state == DCCP_REQUESTING)
		sk->sk_err = ECONNRESET;

	dccp_clear_xmit_timers(sk);

	__skb_queue_purge(&sk->sk_receive_queue);
	__skb_queue_purge(&sk->sk_write_queue);
	if (sk->sk_send_head != NULL) {
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	inet->dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);

	icsk->icsk_backoff = 0;
	inet_csk_delack_init(sk);
	__sk_dst_reset(sk);

	WARN_ON(inet->num && !icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);
/*
 *	Wait for a DCCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int dccp_poll(struct file *file, struct socket *sock,
		       poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;

	poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == DCCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
		if (atomic_read(&sk->sk_rmem_alloc) > 0)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}
	}
	return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);
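
/*
 * Illustrative userspace sketch (not part of this file): polling a
 * connected DCCP socket. `sock_fd`, `handle_readable()` and
 * `handle_closed()` are hypothetical names; the poll() call and the
 * POLLIN/POLLHUP semantics match dccp_poll() above.
 *
 *	struct pollfd pfd = { .fd = sock_fd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, 1000) > 0) {		// wait up to one second
 *		if (pfd.revents & POLLIN)	// a packet sits in the rx queue
 *			handle_readable(sock_fd);
 *		if (pfd.revents & POLLHUP)	// both directions shut down
 *			handle_closed(sock_fd);
 *	}
 */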
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int rc = -ENOTCONN;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN)
		goto out;

	switch (cmd) {
	case SIOCINQ: {
		struct sk_buff *skb;
		unsigned long amount = 0;

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount of this packet since
			 * that is all that will be read.
			 */
			amount = skb->len;
		}
		rc = put_user(amount, (int __user *)arg);
	}
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
out:
	release_sock(sk);
	return rc;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);
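
/*
 * Illustrative userspace sketch (not part of this file): because SIOCINQ
 * reports only the head-of-queue packet length, it can size a buffer for
 * exactly one read. `sock_fd` is an assumed connected DCCP descriptor.
 *
 *	int avail = 0;
 *
 *	if (ioctl(sock_fd, SIOCINQ, &avail) == 0 && avail > 0) {
 *		char *buf = malloc(avail);	// holds one datagram
 *		if (buf != NULL)
 *			recv(sock_fd, buf, avail, 0);
 *	}
 */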
static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
				   char __user *optval, int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_service_list *sl = NULL;

	if (service == DCCP_SERVICE_INVALID_VALUE ||
	    optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
		return -EINVAL;

	if (optlen > sizeof(service)) {
		sl = kmalloc(optlen, GFP_KERNEL);
		if (sl == NULL)
			return -ENOMEM;

		sl->dccpsl_nr = optlen / sizeof(u32) - 1;
		if (copy_from_user(sl->dccpsl_list,
				   optval + sizeof(service),
				   optlen - sizeof(service)) ||
		    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
			kfree(sl);
			return -EFAULT;
		}
	}

	lock_sock(sk);
	dp->dccps_service = service;

	kfree(dp->dccps_service_list);

	dp->dccps_service_list = sl;
	release_sock(sk);
	return 0;
}
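
/*
 * Illustrative userspace sketch (not part of this file): setting a primary
 * service code plus two extra acceptable ones. Per the layout parsed above,
 * the first __be32 becomes dccps_service and the remainder fill
 * dccps_service_list. The code values (42..44) are examples only.
 *
 *	uint32_t codes[3] = { htonl(42), htonl(43), htonl(44) };
 *
 *	setsockopt(sock_fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
 *		   codes, sizeof(codes));
 */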
static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
{
	u8 *list, len;
	int i, rc;

	if (cscov < 0 || cscov > 15)
		return -EINVAL;
	/*
	 * Populate a list of permissible values, in the range cscov...15. This
	 * is necessary since feature negotiation of single values only works if
	 * both sides incidentally choose the same value. Since the list starts
	 * lowest-value first, negotiation will pick the smallest shared value.
	 */
	if (cscov == 0)
		return 0;
	len = 16 - cscov;

	list = kmalloc(len, GFP_KERNEL);
	if (list == NULL)
		return -ENOBUFS;

	for (i = 0; i < len; i++)
		list[i] = cscov++;

	rc = dccp_feat_register_sp(sk, DCCPF_MIN_CSUM_COVER, rx, list, len);

	if (rc == 0) {
		if (rx)
			dccp_sk(sk)->dccps_pcrlen = cscov;
		else
			dccp_sk(sk)->dccps_pcslen = cscov;
	}
	kfree(list);
	return rc;
}
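
/*
 * Worked example for the loop above: with cscov = 12, len = 16 - 12 = 4 and
 * the preference list becomes {12, 13, 14, 15}. Since the list is ordered
 * lowest-first, negotiation settles on the smallest coverage value of at
 * least 12 that the peer also permits.
 */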
static int dccp_setsockopt_ccid(struct sock *sk, int type,
				char __user *optval, int optlen)
{
	u8 *val;
	int rc = 0;

	if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS)
		return -EINVAL;

	val = kmalloc(optlen, GFP_KERNEL);
	if (val == NULL)
		return -ENOMEM;

	if (copy_from_user(val, optval, optlen)) {
		kfree(val);
		return -EFAULT;
	}

	lock_sock(sk);
	if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID)
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 1, val, optlen);

	if (!rc && (type == DCCP_SOCKOPT_RX_CCID || type == DCCP_SOCKOPT_CCID))
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 0, val, optlen);
	release_sock(sk);

	kfree(val);
	return rc;
}
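
/*
 * Illustrative userspace sketch (not part of this file): registering a CCID
 * preference list before connect(). Each byte is a CCID number; the list is
 * handed to feature negotiation as a server-priority value, so (assuming
 * standard negotiation behaviour) earlier entries are preferred.
 *
 *	uint8_t ccids[2] = { 3, 2 };	// prefer CCID-3, accept CCID-2
 *
 *	setsockopt(sock_fd, SOL_DCCP, DCCP_SOCKOPT_CCID,
 *		   ccids, sizeof(ccids));
 */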
static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
		char __user *optval, int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	int val, err = 0;

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CHANGE_L:
	case DCCP_SOCKOPT_CHANGE_R:
		DCCP_WARN("sockopt(CHANGE_L/R) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CCID:
	case DCCP_SOCKOPT_RX_CCID:
	case DCCP_SOCKOPT_TX_CCID:
		return dccp_setsockopt_ccid(sk, optname, optval, optlen);
	}

	if (optlen < (int)sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	if (optname == DCCP_SOCKOPT_SERVICE)
		return dccp_setsockopt_service(sk, val, optval, optlen);

	lock_sock(sk);
	switch (optname) {
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		if (dp->dccps_role != DCCP_ROLE_SERVER)
			err = -EOPNOTSUPP;
		else
			dp->dccps_server_timewait = (val != 0);
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, false);
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, true);
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return err;
}
int dccp_setsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);
#endif
static int dccp_getsockopt_service(struct sock *sk, int len,
				   __be32 __user *optval,
				   int __user *optlen)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const struct dccp_service_list *sl;
	int err = -ENOENT, slen = 0, total_len = sizeof(u32);

	lock_sock(sk);
	if ((sl = dp->dccps_service_list) != NULL) {
		slen = sl->dccpsl_nr * sizeof(u32);
		total_len += slen;
	}

	err = -EINVAL;
	if (total_len > len)
		goto out;

	err = 0;
	if (put_user(total_len, optlen) ||
	    put_user(dp->dccps_service, optval) ||
	    (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
		err = -EFAULT;
out:
	release_sock(sk);
	return err;
}
static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct dccp_sock *dp;
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < (int)sizeof(int))
		return -EINVAL;

	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_SERVICE:
		return dccp_getsockopt_service(sk, len,
					       (__be32 __user *)optval, optlen);
	case DCCP_SOCKOPT_GET_CUR_MPS:
		val = dp->dccps_mss_cache;
		break;
	case DCCP_SOCKOPT_AVAILABLE_CCIDS:
		return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
	case DCCP_SOCKOPT_TX_CCID:
		val = ccid_get_current_tx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_RX_CCID:
		val = ccid_get_current_rx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		val = dp->dccps_server_timewait;
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		val = dp->dccps_pcslen;
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		val = dp->dccps_pcrlen;
		break;
	case 128 ... 191:
		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	case 192 ... 255:
		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	default:
		return -ENOPROTOOPT;
	}

	len = sizeof(val);
	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
int dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
#endif
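
/*
 * Illustrative userspace sketch (not part of this file): querying the
 * current maximum packet size so writes never exceed dccps_mss_cache
 * (dccp_sendmsg() below returns -EMSGSIZE past it). `sock_fd` is an
 * assumed connected DCCP descriptor.
 *
 *	int mps;
 *	socklen_t len = sizeof(mps);
 *
 *	if (getsockopt(sock_fd, SOL_DCCP, DCCP_SOCKOPT_GET_CUR_MPS,
 *		       &mps, &len) == 0)
 *		printf("can send up to %d bytes per packet\n", mps);
 */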
int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const int flags = msg->msg_flags;
	const int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int rc, size;
	long timeo;

	if (len > dp->dccps_mss_cache)
		return -EMSGSIZE;

	lock_sock(sk);

	if (sysctl_dccp_tx_qlen &&
	    (sk->sk_write_queue.qlen >= sysctl_dccp_tx_qlen)) {
		rc = -EAGAIN;
		goto out_release;
	}

	timeo = sock_sndtimeo(sk, noblock);

	/*
	 * We have to use sk_stream_wait_connect here to set sk_write_pending,
	 * so that the trick in dccp_rcv_request_sent_state_process works.
	 */
	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
		if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_release;

	size = sk->sk_prot->max_header + len;
	release_sock(sk);
	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	lock_sock(sk);
	if (skb == NULL)
		goto out_release;

	skb_reserve(skb, sk->sk_prot->max_header);
	rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (rc != 0)
		goto out_discard;

	skb_queue_tail(&sk->sk_write_queue, skb);
	dccp_write_xmit(sk, 0);
out_release:
	release_sock(sk);
	return rc ? : len;
out_discard:
	kfree_skb(skb);
	goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);
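
/*
 * Illustrative userspace sketch (not part of this file): a send loop that
 * copes with the two DCCP-specific errors above -- EMSGSIZE for datagrams
 * larger than the current MPS, and EAGAIN when the tx queue already holds
 * sysctl_dccp_tx_qlen packets. `sock_fd`, `buf`, `buflen` and
 * `shrink_datagram()` are hypothetical names.
 *
 *	ssize_t n;
 *
 *	do {
 *		n = send(sock_fd, buf, buflen, 0);
 *	} while (n < 0 && errno == EAGAIN);	// tx queue full: retry
 *
 *	if (n < 0 && errno == EMSGSIZE)
 *		shrink_datagram();		// re-send in smaller pieces
 */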
int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len, int nonblock, int flags, int *addr_len)
{
	const struct dccp_hdr *dh;
	long timeo;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN) {
		len = -ENOTCONN;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, nonblock);

	do {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		if (skb == NULL)
			goto verify_sock_status;

		dh = dccp_hdr(skb);

		switch (dh->dccph_type) {
		case DCCP_PKT_DATA:
		case DCCP_PKT_DATAACK:
			goto found_ok_skb;

		case DCCP_PKT_CLOSE:
		case DCCP_PKT_CLOSEREQ:
			if (!(flags & MSG_PEEK))
				dccp_finish_passive_close(sk);
			/* fall through */
		case DCCP_PKT_RESET:
			dccp_pr_debug("found fin (%s) ok!\n",
				      dccp_packet_name(dh->dccph_type));
			len = 0;
			goto found_fin_ok;
		default:
			dccp_pr_debug("packet_type=%s\n",
				      dccp_packet_name(dh->dccph_type));
			sk_eat_skb(sk, skb, 0);
		}
verify_sock_status:
		if (sock_flag(sk, SOCK_DONE)) {
			len = 0;
			break;
		}

		if (sk->sk_err) {
			len = sock_error(sk);
			break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			len = 0;
			break;
		}

		if (sk->sk_state == DCCP_CLOSED) {
			if (!sock_flag(sk, SOCK_DONE)) {
				/* This occurs when user tries to read
				 * from never connected socket.
				 */
				len = -ENOTCONN;
				break;
			}
			len = 0;
			break;
		}

		if (!timeo) {
			len = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			len = sock_intr_errno(timeo);
			break;
		}

		sk_wait_data(sk, &timeo);
		continue;
	found_ok_skb:
		if (len > skb->len)
			len = skb->len;
		else if (len < skb->len)
			msg->msg_flags |= MSG_TRUNC;

		if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
			/* Exception. Bailout! */
			len = -EFAULT;
			break;
		}
	found_fin_ok:
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb, 0);
		break;
	} while (1);
out:
	release_sock(sk);
	return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);
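
/*
 * Illustrative userspace sketch (not part of this file): per the loop
 * above, a return of 0 from a DCCP read is not an empty datagram -- it
 * signals Close/CloseReq/Reset or a shut-down socket. `sock_fd`, `buf`
 * and `process_datagram()` are hypothetical names.
 *
 *	ssize_t n = recv(sock_fd, buf, sizeof(buf), 0);
 *
 *	if (n > 0)
 *		process_datagram(buf, n);
 *	else if (n == 0)
 *		close(sock_fd);		// peer closed the connection
 */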
int inet_dccp_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
		goto out;

	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != DCCP_LISTEN) {
		/*
		 * FIXME: here it probably should be sk->sk_prot->listen_start
		 * see tcp_listen_start
		 */
		err = dccp_listen_start(sk, backlog);
		if (err)
			goto out;
	}
	sk->sk_max_ack_backlog = backlog;
	err = 0;

out:
	release_sock(sk);
	return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);
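
/*
 * Illustrative userspace sketch (not part of this file): the usual
 * socket/bind/listen/accept sequence as it applies to DCCP. A service
 * code is set before listen(); the value 42 and `addr` are examples.
 *
 *	int srv = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
 *	uint32_t service = htonl(42);
 *
 *	setsockopt(srv, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
 *		   &service, sizeof(service));
 *	bind(srv, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(srv, 5);			// ends up in inet_dccp_listen()
 *	int conn = accept(srv, NULL, NULL);
 */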
static void dccp_terminate_connection(struct sock *sk)
{
	u8 next_state = DCCP_CLOSED;

	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
	case DCCP_PASSIVE_CLOSEREQ:
		dccp_finish_passive_close(sk);
		break;
	case DCCP_PARTOPEN:
		dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk);
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		/* fall through */
	case DCCP_OPEN:
		dccp_send_close(sk, 1);

		if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER &&
		    !dccp_sk(sk)->dccps_server_timewait)
			next_state = DCCP_ACTIVE_CLOSEREQ;
		else
			next_state = DCCP_CLOSING;
		/* fall through */
	default:
		dccp_set_state(sk, next_state);
	}
}
void dccp_close(struct sock *sk, long timeout)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	u32 data_was_unread = 0;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	sk_stop_timer(sk, &dp->dccps_xmit_timer);

	/*
	 * We need to flush the recv. buffs. We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		data_was_unread += skb->len;
		__kfree_skb(skb);
	}

	if (data_was_unread) {
		/* Unread data was tossed, send an appropriate Reset Code */
		DCCP_WARN("DCCP: ABORT -- %u bytes unread\n", data_was_unread);
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		dccp_set_state(sk, DCCP_CLOSED);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (sk->sk_state != DCCP_CLOSED) {
		dccp_terminate_connection(sk);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);
	atomic_inc(sk->sk_prot->orphan_count);

	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	WARN_ON(sock_owned_by_user(sk));

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);
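
/*
 * Illustrative userspace sketch (not part of this file): per the zero-linger
 * branch above, SO_LINGER with l_linger == 0 makes close() go through
 * disconnect() -- which sends a Reset with code "Aborted" -- instead of the
 * normal Close handshake. `sock_fd` is an assumed descriptor.
 *
 *	struct linger lin = { .l_onoff = 1, .l_linger = 0 };
 *
 *	setsockopt(sock_fd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
 *	close(sock_fd);		// aborts rather than closing gracefully
 */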
void dccp_shutdown(struct sock *sk, int how)
{
	dccp_pr_debug("called shutdown(%x)\n", how);
}

EXPORT_SYMBOL_GPL(dccp_shutdown);

static inline int dccp_mib_init(void)
{
	return snmp_mib_init((void**)dccp_statistics, sizeof(struct dccp_mib));
}

static inline void dccp_mib_exit(void)
{
	snmp_mib_free((void**)dccp_statistics);
}

static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
int dccp_debug;
module_param(dccp_debug, bool, 0644);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif
static int __init dccp_init(void)
{
	unsigned long goal;
	int ehash_order, bhash_order, i;
	int rc = -ENOBUFS;

	BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
		     FIELD_SIZEOF(struct sk_buff, cb));

	inet_hashinfo_init(&dccp_hashinfo);
	dccp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("dccp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!dccp_hashinfo.bind_bucket_cachep)
		goto out;

	/*
	 * Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (num_physpages >= (128 * 1024))
		goal = num_physpages >> (21 - PAGE_SHIFT);
	else
		goal = num_physpages >> (23 - PAGE_SHIFT);

	if (thash_entries)
		goal = (thash_entries *
			sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
	for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
		;
	do {
		dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE /
					sizeof(struct inet_ehash_bucket);
		while (dccp_hashinfo.ehash_size &
		       (dccp_hashinfo.ehash_size - 1))
			dccp_hashinfo.ehash_size--;
		dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC, ehash_order);
	} while (!dccp_hashinfo.ehash && --ehash_order > 0);

	if (!dccp_hashinfo.ehash) {
		DCCP_CRIT("Failed to allocate DCCP established hash table");
		goto out_free_bind_bucket_cachep;
	}

	for (i = 0; i < dccp_hashinfo.ehash_size; i++) {
		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);
		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i);
	}

	if (inet_ehash_locks_alloc(&dccp_hashinfo))
		goto out_free_dccp_ehash;

	bhash_order = ehash_order;

	do {
		dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
					sizeof(struct inet_bind_hashbucket);
		if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
		    bhash_order > 0)
			continue;
		dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC, bhash_order);
	} while (!dccp_hashinfo.bhash && --bhash_order >= 0);

	if (!dccp_hashinfo.bhash) {
		DCCP_CRIT("Failed to allocate DCCP bind hash table");
		goto out_free_dccp_locks;
	}

	for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
		spin_lock_init(&dccp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
	}

	rc = dccp_mib_init();
	if (rc)
		goto out_free_dccp_bhash;

	rc = dccp_ackvec_init();
	if (rc)
		goto out_free_dccp_mib;

	rc = dccp_sysctl_init();
	if (rc)
		goto out_ackvec_exit;

	dccp_timestamping_init();
out:
	return rc;
out_ackvec_exit:
	dccp_ackvec_exit();
out_free_dccp_mib:
	dccp_mib_exit();
out_free_dccp_bhash:
	free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
	dccp_hashinfo.bhash = NULL;
out_free_dccp_locks:
	inet_ehash_locks_free(&dccp_hashinfo);
out_free_dccp_ehash:
	free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
	dccp_hashinfo.ehash = NULL;
out_free_bind_bucket_cachep:
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_hashinfo.bind_bucket_cachep = NULL;
	goto out;
}
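
/*
 * Worked example of the sizing above, assuming 4 KiB pages (PAGE_SHIFT ==
 * 12) and roughly 1 GiB of RAM: num_physpages is about 262144, which is
 * >= 128 * 1024, so goal = 262144 >> (21 - 12) = 512 pages and ehash_order
 * becomes 9 (1 << 9 = 512). The ehash_size derived from those pages is then
 * rounded down to a power of two by the inner while loop, keeping the hash
 * mask computation cheap.
 */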
static void __exit dccp_fini(void)
{
	dccp_mib_exit();
	free_pages((unsigned long)dccp_hashinfo.bhash,
		   get_order(dccp_hashinfo.bhash_size *
			     sizeof(struct inet_bind_hashbucket)));
	free_pages((unsigned long)dccp_hashinfo.ehash,
		   get_order(dccp_hashinfo.ehash_size *
			     sizeof(struct inet_ehash_bucket)));
	inet_ehash_locks_free(&dccp_hashinfo);
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_ackvec_exit();
	dccp_sysctl_exit();
}

module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");