/*
 *  net/dccp/proto.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License version 2 as
 *	published by the Free Software Foundation.
 */
#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/semaphore.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"
DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

atomic_t dccp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo __cacheline_aligned dccp_hashinfo = {
	.lhash_lock	= RW_LOCK_UNLOCKED,
	.lhash_users	= ATOMIC_INIT(0),
	.lhash_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(dccp_hashinfo.lhash_wait),
};

EXPORT_SYMBOL_GPL(dccp_hashinfo);
void dccp_set_state(struct sock *sk, const int state)
{
	const int oldstate = sk->sk_state;

	dccp_pr_debug("%s(%p) %-10.10s -> %s\n",
		      dccp_role(sk), sk,
		      dccp_state_name(oldstate), dccp_state_name(state));
	WARN_ON(state == oldstate);

	switch (state) {
	case DCCP_OPEN:
		if (oldstate != DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
		break;

	case DCCP_CLOSED:
		if (oldstate == DCCP_CLOSING || oldstate == DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash != NULL &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(&dccp_hashinfo, sk);
		/* fall through */
	default:
		if (oldstate == DCCP_OPEN)
			DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;
}

EXPORT_SYMBOL_GPL(dccp_set_state);
void dccp_done(struct sock *sk)
{
	dccp_set_state(sk, DCCP_CLOSED);
	dccp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);
const char *dccp_packet_name(const int type)
{
	static const char *dccp_packet_names[] = {
		[DCCP_PKT_REQUEST]  = "REQUEST",
		[DCCP_PKT_RESPONSE] = "RESPONSE",
		[DCCP_PKT_DATA]	    = "DATA",
		[DCCP_PKT_ACK]	    = "ACK",
		[DCCP_PKT_DATAACK]  = "DATAACK",
		[DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
		[DCCP_PKT_CLOSE]    = "CLOSE",
		[DCCP_PKT_RESET]    = "RESET",
		[DCCP_PKT_SYNC]	    = "SYNC",
		[DCCP_PKT_SYNCACK]  = "SYNCACK",
	};

	if (type >= DCCP_NR_PKT_TYPES)
		return "INVALID";
	else
		return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);
const char *dccp_state_name(const int state)
{
	static char *dccp_state_names[] = {
		[DCCP_OPEN]	  = "OPEN",
		[DCCP_REQUESTING] = "REQUESTING",
		[DCCP_PARTOPEN]	  = "PARTOPEN",
		[DCCP_LISTEN]	  = "LISTEN",
		[DCCP_RESPOND]	  = "RESPOND",
		[DCCP_CLOSING]	  = "CLOSING",
		[DCCP_TIME_WAIT]  = "TIME_WAIT",
		[DCCP_CLOSED]	  = "CLOSED",
	};

	if (state >= DCCP_MAX_STATES)
		return "INVALID STATE!";
	else
		return dccp_state_names[state];
}

EXPORT_SYMBOL_GPL(dccp_state_name);
void dccp_hash(struct sock *sk)
{
	inet_hash(&dccp_hashinfo, sk);
}

EXPORT_SYMBOL_GPL(dccp_hash);

void dccp_unhash(struct sock *sk)
{
	inet_unhash(&dccp_hashinfo, sk);
}

EXPORT_SYMBOL_GPL(dccp_unhash);
int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_minisock *dmsk = dccp_msk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	dccp_minisock_init(&dp->dccps_minisock);
	do_gettimeofday(&dp->dccps_epoch);

	/*
	 * FIXME: We're hardcoding the CCID, and doing this at this point makes
	 * the listening (master) sock get CCID control blocks, which is not
	 * necessary, but for now, to not mess with the test userspace apps,
	 * let's leave it here, later the real solution is to do this in a
	 * setsockopt(CCIDs-I-want/accept). -acme
	 */
	if (likely(ctl_sock_initialized)) {
		int rc = dccp_feat_init(dmsk);

		if (rc)
			return rc;

		if (dmsk->dccpms_send_ack_vector) {
			dp->dccps_hc_rx_ackvec = dccp_ackvec_alloc(GFP_KERNEL);
			if (dp->dccps_hc_rx_ackvec == NULL)
				return -ENOMEM;
		}
		dp->dccps_hc_rx_ccid = ccid_hc_rx_new(dmsk->dccpms_rx_ccid,
						      sk, GFP_KERNEL);
		dp->dccps_hc_tx_ccid = ccid_hc_tx_new(dmsk->dccpms_tx_ccid,
						      sk, GFP_KERNEL);
		if (unlikely(dp->dccps_hc_rx_ccid == NULL ||
			     dp->dccps_hc_tx_ccid == NULL)) {
			ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
			ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
			if (dmsk->dccpms_send_ack_vector) {
				dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
				dp->dccps_hc_rx_ackvec = NULL;
			}
			dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
			return -ENOMEM;
		}
	} else {
		/* control socket doesn't need feat nego */
		INIT_LIST_HEAD(&dmsk->dccpms_pending);
		INIT_LIST_HEAD(&dmsk->dccpms_conf);
	}

	dccp_init_xmit_timers(sk);
	icsk->icsk_rto		= DCCP_TIMEOUT_INIT;
	icsk->icsk_syn_retries	= sysctl_dccp_request_retries;
	sk->sk_state		= DCCP_CLOSED;
	sk->sk_write_space	= dccp_write_space;
	icsk->icsk_sync_mss	= dccp_sync_mss;
	dp->dccps_mss_cache	= 536;
	dp->dccps_role		= DCCP_ROLE_UNDEFINED;
	dp->dccps_service	= DCCP_SERVICE_CODE_IS_ABSENT;
	dp->dccps_l_ack_ratio	= dp->dccps_r_ack_ratio = 1;

	return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);
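/*
 * Illustrative sketch (not part of this file): a socket that ends up in
 * dccp_init_sock() above is obtained from userspace with plain socket(2);
 * SOCK_DCCP and IPPROTO_DCCP are the values this protocol registers with
 * the socket layer.
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *
 *	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
 *	if (fd < 0)
 *		perror("socket");
 */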
int dccp_destroy_sock(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_minisock *dmsk = dccp_msk(sk);

	/*
	 * DCCP doesn't use sk_write_queue, just sk_send_head
	 * for retransmissions
	 */
	if (sk->sk_send_head != NULL) {
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	/* Clean up a referenced DCCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash != NULL)
		inet_put_port(&dccp_hashinfo, sk);

	kfree(dp->dccps_service_list);
	dp->dccps_service_list = NULL;

	if (dmsk->dccpms_send_ack_vector) {
		dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
		dp->dccps_hc_rx_ackvec = NULL;
	}
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
	dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;

	/* clean up feature negotiation state */
	dccp_feat_clean(dmsk);

	return 0;
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);
static inline int dccp_listen_start(struct sock *sk, int backlog)
{
	struct dccp_sock *dp = dccp_sk(sk);

	dp->dccps_role = DCCP_ROLE_LISTEN;
	return inet_csk_listen_start(sk, backlog);
}
int dccp_disconnect(struct sock *sk, int flags)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = 0;
	const int old_state = sk->sk_state;

	if (old_state != DCCP_CLOSED)
		dccp_set_state(sk, DCCP_CLOSED);

	/* ABORT function of RFC793 */
	if (old_state == DCCP_LISTEN) {
		inet_csk_listen_stop(sk);
	/* FIXME: do the active reset thing */
	} else if (old_state == DCCP_REQUESTING)
		sk->sk_err = ECONNRESET;

	dccp_clear_xmit_timers(sk);
	__skb_queue_purge(&sk->sk_receive_queue);
	if (sk->sk_send_head != NULL) {
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	inet->dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);

	icsk->icsk_backoff = 0;
	inet_csk_delack_init(sk);
	__sk_dst_reset(sk);

	BUG_TRAP(!inet->num || icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);
/*
 * Wait for a DCCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
unsigned int dccp_poll(struct file *file, struct socket *sock,
		       poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;

	poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == DCCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	   by poll logic and correct handling of state changes
	   made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
		if (atomic_read(&sk->sk_rmem_alloc) > 0)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}
	}
	return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);
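/*
 * Illustrative sketch (not part of this file): a userspace caller waiting
 * for the events dccp_poll() reports, on a hypothetical descriptor fd.
 * POLLIN here means a packet is queued and recv() will not block.
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		handle_readable(fd);
 */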
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	dccp_pr_debug("entry\n");
	return -ENOIOCTLCMD;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);
static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
				   char __user *optval, int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_service_list *sl = NULL;

	if (service == DCCP_SERVICE_INVALID_VALUE ||
	    optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
		return -EINVAL;

	if (optlen > sizeof(service)) {
		sl = kmalloc(optlen, GFP_KERNEL);
		if (sl == NULL)
			return -ENOMEM;

		sl->dccpsl_nr = optlen / sizeof(u32) - 1;
		if (copy_from_user(sl->dccpsl_list,
				   optval + sizeof(service),
				   optlen - sizeof(service)) ||
		    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
			kfree(sl);
			return -EFAULT;
		}
	}

	lock_sock(sk);
	dp->dccps_service = service;

	kfree(dp->dccps_service_list);

	dp->dccps_service_list = sl;
	release_sock(sk);
	return 0;
}
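/*
 * Illustrative sketch (not part of this file): the service code has to be
 * set before connect() or listen().  A minimal userspace caller, using a
 * hypothetical service code of 42:
 *
 *	#include <arpa/inet.h>
 *	#include <linux/dccp.h>
 *
 *	uint32_t service = htonl(42);
 *	if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
 *		       &service, sizeof(service)) < 0)
 *		perror("setsockopt(DCCP_SOCKOPT_SERVICE)");
 */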
/* byte 1 is feature.  the rest is the preference list */
static int dccp_setsockopt_change(struct sock *sk, int type,
				  struct dccp_so_feat __user *optval)
{
	struct dccp_so_feat opt;
	u8 *val;
	int rc;

	if (copy_from_user(&opt, optval, sizeof(opt)))
		return -EFAULT;

	val = kmalloc(opt.dccpsf_len, GFP_KERNEL);
	if (!val)
		return -ENOMEM;

	if (copy_from_user(val, opt.dccpsf_val, opt.dccpsf_len)) {
		rc = -EFAULT;
		goto out_free_val;
	}

	rc = dccp_feat_change(dccp_msk(sk), type, opt.dccpsf_feat,
			      val, opt.dccpsf_len, GFP_KERNEL);
	if (rc)
		goto out_free_val;

out:
	return rc;

out_free_val:
	kfree(val);
	goto out;
}
static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, int optlen)
{
	struct dccp_sock *dp;
	int err;
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	if (optname == DCCP_SOCKOPT_SERVICE)
		return dccp_setsockopt_service(sk, val, optval, optlen);

	lock_sock(sk);
	dp = dccp_sk(sk);
	err = 0;

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		dp->dccps_packet_size = val;
		break;
	case DCCP_SOCKOPT_CHANGE_L:
		if (optlen != sizeof(struct dccp_so_feat))
			err = -EINVAL;
		else
			err = dccp_setsockopt_change(sk, DCCPO_CHANGE_L,
						     (struct dccp_so_feat __user *)
						     optval);
		break;
	case DCCP_SOCKOPT_CHANGE_R:
		if (optlen != sizeof(struct dccp_so_feat))
			err = -EINVAL;
		else
			err = dccp_setsockopt_change(sk, DCCPO_CHANGE_R,
						     (struct dccp_so_feat __user *)
						     optval);
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:	/* sender side, RFC 4340, sec. 9.2 */
		if (val < 0 || val > 15)
			err = -EINVAL;
		else
			dp->dccps_pcslen = val;
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:	/* receiver side, RFC 4340 sec. 9.2.1 */
		if (val < 0 || val > 15)
			err = -EINVAL;
		else {
			dp->dccps_pcrlen = val;
			/* FIXME: add feature negotiation,
			 * ChangeL(MinimumChecksumCoverage, val) */
		}
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
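/*
 * Illustrative sketch (not part of this file): per RFC 4340, sec. 9.2, a
 * checksum coverage of 1 covers only the DCCP header, while 0 (the
 * default) covers the whole packet.  A sender asking for header-only
 * coverage:
 *
 *	int cscov = 1;
 *	if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SEND_CSCOV,
 *		       &cscov, sizeof(cscov)) < 0)
 *		perror("setsockopt(DCCP_SOCKOPT_SEND_CSCOV)");
 */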
int dccp_setsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);
#ifdef CONFIG_COMPAT
int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);
#endif
static int dccp_getsockopt_service(struct sock *sk, int len,
				   __be32 __user *optval,
				   int __user *optlen)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const struct dccp_service_list *sl;
	int err = -ENOENT, slen = 0, total_len = sizeof(u32);

	lock_sock(sk);
	if ((sl = dp->dccps_service_list) != NULL) {
		slen = sl->dccpsl_nr * sizeof(u32);
		total_len += slen;
	}

	err = -EINVAL;
	if (total_len > len)
		goto out;

	err = 0;
	if (put_user(total_len, optlen) ||
	    put_user(dp->dccps_service, optval) ||
	    (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
		err = -EFAULT;
out:
	release_sock(sk);
	return err;
}
static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct dccp_sock *dp;
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < sizeof(int))
		return -EINVAL;

	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		val = dp->dccps_packet_size;
		len = sizeof(dp->dccps_packet_size);
		break;
	case DCCP_SOCKOPT_SERVICE:
		return dccp_getsockopt_service(sk, len,
					       (__be32 __user *)optval, optlen);
	case DCCP_SOCKOPT_SEND_CSCOV:
		val = dp->dccps_pcslen;
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		val = dp->dccps_pcrlen;
		break;
	case 128 ... 191:
		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	case 192 ... 255:
		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
int dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);
#ifdef CONFIG_COMPAT
int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
#endif
int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const int flags = msg->msg_flags;
	const int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int rc, size;
	long timeo;

	if (len > dp->dccps_mss_cache)
		return -EMSGSIZE;

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, noblock);

	/*
	 * We have to use sk_stream_wait_connect here to set sk_write_pending,
	 * so that the trick in dccp_rcv_request_sent_state_process works.
	 */
	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN | DCCPF_CLOSING))
		if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_release;

	size = sk->sk_prot->max_header + len;
	release_sock(sk);
	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	lock_sock(sk);
	if (skb == NULL)
		goto out_release;

	skb_reserve(skb, sk->sk_prot->max_header);
	rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (rc != 0)
		goto out_discard;

	skb_queue_tail(&sk->sk_write_queue, skb);
	dccp_write_xmit(sk, 0);
out_release:
	release_sock(sk);
	return rc ? : len;
out_discard:
	kfree_skb(skb);
	goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);
int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len, int nonblock, int flags, int *addr_len)
{
	const struct dccp_hdr *dh;
	long timeo;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN) {
		len = -ENOTCONN;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, nonblock);

	do {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		if (skb == NULL)
			goto verify_sock_status;

		dh = dccp_hdr(skb);

		if (dh->dccph_type == DCCP_PKT_DATA ||
		    dh->dccph_type == DCCP_PKT_DATAACK)
			goto found_ok_skb;

		if (dh->dccph_type == DCCP_PKT_RESET ||
		    dh->dccph_type == DCCP_PKT_CLOSE) {
			dccp_pr_debug("found fin ok!\n");
			len = 0;
			goto found_fin_ok;
		}
		dccp_pr_debug("packet_type=%s\n",
			      dccp_packet_name(dh->dccph_type));
		sk_eat_skb(sk, skb, 0);
verify_sock_status:
		if (sock_flag(sk, SOCK_DONE)) {
			len = 0;
			break;
		}

		if (sk->sk_err) {
			len = sock_error(sk);
			break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			len = 0;
			break;
		}

		if (sk->sk_state == DCCP_CLOSED) {
			if (!sock_flag(sk, SOCK_DONE)) {
				/* This occurs when user tries to read
				 * from never connected socket.
				 */
				len = -ENOTCONN;
				break;
			}
			len = 0;
			break;
		}

		if (!timeo) {
			len = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			len = sock_intr_errno(timeo);
			break;
		}

		sk_wait_data(sk, &timeo);
		continue;
	found_ok_skb:
		if (len > skb->len)
			len = skb->len;
		else if (len < skb->len)
			msg->msg_flags |= MSG_TRUNC;

		if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
			/* Exception. Bailout! */
			len = -EFAULT;
			break;
		}
	found_fin_ok:
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb, 0);
		break;
	} while (1);
out:
	release_sock(sk);
	return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);
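/*
 * Illustrative sketch (not part of this file): DCCP preserves datagram
 * boundaries, so one send() becomes one DCCP-Data packet (dccp_sendmsg()
 * above rejects anything larger than the MSS with -EMSGSIZE) and one
 * recv() returns at most one packet's payload:
 *
 *	char buf[256];
 *
 *	if (send(fd, "ping", 4, 0) < 0)
 *		perror("send");
 *	if (recv(fd, buf, sizeof(buf), 0) < 0)
 *		perror("recv");
 */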
int inet_dccp_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
		goto out;

	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != DCCP_LISTEN) {
		/*
		 * FIXME: here it probably should be sk->sk_prot->listen_start
		 * see tcp_listen_start
		 */
		err = dccp_listen_start(sk, backlog);
		if (err)
			goto out;
	}
	sk->sk_max_ack_backlog = backlog;
	err = 0;

out:
	release_sock(sk);
	return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);
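/*
 * Illustrative sketch (not part of this file): the usual passive open that
 * reaches inet_dccp_listen() via sock->ops->listen, with a hypothetical
 * port:
 *
 *	#include <netinet/in.h>
 *
 *	struct sockaddr_in sa = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(5001),
 *	};
 *
 *	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
 *		perror("bind");
 *	if (listen(fd, 5) < 0)
 *		perror("listen");
 *	int conn = accept(fd, NULL, NULL);
 */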
static const unsigned char dccp_new_state[] = {
	/* current state:	 new state:	  action:	*/
	[0]		  = DCCP_CLOSED,
	[DCCP_OPEN]	  = DCCP_CLOSING | DCCP_ACTION_FIN,
	[DCCP_REQUESTING] = DCCP_CLOSED,
	[DCCP_PARTOPEN]	  = DCCP_CLOSING | DCCP_ACTION_FIN,
	[DCCP_LISTEN]	  = DCCP_CLOSED,
	[DCCP_RESPOND]	  = DCCP_CLOSED,
	[DCCP_CLOSING]	  = DCCP_CLOSED,
	[DCCP_TIME_WAIT]  = DCCP_CLOSED,
	[DCCP_CLOSED]	  = DCCP_CLOSED,
};

static int dccp_close_state(struct sock *sk)
{
	const int next = dccp_new_state[sk->sk_state];
	const int ns = next & DCCP_STATE_MASK;

	if (ns != sk->sk_state)
		dccp_set_state(sk, ns);

	return next & DCCP_ACTION_FIN;
}
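/*
 * Worked example: for a socket in DCCP_OPEN, dccp_new_state[DCCP_OPEN] is
 * DCCP_CLOSING | DCCP_ACTION_FIN, so dccp_close_state() moves the socket
 * to DCCP_CLOSING and returns non-zero, which tells dccp_close() below to
 * send a CLOSE packet before waiting for the connection to terminate.
 */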
void dccp_close(struct sock *sk, long timeout)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	sk_stop_timer(sk, &dp->dccps_xmit_timer);

	/*
	 * We need to flush the recv. buffs. We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	/* FIXME: check for unread data */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		__kfree_skb(skb);
	}

	if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (dccp_close_state(sk)) {
		dccp_send_close(sk, 1);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);
	atomic_inc(sk->sk_prot->orphan_count);

	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	BUG_TRAP(!sock_owned_by_user(sk));

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	/*
	 * The last release_sock may have processed the CLOSE or RESET
	 * packet moving sock to CLOSED state, if not we have to fire
	 * the CLOSE/CLOSEREQ retransmission timer, see "8.3. Termination"
	 * in draft-ietf-dccp-spec-11. -acme
	 */
	if (sk->sk_state == DCCP_CLOSING) {
		/* FIXME: should start at 2 * RTT */
		/* Timer for repeating the CLOSE/CLOSEREQ until an answer. */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  inet_csk(sk)->icsk_rto,
					  DCCP_RTO_MAX);
#if 0
		/* Yeah, we should use sk->sk_prot->orphan_count, etc */
		dccp_set_state(sk, DCCP_CLOSED);
#endif
	}

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);
void dccp_shutdown(struct sock *sk, int how)
{
	dccp_pr_debug("entry\n");
}

EXPORT_SYMBOL_GPL(dccp_shutdown);
static int __init dccp_mib_init(void)
{
	int rc = -ENOMEM;

	dccp_statistics[0] = alloc_percpu(struct dccp_mib);
	if (dccp_statistics[0] == NULL)
		goto out;

	dccp_statistics[1] = alloc_percpu(struct dccp_mib);
	if (dccp_statistics[1] == NULL)
		goto out_free_one;

	rc = 0;
out:
	return rc;
out_free_one:
	free_percpu(dccp_statistics[0]);
	dccp_statistics[0] = NULL;
	goto out;
}
static void dccp_mib_exit(void)
{
	free_percpu(dccp_statistics[0]);
	free_percpu(dccp_statistics[1]);
	dccp_statistics[0] = dccp_statistics[1] = NULL;
}
static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
int dccp_debug;
module_param(dccp_debug, int, 0444);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif
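/*
 * Illustrative sketch (not part of this file): both parameters are read at
 * load time, so they are set on the modprobe command line, e.g.
 *
 *	modprobe dccp thash_entries=8192 dccp_debug=1
 *
 * (dccp_debug only exists on CONFIG_IP_DCCP_DEBUG builds.)
 */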
static int __init dccp_init(void)
{
	unsigned long goal;
	int ehash_order, bhash_order, i;
	int rc = -ENOBUFS;

	dccp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("dccp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!dccp_hashinfo.bind_bucket_cachep)
		goto out;

	/*
	 * Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (num_physpages >= (128 * 1024))
		goal = num_physpages >> (21 - PAGE_SHIFT);
	else
		goal = num_physpages >> (23 - PAGE_SHIFT);

	if (thash_entries)
		goal = (thash_entries *
			sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
	for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
		;
	do {
		dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE /
					sizeof(struct inet_ehash_bucket);
		dccp_hashinfo.ehash_size >>= 1;
		while (dccp_hashinfo.ehash_size &
		       (dccp_hashinfo.ehash_size - 1))
			dccp_hashinfo.ehash_size--;
		dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC, ehash_order);
	} while (!dccp_hashinfo.ehash && --ehash_order > 0);

	if (!dccp_hashinfo.ehash) {
		printk(KERN_CRIT "Failed to allocate DCCP "
				 "established hash table\n");
		goto out_free_bind_bucket_cachep;
	}

	for (i = 0; i < (dccp_hashinfo.ehash_size << 1); i++) {
		rwlock_init(&dccp_hashinfo.ehash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain);
	}

	bhash_order = ehash_order;

	do {
		dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
					sizeof(struct inet_bind_hashbucket);
		if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
		    bhash_order > 0)
			continue;
		dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC, bhash_order);
	} while (!dccp_hashinfo.bhash && --bhash_order >= 0);

	if (!dccp_hashinfo.bhash) {
		printk(KERN_CRIT "Failed to allocate DCCP bind hash table\n");
		goto out_free_dccp_ehash;
	}

	for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
		spin_lock_init(&dccp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
	}

	rc = dccp_mib_init();
	if (rc)
		goto out_free_dccp_bhash;

	rc = dccp_ackvec_init();
	if (rc)
		goto out_free_dccp_mib;

	rc = dccp_sysctl_init();
	if (rc)
		goto out_ackvec_exit;
out:
	return rc;
out_ackvec_exit:
	dccp_ackvec_exit();
out_free_dccp_mib:
	dccp_mib_exit();
out_free_dccp_bhash:
	free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
	dccp_hashinfo.bhash = NULL;
out_free_dccp_ehash:
	free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
	dccp_hashinfo.ehash = NULL;
out_free_bind_bucket_cachep:
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_hashinfo.bind_bucket_cachep = NULL;
	goto out;
}
static void __exit dccp_fini(void)
{
	dccp_mib_exit();
	free_pages((unsigned long)dccp_hashinfo.bhash,
		   get_order(dccp_hashinfo.bhash_size *
			     sizeof(struct inet_bind_hashbucket)));
	free_pages((unsigned long)dccp_hashinfo.ehash,
		   get_order(dccp_hashinfo.ehash_size *
			     sizeof(struct inet_ehash_bucket)));
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_ackvec_exit();
	dccp_sysctl_exit();
}
module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");