/*
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License version 2 as
 *	published by the Free Software Foundation.
 */
#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include <asm/ioctls.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"
DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

struct percpu_counter dccp_orphan_count;
EXPORT_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo dccp_hashinfo;
EXPORT_SYMBOL_GPL(dccp_hashinfo);

/* the maximum queue length for tx in packets. 0 is no limit */
int sysctl_dccp_tx_qlen __read_mostly = 5;
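
/*
 * State changes are funnelled through dccp_set_state() so that the DCCP MIB
 * counters stay consistent with the socket state and so that a socket
 * entering DCCP_CLOSED is unhashed before the state actually flips.
 */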
void dccp_set_state(struct sock *sk, const int state)
{
	const int oldstate = sk->sk_state;

	dccp_pr_debug("%s(%p) %s --> %s\n", dccp_role(sk), sk,
		      dccp_state_name(oldstate), dccp_state_name(state));
	WARN_ON(state == oldstate);

	switch (state) {
	case DCCP_OPEN:
		if (oldstate != DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
		/* Client retransmits all Confirm options until entering OPEN */
		if (oldstate == DCCP_PARTOPEN)
			dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg);
		break;

	case DCCP_CLOSED:
		if (oldstate == DCCP_OPEN || oldstate == DCCP_ACTIVE_CLOSEREQ ||
		    oldstate == DCCP_CLOSING)
			DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash != NULL &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(sk);
		/* fall through */
	default:
		if (oldstate == DCCP_OPEN)
			DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;
}

EXPORT_SYMBOL_GPL(dccp_set_state);
static void dccp_finish_passive_close(struct sock *sk)
{
	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
		/* Node (client or server) has received Close packet. */
		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_set_state(sk, DCCP_CLOSED);
		break;
	case DCCP_PASSIVE_CLOSEREQ:
		/*
		 * Client received CloseReq. We set the `active' flag so that
		 * dccp_send_close() retransmits the Close as per RFC 4340, 8.3.
		 */
		dccp_send_close(sk, 1);
		dccp_set_state(sk, DCCP_CLOSING);
	}
}
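
/*
 * dccp_done() finalises a connection: it moves the socket to DCCP_CLOSED,
 * stops the transmit timers and either wakes up the owning process or, for
 * an already-dead socket, hands it straight to inet_csk_destroy_sock().
 */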
void dccp_done(struct sock *sk)
{
	dccp_set_state(sk, DCCP_CLOSED);
	dccp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);
const char *dccp_packet_name(const int type)
{
	static const char *const dccp_packet_names[] = {
		[DCCP_PKT_REQUEST]  = "REQUEST",
		[DCCP_PKT_RESPONSE] = "RESPONSE",
		[DCCP_PKT_DATA]	    = "DATA",
		[DCCP_PKT_ACK]	    = "ACK",
		[DCCP_PKT_DATAACK]  = "DATAACK",
		[DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
		[DCCP_PKT_CLOSE]    = "CLOSE",
		[DCCP_PKT_RESET]    = "RESET",
		[DCCP_PKT_SYNC]	    = "SYNC",
		[DCCP_PKT_SYNCACK]  = "SYNCACK",
	};

	if (type >= DCCP_NR_PKT_TYPES)
		return "INVALID";

	return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);
const char *dccp_state_name(const int state)
{
	static const char *const dccp_state_names[] = {
		[DCCP_OPEN]		 = "OPEN",
		[DCCP_REQUESTING]	 = "REQUESTING",
		[DCCP_PARTOPEN]		 = "PARTOPEN",
		[DCCP_LISTEN]		 = "LISTEN",
		[DCCP_RESPOND]		 = "RESPOND",
		[DCCP_CLOSING]		 = "CLOSING",
		[DCCP_ACTIVE_CLOSEREQ]	 = "CLOSEREQ",
		[DCCP_PASSIVE_CLOSE]	 = "PASSIVE_CLOSE",
		[DCCP_PASSIVE_CLOSEREQ]	 = "PASSIVE_CLOSEREQ",
		[DCCP_TIME_WAIT]	 = "TIME_WAIT",
		[DCCP_CLOSED]		 = "CLOSED",
	};

	if (state >= DCCP_MAX_STATES)
		return "INVALID STATE!";

	return dccp_state_names[state];
}

EXPORT_SYMBOL_GPL(dccp_state_name);
int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_rto		= DCCP_TIMEOUT_INIT;
	icsk->icsk_syn_retries	= sysctl_dccp_request_retries;
	sk->sk_state		= DCCP_CLOSED;
	sk->sk_write_space	= dccp_write_space;
	icsk->icsk_sync_mss	= dccp_sync_mss;
	dp->dccps_mss_cache	= 536;
	dp->dccps_rate_last	= jiffies;
	dp->dccps_role		= DCCP_ROLE_UNDEFINED;
	dp->dccps_service	= DCCP_SERVICE_CODE_IS_ABSENT;
	dp->dccps_l_ack_ratio	= dp->dccps_r_ack_ratio = 1;

	dccp_init_xmit_timers(sk);

	INIT_LIST_HEAD(&dp->dccps_featneg);
	/* control socket doesn't need feat nego */
	if (likely(ctl_sock_initialized))
		return dccp_feat_init(sk);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);
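
/*
 * dccp_destroy_sock() releases per-connection state that dccp_init_sock()
 * and feature negotiation may have allocated: any unsent skb, the service
 * code list, the RX ack vector and both half-connection CCIDs.
 */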
void dccp_destroy_sock(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	/*
	 * DCCP doesn't use sk_write_queue, just sk_send_head
	 * for retransmissions
	 */
	if (sk->sk_send_head != NULL) {
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	/* Clean up a referenced DCCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash != NULL)
		inet_put_port(sk);

	kfree(dp->dccps_service_list);
	dp->dccps_service_list = NULL;

	if (dp->dccps_hc_rx_ackvec != NULL) {
		dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
		dp->dccps_hc_rx_ackvec = NULL;
	}
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
	dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;

	/* clean up feature negotiation state */
	dccp_feat_list_purge(&dp->dccps_featneg);
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);
static inline int dccp_listen_start(struct sock *sk, int backlog)
{
	struct dccp_sock *dp = dccp_sk(sk);

	dp->dccps_role = DCCP_ROLE_LISTEN;
	/* do not start to listen if feature negotiation setup fails */
	if (dccp_feat_finalise_settings(dp))
		return -EPROTO;
	return inet_csk_listen_start(sk, backlog);
}
static inline int dccp_need_reset(int state)
{
	return state != DCCP_CLOSED && state != DCCP_LISTEN &&
	       state != DCCP_REQUESTING;
}
int dccp_disconnect(struct sock *sk, int flags)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = 0;
	const int old_state = sk->sk_state;

	if (old_state != DCCP_CLOSED)
		dccp_set_state(sk, DCCP_CLOSED);

	/*
	 * This corresponds to the ABORT function of RFC793, sec. 3.8
	 * TCP uses a RST segment, DCCP a Reset packet with Code 2, "Aborted".
	 */
	if (old_state == DCCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (dccp_need_reset(old_state)) {
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		sk->sk_err = ECONNRESET;
	} else if (old_state == DCCP_REQUESTING)
		sk->sk_err = ECONNRESET;

	dccp_clear_xmit_timers(sk);

	__skb_queue_purge(&sk->sk_receive_queue);
	__skb_queue_purge(&sk->sk_write_queue);
	if (sk->sk_send_head != NULL) {
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	inet->inet_dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sock_reset_flag(sk, SOCK_DONE);

	icsk->icsk_backoff = 0;
	inet_csk_delack_init(sk);

	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);
/*
 *	Wait for a DCCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int dccp_poll(struct file *file, struct socket *sock,
		       poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;

	sock_poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == DCCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	   by poll logic and correct handling of state changes
	   made by another threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
		if (atomic_read(&sk->sk_rmem_alloc) > 0)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else { /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}
	}
	return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int rc = -ENOTCONN;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN)
		goto out;

	switch (cmd) {
	case SIOCINQ: {
		struct sk_buff *skb;
		unsigned long amount = 0;

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL)
			/*
			 * We will only return the amount of this packet since
			 * that is all that will be read.
			 */
			amount = skb->len;
		rc = put_user(amount, (int __user *)arg);
		break;
	}
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
out:
	release_sock(sk);
	return rc;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);
static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
				   char __user *optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_service_list *sl = NULL;

	if (service == DCCP_SERVICE_INVALID_VALUE ||
	    optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
		return -EINVAL;

	if (optlen > sizeof(service)) {
		sl = kmalloc(optlen, GFP_KERNEL);
		if (sl == NULL)
			return -ENOMEM;

		sl->dccpsl_nr = optlen / sizeof(u32) - 1;
		if (copy_from_user(sl->dccpsl_list,
				   optval + sizeof(service),
				   optlen - sizeof(service)) ||
		    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
			kfree(sl);
			return -EFAULT;
		}
	}

	lock_sock(sk);
	dp->dccps_service = service;

	kfree(dp->dccps_service_list);

	dp->dccps_service_list = sl;
	release_sock(sk);
	return 0;
}
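
/*
 * Partial checksum coverage (RFC 4340, 9.2): a coverage value of 0 means
 * "checksum the whole packet", values 1..15 cover the header plus the first
 * (cscov - 1) 32-bit words of payload. DCCP_SOCKOPT_SEND_CSCOV restricts
 * what this side sends, DCCP_SOCKOPT_RECV_CSCOV what it is willing to
 * accept; both are negotiated via the Minimum Checksum Coverage feature.
 */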
static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
{
	u8 *list, len;
	int i, rc;

	if (cscov < 0 || cscov > 15)
		return -EINVAL;
	/*
	 * Populate a list of permissible values, in the range cscov...15. This
	 * is necessary since feature negotiation of single values only works if
	 * both sides incidentally choose the same value. Since the list starts
	 * lowest-value first, negotiation will pick the smallest shared value.
	 */
	if (cscov == 0)
		return 0;
	len = 16 - cscov;

	list = kmalloc(len, GFP_KERNEL);
	if (list == NULL)
		return -ENOBUFS;

	for (i = 0; i < len; i++)
		list[i] = cscov++;

	rc = dccp_feat_register_sp(sk, DCCPF_MIN_CSUM_COVER, rx, list, len);

	if (rc == 0) {
		if (rx)
			dccp_sk(sk)->dccps_pcrlen = cscov;
		else
			dccp_sk(sk)->dccps_pcslen = cscov;
	}
	kfree(list);
	return rc;
}
static int dccp_setsockopt_ccid(struct sock *sk, int type,
				char __user *optval, unsigned int optlen)
{
	u8 *val;
	int rc = 0;

	if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS)
		return -EINVAL;

	val = kmalloc(optlen, GFP_KERNEL);
	if (val == NULL)
		return -ENOMEM;

	if (copy_from_user(val, optval, optlen)) {
		kfree(val);
		return -EFAULT;
	}

	lock_sock(sk);
	if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID)
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 1, val, optlen);

	if (!rc && (type == DCCP_SOCKOPT_RX_CCID || type == DCCP_SOCKOPT_CCID))
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 0, val, optlen);
	release_sock(sk);

	kfree(val);
	return rc;
}
static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
		char __user *optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	int val, err = 0;

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CHANGE_L:
	case DCCP_SOCKOPT_CHANGE_R:
		DCCP_WARN("sockopt(CHANGE_L/R) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CCID:
	case DCCP_SOCKOPT_RX_CCID:
	case DCCP_SOCKOPT_TX_CCID:
		return dccp_setsockopt_ccid(sk, optname, optval, optlen);
	}

	if (optlen < (int)sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	if (optname == DCCP_SOCKOPT_SERVICE)
		return dccp_setsockopt_service(sk, val, optval, optlen);

	lock_sock(sk);
	switch (optname) {
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		if (dp->dccps_role != DCCP_ROLE_SERVER)
			err = -EOPNOTSUPP;
		else
			dp->dccps_server_timewait = (val != 0);
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, false);
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, true);
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return err;
}
int dccp_setsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);
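
/*
 * Illustrative userspace sketch: CCID preferences are passed as a uint8_t
 * array before connecting, e.g. to prefer CCID-2 and fall back to CCID-3
 * for both half-connections:
 *
 *	uint8_t ccids[] = { 2, 3 };
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_CCID, ccids, sizeof(ccids));
 */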
#ifdef CONFIG_COMPAT
int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);
#endif
static int dccp_getsockopt_service(struct sock *sk, int len,
				   __be32 __user *optval,
				   int __user *optlen)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const struct dccp_service_list *sl;
	int err = -ENOENT, slen = 0, total_len = sizeof(u32);

	lock_sock(sk);
	if ((sl = dp->dccps_service_list) != NULL) {
		slen = sl->dccpsl_nr * sizeof(u32);
		total_len += slen;
	}

	err = -EINVAL;
	if (total_len > len)
		goto out;

	err = 0;
	if (put_user(total_len, optlen) ||
	    put_user(dp->dccps_service, optval) ||
	    (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
		err = -EFAULT;
out:
	release_sock(sk);
	return err;
}
static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct dccp_sock *dp;
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < (int)sizeof(int))
		return -EINVAL;

	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_SERVICE:
		return dccp_getsockopt_service(sk, len,
					       (__be32 __user *)optval, optlen);
	case DCCP_SOCKOPT_GET_CUR_MPS:
		val = dp->dccps_mss_cache;
		break;
	case DCCP_SOCKOPT_AVAILABLE_CCIDS:
		return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
	case DCCP_SOCKOPT_TX_CCID:
		val = ccid_get_current_tx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_RX_CCID:
		val = ccid_get_current_rx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		val = dp->dccps_server_timewait;
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		val = dp->dccps_pcslen;
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		val = dp->dccps_pcrlen;
		break;
	case 128 ... 191:
		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	case 192 ... 255:
		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	default:
		return -ENOPROTOOPT;
	}

	len = sizeof(val);
	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
int dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);
#ifdef CONFIG_COMPAT
int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
#endif
int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const int flags = msg->msg_flags;
	const int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int rc, size;
	long timeo;

	if (len > dp->dccps_mss_cache)
		return -EMSGSIZE;

	lock_sock(sk);

	if (sysctl_dccp_tx_qlen &&
	    (sk->sk_write_queue.qlen >= sysctl_dccp_tx_qlen)) {
		rc = -EAGAIN;
		goto out_release;
	}

	timeo = sock_sndtimeo(sk, noblock);

	/*
	 * We have to use sk_stream_wait_connect here to set sk_write_pending,
	 * so that the trick in dccp_rcv_request_sent_state_process.
	 */
	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
		if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_release;

	size = sk->sk_prot->max_header + len;
	release_sock(sk);
	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	lock_sock(sk);
	if (skb == NULL)
		goto out_release;

	skb_reserve(skb, sk->sk_prot->max_header);
	rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (rc != 0)
		goto out_discard;

	skb_queue_tail(&sk->sk_write_queue, skb);
	dccp_write_xmit(sk, 0);
out_release:
	release_sock(sk);
	return rc ? : len;
out_discard:
	kfree_skb(skb);
	goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);
int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len, int nonblock, int flags, int *addr_len)
{
	const struct dccp_hdr *dh;
	long timeo;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN) {
		len = -ENOTCONN;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, nonblock);

	do {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		if (skb == NULL)
			goto verify_sock_status;

		dh = dccp_hdr(skb);

		switch (dh->dccph_type) {
		case DCCP_PKT_DATA:
		case DCCP_PKT_DATAACK:
			goto found_ok_skb;

		case DCCP_PKT_CLOSE:
		case DCCP_PKT_CLOSEREQ:
			if (!(flags & MSG_PEEK))
				dccp_finish_passive_close(sk);
			/* fall through */
		case DCCP_PKT_RESET:
			dccp_pr_debug("found fin (%s) ok!\n",
				      dccp_packet_name(dh->dccph_type));
			len = 0;
			goto found_fin_ok;
		default:
			dccp_pr_debug("packet_type=%s\n",
				      dccp_packet_name(dh->dccph_type));
			sk_eat_skb(sk, skb, 0);
		}
verify_sock_status:
		if (sock_flag(sk, SOCK_DONE)) {
			len = 0;
			break;
		}

		if (sk->sk_err) {
			len = sock_error(sk);
			break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			len = 0;
			break;
		}

		if (sk->sk_state == DCCP_CLOSED) {
			if (!sock_flag(sk, SOCK_DONE)) {
				/* This occurs when user tries to read
				 * from never connected socket.
				 */
				len = -ENOTCONN;
				break;
			}
			len = 0;
			break;
		}

		if (!timeo) {
			len = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			len = sock_intr_errno(timeo);
			break;
		}

		sk_wait_data(sk, &timeo);
		continue;
	found_ok_skb:
		if (len > skb->len)
			len = skb->len;
		else if (len < skb->len)
			msg->msg_flags |= MSG_TRUNC;

		if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
			/* Exception. Bailout! */
			len = -EFAULT;
			break;
		}
		if (flags & MSG_TRUNC)
			len = skb->len;
	found_fin_ok:
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb, 0);
		break;
	} while (1);
out:
	release_sock(sk);
	return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);
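
/*
 * inet_dccp_listen() mirrors inet_listen(): on an unconnected SOCK_DCCP
 * socket it finalises feature negotiation via dccp_listen_start(); on a
 * socket that is already listening it only adjusts the backlog.
 */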
int inet_dccp_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
		goto out;

	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != DCCP_LISTEN) {
		/*
		 * FIXME: here it probably should be sk->sk_prot->listen_start
		 * see tcp_listen_start
		 */
		err = dccp_listen_start(sk, backlog);
		if (err)
			goto out;
	}
	sk->sk_max_ack_backlog = backlog;
	err = 0;

out:
	release_sock(sk);
	return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);
static void dccp_terminate_connection(struct sock *sk)
{
	u8 next_state = DCCP_CLOSED;

	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
	case DCCP_PASSIVE_CLOSEREQ:
		dccp_finish_passive_close(sk);
		break;
	case DCCP_PARTOPEN:
		dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk);
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		/* fall through */
	case DCCP_OPEN:
		dccp_send_close(sk, 1);

		if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER &&
		    !dccp_sk(sk)->dccps_server_timewait)
			next_state = DCCP_ACTIVE_CLOSEREQ;
		else
			next_state = DCCP_CLOSING;
		/* fall through */
	default:
		dccp_set_state(sk, next_state);
	}
}
void dccp_close(struct sock *sk, long timeout)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	u32 data_was_unread = 0;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	sk_stop_timer(sk, &dp->dccps_xmit_timer);

	/*
	 * We need to flush the recv. buffs. We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		data_was_unread += skb->len;
		__kfree_skb(skb);
	}

	if (data_was_unread) {
		/* Unread data was tossed, send an appropriate Reset Code */
		DCCP_WARN("DCCP: ABORT -- %u bytes unread\n", data_was_unread);
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		dccp_set_state(sk, DCCP_CLOSED);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (sk->sk_state != DCCP_CLOSED) {
		dccp_terminate_connection(sk);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	WARN_ON(sock_owned_by_user(sk));

	percpu_counter_inc(sk->sk_prot->orphan_count);

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);

void dccp_shutdown(struct sock *sk, int how)
{
	dccp_pr_debug("called shutdown(%x)\n", how);
}

EXPORT_SYMBOL_GPL(dccp_shutdown);
static inline int dccp_mib_init(void)
{
	return snmp_mib_init((void __percpu **)dccp_statistics,
			     sizeof(struct dccp_mib));
}

static inline void dccp_mib_exit(void)
{
	snmp_mib_free((void __percpu **)dccp_statistics);
}
static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
int dccp_debug;
module_param(dccp_debug, bool, 0644);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif
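
/*
 * dccp_init() sizes the established ("ehash") and bind ("bhash") hash
 * tables from available memory, much like the TCP hash tables; the
 * "thash_entries" module parameter can override the computed ehash size.
 */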
static int __init dccp_init(void)
{
	unsigned long goal;
	int ehash_order, bhash_order, i;
	int rc;

	BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
		     FIELD_SIZEOF(struct sk_buff, cb));
	rc = percpu_counter_init(&dccp_orphan_count, 0);
	if (rc)
		goto out_fail;
	rc = -ENOBUFS;
	inet_hashinfo_init(&dccp_hashinfo);
	dccp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("dccp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!dccp_hashinfo.bind_bucket_cachep)
		goto out_free_percpu;

	/*
	 * Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (totalram_pages >= (128 * 1024))
		goal = totalram_pages >> (21 - PAGE_SHIFT);
	else
		goal = totalram_pages >> (23 - PAGE_SHIFT);

	if (thash_entries)
		goal = (thash_entries *
			sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
	for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
		;
	do {
		unsigned long hash_size = (1UL << ehash_order) * PAGE_SIZE /
					sizeof(struct inet_ehash_bucket);

		while (hash_size & (hash_size - 1))
			hash_size--;
		dccp_hashinfo.ehash_mask = hash_size - 1;
		dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order);
	} while (!dccp_hashinfo.ehash && --ehash_order > 0);

	if (!dccp_hashinfo.ehash) {
		DCCP_CRIT("Failed to allocate DCCP established hash table");
		goto out_free_bind_bucket_cachep;
	}

	for (i = 0; i <= dccp_hashinfo.ehash_mask; i++) {
		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);
		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i);
	}

	if (inet_ehash_locks_alloc(&dccp_hashinfo))
		goto out_free_dccp_ehash;

	bhash_order = ehash_order;

	do {
		dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
					sizeof(struct inet_bind_hashbucket);
		if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
		    bhash_order > 0)
			continue;
		dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, bhash_order);
	} while (!dccp_hashinfo.bhash && --bhash_order >= 0);

	if (!dccp_hashinfo.bhash) {
		DCCP_CRIT("Failed to allocate DCCP bind hash table");
		goto out_free_dccp_locks;
	}

	for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
		spin_lock_init(&dccp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
	}

	rc = dccp_mib_init();
	if (rc)
		goto out_free_dccp_bhash;

	rc = dccp_ackvec_init();
	if (rc)
		goto out_free_dccp_mib;

	rc = dccp_sysctl_init();
	if (rc)
		goto out_ackvec_exit;

	rc = ccid_initialize_builtins();
	if (rc)
		goto out_sysctl_exit;

	dccp_timestamping_init();

	return 0;

out_sysctl_exit:
	dccp_sysctl_exit();
out_ackvec_exit:
	dccp_ackvec_exit();
out_free_dccp_mib:
	dccp_mib_exit();
out_free_dccp_bhash:
	free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
out_free_dccp_locks:
	inet_ehash_locks_free(&dccp_hashinfo);
out_free_dccp_ehash:
	free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
out_free_bind_bucket_cachep:
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
out_free_percpu:
	percpu_counter_destroy(&dccp_orphan_count);
out_fail:
	dccp_hashinfo.bhash = NULL;
	dccp_hashinfo.ehash = NULL;
	dccp_hashinfo.bind_bucket_cachep = NULL;
	return rc;
}
static void __exit dccp_fini(void)
{
	ccid_cleanup_builtins();
	dccp_mib_exit();
	free_pages((unsigned long)dccp_hashinfo.bhash,
		   get_order(dccp_hashinfo.bhash_size *
			     sizeof(struct inet_bind_hashbucket)));
	free_pages((unsigned long)dccp_hashinfo.ehash,
		   get_order((dccp_hashinfo.ehash_mask + 1) *
			     sizeof(struct inet_ehash_bucket)));
	inet_ehash_locks_free(&dccp_hashinfo);
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_ackvec_exit();
	dccp_sysctl_exit();
	percpu_counter_destroy(&dccp_orphan_count);
}
module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");