/*
 * An implementation of the DCCP protocol
 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/checksum.h>

#include <net/inet_sock.h>

#include <asm/ioctls.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>
DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

struct percpu_counter dccp_orphan_count;

EXPORT_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo dccp_hashinfo;

EXPORT_SYMBOL_GPL(dccp_hashinfo);

/* the maximum queue length for tx in packets. 0 is no limit */
int sysctl_dccp_tx_qlen __read_mostly = 5;
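
/*
 * This default can normally be adjusted at runtime through the DCCP sysctl
 * table (see net/dccp/sysctl.c), e.g. via /proc/sys/net/dccp/default/tx_qlen.
 */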

#ifdef CONFIG_IP_DCCP_DEBUG
static const char *dccp_state_name(const int state)
{
        static const char *const dccp_state_names[] = {
        [DCCP_REQUESTING]       = "REQUESTING",
        [DCCP_PARTOPEN]         = "PARTOPEN",
        [DCCP_LISTEN]           = "LISTEN",
        [DCCP_RESPOND]          = "RESPOND",
        [DCCP_CLOSING]          = "CLOSING",
        [DCCP_ACTIVE_CLOSEREQ]  = "CLOSEREQ",
        [DCCP_PASSIVE_CLOSE]    = "PASSIVE_CLOSE",
        [DCCP_PASSIVE_CLOSEREQ] = "PASSIVE_CLOSEREQ",
        [DCCP_TIME_WAIT]        = "TIME_WAIT",
        [DCCP_CLOSED]           = "CLOSED",
        };

        if (state >= DCCP_MAX_STATES)
                return "INVALID STATE!";

        return dccp_state_names[state];
}
#endif

void dccp_set_state(struct sock *sk, const int state)
{
        const int oldstate = sk->sk_state;

        dccp_pr_debug("%s(%p) %s --> %s\n", dccp_role(sk), sk,
                      dccp_state_name(oldstate), dccp_state_name(state));
        WARN_ON(state == oldstate);

        switch (state) {
        case DCCP_OPEN:
                if (oldstate != DCCP_OPEN)
                        DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
                /* Client retransmits all Confirm options until entering OPEN */
                if (oldstate == DCCP_PARTOPEN)
                        dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg);
                break;

        case DCCP_CLOSED:
                if (oldstate == DCCP_OPEN || oldstate == DCCP_ACTIVE_CLOSEREQ ||
                    oldstate == DCCP_CLOSING)
                        DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

                sk->sk_prot->unhash(sk);
                if (inet_csk(sk)->icsk_bind_hash != NULL &&
                    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
                        inet_put_port(sk);
                /* fall through */
        default:
                if (oldstate == DCCP_OPEN)
                        DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
        }

        /* Change state AFTER socket is unhashed to avoid closed
         * socket sitting in hash tables.
         */
        sk->sk_state = state;
}

EXPORT_SYMBOL_GPL(dccp_set_state);

static void dccp_finish_passive_close(struct sock *sk)
{
        switch (sk->sk_state) {
        case DCCP_PASSIVE_CLOSE:
                /* Node (client or server) has received Close packet. */
                dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
                dccp_set_state(sk, DCCP_CLOSED);
                break;
        case DCCP_PASSIVE_CLOSEREQ:
                /*
                 * Client received CloseReq. We set the `active' flag so that
                 * dccp_send_close() retransmits the Close as per RFC 4340, 8.3.
                 */
                dccp_send_close(sk, 1);
                dccp_set_state(sk, DCCP_CLOSING);
        }
}

void dccp_done(struct sock *sk)
{
        dccp_set_state(sk, DCCP_CLOSED);
        dccp_clear_xmit_timers(sk);

        sk->sk_shutdown = SHUTDOWN_MASK;

        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_state_change(sk);
        else
                inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);

const char *dccp_packet_name(const int type)
{
        static const char *const dccp_packet_names[] = {
                [DCCP_PKT_REQUEST]  = "REQUEST",
                [DCCP_PKT_RESPONSE] = "RESPONSE",
                [DCCP_PKT_DATA]     = "DATA",
                [DCCP_PKT_ACK]      = "ACK",
                [DCCP_PKT_DATAACK]  = "DATAACK",
                [DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
                [DCCP_PKT_CLOSE]    = "CLOSE",
                [DCCP_PKT_RESET]    = "RESET",
                [DCCP_PKT_SYNC]     = "SYNC",
                [DCCP_PKT_SYNCACK]  = "SYNCACK",
        };

        if (type >= DCCP_NR_PKT_TYPES)

        return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);

int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        icsk->icsk_rto          = DCCP_TIMEOUT_INIT;
        icsk->icsk_syn_retries  = sysctl_dccp_request_retries;
        sk->sk_state            = DCCP_CLOSED;
        sk->sk_write_space      = dccp_write_space;
        icsk->icsk_sync_mss     = dccp_sync_mss;
        dp->dccps_mss_cache     = 536;
        dp->dccps_rate_last     = jiffies;
        dp->dccps_role          = DCCP_ROLE_UNDEFINED;
        dp->dccps_service       = DCCP_SERVICE_CODE_IS_ABSENT;
        dp->dccps_tx_qlen       = sysctl_dccp_tx_qlen;

        dccp_init_xmit_timers(sk);

        INIT_LIST_HEAD(&dp->dccps_featneg);
        /* control socket doesn't need feat nego */
        if (likely(ctl_sock_initialized))
                return dccp_feat_init(sk);
        return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);

void dccp_destroy_sock(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);

        /*
         * DCCP doesn't use sk_write_queue, just sk_send_head
         * for retransmissions
         */
        if (sk->sk_send_head != NULL) {
                kfree_skb(sk->sk_send_head);
                sk->sk_send_head = NULL;
        }

        /* Clean up a referenced DCCP bind bucket. */
        if (inet_csk(sk)->icsk_bind_hash != NULL)
                inet_put_port(sk);

        kfree(dp->dccps_service_list);
        dp->dccps_service_list = NULL;

        if (dp->dccps_hc_rx_ackvec != NULL) {
                dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
                dp->dccps_hc_rx_ackvec = NULL;
        }
        ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
        ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
        dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;

        /* clean up feature negotiation state */
        dccp_feat_list_purge(&dp->dccps_featneg);
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);

static inline int dccp_listen_start(struct sock *sk, int backlog)
{
        struct dccp_sock *dp = dccp_sk(sk);

        dp->dccps_role = DCCP_ROLE_LISTEN;
        /* do not start to listen if feature negotiation setup fails */
        if (dccp_feat_finalise_settings(dp))
                return -EPROTO;
        return inet_csk_listen_start(sk, backlog);
}

static inline int dccp_need_reset(int state)
{
        return state != DCCP_CLOSED && state != DCCP_LISTEN &&
               state != DCCP_REQUESTING;
}

int dccp_disconnect(struct sock *sk, int flags)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
        const int old_state = sk->sk_state;

        if (old_state != DCCP_CLOSED)
                dccp_set_state(sk, DCCP_CLOSED);

        /*
         * This corresponds to the ABORT function of RFC793, sec. 3.8
         * TCP uses a RST segment, DCCP a Reset packet with Code 2, "Aborted".
         */
        if (old_state == DCCP_LISTEN) {
                inet_csk_listen_stop(sk);
        } else if (dccp_need_reset(old_state)) {
                dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
                sk->sk_err = ECONNRESET;
        } else if (old_state == DCCP_REQUESTING)
                sk->sk_err = ECONNRESET;

        dccp_clear_xmit_timers(sk);

        __skb_queue_purge(&sk->sk_receive_queue);
        __skb_queue_purge(&sk->sk_write_queue);
        if (sk->sk_send_head != NULL) {
                __kfree_skb(sk->sk_send_head);
                sk->sk_send_head = NULL;
        }

        inet->inet_dport = 0;

        if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
                inet_reset_saddr(sk);

        sock_reset_flag(sk, SOCK_DONE);

        icsk->icsk_backoff = 0;
        inet_csk_delack_init(sk);

        WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);

        sk->sk_error_report(sk);
}

EXPORT_SYMBOL_GPL(dccp_disconnect);

/*
 * Wait for a DCCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
unsigned int dccp_poll(struct file *file, struct socket *sock,
                       poll_table *wait)
{
        unsigned int mask = 0;
        struct sock *sk = sock->sk;

        sock_poll_wait(file, sk_sleep(sk), wait);
        if (sk->sk_state == DCCP_LISTEN)
                return inet_csk_listen_poll(sk);

        /* Socket is not locked. We are protected from async events
         * by poll logic and correct handling of state changes
         * made by other threads is impossible in any case.
         */

        if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
                mask |= POLLHUP;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLIN | POLLRDNORM | POLLRDHUP;

        if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
                if (atomic_read(&sk->sk_rmem_alloc) > 0)
                        mask |= POLLIN | POLLRDNORM;

                if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
                        if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
                                mask |= POLLOUT | POLLWRNORM;
                        } else { /* send SIGIO later */
                                set_bit(SOCK_ASYNC_NOSPACE,
                                        &sk->sk_socket->flags);
                                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

                                /* Race breaker. If space is freed after
                                 * wspace test but before the flags are set,
                                 * IO signal will be lost.
                                 */
                                if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
                                        mask |= POLLOUT | POLLWRNORM;
                        }
                }
        }
        return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);

int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
        if (sk->sk_state == DCCP_LISTEN)
                goto out;

        unsigned long amount = 0;

        skb = skb_peek(&sk->sk_receive_queue);
        if (skb != NULL) {
                /*
                 * We will only return the amount of this packet since
                 * that is all that will be read.
                 */
                amount = skb->len;
        }
        rc = put_user(amount, (int __user *)arg);
}

EXPORT_SYMBOL_GPL(dccp_ioctl);
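
/*
 * Illustrative user-space usage (a sketch, not part of the original file):
 * an application sets the service code on a DCCP socket, for example
 *
 *	__be32 service = htonl(42);
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE, &service, sizeof(service));
 *
 * Passing several __be32 values in a single call advertises a list of
 * service codes, which dccp_setsockopt_service() below copies into
 * dccps_service_list.
 */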

static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
                                   char __user *optval, unsigned int optlen)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct dccp_service_list *sl = NULL;

        if (service == DCCP_SERVICE_INVALID_VALUE ||
            optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
                return -EINVAL;

        if (optlen > sizeof(service)) {
                sl = kmalloc(optlen, GFP_KERNEL);

                sl->dccpsl_nr = optlen / sizeof(u32) - 1;
                if (copy_from_user(sl->dccpsl_list,
                                   optval + sizeof(service),
                                   optlen - sizeof(service)) ||
                    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
                        kfree(sl);
                        return -EFAULT;
                }
        }

        dp->dccps_service = service;

        kfree(dp->dccps_service_list);

        dp->dccps_service_list = sl;
}

static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
{
        if (cscov < 0 || cscov > 15)
                return -EINVAL;
        /*
         * Populate a list of permissible values, in the range cscov...15. This
         * is necessary since feature negotiation of single values only works if
         * both sides incidentally choose the same value. Since the list starts
         * lowest-value first, negotiation will pick the smallest shared value.
         */
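        /*
         * For example, a requested coverage of cscov = 12 yields the list
         * {12, 13, 14, 15}, so the negotiation settles on the smallest
         * coverage value that both endpoints are willing to accept.
         */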
        list = kmalloc(len, GFP_KERNEL);

        for (i = 0; i < len; i++)
                list[i] = cscov++;

        rc = dccp_feat_register_sp(sk, DCCPF_MIN_CSUM_COVER, rx, list, len);

        if (rx)
                dccp_sk(sk)->dccps_pcrlen = cscov;
        else
                dccp_sk(sk)->dccps_pcslen = cscov;
}

static int dccp_setsockopt_ccid(struct sock *sk, int type,
                                char __user *optval, unsigned int optlen)
{
        if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS)
                return -EINVAL;

        val = memdup_user(optval, optlen);

        if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID)
                rc = dccp_feat_register_sp(sk, DCCPF_CCID, 1, val, optlen);

        if (!rc && (type == DCCP_SOCKOPT_RX_CCID || type == DCCP_SOCKOPT_CCID))
                rc = dccp_feat_register_sp(sk, DCCPF_CCID, 0, val, optlen);
}

static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
                              char __user *optval, unsigned int optlen)
{
        struct dccp_sock *dp = dccp_sk(sk);
        int val, err = 0;

        switch (optname) {
        case DCCP_SOCKOPT_PACKET_SIZE:
                DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
                return 0;
        case DCCP_SOCKOPT_CHANGE_L:
        case DCCP_SOCKOPT_CHANGE_R:
                DCCP_WARN("sockopt(CHANGE_L/R) is deprecated: fix your app\n");
                return 0;
        case DCCP_SOCKOPT_CCID:
        case DCCP_SOCKOPT_RX_CCID:
        case DCCP_SOCKOPT_TX_CCID:
                return dccp_setsockopt_ccid(sk, optname, optval, optlen);
        }

        if (optlen < (int)sizeof(int))
                return -EINVAL;

        if (get_user(val, (int __user *)optval))
                return -EFAULT;

        if (optname == DCCP_SOCKOPT_SERVICE)
                return dccp_setsockopt_service(sk, val, optval, optlen);

        switch (optname) {
        case DCCP_SOCKOPT_SERVER_TIMEWAIT:
                if (dp->dccps_role != DCCP_ROLE_SERVER)
                        err = -EOPNOTSUPP;
                else
                        dp->dccps_server_timewait = (val != 0);
                break;
        case DCCP_SOCKOPT_SEND_CSCOV:
                err = dccp_setsockopt_cscov(sk, val, false);
                break;
        case DCCP_SOCKOPT_RECV_CSCOV:
                err = dccp_setsockopt_cscov(sk, val, true);
                break;
        case DCCP_SOCKOPT_QPOLICY_ID:
                if (sk->sk_state != DCCP_CLOSED)
                        err = -EISCONN;
                else if (val < 0 || val >= DCCPQ_POLICY_MAX)
                        err = -EINVAL;
                else
                        dp->dccps_qpolicy = val;
                break;
        case DCCP_SOCKOPT_QPOLICY_TXQLEN:
                dp->dccps_tx_qlen = val;
                break;
        }

        return err;
}

int dccp_setsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, unsigned int optlen)
{
        if (level != SOL_DCCP)
                return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
                                                             optname, optval,
                                                             optlen);
        return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);

int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, unsigned int optlen)
{
        if (level != SOL_DCCP)
                return inet_csk_compat_setsockopt(sk, level, optname,
                                                  optval, optlen);
        return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);

static int dccp_getsockopt_service(struct sock *sk, int len,
                                   __be32 __user *optval,
                                   int __user *optlen)
{
        const struct dccp_sock *dp = dccp_sk(sk);
        const struct dccp_service_list *sl;
        int err = -ENOENT, slen = 0, total_len = sizeof(u32);

        if ((sl = dp->dccps_service_list) != NULL) {
                slen = sl->dccpsl_nr * sizeof(u32);
                total_len += slen;
        }

        if (put_user(total_len, optlen) ||
            put_user(dp->dccps_service, optval) ||
            (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
                err = -EFAULT;
}

static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
                              char __user *optval, int __user *optlen)
{
        struct dccp_sock *dp;
        int val, len;

        if (get_user(len, optlen))
                return -EFAULT;

        if (len < (int)sizeof(int))
                return -EINVAL;

        dp = dccp_sk(sk);

        switch (optname) {
        case DCCP_SOCKOPT_PACKET_SIZE:
                DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
                return 0;
        case DCCP_SOCKOPT_SERVICE:
                return dccp_getsockopt_service(sk, len,
                                               (__be32 __user *)optval, optlen);
        case DCCP_SOCKOPT_GET_CUR_MPS:
                val = dp->dccps_mss_cache;
                break;
        case DCCP_SOCKOPT_AVAILABLE_CCIDS:
                return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
        case DCCP_SOCKOPT_TX_CCID:
                val = ccid_get_current_tx_ccid(dp);
                break;
        case DCCP_SOCKOPT_RX_CCID:
                val = ccid_get_current_rx_ccid(dp);
                break;
        case DCCP_SOCKOPT_SERVER_TIMEWAIT:
                val = dp->dccps_server_timewait;
                break;
        case DCCP_SOCKOPT_SEND_CSCOV:
                val = dp->dccps_pcslen;
                break;
        case DCCP_SOCKOPT_RECV_CSCOV:
                val = dp->dccps_pcrlen;
                break;
        case DCCP_SOCKOPT_QPOLICY_ID:
                val = dp->dccps_qpolicy;
                break;
        case DCCP_SOCKOPT_QPOLICY_TXQLEN:
                val = dp->dccps_tx_qlen;
                break;
        case 128 ... 191:
                return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
                                             len, (u32 __user *)optval, optlen);
        case 192 ... 255:
                return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
                                             len, (u32 __user *)optval, optlen);
        }

        if (put_user(len, optlen) || copy_to_user(optval, &val, len))
                return -EFAULT;
}

int dccp_getsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int __user *optlen)
{
        if (level != SOL_DCCP)
                return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
                                                             optname, optval,
                                                             optlen);
        return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);

int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, int __user *optlen)
{
        if (level != SOL_DCCP)
                return inet_csk_compat_getsockopt(sk, level, optname,
                                                  optval, optlen);
        return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);

static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
{
        struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);

        /*
         * Assign an (opaque) qpolicy priority value to skb->priority.
         *
         * We are overloading this skb field for use with the qpolicy subsystem.
         * The skb->priority is normally used for the SO_PRIORITY option, which
         * is initialised from sk_priority. Since the assignment of sk_priority
         * to skb->priority happens later (on layer 3), we overload this field
         * for use with queueing priorities as long as the skb is on layer 4.
         * The default priority value (if nothing is set) is 0.
         */
        skb->priority = 0;
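
        /*
         * Illustrative user-space sketch (not part of this file): a sender
         * attaches a per-packet queueing priority roughly as follows, where
         * fd, buf and buflen are application-defined.
         *
         *	char cbuf[CMSG_SPACE(sizeof(__u32))];
         *	struct iovec iov = { .iov_base = buf, .iov_len = buflen };
         *	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
         *			    .msg_control = cbuf,
         *			    .msg_controllen = sizeof(cbuf) };
         *	struct cmsghdr *c = CMSG_FIRSTHDR(&m);
         *	__u32 prio = 3;
         *
         *	c->cmsg_level = SOL_DCCP;
         *	c->cmsg_type  = DCCP_SCM_PRIORITY;
         *	c->cmsg_len   = CMSG_LEN(sizeof(prio));
         *	memcpy(CMSG_DATA(c), &prio, sizeof(prio));
         *	sendmsg(fd, &m, 0);
         *
         * Only queueing policies that accept this parameter will use it, see
         * the dccp_qpolicy_param_ok() check in the loop below.
         */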

        for (; cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;

                if (cmsg->cmsg_level != SOL_DCCP)
                        continue;

                if (cmsg->cmsg_type <= DCCP_SCM_QPOLICY_MAX &&
                    !dccp_qpolicy_param_ok(skb->sk, cmsg->cmsg_type))
                        return -EINVAL;

                switch (cmsg->cmsg_type) {
                case DCCP_SCM_PRIORITY:
                        if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u32)))
                                return -EINVAL;
                        skb->priority = *(__u32 *)CMSG_DATA(cmsg);
                }
        }
        return 0;
}
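
/*
 * DCCP is datagram-oriented: dccp_sendmsg() below rejects payloads larger
 * than the current maximum packet size (dccps_mss_cache), which applications
 * can query via the DCCP_SOCKOPT_GET_CUR_MPS option handled above.
 */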
int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 size_t len)
{
        const struct dccp_sock *dp = dccp_sk(sk);
        const int flags = msg->msg_flags;
        const int noblock = flags & MSG_DONTWAIT;

        if (len > dp->dccps_mss_cache)
                return -EMSGSIZE;

        if (dccp_qpolicy_full(sk)) {
                rc = -EAGAIN;
                goto out_release;
        }

        timeo = sock_sndtimeo(sk, noblock);

        /*
         * We have to use sk_stream_wait_connect here to set sk_write_pending,
         * so that the trick in dccp_rcv_request_sent_state_process works.
         */
        /* Wait for a connection to finish. */
        if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
                if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
                        goto out_release;

        size = sk->sk_prot->max_header + len;

        skb = sock_alloc_send_skb(sk, size, noblock, &rc);

        skb_reserve(skb, sk->sk_prot->max_header);
        rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);

        rc = dccp_msghdr_parse(msg, skb);

        dccp_qpolicy_push(sk, skb);
        /*
         * The xmit_timer is set if the TX CCID is rate-based and will expire
         * when congestion control permits to release further packets into the
         * network. Window-based CCIDs do not use this timer.
         */
        if (!timer_pending(&dp->dccps_xmit_timer))
                dccp_write_xmit(sk);
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);

int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 size_t len, int nonblock, int flags, int *addr_len)
{
        const struct dccp_hdr *dh;

        if (sk->sk_state == DCCP_LISTEN) {

        timeo = sock_rcvtimeo(sk, nonblock);

        struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

                goto verify_sock_status;

        switch (dh->dccph_type) {
        case DCCP_PKT_DATAACK:

        case DCCP_PKT_CLOSEREQ:
                if (!(flags & MSG_PEEK))
                        dccp_finish_passive_close(sk);

                dccp_pr_debug("found fin (%s) ok!\n",
                              dccp_packet_name(dh->dccph_type));

                dccp_pr_debug("packet_type=%s\n",
                              dccp_packet_name(dh->dccph_type));
                sk_eat_skb(sk, skb, 0);

        if (sock_flag(sk, SOCK_DONE)) {

        len = sock_error(sk);

        if (sk->sk_shutdown & RCV_SHUTDOWN) {

        if (sk->sk_state == DCCP_CLOSED) {
                if (!sock_flag(sk, SOCK_DONE)) {
                        /* This occurs when user tries to read
                         * from never connected socket.
                         */

        if (signal_pending(current)) {
                len = sock_intr_errno(timeo);

        sk_wait_data(sk, &timeo);

        else if (len < skb->len)
                msg->msg_flags |= MSG_TRUNC;

        if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
                /* Exception. Bailout! */

        if (flags & MSG_TRUNC)

        if (!(flags & MSG_PEEK))
                sk_eat_skb(sk, skb, 0);
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);

int inet_dccp_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        unsigned char old_state;

        if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
                goto out;

        old_state = sk->sk_state;
        if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
                goto out;

        /* Really, if the socket is already in listen state
         * we can only allow the backlog to be adjusted.
         */
        if (old_state != DCCP_LISTEN) {
                /*
                 * FIXME: here it probably should be sk->sk_prot->listen_start,
                 * see tcp_listen_start.
                 */
                err = dccp_listen_start(sk, backlog);
        }
        sk->sk_max_ack_backlog = backlog;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);

static void dccp_terminate_connection(struct sock *sk)
{
        u8 next_state = DCCP_CLOSED;

        switch (sk->sk_state) {
        case DCCP_PASSIVE_CLOSE:
        case DCCP_PASSIVE_CLOSEREQ:
                dccp_finish_passive_close(sk);
                break;
        case DCCP_PARTOPEN:
                dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk);
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
                /* fall through */
        case DCCP_OPEN:
                dccp_send_close(sk, 1);

                if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER &&
                    !dccp_sk(sk)->dccps_server_timewait)
                        next_state = DCCP_ACTIVE_CLOSEREQ;
                else
                        next_state = DCCP_CLOSING;
                /* fall through */
        default:
                dccp_set_state(sk, next_state);
        }
}

void dccp_close(struct sock *sk, long timeout)
{
        struct dccp_sock *dp = dccp_sk(sk);
        u32 data_was_unread = 0;

        sk->sk_shutdown = SHUTDOWN_MASK;

        if (sk->sk_state == DCCP_LISTEN) {
                dccp_set_state(sk, DCCP_CLOSED);

                inet_csk_listen_stop(sk);

                goto adjudge_to_death;
        }

        sk_stop_timer(sk, &dp->dccps_xmit_timer);

        /*
         * We need to flush the recv. buffs. We do this only on the
         * descriptor close, not protocol-sourced closes, because the
         * reader process may not have drained the data yet!
         */
        while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                data_was_unread += skb->len;
                __kfree_skb(skb);
        }

        if (data_was_unread) {
                /* Unread data was tossed, send an appropriate Reset Code */
                DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
                dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
                dccp_set_state(sk, DCCP_CLOSED);
        } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
                /* Check zero linger _after_ checking for unread data. */
                sk->sk_prot->disconnect(sk, 0);
        } else if (sk->sk_state != DCCP_CLOSED) {
                /*
                 * Normal connection termination. May need to wait if there are
                 * still packets in the TX queue that are delayed by the CCID.
                 */
                dccp_flush_write_queue(sk, &timeout);
                dccp_terminate_connection(sk);
        }

        /*
         * Flush write queue. This may be necessary in several cases:
         * - we have been closed by the peer but still have application data;
         * - abortive termination (unread data or zero linger time);
         * - normal termination but the queue could not be flushed within the
         *   time limit.
         */
        __skb_queue_purge(&sk->sk_write_queue);

        sk_stream_wait_close(sk, timeout);

adjudge_to_death:
        state = sk->sk_state;

        /*
         * It is the last release_sock in its life. It will remove backlog.
         */

        /*
         * Now socket is owned by kernel and we acquire BH lock
         * to finish close. No need to check for user refs.
         */
        WARN_ON(sock_owned_by_user(sk));

        percpu_counter_inc(sk->sk_prot->orphan_count);

        /* Have we already been destroyed by a softirq or backlog? */
        if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
                goto out;

        if (sk->sk_state == DCCP_CLOSED)
                inet_csk_destroy_sock(sk);

        /* Otherwise, socket is reprieved until protocol close. */
}

EXPORT_SYMBOL_GPL(dccp_close);

void dccp_shutdown(struct sock *sk, int how)
{
        dccp_pr_debug("called shutdown(%x)\n", how);
}

EXPORT_SYMBOL_GPL(dccp_shutdown);

static inline int dccp_mib_init(void)
{
        return snmp_mib_init((void __percpu **)dccp_statistics,
                             sizeof(struct dccp_mib),
                             __alignof__(struct dccp_mib));
}

static inline void dccp_mib_exit(void)
{
        snmp_mib_free((void __percpu **)dccp_statistics);
}

static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
bool dccp_debug;
module_param(dccp_debug, bool, 0644);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif

static int __init dccp_init(void)
{
        unsigned long goal;
        int ehash_order, bhash_order, i;
        int rc;

        BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
                     FIELD_SIZEOF(struct sk_buff, cb));
        rc = percpu_counter_init(&dccp_orphan_count, 0);

        inet_hashinfo_init(&dccp_hashinfo);
        dccp_hashinfo.bind_bucket_cachep =
                kmem_cache_create("dccp_bind_bucket",
                                  sizeof(struct inet_bind_bucket), 0,
                                  SLAB_HWCACHE_ALIGN, NULL);
        if (!dccp_hashinfo.bind_bucket_cachep)
                goto out_free_percpu;

        /*
         * Size and allocate the main established and bind bucket
         * hash tables.
         *
         * The methodology is similar to that of the buffer cache.
         */
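        /*
         * Rough illustration of the sizing below: with 4 KiB pages, a machine
         * with at least 512 MiB of RAM (totalram_pages >= 128 * 1024) aims at
         * roughly one page of ehash per 2 MiB of memory (>> 21); smaller
         * machines aim at one page per 8 MiB (>> 23). A non-zero thash_entries
         * module parameter overrides this goal.
         */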
        if (totalram_pages >= (128 * 1024))
                goal = totalram_pages >> (21 - PAGE_SHIFT);
        else
                goal = totalram_pages >> (23 - PAGE_SHIFT);

        if (thash_entries)
                goal = (thash_entries *
                        sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;

        for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
                ;
        do {
                unsigned long hash_size = (1UL << ehash_order) * PAGE_SIZE /
                                        sizeof(struct inet_ehash_bucket);

                while (hash_size & (hash_size - 1))
                        hash_size--;
                dccp_hashinfo.ehash_mask = hash_size - 1;
                dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
                        __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order);
        } while (!dccp_hashinfo.ehash && --ehash_order > 0);

        if (!dccp_hashinfo.ehash) {
                DCCP_CRIT("Failed to allocate DCCP established hash table");
                goto out_free_bind_bucket_cachep;
        }

        for (i = 0; i <= dccp_hashinfo.ehash_mask; i++) {
                INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);
                INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i);
        }

        if (inet_ehash_locks_alloc(&dccp_hashinfo))
                goto out_free_dccp_ehash;

        bhash_order = ehash_order;

        do {
                dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
                                        sizeof(struct inet_bind_hashbucket);
                if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
                    bhash_order > 0)
                        continue;
                dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
                        __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, bhash_order);
        } while (!dccp_hashinfo.bhash && --bhash_order >= 0);

        if (!dccp_hashinfo.bhash) {
                DCCP_CRIT("Failed to allocate DCCP bind hash table");
                goto out_free_dccp_locks;
        }

        for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
                spin_lock_init(&dccp_hashinfo.bhash[i].lock);
                INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
        }

        rc = dccp_mib_init();
        if (rc)
                goto out_free_dccp_bhash;

        rc = dccp_ackvec_init();
        if (rc)
                goto out_free_dccp_mib;

        rc = dccp_sysctl_init();
        if (rc)
                goto out_ackvec_exit;

        rc = ccid_initialize_builtins();
        if (rc)
                goto out_sysctl_exit;

        dccp_timestamping_init();

        return 0;
out_free_dccp_bhash:
        free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
out_free_dccp_locks:
        inet_ehash_locks_free(&dccp_hashinfo);
out_free_dccp_ehash:
        free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
out_free_bind_bucket_cachep:
        kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
out_free_percpu:
        percpu_counter_destroy(&dccp_orphan_count);
out_fail:
        dccp_hashinfo.bhash = NULL;
        dccp_hashinfo.ehash = NULL;
        dccp_hashinfo.bind_bucket_cachep = NULL;
        return rc;
}

static void __exit dccp_fini(void)
{
        ccid_cleanup_builtins();

        free_pages((unsigned long)dccp_hashinfo.bhash,
                   get_order(dccp_hashinfo.bhash_size *
                             sizeof(struct inet_bind_hashbucket)));
        free_pages((unsigned long)dccp_hashinfo.ehash,
                   get_order((dccp_hashinfo.ehash_mask + 1) *
                             sizeof(struct inet_ehash_bucket)));
        inet_ehash_locks_free(&dccp_hashinfo);
        kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);

        percpu_counter_destroy(&dccp_orphan_count);
}

module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");