/*
 * An implementation of the DCCP protocol
 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <asm/ioctls.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"

DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;
EXPORT_SYMBOL_GPL(dccp_statistics);

struct percpu_counter dccp_orphan_count;
EXPORT_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo dccp_hashinfo;
EXPORT_SYMBOL_GPL(dccp_hashinfo);

/* the maximum queue length for tx in packets. 0 is no limit */
int sysctl_dccp_tx_qlen __read_mostly = 5;
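
/*
 * Note (sketch, not verbatim from this file): this default is normally also
 * exposed through the DCCP sysctl table, e.g. something like
 * "sysctl -w net.dccp.default.tx_qlen=10" (the exact path depends on how
 * net/dccp/sysctl.c registers it), and can be overridden per socket via the
 * DCCP_SOCKOPT_QPOLICY_TXQLEN option handled in do_dccp_setsockopt() below.
 */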
#ifdef CONFIG_IP_DCCP_DEBUG
static const char *dccp_state_name(const int state)
{
	static const char *const dccp_state_names[] = {
		[DCCP_REQUESTING]	= "REQUESTING",
		[DCCP_PARTOPEN]		= "PARTOPEN",
		[DCCP_LISTEN]		= "LISTEN",
		[DCCP_RESPOND]		= "RESPOND",
		[DCCP_CLOSING]		= "CLOSING",
		[DCCP_ACTIVE_CLOSEREQ]	= "CLOSEREQ",
		[DCCP_PASSIVE_CLOSE]	= "PASSIVE_CLOSE",
		[DCCP_PASSIVE_CLOSEREQ]	= "PASSIVE_CLOSEREQ",
		[DCCP_TIME_WAIT]	= "TIME_WAIT",
		[DCCP_CLOSED]		= "CLOSED",
	};

	if (state >= DCCP_MAX_STATES)
		return "INVALID STATE!";

	return dccp_state_names[state];
}
#endif
void dccp_set_state(struct sock *sk, const int state)
{
	const int oldstate = sk->sk_state;

	dccp_pr_debug("%s(%p) %s --> %s\n", dccp_role(sk), sk,
		      dccp_state_name(oldstate), dccp_state_name(state));
	WARN_ON(state == oldstate);

	switch (state) {
	case DCCP_OPEN:
		if (oldstate != DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
		/* Client retransmits all Confirm options until entering OPEN */
		if (oldstate == DCCP_PARTOPEN)
			dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg);
		break;

	case DCCP_CLOSED:
		if (oldstate == DCCP_OPEN || oldstate == DCCP_ACTIVE_CLOSEREQ ||
		    oldstate == DCCP_CLOSING)
			DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash != NULL &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(sk);
		/* fall through */
	default:
		if (oldstate == DCCP_OPEN)
			DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;
}

EXPORT_SYMBOL_GPL(dccp_set_state);
static void dccp_finish_passive_close(struct sock *sk)
{
	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
		/* Node (client or server) has received Close packet. */
		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_set_state(sk, DCCP_CLOSED);
		break;
	case DCCP_PASSIVE_CLOSEREQ:
		/*
		 * Client received CloseReq. We set the `active' flag so that
		 * dccp_send_close() retransmits the Close as per RFC 4340, 8.3.
		 */
		dccp_send_close(sk, 1);
		dccp_set_state(sk, DCCP_CLOSING);
	}
}
void dccp_done(struct sock *sk)
{
	dccp_set_state(sk, DCCP_CLOSED);
	dccp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);
const char *dccp_packet_name(const int type)
{
	static const char *const dccp_packet_names[] = {
		[DCCP_PKT_REQUEST]  = "REQUEST",
		[DCCP_PKT_RESPONSE] = "RESPONSE",
		[DCCP_PKT_DATA]	    = "DATA",
		[DCCP_PKT_ACK]	    = "ACK",
		[DCCP_PKT_DATAACK]  = "DATAACK",
		[DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
		[DCCP_PKT_CLOSE]    = "CLOSE",
		[DCCP_PKT_RESET]    = "RESET",
		[DCCP_PKT_SYNC]	    = "SYNC",
		[DCCP_PKT_SYNCACK]  = "SYNCACK",
	};

	if (type >= DCCP_NR_PKT_TYPES)
		return "INVALID PACKET TYPE";

	return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);
int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_rto		= DCCP_TIMEOUT_INIT;
	icsk->icsk_syn_retries	= sysctl_dccp_request_retries;
	sk->sk_state		= DCCP_CLOSED;
	sk->sk_write_space	= dccp_write_space;
	icsk->icsk_sync_mss	= dccp_sync_mss;
	dp->dccps_mss_cache	= 536;
	dp->dccps_rate_last	= jiffies;
	dp->dccps_role		= DCCP_ROLE_UNDEFINED;
	dp->dccps_service	= DCCP_SERVICE_CODE_IS_ABSENT;
	dp->dccps_l_ack_ratio	= dp->dccps_r_ack_ratio = 1;
	dp->dccps_tx_qlen	= sysctl_dccp_tx_qlen;

	dccp_init_xmit_timers(sk);

	INIT_LIST_HEAD(&dp->dccps_featneg);
	/* control socket doesn't need feat nego */
	if (likely(ctl_sock_initialized))
		return dccp_feat_init(sk);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);
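
/*
 * Userspace sketch (assumption, not part of this file): sockets that reach
 * dccp_init_sock() are normally created as
 *
 *	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
 *
 * followed by the usual bind()/listen() on the server or connect() on the
 * client side.
 */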
void dccp_destroy_sock(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	/*
	 * DCCP doesn't use sk_write_queue, just sk_send_head
	 * for retransmissions
	 */
	if (sk->sk_send_head != NULL) {
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	/* Clean up a referenced DCCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash != NULL)
		inet_put_port(sk);

	kfree(dp->dccps_service_list);
	dp->dccps_service_list = NULL;

	if (dp->dccps_hc_rx_ackvec != NULL) {
		dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
		dp->dccps_hc_rx_ackvec = NULL;
	}
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
	dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;

	/* clean up feature negotiation state */
	dccp_feat_list_purge(&dp->dccps_featneg);
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);
static inline int dccp_listen_start(struct sock *sk, int backlog)
{
	struct dccp_sock *dp = dccp_sk(sk);

	dp->dccps_role = DCCP_ROLE_LISTEN;
	/* do not start to listen if feature negotiation setup fails */
	if (dccp_feat_finalise_settings(dp))
		return -EPROTO;
	return inet_csk_listen_start(sk, backlog);
}

static inline int dccp_need_reset(int state)
{
	return state != DCCP_CLOSED && state != DCCP_LISTEN &&
	       state != DCCP_REQUESTING;
}
int dccp_disconnect(struct sock *sk, int flags)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = 0;
	const int old_state = sk->sk_state;

	if (old_state != DCCP_CLOSED)
		dccp_set_state(sk, DCCP_CLOSED);

	/*
	 * This corresponds to the ABORT function of RFC793, sec. 3.8
	 * TCP uses a RST segment, DCCP a Reset packet with Code 2, "Aborted".
	 */
	if (old_state == DCCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (dccp_need_reset(old_state)) {
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		sk->sk_err = ECONNRESET;
	} else if (old_state == DCCP_REQUESTING)
		sk->sk_err = ECONNRESET;

	dccp_clear_xmit_timers(sk);

	__skb_queue_purge(&sk->sk_receive_queue);
	__skb_queue_purge(&sk->sk_write_queue);
	if (sk->sk_send_head != NULL) {
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	inet->inet_dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sock_reset_flag(sk, SOCK_DONE);

	icsk->icsk_backoff = 0;
	inet_csk_delack_init(sk);

	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);
/*
 * Wait for a DCCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
unsigned int dccp_poll(struct file *file, struct socket *sock,
		       poll_table *wait)
{
	unsigned int mask = 0;
	struct sock *sk = sock->sk;

	sock_poll_wait(file, sk_sleep(sk), wait);
	if (sk->sk_state == DCCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
		if (atomic_read(&sk->sk_rmem_alloc) > 0)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else { /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}
	}
	return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int rc = -ENOTCONN;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN)
		goto out;

	switch (cmd) {
	case SIOCINQ: {
		struct sk_buff *skb;
		unsigned long amount = 0;

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount of this packet since
			 * that is all that will be read.
			 */
			amount = skb->len;
		}
		rc = put_user(amount, (int __user *)arg);
	}
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
out:
	release_sock(sk);
	return rc;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);
static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
				   char __user *optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_service_list *sl = NULL;

	if (service == DCCP_SERVICE_INVALID_VALUE ||
	    optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
		return -EINVAL;

	if (optlen > sizeof(service)) {
		sl = kmalloc(optlen, GFP_KERNEL);
		if (sl == NULL)
			return -ENOMEM;

		sl->dccpsl_nr = optlen / sizeof(u32) - 1;
		if (copy_from_user(sl->dccpsl_list,
				   optval + sizeof(service),
				   optlen - sizeof(service)) ||
		    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
			kfree(sl);
			return -EFAULT;
		}
	}

	lock_sock(sk);
	dp->dccps_service = service;

	kfree(dp->dccps_service_list);

	dp->dccps_service_list = sl;
	release_sock(sk);
	return 0;
}
static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
{
	u8 *list, len;
	int i, rc;

	if (cscov < 0 || cscov > 15)
		return -EINVAL;
	/*
	 * Populate a list of permissible values, in the range cscov...15. This
	 * is necessary since feature negotiation of single values only works if
	 * both sides incidentally choose the same value. Since the list starts
	 * lowest-value first, negotiation will pick the smallest shared value.
	 */
	if (cscov == 0)
		return 0;
	len = 16 - cscov;

	list = kmalloc(len, GFP_KERNEL);
	if (list == NULL)
		return -ENOBUFS;

	for (i = 0; i < len; i++)
		list[i] = cscov++;

	rc = dccp_feat_register_sp(sk, DCCPF_MIN_CSUM_COVER, rx, list, len);
	if (rc == 0) {
		if (rx)
			dccp_sk(sk)->dccps_pcrlen = cscov;
		else
			dccp_sk(sk)->dccps_pcslen = cscov;
	}
	kfree(list);
	return rc;
}
*sk
, int type
,
469 char __user
*optval
, unsigned int optlen
)
474 if (optlen
< 1 || optlen
> DCCP_FEAT_MAX_SP_VALS
)
477 val
= memdup_user(optval
, optlen
);
482 if (type
== DCCP_SOCKOPT_TX_CCID
|| type
== DCCP_SOCKOPT_CCID
)
483 rc
= dccp_feat_register_sp(sk
, DCCPF_CCID
, 1, val
, optlen
);
485 if (!rc
&& (type
== DCCP_SOCKOPT_RX_CCID
|| type
== DCCP_SOCKOPT_CCID
))
486 rc
= dccp_feat_register_sp(sk
, DCCPF_CCID
, 0, val
, optlen
);
static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	int val, err = 0;

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CHANGE_L:
	case DCCP_SOCKOPT_CHANGE_R:
		DCCP_WARN("sockopt(CHANGE_L/R) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CCID:
	case DCCP_SOCKOPT_RX_CCID:
	case DCCP_SOCKOPT_TX_CCID:
		return dccp_setsockopt_ccid(sk, optname, optval, optlen);
	}

	if (optlen < (int)sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	if (optname == DCCP_SOCKOPT_SERVICE)
		return dccp_setsockopt_service(sk, val, optval, optlen);

	lock_sock(sk);
	switch (optname) {
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		if (dp->dccps_role != DCCP_ROLE_SERVER)
			err = -EOPNOTSUPP;
		else
			dp->dccps_server_timewait = (val != 0);
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, false);
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, true);
		break;
	case DCCP_SOCKOPT_QPOLICY_ID:
		if (sk->sk_state != DCCP_CLOSED)
			err = -EISCONN;
		else if (val < 0 || val >= DCCPQ_POLICY_MAX)
			err = -EINVAL;
		else
			dp->dccps_qpolicy = val;
		break;
	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
		if (val < 0)
			err = -EINVAL;
		else
			dp->dccps_tx_qlen = val;
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return err;
}
int dccp_setsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);
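
/*
 * Userspace sketch (assumption about typical use, not part of this file):
 * the service code validated by dccp_setsockopt_service() must be set before
 * connect()/listen(), e.g.
 *
 *	uint32_t service = htonl(42);
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE, &service, sizeof(service));
 *
 * A listening socket may instead pass an array of __be32 service codes, which
 * ends up in dccps_service_list above.
 */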
int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);
static int dccp_getsockopt_service(struct sock *sk, int len,
				   __be32 __user *optval,
				   int __user *optlen)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const struct dccp_service_list *sl;
	int err = -ENOENT, slen = 0, total_len = sizeof(u32);

	lock_sock(sk);
	if ((sl = dp->dccps_service_list) != NULL) {
		slen = sl->dccpsl_nr * sizeof(u32);
		total_len += slen;
	}

	err = -EINVAL;
	if (total_len > len)
		goto out;

	err = 0;
	if (put_user(total_len, optlen) ||
	    put_user(dp->dccps_service, optval) ||
	    (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
		err = -EFAULT;
out:
	release_sock(sk);
	return err;
}
static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct dccp_sock *dp;
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < (int)sizeof(int))
		return -EINVAL;

	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_SERVICE:
		return dccp_getsockopt_service(sk, len,
					       (__be32 __user *)optval, optlen);
	case DCCP_SOCKOPT_GET_CUR_MPS:
		val = dp->dccps_mss_cache;
		break;
	case DCCP_SOCKOPT_AVAILABLE_CCIDS:
		return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
	case DCCP_SOCKOPT_TX_CCID:
		val = ccid_get_current_tx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_RX_CCID:
		val = ccid_get_current_rx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		val = dp->dccps_server_timewait;
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		val = dp->dccps_pcslen;
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		val = dp->dccps_pcrlen;
		break;
	case DCCP_SOCKOPT_QPOLICY_ID:
		val = dp->dccps_qpolicy;
		break;
	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
		val = dp->dccps_tx_qlen;
		break;
	case 128 ... 191:
		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	case 192 ... 255:
		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	default:
		return -ENOPROTOOPT;
	}

	len = sizeof(val);
	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
int dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);

int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
{
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);

	/*
	 * Assign an (opaque) qpolicy priority value to skb->priority.
	 *
	 * We are overloading this skb field for use with the qpolicy subsystem.
	 * The skb->priority is normally used for the SO_PRIORITY option, which
	 * is initialised from sk_priority. Since the assignment of sk_priority
	 * to skb->priority happens later (on layer 3), we overload this field
	 * for use with queueing priorities as long as the skb is on layer 4.
	 * The default priority value (if nothing is set) is 0.
	 */
	skb->priority = 0;

	for (; cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {

		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_DCCP)
			continue;

		if (cmsg->cmsg_type <= DCCP_SCM_QPOLICY_MAX &&
		    !dccp_qpolicy_param_ok(skb->sk, cmsg->cmsg_type))
			return -EINVAL;

		switch (cmsg->cmsg_type) {
		case DCCP_SCM_PRIORITY:
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u32)))
				return -EINVAL;
			skb->priority = *(__u32 *)CMSG_DATA(cmsg);
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
*iocb
, struct sock
*sk
, struct msghdr
*msg
,
749 const struct dccp_sock
*dp
= dccp_sk(sk
);
750 const int flags
= msg
->msg_flags
;
751 const int noblock
= flags
& MSG_DONTWAIT
;
756 if (len
> dp
->dccps_mss_cache
)
761 if (dccp_qpolicy_full(sk
)) {
766 timeo
= sock_sndtimeo(sk
, noblock
);
769 * We have to use sk_stream_wait_connect here to set sk_write_pending,
770 * so that the trick in dccp_rcv_request_sent_state_process.
772 /* Wait for a connection to finish. */
773 if ((1 << sk
->sk_state
) & ~(DCCPF_OPEN
| DCCPF_PARTOPEN
))
774 if ((rc
= sk_stream_wait_connect(sk
, &timeo
)) != 0)
777 size
= sk
->sk_prot
->max_header
+ len
;
779 skb
= sock_alloc_send_skb(sk
, size
, noblock
, &rc
);
784 skb_reserve(skb
, sk
->sk_prot
->max_header
);
785 rc
= memcpy_fromiovec(skb_put(skb
, len
), msg
->msg_iov
, len
);
789 rc
= dccp_msghdr_parse(msg
, skb
);
793 dccp_qpolicy_push(sk
, skb
);
795 * The xmit_timer is set if the TX CCID is rate-based and will expire
796 * when congestion control permits to release further packets into the
797 * network. Window-based CCIDs do not use this timer.
799 if (!timer_pending(&dp
->dccps_xmit_timer
))
809 EXPORT_SYMBOL_GPL(dccp_sendmsg
);
int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len, int nonblock, int flags, int *addr_len)
{
	const struct dccp_hdr *dh;
	long timeo;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN) {
		len = -ENOTCONN;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, nonblock);

	do {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		if (skb == NULL)
			goto verify_sock_status;

		dh = dccp_hdr(skb);

		switch (dh->dccph_type) {
		case DCCP_PKT_DATA:
		case DCCP_PKT_DATAACK:
			goto found_ok_skb;

		case DCCP_PKT_CLOSE:
		case DCCP_PKT_CLOSEREQ:
			if (!(flags & MSG_PEEK))
				dccp_finish_passive_close(sk);
			/* fall through */
		case DCCP_PKT_RESET:
			dccp_pr_debug("found fin (%s) ok!\n",
				      dccp_packet_name(dh->dccph_type));
			len = 0;
			goto found_fin_ok;
		default:
			dccp_pr_debug("packet_type=%s\n",
				      dccp_packet_name(dh->dccph_type));
			sk_eat_skb(sk, skb, 0);
		}
verify_sock_status:
		if (sock_flag(sk, SOCK_DONE)) {
			len = 0;
			break;
		}

		if (sk->sk_err) {
			len = sock_error(sk);
			break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			len = 0;
			break;
		}

		if (sk->sk_state == DCCP_CLOSED) {
			if (!sock_flag(sk, SOCK_DONE)) {
				/* This occurs when user tries to read
				 * from never connected socket.
				 */
				len = -ENOTCONN;
				break;
			}
			len = 0;
			break;
		}

		if (!timeo) {
			len = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			len = sock_intr_errno(timeo);
			break;
		}

		sk_wait_data(sk, &timeo);
		continue;
	found_ok_skb:
		if (len > skb->len)
			len = skb->len;
		else if (len < skb->len)
			msg->msg_flags |= MSG_TRUNC;

		if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
			/* Exception. Bailout! */
			len = -EFAULT;
			break;
		}
		if (flags & MSG_TRUNC)
			len = skb->len;
	found_fin_ok:
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb, 0);
		break;
	} while (1);
out:
	release_sock(sk);
	return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);
int inet_dccp_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
		goto out;

	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != DCCP_LISTEN) {
		/*
		 * FIXME: here it probably should be sk->sk_prot->listen_start
		 * see tcp_listen_start
		 */
		err = dccp_listen_start(sk, backlog);
		if (err)
			goto out;
	}
	sk->sk_max_ack_backlog = backlog;
	err = 0;

out:
	release_sock(sk);
	return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);
static void dccp_terminate_connection(struct sock *sk)
{
	u8 next_state = DCCP_CLOSED;

	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
	case DCCP_PASSIVE_CLOSEREQ:
		dccp_finish_passive_close(sk);
		break;
	case DCCP_PARTOPEN:
		dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk);
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		/* fall through */
	case DCCP_OPEN:
		dccp_send_close(sk, 1);

		if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER &&
		    !dccp_sk(sk)->dccps_server_timewait)
			next_state = DCCP_ACTIVE_CLOSEREQ;
		else
			next_state = DCCP_CLOSING;
		/* fall through */
	default:
		dccp_set_state(sk, next_state);
	}
}
void dccp_close(struct sock *sk, long timeout)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	u32 data_was_unread = 0;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	sk_stop_timer(sk, &dp->dccps_xmit_timer);

	/*
	 * We need to flush the recv. buffs. We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		data_was_unread += skb->len;
		__kfree_skb(skb);
	}

	if (data_was_unread) {
		/* Unread data was tossed, send an appropriate Reset Code */
		DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		dccp_set_state(sk, DCCP_CLOSED);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (sk->sk_state != DCCP_CLOSED) {
		/*
		 * Normal connection termination. May need to wait if there are
		 * still packets in the TX queue that are delayed by the CCID.
		 */
		dccp_flush_write_queue(sk, &timeout);
		dccp_terminate_connection(sk);
	}

	/*
	 * Flush write queue. This may be necessary in several cases:
	 * - we have been closed by the peer but still have application data;
	 * - abortive termination (unread data or zero linger time),
	 * - normal termination but queue could not be flushed within time limit
	 */
	__skb_queue_purge(&sk->sk_write_queue);

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	WARN_ON(sock_owned_by_user(sk));

	percpu_counter_inc(sk->sk_prot->orphan_count);

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);
void dccp_shutdown(struct sock *sk, int how)
{
	dccp_pr_debug("called shutdown(%x)\n", how);
}

EXPORT_SYMBOL_GPL(dccp_shutdown);

static inline int dccp_mib_init(void)
{
	return snmp_mib_init((void __percpu **)dccp_statistics,
			     sizeof(struct dccp_mib),
			     __alignof__(struct dccp_mib));
}

static inline void dccp_mib_exit(void)
{
	snmp_mib_free((void __percpu **)dccp_statistics);
}

static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");
#ifdef CONFIG_IP_DCCP_DEBUG
bool dccp_debug;
module_param(dccp_debug, bool, 0644);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif
static int __init dccp_init(void)
{
	unsigned long goal;
	int ehash_order, bhash_order, i;
	int rc;

	BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
		     FIELD_SIZEOF(struct sk_buff, cb));
	rc = percpu_counter_init(&dccp_orphan_count, 0);
	if (rc)
		goto out_fail;
	rc = -ENOBUFS;
	inet_hashinfo_init(&dccp_hashinfo);
	dccp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("dccp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!dccp_hashinfo.bind_bucket_cachep)
		goto out_free_percpu;

	/*
	 * Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (totalram_pages >= (128 * 1024))
		goal = totalram_pages >> (21 - PAGE_SHIFT);
	else
		goal = totalram_pages >> (23 - PAGE_SHIFT);

	if (thash_entries)
		goal = (thash_entries *
			sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
	for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
		;
	do {
		unsigned long hash_size = (1UL << ehash_order) * PAGE_SIZE /
					  sizeof(struct inet_ehash_bucket);

		while (hash_size & (hash_size - 1))
			hash_size--;
		dccp_hashinfo.ehash_mask = hash_size - 1;
		dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order);
	} while (!dccp_hashinfo.ehash && --ehash_order > 0);

	if (!dccp_hashinfo.ehash) {
		DCCP_CRIT("Failed to allocate DCCP established hash table");
		goto out_free_bind_bucket_cachep;
	}

	for (i = 0; i <= dccp_hashinfo.ehash_mask; i++) {
		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);
		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i);
	}

	if (inet_ehash_locks_alloc(&dccp_hashinfo))
		goto out_free_dccp_ehash;

	bhash_order = ehash_order;

	do {
		dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
					   sizeof(struct inet_bind_hashbucket);
		if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
		    bhash_order > 0)
			continue;
		dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, bhash_order);
	} while (!dccp_hashinfo.bhash && --bhash_order >= 0);

	if (!dccp_hashinfo.bhash) {
		DCCP_CRIT("Failed to allocate DCCP bind hash table");
		goto out_free_dccp_locks;
	}

	for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
		spin_lock_init(&dccp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
	}

	rc = dccp_mib_init();
	if (rc)
		goto out_free_dccp_bhash;

	rc = dccp_ackvec_init();
	if (rc)
		goto out_free_dccp_mib;

	rc = dccp_sysctl_init();
	if (rc)
		goto out_ackvec_exit;

	rc = ccid_initialize_builtins();
	if (rc)
		goto out_sysctl_exit;

	dccp_timestamping_init();

	return 0;

out_sysctl_exit:
	dccp_sysctl_exit();
out_ackvec_exit:
	dccp_ackvec_exit();
out_free_dccp_mib:
	dccp_mib_exit();
out_free_dccp_bhash:
	free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
out_free_dccp_locks:
	inet_ehash_locks_free(&dccp_hashinfo);
out_free_dccp_ehash:
	free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
out_free_bind_bucket_cachep:
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
out_free_percpu:
	percpu_counter_destroy(&dccp_orphan_count);
out_fail:
	dccp_hashinfo.bhash = NULL;
	dccp_hashinfo.ehash = NULL;
	dccp_hashinfo.bind_bucket_cachep = NULL;
	return rc;
}
static void __exit dccp_fini(void)
{
	ccid_cleanup_builtins();
	free_pages((unsigned long)dccp_hashinfo.bhash,
		   get_order(dccp_hashinfo.bhash_size *
			     sizeof(struct inet_bind_hashbucket)));
	free_pages((unsigned long)dccp_hashinfo.ehash,
		   get_order((dccp_hashinfo.ehash_mask + 1) *
			     sizeof(struct inet_ehash_bucket)));
	inet_ehash_locks_free(&dccp_hashinfo);
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	percpu_counter_destroy(&dccp_orphan_count);
}

module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");