/*
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License version 2 as
 *	published by the Free Software Foundation.
 */
12 #include <linux/config.h>
13 #include <linux/dccp.h>
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/sched.h>
17 #include <linux/kernel.h>
18 #include <linux/skbuff.h>
19 #include <linux/netdevice.h>
21 #include <linux/if_arp.h>
22 #include <linux/init.h>
23 #include <linux/random.h>
24 #include <net/checksum.h>
26 #include <net/inet_common.h>
28 #include <net/protocol.h>
32 #include <asm/semaphore.h>
33 #include <linux/spinlock.h>
34 #include <linux/timer.h>
35 #include <linux/delay.h>
36 #include <linux/poll.h>
37 #include <linux/dccp.h>
42 DEFINE_SNMP_STAT(struct dccp_mib
, dccp_statistics
) __read_mostly
;
44 atomic_t dccp_orphan_count
= ATOMIC_INIT(0);
46 static struct net_protocol dccp_protocol
= {
47 .handler
= dccp_v4_rcv
,
48 .err_handler
= dccp_v4_err
,
52 const char *dccp_packet_name(const int type
)
54 static const char *dccp_packet_names
[] = {
55 [DCCP_PKT_REQUEST
] = "REQUEST",
56 [DCCP_PKT_RESPONSE
] = "RESPONSE",
57 [DCCP_PKT_DATA
] = "DATA",
58 [DCCP_PKT_ACK
] = "ACK",
59 [DCCP_PKT_DATAACK
] = "DATAACK",
60 [DCCP_PKT_CLOSEREQ
] = "CLOSEREQ",
61 [DCCP_PKT_CLOSE
] = "CLOSE",
62 [DCCP_PKT_RESET
] = "RESET",
63 [DCCP_PKT_SYNC
] = "SYNC",
64 [DCCP_PKT_SYNCACK
] = "SYNCACK",
67 if (type
>= DCCP_NR_PKT_TYPES
)
70 return dccp_packet_names
[type
];
73 EXPORT_SYMBOL_GPL(dccp_packet_name
);
75 const char *dccp_state_name(const int state
)
77 static char *dccp_state_names
[] = {
79 [DCCP_REQUESTING
] = "REQUESTING",
80 [DCCP_PARTOPEN
] = "PARTOPEN",
81 [DCCP_LISTEN
] = "LISTEN",
82 [DCCP_RESPOND
] = "RESPOND",
83 [DCCP_CLOSING
] = "CLOSING",
84 [DCCP_TIME_WAIT
] = "TIME_WAIT",
85 [DCCP_CLOSED
] = "CLOSED",
88 if (state
>= DCCP_MAX_STATES
)
89 return "INVALID STATE!";
91 return dccp_state_names
[state
];
94 EXPORT_SYMBOL_GPL(dccp_state_name
);
96 static inline int dccp_listen_start(struct sock
*sk
)
98 struct dccp_sock
*dp
= dccp_sk(sk
);
100 dp
->dccps_role
= DCCP_ROLE_LISTEN
;
102 * Apps need to use setsockopt(DCCP_SOCKOPT_SERVICE)
103 * before calling listen()
105 if (dccp_service_not_initialized(sk
))
107 return inet_csk_listen_start(sk
, TCP_SYNQ_HSIZE
);
110 int dccp_disconnect(struct sock
*sk
, int flags
)
112 struct inet_connection_sock
*icsk
= inet_csk(sk
);
113 struct inet_sock
*inet
= inet_sk(sk
);
115 const int old_state
= sk
->sk_state
;
117 if (old_state
!= DCCP_CLOSED
)
118 dccp_set_state(sk
, DCCP_CLOSED
);
120 /* ABORT function of RFC793 */
121 if (old_state
== DCCP_LISTEN
) {
122 inet_csk_listen_stop(sk
);
123 /* FIXME: do the active reset thing */
124 } else if (old_state
== DCCP_REQUESTING
)
125 sk
->sk_err
= ECONNRESET
;
127 dccp_clear_xmit_timers(sk
);
128 __skb_queue_purge(&sk
->sk_receive_queue
);
129 if (sk
->sk_send_head
!= NULL
) {
130 __kfree_skb(sk
->sk_send_head
);
131 sk
->sk_send_head
= NULL
;
136 if (!(sk
->sk_userlocks
& SOCK_BINDADDR_LOCK
))
137 inet_reset_saddr(sk
);
140 sock_reset_flag(sk
, SOCK_DONE
);
142 icsk
->icsk_backoff
= 0;
143 inet_csk_delack_init(sk
);
146 BUG_TRAP(!inet
->num
|| icsk
->icsk_bind_hash
);
148 sk
->sk_error_report(sk
);
153 * Wait for a DCCP event.
155 * Note that we don't need to lock the socket, as the upper poll layers
156 * take care of normal races (between the test and the event) and we don't
157 * go look at any of the socket buffers directly.
159 static unsigned int dccp_poll(struct file
*file
, struct socket
*sock
,
163 struct sock
*sk
= sock
->sk
;
165 poll_wait(file
, sk
->sk_sleep
, wait
);
166 if (sk
->sk_state
== DCCP_LISTEN
)
167 return inet_csk_listen_poll(sk
);
169 /* Socket is not locked. We are protected from async events
170 by poll logic and correct handling of state changes
171 made by another threads is impossible in any case.
178 if (sk
->sk_shutdown
== SHUTDOWN_MASK
|| sk
->sk_state
== DCCP_CLOSED
)
180 if (sk
->sk_shutdown
& RCV_SHUTDOWN
)
181 mask
|= POLLIN
| POLLRDNORM
;
184 if ((1 << sk
->sk_state
) & ~(DCCPF_REQUESTING
| DCCPF_RESPOND
)) {
185 if (atomic_read(&sk
->sk_rmem_alloc
) > 0)
186 mask
|= POLLIN
| POLLRDNORM
;
188 if (!(sk
->sk_shutdown
& SEND_SHUTDOWN
)) {
189 if (sk_stream_wspace(sk
) >= sk_stream_min_wspace(sk
)) {
190 mask
|= POLLOUT
| POLLWRNORM
;
191 } else { /* send SIGIO later */
192 set_bit(SOCK_ASYNC_NOSPACE
,
193 &sk
->sk_socket
->flags
);
194 set_bit(SOCK_NOSPACE
, &sk
->sk_socket
->flags
);
196 /* Race breaker. If space is freed after
197 * wspace test but before the flags are set,
198 * IO signal will be lost.
200 if (sk_stream_wspace(sk
) >= sk_stream_min_wspace(sk
))
201 mask
|= POLLOUT
| POLLWRNORM
;
208 int dccp_ioctl(struct sock
*sk
, int cmd
, unsigned long arg
)
210 dccp_pr_debug("entry\n");
214 static int dccp_setsockopt_service(struct sock
*sk
, const u32 service
,
215 char __user
*optval
, int optlen
)
217 struct dccp_sock
*dp
= dccp_sk(sk
);
218 struct dccp_service_list
*sl
= NULL
;
220 if (service
== DCCP_SERVICE_INVALID_VALUE
||
221 optlen
> DCCP_SERVICE_LIST_MAX_LEN
* sizeof(u32
))
224 if (optlen
> sizeof(service
)) {
225 sl
= kmalloc(optlen
, GFP_KERNEL
);
229 sl
->dccpsl_nr
= optlen
/ sizeof(u32
) - 1;
230 if (copy_from_user(sl
->dccpsl_list
,
231 optval
+ sizeof(service
),
232 optlen
- sizeof(service
)) ||
233 dccp_list_has_service(sl
, DCCP_SERVICE_INVALID_VALUE
)) {
240 dp
->dccps_service
= service
;
242 kfree(dp
->dccps_service_list
);
244 dp
->dccps_service_list
= sl
;
249 int dccp_setsockopt(struct sock
*sk
, int level
, int optname
,
250 char __user
*optval
, int optlen
)
252 struct dccp_sock
*dp
;
256 if (level
!= SOL_DCCP
)
257 return inet_csk(sk
)->icsk_af_ops
->setsockopt(sk
, level
,
261 if (optlen
< sizeof(int))
264 if (get_user(val
, (int __user
*)optval
))
267 if (optname
== DCCP_SOCKOPT_SERVICE
)
268 return dccp_setsockopt_service(sk
, val
, optval
, optlen
);
275 case DCCP_SOCKOPT_PACKET_SIZE
:
276 dp
->dccps_packet_size
= val
;
287 static int dccp_getsockopt_service(struct sock
*sk
, int len
,
291 const struct dccp_sock
*dp
= dccp_sk(sk
);
292 const struct dccp_service_list
*sl
;
293 int err
= -ENOENT
, slen
= 0, total_len
= sizeof(u32
);
296 if (dccp_service_not_initialized(sk
))
299 if ((sl
= dp
->dccps_service_list
) != NULL
) {
300 slen
= sl
->dccpsl_nr
* sizeof(u32
);
309 if (put_user(total_len
, optlen
) ||
310 put_user(dp
->dccps_service
, optval
) ||
311 (sl
!= NULL
&& copy_to_user(optval
+ 1, sl
->dccpsl_list
, slen
)))
318 int dccp_getsockopt(struct sock
*sk
, int level
, int optname
,
319 char __user
*optval
, int __user
*optlen
)
321 struct dccp_sock
*dp
;
324 if (level
!= SOL_DCCP
)
325 return inet_csk(sk
)->icsk_af_ops
->getsockopt(sk
, level
,
328 if (get_user(len
, optlen
))
331 if (len
< sizeof(int))
337 case DCCP_SOCKOPT_PACKET_SIZE
:
338 val
= dp
->dccps_packet_size
;
339 len
= sizeof(dp
->dccps_packet_size
);
341 case DCCP_SOCKOPT_SERVICE
:
342 return dccp_getsockopt_service(sk
, len
,
343 (u32 __user
*)optval
, optlen
);
345 return ccid_hc_rx_getsockopt(dp
->dccps_hc_rx_ccid
, sk
, optname
,
346 len
, (u32 __user
*)optval
, optlen
);
348 return ccid_hc_tx_getsockopt(dp
->dccps_hc_tx_ccid
, sk
, optname
,
349 len
, (u32 __user
*)optval
, optlen
);
354 if (put_user(len
, optlen
) || copy_to_user(optval
, &val
, len
))
360 int dccp_sendmsg(struct kiocb
*iocb
, struct sock
*sk
, struct msghdr
*msg
,
363 const struct dccp_sock
*dp
= dccp_sk(sk
);
364 const int flags
= msg
->msg_flags
;
365 const int noblock
= flags
& MSG_DONTWAIT
;
370 if (len
> dp
->dccps_mss_cache
)
374 timeo
= sock_sndtimeo(sk
, noblock
);
377 * We have to use sk_stream_wait_connect here to set sk_write_pending,
378 * so that the trick in dccp_rcv_request_sent_state_process.
380 /* Wait for a connection to finish. */
381 if ((1 << sk
->sk_state
) & ~(DCCPF_OPEN
| DCCPF_PARTOPEN
| DCCPF_CLOSING
))
382 if ((rc
= sk_stream_wait_connect(sk
, &timeo
)) != 0)
385 size
= sk
->sk_prot
->max_header
+ len
;
387 skb
= sock_alloc_send_skb(sk
, size
, noblock
, &rc
);
392 skb_reserve(skb
, sk
->sk_prot
->max_header
);
393 rc
= memcpy_fromiovec(skb_put(skb
, len
), msg
->msg_iov
, len
);
397 rc
= dccp_write_xmit(sk
, skb
, &timeo
);
399 * XXX we don't use sk_write_queue, so just discard the packet.
400 * Current plan however is to _use_ sk_write_queue with
401 * an algorith similar to tcp_sendmsg, where the main difference
402 * is that in DCCP we have to respect packet boundaries, so
403 * no coalescing of skbs.
405 * This bug was _quickly_ found & fixed by just looking at an OSTRA
406 * generated callgraph 8) -acme
416 int dccp_recvmsg(struct kiocb
*iocb
, struct sock
*sk
, struct msghdr
*msg
,
417 size_t len
, int nonblock
, int flags
, int *addr_len
)
419 const struct dccp_hdr
*dh
;
424 if (sk
->sk_state
== DCCP_LISTEN
) {
429 timeo
= sock_rcvtimeo(sk
, nonblock
);
432 struct sk_buff
*skb
= skb_peek(&sk
->sk_receive_queue
);
435 goto verify_sock_status
;
439 if (dh
->dccph_type
== DCCP_PKT_DATA
||
440 dh
->dccph_type
== DCCP_PKT_DATAACK
)
443 if (dh
->dccph_type
== DCCP_PKT_RESET
||
444 dh
->dccph_type
== DCCP_PKT_CLOSE
) {
445 dccp_pr_debug("found fin ok!\n");
449 dccp_pr_debug("packet_type=%s\n",
450 dccp_packet_name(dh
->dccph_type
));
453 if (sock_flag(sk
, SOCK_DONE
)) {
459 len
= sock_error(sk
);
463 if (sk
->sk_shutdown
& RCV_SHUTDOWN
) {
468 if (sk
->sk_state
== DCCP_CLOSED
) {
469 if (!sock_flag(sk
, SOCK_DONE
)) {
470 /* This occurs when user tries to read
471 * from never connected socket.
485 if (signal_pending(current
)) {
486 len
= sock_intr_errno(timeo
);
490 sk_wait_data(sk
, &timeo
);
495 else if (len
< skb
->len
)
496 msg
->msg_flags
|= MSG_TRUNC
;
498 if (skb_copy_datagram_iovec(skb
, 0, msg
->msg_iov
, len
)) {
499 /* Exception. Bailout! */
504 if (!(flags
& MSG_PEEK
))
513 static int inet_dccp_listen(struct socket
*sock
, int backlog
)
515 struct sock
*sk
= sock
->sk
;
516 unsigned char old_state
;
522 if (sock
->state
!= SS_UNCONNECTED
|| sock
->type
!= SOCK_DCCP
)
525 old_state
= sk
->sk_state
;
526 if (!((1 << old_state
) & (DCCPF_CLOSED
| DCCPF_LISTEN
)))
529 /* Really, if the socket is already in listen state
530 * we can only allow the backlog to be adjusted.
532 if (old_state
!= DCCP_LISTEN
) {
534 * FIXME: here it probably should be sk->sk_prot->listen_start
535 * see tcp_listen_start
537 err
= dccp_listen_start(sk
);
541 sk
->sk_max_ack_backlog
= backlog
;
549 static const unsigned char dccp_new_state
[] = {
550 /* current state: new state: action: */
552 [DCCP_OPEN
] = DCCP_CLOSING
| DCCP_ACTION_FIN
,
553 [DCCP_REQUESTING
] = DCCP_CLOSED
,
554 [DCCP_PARTOPEN
] = DCCP_CLOSING
| DCCP_ACTION_FIN
,
555 [DCCP_LISTEN
] = DCCP_CLOSED
,
556 [DCCP_RESPOND
] = DCCP_CLOSED
,
557 [DCCP_CLOSING
] = DCCP_CLOSED
,
558 [DCCP_TIME_WAIT
] = DCCP_CLOSED
,
559 [DCCP_CLOSED
] = DCCP_CLOSED
,
562 static int dccp_close_state(struct sock
*sk
)
564 const int next
= dccp_new_state
[sk
->sk_state
];
565 const int ns
= next
& DCCP_STATE_MASK
;
567 if (ns
!= sk
->sk_state
)
568 dccp_set_state(sk
, ns
);
570 return next
& DCCP_ACTION_FIN
;
573 void dccp_close(struct sock
*sk
, long timeout
)
579 sk
->sk_shutdown
= SHUTDOWN_MASK
;
581 if (sk
->sk_state
== DCCP_LISTEN
) {
582 dccp_set_state(sk
, DCCP_CLOSED
);
585 inet_csk_listen_stop(sk
);
587 goto adjudge_to_death
;
591 * We need to flush the recv. buffs. We do this only on the
592 * descriptor close, not protocol-sourced closes, because the
593 *reader process may not have drained the data yet!
595 /* FIXME: check for unread data */
596 while ((skb
= __skb_dequeue(&sk
->sk_receive_queue
)) != NULL
) {
600 if (sock_flag(sk
, SOCK_LINGER
) && !sk
->sk_lingertime
) {
601 /* Check zero linger _after_ checking for unread data. */
602 sk
->sk_prot
->disconnect(sk
, 0);
603 } else if (dccp_close_state(sk
)) {
604 dccp_send_close(sk
, 1);
607 sk_stream_wait_close(sk
, timeout
);
611 * It is the last release_sock in its life. It will remove backlog.
615 * Now socket is owned by kernel and we acquire BH lock
616 * to finish close. No need to check for user refs.
620 BUG_TRAP(!sock_owned_by_user(sk
));
626 * The last release_sock may have processed the CLOSE or RESET
627 * packet moving sock to CLOSED state, if not we have to fire
628 * the CLOSE/CLOSEREQ retransmission timer, see "8.3. Termination"
629 * in draft-ietf-dccp-spec-11. -acme
631 if (sk
->sk_state
== DCCP_CLOSING
) {
632 /* FIXME: should start at 2 * RTT */
633 /* Timer for repeating the CLOSE/CLOSEREQ until an answer. */
634 inet_csk_reset_xmit_timer(sk
, ICSK_TIME_RETRANS
,
635 inet_csk(sk
)->icsk_rto
,
638 /* Yeah, we should use sk->sk_prot->orphan_count, etc */
639 dccp_set_state(sk
, DCCP_CLOSED
);
643 atomic_inc(sk
->sk_prot
->orphan_count
);
644 if (sk
->sk_state
== DCCP_CLOSED
)
645 inet_csk_destroy_sock(sk
);
647 /* Otherwise, socket is reprieved until protocol close. */
/* shutdown(2) hook: not implemented yet, just traces the call. */
void dccp_shutdown(struct sock *sk, int how)
{
	dccp_pr_debug("entry\n");
}
659 static struct proto_ops inet_dccp_ops
= {
661 .owner
= THIS_MODULE
,
662 .release
= inet_release
,
664 .connect
= inet_stream_connect
,
665 .socketpair
= sock_no_socketpair
,
666 .accept
= inet_accept
,
667 .getname
= inet_getname
,
668 /* FIXME: work on tcp_poll to rename it to inet_csk_poll */
671 /* FIXME: work on inet_listen to rename it to sock_common_listen */
672 .listen
= inet_dccp_listen
,
673 .shutdown
= inet_shutdown
,
674 .setsockopt
= sock_common_setsockopt
,
675 .getsockopt
= sock_common_getsockopt
,
676 .sendmsg
= inet_sendmsg
,
677 .recvmsg
= sock_common_recvmsg
,
678 .mmap
= sock_no_mmap
,
679 .sendpage
= sock_no_sendpage
,
682 extern struct net_proto_family inet_family_ops
;
684 static struct inet_protosw dccp_v4_protosw
= {
686 .protocol
= IPPROTO_DCCP
,
687 .prot
= &dccp_v4_prot
,
688 .ops
= &inet_dccp_ops
,
695 * This is the global socket data structure used for responding to
696 * the Out-of-the-blue (OOTB) packets. A control sock will be created
697 * for this socket at the initialization time.
699 struct socket
*dccp_ctl_socket
;
701 static char dccp_ctl_socket_err_msg
[] __initdata
=
702 KERN_ERR
"DCCP: Failed to create the control socket.\n";
704 static int __init
dccp_ctl_sock_init(void)
706 int rc
= sock_create_kern(PF_INET
, SOCK_DCCP
, IPPROTO_DCCP
,
709 printk(dccp_ctl_socket_err_msg
);
711 dccp_ctl_socket
->sk
->sk_allocation
= GFP_ATOMIC
;
712 inet_sk(dccp_ctl_socket
->sk
)->uc_ttl
= -1;
714 /* Unhash it so that IP input processing does not even
715 * see it, we do not wish this socket to see incoming
718 dccp_ctl_socket
->sk
->sk_prot
->unhash(dccp_ctl_socket
->sk
);
#ifdef CONFIG_IP_DCCP_UNLOAD_HACK
/* Release the control socket so the module can be unloaded. */
void dccp_ctl_sock_exit(void)
{
	if (dccp_ctl_socket != NULL) {
		sock_release(dccp_ctl_socket);
		dccp_ctl_socket = NULL;
	}
}

EXPORT_SYMBOL_GPL(dccp_ctl_sock_exit);
#endif
736 static int __init
init_dccp_v4_mibs(void)
740 dccp_statistics
[0] = alloc_percpu(struct dccp_mib
);
741 if (dccp_statistics
[0] == NULL
)
744 dccp_statistics
[1] = alloc_percpu(struct dccp_mib
);
745 if (dccp_statistics
[1] == NULL
)
752 free_percpu(dccp_statistics
[0]);
753 dccp_statistics
[0] = NULL
;
758 static int thash_entries
;
759 module_param(thash_entries
, int, 0444);
760 MODULE_PARM_DESC(thash_entries
, "Number of ehash buckets");
762 #ifdef CONFIG_IP_DCCP_DEBUG
764 module_param(dccp_debug
, int, 0444);
765 MODULE_PARM_DESC(dccp_debug
, "Enable debug messages");
768 static int __init
dccp_init(void)
771 int ehash_order
, bhash_order
, i
;
772 int rc
= proto_register(&dccp_v4_prot
, 1);
777 dccp_hashinfo
.bind_bucket_cachep
=
778 kmem_cache_create("dccp_bind_bucket",
779 sizeof(struct inet_bind_bucket
), 0,
780 SLAB_HWCACHE_ALIGN
, NULL
, NULL
);
781 if (!dccp_hashinfo
.bind_bucket_cachep
)
782 goto out_proto_unregister
;
785 * Size and allocate the main established and bind bucket
788 * The methodology is similar to that of the buffer cache.
790 if (num_physpages
>= (128 * 1024))
791 goal
= num_physpages
>> (21 - PAGE_SHIFT
);
793 goal
= num_physpages
>> (23 - PAGE_SHIFT
);
796 goal
= (thash_entries
*
797 sizeof(struct inet_ehash_bucket
)) >> PAGE_SHIFT
;
798 for (ehash_order
= 0; (1UL << ehash_order
) < goal
; ehash_order
++)
801 dccp_hashinfo
.ehash_size
= (1UL << ehash_order
) * PAGE_SIZE
/
802 sizeof(struct inet_ehash_bucket
);
803 dccp_hashinfo
.ehash_size
>>= 1;
804 while (dccp_hashinfo
.ehash_size
&
805 (dccp_hashinfo
.ehash_size
- 1))
806 dccp_hashinfo
.ehash_size
--;
807 dccp_hashinfo
.ehash
= (struct inet_ehash_bucket
*)
808 __get_free_pages(GFP_ATOMIC
, ehash_order
);
809 } while (!dccp_hashinfo
.ehash
&& --ehash_order
> 0);
811 if (!dccp_hashinfo
.ehash
) {
812 printk(KERN_CRIT
"Failed to allocate DCCP "
813 "established hash table\n");
814 goto out_free_bind_bucket_cachep
;
817 for (i
= 0; i
< (dccp_hashinfo
.ehash_size
<< 1); i
++) {
818 rwlock_init(&dccp_hashinfo
.ehash
[i
].lock
);
819 INIT_HLIST_HEAD(&dccp_hashinfo
.ehash
[i
].chain
);
822 bhash_order
= ehash_order
;
825 dccp_hashinfo
.bhash_size
= (1UL << bhash_order
) * PAGE_SIZE
/
826 sizeof(struct inet_bind_hashbucket
);
827 if ((dccp_hashinfo
.bhash_size
> (64 * 1024)) &&
830 dccp_hashinfo
.bhash
= (struct inet_bind_hashbucket
*)
831 __get_free_pages(GFP_ATOMIC
, bhash_order
);
832 } while (!dccp_hashinfo
.bhash
&& --bhash_order
>= 0);
834 if (!dccp_hashinfo
.bhash
) {
835 printk(KERN_CRIT
"Failed to allocate DCCP bind hash table\n");
836 goto out_free_dccp_ehash
;
839 for (i
= 0; i
< dccp_hashinfo
.bhash_size
; i
++) {
840 spin_lock_init(&dccp_hashinfo
.bhash
[i
].lock
);
841 INIT_HLIST_HEAD(&dccp_hashinfo
.bhash
[i
].chain
);
844 if (init_dccp_v4_mibs())
845 goto out_free_dccp_bhash
;
848 if (inet_add_protocol(&dccp_protocol
, IPPROTO_DCCP
))
849 goto out_free_dccp_v4_mibs
;
851 inet_register_protosw(&dccp_v4_protosw
);
853 rc
= dccp_ctl_sock_init();
855 goto out_unregister_protosw
;
858 out_unregister_protosw
:
859 inet_unregister_protosw(&dccp_v4_protosw
);
860 inet_del_protocol(&dccp_protocol
, IPPROTO_DCCP
);
861 out_free_dccp_v4_mibs
:
862 free_percpu(dccp_statistics
[0]);
863 free_percpu(dccp_statistics
[1]);
864 dccp_statistics
[0] = dccp_statistics
[1] = NULL
;
866 free_pages((unsigned long)dccp_hashinfo
.bhash
, bhash_order
);
867 dccp_hashinfo
.bhash
= NULL
;
869 free_pages((unsigned long)dccp_hashinfo
.ehash
, ehash_order
);
870 dccp_hashinfo
.ehash
= NULL
;
871 out_free_bind_bucket_cachep
:
872 kmem_cache_destroy(dccp_hashinfo
.bind_bucket_cachep
);
873 dccp_hashinfo
.bind_bucket_cachep
= NULL
;
874 out_proto_unregister
:
875 proto_unregister(&dccp_v4_prot
);
879 static const char dccp_del_proto_err_msg
[] __exitdata
=
880 KERN_ERR
"can't remove dccp net_protocol\n";
882 static void __exit
dccp_fini(void)
884 inet_unregister_protosw(&dccp_v4_protosw
);
886 if (inet_del_protocol(&dccp_protocol
, IPPROTO_DCCP
) < 0)
887 printk(dccp_del_proto_err_msg
);
889 free_percpu(dccp_statistics
[0]);
890 free_percpu(dccp_statistics
[1]);
891 free_pages((unsigned long)dccp_hashinfo
.bhash
,
892 get_order(dccp_hashinfo
.bhash_size
*
893 sizeof(struct inet_bind_hashbucket
)));
894 free_pages((unsigned long)dccp_hashinfo
.ehash
,
895 get_order(dccp_hashinfo
.ehash_size
*
896 sizeof(struct inet_ehash_bucket
)));
897 kmem_cache_destroy(dccp_hashinfo
.bind_bucket_cachep
);
898 proto_unregister(&dccp_v4_prot
);
901 module_init(dccp_init
);
902 module_exit(dccp_fini
);
905 * __stringify doesn't likes enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33)
906 * values directly, Also cover the case where the protocol is not specified,
907 * i.e. net-pf-PF_INET-proto-0-type-SOCK_DCCP
909 MODULE_ALIAS("net-pf-" __stringify(PF_INET
) "-proto-33-type-6");
910 MODULE_ALIAS("net-pf-" __stringify(PF_INET
) "-proto-0-type-6");
911 MODULE_LICENSE("GPL");
912 MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
913 MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");