/*
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
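/* assumption: the cor_* helpers, struct cor_sock, PF_COR etc. used below
 * are declared in the project-wide header; the include was elided in this
 * excerpt */
#include "cor.h"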
#define MAX_SND_MSGLEN 4096
#define MAX_MSG_LEN 256
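/* The routing daemon talks to the kernel through a single special socket:
 * commands written by the daemon (CRD_UTK_*) arrive via sendmsg() and are
 * dispatched in cor_rd_parse(); messages to the daemon (CRD_KTU_*) are
 * queued as struct cor_rd_msg and rendered into rcvbuf when the daemon
 * calls recvmsg(). */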
/* note: both struct layouts are reconstructed from their uses in this file */
struct cor_rd_msg {
        struct list_head lh;
        struct list_head cs_lh;

        struct cor_sock *cs;
        __u32 type;
};

struct cor_rdsock {
        struct sock sk; /* must be first, sock->sk is cast to cor_rdsock */

        atomic_t connected;
        __u8 versioninited;

        struct list_head socks;

        struct mutex sndbuf_lock;
        char snd_cmdplen_buf[8];
        __u8 snd_cmdplen_read;
        char *cmdparams;
        __u32 param_read;

        atomic_t ready_to_read;
        struct list_head rcv_msgs; /* protected by rds_lock */

        struct mutex rcvbuf_lock;
        char rcvbuf[MAX_MSG_LEN + 8];
        __u32 rcvbuflen;
        __u32 rcvbufoffset;
};
static struct kmem_cache *cor_rdmsg_slab;

static DEFINE_MUTEX(cor_rds_lock);
static struct cor_rdsock *cor_crd = 0;
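/* Closing the rdaemon socket drops the interface configuration, discards
 * all queued messages and fails every pending userspace connect request
 * with -ENETUNREACH. */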
int cor_rd_socket_release(struct socket *sock)
{
        mutex_lock(&cor_rds_lock);

        BUG_ON(((struct cor_rdsock *) sock->sk) != cor_crd);

        /* ... */

        cor_set_interface_config(0, 0, 0);

        while (list_empty(&(cor_crd->rcv_msgs)) == 0) {
                struct cor_rd_msg *rdm = container_of(cor_crd->rcv_msgs.next,
                                struct cor_rd_msg, lh);

                list_del(&(rdm->lh));
                if (rdm->cs != 0) {
                        list_del(&(rdm->cs_lh));
                        kref_put(&(rdm->cs->ref), cor_free_sock);
                }
                kmem_cache_free(cor_rdmsg_slab, rdm);
        }

        while (list_empty(&(cor_crd->socks)) == 0) {
                struct cor_sock *cs = container_of(cor_crd->socks.next,
                                struct cor_sock, data.conn_managed.crd_lh);

                BUG_ON(cs->type != CS_TYPE_CONN_MANAGED);
                BUG_ON(cs->data.conn_managed.in_crd_list == 0);
                list_del(&(cs->data.conn_managed.crd_lh));
                cs->data.conn_managed.in_crd_list = 0;
                _cor_set_sock_connecterror(cs, -ENETUNREACH);
                kref_put(&(cs->ref), cor_free_sock);
        }

        if (cor_crd->cmdparams != 0) {
                kfree(cor_crd->cmdparams);
                cor_crd->cmdparams = 0;
        }

        /* ... */

        mutex_unlock(&cor_rds_lock);

        return 0;
}
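/* Apart from connect(), the standard socket operations carry no
 * rdaemon-specific behavior; their bodies are elided in this excerpt. */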
int cor_rd_socket_bind(struct socket *sock, struct sockaddr *saddr,
                int sockaddr_len)
{
        /* ... */
}
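/* connect() does not take an address; it only marks the daemon socket as
 * connected so that sendmsg()/recvmsg() become usable. */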
int cor_rd_socket_connect(struct socket *sock, struct sockaddr *saddr,
                int sockaddr_len, int flags)
{
        struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

        atomic_set(&(crd->connected), 1);

        lock_sock(sock->sk);
        sock->state = SS_CONNECTED;
        release_sock(sock->sk);

        return 0;
}
int cor_rd_socket_accept(struct socket *sock, struct socket *newsock, int flags,
                bool kern)
{
        /* ... */
}

int cor_rd_socket_listen(struct socket *sock, int len)
{
        /* ... */
}

int cor_rd_socket_shutdown(struct socket *sock, int flags)
{
        /* ... */
}

int cor_rd_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
        /* ... */
}

int cor_rd_setsockopt(struct socket *sock, int level,
                int optname, char __user *optval, unsigned int optlen)
{
        /* ... */
}

int cor_rd_getsockopt(struct socket *sock, int level,
                int optname, char __user *optval, int __user *optlen)
{
        /* ... */
}
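/* CRD_UTK_VERSION must be the first command; everything else is rejected
 * until the version handshake has succeeded (see cor_rd_parse()). */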
static int cor_rd_parse_version(struct cor_rdsock *crd, __u32 cmd,
                char *param, __u32 paramlen)
{
        __u32 version;
        int rc = 1;

        mutex_lock(&cor_rds_lock);

        if (unlikely(paramlen < 4))
                goto out;

        version = cor_parse_u32(param);
        if (version != 0)
                goto out;

        /* the version handshake may only run once */
        if (crd->versioninited != 0)
                goto out;

        crd->versioninited = 1;
        rc = 0;

out:
        mutex_unlock(&cor_rds_lock);

        return rc;
}
/* interface_config_lock must be held */
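/* parameter format: u32 num_intf, then for each interface a u32 name_len
 * followed by name_len bytes of interface name */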
static int _cor_rd_parse_up_interfaces(struct cor_rdsock *crd, char *param,
                __u32 paramlen, __u32 *offset)
{
        __u32 num_intf;
        __u32 i = 0;
        struct cor_interface_config *newconfig = 0;

        if (unlikely(*offset + 4 > paramlen))
                return 1;

        num_intf = cor_parse_u32(param + *offset);
        *offset += 4;

        if (unlikely(num_intf > 65536))
                return 1;

        newconfig = kmalloc(num_intf * sizeof(struct cor_interface_config),
                        GFP_KERNEL);
        if (unlikely(newconfig == 0))
                return 1;

        memset(newconfig, 0, num_intf * sizeof(struct cor_interface_config));

        for (i = 0; i < num_intf; i++) {
                struct cor_interface_config *newconfig_curr = &(newconfig[i]);

                if (unlikely(*offset + 4 > paramlen))
                        goto out_err;

                newconfig_curr->name_len = cor_parse_u32(param + *offset);
                *offset += 4;

                if (unlikely(*offset + newconfig_curr->name_len > paramlen))
                        goto out_err;

                newconfig_curr->name = kmalloc(newconfig_curr->name_len,
                                GFP_KERNEL);
                if (unlikely(newconfig_curr->name == 0))
                        goto out_err;

                memcpy(newconfig_curr->name, param + *offset,
                                newconfig_curr->name_len);
                *offset += newconfig_curr->name_len;
        }

        cor_set_interface_config(newconfig, num_intf, 0);

        return 0;

out_err:
        while (i > 0) {
                struct cor_interface_config *newconfig_curr;

                i--;

                newconfig_curr = &(newconfig[i]);

                BUG_ON(newconfig_curr->name == 0);
                kfree(newconfig_curr->name);
                newconfig_curr->name = 0;
        }
        kfree(newconfig);

        return 1;
}
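/* CRD_UTK_UP parameter format: u64 flags, u32 addrlen, addrlen bytes of
 * local address, optionally followed by the interface list if
 * CRD_UTK_UP_FLAGS_INTERFACES is set */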
static int cor_rd_parse_up(struct cor_rdsock *crd, __u32 cmd,
                char *param, __u32 paramlen)
{
        __u64 flags;
        __u32 offset = 0;
        __u32 addrlen;
        char *addr;

        if (unlikely(paramlen < 12))
                return 1;

        flags = cor_parse_u64(param);
        offset += 8;

        addrlen = cor_parse_u32(param + offset);
        offset += 4;

        if (unlikely(unlikely(addrlen > 64) ||
                        unlikely(offset + addrlen > paramlen)))
                return 1;

        addr = param + offset;
        offset += addrlen;

        if ((flags & CRD_UTK_UP_FLAGS_INTERFACES) != 0) {
                if (_cor_rd_parse_up_interfaces(crd, param, paramlen, &offset)
                                != 0)
                        return 1;
        } else {
                cor_set_interface_config(0, 0, 1);
        }

        if (cor_config_up(addr, addrlen) != 0)
                return 1;

        return 0;
}
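/* CRD_UTK_CONNECTERROR: the daemon reports a failed connect attempt; the
 * be64 cookie identifies the waiting socket, the error code is mapped to
 * an errno (unknown codes fall back to -ENETUNREACH) */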
static int cor_rd_parse_connecterror(struct cor_rdsock *crd, __u32 cmd,
                char *param, __u32 paramlen)
{
        __be64 cookie;
        __u32 error;
        int errorno;

        if (unlikely(paramlen < 12))
                return 1;

        cookie = cor_parse_be64(param);
        error = cor_parse_u32(param + 8);

        if (error == CRD_UTK_CONNECTERROR_ACCES) {
                errorno = -EACCES;
        } else if (error == CRD_UTK_CONNECTERROR_NETUNREACH) {
                errorno = -ENETUNREACH;
        } else if (error == CRD_UTK_CONNECTERROR_TIMEDOUT) {
                errorno = -ETIMEDOUT;
        } else if (error == CRD_UTK_CONNECTERROR_REFUSED) {
                errorno = -ECONNREFUSED;
        } else {
                errorno = -ENETUNREACH;
        }

        cor_set_sock_connecterror(cookie, errorno);

        return 0;
}
static int cor_rd_parse(struct cor_rdsock *crd, __u32 cmd, char *param,
                __u32 paramlen)
{
        if (unlikely(unlikely(cmd != CRD_UTK_VERSION) &&
                        unlikely(crd->versioninited == 0)))
                return 1;

        if (cmd == CRD_UTK_VERSION) {
                return cor_rd_parse_version(crd, cmd, param, paramlen);
        } else if (cmd == CRD_UTK_UP) {
                return cor_rd_parse_up(crd, cmd, param, paramlen);
        } else if (cmd == CRD_UTK_CONNECTERROR) {
                return cor_rd_parse_connecterror(crd, cmd, param, paramlen);
        } else {
                return 1;
        }
}
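/* Commands from the daemon are framed by an 8 byte header (u32 command,
 * u32 paramlen) followed by paramlen bytes of parameters.
 * snd_cmdplen_buf/snd_cmdplen_read reassemble partially written headers,
 * cmdparams/param_read do the same for the parameters, so a command may be
 * spread over any number of sendmsg() calls. */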
static int _cor_rd_sendmsg_hdr(struct cor_rdsock *crd, struct msghdr *msg,
                __u32 len)
{
        __u32 cpy;
        size_t st_rc;

        BUG_ON(len > (1024 * 1024 * 1024));

        BUG_ON(crd->snd_cmdplen_read > 8);
        cpy = (8 - crd->snd_cmdplen_read);
        if (unlikely(cpy > len))
                cpy = len;

        st_rc = copy_from_iter(crd->snd_cmdplen_buf +
                        crd->snd_cmdplen_read, cpy, &(msg->msg_iter));

        if (unlikely(st_rc != cpy))
                return -EFAULT;

        crd->snd_cmdplen_read += cpy;

        return cpy;
}
static int _cor_rd_sendmsg_body(struct cor_rdsock *crd, struct msghdr *msg,
                __u32 len)
{
        __u32 cmd;
        __u32 paramlen;
        __u32 cpy = 0;
        size_t st_rc;

        BUG_ON(len > (1024 * 1024 * 1024));

        BUG_ON(crd->snd_cmdplen_read != 8);

        cmd = cor_parse_u32(crd->snd_cmdplen_buf);
        paramlen = cor_parse_u32(crd->snd_cmdplen_buf + 4);

        if (crd->cmdparams == 0 && paramlen != 0) {
                BUG_ON(crd->param_read != 0);
                if (unlikely(paramlen > MAX_SND_MSGLEN))
                        return -EINVAL;

                crd->cmdparams = kmalloc(paramlen, GFP_KERNEL);
                if (unlikely(crd->cmdparams == 0))
                        return -ENOMEM;
        }

        if (crd->param_read < paramlen) {
                cpy = (paramlen - crd->param_read);
                if (cpy > len)
                        cpy = len;

                BUG_ON(crd->cmdparams == 0);

                st_rc = copy_from_iter(crd->cmdparams +
                                crd->param_read, cpy, &(msg->msg_iter));

                if (unlikely(st_rc != cpy))
                        return -EFAULT;

                crd->param_read += cpy;
        }

        BUG_ON(crd->param_read > paramlen);

        if (crd->param_read == paramlen) {
                int rc = cor_rd_parse(crd, cmd, crd->cmdparams, paramlen);
                if (unlikely(rc != 0))
                        return -EINVAL;

                /* reset the header buffer for the next command */
                memset(crd->snd_cmdplen_buf, 0,
                                sizeof(crd->snd_cmdplen_buf));
                crd->snd_cmdplen_read = 0;

                kfree(crd->cmdparams);
                crd->cmdparams = 0;
                crd->param_read = 0;
        }

        return cpy;
}
static int _cor_rd_sendmsg(struct cor_rdsock *crd, struct msghdr *msg,
                __u32 len)
{
        if (crd->snd_cmdplen_read < 8) {
                return _cor_rd_sendmsg_hdr(crd, msg, len);
        } else {
                return _cor_rd_sendmsg_body(crd, msg, len);
        }
}
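/* sendmsg() feeds the command parser; it returns the number of bytes
 * consumed or a negative error */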
int cor_rd_sendmsg(struct socket *sock, struct msghdr *msg, size_t total_len)
{
        struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

        int rc = 0;
        __u32 totalread = 0;
        __u32 currread = 0;
        __u32 len;

        if (unlikely(total_len > 1024 * 1024 * 1024))
                len = 1024 * 1024 * 1024;
        else
                len = (__u32) total_len;

        if (unlikely(atomic_read(&(crd->connected)) == 0))
                return -ENOTCONN;

        mutex_lock(&(crd->sndbuf_lock));

        while (currread < len) {
                rc = _cor_rd_sendmsg(crd, msg, len - currread);
                if (unlikely(rc < 0))
                        break;
                currread += rc;
        }

        totalread += currread;

        mutex_unlock(&(crd->sndbuf_lock));

        if (rc >= 0 && totalread != 0) {
                BUG_ON(totalread > (1024 * 1024 * 1024));
                return totalread;
        }

        return rc;
}
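/* Messages to the daemon use the same framing (u32 type, u32 len, payload)
 * and are rendered into crd->rcvbuf directly before being copied to
 * userspace. */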
static void cor_fill_msgbuf_supportedversions(struct cor_rdsock *crd,
                struct cor_rd_msg *rdm)
{
        BUG_ON(rdm->cs != 0);

        BUG_ON(MAX_MSG_LEN < 16);

        cor_put_u32(crd->rcvbuf, CRD_KTU_SUPPORTEDVERSIONS);
        cor_put_u32(crd->rcvbuf + 4, 8); /* len */
        cor_put_u32(crd->rcvbuf + 8, 0);
        cor_put_u32(crd->rcvbuf + 12, 0);

        crd->rcvbuflen = 16;
}
static void cor_fill_msgbuf_connect(struct cor_rdsock *crd,
                struct cor_rd_msg *rdm)
{
        char *remoteaddr;
        __u32 remoteaddr_len;

        BUG_ON(rdm->cs == 0);
        mutex_lock(&(rdm->cs->lock));
        BUG_ON(rdm->cs->type != CS_TYPE_CONN_MANAGED);

        remoteaddr = (char *) &(rdm->cs->data.conn_managed.remoteaddr);
        remoteaddr_len = sizeof(struct cor_sockaddr);
        BUG_ON(remoteaddr_len != 68);

        BUG_ON(MAX_MSG_LEN < (20 + remoteaddr_len));

        cor_put_u32(crd->rcvbuf, CRD_KTU_CONNECT);
        cor_put_u32(crd->rcvbuf + 4, 12 + remoteaddr_len);
        cor_put_be64(crd->rcvbuf + 8, rdm->cs->data.conn_managed.cookie);
        memcpy(crd->rcvbuf + 16, remoteaddr, remoteaddr_len);
        cor_put_u32(crd->rcvbuf + 16 + remoteaddr_len, rdm->cs->is_highlatency ?
                        COR_TOS_HIGH_LATENCY : COR_TOS_LOW_LATENCY);

        crd->rcvbuflen = 20 + remoteaddr_len;
        mutex_unlock(&(rdm->cs->lock));
}
static void _cor_fill_msgbuf(struct cor_rdsock *crd, struct cor_rd_msg *rdm)
{
        if (rdm->type == CRD_KTU_SUPPORTEDVERSIONS) {
                cor_fill_msgbuf_supportedversions(crd, rdm);
        } else if (rdm->type == CRD_KTU_CONNECT) {
                cor_fill_msgbuf_connect(crd, rdm);
        }
}
static int cor_fill_msgbuf(struct socket *sock, struct cor_rdsock *crd,
                int blocking)
{
        struct cor_rd_msg *rdm = 0;

        while (1) {
                mutex_lock(&cor_rds_lock);
                if (list_empty(&(crd->rcv_msgs)) == 0)
                        break;

                atomic_set(&(crd->ready_to_read), 0);
                mutex_unlock(&cor_rds_lock);

                if (blocking == 0)
                        return -EAGAIN;

                if (wait_event_interruptible(*sk_sleep(sock->sk),
                                atomic_read(&(crd->ready_to_read)) != 0) != 0)
                        return -ERESTARTSYS;
        }

        rdm = container_of(crd->rcv_msgs.next, struct cor_rd_msg, lh);
        list_del(&(rdm->lh));
        if (rdm->cs != 0)
                list_del(&(rdm->cs_lh));

        mutex_unlock(&cor_rds_lock);

        memset(crd->rcvbuf, 0, sizeof(crd->rcvbuf));
        crd->rcvbuflen = 0;
        crd->rcvbufoffset = 0;

        _cor_fill_msgbuf(crd, rdm);

        if (rdm->cs != 0)
                kref_put(&(rdm->cs->ref), cor_free_sock);

        kmem_cache_free(cor_rdmsg_slab, rdm);

        return 0;
}
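/* recvmsg() hands the queued kernel-to-daemon messages to userspace; a
 * message may be consumed in several reads, rcvbufoffset tracks how much
 * of the current one has been delivered */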
int cor_rd_recvmsg(struct socket *sock, struct msghdr *msg, size_t total_len,
                int flags)
{
        int blocking = (flags & MSG_DONTWAIT) == 0;

        struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

        __u32 totallen;
        int copied = 0;

        if (unlikely(total_len > 1024 * 1024 * 1024))
                totallen = 1024 * 1024 * 1024;
        else
                totallen = (__u32) total_len;

        if (unlikely((flags & MSG_PEEK) != 0))
                return -EINVAL;

        if (unlikely(atomic_read(&(crd->connected)) == 0))
                return -ENOTCONN;

        mutex_lock(&(crd->rcvbuf_lock));
        while (copied < totallen) {
                __u32 len = totallen - copied;
                size_t st_rc;

                if (crd->rcvbufoffset == crd->rcvbuflen) {
                        int rc = cor_fill_msgbuf(sock, crd,
                                        blocking && copied == 0);
                        if (rc != 0 && copied == 0)
                                copied = rc;
                        if (rc != 0)
                                break;
                }

                BUG_ON(crd->rcvbufoffset > crd->rcvbuflen);

                if (len > (crd->rcvbuflen - crd->rcvbufoffset))
                        len = crd->rcvbuflen - crd->rcvbufoffset;

                st_rc = copy_to_iter(crd->rcvbuf + crd->rcvbufoffset, len,
                                &(msg->msg_iter));

                if (unlikely(st_rc != len)) {
                        copied = -EFAULT;
                        break;
                }

                copied += len;
                crd->rcvbufoffset += len;
        }
        mutex_unlock(&(crd->rcvbuf_lock));

        BUG_ON(copied > 0 && unlikely((copied > total_len ||
                        copied > totallen)));

        return copied;
}
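/* the daemon socket is always writable; it is readable while a partially
 * read message or another queued cor_rd_msg is pending */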
static unsigned int cor_rd_poll(struct file *file, struct socket *sock,
                poll_table *wait)
{
        unsigned int mask = 0;

        struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

        if (unlikely(atomic_read(&(crd->connected)) == 0))
                return 0;

        sock_poll_wait(file, sock, wait);

        mutex_lock(&(crd->rcvbuf_lock));
        mutex_lock(&cor_rds_lock);

        if (crd->rcvbufoffset != crd->rcvbuflen ||
                        (list_empty(&(crd->rcv_msgs)) == 0))
                mask |= (POLLIN | POLLRDNORM);

        mutex_unlock(&cor_rds_lock);
        mutex_unlock(&(crd->rcvbuf_lock));

        mask |= (POLLOUT | POLLWRNORM);

        return mask;
}
struct proto cor_rd_proto = {
        /* ... */
        .obj_size = sizeof(struct cor_rdsock),
        .owner = THIS_MODULE,
};
const struct proto_ops cor_rd_proto_ops = {
        .family = PF_COR,
        .owner = THIS_MODULE,
        .release = cor_rd_socket_release,
        .bind = cor_rd_socket_bind,
        .connect = cor_rd_socket_connect,
        .accept = cor_rd_socket_accept,
        .listen = cor_rd_socket_listen,
        .shutdown = cor_rd_socket_shutdown,
        .ioctl = cor_rd_ioctl,
        .setsockopt = cor_rd_setsockopt,
        .getsockopt = cor_rd_getsockopt,
        .compat_ioctl = cor_rd_ioctl,
        .compat_setsockopt = cor_rd_setsockopt,
        .compat_getsockopt = cor_rd_getsockopt,
        .sendmsg = cor_rd_sendmsg,
        .recvmsg = cor_rd_recvmsg,
        .poll = cor_rd_poll,
        .socketpair = cor_socket_socketpair,
        .getname = cor_socket_getname,
        .mmap = cor_socket_mmap,

        /* sendpage and splice_read are optional */
};
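/* Creates the rdaemon socket; only one may exist at a time. A
 * CRD_KTU_SUPPORTEDVERSIONS message is queued immediately so the daemon
 * can start the version handshake right after connect(). */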
int cor_create_rdaemon_sock(struct net *net, struct socket *sock, int protocol,
                int kern)
{
        struct cor_rd_msg *rdm = 0;
        struct cor_rdsock *newcrd = 0;

        rdm = kmem_cache_alloc(cor_rdmsg_slab, GFP_KERNEL);
        if (unlikely(rdm == 0))
                return -ENOMEM;

        newcrd = (struct cor_rdsock *) sk_alloc(net, PF_COR, GFP_KERNEL,
                        &cor_rd_proto, kern);
        if (unlikely(newcrd == 0)) {
                kmem_cache_free(cor_rdmsg_slab, rdm);
                return -ENOMEM;
        }

        sock_init_data(sock, (struct sock *) newcrd);
        newcrd->sk.sk_protocol = protocol;
        memset(((char *)newcrd) + sizeof(struct sock), 0,
                        sizeof(struct cor_rdsock) - sizeof(struct sock));

        atomic_set(&(newcrd->connected), 0);
        INIT_LIST_HEAD(&(newcrd->socks));
        mutex_init(&(newcrd->sndbuf_lock));
        mutex_init(&(newcrd->rcvbuf_lock));
        atomic_set(&(newcrd->ready_to_read), 0);
        INIT_LIST_HEAD(&(newcrd->rcv_msgs));

        mutex_lock(&cor_rds_lock);
        if (cor_crd != 0) {
                sock_put((struct sock *) newcrd);
                mutex_unlock(&cor_rds_lock);
                kmem_cache_free(cor_rdmsg_slab, rdm);
                return -EACCES;
        }
        cor_crd = newcrd;

        memset(rdm, 0, sizeof(struct cor_rd_msg));
        rdm->type = CRD_KTU_SUPPORTEDVERSIONS;
        list_add_tail(&(rdm->lh), &(cor_crd->rcv_msgs));

        atomic_set(&(newcrd->ready_to_read), 1);

        mutex_unlock(&cor_rds_lock);

        sock->state = SS_UNCONNECTED;
        sock->ops = &cor_rd_proto_ops;
        sock->sk = (struct sock *) cor_crd;

        return 0;
}
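/* Called for conn_managed usersocks: queues a CRD_KTU_CONNECT message and
 * wakes the daemon so it can set up the route; a failure is reported back
 * later through CRD_UTK_CONNECTERROR. */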
int cor_rdreq_connect(struct cor_sock *cs)
{
        int rc;

        struct cor_rd_msg *rdm = kmem_cache_alloc(cor_rdmsg_slab, GFP_KERNEL);

        mutex_lock(&cor_rds_lock);
        mutex_lock(&(cs->lock));

        BUG_ON(cs->type != CS_TYPE_CONN_MANAGED);
        BUG_ON(cs->data.conn_managed.cookie == 0);

        if (unlikely(cor_crd == 0 || atomic_read(&(cor_crd->connected)) == 0 ||
                        cor_crd->versioninited == 0)) {
                if (rdm != 0)
                        kmem_cache_free(cor_rdmsg_slab, rdm);
                rc = -ENETUNREACH;
                goto out;
        }

        if (unlikely(rdm == 0)) {
                rc = -ENOMEM;
                goto out;
        }

        memset(rdm, 0, sizeof(struct cor_rd_msg));

        kref_get(&(cs->ref));
        list_add_tail(&(rdm->cs_lh), &(cs->data.conn_managed.rd_msgs));

        rdm->type = CRD_KTU_CONNECT;

        if (list_empty(&(cor_crd->rcv_msgs))) {
                atomic_set(&(cor_crd->ready_to_read), 1);
                cor_crd->sk.sk_data_ready(&(cor_crd->sk));
        }
        list_add_tail(&(rdm->lh), &(cor_crd->rcv_msgs));
        kref_get(&(cs->ref));
        rdm->cs = cs;

        kref_get(&(cs->ref));
        list_add_tail(&(cs->data.conn_managed.crd_lh), &(cor_crd->socks));
        cs->data.conn_managed.in_crd_list = 1;

        rc = -EINPROGRESS; /* assumed: connect completes asynchronously */

out:
        mutex_unlock(&(cs->lock));
        mutex_unlock(&cor_rds_lock);

        return rc;
}
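/* Detaches a usersock from the rdaemon: drops its queued messages and its
 * entry in cor_crd->socks. */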
void cor_usersock_release(struct cor_sock *cs)
{
        mutex_lock(&cor_rds_lock);
        mutex_lock(&(cs->lock));

        if (cs->type != CS_TYPE_CONN_MANAGED)
                goto out;

        while (list_empty(&(cs->data.conn_managed.rd_msgs)) == 0) {
                struct cor_rd_msg *rdm = container_of(
                                cs->data.conn_managed.rd_msgs.next,
                                struct cor_rd_msg, cs_lh);

                list_del(&(rdm->lh));
                BUG_ON(rdm->cs != cs);
                list_del(&(rdm->cs_lh));
                kref_put(&(cs->ref), cor_kreffree_bug);

                kmem_cache_free(cor_rdmsg_slab, rdm);
        }

        if (cs->data.conn_managed.in_crd_list != 0) {
                list_del(&(cs->data.conn_managed.crd_lh));
                cs->data.conn_managed.in_crd_list = 0;
                kref_put(&(cs->ref), cor_kreffree_bug);
        }

out:
        mutex_unlock(&(cs->lock));
        mutex_unlock(&cor_rds_lock);
}
int __init cor_rd_init1(void)
{
        cor_rdmsg_slab = kmem_cache_create("cor_rdmsg",
                        sizeof(struct cor_rd_msg), 8, 0, 0);
        if (unlikely(cor_rdmsg_slab == 0))
                return -ENOMEM;

        return 0;
}

int __init cor_rd_init2(void)
{
        return proto_register(&cor_rd_proto, 1);
}

void __exit cor_rd_exit1(void)
{
        proto_unregister(&cor_rd_proto);
}

void __exit cor_rd_exit2(void)
{
        kmem_cache_destroy(cor_rdmsg_slab);
}