/*
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define MAX_SND_MSGLEN 4096
#define MAX_MSG_LEN 256
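/*
 * Control protocol spoken over the rdaemon socket, as implemented by the
 * sendmsg/recvmsg paths below: every message, in both directions, is an
 * 8 byte header followed by a payload. CRD_KTU_* commands travel from the
 * kernel to the userspace daemon, CRD_UTK_* commands the other way.
 *
 *	offset 0: u32 cmd       (e.g. CRD_UTK_VERSION, CRD_KTU_CONNECT)
 *	offset 4: u32 paramlen  (payload length in bytes)
 *	offset 8: paramlen bytes of command specific payload
 */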
struct cor_rd_msg {
	struct list_head lh; /* cor_rdsock rcv_msgs */
	struct list_head cs_lh; /* cor_sock rd_msgs */

	struct cor_sock *cs;

	__u32 type;
};

struct cor_rdsock {
	struct sock sk;

	atomic_t connected;
	__u8 versioninited;

	struct list_head socks;

	struct mutex sndbuf_lock;
	__u8 snd_cmdplen_read;
	__u32 param_read;
	char *cmdparams;

	atomic_t ready_to_read;
	struct list_head rcv_msgs; /* protected by rds_lock */

	struct mutex rcvbuf_lock;
	__u32 rcvbuflen;
	__u32 rcvbufoffset;

	struct {
		char snd_cmdplen_buf[8];
		char rcvbuf[MAX_MSG_LEN + 8];
	} user_copy;
};
static struct kmem_cache *cor_rdmsg_slab;

static DEFINE_MUTEX(cor_rds_lock);
static struct cor_rdsock *cor_crd = 0;
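/*
 * There is at most one rdaemon control socket at a time; cor_crd points to
 * it and is protected by cor_rds_lock. Releasing it clears the interface
 * configuration and fails all connect requests that are still queued.
 */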
int cor_rd_socket_release(struct socket *sock)
{
	mutex_lock(&cor_rds_lock);

	BUG_ON(((struct cor_rdsock *) sock->sk) != cor_crd);

	cor_set_interface_config(0, 0, 0);

	while (list_empty(&(cor_crd->rcv_msgs)) == 0) {
		struct cor_rd_msg *rdm = container_of(cor_crd->rcv_msgs.next,
				struct cor_rd_msg, lh);

		list_del(&(rdm->lh));

		if (rdm->cs != 0) {
			list_del(&(rdm->cs_lh));
			kref_put(&(rdm->cs->ref), cor_free_sock);
		}

		kmem_cache_free(cor_rdmsg_slab, rdm);
	}

	while (list_empty(&(cor_crd->socks)) == 0) {
		struct cor_sock *cs = container_of(cor_crd->socks.next,
				struct cor_sock, data.conn_managed.crd_lh);

		BUG_ON(cs->type != CS_TYPE_CONN_MANAGED);
		BUG_ON(cs->data.conn_managed.in_crd_list == 0);
		list_del(&(cs->data.conn_managed.crd_lh));
		cs->data.conn_managed.in_crd_list = 0;
		_cor_set_sock_connecterror(cs, ENETUNREACH);
		kref_put(&(cs->ref), cor_free_sock);
	}

	if (cor_crd->cmdparams != 0) {
		kfree(cor_crd->cmdparams);
		cor_crd->cmdparams = 0;
	}

	cor_crd = 0;

	mutex_unlock(&cor_rds_lock);

	sock_put(sock->sk); /* assumed: drop the reference from sk_alloc() */

	return 0;
}
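/*
 * The control socket is only used via connect/sendmsg/recvmsg/poll; the
 * handlers below are wired into cor_rd_proto_ops but do nothing. Their
 * bodies are sketched here as returning -EOPNOTSUPP; the exact errno is
 * an assumption.
 */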
int cor_rd_socket_bind(struct socket *sock, struct sockaddr *saddr,
		int sockaddr_len)
{
	return -EOPNOTSUPP;
}
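/*
 * connect() takes no address; it only marks the daemon as attached so that
 * sendmsg/recvmsg, which check crd->connected, start working.
 */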
int cor_rd_socket_connect(struct socket *sock, struct sockaddr *saddr,
		int sockaddr_len, int flags)
{
	struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

	atomic_set(&(crd->connected), 1);

	lock_sock(sock->sk);
	sock->state = SS_CONNECTED;
	release_sock(sock->sk);

	return 0;
}
int cor_rd_socket_accept(struct socket *sock, struct socket *newsock, int flags,
		bool kern)
{
	return -EOPNOTSUPP;
}
int cor_rd_socket_listen(struct socket *sock, int len)
{
	return -EOPNOTSUPP;
}
int cor_rd_socket_shutdown(struct socket *sock, int flags)
{
	return -EOPNOTSUPP;
}
int cor_rd_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}
int cor_rd_setsockopt(struct socket *sock, int level,
		int optname, char __user *optval, unsigned int optlen)
{
	return -EOPNOTSUPP;
}
int cor_rd_getsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}
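/*
 * Parsers for the UTK commands sent by the daemon. CRD_UTK_VERSION must be
 * sent exactly once before any other command (see the versioninited checks
 * here and in cor_rd_parse()). The specific errnos on the reject paths,
 * and the requirement that the announced version be 0, are assumptions.
 */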
static int cor_rd_parse_version(struct cor_rdsock *crd, __u32 cmd,
		char *param, __u32 paramlen)
{
	int rc = 0;
	__u32 version;

	mutex_lock(&cor_rds_lock);

	if (unlikely(paramlen != 4)) {
		rc = -EINVAL;
		goto out;
	}

	version = cor_parse_u32(param);
	if (unlikely(version != 0)) {
		rc = -EINVAL;
		goto out;
	}

	if (crd->versioninited != 0) {
		rc = -EINVAL;
		goto out;
	}

	crd->versioninited = 1;

out:
	mutex_unlock(&cor_rds_lock);

	return rc;
}
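/*
 * Interface list as carried in the CRD_UTK_UP payload: a u32 interface
 * count (at most 65536), then for each interface a u32 name_len followed
 * by name_len bytes of interface name.
 */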
/* interface_config_lock must be held */
static int _cor_rd_parse_up_interfaces(struct cor_rdsock *crd, char *param,
		__u32 paramlen, __u32 *offset)
{
	__u32 num_intf;
	__u32 i = 0;
	struct cor_interface_config *newconfig = 0;

	if (unlikely(*offset + 4 > paramlen))
		return -EINVAL;

	num_intf = cor_parse_u32(param + *offset);
	*offset += 4;

	if (unlikely(num_intf > 65536))
		return -EINVAL;

	newconfig = kmalloc(num_intf * sizeof(struct cor_interface_config),
			GFP_KERNEL);
	if (unlikely(newconfig == 0))
		return -ENOMEM;

	memset(newconfig, 0, num_intf * sizeof(struct cor_interface_config));

	for (i = 0; i < num_intf; i++) {
		struct cor_interface_config *newconfig_curr = &(newconfig[i]);

		if (unlikely(*offset + 4 > paramlen))
			goto out_err;

		newconfig_curr->name_len = cor_parse_u32(param + *offset);
		*offset += 4;

		if (unlikely(*offset + newconfig_curr->name_len > paramlen))
			goto out_err;

		newconfig_curr->name = kmalloc(newconfig_curr->name_len,
				GFP_KERNEL);
		if (unlikely(newconfig_curr->name == 0))
			goto out_err;

		memcpy(newconfig_curr->name, param + *offset,
				newconfig_curr->name_len);
		*offset += newconfig_curr->name_len;
	}

	cor_set_interface_config(newconfig, num_intf, 0);

	return 0;

out_err:
	while (i > 0) {
		struct cor_interface_config *newconfig_curr;

		i--;
		newconfig_curr = &(newconfig[i]);

		BUG_ON(newconfig_curr->name == 0);
		kfree(newconfig_curr->name);
		newconfig_curr->name = 0;
	}
	kfree(newconfig);
	return -EINVAL;
}
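/*
 * CRD_UTK_UP payload: u64 flags, then an optional be64 local address if
 * CRD_UTK_UP_FLAGS_ADDR is set, then an optional interface list if
 * CRD_UTK_UP_FLAGS_INTERFACES is set; otherwise the interface
 * configuration is reset via cor_set_interface_config(0, 0, 1).
 */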
static int cor_rd_parse_up(struct cor_rdsock *crd, __u32 cmd,
		char *param, __u32 paramlen)
{
	__u64 flags;
	__u32 offset = 8;
	__u8 has_addr = 0;
	__be64 addr = 0;

	if (unlikely(paramlen < 8))
		return -EINVAL;

	flags = cor_parse_u64(param);

	if ((flags & CRD_UTK_UP_FLAGS_ADDR) != 0) {
		if (unlikely(paramlen - offset < 8))
			return -EINVAL;

		addr = cor_parse_be64(param + offset);
		offset += 8;
		has_addr = 1;
	}

	if ((flags & CRD_UTK_UP_FLAGS_INTERFACES) != 0) {
		if (_cor_rd_parse_up_interfaces(crd, param, paramlen, &offset)
				!= 0)
			return -EINVAL;
	} else {
		cor_set_interface_config(0, 0, 1);
	}

	if (cor_config_up(has_addr, addr) != 0)
		return -EINVAL;

	return 0;
}
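/*
 * CRD_UTK_CONNECTERROR payload: a be64 cookie identifying the connect
 * attempt and a u32 error code, which is translated to an errno with
 * ENETUNREACH as the fallback for unknown codes.
 */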
static int cor_rd_parse_connecterror(struct cor_rdsock *crd, __u32 cmd,
		char *param, __u32 paramlen)
{
	__be64 cookie;
	__u32 error;
	int errorno;

	if (unlikely(paramlen < 12))
		return -EINVAL;

	cookie = cor_parse_be64(param);
	error = cor_parse_u32(param + 8);

	if (error == CRD_UTK_CONNECTERROR_ACCES) {
		errorno = EACCES;
	} else if (error == CRD_UTK_CONNECTERROR_NETUNREACH) {
		errorno = ENETUNREACH;
	} else if (error == CRD_UTK_CONNECTERROR_TIMEDOUT) {
		errorno = ETIMEDOUT;
	} else if (error == CRD_UTK_CONNECTERROR_REFUSED) {
		errorno = ECONNREFUSED;
	} else {
		errorno = ENETUNREACH;
	}

	cor_set_sock_connecterror(cookie, errorno);

	return 0;
}
static int cor_rd_parse(struct cor_rdsock *crd, __u32 cmd, char *param,
		__u32 paramlen)
{
	if (unlikely(unlikely(cmd != CRD_UTK_VERSION) &&
			unlikely(crd->versioninited == 0)))
		return -EINVAL;

	if (cmd == CRD_UTK_VERSION) {
		return cor_rd_parse_version(crd, cmd, param, paramlen);
	} else if (cmd == CRD_UTK_UP) {
		return cor_rd_parse_up(crd, cmd, param, paramlen);
	} else if (cmd == CRD_UTK_CONNECTERROR) {
		return cor_rd_parse_connecterror(crd, cmd, param, paramlen);
	} else {
		return -EINVAL;
	}
}
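/*
 * sendmsg() consumes the daemon's byte stream as a sequence of messages:
 * _cor_rd_sendmsg_hdr() accumulates the 8 byte header in snd_cmdplen_buf
 * (possibly across several writes), then _cor_rd_sendmsg_body() collects
 * paramlen bytes into cmdparams and hands the completed message to
 * cor_rd_parse(). Both return the number of bytes consumed or a negative
 * errno.
 */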
static int _cor_rd_sendmsg_hdr(struct cor_rdsock *crd, struct msghdr *msg,
		__u32 len)
{
	__u32 cpy;
	size_t st_rc;

	BUG_ON(len > (1024 * 1024 * 1024));

	BUG_ON(crd->snd_cmdplen_read > 8);
	cpy = (8 - crd->snd_cmdplen_read);
	if (unlikely(cpy > len))
		cpy = len;

	st_rc = copy_from_iter(crd->user_copy.snd_cmdplen_buf +
			crd->snd_cmdplen_read, cpy, &(msg->msg_iter));

	if (unlikely(st_rc != cpy))
		return -EFAULT;

	crd->snd_cmdplen_read += cpy;

	return cpy;
}
static int _cor_rd_sendmsg_body(struct cor_rdsock *crd, struct msghdr *msg,
		__u32 len)
{
	__u32 cmd;
	__u32 paramlen;
	__u32 cpy = 0;

	BUG_ON(len > (1024 * 1024 * 1024));

	BUG_ON(crd->snd_cmdplen_read != 8);

	cmd = cor_parse_u32(crd->user_copy.snd_cmdplen_buf);
	paramlen = cor_parse_u32(crd->user_copy.snd_cmdplen_buf + 4);

	if (crd->cmdparams == 0 && paramlen != 0) {
		BUG_ON(crd->param_read != 0);
		if (unlikely(paramlen > MAX_SND_MSGLEN))
			return -EINVAL;

		crd->cmdparams = kmalloc(paramlen, GFP_KERNEL);
		if (unlikely(crd->cmdparams == 0))
			return -ENOMEM;
	}

	if (crd->param_read < paramlen) {
		size_t st_rc;

		cpy = (paramlen - crd->param_read);
		if (cpy > len)
			cpy = len;

		BUG_ON(crd->cmdparams == 0);

		st_rc = copy_from_iter(crd->cmdparams +
				crd->param_read, cpy, &(msg->msg_iter));

		if (unlikely(st_rc != cpy))
			return -EFAULT;

		crd->param_read += cpy;
	}

	BUG_ON(crd->param_read > paramlen);

	if (crd->param_read == paramlen) {
		int rc = cor_rd_parse(crd, cmd, crd->cmdparams, paramlen);

		if (unlikely(rc != 0))
			return rc;

		memset(crd->user_copy.snd_cmdplen_buf, 0,
				sizeof(crd->user_copy.snd_cmdplen_buf));
		crd->snd_cmdplen_read = 0;
		crd->param_read = 0;
		kfree(crd->cmdparams);
		crd->cmdparams = 0;
	}

	return cpy;
}
static int _cor_rd_sendmsg(struct cor_rdsock *crd, struct msghdr *msg,
		__u32 len)
{
	if (crd->snd_cmdplen_read < 8) {
		return _cor_rd_sendmsg_hdr(crd, msg, len);
	} else {
		return _cor_rd_sendmsg_body(crd, msg, len);
	}
}
int cor_rd_sendmsg(struct socket *sock, struct msghdr *msg, size_t total_len)
{
	struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

	int rc = 0;
	__u32 totalread = 0;
	__u32 currread = 0;
	__u32 len;

	if (unlikely(total_len > 1024 * 1024 * 1024))
		len = 1024 * 1024 * 1024;
	else
		len = (__u32) total_len;

	if (unlikely(atomic_read(&(crd->connected)) == 0))
		return -ENOTCONN;

	mutex_lock(&(crd->sndbuf_lock));

	while (currread < len) {
		rc = _cor_rd_sendmsg(crd, msg, len - currread);
		if (unlikely(rc < 0))
			break;
		currread += rc;
	}
	totalread += currread;

	mutex_unlock(&(crd->sndbuf_lock));

	if (rc >= 0 && totalread != 0) {
		BUG_ON(totalread > (1024 * 1024 * 1024));
		rc = totalread;
	}

	return rc;
}
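/*
 * Builders for the KTU messages handed to the daemon via recvmsg().
 * CRD_KTU_SUPPORTEDVERSIONS carries two u32 fields, both 0 here,
 * presumably advertising protocol version 0.
 */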
static void cor_fill_msgbuf_supportedversions(struct cor_rdsock *crd,
		struct cor_rd_msg *rdm)
{
	BUG_ON(rdm->cs != 0);

	BUG_ON(MAX_MSG_LEN < 16);

	cor_put_u32(crd->user_copy.rcvbuf, CRD_KTU_SUPPORTEDVERSIONS);
	cor_put_u32(crd->user_copy.rcvbuf + 4, 8); /* len */
	cor_put_u32(crd->user_copy.rcvbuf + 8, 0);
	cor_put_u32(crd->user_copy.rcvbuf + 12, 0);

	crd->rcvbuflen = 16;
}
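/*
 * CRD_KTU_CONNECT layout (36 bytes total):
 *	offset  0: u32 CRD_KTU_CONNECT
 *	offset  4: u32 payload length (12 + remoteaddr_len = 28)
 *	offset  8: be64 cookie of the connecting socket
 *	offset 16: struct cor_sockaddr (16 bytes)
 *	offset 32: u32 COR_TOS_HIGH_LATENCY or COR_TOS_LOW_LATENCY
 */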
static void cor_fill_msgbuf_connect(struct cor_rdsock *crd,
		struct cor_rd_msg *rdm)
{
	char *remoteaddr;
	__u32 remoteaddr_len;

	BUG_ON(rdm->cs == 0);
	mutex_lock(&(rdm->cs->lock));
	BUG_ON(rdm->cs->type != CS_TYPE_CONN_MANAGED);

	remoteaddr = (char *) &(rdm->cs->data.conn_managed.remoteaddr);
	remoteaddr_len = sizeof(struct cor_sockaddr);
	BUILD_BUG_ON(remoteaddr_len != 16);

	BUG_ON(MAX_MSG_LEN < (20 + remoteaddr_len));

	cor_put_u32(crd->user_copy.rcvbuf, CRD_KTU_CONNECT);
	cor_put_u32(crd->user_copy.rcvbuf + 4, 12 + remoteaddr_len);
	cor_put_be64(crd->user_copy.rcvbuf + 8,
			rdm->cs->data.conn_managed.cookie);
	memcpy(crd->user_copy.rcvbuf + 16, remoteaddr, remoteaddr_len);
	cor_put_u32(crd->user_copy.rcvbuf + 16 + remoteaddr_len,
			rdm->cs->is_highlatency ?
			COR_TOS_HIGH_LATENCY : COR_TOS_LOW_LATENCY);

	crd->rcvbuflen = 20 + remoteaddr_len;
	mutex_unlock(&(rdm->cs->lock));
}
static void _cor_fill_msgbuf(struct cor_rdsock *crd, struct cor_rd_msg *rdm)
{
	if (rdm->type == CRD_KTU_SUPPORTEDVERSIONS) {
		cor_fill_msgbuf_supportedversions(crd, rdm);
	} else if (rdm->type == CRD_KTU_CONNECT) {
		cor_fill_msgbuf_connect(crd, rdm);
	}
}
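/*
 * Dequeues the next queued cor_rd_msg and renders it into
 * user_copy.rcvbuf. With blocking == 0 an empty queue fails immediately,
 * so a nonblocking read can return -EAGAIN.
 */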
static int cor_fill_msgbuf(struct socket *sock, struct cor_rdsock *crd,
		int blocking)
{
	struct cor_rd_msg *rdm = 0;

	while (1) {
		mutex_lock(&cor_rds_lock);
		if (list_empty(&(crd->rcv_msgs)) == 0)
			break;

		atomic_set(&(crd->ready_to_read), 0);
		mutex_unlock(&cor_rds_lock);

		if (blocking == 0)
			return -EAGAIN;

		if (wait_event_interruptible(*sk_sleep(sock->sk),
				atomic_read(&(crd->ready_to_read)) != 0) != 0)
			return -ERESTARTSYS;
	}

	rdm = container_of(crd->rcv_msgs.next, struct cor_rd_msg, lh);
	list_del(&(rdm->lh));
	if (rdm->cs != 0)
		list_del(&(rdm->cs_lh));

	mutex_unlock(&cor_rds_lock);

	memset(crd->user_copy.rcvbuf, 0, sizeof(crd->user_copy.rcvbuf));

	crd->rcvbufoffset = 0;

	_cor_fill_msgbuf(crd, rdm);

	if (rdm->cs != 0)
		kref_put(&(rdm->cs->ref), cor_free_sock);

	kmem_cache_free(cor_rdmsg_slab, rdm);

	return 0;
}
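/*
 * recvmsg() hands out the rendered message buffer. A message may be read
 * in several pieces: rcvbufoffset tracks how much of the current message
 * has been copied out, and the next message is only fetched once the
 * buffer is fully drained.
 */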
int cor_rd_recvmsg(struct socket *sock, struct msghdr *msg, size_t total_len,
		int flags)
{
	int copied = 0;
	int blocking = (flags & MSG_DONTWAIT) == 0;

	struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

	__u32 totallen;

	if (unlikely(total_len > 1024 * 1024 * 1024))
		totallen = 1024 * 1024 * 1024;
	else
		totallen = (__u32) total_len;

	if (unlikely((flags & MSG_PEEK) != 0))
		return -EINVAL;

	if (unlikely(atomic_read(&(crd->connected)) == 0))
		return -ENOTCONN;

	mutex_lock(&(crd->rcvbuf_lock));
	while (copied < totallen) {
		__u32 len = totallen - copied;
		size_t st_rc;

		if (crd->rcvbufoffset == crd->rcvbuflen) {
			int rc = cor_fill_msgbuf(sock, crd,
					blocking && copied == 0);

			if (rc != 0 && copied == 0)
				copied = rc;
			if (rc != 0)
				break;
		}

		BUG_ON(crd->rcvbufoffset > crd->rcvbuflen);

		if (len > (crd->rcvbuflen - crd->rcvbufoffset))
			len = crd->rcvbuflen - crd->rcvbufoffset;

		st_rc = copy_to_iter(crd->user_copy.rcvbuf + crd->rcvbufoffset,
				len, &(msg->msg_iter));

		if (unlikely(st_rc != len)) {
			if (copied == 0)
				copied = -EFAULT;
			break;
		}

		copied += len;
		crd->rcvbufoffset += len;
	}
	mutex_unlock(&(crd->rcvbuf_lock));

	BUG_ON(copied > 0 && unlikely((copied > total_len ||
			copied > totallen)));

	return copied;
}
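/*
 * The socket polls readable while a partially read message or queued
 * messages remain; it is always writable.
 */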
static unsigned int cor_rd_poll(struct file *file, struct socket *sock,
		poll_table *wait)
{
	unsigned int mask = 0;

	struct cor_rdsock *crd = (struct cor_rdsock *) sock->sk;

	if (unlikely(atomic_read(&(crd->connected)) == 0))
		return 0;

	sock_poll_wait(file, sock, wait);

	mutex_lock(&(crd->rcvbuf_lock));
	mutex_lock(&cor_rds_lock);

	if (crd->rcvbufoffset != crd->rcvbuflen ||
			(list_empty(&(crd->rcv_msgs)) == 0))
		mask |= (POLLIN | POLLRDNORM);

	mutex_unlock(&cor_rds_lock);
	mutex_unlock(&(crd->rcvbuf_lock));

	mask |= (POLLOUT | POLLWRNORM);

	return mask;
}
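/*
 * useroffset/usersize whitelist the user_copy area for hardened usercopy,
 * so only snd_cmdplen_buf and rcvbuf are ever copied to or from userspace.
 * The .name value below is an assumption.
 */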
struct proto cor_rd_proto = {
	.name = "cor_rd", /* assumed name */
	.obj_size = sizeof(struct cor_rdsock),
	.useroffset = offsetof(struct cor_rdsock, user_copy),
	.usersize = sizeof(((struct cor_rdsock *) 0)->user_copy),
	.owner = THIS_MODULE,
};
const struct proto_ops cor_rd_proto_ops = {
	.family = PF_COR,
	.owner = THIS_MODULE,
	.release = cor_rd_socket_release,
	.bind = cor_rd_socket_bind,
	.connect = cor_rd_socket_connect,
	.accept = cor_rd_socket_accept,
	.listen = cor_rd_socket_listen,
	.shutdown = cor_rd_socket_shutdown,
	.ioctl = cor_rd_ioctl,
	.setsockopt = cor_rd_setsockopt,
	.getsockopt = cor_rd_getsockopt,

	.compat_ioctl = cor_rd_ioctl,
	.compat_setsockopt = cor_rd_setsockopt,
	.compat_getsockopt = cor_rd_getsockopt,

	.sendmsg = cor_rd_sendmsg,
	.recvmsg = cor_rd_recvmsg,

	.poll = cor_rd_poll,
	.socketpair = cor_socket_socketpair,
	.getname = cor_socket_getname,
	.mmap = cor_socket_mmap,

	/* sendpage and splice_read are optional */
};
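/*
 * Called when userspace creates the rdaemon socket. A
 * CRD_KTU_SUPPORTEDVERSIONS message is queued before the socket is handed
 * out, so it is the first thing the daemon reads.
 */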
int cor_create_rdaemon_sock(struct net *net, struct socket *sock, int protocol,
		int kern)
{
	struct cor_rd_msg *rdm = 0;
	struct cor_rdsock *newcrd = 0;

	rdm = kmem_cache_alloc(cor_rdmsg_slab, GFP_KERNEL);
	if (unlikely(rdm == 0))
		return -ENOMEM;

	newcrd = (struct cor_rdsock *) sk_alloc(net, PF_COR, GFP_KERNEL,
			&cor_rd_proto, kern);
	if (unlikely(newcrd == 0)) {
		kmem_cache_free(cor_rdmsg_slab, rdm);
		return -ENOMEM;
	}

	sock_init_data(sock, (struct sock *) newcrd);
	newcrd->sk.sk_protocol = protocol;
	memset(((char *)newcrd) + sizeof(struct sock), 0,
			sizeof(struct cor_rdsock) - sizeof(struct sock));

	atomic_set(&(newcrd->connected), 0);
	INIT_LIST_HEAD(&(newcrd->socks));
	mutex_init(&(newcrd->sndbuf_lock));
	mutex_init(&(newcrd->rcvbuf_lock));
	atomic_set(&(newcrd->ready_to_read), 0);
	INIT_LIST_HEAD(&(newcrd->rcv_msgs));

	mutex_lock(&cor_rds_lock);
	if (cor_crd != 0) {
		/* only one rdaemon socket may exist; errno is an assumption */
		sock_put((struct sock *) newcrd);
		mutex_unlock(&cor_rds_lock);
		kmem_cache_free(cor_rdmsg_slab, rdm);
		return -EBUSY;
	}
	cor_crd = newcrd;

	memset(rdm, 0, sizeof(struct cor_rd_msg));
	rdm->type = CRD_KTU_SUPPORTEDVERSIONS;
	list_add_tail(&(rdm->lh), &(cor_crd->rcv_msgs));

	atomic_set(&(newcrd->ready_to_read), 1);

	mutex_unlock(&cor_rds_lock);

	sock->state = SS_UNCONNECTED;
	sock->ops = &cor_rd_proto_ops;
	sock->sk = (struct sock *) cor_crd;

	return 0;
}
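/*
 * Queues a CRD_KTU_CONNECT message for the daemon on behalf of a
 * conn_managed socket and wakes the daemon if the queue was empty. The
 * kref_get() calls pin cs for the rd_msgs, rcv_msgs and socks list
 * memberships. The errnos on the failure paths are assumptions.
 */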
int cor_rdreq_connect(struct cor_sock *cs)
{
	int rc = 0;

	struct cor_rd_msg *rdm = kmem_cache_alloc(cor_rdmsg_slab, GFP_KERNEL);

	mutex_lock(&cor_rds_lock);
	mutex_lock(&(cs->lock));

	BUG_ON(cs->type != CS_TYPE_CONN_MANAGED);
	BUG_ON(cs->data.conn_managed.cookie == 0);

	if (unlikely(cor_crd == 0 || atomic_read(&(cor_crd->connected)) == 0 ||
			cor_crd->versioninited == 0)) {
		rc = -ENETUNREACH;
		goto out;
	}

	if (unlikely(rdm == 0)) {
		rc = -ENOMEM;
		goto out;
	}

	memset(rdm, 0, sizeof(struct cor_rd_msg));

	kref_get(&(cs->ref));
	list_add_tail(&(rdm->cs_lh), &(cs->data.conn_managed.rd_msgs));
	rdm->cs = cs;
	rdm->type = CRD_KTU_CONNECT;

	if (list_empty(&(cor_crd->rcv_msgs))) {
		atomic_set(&(cor_crd->ready_to_read), 1);

		cor_crd->sk.sk_data_ready(&(cor_crd->sk));
	}
	list_add_tail(&(rdm->lh), &(cor_crd->rcv_msgs));
	kref_get(&(cs->ref));

	kref_get(&(cs->ref));
	list_add_tail(&(cs->data.conn_managed.crd_lh), &(cor_crd->socks));
	cs->data.conn_managed.in_crd_list = 1;

out:
	mutex_unlock(&(cs->lock));
	mutex_unlock(&cor_rds_lock);

	if (unlikely(rc != 0) && rdm != 0)
		kmem_cache_free(cor_rdmsg_slab, rdm);

	return rc;
}
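/*
 * Detaches a conn_managed socket from the daemon: any CONNECT messages
 * still queued for it are dropped and it is removed from the socks list,
 * releasing the corresponding references.
 */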
void cor_usersock_release(struct cor_sock *cs)
{
	mutex_lock(&cor_rds_lock);
	mutex_lock(&(cs->lock));

	if (cs->type != CS_TYPE_CONN_MANAGED)
		goto out;

	while (list_empty(&(cs->data.conn_managed.rd_msgs)) == 0) {
		struct cor_rd_msg *rdm = container_of(
				cs->data.conn_managed.rd_msgs.next,
				struct cor_rd_msg, cs_lh);

		list_del(&(rdm->lh));
		BUG_ON(rdm->cs != cs);
		list_del(&(rdm->cs_lh));
		kref_put(&(cs->ref), cor_kreffree_bug);

		kmem_cache_free(cor_rdmsg_slab, rdm);
	}

	if (cs->data.conn_managed.in_crd_list != 0) {
		list_del(&(cs->data.conn_managed.crd_lh));
		cs->data.conn_managed.in_crd_list = 0;
		kref_put(&(cs->ref), cor_kreffree_bug);
	}

out:
	mutex_unlock(&(cs->lock));
	mutex_unlock(&cor_rds_lock);
}
int __init cor_rd_init1(void)
{
	cor_rdmsg_slab = kmem_cache_create("cor_rdmsg",
			sizeof(struct cor_rd_msg), 8, 0, 0);
	if (unlikely(cor_rdmsg_slab == 0))
		return -ENOMEM;

	return 0;
}

int __init cor_rd_init2(void)
{
	return proto_register(&cor_rd_proto, 1);
}

void __exit cor_rd_exit1(void)
{
	proto_unregister(&cor_rd_proto);
}

void __exit cor_rd_exit2(void)
{
	kmem_cache_destroy(cor_rdmsg_slab);
}
MODULE_LICENSE("GPL");