4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
27 * CPU Performance Counter system calls and device driver.
29 * This module uses a combination of thread context operators, and
30 * thread-specific data to export CPU performance counters
31 * via both a system call and a driver interface.
33 * There are three access methods exported - the 'shared' device
34 * and the 'private' and 'agent' variants of the system call.
36 * The shared device treats the performance counter registers as
37 * a processor metric, regardless of the work scheduled on them.
38 * The private system call treats the performance counter registers
39 * as a property of a single lwp. This is achieved by using the
40 * thread context operators to virtualize the contents of the
41 * performance counter registers between lwps.
43 * The agent method is like the private method, except that it must
44 * be accessed via /proc's agent lwp to allow the counter context of
45 * other threads to be examined safely.
47 * The shared usage fundamentally conflicts with the agent and private usage;
48 * almost all of the complexity of the module is needed to allow these two
49 * models to co-exist in a reasonable way.
52 #include <sys/types.h>
54 #include <sys/errno.h>
59 #include <sys/processor.h>
60 #include <sys/cpuvar.h>
63 #include <sys/modctl.h>
65 #include <sys/sunddi.h>
66 #include <sys/nvpair.h>
67 #include <sys/policy.h>
68 #include <sys/machsystm.h>
69 #include <sys/cpc_impl.h>
70 #include <sys/cpc_pcbe.h>
73 static int kcpc_copyin_set(kcpc_set_t
**set
, void *ubuf
, size_t len
);
74 static int kcpc_verify_set(kcpc_set_t
*set
);
75 static uint32_t kcpc_nvlist_npairs(nvlist_t
*list
);
/*
 * Generic attributes supported regardless of processor.
 */
#define	ATTRLIST	"picnum"
/*
 * cpc() - system call to access CPU performance counters.
 *
 * NOTE(review): this block is a garbled extraction. Original source line
 * numbers are fused into the text, statements are split across lines, and
 * many lines (the switch statement, case labels, local declarations of
 * t/error/code/size/str, closing braces) are missing entirely. The bytes
 * below are preserved as-is; only comments have been added.
 *
 * Visible behavior: when invoked from the /proc agent lwp, the call may
 * target another lwp in the process (via idtot()); otherwise it operates
 * on the caller. Errors are reported through set_errno(); detailed
 * subcodes for EINVAL/EACCES are copied out through udata3.
 */
85 * System call to access CPU performance counters.
88 cpc(int cmd
, id_t lwpid
, void *udata1
, void *udata2
, void *udata3
)
97 * This CPC syscall should only be loaded if it found a PCBE to use.
99 ASSERT(pcbe_ops
!= NULL
);
101 if (curproc
->p_agenttp
== curthread
) {
103 * Only if /proc is invoking this system call from
104 * the agent thread do we allow the caller to examine
105 * the contexts of other lwps in the process. And
106 * because we know we're the agent, we know we don't
107 * have to grab p_lock because no-one else can change
108 * the state of the process.
110 if ((t
= idtot(curproc
, lwpid
)) == NULL
|| t
== curthread
)
111 return (set_errno(ESRCH
));
112 ASSERT(t
->t_tid
== lwpid
&& ttolwp(t
) != NULL
);
116 if (t
->t_cpc_set
== NULL
&& (cmd
== CPC_SAMPLE
|| cmd
== CPC_RELE
))
117 return (set_errno(EINVAL
));
/*
 * Bind path (presumably case CPC_BIND; the case label was lost in
 * extraction). Reads a packed nvlist set description from userland.
 */
122 * udata1 = pointer to packed nvlist buffer
123 * udata2 = size of packed nvlist buffer
124 * udata3 = User addr to return error subcode in.
127 rw_enter(&kcpc_cpuctx_lock
, RW_READER
);
128 if (kcpc_cpuctx
|| dtrace_cpc_in_use
) {
129 rw_exit(&kcpc_cpuctx_lock
);
130 return (set_errno(EAGAIN
));
133 if (kcpc_hw_lwp_hook() != 0) {
134 rw_exit(&kcpc_cpuctx_lock
);
135 return (set_errno(EACCES
));
139 * An LWP may only have one set bound to it at a time; if there
140 * is a set bound to this LWP already, we unbind it here.
142 if (t
->t_cpc_set
!= NULL
)
143 (void) kcpc_unbind(t
->t_cpc_set
);
144 ASSERT(t
->t_cpc_set
== NULL
);
146 if ((error
= kcpc_copyin_set(&t
->t_cpc_set
, udata1
,
147 (size_t)udata2
)) != 0) {
148 rw_exit(&kcpc_cpuctx_lock
);
149 return (set_errno(error
));
152 if ((error
= kcpc_verify_set(t
->t_cpc_set
)) != 0) {
153 rw_exit(&kcpc_cpuctx_lock
);
154 kcpc_free_set(t
->t_cpc_set
);
/* Ship the detailed verify subcode back before failing with EINVAL. */
156 if (copyout(&error
, udata3
, sizeof (error
)) == -1)
157 return (set_errno(EFAULT
));
158 return (set_errno(EINVAL
));
161 if ((error
= kcpc_bind_thread(t
->t_cpc_set
, t
, &code
)) != 0) {
162 rw_exit(&kcpc_cpuctx_lock
);
163 kcpc_free_set(t
->t_cpc_set
);
166 * EINVAL and EACCES are the only errors with more
169 if ((error
== EINVAL
|| error
== EACCES
) &&
170 copyout(&code
, udata3
, sizeof (code
)) == -1)
171 return (set_errno(EFAULT
));
172 return (set_errno(error
));
175 rw_exit(&kcpc_cpuctx_lock
);
/*
 * Sample path (presumably case CPC_SAMPLE). Only thread-bound sets may
 * be sampled via the syscall; CPU-bound contexts are rejected.
 */
179 * udata1 = pointer to user's buffer
180 * udata2 = pointer to user's hrtime
181 * udata3 = pointer to user's tick
184 * We only allow thread-bound sets to be sampled via the
185 * syscall, so if this set has a CPU-bound context, return an
188 if (t
->t_cpc_set
->ks_ctx
->kc_cpuid
!= -1)
189 return (set_errno(EINVAL
));
190 if ((error
= kcpc_sample(t
->t_cpc_set
, udata1
, udata2
,
192 return (set_errno(error
));
/* Preset/restart paths: require a set already bound to this lwp. */
198 * These are valid only if this lwp has a bound set.
200 if (t
->t_cpc_set
== NULL
)
201 return (set_errno(EINVAL
));
202 if (cmd
== CPC_PRESET
) {
204 * The preset is shipped up to us from userland in two
205 * parts. This lets us handle 64-bit values from 32-bit
206 * and 64-bit applications in the same manner.
208 * udata1 = index of request to preset
209 * udata2 = new 64-bit preset (most sig. 32 bits)
210 * udata3 = new 64-bit preset (least sig. 32 bits)
212 if ((error
= kcpc_preset(t
->t_cpc_set
, (intptr_t)udata1
,
213 ((uint64_t)(uintptr_t)udata2
<< 32ULL) |
214 (uint64_t)(uintptr_t)udata3
)) != 0)
215 return (set_errno(error
));
218 * udata[1-3] = unused
220 if ((error
= kcpc_restart(t
->t_cpc_set
)) != 0)
221 return (set_errno(error
));
/* CPCv1 compatibility enable/disable path (self-targeting only). */
230 if (t
!= curthread
|| t
->t_cpc_set
== NULL
)
231 return (set_errno(EINVAL
));
233 * Provided for backwards compatibility with CPCv1.
235 * Stop the counters and record the current counts. Use the
236 * counts as the preset to rebind a new set with the requests
237 * reconfigured as requested.
239 * udata1: 1 == enable; 0 == disable
242 rw_enter(&kcpc_cpuctx_lock
, RW_READER
);
243 if ((error
= kcpc_enable(t
,
244 cmd
, (int)(uintptr_t)udata1
)) != 0) {
245 rw_exit(&kcpc_cpuctx_lock
);
246 return (set_errno(error
));
248 rw_exit(&kcpc_cpuctx_lock
);
/* Simple query paths: number of counters and PCBE capability flags. */
251 return (cpc_ncounters
);
253 return (pcbe_ops
->pcbe_caps
);
254 case CPC_EVLIST_SIZE
:
255 case CPC_LIST_EVENTS
:
257 * udata1 = pointer to user's int or buffer
261 if ((uintptr_t)udata2
>= cpc_ncounters
)
262 return (set_errno(EINVAL
));
265 pcbe_ops
->pcbe_list_events((uintptr_t)udata2
)) + 1;
267 if (cmd
== CPC_EVLIST_SIZE
) {
268 if (suword32(udata1
, size
) == -1)
269 return (set_errno(EFAULT
));
272 pcbe_ops
->pcbe_list_events((uintptr_t)udata2
),
274 return (set_errno(EFAULT
));
277 case CPC_ATTRLIST_SIZE
:
280 * udata1 = pointer to user's int or buffer
284 * attrlist size is length of PCBE-supported attributes, plus
285 * room for "picnum\0" plus an optional ',' separator char.
287 str
= pcbe_ops
->pcbe_list_attrs();
288 size
= strlen(str
) + sizeof (SEPARATOR ATTRLIST
) + 1;
291 * A ',' separator character is necessary.
295 if (cmd
== CPC_ATTRLIST_SIZE
) {
296 if (suword32(udata1
, size
) == -1)
297 return (set_errno(EFAULT
));
300 * Copyout the PCBE attributes, and then append the
301 * generic attribute list (with separator if necessary).
303 if (copyout(str
, udata1
, strlen(str
)) == -1)
304 return (set_errno(EFAULT
));
305 if (str
[0] != '\0') {
306 if (copyout(SEPARATOR ATTRLIST
,
307 ((char *)udata1
) + strlen(str
),
308 strlen(SEPARATOR ATTRLIST
) + 1)
310 return (set_errno(EFAULT
));
312 if (copyout(ATTRLIST
,
313 (char *)udata1
+ strlen(str
),
314 strlen(ATTRLIST
) + 1) == -1)
315 return (set_errno(EFAULT
));
/* Implementation-name / cpuref query paths. */
321 * udata1 = pointer to user's buffer
325 if (cmd
== CPC_IMPL_NAME
) {
326 str
= pcbe_ops
->pcbe_impl_name();
327 ASSERT(strlen(str
) < CPC_MAX_IMPL_NAME
);
329 str
= pcbe_ops
->pcbe_cpuref();
330 ASSERT(strlen(str
) < CPC_MAX_CPUREF
);
333 if (copyout(str
, udata1
, strlen(str
) + 1) != 0)
334 return (set_errno(EFAULT
));
/* Release path (presumably case CPC_RELE): unbind the lwp's set. */
340 if ((error
= kcpc_unbind(t
->t_cpc_set
)) != 0)
341 return (set_errno(error
));
344 return (set_errno(EINVAL
));
349 * The 'shared' device allows direct access to the
350 * performance counter control register of the current CPU.
351 * The major difference between the contexts created here and those
352 * above is that the context handlers are -not- installed, thus
353 * no context switching behaviour occurs.
355 * Because they manipulate per-cpu state, these ioctls can
356 * only be invoked from a bound lwp, by a caller with the cpc_cpu privilege
357 * who can open the relevant entry in /devices (the act of holding it open
358 * causes other uses of the counters to be suspended).
360 * Note that for correct results, the caller -must- ensure that
361 * all existing per-lwp contexts are either inactive or marked invalid;
362 * that's what the open routine does.
/*
 * kcpc_ioctl() - ioctl(9E) entry point for the shared CPC device.
 *
 * NOTE(review): garbled extraction - original line numbers are fused into
 * the text and many lines (return statements, case labels, closing braces,
 * local declarations of cpuid/error/code/udata*) are missing. Bytes are
 * preserved; only comments added.
 *
 * Visible behavior: the caller must still be bound to the CPU named by the
 * device minor number (t_bind_cpu check); BIND/SAMPLE commands copy a
 * __cpc_args structure in from userland via the STRUCT_* ILP32/LP64
 * compatibility macros before dispatching.
 */
366 kcpc_ioctl(dev_t dev
, int cmd
, intptr_t data
, int flags
, cred_t
*cr
, int *rvp
)
368 kthread_t
*t
= curthread
;
376 STRUCT_DECL(__cpc_args
, args
);
378 STRUCT_INIT(args
, flags
);
380 if (curthread
->t_bind_cpu
!= getminor(dev
))
381 return (EAGAIN
); /* someone unbound it? */
383 cpuid
= getminor(dev
);
385 if (cmd
== CPCIO_BIND
|| cmd
== CPCIO_SAMPLE
) {
386 if (copyin((void *)data
, STRUCT_BUF(args
),
387 STRUCT_SIZE(args
)) == -1)
390 udata1
= STRUCT_FGETP(args
, udata1
);
391 udata2
= STRUCT_FGETP(args
, udata2
);
392 udata3
= STRUCT_FGETP(args
, udata3
);
/* CPCIO_BIND path: replace any existing set, then bind to this CPU. */
398 * udata1 = pointer to packed nvlist buffer
399 * udata2 = size of packed nvlist buffer
400 * udata3 = User addr to return error subcode in.
402 if (t
->t_cpc_set
!= NULL
) {
403 (void) kcpc_unbind(t
->t_cpc_set
);
404 ASSERT(t
->t_cpc_set
== NULL
);
407 if ((error
= kcpc_copyin_set(&t
->t_cpc_set
, udata1
,
408 (size_t)udata2
)) != 0) {
412 if ((error
= kcpc_verify_set(t
->t_cpc_set
)) != 0) {
413 kcpc_free_set(t
->t_cpc_set
);
415 if (copyout(&error
, udata3
, sizeof (error
)) == -1)
420 if ((error
= kcpc_bind_cpu(t
->t_cpc_set
, cpuid
, &code
)) != 0) {
421 kcpc_free_set(t
->t_cpc_set
);
424 * Subcodes are only returned for EINVAL and EACCESS.
426 if ((error
== EINVAL
|| error
== EACCES
) &&
427 copyout(&code
, udata3
, sizeof (code
)) == -1)
/* CPCIO_SAMPLE path: only CPU-bound sets may be sampled here. */
435 * udata1 = pointer to user's buffer
436 * udata2 = pointer to user's hrtime
437 * udata3 = pointer to user's tick
440 * Only CPU-bound sets may be sampled via the ioctl(). If this
441 * set has no CPU-bound context, return an error.
443 if (t
->t_cpc_set
== NULL
)
445 if ((error
= kcpc_sample(t
->t_cpc_set
, udata1
, udata2
,
/* CPCIO_RELE path (presumably): unbind the set bound by this lwp. */
450 if (t
->t_cpc_set
== NULL
)
452 return (kcpc_unbind(t
->t_cpc_set
));
459 * The device supports multiple opens, but only one open
460 * is allowed per processor. This is to enable multiple
461 * instances of tools looking at different processors.
463 #define KCPC_MINOR_SHARED ((minor_t)0x3fffful)
465 static ulong_t
*kcpc_cpumap
; /* bitmap of cpus */
/*
 * kcpc_open() - open(9E) entry point for the shared CPC device.
 *
 * NOTE(review): garbled extraction; error-return lines, local declarations
 * (error, cpuid) and closing braces are missing. Bytes preserved; only
 * comments added.
 *
 * Visible behavior: requires the cpc_cpu privilege (secpolicy_cpc_cpu),
 * the shared minor node, and a caller bound to a valid CPU. The first
 * open allocates the per-cpu open bitmap and invalidates all existing
 * lwp contexts; subsequent opens fail if that CPU's bit is already set.
 * On success the clone minor is rewritten to the CPU id.
 */
469 kcpc_open(dev_t
*dev
, int flags
, int otyp
, cred_t
*cr
)
474 ASSERT(pcbe_ops
!= NULL
);
476 if ((error
= secpolicy_cpc_cpu(cr
)) != 0)
478 if (getminor(*dev
) != KCPC_MINOR_SHARED
)
480 if ((cpuid
= curthread
->t_bind_cpu
) == PBIND_NONE
)
482 if (cpuid
> max_cpuid
)
485 rw_enter(&kcpc_cpuctx_lock
, RW_WRITER
);
486 if (++kcpc_cpuctx
== 1) {
487 ASSERT(kcpc_cpumap
== NULL
);
490 * Bail out if DTrace is already using the counters.
492 if (dtrace_cpc_in_use
) {
494 rw_exit(&kcpc_cpuctx_lock
);
497 kcpc_cpumap
= kmem_zalloc(BT_SIZEOFMAP(max_cpuid
+ 1),
500 * When this device is open for processor-based contexts,
501 * no further lwp-based contexts can be created.
503 * Since this is the first open, ensure that all existing
504 * contexts are invalidated.
506 kcpc_invalidate_all();
507 } else if (BT_TEST(kcpc_cpumap
, cpuid
)) {
509 rw_exit(&kcpc_cpuctx_lock
);
511 } else if (kcpc_hw_cpu_hook(cpuid
, kcpc_cpumap
) != 0) {
513 rw_exit(&kcpc_cpuctx_lock
);
516 BT_SET(kcpc_cpumap
, cpuid
);
517 rw_exit(&kcpc_cpuctx_lock
);
519 *dev
= makedevice(getmajor(*dev
), (minor_t
)cpuid
);
/*
 * kcpc_close() - close(9E) entry point for the shared CPC device.
 *
 * NOTE(review): garbled extraction; the return statement and at least one
 * interior line (presumably resetting kcpc_cpumap to NULL after the free -
 * TODO confirm against original source) are missing. Bytes preserved.
 *
 * Visible behavior: under the writer lock, clears this CPU's bit in the
 * open bitmap and frees the bitmap when the last shared open goes away.
 */
526 kcpc_close(dev_t dev
, int flags
, int otyp
, cred_t
*cr
)
528 rw_enter(&kcpc_cpuctx_lock
, RW_WRITER
);
529 BT_CLEAR(kcpc_cpumap
, getminor(dev
));
530 if (--kcpc_cpuctx
== 0) {
531 kmem_free(kcpc_cpumap
, BT_SIZEOFMAP(max_cpuid
+ 1));
534 ASSERT(kcpc_cpuctx
>= 0);
535 rw_exit(&kcpc_cpuctx_lock
);
/*
 * Sane boundaries on the size of packed lists. In bytes.
 * (Reconstructed: the extracted text had stray line-number tokens
 * fused into these definitions; values are unchanged.)
 */
#define	CPC_MIN_PACKSIZE	4
#define	CPC_MAX_PACKSIZE	10000

/*
 * Sane boundary on the number of requests a set can contain.
 */
#define	CPC_MAX_NREQS		100

/*
 * Sane boundary on the number of attributes a request can contain.
 */
#define	CPC_MAX_ATTRS		50
/*
 * kcpc_copyin_set() - copy a packed nvlist in from userland and build a
 * kcpc_set_t request set from it.
 *
 * NOTE(review): garbled extraction - many lines are missing (local
 * declarations of packbuf/nvl/reqlist/nreqs/i/j/name/string/uint32/uint64/
 * attrs/nvp_attr/attrp, error returns, break statements, closing braces,
 * and the final *inset store/return). Bytes preserved; comments only.
 *
 * Visible flow: bounds-check len, kmem_alloc a staging buffer, copyin,
 * nvlist_unpack, free the packed buffer, then walk the top-level nvlist
 * for the "flags" uint32 and the "reqs" nvlist array, and build one
 * kcpc_request_t per request nvlist (cr_flags, cr_index, cr_preset,
 * cr_event, and the cr_attr attribute sub-list, with "picnum" consumed
 * specially).
 */
557 * Copy in a packed nvlist from the user and create a request set out of it.
558 * If successful, return 0 and store a pointer to the set we've created. Returns
559 * error code on error.
562 kcpc_copyin_set(kcpc_set_t
**inset
, void *ubuf
, size_t len
)
570 nvpair_t
*nvp
= NULL
;
580 uint32_t setflags
= (uint32_t)-1;
584 if (len
< CPC_MIN_PACKSIZE
|| len
> CPC_MAX_PACKSIZE
)
587 packbuf
= kmem_alloc(len
, KM_SLEEP
);
589 if (copyin(ubuf
, packbuf
, len
) == -1) {
590 kmem_free(packbuf
, len
);
594 if (nvlist_unpack(packbuf
, len
, &nvl
, KM_SLEEP
) != 0) {
595 kmem_free(packbuf
, len
);
600 * The nvlist has been unpacked so there is no need for the packed
601 * representation from this point on.
603 kmem_free(packbuf
, len
);
/* First pass over the top-level nvlist: pick out "flags" and "reqs". */
606 while ((nvp
= nvlist_next_nvpair(nvl
, nvp
)) != NULL
) {
607 switch (nvpair_type(nvp
)) {
608 case DATA_TYPE_UINT32
:
609 if (strcmp(nvpair_name(nvp
), "flags") != 0 ||
610 nvpair_value_uint32(nvp
, &setflags
) != 0) {
615 case DATA_TYPE_NVLIST_ARRAY
:
616 if (strcmp(nvpair_name(nvp
), "reqs") != 0 ||
617 nvpair_value_nvlist_array(nvp
, &reqlist
,
631 * There should be two members in the top-level nvlist:
632 * an array of nvlists consisting of the requests, and flags.
633 * Anything else is an invalid set.
640 if (nreqs
> CPC_MAX_NREQS
) {
646 * The requests are now stored in the nvlist array at reqlist.
647 * Note that the use of kmem_zalloc() to alloc the kcpc_set_t means
648 * we don't need to call the init routines for ks_lock and ks_condv.
650 set
= kmem_zalloc(sizeof (kcpc_set_t
), KM_SLEEP
);
651 set
->ks_req
= (kcpc_request_t
*)kmem_zalloc(sizeof (kcpc_request_t
) *
653 set
->ks_nreqs
= nreqs
;
655 * If the nvlist didn't contain a flags member, setflags was initialized
656 * with an illegal value and this set will fail sanity checks later on.
658 set
->ks_flags
= setflags
;
660 * Initialize bind/unbind set synchronization.
662 set
->ks_state
&= ~KCPC_SET_BOUND
;
665 * Build the set up one request at a time, always keeping it self-
666 * consistent so we can give it to kcpc_free_set() if we need to back
667 * out and return and error.
669 for (i
= 0; i
< nreqs
; i
++) {
/* kr_picnum == -1 means "not explicitly specified" (see verify_set). */
671 set
->ks_req
[i
].kr_picnum
= -1;
672 while ((nvp
= nvlist_next_nvpair(reqlist
[i
], nvp
)) != NULL
) {
673 name
= nvpair_name(nvp
);
674 switch (nvpair_type(nvp
)) {
675 case DATA_TYPE_UINT32
:
676 if (nvpair_value_uint32(nvp
, &uint32
) == EINVAL
)
678 if (strcmp(name
, "cr_flags") == 0)
679 set
->ks_req
[i
].kr_flags
= uint32
;
680 if (strcmp(name
, "cr_index") == 0)
681 set
->ks_req
[i
].kr_index
= uint32
;
683 case DATA_TYPE_UINT64
:
684 if (nvpair_value_uint64(nvp
, &uint64
) == EINVAL
)
686 if (strcmp(name
, "cr_preset") == 0)
687 set
->ks_req
[i
].kr_preset
= uint64
;
689 case DATA_TYPE_STRING
:
690 if (nvpair_value_string(nvp
, &string
) == EINVAL
)
692 if (strcmp(name
, "cr_event") == 0)
693 (void) strncpy(set
->ks_req
[i
].kr_event
,
694 string
, CPC_MAX_EVENT_LEN
);
696 case DATA_TYPE_NVLIST
:
697 if (strcmp(name
, "cr_attr") != 0)
699 if (nvpair_value_nvlist(nvp
, &attrs
) == EINVAL
)
703 * If the picnum has been specified as an
704 * attribute, consume that attribute here and
705 * remove it from the list of attributes.
707 if (nvlist_lookup_uint64(attrs
, "picnum",
709 if (nvlist_remove(attrs
, "picnum",
710 DATA_TYPE_UINT64
) != 0)
711 panic("nvlist %p faulty",
713 set
->ks_req
[i
].kr_picnum
= uint64
;
716 if ((set
->ks_req
[i
].kr_nattrs
=
717 kcpc_nvlist_npairs(attrs
)) == 0)
720 if (set
->ks_req
[i
].kr_nattrs
> CPC_MAX_ATTRS
)
723 set
->ks_req
[i
].kr_attr
=
724 kmem_alloc(set
->ks_req
[i
].kr_nattrs
*
725 sizeof (kcpc_attr_t
), KM_SLEEP
);
/* Copy each remaining attribute (name, uint64 value) into kr_attr[]. */
728 while ((nvp_attr
= nvlist_next_nvpair(attrs
,
729 nvp_attr
)) != NULL
) {
730 attrp
= &set
->ks_req
[i
].kr_attr
[j
];
732 if (nvpair_type(nvp_attr
) !=
736 (void) strncpy(attrp
->ka_name
,
737 nvpair_name(nvp_attr
),
740 if (nvpair_value_uint64(nvp_attr
,
741 &(attrp
->ka_val
)) == EINVAL
)
745 ASSERT(j
== set
->ks_req
[i
].kr_nattrs
);
763 * Count the number of nvpairs in the supplied nvlist.
766 kcpc_nvlist_npairs(nvlist_t
*list
)
768 nvpair_t
*nvp
= NULL
;
771 while ((nvp
= nvlist_next_nvpair(list
, nvp
)) != NULL
)
/*
 * kcpc_verify_set() - sanity-check a request set built by
 * kcpc_copyin_set() before it is bound.
 *
 * NOTE(review): garbled extraction - local declarations (i, rp, n, bitmap),
 * several return statements (including the final return 0), the bitmap
 * update, and closing braces are missing. Bytes preserved; comments only.
 *
 * Visible checks: no more requests than counters; valid set flags;
 * per-request - explicit picnum within range and unique across the set
 * (tracked via a bitmap), kr_index within [0, ks_nreqs), and no unknown
 * request flags.
 */
778 * Performs sanity checks on the given set.
779 * Returns 0 if the set checks out OK.
780 * Returns a detailed error subcode, or -1 if there is no applicable subcode.
783 kcpc_verify_set(kcpc_set_t
*set
)
790 if (set
->ks_nreqs
> cpc_ncounters
)
793 if (CPC_SET_VALID_FLAGS(set
->ks_flags
) == 0)
796 for (i
= 0; i
< set
->ks_nreqs
; i
++) {
797 rp
= &set
->ks_req
[i
];
800 * The following comparison must cast cpc_ncounters to an int,
801 * because kr_picnum will be -1 if the request didn't explicitly
804 if (rp
->kr_picnum
>= (int)cpc_ncounters
)
805 return (CPC_INVALID_PICNUM
);
808 * Of the pics whose physical picnum has been specified, make
809 * sure each PIC appears only once in set.
811 if ((n
= set
->ks_req
[i
].kr_picnum
) != -1) {
812 if ((bitmap
& (1 << n
)) != 0)
818 * Make sure the requested index falls within the range of all
821 if (rp
->kr_index
< 0 || rp
->kr_index
>= set
->ks_nreqs
)
825 * Make sure there are no unknown flags.
827 if (KCPC_REQ_VALID_FLAGS(rp
->kr_flags
) == 0)
828 return (CPC_REQ_INVALID_FLAGS
);
/*
 * Character/block driver entry-point table for the CPC device.
 * NOTE(review): garbled extraction - only two of the initializer lines
 * survive; the open/close/ioctl entries and remaining members are missing.
 */
834 static struct cb_ops cb_ops
= {
837 nodev
, /* strategy */
854 kcpc_probe(dev_info_t
*devi
)
856 return (DDI_PROBE_SUCCESS
);
859 static dev_info_t
*kcpc_devi
;
/*
 * attach(9E) entry point: rejects anything but DDI_ATTACH and creates the
 * "shared" minor node.
 * NOTE(review): garbled extraction - the return type, braces, and at least
 * one interior line (presumably caching devi in kcpc_devi, which getinfo
 * would need - TODO confirm against original source) are missing.
 */
862 kcpc_attach(dev_info_t
*devi
, ddi_attach_cmd_t cmd
)
864 if (cmd
!= DDI_ATTACH
)
865 return (DDI_FAILURE
);
867 return (ddi_create_minor_node(devi
, "shared", S_IFCHR
,
868 KCPC_MINOR_SHARED
, DDI_PSEUDO
, 0));
/*
 * getinfo(9E) entry point.
 * NOTE(review): garbled extraction - the outer switch statement, the
 * *result assignments, default labels, and braces are missing. Visible
 * structure: DEVT2DEVINFO is answered only for the shared minor;
 * DEVT2INSTANCE succeeds; anything else fails.
 */
873 kcpc_getinfo(dev_info_t
*devi
, ddi_info_cmd_t cmd
, void *arg
, void **result
)
876 case DDI_INFO_DEVT2DEVINFO
:
877 switch (getminor((dev_t
)arg
)) {
878 case KCPC_MINOR_SHARED
:
880 return (DDI_SUCCESS
);
885 case DDI_INFO_DEVT2INSTANCE
:
887 return (DDI_SUCCESS
);
892 return (DDI_FAILURE
);
/*
 * Driver operations table.
 * NOTE(review): garbled extraction - only fragments of the initializer
 * (identify and quiesce entries) survive.
 */
895 static struct dev_ops dev_ops
= {
899 nulldev
, /* identify */
907 ddi_quiesce_not_needed
, /* quiesce */
/*
 * Module linkage: this module registers both a driver ("cpc sampling
 * driver") and a system call ("cpc sampling system call"), plus a 32-bit
 * syscall table entry under _SYSCALL32_IMPL.
 * NOTE(review): garbled extraction - most initializer members (ops
 * pointers, syscall handler, linkage list) are missing.
 */
910 static struct modldrv modldrv
= {
912 "cpc sampling driver",
/* Syscall entry: 5 args, no unload, 32-bit return value in rval1. */
916 static struct sysent cpc_sysent
= {
918 SE_NOUNLOAD
| SE_ARGC
| SE_32RVAL1
,
922 static struct modlsys modlsys
= {
924 "cpc sampling system call",
928 #ifdef _SYSCALL32_IMPL
929 static struct modlsys modlsys32
= {
931 "32-bit cpc sampling system call",
936 static struct modlinkage modl
= {
940 #ifdef _SYSCALL32_IMPL
/*
 * Fragments of _init() and _fini().
 * NOTE(review): garbled extraction - the function signatures, braces, and
 * the _init error return are missing. Visible behavior: _init runs
 * kcpc_init() before mod_install(); _fini is mod_remove().
 */
948 if (kcpc_init() != 0)
951 return (mod_install(&modl
));
957 return (mod_remove(&modl
));
961 _info(struct modinfo
*mi
)
963 return (mod_info(&modl
, mi
));