usr/src/lib/libsysevent/libevchannel.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <stdio.h>
#include <ctype.h>
#include <fcntl.h>
#include <errno.h>
#include <door.h>
#include <unistd.h>
#include <stddef.h>
#include <stdlib.h>
#include <strings.h>
#include <pthread.h>
#include <atomic.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/varargs.h>
#include <sys/sysevent.h>
#include <sys/sysevent_impl.h>

#include "libsysevent.h"
#include "libsysevent_impl.h"

/*
 * The functions below deal with the General Purpose Event Handling framework
 *
 * sysevent_evc_bind - create/bind application to named channel
 * sysevent_evc_unbind - unbind from previously bound/created channel
 * sysevent_evc_subscribe - subscribe to existing event channel
 * sysevent_evc_unsubscribe - unsubscribe from existing event channel
 * sysevent_evc_publish - generate a system event via an event channel
 * sysevent_evc_control - various channel-based control operations
 */
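
/*
 * Illustrative sketch (hypothetical names, not part of this library):
 * a typical consumer binds a channel, subscribes and/or publishes on it,
 * and tears everything down again.  my_handler is a hypothetical
 * int (*)(sysevent_t *, void *) callback; a sketch of one follows
 * sysevent_evc_subscribe below.
 *
 *	evchan_t *ch;
 *
 *	if (sysevent_evc_bind("com.example.mychan", &ch, EVCH_CREAT) != 0)
 *		return (errno);
 *	if (sysevent_evc_subscribe(ch, "example_sub", EC_ALL,
 *	    my_handler, NULL, 0) != 0) {
 *		(void) sysevent_evc_unbind(ch);
 *		return (errno);
 *	}
 *	...
 *	(void) sysevent_evc_unsubscribe(ch, "example_sub");
 *	(void) sysevent_evc_unbind(ch);
 */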

static void kill_door_servers(evchan_subscr_t *);

#define	misaligned(p)	((uintptr_t)(p) & 3)	/* 4-byte alignment required */

static pthread_key_t nrkey = PTHREAD_ONCE_KEY_NP;

/*
 * If the current thread is a door server thread servicing a door created
 * for us in sysevent_evc_xsubscribe, then an attempt to unsubscribe from
 * within door invocation context on the same channel will deadlock in the
 * kernel waiting for our own invocation to complete.  Such calls are
 * forbidden, and we abort if they are encountered (better than hanging
 * unkillably).
 *
 * We'd like to offer this detection to subscriptions established with
 * sysevent_evc_subscribe, but we don't have control over the door service
 * threads in that case.  Perhaps the fix is to always use door_xcreate
 * even for sysevent_evc_subscribe?
 */
static boolean_t
will_deadlock(evchan_t *scp)
{
	evchan_subscr_t *subp = pthread_getspecific(nrkey);
	evchan_impl_hdl_t *hdl = EVCHAN_IMPL_HNDL(scp);

	return (subp != NULL && subp->ev_subhead == hdl ? B_TRUE : B_FALSE);
}
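
/*
 * For illustration only (hypothetical handler): a callback running in a
 * door server thread created for sysevent_evc_xsubscribe must not do the
 * following; the will_deadlock() checks in sysevent_evc_unbind and
 * sysevent_evc_unsubscribe reject it with EDEADLK.
 *
 *	static int
 *	bad_handler(sysevent_t *ev, void *cookie)
 *	{
 *		evchan_t *ch = cookie;
 *
 *		(void) sysevent_evc_unsubscribe(ch, "example_sub");
 *		return (0);
 *	}
 */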

/*
 * Check syntax of a channel name
 */
static int
sysevent_is_chan_name(const char *str)
{
	for (; *str != '\0'; str++) {
		if (!EVCH_ISCHANCHAR(*str))
			return (0);
	}

	return (1);
}

/*
 * Check for printable characters
 */
static int
strisprint(const char *s)
{
	for (; *s != '\0'; s++) {
		if (*s < ' ' || *s > '~')
			return (0);
	}

	return (1);
}

/*
 * sysevent_evc_bind - Create/bind application to named channel
 */
int
sysevent_evc_bind(const char *channel, evchan_t **scpp, uint32_t flags)
{
	int chanlen;
	evchan_t *scp;
	sev_bind_args_t uargs;
	int ec;

	if (scpp == NULL || misaligned(scpp)) {
		return (errno = EINVAL);
	}

	/* Provide useful value in error case */
	*scpp = NULL;

	if (channel == NULL ||
	    (chanlen = strlen(channel) + 1) > MAX_CHNAME_LEN) {
		return (errno = EINVAL);
	}

	/* Check channel syntax */
	if (!sysevent_is_chan_name(channel)) {
		return (errno = EINVAL);
	}

	if (flags & ~EVCH_B_FLAGS) {
		return (errno = EINVAL);
	}

	scp = calloc(1, sizeof (evchan_impl_hdl_t));
	if (scp == NULL) {
		return (errno = ENOMEM);
	}

	/*
	 * Enable sysevent driver.  Fallback if the device link doesn't exist;
	 * this situation can arise if a channel is bound early in system
	 * startup, prior to devfsadm(8) being invoked.
	 */
	EV_FD(scp) = open(DEVSYSEVENT, O_RDWR);
	if (EV_FD(scp) == -1) {
		if (errno != ENOENT) {
			ec = errno == EACCES ? EPERM : errno;
			free(scp);
			return (errno = ec);
		}

		EV_FD(scp) = open(DEVICESYSEVENT, O_RDWR);
		if (EV_FD(scp) == -1) {
			ec = errno == EACCES ? EPERM : errno;
			free(scp);
			return (errno = ec);
		}
	}

	/*
	 * Force the fd to be closed when the process execs.  The driver will
	 * then release stale binding handles, and will also release the
	 * associated subscriptions if the EVCH_SUB_KEEP flag was not set.
	 */
	(void) fcntl(EV_FD(scp), F_SETFD, FD_CLOEXEC);

	uargs.chan_name.name = (uintptr_t)channel;
	uargs.chan_name.len = chanlen;
	uargs.flags = flags;

	if (ioctl(EV_FD(scp), SEV_CHAN_OPEN, &uargs) != 0) {
		ec = errno;
		(void) close(EV_FD(scp));
		free(scp);
		return (errno = ec);
	}

	/* Needed to detect a fork() */
	EV_PID(scp) = getpid();
	(void) mutex_init(EV_LOCK(scp), USYNC_THREAD, NULL);

	*scpp = scp;

	return (0);
}

/*
 * sysevent_evc_unbind - Unbind from previously bound/created channel
 */
int
sysevent_evc_unbind(evchan_t *scp)
{
	sev_unsubscribe_args_t uargs;
	evchan_subscr_t *subp;
	int errcp;

	if (scp == NULL || misaligned(scp))
		return (errno = EINVAL);

	if (will_deadlock(scp))
		return (errno = EDEADLK);

	(void) mutex_lock(EV_LOCK(scp));

	/*
	 * Unsubscribe, if we are in the process which did the bind.
	 */
	if (EV_PID(scp) == getpid()) {
		uargs.sid.name = '\0';
		uargs.sid.len = 0;
		/*
		 * The unsubscribe ioctl will block until all door upcalls have
		 * drained.
		 */
		if (ioctl(EV_FD(scp), SEV_UNSUBSCRIBE, (intptr_t)&uargs) != 0) {
			errcp = errno;
			(void) mutex_unlock(EV_LOCK(scp));
			return (errno = errcp);
		}
	}

	while ((subp = EV_SUB_NEXT(scp)) != NULL) {
		EV_SUB_NEXT(scp) = subp->evsub_next;

		/* If door_xcreate was applied we can clean up */
		if (subp->evsub_attr)
			kill_door_servers(subp);

		if (door_revoke(subp->evsub_door_desc) != 0 && errno == EPERM)
			(void) close(subp->evsub_door_desc);

		free(subp->evsub_sid);
		free(subp);
	}

	(void) mutex_unlock(EV_LOCK(scp));

	/*
	 * The close of the driver will do the unsubscribe if a) it is the last
	 * close and b) we are in a child which inherited subscriptions.
	 */
	(void) close(EV_FD(scp));
	(void) mutex_destroy(EV_LOCK(scp));
	free(scp);

	return (0);
}

/*
 * sysevent_evc_publish - Generate a system event via an event channel
 */
int
sysevent_evc_publish(evchan_t *scp, const char *class,
    const char *subclass, const char *vendor,
    const char *pub_name, nvlist_t *attr_list,
    uint32_t flags)
{
	sysevent_t *ev;
	sev_publish_args_t uargs;
	int rc;
	int ec;

	if (scp == NULL || misaligned(scp)) {
		return (errno = EINVAL);
	}

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid()) {
		return (errno = EINVAL);
	}

	ev = sysevent_alloc_event((char *)class, (char *)subclass,
	    (char *)vendor, (char *)pub_name, attr_list);
	if (ev == NULL) {
		return (errno);
	}

	uargs.ev.name = (uintptr_t)ev;
	uargs.ev.len = SE_SIZE(ev);
	uargs.flags = flags;

	(void) mutex_lock(EV_LOCK(scp));

	rc = ioctl(EV_FD(scp), SEV_PUBLISH, (intptr_t)&uargs);
	ec = errno;

	(void) mutex_unlock(EV_LOCK(scp));

	sysevent_free(ev);

	if (rc != 0) {
		return (ec);
	}

	return (0);
}
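
/*
 * Illustrative sketch (hypothetical class/subclass/vendor/publisher names,
 * assuming <libnvpair.h>): publishing an event that carries an attribute
 * list built with the usual libnvpair calls.
 *
 *	nvlist_t *attrs = NULL;
 *	int rc = ENOMEM;
 *
 *	if (nvlist_alloc(&attrs, NV_UNIQUE_NAME, 0) == 0 &&
 *	    nvlist_add_uint32(attrs, "value", 42) == 0) {
 *		rc = sysevent_evc_publish(ch, "class_example",
 *		    "subclass_example", "com.example", "example_pub",
 *		    attrs, EVCH_SLEEP);
 *	}
 *	nvlist_free(attrs);
 */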

/*
 * Generic callback which catches events from the kernel and calls the
 * subscriber's callback routine.
 *
 * The kernel guarantees that door upcalls are disabled once an
 * unsubscription has been issued, which is why cookie always points to a
 * valid evchan_subscr_t *.
 *
 * Furthermore it's not necessary to lock subp because the sysevent
 * framework guarantees no unsubscription until door_return.
 */
/*ARGSUSED3*/
static void
door_upcall(void *cookie, char *args, size_t alen,
    door_desc_t *ddp, uint_t ndid)
{
	evchan_subscr_t *subp = EVCHAN_SUBSCR(cookie);
	int rval = 0;

	/*
	 * If we've been invoked simply to kill the thread then
	 * exit now.
	 */
	if (subp->evsub_state == EVCHAN_SUB_STATE_CLOSING)
		pthread_exit(NULL);

	if (args == NULL || alen <= (size_t)0) {
		/* Skip callback execution */
		rval = EINVAL;
	} else {
		rval = subp->evsub_func((sysevent_t *)(void *)args,
		    subp->evsub_cookie);
	}

	/*
	 * Fill in return values for door_return
	 */
	alen = sizeof (rval);
	bcopy(&rval, args, alen);

	(void) door_return(args, alen, NULL, 0);
}

static pthread_once_t xsub_thrattr_once = PTHREAD_ONCE_INIT;
static pthread_attr_t xsub_thrattr;

static void
xsub_thrattr_init(void)
{
	(void) pthread_attr_init(&xsub_thrattr);
	(void) pthread_attr_setdetachstate(&xsub_thrattr,
	    PTHREAD_CREATE_DETACHED);
	(void) pthread_attr_setscope(&xsub_thrattr, PTHREAD_SCOPE_SYSTEM);
}

/*
 * Our door server create function is only called during initial
 * door_xcreate since we specify DOOR_NO_DEPLETION_CB.
 */
int
xsub_door_server_create(door_info_t *dip, void *(*startf)(void *),
    void *startfarg, void *cookie)
{
	evchan_subscr_t *subp = EVCHAN_SUBSCR(cookie);
	struct sysevent_subattr_impl *xsa = subp->evsub_attr;
	pthread_attr_t *thrattr;
	sigset_t oset;
	int err;

	if (subp->evsub_state == EVCHAN_SUB_STATE_CLOSING)
		return (0);	/* shouldn't happen, but just in case */

	/*
	 * If sysevent_evc_xsubscribe was called electing to use a
	 * different door server create function then let it take it
	 * from here.
	 */
	if (xsa->xs_thrcreate) {
		return (xsa->xs_thrcreate(dip, startf, startfarg,
		    xsa->xs_thrcreate_cookie));
	}

	if (xsa->xs_thrattr == NULL) {
		(void) pthread_once(&xsub_thrattr_once, xsub_thrattr_init);
		thrattr = &xsub_thrattr;
	} else {
		thrattr = xsa->xs_thrattr;
	}

	(void) pthread_sigmask(SIG_SETMASK, &xsa->xs_sigmask, &oset);
	err = pthread_create(NULL, thrattr, startf, startfarg);
	(void) pthread_sigmask(SIG_SETMASK, &oset, NULL);

	return (err == 0 ? 1 : -1);
}

void
xsub_door_server_setup(void *cookie)
{
	evchan_subscr_t *subp = EVCHAN_SUBSCR(cookie);
	struct sysevent_subattr_impl *xsa = subp->evsub_attr;

	if (xsa->xs_thrsetup == NULL) {
		(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
		(void) pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
	}

	(void) pthread_setspecific(nrkey, (void *)subp);

	if (xsa->xs_thrsetup)
		xsa->xs_thrsetup(xsa->xs_thrsetup_cookie);
}

/*
 * Cause private door server threads to exit.  We have already performed the
 * unsubscribe ioctl which stops new invocations and waits until all
 * existing invocations are complete.  So all server threads should be
 * blocked in door_return.  The door has not yet been revoked.  We will
 * invoke repeatedly after setting the evsub_state to be noticed on
 * wakeup; each invocation will result in the death of one server thread.
 *
 * You'd think it would be easier to kill these threads, such as through
 * pthread_cancel.  Unfortunately door_return is not a cancellation point,
 * and if you do cancel a thread blocked in door_return the EINTR check in
 * the door_return assembly logic causes us to loop with EINTR forever!
 */
static void
kill_door_servers(evchan_subscr_t *subp)
{
	door_arg_t da;

	bzero(&da, sizeof (da));
	subp->evsub_state = EVCHAN_SUB_STATE_CLOSING;
	membar_producer();

	(void) door_call(subp->evsub_door_desc, &da);
}

static int
sysevent_evc_subscribe_cmn(evchan_t *scp, const char *sid, const char *class,
    int (*event_handler)(sysevent_t *ev, void *cookie),
    void *cookie, uint32_t flags, struct sysevent_subattr_impl *xsa)
{
	evchan_subscr_t *subp;
	int upcall_door;
	sev_subscribe_args_t uargs;
	uint32_t sid_len;
	uint32_t class_len;
	int ec;

	if (scp == NULL || misaligned(scp) || sid == NULL || class == NULL) {
		return (errno = EINVAL);
	}

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid()) {
		return (errno = EINVAL);
	}

	if ((sid_len = strlen(sid) + 1) > MAX_SUBID_LEN || sid_len == 1 ||
	    (class_len = strlen(class) + 1) > MAX_CLASS_LEN) {
		return (errno = EINVAL);
	}

	/* Check for printable characters */
	if (!strisprint(sid)) {
		return (errno = EINVAL);
	}

	if (event_handler == NULL) {
		return (errno = EINVAL);
	}

	if (pthread_key_create_once_np(&nrkey, NULL) != 0)
		return (errno);	/* ENOMEM or EAGAIN */

	/* Create subscriber data */
	if ((subp = calloc(1, sizeof (evchan_subscr_t))) == NULL) {
		return (errno);
	}

	if ((subp->evsub_sid = strdup(sid)) == NULL) {
		ec = errno;
		free(subp);
		return (ec);
	}

	/*
	 * EC_ALL string will not be copied to kernel - NULL is assumed
	 */
	if (strcmp(class, EC_ALL) == 0) {
		class = NULL;
		class_len = 0;
	}

	/*
	 * Fill this in now for the xsub_door_server_setup dance
	 */
	subp->ev_subhead = EVCHAN_IMPL_HNDL(scp);
	subp->evsub_state = EVCHAN_SUB_STATE_ACTIVE;

	if (xsa == NULL) {
		upcall_door = door_create(door_upcall, (void *)subp,
		    DOOR_REFUSE_DESC | DOOR_NO_CANCEL);
	} else {
		subp->evsub_attr = xsa;

		/*
		 * Create a private door with exactly one thread to
		 * service the callbacks (the GPEC kernel implementation
		 * serializes deliveries for each subscriber id).
		 */
		upcall_door = door_xcreate(door_upcall, (void *)subp,
		    DOOR_REFUSE_DESC | DOOR_NO_CANCEL | DOOR_NO_DEPLETION_CB,
		    xsub_door_server_create, xsub_door_server_setup,
		    (void *)subp, 1);
	}

	if (upcall_door == -1) {
		ec = errno;
		free(subp->evsub_sid);
		free(subp);
		return (ec);
	}

	/* Complete subscriber information */
	subp->evsub_door_desc = upcall_door;
	subp->evsub_func = event_handler;
	subp->evsub_cookie = cookie;

	(void) mutex_lock(EV_LOCK(scp));

	uargs.sid.name = (uintptr_t)sid;
	uargs.sid.len = sid_len;
	uargs.class_info.name = (uintptr_t)class;
	uargs.class_info.len = class_len;
	uargs.door_desc = subp->evsub_door_desc;
	uargs.flags = flags;

	if (ioctl(EV_FD(scp), SEV_SUBSCRIBE, (intptr_t)&uargs) != 0) {
		ec = errno;
		(void) mutex_unlock(EV_LOCK(scp));
		if (xsa)
			kill_door_servers(subp);
		(void) door_revoke(upcall_door);
		free(subp->evsub_sid);
		free(subp);
		return (ec);
	}

	/* Attach to subscriber list */
	subp->evsub_next = EV_SUB_NEXT(scp);
	EV_SUB_NEXT(scp) = subp;

	(void) mutex_unlock(EV_LOCK(scp));

	return (0);
}

/*
 * sysevent_evc_subscribe - subscribe to an existing event channel
 * using a non-private door (which will create as many server threads
 * as the apparent maximum concurrency requirements suggest).
 */
int
sysevent_evc_subscribe(evchan_t *scp, const char *sid, const char *class,
    int (*event_handler)(sysevent_t *ev, void *cookie),
    void *cookie, uint32_t flags)
{
	return (sysevent_evc_subscribe_cmn(scp, sid, class, event_handler,
	    cookie, flags, NULL));
}
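
/*
 * Illustrative sketch (hypothetical handler and subscriber id): a minimal
 * subscriber.  The handler's return value is what door_upcall() above
 * copies back to the kernel.
 *
 *	static int
 *	my_handler(sysevent_t *ev, void *cookie)
 *	{
 *		... examine ev, e.g. via sysevent_get_class_name(ev) ...
 *		return (0);
 *	}
 *
 *	if (sysevent_evc_subscribe(ch, "example_sub", EC_ALL,
 *	    my_handler, NULL, 0) != 0)
 *		return (errno);
 *	...
 *	(void) sysevent_evc_unsubscribe(ch, "example_sub");
 */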

static void
subattr_dfltinit(struct sysevent_subattr_impl *xsa)
{
	(void) sigfillset(&xsa->xs_sigmask);
	(void) sigdelset(&xsa->xs_sigmask, SIGABRT);
}

static struct sysevent_subattr_impl dfltsa;
pthread_once_t dfltsa_inited = PTHREAD_ONCE_INIT;

static void
init_dfltsa(void)
{
	subattr_dfltinit(&dfltsa);
}

/*
 * sysevent_evc_xsubscribe - subscribe to an existing event channel
 * using a private door with control over thread creation.
 */
int
sysevent_evc_xsubscribe(evchan_t *scp, const char *sid, const char *class,
    int (*event_handler)(sysevent_t *ev, void *cookie),
    void *cookie, uint32_t flags, sysevent_subattr_t *attr)
{
	struct sysevent_subattr_impl *xsa;

	if (attr != NULL) {
		xsa = (struct sysevent_subattr_impl *)attr;
	} else {
		xsa = &dfltsa;
		(void) pthread_once(&dfltsa_inited, init_dfltsa);
	}

	return (sysevent_evc_subscribe_cmn(scp, sid, class, event_handler,
	    cookie, flags, xsa));
}
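
/*
 * Illustrative sketch (hypothetical names): subscribing through a private
 * door with custom subscriber attributes.  Because sysevent_evc_subscribe_cmn
 * above stores the attribute pointer in the subscription, the sketch keeps
 * attr allocated until after unsubscribing.
 *
 *	sysevent_subattr_t *attr;
 *	sigset_t mask;
 *
 *	if ((attr = sysevent_subattr_alloc()) == NULL)
 *		return (errno);
 *	(void) sigfillset(&mask);
 *	sysevent_subattr_sigmask(attr, &mask);
 *	if (sysevent_evc_xsubscribe(ch, "example_xsub", EC_ALL,
 *	    my_handler, NULL, 0, attr) != 0) {
 *		sysevent_subattr_free(attr);
 *		return (errno);
 *	}
 *	...
 *	(void) sysevent_evc_unsubscribe(ch, "example_xsub");
 *	sysevent_subattr_free(attr);
 */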

sysevent_subattr_t *
sysevent_subattr_alloc(void)
{
	struct sysevent_subattr_impl *xsa = calloc(1, sizeof (*xsa));

	if (xsa != NULL)
		subattr_dfltinit(xsa);

	return (xsa != NULL ? (sysevent_subattr_t *)xsa : NULL);
}

void
sysevent_subattr_free(sysevent_subattr_t *attr)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	free(xsa);
}

void
sysevent_subattr_thrcreate(sysevent_subattr_t *attr,
    door_xcreate_server_func_t *thrcreate, void *cookie)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	xsa->xs_thrcreate = thrcreate;
	xsa->xs_thrcreate_cookie = cookie;
}

void
sysevent_subattr_thrsetup(sysevent_subattr_t *attr,
    door_xcreate_thrsetup_func_t *thrsetup, void *cookie)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	xsa->xs_thrsetup = thrsetup;
	xsa->xs_thrsetup_cookie = cookie;
}

void
sysevent_subattr_sigmask(sysevent_subattr_t *attr, sigset_t *set)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	if (set) {
		xsa->xs_sigmask = *set;
	} else {
		(void) sigfillset(&xsa->xs_sigmask);
		(void) sigdelset(&xsa->xs_sigmask, SIGABRT);
	}
}

void
sysevent_subattr_thrattr(sysevent_subattr_t *attr, pthread_attr_t *thrattr)
{
	struct sysevent_subattr_impl *xsa =
	    (struct sysevent_subattr_impl *)attr;

	xsa->xs_thrattr = thrattr;
}

/*
 * sysevent_evc_unsubscribe - Unsubscribe from an existing event channel
 */
int
sysevent_evc_unsubscribe(evchan_t *scp, const char *sid)
{
	int all_subscribers = 0;
	sev_unsubscribe_args_t uargs;
	evchan_subscr_t *subp, *prevsubp, *tofree;
	int errcp;
	int rc;

	if (scp == NULL || misaligned(scp))
		return (errno = EINVAL);

	if (sid == NULL || strlen(sid) == 0 ||
	    (strlen(sid) >= MAX_SUBID_LEN))
		return (errno = EINVAL);

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid())
		return (errno = EINVAL);

	if (strcmp(sid, EVCH_ALLSUB) == 0) {
		all_subscribers++;
		/* Indicates all subscriber id's for this channel */
		uargs.sid.name = '\0';
		uargs.sid.len = 0;
	} else {
		uargs.sid.name = (uintptr_t)sid;
		uargs.sid.len = strlen(sid) + 1;
	}

	if (will_deadlock(scp))
		return (errno = EDEADLK);

	(void) mutex_lock(EV_LOCK(scp));

	/*
	 * The unsubscribe ioctl will block until all door upcalls have
	 * drained.
	 */
	rc = ioctl(EV_FD(scp), SEV_UNSUBSCRIBE, (intptr_t)&uargs);

	if (rc != 0) {
		errcp = errno;
		(void) mutex_unlock(EV_LOCK(scp));
		return (errno = errcp);	/* EFAULT, ENXIO, EINVAL possible */
	}

	/*
	 * Search for the matching subscriber.  If EVCH_ALLSUB was specified
	 * then the ioctl above will have returned 0 even if there are
	 * no subscriptions, so the initial EV_SUB_NEXT can be NULL.
	 */
	prevsubp = NULL;
	subp = EV_SUB_NEXT(scp);
	while (subp != NULL) {
		if (all_subscribers || strcmp(subp->evsub_sid, sid) == 0) {
			if (prevsubp == NULL) {
				EV_SUB_NEXT(scp) = subp->evsub_next;
			} else {
				prevsubp->evsub_next = subp->evsub_next;
			}

			tofree = subp;
			subp = subp->evsub_next;

			/* If door_xcreate was applied we can clean up */
			if (tofree->evsub_attr)
				kill_door_servers(tofree);

			(void) door_revoke(tofree->evsub_door_desc);
			free(tofree->evsub_sid);
			free(tofree);

			/* Freed single subscriber already? */
			if (all_subscribers == 0)
				break;
		} else {
			prevsubp = subp;
			subp = subp->evsub_next;
		}
	}

	(void) mutex_unlock(EV_LOCK(scp));

	return (0);
}

/*
 * sysevent_evc_control - Various channel-based control operations
 */
int
sysevent_evc_control(evchan_t *scp, int cmd, /* arg */ ...)
{
	va_list ap;
	uint32_t *chlenp;
	sev_control_args_t uargs;
	int rc = 0;

	if (scp == NULL || misaligned(scp)) {
		return (errno = EINVAL);
	}

	/* No inheritance of binding handles via fork() */
	if (EV_PID(scp) != getpid()) {
		return (errno = EINVAL);
	}

	va_start(ap, cmd);

	uargs.cmd = cmd;

	(void) mutex_lock(EV_LOCK(scp));

	switch (cmd) {
	case EVCH_GET_CHAN_LEN:
	case EVCH_GET_CHAN_LEN_MAX:
		chlenp = va_arg(ap, uint32_t *);
		if (chlenp == NULL || misaligned(chlenp)) {
			rc = EINVAL;
			break;
		}
		rc = ioctl(EV_FD(scp), SEV_CHAN_CONTROL, (intptr_t)&uargs);
		*chlenp = uargs.value;
		break;

	case EVCH_SET_CHAN_LEN:
		/* Range change will be handled in framework */
		uargs.value = va_arg(ap, uint32_t);
		rc = ioctl(EV_FD(scp), SEV_CHAN_CONTROL, (intptr_t)&uargs);
		break;

	default:
		rc = EINVAL;
	}

	(void) mutex_unlock(EV_LOCK(scp));

	if (rc == -1) {
		rc = errno;
	}

	va_end(ap);

	return (errno = rc);
}
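
/*
 * Illustrative sketch: querying and adjusting the channel queue length
 * with sysevent_evc_control().  The doubling policy is arbitrary.
 *
 *	uint32_t len, maxlen;
 *
 *	if (sysevent_evc_control(ch, EVCH_GET_CHAN_LEN, &len) != 0 ||
 *	    sysevent_evc_control(ch, EVCH_GET_CHAN_LEN_MAX, &maxlen) != 0)
 *		return (errno);
 *	if (2 * len <= maxlen &&
 *	    sysevent_evc_control(ch, EVCH_SET_CHAN_LEN, 2 * len) != 0)
 *		return (errno);
 */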

int
sysevent_evc_setpropnvl(evchan_t *scp, nvlist_t *nvl)
{
	sev_propnvl_args_t uargs;
	char *buf = NULL;
	size_t nvlsz = 0;
	int rc;

	if (scp == NULL || misaligned(scp))
		return (errno = EINVAL);

	if (nvl != NULL &&
	    nvlist_pack(nvl, &buf, &nvlsz, NV_ENCODE_NATIVE, 0) != 0)
		return (errno);

	uargs.packednvl.name = (uint64_t)(uintptr_t)buf;
	uargs.packednvl.len = (uint32_t)nvlsz;

	rc = ioctl(EV_FD(scp), SEV_SETPROPNVL, (intptr_t)&uargs);

	free(buf);

	return (rc);
}

int
sysevent_evc_getpropnvl(evchan_t *scp, nvlist_t **nvlp)
{
	sev_propnvl_args_t uargs;
	char buf[1024], *bufp = buf;	/* stack buffer */
	size_t sz = sizeof (buf);
	char *buf2 = NULL;		/* allocated if stack buf too small */
	int64_t expgen = -1;
	int rc;

	if (scp == NULL || misaligned(scp) || nvlp == NULL)
		return (errno = EINVAL);

	*nvlp = NULL;

again:
	uargs.packednvl.name = (uint64_t)(uintptr_t)bufp;
	uargs.packednvl.len = (uint32_t)sz;

	rc = ioctl(EV_FD(scp), SEV_GETPROPNVL, (intptr_t)&uargs);

	if (rc == E2BIG)
		return (errno = E2BIG);	/* driver refuses to copyout */

	/*
	 * If the packed nvlist is too big for the buffer size we offered
	 * then the ioctl returns EOVERFLOW and indicates in the 'len'
	 * the size required for the current property nvlist generation
	 * (itself returned in the generation member).
	 */
	if (rc == EOVERFLOW &&
	    (buf2 == NULL || uargs.generation != expgen)) {
		free(buf2);

		if ((sz = uargs.packednvl.len) > 1024 * 1024)
			return (E2BIG);

		bufp = buf2 = malloc(sz);

		if (buf2 == NULL)
			return (errno = ENOMEM);

		expgen = uargs.generation;
		goto again;
	}

	/*
	 * The chan prop nvlist can be absent, in which case the ioctl
	 * returns success and uargs.packednvl.len of 0; we have already
	 * set *nvlp to NULL.  Otherwise we must unpack the nvl.
	 */
	if (rc == 0 && uargs.packednvl.len != 0 &&
	    nvlist_unpack(bufp, uargs.packednvl.len, nvlp, 0) != 0)
		rc = EINVAL;

	free(buf2);

	return (rc ? errno = rc : 0);
}
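
/*
 * Illustrative sketch (hypothetical property name/value, assuming
 * <libnvpair.h>): attaching a property nvlist to the channel and reading
 * it back.
 *
 *	nvlist_t *nvl = NULL, *props = NULL;
 *
 *	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) == 0 &&
 *	    nvlist_add_string(nvl, "owner", "example") == 0)
 *		(void) sysevent_evc_setpropnvl(ch, nvl);
 *	nvlist_free(nvl);
 *
 *	if (sysevent_evc_getpropnvl(ch, &props) == 0 && props != NULL) {
 *		... consume props ...
 *		nvlist_free(props);
 *	}
 */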