/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2014 Garrett D'Amore <garrett@damore.org>
 */
27 #include <sys/note.h>
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/buf.h>
32 #include <sys/uio.h>
33 #include <sys/cred.h>
34 #include <sys/poll.h>
35 #include <sys/mman.h>
36 #include <sys/kmem.h>
37 #include <sys/model.h>
38 #include <sys/file.h>
39 #include <sys/proc.h>
40 #include <sys/open.h>
41 #include <sys/user.h>
42 #include <sys/t_lock.h>
43 #include <sys/vm.h>
44 #include <sys/stat.h>
45 #include <vm/hat.h>
46 #include <vm/seg.h>
47 #include <vm/seg_vn.h>
48 #include <vm/seg_dev.h>
49 #include <vm/as.h>
50 #include <sys/cmn_err.h>
51 #include <sys/cpuvar.h>
52 #include <sys/debug.h>
53 #include <sys/autoconf.h>
54 #include <sys/sunddi.h>
55 #include <sys/esunddi.h>
56 #include <sys/sunndi.h>
57 #include <sys/kstat.h>
58 #include <sys/conf.h>
59 #include <sys/ddi_impldefs.h> /* include implementation structure defs */
60 #include <sys/ndi_impldefs.h> /* include prototypes */
61 #include <sys/ddi_periodic.h>
62 #include <sys/hwconf.h>
63 #include <sys/pathname.h>
64 #include <sys/modctl.h>
65 #include <sys/epm.h>
66 #include <sys/devctl.h>
67 #include <sys/callb.h>
68 #include <sys/sysevent.h>
69 #include <sys/dacf_impl.h>
70 #include <sys/ddidevmap.h>
71 #include <sys/bootconf.h>
72 #include <sys/disp.h>
73 #include <sys/atomic.h>
74 #include <sys/promif.h>
75 #include <sys/instance.h>
76 #include <sys/sysevent/eventdefs.h>
77 #include <sys/task.h>
78 #include <sys/project.h>
79 #include <sys/taskq.h>
80 #include <sys/devpolicy.h>
81 #include <sys/ctype.h>
82 #include <net/if.h>
83 #include <sys/rctl.h>
84 #include <sys/zone.h>
85 #include <sys/clock_impl.h>
86 #include <sys/ddi.h>
87 #include <sys/modhash.h>
88 #include <sys/sunldi_impl.h>
89 #include <sys/fs/dv_node.h>
90 #include <sys/fs/snode.h>
extern pri_t minclsyspri;

extern rctl_hndl_t rc_project_locked_mem;
extern rctl_hndl_t rc_zone_locked_mem;

#ifdef DEBUG
static int sunddi_debug = 0;
#endif /* DEBUG */

/* ddi_umem_unlock miscellaneous */

static	void	i_ddi_umem_unlock_thread_start(void);

static	kmutex_t	ddi_umem_unlock_mutex; /* unlock list mutex */
static	kcondvar_t	ddi_umem_unlock_cv; /* unlock list block/unblock */
static	kthread_t	*ddi_umem_unlock_thread;
/*
 * The ddi_umem_unlock FIFO list.  NULL head pointer indicates empty list.
 */
static	struct	ddi_umem_cookie *ddi_umem_unlock_head = NULL;
static	struct	ddi_umem_cookie *ddi_umem_unlock_tail = NULL;
/*
 * DDI(Sun) Function and flag definitions:
 */

#if defined(__x86)
/*
 * Used to indicate which entries were chosen from a range.
 */
char	*chosen_reg = "chosen-reg";
#endif

/*
 * Function used to ring system console bell
 */
void (*ddi_console_bell_func)(clock_t duration);

/*
 * Creating register mappings and handling interrupts:
 */

/*
 * Generic ddi_map: Call parent to fulfill request...
 */
139 ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
140 off_t len, caddr_t *addrp)
142 dev_info_t *pdip;
144 ASSERT(dp);
145 pdip = (dev_info_t *)DEVI(dp)->devi_parent;
146 return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
147 dp, mp, offset, len, addrp));
151 * ddi_apply_range: (Called by nexi only.)
152 * Apply ranges in parent node dp, to child regspec rp...
156 ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
158 return (i_ddi_apply_range(dp, rdip, rp));
162 ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
163 off_t len)
165 ddi_map_req_t mr;
166 #if defined(__x86)
167 struct {
168 int bus;
169 int addr;
170 int size;
171 } reg, *reglist;
172 uint_t length;
173 int rc;
176 * get the 'registers' or the 'reg' property.
177 * We look up the reg property as an array of
178 * int's.
180 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
181 DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
182 if (rc != DDI_PROP_SUCCESS)
183 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
184 DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
185 if (rc == DDI_PROP_SUCCESS) {
187 * point to the required entry.
189 reg = reglist[rnumber];
190 reg.addr += offset;
191 if (len != 0)
192 reg.size = len;
194 * make a new property containing ONLY the required tuple.
196 if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
197 chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
198 != DDI_PROP_SUCCESS) {
199 cmn_err(CE_WARN, "%s%d: cannot create '%s' "
200 "property", DEVI(dip)->devi_name,
201 DEVI(dip)->devi_instance, chosen_reg);
204 * free the memory allocated by
205 * ddi_prop_lookup_int_array ().
207 ddi_prop_free((void *)reglist);
209 #endif
210 mr.map_op = DDI_MO_MAP_LOCKED;
211 mr.map_type = DDI_MT_RNUMBER;
212 mr.map_obj.rnumber = rnumber;
213 mr.map_prot = PROT_READ | PROT_WRITE;
214 mr.map_flags = DDI_MF_KERNEL_MAPPING;
215 mr.map_handlep = NULL;
216 mr.map_vers = DDI_MAP_VERSION;
219 * Call my parent to map in my regs.
222 return (ddi_map(dip, &mr, offset, len, kaddrp));
225 void
226 ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
227 off_t len)
229 ddi_map_req_t mr;
231 mr.map_op = DDI_MO_UNMAP;
232 mr.map_type = DDI_MT_RNUMBER;
233 mr.map_flags = DDI_MF_KERNEL_MAPPING;
234 mr.map_prot = PROT_READ | PROT_WRITE; /* who cares? */
235 mr.map_obj.rnumber = rnumber;
236 mr.map_handlep = NULL;
237 mr.map_vers = DDI_MAP_VERSION;
240 * Call my parent to unmap my regs.
243 (void) ddi_map(dip, &mr, offset, len, kaddrp);
244 *kaddrp = (caddr_t)0;
245 #if defined(__x86)
246 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
247 #endif
251 ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
252 off_t offset, off_t len, caddr_t *vaddrp)
254 return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
258 * nullbusmap: The/DDI default bus_map entry point for nexi
259 * not conforming to the reg/range paradigm (i.e. scsi, etc.)
260 * with no HAT/MMU layer to be programmed at this level.
262 * If the call is to map by rnumber, return an error,
263 * otherwise pass anything else up the tree to my parent.
266 nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
267 off_t offset, off_t len, caddr_t *vaddrp)
269 _NOTE(ARGUNUSED(rdip))
270 if (mp->map_type == DDI_MT_RNUMBER)
271 return (DDI_ME_UNSUPPORTED);
273 return (ddi_map(dip, mp, offset, len, vaddrp));
277 * ddi_rnumber_to_regspec: Not for use by leaf drivers.
278 * Only for use by nexi using the reg/range paradigm.
280 struct regspec *
281 ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber)
283 return (i_ddi_rnumber_to_regspec(dip, rnumber));
288 * Note that we allow the dip to be nil because we may be called
289 * prior even to the instantiation of the devinfo tree itself - all
290 * regular leaf and nexus drivers should always use a non-nil dip!
292 * We treat peek in a somewhat cavalier fashion .. assuming that we'll
293 * simply get a synchronous fault as soon as we touch a missing address.
295 * Poke is rather more carefully handled because we might poke to a write
296 * buffer, "succeed", then only find some time later that we got an
297 * asynchronous fault that indicated that the address we were writing to
298 * was not really backed by hardware.
301 static int
302 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size,
303 void *addr, void *value_p)
305 union {
306 uint64_t u64;
307 uint32_t u32;
308 uint16_t u16;
309 uint8_t u8;
310 } peekpoke_value;
312 peekpoke_ctlops_t peekpoke_args;
313 uint64_t dummy_result;
314 int rval;
316 /* Note: size is assumed to be correct; it is not checked. */
317 peekpoke_args.size = size;
318 peekpoke_args.dev_addr = (uintptr_t)addr;
319 peekpoke_args.handle = NULL;
320 peekpoke_args.repcount = 1;
321 peekpoke_args.flags = 0;
323 if (cmd == DDI_CTLOPS_POKE) {
324 switch (size) {
325 case sizeof (uint8_t):
326 peekpoke_value.u8 = *(uint8_t *)value_p;
327 break;
328 case sizeof (uint16_t):
329 peekpoke_value.u16 = *(uint16_t *)value_p;
330 break;
331 case sizeof (uint32_t):
332 peekpoke_value.u32 = *(uint32_t *)value_p;
333 break;
334 case sizeof (uint64_t):
335 peekpoke_value.u64 = *(uint64_t *)value_p;
336 break;
340 peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64;
342 if (devi != NULL)
343 rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args,
344 &dummy_result);
345 else
346 rval = peekpoke_mem(cmd, &peekpoke_args);
349 * A NULL value_p is permitted by ddi_peek(9F); discard the result.
351 if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) {
352 switch (size) {
353 case sizeof (uint8_t):
354 *(uint8_t *)value_p = peekpoke_value.u8;
355 break;
356 case sizeof (uint16_t):
357 *(uint16_t *)value_p = peekpoke_value.u16;
358 break;
359 case sizeof (uint32_t):
360 *(uint32_t *)value_p = peekpoke_value.u32;
361 break;
362 case sizeof (uint64_t):
363 *(uint64_t *)value_p = peekpoke_value.u64;
364 break;
368 return (rval);
372 * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this.
373 * they shouldn't be, but the 9f manpage kind of pseudo exposes it.
376 ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p)
378 switch (size) {
379 case sizeof (uint8_t):
380 case sizeof (uint16_t):
381 case sizeof (uint32_t):
382 case sizeof (uint64_t):
383 break;
384 default:
385 return (DDI_FAILURE);
388 return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p));
392 ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p)
394 switch (size) {
395 case sizeof (uint8_t):
396 case sizeof (uint16_t):
397 case sizeof (uint32_t):
398 case sizeof (uint64_t):
399 break;
400 default:
401 return (DDI_FAILURE);
404 return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p));
408 ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p)
410 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
411 val_p));
415 ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p)
417 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
418 val_p));
422 ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p)
424 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
425 val_p));
429 ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p)
431 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
432 val_p));
/*
 * We need to separate the old interfaces from the new ones and leave them
 * in here for a while. Previous versions of the OS defined the new interfaces
 * to the old interfaces. This way we can fix things up so that we can
 * eventually remove these interfaces.
 * e.g. A 3rd party module/driver using ddi_peek8 and built against S10
 * or earlier will actually have a reference to ddi_peekc in the binary.
 */
#ifdef _ILP32
int
ddi_peekc(dev_info_t *dip, int8_t *addr, int8_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peeks(dev_info_t *dip, int16_t *addr, int16_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peekl(dev_info_t *dip, int32_t *addr, int32_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peekd(dev_info_t *dip, int64_t *addr, int64_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
#endif /* _ILP32 */
475 ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
477 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
481 ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
483 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
487 ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
489 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
493 ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
495 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
/*
 * We need to separate the old interfaces from the new ones and leave them
 * in here for a while. Previous versions of the OS defined the new interfaces
 * to the old interfaces. This way we can fix things up so that we can
 * eventually remove these interfaces.
 * e.g. A 3rd party module/driver using ddi_poke8 and built against S10
 * or earlier will actually have a reference to ddi_pokec in the binary.
 */
#ifdef _ILP32
int
ddi_pokec(dev_info_t *dip, int8_t *addr, int8_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_pokes(dev_info_t *dip, int16_t *addr, int16_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_pokel(dev_info_t *dip, int32_t *addr, int32_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poked(dev_info_t *dip, int64_t *addr, int64_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
#endif /* _ILP32 */
533 * ddi_peekpokeio() is used primarily by the mem drivers for moving
534 * data to and from uio structures via peek and poke. Note that we
535 * use "internal" routines ddi_peek and ddi_poke to make this go
536 * slightly faster, avoiding the call overhead ..
539 ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
540 caddr_t addr, size_t len, uint_t xfersize)
542 int64_t ibuffer;
543 int8_t w8;
544 size_t sz;
545 int o;
547 if (xfersize > sizeof (long))
548 xfersize = sizeof (long);
550 while (len != 0) {
551 if ((len | (uintptr_t)addr) & 1) {
552 sz = sizeof (int8_t);
553 if (rw == UIO_WRITE) {
554 if ((o = uwritec(uio)) == -1)
555 return (DDI_FAILURE);
556 if (ddi_poke8(devi, (int8_t *)addr,
557 (int8_t)o) != DDI_SUCCESS)
558 return (DDI_FAILURE);
559 } else {
560 if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
561 (int8_t *)addr, &w8) != DDI_SUCCESS)
562 return (DDI_FAILURE);
563 if (ureadc(w8, uio))
564 return (DDI_FAILURE);
566 } else {
567 switch (xfersize) {
568 case sizeof (int64_t):
569 if (((len | (uintptr_t)addr) &
570 (sizeof (int64_t) - 1)) == 0) {
571 sz = xfersize;
572 break;
574 /*FALLTHROUGH*/
575 case sizeof (int32_t):
576 if (((len | (uintptr_t)addr) &
577 (sizeof (int32_t) - 1)) == 0) {
578 sz = xfersize;
579 break;
581 /*FALLTHROUGH*/
582 default:
584 * This still assumes that we might have an
585 * I/O bus out there that permits 16-bit
586 * transfers (and that it would be upset by
587 * 32-bit transfers from such locations).
589 sz = sizeof (int16_t);
590 break;
593 if (rw == UIO_READ) {
594 if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
595 addr, &ibuffer) != DDI_SUCCESS)
596 return (DDI_FAILURE);
599 if (uiomove(&ibuffer, sz, rw, uio))
600 return (DDI_FAILURE);
602 if (rw == UIO_WRITE) {
603 if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
604 addr, &ibuffer) != DDI_SUCCESS)
605 return (DDI_FAILURE);
608 addr += sz;
609 len -= sz;
611 return (DDI_SUCCESS);
615 * These routines are used by drivers that do layered ioctls
616 * On sparc, they're implemented in assembler to avoid spilling
617 * register windows in the common (copyin) case ..
620 ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
622 if (flags & FKIOCTL)
623 return (kcopy(buf, kernbuf, size) ? -1 : 0);
624 return (copyin(buf, kernbuf, size));
628 ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
630 if (flags & FKIOCTL)
631 return (kcopy(buf, kernbuf, size) ? -1 : 0);
632 return (copyout(buf, kernbuf, size));
636 * Conversions in nexus pagesize units. We don't duplicate the
637 * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
638 * routines anyway.
640 unsigned long
641 ddi_btop(dev_info_t *dip, unsigned long bytes)
643 unsigned long pages;
645 (void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
646 return (pages);
649 unsigned long
650 ddi_btopr(dev_info_t *dip, unsigned long bytes)
652 unsigned long pages;
654 (void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
655 return (pages);
658 unsigned long
659 ddi_ptob(dev_info_t *dip, unsigned long pages)
661 unsigned long bytes;
663 (void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
664 return (bytes);
667 unsigned int
668 ddi_enter_critical(void)
670 return ((uint_t)spl7());
673 void
674 ddi_exit_critical(unsigned int spl)
676 splx((int)spl);
680 * Nexus ctlops punter
684 * Request bus_ctl parent to handle a bus_ctl request
686 * (The sparc version is in sparc_ddi.s)
689 ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
691 int (*fp)();
693 if (!d || !r)
694 return (DDI_FAILURE);
696 if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
697 return (DDI_FAILURE);
699 fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
700 return ((*fp)(d, r, op, a, v));
705 * DMA/DVMA setup
709 * Request bus_dma_ctl parent to fiddle with a dma request.
711 * (The sparc version is in sparc_subr.s)
714 ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
715 ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
716 off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
718 int (*fp)();
720 if (dip != ddi_root_node())
721 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
722 fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
723 return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
727 * For all DMA control functions, call the DMA control
728 * routine and return status.
730 * Just plain assume that the parent is to be called.
731 * If a nexus driver or a thread outside the framework
732 * of a nexus driver or a leaf driver calls these functions,
733 * it is up to them to deal with the fact that the parent's
734 * bus_dma_ctl function will be the first one called.
737 #define HD ((ddi_dma_impl_t *)h)->dmai_rdip
740 * This routine is left in place to satisfy link dependencies
741 * for any 3rd party nexus drivers that rely on it. It is never
742 * called, though.
744 /*ARGSUSED*/
746 ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
747 struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
749 return (DDI_FAILURE);
754 * The SPARC versions of these routines are done in assembler to
755 * save register windows, so they're in sparc_subr.s.
759 ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
760 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
762 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
763 int (*)(caddr_t), caddr_t, ddi_dma_handle_t *);
765 if (dip != ddi_root_node())
766 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
768 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_allochdl;
769 return ((*funcp)(dip, rdip, attr, waitfp, arg, handlep));
773 ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep)
775 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
777 if (dip != ddi_root_node())
778 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
780 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_freehdl;
781 return ((*funcp)(dip, rdip, handlep));
785 ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
786 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
787 ddi_dma_cookie_t *cp, uint_t *ccountp)
789 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
790 struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);
792 if (dip != ddi_root_node())
793 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
795 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
796 return ((*funcp)(dip, rdip, handle, dmareq, cp, ccountp));
800 ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
801 ddi_dma_handle_t handle)
803 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
805 if (dip != ddi_root_node())
806 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
808 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
809 return ((*funcp)(dip, rdip, handle));
814 ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
815 ddi_dma_handle_t handle, off_t off, size_t len,
816 uint_t cache_flags)
818 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
819 off_t, size_t, uint_t);
821 if (dip != ddi_root_node())
822 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
824 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
825 return ((*funcp)(dip, rdip, handle, off, len, cache_flags));
829 ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
830 ddi_dma_handle_t handle, uint_t win, off_t *offp,
831 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
833 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
834 uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
836 if (dip != ddi_root_node())
837 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;
839 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_win;
840 return ((*funcp)(dip, rdip, handle, win, offp, lenp,
841 cookiep, ccountp));
845 ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
847 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
848 dev_info_t *dip, *rdip;
849 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
850 size_t, uint_t);
853 * the DMA nexus driver will set DMP_NOSYNC if the
854 * platform does not require any sync operation. For
855 * example if the memory is uncached or consistent
856 * and without any I/O write buffers involved.
858 if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
859 return (DDI_SUCCESS);
861 dip = rdip = hp->dmai_rdip;
862 if (dip != ddi_root_node())
863 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
864 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
865 return ((*funcp)(dip, rdip, h, o, l, whom));
869 ddi_dma_unbind_handle(ddi_dma_handle_t h)
871 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
872 dev_info_t *dip, *rdip;
873 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
875 dip = rdip = hp->dmai_rdip;
876 if (dip != ddi_root_node())
877 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
878 funcp = DEVI(rdip)->devi_bus_dma_unbindfunc;
879 return ((*funcp)(dip, rdip, h));
884 * DMA burst sizes, and transfer minimums
888 ddi_dma_burstsizes(ddi_dma_handle_t handle)
890 ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
892 if (!dimp)
893 return (0);
894 else
895 return (dimp->dmai_burstsizes);
899 * Given two DMA attribute structures, apply the attributes
900 * of one to the other, following the rules of attributes
901 * and the wishes of the caller.
903 * The rules of DMA attribute structures are that you cannot
904 * make things *less* restrictive as you apply one set
905 * of attributes to another.
908 void
909 ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
911 attr->dma_attr_addr_lo =
912 MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
913 attr->dma_attr_addr_hi =
914 MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
915 attr->dma_attr_count_max =
916 MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
917 attr->dma_attr_align =
918 MAX(attr->dma_attr_align, mod->dma_attr_align);
919 attr->dma_attr_burstsizes =
920 (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
921 attr->dma_attr_minxfer =
922 maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
923 attr->dma_attr_maxxfer =
924 MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
925 attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
926 attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
927 (uint_t)mod->dma_attr_sgllen);
928 attr->dma_attr_granular =
929 MAX(attr->dma_attr_granular, mod->dma_attr_granular);
933 * mmap/segmap interface:
937 * ddi_segmap: setup the default segment driver. Calls the drivers
938 * XXmmap routine to validate the range to be mapped.
939 * Return ENXIO of the range is not valid. Create
940 * a seg_dev segment that contains all of the
941 * necessary information and will reference the
942 * default segment driver routines. It returns zero
943 * on success or non-zero on failure.
946 ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
947 uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
949 extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
950 off_t, uint_t, uint_t, uint_t, struct cred *);
952 return (spec_segmap(dev, offset, asp, addrp, len,
953 prot, maxprot, flags, credp));
957 * ddi_map_fault: Resolve mappings at fault time. Used by segment
958 * drivers. Allows each successive parent to resolve
959 * address translations and add its mappings to the
960 * mapping list supplied in the page structure. It
961 * returns zero on success or non-zero on failure.
965 ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
966 caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
968 return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
972 * ddi_device_mapping_check: Called from ddi_segmap_setup.
973 * Invokes platform specific DDI to determine whether attributes specified
974 * in attr(9s) are valid for the region of memory that will be made
975 * available for direct access to user process via the mmap(2) system call.
978 ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
979 uint_t rnumber, uint_t *hat_flags)
981 ddi_acc_handle_t handle;
982 ddi_map_req_t mr;
983 ddi_acc_hdl_t *hp;
984 int result;
985 dev_info_t *dip;
988 * we use e_ddi_hold_devi_by_dev to search for the devi. We
989 * release it immediately since it should already be held by
990 * a devfs vnode.
992 if ((dip =
993 e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
994 return (-1);
995 ddi_release_devi(dip); /* for e_ddi_hold_devi_by_dev() */
998 * Allocate and initialize the common elements of data
999 * access handle.
1001 handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
1002 if (handle == NULL)
1003 return (-1);
1005 hp = impl_acc_hdl_get(handle);
1006 hp->ah_vers = VERS_ACCHDL;
1007 hp->ah_dip = dip;
1008 hp->ah_rnumber = rnumber;
1009 hp->ah_offset = 0;
1010 hp->ah_len = 0;
1011 hp->ah_acc = *accattrp;
1014 * Set up the mapping request and call to parent.
1016 mr.map_op = DDI_MO_MAP_HANDLE;
1017 mr.map_type = DDI_MT_RNUMBER;
1018 mr.map_obj.rnumber = rnumber;
1019 mr.map_prot = PROT_READ | PROT_WRITE;
1020 mr.map_flags = DDI_MF_KERNEL_MAPPING;
1021 mr.map_handlep = hp;
1022 mr.map_vers = DDI_MAP_VERSION;
1023 result = ddi_map(dip, &mr, 0, 0, NULL);
1026 * Region must be mappable, pick up flags from the framework.
1028 *hat_flags = hp->ah_hat_flags;
1030 impl_acc_hdl_free(handle);
1033 * check for end result.
1035 if (result != DDI_SUCCESS)
1036 return (-1);
1037 return (0);
1042 * Property functions: See also, ddipropdefs.h.
1044 * These functions are the framework for the property functions,
1045 * i.e. they support software defined properties. All implementation
1046 * specific property handling (i.e.: self-identifying devices and
1047 * PROM defined properties are handled in the implementation specific
1048 * functions (defined in ddi_implfuncs.h).
1052 * nopropop: Shouldn't be called, right?
1055 nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1056 char *name, caddr_t valuep, int *lengthp)
1058 _NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
1059 return (DDI_PROP_NOT_FOUND);
#ifdef	DDI_PROP_DEBUG
int ddi_prop_debug_flag = 0;

/*
 * Enable/disable property debugging; returns the previous setting.
 * Announces the transition whenever either the old or new state is on.
 */
int
ddi_prop_debug(int enable)
{
	int prev = ddi_prop_debug_flag;

	if ((enable != 0) || (prev != 0))
		printf("ddi_prop_debug: debugging %s\n",
		    enable ? "enabled" : "disabled");
	ddi_prop_debug_flag = enable;
	return (prev);
}
#endif	/* DDI_PROP_DEBUG */
1080 * Search a property list for a match, if found return pointer
1081 * to matching prop struct, else return NULL.
1084 ddi_prop_t *
1085 i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
1087 ddi_prop_t *propp;
1090 * find the property in child's devinfo:
1091 * Search order defined by this search function is first matching
1092 * property with input dev == DDI_DEV_T_ANY matching any dev or
1093 * dev == propp->prop_dev, name == propp->name, and the correct
1094 * data type as specified in the flags. If a DDI_DEV_T_NONE dev
1095 * value made it this far then it implies a DDI_DEV_T_ANY search.
1097 if (dev == DDI_DEV_T_NONE)
1098 dev = DDI_DEV_T_ANY;
1100 for (propp = *list_head; propp != NULL; propp = propp->prop_next) {
1102 if (!DDI_STRSAME(propp->prop_name, name))
1103 continue;
1105 if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
1106 continue;
1108 if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1109 continue;
1111 return (propp);
1114 return ((ddi_prop_t *)0);
1118 * Search for property within devnames structures
1120 ddi_prop_t *
1121 i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
1123 major_t major;
1124 struct devnames *dnp;
1125 ddi_prop_t *propp;
1128 * Valid dev_t value is needed to index into the
1129 * correct devnames entry, therefore a dev_t
1130 * value of DDI_DEV_T_ANY is not appropriate.
1132 ASSERT(dev != DDI_DEV_T_ANY);
1133 if (dev == DDI_DEV_T_ANY) {
1134 return ((ddi_prop_t *)0);
1137 major = getmajor(dev);
1138 dnp = &(devnamesp[major]);
1140 if (dnp->dn_global_prop_ptr == NULL)
1141 return ((ddi_prop_t *)0);
1143 LOCK_DEV_OPS(&dnp->dn_lock);
1145 for (propp = dnp->dn_global_prop_ptr->prop_list;
1146 propp != NULL;
1147 propp = (ddi_prop_t *)propp->prop_next) {
1149 if (!DDI_STRSAME(propp->prop_name, name))
1150 continue;
1152 if ((!(flags & DDI_PROP_ROOTNEX_GLOBAL)) &&
1153 (!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
1154 continue;
1156 if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1157 continue;
1159 /* Property found, return it */
1160 UNLOCK_DEV_OPS(&dnp->dn_lock);
1161 return (propp);
1164 UNLOCK_DEV_OPS(&dnp->dn_lock);
1165 return ((ddi_prop_t *)0);
1168 static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";
1171 * ddi_prop_search_global:
1172 * Search the global property list within devnames
1173 * for the named property. Return the encoded value.
1175 static int
1176 i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
1177 void *valuep, uint_t *lengthp)
1179 ddi_prop_t *propp;
1180 caddr_t buffer;
1182 propp = i_ddi_search_global_prop(dev, name, flags);
1184 /* Property NOT found, bail */
1185 if (propp == (ddi_prop_t *)0)
1186 return (DDI_PROP_NOT_FOUND);
1188 if (propp->prop_flags & DDI_PROP_UNDEF_IT)
1189 return (DDI_PROP_UNDEFINED);
1191 if ((buffer = kmem_alloc(propp->prop_len,
1192 (flags & DDI_PROP_CANSLEEP) ? KM_SLEEP : KM_NOSLEEP)) == NULL) {
1193 cmn_err(CE_CONT, prop_no_mem_msg, name);
1194 return (DDI_PROP_NO_MEMORY);
1198 * Return the encoded data
1200 *(caddr_t *)valuep = buffer;
1201 *lengthp = propp->prop_len;
1202 bcopy(propp->prop_val, buffer, propp->prop_len);
1204 return (DDI_PROP_SUCCESS);
1208 * ddi_prop_search_common: Lookup and return the encoded value
1211 ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1212 uint_t flags, char *name, void *valuep, uint_t *lengthp)
1214 ddi_prop_t *propp;
1215 int i;
1216 caddr_t buffer;
1217 caddr_t prealloc = NULL;
1218 int plength = 0;
1219 dev_info_t *pdip;
1220 int (*bop)();
1222 /*CONSTANTCONDITION*/
1223 while (1) {
1225 mutex_enter(&(DEVI(dip)->devi_lock));
1229 * find the property in child's devinfo:
1230 * Search order is:
1231 * 1. driver defined properties
1232 * 2. system defined properties
1233 * 3. driver global properties
1234 * 4. boot defined properties
1237 propp = i_ddi_prop_search(dev, name, flags,
1238 &(DEVI(dip)->devi_drv_prop_ptr));
1239 if (propp == NULL) {
1240 propp = i_ddi_prop_search(dev, name, flags,
1241 &(DEVI(dip)->devi_sys_prop_ptr));
1243 if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
1244 propp = i_ddi_prop_search(dev, name, flags,
1245 &DEVI(dip)->devi_global_prop_list->prop_list);
1248 if (propp == NULL) {
1249 propp = i_ddi_prop_search(dev, name, flags,
1250 &(DEVI(dip)->devi_hw_prop_ptr));
1254 * Software property found?
1256 if (propp != (ddi_prop_t *)0) {
1259 * If explicit undefine, return now.
1261 if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
1262 mutex_exit(&(DEVI(dip)->devi_lock));
1263 if (prealloc)
1264 kmem_free(prealloc, plength);
1265 return (DDI_PROP_UNDEFINED);
1269 * If we only want to know if it exists, return now
1271 if (prop_op == PROP_EXISTS) {
1272 mutex_exit(&(DEVI(dip)->devi_lock));
1273 ASSERT(prealloc == NULL);
1274 return (DDI_PROP_SUCCESS);
1278 * If length only request or prop length == 0,
1279 * service request and return now.
1281 if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
1282 *lengthp = propp->prop_len;
1285 * if prop_op is PROP_LEN_AND_VAL_ALLOC
1286 * that means prop_len is 0, so set valuep
1287 * also to NULL
1289 if (prop_op == PROP_LEN_AND_VAL_ALLOC)
1290 *(caddr_t *)valuep = NULL;
1292 mutex_exit(&(DEVI(dip)->devi_lock));
1293 if (prealloc)
1294 kmem_free(prealloc, plength);
1295 return (DDI_PROP_SUCCESS);
1299 * If LEN_AND_VAL_ALLOC and the request can sleep,
1300 * drop the mutex, allocate the buffer, and go
1301 * through the loop again. If we already allocated
1302 * the buffer, and the size of the property changed,
1303 * keep trying...
1305 if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
1306 (flags & DDI_PROP_CANSLEEP)) {
1307 if (prealloc && (propp->prop_len != plength)) {
1308 kmem_free(prealloc, plength);
1309 prealloc = NULL;
1311 if (prealloc == NULL) {
1312 plength = propp->prop_len;
1313 mutex_exit(&(DEVI(dip)->devi_lock));
1314 prealloc = kmem_alloc(plength,
1315 KM_SLEEP);
1316 continue;
1321 * Allocate buffer, if required. Either way,
1322 * set `buffer' variable.
1324 i = *lengthp; /* Get callers length */
1325 *lengthp = propp->prop_len; /* Set callers length */
1327 switch (prop_op) {
1329 case PROP_LEN_AND_VAL_ALLOC:
1331 if (prealloc == NULL) {
1332 buffer = kmem_alloc(propp->prop_len,
1333 KM_NOSLEEP);
1334 } else {
1335 buffer = prealloc;
1338 if (buffer == NULL) {
1339 mutex_exit(&(DEVI(dip)->devi_lock));
1340 cmn_err(CE_CONT, prop_no_mem_msg, name);
1341 return (DDI_PROP_NO_MEMORY);
1343 /* Set callers buf ptr */
1344 *(caddr_t *)valuep = buffer;
1345 break;
1347 case PROP_LEN_AND_VAL_BUF:
1349 if (propp->prop_len > (i)) {
1350 mutex_exit(&(DEVI(dip)->devi_lock));
1351 return (DDI_PROP_BUF_TOO_SMALL);
1354 buffer = valuep; /* Get callers buf ptr */
1355 break;
1357 default:
1358 break;
1362 * Do the copy.
1364 bcopy(propp->prop_val, buffer, propp->prop_len);
1365 mutex_exit(&(DEVI(dip)->devi_lock));
1366 return (DDI_PROP_SUCCESS);
1369 mutex_exit(&(DEVI(dip)->devi_lock));
1370 if (prealloc)
1371 kmem_free(prealloc, plength);
1372 prealloc = NULL;
1375 * Prop not found, call parent bus_ops to deal with possible
1376 * h/w layer (possible PROM defined props, etc.) and to
1377 * possibly ascend the hierarchy, if allowed by flags.
1379 pdip = (dev_info_t *)DEVI(dip)->devi_parent;
1382 * One last call for the root driver PROM props?
1384 if (dip == ddi_root_node()) {
1385 return (ddi_bus_prop_op(dev, dip, dip, prop_op,
1386 flags, name, valuep, (int *)lengthp));
1390 * We may have been called to check for properties
1391 * within a single devinfo node that has no parent -
1392 * see make_prop()
1394 if (pdip == NULL) {
1395 ASSERT((flags &
1396 (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
1397 (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
1398 return (DDI_PROP_NOT_FOUND);
1402 * Instead of recursing, we do iterative calls up the tree.
1403 * As a bit of optimization, skip the bus_op level if the
1404 * node is a s/w node and if the parent's bus_prop_op function
1405 * is `ddi_bus_prop_op', because we know that in this case,
1406 * this function does nothing.
1408 * 4225415: If the parent isn't attached, or the child
1409 * hasn't been named by the parent yet, use the default
1410 * ddi_bus_prop_op as a proxy for the parent. This
1411 * allows property lookups in any child/parent state to
1412 * include 'prom' and inherited properties, even when
1413 * there are no drivers attached to the child or parent.
1416 bop = ddi_bus_prop_op;
1417 if (i_ddi_devi_attached(pdip) &&
1418 (i_ddi_node_state(dip) >= DS_INITIALIZED))
1419 bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;
1421 i = DDI_PROP_NOT_FOUND;
1423 if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
1424 i = (*bop)(dev, pdip, dip, prop_op,
1425 flags | DDI_PROP_DONTPASS,
1426 name, valuep, lengthp);
1429 if ((flags & DDI_PROP_DONTPASS) ||
1430 (i != DDI_PROP_NOT_FOUND))
1431 return (i);
1433 dip = pdip;
1435 /*NOTREACHED*/
1440 * ddi_prop_op: The basic property operator for drivers.
1442 * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
1444 * prop_op valuep
1445 * ------ ------
1447 * PROP_LEN <unused>
1449 * PROP_LEN_AND_VAL_BUF Pointer to callers buffer
1451 * PROP_LEN_AND_VAL_ALLOC Address of callers pointer (will be set to
1452 * address of allocated buffer, if successful)
1455 ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1456 char *name, caddr_t valuep, int *lengthp)
1458 int i;
1460 ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0);
1463 * If this was originally an LDI prop lookup then we bail here.
1464 * The reason is that the LDI property lookup interfaces first call
1465 * a drivers prop_op() entry point to allow it to override
1466 * properties. But if we've made it here, then the driver hasn't
1467 * overriden any properties. We don't want to continue with the
1468 * property search here because we don't have any type inforamtion.
1469 * When we return failure, the LDI interfaces will then proceed to
1470 * call the typed property interfaces to look up the property.
1472 if (mod_flags & DDI_PROP_DYNAMIC)
1473 return (DDI_PROP_NOT_FOUND);
1476 * check for pre-typed property consumer asking for typed property:
1477 * see e_ddi_getprop_int64.
1479 if (mod_flags & DDI_PROP_CONSUMER_TYPED)
1480 mod_flags |= DDI_PROP_TYPE_INT64;
1481 mod_flags |= DDI_PROP_TYPE_ANY;
1483 i = ddi_prop_search_common(dev, dip, prop_op,
1484 mod_flags, name, valuep, (uint_t *)lengthp);
1485 if (i == DDI_PROP_FOUND_1275)
1486 return (DDI_PROP_SUCCESS);
1487 return (i);
1491 * ddi_prop_op_nblocks_blksize: The basic property operator for drivers that
1492 * maintain size in number of blksize blocks. Provides a dynamic property
1493 * implementation for size oriented properties based on nblocks64 and blksize
1494 * values passed in by the driver. Fallback to ddi_prop_op if the nblocks64
1495 * is too large. This interface should not be used with a nblocks64 that
1496 * represents the driver's idea of how to represent unknown, if nblocks is
1497 * unknown use ddi_prop_op.
1500 ddi_prop_op_nblocks_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1501 int mod_flags, char *name, caddr_t valuep, int *lengthp,
1502 uint64_t nblocks64, uint_t blksize)
1504 uint64_t size64;
1505 int blkshift;
1507 /* convert block size to shift value */
1508 ASSERT(BIT_ONLYONESET(blksize));
1509 blkshift = highbit(blksize) - 1;
1512 * There is no point in supporting nblocks64 values that don't have
1513 * an accurate uint64_t byte count representation.
1515 if (nblocks64 >= (UINT64_MAX >> blkshift))
1516 return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1517 name, valuep, lengthp));
1519 size64 = nblocks64 << blkshift;
1520 return (ddi_prop_op_size_blksize(dev, dip, prop_op, mod_flags,
1521 name, valuep, lengthp, size64, blksize));
1525 * ddi_prop_op_nblocks: ddi_prop_op_nblocks_blksize with DEV_BSIZE blksize.
1528 ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1529 int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
1531 return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op,
1532 mod_flags, name, valuep, lengthp, nblocks64, DEV_BSIZE));
1536 * ddi_prop_op_size_blksize: The basic property operator for block drivers that
1537 * maintain size in bytes. Provides a of dynamic property implementation for
1538 * size oriented properties based on size64 value and blksize passed in by the
1539 * driver. Fallback to ddi_prop_op if the size64 is too large. This interface
1540 * should not be used with a size64 that represents the driver's idea of how
1541 * to represent unknown, if size is unknown use ddi_prop_op.
1543 * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
1544 * integers. While the most likely interface to request them ([bc]devi_size)
1545 * is declared int (signed) there is no enforcement of this, which means we
1546 * can't enforce limitations here without risking regression.
1549 ddi_prop_op_size_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1550 int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64,
1551 uint_t blksize)
1553 uint64_t nblocks64;
1554 int callers_length;
1555 caddr_t buffer;
1556 int blkshift;
1559 * This is a kludge to support capture of size(9P) pure dynamic
1560 * properties in snapshots for non-cmlb code (without exposing
1561 * i_ddi_prop_dyn changes). When everyone uses cmlb, this code
1562 * should be removed.
1564 if (i_ddi_prop_dyn_driver_get(dip) == NULL) {
1565 static i_ddi_prop_dyn_t prop_dyn_size[] = {
1566 {"Size", DDI_PROP_TYPE_INT64, S_IFCHR},
1567 {"Nblocks", DDI_PROP_TYPE_INT64, S_IFBLK},
1568 {NULL}
1570 i_ddi_prop_dyn_driver_set(dip, prop_dyn_size);
1573 /* convert block size to shift value */
1574 ASSERT(BIT_ONLYONESET(blksize));
1575 blkshift = highbit(blksize) - 1;
1577 /* compute DEV_BSIZE nblocks value */
1578 nblocks64 = size64 >> blkshift;
1580 /* get callers length, establish length of our dynamic properties */
1581 callers_length = *lengthp;
1583 if (strcmp(name, "Nblocks") == 0)
1584 *lengthp = sizeof (uint64_t);
1585 else if (strcmp(name, "Size") == 0)
1586 *lengthp = sizeof (uint64_t);
1587 else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
1588 *lengthp = sizeof (uint32_t);
1589 else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
1590 *lengthp = sizeof (uint32_t);
1591 else if ((strcmp(name, "blksize") == 0) && (blksize < UINT_MAX))
1592 *lengthp = sizeof (uint32_t);
1593 else {
1594 /* fallback to ddi_prop_op */
1595 return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1596 name, valuep, lengthp));
1599 /* service request for the length of the property */
1600 if (prop_op == PROP_LEN)
1601 return (DDI_PROP_SUCCESS);
1603 switch (prop_op) {
1604 case PROP_LEN_AND_VAL_ALLOC:
1605 if ((buffer = kmem_alloc(*lengthp,
1606 (mod_flags & DDI_PROP_CANSLEEP) ?
1607 KM_SLEEP : KM_NOSLEEP)) == NULL)
1608 return (DDI_PROP_NO_MEMORY);
1610 *(caddr_t *)valuep = buffer; /* set callers buf ptr */
1611 break;
1613 case PROP_LEN_AND_VAL_BUF:
1614 /* the length of the property and the request must match */
1615 if (callers_length != *lengthp)
1616 return (DDI_PROP_INVAL_ARG);
1618 buffer = valuep; /* get callers buf ptr */
1619 break;
1621 default:
1622 return (DDI_PROP_INVAL_ARG);
1625 /* transfer the value into the buffer */
1626 if (strcmp(name, "Nblocks") == 0)
1627 *((uint64_t *)buffer) = nblocks64;
1628 else if (strcmp(name, "Size") == 0)
1629 *((uint64_t *)buffer) = size64;
1630 else if (strcmp(name, "nblocks") == 0)
1631 *((uint32_t *)buffer) = (uint32_t)nblocks64;
1632 else if (strcmp(name, "size") == 0)
1633 *((uint32_t *)buffer) = (uint32_t)size64;
1634 else if (strcmp(name, "blksize") == 0)
1635 *((uint32_t *)buffer) = (uint32_t)blksize;
1636 return (DDI_PROP_SUCCESS);
1640 * ddi_prop_op_size: ddi_prop_op_size_blksize with DEV_BSIZE block size.
1643 ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1644 int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
1646 return (ddi_prop_op_size_blksize(dev, dip, prop_op,
1647 mod_flags, name, valuep, lengthp, size64, DEV_BSIZE));
/*
 * Variable length props...
 */
1655 * ddi_getlongprop: Get variable length property len+val into a buffer
1656 * allocated by property provider via kmem_alloc. Requester
1657 * is responsible for freeing returned property via kmem_free.
1659 * Arguments:
1661 * dev_t: Input: dev_t of property.
1662 * dip: Input: dev_info_t pointer of child.
1663 * flags: Input: Possible flag modifiers are:
1664 * DDI_PROP_DONTPASS: Don't pass to parent if prop not found.
1665 * DDI_PROP_CANSLEEP: Memory allocation may sleep.
1666 * name: Input: name of property.
1667 * valuep: Output: Addr of callers buffer pointer.
1668 * lengthp:Output: *lengthp will contain prop length on exit.
1670 * Possible Returns:
1672 * DDI_PROP_SUCCESS: Prop found and returned.
1673 * DDI_PROP_NOT_FOUND: Prop not found
1674 * DDI_PROP_UNDEFINED: Prop explicitly undefined.
1675 * DDI_PROP_NO_MEMORY: Prop found, but unable to alloc mem.
1679 ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
1680 char *name, caddr_t valuep, int *lengthp)
1682 return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
1683 flags, name, valuep, lengthp));
1688 * ddi_getlongprop_buf: Get long prop into pre-allocated callers
1689 * buffer. (no memory allocation by provider).
1691 * dev_t: Input: dev_t of property.
1692 * dip: Input: dev_info_t pointer of child.
1693 * flags: Input: DDI_PROP_DONTPASS or NULL
1694 * name: Input: name of property
1695 * valuep: Input: ptr to callers buffer.
1696 * lengthp:I/O: ptr to length of callers buffer on entry,
1697 * actual length of property on exit.
1699 * Possible returns:
1701 * DDI_PROP_SUCCESS Prop found and returned
1702 * DDI_PROP_NOT_FOUND Prop not found
1703 * DDI_PROP_UNDEFINED Prop explicitly undefined.
1704 * DDI_PROP_BUF_TOO_SMALL Prop found, callers buf too small,
1705 * no value returned, but actual prop
1706 * length returned in *lengthp
1711 ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
1712 char *name, caddr_t valuep, int *lengthp)
1714 return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
1715 flags, name, valuep, lengthp));
1719 * Integer/boolean sized props.
1721 * Call is value only... returns found boolean or int sized prop value or
1722 * defvalue if prop not found or is wrong length or is explicitly undefined.
1723 * Only flag is DDI_PROP_DONTPASS...
1725 * By convention, this interface returns boolean (0) sized properties
1726 * as value (int)1.
1728 * This never returns an error, if property not found or specifically
1729 * undefined, the input `defvalue' is returned.
1733 ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
1735 int propvalue = defvalue;
1736 int proplength = sizeof (int);
1737 int error;
1739 error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
1740 flags, name, (caddr_t)&propvalue, &proplength);
1742 if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
1743 propvalue = 1;
1745 return (propvalue);
1749 * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
1750 * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
1754 ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
1756 return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
1760 * Allocate a struct prop_driver_data, along with 'size' bytes
1761 * for decoded property data. This structure is freed by
1762 * calling ddi_prop_free(9F).
1764 static void *
1765 ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *))
1767 struct prop_driver_data *pdd;
1770 * Allocate a structure with enough memory to store the decoded data.
1772 pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP);
1773 pdd->pdd_size = (sizeof (struct prop_driver_data) + size);
1774 pdd->pdd_prop_free = prop_free;
1777 * Return a pointer to the location to put the decoded data.
1779 return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data)));
1783 * Allocated the memory needed to store the encoded data in the property
1784 * handle.
1786 static int
1787 ddi_prop_encode_alloc(prop_handle_t *ph, size_t size)
1790 * If size is zero, then set data to NULL and size to 0. This
1791 * is a boolean property.
1793 if (size == 0) {
1794 ph->ph_size = 0;
1795 ph->ph_data = NULL;
1796 ph->ph_cur_pos = NULL;
1797 ph->ph_save_pos = NULL;
1798 } else {
1799 if (ph->ph_flags == DDI_PROP_DONTSLEEP) {
1800 ph->ph_data = kmem_zalloc(size, KM_NOSLEEP);
1801 if (ph->ph_data == NULL)
1802 return (DDI_PROP_NO_MEMORY);
1803 } else
1804 ph->ph_data = kmem_zalloc(size, KM_SLEEP);
1805 ph->ph_size = size;
1806 ph->ph_cur_pos = ph->ph_data;
1807 ph->ph_save_pos = ph->ph_data;
1809 return (DDI_PROP_SUCCESS);
1813 * Free the space allocated by the lookup routines. Each lookup routine
1814 * returns a pointer to the decoded data to the driver. The driver then
1815 * passes this pointer back to us. This data actually lives in a struct
1816 * prop_driver_data. We use negative indexing to find the beginning of
1817 * the structure and then free the entire structure using the size and
1818 * the free routine stored in the structure.
1820 void
1821 ddi_prop_free(void *datap)
1823 struct prop_driver_data *pdd;
1826 * Get the structure
1828 pdd = (struct prop_driver_data *)
1829 ((caddr_t)datap - sizeof (struct prop_driver_data));
1831 * Call the free routine to free it
1833 (*pdd->pdd_prop_free)(pdd);
1837 * Free the data associated with an array of ints,
1838 * allocated with ddi_prop_decode_alloc().
1840 static void
1841 ddi_prop_free_ints(struct prop_driver_data *pdd)
1843 kmem_free(pdd, pdd->pdd_size);
1847 * Free a single string property or a single string contained within
1848 * the argv style return value of an array of strings.
1850 static void
1851 ddi_prop_free_string(struct prop_driver_data *pdd)
1853 kmem_free(pdd, pdd->pdd_size);
1858 * Free an array of strings.
1860 static void
1861 ddi_prop_free_strings(struct prop_driver_data *pdd)
1863 kmem_free(pdd, pdd->pdd_size);
1867 * Free the data associated with an array of bytes.
1869 static void
1870 ddi_prop_free_bytes(struct prop_driver_data *pdd)
1872 kmem_free(pdd, pdd->pdd_size);
1876 * Reset the current location pointer in the property handle to the
1877 * beginning of the data.
1879 void
1880 ddi_prop_reset_pos(prop_handle_t *ph)
1882 ph->ph_cur_pos = ph->ph_data;
1883 ph->ph_save_pos = ph->ph_data;
1887 * Restore the current location pointer in the property handle to the
1888 * saved position.
1890 void
1891 ddi_prop_save_pos(prop_handle_t *ph)
1893 ph->ph_save_pos = ph->ph_cur_pos;
1897 * Save the location that the current location pointer is pointing to..
1899 void
1900 ddi_prop_restore_pos(prop_handle_t *ph)
1902 ph->ph_cur_pos = ph->ph_save_pos;
/*
 * Property encode/decode functions
 */
1910 * Decode a single integer property
1912 static int
1913 ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
1915 int i;
1916 int tmp;
1919 * If there is nothing to decode return an error
1921 if (ph->ph_size == 0)
1922 return (DDI_PROP_END_OF_DATA);
1925 * Decode the property as a single integer and return it
1926 * in data if we were able to decode it.
1928 i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
1929 if (i < DDI_PROP_RESULT_OK) {
1930 switch (i) {
1931 case DDI_PROP_RESULT_EOF:
1932 return (DDI_PROP_END_OF_DATA);
1934 case DDI_PROP_RESULT_ERROR:
1935 return (DDI_PROP_CANNOT_DECODE);
1939 *(int *)data = tmp;
1940 *nelements = 1;
1941 return (DDI_PROP_SUCCESS);
1945 * Decode a single 64 bit integer property
1947 static int
1948 ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
1950 int i;
1951 int64_t tmp;
1954 * If there is nothing to decode return an error
1956 if (ph->ph_size == 0)
1957 return (DDI_PROP_END_OF_DATA);
1960 * Decode the property as a single integer and return it
1961 * in data if we were able to decode it.
1963 i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
1964 if (i < DDI_PROP_RESULT_OK) {
1965 switch (i) {
1966 case DDI_PROP_RESULT_EOF:
1967 return (DDI_PROP_END_OF_DATA);
1969 case DDI_PROP_RESULT_ERROR:
1970 return (DDI_PROP_CANNOT_DECODE);
1974 *(int64_t *)data = tmp;
1975 *nelements = 1;
1976 return (DDI_PROP_SUCCESS);
1980 * Decode an array of integers property
1982 static int
1983 ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
1985 int i;
1986 int cnt = 0;
1987 int *tmp;
1988 int *intp;
1989 int n;
1992 * Figure out how many array elements there are by going through the
1993 * data without decoding it first and counting.
1995 for (;;) {
1996 i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
1997 if (i < 0)
1998 break;
1999 cnt++;
2003 * If there are no elements return an error
2005 if (cnt == 0)
2006 return (DDI_PROP_END_OF_DATA);
2009 * If we cannot skip through the data, we cannot decode it
2011 if (i == DDI_PROP_RESULT_ERROR)
2012 return (DDI_PROP_CANNOT_DECODE);
2015 * Reset the data pointer to the beginning of the encoded data
2017 ddi_prop_reset_pos(ph);
2020 * Allocated memory to store the decoded value in.
2022 intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
2023 ddi_prop_free_ints);
2026 * Decode each element and place it in the space we just allocated
2028 tmp = intp;
2029 for (n = 0; n < cnt; n++, tmp++) {
2030 i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
2031 if (i < DDI_PROP_RESULT_OK) {
2033 * Free the space we just allocated
2034 * and return an error.
2036 ddi_prop_free(intp);
2037 switch (i) {
2038 case DDI_PROP_RESULT_EOF:
2039 return (DDI_PROP_END_OF_DATA);
2041 case DDI_PROP_RESULT_ERROR:
2042 return (DDI_PROP_CANNOT_DECODE);
2047 *nelements = cnt;
2048 *(int **)data = intp;
2050 return (DDI_PROP_SUCCESS);
2054 * Decode a 64 bit integer array property
2056 static int
2057 ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
2059 int i;
2060 int n;
2061 int cnt = 0;
2062 int64_t *tmp;
2063 int64_t *intp;
2066 * Count the number of array elements by going
2067 * through the data without decoding it.
2069 for (;;) {
2070 i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
2071 if (i < 0)
2072 break;
2073 cnt++;
2077 * If there are no elements return an error
2079 if (cnt == 0)
2080 return (DDI_PROP_END_OF_DATA);
2083 * If we cannot skip through the data, we cannot decode it
2085 if (i == DDI_PROP_RESULT_ERROR)
2086 return (DDI_PROP_CANNOT_DECODE);
2089 * Reset the data pointer to the beginning of the encoded data
2091 ddi_prop_reset_pos(ph);
2094 * Allocate memory to store the decoded value.
2096 intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
2097 ddi_prop_free_ints);
2100 * Decode each element and place it in the space allocated
2102 tmp = intp;
2103 for (n = 0; n < cnt; n++, tmp++) {
2104 i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
2105 if (i < DDI_PROP_RESULT_OK) {
2107 * Free the space we just allocated
2108 * and return an error.
2110 ddi_prop_free(intp);
2111 switch (i) {
2112 case DDI_PROP_RESULT_EOF:
2113 return (DDI_PROP_END_OF_DATA);
2115 case DDI_PROP_RESULT_ERROR:
2116 return (DDI_PROP_CANNOT_DECODE);
2121 *nelements = cnt;
2122 *(int64_t **)data = intp;
2124 return (DDI_PROP_SUCCESS);
2128 * Encode an array of integers property (Can be one element)
2131 ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
2133 int i;
2134 int *tmp;
2135 int cnt;
2136 int size;
2139 * If there is no data, we cannot do anything
2141 if (nelements == 0)
2142 return (DDI_PROP_CANNOT_ENCODE);
2145 * Get the size of an encoded int.
2147 size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2149 if (size < DDI_PROP_RESULT_OK) {
2150 switch (size) {
2151 case DDI_PROP_RESULT_EOF:
2152 return (DDI_PROP_END_OF_DATA);
2154 case DDI_PROP_RESULT_ERROR:
2155 return (DDI_PROP_CANNOT_ENCODE);
2160 * Allocate space in the handle to store the encoded int.
2162 if (ddi_prop_encode_alloc(ph, size * nelements) !=
2163 DDI_PROP_SUCCESS)
2164 return (DDI_PROP_NO_MEMORY);
2167 * Encode the array of ints.
2169 tmp = (int *)data;
2170 for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2171 i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
2172 if (i < DDI_PROP_RESULT_OK) {
2173 switch (i) {
2174 case DDI_PROP_RESULT_EOF:
2175 return (DDI_PROP_END_OF_DATA);
2177 case DDI_PROP_RESULT_ERROR:
2178 return (DDI_PROP_CANNOT_ENCODE);
2183 return (DDI_PROP_SUCCESS);
2188 * Encode a 64 bit integer array property
2191 ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
2193 int i;
2194 int cnt;
2195 int size;
2196 int64_t *tmp;
2199 * If there is no data, we cannot do anything
2201 if (nelements == 0)
2202 return (DDI_PROP_CANNOT_ENCODE);
2205 * Get the size of an encoded 64 bit int.
2207 size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2209 if (size < DDI_PROP_RESULT_OK) {
2210 switch (size) {
2211 case DDI_PROP_RESULT_EOF:
2212 return (DDI_PROP_END_OF_DATA);
2214 case DDI_PROP_RESULT_ERROR:
2215 return (DDI_PROP_CANNOT_ENCODE);
2220 * Allocate space in the handle to store the encoded int.
2222 if (ddi_prop_encode_alloc(ph, size * nelements) !=
2223 DDI_PROP_SUCCESS)
2224 return (DDI_PROP_NO_MEMORY);
2227 * Encode the array of ints.
2229 tmp = (int64_t *)data;
2230 for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2231 i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
2232 if (i < DDI_PROP_RESULT_OK) {
2233 switch (i) {
2234 case DDI_PROP_RESULT_EOF:
2235 return (DDI_PROP_END_OF_DATA);
2237 case DDI_PROP_RESULT_ERROR:
2238 return (DDI_PROP_CANNOT_ENCODE);
2243 return (DDI_PROP_SUCCESS);
2247 * Decode a single string property
2249 static int
2250 ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
2252 char *tmp;
2253 char *str;
2254 int i;
2255 int size;
2258 * If there is nothing to decode return an error
2260 if (ph->ph_size == 0)
2261 return (DDI_PROP_END_OF_DATA);
2264 * Get the decoded size of the encoded string.
2266 size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2267 if (size < DDI_PROP_RESULT_OK) {
2268 switch (size) {
2269 case DDI_PROP_RESULT_EOF:
2270 return (DDI_PROP_END_OF_DATA);
2272 case DDI_PROP_RESULT_ERROR:
2273 return (DDI_PROP_CANNOT_DECODE);
2278 * Allocated memory to store the decoded value in.
2280 str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);
2282 ddi_prop_reset_pos(ph);
2285 * Decode the str and place it in the space we just allocated
2287 tmp = str;
2288 i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
2289 if (i < DDI_PROP_RESULT_OK) {
2291 * Free the space we just allocated
2292 * and return an error.
2294 ddi_prop_free(str);
2295 switch (i) {
2296 case DDI_PROP_RESULT_EOF:
2297 return (DDI_PROP_END_OF_DATA);
2299 case DDI_PROP_RESULT_ERROR:
2300 return (DDI_PROP_CANNOT_DECODE);
2304 *(char **)data = str;
2305 *nelements = 1;
2307 return (DDI_PROP_SUCCESS);
2311 * Decode an array of strings.
2314 ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
2316 int cnt = 0;
2317 char **strs;
2318 char **tmp;
2319 char *ptr;
2320 int i;
2321 int n;
2322 int size;
2323 size_t nbytes;
2326 * Figure out how many array elements there are by going through the
2327 * data without decoding it first and counting.
2329 for (;;) {
2330 i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
2331 if (i < 0)
2332 break;
2333 cnt++;
2337 * If there are no elements return an error
2339 if (cnt == 0)
2340 return (DDI_PROP_END_OF_DATA);
2343 * If we cannot skip through the data, we cannot decode it
2345 if (i == DDI_PROP_RESULT_ERROR)
2346 return (DDI_PROP_CANNOT_DECODE);
2349 * Reset the data pointer to the beginning of the encoded data
2351 ddi_prop_reset_pos(ph);
2354 * Figure out how much memory we need for the sum total
2356 nbytes = (cnt + 1) * sizeof (char *);
2358 for (n = 0; n < cnt; n++) {
2360 * Get the decoded size of the current encoded string.
2362 size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2363 if (size < DDI_PROP_RESULT_OK) {
2364 switch (size) {
2365 case DDI_PROP_RESULT_EOF:
2366 return (DDI_PROP_END_OF_DATA);
2368 case DDI_PROP_RESULT_ERROR:
2369 return (DDI_PROP_CANNOT_DECODE);
2373 nbytes += size;
2377 * Allocate memory in which to store the decoded strings.
2379 strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);
2382 * Set up pointers for each string by figuring out yet
2383 * again how long each string is.
2385 ddi_prop_reset_pos(ph);
2386 ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
2387 for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2389 * Get the decoded size of the current encoded string.
2391 size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2392 if (size < DDI_PROP_RESULT_OK) {
2393 ddi_prop_free(strs);
2394 switch (size) {
2395 case DDI_PROP_RESULT_EOF:
2396 return (DDI_PROP_END_OF_DATA);
2398 case DDI_PROP_RESULT_ERROR:
2399 return (DDI_PROP_CANNOT_DECODE);
2403 *tmp = ptr;
2404 ptr += size;
2408 * String array is terminated by a NULL
2410 *tmp = NULL;
2413 * Finally, we can decode each string
2415 ddi_prop_reset_pos(ph);
2416 for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2417 i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
2418 if (i < DDI_PROP_RESULT_OK) {
2420 * Free the space we just allocated
2421 * and return an error
2423 ddi_prop_free(strs);
2424 switch (i) {
2425 case DDI_PROP_RESULT_EOF:
2426 return (DDI_PROP_END_OF_DATA);
2428 case DDI_PROP_RESULT_ERROR:
2429 return (DDI_PROP_CANNOT_DECODE);
2434 *(char ***)data = strs;
2435 *nelements = cnt;
2437 return (DDI_PROP_SUCCESS);
2441 * Encode a string.
2444 ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
2446 char **tmp;
2447 int size;
2448 int i;
2451 * If there is no data, we cannot do anything
2453 if (nelements == 0)
2454 return (DDI_PROP_CANNOT_ENCODE);
2457 * Get the size of the encoded string.
2459 tmp = (char **)data;
2460 size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2461 if (size < DDI_PROP_RESULT_OK) {
2462 switch (size) {
2463 case DDI_PROP_RESULT_EOF:
2464 return (DDI_PROP_END_OF_DATA);
2466 case DDI_PROP_RESULT_ERROR:
2467 return (DDI_PROP_CANNOT_ENCODE);
2472 * Allocate space in the handle to store the encoded string.
2474 if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
2475 return (DDI_PROP_NO_MEMORY);
2477 ddi_prop_reset_pos(ph);
2480 * Encode the string.
2482 tmp = (char **)data;
2483 i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2484 if (i < DDI_PROP_RESULT_OK) {
2485 switch (i) {
2486 case DDI_PROP_RESULT_EOF:
2487 return (DDI_PROP_END_OF_DATA);
2489 case DDI_PROP_RESULT_ERROR:
2490 return (DDI_PROP_CANNOT_ENCODE);
2494 return (DDI_PROP_SUCCESS);
2499 * Encode an array of strings.
2502 ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
2504 int cnt = 0;
2505 char **tmp;
2506 int size;
2507 uint_t total_size;
2508 int i;
2511 * If there is no data, we cannot do anything
2513 if (nelements == 0)
2514 return (DDI_PROP_CANNOT_ENCODE);
2517 * Get the total size required to encode all the strings.
2519 total_size = 0;
2520 tmp = (char **)data;
2521 for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2522 size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2523 if (size < DDI_PROP_RESULT_OK) {
2524 switch (size) {
2525 case DDI_PROP_RESULT_EOF:
2526 return (DDI_PROP_END_OF_DATA);
2528 case DDI_PROP_RESULT_ERROR:
2529 return (DDI_PROP_CANNOT_ENCODE);
2532 total_size += (uint_t)size;
2536 * Allocate space in the handle to store the encoded strings.
2538 if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
2539 return (DDI_PROP_NO_MEMORY);
2541 ddi_prop_reset_pos(ph);
2544 * Encode the array of strings.
2546 tmp = (char **)data;
2547 for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2548 i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2549 if (i < DDI_PROP_RESULT_OK) {
2550 switch (i) {
2551 case DDI_PROP_RESULT_EOF:
2552 return (DDI_PROP_END_OF_DATA);
2554 case DDI_PROP_RESULT_ERROR:
2555 return (DDI_PROP_CANNOT_ENCODE);
2560 return (DDI_PROP_SUCCESS);
2565 * Decode an array of bytes.
2567 static int
2568 ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
2570 uchar_t *tmp;
2571 int nbytes;
2572 int i;
2575 * If there are no elements return an error
2577 if (ph->ph_size == 0)
2578 return (DDI_PROP_END_OF_DATA);
2581 * Get the size of the encoded array of bytes.
2583 nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
2584 data, ph->ph_size);
2585 if (nbytes < DDI_PROP_RESULT_OK) {
2586 switch (nbytes) {
2587 case DDI_PROP_RESULT_EOF:
2588 return (DDI_PROP_END_OF_DATA);
2590 case DDI_PROP_RESULT_ERROR:
2591 return (DDI_PROP_CANNOT_DECODE);
2596 * Allocated memory to store the decoded value in.
2598 tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);
2601 * Decode each element and place it in the space we just allocated
2603 i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
2604 if (i < DDI_PROP_RESULT_OK) {
2606 * Free the space we just allocated
2607 * and return an error
2609 ddi_prop_free(tmp);
2610 switch (i) {
2611 case DDI_PROP_RESULT_EOF:
2612 return (DDI_PROP_END_OF_DATA);
2614 case DDI_PROP_RESULT_ERROR:
2615 return (DDI_PROP_CANNOT_DECODE);
2619 *(uchar_t **)data = tmp;
2620 *nelements = nbytes;
2622 return (DDI_PROP_SUCCESS);
2626 * Encode an array of bytes.
2629 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements)
2631 int size;
2632 int i;
2635 * If there are no elements, then this is a boolean property,
2636 * so just create a property handle with no data and return.
2638 if (nelements == 0) {
2639 (void) ddi_prop_encode_alloc(ph, 0);
2640 return (DDI_PROP_SUCCESS);
2644 * Get the size of the encoded array of bytes.
2646 size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data,
2647 nelements);
2648 if (size < DDI_PROP_RESULT_OK) {
2649 switch (size) {
2650 case DDI_PROP_RESULT_EOF:
2651 return (DDI_PROP_END_OF_DATA);
2653 case DDI_PROP_RESULT_ERROR:
2654 return (DDI_PROP_CANNOT_DECODE);
2659 * Allocate space in the handle to store the encoded bytes.
2661 if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS)
2662 return (DDI_PROP_NO_MEMORY);
2665 * Encode the array of bytes.
2667 i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data,
2668 nelements);
2669 if (i < DDI_PROP_RESULT_OK) {
2670 switch (i) {
2671 case DDI_PROP_RESULT_EOF:
2672 return (DDI_PROP_END_OF_DATA);
2674 case DDI_PROP_RESULT_ERROR:
2675 return (DDI_PROP_CANNOT_ENCODE);
2679 return (DDI_PROP_SUCCESS);
/*
 * OBP 1275 integer, string and byte operators.
 *
 * DDI_PROP_CMD_DECODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot decode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_OK:			data was decoded
 *
 * DDI_PROP_CMD_ENCODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot encode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_OK:			data was encoded
 *
 * DDI_PROP_CMD_SKIP:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot skip the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_OK:			data was skipped
 *
 * DDI_PROP_CMD_GET_ESIZE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot get encoded size
 *	DDI_PROP_RESULT_EOF:		end of data
 *	> 0:				the encoded size
 *
 * DDI_PROP_CMD_GET_DSIZE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot get decoded size
 *	DDI_PROP_RESULT_EOF:		end of data
 *	> 0:				the decoded size
 */
/*
 * OBP 1275 integer operator
 *
 * OBP properties are a byte stream of data, so integers may not be
 * properly aligned.  Therefore we need to copy them one byte at a time.
 */
int
ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
{
	int	i;

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		if (ph->ph_flags & PH_FROM_PROM) {
			/*
			 * A PROM-sourced integer may be truncated; never
			 * read past the end of the encoded data.
			 * NOTE(review): the (int *) casts make the bounds
			 * arithmetic scale by sizeof (int) -- looks
			 * intentional-but-odd; confirm before changing.
			 */
			i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
			if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
			    ph->ph_size - i))
				return (DDI_PROP_RESULT_ERROR);
		} else {
			if (ph->ph_size < sizeof (int) ||
			    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
			    ph->ph_size - sizeof (int))))
				return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the integer, using the implementation-specific
		 * copy function if the property is coming from the PROM.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			*data = impl_ddi_prop_int_from_prom(
			    (uchar_t *)ph->ph_cur_pos,
			    (ph->ph_size < PROP_1275_INT_SIZE) ?
			    ph->ph_size : PROP_1275_INT_SIZE);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < PROP_1275_INT_SIZE ||
		    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
		    ph->ph_size - sizeof (int))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Encode the integer into the byte stream one byte at a
		 * time.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < PROP_1275_INT_SIZE)
			return (DDI_PROP_RESULT_ERROR);

		/* At or past the end of the encoded data: nothing to skip. */
		if ((caddr_t)ph->ph_cur_pos ==
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of the next bit of
		 * undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded integer on OBP
		 */
		return (PROP_1275_INT_SIZE);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded integer on the system.
		 */
		return (sizeof (int));

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_int: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
2837 * 64 bit integer operator.
2839 * This is an extension, defined by Sun, to the 1275 integer
2840 * operator. This routine handles the encoding/decoding of
2841 * 64 bit integer properties.
2844 ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
2847 switch (cmd) {
2848 case DDI_PROP_CMD_DECODE:
2850 * Check that there is encoded data
2852 if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
2853 return (DDI_PROP_RESULT_ERROR);
2854 if (ph->ph_flags & PH_FROM_PROM) {
2855 return (DDI_PROP_RESULT_ERROR);
2856 } else {
2857 if (ph->ph_size < sizeof (int64_t) ||
2858 ((int64_t *)ph->ph_cur_pos >
2859 ((int64_t *)ph->ph_data +
2860 ph->ph_size - sizeof (int64_t))))
2861 return (DDI_PROP_RESULT_ERROR);
2864 * Copy the integer, using the implementation-specific
2865 * copy function if the property is coming from the PROM.
2867 if (ph->ph_flags & PH_FROM_PROM) {
2868 return (DDI_PROP_RESULT_ERROR);
2869 } else {
2870 bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
2874 * Move the current location to the start of the next
2875 * bit of undecoded data.
2877 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2878 sizeof (int64_t);
2879 return (DDI_PROP_RESULT_OK);
2881 case DDI_PROP_CMD_ENCODE:
2883 * Check that there is room to encoded the data
2885 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2886 ph->ph_size < sizeof (int64_t) ||
2887 ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
2888 ph->ph_size - sizeof (int64_t))))
2889 return (DDI_PROP_RESULT_ERROR);
2892 * Encode the integer into the byte stream one byte at a
2893 * time.
2895 bcopy(data, ph->ph_cur_pos, sizeof (int64_t));
2898 * Move the current location to the start of the next bit of
2899 * space where we can store encoded data.
2901 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2902 sizeof (int64_t);
2903 return (DDI_PROP_RESULT_OK);
2905 case DDI_PROP_CMD_SKIP:
2907 * Check that there is encoded data
2909 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2910 ph->ph_size < sizeof (int64_t))
2911 return (DDI_PROP_RESULT_ERROR);
2913 if ((caddr_t)ph->ph_cur_pos ==
2914 (caddr_t)ph->ph_data + ph->ph_size) {
2915 return (DDI_PROP_RESULT_EOF);
2916 } else if ((caddr_t)ph->ph_cur_pos >
2917 (caddr_t)ph->ph_data + ph->ph_size) {
2918 return (DDI_PROP_RESULT_EOF);
2922 * Move the current location to the start of
2923 * the next bit of undecoded data.
2925 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2926 sizeof (int64_t);
2927 return (DDI_PROP_RESULT_OK);
2929 case DDI_PROP_CMD_GET_ESIZE:
2931 * Return the size of an encoded integer on OBP
2933 return (sizeof (int64_t));
2935 case DDI_PROP_CMD_GET_DSIZE:
2937 * Return the size of a decoded integer on the system.
2939 return (sizeof (int64_t));
2941 default:
2942 #ifdef DEBUG
2943 panic("ddi_prop_int64_op: %x impossible", cmd);
2944 /*NOTREACHED*/
2945 #else
2946 return (DDI_PROP_RESULT_ERROR);
2947 #endif /* DEBUG */
/*
 * OBP 1275 string operator.
 *
 * OBP strings are NULL terminated.
 */
int
ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
{
	int	n;
	char	*p;
	char	*end;

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
		 * how to NULL terminate result.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		/* Copy bytes until the terminator or the end of the data. */
		while (p < end) {
			*data++ = *p;
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		/*
		 * If OBP did not NULL terminate string, which happens
		 * (at least) for 'true'/'false' boolean values, account for
		 * the space and store null termination on decode.
		 */
		ph->ph_cur_pos = p;
		*data = 0;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		n = strlen(data) + 1;
		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - n)) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the NULL terminated string
		 */
		bcopy(data, ph->ph_cur_pos, n);

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Return the string length plus one for the NULL
		 * We know the size of the property, we need to
		 * ensure that the string is properly formatted,
		 * since we may be looking up random OBP data.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		while (p < end) {
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		/*
		 * Accommodate the fact that OBP does not always NULL
		 * terminate strings.
		 */
		ph->ph_cur_pos = p;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of the encoded string on OBP.
		 */
		return (strlen(data) + 1);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the string length plus one for the NULL.
		 * We know the size of the property, we need to
		 * ensure that the string is properly formatted,
		 * since we may be looking up random OBP data.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		for (n = 0; p < end; n++) {
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (n + 1);
			}
		}

		/*
		 * If OBP did not NULL terminate string, which happens for
		 * 'true'/'false' boolean values, account for the space
		 * to store null termination here.
		 */
		ph->ph_cur_pos = p;
		return (n + 1);

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_string: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
/*
 * OBP 1275 byte operator
 *
 * Caller must specify the number of bytes to get.  OBP encodes bytes
 * as a byte so there is a 1-to-1 translation.
 */
int
ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
    uint_t nelements)
{
	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < nelements ||
		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - nelements)))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Copy out the bytes
		 */
		bcopy(ph->ph_cur_pos, data, nelements);

		/*
		 * Move the current location
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < nelements ||
		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - nelements)))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Copy in the bytes
		 */
		bcopy(data, ph->ph_cur_pos, nelements);

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < nelements)
			return (DDI_PROP_RESULT_ERROR);

		/* Not enough bytes left to skip over: report end of data. */
		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - nelements))
			return (DDI_PROP_RESULT_EOF);

		/*
		 * Move the current location
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * The size in bytes of the encoded size is the
		 * same as the decoded size provided by the caller.
		 */
		return (nelements);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Just return the number of bytes specified by the caller.
		 */
		return (nelements);

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_bytes: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
/*
 * Used for properties that come from the OBP, hardware configuration files,
 * or that are created by calls to ddi_prop_update(9F).
 */
static struct prop_handle_ops prop_1275_ops = {
	ddi_prop_1275_int,	/* integer encode/decode */
	ddi_prop_1275_string,	/* string encode/decode */
	ddi_prop_1275_bytes,	/* byte-array encode/decode */
	ddi_prop_int64_op	/* 64-bit integer encode/decode */
};
/*
 * Interface to create/modify a managed property on a child's behalf.
 * Flags interpreted are:
 *	DDI_PROP_CANSLEEP:	Allow memory allocation to sleep.
 *	DDI_PROP_SYSTEM_DEF:	Manipulate system list rather than driver list.
 *
 * Use the same dev_t when modifying or undefining a property.
 * Search for properties with DDI_DEV_T_ANY to match the first named
 * property on the list.
 *
 * Properties are stored LIFO and subsequently will match the first
 * `matching' instance.
 */
/*
 * ddi_prop_add:	Add a software defined property
 */

/*
 * define to get a new ddi_prop_t.
 * km_flags are KM_SLEEP or KM_NOSLEEP.
 */
#define	DDI_NEW_PROP_T(km_flags)	\
	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))
/*
 * Allocate a new ddi_prop_t, copy in the name/value, and link it at the
 * head of the appropriate per-devinfo property list (driver, system, or
 * hardware, selected by flags).  Returns DDI_PROP_SUCCESS,
 * DDI_PROP_INVAL_ARG, or DDI_PROP_NO_MEMORY.
 */
static int
ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t	*new_propp, *propp;
	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	int		km_flags = KM_NOSLEEP;
	int		name_buf_len;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
	 */
	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	if (flags & DDI_PROP_CANSLEEP)
		km_flags = KM_SLEEP;

	/* Select which of the three per-devinfo lists receives the prop. */
	if (flags & DDI_PROP_SYSTEM_DEF)
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	else if (flags & DDI_PROP_HW_DEF)
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);

	if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * If dev is major number 0, then we need to do a ddi_name_to_major
	 * to get the real major number for the device.  This needs to be
	 * done because some drivers need to call ddi_prop_create in their
	 * attach routines but they don't have a dev.  By creating the dev
	 * ourself if the major number is 0, drivers will not have to know what
	 * their major number.	They can just create a dev with major number
	 * 0 and pass it in.  For device 0, we will be doing a little extra
	 * work by recreating the same dev that we already have, but its the
	 * price you pay :-).
	 *
	 * This fixes bug #1098060.
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
		new_propp->prop_dev =
		    makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));
	} else
		new_propp->prop_dev = dev;

	/*
	 * Allocate space for property name and copy it in...
	 */
	name_buf_len = strlen(name) + 1;
	new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
	if (new_propp->prop_name == 0) {
		/* Unwind the ddi_prop_t allocation before failing. */
		kmem_free(new_propp, sizeof (ddi_prop_t));
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}
	bcopy(name, new_propp->prop_name, name_buf_len);

	/*
	 * Set the property type
	 */
	new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;

	/*
	 * Set length and value ONLY if not an explicit property undefine:
	 * NOTE: value and length are zero for explicit undefines.
	 */
	if (flags & DDI_PROP_UNDEF_IT) {
		new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
	} else {
		if ((new_propp->prop_len = length) != 0) {
			new_propp->prop_val = kmem_alloc(length, km_flags);
			if (new_propp->prop_val == 0) {
				/* Unwind both prior allocations. */
				kmem_free(new_propp->prop_name, name_buf_len);
				kmem_free(new_propp, sizeof (ddi_prop_t));
				cmn_err(CE_CONT, prop_no_mem_msg, name);
				return (DDI_PROP_NO_MEMORY);
			}
			bcopy(value, new_propp->prop_val, length);
		}
	}

	/*
	 * Link property into beginning of list. (Properties are LIFO order.)
	 */
	mutex_enter(&(DEVI(dip)->devi_lock));
	propp = *list_head;
	new_propp->prop_next = propp;
	*list_head = new_propp;
	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_PROP_SUCCESS);
}
/*
 * ddi_prop_change:	Modify a software managed property value
 *
 *			Set new length and value if found.
 *			returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
 *			input name is the NULL string.
 *			returns DDI_PROP_NO_MEMORY if unable to allocate memory
 *
 *			Note: an undef can be modified to be a define,
 *			(you can't go the other way.)
 */
static int
ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t	*propp;
	ddi_prop_t	**ppropp;
	caddr_t		p = NULL;

	if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Preallocate buffer, even if we don't need it...
	 * (allocating before taking devi_lock avoids sleeping allocation
	 * while holding the mutex)
	 */
	if (length != 0) {
		p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP);
		if (p == NULL) {
			cmn_err(CE_CONT, prop_no_mem_msg, name);
			return (DDI_PROP_NO_MEMORY);
		}
	}

	/*
	 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
	 * number, a real dev_t value should be created based upon the dip's
	 * binding driver.  See ddi_prop_add...
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
		dev = makedevice(
		    ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));

	/*
	 * Check to see if the property exists.  If so we modify it.
	 * Else we create it by calling ddi_prop_add().
	 */
	mutex_enter(&(DEVI(dip)->devi_lock));
	ppropp = &DEVI(dip)->devi_drv_prop_ptr;
	if (flags & DDI_PROP_SYSTEM_DEF)
		ppropp = &DEVI(dip)->devi_sys_prop_ptr;
	else if (flags & DDI_PROP_HW_DEF)
		ppropp = &DEVI(dip)->devi_hw_prop_ptr;

	if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
		/*
		 * Need to reallocate buffer?  If so, do it
		 * carefully (reuse same space if new prop
		 * is same size and non-NULL sized).
		 */
		if (length != 0)
			bcopy(value, p, length);

		if (propp->prop_len != 0)
			kmem_free(propp->prop_val, propp->prop_len);

		/* Install the new value; also clears any prior undefine. */
		propp->prop_len = length;
		propp->prop_val = p;
		propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
		mutex_exit(&(DEVI(dip)->devi_lock));
		return (DDI_PROP_SUCCESS);
	}

	/* Not found: release the preallocated buffer and create fresh. */
	mutex_exit(&(DEVI(dip)->devi_lock));
	if (length != 0)
		kmem_free(p, length);

	return (ddi_prop_add(dev, dip, flags, name, value, length));
}
/*
 * Common update routine used to update and encode a property.	Creates
 * a property handle, calls the property encode routine, figures out if
 * the property already exists and updates if it does.	Otherwise it
 * creates if it does not exist.
 */
int
ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
    char *name, void *data, uint_t nelements,
    int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
{
	prop_handle_t	ph;
	int		rval;
	uint_t		ourflags;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Create the handle
	 */
	ph.ph_data = NULL;
	ph.ph_cur_pos = NULL;
	ph.ph_save_pos = NULL;
	ph.ph_size = 0;
	ph.ph_ops = &prop_1275_ops;

	/*
	 * ourflags:
	 * For compatibility with the old interfaces.  The old interfaces
	 * didn't sleep by default and slept when the flag was set.  These
	 * interfaces to the opposite.	So the old interfaces now set the
	 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
	 *
	 * ph.ph_flags:
	 * Blocked data or unblocked data allocation
	 * for ph.ph_data in ddi_prop_encode_alloc()
	 */
	if (flags & DDI_PROP_DONTSLEEP) {
		ourflags = flags;
		ph.ph_flags = DDI_PROP_DONTSLEEP;
	} else {
		ourflags = flags | DDI_PROP_CANSLEEP;
		ph.ph_flags = DDI_PROP_CANSLEEP;
	}

	/*
	 * Encode the data and store it in the property handle by
	 * calling the prop_encode routine.
	 */
	if ((rval = (*prop_create)(&ph, data, nelements)) !=
	    DDI_PROP_SUCCESS) {
		if (rval == DDI_PROP_NO_MEMORY)
			cmn_err(CE_CONT, prop_no_mem_msg, name);
		if (ph.ph_size != 0)
			kmem_free(ph.ph_data, ph.ph_size);
		return (rval);
	}

	/*
	 * The old interfaces use a stacking approach to creating
	 * properties.	If we are being called from the old interfaces,
	 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
	 * create without checking.
	 */
	if (flags & DDI_PROP_STACK_CREATE) {
		rval = ddi_prop_add(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	} else {
		rval = ddi_prop_change(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	}

	/*
	 * Free the encoded data allocated in the prop_encode routine.
	 * (ddi_prop_add/change copied it into the property list.)
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}
/*
 * ddi_prop_create:	Define a managed property:
 *			See above for details.
 */
int
ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
    char *name, caddr_t value, int length)
{
	if (!(flag & DDI_PROP_CANSLEEP)) {
		/* Old interface: no-sleep is the default. */
		flag |= DDI_PROP_DONTSLEEP;
#ifdef DDI_PROP_DEBUG
		if (length != 0)
			cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
			    "use ddi_prop_update (prop = %s, node = %s%d)",
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
#endif /* DDI_PROP_DEBUG */
	}
	/* Driver list only; old interfaces always stack-create. */
	flag &= ~DDI_PROP_SYSTEM_DEF;
	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
	return (ddi_prop_update_common(dev, dip, flag, name,
	    value, length, ddi_prop_fm_encode_bytes));
}
/*
 * e_ddi_prop_create:	same as ddi_prop_create, but the property is
 *			placed on the system-defined property list.
 */
int
e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
    char *name, caddr_t value, int length)
{
	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
	return (ddi_prop_update_common(dev, dip, flag,
	    name, value, length, ddi_prop_fm_encode_bytes));
}
/*
 * ddi_prop_modify:	modify an existing driver-defined property;
 *			fails with DDI_PROP_NOT_FOUND if the property
 *			does not already exist (non-PROM).
 */
int
ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
    char *name, caddr_t value, int length)
{
	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	flag &= ~DDI_PROP_SYSTEM_DEF;
	/* Only modify properties that already exist on the driver list. */
	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
		return (DDI_PROP_NOT_FOUND);

	return (ddi_prop_update_common(dev, dip,
	    (flag | DDI_PROP_TYPE_BYTE), name,
	    value, length, ddi_prop_fm_encode_bytes));
}
/*
 * e_ddi_prop_modify:	modify an existing system-defined property;
 *			fails with DDI_PROP_NOT_FOUND if the property
 *			does not already exist on the system list.
 */
int
e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
    char *name, caddr_t value, int length)
{
	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
		return (DDI_PROP_NOT_FOUND);

	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	return (ddi_prop_update_common(dev, dip,
	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
	    name, value, length, ddi_prop_fm_encode_bytes));
}
/*
 * Common lookup routine used to lookup and decode a property.
 * Creates a property handle, searches for the raw encoded data,
 * fills in the handle, and calls the property decode functions
 * passed in.
 *
 * This routine is not static because ddi_bus_prop_op() which lives in
 * ddi_impl.c calls it.  No driver should be calling this routine.
 */
int
ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
    uint_t flags, char *name, void *data, uint_t *nelements,
    int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
{
	int		rval;
	uint_t		ourflags;
	prop_handle_t	ph;

	if ((match_dev == DDI_DEV_T_NONE) ||
	    (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/* New interfaces sleep by default unless told otherwise. */
	ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
	    flags | DDI_PROP_CANSLEEP;

	/*
	 * Get the encoded data
	 */
	bzero(&ph, sizeof (prop_handle_t));

	if ((flags & DDI_UNBND_DLPI2) || (flags & DDI_PROP_ROOTNEX_GLOBAL)) {
		/*
		 * For rootnex and unbound dlpi style-2 devices, index into
		 * the devnames' array and search the global
		 * property list.
		 */
		ourflags &= ~DDI_UNBND_DLPI2;
		rval = i_ddi_prop_search_global(match_dev,
		    ourflags, name, &ph.ph_data, &ph.ph_size);
	} else {
		rval = ddi_prop_search_common(match_dev, dip,
		    PROP_LEN_AND_VAL_ALLOC, ourflags, name,
		    &ph.ph_data, &ph.ph_size);
	}

	if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
		ASSERT(ph.ph_data == NULL);
		ASSERT(ph.ph_size == 0);
		return (rval);
	}

	/*
	 * If the encoded data came from a OBP or software
	 * use the 1275 OBP decode/encode routines.
	 */
	ph.ph_cur_pos = ph.ph_data;
	ph.ph_save_pos = ph.ph_data;
	ph.ph_ops = &prop_1275_ops;
	ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;

	rval = (*prop_decoder)(&ph, data, nelements);

	/*
	 * Free the encoded data
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}
/*
 * Lookup and return an array of composite properties.  The driver must
 * provide the decode routine.
 */
int
ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
    uint_t flags, char *name, void *data, uint_t *nelements,
    int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
{
	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_COMPOSITE), name,
	    data, nelements, prop_decoder));
}
3672 * Return 1 if a property exists (no type checking done).
3673 * Return 0 if it does not exist.
3676 ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
3678 int i;
3679 uint_t x = 0;
3681 i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
3682 flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
3683 return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
/*
 * Update an array of composite properties.  The driver must
 * provide the encode routine.
 */
int
ddi_prop_update(dev_t match_dev, dev_info_t *dip,
    char *name, void *data, uint_t nelements,
    int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
{
	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
	    name, data, nelements, prop_create));
}
/*
 * Get a single integer or boolean property and return it.
 * If the property does not exists, or cannot be decoded,
 * then return the defvalue passed in.
 *
 * This routine always succeeds.
 */
int
ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int defvalue)
{
	int	data;
	uint_t	nelements;
	int	rval;

	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
			    " 0x%x (prop = %s, node = %s%d)", flags,
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		/*
		 * NOTE(review): the sanitize mask below omits
		 * DDI_PROP_ROOTNEX_GLOBAL even though it is accepted by
		 * the validity test above -- confirm this is intentional.
		 */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	if ((rval = ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
	    ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
		/* A boolean (zero-length) property decodes as 1. */
		if (rval == DDI_PROP_END_OF_DATA)
			data = 1;
		else
			data = defvalue;
	}
	return (data);
}
/*
 * Get a single 64 bit integer or boolean property and return it.
 * If the property does not exists, or cannot be decoded,
 * then return the defvalue passed in.
 *
 * This routine always succeeds.
 */
int64_t
ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int64_t defvalue)
{
	int64_t	data;
	uint_t	nelements;
	int	rval;

	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
			    " 0x%x (prop = %s, node = %s%d)", flags,
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		/*
		 * NOTE(review): unlike ddi_prop_get_int(), invalid flags
		 * here return DDI_PROP_INVAL_ARG (as an int64_t) rather
		 * than being masked off -- confirm callers expect this.
		 */
		return (DDI_PROP_INVAL_ARG);
	}

	if ((rval = ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
	    name, &data, &nelements, ddi_prop_fm_decode_int64))
	    != DDI_PROP_SUCCESS) {
		/* A boolean (zero-length) property decodes as 1. */
		if (rval == DDI_PROP_END_OF_DATA)
			data = 1;
		else
			data = defvalue;
	}
	return (data);
}
/*
 * Get an array of integer property
 */
int
ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int **data, uint_t *nelements)
{
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
			    "invalid flag 0x%x (prop = %s, node = %s%d)",
			    flags, name, ddi_driver_name(dip),
			    ddi_get_instance(dip));
		}
#endif /* DEBUG */
		/*
		 * NOTE(review): sanitize mask drops DDI_PROP_ROOTNEX_GLOBAL
		 * even though the validity test accepts it -- confirm.
		 */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT), name, data,
	    nelements, ddi_prop_fm_decode_ints));
}
3805 * Get an array of 64 bit integer properties
3808 ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3809 char *name, int64_t **data, uint_t *nelements)
3811 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3812 LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3813 #ifdef DEBUG
3814 if (dip != NULL) {
3815 cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
3816 "invalid flag 0x%x (prop = %s, node = %s%d)",
3817 flags, name, ddi_driver_name(dip),
3818 ddi_get_instance(dip));
3820 #endif /* DEBUG */
3821 return (DDI_PROP_INVAL_ARG);
3824 return (ddi_prop_lookup_common(match_dev, dip,
3825 (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
3826 name, data, nelements, ddi_prop_fm_decode_int64_array));
3830 * Update a single integer property. If the property exists on the drivers
3831 * property list it updates, else it creates it.
3834 ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
3835 char *name, int data)
3837 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
3838 name, &data, 1, ddi_prop_fm_encode_ints));
3842 * Update a single 64 bit integer property.
3843 * Update the driver property list if it exists, else create it.
3846 ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
3847 char *name, int64_t data)
3849 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
3850 name, &data, 1, ddi_prop_fm_encode_int64));
3854 e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
3855 char *name, int data)
3857 return (ddi_prop_update_common(match_dev, dip,
3858 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
3859 name, &data, 1, ddi_prop_fm_encode_ints));
3863 e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
3864 char *name, int64_t data)
3866 return (ddi_prop_update_common(match_dev, dip,
3867 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
3868 name, &data, 1, ddi_prop_fm_encode_int64));
3872 * Update an array of integer property. If the property exists on the drivers
3873 * property list it updates, else it creates it.
3876 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
3877 char *name, int *data, uint_t nelements)
3879 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
3880 name, data, nelements, ddi_prop_fm_encode_ints));
3884 * Update an array of 64 bit integer properties.
3885 * Update the driver property list if it exists, else create it.
3888 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
3889 char *name, int64_t *data, uint_t nelements)
3891 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
3892 name, data, nelements, ddi_prop_fm_encode_int64));
3896 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
3897 char *name, int64_t *data, uint_t nelements)
3899 return (ddi_prop_update_common(match_dev, dip,
3900 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
3901 name, data, nelements, ddi_prop_fm_encode_int64));
3905 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
3906 char *name, int *data, uint_t nelements)
3908 return (ddi_prop_update_common(match_dev, dip,
3909 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
3910 name, data, nelements, ddi_prop_fm_encode_ints));
3914 * Get a single string property.
3917 ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
3918 char *name, char **data)
3920 uint_t x;
3922 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3923 LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3924 #ifdef DEBUG
3925 if (dip != NULL) {
3926 cmn_err(CE_WARN, "%s: invalid flag 0x%x "
3927 "(prop = %s, node = %s%d); invalid bits ignored",
3928 "ddi_prop_lookup_string", flags, name,
3929 ddi_driver_name(dip), ddi_get_instance(dip));
3931 #endif /* DEBUG */
3932 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3933 LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3936 return (ddi_prop_lookup_common(match_dev, dip,
3937 (flags | DDI_PROP_TYPE_STRING), name, data,
3938 &x, ddi_prop_fm_decode_string));
3942 * Get an array of strings property.
3945 ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3946 char *name, char ***data, uint_t *nelements)
3948 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3949 LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3950 #ifdef DEBUG
3951 if (dip != NULL) {
3952 cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
3953 "invalid flag 0x%x (prop = %s, node = %s%d)",
3954 flags, name, ddi_driver_name(dip),
3955 ddi_get_instance(dip));
3957 #endif /* DEBUG */
3958 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3959 LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3962 return (ddi_prop_lookup_common(match_dev, dip,
3963 (flags | DDI_PROP_TYPE_STRING), name, data,
3964 nelements, ddi_prop_fm_decode_strings));
3968 * Update a single string property.
3971 ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
3972 char *name, char *data)
3974 return (ddi_prop_update_common(match_dev, dip,
3975 DDI_PROP_TYPE_STRING, name, &data, 1,
3976 ddi_prop_fm_encode_string));
3980 e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
3981 char *name, char *data)
3983 return (ddi_prop_update_common(match_dev, dip,
3984 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
3985 name, &data, 1, ddi_prop_fm_encode_string));
3990 * Update an array of strings property.
3993 ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
3994 char *name, char **data, uint_t nelements)
3996 return (ddi_prop_update_common(match_dev, dip,
3997 DDI_PROP_TYPE_STRING, name, data, nelements,
3998 ddi_prop_fm_encode_strings));
4002 e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4003 char *name, char **data, uint_t nelements)
4005 return (ddi_prop_update_common(match_dev, dip,
4006 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4007 name, data, nelements,
4008 ddi_prop_fm_encode_strings));
4013 * Get an array of bytes property.
4016 ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4017 char *name, uchar_t **data, uint_t *nelements)
4019 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4020 LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4021 #ifdef DEBUG
4022 if (dip != NULL) {
4023 cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: "
4024 " invalid flag 0x%x (prop = %s, node = %s%d)",
4025 flags, name, ddi_driver_name(dip),
4026 ddi_get_instance(dip));
4028 #endif /* DEBUG */
4029 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4030 LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4033 return (ddi_prop_lookup_common(match_dev, dip,
4034 (flags | DDI_PROP_TYPE_BYTE), name, data,
4035 nelements, ddi_prop_fm_decode_bytes));
4039 * Update an array of bytes property.
4042 ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4043 char *name, uchar_t *data, uint_t nelements)
4045 if (nelements == 0)
4046 return (DDI_PROP_INVAL_ARG);
4048 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE,
4049 name, data, nelements, ddi_prop_fm_encode_bytes));
4054 e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4055 char *name, uchar_t *data, uint_t nelements)
4057 if (nelements == 0)
4058 return (DDI_PROP_INVAL_ARG);
4060 return (ddi_prop_update_common(match_dev, dip,
4061 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE,
4062 name, data, nelements, ddi_prop_fm_encode_bytes));
4067 * ddi_prop_remove_common: Undefine a managed property:
4068 * Input dev_t must match dev_t when defined.
4069 * Returns DDI_PROP_NOT_FOUND, possibly.
4070 * DDI_PROP_INVAL_ARG is also possible if dev is
4071 * DDI_DEV_T_ANY or incoming name is the NULL string.
4074 ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
4076 ddi_prop_t **list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4077 ddi_prop_t *propp;
4078 ddi_prop_t *lastpropp = NULL;
4080 if ((dev == DDI_DEV_T_ANY) || (name == NULL) ||
4081 (strlen(name) == 0)) {
4082 return (DDI_PROP_INVAL_ARG);
4085 if (flag & DDI_PROP_SYSTEM_DEF)
4086 list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4087 else if (flag & DDI_PROP_HW_DEF)
4088 list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4090 mutex_enter(&(DEVI(dip)->devi_lock));
4092 for (propp = *list_head; propp != NULL; propp = propp->prop_next) {
4093 if (DDI_STRSAME(propp->prop_name, name) &&
4094 (dev == propp->prop_dev)) {
4096 * Unlink this propp allowing for it to
4097 * be first in the list:
4100 if (lastpropp == NULL)
4101 *list_head = propp->prop_next;
4102 else
4103 lastpropp->prop_next = propp->prop_next;
4105 mutex_exit(&(DEVI(dip)->devi_lock));
4108 * Free memory and return...
4110 kmem_free(propp->prop_name,
4111 strlen(propp->prop_name) + 1);
4112 if (propp->prop_len != 0)
4113 kmem_free(propp->prop_val, propp->prop_len);
4114 kmem_free(propp, sizeof (ddi_prop_t));
4115 return (DDI_PROP_SUCCESS);
4117 lastpropp = propp;
4119 mutex_exit(&(DEVI(dip)->devi_lock));
4120 return (DDI_PROP_NOT_FOUND);
4124 ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
4126 return (ddi_prop_remove_common(dev, dip, name, 0));
4130 e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
4132 return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
4136 * e_ddi_prop_list_delete: remove a list of properties
4137 * Note that the caller needs to provide the required protection
4138 * (eg. devi_lock if these properties are still attached to a devi)
4140 void
4141 e_ddi_prop_list_delete(ddi_prop_t *props)
4143 i_ddi_prop_list_delete(props);
4147 * ddi_prop_remove_all_common:
4148 * Used before unloading a driver to remove
4149 * all properties. (undefines all dev_t's props.)
4150 * Also removes `explicitly undefined' props.
4151 * No errors possible.
4153 void
4154 ddi_prop_remove_all_common(dev_info_t *dip, int flag)
4156 ddi_prop_t **list_head;
4158 mutex_enter(&(DEVI(dip)->devi_lock));
4159 if (flag & DDI_PROP_SYSTEM_DEF) {
4160 list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4161 } else if (flag & DDI_PROP_HW_DEF) {
4162 list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4163 } else {
4164 list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4166 i_ddi_prop_list_delete(*list_head);
4167 *list_head = NULL;
4168 mutex_exit(&(DEVI(dip)->devi_lock));
4173 * ddi_prop_remove_all: Remove all driver prop definitions.
4176 void
4177 ddi_prop_remove_all(dev_info_t *dip)
4179 i_ddi_prop_dyn_driver_set(dip, NULL);
4180 ddi_prop_remove_all_common(dip, 0);
4184 * e_ddi_prop_remove_all: Remove all system prop definitions.
4187 void
4188 e_ddi_prop_remove_all(dev_info_t *dip)
4190 ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
4195 * ddi_prop_undefine: Explicitly undefine a property. Property
4196 * searches which match this property return
4197 * the error code DDI_PROP_UNDEFINED.
4199 * Use ddi_prop_remove to negate effect of
4200 * ddi_prop_undefine
4202 * See above for error returns.
4206 ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4208 if (!(flag & DDI_PROP_CANSLEEP))
4209 flag |= DDI_PROP_DONTSLEEP;
4210 flag |= DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4211 return (ddi_prop_update_common(dev, dip, flag,
4212 name, NULL, 0, ddi_prop_fm_encode_bytes));
4216 e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4218 if (!(flag & DDI_PROP_CANSLEEP))
4219 flag |= DDI_PROP_DONTSLEEP;
4220 flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
4221 DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4222 return (ddi_prop_update_common(dev, dip, flag,
4223 name, NULL, 0, ddi_prop_fm_encode_bytes));
4227 * Support for gathering dynamic properties in devinfo snapshot.
4229 void
4230 i_ddi_prop_dyn_driver_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4232 DEVI(dip)->devi_prop_dyn_driver = dp;
4235 i_ddi_prop_dyn_t *
4236 i_ddi_prop_dyn_driver_get(dev_info_t *dip)
4238 return (DEVI(dip)->devi_prop_dyn_driver);
4241 void
4242 i_ddi_prop_dyn_parent_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4244 DEVI(dip)->devi_prop_dyn_parent = dp;
4247 i_ddi_prop_dyn_t *
4248 i_ddi_prop_dyn_parent_get(dev_info_t *dip)
4250 return (DEVI(dip)->devi_prop_dyn_parent);
4253 void
4254 i_ddi_prop_dyn_cache_invalidate(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4256 /* for now we invalidate the entire cached snapshot */
4257 if (dip && dp)
4258 i_ddi_di_cache_invalidate();
4261 /* ARGSUSED */
4262 void
4263 ddi_prop_cache_invalidate(dev_t dev, dev_info_t *dip, char *name, int flags)
4265 /* for now we invalidate the entire cached snapshot */
4266 i_ddi_di_cache_invalidate();
4271 * Code to search hardware layer (PROM), if it exists, on behalf of child.
4273 * if input dip != child_dip, then call is on behalf of child
4274 * to search PROM, do it via ddi_prop_search_common() and ascend only
4275 * if allowed.
4277 * if input dip == ch_dip (child_dip), call is on behalf of root driver,
4278 * to search for PROM defined props only.
4280 * Note that the PROM search is done only if the requested dev
4281 * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
4282 * have no associated dev, thus are automatically associated with
4283 * DDI_DEV_T_NONE.
4285 * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
4287 * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
4288 * that the property resides in the prom.
4291 impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4292 ddi_prop_op_t prop_op, int mod_flags,
4293 char *name, caddr_t valuep, int *lengthp)
4295 int len;
4296 caddr_t buffer;
4299 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
4300 * look in caller's PROM if it's a self identifying device...
4302 * Note that this is very similar to ddi_prop_op, but we
4303 * search the PROM instead of the s/w defined properties,
4304 * and we are called on by the parent driver to do this for
4305 * the child.
4308 if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
4309 ndi_dev_is_prom_node(ch_dip) &&
4310 ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
4311 len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name);
4312 if (len == -1) {
4313 return (DDI_PROP_NOT_FOUND);
4317 * If exists only request, we're done
4319 if (prop_op == PROP_EXISTS) {
4320 return (DDI_PROP_FOUND_1275);
4324 * If length only request or prop length == 0, get out
4326 if ((prop_op == PROP_LEN) || (len == 0)) {
4327 *lengthp = len;
4328 return (DDI_PROP_FOUND_1275);
4332 * Allocate buffer if required... (either way `buffer'
4333 * is receiving address).
4336 switch (prop_op) {
4338 case PROP_LEN_AND_VAL_ALLOC:
4340 buffer = kmem_alloc((size_t)len,
4341 mod_flags & DDI_PROP_CANSLEEP ?
4342 KM_SLEEP : KM_NOSLEEP);
4343 if (buffer == NULL) {
4344 return (DDI_PROP_NO_MEMORY);
4346 *(caddr_t *)valuep = buffer;
4347 break;
4349 case PROP_LEN_AND_VAL_BUF:
4351 if (len > (*lengthp)) {
4352 *lengthp = len;
4353 return (DDI_PROP_BUF_TOO_SMALL);
4356 buffer = valuep;
4357 break;
4359 default:
4360 break;
4364 * Call the PROM function to do the copy.
4366 (void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid,
4367 name, buffer);
4369 *lengthp = len; /* return the actual length to the caller */
4370 (void) impl_fix_props(dip, ch_dip, name, len, buffer);
4371 return (DDI_PROP_FOUND_1275);
4374 return (DDI_PROP_NOT_FOUND);
4378 * The ddi_bus_prop_op default bus nexus prop op function.
4380 * Code to search hardware layer (PROM), if it exists,
4381 * on behalf of child, then, if appropriate, ascend and check
4382 * my own software defined properties...
4385 ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4386 ddi_prop_op_t prop_op, int mod_flags,
4387 char *name, caddr_t valuep, int *lengthp)
4389 int error;
4391 error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
4392 name, valuep, lengthp);
4394 if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
4395 error == DDI_PROP_BUF_TOO_SMALL)
4396 return (error);
4398 if (error == DDI_PROP_NO_MEMORY) {
4399 cmn_err(CE_CONT, prop_no_mem_msg, name);
4400 return (DDI_PROP_NO_MEMORY);
4404 * Check the 'options' node as a last resort
4406 if ((mod_flags & DDI_PROP_DONTPASS) != 0)
4407 return (DDI_PROP_NOT_FOUND);
4409 if (ch_dip == ddi_root_node()) {
4411 * As a last resort, when we've reached
4412 * the top and still haven't found the
4413 * property, see if the desired property
4414 * is attached to the options node.
4416 * The options dip is attached right after boot.
4418 ASSERT(options_dip != NULL);
4420 * Force the "don't pass" flag to *just* see
4421 * what the options node has to offer.
4423 return (ddi_prop_search_common(dev, options_dip, prop_op,
4424 mod_flags|DDI_PROP_DONTPASS, name, valuep,
4425 (uint_t *)lengthp));
4429 * Otherwise, continue search with parent's s/w defined properties...
4430 * NOTE: Using `dip' in following call increments the level.
4433 return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
4434 name, valuep, (uint_t *)lengthp));
4438 * External property functions used by other parts of the kernel...
4442 * e_ddi_getlongprop: See comments for ddi_get_longprop.
4446 e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags,
4447 caddr_t valuep, int *lengthp)
4449 _NOTE(ARGUNUSED(type))
4450 dev_info_t *devi;
4451 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC;
4452 int error;
4454 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4455 return (DDI_PROP_NOT_FOUND);
4457 error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4458 ddi_release_devi(devi);
4459 return (error);
4463 * e_ddi_getlongprop_buf: See comments for ddi_getlongprop_buf.
4467 e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags,
4468 caddr_t valuep, int *lengthp)
4470 _NOTE(ARGUNUSED(type))
4471 dev_info_t *devi;
4472 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4473 int error;
4475 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4476 return (DDI_PROP_NOT_FOUND);
4478 error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4479 ddi_release_devi(devi);
4480 return (error);
4484 * e_ddi_getprop: See comments for ddi_getprop.
4487 e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue)
4489 _NOTE(ARGUNUSED(type))
4490 dev_info_t *devi;
4491 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4492 int propvalue = defvalue;
4493 int proplength = sizeof (int);
4494 int error;
4496 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4497 return (defvalue);
4499 error = cdev_prop_op(dev, devi, prop_op,
4500 flags, name, (caddr_t)&propvalue, &proplength);
4501 ddi_release_devi(devi);
4503 if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4504 propvalue = 1;
4506 return (propvalue);
4510 * e_ddi_getprop_int64:
4512 * This is a typed interfaces, but predates typed properties. With the
4513 * introduction of typed properties the framework tries to ensure
4514 * consistent use of typed interfaces. This is why TYPE_INT64 is not
4515 * part of TYPE_ANY. E_ddi_getprop_int64 is a special case where a
4516 * typed interface invokes legacy (non-typed) interfaces:
4517 * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)). In this case the
4518 * fact that TYPE_INT64 is not part of TYPE_ANY matters. To support
4519 * this type of lookup as a single operation we invoke the legacy
4520 * non-typed interfaces with the special CONSUMER_TYPED bit set. The
4521 * framework ddi_prop_op(9F) implementation is expected to check for
4522 * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
4523 * (currently TYPE_INT64).
4525 int64_t
4526 e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
4527 int flags, int64_t defvalue)
4529 _NOTE(ARGUNUSED(type))
4530 dev_info_t *devi;
4531 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4532 int64_t propvalue = defvalue;
4533 int proplength = sizeof (propvalue);
4534 int error;
4536 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4537 return (defvalue);
4539 error = cdev_prop_op(dev, devi, prop_op, flags |
4540 DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
4541 ddi_release_devi(devi);
4543 if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4544 propvalue = 1;
4546 return (propvalue);
4550 * e_ddi_getproplen: See comments for ddi_getproplen.
4553 e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp)
4555 _NOTE(ARGUNUSED(type))
4556 dev_info_t *devi;
4557 ddi_prop_op_t prop_op = PROP_LEN;
4558 int error;
4560 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4561 return (DDI_PROP_NOT_FOUND);
4563 error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp);
4564 ddi_release_devi(devi);
4565 return (error);
4569 * Routines to get at elements of the dev_info structure
4573 * ddi_binding_name: Return the driver binding name of the devinfo node
4574 * This is the name the OS used to bind the node to a driver.
4576 char *
4577 ddi_binding_name(dev_info_t *dip)
4579 return (DEVI(dip)->devi_binding_name);
4583 * ddi_driver_major: Return the major number of the driver that
4584 * the supplied devinfo is bound to. If not yet bound,
4585 * DDI_MAJOR_T_NONE.
4587 * When used by the driver bound to 'devi', this
4588 * function will reliably return the driver major number.
4589 * Other ways of determining the driver major number, such as
4590 * major = ddi_name_to_major(ddi_get_name(devi));
4591 * major = ddi_name_to_major(ddi_binding_name(devi));
4592 * can return a different result as the driver/alias binding
4593 * can change dynamically, and thus should be avoided.
4595 major_t
4596 ddi_driver_major(dev_info_t *devi)
4598 return (DEVI(devi)->devi_major);
4602 * ddi_driver_name: Return the normalized driver name. this is the
4603 * actual driver name
4605 const char *
4606 ddi_driver_name(dev_info_t *devi)
4608 major_t major;
4610 if ((major = ddi_driver_major(devi)) != DDI_MAJOR_T_NONE)
4611 return (ddi_major_to_name(major));
4613 return (ddi_node_name(devi));
4617 * i_ddi_set_binding_name: Set binding name.
4619 * Set the binding name to the given name.
4620 * This routine is for use by the ddi implementation, not by drivers.
4622 void
4623 i_ddi_set_binding_name(dev_info_t *dip, char *name)
4625 DEVI(dip)->devi_binding_name = name;
4630 * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
4631 * the implementation has used to bind the node to a driver.
4633 char *
4634 ddi_get_name(dev_info_t *dip)
4636 return (DEVI(dip)->devi_binding_name);
4640 * ddi_node_name: Return the name property of the devinfo node
4641 * This may differ from ddi_binding_name if the node name
4642 * does not define a binding to a driver (i.e. generic names).
4644 char *
4645 ddi_node_name(dev_info_t *dip)
4647 return (DEVI(dip)->devi_node_name);
4652 * ddi_get_nodeid: Get nodeid stored in dev_info structure.
4655 ddi_get_nodeid(dev_info_t *dip)
4657 return (DEVI(dip)->devi_nodeid);
4661 ddi_get_instance(dev_info_t *dip)
4663 return (DEVI(dip)->devi_instance);
4666 struct dev_ops *
4667 ddi_get_driver(dev_info_t *dip)
4669 return (DEVI(dip)->devi_ops);
4672 void
4673 ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
4675 DEVI(dip)->devi_ops = devo;
4679 * ddi_set_driver_private/ddi_get_driver_private:
4680 * Get/set device driver private data in devinfo.
4682 void
4683 ddi_set_driver_private(dev_info_t *dip, void *data)
4685 DEVI(dip)->devi_driver_data = data;
4688 void *
4689 ddi_get_driver_private(dev_info_t *dip)
4691 return (DEVI(dip)->devi_driver_data);
4695 * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
4698 dev_info_t *
4699 ddi_get_parent(dev_info_t *dip)
4701 return ((dev_info_t *)DEVI(dip)->devi_parent);
4704 dev_info_t *
4705 ddi_get_child(dev_info_t *dip)
4707 return ((dev_info_t *)DEVI(dip)->devi_child);
4710 dev_info_t *
4711 ddi_get_next_sibling(dev_info_t *dip)
4713 return ((dev_info_t *)DEVI(dip)->devi_sibling);
4716 dev_info_t *
4717 ddi_get_next(dev_info_t *dip)
4719 return ((dev_info_t *)DEVI(dip)->devi_next);
4722 void
4723 ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
4725 DEVI(dip)->devi_next = DEVI(nextdip);
4729 * ddi_root_node: Return root node of devinfo tree
4732 dev_info_t *
4733 ddi_root_node(void)
4735 extern dev_info_t *top_devinfo;
4737 return (top_devinfo);
4741 * Miscellaneous functions:
4745 * Implementation specific hooks
4748 void
4749 ddi_report_dev(dev_info_t *d)
4751 char *b;
4753 (void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, NULL, NULL);
4756 * If this devinfo node has cb_ops, it's implicitly accessible from
4757 * userland, so we print its full name together with the instance
4758 * number 'abbreviation' that the driver may use internally.
4760 if (DEVI(d)->devi_ops->devo_cb_ops != NULL &&
4761 (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
4762 cmn_err(CE_CONT, "?%s%d is %s\n",
4763 ddi_driver_name(d), ddi_get_instance(d),
4764 ddi_pathname(d, b));
4765 kmem_free(b, MAXPATHLEN);
4770 * ddi_ctlops() is described in the assembler not to buy a new register
4771 * window when it's called and can reduce cost in climbing the device tree
4772 * without using the tail call optimization.
4775 ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
4777 int ret;
4779 ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
4780 (void *)&rnumber, (void *)result);
4782 return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
4786 ddi_dev_nregs(dev_info_t *dev, int *result)
4788 return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
4792 ddi_dev_is_sid(dev_info_t *d)
4794 return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, NULL, NULL));
4798 ddi_slaveonly(dev_info_t *d)
4800 return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, NULL, NULL));
4804 ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
4806 return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, NULL));
4810 ddi_streams_driver(dev_info_t *dip)
4812 if (i_ddi_devi_attached(dip) &&
4813 (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
4814 (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
4815 return (DDI_SUCCESS);
4816 return (DDI_FAILURE);
4820 * callback free list
4823 static int ncallbacks;
4824 static int nc_low = 170;
4825 static int nc_med = 512;
4826 static int nc_high = 2048;
4827 static struct ddi_callback *callbackq;
4828 static struct ddi_callback *callbackqfree;
4831 * set/run callback lists
4833 struct cbstats {
4834 kstat_named_t cb_asked;
4835 kstat_named_t cb_new;
4836 kstat_named_t cb_run;
4837 kstat_named_t cb_delete;
4838 kstat_named_t cb_maxreq;
4839 kstat_named_t cb_maxlist;
4840 kstat_named_t cb_alloc;
4841 kstat_named_t cb_runouts;
4842 kstat_named_t cb_L2;
4843 kstat_named_t cb_grow;
4844 } cbstats = {
4845 {"asked", KSTAT_DATA_UINT32},
4846 {"new", KSTAT_DATA_UINT32},
4847 {"run", KSTAT_DATA_UINT32},
4848 {"delete", KSTAT_DATA_UINT32},
4849 {"maxreq", KSTAT_DATA_UINT32},
4850 {"maxlist", KSTAT_DATA_UINT32},
4851 {"alloc", KSTAT_DATA_UINT32},
4852 {"runouts", KSTAT_DATA_UINT32},
4853 {"L2", KSTAT_DATA_UINT32},
4854 {"grow", KSTAT_DATA_UINT32},
4857 #define nc_asked cb_asked.value.ui32
4858 #define nc_new cb_new.value.ui32
4859 #define nc_run cb_run.value.ui32
4860 #define nc_delete cb_delete.value.ui32
4861 #define nc_maxreq cb_maxreq.value.ui32
4862 #define nc_maxlist cb_maxlist.value.ui32
4863 #define nc_alloc cb_alloc.value.ui32
4864 #define nc_runouts cb_runouts.value.ui32
4865 #define nc_L2 cb_L2.value.ui32
4866 #define nc_grow cb_grow.value.ui32
4868 static kmutex_t ddi_callback_mutex;
4871 * callbacks are handled using a L1/L2 cache. The L1 cache
4872 * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
4873 * we can't get callbacks from the L1 cache [because pageout is doing
4874 * I/O at the time freemem is 0], we allocate callbacks out of the
4875 * L2 cache. The L2 cache is static and depends on the memory size.
4876 * [We might also count the number of devices at probe time and
4877 * allocate one structure per device and adjust for deferred attach]
4879 void
4880 impl_ddi_callback_init(void)
4882 int i;
4883 uint_t physmegs;
4884 kstat_t *ksp;
4886 physmegs = physmem >> (20 - PAGESHIFT);
4887 if (physmegs < 48) {
4888 ncallbacks = nc_low;
4889 } else if (physmegs < 128) {
4890 ncallbacks = nc_med;
4891 } else {
4892 ncallbacks = nc_high;
4896 * init free list
4898 callbackq = kmem_zalloc(
4899 ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
4900 for (i = 0; i < ncallbacks-1; i++)
4901 callbackq[i].c_nfree = &callbackq[i+1];
4902 callbackqfree = callbackq;
4904 /* init kstats */
4905 if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
4906 sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
4907 ksp->ks_data = (void *) &cbstats;
4908 kstat_install(ksp);
4913 static void
4914 callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
4915 int count)
4917 struct ddi_callback *list, *marker, *new;
4918 size_t size = sizeof (struct ddi_callback);
4920 list = marker = (struct ddi_callback *)*listid;
4921 while (list != NULL) {
4922 if (list->c_call == funcp && list->c_arg == arg) {
4923 list->c_count += count;
4924 return;
4926 marker = list;
4927 list = list->c_nlist;
4929 new = kmem_alloc(size, KM_NOSLEEP);
4930 if (new == NULL) {
4931 new = callbackqfree;
4932 if (new == NULL) {
4933 new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
4934 &size, KM_NOSLEEP | KM_PANIC);
4935 cbstats.nc_grow++;
4936 } else {
4937 callbackqfree = new->c_nfree;
4938 cbstats.nc_L2++;
4941 if (marker != NULL) {
4942 marker->c_nlist = new;
4943 } else {
4944 *listid = (uintptr_t)new;
4946 new->c_size = size;
4947 new->c_nlist = NULL;
4948 new->c_call = funcp;
4949 new->c_arg = arg;
4950 new->c_count = count;
4951 cbstats.nc_new++;
4952 cbstats.nc_alloc++;
4953 if (cbstats.nc_alloc > cbstats.nc_maxlist)
4954 cbstats.nc_maxlist = cbstats.nc_alloc;
4957 void
4958 ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
4960 mutex_enter(&ddi_callback_mutex);
4961 cbstats.nc_asked++;
4962 if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
4963 cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
4964 (void) callback_insert(funcp, arg, listid, 1);
4965 mutex_exit(&ddi_callback_mutex);
4968 static void
4969 real_callback_run(void *Queue)
4971 int (*funcp)(caddr_t);
4972 caddr_t arg;
4973 int count, rval;
4974 uintptr_t *listid;
4975 struct ddi_callback *list, *marker;
4976 int check_pending = 1;
4977 int pending = 0;
4979 do {
4980 mutex_enter(&ddi_callback_mutex);
4981 listid = Queue;
4982 list = (struct ddi_callback *)*listid;
4983 if (list == NULL) {
4984 mutex_exit(&ddi_callback_mutex);
4985 return;
4987 if (check_pending) {
4988 marker = list;
4989 while (marker != NULL) {
4990 pending += marker->c_count;
4991 marker = marker->c_nlist;
4993 check_pending = 0;
4995 ASSERT(pending > 0);
4996 ASSERT(list->c_count > 0);
4997 funcp = list->c_call;
4998 arg = list->c_arg;
4999 count = list->c_count;
5000 *(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
5001 if (list >= &callbackq[0] &&
5002 list <= &callbackq[ncallbacks-1]) {
5003 list->c_nfree = callbackqfree;
5004 callbackqfree = list;
5005 } else
5006 kmem_free(list, list->c_size);
5008 cbstats.nc_delete++;
5009 cbstats.nc_alloc--;
5010 mutex_exit(&ddi_callback_mutex);
5012 do {
5013 if ((rval = (*funcp)(arg)) == 0) {
5014 pending -= count;
5015 mutex_enter(&ddi_callback_mutex);
5016 (void) callback_insert(funcp, arg, listid,
5017 count);
5018 cbstats.nc_runouts++;
5019 } else {
5020 pending--;
5021 mutex_enter(&ddi_callback_mutex);
5022 cbstats.nc_run++;
5024 mutex_exit(&ddi_callback_mutex);
5025 } while (rval != 0 && (--count > 0));
5026 } while (pending > 0);
5029 void
5030 ddi_run_callback(uintptr_t *listid)
5032 softcall(real_callback_run, listid);
5036 * ddi_periodic_t
5037 * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval,
5038 * int level)
5040 * INTERFACE LEVEL
5041 * Solaris DDI specific (Solaris DDI)
5043 * PARAMETERS
5044 * func: the callback function
 *   The callback function is invoked in kernel context if the
 *   level argument is zero; otherwise it is invoked in interrupt
 *   context at the specified interrupt level.
5051 * arg: the argument passed to the callback function
5053 * interval: interval time
5055 * level : callback interrupt level
5057 * If the value is the zero, the callback function is invoked
5058 * in kernel context. If the value is more than the zero, but
5059 * less than or equal to ten, the callback function is invoked in
5060 * interrupt context at the specified interrupt level, which may
5061 * be used for real time applications.
5063 * This value must be in range of 0-10, which can be a numeric
5064 * number or a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10).
5066 * DESCRIPTION
5067 * ddi_periodic_add(9F) schedules the specified function to be
5068 * periodically invoked in the interval time.
5070 * As well as timeout(9F), the exact time interval over which the function
5071 * takes effect cannot be guaranteed, but the value given is a close
5072 * approximation.
5074 * Drivers waiting on behalf of processes with real-time constraints must
5075 * pass non-zero value with the level argument to ddi_periodic_add(9F).
5077 * RETURN VALUES
5078 * ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t),
5079 * which must be used for ddi_periodic_delete(9F) to specify the request.
5081 * CONTEXT
5082 * ddi_periodic_add(9F) can be called in user or kernel context, but
5083 * it cannot be called in interrupt context, which is different from
5084 * timeout(9F).
ddi_periodic_t
ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, int level)
{
	/*
	 * Sanity check of the argument level.
	 */
	if (level < DDI_IPL_0 || level > DDI_IPL_10)
		cmn_err(CE_PANIC,
		    "ddi_periodic_add: invalid interrupt level (%d).", level);

	/*
	 * Sanity check of the context. ddi_periodic_add() cannot be
	 * called in either interrupt context or high interrupt context.
	 */
	if (servicing_interrupt())
		cmn_err(CE_PANIC,
		    "ddi_periodic_add: called in (high) interrupt context.");

	/* i_timeout() implements the actual periodic dispatch. */
	return ((ddi_periodic_t)i_timeout(func, arg, interval, level));
}
5108 * void
5109 * ddi_periodic_delete(ddi_periodic_t req)
5111 * INTERFACE LEVEL
5112 * Solaris DDI specific (Solaris DDI)
5114 * PARAMETERS
5115 * req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned
5116 * previously.
5118 * DESCRIPTION
5119 * ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request
5120 * previously requested.
5122 * ddi_periodic_delete(9F) will not return until the pending request
5123 * is canceled or executed.
5125 * As well as untimeout(9F), calling ddi_periodic_delete(9F) for a
5126 * timeout which is either running on another CPU, or has already
5127 * completed causes no problems. However, unlike untimeout(9F), there are
5128 * no restrictions on the lock which might be held across the call to
5129 * ddi_periodic_delete(9F).
5131 * Drivers should be structured with the understanding that the arrival of
5132 * both an interrupt and a timeout for that interrupt can occasionally
5133 * occur, in either order.
5135 * CONTEXT
5136 * ddi_periodic_delete(9F) can be called in user or kernel context, but
5137 * it cannot be called in interrupt context, which is different from
5138 * untimeout(9F).
void
ddi_periodic_delete(ddi_periodic_t req)
{
	/*
	 * Sanity check of the context. ddi_periodic_delete() cannot be
	 * called in either interrupt context or high interrupt context.
	 */
	if (servicing_interrupt())
		cmn_err(CE_PANIC,
		    "ddi_periodic_delete: called in (high) interrupt context.");

	/* Cancel (or wait out) the request made by ddi_periodic_add(). */
	i_untimeout((timeout_t)req);
}
/*
 * Stub devinfo lookup: always reports "no devinfo node" for dev.
 */
dev_info_t *
nodevinfo(dev_t dev, int otyp)
{
	_NOTE(ARGUNUSED(dev, otyp))
	return ((dev_info_t *)0);
}
/*
 * A driver should support its own getinfo(9E) entry point. This function
 * is provided as a convenience for ON drivers that don't expect their
 * getinfo(9E) entry point to be called. A driver that uses this must not
 * call ddi_create_minor_node.
 */
int
ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	_NOTE(ARGUNUSED(dip, infocmd, arg, result))
	return (DDI_FAILURE);
}
/*
 * A driver should support its own getinfo(9E) entry point. This function
 * is provided as a convenience for ON drivers where the minor number
 * is the instance. Drivers that do not have 1:1 mapping must implement
 * their own getinfo(9E) function.
 */
int
ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
    void *arg, void **result)
{
	_NOTE(ARGUNUSED(dip))
	int instance;

	/* Only the dev_t -> instance translation is supported here. */
	if (infocmd != DDI_INFO_DEVT2INSTANCE)
		return (DDI_FAILURE);

	/* 1:1 mapping: the minor number is the instance number. */
	instance = getminor((dev_t)(uintptr_t)arg);
	*result = (void *)(uintptr_t)instance;
	return (DDI_SUCCESS);
}
/*
 * Stub attach(9E)/detach(9E) entry point that unconditionally fails.
 */
int
ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	_NOTE(ARGUNUSED(devi, cmd))
	return (DDI_FAILURE);
}
/*
 * Stub DMA bus op for nexi without DMA support: mapping always fails.
 */
int
ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
	return (DDI_DMA_NOMAPPING);
}
/*
 * Stub DMA bus op: handle allocation always rejected as a bad attribute.
 */
int
ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
	return (DDI_DMA_BADATTR);
}
/*
 * Stub DMA bus op: handle free always fails.
 */
int
ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}
/*
 * Stub DMA bus op: bind always fails with "no mapping possible".
 */
int
ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
	return (DDI_DMA_NOMAPPING);
}
/*
 * Stub DMA bus op: unbind always fails.
 */
int
ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}
/*
 * Stub DMA bus op: sync/flush always fails.
 */
int
ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
	return (DDI_FAILURE);
}
/*
 * Stub DMA bus op: window activation always fails.
 */
int
ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
	return (DDI_FAILURE);
}
/*
 * Stub DMA bus op: all miscellaneous control operations fail.
 */
int
ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
	return (DDI_FAILURE);
}
/*
 * Stub entry point that intentionally does nothing.
 */
void
ddivoid(void)
{
}
/*
 * Stub chpoll(9E) entry point for drivers without poll support.
 */
int
nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **pollhdrp)
{
	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
	return (ENXIO);
}
/*
 * Return the credential structure of the caller's context.
 */
cred_t *
ddi_get_cred(void)
{
	return (CRED());
}
/*
 * Return the current lbolt value (clock ticks since boot) as clock_t.
 */
clock_t
ddi_get_lbolt(void)
{
	return ((clock_t)lbolt_hybrid());
}
/*
 * 64-bit variant of ddi_get_lbolt(); no truncating cast.
 */
int64_t
ddi_get_lbolt64(void)
{
	return (lbolt_hybrid());
}
/*
 * Return the current wall-clock time in seconds.  If gethrestime_sec()
 * still reports 0 (presumably only very early in boot -- TODO confirm),
 * fall back to reading the time-of-day hardware under tod_lock.
 */
time_t
ddi_get_time(void)
{
	time_t	now;

	if ((now = gethrestime_sec()) == 0) {
		timestruc_t ts;
		mutex_enter(&tod_lock);
		ts = tod_get();
		mutex_exit(&tod_lock);
		return (ts.tv_sec);
	} else {
		return (now);
	}
}
/*
 * Return the PID of the process the current thread belongs to.
 */
pid_t
ddi_get_pid(void)
{
	return (ttoproc(curthread)->p_pid);
}
/*
 * Return the kernel thread ID of the current thread.
 */
kt_did_t
ddi_get_kt_did(void)
{
	return (curthread->t_did);
}
/*
 * This function returns B_TRUE if the caller can reasonably expect that a call
 * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
 * by user-level signal. If it returns B_FALSE, then the caller should use
 * other means to make certain that the wait will not hang "forever."
 *
 * It does not check the signal mask, nor for reception of any particular
 * signal.
 *
 * Currently, a thread can receive a signal if it's not a kernel thread and it
 * is not in the middle of exit(2) tear-down. Threads that are in that
 * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
 * cv_timedwait, and qwait_sig to qwait.
 */
boolean_t
ddi_can_receive_sig(void)
{
	proc_t *pp;

	/* An LWP already in exit() tear-down cannot take signals. */
	if (curthread->t_proc_flag & TP_LWPEXIT)
		return (B_FALSE);
	if ((pp = ttoproc(curthread)) == NULL)
		return (B_FALSE);
	/* Kernel processes (address space == kas) never receive signals. */
	return (pp->p_as != &kas);
}
/*
 * Swap bytes in 16-bit [half-]words: for each adjacent byte pair in
 * src, store the pair into dst with the two bytes exchanged.  A
 * trailing odd byte (nbytes not a multiple of 2) is ignored, as before.
 *
 * Fix: the pair count is now a size_t instead of an int, so byte
 * counts larger than 2 * INT_MAX no longer overflow the signed counter
 * (undefined behavior) on LP64 kernels.
 */
void
swab(void *src, void *dst, size_t nbytes)
{
	unsigned char *pf = (unsigned char *)src;
	unsigned char *pt = (unsigned char *)dst;
	unsigned char tmp;
	size_t nshorts = nbytes >> 1;	/* number of complete pairs */

	while (nshorts-- > 0) {
		tmp = *pf++;
		*pt++ = *pf++;
		*pt++ = tmp;
	}
}
5374 static void
5375 ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp)
5377 int circ;
5378 struct ddi_minor_data *dp;
5380 ndi_devi_enter(ddip, &circ);
5381 if ((dp = DEVI(ddip)->devi_minor) == NULL) {
5382 DEVI(ddip)->devi_minor = dmdp;
5383 } else {
5384 while (dp->next != NULL)
5385 dp = dp->next;
5386 dp->next = dmdp;
5388 ndi_devi_exit(ddip, circ);
/*
 * Part of the obsolete SunCluster DDI Hooks.
 * Keep for binary compatibility.  Simply the minor number of dev.
 */
minor_t
ddi_getiminor(dev_t dev)
{
	return (getminor(dev));
}
5401 static int
5402 i_log_devfs_minor_create(dev_info_t *dip, char *minor_name)
5404 int se_flag;
5405 int kmem_flag;
5406 int se_err;
5407 char *pathname, *class_name;
5408 sysevent_t *ev = NULL;
5409 sysevent_id_t eid;
5410 sysevent_value_t se_val;
5411 sysevent_attr_list_t *ev_attr_list = NULL;
5413 /* determine interrupt context */
5414 se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
5415 kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
5417 i_ddi_di_cache_invalidate();
5419 #ifdef DEBUG
5420 if ((se_flag == SE_NOSLEEP) && sunddi_debug) {
5421 cmn_err(CE_CONT, "ddi_create_minor_node: called from "
5422 "interrupt level by driver %s",
5423 ddi_driver_name(dip));
5425 #endif /* DEBUG */
5427 ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag);
5428 if (ev == NULL) {
5429 goto fail;
5432 pathname = kmem_alloc(MAXPATHLEN, kmem_flag);
5433 if (pathname == NULL) {
5434 sysevent_free(ev);
5435 goto fail;
5438 (void) ddi_pathname(dip, pathname);
5439 ASSERT(strlen(pathname));
5440 se_val.value_type = SE_DATA_TYPE_STRING;
5441 se_val.value.sv_string = pathname;
5442 if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5443 &se_val, se_flag) != 0) {
5444 kmem_free(pathname, MAXPATHLEN);
5445 sysevent_free(ev);
5446 goto fail;
5448 kmem_free(pathname, MAXPATHLEN);
5450 /* add the device class attribute */
5451 if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5452 se_val.value_type = SE_DATA_TYPE_STRING;
5453 se_val.value.sv_string = class_name;
5454 if (sysevent_add_attr(&ev_attr_list,
5455 DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5456 sysevent_free_attr(ev_attr_list);
5457 goto fail;
5462 * allow for NULL minor names
5464 if (minor_name != NULL) {
5465 se_val.value.sv_string = minor_name;
5466 if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5467 &se_val, se_flag) != 0) {
5468 sysevent_free_attr(ev_attr_list);
5469 sysevent_free(ev);
5470 goto fail;
5474 if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5475 sysevent_free_attr(ev_attr_list);
5476 sysevent_free(ev);
5477 goto fail;
5480 if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) {
5481 if (se_err == SE_NO_TRANSPORT) {
5482 cmn_err(CE_WARN, "/devices or /dev may not be current "
5483 "for driver %s (%s). Run devfsadm -i %s",
5484 ddi_driver_name(dip), "syseventd not responding",
5485 ddi_driver_name(dip));
5486 } else {
5487 sysevent_free(ev);
5488 goto fail;
5492 sysevent_free(ev);
5493 return (DDI_SUCCESS);
5494 fail:
5495 cmn_err(CE_WARN, "/devices or /dev may not be current "
5496 "for driver %s. Run devfsadm -i %s",
5497 ddi_driver_name(dip), ddi_driver_name(dip));
5498 return (DDI_SUCCESS);
/*
 * Post an EC_DEVFS / ESC_DEVFS_MINOR_REMOVE sysevent for a removed
 * minor node.  Failing to remove a minor node is not of interest,
 * therefore we do not generate an error message: every path returns
 * DDI_SUCCESS.
 */
static int
i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name)
{
	char *pathname, *class_name;
	sysevent_t *ev;
	sysevent_id_t eid;
	sysevent_value_t se_val;
	sysevent_attr_list_t *ev_attr_list = NULL;

	/*
	 * only log ddi_remove_minor_node() calls outside the scope
	 * of attach/detach reconfigurations and when the dip is
	 * still initialized.
	 */
	if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) ||
	    (i_ddi_node_state(dip) < DS_INITIALIZED)) {
		return (DDI_SUCCESS);
	}

	i_ddi_di_cache_invalidate();

	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP);
	if (ev == NULL) {
		return (DDI_SUCCESS);
	}

	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if (pathname == NULL) {
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}

	/* the /devices path of dip is the primary event attribute */
	(void) ddi_pathname(dip, pathname);
	ASSERT(strlen(pathname));
	se_val.value_type = SE_DATA_TYPE_STRING;
	se_val.value.sv_string = pathname;
	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
	    &se_val, SE_SLEEP) != 0) {
		kmem_free(pathname, MAXPATHLEN);
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}
	kmem_free(pathname, MAXPATHLEN);

	/*
	 * allow for NULL minor names
	 */
	if (minor_name != NULL) {
		se_val.value.sv_string = minor_name;
		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
		    &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}
	}

	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
		/* add the device class, driver name and instance attributes */

		se_val.value_type = SE_DATA_TYPE_STRING;
		se_val.value.sv_string = class_name;
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

		se_val.value_type = SE_DATA_TYPE_STRING;
		se_val.value.sv_string = (char *)ddi_driver_name(dip);
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

		se_val.value_type = SE_DATA_TYPE_INT32;
		se_val.value.sv_int32 = ddi_get_instance(dip);
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}
	}

	/* attach consumes ev_attr_list on success; free it on failure */
	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
		sysevent_free_attr(ev_attr_list);
	} else {
		(void) log_sysevent(ev, SE_SLEEP, &eid);
	}
fail:
	sysevent_free(ev);
	return (DDI_SUCCESS);
}
/*
 * Derive the device class of the node.
 * Device class names aren't defined yet. Until this is done we use
 * devfs event subclass names as device class names.
 *
 * Matches node_type against the known DDI node-type prefixes (exact
 * match or prefix followed by ':'), and records the corresponding
 * class via i_ddi_set_devi_class().  A node whose class is already
 * set, or whose type matches nothing, is left unchanged and the call
 * still succeeds.
 */
static int
derive_devi_class(dev_info_t *dip, char *node_type, int flag)
{
	int rv = DDI_SUCCESS;

	if (i_ddi_devi_class(dip) == NULL) {
		/* block devices (but not floppies) are class "disk" */
		if (strncmp(node_type, DDI_NT_BLOCK,
		    sizeof (DDI_NT_BLOCK) - 1) == 0 &&
		    (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' ||
		    node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') &&
		    strcmp(node_type, DDI_NT_FD) != 0) {

			rv = i_ddi_set_devi_class(dip, ESC_DISK, flag);

		} else if (strncmp(node_type, DDI_NT_NET,
		    sizeof (DDI_NT_NET) - 1) == 0 &&
		    (node_type[sizeof (DDI_NT_NET) - 1] == '\0' ||
		    node_type[sizeof (DDI_NT_NET) - 1] == ':')) {

			rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag);

		} else if (strncmp(node_type, DDI_NT_PRINTER,
		    sizeof (DDI_NT_PRINTER) - 1) == 0 &&
		    (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' ||
		    node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) {

			rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag);

		} else if (strncmp(node_type, DDI_PSEUDO,
		    sizeof (DDI_PSEUDO) -1) == 0 &&
		    (strncmp(ESC_LOFI, ddi_node_name(dip),
		    sizeof (ESC_LOFI) -1) == 0)) {
			/* pseudo nodes named "lofi" get the lofi class */
			rv = i_ddi_set_devi_class(dip, ESC_LOFI, flag);
		}
	}

	return (rv);
}
5646 * Check compliance with PSARC 2003/375:
5648 * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
5649 * exceed IFNAMSIZ (16) characters in length.
5651 static boolean_t
5652 verify_name(char *name)
5654 size_t len = strlen(name);
5655 char *cp;
5657 if (len == 0 || len > IFNAMSIZ)
5658 return (B_FALSE);
5660 for (cp = name; *cp != '\0'; cp++) {
5661 if (!isalnum(*cp) && *cp != '_')
5662 return (B_FALSE);
5665 return (B_TRUE);
/*
 * ddi_create_minor_common:	Create a ddi_minor_data structure and
 *				attach it to the given devinfo node.
 *
 * Shared implementation behind ddi_create_minor_node(),
 * ddi_create_priv_minor_node(), ddi_create_default_minor_node() and
 * ddi_create_internal_pathname(); 'mtype' distinguishes the callers.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
ddi_create_minor_common(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag, ddi_minor_type mtype,
    const char *read_priv, const char *write_priv, mode_t priv_mode)
{
	struct ddi_minor_data *dmdp;
	major_t major;

	/* only character and block special nodes are supported */
	if (spec_type != S_IFCHR && spec_type != S_IFBLK)
		return (DDI_FAILURE);

	if (name == NULL)
		return (DDI_FAILURE);

	/*
	 * Log a message if the minor number the driver is creating
	 * is not expressible on the on-disk filesystem (currently
	 * this is limited to 18 bits both by UFS). The device can
	 * be opened via devfs, but not by device special files created
	 * via mknod().
	 */
	if (minor_num > L_MAXMIN32) {
		cmn_err(CE_WARN,
		    "%s%d:%s minor 0x%x too big for 32-bit applications",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    name, minor_num);
		return (DDI_FAILURE);
	}

	/* dip must be bound and attached */
	major = ddi_driver_major(dip);
	ASSERT(major != DDI_MAJOR_T_NONE);

	/*
	 * Default node_type to DDI_PSEUDO and issue notice in debug mode
	 */
	if (node_type == NULL) {
		node_type = DDI_PSEUDO;
		NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
		    " minor node %s; default to DDI_PSEUDO",
		    ddi_driver_name(dip), ddi_get_instance(dip), name));
	}

	/*
	 * If the driver is a network driver, ensure that the name falls within
	 * the interface naming constraints specified by PSARC/2003/375.
	 */
	if (strcmp(node_type, DDI_NT_NET) == 0) {
		if (!verify_name(name))
			return (DDI_FAILURE);

		if (mtype == DDM_MINOR) {
			struct devnames *dnp = &devnamesp[major];

			/* Mark driver as a network driver */
			LOCK_DEV_OPS(&dnp->dn_lock);
			dnp->dn_flags |= DN_NETWORK_DRIVER;

			/*
			 * If this minor node is created during the device
			 * attachment, this is a physical network device.
			 * Mark the driver as a physical network driver.
			 */
			if (DEVI_IS_ATTACHING(dip))
				dnp->dn_flags |= DN_NETWORK_PHYSDRIVER;
			UNLOCK_DEV_OPS(&dnp->dn_lock);
		}
	}

	if (mtype == DDM_MINOR) {
		if (derive_devi_class(dip, node_type, KM_NOSLEEP) !=
		    DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	/*
	 * Take care of minor number information for the node.
	 */
	if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
	    KM_NOSLEEP)) == NULL) {
		return (DDI_FAILURE);
	}
	if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
		kmem_free(dmdp, sizeof (struct ddi_minor_data));
		return (DDI_FAILURE);
	}
	dmdp->dip = dip;
	dmdp->ddm_dev = makedevice(major, minor_num);
	dmdp->ddm_spec_type = spec_type;
	dmdp->ddm_node_type = node_type;
	dmdp->type = mtype;
	if (flag & CLONE_DEV) {
		/* clone devices route opens via the clone driver's major */
		dmdp->type = DDM_ALIAS;
		dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
	}
	if (flag & PRIVONLY_DEV) {
		dmdp->ddm_flags |= DM_NO_FSPERM;
	}
	if (read_priv || write_priv) {
		dmdp->ddm_node_priv =
		    devpolicy_priv_by_name(read_priv, write_priv);
	}
	dmdp->ddm_priv_mode = priv_mode;

	ddi_append_minor_node(dip, dmdp);

	/*
	 * only log ddi_create_minor_node() calls which occur
	 * outside the scope of attach(9e)/detach(9e) reconfigurations
	 */
	if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) &&
	    mtype != DDM_INTERNAL_PATH) {
		(void) i_log_devfs_minor_create(dip, name);
	}

	/*
	 * Check if any dacf rules match the creation of this minor node
	 */
	dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
	return (DDI_SUCCESS);
}
/*
 * Public wrapper: create an ordinary (DDM_MINOR) minor node with no
 * device privileges.
 */
int
ddi_create_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, NULL, NULL, 0));
}
/*
 * Public wrapper: create a minor node guarded by the named read/write
 * device privileges and the given privilege mode.
 */
int
ddi_create_priv_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag,
    const char *rdpriv, const char *wrpriv, mode_t priv_mode)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
}
/*
 * Public wrapper: create a DDM_DEFAULT minor node (the node opened when
 * no specific minor is named).
 */
int
ddi_create_default_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
}
/*
 * Internal (non-ddi) routine for drivers to export names known
 * to the kernel (especially ddi_pathname_to_dev_t and friends)
 * but not exported externally to /dev
 */
int
ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
}
/*
 * Remove the named minor node from dip (all of dip's minor nodes when
 * name is NULL), logging a devfs remove event for non-internal nodes
 * and releasing any per-node privilege and dacf data.
 */
void
ddi_remove_minor_node(dev_info_t *dip, char *name)
{
	int circ;
	struct ddi_minor_data *dmdp, *dmdp1;
	struct ddi_minor_data **dmdp_prev;

	ndi_devi_enter(dip, &circ);
	dmdp_prev = &DEVI(dip)->devi_minor;
	dmdp = DEVI(dip)->devi_minor;
	while (dmdp != NULL) {
		dmdp1 = dmdp->next;	/* save successor before freeing */
		if ((name == NULL || (dmdp->ddm_name != NULL &&
		    strcmp(name, dmdp->ddm_name) == 0))) {
			if (dmdp->ddm_name != NULL) {
				if (dmdp->type != DDM_INTERNAL_PATH)
					(void) i_log_devfs_minor_remove(dip,
					    dmdp->ddm_name);
				kmem_free(dmdp->ddm_name,
				    strlen(dmdp->ddm_name) + 1);
			}
			/*
			 * Release device privilege, if any.
			 * Release dacf client data associated with this minor
			 * node by storing NULL.
			 */
			if (dmdp->ddm_node_priv)
				dpfree(dmdp->ddm_node_priv);
			dacf_store_info((dacf_infohdl_t)dmdp, NULL);
			kmem_free(dmdp, sizeof (struct ddi_minor_data));
			*dmdp_prev = dmdp1;	/* unlink from the list */
			/*
			 * OK, we found it, so get out now -- if we drive on,
			 * we will strcmp against garbage. See 1139209.
			 */
			if (name != NULL)
				break;
		} else {
			dmdp_prev = &dmdp->next;
		}
		dmdp = dmdp1;
	}
	ndi_devi_exit(dip, circ);
}
5881 ddi_in_panic()
5883 return (panicstr != NULL);
/*
 * Find first bit set in a mask (returned counting from 1 up)
 *
 * NOTE(review): relies on the kernel's ffs() accepting a long-width
 * argument -- confirm against the local ffs() prototype.
 */
int
ddi_ffs(long mask)
{
	return (ffs(mask));
}
/*
 * Find last bit set. Take mask and clear
 * all but the most significant bit, and
 * then let ffs do the rest of the work.
 *
 * Algorithm courtesy of Steve Chessin.
 */
int
ddi_fls(long mask)
{
	/* repeatedly clear the lowest set bit until one bit remains */
	while (mask) {
		long nx;

		if ((nx = (mask & (mask - 1))) == 0)
			break;
		mask = nx;
	}
	return (ffs(mask));
}
5919 * The ddi_soft_state_* routines comprise generic storage management utilities
5920 * for driver soft state structures (in "the old days," this was done with
5921 * statically sized array - big systems and dynamic loading and unloading
5922 * make heap allocation more attractive).
5926 * Allocate a set of pointers to 'n_items' objects of size 'size'
5927 * bytes. Each pointer is initialized to nil.
5929 * The 'size' and 'n_items' values are stashed in the opaque
5930 * handle returned to the caller.
5932 * This implementation interprets 'set of pointers' to mean 'array
5933 * of pointers' but note that nothing in the interface definition
5934 * precludes an implementation that uses, for example, a linked list.
5935 * However there should be a small efficiency gain from using an array
5936 * at lookup time.
5938 * NOTE As an optimization, we make our growable array allocations in
5939 * powers of two (bytes), since that's how much kmem_alloc (currently)
5940 * gives us anyway. It should save us some free/realloc's ..
5942 * As a further optimization, we make the growable array start out
5943 * with MIN_N_ITEMS in it.
#define	MIN_N_ITEMS	8	/* 8 void *'s == 32 bytes */

/*
 * Initialize a soft-state set: allocate the handle and an initial
 * pointer array holding at least n_items slots (rounded up to a power
 * of two, minimum MIN_N_ITEMS).  Returns 0 or EINVAL on bad arguments.
 */
int
ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
{
	i_ddi_soft_state *ss;

	if (state_p == NULL || size == 0)
		return (EINVAL);

	ss = kmem_zalloc(sizeof (*ss), KM_SLEEP);
	mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL);
	ss->size = size;

	if (n_items < MIN_N_ITEMS)
		ss->n_items = MIN_N_ITEMS;
	else {
		int bitlog;

		/*
		 * fls == ffs means n_items is already a power of two;
		 * otherwise round up to the next power of two.
		 */
		if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items))
			bitlog--;
		ss->n_items = 1 << bitlog;
	}

	ASSERT(ss->n_items >= n_items);

	ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP);

	*state_p = ss;
	return (0);
}
/*
 * Allocate a state structure of size 'size' to be associated
 * with item 'item'.
 *
 * In this implementation, the array is extended to
 * allow the requested offset, if needed.
 */
int
ddi_soft_state_zalloc(void *state, int item)
{
	i_ddi_soft_state *ss = (i_ddi_soft_state *)state;
	void **array;
	void *new_element;

	if ((state == NULL) || (item < 0))
		return (DDI_FAILURE);

	mutex_enter(&ss->lock);
	if (ss->size == 0) {
		mutex_exit(&ss->lock);
		cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
		    mod_containing_pc(caller()));
		return (DDI_FAILURE);
	}

	array = ss->array;	/* NULL if ss->n_items == 0 */
	ASSERT(ss->n_items != 0 && array != NULL);

	/*
	 * refuse to tread on an existing element
	 */
	if (item < ss->n_items && array[item] != NULL) {
		mutex_exit(&ss->lock);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate a new element to plug in
	 */
	new_element = kmem_zalloc(ss->size, KM_SLEEP);

	/*
	 * Check if the array is big enough, if not, grow it.
	 */
	if (item >= ss->n_items) {
		void **new_array;
		size_t new_n_items;
		struct i_ddi_soft_state *dirty;

		/*
		 * Allocate a new array of the right length, copy
		 * all the old pointers to the new array, then
		 * if it exists at all, put the old array on the
		 * dirty list.
		 *
		 * Note that we can't kmem_free() the old array.
		 *
		 * Why -- well the 'get' operation is 'mutex-free', so we
		 * can't easily catch a suspended thread that is just about
		 * to dereference the array we just grew out of. So we
		 * cons up a header and put it on a list of 'dirty'
		 * pointer arrays. (Dirty in the sense that there may
		 * be suspended threads somewhere that are in the middle
		 * of referencing them). Fortunately, we -can- garbage
		 * collect it all at ddi_soft_state_fini time.
		 */
		new_n_items = ss->n_items;
		while (new_n_items < (1 + item))
			new_n_items <<= 1;	/* double array size .. */

		ASSERT(new_n_items >= (1 + item));	/* sanity check! */

		new_array = kmem_zalloc(new_n_items * sizeof (void *),
		    KM_SLEEP);
		/*
		 * Copy the pointers into the new array
		 */
		bcopy(array, new_array, ss->n_items * sizeof (void *));

		/*
		 * Save the old array on the dirty list
		 */
		dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
		dirty->array = ss->array;
		dirty->n_items = ss->n_items;
		dirty->next = ss->next;
		ss->next = dirty;

		ss->array = (array = new_array);
		ss->n_items = new_n_items;
	}

	ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);

	array[item] = new_element;

	mutex_exit(&ss->lock);
	return (DDI_SUCCESS);
}
/*
 * Fetch a pointer to the allocated soft state structure.
 *
 * This is designed to be cheap.
 *
 * There's an argument that there should be more checking for
 * nil pointers and out of bounds on the array.. but we do a lot
 * of that in the alloc/free routines.
 *
 * An array has the convenience that we don't need to lock read-access
 * to it c.f. a linked list. However our "expanding array" strategy
 * means that we should hold a readers lock on the i_ddi_soft_state
 * structure.
 *
 * However, from a performance viewpoint, we need to do it without
 * any locks at all -- this also makes it a leaf routine. The algorithm
 * is 'lock-free' because we only discard the pointer arrays at
 * ddi_soft_state_fini() time.
 */
void *
ddi_get_soft_state(void *state, int item)
{
	i_ddi_soft_state *ss = (i_ddi_soft_state *)state;

	ASSERT((ss != NULL) && (item >= 0));

	if (item < ss->n_items && ss->array != NULL)
		return (ss->array[item]);
	return (NULL);
}
/*
 * Free the state structure corresponding to 'item.' Freeing an
 * element that has either gone or was never allocated is not
 * considered an error. Note that we free the state structure, but
 * we don't shrink our pointer array, or discard 'dirty' arrays,
 * since even a few pointers don't really waste too much memory.
 *
 * Passing an item number that is out of bounds, or a null pointer will
 * provoke an error message.
 */
void
ddi_soft_state_free(void *state, int item)
{
	i_ddi_soft_state *ss = (i_ddi_soft_state *)state;
	void **array;
	void *element;
	static char msg[] = "ddi_soft_state_free:";

	if (ss == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	element = NULL;

	mutex_enter(&ss->lock);

	if ((array = ss->array) == NULL || ss->size == 0) {
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
	} else if (item < 0 || item >= ss->n_items) {
		cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
		    msg, item, ss->n_items - 1, mod_containing_pc(caller()));
	} else if (array[item] != NULL) {
		/* detach under the lock, free after dropping it */
		element = array[item];
		array[item] = NULL;
	}

	mutex_exit(&ss->lock);

	if (element)
		kmem_free(element, ss->size);
}
/*
 * Free the entire set of pointers, and any
 * soft state structures contained therein.
 *
 * Note that we don't grab the ss->lock mutex, even though
 * we're inspecting the various fields of the data structure.
 *
 * There is an implicit assumption that this routine will
 * never run concurrently with any of the above on this
 * particular state structure i.e. by the time the driver
 * calls this routine, there should be no other threads
 * running in the driver.
 */
void
ddi_soft_state_fini(void **state_p)
{
	i_ddi_soft_state *ss, *dirty;
	int item;
	static char msg[] = "ddi_soft_state_fini:";

	if (state_p == NULL ||
	    (ss = (i_ddi_soft_state *)(*state_p)) == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	if (ss->size == 0) {
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	/* free every remaining element, then the live pointer array */
	if (ss->n_items > 0) {
		for (item = 0; item < ss->n_items; item++)
			ddi_soft_state_free(ss, item);
		kmem_free(ss->array, ss->n_items * sizeof (void *));
	}

	/*
	 * Now delete any dirty arrays from previous 'grow' operations
	 */
	for (dirty = ss->next; dirty; dirty = ss->next) {
		ss->next = dirty->next;
		kmem_free(dirty->array, dirty->n_items * sizeof (void *));
		kmem_free(dirty, sizeof (*dirty));
	}

	mutex_destroy(&ss->lock);
	kmem_free(ss, sizeof (*ss));

	*state_p = NULL;
}
#define	SS_N_ITEMS_PER_HASH	16
#define	SS_MIN_HASH_SZ		16
#define	SS_MAX_HASH_SZ		4096

/*
 * String-keyed variant of the soft-state utilities: initialize a pool
 * of 'size'-byte elements keyed by string, sized for about n_items
 * entries.  Returns 0 or EINVAL on bad arguments.
 */
int
ddi_soft_state_bystr_init(ddi_soft_state_bystr **state_p, size_t size,
    int n_items)
{
	i_ddi_soft_state_bystr	*sss;
	int			hash_sz;

	ASSERT(state_p && size && n_items);
	if ((state_p == NULL) || (size == 0) || (n_items == 0))
		return (EINVAL);

	/* current implementation is based on hash, convert n_items to hash */
	hash_sz = n_items / SS_N_ITEMS_PER_HASH;
	if (hash_sz < SS_MIN_HASH_SZ)
		hash_sz = SS_MIN_HASH_SZ;
	else if (hash_sz > SS_MAX_HASH_SZ)
		hash_sz = SS_MAX_HASH_SZ;

	/* allocate soft_state pool */
	sss = kmem_zalloc(sizeof (*sss), KM_SLEEP);
	sss->ss_size = size;
	sss->ss_mod_hash = mod_hash_create_strhash("soft_state_bystr",
	    hash_sz, mod_hash_null_valdtor);
	*state_p = (ddi_soft_state_bystr *)sss;
	return (0);
}
/*
 * ddi_soft_state_bystr_zalloc: allocate a zeroed 'ss_size' item and
 * insert it under a private copy of 'str'.  Returns DDI_SUCCESS, or
 * DDI_FAILURE on bad arguments or a duplicate key.  Sleeps (KM_SLEEP).
 */
6240 ddi_soft_state_bystr_zalloc(ddi_soft_state_bystr *state, const char *str)
6242 i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state;
6243 void *sso;
6244 char *dup_str;
6246 ASSERT(sss && str && sss->ss_mod_hash);
6247 if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6248 return (DDI_FAILURE);
6249 sso = kmem_zalloc(sss->ss_size, KM_SLEEP);
/* The hash owns dup_str on success; it is freed by the string dtor. */
6250 dup_str = i_ddi_strdup((char *)str, KM_SLEEP);
6251 if (mod_hash_insert(sss->ss_mod_hash,
6252 (mod_hash_key_t)dup_str, (mod_hash_val_t)sso) == 0)
6253 return (DDI_SUCCESS);
6256 * The only error from an strhash insert is caused by a duplicate key.
6257 * We refuse to tread on an existing elements, so free and fail.
6259 kmem_free(dup_str, strlen(dup_str) + 1);
6260 kmem_free(sso, sss->ss_size);
6261 return (DDI_FAILURE);
/*
 * ddi_soft_state_bystr_get: look up the item stored under 'str'.
 * Returns the item pointer, or NULL on bad arguments or no match.
 * Read-only with respect to the set; the item remains owned by it.
 */
6264 void *
6265 ddi_soft_state_bystr_get(ddi_soft_state_bystr *state, const char *str)
6267 i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state;
6268 void *sso;
6270 ASSERT(sss && str && sss->ss_mod_hash);
6271 if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6272 return (NULL);
6274 if (mod_hash_find(sss->ss_mod_hash,
6275 (mod_hash_key_t)str, (mod_hash_val_t *)&sso) == 0)
6276 return (sso);
6277 return (NULL);
6280 void
6281 ddi_soft_state_bystr_free(ddi_soft_state_bystr *state, const char *str)
6283 i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state;
6284 void *sso;
6286 ASSERT(sss && str && sss->ss_mod_hash);
6287 if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6288 return;
6290 (void) mod_hash_remove(sss->ss_mod_hash,
6291 (mod_hash_key_t)str, (mod_hash_val_t *)&sso);
6292 kmem_free(sso, sss->ss_size);
/*
 * ddi_soft_state_bystr_fini: destroy a string-indexed soft-state set.
 * Destroys the backing strhash (freeing the duplicated key strings via
 * its destructor) and the set structure; *state_p is cleared.
 * NOTE(review): items still present in the hash are not individually
 * kmem_free'd here — presumably callers free all items first; confirm.
 */
6295 void
6296 ddi_soft_state_bystr_fini(ddi_soft_state_bystr **state_p)
6298 i_ddi_soft_state_bystr *sss;
6300 ASSERT(state_p);
6301 if (state_p == NULL)
6302 return;
6304 sss = (i_ddi_soft_state_bystr *)(*state_p);
6305 if (sss == NULL)
6306 return;
6308 ASSERT(sss->ss_mod_hash);
6309 if (sss->ss_mod_hash) {
6310 mod_hash_destroy_strhash(sss->ss_mod_hash);
6311 sss->ss_mod_hash = NULL;
6314 kmem_free(sss, sizeof (*sss));
6315 *state_p = NULL;
6319 * The ddi_strid_* routines provide string-to-index management utilities.
6321 /* allocate and initialize an strid set */
/*
 * ddi_strid_init: create a string<->id mapping set.  'n_items' sizes
 * both the initial id space and (scaled) the two hashes; the id space
 * is grown in 'n_items'-sized chunks on demand (see i_ddi_strid_alloc).
 * Returns DDI_SUCCESS, or DDI_FAILURE if strid_p is NULL.  Sleeps.
 */
6323 ddi_strid_init(ddi_strid **strid_p, int n_items)
6325 i_ddi_strid *ss;
6326 int hash_sz;
6328 if (strid_p == NULL)
6329 return (DDI_FAILURE);
6331 /* current implementation is based on hash, convert n_items to hash */
6332 hash_sz = n_items / SS_N_ITEMS_PER_HASH;
6333 if (hash_sz < SS_MIN_HASH_SZ)
6334 hash_sz = SS_MIN_HASH_SZ;
6335 else if (hash_sz > SS_MAX_HASH_SZ)
6336 hash_sz = SS_MAX_HASH_SZ;
6338 ss = kmem_alloc(sizeof (*ss), KM_SLEEP);
6339 ss->strid_chunksz = n_items;
6340 ss->strid_spacesz = n_items;
/* ids start at 1 so that 0 can act as the "no id" failure value */
6341 ss->strid_space = id_space_create("strid", 1, n_items);
6342 ss->strid_bystr = mod_hash_create_strhash("strid_bystr", hash_sz,
6343 mod_hash_null_valdtor);
6344 ss->strid_byid = mod_hash_create_idhash("strid_byid", hash_sz,
6345 mod_hash_null_valdtor);
6346 *strid_p = (ddi_strid *)ss;
6347 return (DDI_SUCCESS);
6350 /* allocate an id mapping within the specified set for str, return id */
/*
 * i_ddi_strid_alloc: internal worker for ddi_strid_alloc().  Allocates
 * an id for 'str' and records it in both hashes.  Returns the id (> 0)
 * on success, 0 on bad arguments or failure.  The duplicated string is
 * the bystr key AND the byid value — see the invariant comment below.
 */
6351 static id_t
6352 i_ddi_strid_alloc(ddi_strid *strid, char *str)
6354 i_ddi_strid *ss = (i_ddi_strid *)strid;
6355 id_t id;
6356 char *s;
6358 ASSERT(ss && str);
6359 if ((ss == NULL) || (str == NULL))
6360 return (0);
6363 * Allocate an id using VM_FIRSTFIT in order to keep allocated id
6364 * range as compressed as possible. This is important to minimize
6365 * the amount of space used when the id is used as a ddi_soft_state
6366 * index by the caller.
6368 * If the id list is exhausted, increase the size of the list
6369 * by the chuck size specified in ddi_strid_init and reattempt
6370 * the allocation
6372 if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1) {
6373 id_space_extend(ss->strid_space, ss->strid_spacesz,
6374 ss->strid_spacesz + ss->strid_chunksz);
6375 ss->strid_spacesz += ss->strid_chunksz;
6376 if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1)
6377 return (0);
6381 * NOTE: since we create and destroy in unison we can save space by
6382 * using bystr key as the byid value. This means destroy must occur
6383 * in (byid, bystr) order.
6385 s = i_ddi_strdup(str, KM_SLEEP);
/* First insert can only fail on a duplicate str; back out the id. */
6386 if (mod_hash_insert(ss->strid_bystr, (mod_hash_key_t)s,
6387 (mod_hash_val_t)(intptr_t)id) != 0) {
6388 ddi_strid_free(strid, id);
6389 return (0);
/* Second insert failure: ddi_strid_free undoes the bystr entry too. */
6391 if (mod_hash_insert(ss->strid_byid, (mod_hash_key_t)(intptr_t)id,
6392 (mod_hash_val_t)s) != 0) {
6393 ddi_strid_free(strid, id);
6394 return (0);
6397 /* NOTE: s if freed on mod_hash_destroy by mod_hash_strval_dtor */
6398 return (id);
6401 /* allocate an id mapping within the specified set for str, return id */
/* Public wrapper: returns the new id (> 0) or 0 on failure. */
6402 id_t
6403 ddi_strid_alloc(ddi_strid *strid, char *str)
6405 return (i_ddi_strid_alloc(strid, str));
6408 /* return the id within the specified strid given the str */
/* Lookup only; returns 0 when the set/str is NULL or str is unmapped. */
6409 id_t
6410 ddi_strid_str2id(ddi_strid *strid, char *str)
6412 i_ddi_strid *ss = (i_ddi_strid *)strid;
6413 id_t id = 0;
6414 mod_hash_val_t hv;
6416 ASSERT(ss && str);
6417 if (ss && str && (mod_hash_find(ss->strid_bystr,
6418 (mod_hash_key_t)str, &hv) == 0))
6419 id = (int)(intptr_t)hv;
6420 return (id);
6423 /* return str within the specified strid given the id */
/*
 * Reverse lookup: returns the string mapped to 'id', or NULL when the
 * set is NULL, id is invalid, or no mapping exists.  The returned
 * string is owned by the set — callers must not free it, and it lives
 * only until ddi_strid_free(id) / ddi_strid_fini().
 */
6424 char *
6425 ddi_strid_id2str(ddi_strid *strid, id_t id)
6427 i_ddi_strid *ss = (i_ddi_strid *)strid;
6428 char *str = NULL;
6429 mod_hash_val_t hv;
6431 ASSERT(ss && id > 0);
6432 if (ss && (id > 0) && (mod_hash_find(ss->strid_byid,
6433 (mod_hash_key_t)(uintptr_t)id, &hv) == 0))
6434 str = (char *)hv;
6435 return (str);
6438 /* free the id mapping within the specified strid */
/*
 * ddi_strid_free: drop the mapping for 'id' and return the id to the
 * id space.  Because the bystr key doubles as the byid value, the byid
 * entry must be destroyed first (it does not own the string), then the
 * bystr entry (whose destructor frees the string).
 */
6439 void
6440 ddi_strid_free(ddi_strid *strid, id_t id)
6442 i_ddi_strid *ss = (i_ddi_strid *)strid;
6443 char *str;
6445 ASSERT(ss && id > 0);
6446 if ((ss == NULL) || (id <= 0))
6447 return;
6449 /* bystr key is byid value: destroy order must be (byid, bystr) */
6450 str = ddi_strid_id2str(strid, id);
6451 (void) mod_hash_destroy(ss->strid_byid, (mod_hash_key_t)(uintptr_t)id);
6452 id_free(ss->strid_space, id);
/* str may be NULL if only the bystr insert succeeded during alloc */
6454 if (str)
6455 (void) mod_hash_destroy(ss->strid_bystr, (mod_hash_key_t)str);
6458 /* destroy the strid set */
6459 void
6460 ddi_strid_fini(ddi_strid **strid_p)
6462 i_ddi_strid *ss;
6464 ASSERT(strid_p);
6465 if (strid_p == NULL)
6466 return;
6468 ss = (i_ddi_strid *)(*strid_p);
6469 if (ss == NULL)
6470 return;
6472 /* bystr key is byid value: destroy order must be (byid, bystr) */
6473 if (ss->strid_byid)
6474 mod_hash_destroy_hash(ss->strid_byid);
6475 if (ss->strid_byid)
6476 mod_hash_destroy_hash(ss->strid_bystr);
6477 if (ss->strid_space)
6478 id_space_destroy(ss->strid_space);
6479 kmem_free(ss, sizeof (*ss));
6480 *strid_p = NULL;
6484 * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
6485 * Storage is double buffered to prevent updates during devi_addr use -
6486 * double buffering is adaquate for reliable ddi_deviname() consumption.
6487 * The double buffer is not freed until dev_info structure destruction
6488 * (by i_ddi_free_node).
6490 void
6491 ddi_set_name_addr(dev_info_t *dip, char *name)
6493 char *buf = DEVI(dip)->devi_addr_buf;
6494 char *newaddr;
/* Lazily allocate the two MAXNAMELEN halves on first use. */
6496 if (buf == NULL) {
6497 buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP);
6498 DEVI(dip)->devi_addr_buf = buf;
/* Copy into whichever half is NOT currently published via devi_addr. */
6501 if (name) {
6502 ASSERT(strlen(name) < MAXNAMELEN);
6503 newaddr = (DEVI(dip)->devi_addr == buf) ?
6504 (buf + MAXNAMELEN) : buf;
6505 (void) strlcpy(newaddr, name, MAXNAMELEN);
6506 } else
6507 newaddr = NULL;
/* Publish the new address (NULL clears it). */
6509 DEVI(dip)->devi_addr = newaddr;
/* Return the current unit-address set by ddi_set_name_addr() (may be NULL). */
6512 char *
6513 ddi_get_name_addr(dev_info_t *dip)
6515 return (DEVI(dip)->devi_addr);
/* Stash an opaque parent-private pointer on the node. */
6518 void
6519 ddi_set_parent_data(dev_info_t *dip, void *pd)
6521 DEVI(dip)->devi_parent_data = pd;
/* Retrieve the opaque parent-private pointer (may be NULL). */
6524 void *
6525 ddi_get_parent_data(dev_info_t *dip)
6527 return (DEVI(dip)->devi_parent_data);
6531 * ddi_name_to_major: returns the major number of a named module,
6532 * derived from the current driver alias binding.
6534 * Caveat: drivers should avoid the use of this function, in particular
6535 * together with ddi_get_name/ddi_binding name, as per
6536 * major = ddi_name_to_major(ddi_get_name(devi));
6537 * ddi_name_to_major() relies on the state of the device/alias binding,
6538 * which can and does change dynamically as aliases are administered
6539 * over time. An attached device instance cannot rely on the major
6540 * number returned by ddi_name_to_major() to match its own major number.
6542 * For driver use, ddi_driver_major() reliably returns the major number
6543 * for the module to which the device was bound at attach time over
6544 * the life of the instance.
6545 * major = ddi_driver_major(dev_info_t *)
/* Thin wrapper over the modctl name->major lookup. */
6547 major_t
6548 ddi_name_to_major(char *name)
6550 return (mod_name_to_major(name));
6554 * ddi_major_to_name: Returns the module name bound to a major number.
/* Thin wrapper over the modctl major->name lookup (may return NULL). */
6556 char *
6557 ddi_major_to_name(major_t major)
6559 return (mod_major_to_name(major));
6563 * Return the name of the devinfo node pointed at by 'dip' in the buffer
6564 * pointed at by 'name.' A devinfo node is named as a result of calling
6565 * ddi_initchild().
6567 * Note: the driver must be held before calling this function!
/*
 * NOTE(review): writes "/<node>[@<addr>]" into 'name' with sprintf —
 * no bound is enforced; the caller must supply a sufficiently large
 * buffer (presumably MAXNAMELEN-sized components, per callers such as
 * pathname_work() using MAXPATHLEN paths).
 */
6569 char *
6570 ddi_deviname(dev_info_t *dip, char *name)
6572 char *addrname;
6573 char none = '\0';
/* Root node has the empty name. */
6575 if (dip == ddi_root_node()) {
6576 *name = '\0';
6577 return (name);
6580 if (i_ddi_node_state(dip) < DS_BOUND) {
6581 addrname = &none;
6582 } else {
6584 * Use ddi_get_name_addr() without checking state so we get
6585 * a unit-address if we are called after ddi_set_name_addr()
6586 * by nexus DDI_CTL_INITCHILD code, but before completing
6587 * node promotion to DS_INITIALIZED. We currently have
6588 * two situations where we are called in this state:
6589 * o For framework processing of a path-oriented alias.
6590 * o If a SCSA nexus driver calls ddi_devid_register()
6591 * from it's tran_tgt_init(9E) implementation.
6593 addrname = ddi_get_name_addr(dip);
6594 if (addrname == NULL)
6595 addrname = &none;
/* Emit "/name" when there is no unit-address, "/name@addr" otherwise. */
6598 if (*addrname == '\0') {
6599 (void) sprintf(name, "/%s", ddi_node_name(dip));
6600 } else {
6601 (void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname);
6604 return (name);
6608 * Spits out the name of device node, typically name@addr, for a given node,
6609 * using the driver name, not the nodename.
6611 * Used by match_parent. Not to be used elsewhere.
/* Node must be at least DS_INITIALIZED so the unit-address exists. */
6613 char *
6614 i_ddi_parname(dev_info_t *dip, char *name)
6616 char *addrname;
6618 if (dip == ddi_root_node()) {
6619 *name = '\0';
6620 return (name);
6623 ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);
/* "binding-name" alone when the unit-address is empty, else "@addr". */
6625 if (*(addrname = ddi_get_name_addr(dip)) == '\0')
6626 (void) sprintf(name, "%s", ddi_binding_name(dip));
6627 else
6628 (void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname);
6629 return (name);
/*
 * pathname_work: recursive helper for ddi_pathname().  Builds the path
 * root-first by recursing to the parent, then appending this node's
 * "/name@addr" component via ddi_deviname().  'path' must be able to
 * hold the full path (callers use MAXPATHLEN buffers).
 */
6632 static char *
6633 pathname_work(dev_info_t *dip, char *path)
6635 char *bp;
6637 if (dip == ddi_root_node()) {
6638 *path = '\0';
6639 return (path);
6641 (void) pathname_work(ddi_get_parent(dip), path);
6642 bp = path + strlen(path);
6643 (void) ddi_deviname(dip, bp);
6644 return (path);
/* Public entry: fill 'path' with the /devices path of 'dip'. */
6647 char *
6648 ddi_pathname(dev_info_t *dip, char *path)
6650 return (pathname_work(dip, path));
/*
 * ddi_pathname_minor: render "devpath:minorname" for a minor node.
 * Yields the empty string if the minor data has no devinfo pointer;
 * omits the ":minor" suffix if there is no minor name.
 */
6653 char *
6654 ddi_pathname_minor(struct ddi_minor_data *dmdp, char *path)
6656 if (dmdp->dip == NULL)
6657 *path = '\0';
6658 else {
6659 (void) ddi_pathname(dmdp->dip, path);
6660 if (dmdp->ddm_name) {
6661 (void) strcat(path, ":");
6662 (void) strcat(path, dmdp->ddm_name);
6665 return (path);
/*
 * pathname_work_obp: recursive helper for ddi_pathname_obp().  Walks
 * up from 'dip' looking for an "obp-path" property; once found, the
 * descendants' deviname components are appended beneath it on the way
 * back down the recursion.  Returns NULL if no ancestor (up to root)
 * carries the property.
 */
6668 static char *
6669 pathname_work_obp(dev_info_t *dip, char *path)
6671 char *bp;
6672 char *obp_path;
6675 * look up the "obp-path" property, return the path if it exists
6677 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
6678 "obp-path", &obp_path) == DDI_PROP_SUCCESS) {
6679 (void) strcpy(path, obp_path);
6680 ddi_prop_free(obp_path);
6681 return (path);
6685 * stop at root, no obp path
6687 if (dip == ddi_root_node()) {
6688 return (NULL);
6691 obp_path = pathname_work_obp(ddi_get_parent(dip), path);
6692 if (obp_path == NULL)
6693 return (NULL);
6696 * append our component to parent's obp path
/* avoid a double '/' when the parent path already ends with one */
6698 bp = path + strlen(path);
6699 if (*(bp - 1) != '/')
6700 (void) strcat(bp++, "/");
6701 (void) ddi_deviname(dip, bp);
6702 return (path);
6706 * return the 'obp-path' based path for the given node, or NULL if the node
6707 * does not have a different obp path. NOTE: Unlike ddi_pathname, this
6708 * function can't be called from interrupt context (since we need to
6709 * lookup a string property).
6711 char *
6712 ddi_pathname_obp(dev_info_t *dip, char *path)
/* property lookup may block, hence the no-interrupt-context assertion */
6714 ASSERT(!servicing_interrupt());
6715 if (dip == NULL || path == NULL)
6716 return (NULL);
6718 /* split work into a separate function to aid debugging */
6719 return (pathname_work_obp(dip, path));
6723 ddi_pathname_obp_set(dev_info_t *dip, char *component)
6725 dev_info_t *pdip;
6726 char *obp_path = NULL;
6727 int rc = DDI_FAILURE;
6729 if (dip == NULL)
6730 return (DDI_FAILURE);
6732 obp_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
6734 pdip = ddi_get_parent(dip);
6736 if (ddi_pathname_obp(pdip, obp_path) == NULL) {
6737 (void) ddi_pathname(pdip, obp_path);
6740 if (component) {
6741 (void) strncat(obp_path, "/", MAXPATHLEN);
6742 (void) strncat(obp_path, component, MAXPATHLEN);
6744 rc = ndi_prop_update_string(DDI_DEV_T_NONE, dip, "obp-path",
6745 obp_path);
6747 if (obp_path)
6748 kmem_free(obp_path, MAXPATHLEN);
6750 return (rc);
6754 * Given a dev_t, return the pathname of the corresponding device in the
6755 * buffer pointed at by "path." The buffer is assumed to be large enough
6756 * to hold the pathname of the device (MAXPATHLEN).
6758 * The pathname of a device is the pathname of the devinfo node to which
6759 * the device "belongs," concatenated with the character ':' and the name
6760 * of the minor node corresponding to the dev_t. If spec_type is 0 then
6761 * just the pathname of the devinfo node is returned without driving attach
6762 * of that node. For a non-zero spec_type, an attach is performed and a
6763 * search of the minor list occurs.
6765 * It is possible that the path associated with the dev_t is not
6766 * currently available in the devinfo tree. In order to have a
6767 * dev_t, a device must have been discovered before, which means
6768 * that the path is always in the instance tree. The one exception
6769 * to this is if the dev_t is associated with a pseudo driver, in
6770 * which case the device must exist on the pseudo branch of the
6771 * devinfo tree as a result of parsing .conf files.
/* Returns DDI_SUCCESS/DDI_FAILURE; on failure *path is the empty string. */
6774 ddi_dev_pathname(dev_t devt, int spec_type, char *path)
6776 int circ;
6777 major_t major = getmajor(devt);
6778 int instance;
6779 dev_info_t *dip;
6780 char *minorname;
6781 char *drvname;
6783 if (major >= devcnt)
6784 goto fail;
/* clone(7D) encodes the real driver's major in the minor number */
6785 if (major == clone_major) {
6786 /* clone has no minor nodes, manufacture the path here */
6787 if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
6788 goto fail;
6790 (void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname);
6791 return (DDI_SUCCESS);
6794 /* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
6795 if ((instance = dev_to_instance(devt)) == -1)
6796 goto fail;
6798 /* reconstruct the path given the major/instance */
6799 if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
6800 goto fail;
6802 /* if spec_type given we must drive attach and search minor nodes */
6803 if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
6804 /* attach the path so we can search minors */
6805 if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
6806 goto fail;
6808 /* Add minorname to path. */
/* hold the node busy while walking its minor list */
6809 ndi_devi_enter(dip, &circ);
6810 minorname = i_ddi_devtspectype_to_minorname(dip,
6811 devt, spec_type);
6812 if (minorname) {
6813 (void) strcat(path, ":");
6814 (void) strcat(path, minorname);
6816 ndi_devi_exit(dip, circ);
6817 ddi_release_devi(dip);
6818 if (minorname == NULL)
6819 goto fail;
6821 ASSERT(strlen(path) < MAXPATHLEN);
6822 return (DDI_SUCCESS);
6824 fail: *path = 0;
6825 return (DDI_FAILURE);
6829 * Given a major number and an instance, return the path.
6830 * This interface does NOT drive attach.
/* Returns DDI_SUCCESS/DDI_FAILURE; on failure *path is the empty string. */
6833 e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
6835 struct devnames *dnp;
6836 dev_info_t *dip;
6838 if ((major >= devcnt) || (instance == -1)) {
6839 *path = 0;
6840 return (DDI_FAILURE);
6843 /* look for the major/instance in the instance tree */
6844 if (e_ddi_instance_majorinstance_to_path(major, instance,
6845 path) == DDI_SUCCESS) {
6846 ASSERT(strlen(path) < MAXPATHLEN);
6847 return (DDI_SUCCESS);
6851 * Not in instance tree, find the instance on the per driver list and
6852 * construct path to instance via ddi_pathname(). This is how paths
6853 * down the 'pseudo' branch are constructed.
6855 dnp = &(devnamesp[major]);
6856 LOCK_DEV_OPS(&(dnp->dn_lock));
6857 for (dip = dnp->dn_head; dip;
6858 dip = (dev_info_t *)DEVI(dip)->devi_next) {
6859 /* Skip if instance does not match. */
6860 if (DEVI(dip)->devi_instance != instance)
6861 continue;
6864 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
6865 * node demotion, so it is not an effective way of ensuring
6866 * that the ddi_pathname result has a unit-address. Instead,
6867 * we reverify the node state after calling ddi_pathname().
6869 if (i_ddi_node_state(dip) >= DS_INITIALIZED) {
6870 (void) ddi_pathname(dip, path);
/* re-check: a concurrent demotion would have produced a stale path */
6871 if (i_ddi_node_state(dip) < DS_INITIALIZED)
6872 continue;
6873 UNLOCK_DEV_OPS(&(dnp->dn_lock));
6874 ASSERT(strlen(path) < MAXPATHLEN);
6875 return (DDI_SUCCESS);
6878 UNLOCK_DEV_OPS(&(dnp->dn_lock));
6880 /* can't reconstruct the path */
6881 *path = 0;
6882 return (DDI_FAILURE);
6885 #define GLD_DRIVER_PPA "SUNW,gld_v0_ppa"
6888 * Given the dip for a network interface return the ppa for that interface.
6890 * In all cases except GLD v0 drivers, the ppa == instance.
6891 * In the case of GLD v0 drivers, the ppa is equal to the attach order.
6892 * So for these drivers when the attach routine calls gld_register(),
6893 * the GLD framework creates an integer property called "gld_driver_ppa"
6894 * that can be queried here.
6896 * The only time this function is used is when a system is booting over nfs.
6897 * In this case the system has to resolve the pathname of the boot device
6898 * to it's ppa.
/* Falls back to the instance number when the GLD v0 property is absent. */
6901 i_ddi_devi_get_ppa(dev_info_t *dip)
6903 return (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
6904 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
6905 GLD_DRIVER_PPA, ddi_get_instance(dip)));
6909 * i_ddi_devi_set_ppa() should only be called from gld_register()
6910 * and only for GLD v0 drivers
/* Records the attach-order ppa as a software integer property. */
6912 void
6913 i_ddi_devi_set_ppa(dev_info_t *dip, int ppa)
6915 (void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa);
6920 * Private DDI Console bell functions.
/* Ring the console bell for 'duration' ticks, if a bell is registered. */
6922 void
6923 ddi_ring_console_bell(clock_t duration)
6925 if (ddi_console_bell_func != NULL)
6926 (*ddi_console_bell_func)(duration);
/* Register (or clear, with NULL) the console bell callback. */
6929 void
6930 ddi_set_console_bell(void (*bellfunc)(clock_t duration))
6932 ddi_console_bell_func = bellfunc;
/*
 * ddi_dma_alloc_handle(9F): allocate a DMA handle for 'dip' with the
 * given attributes.  Dispatches to the parent nexus bus_dma_allochdl
 * when one is provided, else the default ddi_dma_allochdl.  A private
 * copy of *attr is passed down so the caller's attributes cannot be
 * modified.  Returns DDI_DMA_BADATTR for a NULL attr, otherwise the
 * allochdl result.
 */
6936 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr,
6937 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
6939 int (*funcp)() = ddi_dma_allochdl;
6940 ddi_dma_attr_t dma_attr;
6941 struct bus_ops *bop;
6943 if (attr == (ddi_dma_attr_t *)0)
6944 return (DDI_DMA_BADATTR);
6946 dma_attr = *attr;
6948 bop = DEVI(dip)->devi_ops->devo_bus_ops;
6949 if (bop && bop->bus_dma_allochdl)
6950 funcp = bop->bus_dma_allochdl;
6952 return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep));
/* ddi_dma_free_handle(9F): release a handle from ddi_dma_alloc_handle(). */
6955 void
6956 ddi_dma_free_handle(ddi_dma_handle_t *handlep)
6958 ddi_dma_handle_t h = *handlep;
6959 (void) ddi_dma_freehdl(HD, HD, h);
6962 static uintptr_t dma_mem_list_id = 0;
6966 ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length,
6967 ddi_device_acc_attr_t *accattrp, uint_t flags,
6968 int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp,
6969 size_t *real_length, ddi_acc_handle_t *handlep)
6971 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6972 dev_info_t *dip = hp->dmai_rdip;
6973 ddi_acc_hdl_t *ap;
6974 ddi_dma_attr_t *attrp = &hp->dmai_attr;
6975 uint_t sleepflag, xfermodes;
6976 int (*fp)(caddr_t);
6977 int rval;
6979 if (waitfp == DDI_DMA_SLEEP)
6980 fp = (int (*)())KM_SLEEP;
6981 else if (waitfp == DDI_DMA_DONTWAIT)
6982 fp = (int (*)())KM_NOSLEEP;
6983 else
6984 fp = waitfp;
6985 *handlep = impl_acc_hdl_alloc(fp, arg);
6986 if (*handlep == NULL)
6987 return (DDI_FAILURE);
6989 /* check if the cache attributes are supported */
6990 if (i_ddi_check_cache_attr(flags) == B_FALSE)
6991 return (DDI_FAILURE);
6994 * Transfer the meaningful bits to xfermodes.
6995 * Double-check if the 3rd party driver correctly sets the bits.
6996 * If not, set DDI_DMA_STREAMING to keep compatibility.
6998 xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING);
6999 if (xfermodes == 0) {
7000 xfermodes = DDI_DMA_STREAMING;
7004 * initialize the common elements of data access handle
7006 ap = impl_acc_hdl_get(*handlep);
7007 ap->ah_vers = VERS_ACCHDL;
7008 ap->ah_dip = dip;
7009 ap->ah_offset = 0;
7010 ap->ah_len = 0;
7011 ap->ah_xfermodes = flags;
7012 ap->ah_acc = *accattrp;
7014 sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0);
7015 if (xfermodes == DDI_DMA_CONSISTENT) {
7016 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7017 flags, accattrp, kaddrp, NULL, ap);
7018 *real_length = length;
7019 } else {
7020 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7021 flags, accattrp, kaddrp, real_length, ap);
7023 if (rval == DDI_SUCCESS) {
7024 ap->ah_len = (off_t)(*real_length);
7025 ap->ah_addr = *kaddrp;
7026 } else {
7027 impl_acc_hdl_free(*handlep);
7028 *handlep = (ddi_acc_handle_t)NULL;
7029 if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) {
7030 ddi_set_callback(waitfp, arg, &dma_mem_list_id);
7032 rval = DDI_FAILURE;
7034 return (rval);
/*
 * ddi_dma_mem_free(9F): release memory from ddi_dma_mem_alloc() and
 * its access handle, then fire any callbacks parked on the DMA memory
 * callback list so waiters can retry their allocations.
 */
7037 void
7038 ddi_dma_mem_free(ddi_acc_handle_t *handlep)
7040 ddi_acc_hdl_t *ap;
7042 ap = impl_acc_hdl_get(*handlep);
7043 ASSERT(ap);
7045 i_ddi_mem_free((caddr_t)ap->ah_addr, ap);
7048 * free the handle
7050 impl_acc_hdl_free(*handlep);
7051 *handlep = (ddi_acc_handle_t)NULL;
/* wake allocators that registered callbacks in ddi_dma_mem_alloc() */
7053 if (dma_mem_list_id != 0) {
7054 ddi_run_callback(&dma_mem_list_id);
/*
 * ddi_dma_buf_bind_handle(9F): bind the memory described by buf(9S)
 * 'bp' to DMA handle 'handle'.  Builds a ddi_dma_req describing the
 * buffer (page list, shadow page list, or virtual address + address
 * space, depending on b_flags) and dispatches to the nexus bind
 * function cached on the devinfo node.
 */
7059 ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp,
7060 uint_t flags, int (*waitfp)(caddr_t), caddr_t arg,
7061 ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7063 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7064 dev_info_t *dip, *rdip;
7065 struct ddi_dma_req dmareq;
7066 int (*funcp)();
7068 dmareq.dmar_flags = flags;
7069 dmareq.dmar_fp = waitfp;
7070 dmareq.dmar_arg = arg;
7071 dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;
/* B_PAGEIO: buffer is a list of pages; offset is within the first page */
7073 if (bp->b_flags & B_PAGEIO) {
7074 dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
7075 dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
7076 dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
7077 (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
7078 } else {
7079 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
/* B_SHADOW: vaddr plus a pre-built shadow page list in b_shadow */
7080 if (bp->b_flags & B_SHADOW) {
7081 dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
7082 bp->b_shadow;
7083 dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
7084 } else {
7085 dmareq.dmar_object.dmao_type =
7086 (bp->b_flags & (B_PHYS | B_REMAPPED)) ?
7087 DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR;
7088 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
7092 * If the buffer has no proc pointer, or the proc
7093 * struct has the kernel address space, or the buffer has
7094 * been marked B_REMAPPED (meaning that it is now
7095 * mapped into the kernel's address space), then
7096 * the address space is kas (kernel address space).
7098 if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
7099 (bp->b_flags & B_REMAPPED)) {
7100 dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
7101 } else {
7102 dmareq.dmar_object.dmao_obj.virt_obj.v_as =
7103 bp->b_proc->p_as;
/* dispatch to the parent's bindhdl unless we are at the root node */
7107 dip = rdip = hp->dmai_rdip;
7108 if (dip != ddi_root_node())
7109 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
7110 funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
7111 return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
/*
 * ddi_dma_addr_bind_handle(9F): bind a virtual address range
 * [addr, addr+len) in address space 'as' to DMA handle 'handle'.
 * Rejects zero-length requests with DDI_DMA_NOMAPPING; otherwise
 * builds a DMA_OTYP_VADDR request and dispatches to the nexus bind
 * function cached on the devinfo node.
 */
7115 ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as,
7116 caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t),
7117 caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7119 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7120 dev_info_t *dip, *rdip;
7121 struct ddi_dma_req dmareq;
7122 int (*funcp)();
7124 if (len == 0) {
7125 return (DDI_DMA_NOMAPPING);
7127 dmareq.dmar_flags = flags;
7128 dmareq.dmar_fp = waitfp;
7129 dmareq.dmar_arg = arg;
7130 dmareq.dmar_object.dmao_size = len;
7131 dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
7132 dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
7133 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
7134 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
/* dispatch to the parent's bindhdl unless we are at the root node */
7136 dip = rdip = hp->dmai_rdip;
7137 if (dip != ddi_root_node())
7138 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
7139 funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
7140 return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
/*
 * ddi_dma_nextcookie(9F): copy out the next DMA cookie of the current
 * binding/window and advance the handle's internal cookie cursor.
 * Caller must not request more cookies than *ccountp reported at bind
 * time (the ASSERT catches a NULL cursor on debug kernels only).
 */
7143 void
7144 ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep)
7146 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7147 ddi_dma_cookie_t *cp;
7149 cp = hp->dmai_cookie;
7150 ASSERT(cp);
/* field-by-field copy of the current cookie into the caller's struct */
7152 cookiep->dmac_notused = cp->dmac_notused;
7153 cookiep->dmac_type = cp->dmac_type;
7154 cookiep->dmac_address = cp->dmac_address;
7155 cookiep->dmac_size = cp->dmac_size;
7156 hp->dmai_cookie++;
/*
 * ddi_dma_numwin(9F): report the number of DMA windows of a handle.
 * Only meaningful for DDI_DMA_PARTIAL bindings; otherwise DDI_FAILURE.
 */
7160 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp)
7162 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7163 if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
7164 return (DDI_FAILURE);
7165 } else {
7166 *nwinp = hp->dmai_nwin;
7167 return (DDI_SUCCESS);
/*
 * ddi_dma_getwin(9F): activate DMA window 'win' of handle 'h',
 * returning its offset, length, first cookie, and cookie count.
 * Dispatches to the root nexus bus_dma_win when provided, else the
 * default ddi_dma_win.
 */
7172 ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp,
7173 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7175 int (*funcp)() = ddi_dma_win;
7176 struct bus_ops *bop;
7178 bop = DEVI(HD)->devi_ops->devo_bus_ops;
7179 if (bop && bop->bus_dma_win)
7180 funcp = bop->bus_dma_win;
7182 return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp));
/* Request 64-bit SBus transfers with the given burst sizes (via mctl). */
7186 ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes)
7188 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0,
7189 &burstsizes, 0, 0));
/* Default fault check: simply report the handle's recorded fault state. */
7193 i_ddi_dma_fault_check(ddi_dma_impl_t *hp)
7195 return (hp->dmai_fault);
/*
 * ddi_check_dma_handle(9F): run the handle's fault-check callback
 * (or the default above) and normalize the result to DDI_SUCCESS /
 * DDI_FAILURE.
 */
7199 ddi_check_dma_handle(ddi_dma_handle_t handle)
7201 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7202 int (*check)(ddi_dma_impl_t *);
7204 if ((check = hp->dmai_fault_check) == NULL)
7205 check = i_ddi_dma_fault_check;
7207 return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
/* Mark the handle faulted; notify only on the 0 -> 1 transition. */
7210 void
7211 i_ddi_dma_set_fault(ddi_dma_handle_t handle)
7213 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7214 void (*notify)(ddi_dma_impl_t *);
7216 if (!hp->dmai_fault) {
7217 hp->dmai_fault = 1;
7218 if ((notify = hp->dmai_fault_notify) != NULL)
7219 (*notify)(hp);
/* Clear the handle's fault; notify only on the 1 -> 0 transition. */
7223 void
7224 i_ddi_dma_clr_fault(ddi_dma_handle_t handle)
7226 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7227 void (*notify)(ddi_dma_impl_t *);
7229 if (hp->dmai_fault) {
7230 hp->dmai_fault = 0;
7231 if ((notify = hp->dmai_fault_notify) != NULL)
7232 (*notify)(hp);
7237 * register mapping routines.
/*
 * ddi_regs_map_setup(9F): map register set 'rnumber' of 'dip' for
 * [offset, offset+len) and return the mapped address in *addrp plus an
 * access handle in *handle.  Allocates the handle with KM_SLEEP, then
 * issues a DDI_MO_MAP_LOCKED map request to the parent.  On failure
 * the handle is freed and *handle set to NULL.
 */
7240 ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp,
7241 offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp,
7242 ddi_acc_handle_t *handle)
7244 ddi_map_req_t mr;
7245 ddi_acc_hdl_t *hp;
7246 int result;
7249 * Allocate and initialize the common elements of data access handle.
7251 *handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
7252 hp = impl_acc_hdl_get(*handle);
7253 hp->ah_vers = VERS_ACCHDL;
7254 hp->ah_dip = dip;
7255 hp->ah_rnumber = rnumber;
7256 hp->ah_offset = offset;
7257 hp->ah_len = len;
7258 hp->ah_acc = *accattrp;
7261 * Set up the mapping request and call to parent.
7263 mr.map_op = DDI_MO_MAP_LOCKED;
7264 mr.map_type = DDI_MT_RNUMBER;
7265 mr.map_obj.rnumber = rnumber;
7266 mr.map_prot = PROT_READ | PROT_WRITE;
7267 mr.map_flags = DDI_MF_KERNEL_MAPPING;
7268 mr.map_handlep = hp;
7269 mr.map_vers = DDI_MAP_VERSION;
7270 result = ddi_map(dip, &mr, offset, len, addrp);
7273 * check for end result
7275 if (result != DDI_SUCCESS) {
7276 impl_acc_hdl_free(*handle);
7277 *handle = (ddi_acc_handle_t)NULL;
7278 } else {
7279 hp->ah_addr = *addrp;
7282 return (result);
/*
 * ddi_regs_map_free(9F): undo a ddi_regs_map_setup() mapping.  Issues
 * a DDI_MO_UNMAP request to the parent using the parameters recorded
 * in the access handle, then frees the handle and NULLs *handlep.
 */
7285 void
7286 ddi_regs_map_free(ddi_acc_handle_t *handlep)
7288 ddi_map_req_t mr;
7289 ddi_acc_hdl_t *hp;
7291 hp = impl_acc_hdl_get(*handlep);
7292 ASSERT(hp);
7294 mr.map_op = DDI_MO_UNMAP;
7295 mr.map_type = DDI_MT_RNUMBER;
7296 mr.map_obj.rnumber = hp->ah_rnumber;
7297 mr.map_prot = PROT_READ | PROT_WRITE;
7298 mr.map_flags = DDI_MF_KERNEL_MAPPING;
7299 mr.map_handlep = hp;
7300 mr.map_vers = DDI_MAP_VERSION;
7303 * Call my parent to unmap my regs.
7305 (void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
7306 hp->ah_len, &hp->ah_addr);
7308 * free the handle
7310 impl_acc_hdl_free(*handlep);
7311 *handlep = (ddi_acc_handle_t)NULL;
/*
 * ddi_device_zero(9F): write zeros to 'bytecount' bytes of device
 * memory at 'dev_addr' through 'handle', using accesses of size
 * 'dev_datasz' and advancing the device pointer by 'dev_advcnt'
 * ELEMENTS (not bytes) per access.  bytecount must be a multiple of
 * the access size; returns DDI_FAILURE on a bad size or count.
 */
7315 ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount,
7316 ssize_t dev_advcnt, uint_t dev_datasz)
7318 uint8_t *b;
7319 uint16_t *w;
7320 uint32_t *l;
7321 uint64_t *ll;
7323 /* check for total byte count is multiple of data transfer size */
7324 if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7325 return (DDI_FAILURE);
7327 switch (dev_datasz) {
7328 case DDI_DATA_SZ01_ACC:
7329 for (b = (uint8_t *)dev_addr;
7330 bytecount != 0; bytecount -= 1, b += dev_advcnt)
7331 ddi_put8(handle, b, 0);
7332 break;
7333 case DDI_DATA_SZ02_ACC:
7334 for (w = (uint16_t *)dev_addr;
7335 bytecount != 0; bytecount -= 2, w += dev_advcnt)
7336 ddi_put16(handle, w, 0);
7337 break;
7338 case DDI_DATA_SZ04_ACC:
7339 for (l = (uint32_t *)dev_addr;
7340 bytecount != 0; bytecount -= 4, l += dev_advcnt)
7341 ddi_put32(handle, l, 0);
7342 break;
7343 case DDI_DATA_SZ08_ACC:
7344 for (ll = (uint64_t *)dev_addr;
7345 bytecount != 0; bytecount -= 8, ll += dev_advcnt)
7346 ddi_put64(handle, ll, 0x0ll);
7347 break;
/* unknown access size */
7348 default:
7349 return (DDI_FAILURE);
7351 return (DDI_SUCCESS);
/*
 * ddi_device_copy(9F): copy 'bytecount' bytes between two device
 * mappings using accesses of 'dev_datasz' bytes; src/dest pointers
 * advance by src_advcnt/dest_advcnt ELEMENTS per access.  bytecount
 * must be a multiple of the access size; returns DDI_FAILURE on a bad
 * size or count, DDI_SUCCESS otherwise.
 */
7355 ddi_device_copy(
7356 ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt,
7357 ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt,
7358 size_t bytecount, uint_t dev_datasz)
7360 uint8_t *b_src, *b_dst;
7361 uint16_t *w_src, *w_dst;
7362 uint32_t *l_src, *l_dst;
7363 uint64_t *ll_src, *ll_dst;
7365 /* check for total byte count is multiple of data transfer size */
7366 if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7367 return (DDI_FAILURE);
7369 switch (dev_datasz) {
7370 case DDI_DATA_SZ01_ACC:
7371 b_src = (uint8_t *)src_addr;
7372 b_dst = (uint8_t *)dest_addr;
7374 for (; bytecount != 0; bytecount -= 1) {
7375 ddi_put8(dest_handle, b_dst,
7376 ddi_get8(src_handle, b_src));
7377 b_dst += dest_advcnt;
7378 b_src += src_advcnt;
7380 break;
7381 case DDI_DATA_SZ02_ACC:
7382 w_src = (uint16_t *)src_addr;
7383 w_dst = (uint16_t *)dest_addr;
7385 for (; bytecount != 0; bytecount -= 2) {
7386 ddi_put16(dest_handle, w_dst,
7387 ddi_get16(src_handle, w_src));
7388 w_dst += dest_advcnt;
7389 w_src += src_advcnt;
7391 break;
7392 case DDI_DATA_SZ04_ACC:
7393 l_src = (uint32_t *)src_addr;
7394 l_dst = (uint32_t *)dest_addr;
7396 for (; bytecount != 0; bytecount -= 4) {
7397 ddi_put32(dest_handle, l_dst,
7398 ddi_get32(src_handle, l_src));
7399 l_dst += dest_advcnt;
7400 l_src += src_advcnt;
7402 break;
7403 case DDI_DATA_SZ08_ACC:
7404 ll_src = (uint64_t *)src_addr;
7405 ll_dst = (uint64_t *)dest_addr;
7407 for (; bytecount != 0; bytecount -= 8) {
7408 ddi_put64(dest_handle, ll_dst,
7409 ddi_get64(src_handle, ll_src));
7410 ll_dst += dest_advcnt;
7411 ll_src += src_advcnt;
7413 break;
/* unknown access size */
7414 default:
7415 return (DDI_FAILURE);
7417 return (DDI_SUCCESS);
/*
 * Byte-swap helper macros used by ddi_swap16/32/64() below.
 * Each argument is evaluated more than once, so callers must not pass
 * expressions with side effects.
 */
#define	swap16(value)  \
	((((value) & 0xff) << 8) | ((value) >> 8))

/* swap a 32-bit value: swap each 16-bit half, then exchange the halves */
#define	swap32(value)	\
	(((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
	(uint32_t)swap16((uint16_t)((value) >> 16)))

/* swap a 64-bit value: swap each 32-bit half, then exchange the halves */
#define	swap64(value)	\
	(((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \
	    << 32) | \
	(uint64_t)swap32((uint32_t)((value) >> 32)))
/*
 * Return the 16-bit value with its two bytes exchanged
 * (endianness conversion).
 */
uint16_t
ddi_swap16(uint16_t value)
{
	uint16_t low = value & 0xff;
	uint16_t high = value >> 8;

	return ((low << 8) | high);
}
/*
 * Return the 32-bit value with its byte order fully reversed
 * (endianness conversion).
 */
uint32_t
ddi_swap32(uint32_t value)
{
	uint32_t swapped = 0;
	int byte;

	/* peel bytes off the low end and push them onto the result */
	for (byte = 0; byte < 4; byte++) {
		swapped = (swapped << 8) | (value & 0xff);
		value >>= 8;
	}
	return (swapped);
}
/*
 * Return the 64-bit value with its byte order fully reversed
 * (endianness conversion).
 */
uint64_t
ddi_swap64(uint64_t value)
{
	uint64_t swapped = 0;
	int byte;

	/* peel bytes off the low end and push them onto the result */
	for (byte = 0; byte < 8; byte++) {
		swapped = (swapped << 8) | (value & 0xff);
		value >>= 8;
	}
	return (swapped);
}
7451 * Convert a binding name to a driver name.
7452 * A binding name is the name used to determine the driver for a
7453 * device - it may be either an alias for the driver or the name
7454 * of the driver itself.
7456 char *
7457 i_binding_to_drv_name(char *bname)
7459 major_t major_no;
7461 ASSERT(bname != NULL);
7463 if ((major_no = ddi_name_to_major(bname)) == -1)
7464 return (NULL);
7465 return (ddi_major_to_name(major_no));
/*
 * Search for minor name that has specified dev_t and spec_type.
 * If spec_type is zero then any dev_t match works. Since we
 * are returning a pointer to the minor name string, we require the
 * caller to do the locking.
 */
char *
i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type)
{
	struct ddi_minor_data	*dmdp;

	/*
	 * The did layered driver currently intentionally returns a
	 * devinfo ptr for an underlying sd instance based on a did
	 * dev_t. In this case it is not an error.
	 *
	 * The did layered driver is associated with Sun Cluster.
	 */
	ASSERT((ddi_driver_major(dip) == getmajor(dev)) ||
	    (strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0));

	/* caller must hold the devinfo node busy (see comment above) */
	ASSERT(DEVI_BUSY_OWNED(dip));
	/* scan the minor node list for a dev_t (and spec_type) match */
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
		if (((dmdp->type == DDM_MINOR) ||
		    (dmdp->type == DDM_INTERNAL_PATH) ||
		    (dmdp->type == DDM_DEFAULT)) &&
		    (dmdp->ddm_dev == dev) &&
		    ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) ||
		    (dmdp->ddm_spec_type == spec_type)))
			return (dmdp->ddm_name);
	}

	/* no matching minor node found */
	return (NULL);
}
/*
 * Find the devt and spectype of the specified minor_name.
 * Return DDI_FAILURE if minor_name not found. Since we are
 * returning everything via arguments we can do the locking.
 */
int
i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name,
    dev_t *devtp, int *spectypep)
{
	int			circ;
	struct ddi_minor_data	*dmdp;

	/* deal with clone minor nodes */
	if (dip == clone_dip) {
		major_t	major;
		/*
		 * Make sure minor_name is a STREAMS driver.
		 * We load the driver but don't attach to any instances.
		 */

		major = ddi_name_to_major(minor_name);
		if (major == DDI_MAJOR_T_NONE)
			return (DDI_FAILURE);

		if (ddi_hold_driver(major) == NULL)
			return (DDI_FAILURE);

		/* must be a STREAMS driver to be cloneable this way */
		if (STREAMSTAB(major) == NULL) {
			ddi_rele_driver(major);
			return (DDI_FAILURE);
		}
		ddi_rele_driver(major);

		/* clone dev_t encodes the target driver's major as minor */
		if (devtp)
			*devtp = makedevice(clone_major, (minor_t)major);

		if (spectypep)
			*spectypep = S_IFCHR;

		return (DDI_SUCCESS);
	}

	/* hold the node busy while walking its minor list */
	ndi_devi_enter(dip, &circ);
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
		if (((dmdp->type != DDM_MINOR) &&
		    (dmdp->type != DDM_INTERNAL_PATH) &&
		    (dmdp->type != DDM_DEFAULT)) ||
		    strcmp(minor_name, dmdp->ddm_name))
			continue;

		if (devtp)
			*devtp = dmdp->ddm_dev;

		if (spectypep)
			*spectypep = dmdp->ddm_spec_type;

		ndi_devi_exit(dip, circ);
		return (DDI_SUCCESS);
	}
	ndi_devi_exit(dip, circ);

	return (DDI_FAILURE);
}
/* serializes access to devid_gen_number below */
static kmutex_t devid_gen_mutex;
/* monotonic generation count used to make DEVID_FAB devids unique */
static short devid_gen_number;
#ifdef	DEBUG

/*
 * Debug tunables: when nonzero, i_ddi_devid_register() deliberately
 * corrupts devids (optionally only for a given major) to exercise the
 * devid error paths.
 */
static int devid_register_corrupt = 0;
static int devid_register_corrupt_major = 0;
static int devid_register_corrupt_hint = 0;
static int devid_register_corrupt_hint_major = 0;

/* nonzero enables DDI_DEBUG_DEVID_DEVTS() tracing */
static int devid_lyr_debug = 0;

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)		\
	if (devid_lyr_debug)				\
		ddi_debug_devid_devts(msg, ndevs, devs)

#else

/* non-DEBUG kernels compile the tracing away entirely */
#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)

#endif /* DEBUG */
#ifdef	DEBUG

/* print each dev_t in devs[] under the given message (debug only) */
static void
ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
{
	int i;

	cmn_err(CE_CONT, "%s:\n", msg);
	for (i = 0; i < ndevs; i++) {
		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
	}
}

/* print each path string in paths[] under the given message (debug only) */
static void
ddi_debug_devid_paths(char *msg, int npaths, char **paths)
{
	int i;

	cmn_err(CE_CONT, "%s:\n", msg);
	for (i = 0; i < npaths; i++) {
		cmn_err(CE_CONT, "    %s\n", paths[i]);
	}
}

/* print the dev_ts associated with a single device path (debug only) */
static void
ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
{
	int i;

	cmn_err(CE_CONT, "dev_ts per path %s\n", path);
	for (i = 0; i < ndevs; i++) {
		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
	}
}

#endif /* DEBUG */
/*
 * Register device id into DDI framework.
 * Must be called when the driver is bound.
 *
 * Stamps the driver-name hint into the devid, encodes it as a string,
 * attaches it to the node as the devid property, and caches the string
 * on the devinfo node for interrupt-context FMA use.
 */
static int
i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	impl_devid_t	*i_devid = (impl_devid_t *)devid;
	size_t		driver_len;
	const char	*driver_name;
	char		*devid_str;
	major_t		major;

	if ((dip == NULL) ||
	    ((major = ddi_driver_major(dip)) == DDI_MAJOR_T_NONE))
		return (DDI_FAILURE);

	/* verify that the devid is valid */
	if (ddi_devid_valid(devid) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Updating driver name hint in devid */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}
	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
	bcopy(driver_name, i_devid->did_driver, driver_len);

#ifdef DEBUG
	/* Corrupt the devid for testing. */
	if (devid_register_corrupt)
		i_devid->did_id[0] += devid_register_corrupt;
	if (devid_register_corrupt_major &&
	    (major == devid_register_corrupt_major))
		i_devid->did_id[0] += 1;
	if (devid_register_corrupt_hint)
		i_devid->did_driver[0] += devid_register_corrupt_hint;
	if (devid_register_corrupt_hint_major &&
	    (major == devid_register_corrupt_hint_major))
		i_devid->did_driver[0] += 1;
#endif /* DEBUG */

	/* encode the devid as a string */
	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
		return (DDI_FAILURE);

	/* add string as a string property */
	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: devid property update failed",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		ddi_devid_str_free(devid_str);
		return (DDI_FAILURE);
	}

	/* keep pointer to devid string for interrupt context fma code */
	if (DEVI(dip)->devi_devid_str)
		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
	DEVI(dip)->devi_devid_str = devid_str;
	return (DDI_SUCCESS);
}
/*
 * Public entry point: register a devid and add it to the devid-to-path
 * cache.  Returns the result of the underlying i_ddi_devid_register();
 * a cache-registration failure is reported but does not fail the call.
 */
int
ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	int rval;

	rval = i_ddi_devid_register(dip, devid);
	if (rval == DDI_SUCCESS) {
		/*
		 * Register devid in devid-to-path cache
		 */
		if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
			mutex_enter(&DEVI(dip)->devi_lock);
			DEVI(dip)->devi_flags |= DEVI_CACHED_DEVID;
			mutex_exit(&DEVI(dip)->devi_lock);
		} else if (ddi_get_name_addr(dip)) {
			/*
			 * We only expect cache_register DDI_FAILURE when we
			 * can't form the full path because of NULL devi_addr.
			 */
			cmn_err(CE_WARN, "%s%d: failed to cache devid",
			    ddi_driver_name(dip), ddi_get_instance(dip));
		}
	} else {
		cmn_err(CE_WARN, "%s%d: failed to register devid",
		    ddi_driver_name(dip), ddi_get_instance(dip));
	}
	return (rval);
}
7723 * Remove (unregister) device id from DDI framework.
7724 * Must be called when device is detached.
7726 static void
7727 i_ddi_devid_unregister(dev_info_t *dip)
7729 if (DEVI(dip)->devi_devid_str) {
7730 ddi_devid_str_free(DEVI(dip)->devi_devid_str);
7731 DEVI(dip)->devi_devid_str = NULL;
7734 /* remove the devid property */
7735 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
/*
 * Public entry point: drop the cached-devid flag, remove the devid from
 * the devid-to-path cache, then unregister it from the framework.
 */
void
ddi_devid_unregister(dev_info_t *dip)
{
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~DEVI_CACHED_DEVID;
	mutex_exit(&DEVI(dip)->devi_lock);
	e_devid_cache_unregister(dip);
	i_ddi_devid_unregister(dip);
}
/*
 * Allocate and initialize a device id.
 *
 * For DEVID_FAB the id payload is fabricated here (hostid + timestamp +
 * generation number, all big-endian); for the other types the caller
 * supplies nbytes of payload in 'id'.  The result is returned through
 * ret_devid; caller frees it with ddi_devid_free().
 */
int
ddi_devid_init(
	dev_info_t	*dip,
	ushort_t	devid_type,
	ushort_t	nbytes,
	void		*id,
	ddi_devid_t	*ret_devid)
{
	impl_devid_t	*i_devid;
	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
	int		driver_len;
	const char	*driver_name;

	switch (devid_type) {
	case DEVID_SCSI3_WWN:
		/*FALLTHRU*/
	case DEVID_SCSI_SERIAL:
		/*FALLTHRU*/
	case DEVID_ATA_SERIAL:
		/*FALLTHRU*/
	case DEVID_ENCAP:
		/* these types require a caller-supplied payload */
		if (nbytes == 0)
			return (DDI_FAILURE);
		if (id == NULL)
			return (DDI_FAILURE);
		break;
	case DEVID_FAB:
		/* fabricated devids must not carry caller-supplied data */
		if (nbytes != 0)
			return (DDI_FAILURE);
		if (id != NULL)
			return (DDI_FAILURE);
		/* room for hostid + timestamp + generation number */
		nbytes = sizeof (int) +
		    sizeof (struct timeval32) + sizeof (short);
		sz += nbytes;
		break;
	default:
		return (DDI_FAILURE);
	}

	/* NOTE: kmem_zalloc(KM_SLEEP) does not fail; check kept for safety */
	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
		return (DDI_FAILURE);

	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
	i_devid->did_rev_hi = DEVID_REV_MSB;
	i_devid->did_rev_lo = DEVID_REV_LSB;
	DEVID_FORMTYPE(i_devid, devid_type);
	DEVID_FORMLEN(i_devid, nbytes);

	/* Fill in driver name hint */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}

	bcopy(driver_name, i_devid->did_driver, driver_len);

	/* Fill in id field */
	if (devid_type == DEVID_FAB) {
		char		*cp;
		uint32_t	hostid;
		struct timeval32 timestamp32;
		int		i;
		int		*ip;
		short		gen;

		/* increase the generation number */
		mutex_enter(&devid_gen_mutex);
		gen = devid_gen_number++;
		mutex_exit(&devid_gen_mutex);

		cp = i_devid->did_id;

		/* Fill in host id (big-endian byte ordering) */
		hostid = zone_get_hostid(NULL);
		*cp++ = hibyte(hiword(hostid));
		*cp++ = lobyte(hiword(hostid));
		*cp++ = hibyte(loword(hostid));
		*cp++ = lobyte(loword(hostid));

		/*
		 * Fill in timestamp (big-endian byte ordering)
		 *
		 * (Note that the format may have to be changed
		 * before 2038 comes around, though it's arguably
		 * unique enough as it is..)
		 */
		uniqtime32(&timestamp32);
		ip = (int *)&timestamp32;
		for (i = 0;
		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
			int	val;
			val = *ip;
			*cp++ = hibyte(hiword(val));
			*cp++ = lobyte(hiword(val));
			*cp++ = hibyte(loword(val));
			*cp++ = lobyte(loword(val));
		}

		/* fill in the generation number */
		*cp++ = hibyte(gen);
		*cp++ = lobyte(gen);
	} else
		bcopy(id, i_devid->did_id, nbytes);

	/* return device id */
	*ret_devid = (ddi_devid_t)i_devid;
	return (DDI_SUCCESS);
}
/* Fetch a node's devid without a specific dev_t (DDI_DEV_T_ANY lookup). */
int
ddi_devid_get(dev_info_t *dip, ddi_devid_t *ret_devid)
{
	return (i_ddi_devi_get_devid(DDI_DEV_T_ANY, dip, ret_devid));
}
/*
 * Look up the devid property on dip (dev_t-specific first, then any)
 * and decode it into binary form.  Caller frees *ret_devid with
 * ddi_devid_free().  Returns DDI_SUCCESS/DDI_FAILURE.
 */
int
i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
{
	char		*devidstr;

	ASSERT(dev != DDI_DEV_T_NONE);

	/* look up the property, devt specific first */
	if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
	    DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
		if ((dev == DDI_DEV_T_ANY) ||
		    (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
		    DDI_PROP_SUCCESS)) {
			return (DDI_FAILURE);
		}
	}

	/* convert to binary form */
	if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
		ddi_prop_free(devidstr);
		return (DDI_FAILURE);
	}
	ddi_prop_free(devidstr);
	return (DDI_SUCCESS);
}
/*
 * Return a copy of the device id for dev_t
 */
int
ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
{
	dev_info_t	*dip;
	int		rval;

	/* get the dip (held for the duration of the lookup) */
	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_FAILURE);

	rval = i_ddi_devi_get_devid(dev, dip, ret_devid);

	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */
	return (rval);
}
/*
 * Return a copy of the minor name for dev_t and spec_type
 *
 * On success *minor_name is a freshly allocated string the caller must
 * free (strfree/kmem); on failure it is set to NULL.
 */
int
ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
{
	char		*buf;
	int		circ;
	dev_info_t	*dip;
	char		*nm;
	int		rval;

	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) {
		*minor_name = NULL;
		return (DDI_FAILURE);
	}

	/* Find the minor name and copy into max size buf */
	buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	ndi_devi_enter(dip, &circ);
	nm = i_ddi_devtspectype_to_minorname(dip, dev, spec_type);
	if (nm)
		(void) strcpy(buf, nm);
	ndi_devi_exit(dip, circ);
	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */

	if (nm) {
		/* duplicate into min size buf for return result */
		*minor_name = i_ddi_strdup(buf, KM_SLEEP);
		rval = DDI_SUCCESS;
	} else {
		*minor_name = NULL;
		rval = DDI_FAILURE;
	}

	/* free max size buf and return */
	kmem_free(buf, MAXNAMELEN);
	return (rval);
}
/*
 * Resolve a devid (plus optional minor name) to the list of matching
 * dev_ts.  Tries the devid cache first; on a miss, runs devid discovery
 * (which may attach devices) and retries.  On success the caller frees
 * *retdevs with ddi_lyr_free_devlist().
 */
int
ddi_lyr_devid_to_devlist(
	ddi_devid_t	devid,
	char		*minor_name,
	int		*retndevs,
	dev_t		**retdevs)
{
	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);

	/* fast path: the cache already knows this devid */
	if (e_devid_cache_to_devt_list(devid, minor_name,
	    retndevs, retdevs) == DDI_SUCCESS) {
		ASSERT(*retndevs > 0);
		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
		    *retndevs, *retdevs);
		return (DDI_SUCCESS);
	}

	/* cache miss: attempt discovery, which may populate the cache */
	if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
		return (DDI_FAILURE);
	}

	if (e_devid_cache_to_devt_list(devid, minor_name,
	    retndevs, retdevs) == DDI_SUCCESS) {
		ASSERT(*retndevs > 0);
		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
		    *retndevs, *retdevs);
		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}
/* Free a dev_t list previously returned by ddi_lyr_devid_to_devlist(). */
void
ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
{
	kmem_free(devlist, sizeof (dev_t) * ndevs);
}
/*
 * Note: This will need to be fixed if we ever allow processes to
 * have more than one data model per exec.
 */
model_t
ddi_mmap_get_model(void)
{
	/* data model of the current user process */
	return (get_udatamodel());
}
/*
 * Normalize a data-model value: mask to the model bits and clear the
 * "native" flag, yielding the canonical ILP32/LP64 model value.
 */
model_t
ddi_model_convert_from(model_t model)
{
	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
}
/*
 * ddi interfaces managing storage and retrieval of eventcookies.
 */

/*
 * Invoke bus nexus driver's implementation of the
 * (*bus_remove_eventcall)() interface to remove a registered
 * callback handler for "event".
 */
int
ddi_remove_event_handler(ddi_callback_id_t id)
{
	ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
	dev_info_t *ddip;

	/* defensive: ASSERT in DEBUG, graceful failure otherwise */
	ASSERT(cb);
	if (!cb) {
		return (DDI_FAILURE);
	}

	ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
	return (ndi_busop_remove_eventcall(ddip, id));
}
/*
 * Invoke bus nexus driver's implementation of the
 * (*bus_add_eventcall)() interface to register a callback handler
 * for "event".
 */
int
ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
    void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
    void *arg, ddi_callback_id_t *id)
{
	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
}
/*
 * Return a handle for event "name" by calling up the device tree
 * hierarchy via (*bus_get_eventcookie)() interface until claimed
 * by a bus nexus or top of dev_info tree is reached.
 */
int
ddi_get_eventcookie(dev_info_t *dip, char *name,
    ddi_eventcookie_t *event_cookiep)
{
	return (ndi_busop_get_eventcookie(dip, dip,
	    name, event_cookiep));
}
/*
 * This procedure is provided as the general callback function when
 * umem_lockmemory calls as_add_callback for long term memory locking.
 * When as_unmap, as_setprot, or as_free encounter segments which have
 * locked memory, this callback will be invoked.
 */
void
umem_lock_undo(struct as *as, void *arg, uint_t event)
{
	_NOTE(ARGUNUSED(as, event))
	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;

	/*
	 * Call the cleanup function.  Decrement the cookie reference
	 * count, if it goes to zero, return the memory for the cookie.
	 * The i_ddi_umem_unlock for this cookie may or may not have been
	 * called already.  It is the responsibility of the caller of
	 * umem_lockmemory to handle the case of the cleanup routine
	 * being called after a ddi_umem_unlock for the cookie
	 * was called.
	 */

	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);

	/* remove the cookie if reference goes to zero */
	if (atomic_dec_ulong_nv((ulong_t *)(&(cp->cook_refcnt))) == 0) {
		kmem_free(cp, sizeof (struct ddi_umem_cookie));
	}
}
/*
 * The following two Consolidation Private routines provide generic
 * interfaces to increase/decrease the amount of device-locked memory.
 *
 * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
 * must be called every time i_ddi_incr_locked_memory() is called.
 */
/* ARGSUSED */
int
i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
{
	ASSERT(procp != NULL);
	mutex_enter(&procp->p_lock);
	/* fails (ENOMEM) when the max-locked-memory rctl would be exceeded */
	if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
		mutex_exit(&procp->p_lock);
		return (ENOMEM);
	}
	mutex_exit(&procp->p_lock);
	return (0);
}
/*
 * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
 * must be called every time i_ddi_decr_locked_memory() is called.
 */
/* ARGSUSED */
void
i_ddi_decr_locked_memory(proc_t *procp, rctl_qty_t dec)
{
	ASSERT(procp != NULL);
	mutex_enter(&procp->p_lock);
	/* give back a charge previously taken by i_ddi_incr_locked_memory() */
	rctl_decr_locked_mem(procp, NULL, dec, 1);
	mutex_exit(&procp->p_lock);
}
/*
 * The cookie->upd_max_lock_rctl flag is used to determine if we should
 * charge device locked memory to the max-locked-memory rctl.  Tracking
 * device locked memory causes the rctl locks to get hot under high-speed
 * I/O such as RDSv3 over IB.  If there is no max-locked-memory rctl limit,
 * we bypass charging the locked memory to the rctl altogether.  The cookie's
 * flag tells us if the rctl value should be updated when unlocking the memory,
 * in case the rctl gets changed after the memory was locked.  Any device
 * locked memory in that rare case will not be counted toward the rctl limit.
 *
 * When tracking the locked memory, the kproject_t parameter is always NULL
 * in the code paths:
 *	i_ddi_incr_locked_memory -> rctl_incr_locked_mem
 *	i_ddi_decr_locked_memory -> rctl_decr_locked_mem
 * Thus, we always use the tk_proj member to check the projp setting.
 */
static void
init_lockedmem_rctl_flag(struct ddi_umem_cookie *cookie)
{
	proc_t		*p;
	kproject_t	*projp;
	zone_t		*zonep;

	ASSERT(cookie);
	p = cookie->procp;
	ASSERT(p);

	zonep = p->p_zone;
	projp = p->p_task->tk_proj;

	ASSERT(zonep);
	ASSERT(projp);

	/* UINT64_MAX on both zone and project ctl means "no limit set" */
	if (zonep->zone_locked_mem_ctl == UINT64_MAX &&
	    projp->kpj_data.kpd_locked_mem_ctl == UINT64_MAX)
		cookie->upd_max_lock_rctl = 0;
	else
		cookie->upd_max_lock_rctl = 1;
}
8167 * This routine checks if the max-locked-memory resource ctl is
8168 * exceeded, if not increments it, grabs a hold on the project.
8169 * Returns 0 if successful otherwise returns error code
8171 static int
8172 umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
8174 proc_t *procp;
8175 int ret;
8177 ASSERT(cookie);
8178 if (cookie->upd_max_lock_rctl == 0)
8179 return (0);
8181 procp = cookie->procp;
8182 ASSERT(procp);
8184 if ((ret = i_ddi_incr_locked_memory(procp,
8185 cookie->size)) != 0) {
8186 return (ret);
8188 return (0);
8192 * Decrements the max-locked-memory resource ctl and releases
8193 * the hold on the project that was acquired during umem_incr_devlockmem
8195 static void
8196 umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
8198 proc_t *proc;
8200 if (cookie->upd_max_lock_rctl == 0)
8201 return;
8203 proc = (proc_t *)cookie->procp;
8204 if (!proc)
8205 return;
8207 i_ddi_decr_locked_memory(proc, cookie->size);
/*
 * A consolidation private function which is essentially equivalent to
 * ddi_umem_lock but with the addition of arguments ops_vector and procp.
 * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
 * the ops_vector is valid.
 *
 * Lock the virtual address range in the current process and create a
 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
 * to user space.
 *
 * Note: The resource control accounting currently uses a full charge model
 * in other words attempts to lock the same/overlapping areas of memory
 * will deduct the full size of the buffer from the projects running
 * counter for the device locked memory.
 *
 * addr, size should be PAGESIZE aligned
 *
 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
 *	identifies whether the locked memory will be read or written or both
 *      DDI_UMEMLOCK_LONGTERM  must be set when the locking will
 * be maintained for an indefinitely long period (essentially permanent),
 * rather than for what would be required for a typical I/O completion.
 * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
 * if the memory pertains to a regular file which is mapped MAP_SHARED.
 * This is to prevent a deadlock if a file truncation is attempted after
 * after the locking is done.
 *
 * Returns 0 on success
 *	EINVAL - for invalid parameters
 *	EPERM, ENOMEM and other error codes returned by as_pagelock
 *	ENOMEM - is returned if the current request to lock memory exceeds
 *		*.max-locked-memory resource control value.
 *      EFAULT - memory pertains to a regular file mapped shared and
 *		and DDI_UMEMLOCK_LONGTERM flag is set
 *	EAGAIN - could not start the ddi_umem_unlock list processing thread
 */
int
umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
    struct umem_callback_ops *ops_vector,
    proc_t *procp)
{
	int	error;
	struct ddi_umem_cookie *p;
	void	(*driver_callback)() = NULL;
	struct as *as;
	struct seg		*seg;
	vnode_t			*vp;

	/* Allow device drivers to not have to reference "curproc" */
	if (procp == NULL)
		procp = curproc;
	as = procp->p_as;
	*cookie = NULL;		/* in case of any error return */

	/* These are the only three valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
	    DDI_UMEMLOCK_LONGTERM)) != 0)
		return (EINVAL);

	/* At least one (can be both) of the two access flags must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
		return (EINVAL);

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0)
		return (EINVAL);

	if ((len & PAGEOFFSET) != 0)
		return (EINVAL);

	/*
	 * For longterm locking a driver callback must be specified; if
	 * not longterm then a callback is optional.
	 */
	if (ops_vector != NULL) {
		if (ops_vector->cbo_umem_callback_version !=
		    UMEM_CALLBACK_VERSION)
			return (EINVAL);
		else
			driver_callback = ops_vector->cbo_umem_lock_cleanup;
	}
	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
		return (EINVAL);

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store procp in cookie for later iosetup/unlock */
	p->procp = (void *)procp;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = as;

	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;
	init_lockedmem_rctl_flag(p);

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
	if (error != 0) {
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/*
	 * For longterm locking the addr must pertain to a seg_vn segment or
	 * or a seg_spt segment.
	 * If the segment pertains to a regular file, it cannot be
	 * mapped MAP_SHARED.
	 * This is to prevent a deadlock if a file truncation is attempted
	 * after the locking is done.
	 * Doing this after as_pagelock guarantees persistence of the as; if
	 * an unacceptable segment is found, the cleanup includes calling
	 * as_pageunlock before returning EFAULT.
	 *
	 * segdev is allowed here as it is already locked.  This allows
	 * for memory exported by drivers through mmap() (which is already
	 * locked) to be allowed for LONGTERM.
	 */
	if (flags & DDI_UMEMLOCK_LONGTERM) {
		extern const struct seg_ops segspt_shmops;
		extern const struct seg_ops segdev_ops;
		AS_LOCK_ENTER(as, RW_READER);
		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
			if (seg == NULL || seg->s_base > addr + len)
				break;
			if (seg->s_ops == &segdev_ops)
				continue;
			if (((seg->s_ops != &segvn_ops) &&
			    (seg->s_ops != &segspt_shmops)) ||
			    ((segop_getvp(seg, addr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG) &&
			    (segop_gettype(seg, addr) & MAP_SHARED))) {
				as_pageunlock(as, p->pparray,
				    addr, len, p->s_flags);
				AS_LOCK_EXIT(as);
				umem_decr_devlockmem(p);
				kmem_free(p, sizeof (struct ddi_umem_cookie));
				*cookie = (ddi_umem_cookie_t)NULL;
				return (EFAULT);
			}
		}
		AS_LOCK_EXIT(as);
	}

	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	if (driver_callback != NULL) {
		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
		p->cook_refcnt = 2;
		p->callbacks = *ops_vector;
	} else {
		/* only i_ddi_umem_unlock needs the cookie */
		p->cook_refcnt = 1;
	}

	*cookie = (ddi_umem_cookie_t)p;

	/*
	 * If a driver callback was specified, add an entry to the
	 * as struct callback list. The as_pagelock above guarantees
	 * the persistence of as.
	 */
	if (driver_callback) {
		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
		    addr, len, KM_SLEEP);
		if (error != 0) {
			as_pageunlock(as, p->pparray,
			    addr, len, p->s_flags);
			umem_decr_devlockmem(p);
			kmem_free(p, sizeof (struct ddi_umem_cookie));
			*cookie = (ddi_umem_cookie_t)NULL;
		}
	}
	return (error);
}
/*
 * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
 * the cookie.  Called from i_ddi_umem_unlock_thread.
 */

static void
i_ddi_umem_unlock(struct ddi_umem_cookie *p)
{
	uint_t	rc;

	/*
	 * There is no way to determine whether a callback to
	 * umem_lock_undo was registered via as_add_callback.
	 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
	 * a valid callback function structure.)  as_delete_callback
	 * is called to delete a possible registered callback.  If the
	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
	 * indicates that there was a callback registered, and that is was
	 * successfully deleted.  Thus, the cookie reference count
	 * will never be decremented by umem_lock_undo.  Just return the
	 * memory for the cookie, since both users of the cookie are done.
	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
	 * indicates that callback processing is taking place and, and
	 * umem_lock_undo is, or will be, executing, and thus decrementing
	 * the cookie reference count when it is complete.
	 *
	 * This needs to be done before as_pageunlock so that the
	 * persistence of as is guaranteed because of the locked pages.
	 *
	 */
	rc = as_delete_callback(p->asp, p);


	/*
	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
	 * after relvm is called so use p->asp.
	 */
	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);

	/*
	 * Now that we have unlocked the memory decrement the
	 * *.max-locked-memory rctl
	 */
	umem_decr_devlockmem(p);

	if (rc == AS_CALLBACK_DELETED) {
		/* umem_lock_undo will not happen, return the cookie memory */
		ASSERT(p->cook_refcnt == 2);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
	} else {
		/*
		 * umem_undo_lock may happen if as_delete_callback returned
		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
		 * reference count, atomically, and return the cookie
		 * memory if the reference count goes to zero.  The only
		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
		 * case, just return the cookie memory.
		 */
		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
		    (atomic_dec_ulong_nv((ulong_t *)(&(p->cook_refcnt)))
		    == 0)) {
			kmem_free(p, sizeof (struct ddi_umem_cookie));
		}
	}
}
/*
 * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
 *
 * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
 * until it is empty.  Then, wait for more to be added.  This thread is awoken
 * via calls to ddi_umem_unlock.
 */

static void
i_ddi_umem_unlock_thread(void)
{
	struct ddi_umem_cookie	*ret_cookie;
	callb_cpr_t	cprinfo;

	/* process the ddi_umem_unlock list */
	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
	    callb_generic_cpr, "unlock_thread");
	for (;;) {
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
			ret_cookie = ddi_umem_unlock_head;
			/* take it off the list */
			if ((ddi_umem_unlock_head =
			    ddi_umem_unlock_head->unl_forw) == NULL) {
				ddi_umem_unlock_tail = NULL;
			}
			mutex_exit(&ddi_umem_unlock_mutex);
			/* unlock the pages in this cookie */
			(void) i_ddi_umem_unlock(ret_cookie);
		} else {	/* list is empty, wait for next ddi_umem_unlock */
			/* CPR-safe wait: mark idle while blocked on the cv */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
			mutex_exit(&ddi_umem_unlock_mutex);
		}
	}
	/* ddi_umem_unlock_thread does not exit */
	/* NOTREACHED */
}
/*
 * Start the thread that will process the ddi_umem_unlock list if it is
 * not already started (i_ddi_umem_unlock_thread).
 */
static void
i_ddi_umem_unlock_thread_start(void)
{
	/* check-and-create under the mutex so only one thread is started */
	mutex_enter(&ddi_umem_unlock_mutex);
	if (ddi_umem_unlock_thread == NULL) {
		ddi_umem_unlock_thread = thread_create(NULL, 0,
		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
		    TS_RUN, minclsyspri);
	}
	mutex_exit(&ddi_umem_unlock_mutex);
}
/*
 * Lock the virtual address range in the current process and create a
 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
 * to user space.
 *
 * Note: The resource control accounting currently uses a full charge model
 * in other words attempts to lock the same/overlapping areas of memory
 * will deduct the full size of the buffer from the projects running
 * counter for the device locked memory. This applies to umem_lockmemory too.
 *
 * addr, size should be PAGESIZE aligned
 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
 *	identifies whether the locked memory will be read or written or both
 *
 * Returns 0 on success
 *	EINVAL - for invalid parameters
 *	EPERM, ENOMEM and other error codes returned by as_pagelock
 *	ENOMEM - is returned if the current request to lock memory exceeds
 *		*.max-locked-memory resource control value.
 *	EAGAIN - could not start the ddi_umem_unlock list processing thread
 */
int
ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
{
	int		error;
	struct ddi_umem_cookie *p;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only two valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
		return (EINVAL);
	}

	/* At least one of the two flags (or both) must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
		return (EINVAL);
	}

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	if ((len & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store curproc in cookie for later iosetup/unlock */
	p->procp = (void *)curproc;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock. The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = curproc->p_as;
	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;
	init_lockedmem_rctl_flag(p);

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
	    addr, len, p->s_flags);
	if (error != 0) {
		/* undo the lockmem charge taken above */
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	p->cook_refcnt = 1;

	*cookie = (ddi_umem_cookie_t)p;
	return (error);
}
/*
 * Add the cookie to the ddi_umem_unlock list.  Pages will be
 * unlocked by i_ddi_umem_unlock_thread.  When not in interrupt
 * context the pages are unlocked synchronously instead.
 */
void
ddi_umem_unlock(ddi_umem_cookie_t cookie)
{
	struct ddi_umem_cookie	*p = (struct ddi_umem_cookie *)cookie;

	ASSERT(p->type == UMEM_LOCKED);
	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
	ASSERT(ddi_umem_unlock_thread != NULL);

	p->unl_forw = NULL;	/* end of list */
	/*
	 * Queue the unlock request and notify i_ddi_umem_unlock thread
	 * if it's called in the interrupt context. Otherwise, unlock pages
	 * immediately.
	 */
	if (servicing_interrupt()) {
		/* queue the unlock request and notify the thread */
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head == NULL) {
			/* list was empty: the worker may be asleep, wake it */
			ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
			cv_broadcast(&ddi_umem_unlock_cv);
		} else {
			ddi_umem_unlock_tail->unl_forw = p;
			ddi_umem_unlock_tail = p;
		}
		mutex_exit(&ddi_umem_unlock_mutex);
	} else {
		/* unlock the pages right away */
		(void) i_ddi_umem_unlock(p);
	}
}
/*
 * Create a buf structure from a ddi_umem_cookie
 * cookie - is a ddi_umem_cookie for from ddi_umem_lock and ddi_umem_alloc
 *		(only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
 * off, len - identifies the portion of the memory represented by the cookie
 *		that the buf points to.
 *	NOTE: off, len need to follow the alignment/size restrictions of the
 *		device (dev) that this buf will be passed to. Some devices
 *		will accept unrestricted alignment/size, whereas others (such as
 *		st) require some block-size alignment/size. It is the caller's
 *		responsibility to ensure that the alignment/size restrictions
 *		are met (we cannot assert as we do not know the restrictions)
 *
 * direction - is one of B_READ or B_WRITE and needs to be compatible with
 *		the flags used in ddi_umem_lock
 *
 * The following three arguments are used to initialize fields in the
 * buf structure and are uninterpreted by this routine.
 *
 * dev
 * blkno
 * iodone
 *
 * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
 *
 * Returns a buf structure pointer on success (to be freed by freerbuf)
 *	NULL on any parameter error or memory alloc failure
 */
struct buf *
ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
    int direction, dev_t dev, daddr_t blkno,
    int (*iodone)(struct buf *), int sleepflag)
{
	struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
	struct buf *bp;

	/*
	 * check for valid cookie offset, len
	 * NOTE(review): off is a signed off_t; off + len could wrap for
	 * extreme values before the comparison — the second (len) check
	 * only partially guards this.  Confirm callers pass sane offsets.
	 */
	if ((off + len) > p->size) {
		return (NULL);
	}

	if (len > p->size) {
		return (NULL);
	}

	/* direction has to be one of B_READ or B_WRITE */
	if ((direction != B_READ) && (direction != B_WRITE)) {
		return (NULL);
	}

	/* These are the only two valid sleepflags */
	if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
		return (NULL);
	}

	/*
	 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
	 */
	if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
		return (NULL);
	}

	/* If type is KMEM_NON_PAGEABLE procp is NULL */
	ASSERT((p->type == KMEM_NON_PAGEABLE) ?
	    (p->procp == NULL) : (p->procp != NULL));

	bp = kmem_alloc(sizeof (struct buf), sleepflag);
	if (bp == NULL) {
		return (NULL);
	}
	bioinit(bp);

	bp->b_flags = B_BUSY | B_PHYS | direction;
	bp->b_edev = dev;
	bp->b_lblkno = blkno;
	bp->b_iodone = iodone;
	bp->b_bcount = len;
	bp->b_proc = (proc_t *)p->procp;
	ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
	bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
	if (p->pparray != NULL) {
		/* locked cookie: export the shadow page list to the buf */
		bp->b_flags |= B_SHADOW;
		ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
		bp->b_shadow = p->pparray + btop(off);
	}
	return (bp);
}
8782 * Fault-handling and related routines
8785 ddi_devstate_t
8786 ddi_get_devstate(dev_info_t *dip)
8788 if (DEVI_IS_DEVICE_OFFLINE(dip))
8789 return (DDI_DEVSTATE_OFFLINE);
8790 else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
8791 return (DDI_DEVSTATE_DOWN);
8792 else if (DEVI_IS_BUS_QUIESCED(dip))
8793 return (DDI_DEVSTATE_QUIESCED);
8794 else if (DEVI_IS_DEVICE_DEGRADED(dip))
8795 return (DDI_DEVSTATE_DEGRADED);
8796 else
8797 return (DDI_DEVSTATE_UP);
/*
 * Report a device fault: build a ddi_fault_event_data describing the
 * impact/location/message plus the device's current state, then post it
 * via the DDI_DEVI_FAULT_EVENT cookie obtained from the defining parent.
 * Silently does nothing if no parent defines the fault event.
 */
void
ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
    ddi_fault_location_t location, const char *message)
{
	struct ddi_fault_event_data fd;
	ddi_eventcookie_t ec;

	/*
	 * Assemble all the information into a fault-event-data structure
	 */
	fd.f_dip = dip;
	fd.f_impact = impact;
	fd.f_location = location;
	fd.f_message = message;
	fd.f_oldstate = ddi_get_devstate(dip);

	/*
	 * Get eventcookie from defining parent.
	 */
	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
	    DDI_SUCCESS)
		return;

	(void) ndi_post_event(dip, dip, ec, &fd);
}
8826 char *
8827 i_ddi_devi_class(dev_info_t *dip)
8829 return (DEVI(dip)->devi_device_class);
/*
 * Replace the device-class string on a devinfo node.  Any previous class
 * string is freed first.  Returns DDI_SUCCESS if the new class string was
 * duplicated (flag is the kmem sleep flag), DDI_FAILURE if i_ddi_strdup
 * could not allocate.
 */
int
i_ddi_set_devi_class(dev_info_t *dip, char *devi_class, int flag)
{
	struct dev_info *devi = DEVI(dip);

	mutex_enter(&devi->devi_lock);

	/* release the old class string, if any */
	if (devi->devi_device_class)
		kmem_free(devi->devi_device_class,
		    strlen(devi->devi_device_class) + 1);

	if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
	    != NULL) {
		mutex_exit(&devi->devi_lock);
		return (DDI_SUCCESS);
	}

	mutex_exit(&devi->devi_lock);

	return (DDI_FAILURE);
}
/*
 * Task Queues DDI interfaces.
 */

/*
 * Create a DDI task queue.  When a dip is supplied the queue name is
 * "<drivername>_<name>" and the taskq instance is keyed by the driver
 * instance number; with no dip the caller's name is used verbatim.
 */
/* ARGSUSED */
ddi_taskq_t *
ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
    pri_t pri, uint_t cflags)
{
	char full_name[TASKQ_NAMELEN];
	const char *tq_name;
	int nodeid = 0;

	if (dip == NULL)
		tq_name = name;
	else {
		nodeid = ddi_get_instance(dip);

		if (name == NULL)
			name = "tq";

		(void) snprintf(full_name, sizeof (full_name), "%s_%s",
		    ddi_driver_name(dip), name);

		tq_name = full_name;
	}

	/* TASKQ_DEFAULTPRI maps to the standard kernel priority */
	return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
	    pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
	    nthreads, INT_MAX, TASKQ_PREPOPULATE));
}
8887 void
8888 ddi_taskq_destroy(ddi_taskq_t *tq)
8890 taskq_destroy((taskq_t *)tq);
8894 ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
8895 void *arg, uint_t dflags)
8897 taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
8898 dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);
8900 return (id != 0 ? DDI_SUCCESS : DDI_FAILURE);
8903 void
8904 ddi_taskq_wait(ddi_taskq_t *tq)
8906 taskq_wait((taskq_t *)tq);
8909 void
8910 ddi_taskq_suspend(ddi_taskq_t *tq)
8912 taskq_suspend((taskq_t *)tq);
8915 boolean_t
8916 ddi_taskq_suspended(ddi_taskq_t *tq)
8918 return (taskq_suspended((taskq_t *)tq));
8921 void
8922 ddi_taskq_resume(ddi_taskq_t *tq)
8924 taskq_resume((taskq_t *)tq);
/*
 * Split a name of the form "<alpha-prefix><decimal-number>" (e.g. "hme0")
 * into its prefix (copied into alnum) and its trailing number (*nump).
 * The string is scanned backward from the end until the first non-digit.
 * Returns DDI_FAILURE if there is no trailing number, the whole string is
 * digits, the string is empty, or the number does not parse.
 */
int
ddi_parse(
	const char	*ifname,
	char		*alnum,
	uint_t		*nump)
{
	const char	*p;
	int		l;
	ulong_t		num;
	boolean_t	nonum = B_TRUE;	/* no digit seen yet */
	char		c;

	l = strlen(ifname);
	for (p = ifname + l; p != ifname; l--) {
		c = *--p;
		if (!isdigit(c)) {
			/* copy the prefix (l chars) and parse the suffix */
			(void) strlcpy(alnum, ifname, l + 1);
			if (ddi_strtoul(p + 1, NULL, 10, &num) != 0)
				return (DDI_FAILURE);
			break;
		}
		nonum = B_FALSE;
	}
	/* l == 0: all digits (or empty); nonum: no trailing digits at all */
	if (l == 0 || nonum)
		return (DDI_FAILURE);

	/* NOTE(review): narrows ulong_t to uint_t — assumes suffix fits */
	*nump = num;
	return (DDI_SUCCESS);
}
/*
 * Default initialization function for drivers that don't need to quiesce.
 */
/* ARGSUSED */
int
ddi_quiesce_not_needed(dev_info_t *dip)
{
	return (DDI_SUCCESS);
}
/*
 * Initialization function for drivers that should implement quiesce()
 * but haven't yet.  Always fails, so fast reboot falls back accordingly.
 */
/* ARGSUSED */
int
ddi_quiesce_not_supported(dev_info_t *dip)
{
	return (DDI_FAILURE);
}
8978 char *
8979 ddi_strdup(const char *str, int flag)
8981 int n;
8982 char *ptr;
8984 ASSERT(str != NULL);
8985 ASSERT((flag == KM_SLEEP) || (flag == KM_NOSLEEP));
8987 n = strlen(str);
8988 if ((ptr = kmem_alloc(n + 1, flag)) == NULL)
8989 return (NULL);
8990 bcopy(str, ptr, n + 1);
8991 return (ptr);
/*
 * Kernel strdup(): always-sleeping variant of ddi_strdup().
 * Free the result with strfree().
 */
char *
strdup(const char *str)
{
	return (ddi_strdup(str, KM_SLEEP));
}
/*
 * Free a string allocated by strdup()/ddi_strdup().  The length is
 * recomputed with strlen(), so the string must still be NUL-terminated.
 */
void
strfree(char *str)
{
	ASSERT(str != NULL);
	kmem_free(str, strlen(str) + 1);
}
/*
 * Generic DDI callback interfaces.
 */

/*
 * Register the (single) generic DDI callback for a devinfo node.
 * Returns DDI_SUCCESS and fills *ret_hdlp on success, DDI_EINVAL for bad
 * parameters, DDI_EALREADY if a callback is already registered, or
 * DDI_FAILURE when (erroneously) called from interrupt context.  The
 * ASSERTs duplicate the runtime checks so misuse trips in DEBUG kernels.
 */
int
ddi_cb_register(dev_info_t *dip, ddi_cb_flags_t flags, ddi_cb_func_t cbfunc,
    void *arg1, void *arg2, ddi_cb_handle_t *ret_hdlp)
{
	ddi_cb_t	*cbp;

	ASSERT(dip != NULL);
	ASSERT(DDI_CB_FLAG_VALID(flags));
	ASSERT(cbfunc != NULL);
	ASSERT(ret_hdlp != NULL);

	/* Sanity check the context */
	ASSERT(!servicing_interrupt());
	if (servicing_interrupt())
		return (DDI_FAILURE);

	/* Validate parameters */
	if ((dip == NULL) || !DDI_CB_FLAG_VALID(flags) ||
	    (cbfunc == NULL) || (ret_hdlp == NULL))
		return (DDI_EINVAL);

	/* Check for previous registration */
	if (DEVI(dip)->devi_cb_p != NULL)
		return (DDI_EALREADY);

	/* Allocate and initialize callback */
	cbp = kmem_zalloc(sizeof (ddi_cb_t), KM_SLEEP);
	cbp->cb_dip = dip;
	cbp->cb_func = cbfunc;
	cbp->cb_arg1 = arg1;
	cbp->cb_arg2 = arg2;
	cbp->cb_flags = flags;
	DEVI(dip)->devi_cb_p = cbp;

	/* If adding an IRM callback, notify IRM */
	if (flags & DDI_CB_FLAG_INTR)
		i_ddi_irm_set_cb(dip, B_TRUE);

	/* handle is the address of the per-dip callback pointer */
	*ret_hdlp = (ddi_cb_handle_t)&(DEVI(dip)->devi_cb_p);
	return (DDI_SUCCESS);
}
/*
 * Unregister a generic DDI callback previously installed by
 * ddi_cb_register().  hdl is the handle returned at registration time.
 * Returns DDI_SUCCESS, DDI_EINVAL for a bad handle, or DDI_FAILURE when
 * called from interrupt context.
 */
int
ddi_cb_unregister(ddi_cb_handle_t hdl)
{
	ddi_cb_t	*cbp;
	dev_info_t	*dip;

	/* Sanity check the context */
	ASSERT(!servicing_interrupt());
	if (servicing_interrupt())
		return (DDI_FAILURE);

	/* Validate parameters */
	if ((hdl == NULL) || ((cbp = *(ddi_cb_t **)hdl) == NULL) ||
	    ((dip = cbp->cb_dip) == NULL))
		return (DDI_EINVAL);

	/* If removing an IRM callback, notify IRM */
	if (cbp->cb_flags & DDI_CB_FLAG_INTR)
		i_ddi_irm_set_cb(dip, B_FALSE);

	/* Destroy the callback */
	kmem_free(cbp, sizeof (ddi_cb_t));
	DEVI(dip)->devi_cb_p = NULL;

	return (DDI_SUCCESS);
}
9081 * Platform independent DR routines
9084 static int
9085 ndi2errno(int n)
9087 int err = 0;
9089 switch (n) {
9090 case NDI_NOMEM:
9091 err = ENOMEM;
9092 break;
9093 case NDI_BUSY:
9094 err = EBUSY;
9095 break;
9096 case NDI_FAULT:
9097 err = EFAULT;
9098 break;
9099 case NDI_FAILURE:
9100 err = EIO;
9101 break;
9102 case NDI_SUCCESS:
9103 break;
9104 case NDI_BADHANDLE:
9105 default:
9106 err = EINVAL;
9107 break;
9109 return (err);
/*
 * Prom tree node list: singly-linked list of selected PROM node ids,
 * built by visit_node() and consumed by create_prom_branch().
 */
struct ptnode {
	pnode_t		nodeid;		/* PROM node identifier */
	struct ptnode	*next;		/* next selected node, or NULL */
};
/*
 * Prom tree walk arg: shared state threaded through the PROM-tree walk
 * done by create_prom_branch()/visit_node().
 */
struct pta {
	dev_info_t	*pdip;	/* parent dip of the branch being built */
	devi_branch_t	*bp;	/* caller's branch descriptor */
	uint_t		flags;	/* DEVI_BRANCH_* flags */
	dev_info_t	*fdip;	/* dip returned by branch configure, if any */
	struct ptnode	*head;	/* list of selected PROM nodes */
};
9131 static void
9132 visit_node(pnode_t nodeid, struct pta *ap)
9134 struct ptnode **nextp;
9135 int (*select)(pnode_t, void *, uint_t);
9137 ASSERT(nodeid != OBP_NONODE && nodeid != OBP_BADNODE);
9139 select = ap->bp->create.prom_branch_select;
9141 ASSERT(select);
9143 if (select(nodeid, ap->bp->arg, 0) == DDI_SUCCESS) {
9145 for (nextp = &ap->head; *nextp; nextp = &(*nextp)->next)
9148 *nextp = kmem_zalloc(sizeof (struct ptnode), KM_SLEEP);
9150 (*nextp)->nodeid = nodeid;
9153 if ((ap->flags & DEVI_BRANCH_CHILD) == DEVI_BRANCH_CHILD)
9154 return;
9156 nodeid = prom_childnode(nodeid);
9157 while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
9158 visit_node(nodeid, ap);
9159 nodeid = prom_nextnode(nodeid);
/*
 * NOTE: The caller of this function must check for device contracts
 * or LDI callbacks against this dip before setting the dip offline.
 *
 * Mark a not-yet-attached ("infant") dip offline.  arg is a scratch
 * buffer of at least MAXPATHLEN used only for the warning message.
 * Fails (with a warning) if the node has already reached DS_ATTACHED.
 */
static int
set_infant_dip_offline(dev_info_t *dip, void *arg)
{
	char	*path = (char *)arg;

	ASSERT(dip);
	ASSERT(arg);

	if (i_ddi_node_state(dip) >= DS_ATTACHED) {
		(void) ddi_pathname(dip, path);
		cmn_err(CE_WARN, "Attempt to set offline flag on attached "
		    "node: %s", path);
		return (DDI_FAILURE);
	}

	mutex_enter(&(DEVI(dip)->devi_lock));
	if (!DEVI_IS_DEVICE_OFFLINE(dip))
		DEVI_SET_DEVICE_OFFLINE(dip);
	mutex_exit(&(DEVI(dip)->devi_lock));

	return (DDI_SUCCESS);
}
/*
 * Walk state for dip_set_offline(): a scratch pathname buffer plus the
 * accumulated result of the offline-notification walk.
 */
typedef struct result {
	char	*path;		/* MAXPATHLEN scratch buffer */
	int	result;		/* DDI_SUCCESS until a notify fails */
} result_t;
9195 static int
9196 dip_set_offline(dev_info_t *dip, void *arg)
9198 int end;
9199 result_t *resp = (result_t *)arg;
9201 ASSERT(dip);
9202 ASSERT(resp);
9205 * We stop the walk if e_ddi_offline_notify() returns
9206 * failure, because this implies that one or more consumers
9207 * (either LDI or contract based) has blocked the offline.
9208 * So there is no point in conitnuing the walk
9210 if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
9211 resp->result = DDI_FAILURE;
9212 return (DDI_WALK_TERMINATE);
9216 * If set_infant_dip_offline() returns failure, it implies
9217 * that we failed to set a particular dip offline. This
9218 * does not imply that the offline as a whole should fail.
9219 * We want to do the best we can, so we continue the walk.
9221 if (set_infant_dip_offline(dip, resp->path) == DDI_SUCCESS)
9222 end = DDI_SUCCESS;
9223 else
9224 end = DDI_FAILURE;
9226 e_ddi_offline_finalize(dip, end);
9228 return (DDI_WALK_CONTINUE);
/*
 * The call to e_ddi_offline_notify() exists for the
 * unlikely error case that a branch we are trying to
 * create already exists and has device contracts or LDI
 * event callbacks against it.
 *
 * We allow create to succeed for such branches only if
 * no constraints block the offline.
 */
static int
branch_set_offline(dev_info_t *dip, char *path)
{
	int		circ;
	int		end;
	result_t	res;

	/* offline the branch root first, honoring any constraints */
	if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
		return (DDI_FAILURE);
	}

	if (set_infant_dip_offline(dip, path) == DDI_SUCCESS)
		end = DDI_SUCCESS;
	else
		end = DDI_FAILURE;

	e_ddi_offline_finalize(dip, end);

	if (end == DDI_FAILURE)
		return (DDI_FAILURE);

	/* then walk the children, offlining each in turn */
	res.result = DDI_SUCCESS;
	res.path = path;

	ndi_devi_enter(dip, &circ);
	ddi_walk_devs(ddi_get_child(dip), dip_set_offline, &res);
	ndi_devi_exit(dip, circ);

	return (res.result);
}
/*
 * prom_tree_access() callback: build devinfo branches for every PROM node
 * under ap->pdip that the caller's selector accepts.  Each selected node
 * becomes (or matches) a branch whose dips are set offline so that only
 * an explicit "configure" operation can attach it.  Returns 0 on success
 * or the first errno-style failure encountered (later nodes are still
 * processed).
 */
/*ARGSUSED*/
static int
create_prom_branch(void *arg, int has_changed)
{
	int		circ;
	int		exists, rv;
	pnode_t		nodeid;
	struct ptnode	*tnp;
	dev_info_t	*dip;
	struct pta	*ap = arg;
	devi_branch_t	*bp;
	char		*path;

	ASSERT(ap);
	ASSERT(ap->fdip == NULL);
	ASSERT(ap->pdip && ndi_dev_is_prom_node(ap->pdip));

	bp = ap->bp;

	nodeid = ddi_get_nodeid(ap->pdip);
	if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE) {
		cmn_err(CE_WARN, "create_prom_branch: invalid "
		    "nodeid: 0x%x", nodeid);
		return (EINVAL);
	}

	/* collect all selected PROM nodes under the parent */
	ap->head = NULL;

	nodeid = prom_childnode(nodeid);
	while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
		visit_node(nodeid, ap);
		nodeid = prom_nextnode(nodeid);
	}

	if (ap->head == NULL)
		return (ENODEV);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	rv = 0;
	while ((tnp = ap->head) != NULL) {
		ap->head = tnp->next;

		ndi_devi_enter(ap->pdip, &circ);

		/*
		 * Check if the branch already exists.
		 */
		exists = 0;
		dip = e_ddi_nodeid_to_dip(tnp->nodeid);
		if (dip != NULL) {
			exists = 1;

			/* Parent is held busy, so release hold */
			ndi_rele_devi(dip);
#ifdef	DEBUG
			cmn_err(CE_WARN, "create_prom_branch: dip(%p) exists"
			    " for nodeid 0x%x", (void *)dip, tnp->nodeid);
#endif
		} else {
			dip = i_ddi_create_branch(ap->pdip, tnp->nodeid);
		}

		kmem_free(tnp, sizeof (struct ptnode));

		/*
		 * Hold the branch if it is not already held
		 */
		if (dip && !exists) {
			e_ddi_branch_hold(dip);
		}

		ASSERT(dip == NULL || e_ddi_branch_held(dip));

		/*
		 * Set all dips in the newly created branch offline so that
		 * only a "configure" operation can attach
		 * the branch
		 */
		if (dip == NULL || branch_set_offline(dip, path)
		    == DDI_FAILURE) {
			ndi_devi_exit(ap->pdip, circ);
			rv = EIO;
			continue;
		}

		ASSERT(ddi_get_parent(dip) == ap->pdip);

		ndi_devi_exit(ap->pdip, circ);

		if (ap->flags & DEVI_BRANCH_CONFIGURE) {
			int error = e_ddi_branch_configure(dip, &ap->fdip, 0);
			if (error && rv == 0)
				rv = error;
		}

		/*
		 * Invoke devi_branch_callback() (if it exists) only for
		 * newly created branches
		 */
		if (bp->devi_branch_callback && !exists)
			bp->devi_branch_callback(dip, bp->arg, 0);
	}

	kmem_free(path, MAXPATHLEN);

	return (rv);
}
9380 static int
9381 sid_node_create(dev_info_t *pdip, devi_branch_t *bp, dev_info_t **rdipp)
9383 int rv, circ, len;
9384 int i, flags, ret;
9385 dev_info_t *dip;
9386 char *nbuf;
9387 char *path;
9388 static const char *noname = "<none>";
9390 ASSERT(pdip);
9391 ASSERT(DEVI_BUSY_OWNED(pdip));
9393 flags = 0;
9396 * Creating the root of a branch ?
9398 if (rdipp) {
9399 *rdipp = NULL;
9400 flags = DEVI_BRANCH_ROOT;
9403 ndi_devi_alloc_sleep(pdip, (char *)noname, DEVI_SID_NODEID, &dip);
9404 rv = bp->create.sid_branch_create(dip, bp->arg, flags);
9406 nbuf = kmem_alloc(OBP_MAXDRVNAME, KM_SLEEP);
9408 if (rv == DDI_WALK_ERROR) {
9409 cmn_err(CE_WARN, "e_ddi_branch_create: Error setting"
9410 " properties on devinfo node %p", (void *)dip);
9411 goto fail;
9414 len = OBP_MAXDRVNAME;
9415 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
9416 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "name", nbuf, &len)
9417 != DDI_PROP_SUCCESS) {
9418 cmn_err(CE_WARN, "e_ddi_branch_create: devinfo node %p has"
9419 "no name property", (void *)dip);
9420 goto fail;
9423 ASSERT(i_ddi_node_state(dip) == DS_PROTO);
9424 if (ndi_devi_set_nodename(dip, nbuf, 0) != NDI_SUCCESS) {
9425 cmn_err(CE_WARN, "e_ddi_branch_create: cannot set name (%s)"
9426 " for devinfo node %p", nbuf, (void *)dip);
9427 goto fail;
9430 kmem_free(nbuf, OBP_MAXDRVNAME);
9433 * Ignore bind failures just like boot does
9435 (void) ndi_devi_bind_driver(dip, 0);
9437 switch (rv) {
9438 case DDI_WALK_CONTINUE:
9439 case DDI_WALK_PRUNESIB:
9440 ndi_devi_enter(dip, &circ);
9442 i = DDI_WALK_CONTINUE;
9443 for (; i == DDI_WALK_CONTINUE; ) {
9444 i = sid_node_create(dip, bp, NULL);
9447 ASSERT(i == DDI_WALK_ERROR || i == DDI_WALK_PRUNESIB);
9448 if (i == DDI_WALK_ERROR)
9449 rv = i;
9451 * If PRUNESIB stop creating siblings
9452 * of dip's child. Subsequent walk behavior
9453 * is determined by rv returned by dip.
9456 ndi_devi_exit(dip, circ);
9457 break;
9458 case DDI_WALK_TERMINATE:
9460 * Don't create children and ask our parent
9461 * to not create siblings either.
9463 rv = DDI_WALK_PRUNESIB;
9464 break;
9465 case DDI_WALK_PRUNECHILD:
9467 * Don't create children, but ask parent to continue
9468 * with siblings.
9470 rv = DDI_WALK_CONTINUE;
9471 break;
9472 default:
9473 ASSERT(0);
9474 break;
9477 if (rdipp)
9478 *rdipp = dip;
9481 * Set device offline - only the "configure" op should cause an attach.
9482 * Note that it is safe to set the dip offline without checking
9483 * for either device contract or layered driver (LDI) based constraints
9484 * since there cannot be any contracts or LDI opens of this device.
9485 * This is because this node is a newly created dip with the parent busy
9486 * held, so no other thread can come in and attach this dip. A dip that
9487 * has never been attached cannot have contracts since by definition
9488 * a device contract (an agreement between a process and a device minor
9489 * node) can only be created against a device that has minor nodes
9490 * i.e is attached. Similarly an LDI open will only succeed if the
9491 * dip is attached. We assert below that the dip is not attached.
9493 ASSERT(i_ddi_node_state(dip) < DS_ATTACHED);
9494 path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
9495 ret = set_infant_dip_offline(dip, path);
9496 ASSERT(ret == DDI_SUCCESS);
9497 kmem_free(path, MAXPATHLEN);
9499 return (rv);
9500 fail:
9501 (void) ndi_devi_free(dip);
9502 kmem_free(nbuf, OBP_MAXDRVNAME);
9503 return (DDI_WALK_ERROR);
9506 static int
9507 create_sid_branch(
9508 dev_info_t *pdip,
9509 devi_branch_t *bp,
9510 dev_info_t **dipp,
9511 uint_t flags)
9513 int rv = 0, state = DDI_WALK_CONTINUE;
9514 dev_info_t *rdip;
9516 while (state == DDI_WALK_CONTINUE) {
9517 int circ;
9519 ndi_devi_enter(pdip, &circ);
9521 state = sid_node_create(pdip, bp, &rdip);
9522 if (rdip == NULL) {
9523 ndi_devi_exit(pdip, circ);
9524 ASSERT(state == DDI_WALK_ERROR);
9525 break;
9528 e_ddi_branch_hold(rdip);
9530 ndi_devi_exit(pdip, circ);
9532 if (flags & DEVI_BRANCH_CONFIGURE) {
9533 int error = e_ddi_branch_configure(rdip, dipp, 0);
9534 if (error && rv == 0)
9535 rv = error;
9539 * devi_branch_callback() is optional
9541 if (bp->devi_branch_callback)
9542 bp->devi_branch_callback(rdip, bp->arg, 0);
9545 ASSERT(state == DDI_WALK_ERROR || state == DDI_WALK_PRUNESIB);
9547 return (state == DDI_WALK_ERROR ? EIO : rv);
/*
 * Create a devinfo branch under pdip, either from PROM nodes
 * (DEVI_BRANCH_PROM, via a selector callback) or self-identifying nodes
 * (DEVI_BRANCH_SID, via a creator callback), as described by bp.
 * DEVI_BRANCH_EVENT is not supported here.  Returns 0 or an errno.
 */
int
e_ddi_branch_create(
	dev_info_t	*pdip,
	devi_branch_t	*bp,
	dev_info_t	**dipp,
	uint_t		flags)
{
	int	prom_devi, sid_devi, error;

	if (pdip == NULL || bp == NULL || bp->type == 0)
		return (EINVAL);

	prom_devi = (bp->type == DEVI_BRANCH_PROM) ? 1 : 0;
	sid_devi = (bp->type == DEVI_BRANCH_SID) ? 1 : 0;

	/* exactly one branch type, with its matching callback, is required */
	if (prom_devi && bp->create.prom_branch_select == NULL)
		return (EINVAL);
	else if (sid_devi && bp->create.sid_branch_create == NULL)
		return (EINVAL);
	else if (!prom_devi && !sid_devi)
		return (EINVAL);

	if (flags & DEVI_BRANCH_EVENT)
		return (EINVAL);

	if (prom_devi) {
		struct pta pta = {0};

		pta.pdip = pdip;
		pta.bp = bp;
		pta.flags = flags;

		/* PROM tree must be accessed under prom_tree_access() */
		error = prom_tree_access(create_prom_branch, &pta, NULL);

		if (dipp)
			*dipp = pta.fdip;
		else if (pta.fdip)
			ndi_rele_devi(pta.fdip);
	} else {
		error = create_sid_branch(pdip, bp, dipp, flags);
	}

	return (error);
}
/*
 * Configure (bind, init and online) a held devinfo branch rooted at rdip.
 * flags must be 0.  On failure, if dipp is non-NULL it receives a held
 * rdip so the caller can examine the failing node.  Returns 0 or an errno
 * derived from the NDI status.
 */
int
e_ddi_branch_configure(dev_info_t *rdip, dev_info_t **dipp, uint_t flags)
{
	int	rv;
	char	*devnm;
	dev_info_t *pdip;

	if (dipp)
		*dipp = NULL;

	/*
	 * NOTE(review): the (flags & DEVI_BRANCH_EVENT) disjunct is dead
	 * code — any nonzero flags already fails the flags != 0 test.
	 */
	if (rdip == NULL || flags != 0 || (flags & DEVI_BRANCH_EVENT))
		return (EINVAL);

	pdip = ddi_get_parent(rdip);

	ndi_hold_devi(pdip);

	if (!e_ddi_branch_held(rdip)) {
		ndi_rele_devi(pdip);
		cmn_err(CE_WARN, "e_ddi_branch_configure: "
		    "dip(%p) not held", (void *)rdip);
		return (EINVAL);
	}

	if (i_ddi_node_state(rdip) < DS_INITIALIZED) {
		/*
		 * First attempt to bind a driver. If we fail, return
		 * success (On some platforms, dips for some device
		 * types (CPUs) may not have a driver)
		 */
		if (ndi_devi_bind_driver(rdip, 0) != NDI_SUCCESS) {
			ndi_rele_devi(pdip);
			return (0);
		}

		if (ddi_initchild(pdip, rdip) != DDI_SUCCESS) {
			rv = NDI_FAILURE;
			goto out;
		}
	}

	ASSERT(i_ddi_node_state(rdip) >= DS_INITIALIZED);

	devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);

	(void) ddi_deviname(rdip, devnm);

	if ((rv = ndi_devi_config_one(pdip, devnm+1, &rdip,
	    NDI_DEVI_ONLINE | NDI_CONFIG)) == NDI_SUCCESS) {
		/* release hold from ndi_devi_config_one() */
		ndi_rele_devi(rdip);
	}

	kmem_free(devnm, MAXNAMELEN + 1);
out:
	if (rv != NDI_SUCCESS && dipp && rdip) {
		/* hand the failing (held) dip back to the caller */
		ndi_hold_devi(rdip);
		*dipp = rdip;
	}
	ndi_rele_devi(pdip);
	return (ndi2errno(rv));
}
9658 void
9659 e_ddi_branch_hold(dev_info_t *rdip)
9661 if (e_ddi_branch_held(rdip)) {
9662 cmn_err(CE_WARN, "e_ddi_branch_hold: branch already held");
9663 return;
9666 mutex_enter(&DEVI(rdip)->devi_lock);
9667 if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) == 0) {
9668 DEVI(rdip)->devi_flags |= DEVI_BRANCH_HELD;
9669 DEVI(rdip)->devi_ref++;
9671 ASSERT(DEVI(rdip)->devi_ref > 0);
9672 mutex_exit(&DEVI(rdip)->devi_lock);
9676 e_ddi_branch_held(dev_info_t *rdip)
9678 int rv = 0;
9680 mutex_enter(&DEVI(rdip)->devi_lock);
9681 if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) &&
9682 DEVI(rdip)->devi_ref > 0) {
9683 rv = 1;
9685 mutex_exit(&DEVI(rdip)->devi_lock);
9687 return (rv);
9690 void
9691 e_ddi_branch_rele(dev_info_t *rdip)
9693 mutex_enter(&DEVI(rdip)->devi_lock);
9694 DEVI(rdip)->devi_flags &= ~DEVI_BRANCH_HELD;
9695 DEVI(rdip)->devi_ref--;
9696 mutex_exit(&DEVI(rdip)->devi_lock);
/*
 * Unconfigure (and with DEVI_BRANCH_DESTROY, remove) a held devinfo
 * branch rooted at rdip.  The branch hold is dropped for the operation
 * and re-taken if the dip survives.  The caller must NOT hold the parent
 * busy (devfs_clean() would deadlock).  Returns 0 or an errno derived
 * from the NDI status.
 */
int
e_ddi_branch_unconfigure(
	dev_info_t *rdip,
	dev_info_t **dipp,
	uint_t flags)
{
	int	circ, rv;
	int	destroy;
	char	*devnm;
	uint_t	nflags;
	dev_info_t *pdip;

	if (dipp)
		*dipp = NULL;

	if (rdip == NULL)
		return (EINVAL);

	pdip = ddi_get_parent(rdip);

	ASSERT(pdip);

	/*
	 * Check if caller holds pdip busy - can cause deadlocks during
	 * devfs_clean()
	 */
	if (DEVI_BUSY_OWNED(pdip)) {
		cmn_err(CE_WARN, "e_ddi_branch_unconfigure: failed: parent"
		    " devinfo node(%p) is busy held", (void *)pdip);
		return (EINVAL);
	}

	destroy = (flags & DEVI_BRANCH_DESTROY) ? 1 : 0;

	devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);

	ndi_devi_enter(pdip, &circ);
	(void) ddi_deviname(rdip, devnm);
	ndi_devi_exit(pdip, circ);

	/*
	 * ddi_deviname() returns a component name with / prepended.
	 */
	(void) devfs_clean(pdip, devnm + 1, DV_CLEAN_FORCE);

	ndi_devi_enter(pdip, &circ);

	/*
	 * Recreate device name as it may have changed state (init/uninit)
	 * when parent busy lock was dropped for devfs_clean()
	 */
	(void) ddi_deviname(rdip, devnm);

	if (!e_ddi_branch_held(rdip)) {
		kmem_free(devnm, MAXNAMELEN + 1);
		ndi_devi_exit(pdip, circ);
		cmn_err(CE_WARN, "e_ddi_%s_branch: dip(%p) not held",
		    destroy ? "destroy" : "unconfigure", (void *)rdip);
		return (EINVAL);
	}

	/*
	 * Release hold on the branch. This is ok since we are holding the
	 * parent busy. If rdip is not removed, we must do a hold on the
	 * branch before returning.
	 */
	e_ddi_branch_rele(rdip);

	nflags = NDI_DEVI_OFFLINE;
	if (destroy || (flags & DEVI_BRANCH_DESTROY)) {
		nflags |= NDI_DEVI_REMOVE;
		destroy = 1;
	} else {
		nflags |= NDI_UNCONFIG;		/* uninit but don't remove */
	}

	if (flags & DEVI_BRANCH_EVENT)
		nflags |= NDI_POST_EVENT;

	if (i_ddi_devi_attached(pdip) &&
	    (i_ddi_node_state(rdip) >= DS_INITIALIZED)) {
		rv = ndi_devi_unconfig_one(pdip, devnm+1, dipp, nflags);
	} else {
		rv = e_ddi_devi_unconfig(rdip, dipp, nflags);
		if (rv == NDI_SUCCESS) {
			ASSERT(!destroy || ddi_get_child(rdip) == NULL);
			rv = ndi_devi_offline(rdip, nflags);
		}
	}

	if (!destroy || rv != NDI_SUCCESS) {
		/* The dip still exists, so do a hold */
		e_ddi_branch_hold(rdip);
	}
out:
	kmem_free(devnm, MAXNAMELEN + 1);
	ndi_devi_exit(pdip, circ);
	return (ndi2errno(rv));
}
/*
 * Destroy a held devinfo branch: unconfigure it with the
 * DEVI_BRANCH_DESTROY flag forced on.  Returns 0 or an errno.
 */
int
e_ddi_branch_destroy(dev_info_t *rdip, dev_info_t **dipp, uint_t flag)
{
	return (e_ddi_branch_unconfigure(rdip, dipp,
	    flag|DEVI_BRANCH_DESTROY));
}
/*
 * Number of chains for hash table
 */
#define	NUMCHAINS	17

/*
 * Devinfo busy arg: accumulators for e_ddi_branch_referenced().  Open
 * counts are tallied per-dip in two hashes (devfs vnodes and specfs
 * snodes) and then reported through the caller's callback.
 */
struct devi_busy {
	int dv_total;		/* total devfs vnode holds seen */
	int s_total;		/* total specfs snode opens seen */
	mod_hash_t *dv_hash;	/* per-dip devfs vnode hold counts */
	mod_hash_t *s_hash;	/* per-dip snode open counts */
	int (*callback)(dev_info_t *, void *, uint_t);
	void *arg;		/* opaque argument for callback */
};
9823 static int
9824 visit_dip(dev_info_t *dip, void *arg)
9826 uintptr_t sbusy, dvbusy, ref;
9827 struct devi_busy *bsp = arg;
9829 ASSERT(bsp->callback);
9832 * A dip cannot be busy if its reference count is 0
9834 if ((ref = e_ddi_devi_holdcnt(dip)) == 0) {
9835 return (bsp->callback(dip, bsp->arg, 0));
9838 if (mod_hash_find(bsp->dv_hash, dip, (mod_hash_val_t *)&dvbusy))
9839 dvbusy = 0;
9842 * To catch device opens currently maintained on specfs common snodes.
9844 if (mod_hash_find(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
9845 sbusy = 0;
9847 #ifdef DEBUG
9848 if (ref < sbusy || ref < dvbusy) {
9849 cmn_err(CE_WARN, "dip(%p): sopen = %lu, dvopen = %lu "
9850 "dip ref = %lu\n", (void *)dip, sbusy, dvbusy, ref);
9852 #endif
9854 dvbusy = (sbusy > dvbusy) ? sbusy : dvbusy;
9856 return (bsp->callback(dip, bsp->arg, dvbusy));
9859 static int
9860 visit_snode(struct snode *sp, void *arg)
9862 uintptr_t sbusy;
9863 dev_info_t *dip;
9864 int count;
9865 struct devi_busy *bsp = arg;
9867 ASSERT(sp);
9870 * The stable lock is held. This prevents
9871 * the snode and its associated dip from
9872 * going away.
9874 dip = NULL;
9875 count = spec_devi_open_count(sp, &dip);
9877 if (count <= 0)
9878 return (DDI_WALK_CONTINUE);
9880 ASSERT(dip);
9882 if (mod_hash_remove(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
9883 sbusy = count;
9884 else
9885 sbusy += count;
9887 if (mod_hash_insert(bsp->s_hash, dip, (mod_hash_val_t)sbusy)) {
9888 cmn_err(CE_WARN, "%s: s_hash insert failed: dip=0x%p, "
9889 "sbusy = %lu", "e_ddi_branch_referenced",
9890 (void *)dip, sbusy);
9893 bsp->s_total += count;
9895 return (DDI_WALK_CONTINUE);
9898 static void
9899 visit_dvnode(struct dv_node *dv, void *arg)
9901 uintptr_t dvbusy;
9902 uint_t count;
9903 struct vnode *vp;
9904 struct devi_busy *bsp = arg;
9906 ASSERT(dv && dv->dv_devi);
9908 vp = DVTOV(dv);
9910 mutex_enter(&vp->v_lock);
9911 count = vp->v_count;
9912 mutex_exit(&vp->v_lock);
9914 if (!count)
9915 return;
9917 if (mod_hash_remove(bsp->dv_hash, dv->dv_devi,
9918 (mod_hash_val_t *)&dvbusy))
9919 dvbusy = count;
9920 else
9921 dvbusy += count;
9923 if (mod_hash_insert(bsp->dv_hash, dv->dv_devi,
9924 (mod_hash_val_t)dvbusy)) {
9925 cmn_err(CE_WARN, "%s: dv_hash insert failed: dip=0x%p, "
9926 "dvbusy=%lu", "e_ddi_branch_referenced",
9927 (void *)dv->dv_devi, dvbusy);
9930 bsp->dv_total += count;
/*
 * Count outstanding references on the devinfo branch rooted at rdip:
 * both /devices vnode holds (via devfs_walk) and device opens on
 * specfs snodes (via spec_snode_walk), accumulated per-dip.  If
 * callback is non-NULL it is invoked for each dip in the branch with
 * that dip's reference count (see visit_dip above).
 *
 * Caller must hold the branch (e_ddi_branch_held) and must NOT hold
 * the parent busy.
 *
 * Returns reference count on success or -1 on failure.
 */
int
e_ddi_branch_referenced(
	dev_info_t *rdip,
	int (*callback)(dev_info_t *dip, void *arg, uint_t ref),
	void *arg)
{
	int circ;
	char *path;
	dev_info_t *pdip;
	struct devi_busy bsa = {0};

	ASSERT(rdip);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);

	ndi_hold_devi(rdip);

	pdip = ddi_get_parent(rdip);

	ASSERT(pdip);

	/*
	 * Check if caller holds pdip busy - can cause deadlocks during
	 * devfs_walk()
	 */
	if (!e_ddi_branch_held(rdip) || DEVI_BUSY_OWNED(pdip)) {
		cmn_err(CE_WARN, "e_ddi_branch_referenced: failed: "
		    "devinfo branch(%p) not held or parent busy held",
		    (void *)rdip);
		ndi_rele_devi(rdip);
		kmem_free(path, MAXPATHLEN);
		return (-1);
	}

	/* Snapshot the branch root's /devices pathname under the parent. */
	ndi_devi_enter(pdip, &circ);
	(void) ddi_pathname(rdip, path);
	ndi_devi_exit(pdip, circ);

	/* Per-dip reference counts, keyed by dev_info pointer. */
	bsa.dv_hash = mod_hash_create_ptrhash("dv_node busy hash", NUMCHAINS,
	    mod_hash_null_valdtor, sizeof (struct dev_info));

	bsa.s_hash = mod_hash_create_ptrhash("snode busy hash", NUMCHAINS,
	    mod_hash_null_valdtor, sizeof (struct snode));

	if (devfs_walk(path, visit_dvnode, &bsa)) {
		cmn_err(CE_WARN, "e_ddi_branch_referenced: "
		    "devfs walk failed for: %s", path);
		kmem_free(path, MAXPATHLEN);
		bsa.s_total = bsa.dv_total = -1;
		goto out;
	}

	kmem_free(path, MAXPATHLEN);

	/*
	 * Walk the snode table to detect device opens, which are currently
	 * maintained on specfs common snodes.
	 */
	spec_snode_walk(visit_snode, &bsa);

	if (callback == NULL)
		goto out;

	bsa.callback = callback;
	bsa.arg = arg;

	/* Report per-dip counts: the root first, then its descendants. */
	if (visit_dip(rdip, &bsa) == DDI_WALK_CONTINUE) {
		ndi_devi_enter(rdip, &circ);
		ddi_walk_devs(ddi_get_child(rdip), visit_dip, &bsa);
		ndi_devi_exit(rdip, circ);
	}

out:
	ndi_rele_devi(rdip);
	mod_hash_destroy_ptrhash(bsa.s_hash);
	mod_hash_destroy_ptrhash(bsa.dv_hash);

	/* Larger of snode opens vs. dv_node holds (both -1 on failure). */
	return (bsa.s_total > bsa.dv_total ? bsa.s_total : bsa.dv_total);
}