4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/systm.h>
37 #include <sys/model.h>
42 #include <sys/t_lock.h>
47 #include <vm/seg_vn.h>
48 #include <vm/seg_dev.h>
50 #include <sys/cmn_err.h>
51 #include <sys/cpuvar.h>
52 #include <sys/debug.h>
53 #include <sys/autoconf.h>
54 #include <sys/sunddi.h>
55 #include <sys/esunddi.h>
56 #include <sys/sunndi.h>
57 #include <sys/kstat.h>
59 #include <sys/ddi_impldefs.h> /* include implementation structure defs */
60 #include <sys/ndi_impldefs.h> /* include prototypes */
61 #include <sys/ddi_timer.h>
62 #include <sys/hwconf.h>
63 #include <sys/pathname.h>
64 #include <sys/modctl.h>
66 #include <sys/devctl.h>
67 #include <sys/callb.h>
68 #include <sys/cladm.h>
69 #include <sys/sysevent.h>
70 #include <sys/dacf_impl.h>
71 #include <sys/ddidevmap.h>
72 #include <sys/bootconf.h>
74 #include <sys/atomic.h>
75 #include <sys/promif.h>
76 #include <sys/instance.h>
77 #include <sys/sysevent/eventdefs.h>
79 #include <sys/project.h>
80 #include <sys/taskq.h>
81 #include <sys/devpolicy.h>
82 #include <sys/ctype.h>
86 #include <sys/clock_impl.h>
88 #include <sys/modhash.h>
89 #include <sys/sunldi_impl.h>
90 #include <sys/fs/dv_node.h>
91 #include <sys/fs/snode.h>
93 extern pri_t minclsyspri
;
95 extern rctl_hndl_t rc_project_locked_mem
;
96 extern rctl_hndl_t rc_zone_locked_mem
;
99 static int sunddi_debug
= 0;
102 /* ddi_umem_unlock miscellaneous */
104 static void i_ddi_umem_unlock_thread_start(void);
106 static kmutex_t ddi_umem_unlock_mutex
; /* unlock list mutex */
107 static kcondvar_t ddi_umem_unlock_cv
; /* unlock list block/unblock */
108 static kthread_t
*ddi_umem_unlock_thread
;
110 * The ddi_umem_unlock FIFO list. NULL head pointer indicates empty list.
112 static struct ddi_umem_cookie
*ddi_umem_unlock_head
= NULL
;
113 static struct ddi_umem_cookie
*ddi_umem_unlock_tail
= NULL
;
116 * DDI(Sun) Function and flag definitions:
121 * Used to indicate which entries were chosen from a range.
123 char *chosen_reg
= "chosen-reg";
127 * Function used to ring system console bell
129 void (*ddi_console_bell_func
)(clock_t duration
);
132 * Creating register mappings and handling interrupts:
136 * Generic ddi_map: Call parent to fulfill request...
140 ddi_map(dev_info_t
*dp
, ddi_map_req_t
*mp
, off_t offset
,
141 off_t len
, caddr_t
*addrp
)
146 pdip
= (dev_info_t
*)DEVI(dp
)->devi_parent
;
147 return ((DEVI(pdip
)->devi_ops
->devo_bus_ops
->bus_map
)(pdip
,
148 dp
, mp
, offset
, len
, addrp
));
152 * ddi_apply_range: (Called by nexi only.)
153 * Apply ranges in parent node dp, to child regspec rp...
157 ddi_apply_range(dev_info_t
*dp
, dev_info_t
*rdip
, struct regspec
*rp
)
159 return (i_ddi_apply_range(dp
, rdip
, rp
));
163 ddi_map_regs(dev_info_t
*dip
, uint_t rnumber
, caddr_t
*kaddrp
, off_t offset
,
177 * get the 'registers' or the 'reg' property.
178 * We look up the reg property as an array of
181 rc
= ddi_prop_lookup_int_array(DDI_DEV_T_ANY
, dip
,
182 DDI_PROP_DONTPASS
, "registers", (int **)®list
, &length
);
183 if (rc
!= DDI_PROP_SUCCESS
)
184 rc
= ddi_prop_lookup_int_array(DDI_DEV_T_ANY
, dip
,
185 DDI_PROP_DONTPASS
, "reg", (int **)®list
, &length
);
186 if (rc
== DDI_PROP_SUCCESS
) {
188 * point to the required entry.
190 reg
= reglist
[rnumber
];
195 * make a new property containing ONLY the required tuple.
197 if (ddi_prop_update_int_array(DDI_DEV_T_NONE
, dip
,
198 chosen_reg
, (int *)®
, (sizeof (reg
)/sizeof (int)))
199 != DDI_PROP_SUCCESS
) {
200 cmn_err(CE_WARN
, "%s%d: cannot create '%s' "
201 "property", DEVI(dip
)->devi_name
,
202 DEVI(dip
)->devi_instance
, chosen_reg
);
205 * free the memory allocated by
206 * ddi_prop_lookup_int_array ().
208 ddi_prop_free((void *)reglist
);
211 mr
.map_op
= DDI_MO_MAP_LOCKED
;
212 mr
.map_type
= DDI_MT_RNUMBER
;
213 mr
.map_obj
.rnumber
= rnumber
;
214 mr
.map_prot
= PROT_READ
| PROT_WRITE
;
215 mr
.map_flags
= DDI_MF_KERNEL_MAPPING
;
216 mr
.map_handlep
= NULL
;
217 mr
.map_vers
= DDI_MAP_VERSION
;
220 * Call my parent to map in my regs.
223 return (ddi_map(dip
, &mr
, offset
, len
, kaddrp
));
227 ddi_unmap_regs(dev_info_t
*dip
, uint_t rnumber
, caddr_t
*kaddrp
, off_t offset
,
232 mr
.map_op
= DDI_MO_UNMAP
;
233 mr
.map_type
= DDI_MT_RNUMBER
;
234 mr
.map_flags
= DDI_MF_KERNEL_MAPPING
;
235 mr
.map_prot
= PROT_READ
| PROT_WRITE
; /* who cares? */
236 mr
.map_obj
.rnumber
= rnumber
;
237 mr
.map_handlep
= NULL
;
238 mr
.map_vers
= DDI_MAP_VERSION
;
241 * Call my parent to unmap my regs.
244 (void) ddi_map(dip
, &mr
, offset
, len
, kaddrp
);
245 *kaddrp
= (caddr_t
)0;
247 (void) ddi_prop_remove(DDI_DEV_T_NONE
, dip
, chosen_reg
);
252 ddi_bus_map(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_map_req_t
*mp
,
253 off_t offset
, off_t len
, caddr_t
*vaddrp
)
255 return (i_ddi_bus_map(dip
, rdip
, mp
, offset
, len
, vaddrp
));
259 * nullbusmap: The/DDI default bus_map entry point for nexi
260 * not conforming to the reg/range paradigm (i.e. scsi, etc.)
261 * with no HAT/MMU layer to be programmed at this level.
263 * If the call is to map by rnumber, return an error,
264 * otherwise pass anything else up the tree to my parent.
267 nullbusmap(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_map_req_t
*mp
,
268 off_t offset
, off_t len
, caddr_t
*vaddrp
)
270 _NOTE(ARGUNUSED(rdip
))
271 if (mp
->map_type
== DDI_MT_RNUMBER
)
272 return (DDI_ME_UNSUPPORTED
);
274 return (ddi_map(dip
, mp
, offset
, len
, vaddrp
));
278 * ddi_rnumber_to_regspec: Not for use by leaf drivers.
279 * Only for use by nexi using the reg/range paradigm.
282 ddi_rnumber_to_regspec(dev_info_t
*dip
, int rnumber
)
284 return (i_ddi_rnumber_to_regspec(dip
, rnumber
));
289 * Note that we allow the dip to be nil because we may be called
290 * prior even to the instantiation of the devinfo tree itself - all
291 * regular leaf and nexus drivers should always use a non-nil dip!
293 * We treat peek in a somewhat cavalier fashion .. assuming that we'll
294 * simply get a synchronous fault as soon as we touch a missing address.
296 * Poke is rather more carefully handled because we might poke to a write
297 * buffer, "succeed", then only find some time later that we got an
298 * asynchronous fault that indicated that the address we were writing to
299 * was not really backed by hardware.
303 i_ddi_peekpoke(dev_info_t
*devi
, ddi_ctl_enum_t cmd
, size_t size
,
304 void *addr
, void *value_p
)
313 peekpoke_ctlops_t peekpoke_args
;
314 uint64_t dummy_result
;
317 /* Note: size is assumed to be correct; it is not checked. */
318 peekpoke_args
.size
= size
;
319 peekpoke_args
.dev_addr
= (uintptr_t)addr
;
320 peekpoke_args
.handle
= NULL
;
321 peekpoke_args
.repcount
= 1;
322 peekpoke_args
.flags
= 0;
324 if (cmd
== DDI_CTLOPS_POKE
) {
326 case sizeof (uint8_t):
327 peekpoke_value
.u8
= *(uint8_t *)value_p
;
329 case sizeof (uint16_t):
330 peekpoke_value
.u16
= *(uint16_t *)value_p
;
332 case sizeof (uint32_t):
333 peekpoke_value
.u32
= *(uint32_t *)value_p
;
335 case sizeof (uint64_t):
336 peekpoke_value
.u64
= *(uint64_t *)value_p
;
341 peekpoke_args
.host_addr
= (uintptr_t)&peekpoke_value
.u64
;
344 rval
= ddi_ctlops(devi
, devi
, cmd
, &peekpoke_args
,
347 rval
= peekpoke_mem(cmd
, &peekpoke_args
);
350 * A NULL value_p is permitted by ddi_peek(9F); discard the result.
352 if ((cmd
== DDI_CTLOPS_PEEK
) & (value_p
!= NULL
)) {
354 case sizeof (uint8_t):
355 *(uint8_t *)value_p
= peekpoke_value
.u8
;
357 case sizeof (uint16_t):
358 *(uint16_t *)value_p
= peekpoke_value
.u16
;
360 case sizeof (uint32_t):
361 *(uint32_t *)value_p
= peekpoke_value
.u32
;
363 case sizeof (uint64_t):
364 *(uint64_t *)value_p
= peekpoke_value
.u64
;
373 * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this.
374 * they shouldn't be, but the 9f manpage kind of pseudo exposes it.
377 ddi_peek(dev_info_t
*devi
, size_t size
, void *addr
, void *value_p
)
380 case sizeof (uint8_t):
381 case sizeof (uint16_t):
382 case sizeof (uint32_t):
383 case sizeof (uint64_t):
386 return (DDI_FAILURE
);
389 return (i_ddi_peekpoke(devi
, DDI_CTLOPS_PEEK
, size
, addr
, value_p
));
393 ddi_poke(dev_info_t
*devi
, size_t size
, void *addr
, void *value_p
)
396 case sizeof (uint8_t):
397 case sizeof (uint16_t):
398 case sizeof (uint32_t):
399 case sizeof (uint64_t):
402 return (DDI_FAILURE
);
405 return (i_ddi_peekpoke(devi
, DDI_CTLOPS_POKE
, size
, addr
, value_p
));
409 ddi_peek8(dev_info_t
*dip
, int8_t *addr
, int8_t *val_p
)
411 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_PEEK
, sizeof (*val_p
), addr
,
416 ddi_peek16(dev_info_t
*dip
, int16_t *addr
, int16_t *val_p
)
418 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_PEEK
, sizeof (*val_p
), addr
,
423 ddi_peek32(dev_info_t
*dip
, int32_t *addr
, int32_t *val_p
)
425 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_PEEK
, sizeof (*val_p
), addr
,
430 ddi_peek64(dev_info_t
*dip
, int64_t *addr
, int64_t *val_p
)
432 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_PEEK
, sizeof (*val_p
), addr
,
438 * We need to separate the old interfaces from the new ones and leave them
439 * in here for a while. Previous versions of the OS defined the new interfaces
440 * to the old interfaces. This way we can fix things up so that we can
441 * eventually remove these interfaces.
442 * e.g. A 3rd party module/driver using ddi_peek8 and built against S10
443 * or earlier will actually have a reference to ddi_peekc in the binary.
447 ddi_peekc(dev_info_t
*dip
, int8_t *addr
, int8_t *val_p
)
449 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_PEEK
, sizeof (*val_p
), addr
,
454 ddi_peeks(dev_info_t
*dip
, int16_t *addr
, int16_t *val_p
)
456 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_PEEK
, sizeof (*val_p
), addr
,
461 ddi_peekl(dev_info_t
*dip
, int32_t *addr
, int32_t *val_p
)
463 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_PEEK
, sizeof (*val_p
), addr
,
468 ddi_peekd(dev_info_t
*dip
, int64_t *addr
, int64_t *val_p
)
470 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_PEEK
, sizeof (*val_p
), addr
,
476 ddi_poke8(dev_info_t
*dip
, int8_t *addr
, int8_t val
)
478 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_POKE
, sizeof (val
), addr
, &val
));
482 ddi_poke16(dev_info_t
*dip
, int16_t *addr
, int16_t val
)
484 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_POKE
, sizeof (val
), addr
, &val
));
488 ddi_poke32(dev_info_t
*dip
, int32_t *addr
, int32_t val
)
490 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_POKE
, sizeof (val
), addr
, &val
));
494 ddi_poke64(dev_info_t
*dip
, int64_t *addr
, int64_t val
)
496 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_POKE
, sizeof (val
), addr
, &val
));
500 * We need to separate the old interfaces from the new ones and leave them
501 * in here for a while. Previous versions of the OS defined the new interfaces
502 * to the old interfaces. This way we can fix things up so that we can
503 * eventually remove these interfaces.
504 * e.g. A 3rd party module/driver using ddi_poke8 and built against S10
505 * or earlier will actually have a reference to ddi_pokec in the binary.
509 ddi_pokec(dev_info_t
*dip
, int8_t *addr
, int8_t val
)
511 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_POKE
, sizeof (val
), addr
, &val
));
515 ddi_pokes(dev_info_t
*dip
, int16_t *addr
, int16_t val
)
517 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_POKE
, sizeof (val
), addr
, &val
));
521 ddi_pokel(dev_info_t
*dip
, int32_t *addr
, int32_t val
)
523 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_POKE
, sizeof (val
), addr
, &val
));
527 ddi_poked(dev_info_t
*dip
, int64_t *addr
, int64_t val
)
529 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_POKE
, sizeof (val
), addr
, &val
));
534 * ddi_peekpokeio() is used primarily by the mem drivers for moving
535 * data to and from uio structures via peek and poke. Note that we
536 * use "internal" routines ddi_peek and ddi_poke to make this go
537 * slightly faster, avoiding the call overhead ..
540 ddi_peekpokeio(dev_info_t
*devi
, struct uio
*uio
, enum uio_rw rw
,
541 caddr_t addr
, size_t len
, uint_t xfersize
)
548 if (xfersize
> sizeof (long))
549 xfersize
= sizeof (long);
552 if ((len
| (uintptr_t)addr
) & 1) {
553 sz
= sizeof (int8_t);
554 if (rw
== UIO_WRITE
) {
555 if ((o
= uwritec(uio
)) == -1)
556 return (DDI_FAILURE
);
557 if (ddi_poke8(devi
, (int8_t *)addr
,
558 (int8_t)o
) != DDI_SUCCESS
)
559 return (DDI_FAILURE
);
561 if (i_ddi_peekpoke(devi
, DDI_CTLOPS_PEEK
, sz
,
562 (int8_t *)addr
, &w8
) != DDI_SUCCESS
)
563 return (DDI_FAILURE
);
565 return (DDI_FAILURE
);
569 case sizeof (int64_t):
570 if (((len
| (uintptr_t)addr
) &
571 (sizeof (int64_t) - 1)) == 0) {
576 case sizeof (int32_t):
577 if (((len
| (uintptr_t)addr
) &
578 (sizeof (int32_t) - 1)) == 0) {
585 * This still assumes that we might have an
586 * I/O bus out there that permits 16-bit
587 * transfers (and that it would be upset by
588 * 32-bit transfers from such locations).
590 sz
= sizeof (int16_t);
594 if (rw
== UIO_READ
) {
595 if (i_ddi_peekpoke(devi
, DDI_CTLOPS_PEEK
, sz
,
596 addr
, &ibuffer
) != DDI_SUCCESS
)
597 return (DDI_FAILURE
);
600 if (uiomove(&ibuffer
, sz
, rw
, uio
))
601 return (DDI_FAILURE
);
603 if (rw
== UIO_WRITE
) {
604 if (i_ddi_peekpoke(devi
, DDI_CTLOPS_POKE
, sz
,
605 addr
, &ibuffer
) != DDI_SUCCESS
)
606 return (DDI_FAILURE
);
612 return (DDI_SUCCESS
);
616 * These routines are used by drivers that do layered ioctls
617 * On sparc, they're implemented in assembler to avoid spilling
618 * register windows in the common (copyin) case ..
620 #if !defined(__sparc)
622 ddi_copyin(const void *buf
, void *kernbuf
, size_t size
, int flags
)
625 return (kcopy(buf
, kernbuf
, size
) ? -1 : 0);
626 return (copyin(buf
, kernbuf
, size
));
630 ddi_copyout(const void *buf
, void *kernbuf
, size_t size
, int flags
)
633 return (kcopy(buf
, kernbuf
, size
) ? -1 : 0);
634 return (copyout(buf
, kernbuf
, size
));
636 #endif /* !__sparc */
639 * Conversions in nexus pagesize units. We don't duplicate the
640 * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
644 ddi_btop(dev_info_t
*dip
, unsigned long bytes
)
648 (void) ddi_ctlops(dip
, dip
, DDI_CTLOPS_BTOP
, &bytes
, &pages
);
653 ddi_btopr(dev_info_t
*dip
, unsigned long bytes
)
657 (void) ddi_ctlops(dip
, dip
, DDI_CTLOPS_BTOPR
, &bytes
, &pages
);
662 ddi_ptob(dev_info_t
*dip
, unsigned long pages
)
666 (void) ddi_ctlops(dip
, dip
, DDI_CTLOPS_PTOB
, &pages
, &bytes
);
671 ddi_enter_critical(void)
673 return ((uint_t
)spl7());
/*
 * Restore the spl saved by ddi_enter_critical().
 * NOTE(review): body line lost in extraction; splx(spl) reconstructed —
 * confirm against upstream.
 */
void
ddi_exit_critical(unsigned int spl)
{
	splx(spl);
}
683 * Nexus ctlops punter
686 #if !defined(__sparc)
688 * Request bus_ctl parent to handle a bus_ctl request
690 * (The sparc version is in sparc_ddi.s)
693 ddi_ctlops(dev_info_t
*d
, dev_info_t
*r
, ddi_ctl_enum_t op
, void *a
, void *v
)
698 return (DDI_FAILURE
);
700 if ((d
= (dev_info_t
*)DEVI(d
)->devi_bus_ctl
) == NULL
)
701 return (DDI_FAILURE
);
703 fp
= DEVI(d
)->devi_ops
->devo_bus_ops
->bus_ctl
;
704 return ((*fp
)(d
, r
, op
, a
, v
));
714 static ddi_dma_lim_t standard_limits
= {
715 (uint_t
)0, /* addr_t dlim_addr_lo */
716 (uint_t
)-1, /* addr_t dlim_addr_hi */
717 (uint_t
)-1, /* uint_t dlim_cntr_max */
718 (uint_t
)1, /* uint_t dlim_burstsizes */
719 (uint_t
)1, /* uint_t dlim_minxfer */
720 0 /* uint_t dlim_dmaspeed */
723 static ddi_dma_lim_t standard_limits
= {
724 (uint_t
)0, /* addr_t dlim_addr_lo */
725 (uint_t
)0xffffff, /* addr_t dlim_addr_hi */
726 (uint_t
)0, /* uint_t dlim_cntr_max */
727 (uint_t
)0x00000001, /* uint_t dlim_burstsizes */
728 (uint_t
)DMA_UNIT_8
, /* uint_t dlim_minxfer */
729 (uint_t
)0, /* uint_t dlim_dmaspeed */
730 (uint_t
)0x86<<24+0, /* uint_t dlim_version */
731 (uint_t
)0xffff, /* uint_t dlim_adreg_max */
732 (uint_t
)0xffff, /* uint_t dlim_ctreg_max */
733 (uint_t
)512, /* uint_t dlim_granular */
734 (int)1, /* int dlim_sgllen */
735 (uint_t
)0xffffffff /* uint_t dlim_reqsizes */
740 #if !defined(__sparc)
742 * Request bus_dma_ctl parent to fiddle with a dma request.
744 * (The sparc version is in sparc_subr.s)
747 ddi_dma_mctl(dev_info_t
*dip
, dev_info_t
*rdip
,
748 ddi_dma_handle_t handle
, enum ddi_dma_ctlops request
,
749 off_t
*offp
, size_t *lenp
, caddr_t
*objp
, uint_t flags
)
753 if (dip
!= ddi_root_node())
754 dip
= (dev_info_t
*)DEVI(dip
)->devi_bus_dma_ctl
;
755 fp
= DEVI(dip
)->devi_ops
->devo_bus_ops
->bus_dma_ctl
;
756 return ((*fp
) (dip
, rdip
, handle
, request
, offp
, lenp
, objp
, flags
));
761 * For all DMA control functions, call the DMA control
762 * routine and return status.
764 * Just plain assume that the parent is to be called.
765 * If a nexus driver or a thread outside the framework
766 * of a nexus driver or a leaf driver calls these functions,
767 * it is up to them to deal with the fact that the parent's
768 * bus_dma_ctl function will be the first one called.
771 #define HD ((ddi_dma_impl_t *)h)->dmai_rdip
774 * This routine is left in place to satisfy link dependencies
775 * for any 3rd party nexus drivers that rely on it. It is never
780 ddi_dma_map(dev_info_t
*dip
, dev_info_t
*rdip
,
781 struct ddi_dma_req
*dmareqp
, ddi_dma_handle_t
*handlep
)
783 return (DDI_FAILURE
);
786 #if !defined(__sparc)
789 * The SPARC versions of these routines are done in assembler to
790 * save register windows, so they're in sparc_subr.s.
794 ddi_dma_allochdl(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_dma_attr_t
*attr
,
795 int (*waitfp
)(caddr_t
), caddr_t arg
, ddi_dma_handle_t
*handlep
)
797 int (*funcp
)(dev_info_t
*, dev_info_t
*, ddi_dma_attr_t
*,
798 int (*)(caddr_t
), caddr_t
, ddi_dma_handle_t
*);
800 if (dip
!= ddi_root_node())
801 dip
= (dev_info_t
*)DEVI(dip
)->devi_bus_dma_allochdl
;
803 funcp
= DEVI(dip
)->devi_ops
->devo_bus_ops
->bus_dma_allochdl
;
804 return ((*funcp
)(dip
, rdip
, attr
, waitfp
, arg
, handlep
));
808 ddi_dma_freehdl(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_dma_handle_t handlep
)
810 int (*funcp
)(dev_info_t
*, dev_info_t
*, ddi_dma_handle_t
);
812 if (dip
!= ddi_root_node())
813 dip
= (dev_info_t
*)DEVI(dip
)->devi_bus_dma_allochdl
;
815 funcp
= DEVI(dip
)->devi_ops
->devo_bus_ops
->bus_dma_freehdl
;
816 return ((*funcp
)(dip
, rdip
, handlep
));
820 ddi_dma_bindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
821 ddi_dma_handle_t handle
, struct ddi_dma_req
*dmareq
,
822 ddi_dma_cookie_t
*cp
, uint_t
*ccountp
)
824 int (*funcp
)(dev_info_t
*, dev_info_t
*, ddi_dma_handle_t
,
825 struct ddi_dma_req
*, ddi_dma_cookie_t
*, uint_t
*);
827 if (dip
!= ddi_root_node())
828 dip
= (dev_info_t
*)DEVI(dip
)->devi_bus_dma_bindhdl
;
830 funcp
= DEVI(dip
)->devi_ops
->devo_bus_ops
->bus_dma_bindhdl
;
831 return ((*funcp
)(dip
, rdip
, handle
, dmareq
, cp
, ccountp
));
835 ddi_dma_unbindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
836 ddi_dma_handle_t handle
)
838 int (*funcp
)(dev_info_t
*, dev_info_t
*, ddi_dma_handle_t
);
840 if (dip
!= ddi_root_node())
841 dip
= (dev_info_t
*)DEVI(dip
)->devi_bus_dma_unbindhdl
;
843 funcp
= DEVI(dip
)->devi_ops
->devo_bus_ops
->bus_dma_unbindhdl
;
844 return ((*funcp
)(dip
, rdip
, handle
));
849 ddi_dma_flush(dev_info_t
*dip
, dev_info_t
*rdip
,
850 ddi_dma_handle_t handle
, off_t off
, size_t len
,
853 int (*funcp
)(dev_info_t
*, dev_info_t
*, ddi_dma_handle_t
,
854 off_t
, size_t, uint_t
);
856 if (dip
!= ddi_root_node())
857 dip
= (dev_info_t
*)DEVI(dip
)->devi_bus_dma_flush
;
859 funcp
= DEVI(dip
)->devi_ops
->devo_bus_ops
->bus_dma_flush
;
860 return ((*funcp
)(dip
, rdip
, handle
, off
, len
, cache_flags
));
864 ddi_dma_win(dev_info_t
*dip
, dev_info_t
*rdip
,
865 ddi_dma_handle_t handle
, uint_t win
, off_t
*offp
,
866 size_t *lenp
, ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
)
868 int (*funcp
)(dev_info_t
*, dev_info_t
*, ddi_dma_handle_t
,
869 uint_t
, off_t
*, size_t *, ddi_dma_cookie_t
*, uint_t
*);
871 if (dip
!= ddi_root_node())
872 dip
= (dev_info_t
*)DEVI(dip
)->devi_bus_dma_win
;
874 funcp
= DEVI(dip
)->devi_ops
->devo_bus_ops
->bus_dma_win
;
875 return ((*funcp
)(dip
, rdip
, handle
, win
, offp
, lenp
,
880 ddi_dma_sync(ddi_dma_handle_t h
, off_t o
, size_t l
, uint_t whom
)
882 ddi_dma_impl_t
*hp
= (ddi_dma_impl_t
*)h
;
883 dev_info_t
*dip
, *rdip
;
884 int (*funcp
)(dev_info_t
*, dev_info_t
*, ddi_dma_handle_t
, off_t
,
888 * the DMA nexus driver will set DMP_NOSYNC if the
889 * platform does not require any sync operation. For
890 * example if the memory is uncached or consistent
891 * and without any I/O write buffers involved.
893 if ((hp
->dmai_rflags
& DMP_NOSYNC
) == DMP_NOSYNC
)
894 return (DDI_SUCCESS
);
896 dip
= rdip
= hp
->dmai_rdip
;
897 if (dip
!= ddi_root_node())
898 dip
= (dev_info_t
*)DEVI(dip
)->devi_bus_dma_flush
;
899 funcp
= DEVI(dip
)->devi_ops
->devo_bus_ops
->bus_dma_flush
;
900 return ((*funcp
)(dip
, rdip
, h
, o
, l
, whom
));
904 ddi_dma_unbind_handle(ddi_dma_handle_t h
)
906 ddi_dma_impl_t
*hp
= (ddi_dma_impl_t
*)h
;
907 dev_info_t
*dip
, *rdip
;
908 int (*funcp
)(dev_info_t
*, dev_info_t
*, ddi_dma_handle_t
);
910 dip
= rdip
= hp
->dmai_rdip
;
911 if (dip
!= ddi_root_node())
912 dip
= (dev_info_t
*)DEVI(dip
)->devi_bus_dma_unbindhdl
;
913 funcp
= DEVI(rdip
)->devi_bus_dma_unbindfunc
;
914 return ((*funcp
)(dip
, rdip
, h
));
917 #endif /* !__sparc */
920 * DMA burst sizes, and transfer minimums
924 ddi_dma_burstsizes(ddi_dma_handle_t handle
)
926 ddi_dma_impl_t
*dimp
= (ddi_dma_impl_t
*)handle
;
931 return (dimp
->dmai_burstsizes
);
935 ddi_iomin(dev_info_t
*a
, int i
, int stream
)
940 * Make sure that the initial value is sane
945 i
= (stream
) ? 4 : 1;
948 DDI_CTLOPS_IOMIN
, (void *)(uintptr_t)stream
, (void *)&i
);
949 if (r
!= DDI_SUCCESS
|| (i
& (i
- 1)))
955 * Given two DMA attribute structures, apply the attributes
956 * of one to the other, following the rules of attributes
957 * and the wishes of the caller.
959 * The rules of DMA attribute structures are that you cannot
960 * make things *less* restrictive as you apply one set
961 * of attributes to another.
965 ddi_dma_attr_merge(ddi_dma_attr_t
*attr
, ddi_dma_attr_t
*mod
)
967 attr
->dma_attr_addr_lo
=
968 MAX(attr
->dma_attr_addr_lo
, mod
->dma_attr_addr_lo
);
969 attr
->dma_attr_addr_hi
=
970 MIN(attr
->dma_attr_addr_hi
, mod
->dma_attr_addr_hi
);
971 attr
->dma_attr_count_max
=
972 MIN(attr
->dma_attr_count_max
, mod
->dma_attr_count_max
);
973 attr
->dma_attr_align
=
974 MAX(attr
->dma_attr_align
, mod
->dma_attr_align
);
975 attr
->dma_attr_burstsizes
=
976 (uint_t
)(attr
->dma_attr_burstsizes
& mod
->dma_attr_burstsizes
);
977 attr
->dma_attr_minxfer
=
978 maxbit(attr
->dma_attr_minxfer
, mod
->dma_attr_minxfer
);
979 attr
->dma_attr_maxxfer
=
980 MIN(attr
->dma_attr_maxxfer
, mod
->dma_attr_maxxfer
);
981 attr
->dma_attr_seg
= MIN(attr
->dma_attr_seg
, mod
->dma_attr_seg
);
982 attr
->dma_attr_sgllen
= MIN((uint_t
)attr
->dma_attr_sgllen
,
983 (uint_t
)mod
->dma_attr_sgllen
);
984 attr
->dma_attr_granular
=
985 MAX(attr
->dma_attr_granular
, mod
->dma_attr_granular
);
989 * mmap/segmap interface:
993 * ddi_segmap: setup the default segment driver. Calls the drivers
994 * XXmmap routine to validate the range to be mapped.
995 * Return ENXIO of the range is not valid. Create
996 * a seg_dev segment that contains all of the
997 * necessary information and will reference the
998 * default segment driver routines. It returns zero
999 * on success or non-zero on failure.
1002 ddi_segmap(dev_t dev
, off_t offset
, struct as
*asp
, caddr_t
*addrp
, off_t len
,
1003 uint_t prot
, uint_t maxprot
, uint_t flags
, cred_t
*credp
)
1005 extern int spec_segmap(dev_t
, off_t
, struct as
*, caddr_t
*,
1006 off_t
, uint_t
, uint_t
, uint_t
, struct cred
*);
1008 return (spec_segmap(dev
, offset
, asp
, addrp
, len
,
1009 prot
, maxprot
, flags
, credp
));
1013 * ddi_map_fault: Resolve mappings at fault time. Used by segment
1014 * drivers. Allows each successive parent to resolve
1015 * address translations and add its mappings to the
1016 * mapping list supplied in the page structure. It
1017 * returns zero on success or non-zero on failure.
1021 ddi_map_fault(dev_info_t
*dip
, struct hat
*hat
, struct seg
*seg
,
1022 caddr_t addr
, struct devpage
*dp
, pfn_t pfn
, uint_t prot
, uint_t lock
)
1024 return (i_ddi_map_fault(dip
, dip
, hat
, seg
, addr
, dp
, pfn
, prot
, lock
));
1028 * ddi_device_mapping_check: Called from ddi_segmap_setup.
1029 * Invokes platform specific DDI to determine whether attributes specified
1030 * in attr(9s) are valid for the region of memory that will be made
1031 * available for direct access to user process via the mmap(2) system call.
1034 ddi_device_mapping_check(dev_t dev
, ddi_device_acc_attr_t
*accattrp
,
1035 uint_t rnumber
, uint_t
*hat_flags
)
1037 ddi_acc_handle_t handle
;
1044 * we use e_ddi_hold_devi_by_dev to search for the devi. We
1045 * release it immediately since it should already be held by
1049 e_ddi_hold_devi_by_dev(dev
, E_DDI_HOLD_DEVI_NOATTACH
)) == NULL
)
1051 ddi_release_devi(dip
); /* for e_ddi_hold_devi_by_dev() */
1054 * Allocate and initialize the common elements of data
1057 handle
= impl_acc_hdl_alloc(KM_SLEEP
, NULL
);
1061 hp
= impl_acc_hdl_get(handle
);
1062 hp
->ah_vers
= VERS_ACCHDL
;
1064 hp
->ah_rnumber
= rnumber
;
1067 hp
->ah_acc
= *accattrp
;
1070 * Set up the mapping request and call to parent.
1072 mr
.map_op
= DDI_MO_MAP_HANDLE
;
1073 mr
.map_type
= DDI_MT_RNUMBER
;
1074 mr
.map_obj
.rnumber
= rnumber
;
1075 mr
.map_prot
= PROT_READ
| PROT_WRITE
;
1076 mr
.map_flags
= DDI_MF_KERNEL_MAPPING
;
1077 mr
.map_handlep
= hp
;
1078 mr
.map_vers
= DDI_MAP_VERSION
;
1079 result
= ddi_map(dip
, &mr
, 0, 0, NULL
);
1082 * Region must be mappable, pick up flags from the framework.
1084 *hat_flags
= hp
->ah_hat_flags
;
1086 impl_acc_hdl_free(handle
);
1089 * check for end result.
1091 if (result
!= DDI_SUCCESS
)
1098 * Property functions: See also, ddipropdefs.h.
1100 * These functions are the framework for the property functions,
1101 * i.e. they support software defined properties. All implementation
1102 * specific property handling (i.e.: self-identifying devices and
1103 * PROM defined properties are handled in the implementation specific
1104 * functions (defined in ddi_implfuncs.h).
1108 * nopropop: Shouldn't be called, right?
1111 nopropop(dev_t dev
, dev_info_t
*dip
, ddi_prop_op_t prop_op
, int mod_flags
,
1112 char *name
, caddr_t valuep
, int *lengthp
)
1114 _NOTE(ARGUNUSED(dev
, dip
, prop_op
, mod_flags
, name
, valuep
, lengthp
))
1115 return (DDI_PROP_NOT_FOUND
);
1118 #ifdef DDI_PROP_DEBUG
1119 int ddi_prop_debug_flag
= 0;
1122 ddi_prop_debug(int enable
)
1124 int prev
= ddi_prop_debug_flag
;
1126 if ((enable
!= 0) || (prev
!= 0))
1127 printf("ddi_prop_debug: debugging %s\n",
1128 enable
? "enabled" : "disabled");
1129 ddi_prop_debug_flag
= enable
;
1133 #endif /* DDI_PROP_DEBUG */
1136 * Search a property list for a match, if found return pointer
1137 * to matching prop struct, else return NULL.
1141 i_ddi_prop_search(dev_t dev
, char *name
, uint_t flags
, ddi_prop_t
**list_head
)
1146 * find the property in child's devinfo:
1147 * Search order defined by this search function is first matching
1148 * property with input dev == DDI_DEV_T_ANY matching any dev or
1149 * dev == propp->prop_dev, name == propp->name, and the correct
1150 * data type as specified in the flags. If a DDI_DEV_T_NONE dev
1151 * value made it this far then it implies a DDI_DEV_T_ANY search.
1153 if (dev
== DDI_DEV_T_NONE
)
1154 dev
= DDI_DEV_T_ANY
;
1156 for (propp
= *list_head
; propp
!= NULL
; propp
= propp
->prop_next
) {
1158 if (!DDI_STRSAME(propp
->prop_name
, name
))
1161 if ((dev
!= DDI_DEV_T_ANY
) && (propp
->prop_dev
!= dev
))
1164 if (((propp
->prop_flags
& flags
) & DDI_PROP_TYPE_MASK
) == 0)
1170 return ((ddi_prop_t
*)0);
1174 * Search for property within devnames structures
1177 i_ddi_search_global_prop(dev_t dev
, char *name
, uint_t flags
)
1180 struct devnames
*dnp
;
1184 * Valid dev_t value is needed to index into the
1185 * correct devnames entry, therefore a dev_t
1186 * value of DDI_DEV_T_ANY is not appropriate.
1188 ASSERT(dev
!= DDI_DEV_T_ANY
);
1189 if (dev
== DDI_DEV_T_ANY
) {
1190 return ((ddi_prop_t
*)0);
1193 major
= getmajor(dev
);
1194 dnp
= &(devnamesp
[major
]);
1196 if (dnp
->dn_global_prop_ptr
== NULL
)
1197 return ((ddi_prop_t
*)0);
1199 LOCK_DEV_OPS(&dnp
->dn_lock
);
1201 for (propp
= dnp
->dn_global_prop_ptr
->prop_list
;
1203 propp
= (ddi_prop_t
*)propp
->prop_next
) {
1205 if (!DDI_STRSAME(propp
->prop_name
, name
))
1208 if ((!(flags
& DDI_PROP_ROOTNEX_GLOBAL
)) &&
1209 (!(flags
& LDI_DEV_T_ANY
)) && (propp
->prop_dev
!= dev
))
1212 if (((propp
->prop_flags
& flags
) & DDI_PROP_TYPE_MASK
) == 0)
1215 /* Property found, return it */
1216 UNLOCK_DEV_OPS(&dnp
->dn_lock
);
1220 UNLOCK_DEV_OPS(&dnp
->dn_lock
);
1221 return ((ddi_prop_t
*)0);
/* Shared cmn_err format for property allocation failures. */
static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";
1227 * ddi_prop_search_global:
1228 * Search the global property list within devnames
1229 * for the named property. Return the encoded value.
1232 i_ddi_prop_search_global(dev_t dev
, uint_t flags
, char *name
,
1233 void *valuep
, uint_t
*lengthp
)
1238 propp
= i_ddi_search_global_prop(dev
, name
, flags
);
1240 /* Property NOT found, bail */
1241 if (propp
== (ddi_prop_t
*)0)
1242 return (DDI_PROP_NOT_FOUND
);
1244 if (propp
->prop_flags
& DDI_PROP_UNDEF_IT
)
1245 return (DDI_PROP_UNDEFINED
);
1247 if ((buffer
= kmem_alloc(propp
->prop_len
,
1248 (flags
& DDI_PROP_CANSLEEP
) ? KM_SLEEP
: KM_NOSLEEP
)) == NULL
) {
1249 cmn_err(CE_CONT
, prop_no_mem_msg
, name
);
1250 return (DDI_PROP_NO_MEMORY
);
1254 * Return the encoded data
1256 *(caddr_t
*)valuep
= buffer
;
1257 *lengthp
= propp
->prop_len
;
1258 bcopy(propp
->prop_val
, buffer
, propp
->prop_len
);
1260 return (DDI_PROP_SUCCESS
);
1264 * ddi_prop_search_common: Lookup and return the encoded value
1267 ddi_prop_search_common(dev_t dev
, dev_info_t
*dip
, ddi_prop_op_t prop_op
,
1268 uint_t flags
, char *name
, void *valuep
, uint_t
*lengthp
)
1273 caddr_t prealloc
= NULL
;
1278 /*CONSTANTCONDITION*/
1281 mutex_enter(&(DEVI(dip
)->devi_lock
));
1285 * find the property in child's devinfo:
1287 * 1. driver defined properties
1288 * 2. system defined properties
1289 * 3. driver global properties
1290 * 4. boot defined properties
1293 propp
= i_ddi_prop_search(dev
, name
, flags
,
1294 &(DEVI(dip
)->devi_drv_prop_ptr
));
1295 if (propp
== NULL
) {
1296 propp
= i_ddi_prop_search(dev
, name
, flags
,
1297 &(DEVI(dip
)->devi_sys_prop_ptr
));
1299 if ((propp
== NULL
) && DEVI(dip
)->devi_global_prop_list
) {
1300 propp
= i_ddi_prop_search(dev
, name
, flags
,
1301 &DEVI(dip
)->devi_global_prop_list
->prop_list
);
1304 if (propp
== NULL
) {
1305 propp
= i_ddi_prop_search(dev
, name
, flags
,
1306 &(DEVI(dip
)->devi_hw_prop_ptr
));
1310 * Software property found?
1312 if (propp
!= (ddi_prop_t
*)0) {
1315 * If explicit undefine, return now.
1317 if (propp
->prop_flags
& DDI_PROP_UNDEF_IT
) {
1318 mutex_exit(&(DEVI(dip
)->devi_lock
));
1320 kmem_free(prealloc
, plength
);
1321 return (DDI_PROP_UNDEFINED
);
1325 * If we only want to know if it exists, return now
1327 if (prop_op
== PROP_EXISTS
) {
1328 mutex_exit(&(DEVI(dip
)->devi_lock
));
1329 ASSERT(prealloc
== NULL
);
1330 return (DDI_PROP_SUCCESS
);
1334 * If length only request or prop length == 0,
1335 * service request and return now.
1337 if ((prop_op
== PROP_LEN
) ||(propp
->prop_len
== 0)) {
1338 *lengthp
= propp
->prop_len
;
1341 * if prop_op is PROP_LEN_AND_VAL_ALLOC
1342 * that means prop_len is 0, so set valuep
1345 if (prop_op
== PROP_LEN_AND_VAL_ALLOC
)
1346 *(caddr_t
*)valuep
= NULL
;
1348 mutex_exit(&(DEVI(dip
)->devi_lock
));
1350 kmem_free(prealloc
, plength
);
1351 return (DDI_PROP_SUCCESS
);
1355 * If LEN_AND_VAL_ALLOC and the request can sleep,
1356 * drop the mutex, allocate the buffer, and go
1357 * through the loop again. If we already allocated
1358 * the buffer, and the size of the property changed,
1361 if ((prop_op
== PROP_LEN_AND_VAL_ALLOC
) &&
1362 (flags
& DDI_PROP_CANSLEEP
)) {
1363 if (prealloc
&& (propp
->prop_len
!= plength
)) {
1364 kmem_free(prealloc
, plength
);
1367 if (prealloc
== NULL
) {
1368 plength
= propp
->prop_len
;
1369 mutex_exit(&(DEVI(dip
)->devi_lock
));
1370 prealloc
= kmem_alloc(plength
,
1377 * Allocate buffer, if required. Either way,
1378 * set `buffer' variable.
1380 i
= *lengthp
; /* Get callers length */
1381 *lengthp
= propp
->prop_len
; /* Set callers length */
1385 case PROP_LEN_AND_VAL_ALLOC
:
1387 if (prealloc
== NULL
) {
1388 buffer
= kmem_alloc(propp
->prop_len
,
1394 if (buffer
== NULL
) {
1395 mutex_exit(&(DEVI(dip
)->devi_lock
));
1396 cmn_err(CE_CONT
, prop_no_mem_msg
, name
);
1397 return (DDI_PROP_NO_MEMORY
);
1399 /* Set callers buf ptr */
1400 *(caddr_t
*)valuep
= buffer
;
1403 case PROP_LEN_AND_VAL_BUF
:
1405 if (propp
->prop_len
> (i
)) {
1406 mutex_exit(&(DEVI(dip
)->devi_lock
));
1407 return (DDI_PROP_BUF_TOO_SMALL
);
1410 buffer
= valuep
; /* Get callers buf ptr */
1420 bcopy(propp
->prop_val
, buffer
, propp
->prop_len
);
1421 mutex_exit(&(DEVI(dip
)->devi_lock
));
1422 return (DDI_PROP_SUCCESS
);
1425 mutex_exit(&(DEVI(dip
)->devi_lock
));
1427 kmem_free(prealloc
, plength
);
1431 * Prop not found, call parent bus_ops to deal with possible
1432 * h/w layer (possible PROM defined props, etc.) and to
1433 * possibly ascend the hierarchy, if allowed by flags.
1435 pdip
= (dev_info_t
*)DEVI(dip
)->devi_parent
;
1438 * One last call for the root driver PROM props?
1440 if (dip
== ddi_root_node()) {
1441 return (ddi_bus_prop_op(dev
, dip
, dip
, prop_op
,
1442 flags
, name
, valuep
, (int *)lengthp
));
1446 * We may have been called to check for properties
1447 * within a single devinfo node that has no parent -
1452 (DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
)) ==
1453 (DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
));
1454 return (DDI_PROP_NOT_FOUND
);
1458 * Instead of recursing, we do iterative calls up the tree.
1459 * As a bit of optimization, skip the bus_op level if the
1460 * node is a s/w node and if the parent's bus_prop_op function
1461 * is `ddi_bus_prop_op', because we know that in this case,
1462 * this function does nothing.
1464 * 4225415: If the parent isn't attached, or the child
1465 * hasn't been named by the parent yet, use the default
1466 * ddi_bus_prop_op as a proxy for the parent. This
1467 * allows property lookups in any child/parent state to
1468 * include 'prom' and inherited properties, even when
1469 * there are no drivers attached to the child or parent.
1472 bop
= ddi_bus_prop_op
;
1473 if (i_ddi_devi_attached(pdip
) &&
1474 (i_ddi_node_state(dip
) >= DS_INITIALIZED
))
1475 bop
= DEVI(pdip
)->devi_ops
->devo_bus_ops
->bus_prop_op
;
1477 i
= DDI_PROP_NOT_FOUND
;
1479 if ((bop
!= ddi_bus_prop_op
) || ndi_dev_is_prom_node(dip
)) {
1480 i
= (*bop
)(dev
, pdip
, dip
, prop_op
,
1481 flags
| DDI_PROP_DONTPASS
,
1482 name
, valuep
, lengthp
);
1485 if ((flags
& DDI_PROP_DONTPASS
) ||
1486 (i
!= DDI_PROP_NOT_FOUND
))
1496 * ddi_prop_op: The basic property operator for drivers.
1498 * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
1505 * PROP_LEN_AND_VAL_BUF Pointer to callers buffer
1507 * PROP_LEN_AND_VAL_ALLOC Address of callers pointer (will be set to
1508 * address of allocated buffer, if successful)
1511 ddi_prop_op(dev_t dev
, dev_info_t
*dip
, ddi_prop_op_t prop_op
, int mod_flags
,
1512 char *name
, caddr_t valuep
, int *lengthp
)
1516 ASSERT((mod_flags
& DDI_PROP_TYPE_MASK
) == 0);
1519 * If this was originally an LDI prop lookup then we bail here.
1520 * The reason is that the LDI property lookup interfaces first call
1521 * a drivers prop_op() entry point to allow it to override
1522 * properties. But if we've made it here, then the driver hasn't
1523 * overriden any properties. We don't want to continue with the
1524 * property search here because we don't have any type inforamtion.
1525 * When we return failure, the LDI interfaces will then proceed to
1526 * call the typed property interfaces to look up the property.
1528 if (mod_flags
& DDI_PROP_DYNAMIC
)
1529 return (DDI_PROP_NOT_FOUND
);
1532 * check for pre-typed property consumer asking for typed property:
1533 * see e_ddi_getprop_int64.
1535 if (mod_flags
& DDI_PROP_CONSUMER_TYPED
)
1536 mod_flags
|= DDI_PROP_TYPE_INT64
;
1537 mod_flags
|= DDI_PROP_TYPE_ANY
;
1539 i
= ddi_prop_search_common(dev
, dip
, prop_op
,
1540 mod_flags
, name
, valuep
, (uint_t
*)lengthp
);
1541 if (i
== DDI_PROP_FOUND_1275
)
1542 return (DDI_PROP_SUCCESS
);
1547 * ddi_prop_op_nblocks_blksize: The basic property operator for drivers that
1548 * maintain size in number of blksize blocks. Provides a dynamic property
1549 * implementation for size oriented properties based on nblocks64 and blksize
1550 * values passed in by the driver. Fallback to ddi_prop_op if the nblocks64
1551 * is too large. This interface should not be used with a nblocks64 that
1552 * represents the driver's idea of how to represent unknown, if nblocks is
1553 * unknown use ddi_prop_op.
1556 ddi_prop_op_nblocks_blksize(dev_t dev
, dev_info_t
*dip
, ddi_prop_op_t prop_op
,
1557 int mod_flags
, char *name
, caddr_t valuep
, int *lengthp
,
1558 uint64_t nblocks64
, uint_t blksize
)
1563 /* convert block size to shift value */
1564 ASSERT(BIT_ONLYONESET(blksize
));
1565 blkshift
= highbit(blksize
) - 1;
1568 * There is no point in supporting nblocks64 values that don't have
1569 * an accurate uint64_t byte count representation.
1571 if (nblocks64
>= (UINT64_MAX
>> blkshift
))
1572 return (ddi_prop_op(dev
, dip
, prop_op
, mod_flags
,
1573 name
, valuep
, lengthp
));
1575 size64
= nblocks64
<< blkshift
;
1576 return (ddi_prop_op_size_blksize(dev
, dip
, prop_op
, mod_flags
,
1577 name
, valuep
, lengthp
, size64
, blksize
));
1581 * ddi_prop_op_nblocks: ddi_prop_op_nblocks_blksize with DEV_BSIZE blksize.
1584 ddi_prop_op_nblocks(dev_t dev
, dev_info_t
*dip
, ddi_prop_op_t prop_op
,
1585 int mod_flags
, char *name
, caddr_t valuep
, int *lengthp
, uint64_t nblocks64
)
1587 return (ddi_prop_op_nblocks_blksize(dev
, dip
, prop_op
,
1588 mod_flags
, name
, valuep
, lengthp
, nblocks64
, DEV_BSIZE
));
1592 * ddi_prop_op_size_blksize: The basic property operator for block drivers that
1593 * maintain size in bytes. Provides a of dynamic property implementation for
1594 * size oriented properties based on size64 value and blksize passed in by the
1595 * driver. Fallback to ddi_prop_op if the size64 is too large. This interface
1596 * should not be used with a size64 that represents the driver's idea of how
1597 * to represent unknown, if size is unknown use ddi_prop_op.
1599 * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
1600 * integers. While the most likely interface to request them ([bc]devi_size)
1601 * is declared int (signed) there is no enforcement of this, which means we
1602 * can't enforce limitations here without risking regression.
1605 ddi_prop_op_size_blksize(dev_t dev
, dev_info_t
*dip
, ddi_prop_op_t prop_op
,
1606 int mod_flags
, char *name
, caddr_t valuep
, int *lengthp
, uint64_t size64
,
1615 * This is a kludge to support capture of size(9P) pure dynamic
1616 * properties in snapshots for non-cmlb code (without exposing
1617 * i_ddi_prop_dyn changes). When everyone uses cmlb, this code
1618 * should be removed.
1620 if (i_ddi_prop_dyn_driver_get(dip
) == NULL
) {
1621 static i_ddi_prop_dyn_t prop_dyn_size
[] = {
1622 {"Size", DDI_PROP_TYPE_INT64
, S_IFCHR
},
1623 {"Nblocks", DDI_PROP_TYPE_INT64
, S_IFBLK
},
1626 i_ddi_prop_dyn_driver_set(dip
, prop_dyn_size
);
1629 /* convert block size to shift value */
1630 ASSERT(BIT_ONLYONESET(blksize
));
1631 blkshift
= highbit(blksize
) - 1;
1633 /* compute DEV_BSIZE nblocks value */
1634 nblocks64
= size64
>> blkshift
;
1636 /* get callers length, establish length of our dynamic properties */
1637 callers_length
= *lengthp
;
1639 if (strcmp(name
, "Nblocks") == 0)
1640 *lengthp
= sizeof (uint64_t);
1641 else if (strcmp(name
, "Size") == 0)
1642 *lengthp
= sizeof (uint64_t);
1643 else if ((strcmp(name
, "nblocks") == 0) && (nblocks64
< UINT_MAX
))
1644 *lengthp
= sizeof (uint32_t);
1645 else if ((strcmp(name
, "size") == 0) && (size64
< UINT_MAX
))
1646 *lengthp
= sizeof (uint32_t);
1647 else if ((strcmp(name
, "blksize") == 0) && (blksize
< UINT_MAX
))
1648 *lengthp
= sizeof (uint32_t);
1650 /* fallback to ddi_prop_op */
1651 return (ddi_prop_op(dev
, dip
, prop_op
, mod_flags
,
1652 name
, valuep
, lengthp
));
1655 /* service request for the length of the property */
1656 if (prop_op
== PROP_LEN
)
1657 return (DDI_PROP_SUCCESS
);
1660 case PROP_LEN_AND_VAL_ALLOC
:
1661 if ((buffer
= kmem_alloc(*lengthp
,
1662 (mod_flags
& DDI_PROP_CANSLEEP
) ?
1663 KM_SLEEP
: KM_NOSLEEP
)) == NULL
)
1664 return (DDI_PROP_NO_MEMORY
);
1666 *(caddr_t
*)valuep
= buffer
; /* set callers buf ptr */
1669 case PROP_LEN_AND_VAL_BUF
:
1670 /* the length of the property and the request must match */
1671 if (callers_length
!= *lengthp
)
1672 return (DDI_PROP_INVAL_ARG
);
1674 buffer
= valuep
; /* get callers buf ptr */
1678 return (DDI_PROP_INVAL_ARG
);
1681 /* transfer the value into the buffer */
1682 if (strcmp(name
, "Nblocks") == 0)
1683 *((uint64_t *)buffer
) = nblocks64
;
1684 else if (strcmp(name
, "Size") == 0)
1685 *((uint64_t *)buffer
) = size64
;
1686 else if (strcmp(name
, "nblocks") == 0)
1687 *((uint32_t *)buffer
) = (uint32_t)nblocks64
;
1688 else if (strcmp(name
, "size") == 0)
1689 *((uint32_t *)buffer
) = (uint32_t)size64
;
1690 else if (strcmp(name
, "blksize") == 0)
1691 *((uint32_t *)buffer
) = (uint32_t)blksize
;
1692 return (DDI_PROP_SUCCESS
);
1696 * ddi_prop_op_size: ddi_prop_op_size_blksize with DEV_BSIZE block size.
1699 ddi_prop_op_size(dev_t dev
, dev_info_t
*dip
, ddi_prop_op_t prop_op
,
1700 int mod_flags
, char *name
, caddr_t valuep
, int *lengthp
, uint64_t size64
)
1702 return (ddi_prop_op_size_blksize(dev
, dip
, prop_op
,
1703 mod_flags
, name
, valuep
, lengthp
, size64
, DEV_BSIZE
));
1707 * Variable length props...
1711 * ddi_getlongprop: Get variable length property len+val into a buffer
1712 * allocated by property provider via kmem_alloc. Requester
1713 * is responsible for freeing returned property via kmem_free.
1717 * dev_t: Input: dev_t of property.
1718 * dip: Input: dev_info_t pointer of child.
1719 * flags: Input: Possible flag modifiers are:
1720 * DDI_PROP_DONTPASS: Don't pass to parent if prop not found.
1721 * DDI_PROP_CANSLEEP: Memory allocation may sleep.
1722 * name: Input: name of property.
1723 * valuep: Output: Addr of callers buffer pointer.
1724 * lengthp:Output: *lengthp will contain prop length on exit.
1728 * DDI_PROP_SUCCESS: Prop found and returned.
1729 * DDI_PROP_NOT_FOUND: Prop not found
1730 * DDI_PROP_UNDEFINED: Prop explicitly undefined.
1731 * DDI_PROP_NO_MEMORY: Prop found, but unable to alloc mem.
1735 ddi_getlongprop(dev_t dev
, dev_info_t
*dip
, int flags
,
1736 char *name
, caddr_t valuep
, int *lengthp
)
1738 return (ddi_prop_op(dev
, dip
, PROP_LEN_AND_VAL_ALLOC
,
1739 flags
, name
, valuep
, lengthp
));
1744 * ddi_getlongprop_buf: Get long prop into pre-allocated callers
1745 * buffer. (no memory allocation by provider).
1747 * dev_t: Input: dev_t of property.
1748 * dip: Input: dev_info_t pointer of child.
1749 * flags: Input: DDI_PROP_DONTPASS or NULL
1750 * name: Input: name of property
1751 * valuep: Input: ptr to callers buffer.
1752 * lengthp:I/O: ptr to length of callers buffer on entry,
1753 * actual length of property on exit.
1757 * DDI_PROP_SUCCESS Prop found and returned
1758 * DDI_PROP_NOT_FOUND Prop not found
1759 * DDI_PROP_UNDEFINED Prop explicitly undefined.
1760 * DDI_PROP_BUF_TOO_SMALL Prop found, callers buf too small,
1761 * no value returned, but actual prop
1762 * length returned in *lengthp
1767 ddi_getlongprop_buf(dev_t dev
, dev_info_t
*dip
, int flags
,
1768 char *name
, caddr_t valuep
, int *lengthp
)
1770 return (ddi_prop_op(dev
, dip
, PROP_LEN_AND_VAL_BUF
,
1771 flags
, name
, valuep
, lengthp
));
1775 * Integer/boolean sized props.
1777 * Call is value only... returns found boolean or int sized prop value or
1778 * defvalue if prop not found or is wrong length or is explicitly undefined.
1779 * Only flag is DDI_PROP_DONTPASS...
1781 * By convention, this interface returns boolean (0) sized properties
1784 * This never returns an error, if property not found or specifically
1785 * undefined, the input `defvalue' is returned.
1789 ddi_getprop(dev_t dev
, dev_info_t
*dip
, int flags
, char *name
, int defvalue
)
1791 int propvalue
= defvalue
;
1792 int proplength
= sizeof (int);
1795 error
= ddi_prop_op(dev
, dip
, PROP_LEN_AND_VAL_BUF
,
1796 flags
, name
, (caddr_t
)&propvalue
, &proplength
);
1798 if ((error
== DDI_PROP_SUCCESS
) && (proplength
== 0))
1805 * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
1806 * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
1810 ddi_getproplen(dev_t dev
, dev_info_t
*dip
, int flags
, char *name
, int *lengthp
)
1812 return (ddi_prop_op(dev
, dip
, PROP_LEN
, flags
, name
, NULL
, lengthp
));
1816 * Allocate a struct prop_driver_data, along with 'size' bytes
1817 * for decoded property data. This structure is freed by
1818 * calling ddi_prop_free(9F).
1821 ddi_prop_decode_alloc(size_t size
, void (*prop_free
)(struct prop_driver_data
*))
1823 struct prop_driver_data
*pdd
;
1826 * Allocate a structure with enough memory to store the decoded data.
1828 pdd
= kmem_zalloc(sizeof (struct prop_driver_data
) + size
, KM_SLEEP
);
1829 pdd
->pdd_size
= (sizeof (struct prop_driver_data
) + size
);
1830 pdd
->pdd_prop_free
= prop_free
;
1833 * Return a pointer to the location to put the decoded data.
1835 return ((void *)((caddr_t
)pdd
+ sizeof (struct prop_driver_data
)));
1839 * Allocated the memory needed to store the encoded data in the property
1843 ddi_prop_encode_alloc(prop_handle_t
*ph
, size_t size
)
1846 * If size is zero, then set data to NULL and size to 0. This
1847 * is a boolean property.
1852 ph
->ph_cur_pos
= NULL
;
1853 ph
->ph_save_pos
= NULL
;
1855 if (ph
->ph_flags
== DDI_PROP_DONTSLEEP
) {
1856 ph
->ph_data
= kmem_zalloc(size
, KM_NOSLEEP
);
1857 if (ph
->ph_data
== NULL
)
1858 return (DDI_PROP_NO_MEMORY
);
1860 ph
->ph_data
= kmem_zalloc(size
, KM_SLEEP
);
1862 ph
->ph_cur_pos
= ph
->ph_data
;
1863 ph
->ph_save_pos
= ph
->ph_data
;
1865 return (DDI_PROP_SUCCESS
);
1869 * Free the space allocated by the lookup routines. Each lookup routine
1870 * returns a pointer to the decoded data to the driver. The driver then
1871 * passes this pointer back to us. This data actually lives in a struct
1872 * prop_driver_data. We use negative indexing to find the beginning of
1873 * the structure and then free the entire structure using the size and
1874 * the free routine stored in the structure.
1877 ddi_prop_free(void *datap
)
1879 struct prop_driver_data
*pdd
;
1884 pdd
= (struct prop_driver_data
*)
1885 ((caddr_t
)datap
- sizeof (struct prop_driver_data
));
1887 * Call the free routine to free it
1889 (*pdd
->pdd_prop_free
)(pdd
);
1893 * Free the data associated with an array of ints,
1894 * allocated with ddi_prop_decode_alloc().
1897 ddi_prop_free_ints(struct prop_driver_data
*pdd
)
1899 kmem_free(pdd
, pdd
->pdd_size
);
1903 * Free a single string property or a single string contained within
1904 * the argv style return value of an array of strings.
1907 ddi_prop_free_string(struct prop_driver_data
*pdd
)
1909 kmem_free(pdd
, pdd
->pdd_size
);
1914 * Free an array of strings.
1917 ddi_prop_free_strings(struct prop_driver_data
*pdd
)
1919 kmem_free(pdd
, pdd
->pdd_size
);
1923 * Free the data associated with an array of bytes.
1926 ddi_prop_free_bytes(struct prop_driver_data
*pdd
)
1928 kmem_free(pdd
, pdd
->pdd_size
);
1932 * Reset the current location pointer in the property handle to the
1933 * beginning of the data.
1936 ddi_prop_reset_pos(prop_handle_t
*ph
)
1938 ph
->ph_cur_pos
= ph
->ph_data
;
1939 ph
->ph_save_pos
= ph
->ph_data
;
1943 * Restore the current location pointer in the property handle to the
1947 ddi_prop_save_pos(prop_handle_t
*ph
)
1949 ph
->ph_save_pos
= ph
->ph_cur_pos
;
1953 * Save the location that the current location pointer is pointing to..
1956 ddi_prop_restore_pos(prop_handle_t
*ph
)
1958 ph
->ph_cur_pos
= ph
->ph_save_pos
;
1962 * Property encode/decode functions
1966 * Decode a single integer property
1969 ddi_prop_fm_decode_int(prop_handle_t
*ph
, void *data
, uint_t
*nelements
)
1975 * If there is nothing to decode return an error
1977 if (ph
->ph_size
== 0)
1978 return (DDI_PROP_END_OF_DATA
);
1981 * Decode the property as a single integer and return it
1982 * in data if we were able to decode it.
1984 i
= DDI_PROP_INT(ph
, DDI_PROP_CMD_DECODE
, &tmp
);
1985 if (i
< DDI_PROP_RESULT_OK
) {
1987 case DDI_PROP_RESULT_EOF
:
1988 return (DDI_PROP_END_OF_DATA
);
1990 case DDI_PROP_RESULT_ERROR
:
1991 return (DDI_PROP_CANNOT_DECODE
);
1997 return (DDI_PROP_SUCCESS
);
2001 * Decode a single 64 bit integer property
2004 ddi_prop_fm_decode_int64(prop_handle_t
*ph
, void *data
, uint_t
*nelements
)
2010 * If there is nothing to decode return an error
2012 if (ph
->ph_size
== 0)
2013 return (DDI_PROP_END_OF_DATA
);
2016 * Decode the property as a single integer and return it
2017 * in data if we were able to decode it.
2019 i
= DDI_PROP_INT64(ph
, DDI_PROP_CMD_DECODE
, &tmp
);
2020 if (i
< DDI_PROP_RESULT_OK
) {
2022 case DDI_PROP_RESULT_EOF
:
2023 return (DDI_PROP_END_OF_DATA
);
2025 case DDI_PROP_RESULT_ERROR
:
2026 return (DDI_PROP_CANNOT_DECODE
);
2030 *(int64_t *)data
= tmp
;
2032 return (DDI_PROP_SUCCESS
);
2036 * Decode an array of integers property
2039 ddi_prop_fm_decode_ints(prop_handle_t
*ph
, void *data
, uint_t
*nelements
)
2048 * Figure out how many array elements there are by going through the
2049 * data without decoding it first and counting.
2052 i
= DDI_PROP_INT(ph
, DDI_PROP_CMD_SKIP
, NULL
);
2059 * If there are no elements return an error
2062 return (DDI_PROP_END_OF_DATA
);
2065 * If we cannot skip through the data, we cannot decode it
2067 if (i
== DDI_PROP_RESULT_ERROR
)
2068 return (DDI_PROP_CANNOT_DECODE
);
2071 * Reset the data pointer to the beginning of the encoded data
2073 ddi_prop_reset_pos(ph
);
2076 * Allocated memory to store the decoded value in.
2078 intp
= ddi_prop_decode_alloc((cnt
* sizeof (int)),
2079 ddi_prop_free_ints
);
2082 * Decode each element and place it in the space we just allocated
2085 for (n
= 0; n
< cnt
; n
++, tmp
++) {
2086 i
= DDI_PROP_INT(ph
, DDI_PROP_CMD_DECODE
, tmp
);
2087 if (i
< DDI_PROP_RESULT_OK
) {
2089 * Free the space we just allocated
2090 * and return an error.
2092 ddi_prop_free(intp
);
2094 case DDI_PROP_RESULT_EOF
:
2095 return (DDI_PROP_END_OF_DATA
);
2097 case DDI_PROP_RESULT_ERROR
:
2098 return (DDI_PROP_CANNOT_DECODE
);
2104 *(int **)data
= intp
;
2106 return (DDI_PROP_SUCCESS
);
2110 * Decode a 64 bit integer array property
2113 ddi_prop_fm_decode_int64_array(prop_handle_t
*ph
, void *data
, uint_t
*nelements
)
2122 * Count the number of array elements by going
2123 * through the data without decoding it.
2126 i
= DDI_PROP_INT64(ph
, DDI_PROP_CMD_SKIP
, NULL
);
2133 * If there are no elements return an error
2136 return (DDI_PROP_END_OF_DATA
);
2139 * If we cannot skip through the data, we cannot decode it
2141 if (i
== DDI_PROP_RESULT_ERROR
)
2142 return (DDI_PROP_CANNOT_DECODE
);
2145 * Reset the data pointer to the beginning of the encoded data
2147 ddi_prop_reset_pos(ph
);
2150 * Allocate memory to store the decoded value.
2152 intp
= ddi_prop_decode_alloc((cnt
* sizeof (int64_t)),
2153 ddi_prop_free_ints
);
2156 * Decode each element and place it in the space allocated
2159 for (n
= 0; n
< cnt
; n
++, tmp
++) {
2160 i
= DDI_PROP_INT64(ph
, DDI_PROP_CMD_DECODE
, tmp
);
2161 if (i
< DDI_PROP_RESULT_OK
) {
2163 * Free the space we just allocated
2164 * and return an error.
2166 ddi_prop_free(intp
);
2168 case DDI_PROP_RESULT_EOF
:
2169 return (DDI_PROP_END_OF_DATA
);
2171 case DDI_PROP_RESULT_ERROR
:
2172 return (DDI_PROP_CANNOT_DECODE
);
2178 *(int64_t **)data
= intp
;
2180 return (DDI_PROP_SUCCESS
);
2184 * Encode an array of integers property (Can be one element)
2187 ddi_prop_fm_encode_ints(prop_handle_t
*ph
, void *data
, uint_t nelements
)
2195 * If there is no data, we cannot do anything
2198 return (DDI_PROP_CANNOT_ENCODE
);
2201 * Get the size of an encoded int.
2203 size
= DDI_PROP_INT(ph
, DDI_PROP_CMD_GET_ESIZE
, NULL
);
2205 if (size
< DDI_PROP_RESULT_OK
) {
2207 case DDI_PROP_RESULT_EOF
:
2208 return (DDI_PROP_END_OF_DATA
);
2210 case DDI_PROP_RESULT_ERROR
:
2211 return (DDI_PROP_CANNOT_ENCODE
);
2216 * Allocate space in the handle to store the encoded int.
2218 if (ddi_prop_encode_alloc(ph
, size
* nelements
) !=
2220 return (DDI_PROP_NO_MEMORY
);
2223 * Encode the array of ints.
2226 for (cnt
= 0; cnt
< nelements
; cnt
++, tmp
++) {
2227 i
= DDI_PROP_INT(ph
, DDI_PROP_CMD_ENCODE
, tmp
);
2228 if (i
< DDI_PROP_RESULT_OK
) {
2230 case DDI_PROP_RESULT_EOF
:
2231 return (DDI_PROP_END_OF_DATA
);
2233 case DDI_PROP_RESULT_ERROR
:
2234 return (DDI_PROP_CANNOT_ENCODE
);
2239 return (DDI_PROP_SUCCESS
);
2244 * Encode a 64 bit integer array property
2247 ddi_prop_fm_encode_int64(prop_handle_t
*ph
, void *data
, uint_t nelements
)
2255 * If there is no data, we cannot do anything
2258 return (DDI_PROP_CANNOT_ENCODE
);
2261 * Get the size of an encoded 64 bit int.
2263 size
= DDI_PROP_INT64(ph
, DDI_PROP_CMD_GET_ESIZE
, NULL
);
2265 if (size
< DDI_PROP_RESULT_OK
) {
2267 case DDI_PROP_RESULT_EOF
:
2268 return (DDI_PROP_END_OF_DATA
);
2270 case DDI_PROP_RESULT_ERROR
:
2271 return (DDI_PROP_CANNOT_ENCODE
);
2276 * Allocate space in the handle to store the encoded int.
2278 if (ddi_prop_encode_alloc(ph
, size
* nelements
) !=
2280 return (DDI_PROP_NO_MEMORY
);
2283 * Encode the array of ints.
2285 tmp
= (int64_t *)data
;
2286 for (cnt
= 0; cnt
< nelements
; cnt
++, tmp
++) {
2287 i
= DDI_PROP_INT64(ph
, DDI_PROP_CMD_ENCODE
, tmp
);
2288 if (i
< DDI_PROP_RESULT_OK
) {
2290 case DDI_PROP_RESULT_EOF
:
2291 return (DDI_PROP_END_OF_DATA
);
2293 case DDI_PROP_RESULT_ERROR
:
2294 return (DDI_PROP_CANNOT_ENCODE
);
2299 return (DDI_PROP_SUCCESS
);
2303 * Decode a single string property
2306 ddi_prop_fm_decode_string(prop_handle_t
*ph
, void *data
, uint_t
*nelements
)
2314 * If there is nothing to decode return an error
2316 if (ph
->ph_size
== 0)
2317 return (DDI_PROP_END_OF_DATA
);
2320 * Get the decoded size of the encoded string.
2322 size
= DDI_PROP_STR(ph
, DDI_PROP_CMD_GET_DSIZE
, NULL
);
2323 if (size
< DDI_PROP_RESULT_OK
) {
2325 case DDI_PROP_RESULT_EOF
:
2326 return (DDI_PROP_END_OF_DATA
);
2328 case DDI_PROP_RESULT_ERROR
:
2329 return (DDI_PROP_CANNOT_DECODE
);
2334 * Allocated memory to store the decoded value in.
2336 str
= ddi_prop_decode_alloc((size_t)size
, ddi_prop_free_string
);
2338 ddi_prop_reset_pos(ph
);
2341 * Decode the str and place it in the space we just allocated
2344 i
= DDI_PROP_STR(ph
, DDI_PROP_CMD_DECODE
, tmp
);
2345 if (i
< DDI_PROP_RESULT_OK
) {
2347 * Free the space we just allocated
2348 * and return an error.
2352 case DDI_PROP_RESULT_EOF
:
2353 return (DDI_PROP_END_OF_DATA
);
2355 case DDI_PROP_RESULT_ERROR
:
2356 return (DDI_PROP_CANNOT_DECODE
);
2360 *(char **)data
= str
;
2363 return (DDI_PROP_SUCCESS
);
2367 * Decode an array of strings.
2370 ddi_prop_fm_decode_strings(prop_handle_t
*ph
, void *data
, uint_t
*nelements
)
2382 * Figure out how many array elements there are by going through the
2383 * data without decoding it first and counting.
2386 i
= DDI_PROP_STR(ph
, DDI_PROP_CMD_SKIP
, NULL
);
2393 * If there are no elements return an error
2396 return (DDI_PROP_END_OF_DATA
);
2399 * If we cannot skip through the data, we cannot decode it
2401 if (i
== DDI_PROP_RESULT_ERROR
)
2402 return (DDI_PROP_CANNOT_DECODE
);
2405 * Reset the data pointer to the beginning of the encoded data
2407 ddi_prop_reset_pos(ph
);
2410 * Figure out how much memory we need for the sum total
2412 nbytes
= (cnt
+ 1) * sizeof (char *);
2414 for (n
= 0; n
< cnt
; n
++) {
2416 * Get the decoded size of the current encoded string.
2418 size
= DDI_PROP_STR(ph
, DDI_PROP_CMD_GET_DSIZE
, NULL
);
2419 if (size
< DDI_PROP_RESULT_OK
) {
2421 case DDI_PROP_RESULT_EOF
:
2422 return (DDI_PROP_END_OF_DATA
);
2424 case DDI_PROP_RESULT_ERROR
:
2425 return (DDI_PROP_CANNOT_DECODE
);
2433 * Allocate memory in which to store the decoded strings.
2435 strs
= ddi_prop_decode_alloc(nbytes
, ddi_prop_free_strings
);
2438 * Set up pointers for each string by figuring out yet
2439 * again how long each string is.
2441 ddi_prop_reset_pos(ph
);
2442 ptr
= (caddr_t
)strs
+ ((cnt
+ 1) * sizeof (char *));
2443 for (tmp
= strs
, n
= 0; n
< cnt
; n
++, tmp
++) {
2445 * Get the decoded size of the current encoded string.
2447 size
= DDI_PROP_STR(ph
, DDI_PROP_CMD_GET_DSIZE
, NULL
);
2448 if (size
< DDI_PROP_RESULT_OK
) {
2449 ddi_prop_free(strs
);
2451 case DDI_PROP_RESULT_EOF
:
2452 return (DDI_PROP_END_OF_DATA
);
2454 case DDI_PROP_RESULT_ERROR
:
2455 return (DDI_PROP_CANNOT_DECODE
);
2464 * String array is terminated by a NULL
2469 * Finally, we can decode each string
2471 ddi_prop_reset_pos(ph
);
2472 for (tmp
= strs
, n
= 0; n
< cnt
; n
++, tmp
++) {
2473 i
= DDI_PROP_STR(ph
, DDI_PROP_CMD_DECODE
, *tmp
);
2474 if (i
< DDI_PROP_RESULT_OK
) {
2476 * Free the space we just allocated
2477 * and return an error
2479 ddi_prop_free(strs
);
2481 case DDI_PROP_RESULT_EOF
:
2482 return (DDI_PROP_END_OF_DATA
);
2484 case DDI_PROP_RESULT_ERROR
:
2485 return (DDI_PROP_CANNOT_DECODE
);
2490 *(char ***)data
= strs
;
2493 return (DDI_PROP_SUCCESS
);
2500 ddi_prop_fm_encode_string(prop_handle_t
*ph
, void *data
, uint_t nelements
)
2507 * If there is no data, we cannot do anything
2510 return (DDI_PROP_CANNOT_ENCODE
);
2513 * Get the size of the encoded string.
2515 tmp
= (char **)data
;
2516 size
= DDI_PROP_STR(ph
, DDI_PROP_CMD_GET_ESIZE
, *tmp
);
2517 if (size
< DDI_PROP_RESULT_OK
) {
2519 case DDI_PROP_RESULT_EOF
:
2520 return (DDI_PROP_END_OF_DATA
);
2522 case DDI_PROP_RESULT_ERROR
:
2523 return (DDI_PROP_CANNOT_ENCODE
);
2528 * Allocate space in the handle to store the encoded string.
2530 if (ddi_prop_encode_alloc(ph
, size
) != DDI_PROP_SUCCESS
)
2531 return (DDI_PROP_NO_MEMORY
);
2533 ddi_prop_reset_pos(ph
);
2536 * Encode the string.
2538 tmp
= (char **)data
;
2539 i
= DDI_PROP_STR(ph
, DDI_PROP_CMD_ENCODE
, *tmp
);
2540 if (i
< DDI_PROP_RESULT_OK
) {
2542 case DDI_PROP_RESULT_EOF
:
2543 return (DDI_PROP_END_OF_DATA
);
2545 case DDI_PROP_RESULT_ERROR
:
2546 return (DDI_PROP_CANNOT_ENCODE
);
2550 return (DDI_PROP_SUCCESS
);
2555 * Encode an array of strings.
2558 ddi_prop_fm_encode_strings(prop_handle_t
*ph
, void *data
, uint_t nelements
)
2567 * If there is no data, we cannot do anything
2570 return (DDI_PROP_CANNOT_ENCODE
);
2573 * Get the total size required to encode all the strings.
2576 tmp
= (char **)data
;
2577 for (cnt
= 0; cnt
< nelements
; cnt
++, tmp
++) {
2578 size
= DDI_PROP_STR(ph
, DDI_PROP_CMD_GET_ESIZE
, *tmp
);
2579 if (size
< DDI_PROP_RESULT_OK
) {
2581 case DDI_PROP_RESULT_EOF
:
2582 return (DDI_PROP_END_OF_DATA
);
2584 case DDI_PROP_RESULT_ERROR
:
2585 return (DDI_PROP_CANNOT_ENCODE
);
2588 total_size
+= (uint_t
)size
;
2592 * Allocate space in the handle to store the encoded strings.
2594 if (ddi_prop_encode_alloc(ph
, total_size
) != DDI_PROP_SUCCESS
)
2595 return (DDI_PROP_NO_MEMORY
);
2597 ddi_prop_reset_pos(ph
);
2600 * Encode the array of strings.
2602 tmp
= (char **)data
;
2603 for (cnt
= 0; cnt
< nelements
; cnt
++, tmp
++) {
2604 i
= DDI_PROP_STR(ph
, DDI_PROP_CMD_ENCODE
, *tmp
);
2605 if (i
< DDI_PROP_RESULT_OK
) {
2607 case DDI_PROP_RESULT_EOF
:
2608 return (DDI_PROP_END_OF_DATA
);
2610 case DDI_PROP_RESULT_ERROR
:
2611 return (DDI_PROP_CANNOT_ENCODE
);
2616 return (DDI_PROP_SUCCESS
);
2621 * Decode an array of bytes.
2624 ddi_prop_fm_decode_bytes(prop_handle_t
*ph
, void *data
, uint_t
*nelements
)
2631 * If there are no elements return an error
2633 if (ph
->ph_size
== 0)
2634 return (DDI_PROP_END_OF_DATA
);
2637 * Get the size of the encoded array of bytes.
2639 nbytes
= DDI_PROP_BYTES(ph
, DDI_PROP_CMD_GET_DSIZE
,
2641 if (nbytes
< DDI_PROP_RESULT_OK
) {
2643 case DDI_PROP_RESULT_EOF
:
2644 return (DDI_PROP_END_OF_DATA
);
2646 case DDI_PROP_RESULT_ERROR
:
2647 return (DDI_PROP_CANNOT_DECODE
);
2652 * Allocated memory to store the decoded value in.
2654 tmp
= ddi_prop_decode_alloc(nbytes
, ddi_prop_free_bytes
);
2657 * Decode each element and place it in the space we just allocated
2659 i
= DDI_PROP_BYTES(ph
, DDI_PROP_CMD_DECODE
, tmp
, nbytes
);
2660 if (i
< DDI_PROP_RESULT_OK
) {
2662 * Free the space we just allocated
2663 * and return an error
2667 case DDI_PROP_RESULT_EOF
:
2668 return (DDI_PROP_END_OF_DATA
);
2670 case DDI_PROP_RESULT_ERROR
:
2671 return (DDI_PROP_CANNOT_DECODE
);
2675 *(uchar_t
**)data
= tmp
;
2676 *nelements
= nbytes
;
2678 return (DDI_PROP_SUCCESS
);
2682 * Encode an array of bytes.
2685 ddi_prop_fm_encode_bytes(prop_handle_t
*ph
, void *data
, uint_t nelements
)
2691 * If there are no elements, then this is a boolean property,
2692 * so just create a property handle with no data and return.
2694 if (nelements
== 0) {
2695 (void) ddi_prop_encode_alloc(ph
, 0);
2696 return (DDI_PROP_SUCCESS
);
2700 * Get the size of the encoded array of bytes.
2702 size
= DDI_PROP_BYTES(ph
, DDI_PROP_CMD_GET_ESIZE
, (uchar_t
*)data
,
2704 if (size
< DDI_PROP_RESULT_OK
) {
2706 case DDI_PROP_RESULT_EOF
:
2707 return (DDI_PROP_END_OF_DATA
);
2709 case DDI_PROP_RESULT_ERROR
:
2710 return (DDI_PROP_CANNOT_DECODE
);
2715 * Allocate space in the handle to store the encoded bytes.
2717 if (ddi_prop_encode_alloc(ph
, (uint_t
)size
) != DDI_PROP_SUCCESS
)
2718 return (DDI_PROP_NO_MEMORY
);
2721 * Encode the array of bytes.
2723 i
= DDI_PROP_BYTES(ph
, DDI_PROP_CMD_ENCODE
, (uchar_t
*)data
,
2725 if (i
< DDI_PROP_RESULT_OK
) {
2727 case DDI_PROP_RESULT_EOF
:
2728 return (DDI_PROP_END_OF_DATA
);
2730 case DDI_PROP_RESULT_ERROR
:
2731 return (DDI_PROP_CANNOT_ENCODE
);
2735 return (DDI_PROP_SUCCESS
);
2739 * OBP 1275 integer, string and byte operators.
2741 * DDI_PROP_CMD_DECODE:
2743 * DDI_PROP_RESULT_ERROR: cannot decode the data
2744 * DDI_PROP_RESULT_EOF: end of data
2745 * DDI_PROP_OK: data was decoded
2747 * DDI_PROP_CMD_ENCODE:
2749 * DDI_PROP_RESULT_ERROR: cannot encode the data
2750 * DDI_PROP_RESULT_EOF: end of data
2751 * DDI_PROP_OK: data was encoded
2753 * DDI_PROP_CMD_SKIP:
2755 * DDI_PROP_RESULT_ERROR: cannot skip the data
2756 * DDI_PROP_RESULT_EOF: end of data
2757 * DDI_PROP_OK: data was skipped
2759 * DDI_PROP_CMD_GET_ESIZE:
2761 * DDI_PROP_RESULT_ERROR: cannot get encoded size
2762 * DDI_PROP_RESULT_EOF: end of data
2763 * > 0: the encoded size
2765 * DDI_PROP_CMD_GET_DSIZE:
2767 * DDI_PROP_RESULT_ERROR: cannot get decoded size
2768 * DDI_PROP_RESULT_EOF: end of data
2769 * > 0: the decoded size
2773 * OBP 1275 integer operator
2775 * OBP properties are a byte stream of data, so integers may not be
2776 * properly aligned. Therefore we need to copy them one byte at a time.
2779 ddi_prop_1275_int(prop_handle_t
*ph
, uint_t cmd
, int *data
)
2784 case DDI_PROP_CMD_DECODE
:
2786 * Check that there is encoded data
2788 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0)
2789 return (DDI_PROP_RESULT_ERROR
);
2790 if (ph
->ph_flags
& PH_FROM_PROM
) {
2791 i
= MIN(ph
->ph_size
, PROP_1275_INT_SIZE
);
2792 if ((int *)ph
->ph_cur_pos
> ((int *)ph
->ph_data
+
2794 return (DDI_PROP_RESULT_ERROR
);
2796 if (ph
->ph_size
< sizeof (int) ||
2797 ((int *)ph
->ph_cur_pos
> ((int *)ph
->ph_data
+
2798 ph
->ph_size
- sizeof (int))))
2799 return (DDI_PROP_RESULT_ERROR
);
2803 * Copy the integer, using the implementation-specific
2804 * copy function if the property is coming from the PROM.
2806 if (ph
->ph_flags
& PH_FROM_PROM
) {
2807 *data
= impl_ddi_prop_int_from_prom(
2808 (uchar_t
*)ph
->ph_cur_pos
,
2809 (ph
->ph_size
< PROP_1275_INT_SIZE
) ?
2810 ph
->ph_size
: PROP_1275_INT_SIZE
);
2812 bcopy(ph
->ph_cur_pos
, data
, sizeof (int));
2816 * Move the current location to the start of the next
2817 * bit of undecoded data.
2819 ph
->ph_cur_pos
= (uchar_t
*)ph
->ph_cur_pos
+
2821 return (DDI_PROP_RESULT_OK
);
2823 case DDI_PROP_CMD_ENCODE
:
2825 * Check that there is room to encoded the data
2827 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0 ||
2828 ph
->ph_size
< PROP_1275_INT_SIZE
||
2829 ((int *)ph
->ph_cur_pos
> ((int *)ph
->ph_data
+
2830 ph
->ph_size
- sizeof (int))))
2831 return (DDI_PROP_RESULT_ERROR
);
2834 * Encode the integer into the byte stream one byte at a
2837 bcopy(data
, ph
->ph_cur_pos
, sizeof (int));
2840 * Move the current location to the start of the next bit of
2841 * space where we can store encoded data.
2843 ph
->ph_cur_pos
= (uchar_t
*)ph
->ph_cur_pos
+ PROP_1275_INT_SIZE
;
2844 return (DDI_PROP_RESULT_OK
);
2846 case DDI_PROP_CMD_SKIP
:
2848 * Check that there is encoded data
2850 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0 ||
2851 ph
->ph_size
< PROP_1275_INT_SIZE
)
2852 return (DDI_PROP_RESULT_ERROR
);
2855 if ((caddr_t
)ph
->ph_cur_pos
==
2856 (caddr_t
)ph
->ph_data
+ ph
->ph_size
) {
2857 return (DDI_PROP_RESULT_EOF
);
2858 } else if ((caddr_t
)ph
->ph_cur_pos
>
2859 (caddr_t
)ph
->ph_data
+ ph
->ph_size
) {
2860 return (DDI_PROP_RESULT_EOF
);
2864 * Move the current location to the start of the next bit of
2867 ph
->ph_cur_pos
= (uchar_t
*)ph
->ph_cur_pos
+ PROP_1275_INT_SIZE
;
2868 return (DDI_PROP_RESULT_OK
);
2870 case DDI_PROP_CMD_GET_ESIZE
:
2872 * Return the size of an encoded integer on OBP
2874 return (PROP_1275_INT_SIZE
);
2876 case DDI_PROP_CMD_GET_DSIZE
:
2878 * Return the size of a decoded integer on the system.
2880 return (sizeof (int));
2884 panic("ddi_prop_1275_int: %x impossible", cmd
);
2887 return (DDI_PROP_RESULT_ERROR
);
2893 * 64 bit integer operator.
2895 * This is an extension, defined by Sun, to the 1275 integer
2896 * operator. This routine handles the encoding/decoding of
2897 * 64 bit integer properties.
2900 ddi_prop_int64_op(prop_handle_t
*ph
, uint_t cmd
, int64_t *data
)
2904 case DDI_PROP_CMD_DECODE
:
2906 * Check that there is encoded data
2908 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0)
2909 return (DDI_PROP_RESULT_ERROR
);
2910 if (ph
->ph_flags
& PH_FROM_PROM
) {
2911 return (DDI_PROP_RESULT_ERROR
);
2913 if (ph
->ph_size
< sizeof (int64_t) ||
2914 ((int64_t *)ph
->ph_cur_pos
>
2915 ((int64_t *)ph
->ph_data
+
2916 ph
->ph_size
- sizeof (int64_t))))
2917 return (DDI_PROP_RESULT_ERROR
);
2920 * Copy the integer, using the implementation-specific
2921 * copy function if the property is coming from the PROM.
2923 if (ph
->ph_flags
& PH_FROM_PROM
) {
2924 return (DDI_PROP_RESULT_ERROR
);
2926 bcopy(ph
->ph_cur_pos
, data
, sizeof (int64_t));
2930 * Move the current location to the start of the next
2931 * bit of undecoded data.
2933 ph
->ph_cur_pos
= (uchar_t
*)ph
->ph_cur_pos
+
2935 return (DDI_PROP_RESULT_OK
);
2937 case DDI_PROP_CMD_ENCODE
:
2939 * Check that there is room to encoded the data
2941 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0 ||
2942 ph
->ph_size
< sizeof (int64_t) ||
2943 ((int64_t *)ph
->ph_cur_pos
> ((int64_t *)ph
->ph_data
+
2944 ph
->ph_size
- sizeof (int64_t))))
2945 return (DDI_PROP_RESULT_ERROR
);
2948 * Encode the integer into the byte stream one byte at a
2951 bcopy(data
, ph
->ph_cur_pos
, sizeof (int64_t));
2954 * Move the current location to the start of the next bit of
2955 * space where we can store encoded data.
2957 ph
->ph_cur_pos
= (uchar_t
*)ph
->ph_cur_pos
+
2959 return (DDI_PROP_RESULT_OK
);
2961 case DDI_PROP_CMD_SKIP
:
2963 * Check that there is encoded data
2965 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0 ||
2966 ph
->ph_size
< sizeof (int64_t))
2967 return (DDI_PROP_RESULT_ERROR
);
2969 if ((caddr_t
)ph
->ph_cur_pos
==
2970 (caddr_t
)ph
->ph_data
+ ph
->ph_size
) {
2971 return (DDI_PROP_RESULT_EOF
);
2972 } else if ((caddr_t
)ph
->ph_cur_pos
>
2973 (caddr_t
)ph
->ph_data
+ ph
->ph_size
) {
2974 return (DDI_PROP_RESULT_EOF
);
2978 * Move the current location to the start of
2979 * the next bit of undecoded data.
2981 ph
->ph_cur_pos
= (uchar_t
*)ph
->ph_cur_pos
+
2983 return (DDI_PROP_RESULT_OK
);
2985 case DDI_PROP_CMD_GET_ESIZE
:
2987 * Return the size of an encoded integer on OBP
2989 return (sizeof (int64_t));
2991 case DDI_PROP_CMD_GET_DSIZE
:
2993 * Return the size of a decoded integer on the system.
2995 return (sizeof (int64_t));
2999 panic("ddi_prop_int64_op: %x impossible", cmd
);
3002 return (DDI_PROP_RESULT_ERROR
);
3008 * OBP 1275 string operator.
3010 * OBP strings are NULL terminated.
3013 ddi_prop_1275_string(prop_handle_t
*ph
, uint_t cmd
, char *data
)
3020 case DDI_PROP_CMD_DECODE
:
3022 * Check that there is encoded data
3024 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0) {
3025 return (DDI_PROP_RESULT_ERROR
);
3029 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
3030 * how to NULL terminate result.
3032 p
= (char *)ph
->ph_cur_pos
;
3033 end
= (char *)ph
->ph_data
+ ph
->ph_size
;
3035 return (DDI_PROP_RESULT_EOF
);
3039 if (*p
++ == 0) { /* NULL from OBP */
3041 return (DDI_PROP_RESULT_OK
);
3046 * If OBP did not NULL terminate string, which happens
3047 * (at least) for 'true'/'false' boolean values, account for
3048 * the space and store null termination on decode.
3052 return (DDI_PROP_RESULT_OK
);
3054 case DDI_PROP_CMD_ENCODE
:
3056 * Check that there is room to encoded the data
3058 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0) {
3059 return (DDI_PROP_RESULT_ERROR
);
3062 n
= strlen(data
) + 1;
3063 if ((char *)ph
->ph_cur_pos
> ((char *)ph
->ph_data
+
3065 return (DDI_PROP_RESULT_ERROR
);
3069 * Copy the NULL terminated string
3071 bcopy(data
, ph
->ph_cur_pos
, n
);
3074 * Move the current location to the start of the next bit of
3075 * space where we can store encoded data.
3077 ph
->ph_cur_pos
= (char *)ph
->ph_cur_pos
+ n
;
3078 return (DDI_PROP_RESULT_OK
);
3080 case DDI_PROP_CMD_SKIP
:
3082 * Check that there is encoded data
3084 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0) {
3085 return (DDI_PROP_RESULT_ERROR
);
3089 * Return the string length plus one for the NULL
3090 * We know the size of the property, we need to
3091 * ensure that the string is properly formatted,
3092 * since we may be looking up random OBP data.
3094 p
= (char *)ph
->ph_cur_pos
;
3095 end
= (char *)ph
->ph_data
+ ph
->ph_size
;
3097 return (DDI_PROP_RESULT_EOF
);
3100 if (*p
++ == 0) { /* NULL from OBP */
3102 return (DDI_PROP_RESULT_OK
);
3107 * Accommodate the fact that OBP does not always NULL
3108 * terminate strings.
3111 return (DDI_PROP_RESULT_OK
);
3113 case DDI_PROP_CMD_GET_ESIZE
:
3115 * Return the size of the encoded string on OBP.
3117 return (strlen(data
) + 1);
3119 case DDI_PROP_CMD_GET_DSIZE
:
3121 * Return the string length plus one for the NULL.
3122 * We know the size of the property, we need to
3123 * ensure that the string is properly formatted,
3124 * since we may be looking up random OBP data.
3126 p
= (char *)ph
->ph_cur_pos
;
3127 end
= (char *)ph
->ph_data
+ ph
->ph_size
;
3129 return (DDI_PROP_RESULT_EOF
);
3131 for (n
= 0; p
< end
; n
++) {
3132 if (*p
++ == 0) { /* NULL from OBP */
3139 * If OBP did not NULL terminate string, which happens for
3140 * 'true'/'false' boolean values, account for the space
3141 * to store null termination here.
3148 panic("ddi_prop_1275_string: %x impossible", cmd
);
3151 return (DDI_PROP_RESULT_ERROR
);
3157 * OBP 1275 byte operator
3159 * Caller must specify the number of bytes to get. OBP encodes bytes
3160 * as a byte so there is a 1-to-1 translation.
3163 ddi_prop_1275_bytes(prop_handle_t
*ph
, uint_t cmd
, uchar_t
*data
,
3167 case DDI_PROP_CMD_DECODE
:
3169 * Check that there is encoded data
3171 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0 ||
3172 ph
->ph_size
< nelements
||
3173 ((char *)ph
->ph_cur_pos
> ((char *)ph
->ph_data
+
3174 ph
->ph_size
- nelements
)))
3175 return (DDI_PROP_RESULT_ERROR
);
3178 * Copy out the bytes
3180 bcopy(ph
->ph_cur_pos
, data
, nelements
);
3183 * Move the current location
3185 ph
->ph_cur_pos
= (char *)ph
->ph_cur_pos
+ nelements
;
3186 return (DDI_PROP_RESULT_OK
);
3188 case DDI_PROP_CMD_ENCODE
:
3190 * Check that there is room to encode the data
3192 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0 ||
3193 ph
->ph_size
< nelements
||
3194 ((char *)ph
->ph_cur_pos
> ((char *)ph
->ph_data
+
3195 ph
->ph_size
- nelements
)))
3196 return (DDI_PROP_RESULT_ERROR
);
3201 bcopy(data
, ph
->ph_cur_pos
, nelements
);
3204 * Move the current location to the start of the next bit of
3205 * space where we can store encoded data.
3207 ph
->ph_cur_pos
= (char *)ph
->ph_cur_pos
+ nelements
;
3208 return (DDI_PROP_RESULT_OK
);
3210 case DDI_PROP_CMD_SKIP
:
3212 * Check that there is encoded data
3214 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0 ||
3215 ph
->ph_size
< nelements
)
3216 return (DDI_PROP_RESULT_ERROR
);
3218 if ((char *)ph
->ph_cur_pos
> ((char *)ph
->ph_data
+
3219 ph
->ph_size
- nelements
))
3220 return (DDI_PROP_RESULT_EOF
);
3223 * Move the current location
3225 ph
->ph_cur_pos
= (char *)ph
->ph_cur_pos
+ nelements
;
3226 return (DDI_PROP_RESULT_OK
);
3228 case DDI_PROP_CMD_GET_ESIZE
:
3230 * The size in bytes of the encoded size is the
3231 * same as the decoded size provided by the caller.
3235 case DDI_PROP_CMD_GET_DSIZE
:
3237 * Just return the number of bytes specified by the caller.
3243 panic("ddi_prop_1275_bytes: %x impossible", cmd
);
3246 return (DDI_PROP_RESULT_ERROR
);
3252 * Used for properties that come from the OBP, hardware configuration files,
3253 * or that are created by calls to ddi_prop_update(9F).
3255 static struct prop_handle_ops prop_1275_ops
= {
3257 ddi_prop_1275_string
,
3258 ddi_prop_1275_bytes
,
3264 * Interface to create/modify a managed property on child's behalf...
3265 * Flags interpreted are:
3266 * DDI_PROP_CANSLEEP: Allow memory allocation to sleep.
3267 * DDI_PROP_SYSTEM_DEF: Manipulate system list rather than driver list.
3269 * Use same dev_t when modifying or undefining a property.
3270 * Search for properties with DDI_DEV_T_ANY to match first named
3271 * property on the list.
3273 * Properties are stored LIFO and subsequently will match the first
3274 * `matching' instance.
3278 * ddi_prop_add: Add a software defined property
3282 * define to get a new ddi_prop_t.
3283 * km_flags are KM_SLEEP or KM_NOSLEEP.
3286 #define DDI_NEW_PROP_T(km_flags) \
3287 (kmem_zalloc(sizeof (ddi_prop_t), km_flags))
3290 ddi_prop_add(dev_t dev
, dev_info_t
*dip
, int flags
,
3291 char *name
, caddr_t value
, int length
)
3293 ddi_prop_t
*new_propp
, *propp
;
3294 ddi_prop_t
**list_head
= &(DEVI(dip
)->devi_drv_prop_ptr
);
3295 int km_flags
= KM_NOSLEEP
;
3299 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
3302 if (dev
== DDI_DEV_T_ANY
|| name
== (char *)0 || strlen(name
) == 0)
3303 return (DDI_PROP_INVAL_ARG
);
3305 if (flags
& DDI_PROP_CANSLEEP
)
3306 km_flags
= KM_SLEEP
;
3308 if (flags
& DDI_PROP_SYSTEM_DEF
)
3309 list_head
= &(DEVI(dip
)->devi_sys_prop_ptr
);
3310 else if (flags
& DDI_PROP_HW_DEF
)
3311 list_head
= &(DEVI(dip
)->devi_hw_prop_ptr
);
3313 if ((new_propp
= DDI_NEW_PROP_T(km_flags
)) == NULL
) {
3314 cmn_err(CE_CONT
, prop_no_mem_msg
, name
);
3315 return (DDI_PROP_NO_MEMORY
);
3319 * If dev is major number 0, then we need to do a ddi_name_to_major
3320 * to get the real major number for the device. This needs to be
3321 * done because some drivers need to call ddi_prop_create in their
3322 * attach routines but they don't have a dev. By creating the dev
3323 * ourself if the major number is 0, drivers will not have to know what
3324 * their major number. They can just create a dev with major number
3325 * 0 and pass it in. For device 0, we will be doing a little extra
3326 * work by recreating the same dev that we already have, but its the
3327 * price you pay :-).
3329 * This fixes bug #1098060.
3331 if (getmajor(dev
) == DDI_MAJOR_T_UNKNOWN
) {
3332 new_propp
->prop_dev
=
3333 makedevice(ddi_name_to_major(DEVI(dip
)->devi_binding_name
),
3336 new_propp
->prop_dev
= dev
;
3339 * Allocate space for property name and copy it in...
3342 name_buf_len
= strlen(name
) + 1;
3343 new_propp
->prop_name
= kmem_alloc(name_buf_len
, km_flags
);
3344 if (new_propp
->prop_name
== 0) {
3345 kmem_free(new_propp
, sizeof (ddi_prop_t
));
3346 cmn_err(CE_CONT
, prop_no_mem_msg
, name
);
3347 return (DDI_PROP_NO_MEMORY
);
3349 bcopy(name
, new_propp
->prop_name
, name_buf_len
);
3352 * Set the property type
3354 new_propp
->prop_flags
= flags
& DDI_PROP_TYPE_MASK
;
3357 * Set length and value ONLY if not an explicit property undefine:
3358 * NOTE: value and length are zero for explicit undefines.
3361 if (flags
& DDI_PROP_UNDEF_IT
) {
3362 new_propp
->prop_flags
|= DDI_PROP_UNDEF_IT
;
3364 if ((new_propp
->prop_len
= length
) != 0) {
3365 new_propp
->prop_val
= kmem_alloc(length
, km_flags
);
3366 if (new_propp
->prop_val
== 0) {
3367 kmem_free(new_propp
->prop_name
, name_buf_len
);
3368 kmem_free(new_propp
, sizeof (ddi_prop_t
));
3369 cmn_err(CE_CONT
, prop_no_mem_msg
, name
);
3370 return (DDI_PROP_NO_MEMORY
);
3372 bcopy(value
, new_propp
->prop_val
, length
);
3377 * Link property into beginning of list. (Properties are LIFO order.)
3380 mutex_enter(&(DEVI(dip
)->devi_lock
));
3382 new_propp
->prop_next
= propp
;
3383 *list_head
= new_propp
;
3384 mutex_exit(&(DEVI(dip
)->devi_lock
));
3385 return (DDI_PROP_SUCCESS
);
3390 * ddi_prop_change: Modify a software managed property value
3392 * Set new length and value if found.
3393 * returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
3394 * input name is the NULL string.
3395 * returns DDI_PROP_NO_MEMORY if unable to allocate memory
3397 * Note: an undef can be modified to be a define,
3398 * (you can't go the other way.)
3402 ddi_prop_change(dev_t dev
, dev_info_t
*dip
, int flags
,
3403 char *name
, caddr_t value
, int length
)
3406 ddi_prop_t
**ppropp
;
3409 if ((dev
== DDI_DEV_T_ANY
) || (name
== NULL
) || (strlen(name
) == 0))
3410 return (DDI_PROP_INVAL_ARG
);
3413 * Preallocate buffer, even if we don't need it...
3416 p
= kmem_alloc(length
, (flags
& DDI_PROP_CANSLEEP
) ?
3417 KM_SLEEP
: KM_NOSLEEP
);
3419 cmn_err(CE_CONT
, prop_no_mem_msg
, name
);
3420 return (DDI_PROP_NO_MEMORY
);
3425 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
3426 * number, a real dev_t value should be created based upon the dip's
3427 * binding driver. See ddi_prop_add...
3429 if (getmajor(dev
) == DDI_MAJOR_T_UNKNOWN
)
3431 ddi_name_to_major(DEVI(dip
)->devi_binding_name
),
3435 * Check to see if the property exists. If so we modify it.
3436 * Else we create it by calling ddi_prop_add().
3438 mutex_enter(&(DEVI(dip
)->devi_lock
));
3439 ppropp
= &DEVI(dip
)->devi_drv_prop_ptr
;
3440 if (flags
& DDI_PROP_SYSTEM_DEF
)
3441 ppropp
= &DEVI(dip
)->devi_sys_prop_ptr
;
3442 else if (flags
& DDI_PROP_HW_DEF
)
3443 ppropp
= &DEVI(dip
)->devi_hw_prop_ptr
;
3445 if ((propp
= i_ddi_prop_search(dev
, name
, flags
, ppropp
)) != NULL
) {
3447 * Need to reallocate buffer? If so, do it
3448 * carefully (reuse same space if new prop
3449 * is same size and non-NULL sized).
3452 bcopy(value
, p
, length
);
3454 if (propp
->prop_len
!= 0)
3455 kmem_free(propp
->prop_val
, propp
->prop_len
);
3457 propp
->prop_len
= length
;
3458 propp
->prop_val
= p
;
3459 propp
->prop_flags
&= ~DDI_PROP_UNDEF_IT
;
3460 mutex_exit(&(DEVI(dip
)->devi_lock
));
3461 return (DDI_PROP_SUCCESS
);
3464 mutex_exit(&(DEVI(dip
)->devi_lock
));
3466 kmem_free(p
, length
);
3468 return (ddi_prop_add(dev
, dip
, flags
, name
, value
, length
));
3472 * Common update routine used to update and encode a property. Creates
3473 * a property handle, calls the property encode routine, figures out if
3474 * the property already exists and updates if it does. Otherwise it
3475 * creates if it does not exist.
3478 ddi_prop_update_common(dev_t match_dev
, dev_info_t
*dip
, int flags
,
3479 char *name
, void *data
, uint_t nelements
,
3480 int (*prop_create
)(prop_handle_t
*, void *data
, uint_t nelements
))
3487 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3490 if (match_dev
== DDI_DEV_T_ANY
|| name
== NULL
|| strlen(name
) == 0)
3491 return (DDI_PROP_INVAL_ARG
);
3497 ph
.ph_cur_pos
= NULL
;
3498 ph
.ph_save_pos
= NULL
;
3500 ph
.ph_ops
= &prop_1275_ops
;
3504 * For compatibility with the old interfaces. The old interfaces
3505 * didn't sleep by default and slept when the flag was set. These
3506 * interfaces to the opposite. So the old interfaces now set the
3507 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
3510 * Blocked data or unblocked data allocation
3511 * for ph.ph_data in ddi_prop_encode_alloc()
3513 if (flags
& DDI_PROP_DONTSLEEP
) {
3515 ph
.ph_flags
= DDI_PROP_DONTSLEEP
;
3517 ourflags
= flags
| DDI_PROP_CANSLEEP
;
3518 ph
.ph_flags
= DDI_PROP_CANSLEEP
;
3522 * Encode the data and store it in the property handle by
3523 * calling the prop_encode routine.
3525 if ((rval
= (*prop_create
)(&ph
, data
, nelements
)) !=
3527 if (rval
== DDI_PROP_NO_MEMORY
)
3528 cmn_err(CE_CONT
, prop_no_mem_msg
, name
);
3529 if (ph
.ph_size
!= 0)
3530 kmem_free(ph
.ph_data
, ph
.ph_size
);
3535 * The old interfaces use a stacking approach to creating
3536 * properties. If we are being called from the old interfaces,
3537 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
3538 * create without checking.
3540 if (flags
& DDI_PROP_STACK_CREATE
) {
3541 rval
= ddi_prop_add(match_dev
, dip
,
3542 ourflags
, name
, ph
.ph_data
, ph
.ph_size
);
3544 rval
= ddi_prop_change(match_dev
, dip
,
3545 ourflags
, name
, ph
.ph_data
, ph
.ph_size
);
3549 * Free the encoded data allocated in the prop_encode routine.
3551 if (ph
.ph_size
!= 0)
3552 kmem_free(ph
.ph_data
, ph
.ph_size
);
3559 * ddi_prop_create: Define a managed property:
3560 * See above for details.
3564 ddi_prop_create(dev_t dev
, dev_info_t
*dip
, int flag
,
3565 char *name
, caddr_t value
, int length
)
3567 if (!(flag
& DDI_PROP_CANSLEEP
)) {
3568 flag
|= DDI_PROP_DONTSLEEP
;
3569 #ifdef DDI_PROP_DEBUG
3571 cmn_err(CE_NOTE
, "!ddi_prop_create: interface obsolete,"
3572 "use ddi_prop_update (prop = %s, node = %s%d)",
3573 name
, ddi_driver_name(dip
), ddi_get_instance(dip
));
3574 #endif /* DDI_PROP_DEBUG */
3576 flag
&= ~DDI_PROP_SYSTEM_DEF
;
3577 flag
|= DDI_PROP_STACK_CREATE
| DDI_PROP_TYPE_ANY
;
3578 return (ddi_prop_update_common(dev
, dip
, flag
, name
,
3579 value
, length
, ddi_prop_fm_encode_bytes
));
3583 e_ddi_prop_create(dev_t dev
, dev_info_t
*dip
, int flag
,
3584 char *name
, caddr_t value
, int length
)
3586 if (!(flag
& DDI_PROP_CANSLEEP
))
3587 flag
|= DDI_PROP_DONTSLEEP
;
3588 flag
|= DDI_PROP_SYSTEM_DEF
| DDI_PROP_STACK_CREATE
| DDI_PROP_TYPE_ANY
;
3589 return (ddi_prop_update_common(dev
, dip
, flag
,
3590 name
, value
, length
, ddi_prop_fm_encode_bytes
));
3594 ddi_prop_modify(dev_t dev
, dev_info_t
*dip
, int flag
,
3595 char *name
, caddr_t value
, int length
)
3597 ASSERT((flag
& DDI_PROP_TYPE_MASK
) == 0);
3600 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3603 if (dev
== DDI_DEV_T_ANY
|| name
== NULL
|| strlen(name
) == 0)
3604 return (DDI_PROP_INVAL_ARG
);
3606 if (!(flag
& DDI_PROP_CANSLEEP
))
3607 flag
|= DDI_PROP_DONTSLEEP
;
3608 flag
&= ~DDI_PROP_SYSTEM_DEF
;
3609 if (ddi_prop_exists(dev
, dip
, (flag
| DDI_PROP_NOTPROM
), name
) == 0)
3610 return (DDI_PROP_NOT_FOUND
);
3612 return (ddi_prop_update_common(dev
, dip
,
3613 (flag
| DDI_PROP_TYPE_BYTE
), name
,
3614 value
, length
, ddi_prop_fm_encode_bytes
));
3618 e_ddi_prop_modify(dev_t dev
, dev_info_t
*dip
, int flag
,
3619 char *name
, caddr_t value
, int length
)
3621 ASSERT((flag
& DDI_PROP_TYPE_MASK
) == 0);
3624 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3627 if (dev
== DDI_DEV_T_ANY
|| name
== NULL
|| strlen(name
) == 0)
3628 return (DDI_PROP_INVAL_ARG
);
3630 if (ddi_prop_exists(dev
, dip
, (flag
| DDI_PROP_SYSTEM_DEF
), name
) == 0)
3631 return (DDI_PROP_NOT_FOUND
);
3633 if (!(flag
& DDI_PROP_CANSLEEP
))
3634 flag
|= DDI_PROP_DONTSLEEP
;
3635 return (ddi_prop_update_common(dev
, dip
,
3636 (flag
| DDI_PROP_SYSTEM_DEF
| DDI_PROP_TYPE_BYTE
),
3637 name
, value
, length
, ddi_prop_fm_encode_bytes
));
3642 * Common lookup routine used to lookup and decode a property.
3643 * Creates a property handle, searches for the raw encoded data,
3644 * fills in the handle, and calls the property decode functions
3647 * This routine is not static because ddi_bus_prop_op() which lives in
3648 * ddi_impl.c calls it. No driver should be calling this routine.
3651 ddi_prop_lookup_common(dev_t match_dev
, dev_info_t
*dip
,
3652 uint_t flags
, char *name
, void *data
, uint_t
*nelements
,
3653 int (*prop_decoder
)(prop_handle_t
*, void *data
, uint_t
*nelements
))
3659 if ((match_dev
== DDI_DEV_T_NONE
) ||
3660 (name
== NULL
) || (strlen(name
) == 0))
3661 return (DDI_PROP_INVAL_ARG
);
3663 ourflags
= (flags
& DDI_PROP_DONTSLEEP
) ? flags
:
3664 flags
| DDI_PROP_CANSLEEP
;
3667 * Get the encoded data
3669 bzero(&ph
, sizeof (prop_handle_t
));
3671 if ((flags
& DDI_UNBND_DLPI2
) || (flags
& DDI_PROP_ROOTNEX_GLOBAL
)) {
3673 * For rootnex and unbound dlpi style-2 devices, index into
3674 * the devnames' array and search the global
3677 ourflags
&= ~DDI_UNBND_DLPI2
;
3678 rval
= i_ddi_prop_search_global(match_dev
,
3679 ourflags
, name
, &ph
.ph_data
, &ph
.ph_size
);
3681 rval
= ddi_prop_search_common(match_dev
, dip
,
3682 PROP_LEN_AND_VAL_ALLOC
, ourflags
, name
,
3683 &ph
.ph_data
, &ph
.ph_size
);
3687 if (rval
!= DDI_PROP_SUCCESS
&& rval
!= DDI_PROP_FOUND_1275
) {
3688 ASSERT(ph
.ph_data
== NULL
);
3689 ASSERT(ph
.ph_size
== 0);
3694 * If the encoded data came from a OBP or software
3695 * use the 1275 OBP decode/encode routines.
3697 ph
.ph_cur_pos
= ph
.ph_data
;
3698 ph
.ph_save_pos
= ph
.ph_data
;
3699 ph
.ph_ops
= &prop_1275_ops
;
3700 ph
.ph_flags
= (rval
== DDI_PROP_FOUND_1275
) ? PH_FROM_PROM
: 0;
3702 rval
= (*prop_decoder
)(&ph
, data
, nelements
);
3705 * Free the encoded data
3707 if (ph
.ph_size
!= 0)
3708 kmem_free(ph
.ph_data
, ph
.ph_size
);
3714 * Lookup and return an array of composite properties. The driver must
3715 * provide the decode routine.
3718 ddi_prop_lookup(dev_t match_dev
, dev_info_t
*dip
,
3719 uint_t flags
, char *name
, void *data
, uint_t
*nelements
,
3720 int (*prop_decoder
)(prop_handle_t
*, void *data
, uint_t
*nelements
))
3722 return (ddi_prop_lookup_common(match_dev
, dip
,
3723 (flags
| DDI_PROP_TYPE_COMPOSITE
), name
,
3724 data
, nelements
, prop_decoder
));
3728 * Return 1 if a property exists (no type checking done).
3729 * Return 0 if it does not exist.
3732 ddi_prop_exists(dev_t match_dev
, dev_info_t
*dip
, uint_t flags
, char *name
)
3737 i
= ddi_prop_search_common(match_dev
, dip
, PROP_EXISTS
,
3738 flags
| DDI_PROP_TYPE_MASK
, name
, NULL
, &x
);
3739 return (i
== DDI_PROP_SUCCESS
|| i
== DDI_PROP_FOUND_1275
);
3744 * Update an array of composite properties. The driver must
3745 * provide the encode routine.
3748 ddi_prop_update(dev_t match_dev
, dev_info_t
*dip
,
3749 char *name
, void *data
, uint_t nelements
,
3750 int (*prop_create
)(prop_handle_t
*, void *data
, uint_t nelements
))
3752 return (ddi_prop_update_common(match_dev
, dip
, DDI_PROP_TYPE_COMPOSITE
,
3753 name
, data
, nelements
, prop_create
));
3757 * Get a single integer or boolean property and return it.
3758 * If the property does not exists, or cannot be decoded,
3759 * then return the defvalue passed in.
3761 * This routine always succeeds.
3764 ddi_prop_get_int(dev_t match_dev
, dev_info_t
*dip
, uint_t flags
,
3765 char *name
, int defvalue
)
3771 if (flags
& ~(DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
3772 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
| DDI_PROP_ROOTNEX_GLOBAL
)) {
3775 cmn_err(CE_WARN
, "ddi_prop_get_int: invalid flag"
3776 " 0x%x (prop = %s, node = %s%d)", flags
,
3777 name
, ddi_driver_name(dip
), ddi_get_instance(dip
));
3780 flags
&= DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
3781 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
;
3784 if ((rval
= ddi_prop_lookup_common(match_dev
, dip
,
3785 (flags
| DDI_PROP_TYPE_INT
), name
, &data
, &nelements
,
3786 ddi_prop_fm_decode_int
)) != DDI_PROP_SUCCESS
) {
3787 if (rval
== DDI_PROP_END_OF_DATA
)
3796 * Get a single 64 bit integer or boolean property and return it.
3797 * If the property does not exists, or cannot be decoded,
3798 * then return the defvalue passed in.
3800 * This routine always succeeds.
3803 ddi_prop_get_int64(dev_t match_dev
, dev_info_t
*dip
, uint_t flags
,
3804 char *name
, int64_t defvalue
)
3810 if (flags
& ~(DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
3811 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
| DDI_PROP_ROOTNEX_GLOBAL
)) {
3814 cmn_err(CE_WARN
, "ddi_prop_get_int64: invalid flag"
3815 " 0x%x (prop = %s, node = %s%d)", flags
,
3816 name
, ddi_driver_name(dip
), ddi_get_instance(dip
));
3819 return (DDI_PROP_INVAL_ARG
);
3822 if ((rval
= ddi_prop_lookup_common(match_dev
, dip
,
3823 (flags
| DDI_PROP_TYPE_INT64
| DDI_PROP_NOTPROM
),
3824 name
, &data
, &nelements
, ddi_prop_fm_decode_int64
))
3825 != DDI_PROP_SUCCESS
) {
3826 if (rval
== DDI_PROP_END_OF_DATA
)
3835 * Get an array of integer property
3838 ddi_prop_lookup_int_array(dev_t match_dev
, dev_info_t
*dip
, uint_t flags
,
3839 char *name
, int **data
, uint_t
*nelements
)
3841 if (flags
& ~(DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
3842 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
| DDI_PROP_ROOTNEX_GLOBAL
)) {
3845 cmn_err(CE_WARN
, "ddi_prop_lookup_int_array: "
3846 "invalid flag 0x%x (prop = %s, node = %s%d)",
3847 flags
, name
, ddi_driver_name(dip
),
3848 ddi_get_instance(dip
));
3851 flags
&= DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
3852 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
;
3855 return (ddi_prop_lookup_common(match_dev
, dip
,
3856 (flags
| DDI_PROP_TYPE_INT
), name
, data
,
3857 nelements
, ddi_prop_fm_decode_ints
));
3861 * Get an array of 64 bit integer properties
3864 ddi_prop_lookup_int64_array(dev_t match_dev
, dev_info_t
*dip
, uint_t flags
,
3865 char *name
, int64_t **data
, uint_t
*nelements
)
3867 if (flags
& ~(DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
3868 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
| DDI_PROP_ROOTNEX_GLOBAL
)) {
3871 cmn_err(CE_WARN
, "ddi_prop_lookup_int64_array: "
3872 "invalid flag 0x%x (prop = %s, node = %s%d)",
3873 flags
, name
, ddi_driver_name(dip
),
3874 ddi_get_instance(dip
));
3877 return (DDI_PROP_INVAL_ARG
);
3880 return (ddi_prop_lookup_common(match_dev
, dip
,
3881 (flags
| DDI_PROP_TYPE_INT64
| DDI_PROP_NOTPROM
),
3882 name
, data
, nelements
, ddi_prop_fm_decode_int64_array
));
3886 * Update a single integer property. If the property exists on the drivers
3887 * property list it updates, else it creates it.
3890 ddi_prop_update_int(dev_t match_dev
, dev_info_t
*dip
,
3891 char *name
, int data
)
3893 return (ddi_prop_update_common(match_dev
, dip
, DDI_PROP_TYPE_INT
,
3894 name
, &data
, 1, ddi_prop_fm_encode_ints
));
3898 * Update a single 64 bit integer property.
3899 * Update the driver property list if it exists, else create it.
3902 ddi_prop_update_int64(dev_t match_dev
, dev_info_t
*dip
,
3903 char *name
, int64_t data
)
3905 return (ddi_prop_update_common(match_dev
, dip
, DDI_PROP_TYPE_INT64
,
3906 name
, &data
, 1, ddi_prop_fm_encode_int64
));
3910 e_ddi_prop_update_int(dev_t match_dev
, dev_info_t
*dip
,
3911 char *name
, int data
)
3913 return (ddi_prop_update_common(match_dev
, dip
,
3914 DDI_PROP_SYSTEM_DEF
| DDI_PROP_TYPE_INT
,
3915 name
, &data
, 1, ddi_prop_fm_encode_ints
));
3919 e_ddi_prop_update_int64(dev_t match_dev
, dev_info_t
*dip
,
3920 char *name
, int64_t data
)
3922 return (ddi_prop_update_common(match_dev
, dip
,
3923 DDI_PROP_SYSTEM_DEF
| DDI_PROP_TYPE_INT64
,
3924 name
, &data
, 1, ddi_prop_fm_encode_int64
));
3928 * Update an array of integer property. If the property exists on the drivers
3929 * property list it updates, else it creates it.
3932 ddi_prop_update_int_array(dev_t match_dev
, dev_info_t
*dip
,
3933 char *name
, int *data
, uint_t nelements
)
3935 return (ddi_prop_update_common(match_dev
, dip
, DDI_PROP_TYPE_INT
,
3936 name
, data
, nelements
, ddi_prop_fm_encode_ints
));
3940 * Update an array of 64 bit integer properties.
3941 * Update the driver property list if it exists, else create it.
3944 ddi_prop_update_int64_array(dev_t match_dev
, dev_info_t
*dip
,
3945 char *name
, int64_t *data
, uint_t nelements
)
3947 return (ddi_prop_update_common(match_dev
, dip
, DDI_PROP_TYPE_INT64
,
3948 name
, data
, nelements
, ddi_prop_fm_encode_int64
));
3952 e_ddi_prop_update_int64_array(dev_t match_dev
, dev_info_t
*dip
,
3953 char *name
, int64_t *data
, uint_t nelements
)
3955 return (ddi_prop_update_common(match_dev
, dip
,
3956 DDI_PROP_SYSTEM_DEF
| DDI_PROP_TYPE_INT64
,
3957 name
, data
, nelements
, ddi_prop_fm_encode_int64
));
3961 e_ddi_prop_update_int_array(dev_t match_dev
, dev_info_t
*dip
,
3962 char *name
, int *data
, uint_t nelements
)
3964 return (ddi_prop_update_common(match_dev
, dip
,
3965 DDI_PROP_SYSTEM_DEF
| DDI_PROP_TYPE_INT
,
3966 name
, data
, nelements
, ddi_prop_fm_encode_ints
));
3970 * Get a single string property.
3973 ddi_prop_lookup_string(dev_t match_dev
, dev_info_t
*dip
, uint_t flags
,
3974 char *name
, char **data
)
3978 if (flags
& ~(DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
3979 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
| DDI_PROP_ROOTNEX_GLOBAL
)) {
3982 cmn_err(CE_WARN
, "%s: invalid flag 0x%x "
3983 "(prop = %s, node = %s%d); invalid bits ignored",
3984 "ddi_prop_lookup_string", flags
, name
,
3985 ddi_driver_name(dip
), ddi_get_instance(dip
));
3988 flags
&= DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
3989 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
;
3992 return (ddi_prop_lookup_common(match_dev
, dip
,
3993 (flags
| DDI_PROP_TYPE_STRING
), name
, data
,
3994 &x
, ddi_prop_fm_decode_string
));
3998 * Get an array of strings property.
4001 ddi_prop_lookup_string_array(dev_t match_dev
, dev_info_t
*dip
, uint_t flags
,
4002 char *name
, char ***data
, uint_t
*nelements
)
4004 if (flags
& ~(DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
4005 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
| DDI_PROP_ROOTNEX_GLOBAL
)) {
4008 cmn_err(CE_WARN
, "ddi_prop_lookup_string_array: "
4009 "invalid flag 0x%x (prop = %s, node = %s%d)",
4010 flags
, name
, ddi_driver_name(dip
),
4011 ddi_get_instance(dip
));
4014 flags
&= DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
4015 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
;
4018 return (ddi_prop_lookup_common(match_dev
, dip
,
4019 (flags
| DDI_PROP_TYPE_STRING
), name
, data
,
4020 nelements
, ddi_prop_fm_decode_strings
));
4024 * Update a single string property.
4027 ddi_prop_update_string(dev_t match_dev
, dev_info_t
*dip
,
4028 char *name
, char *data
)
4030 return (ddi_prop_update_common(match_dev
, dip
,
4031 DDI_PROP_TYPE_STRING
, name
, &data
, 1,
4032 ddi_prop_fm_encode_string
));
4036 e_ddi_prop_update_string(dev_t match_dev
, dev_info_t
*dip
,
4037 char *name
, char *data
)
4039 return (ddi_prop_update_common(match_dev
, dip
,
4040 DDI_PROP_SYSTEM_DEF
| DDI_PROP_TYPE_STRING
,
4041 name
, &data
, 1, ddi_prop_fm_encode_string
));
4046 * Update an array of strings property.
4049 ddi_prop_update_string_array(dev_t match_dev
, dev_info_t
*dip
,
4050 char *name
, char **data
, uint_t nelements
)
4052 return (ddi_prop_update_common(match_dev
, dip
,
4053 DDI_PROP_TYPE_STRING
, name
, data
, nelements
,
4054 ddi_prop_fm_encode_strings
));
4058 e_ddi_prop_update_string_array(dev_t match_dev
, dev_info_t
*dip
,
4059 char *name
, char **data
, uint_t nelements
)
4061 return (ddi_prop_update_common(match_dev
, dip
,
4062 DDI_PROP_SYSTEM_DEF
| DDI_PROP_TYPE_STRING
,
4063 name
, data
, nelements
,
4064 ddi_prop_fm_encode_strings
));
4069 * Get an array of bytes property.
4072 ddi_prop_lookup_byte_array(dev_t match_dev
, dev_info_t
*dip
, uint_t flags
,
4073 char *name
, uchar_t
**data
, uint_t
*nelements
)
4075 if (flags
& ~(DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
4076 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
| DDI_PROP_ROOTNEX_GLOBAL
)) {
4079 cmn_err(CE_WARN
, "ddi_prop_lookup_byte_array: "
4080 " invalid flag 0x%x (prop = %s, node = %s%d)",
4081 flags
, name
, ddi_driver_name(dip
),
4082 ddi_get_instance(dip
));
4085 flags
&= DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
4086 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
;
4089 return (ddi_prop_lookup_common(match_dev
, dip
,
4090 (flags
| DDI_PROP_TYPE_BYTE
), name
, data
,
4091 nelements
, ddi_prop_fm_decode_bytes
));
4095 * Update an array of bytes property.
4098 ddi_prop_update_byte_array(dev_t match_dev
, dev_info_t
*dip
,
4099 char *name
, uchar_t
*data
, uint_t nelements
)
4102 return (DDI_PROP_INVAL_ARG
);
4104 return (ddi_prop_update_common(match_dev
, dip
, DDI_PROP_TYPE_BYTE
,
4105 name
, data
, nelements
, ddi_prop_fm_encode_bytes
));
4110 e_ddi_prop_update_byte_array(dev_t match_dev
, dev_info_t
*dip
,
4111 char *name
, uchar_t
*data
, uint_t nelements
)
4114 return (DDI_PROP_INVAL_ARG
);
4116 return (ddi_prop_update_common(match_dev
, dip
,
4117 DDI_PROP_SYSTEM_DEF
| DDI_PROP_TYPE_BYTE
,
4118 name
, data
, nelements
, ddi_prop_fm_encode_bytes
));
4123 * ddi_prop_remove_common: Undefine a managed property:
4124 * Input dev_t must match dev_t when defined.
4125 * Returns DDI_PROP_NOT_FOUND, possibly.
4126 * DDI_PROP_INVAL_ARG is also possible if dev is
4127 * DDI_DEV_T_ANY or incoming name is the NULL string.
4130 ddi_prop_remove_common(dev_t dev
, dev_info_t
*dip
, char *name
, int flag
)
4132 ddi_prop_t
**list_head
= &(DEVI(dip
)->devi_drv_prop_ptr
);
4134 ddi_prop_t
*lastpropp
= NULL
;
4136 if ((dev
== DDI_DEV_T_ANY
) || (name
== (char *)0) ||
4137 (strlen(name
) == 0)) {
4138 return (DDI_PROP_INVAL_ARG
);
4141 if (flag
& DDI_PROP_SYSTEM_DEF
)
4142 list_head
= &(DEVI(dip
)->devi_sys_prop_ptr
);
4143 else if (flag
& DDI_PROP_HW_DEF
)
4144 list_head
= &(DEVI(dip
)->devi_hw_prop_ptr
);
4146 mutex_enter(&(DEVI(dip
)->devi_lock
));
4148 for (propp
= *list_head
; propp
!= NULL
; propp
= propp
->prop_next
) {
4149 if (DDI_STRSAME(propp
->prop_name
, name
) &&
4150 (dev
== propp
->prop_dev
)) {
4152 * Unlink this propp allowing for it to
4153 * be first in the list:
4156 if (lastpropp
== NULL
)
4157 *list_head
= propp
->prop_next
;
4159 lastpropp
->prop_next
= propp
->prop_next
;
4161 mutex_exit(&(DEVI(dip
)->devi_lock
));
4164 * Free memory and return...
4166 kmem_free(propp
->prop_name
,
4167 strlen(propp
->prop_name
) + 1);
4168 if (propp
->prop_len
!= 0)
4169 kmem_free(propp
->prop_val
, propp
->prop_len
);
4170 kmem_free(propp
, sizeof (ddi_prop_t
));
4171 return (DDI_PROP_SUCCESS
);
4175 mutex_exit(&(DEVI(dip
)->devi_lock
));
4176 return (DDI_PROP_NOT_FOUND
);
4180 ddi_prop_remove(dev_t dev
, dev_info_t
*dip
, char *name
)
4182 return (ddi_prop_remove_common(dev
, dip
, name
, 0));
4186 e_ddi_prop_remove(dev_t dev
, dev_info_t
*dip
, char *name
)
4188 return (ddi_prop_remove_common(dev
, dip
, name
, DDI_PROP_SYSTEM_DEF
));
4192 * e_ddi_prop_list_delete: remove a list of properties
4193 * Note that the caller needs to provide the required protection
4194 * (eg. devi_lock if these properties are still attached to a devi)
4197 e_ddi_prop_list_delete(ddi_prop_t
*props
)
4199 i_ddi_prop_list_delete(props
);
4203 * ddi_prop_remove_all_common:
4204 * Used before unloading a driver to remove
4205 * all properties. (undefines all dev_t's props.)
4206 * Also removes `explicitly undefined' props.
4207 * No errors possible.
4210 ddi_prop_remove_all_common(dev_info_t
*dip
, int flag
)
4212 ddi_prop_t
**list_head
;
4214 mutex_enter(&(DEVI(dip
)->devi_lock
));
4215 if (flag
& DDI_PROP_SYSTEM_DEF
) {
4216 list_head
= &(DEVI(dip
)->devi_sys_prop_ptr
);
4217 } else if (flag
& DDI_PROP_HW_DEF
) {
4218 list_head
= &(DEVI(dip
)->devi_hw_prop_ptr
);
4220 list_head
= &(DEVI(dip
)->devi_drv_prop_ptr
);
4222 i_ddi_prop_list_delete(*list_head
);
4224 mutex_exit(&(DEVI(dip
)->devi_lock
));
4229 * ddi_prop_remove_all: Remove all driver prop definitions.
4233 ddi_prop_remove_all(dev_info_t
*dip
)
4235 i_ddi_prop_dyn_driver_set(dip
, NULL
);
4236 ddi_prop_remove_all_common(dip
, 0);
4240 * e_ddi_prop_remove_all: Remove all system prop definitions.
4244 e_ddi_prop_remove_all(dev_info_t
*dip
)
4246 ddi_prop_remove_all_common(dip
, (int)DDI_PROP_SYSTEM_DEF
);
4251 * ddi_prop_undefine: Explicitly undefine a property. Property
4252 * searches which match this property return
4253 * the error code DDI_PROP_UNDEFINED.
4255 * Use ddi_prop_remove to negate effect of
4258 * See above for error returns.
4262 ddi_prop_undefine(dev_t dev
, dev_info_t
*dip
, int flag
, char *name
)
4264 if (!(flag
& DDI_PROP_CANSLEEP
))
4265 flag
|= DDI_PROP_DONTSLEEP
;
4266 flag
|= DDI_PROP_STACK_CREATE
| DDI_PROP_UNDEF_IT
| DDI_PROP_TYPE_ANY
;
4267 return (ddi_prop_update_common(dev
, dip
, flag
,
4268 name
, NULL
, 0, ddi_prop_fm_encode_bytes
));
4272 e_ddi_prop_undefine(dev_t dev
, dev_info_t
*dip
, int flag
, char *name
)
4274 if (!(flag
& DDI_PROP_CANSLEEP
))
4275 flag
|= DDI_PROP_DONTSLEEP
;
4276 flag
|= DDI_PROP_SYSTEM_DEF
| DDI_PROP_STACK_CREATE
|
4277 DDI_PROP_UNDEF_IT
| DDI_PROP_TYPE_ANY
;
4278 return (ddi_prop_update_common(dev
, dip
, flag
,
4279 name
, NULL
, 0, ddi_prop_fm_encode_bytes
));
4283 * Support for gathering dynamic properties in devinfo snapshot.
4286 i_ddi_prop_dyn_driver_set(dev_info_t
*dip
, i_ddi_prop_dyn_t
*dp
)
4288 DEVI(dip
)->devi_prop_dyn_driver
= dp
;
4292 i_ddi_prop_dyn_driver_get(dev_info_t
*dip
)
4294 return (DEVI(dip
)->devi_prop_dyn_driver
);
4298 i_ddi_prop_dyn_parent_set(dev_info_t
*dip
, i_ddi_prop_dyn_t
*dp
)
4300 DEVI(dip
)->devi_prop_dyn_parent
= dp
;
4304 i_ddi_prop_dyn_parent_get(dev_info_t
*dip
)
4306 return (DEVI(dip
)->devi_prop_dyn_parent
);
4310 i_ddi_prop_dyn_cache_invalidate(dev_info_t
*dip
, i_ddi_prop_dyn_t
*dp
)
4312 /* for now we invalidate the entire cached snapshot */
4314 i_ddi_di_cache_invalidate();
4319 ddi_prop_cache_invalidate(dev_t dev
, dev_info_t
*dip
, char *name
, int flags
)
4321 /* for now we invalidate the entire cached snapshot */
4322 i_ddi_di_cache_invalidate();
4327 * Code to search hardware layer (PROM), if it exists, on behalf of child.
4329 * if input dip != child_dip, then call is on behalf of child
4330 * to search PROM, do it via ddi_prop_search_common() and ascend only
4333 * if input dip == ch_dip (child_dip), call is on behalf of root driver,
4334 * to search for PROM defined props only.
4336 * Note that the PROM search is done only if the requested dev
4337 * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
4338 * have no associated dev, thus are automatically associated with
4341 * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
4343 * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
4344 * that the property resides in the prom.
4347 impl_ddi_bus_prop_op(dev_t dev
, dev_info_t
*dip
, dev_info_t
*ch_dip
,
4348 ddi_prop_op_t prop_op
, int mod_flags
,
4349 char *name
, caddr_t valuep
, int *lengthp
)
4355 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
4356 * look in caller's PROM if it's a self identifying device...
4358 * Note that this is very similar to ddi_prop_op, but we
4359 * search the PROM instead of the s/w defined properties,
4360 * and we are called on by the parent driver to do this for
4364 if (((dev
== DDI_DEV_T_NONE
) || (dev
== DDI_DEV_T_ANY
)) &&
4365 ndi_dev_is_prom_node(ch_dip
) &&
4366 ((mod_flags
& DDI_PROP_NOTPROM
) == 0)) {
4367 len
= prom_getproplen((pnode_t
)DEVI(ch_dip
)->devi_nodeid
, name
);
4369 return (DDI_PROP_NOT_FOUND
);
4373 * If exists only request, we're done
4375 if (prop_op
== PROP_EXISTS
) {
4376 return (DDI_PROP_FOUND_1275
);
4380 * If length only request or prop length == 0, get out
4382 if ((prop_op
== PROP_LEN
) || (len
== 0)) {
4384 return (DDI_PROP_FOUND_1275
);
4388 * Allocate buffer if required... (either way `buffer'
4389 * is receiving address).
4394 case PROP_LEN_AND_VAL_ALLOC
:
4396 buffer
= kmem_alloc((size_t)len
,
4397 mod_flags
& DDI_PROP_CANSLEEP
?
4398 KM_SLEEP
: KM_NOSLEEP
);
4399 if (buffer
== NULL
) {
4400 return (DDI_PROP_NO_MEMORY
);
4402 *(caddr_t
*)valuep
= buffer
;
4405 case PROP_LEN_AND_VAL_BUF
:
4407 if (len
> (*lengthp
)) {
4409 return (DDI_PROP_BUF_TOO_SMALL
);
4420 * Call the PROM function to do the copy.
4422 (void) prom_getprop((pnode_t
)DEVI(ch_dip
)->devi_nodeid
,
4425 *lengthp
= len
; /* return the actual length to the caller */
4426 (void) impl_fix_props(dip
, ch_dip
, name
, len
, buffer
);
4427 return (DDI_PROP_FOUND_1275
);
4430 return (DDI_PROP_NOT_FOUND
);
4434 * The ddi_bus_prop_op default bus nexus prop op function.
4436 * Code to search hardware layer (PROM), if it exists,
4437 * on behalf of child, then, if appropriate, ascend and check
4438 * my own software defined properties...
4441 ddi_bus_prop_op(dev_t dev
, dev_info_t
*dip
, dev_info_t
*ch_dip
,
4442 ddi_prop_op_t prop_op
, int mod_flags
,
4443 char *name
, caddr_t valuep
, int *lengthp
)
4447 error
= impl_ddi_bus_prop_op(dev
, dip
, ch_dip
, prop_op
, mod_flags
,
4448 name
, valuep
, lengthp
);
4450 if (error
== DDI_PROP_SUCCESS
|| error
== DDI_PROP_FOUND_1275
||
4451 error
== DDI_PROP_BUF_TOO_SMALL
)
4454 if (error
== DDI_PROP_NO_MEMORY
) {
4455 cmn_err(CE_CONT
, prop_no_mem_msg
, name
);
4456 return (DDI_PROP_NO_MEMORY
);
4460 * Check the 'options' node as a last resort
4462 if ((mod_flags
& DDI_PROP_DONTPASS
) != 0)
4463 return (DDI_PROP_NOT_FOUND
);
4465 if (ch_dip
== ddi_root_node()) {
4467 * As a last resort, when we've reached
4468 * the top and still haven't found the
4469 * property, see if the desired property
4470 * is attached to the options node.
4472 * The options dip is attached right after boot.
4474 ASSERT(options_dip
!= NULL
);
4476 * Force the "don't pass" flag to *just* see
4477 * what the options node has to offer.
4479 return (ddi_prop_search_common(dev
, options_dip
, prop_op
,
4480 mod_flags
|DDI_PROP_DONTPASS
, name
, valuep
,
4481 (uint_t
*)lengthp
));
4485 * Otherwise, continue search with parent's s/w defined properties...
4486 * NOTE: Using `dip' in following call increments the level.
4489 return (ddi_prop_search_common(dev
, dip
, prop_op
, mod_flags
,
4490 name
, valuep
, (uint_t
*)lengthp
));
4494 * External property functions used by other parts of the kernel...
4498 * e_ddi_getlongprop: See comments for ddi_get_longprop.
4502 e_ddi_getlongprop(dev_t dev
, vtype_t type
, char *name
, int flags
,
4503 caddr_t valuep
, int *lengthp
)
4505 _NOTE(ARGUNUSED(type
))
4507 ddi_prop_op_t prop_op
= PROP_LEN_AND_VAL_ALLOC
;
4510 if ((devi
= e_ddi_hold_devi_by_dev(dev
, 0)) == NULL
)
4511 return (DDI_PROP_NOT_FOUND
);
4513 error
= cdev_prop_op(dev
, devi
, prop_op
, flags
, name
, valuep
, lengthp
);
4514 ddi_release_devi(devi
);
4519 * e_ddi_getlongprop_buf: See comments for ddi_getlongprop_buf.
4523 e_ddi_getlongprop_buf(dev_t dev
, vtype_t type
, char *name
, int flags
,
4524 caddr_t valuep
, int *lengthp
)
4526 _NOTE(ARGUNUSED(type
))
4528 ddi_prop_op_t prop_op
= PROP_LEN_AND_VAL_BUF
;
4531 if ((devi
= e_ddi_hold_devi_by_dev(dev
, 0)) == NULL
)
4532 return (DDI_PROP_NOT_FOUND
);
4534 error
= cdev_prop_op(dev
, devi
, prop_op
, flags
, name
, valuep
, lengthp
);
4535 ddi_release_devi(devi
);
4540 * e_ddi_getprop: See comments for ddi_getprop.
4543 e_ddi_getprop(dev_t dev
, vtype_t type
, char *name
, int flags
, int defvalue
)
4545 _NOTE(ARGUNUSED(type
))
4547 ddi_prop_op_t prop_op
= PROP_LEN_AND_VAL_BUF
;
4548 int propvalue
= defvalue
;
4549 int proplength
= sizeof (int);
4552 if ((devi
= e_ddi_hold_devi_by_dev(dev
, 0)) == NULL
)
4555 error
= cdev_prop_op(dev
, devi
, prop_op
,
4556 flags
, name
, (caddr_t
)&propvalue
, &proplength
);
4557 ddi_release_devi(devi
);
4559 if ((error
== DDI_PROP_SUCCESS
) && (proplength
== 0))
4566 * e_ddi_getprop_int64:
4568 * This is a typed interfaces, but predates typed properties. With the
4569 * introduction of typed properties the framework tries to ensure
4570 * consistent use of typed interfaces. This is why TYPE_INT64 is not
4571 * part of TYPE_ANY. E_ddi_getprop_int64 is a special case where a
4572 * typed interface invokes legacy (non-typed) interfaces:
4573 * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)). In this case the
4574 * fact that TYPE_INT64 is not part of TYPE_ANY matters. To support
4575 * this type of lookup as a single operation we invoke the legacy
4576 * non-typed interfaces with the special CONSUMER_TYPED bit set. The
4577 * framework ddi_prop_op(9F) implementation is expected to check for
4578 * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
4579 * (currently TYPE_INT64).
4582 e_ddi_getprop_int64(dev_t dev
, vtype_t type
, char *name
,
4583 int flags
, int64_t defvalue
)
4585 _NOTE(ARGUNUSED(type
))
4587 ddi_prop_op_t prop_op
= PROP_LEN_AND_VAL_BUF
;
4588 int64_t propvalue
= defvalue
;
4589 int proplength
= sizeof (propvalue
);
4592 if ((devi
= e_ddi_hold_devi_by_dev(dev
, 0)) == NULL
)
4595 error
= cdev_prop_op(dev
, devi
, prop_op
, flags
|
4596 DDI_PROP_CONSUMER_TYPED
, name
, (caddr_t
)&propvalue
, &proplength
);
4597 ddi_release_devi(devi
);
4599 if ((error
== DDI_PROP_SUCCESS
) && (proplength
== 0))
4606 * e_ddi_getproplen: See comments for ddi_getproplen.
4609 e_ddi_getproplen(dev_t dev
, vtype_t type
, char *name
, int flags
, int *lengthp
)
4611 _NOTE(ARGUNUSED(type
))
4613 ddi_prop_op_t prop_op
= PROP_LEN
;
4616 if ((devi
= e_ddi_hold_devi_by_dev(dev
, 0)) == NULL
)
4617 return (DDI_PROP_NOT_FOUND
);
4619 error
= cdev_prop_op(dev
, devi
, prop_op
, flags
, name
, NULL
, lengthp
);
4620 ddi_release_devi(devi
);
4625 * Routines to get at elements of the dev_info structure
4629 * ddi_binding_name: Return the driver binding name of the devinfo node
4630 * This is the name the OS used to bind the node to a driver.
4633 ddi_binding_name(dev_info_t
*dip
)
4635 return (DEVI(dip
)->devi_binding_name
);
4639 * ddi_driver_major: Return the major number of the driver that
4640 * the supplied devinfo is bound to. If not yet bound,
4643 * When used by the driver bound to 'devi', this
4644 * function will reliably return the driver major number.
4645 * Other ways of determining the driver major number, such as
4646 * major = ddi_name_to_major(ddi_get_name(devi));
4647 * major = ddi_name_to_major(ddi_binding_name(devi));
4648 * can return a different result as the driver/alias binding
4649 * can change dynamically, and thus should be avoided.
4652 ddi_driver_major(dev_info_t
*devi
)
4654 return (DEVI(devi
)->devi_major
);
4658 * ddi_driver_name: Return the normalized driver name. this is the
4659 * actual driver name
4662 ddi_driver_name(dev_info_t
*devi
)
4666 if ((major
= ddi_driver_major(devi
)) != DDI_MAJOR_T_NONE
)
4667 return (ddi_major_to_name(major
));
4669 return (ddi_node_name(devi
));
4673 * i_ddi_set_binding_name: Set binding name.
4675 * Set the binding name to the given name.
4676 * This routine is for use by the ddi implementation, not by drivers.
4679 i_ddi_set_binding_name(dev_info_t
*dip
, char *name
)
4681 DEVI(dip
)->devi_binding_name
= name
;
4686 * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
4687 * the implementation has used to bind the node to a driver.
4690 ddi_get_name(dev_info_t
*dip
)
4692 return (DEVI(dip
)->devi_binding_name
);
4696 * ddi_node_name: Return the name property of the devinfo node
4697 * This may differ from ddi_binding_name if the node name
4698 * does not define a binding to a driver (i.e. generic names).
4701 ddi_node_name(dev_info_t
*dip
)
4703 return (DEVI(dip
)->devi_node_name
);
4708 * ddi_get_nodeid: Get nodeid stored in dev_info structure.
4711 ddi_get_nodeid(dev_info_t
*dip
)
4713 return (DEVI(dip
)->devi_nodeid
);
4717 ddi_get_instance(dev_info_t
*dip
)
4719 return (DEVI(dip
)->devi_instance
);
4723 ddi_get_driver(dev_info_t
*dip
)
4725 return (DEVI(dip
)->devi_ops
);
4729 ddi_set_driver(dev_info_t
*dip
, struct dev_ops
*devo
)
4731 DEVI(dip
)->devi_ops
= devo
;
4735 * ddi_set_driver_private/ddi_get_driver_private:
4736 * Get/set device driver private data in devinfo.
4739 ddi_set_driver_private(dev_info_t
*dip
, void *data
)
4741 DEVI(dip
)->devi_driver_data
= data
;
4745 ddi_get_driver_private(dev_info_t
*dip
)
4747 return (DEVI(dip
)->devi_driver_data
);
4751 * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
4755 ddi_get_parent(dev_info_t
*dip
)
4757 return ((dev_info_t
*)DEVI(dip
)->devi_parent
);
4761 ddi_get_child(dev_info_t
*dip
)
4763 return ((dev_info_t
*)DEVI(dip
)->devi_child
);
4767 ddi_get_next_sibling(dev_info_t
*dip
)
4769 return ((dev_info_t
*)DEVI(dip
)->devi_sibling
);
4773 ddi_get_next(dev_info_t
*dip
)
4775 return ((dev_info_t
*)DEVI(dip
)->devi_next
);
4779 ddi_set_next(dev_info_t
*dip
, dev_info_t
*nextdip
)
4781 DEVI(dip
)->devi_next
= DEVI(nextdip
);
4785 * ddi_root_node: Return root node of devinfo tree
4791 extern dev_info_t
*top_devinfo
;
4793 return (top_devinfo
);
4797 * Miscellaneous functions:
4801 * Implementation specific hooks
4805 ddi_report_dev(dev_info_t
*d
)
4809 (void) ddi_ctlops(d
, d
, DDI_CTLOPS_REPORTDEV
, (void *)0, (void *)0);
4812 * If this devinfo node has cb_ops, it's implicitly accessible from
4813 * userland, so we print its full name together with the instance
4814 * number 'abbreviation' that the driver may use internally.
4816 if (DEVI(d
)->devi_ops
->devo_cb_ops
!= (struct cb_ops
*)0 &&
4817 (b
= kmem_zalloc(MAXPATHLEN
, KM_NOSLEEP
))) {
4818 cmn_err(CE_CONT
, "?%s%d is %s\n",
4819 ddi_driver_name(d
), ddi_get_instance(d
),
4820 ddi_pathname(d
, b
));
4821 kmem_free(b
, MAXPATHLEN
);
4826 * ddi_ctlops() is described in the assembler not to buy a new register
4827 * window when it's called and can reduce cost in climbing the device tree
4828 * without using the tail call optimization.
4831 ddi_dev_regsize(dev_info_t
*dev
, uint_t rnumber
, off_t
*result
)
4835 ret
= ddi_ctlops(dev
, dev
, DDI_CTLOPS_REGSIZE
,
4836 (void *)&rnumber
, (void *)result
);
4838 return (ret
== DDI_SUCCESS
? DDI_SUCCESS
: DDI_FAILURE
);
4842 ddi_dev_nregs(dev_info_t
*dev
, int *result
)
4844 return (ddi_ctlops(dev
, dev
, DDI_CTLOPS_NREGS
, 0, (void *)result
));
4848 ddi_dev_is_sid(dev_info_t
*d
)
4850 return (ddi_ctlops(d
, d
, DDI_CTLOPS_SIDDEV
, (void *)0, (void *)0));
4854 ddi_slaveonly(dev_info_t
*d
)
4856 return (ddi_ctlops(d
, d
, DDI_CTLOPS_SLAVEONLY
, (void *)0, (void *)0));
4860 ddi_dev_affinity(dev_info_t
*a
, dev_info_t
*b
)
4862 return (ddi_ctlops(a
, a
, DDI_CTLOPS_AFFINITY
, (void *)b
, (void *)0));
4866 ddi_streams_driver(dev_info_t
*dip
)
4868 if (i_ddi_devi_attached(dip
) &&
4869 (DEVI(dip
)->devi_ops
->devo_cb_ops
!= NULL
) &&
4870 (DEVI(dip
)->devi_ops
->devo_cb_ops
->cb_str
!= NULL
))
4871 return (DDI_SUCCESS
);
4872 return (DDI_FAILURE
);
4876 * callback free list
4879 static int ncallbacks
;
4880 static int nc_low
= 170;
4881 static int nc_med
= 512;
4882 static int nc_high
= 2048;
4883 static struct ddi_callback
*callbackq
;
4884 static struct ddi_callback
*callbackqfree
;
4887 * set/run callback lists
4890 kstat_named_t cb_asked
;
4891 kstat_named_t cb_new
;
4892 kstat_named_t cb_run
;
4893 kstat_named_t cb_delete
;
4894 kstat_named_t cb_maxreq
;
4895 kstat_named_t cb_maxlist
;
4896 kstat_named_t cb_alloc
;
4897 kstat_named_t cb_runouts
;
4898 kstat_named_t cb_L2
;
4899 kstat_named_t cb_grow
;
4901 {"asked", KSTAT_DATA_UINT32
},
4902 {"new", KSTAT_DATA_UINT32
},
4903 {"run", KSTAT_DATA_UINT32
},
4904 {"delete", KSTAT_DATA_UINT32
},
4905 {"maxreq", KSTAT_DATA_UINT32
},
4906 {"maxlist", KSTAT_DATA_UINT32
},
4907 {"alloc", KSTAT_DATA_UINT32
},
4908 {"runouts", KSTAT_DATA_UINT32
},
4909 {"L2", KSTAT_DATA_UINT32
},
4910 {"grow", KSTAT_DATA_UINT32
},
4913 #define nc_asked cb_asked.value.ui32
4914 #define nc_new cb_new.value.ui32
4915 #define nc_run cb_run.value.ui32
4916 #define nc_delete cb_delete.value.ui32
4917 #define nc_maxreq cb_maxreq.value.ui32
4918 #define nc_maxlist cb_maxlist.value.ui32
4919 #define nc_alloc cb_alloc.value.ui32
4920 #define nc_runouts cb_runouts.value.ui32
4921 #define nc_L2 cb_L2.value.ui32
4922 #define nc_grow cb_grow.value.ui32
4924 static kmutex_t ddi_callback_mutex
;
4927 * callbacks are handled using a L1/L2 cache. The L1 cache
4928 * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
4929 * we can't get callbacks from the L1 cache [because pageout is doing
4930 * I/O at the time freemem is 0], we allocate callbacks out of the
4931 * L2 cache. The L2 cache is static and depends on the memory size.
4932 * [We might also count the number of devices at probe time and
4933 * allocate one structure per device and adjust for deferred attach]
4936 impl_ddi_callback_init(void)
4942 physmegs
= physmem
>> (20 - PAGESHIFT
);
4943 if (physmegs
< 48) {
4944 ncallbacks
= nc_low
;
4945 } else if (physmegs
< 128) {
4946 ncallbacks
= nc_med
;
4948 ncallbacks
= nc_high
;
4954 callbackq
= kmem_zalloc(
4955 ncallbacks
* sizeof (struct ddi_callback
), KM_SLEEP
);
4956 for (i
= 0; i
< ncallbacks
-1; i
++)
4957 callbackq
[i
].c_nfree
= &callbackq
[i
+1];
4958 callbackqfree
= callbackq
;
4961 if (ksp
= kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED
,
4962 sizeof (cbstats
) / sizeof (kstat_named_t
), KSTAT_FLAG_VIRTUAL
)) {
4963 ksp
->ks_data
= (void *) &cbstats
;
4970 callback_insert(int (*funcp
)(caddr_t
), caddr_t arg
, uintptr_t *listid
,
4973 struct ddi_callback
*list
, *marker
, *new;
4974 size_t size
= sizeof (struct ddi_callback
);
4976 list
= marker
= (struct ddi_callback
*)*listid
;
4977 while (list
!= NULL
) {
4978 if (list
->c_call
== funcp
&& list
->c_arg
== arg
) {
4979 list
->c_count
+= count
;
4983 list
= list
->c_nlist
;
4985 new = kmem_alloc(size
, KM_NOSLEEP
);
4987 new = callbackqfree
;
4989 new = kmem_alloc_tryhard(sizeof (struct ddi_callback
),
4990 &size
, KM_NOSLEEP
| KM_PANIC
);
4993 callbackqfree
= new->c_nfree
;
4997 if (marker
!= NULL
) {
4998 marker
->c_nlist
= new;
5000 *listid
= (uintptr_t)new;
5003 new->c_nlist
= NULL
;
5004 new->c_call
= funcp
;
5006 new->c_count
= count
;
5009 if (cbstats
.nc_alloc
> cbstats
.nc_maxlist
)
5010 cbstats
.nc_maxlist
= cbstats
.nc_alloc
;
5014 ddi_set_callback(int (*funcp
)(caddr_t
), caddr_t arg
, uintptr_t *listid
)
5016 mutex_enter(&ddi_callback_mutex
);
5018 if ((cbstats
.nc_asked
- cbstats
.nc_run
) > cbstats
.nc_maxreq
)
5019 cbstats
.nc_maxreq
= (cbstats
.nc_asked
- cbstats
.nc_run
);
5020 (void) callback_insert(funcp
, arg
, listid
, 1);
5021 mutex_exit(&ddi_callback_mutex
);
5025 real_callback_run(void *Queue
)
5027 int (*funcp
)(caddr_t
);
5031 struct ddi_callback
*list
, *marker
;
5032 int check_pending
= 1;
5036 mutex_enter(&ddi_callback_mutex
);
5038 list
= (struct ddi_callback
*)*listid
;
5040 mutex_exit(&ddi_callback_mutex
);
5043 if (check_pending
) {
5045 while (marker
!= NULL
) {
5046 pending
+= marker
->c_count
;
5047 marker
= marker
->c_nlist
;
5051 ASSERT(pending
> 0);
5052 ASSERT(list
->c_count
> 0);
5053 funcp
= list
->c_call
;
5055 count
= list
->c_count
;
5056 *(uintptr_t *)Queue
= (uintptr_t)list
->c_nlist
;
5057 if (list
>= &callbackq
[0] &&
5058 list
<= &callbackq
[ncallbacks
-1]) {
5059 list
->c_nfree
= callbackqfree
;
5060 callbackqfree
= list
;
5062 kmem_free(list
, list
->c_size
);
5064 cbstats
.nc_delete
++;
5066 mutex_exit(&ddi_callback_mutex
);
5069 if ((rval
= (*funcp
)(arg
)) == 0) {
5071 mutex_enter(&ddi_callback_mutex
);
5072 (void) callback_insert(funcp
, arg
, listid
,
5074 cbstats
.nc_runouts
++;
5077 mutex_enter(&ddi_callback_mutex
);
5080 mutex_exit(&ddi_callback_mutex
);
5081 } while (rval
!= 0 && (--count
> 0));
5082 } while (pending
> 0);
5086 ddi_run_callback(uintptr_t *listid
)
5088 softcall(real_callback_run
, listid
);
5093 * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval,
5097 * Solaris DDI specific (Solaris DDI)
5100 * func: the callback function
5102 * The callback function will be invoked. The function is invoked
5103 * in kernel context if the argument level passed is the zero.
5104 * Otherwise it's invoked in interrupt context at the specified
5107 * arg: the argument passed to the callback function
5109 * interval: interval time
5111 * level : callback interrupt level
5113 * If the value is the zero, the callback function is invoked
5114 * in kernel context. If the value is more than the zero, but
5115 * less than or equal to ten, the callback function is invoked in
5116 * interrupt context at the specified interrupt level, which may
5117 * be used for real time applications.
5119 * This value must be in range of 0-10, which can be a numeric
5120 * number or a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10).
5123 * ddi_periodic_add(9F) schedules the specified function to be
5124 * periodically invoked in the interval time.
5126 * As well as timeout(9F), the exact time interval over which the function
5127 * takes effect cannot be guaranteed, but the value given is a close
5130 * Drivers waiting on behalf of processes with real-time constraints must
5131 * pass non-zero value with the level argument to ddi_periodic_add(9F).
5134 * ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t),
5135 * which must be used for ddi_periodic_delete(9F) to specify the request.
5138 * ddi_periodic_add(9F) can be called in user or kernel context, but
5139 * it cannot be called in interrupt context, which is different from
5143 ddi_periodic_add(void (*func
)(void *), void *arg
, hrtime_t interval
, int level
)
5146 * Sanity check of the argument level.
5148 if (level
< DDI_IPL_0
|| level
> DDI_IPL_10
)
5150 "ddi_periodic_add: invalid interrupt level (%d).", level
);
5153 * Sanity check of the context. ddi_periodic_add() cannot be
5154 * called in either interrupt context or high interrupt context.
5156 if (servicing_interrupt())
5158 "ddi_periodic_add: called in (high) interrupt context.");
5160 return ((ddi_periodic_t
)i_timeout(func
, arg
, interval
, level
));
5165 * ddi_periodic_delete(ddi_periodic_t req)
5168 * Solaris DDI specific (Solaris DDI)
5171 * req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned
5175 * ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request
5176 * previously requested.
5178 * ddi_periodic_delete(9F) will not return until the pending request
5179 * is canceled or executed.
5181 * As well as untimeout(9F), calling ddi_periodic_delete(9F) for a
5182 * timeout which is either running on another CPU, or has already
5183 * completed causes no problems. However, unlike untimeout(9F), there is
5184 * no restrictions on the lock which might be held across the call to
5185 * ddi_periodic_delete(9F).
5187 * Drivers should be structured with the understanding that the arrival of
5188 * both an interrupt and a timeout for that interrupt can occasionally
5189 * occur, in either order.
5192 * ddi_periodic_delete(9F) can be called in user or kernel context, but
5193 * it cannot be called in interrupt context, which is different from
5197 ddi_periodic_delete(ddi_periodic_t req
)
5200 * Sanity check of the context. ddi_periodic_delete() cannot be
5201 * called in either interrupt context or high interrupt context.
5203 if (servicing_interrupt())
5205 "ddi_periodic_delete: called in (high) interrupt context.");
5207 i_untimeout((timeout_t
)req
);
5211 nodevinfo(dev_t dev
, int otyp
)
5213 _NOTE(ARGUNUSED(dev
, otyp
))
5214 return ((dev_info_t
*)0);
5218 * A driver should support its own getinfo(9E) entry point. This function
5219 * is provided as a convenience for ON drivers that don't expect their
5220 * getinfo(9E) entry point to be called. A driver that uses this must not
5221 * call ddi_create_minor_node.
5224 ddi_no_info(dev_info_t
*dip
, ddi_info_cmd_t infocmd
, void *arg
, void **result
)
5226 _NOTE(ARGUNUSED(dip
, infocmd
, arg
, result
))
5227 return (DDI_FAILURE
);
5231 * A driver should support its own getinfo(9E) entry point. This function
5232 * is provided as a convenience for ON drivers that where the minor number
5233 * is the instance. Drivers that do not have 1:1 mapping must implement
5234 * their own getinfo(9E) function.
5237 ddi_getinfo_1to1(dev_info_t
*dip
, ddi_info_cmd_t infocmd
,
5238 void *arg
, void **result
)
5240 _NOTE(ARGUNUSED(dip
))
5243 if (infocmd
!= DDI_INFO_DEVT2INSTANCE
)
5244 return (DDI_FAILURE
);
5246 instance
= getminor((dev_t
)(uintptr_t)arg
);
5247 *result
= (void *)(uintptr_t)instance
;
5248 return (DDI_SUCCESS
);
5252 ddifail(dev_info_t
*devi
, ddi_attach_cmd_t cmd
)
5254 _NOTE(ARGUNUSED(devi
, cmd
))
5255 return (DDI_FAILURE
);
5259 ddi_no_dma_map(dev_info_t
*dip
, dev_info_t
*rdip
,
5260 struct ddi_dma_req
*dmareqp
, ddi_dma_handle_t
*handlep
)
5262 _NOTE(ARGUNUSED(dip
, rdip
, dmareqp
, handlep
))
5263 return (DDI_DMA_NOMAPPING
);
5267 ddi_no_dma_allochdl(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_dma_attr_t
*attr
,
5268 int (*waitfp
)(caddr_t
), caddr_t arg
, ddi_dma_handle_t
*handlep
)
5270 _NOTE(ARGUNUSED(dip
, rdip
, attr
, waitfp
, arg
, handlep
))
5271 return (DDI_DMA_BADATTR
);
5275 ddi_no_dma_freehdl(dev_info_t
*dip
, dev_info_t
*rdip
,
5276 ddi_dma_handle_t handle
)
5278 _NOTE(ARGUNUSED(dip
, rdip
, handle
))
5279 return (DDI_FAILURE
);
5283 ddi_no_dma_bindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
5284 ddi_dma_handle_t handle
, struct ddi_dma_req
*dmareq
,
5285 ddi_dma_cookie_t
*cp
, uint_t
*ccountp
)
5287 _NOTE(ARGUNUSED(dip
, rdip
, handle
, dmareq
, cp
, ccountp
))
5288 return (DDI_DMA_NOMAPPING
);
5292 ddi_no_dma_unbindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
5293 ddi_dma_handle_t handle
)
5295 _NOTE(ARGUNUSED(dip
, rdip
, handle
))
5296 return (DDI_FAILURE
);
5300 ddi_no_dma_flush(dev_info_t
*dip
, dev_info_t
*rdip
,
5301 ddi_dma_handle_t handle
, off_t off
, size_t len
,
5304 _NOTE(ARGUNUSED(dip
, rdip
, handle
, off
, len
, cache_flags
))
5305 return (DDI_FAILURE
);
5309 ddi_no_dma_win(dev_info_t
*dip
, dev_info_t
*rdip
,
5310 ddi_dma_handle_t handle
, uint_t win
, off_t
*offp
,
5311 size_t *lenp
, ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
)
5313 _NOTE(ARGUNUSED(dip
, rdip
, handle
, win
, offp
, lenp
, cookiep
, ccountp
))
5314 return (DDI_FAILURE
);
5318 ddi_no_dma_mctl(dev_info_t
*dip
, dev_info_t
*rdip
,
5319 ddi_dma_handle_t handle
, enum ddi_dma_ctlops request
,
5320 off_t
*offp
, size_t *lenp
, caddr_t
*objp
, uint_t flags
)
5322 _NOTE(ARGUNUSED(dip
, rdip
, handle
, request
, offp
, lenp
, objp
, flags
))
5323 return (DDI_FAILURE
);
5331 nochpoll(dev_t dev
, short events
, int anyyet
, short *reventsp
,
5332 struct pollhead
**pollhdrp
)
5334 _NOTE(ARGUNUSED(dev
, events
, anyyet
, reventsp
, pollhdrp
))
5347 return ((clock_t)lbolt_hybrid());
5351 ddi_get_lbolt64(void)
5353 return (lbolt_hybrid());
5361 if ((now
= gethrestime_sec()) == 0) {
5363 mutex_enter(&tod_lock
);
5365 mutex_exit(&tod_lock
);
5375 return (ttoproc(curthread
)->p_pid
);
5379 ddi_get_kt_did(void)
5381 return (curthread
->t_did
);
5385 * This function returns B_TRUE if the caller can reasonably expect that a call
5386 * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
5387 * by user-level signal. If it returns B_FALSE, then the caller should use
5388 * other means to make certain that the wait will not hang "forever."
5390 * It does not check the signal mask, nor for reception of any particular
5393 * Currently, a thread can receive a signal if it's not a kernel thread and it
5394 * is not in the middle of exit(2) tear-down. Threads that are in that
5395 * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
5396 * cv_timedwait, and qwait_sig to qwait.
5399 ddi_can_receive_sig(void)
5403 if (curthread
->t_proc_flag
& TP_LWPEXIT
)
5405 if ((pp
= ttoproc(curthread
)) == NULL
)
5407 return (pp
->p_as
!= &kas
);
5411 * Swap bytes in 16-bit [half-]words
5414 swab(void *src
, void *dst
, size_t nbytes
)
5416 uchar_t
*pf
= (uchar_t
*)src
;
5417 uchar_t
*pt
= (uchar_t
*)dst
;
5421 nshorts
= nbytes
>> 1;
5423 while (--nshorts
>= 0) {
5431 ddi_append_minor_node(dev_info_t
*ddip
, struct ddi_minor_data
*dmdp
)
5434 struct ddi_minor_data
*dp
;
5436 ndi_devi_enter(ddip
, &circ
);
5437 if ((dp
= DEVI(ddip
)->devi_minor
) == (struct ddi_minor_data
*)NULL
) {
5438 DEVI(ddip
)->devi_minor
= dmdp
;
5440 while (dp
->next
!= (struct ddi_minor_data
*)NULL
)
5444 ndi_devi_exit(ddip
, circ
);
5448 * Part of the obsolete SunCluster DDI Hooks.
5449 * Keep for binary compatibility
5452 ddi_getiminor(dev_t dev
)
5454 return (getminor(dev
));
5458 i_log_devfs_minor_create(dev_info_t
*dip
, char *minor_name
)
5463 char *pathname
, *class_name
;
5464 sysevent_t
*ev
= NULL
;
5466 sysevent_value_t se_val
;
5467 sysevent_attr_list_t
*ev_attr_list
= NULL
;
5469 /* determine interrupt context */
5470 se_flag
= (servicing_interrupt()) ? SE_NOSLEEP
: SE_SLEEP
;
5471 kmem_flag
= (se_flag
== SE_SLEEP
) ? KM_SLEEP
: KM_NOSLEEP
;
5473 i_ddi_di_cache_invalidate();
5476 if ((se_flag
== SE_NOSLEEP
) && sunddi_debug
) {
5477 cmn_err(CE_CONT
, "ddi_create_minor_node: called from "
5478 "interrupt level by driver %s",
5479 ddi_driver_name(dip
));
5483 ev
= sysevent_alloc(EC_DEVFS
, ESC_DEVFS_MINOR_CREATE
, EP_DDI
, se_flag
);
5488 pathname
= kmem_alloc(MAXPATHLEN
, kmem_flag
);
5489 if (pathname
== NULL
) {
5494 (void) ddi_pathname(dip
, pathname
);
5495 ASSERT(strlen(pathname
));
5496 se_val
.value_type
= SE_DATA_TYPE_STRING
;
5497 se_val
.value
.sv_string
= pathname
;
5498 if (sysevent_add_attr(&ev_attr_list
, DEVFS_PATHNAME
,
5499 &se_val
, se_flag
) != 0) {
5500 kmem_free(pathname
, MAXPATHLEN
);
5504 kmem_free(pathname
, MAXPATHLEN
);
5506 /* add the device class attribute */
5507 if ((class_name
= i_ddi_devi_class(dip
)) != NULL
) {
5508 se_val
.value_type
= SE_DATA_TYPE_STRING
;
5509 se_val
.value
.sv_string
= class_name
;
5510 if (sysevent_add_attr(&ev_attr_list
,
5511 DEVFS_DEVI_CLASS
, &se_val
, SE_SLEEP
) != 0) {
5512 sysevent_free_attr(ev_attr_list
);
5518 * allow for NULL minor names
5520 if (minor_name
!= NULL
) {
5521 se_val
.value
.sv_string
= minor_name
;
5522 if (sysevent_add_attr(&ev_attr_list
, DEVFS_MINOR_NAME
,
5523 &se_val
, se_flag
) != 0) {
5524 sysevent_free_attr(ev_attr_list
);
5530 if (sysevent_attach_attributes(ev
, ev_attr_list
) != 0) {
5531 sysevent_free_attr(ev_attr_list
);
5536 if ((se_err
= log_sysevent(ev
, se_flag
, &eid
)) != 0) {
5537 if (se_err
== SE_NO_TRANSPORT
) {
5538 cmn_err(CE_WARN
, "/devices or /dev may not be current "
5539 "for driver %s (%s). Run devfsadm -i %s",
5540 ddi_driver_name(dip
), "syseventd not responding",
5541 ddi_driver_name(dip
));
5549 return (DDI_SUCCESS
);
5551 cmn_err(CE_WARN
, "/devices or /dev may not be current "
5552 "for driver %s. Run devfsadm -i %s",
5553 ddi_driver_name(dip
), ddi_driver_name(dip
));
5554 return (DDI_SUCCESS
);
5558 * failing to remove a minor node is not of interest
5559 * therefore we do not generate an error message
5562 i_log_devfs_minor_remove(dev_info_t
*dip
, char *minor_name
)
5564 char *pathname
, *class_name
;
5567 sysevent_value_t se_val
;
5568 sysevent_attr_list_t
*ev_attr_list
= NULL
;
5571 * only log ddi_remove_minor_node() calls outside the scope
5572 * of attach/detach reconfigurations and when the dip is
5573 * still initialized.
5575 if (DEVI_IS_ATTACHING(dip
) || DEVI_IS_DETACHING(dip
) ||
5576 (i_ddi_node_state(dip
) < DS_INITIALIZED
)) {
5577 return (DDI_SUCCESS
);
5580 i_ddi_di_cache_invalidate();
5582 ev
= sysevent_alloc(EC_DEVFS
, ESC_DEVFS_MINOR_REMOVE
, EP_DDI
, SE_SLEEP
);
5584 return (DDI_SUCCESS
);
5587 pathname
= kmem_alloc(MAXPATHLEN
, KM_SLEEP
);
5588 if (pathname
== NULL
) {
5590 return (DDI_SUCCESS
);
5593 (void) ddi_pathname(dip
, pathname
);
5594 ASSERT(strlen(pathname
));
5595 se_val
.value_type
= SE_DATA_TYPE_STRING
;
5596 se_val
.value
.sv_string
= pathname
;
5597 if (sysevent_add_attr(&ev_attr_list
, DEVFS_PATHNAME
,
5598 &se_val
, SE_SLEEP
) != 0) {
5599 kmem_free(pathname
, MAXPATHLEN
);
5601 return (DDI_SUCCESS
);
5604 kmem_free(pathname
, MAXPATHLEN
);
5607 * allow for NULL minor names
5609 if (minor_name
!= NULL
) {
5610 se_val
.value
.sv_string
= minor_name
;
5611 if (sysevent_add_attr(&ev_attr_list
, DEVFS_MINOR_NAME
,
5612 &se_val
, SE_SLEEP
) != 0) {
5613 sysevent_free_attr(ev_attr_list
);
5618 if ((class_name
= i_ddi_devi_class(dip
)) != NULL
) {
5619 /* add the device class, driver name and instance attributes */
5621 se_val
.value_type
= SE_DATA_TYPE_STRING
;
5622 se_val
.value
.sv_string
= class_name
;
5623 if (sysevent_add_attr(&ev_attr_list
,
5624 DEVFS_DEVI_CLASS
, &se_val
, SE_SLEEP
) != 0) {
5625 sysevent_free_attr(ev_attr_list
);
5629 se_val
.value_type
= SE_DATA_TYPE_STRING
;
5630 se_val
.value
.sv_string
= (char *)ddi_driver_name(dip
);
5631 if (sysevent_add_attr(&ev_attr_list
,
5632 DEVFS_DRIVER_NAME
, &se_val
, SE_SLEEP
) != 0) {
5633 sysevent_free_attr(ev_attr_list
);
5637 se_val
.value_type
= SE_DATA_TYPE_INT32
;
5638 se_val
.value
.sv_int32
= ddi_get_instance(dip
);
5639 if (sysevent_add_attr(&ev_attr_list
,
5640 DEVFS_INSTANCE
, &se_val
, SE_SLEEP
) != 0) {
5641 sysevent_free_attr(ev_attr_list
);
5647 if (sysevent_attach_attributes(ev
, ev_attr_list
) != 0) {
5648 sysevent_free_attr(ev_attr_list
);
5650 (void) log_sysevent(ev
, SE_SLEEP
, &eid
);
5654 return (DDI_SUCCESS
);
5658 * Derive the device class of the node.
5659 * Device class names aren't defined yet. Until this is done we use
5660 * devfs event subclass names as device class names.
5663 derive_devi_class(dev_info_t
*dip
, char *node_type
, int flag
)
5665 int rv
= DDI_SUCCESS
;
5667 if (i_ddi_devi_class(dip
) == NULL
) {
5668 if (strncmp(node_type
, DDI_NT_BLOCK
,
5669 sizeof (DDI_NT_BLOCK
) - 1) == 0 &&
5670 (node_type
[sizeof (DDI_NT_BLOCK
) - 1] == '\0' ||
5671 node_type
[sizeof (DDI_NT_BLOCK
) - 1] == ':') &&
5672 strcmp(node_type
, DDI_NT_FD
) != 0) {
5674 rv
= i_ddi_set_devi_class(dip
, ESC_DISK
, flag
);
5676 } else if (strncmp(node_type
, DDI_NT_NET
,
5677 sizeof (DDI_NT_NET
) - 1) == 0 &&
5678 (node_type
[sizeof (DDI_NT_NET
) - 1] == '\0' ||
5679 node_type
[sizeof (DDI_NT_NET
) - 1] == ':')) {
5681 rv
= i_ddi_set_devi_class(dip
, ESC_NETWORK
, flag
);
5683 } else if (strncmp(node_type
, DDI_NT_PRINTER
,
5684 sizeof (DDI_NT_PRINTER
) - 1) == 0 &&
5685 (node_type
[sizeof (DDI_NT_PRINTER
) - 1] == '\0' ||
5686 node_type
[sizeof (DDI_NT_PRINTER
) - 1] == ':')) {
5688 rv
= i_ddi_set_devi_class(dip
, ESC_PRINTER
, flag
);
5690 } else if (strncmp(node_type
, DDI_PSEUDO
,
5691 sizeof (DDI_PSEUDO
) -1) == 0 &&
5692 (strncmp(ESC_LOFI
, ddi_node_name(dip
),
5693 sizeof (ESC_LOFI
) -1) == 0)) {
5694 rv
= i_ddi_set_devi_class(dip
, ESC_LOFI
, flag
);
5702 * Check compliance with PSARC 2003/375:
5704 * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
5705 * exceed IFNAMSIZ (16) characters in length.
5708 verify_name(char *name
)
5710 size_t len
= strlen(name
);
5713 if (len
== 0 || len
> IFNAMSIZ
)
5716 for (cp
= name
; *cp
!= '\0'; cp
++) {
5717 if (!isalnum(*cp
) && *cp
!= '_')
5725 * ddi_create_minor_common: Create a ddi_minor_data structure and
5726 * attach it to the given devinfo node.
5730 ddi_create_minor_common(dev_info_t
*dip
, char *name
, int spec_type
,
5731 minor_t minor_num
, char *node_type
, int flag
, ddi_minor_type mtype
,
5732 const char *read_priv
, const char *write_priv
, mode_t priv_mode
)
5734 struct ddi_minor_data
*dmdp
;
5737 if (spec_type
!= S_IFCHR
&& spec_type
!= S_IFBLK
)
5738 return (DDI_FAILURE
);
5741 return (DDI_FAILURE
);
5744 * Log a message if the minor number the driver is creating
5745 * is not expressible on the on-disk filesystem (currently
5746 * this is limited to 18 bits both by UFS). The device can
5747 * be opened via devfs, but not by device special files created
5750 if (minor_num
> L_MAXMIN32
) {
5752 "%s%d:%s minor 0x%x too big for 32-bit applications",
5753 ddi_driver_name(dip
), ddi_get_instance(dip
),
5755 return (DDI_FAILURE
);
5758 /* dip must be bound and attached */
5759 major
= ddi_driver_major(dip
);
5760 ASSERT(major
!= DDI_MAJOR_T_NONE
);
5763 * Default node_type to DDI_PSEUDO and issue notice in debug mode
5765 if (node_type
== NULL
) {
5766 node_type
= DDI_PSEUDO
;
5767 NDI_CONFIG_DEBUG((CE_NOTE
, "!illegal node_type NULL for %s%d "
5768 " minor node %s; default to DDI_PSEUDO",
5769 ddi_driver_name(dip
), ddi_get_instance(dip
), name
));
5773 * If the driver is a network driver, ensure that the name falls within
5774 * the interface naming constraints specified by PSARC/2003/375.
5776 if (strcmp(node_type
, DDI_NT_NET
) == 0) {
5777 if (!verify_name(name
))
5778 return (DDI_FAILURE
);
5780 if (mtype
== DDM_MINOR
) {
5781 struct devnames
*dnp
= &devnamesp
[major
];
5783 /* Mark driver as a network driver */
5784 LOCK_DEV_OPS(&dnp
->dn_lock
);
5785 dnp
->dn_flags
|= DN_NETWORK_DRIVER
;
5788 * If this minor node is created during the device
5789 * attachment, this is a physical network device.
5790 * Mark the driver as a physical network driver.
5792 if (DEVI_IS_ATTACHING(dip
))
5793 dnp
->dn_flags
|= DN_NETWORK_PHYSDRIVER
;
5794 UNLOCK_DEV_OPS(&dnp
->dn_lock
);
5798 if (mtype
== DDM_MINOR
) {
5799 if (derive_devi_class(dip
, node_type
, KM_NOSLEEP
) !=
5801 return (DDI_FAILURE
);
5805 * Take care of minor number information for the node.
5808 if ((dmdp
= kmem_zalloc(sizeof (struct ddi_minor_data
),
5809 KM_NOSLEEP
)) == NULL
) {
5810 return (DDI_FAILURE
);
5812 if ((dmdp
->ddm_name
= i_ddi_strdup(name
, KM_NOSLEEP
)) == NULL
) {
5813 kmem_free(dmdp
, sizeof (struct ddi_minor_data
));
5814 return (DDI_FAILURE
);
5817 dmdp
->ddm_dev
= makedevice(major
, minor_num
);
5818 dmdp
->ddm_spec_type
= spec_type
;
5819 dmdp
->ddm_node_type
= node_type
;
5821 if (flag
& CLONE_DEV
) {
5822 dmdp
->type
= DDM_ALIAS
;
5823 dmdp
->ddm_dev
= makedevice(ddi_driver_major(clone_dip
), major
);
5825 if (flag
& PRIVONLY_DEV
) {
5826 dmdp
->ddm_flags
|= DM_NO_FSPERM
;
5828 if (read_priv
|| write_priv
) {
5829 dmdp
->ddm_node_priv
=
5830 devpolicy_priv_by_name(read_priv
, write_priv
);
5832 dmdp
->ddm_priv_mode
= priv_mode
;
5834 ddi_append_minor_node(dip
, dmdp
);
5837 * only log ddi_create_minor_node() calls which occur
5838 * outside the scope of attach(9e)/detach(9e) reconfigurations
5840 if (!(DEVI_IS_ATTACHING(dip
) || DEVI_IS_DETACHING(dip
)) &&
5841 mtype
!= DDM_INTERNAL_PATH
) {
5842 (void) i_log_devfs_minor_create(dip
, name
);
5846 * Check if any dacf rules match the creation of this minor node
5848 dacfc_match_create_minor(name
, node_type
, dip
, dmdp
, flag
);
5849 return (DDI_SUCCESS
);
5853 ddi_create_minor_node(dev_info_t
*dip
, char *name
, int spec_type
,
5854 minor_t minor_num
, char *node_type
, int flag
)
5856 return (ddi_create_minor_common(dip
, name
, spec_type
, minor_num
,
5857 node_type
, flag
, DDM_MINOR
, NULL
, NULL
, 0));
5861 ddi_create_priv_minor_node(dev_info_t
*dip
, char *name
, int spec_type
,
5862 minor_t minor_num
, char *node_type
, int flag
,
5863 const char *rdpriv
, const char *wrpriv
, mode_t priv_mode
)
5865 return (ddi_create_minor_common(dip
, name
, spec_type
, minor_num
,
5866 node_type
, flag
, DDM_MINOR
, rdpriv
, wrpriv
, priv_mode
));
5870 ddi_create_default_minor_node(dev_info_t
*dip
, char *name
, int spec_type
,
5871 minor_t minor_num
, char *node_type
, int flag
)
5873 return (ddi_create_minor_common(dip
, name
, spec_type
, minor_num
,
5874 node_type
, flag
, DDM_DEFAULT
, NULL
, NULL
, 0));
5878 * Internal (non-ddi) routine for drivers to export names known
5879 * to the kernel (especially ddi_pathname_to_dev_t and friends)
5880 * but not exported externally to /dev
5883 ddi_create_internal_pathname(dev_info_t
*dip
, char *name
, int spec_type
,
5886 return (ddi_create_minor_common(dip
, name
, spec_type
, minor_num
,
5887 "internal", 0, DDM_INTERNAL_PATH
, NULL
, NULL
, 0));
5891 ddi_remove_minor_node(dev_info_t
*dip
, char *name
)
5894 struct ddi_minor_data
*dmdp
, *dmdp1
;
5895 struct ddi_minor_data
**dmdp_prev
;
5897 ndi_devi_enter(dip
, &circ
);
5898 dmdp_prev
= &DEVI(dip
)->devi_minor
;
5899 dmdp
= DEVI(dip
)->devi_minor
;
5900 while (dmdp
!= NULL
) {
5902 if ((name
== NULL
|| (dmdp
->ddm_name
!= NULL
&&
5903 strcmp(name
, dmdp
->ddm_name
) == 0))) {
5904 if (dmdp
->ddm_name
!= NULL
) {
5905 if (dmdp
->type
!= DDM_INTERNAL_PATH
)
5906 (void) i_log_devfs_minor_remove(dip
,
5908 kmem_free(dmdp
->ddm_name
,
5909 strlen(dmdp
->ddm_name
) + 1);
5912 * Release device privilege, if any.
5913 * Release dacf client data associated with this minor
5914 * node by storing NULL.
5916 if (dmdp
->ddm_node_priv
)
5917 dpfree(dmdp
->ddm_node_priv
);
5918 dacf_store_info((dacf_infohdl_t
)dmdp
, NULL
);
5919 kmem_free(dmdp
, sizeof (struct ddi_minor_data
));
5922 * OK, we found it, so get out now -- if we drive on,
5923 * we will strcmp against garbage. See 1139209.
5928 dmdp_prev
= &dmdp
->next
;
5932 ndi_devi_exit(dip
, circ
);
5939 return (panicstr
!= NULL
);
/*
 * Find first bit set in a mask (returned counting from 1 up)
 */
int
ddi_ffs(long mask)
{
	return (ffs(mask));
}

/*
 * Find last bit set. Take mask and clear
 * all but the most significant bit, and
 * then let ffs do the rest of the work.
 *
 * Algorithm courtesy of Steve Chessin.
 */
int
ddi_fls(long mask)
{
	while (mask) {
		long nx;

		if ((nx = (mask & (mask - 1))) == 0)
			break;
		mask = nx;
	}
	return (ffs(mask));
}
5975 * The ddi_soft_state_* routines comprise generic storage management utilities
5976 * for driver soft state structures (in "the old days," this was done with
5977 * statically sized array - big systems and dynamic loading and unloading
5978 * make heap allocation more attractive).
5982 * Allocate a set of pointers to 'n_items' objects of size 'size'
5983 * bytes. Each pointer is initialized to nil.
5985 * The 'size' and 'n_items' values are stashed in the opaque
5986 * handle returned to the caller.
5988 * This implementation interprets 'set of pointers' to mean 'array
5989 * of pointers' but note that nothing in the interface definition
5990 * precludes an implementation that uses, for example, a linked list.
5991 * However there should be a small efficiency gain from using an array
5994 * NOTE As an optimization, we make our growable array allocations in
5995 * powers of two (bytes), since that's how much kmem_alloc (currently)
5996 * gives us anyway. It should save us some free/realloc's ..
5998 * As a further optimization, we make the growable array start out
5999 * with MIN_N_ITEMS in it.
6002 #define MIN_N_ITEMS 8 /* 8 void *'s == 32 bytes */
6005 ddi_soft_state_init(void **state_p
, size_t size
, size_t n_items
)
6007 i_ddi_soft_state
*ss
;
6009 if (state_p
== NULL
|| size
== 0)
6012 ss
= kmem_zalloc(sizeof (*ss
), KM_SLEEP
);
6013 mutex_init(&ss
->lock
, NULL
, MUTEX_DRIVER
, NULL
);
6016 if (n_items
< MIN_N_ITEMS
)
6017 ss
->n_items
= MIN_N_ITEMS
;
6021 if ((bitlog
= ddi_fls(n_items
)) == ddi_ffs(n_items
))
6023 ss
->n_items
= 1 << bitlog
;
6026 ASSERT(ss
->n_items
>= n_items
);
6028 ss
->array
= kmem_zalloc(ss
->n_items
* sizeof (void *), KM_SLEEP
);
6035 * Allocate a state structure of size 'size' to be associated
6038 * In this implementation, the array is extended to
6039 * allow the requested offset, if needed.
6042 ddi_soft_state_zalloc(void *state
, int item
)
6044 i_ddi_soft_state
*ss
= (i_ddi_soft_state
*)state
;
6048 if ((state
== NULL
) || (item
< 0))
6049 return (DDI_FAILURE
);
6051 mutex_enter(&ss
->lock
);
6052 if (ss
->size
== 0) {
6053 mutex_exit(&ss
->lock
);
6054 cmn_err(CE_WARN
, "ddi_soft_state_zalloc: bad handle: %s",
6055 mod_containing_pc(caller()));
6056 return (DDI_FAILURE
);
6059 array
= ss
->array
; /* NULL if ss->n_items == 0 */
6060 ASSERT(ss
->n_items
!= 0 && array
!= NULL
);
6063 * refuse to tread on an existing element
6065 if (item
< ss
->n_items
&& array
[item
] != NULL
) {
6066 mutex_exit(&ss
->lock
);
6067 return (DDI_FAILURE
);
6071 * Allocate a new element to plug in
6073 new_element
= kmem_zalloc(ss
->size
, KM_SLEEP
);
6076 * Check if the array is big enough, if not, grow it.
6078 if (item
>= ss
->n_items
) {
6081 struct i_ddi_soft_state
*dirty
;
6084 * Allocate a new array of the right length, copy
6085 * all the old pointers to the new array, then
6086 * if it exists at all, put the old array on the
6089 * Note that we can't kmem_free() the old array.
6091 * Why -- well the 'get' operation is 'mutex-free', so we
6092 * can't easily catch a suspended thread that is just about
6093 * to dereference the array we just grew out of. So we
6094 * cons up a header and put it on a list of 'dirty'
6095 * pointer arrays. (Dirty in the sense that there may
6096 * be suspended threads somewhere that are in the middle
6097 * of referencing them). Fortunately, we -can- garbage
6098 * collect it all at ddi_soft_state_fini time.
6100 new_n_items
= ss
->n_items
;
6101 while (new_n_items
< (1 + item
))
6102 new_n_items
<<= 1; /* double array size .. */
6104 ASSERT(new_n_items
>= (1 + item
)); /* sanity check! */
6106 new_array
= kmem_zalloc(new_n_items
* sizeof (void *),
6109 * Copy the pointers into the new array
6111 bcopy(array
, new_array
, ss
->n_items
* sizeof (void *));
6114 * Save the old array on the dirty list
6116 dirty
= kmem_zalloc(sizeof (*dirty
), KM_SLEEP
);
6117 dirty
->array
= ss
->array
;
6118 dirty
->n_items
= ss
->n_items
;
6119 dirty
->next
= ss
->next
;
6122 ss
->array
= (array
= new_array
);
6123 ss
->n_items
= new_n_items
;
6126 ASSERT(array
!= NULL
&& item
< ss
->n_items
&& array
[item
] == NULL
);
6128 array
[item
] = new_element
;
6130 mutex_exit(&ss
->lock
);
6131 return (DDI_SUCCESS
);
6135 * Fetch a pointer to the allocated soft state structure.
6137 * This is designed to be cheap.
6139 * There's an argument that there should be more checking for
6140 * nil pointers and out of bounds on the array.. but we do a lot
6141 * of that in the alloc/free routines.
6143 * An array has the convenience that we don't need to lock read-access
6144 * to it c.f. a linked list. However our "expanding array" strategy
6145 * means that we should hold a readers lock on the i_ddi_soft_state
6148 * However, from a performance viewpoint, we need to do it without
6149 * any locks at all -- this also makes it a leaf routine. The algorithm
6150 * is 'lock-free' because we only discard the pointer arrays at
6151 * ddi_soft_state_fini() time.
6154 ddi_get_soft_state(void *state
, int item
)
6156 i_ddi_soft_state
*ss
= (i_ddi_soft_state
*)state
;
6158 ASSERT((ss
!= NULL
) && (item
>= 0));
6160 if (item
< ss
->n_items
&& ss
->array
!= NULL
)
6161 return (ss
->array
[item
]);
6166 * Free the state structure corresponding to 'item.' Freeing an
6167 * element that has either gone or was never allocated is not
6168 * considered an error. Note that we free the state structure, but
6169 * we don't shrink our pointer array, or discard 'dirty' arrays,
6170 * since even a few pointers don't really waste too much memory.
6172 * Passing an item number that is out of bounds, or a null pointer will
6173 * provoke an error message.
6176 ddi_soft_state_free(void *state
, int item
)
6178 i_ddi_soft_state
*ss
= (i_ddi_soft_state
*)state
;
6181 static char msg
[] = "ddi_soft_state_free:";
6184 cmn_err(CE_WARN
, "%s null handle: %s",
6185 msg
, mod_containing_pc(caller()));
6191 mutex_enter(&ss
->lock
);
6193 if ((array
= ss
->array
) == NULL
|| ss
->size
== 0) {
6194 cmn_err(CE_WARN
, "%s bad handle: %s",
6195 msg
, mod_containing_pc(caller()));
6196 } else if (item
< 0 || item
>= ss
->n_items
) {
6197 cmn_err(CE_WARN
, "%s item %d not in range [0..%lu]: %s",
6198 msg
, item
, ss
->n_items
- 1, mod_containing_pc(caller()));
6199 } else if (array
[item
] != NULL
) {
6200 element
= array
[item
];
6204 mutex_exit(&ss
->lock
);
6207 kmem_free(element
, ss
->size
);
6211 * Free the entire set of pointers, and any
6212 * soft state structures contained therein.
6214 * Note that we don't grab the ss->lock mutex, even though
6215 * we're inspecting the various fields of the data structure.
6217 * There is an implicit assumption that this routine will
6218 * never run concurrently with any of the above on this
6219 * particular state structure i.e. by the time the driver
6220 * calls this routine, there should be no other threads
6221 * running in the driver.
6224 ddi_soft_state_fini(void **state_p
)
6226 i_ddi_soft_state
*ss
, *dirty
;
6228 static char msg
[] = "ddi_soft_state_fini:";
6230 if (state_p
== NULL
||
6231 (ss
= (i_ddi_soft_state
*)(*state_p
)) == NULL
) {
6232 cmn_err(CE_WARN
, "%s null handle: %s",
6233 msg
, mod_containing_pc(caller()));
6237 if (ss
->size
== 0) {
6238 cmn_err(CE_WARN
, "%s bad handle: %s",
6239 msg
, mod_containing_pc(caller()));
6243 if (ss
->n_items
> 0) {
6244 for (item
= 0; item
< ss
->n_items
; item
++)
6245 ddi_soft_state_free(ss
, item
);
6246 kmem_free(ss
->array
, ss
->n_items
* sizeof (void *));
6250 * Now delete any dirty arrays from previous 'grow' operations
6252 for (dirty
= ss
->next
; dirty
; dirty
= ss
->next
) {
6253 ss
->next
= dirty
->next
;
6254 kmem_free(dirty
->array
, dirty
->n_items
* sizeof (void *));
6255 kmem_free(dirty
, sizeof (*dirty
));
6258 mutex_destroy(&ss
->lock
);
6259 kmem_free(ss
, sizeof (*ss
));
6264 #define SS_N_ITEMS_PER_HASH 16
6265 #define SS_MIN_HASH_SZ 16
6266 #define SS_MAX_HASH_SZ 4096
6269 ddi_soft_state_bystr_init(ddi_soft_state_bystr
**state_p
, size_t size
,
6272 i_ddi_soft_state_bystr
*sss
;
6275 ASSERT(state_p
&& size
&& n_items
);
6276 if ((state_p
== NULL
) || (size
== 0) || (n_items
== 0))
6279 /* current implementation is based on hash, convert n_items to hash */
6280 hash_sz
= n_items
/ SS_N_ITEMS_PER_HASH
;
6281 if (hash_sz
< SS_MIN_HASH_SZ
)
6282 hash_sz
= SS_MIN_HASH_SZ
;
6283 else if (hash_sz
> SS_MAX_HASH_SZ
)
6284 hash_sz
= SS_MAX_HASH_SZ
;
6286 /* allocate soft_state pool */
6287 sss
= kmem_zalloc(sizeof (*sss
), KM_SLEEP
);
6288 sss
->ss_size
= size
;
6289 sss
->ss_mod_hash
= mod_hash_create_strhash("soft_state_bystr",
6290 hash_sz
, mod_hash_null_valdtor
);
6291 *state_p
= (ddi_soft_state_bystr
*)sss
;
6296 ddi_soft_state_bystr_zalloc(ddi_soft_state_bystr
*state
, const char *str
)
6298 i_ddi_soft_state_bystr
*sss
= (i_ddi_soft_state_bystr
*)state
;
6302 ASSERT(sss
&& str
&& sss
->ss_mod_hash
);
6303 if ((sss
== NULL
) || (str
== NULL
) || (sss
->ss_mod_hash
== NULL
))
6304 return (DDI_FAILURE
);
6305 sso
= kmem_zalloc(sss
->ss_size
, KM_SLEEP
);
6306 dup_str
= i_ddi_strdup((char *)str
, KM_SLEEP
);
6307 if (mod_hash_insert(sss
->ss_mod_hash
,
6308 (mod_hash_key_t
)dup_str
, (mod_hash_val_t
)sso
) == 0)
6309 return (DDI_SUCCESS
);
6312 * The only error from an strhash insert is caused by a duplicate key.
6313 * We refuse to tread on an existing elements, so free and fail.
6315 kmem_free(dup_str
, strlen(dup_str
) + 1);
6316 kmem_free(sso
, sss
->ss_size
);
6317 return (DDI_FAILURE
);
6321 ddi_soft_state_bystr_get(ddi_soft_state_bystr
*state
, const char *str
)
6323 i_ddi_soft_state_bystr
*sss
= (i_ddi_soft_state_bystr
*)state
;
6326 ASSERT(sss
&& str
&& sss
->ss_mod_hash
);
6327 if ((sss
== NULL
) || (str
== NULL
) || (sss
->ss_mod_hash
== NULL
))
6330 if (mod_hash_find(sss
->ss_mod_hash
,
6331 (mod_hash_key_t
)str
, (mod_hash_val_t
*)&sso
) == 0)
6337 ddi_soft_state_bystr_free(ddi_soft_state_bystr
*state
, const char *str
)
6339 i_ddi_soft_state_bystr
*sss
= (i_ddi_soft_state_bystr
*)state
;
6342 ASSERT(sss
&& str
&& sss
->ss_mod_hash
);
6343 if ((sss
== NULL
) || (str
== NULL
) || (sss
->ss_mod_hash
== NULL
))
6346 (void) mod_hash_remove(sss
->ss_mod_hash
,
6347 (mod_hash_key_t
)str
, (mod_hash_val_t
*)&sso
);
6348 kmem_free(sso
, sss
->ss_size
);
6352 ddi_soft_state_bystr_fini(ddi_soft_state_bystr
**state_p
)
6354 i_ddi_soft_state_bystr
*sss
;
6357 if (state_p
== NULL
)
6360 sss
= (i_ddi_soft_state_bystr
*)(*state_p
);
6364 ASSERT(sss
->ss_mod_hash
);
6365 if (sss
->ss_mod_hash
) {
6366 mod_hash_destroy_strhash(sss
->ss_mod_hash
);
6367 sss
->ss_mod_hash
= NULL
;
6370 kmem_free(sss
, sizeof (*sss
));
6375 * The ddi_strid_* routines provide string-to-index management utilities.
6377 /* allocate and initialize an strid set */
6379 ddi_strid_init(ddi_strid
**strid_p
, int n_items
)
6384 if (strid_p
== NULL
)
6385 return (DDI_FAILURE
);
6387 /* current implementation is based on hash, convert n_items to hash */
6388 hash_sz
= n_items
/ SS_N_ITEMS_PER_HASH
;
6389 if (hash_sz
< SS_MIN_HASH_SZ
)
6390 hash_sz
= SS_MIN_HASH_SZ
;
6391 else if (hash_sz
> SS_MAX_HASH_SZ
)
6392 hash_sz
= SS_MAX_HASH_SZ
;
6394 ss
= kmem_alloc(sizeof (*ss
), KM_SLEEP
);
6395 ss
->strid_chunksz
= n_items
;
6396 ss
->strid_spacesz
= n_items
;
6397 ss
->strid_space
= id_space_create("strid", 1, n_items
);
6398 ss
->strid_bystr
= mod_hash_create_strhash("strid_bystr", hash_sz
,
6399 mod_hash_null_valdtor
);
6400 ss
->strid_byid
= mod_hash_create_idhash("strid_byid", hash_sz
,
6401 mod_hash_null_valdtor
);
6402 *strid_p
= (ddi_strid
*)ss
;
6403 return (DDI_SUCCESS
);
6406 /* allocate an id mapping within the specified set for str, return id */
6408 i_ddi_strid_alloc(ddi_strid
*strid
, char *str
)
6410 i_ddi_strid
*ss
= (i_ddi_strid
*)strid
;
6415 if ((ss
== NULL
) || (str
== NULL
))
6419 * Allocate an id using VM_FIRSTFIT in order to keep allocated id
6420 * range as compressed as possible. This is important to minimize
6421 * the amount of space used when the id is used as a ddi_soft_state
6422 * index by the caller.
6424 * If the id list is exhausted, increase the size of the list
6425 * by the chuck size specified in ddi_strid_init and reattempt
6428 if ((id
= id_allocff_nosleep(ss
->strid_space
)) == (id_t
)-1) {
6429 id_space_extend(ss
->strid_space
, ss
->strid_spacesz
,
6430 ss
->strid_spacesz
+ ss
->strid_chunksz
);
6431 ss
->strid_spacesz
+= ss
->strid_chunksz
;
6432 if ((id
= id_allocff_nosleep(ss
->strid_space
)) == (id_t
)-1)
6437 * NOTE: since we create and destroy in unison we can save space by
6438 * using bystr key as the byid value. This means destroy must occur
6439 * in (byid, bystr) order.
6441 s
= i_ddi_strdup(str
, KM_SLEEP
);
6442 if (mod_hash_insert(ss
->strid_bystr
, (mod_hash_key_t
)s
,
6443 (mod_hash_val_t
)(intptr_t)id
) != 0) {
6444 ddi_strid_free(strid
, id
);
6447 if (mod_hash_insert(ss
->strid_byid
, (mod_hash_key_t
)(intptr_t)id
,
6448 (mod_hash_val_t
)s
) != 0) {
6449 ddi_strid_free(strid
, id
);
6453 /* NOTE: s if freed on mod_hash_destroy by mod_hash_strval_dtor */
6457 /* allocate an id mapping within the specified set for str, return id */
6459 ddi_strid_alloc(ddi_strid
*strid
, char *str
)
6461 return (i_ddi_strid_alloc(strid
, str
));
6464 /* return the id within the specified strid given the str */
6466 ddi_strid_str2id(ddi_strid
*strid
, char *str
)
6468 i_ddi_strid
*ss
= (i_ddi_strid
*)strid
;
6473 if (ss
&& str
&& (mod_hash_find(ss
->strid_bystr
,
6474 (mod_hash_key_t
)str
, &hv
) == 0))
6475 id
= (int)(intptr_t)hv
;
6479 /* return str within the specified strid given the id */
6481 ddi_strid_id2str(ddi_strid
*strid
, id_t id
)
6483 i_ddi_strid
*ss
= (i_ddi_strid
*)strid
;
6487 ASSERT(ss
&& id
> 0);
6488 if (ss
&& (id
> 0) && (mod_hash_find(ss
->strid_byid
,
6489 (mod_hash_key_t
)(uintptr_t)id
, &hv
) == 0))
6494 /* free the id mapping within the specified strid */
6496 ddi_strid_free(ddi_strid
*strid
, id_t id
)
6498 i_ddi_strid
*ss
= (i_ddi_strid
*)strid
;
6501 ASSERT(ss
&& id
> 0);
6502 if ((ss
== NULL
) || (id
<= 0))
6505 /* bystr key is byid value: destroy order must be (byid, bystr) */
6506 str
= ddi_strid_id2str(strid
, id
);
6507 (void) mod_hash_destroy(ss
->strid_byid
, (mod_hash_key_t
)(uintptr_t)id
);
6508 id_free(ss
->strid_space
, id
);
6511 (void) mod_hash_destroy(ss
->strid_bystr
, (mod_hash_key_t
)str
);
6514 /* destroy the strid set */
6516 ddi_strid_fini(ddi_strid
**strid_p
)
6521 if (strid_p
== NULL
)
6524 ss
= (i_ddi_strid
*)(*strid_p
);
6528 /* bystr key is byid value: destroy order must be (byid, bystr) */
6530 mod_hash_destroy_hash(ss
->strid_byid
);
6532 mod_hash_destroy_hash(ss
->strid_bystr
);
6533 if (ss
->strid_space
)
6534 id_space_destroy(ss
->strid_space
);
6535 kmem_free(ss
, sizeof (*ss
));
6540 * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
6541 * Storage is double buffered to prevent updates during devi_addr use -
6542 * double buffering is adaquate for reliable ddi_deviname() consumption.
6543 * The double buffer is not freed until dev_info structure destruction
6544 * (by i_ddi_free_node).
6547 ddi_set_name_addr(dev_info_t
*dip
, char *name
)
6549 char *buf
= DEVI(dip
)->devi_addr_buf
;
6553 buf
= kmem_zalloc(2 * MAXNAMELEN
, KM_SLEEP
);
6554 DEVI(dip
)->devi_addr_buf
= buf
;
6558 ASSERT(strlen(name
) < MAXNAMELEN
);
6559 newaddr
= (DEVI(dip
)->devi_addr
== buf
) ?
6560 (buf
+ MAXNAMELEN
) : buf
;
6561 (void) strlcpy(newaddr
, name
, MAXNAMELEN
);
6565 DEVI(dip
)->devi_addr
= newaddr
;
6569 ddi_get_name_addr(dev_info_t
*dip
)
6571 return (DEVI(dip
)->devi_addr
);
6575 ddi_set_parent_data(dev_info_t
*dip
, void *pd
)
6577 DEVI(dip
)->devi_parent_data
= pd
;
6581 ddi_get_parent_data(dev_info_t
*dip
)
6583 return (DEVI(dip
)->devi_parent_data
);
6587 * ddi_name_to_major: returns the major number of a named module,
6588 * derived from the current driver alias binding.
6590 * Caveat: drivers should avoid the use of this function, in particular
6591 * together with ddi_get_name/ddi_binding name, as per
6592 * major = ddi_name_to_major(ddi_get_name(devi));
6593 * ddi_name_to_major() relies on the state of the device/alias binding,
6594 * which can and does change dynamically as aliases are administered
6595 * over time. An attached device instance cannot rely on the major
6596 * number returned by ddi_name_to_major() to match its own major number.
6598 * For driver use, ddi_driver_major() reliably returns the major number
6599 * for the module to which the device was bound at attach time over
6600 * the life of the instance.
6601 * major = ddi_driver_major(dev_info_t *)
6604 ddi_name_to_major(char *name
)
6606 return (mod_name_to_major(name
));
6610 * ddi_major_to_name: Returns the module name bound to a major number.
6613 ddi_major_to_name(major_t major
)
6615 return (mod_major_to_name(major
));
6619 * Return the name of the devinfo node pointed at by 'dip' in the buffer
6620 * pointed at by 'name.' A devinfo node is named as a result of calling
6623 * Note: the driver must be held before calling this function!
6626 ddi_deviname(dev_info_t
*dip
, char *name
)
6631 if (dip
== ddi_root_node()) {
6636 if (i_ddi_node_state(dip
) < DS_BOUND
) {
6640 * Use ddi_get_name_addr() without checking state so we get
6641 * a unit-address if we are called after ddi_set_name_addr()
6642 * by nexus DDI_CTL_INITCHILD code, but before completing
6643 * node promotion to DS_INITIALIZED. We currently have
6644 * two situations where we are called in this state:
6645 * o For framework processing of a path-oriented alias.
6646 * o If a SCSA nexus driver calls ddi_devid_register()
6647 * from it's tran_tgt_init(9E) implementation.
6649 addrname
= ddi_get_name_addr(dip
);
6650 if (addrname
== NULL
)
6654 if (*addrname
== '\0') {
6655 (void) sprintf(name
, "/%s", ddi_node_name(dip
));
6657 (void) sprintf(name
, "/%s@%s", ddi_node_name(dip
), addrname
);
6664 * Spits out the name of device node, typically name@addr, for a given node,
6665 * using the driver name, not the nodename.
6667 * Used by match_parent. Not to be used elsewhere.
6670 i_ddi_parname(dev_info_t
*dip
, char *name
)
6674 if (dip
== ddi_root_node()) {
6679 ASSERT(i_ddi_node_state(dip
) >= DS_INITIALIZED
);
6681 if (*(addrname
= ddi_get_name_addr(dip
)) == '\0')
6682 (void) sprintf(name
, "%s", ddi_binding_name(dip
));
6684 (void) sprintf(name
, "%s@%s", ddi_binding_name(dip
), addrname
);
6689 pathname_work(dev_info_t
*dip
, char *path
)
6693 if (dip
== ddi_root_node()) {
6697 (void) pathname_work(ddi_get_parent(dip
), path
);
6698 bp
= path
+ strlen(path
);
6699 (void) ddi_deviname(dip
, bp
);
6704 ddi_pathname(dev_info_t
*dip
, char *path
)
6706 return (pathname_work(dip
, path
));
6710 ddi_pathname_minor(struct ddi_minor_data
*dmdp
, char *path
)
6712 if (dmdp
->dip
== NULL
)
6715 (void) ddi_pathname(dmdp
->dip
, path
);
6716 if (dmdp
->ddm_name
) {
6717 (void) strcat(path
, ":");
6718 (void) strcat(path
, dmdp
->ddm_name
);
6725 pathname_work_obp(dev_info_t
*dip
, char *path
)
6731 * look up the "obp-path" property, return the path if it exists
6733 if (ddi_prop_lookup_string(DDI_DEV_T_ANY
, dip
, DDI_PROP_DONTPASS
,
6734 "obp-path", &obp_path
) == DDI_PROP_SUCCESS
) {
6735 (void) strcpy(path
, obp_path
);
6736 ddi_prop_free(obp_path
);
6741 * stop at root, no obp path
6743 if (dip
== ddi_root_node()) {
6747 obp_path
= pathname_work_obp(ddi_get_parent(dip
), path
);
6748 if (obp_path
== NULL
)
6752 * append our component to parent's obp path
6754 bp
= path
+ strlen(path
);
6755 if (*(bp
- 1) != '/')
6756 (void) strcat(bp
++, "/");
6757 (void) ddi_deviname(dip
, bp
);
6762 * return the 'obp-path' based path for the given node, or NULL if the node
6763 * does not have a different obp path. NOTE: Unlike ddi_pathname, this
6764 * function can't be called from interrupt context (since we need to
6765 * lookup a string property).
6768 ddi_pathname_obp(dev_info_t
*dip
, char *path
)
6770 ASSERT(!servicing_interrupt());
6771 if (dip
== NULL
|| path
== NULL
)
6774 /* split work into a separate function to aid debugging */
6775 return (pathname_work_obp(dip
, path
));
6779 ddi_pathname_obp_set(dev_info_t
*dip
, char *component
)
6782 char *obp_path
= NULL
;
6783 int rc
= DDI_FAILURE
;
6786 return (DDI_FAILURE
);
6788 obp_path
= kmem_zalloc(MAXPATHLEN
, KM_SLEEP
);
6790 pdip
= ddi_get_parent(dip
);
6792 if (ddi_pathname_obp(pdip
, obp_path
) == NULL
) {
6793 (void) ddi_pathname(pdip
, obp_path
);
6797 (void) strncat(obp_path
, "/", MAXPATHLEN
);
6798 (void) strncat(obp_path
, component
, MAXPATHLEN
);
6800 rc
= ndi_prop_update_string(DDI_DEV_T_NONE
, dip
, "obp-path",
6804 kmem_free(obp_path
, MAXPATHLEN
);
6810 * Given a dev_t, return the pathname of the corresponding device in the
6811 * buffer pointed at by "path." The buffer is assumed to be large enough
6812 * to hold the pathname of the device (MAXPATHLEN).
6814 * The pathname of a device is the pathname of the devinfo node to which
6815 * the device "belongs," concatenated with the character ':' and the name
6816 * of the minor node corresponding to the dev_t. If spec_type is 0 then
6817 * just the pathname of the devinfo node is returned without driving attach
6818 * of that node. For a non-zero spec_type, an attach is performed and a
6819 * search of the minor list occurs.
6821 * It is possible that the path associated with the dev_t is not
6822 * currently available in the devinfo tree. In order to have a
6823 * dev_t, a device must have been discovered before, which means
6824 * that the path is always in the instance tree. The one exception
6825 * to this is if the dev_t is associated with a pseudo driver, in
6826 * which case the device must exist on the pseudo branch of the
6827 * devinfo tree as a result of parsing .conf files.
6830 ddi_dev_pathname(dev_t devt
, int spec_type
, char *path
)
6833 major_t major
= getmajor(devt
);
6839 if (major
>= devcnt
)
6841 if (major
== clone_major
) {
6842 /* clone has no minor nodes, manufacture the path here */
6843 if ((drvname
= ddi_major_to_name(getminor(devt
))) == NULL
)
6846 (void) snprintf(path
, MAXPATHLEN
, "%s:%s", CLONE_PATH
, drvname
);
6847 return (DDI_SUCCESS
);
6850 /* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
6851 if ((instance
= dev_to_instance(devt
)) == -1)
6854 /* reconstruct the path given the major/instance */
6855 if (e_ddi_majorinstance_to_path(major
, instance
, path
) != DDI_SUCCESS
)
6858 /* if spec_type given we must drive attach and search minor nodes */
6859 if ((spec_type
== S_IFCHR
) || (spec_type
== S_IFBLK
)) {
6860 /* attach the path so we can search minors */
6861 if ((dip
= e_ddi_hold_devi_by_path(path
, 0)) == NULL
)
6864 /* Add minorname to path. */
6865 ndi_devi_enter(dip
, &circ
);
6866 minorname
= i_ddi_devtspectype_to_minorname(dip
,
6869 (void) strcat(path
, ":");
6870 (void) strcat(path
, minorname
);
6872 ndi_devi_exit(dip
, circ
);
6873 ddi_release_devi(dip
);
6874 if (minorname
== NULL
)
6877 ASSERT(strlen(path
) < MAXPATHLEN
);
6878 return (DDI_SUCCESS
);
6881 return (DDI_FAILURE
);
6885 * Given a major number and an instance, return the path.
6886 * This interface does NOT drive attach.
6889 e_ddi_majorinstance_to_path(major_t major
, int instance
, char *path
)
6891 struct devnames
*dnp
;
6894 if ((major
>= devcnt
) || (instance
== -1)) {
6896 return (DDI_FAILURE
);
6899 /* look for the major/instance in the instance tree */
6900 if (e_ddi_instance_majorinstance_to_path(major
, instance
,
6901 path
) == DDI_SUCCESS
) {
6902 ASSERT(strlen(path
) < MAXPATHLEN
);
6903 return (DDI_SUCCESS
);
6907 * Not in instance tree, find the instance on the per driver list and
6908 * construct path to instance via ddi_pathname(). This is how paths
6909 * down the 'pseudo' branch are constructed.
6911 dnp
= &(devnamesp
[major
]);
6912 LOCK_DEV_OPS(&(dnp
->dn_lock
));
6913 for (dip
= dnp
->dn_head
; dip
;
6914 dip
= (dev_info_t
*)DEVI(dip
)->devi_next
) {
6915 /* Skip if instance does not match. */
6916 if (DEVI(dip
)->devi_instance
!= instance
)
6920 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
6921 * node demotion, so it is not an effective way of ensuring
6922 * that the ddi_pathname result has a unit-address. Instead,
6923 * we reverify the node state after calling ddi_pathname().
6925 if (i_ddi_node_state(dip
) >= DS_INITIALIZED
) {
6926 (void) ddi_pathname(dip
, path
);
6927 if (i_ddi_node_state(dip
) < DS_INITIALIZED
)
6929 UNLOCK_DEV_OPS(&(dnp
->dn_lock
));
6930 ASSERT(strlen(path
) < MAXPATHLEN
);
6931 return (DDI_SUCCESS
);
6934 UNLOCK_DEV_OPS(&(dnp
->dn_lock
));
6936 /* can't reconstruct the path */
6938 return (DDI_FAILURE
);
6941 #define GLD_DRIVER_PPA "SUNW,gld_v0_ppa"
6944 * Given the dip for a network interface return the ppa for that interface.
6946 * In all cases except GLD v0 drivers, the ppa == instance.
6947 * In the case of GLD v0 drivers, the ppa is equal to the attach order.
6948 * So for these drivers when the attach routine calls gld_register(),
6949 * the GLD framework creates an integer property called "gld_driver_ppa"
6950 * that can be queried here.
6952 * The only time this function is used is when a system is booting over nfs.
6953 * In this case the system has to resolve the pathname of the boot device
6957 i_ddi_devi_get_ppa(dev_info_t
*dip
)
6959 return (ddi_prop_get_int(DDI_DEV_T_ANY
, dip
,
6960 DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
,
6961 GLD_DRIVER_PPA
, ddi_get_instance(dip
)));
6965 * i_ddi_devi_set_ppa() should only be called from gld_register()
6966 * and only for GLD v0 drivers
6969 i_ddi_devi_set_ppa(dev_info_t
*dip
, int ppa
)
6971 (void) e_ddi_prop_update_int(DDI_DEV_T_NONE
, dip
, GLD_DRIVER_PPA
, ppa
);
6976 * Private DDI Console bell functions.
6979 ddi_ring_console_bell(clock_t duration
)
6981 if (ddi_console_bell_func
!= NULL
)
6982 (*ddi_console_bell_func
)(duration
);
6986 ddi_set_console_bell(void (*bellfunc
)(clock_t duration
))
6988 ddi_console_bell_func
= bellfunc
;
6992 ddi_dma_alloc_handle(dev_info_t
*dip
, ddi_dma_attr_t
*attr
,
6993 int (*waitfp
)(caddr_t
), caddr_t arg
, ddi_dma_handle_t
*handlep
)
6995 int (*funcp
)() = ddi_dma_allochdl
;
6996 ddi_dma_attr_t dma_attr
;
6997 struct bus_ops
*bop
;
6999 if (attr
== (ddi_dma_attr_t
*)0)
7000 return (DDI_DMA_BADATTR
);
7004 bop
= DEVI(dip
)->devi_ops
->devo_bus_ops
;
7005 if (bop
&& bop
->bus_dma_allochdl
)
7006 funcp
= bop
->bus_dma_allochdl
;
7008 return ((*funcp
)(dip
, dip
, &dma_attr
, waitfp
, arg
, handlep
));
7012 ddi_dma_free_handle(ddi_dma_handle_t
*handlep
)
7014 ddi_dma_handle_t h
= *handlep
;
7015 (void) ddi_dma_freehdl(HD
, HD
, h
);
7018 static uintptr_t dma_mem_list_id
= 0;
7022 ddi_dma_mem_alloc(ddi_dma_handle_t handle
, size_t length
,
7023 ddi_device_acc_attr_t
*accattrp
, uint_t flags
,
7024 int (*waitfp
)(caddr_t
), caddr_t arg
, caddr_t
*kaddrp
,
7025 size_t *real_length
, ddi_acc_handle_t
*handlep
)
7027 ddi_dma_impl_t
*hp
= (ddi_dma_impl_t
*)handle
;
7028 dev_info_t
*dip
= hp
->dmai_rdip
;
7030 ddi_dma_attr_t
*attrp
= &hp
->dmai_attr
;
7031 uint_t sleepflag
, xfermodes
;
7035 if (waitfp
== DDI_DMA_SLEEP
)
7036 fp
= (int (*)())KM_SLEEP
;
7037 else if (waitfp
== DDI_DMA_DONTWAIT
)
7038 fp
= (int (*)())KM_NOSLEEP
;
7041 *handlep
= impl_acc_hdl_alloc(fp
, arg
);
7042 if (*handlep
== NULL
)
7043 return (DDI_FAILURE
);
7045 /* check if the cache attributes are supported */
7046 if (i_ddi_check_cache_attr(flags
) == B_FALSE
)
7047 return (DDI_FAILURE
);
7050 * Transfer the meaningful bits to xfermodes.
7051 * Double-check if the 3rd party driver correctly sets the bits.
7052 * If not, set DDI_DMA_STREAMING to keep compatibility.
7054 xfermodes
= flags
& (DDI_DMA_CONSISTENT
| DDI_DMA_STREAMING
);
7055 if (xfermodes
== 0) {
7056 xfermodes
= DDI_DMA_STREAMING
;
7060 * initialize the common elements of data access handle
7062 ap
= impl_acc_hdl_get(*handlep
);
7063 ap
->ah_vers
= VERS_ACCHDL
;
7067 ap
->ah_xfermodes
= flags
;
7068 ap
->ah_acc
= *accattrp
;
7070 sleepflag
= ((waitfp
== DDI_DMA_SLEEP
) ? 1 : 0);
7071 if (xfermodes
== DDI_DMA_CONSISTENT
) {
7072 rval
= i_ddi_mem_alloc(dip
, attrp
, length
, sleepflag
,
7073 flags
, accattrp
, kaddrp
, NULL
, ap
);
7074 *real_length
= length
;
7076 rval
= i_ddi_mem_alloc(dip
, attrp
, length
, sleepflag
,
7077 flags
, accattrp
, kaddrp
, real_length
, ap
);
7079 if (rval
== DDI_SUCCESS
) {
7080 ap
->ah_len
= (off_t
)(*real_length
);
7081 ap
->ah_addr
= *kaddrp
;
7083 impl_acc_hdl_free(*handlep
);
7084 *handlep
= (ddi_acc_handle_t
)NULL
;
7085 if (waitfp
!= DDI_DMA_SLEEP
&& waitfp
!= DDI_DMA_DONTWAIT
) {
7086 ddi_set_callback(waitfp
, arg
, &dma_mem_list_id
);
7094 ddi_dma_mem_free(ddi_acc_handle_t
*handlep
)
7098 ap
= impl_acc_hdl_get(*handlep
);
7101 i_ddi_mem_free((caddr_t
)ap
->ah_addr
, ap
);
7106 impl_acc_hdl_free(*handlep
);
7107 *handlep
= (ddi_acc_handle_t
)NULL
;
7109 if (dma_mem_list_id
!= 0) {
7110 ddi_run_callback(&dma_mem_list_id
);
7115 ddi_dma_buf_bind_handle(ddi_dma_handle_t handle
, struct buf
*bp
,
7116 uint_t flags
, int (*waitfp
)(caddr_t
), caddr_t arg
,
7117 ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
)
7119 ddi_dma_impl_t
*hp
= (ddi_dma_impl_t
*)handle
;
7120 dev_info_t
*dip
, *rdip
;
7121 struct ddi_dma_req dmareq
;
7124 dmareq
.dmar_flags
= flags
;
7125 dmareq
.dmar_fp
= waitfp
;
7126 dmareq
.dmar_arg
= arg
;
7127 dmareq
.dmar_object
.dmao_size
= (uint_t
)bp
->b_bcount
;
7129 if (bp
->b_flags
& B_PAGEIO
) {
7130 dmareq
.dmar_object
.dmao_type
= DMA_OTYP_PAGES
;
7131 dmareq
.dmar_object
.dmao_obj
.pp_obj
.pp_pp
= bp
->b_pages
;
7132 dmareq
.dmar_object
.dmao_obj
.pp_obj
.pp_offset
=
7133 (uint_t
)(((uintptr_t)bp
->b_un
.b_addr
) & MMU_PAGEOFFSET
);
7135 dmareq
.dmar_object
.dmao_obj
.virt_obj
.v_addr
= bp
->b_un
.b_addr
;
7136 if (bp
->b_flags
& B_SHADOW
) {
7137 dmareq
.dmar_object
.dmao_obj
.virt_obj
.v_priv
=
7139 dmareq
.dmar_object
.dmao_type
= DMA_OTYP_BUFVADDR
;
7141 dmareq
.dmar_object
.dmao_type
=
7142 (bp
->b_flags
& (B_PHYS
| B_REMAPPED
)) ?
7143 DMA_OTYP_BUFVADDR
: DMA_OTYP_VADDR
;
7144 dmareq
.dmar_object
.dmao_obj
.virt_obj
.v_priv
= NULL
;
7148 * If the buffer has no proc pointer, or the proc
7149 * struct has the kernel address space, or the buffer has
7150 * been marked B_REMAPPED (meaning that it is now
7151 * mapped into the kernel's address space), then
7152 * the address space is kas (kernel address space).
7154 if ((bp
->b_proc
== NULL
) || (bp
->b_proc
->p_as
== &kas
) ||
7155 (bp
->b_flags
& B_REMAPPED
)) {
7156 dmareq
.dmar_object
.dmao_obj
.virt_obj
.v_as
= 0;
7158 dmareq
.dmar_object
.dmao_obj
.virt_obj
.v_as
=
7163 dip
= rdip
= hp
->dmai_rdip
;
7164 if (dip
!= ddi_root_node())
7165 dip
= (dev_info_t
*)DEVI(dip
)->devi_bus_dma_bindhdl
;
7166 funcp
= DEVI(rdip
)->devi_bus_dma_bindfunc
;
7167 return ((*funcp
)(dip
, rdip
, handle
, &dmareq
, cookiep
, ccountp
));
7171 ddi_dma_addr_bind_handle(ddi_dma_handle_t handle
, struct as
*as
,
7172 caddr_t addr
, size_t len
, uint_t flags
, int (*waitfp
)(caddr_t
),
7173 caddr_t arg
, ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
)
7175 ddi_dma_impl_t
*hp
= (ddi_dma_impl_t
*)handle
;
7176 dev_info_t
*dip
, *rdip
;
7177 struct ddi_dma_req dmareq
;
7180 if (len
== (uint_t
)0) {
7181 return (DDI_DMA_NOMAPPING
);
7183 dmareq
.dmar_flags
= flags
;
7184 dmareq
.dmar_fp
= waitfp
;
7185 dmareq
.dmar_arg
= arg
;
7186 dmareq
.dmar_object
.dmao_size
= len
;
7187 dmareq
.dmar_object
.dmao_type
= DMA_OTYP_VADDR
;
7188 dmareq
.dmar_object
.dmao_obj
.virt_obj
.v_as
= as
;
7189 dmareq
.dmar_object
.dmao_obj
.virt_obj
.v_addr
= addr
;
7190 dmareq
.dmar_object
.dmao_obj
.virt_obj
.v_priv
= NULL
;
7192 dip
= rdip
= hp
->dmai_rdip
;
7193 if (dip
!= ddi_root_node())
7194 dip
= (dev_info_t
*)DEVI(dip
)->devi_bus_dma_bindhdl
;
7195 funcp
= DEVI(rdip
)->devi_bus_dma_bindfunc
;
7196 return ((*funcp
)(dip
, rdip
, handle
, &dmareq
, cookiep
, ccountp
));
7200 ddi_dma_nextcookie(ddi_dma_handle_t handle
, ddi_dma_cookie_t
*cookiep
)
7202 ddi_dma_impl_t
*hp
= (ddi_dma_impl_t
*)handle
;
7203 ddi_dma_cookie_t
*cp
;
7205 cp
= hp
->dmai_cookie
;
7208 cookiep
->dmac_notused
= cp
->dmac_notused
;
7209 cookiep
->dmac_type
= cp
->dmac_type
;
7210 cookiep
->dmac_address
= cp
->dmac_address
;
7211 cookiep
->dmac_size
= cp
->dmac_size
;
7216 ddi_dma_numwin(ddi_dma_handle_t handle
, uint_t
*nwinp
)
7218 ddi_dma_impl_t
*hp
= (ddi_dma_impl_t
*)handle
;
7219 if ((hp
->dmai_rflags
& DDI_DMA_PARTIAL
) == 0) {
7220 return (DDI_FAILURE
);
7222 *nwinp
= hp
->dmai_nwin
;
7223 return (DDI_SUCCESS
);
7228 ddi_dma_getwin(ddi_dma_handle_t h
, uint_t win
, off_t
*offp
,
7229 size_t *lenp
, ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
)
7231 int (*funcp
)() = ddi_dma_win
;
7232 struct bus_ops
*bop
;
7234 bop
= DEVI(HD
)->devi_ops
->devo_bus_ops
;
7235 if (bop
&& bop
->bus_dma_win
)
7236 funcp
= bop
->bus_dma_win
;
7238 return ((*funcp
)(HD
, HD
, h
, win
, offp
, lenp
, cookiep
, ccountp
));
7242 ddi_dma_set_sbus64(ddi_dma_handle_t h
, ulong_t burstsizes
)
7244 return (ddi_dma_mctl(HD
, HD
, h
, DDI_DMA_SET_SBUS64
, 0,
7245 &burstsizes
, 0, 0));
7249 i_ddi_dma_fault_check(ddi_dma_impl_t
*hp
)
7251 return (hp
->dmai_fault
);
7255 ddi_check_dma_handle(ddi_dma_handle_t handle
)
7257 ddi_dma_impl_t
*hp
= (ddi_dma_impl_t
*)handle
;
7258 int (*check
)(ddi_dma_impl_t
*);
7260 if ((check
= hp
->dmai_fault_check
) == NULL
)
7261 check
= i_ddi_dma_fault_check
;
7263 return (((*check
)(hp
) == DDI_SUCCESS
) ? DDI_SUCCESS
: DDI_FAILURE
);
7267 i_ddi_dma_set_fault(ddi_dma_handle_t handle
)
7269 ddi_dma_impl_t
*hp
= (ddi_dma_impl_t
*)handle
;
7270 void (*notify
)(ddi_dma_impl_t
*);
7272 if (!hp
->dmai_fault
) {
7274 if ((notify
= hp
->dmai_fault_notify
) != NULL
)
7280 i_ddi_dma_clr_fault(ddi_dma_handle_t handle
)
7282 ddi_dma_impl_t
*hp
= (ddi_dma_impl_t
*)handle
;
7283 void (*notify
)(ddi_dma_impl_t
*);
7285 if (hp
->dmai_fault
) {
7287 if ((notify
= hp
->dmai_fault_notify
) != NULL
)
7293 * register mapping routines.
7296 ddi_regs_map_setup(dev_info_t
*dip
, uint_t rnumber
, caddr_t
*addrp
,
7297 offset_t offset
, offset_t len
, ddi_device_acc_attr_t
*accattrp
,
7298 ddi_acc_handle_t
*handle
)
7305 * Allocate and initialize the common elements of data access handle.
7307 *handle
= impl_acc_hdl_alloc(KM_SLEEP
, NULL
);
7308 hp
= impl_acc_hdl_get(*handle
);
7309 hp
->ah_vers
= VERS_ACCHDL
;
7311 hp
->ah_rnumber
= rnumber
;
7312 hp
->ah_offset
= offset
;
7314 hp
->ah_acc
= *accattrp
;
7317 * Set up the mapping request and call to parent.
7319 mr
.map_op
= DDI_MO_MAP_LOCKED
;
7320 mr
.map_type
= DDI_MT_RNUMBER
;
7321 mr
.map_obj
.rnumber
= rnumber
;
7322 mr
.map_prot
= PROT_READ
| PROT_WRITE
;
7323 mr
.map_flags
= DDI_MF_KERNEL_MAPPING
;
7324 mr
.map_handlep
= hp
;
7325 mr
.map_vers
= DDI_MAP_VERSION
;
7326 result
= ddi_map(dip
, &mr
, offset
, len
, addrp
);
7329 * check for end result
7331 if (result
!= DDI_SUCCESS
) {
7332 impl_acc_hdl_free(*handle
);
7333 *handle
= (ddi_acc_handle_t
)NULL
;
7335 hp
->ah_addr
= *addrp
;
7342 ddi_regs_map_free(ddi_acc_handle_t
*handlep
)
7347 hp
= impl_acc_hdl_get(*handlep
);
7350 mr
.map_op
= DDI_MO_UNMAP
;
7351 mr
.map_type
= DDI_MT_RNUMBER
;
7352 mr
.map_obj
.rnumber
= hp
->ah_rnumber
;
7353 mr
.map_prot
= PROT_READ
| PROT_WRITE
;
7354 mr
.map_flags
= DDI_MF_KERNEL_MAPPING
;
7355 mr
.map_handlep
= hp
;
7356 mr
.map_vers
= DDI_MAP_VERSION
;
7359 * Call my parent to unmap my regs.
7361 (void) ddi_map(hp
->ah_dip
, &mr
, hp
->ah_offset
,
7362 hp
->ah_len
, &hp
->ah_addr
);
7366 impl_acc_hdl_free(*handlep
);
7367 *handlep
= (ddi_acc_handle_t
)NULL
;
7371 ddi_device_zero(ddi_acc_handle_t handle
, caddr_t dev_addr
, size_t bytecount
,
7372 ssize_t dev_advcnt
, uint_t dev_datasz
)
7379 /* check for total byte count is multiple of data transfer size */
7380 if (bytecount
!= ((bytecount
/ dev_datasz
) * dev_datasz
))
7381 return (DDI_FAILURE
);
7383 switch (dev_datasz
) {
7384 case DDI_DATA_SZ01_ACC
:
7385 for (b
= (uint8_t *)dev_addr
;
7386 bytecount
!= 0; bytecount
-= 1, b
+= dev_advcnt
)
7387 ddi_put8(handle
, b
, 0);
7389 case DDI_DATA_SZ02_ACC
:
7390 for (w
= (uint16_t *)dev_addr
;
7391 bytecount
!= 0; bytecount
-= 2, w
+= dev_advcnt
)
7392 ddi_put16(handle
, w
, 0);
7394 case DDI_DATA_SZ04_ACC
:
7395 for (l
= (uint32_t *)dev_addr
;
7396 bytecount
!= 0; bytecount
-= 4, l
+= dev_advcnt
)
7397 ddi_put32(handle
, l
, 0);
7399 case DDI_DATA_SZ08_ACC
:
7400 for (ll
= (uint64_t *)dev_addr
;
7401 bytecount
!= 0; bytecount
-= 8, ll
+= dev_advcnt
)
7402 ddi_put64(handle
, ll
, 0x0ll
);
7405 return (DDI_FAILURE
);
7407 return (DDI_SUCCESS
);
7412 ddi_acc_handle_t src_handle
, caddr_t src_addr
, ssize_t src_advcnt
,
7413 ddi_acc_handle_t dest_handle
, caddr_t dest_addr
, ssize_t dest_advcnt
,
7414 size_t bytecount
, uint_t dev_datasz
)
7416 uint8_t *b_src
, *b_dst
;
7417 uint16_t *w_src
, *w_dst
;
7418 uint32_t *l_src
, *l_dst
;
7419 uint64_t *ll_src
, *ll_dst
;
7421 /* check for total byte count is multiple of data transfer size */
7422 if (bytecount
!= ((bytecount
/ dev_datasz
) * dev_datasz
))
7423 return (DDI_FAILURE
);
7425 switch (dev_datasz
) {
7426 case DDI_DATA_SZ01_ACC
:
7427 b_src
= (uint8_t *)src_addr
;
7428 b_dst
= (uint8_t *)dest_addr
;
7430 for (; bytecount
!= 0; bytecount
-= 1) {
7431 ddi_put8(dest_handle
, b_dst
,
7432 ddi_get8(src_handle
, b_src
));
7433 b_dst
+= dest_advcnt
;
7434 b_src
+= src_advcnt
;
7437 case DDI_DATA_SZ02_ACC
:
7438 w_src
= (uint16_t *)src_addr
;
7439 w_dst
= (uint16_t *)dest_addr
;
7441 for (; bytecount
!= 0; bytecount
-= 2) {
7442 ddi_put16(dest_handle
, w_dst
,
7443 ddi_get16(src_handle
, w_src
));
7444 w_dst
+= dest_advcnt
;
7445 w_src
+= src_advcnt
;
7448 case DDI_DATA_SZ04_ACC
:
7449 l_src
= (uint32_t *)src_addr
;
7450 l_dst
= (uint32_t *)dest_addr
;
7452 for (; bytecount
!= 0; bytecount
-= 4) {
7453 ddi_put32(dest_handle
, l_dst
,
7454 ddi_get32(src_handle
, l_src
));
7455 l_dst
+= dest_advcnt
;
7456 l_src
+= src_advcnt
;
7459 case DDI_DATA_SZ08_ACC
:
7460 ll_src
= (uint64_t *)src_addr
;
7461 ll_dst
= (uint64_t *)dest_addr
;
7463 for (; bytecount
!= 0; bytecount
-= 8) {
7464 ddi_put64(dest_handle
, ll_dst
,
7465 ddi_get64(src_handle
, ll_src
));
7466 ll_dst
+= dest_advcnt
;
7467 ll_src
+= src_advcnt
;
7471 return (DDI_FAILURE
);
7473 return (DDI_SUCCESS
);
/*
 * Byte-swap helper macros: 32- and 64-bit swaps are composed from the
 * 16-bit swap of each half.
 */
#define	swap16(value)  \
	((((value) & 0xff) << 8) | ((value) >> 8))

#define	swap32(value)	\
	(((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
	(uint32_t)swap16((uint16_t)((value) >> 16)))

#define	swap64(value)	\
	(((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \
	    << 32) | \
	(uint64_t)swap32((uint32_t)((value) >> 32)))
7489 ddi_swap16(uint16_t value
)
7491 return (swap16(value
));
7495 ddi_swap32(uint32_t value
)
7497 return (swap32(value
));
7501 ddi_swap64(uint64_t value
)
7503 return (swap64(value
));
7507 * Convert a binding name to a driver name.
7508 * A binding name is the name used to determine the driver for a
7509 * device - it may be either an alias for the driver or the name
7510 * of the driver itself.
7513 i_binding_to_drv_name(char *bname
)
7517 ASSERT(bname
!= NULL
);
7519 if ((major_no
= ddi_name_to_major(bname
)) == -1)
7521 return (ddi_major_to_name(major_no
));
7525 * Search for minor name that has specified dev_t and spec_type.
7526 * If spec_type is zero then any dev_t match works. Since we
7527 * are returning a pointer to the minor name string, we require the
7528 * caller to do the locking.
7531 i_ddi_devtspectype_to_minorname(dev_info_t
*dip
, dev_t dev
, int spec_type
)
7533 struct ddi_minor_data
*dmdp
;
7536 * The did layered driver currently intentionally returns a
7537 * devinfo ptr for an underlying sd instance based on a did
7538 * dev_t. In this case it is not an error.
7540 * The did layered driver is associated with Sun Cluster.
7542 ASSERT((ddi_driver_major(dip
) == getmajor(dev
)) ||
7543 (strcmp(ddi_major_to_name(getmajor(dev
)), "did") == 0));
7545 ASSERT(DEVI_BUSY_OWNED(dip
));
7546 for (dmdp
= DEVI(dip
)->devi_minor
; dmdp
; dmdp
= dmdp
->next
) {
7547 if (((dmdp
->type
== DDM_MINOR
) ||
7548 (dmdp
->type
== DDM_INTERNAL_PATH
) ||
7549 (dmdp
->type
== DDM_DEFAULT
)) &&
7550 (dmdp
->ddm_dev
== dev
) &&
7551 ((((spec_type
& (S_IFCHR
|S_IFBLK
))) == 0) ||
7552 (dmdp
->ddm_spec_type
== spec_type
)))
7553 return (dmdp
->ddm_name
);
7560 * Find the devt and spectype of the specified minor_name.
7561 * Return DDI_FAILURE if minor_name not found. Since we are
7562 * returning everything via arguments we can do the locking.
7565 i_ddi_minorname_to_devtspectype(dev_info_t
*dip
, char *minor_name
,
7566 dev_t
*devtp
, int *spectypep
)
7569 struct ddi_minor_data
*dmdp
;
7571 /* deal with clone minor nodes */
7572 if (dip
== clone_dip
) {
7575 * Make sure minor_name is a STREAMS driver.
7576 * We load the driver but don't attach to any instances.
7579 major
= ddi_name_to_major(minor_name
);
7580 if (major
== DDI_MAJOR_T_NONE
)
7581 return (DDI_FAILURE
);
7583 if (ddi_hold_driver(major
) == NULL
)
7584 return (DDI_FAILURE
);
7586 if (STREAMSTAB(major
) == NULL
) {
7587 ddi_rele_driver(major
);
7588 return (DDI_FAILURE
);
7590 ddi_rele_driver(major
);
7593 *devtp
= makedevice(clone_major
, (minor_t
)major
);
7596 *spectypep
= S_IFCHR
;
7598 return (DDI_SUCCESS
);
7601 ndi_devi_enter(dip
, &circ
);
7602 for (dmdp
= DEVI(dip
)->devi_minor
; dmdp
; dmdp
= dmdp
->next
) {
7603 if (((dmdp
->type
!= DDM_MINOR
) &&
7604 (dmdp
->type
!= DDM_INTERNAL_PATH
) &&
7605 (dmdp
->type
!= DDM_DEFAULT
)) ||
7606 strcmp(minor_name
, dmdp
->ddm_name
))
7610 *devtp
= dmdp
->ddm_dev
;
7613 *spectypep
= dmdp
->ddm_spec_type
;
7615 ndi_devi_exit(dip
, circ
);
7616 return (DDI_SUCCESS
);
7618 ndi_devi_exit(dip
, circ
);
7620 return (DDI_FAILURE
);
7623 static kmutex_t devid_gen_mutex
;
7624 static short devid_gen_number
;
7628 static int devid_register_corrupt
= 0;
7629 static int devid_register_corrupt_major
= 0;
7630 static int devid_register_corrupt_hint
= 0;
7631 static int devid_register_corrupt_hint_major
= 0;
7633 static int devid_lyr_debug
= 0;
7635 #define DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs) \
7636 if (devid_lyr_debug) \
7637 ddi_debug_devid_devts(msg, ndevs, devs)
7641 #define DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)
7649 ddi_debug_devid_devts(char *msg
, int ndevs
, dev_t
*devs
)
7653 cmn_err(CE_CONT
, "%s:\n", msg
);
7654 for (i
= 0; i
< ndevs
; i
++) {
7655 cmn_err(CE_CONT
, " 0x%lx\n", devs
[i
]);
7660 ddi_debug_devid_paths(char *msg
, int npaths
, char **paths
)
7664 cmn_err(CE_CONT
, "%s:\n", msg
);
7665 for (i
= 0; i
< npaths
; i
++) {
7666 cmn_err(CE_CONT
, " %s\n", paths
[i
]);
7671 ddi_debug_devid_devts_per_path(char *path
, int ndevs
, dev_t
*devs
)
7675 cmn_err(CE_CONT
, "dev_ts per path %s\n", path
);
7676 for (i
= 0; i
< ndevs
; i
++) {
7677 cmn_err(CE_CONT
, " 0x%lx\n", devs
[i
]);
7684 * Register device id into DDI framework.
7685 * Must be called when the driver is bound.
7688 i_ddi_devid_register(dev_info_t
*dip
, ddi_devid_t devid
)
7690 impl_devid_t
*i_devid
= (impl_devid_t
*)devid
;
7692 const char *driver_name
;
7696 if ((dip
== NULL
) ||
7697 ((major
= ddi_driver_major(dip
)) == DDI_MAJOR_T_NONE
))
7698 return (DDI_FAILURE
);
7700 /* verify that the devid is valid */
7701 if (ddi_devid_valid(devid
) != DDI_SUCCESS
)
7702 return (DDI_FAILURE
);
7704 /* Updating driver name hint in devid */
7705 driver_name
= ddi_driver_name(dip
);
7706 driver_len
= strlen(driver_name
);
7707 if (driver_len
> DEVID_HINT_SIZE
) {
7708 /* Pick up last four characters of driver name */
7709 driver_name
+= driver_len
- DEVID_HINT_SIZE
;
7710 driver_len
= DEVID_HINT_SIZE
;
7712 bzero(i_devid
->did_driver
, DEVID_HINT_SIZE
);
7713 bcopy(driver_name
, i_devid
->did_driver
, driver_len
);
7716 /* Corrupt the devid for testing. */
7717 if (devid_register_corrupt
)
7718 i_devid
->did_id
[0] += devid_register_corrupt
;
7719 if (devid_register_corrupt_major
&&
7720 (major
== devid_register_corrupt_major
))
7721 i_devid
->did_id
[0] += 1;
7722 if (devid_register_corrupt_hint
)
7723 i_devid
->did_driver
[0] += devid_register_corrupt_hint
;
7724 if (devid_register_corrupt_hint_major
&&
7725 (major
== devid_register_corrupt_hint_major
))
7726 i_devid
->did_driver
[0] += 1;
7729 /* encode the devid as a string */
7730 if ((devid_str
= ddi_devid_str_encode(devid
, NULL
)) == NULL
)
7731 return (DDI_FAILURE
);
7733 /* add string as a string property */
7734 if (ndi_prop_update_string(DDI_DEV_T_NONE
, dip
,
7735 DEVID_PROP_NAME
, devid_str
) != DDI_SUCCESS
) {
7736 cmn_err(CE_WARN
, "%s%d: devid property update failed",
7737 ddi_driver_name(dip
), ddi_get_instance(dip
));
7738 ddi_devid_str_free(devid_str
);
7739 return (DDI_FAILURE
);
7742 /* keep pointer to devid string for interrupt context fma code */
7743 if (DEVI(dip
)->devi_devid_str
)
7744 ddi_devid_str_free(DEVI(dip
)->devi_devid_str
);
7745 DEVI(dip
)->devi_devid_str
= devid_str
;
7746 return (DDI_SUCCESS
);
7750 ddi_devid_register(dev_info_t
*dip
, ddi_devid_t devid
)
7754 rval
= i_ddi_devid_register(dip
, devid
);
7755 if (rval
== DDI_SUCCESS
) {
7757 * Register devid in devid-to-path cache
7759 if (e_devid_cache_register(dip
, devid
) == DDI_SUCCESS
) {
7760 mutex_enter(&DEVI(dip
)->devi_lock
);
7761 DEVI(dip
)->devi_flags
|= DEVI_CACHED_DEVID
;
7762 mutex_exit(&DEVI(dip
)->devi_lock
);
7763 } else if (ddi_get_name_addr(dip
)) {
7765 * We only expect cache_register DDI_FAILURE when we
7766 * can't form the full path because of NULL devi_addr.
7768 cmn_err(CE_WARN
, "%s%d: failed to cache devid",
7769 ddi_driver_name(dip
), ddi_get_instance(dip
));
7772 cmn_err(CE_WARN
, "%s%d: failed to register devid",
7773 ddi_driver_name(dip
), ddi_get_instance(dip
));
7779 * Remove (unregister) device id from DDI framework.
7780 * Must be called when device is detached.
7783 i_ddi_devid_unregister(dev_info_t
*dip
)
7785 if (DEVI(dip
)->devi_devid_str
) {
7786 ddi_devid_str_free(DEVI(dip
)->devi_devid_str
);
7787 DEVI(dip
)->devi_devid_str
= NULL
;
7790 /* remove the devid property */
7791 (void) ndi_prop_remove(DDI_DEV_T_NONE
, dip
, DEVID_PROP_NAME
);
7795 ddi_devid_unregister(dev_info_t
*dip
)
7797 mutex_enter(&DEVI(dip
)->devi_lock
);
7798 DEVI(dip
)->devi_flags
&= ~DEVI_CACHED_DEVID
;
7799 mutex_exit(&DEVI(dip
)->devi_lock
);
7800 e_devid_cache_unregister(dip
);
7801 i_ddi_devid_unregister(dip
);
7805 * Allocate and initialize a device id.
7810 ushort_t devid_type
,
7813 ddi_devid_t
*ret_devid
)
7815 impl_devid_t
*i_devid
;
7816 int sz
= sizeof (*i_devid
) + nbytes
- sizeof (char);
7818 const char *driver_name
;
7820 switch (devid_type
) {
7821 case DEVID_SCSI3_WWN
:
7823 case DEVID_SCSI_SERIAL
:
7825 case DEVID_ATA_SERIAL
:
7829 return (DDI_FAILURE
);
7831 return (DDI_FAILURE
);
7835 return (DDI_FAILURE
);
7837 return (DDI_FAILURE
);
7838 nbytes
= sizeof (int) +
7839 sizeof (struct timeval32
) + sizeof (short);
7843 return (DDI_FAILURE
);
7846 if ((i_devid
= kmem_zalloc(sz
, KM_SLEEP
)) == NULL
)
7847 return (DDI_FAILURE
);
7849 i_devid
->did_magic_hi
= DEVID_MAGIC_MSB
;
7850 i_devid
->did_magic_lo
= DEVID_MAGIC_LSB
;
7851 i_devid
->did_rev_hi
= DEVID_REV_MSB
;
7852 i_devid
->did_rev_lo
= DEVID_REV_LSB
;
7853 DEVID_FORMTYPE(i_devid
, devid_type
);
7854 DEVID_FORMLEN(i_devid
, nbytes
);
7856 /* Fill in driver name hint */
7857 driver_name
= ddi_driver_name(dip
);
7858 driver_len
= strlen(driver_name
);
7859 if (driver_len
> DEVID_HINT_SIZE
) {
7860 /* Pick up last four characters of driver name */
7861 driver_name
+= driver_len
- DEVID_HINT_SIZE
;
7862 driver_len
= DEVID_HINT_SIZE
;
7865 bcopy(driver_name
, i_devid
->did_driver
, driver_len
);
7867 /* Fill in id field */
7868 if (devid_type
== DEVID_FAB
) {
7871 struct timeval32 timestamp32
;
7876 /* increase the generation number */
7877 mutex_enter(&devid_gen_mutex
);
7878 gen
= devid_gen_number
++;
7879 mutex_exit(&devid_gen_mutex
);
7881 cp
= i_devid
->did_id
;
7883 /* Fill in host id (big-endian byte ordering) */
7884 hostid
= zone_get_hostid(NULL
);
7885 *cp
++ = hibyte(hiword(hostid
));
7886 *cp
++ = lobyte(hiword(hostid
));
7887 *cp
++ = hibyte(loword(hostid
));
7888 *cp
++ = lobyte(loword(hostid
));
7891 * Fill in timestamp (big-endian byte ordering)
7893 * (Note that the format may have to be changed
7894 * before 2038 comes around, though it's arguably
7895 * unique enough as it is..)
7897 uniqtime32(×tamp32
);
7898 ip
= (int *)×tamp32
;
7900 i
< sizeof (timestamp32
) / sizeof (int); i
++, ip
++) {
7903 *cp
++ = hibyte(hiword(val
));
7904 *cp
++ = lobyte(hiword(val
));
7905 *cp
++ = hibyte(loword(val
));
7906 *cp
++ = lobyte(loword(val
));
7909 /* fill in the generation number */
7910 *cp
++ = hibyte(gen
);
7911 *cp
++ = lobyte(gen
);
7913 bcopy(id
, i_devid
->did_id
, nbytes
);
7915 /* return device id */
7916 *ret_devid
= (ddi_devid_t
)i_devid
;
7917 return (DDI_SUCCESS
);
7921 ddi_devid_get(dev_info_t
*dip
, ddi_devid_t
*ret_devid
)
7923 return (i_ddi_devi_get_devid(DDI_DEV_T_ANY
, dip
, ret_devid
));
7927 i_ddi_devi_get_devid(dev_t dev
, dev_info_t
*dip
, ddi_devid_t
*ret_devid
)
7931 ASSERT(dev
!= DDI_DEV_T_NONE
);
7933 /* look up the property, devt specific first */
7934 if (ddi_prop_lookup_string(dev
, dip
, DDI_PROP_DONTPASS
,
7935 DEVID_PROP_NAME
, &devidstr
) != DDI_PROP_SUCCESS
) {
7936 if ((dev
== DDI_DEV_T_ANY
) ||
7937 (ddi_prop_lookup_string(DDI_DEV_T_ANY
, dip
,
7938 DDI_PROP_DONTPASS
, DEVID_PROP_NAME
, &devidstr
) !=
7939 DDI_PROP_SUCCESS
)) {
7940 return (DDI_FAILURE
);
7944 /* convert to binary form */
7945 if (ddi_devid_str_decode(devidstr
, ret_devid
, NULL
) == -1) {
7946 ddi_prop_free(devidstr
);
7947 return (DDI_FAILURE
);
7949 ddi_prop_free(devidstr
);
7950 return (DDI_SUCCESS
);
7954 * Return a copy of the device id for dev_t
7957 ddi_lyr_get_devid(dev_t dev
, ddi_devid_t
*ret_devid
)
7963 if ((dip
= e_ddi_hold_devi_by_dev(dev
, 0)) == NULL
)
7964 return (DDI_FAILURE
);
7966 rval
= i_ddi_devi_get_devid(dev
, dip
, ret_devid
);
7968 ddi_release_devi(dip
); /* e_ddi_hold_devi_by_dev() */
7973 * Return a copy of the minor name for dev_t and spec_type
7976 ddi_lyr_get_minor_name(dev_t dev
, int spec_type
, char **minor_name
)
7984 if ((dip
= e_ddi_hold_devi_by_dev(dev
, 0)) == NULL
) {
7986 return (DDI_FAILURE
);
7989 /* Find the minor name and copy into max size buf */
7990 buf
= kmem_alloc(MAXNAMELEN
, KM_SLEEP
);
7991 ndi_devi_enter(dip
, &circ
);
7992 nm
= i_ddi_devtspectype_to_minorname(dip
, dev
, spec_type
);
7994 (void) strcpy(buf
, nm
);
7995 ndi_devi_exit(dip
, circ
);
7996 ddi_release_devi(dip
); /* e_ddi_hold_devi_by_dev() */
7999 /* duplicate into min size buf for return result */
8000 *minor_name
= i_ddi_strdup(buf
, KM_SLEEP
);
8007 /* free max size buf and return */
8008 kmem_free(buf
, MAXNAMELEN
);
8013 ddi_lyr_devid_to_devlist(
8019 ASSERT(ddi_devid_valid(devid
) == DDI_SUCCESS
);
8021 if (e_devid_cache_to_devt_list(devid
, minor_name
,
8022 retndevs
, retdevs
) == DDI_SUCCESS
) {
8023 ASSERT(*retndevs
> 0);
8024 DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
8025 *retndevs
, *retdevs
);
8026 return (DDI_SUCCESS
);
8029 if (e_ddi_devid_discovery(devid
) == DDI_FAILURE
) {
8030 return (DDI_FAILURE
);
8033 if (e_devid_cache_to_devt_list(devid
, minor_name
,
8034 retndevs
, retdevs
) == DDI_SUCCESS
) {
8035 ASSERT(*retndevs
> 0);
8036 DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
8037 *retndevs
, *retdevs
);
8038 return (DDI_SUCCESS
);
8041 return (DDI_FAILURE
);
8045 ddi_lyr_free_devlist(dev_t
*devlist
, int ndevs
)
8047 kmem_free(devlist
, sizeof (dev_t
) * ndevs
);
8051 * Note: This will need to be fixed if we ever allow processes to
8052 * have more than one data model per exec.
8055 ddi_mmap_get_model(void)
8057 return (get_udatamodel());
8061 ddi_model_convert_from(model_t model
)
8063 return ((model
& DDI_MODEL_MASK
) & ~DDI_MODEL_NATIVE
);
8067 * ddi interfaces managing storage and retrieval of eventcookies.
8071 * Invoke bus nexus driver's implementation of the
8072 * (*bus_remove_eventcall)() interface to remove a registered
8073 * callback handler for "event".
8076 ddi_remove_event_handler(ddi_callback_id_t id
)
8078 ndi_event_callbacks_t
*cb
= (ndi_event_callbacks_t
*)id
;
8083 return (DDI_FAILURE
);
8086 ddip
= NDI_EVENT_DDIP(cb
->ndi_evtcb_cookie
);
8087 return (ndi_busop_remove_eventcall(ddip
, id
));
8091 * Invoke bus nexus driver's implementation of the
8092 * (*bus_add_eventcall)() interface to register a callback handler
8096 ddi_add_event_handler(dev_info_t
*dip
, ddi_eventcookie_t event
,
8097 void (*handler
)(dev_info_t
*, ddi_eventcookie_t
, void *, void *),
8098 void *arg
, ddi_callback_id_t
*id
)
8100 return (ndi_busop_add_eventcall(dip
, dip
, event
, handler
, arg
, id
));
8105 * Return a handle for event "name" by calling up the device tree
8106 * hierarchy via (*bus_get_eventcookie)() interface until claimed
8107 * by a bus nexus or top of dev_info tree is reached.
8110 ddi_get_eventcookie(dev_info_t
*dip
, char *name
,
8111 ddi_eventcookie_t
*event_cookiep
)
8113 return (ndi_busop_get_eventcookie(dip
, dip
,
8114 name
, event_cookiep
));
8118 * This procedure is provided as the general callback function when
8119 * umem_lockmemory calls as_add_callback for long term memory locking.
8120 * When as_unmap, as_setprot, or as_free encounter segments which have
8121 * locked memory, this callback will be invoked.
8124 umem_lock_undo(struct as
*as
, void *arg
, uint_t event
)
8126 _NOTE(ARGUNUSED(as
, event
))
8127 struct ddi_umem_cookie
*cp
= (struct ddi_umem_cookie
*)arg
;
8130 * Call the cleanup function. Decrement the cookie reference
8131 * count, if it goes to zero, return the memory for the cookie.
8132 * The i_ddi_umem_unlock for this cookie may or may not have been
8133 * called already. It is the responsibility of the caller of
8134 * umem_lockmemory to handle the case of the cleanup routine
8135 * being called after a ddi_umem_unlock for the cookie
8139 (*cp
->callbacks
.cbo_umem_lock_cleanup
)((ddi_umem_cookie_t
)cp
);
8141 /* remove the cookie if reference goes to zero */
8142 if (atomic_add_long_nv((ulong_t
*)(&(cp
->cook_refcnt
)), -1) == 0) {
8143 kmem_free(cp
, sizeof (struct ddi_umem_cookie
));
8148 * The following two Consolidation Private routines provide generic
8149 * interfaces to increase/decrease the amount of device-locked memory.
8151 * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
8152 * must be called every time i_ddi_incr_locked_memory() is called.
8156 i_ddi_incr_locked_memory(proc_t
*procp
, rctl_qty_t inc
)
8158 ASSERT(procp
!= NULL
);
8159 mutex_enter(&procp
->p_lock
);
8160 if (rctl_incr_locked_mem(procp
, NULL
, inc
, 1)) {
8161 mutex_exit(&procp
->p_lock
);
8164 mutex_exit(&procp
->p_lock
);
8169 * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
8170 * must be called every time i_ddi_decr_locked_memory() is called.
8174 i_ddi_decr_locked_memory(proc_t
*procp
, rctl_qty_t dec
)
8176 ASSERT(procp
!= NULL
);
8177 mutex_enter(&procp
->p_lock
);
8178 rctl_decr_locked_mem(procp
, NULL
, dec
, 1);
8179 mutex_exit(&procp
->p_lock
);
8183 * The cookie->upd_max_lock_rctl flag is used to determine if we should
8184 * charge device locked memory to the max-locked-memory rctl. Tracking
8185 * device locked memory causes the rctl locks to get hot under high-speed
8186 * I/O such as RDSv3 over IB. If there is no max-locked-memory rctl limit,
8187 * we bypass charging the locked memory to the rctl altogether. The cookie's
8188 * flag tells us if the rctl value should be updated when unlocking the memory,
8189 * in case the rctl gets changed after the memory was locked. Any device
8190 * locked memory in that rare case will not be counted toward the rctl limit.
8192 * When tracking the locked memory, the kproject_t parameter is always NULL
8193 * in the code paths:
8194 * i_ddi_incr_locked_memory -> rctl_incr_locked_mem
8195 * i_ddi_decr_locked_memory -> rctl_decr_locked_mem
8196 * Thus, we always use the tk_proj member to check the projp setting.
8199 init_lockedmem_rctl_flag(struct ddi_umem_cookie
*cookie
)
8210 projp
= p
->p_task
->tk_proj
;
8215 if (zonep
->zone_locked_mem_ctl
== UINT64_MAX
&&
8216 projp
->kpj_data
.kpd_locked_mem_ctl
== UINT64_MAX
)
8217 cookie
->upd_max_lock_rctl
= 0;
8219 cookie
->upd_max_lock_rctl
= 1;
8223 * This routine checks if the max-locked-memory resource ctl is
8224 * exceeded, if not increments it, grabs a hold on the project.
8225 * Returns 0 if successful otherwise returns error code
8228 umem_incr_devlockmem(struct ddi_umem_cookie
*cookie
)
8234 if (cookie
->upd_max_lock_rctl
== 0)
8237 procp
= cookie
->procp
;
8240 if ((ret
= i_ddi_incr_locked_memory(procp
,
8241 cookie
->size
)) != 0) {
8248 * Decrements the max-locked-memory resource ctl and releases
8249 * the hold on the project that was acquired during umem_incr_devlockmem
8252 umem_decr_devlockmem(struct ddi_umem_cookie
*cookie
)
8256 if (cookie
->upd_max_lock_rctl
== 0)
8259 proc
= (proc_t
*)cookie
->procp
;
8263 i_ddi_decr_locked_memory(proc
, cookie
->size
);
8267 * A consolidation private function which is essentially equivalent to
8268 * ddi_umem_lock but with the addition of arguments ops_vector and procp.
8269 * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
8270 * the ops_vector is valid.
8272 * Lock the virtual address range in the current process and create a
8273 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8274 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8277 * Note: The resource control accounting currently uses a full charge model
8278 * in other words attempts to lock the same/overlapping areas of memory
8279 * will deduct the full size of the buffer from the projects running
8280 * counter for the device locked memory.
8282 * addr, size should be PAGESIZE aligned
8284 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8285 * identifies whether the locked memory will be read or written or both
8286 * DDI_UMEMLOCK_LONGTERM must be set when the locking will
8287 * be maintained for an indefinitely long period (essentially permanent),
8288 * rather than for what would be required for a typical I/O completion.
8289 * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
8290 * if the memory pertains to a regular file which is mapped MAP_SHARED.
8291 * This is to prevent a deadlock if a file truncation is attempted after
8292 * after the locking is done.
8294 * Returns 0 on success
8295 * EINVAL - for invalid parameters
8296 * EPERM, ENOMEM and other error codes returned by as_pagelock
8297 * ENOMEM - is returned if the current request to lock memory exceeds
8298 * *.max-locked-memory resource control value.
8299 * EFAULT - memory pertains to a regular file mapped shared and
8300 * and DDI_UMEMLOCK_LONGTERM flag is set
8301 * EAGAIN - could not start the ddi_umem_unlock list processing thread
8304 umem_lockmemory(caddr_t addr
, size_t len
, int flags
, ddi_umem_cookie_t
*cookie
,
8305 struct umem_callback_ops
*ops_vector
,
8309 struct ddi_umem_cookie
*p
;
8310 void (*driver_callback
)() = NULL
;
8315 /* Allow device drivers to not have to reference "curproc" */
8319 *cookie
= NULL
; /* in case of any error return */
8321 /* These are the only three valid flags */
8322 if ((flags
& ~(DDI_UMEMLOCK_READ
| DDI_UMEMLOCK_WRITE
|
8323 DDI_UMEMLOCK_LONGTERM
)) != 0)
8326 /* At least one (can be both) of the two access flags must be set */
8327 if ((flags
& (DDI_UMEMLOCK_READ
| DDI_UMEMLOCK_WRITE
)) == 0)
8330 /* addr and len must be page-aligned */
8331 if (((uintptr_t)addr
& PAGEOFFSET
) != 0)
8334 if ((len
& PAGEOFFSET
) != 0)
8338 * For longterm locking a driver callback must be specified; if
8339 * not longterm then a callback is optional.
8341 if (ops_vector
!= NULL
) {
8342 if (ops_vector
->cbo_umem_callback_version
!=
8343 UMEM_CALLBACK_VERSION
)
8346 driver_callback
= ops_vector
->cbo_umem_lock_cleanup
;
8348 if ((driver_callback
== NULL
) && (flags
& DDI_UMEMLOCK_LONGTERM
))
8352 * Call i_ddi_umem_unlock_thread_start if necessary. It will
8353 * be called on first ddi_umem_lock or umem_lockmemory call.
8355 if (ddi_umem_unlock_thread
== NULL
)
8356 i_ddi_umem_unlock_thread_start();
8358 /* Allocate memory for the cookie */
8359 p
= kmem_zalloc(sizeof (struct ddi_umem_cookie
), KM_SLEEP
);
8361 /* Convert the flags to seg_rw type */
8362 if (flags
& DDI_UMEMLOCK_WRITE
) {
8363 p
->s_flags
= S_WRITE
;
8365 p
->s_flags
= S_READ
;
8368 /* Store procp in cookie for later iosetup/unlock */
8369 p
->procp
= (void *)procp
;
8372 * Store the struct as pointer in cookie for later use by
8373 * ddi_umem_unlock. The proc->p_as will be stale if ddi_umem_unlock
8374 * is called after relvm is called.
8379 * The size field is needed for lockmem accounting.
8382 init_lockedmem_rctl_flag(p
);
8384 if (umem_incr_devlockmem(p
) != 0) {
8386 * The requested memory cannot be locked
8388 kmem_free(p
, sizeof (struct ddi_umem_cookie
));
8389 *cookie
= (ddi_umem_cookie_t
)NULL
;
8393 /* Lock the pages corresponding to addr, len in memory */
8394 error
= as_pagelock(as
, &(p
->pparray
), addr
, len
, p
->s_flags
);
8396 umem_decr_devlockmem(p
);
8397 kmem_free(p
, sizeof (struct ddi_umem_cookie
));
8398 *cookie
= (ddi_umem_cookie_t
)NULL
;
8403 * For longterm locking the addr must pertain to a seg_vn segment or
8404 * or a seg_spt segment.
8405 * If the segment pertains to a regular file, it cannot be
8406 * mapped MAP_SHARED.
8407 * This is to prevent a deadlock if a file truncation is attempted
8408 * after the locking is done.
8409 * Doing this after as_pagelock guarantees persistence of the as; if
8410 * an unacceptable segment is found, the cleanup includes calling
8411 * as_pageunlock before returning EFAULT.
8413 * segdev is allowed here as it is already locked. This allows
8414 * for memory exported by drivers through mmap() (which is already
8415 * locked) to be allowed for LONGTERM.
8417 if (flags
& DDI_UMEMLOCK_LONGTERM
) {
8418 extern struct seg_ops segspt_shmops
;
8419 extern struct seg_ops segdev_ops
;
8420 AS_LOCK_ENTER(as
, &as
->a_lock
, RW_READER
);
8421 for (seg
= as_segat(as
, addr
); ; seg
= AS_SEGNEXT(as
, seg
)) {
8422 if (seg
== NULL
|| seg
->s_base
> addr
+ len
)
8424 if (seg
->s_ops
== &segdev_ops
)
8426 if (((seg
->s_ops
!= &segvn_ops
) &&
8427 (seg
->s_ops
!= &segspt_shmops
)) ||
8428 ((SEGOP_GETVP(seg
, addr
, &vp
) == 0 &&
8429 vp
!= NULL
&& vp
->v_type
== VREG
) &&
8430 (SEGOP_GETTYPE(seg
, addr
) & MAP_SHARED
))) {
8431 as_pageunlock(as
, p
->pparray
,
8432 addr
, len
, p
->s_flags
);
8433 AS_LOCK_EXIT(as
, &as
->a_lock
);
8434 umem_decr_devlockmem(p
);
8435 kmem_free(p
, sizeof (struct ddi_umem_cookie
));
8436 *cookie
= (ddi_umem_cookie_t
)NULL
;
8440 AS_LOCK_EXIT(as
, &as
->a_lock
);
8444 /* Initialize the fields in the ddi_umem_cookie */
8446 p
->type
= UMEM_LOCKED
;
8447 if (driver_callback
!= NULL
) {
8448 /* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
8450 p
->callbacks
= *ops_vector
;
8452 /* only i_ddi_umme_unlock needs the cookie */
8456 *cookie
= (ddi_umem_cookie_t
)p
;
8459 * If a driver callback was specified, add an entry to the
8460 * as struct callback list. The as_pagelock above guarantees
8461 * the persistence of as.
8463 if (driver_callback
) {
8464 error
= as_add_callback(as
, umem_lock_undo
, p
, AS_ALL_EVENT
,
8465 addr
, len
, KM_SLEEP
);
8467 as_pageunlock(as
, p
->pparray
,
8468 addr
, len
, p
->s_flags
);
8469 umem_decr_devlockmem(p
);
8470 kmem_free(p
, sizeof (struct ddi_umem_cookie
));
8471 *cookie
= (ddi_umem_cookie_t
)NULL
;
8478 * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
8479 * the cookie. Called from i_ddi_umem_unlock_thread.
8483 i_ddi_umem_unlock(struct ddi_umem_cookie
*p
)
8488 * There is no way to determine whether a callback to
8489 * umem_lock_undo was registered via as_add_callback.
8490 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
8491 * a valid callback function structure.) as_delete_callback
8492 * is called to delete a possible registered callback. If the
8493 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
8494 * indicates that there was a callback registered, and that is was
8495 * successfully deleted. Thus, the cookie reference count
8496 * will never be decremented by umem_lock_undo. Just return the
8497 * memory for the cookie, since both users of the cookie are done.
8498 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
8499 * never registered. A return of AS_CALLBACK_DELETE_DEFERRED
8500 * indicates that callback processing is taking place and, and
8501 * umem_lock_undo is, or will be, executing, and thus decrementing
8502 * the cookie reference count when it is complete.
8504 * This needs to be done before as_pageunlock so that the
8505 * persistence of as is guaranteed because of the locked pages.
8508 rc
= as_delete_callback(p
->asp
, p
);
8512 * The proc->p_as will be stale if i_ddi_umem_unlock is called
8513 * after relvm is called so use p->asp.
8515 as_pageunlock(p
->asp
, p
->pparray
, p
->cvaddr
, p
->size
, p
->s_flags
);
8518 * Now that we have unlocked the memory decrement the
8519 * *.max-locked-memory rctl
8521 umem_decr_devlockmem(p
);
8523 if (rc
== AS_CALLBACK_DELETED
) {
8524 /* umem_lock_undo will not happen, return the cookie memory */
8525 ASSERT(p
->cook_refcnt
== 2);
8526 kmem_free(p
, sizeof (struct ddi_umem_cookie
));
8529 * umem_undo_lock may happen if as_delete_callback returned
8530 * AS_CALLBACK_DELETE_DEFERRED. In that case, decrement the
8531 * reference count, atomically, and return the cookie
8532 * memory if the reference count goes to zero. The only
8533 * other value for rc is AS_CALLBACK_NOTFOUND. In that
8534 * case, just return the cookie memory.
8536 if ((rc
!= AS_CALLBACK_DELETE_DEFERRED
) ||
8537 (atomic_add_long_nv((ulong_t
*)(&(p
->cook_refcnt
)), -1)
8539 kmem_free(p
, sizeof (struct ddi_umem_cookie
));
8545 * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
8547 * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
8548 * until it is empty. Then, wait for more to be added. This thread is awoken
8549 * via calls to ddi_umem_unlock.
8553 i_ddi_umem_unlock_thread(void)
8555 struct ddi_umem_cookie
*ret_cookie
;
8556 callb_cpr_t cprinfo
;
8558 /* process the ddi_umem_unlock list */
8559 CALLB_CPR_INIT(&cprinfo
, &ddi_umem_unlock_mutex
,
8560 callb_generic_cpr
, "unlock_thread");
8562 mutex_enter(&ddi_umem_unlock_mutex
);
8563 if (ddi_umem_unlock_head
!= NULL
) { /* list not empty */
8564 ret_cookie
= ddi_umem_unlock_head
;
8565 /* take if off the list */
8566 if ((ddi_umem_unlock_head
=
8567 ddi_umem_unlock_head
->unl_forw
) == NULL
) {
8568 ddi_umem_unlock_tail
= NULL
;
8570 mutex_exit(&ddi_umem_unlock_mutex
);
8571 /* unlock the pages in this cookie */
8572 (void) i_ddi_umem_unlock(ret_cookie
);
8573 } else { /* list is empty, wait for next ddi_umem_unlock */
8574 CALLB_CPR_SAFE_BEGIN(&cprinfo
);
8575 cv_wait(&ddi_umem_unlock_cv
, &ddi_umem_unlock_mutex
);
8576 CALLB_CPR_SAFE_END(&cprinfo
, &ddi_umem_unlock_mutex
);
8577 mutex_exit(&ddi_umem_unlock_mutex
);
8580 /* ddi_umem_unlock_thread does not exit */
8585 * Start the thread that will process the ddi_umem_unlock list if it is
8586 * not already started (i_ddi_umem_unlock_thread).
8589 i_ddi_umem_unlock_thread_start(void)
8591 mutex_enter(&ddi_umem_unlock_mutex
);
8592 if (ddi_umem_unlock_thread
== NULL
) {
8593 ddi_umem_unlock_thread
= thread_create(NULL
, 0,
8594 i_ddi_umem_unlock_thread
, NULL
, 0, &p0
,
8595 TS_RUN
, minclsyspri
);
8597 mutex_exit(&ddi_umem_unlock_mutex
);
8601 * Lock the virtual address range in the current process and create a
8602 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8603 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8606 * Note: The resource control accounting currently uses a full charge model
8607 * in other words attempts to lock the same/overlapping areas of memory
8608 * will deduct the full size of the buffer from the projects running
8609 * counter for the device locked memory. This applies to umem_lockmemory too.
8611 * addr, size should be PAGESIZE aligned
8612 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8613 * identifies whether the locked memory will be read or written or both
8615 * Returns 0 on success
8616 * EINVAL - for invalid parameters
8617 * EPERM, ENOMEM and other error codes returned by as_pagelock
8618 * ENOMEM - is returned if the current request to lock memory exceeds
8619 * *.max-locked-memory resource control value.
8620 * EAGAIN - could not start the ddi_umem_unlock list processing thread
8623 ddi_umem_lock(caddr_t addr
, size_t len
, int flags
, ddi_umem_cookie_t
*cookie
)
8626 struct ddi_umem_cookie
*p
;
8628 *cookie
= NULL
; /* in case of any error return */
8630 /* These are the only two valid flags */
8631 if ((flags
& ~(DDI_UMEMLOCK_READ
| DDI_UMEMLOCK_WRITE
)) != 0) {
8635 /* At least one of the two flags (or both) must be set */
8636 if ((flags
& (DDI_UMEMLOCK_READ
| DDI_UMEMLOCK_WRITE
)) == 0) {
8640 /* addr and len must be page-aligned */
8641 if (((uintptr_t)addr
& PAGEOFFSET
) != 0) {
8645 if ((len
& PAGEOFFSET
) != 0) {
8650 * Call i_ddi_umem_unlock_thread_start if necessary. It will
8651 * be called on first ddi_umem_lock or umem_lockmemory call.
8653 if (ddi_umem_unlock_thread
== NULL
)
8654 i_ddi_umem_unlock_thread_start();
8656 /* Allocate memory for the cookie */
8657 p
= kmem_zalloc(sizeof (struct ddi_umem_cookie
), KM_SLEEP
);
8659 /* Convert the flags to seg_rw type */
8660 if (flags
& DDI_UMEMLOCK_WRITE
) {
8661 p
->s_flags
= S_WRITE
;
8663 p
->s_flags
= S_READ
;
8666 /* Store curproc in cookie for later iosetup/unlock */
8667 p
->procp
= (void *)curproc
;
8670 * Store the struct as pointer in cookie for later use by
8671 * ddi_umem_unlock. The proc->p_as will be stale if ddi_umem_unlock
8672 * is called after relvm is called.
8674 p
->asp
= curproc
->p_as
;
8676 * The size field is needed for lockmem accounting.
8679 init_lockedmem_rctl_flag(p
);
8681 if (umem_incr_devlockmem(p
) != 0) {
8683 * The requested memory cannot be locked
8685 kmem_free(p
, sizeof (struct ddi_umem_cookie
));
8686 *cookie
= (ddi_umem_cookie_t
)NULL
;
8690 /* Lock the pages corresponding to addr, len in memory */
8691 error
= as_pagelock(((proc_t
*)p
->procp
)->p_as
, &(p
->pparray
),
8692 addr
, len
, p
->s_flags
);
8694 umem_decr_devlockmem(p
);
8695 kmem_free(p
, sizeof (struct ddi_umem_cookie
));
8696 *cookie
= (ddi_umem_cookie_t
)NULL
;
8700 /* Initialize the fields in the ddi_umem_cookie */
8702 p
->type
= UMEM_LOCKED
;
8705 *cookie
= (ddi_umem_cookie_t
)p
;
8710 * Add the cookie to the ddi_umem_unlock list. Pages will be
8711 * unlocked by i_ddi_umem_unlock_thread.
8715 ddi_umem_unlock(ddi_umem_cookie_t cookie
)
8717 struct ddi_umem_cookie
*p
= (struct ddi_umem_cookie
*)cookie
;
8719 ASSERT(p
->type
== UMEM_LOCKED
);
8720 ASSERT(CPU_ON_INTR(CPU
) == 0); /* cannot be high level */
8721 ASSERT(ddi_umem_unlock_thread
!= NULL
);
8723 p
->unl_forw
= (struct ddi_umem_cookie
*)NULL
; /* end of list */
8725 * Queue the unlock request and notify i_ddi_umem_unlock thread
8726 * if it's called in the interrupt context. Otherwise, unlock pages
8729 if (servicing_interrupt()) {
8730 /* queue the unlock request and notify the thread */
8731 mutex_enter(&ddi_umem_unlock_mutex
);
8732 if (ddi_umem_unlock_head
== NULL
) {
8733 ddi_umem_unlock_head
= ddi_umem_unlock_tail
= p
;
8734 cv_broadcast(&ddi_umem_unlock_cv
);
8736 ddi_umem_unlock_tail
->unl_forw
= p
;
8737 ddi_umem_unlock_tail
= p
;
8739 mutex_exit(&ddi_umem_unlock_mutex
);
8741 /* unlock the pages right away */
8742 (void) i_ddi_umem_unlock(p
);
8747 * Create a buf structure from a ddi_umem_cookie
8748 * cookie - is a ddi_umem_cookie for from ddi_umem_lock and ddi_umem_alloc
8749 * (only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
8750 * off, len - identifies the portion of the memory represented by the cookie
8751 * that the buf points to.
8752 * NOTE: off, len need to follow the alignment/size restrictions of the
8753 * device (dev) that this buf will be passed to. Some devices
8754 * will accept unrestricted alignment/size, whereas others (such as
8755 * st) require some block-size alignment/size. It is the caller's
8756 * responsibility to ensure that the alignment/size restrictions
8757 * are met (we cannot assert as we do not know the restrictions)
8759 * direction - is one of B_READ or B_WRITE and needs to be compatible with
8760 * the flags used in ddi_umem_lock
8762 * The following three arguments are used to initialize fields in the
8763 * buf structure and are uninterpreted by this routine.
8769 * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
8771 * Returns a buf structure pointer on success (to be freed by freerbuf)
8772 * NULL on any parameter error or memory alloc failure
8776 ddi_umem_iosetup(ddi_umem_cookie_t cookie
, off_t off
, size_t len
,
8777 int direction
, dev_t dev
, daddr_t blkno
,
8778 int (*iodone
)(struct buf
*), int sleepflag
)
8780 struct ddi_umem_cookie
*p
= (struct ddi_umem_cookie
*)cookie
;
8784 * check for valid cookie offset, len
8786 if ((off
+ len
) > p
->size
) {
8790 if (len
> p
->size
) {
8794 /* direction has to be one of B_READ or B_WRITE */
8795 if ((direction
!= B_READ
) && (direction
!= B_WRITE
)) {
8799 /* These are the only two valid sleepflags */
8800 if ((sleepflag
!= DDI_UMEM_SLEEP
) && (sleepflag
!= DDI_UMEM_NOSLEEP
)) {
8805 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
8807 if ((p
->type
!= UMEM_LOCKED
) && (p
->type
!= KMEM_NON_PAGEABLE
)) {
8811 /* If type is KMEM_NON_PAGEABLE procp is NULL */
8812 ASSERT((p
->type
== KMEM_NON_PAGEABLE
) ?
8813 (p
->procp
== NULL
) : (p
->procp
!= NULL
));
8815 bp
= kmem_alloc(sizeof (struct buf
), sleepflag
);
8821 bp
->b_flags
= B_BUSY
| B_PHYS
| direction
;
8823 bp
->b_lblkno
= blkno
;
8824 bp
->b_iodone
= iodone
;
8826 bp
->b_proc
= (proc_t
*)p
->procp
;
8827 ASSERT(((uintptr_t)(p
->cvaddr
) & PAGEOFFSET
) == 0);
8828 bp
->b_un
.b_addr
= (caddr_t
)((uintptr_t)(p
->cvaddr
) + off
);
8829 if (p
->pparray
!= NULL
) {
8830 bp
->b_flags
|= B_SHADOW
;
8831 ASSERT(((uintptr_t)(p
->cvaddr
) & PAGEOFFSET
) == 0);
8832 bp
->b_shadow
= p
->pparray
+ btop(off
);
8838 * Fault-handling and related routines
8842 ddi_get_devstate(dev_info_t
*dip
)
8844 if (DEVI_IS_DEVICE_OFFLINE(dip
))
8845 return (DDI_DEVSTATE_OFFLINE
);
8846 else if (DEVI_IS_DEVICE_DOWN(dip
) || DEVI_IS_BUS_DOWN(dip
))
8847 return (DDI_DEVSTATE_DOWN
);
8848 else if (DEVI_IS_BUS_QUIESCED(dip
))
8849 return (DDI_DEVSTATE_QUIESCED
);
8850 else if (DEVI_IS_DEVICE_DEGRADED(dip
))
8851 return (DDI_DEVSTATE_DEGRADED
);
8853 return (DDI_DEVSTATE_UP
);
8857 ddi_dev_report_fault(dev_info_t
*dip
, ddi_fault_impact_t impact
,
8858 ddi_fault_location_t location
, const char *message
)
8860 struct ddi_fault_event_data fd
;
8861 ddi_eventcookie_t ec
;
8864 * Assemble all the information into a fault-event-data structure
8867 fd
.f_impact
= impact
;
8868 fd
.f_location
= location
;
8869 fd
.f_message
= message
;
8870 fd
.f_oldstate
= ddi_get_devstate(dip
);
8873 * Get eventcookie from defining parent.
8875 if (ddi_get_eventcookie(dip
, DDI_DEVI_FAULT_EVENT
, &ec
) !=
8879 (void) ndi_post_event(dip
, dip
, ec
, &fd
);
8883 i_ddi_devi_class(dev_info_t
*dip
)
8885 return (DEVI(dip
)->devi_device_class
);
8889 i_ddi_set_devi_class(dev_info_t
*dip
, char *devi_class
, int flag
)
8891 struct dev_info
*devi
= DEVI(dip
);
8893 mutex_enter(&devi
->devi_lock
);
8895 if (devi
->devi_device_class
)
8896 kmem_free(devi
->devi_device_class
,
8897 strlen(devi
->devi_device_class
) + 1);
8899 if ((devi
->devi_device_class
= i_ddi_strdup(devi_class
, flag
))
8901 mutex_exit(&devi
->devi_lock
);
8902 return (DDI_SUCCESS
);
8905 mutex_exit(&devi
->devi_lock
);
8907 return (DDI_FAILURE
);
8912 * Task Queues DDI interfaces.
8917 ddi_taskq_create(dev_info_t
*dip
, const char *name
, int nthreads
,
8918 pri_t pri
, uint_t cflags
)
8920 char full_name
[TASKQ_NAMELEN
];
8921 const char *tq_name
;
8927 nodeid
= ddi_get_instance(dip
);
8932 (void) snprintf(full_name
, sizeof (full_name
), "%s_%s",
8933 ddi_driver_name(dip
), name
);
8935 tq_name
= full_name
;
8938 return ((ddi_taskq_t
*)taskq_create_instance(tq_name
, nodeid
, nthreads
,
8939 pri
== TASKQ_DEFAULTPRI
? minclsyspri
: pri
,
8940 nthreads
, INT_MAX
, TASKQ_PREPOPULATE
));
8944 ddi_taskq_destroy(ddi_taskq_t
*tq
)
8946 taskq_destroy((taskq_t
*)tq
);
8950 ddi_taskq_dispatch(ddi_taskq_t
*tq
, void (* func
)(void *),
8951 void *arg
, uint_t dflags
)
8953 taskqid_t id
= taskq_dispatch((taskq_t
*)tq
, func
, arg
,
8954 dflags
== DDI_SLEEP
? TQ_SLEEP
: TQ_NOSLEEP
);
8956 return (id
!= 0 ? DDI_SUCCESS
: DDI_FAILURE
);
8960 ddi_taskq_wait(ddi_taskq_t
*tq
)
8962 taskq_wait((taskq_t
*)tq
);
8966 ddi_taskq_suspend(ddi_taskq_t
*tq
)
8968 taskq_suspend((taskq_t
*)tq
);
8972 ddi_taskq_suspended(ddi_taskq_t
*tq
)
8974 return (taskq_suspended((taskq_t
*)tq
));
8978 ddi_taskq_resume(ddi_taskq_t
*tq
)
8980 taskq_resume((taskq_t
*)tq
);
8992 boolean_t nonum
= B_TRUE
;
8996 for (p
= ifname
+ l
; p
!= ifname
; l
--) {
8999 (void) strlcpy(alnum
, ifname
, l
+ 1);
9000 if (ddi_strtoul(p
+ 1, NULL
, 10, &num
) != 0)
9001 return (DDI_FAILURE
);
9006 if (l
== 0 || nonum
)
9007 return (DDI_FAILURE
);
9010 return (DDI_SUCCESS
);
9014 * Default initialization function for drivers that don't need to quiesce.
9018 ddi_quiesce_not_needed(dev_info_t
*dip
)
9020 return (DDI_SUCCESS
);
9024 * Initialization function for drivers that should implement quiesce()
9029 ddi_quiesce_not_supported(dev_info_t
*dip
)
9031 return (DDI_FAILURE
);
9035 ddi_strdup(const char *str
, int flag
)
9040 ASSERT(str
!= NULL
);
9041 ASSERT((flag
== KM_SLEEP
) || (flag
== KM_NOSLEEP
));
9044 if ((ptr
= kmem_alloc(n
+ 1, flag
)) == NULL
)
9046 bcopy(str
, ptr
, n
+ 1);
9051 strdup(const char *str
)
9053 return (ddi_strdup(str
, KM_SLEEP
));
9059 ASSERT(str
!= NULL
);
9060 kmem_free(str
, strlen(str
) + 1);
9064 * Generic DDI callback interfaces.
9068 ddi_cb_register(dev_info_t
*dip
, ddi_cb_flags_t flags
, ddi_cb_func_t cbfunc
,
9069 void *arg1
, void *arg2
, ddi_cb_handle_t
*ret_hdlp
)
9073 ASSERT(dip
!= NULL
);
9074 ASSERT(DDI_CB_FLAG_VALID(flags
));
9075 ASSERT(cbfunc
!= NULL
);
9076 ASSERT(ret_hdlp
!= NULL
);
9078 /* Sanity check the context */
9079 ASSERT(!servicing_interrupt());
9080 if (servicing_interrupt())
9081 return (DDI_FAILURE
);
9083 /* Validate parameters */
9084 if ((dip
== NULL
) || !DDI_CB_FLAG_VALID(flags
) ||
9085 (cbfunc
== NULL
) || (ret_hdlp
== NULL
))
9086 return (DDI_EINVAL
);
9088 /* Check for previous registration */
9089 if (DEVI(dip
)->devi_cb_p
!= NULL
)
9090 return (DDI_EALREADY
);
9092 /* Allocate and initialize callback */
9093 cbp
= kmem_zalloc(sizeof (ddi_cb_t
), KM_SLEEP
);
9095 cbp
->cb_func
= cbfunc
;
9096 cbp
->cb_arg1
= arg1
;
9097 cbp
->cb_arg2
= arg2
;
9098 cbp
->cb_flags
= flags
;
9099 DEVI(dip
)->devi_cb_p
= cbp
;
9101 /* If adding an IRM callback, notify IRM */
9102 if (flags
& DDI_CB_FLAG_INTR
)
9103 i_ddi_irm_set_cb(dip
, B_TRUE
);
9105 *ret_hdlp
= (ddi_cb_handle_t
)&(DEVI(dip
)->devi_cb_p
);
9106 return (DDI_SUCCESS
);
9110 ddi_cb_unregister(ddi_cb_handle_t hdl
)
9115 ASSERT(hdl
!= NULL
);
9117 /* Sanity check the context */
9118 ASSERT(!servicing_interrupt());
9119 if (servicing_interrupt())
9120 return (DDI_FAILURE
);
9122 /* Validate parameters */
9123 if ((hdl
== NULL
) || ((cbp
= *(ddi_cb_t
**)hdl
) == NULL
) ||
9124 ((dip
= cbp
->cb_dip
) == NULL
))
9125 return (DDI_EINVAL
);
9127 /* If removing an IRM callback, notify IRM */
9128 if (cbp
->cb_flags
& DDI_CB_FLAG_INTR
)
9129 i_ddi_irm_set_cb(dip
, B_FALSE
);
9131 /* Destroy the callback */
9132 kmem_free(cbp
, sizeof (ddi_cb_t
));
9133 DEVI(dip
)->devi_cb_p
= NULL
;
9135 return (DDI_SUCCESS
);
9139 * Platform independent DR routines
9171 * Prom tree node list
9175 struct ptnode
*next
;
9179 * Prom tree walk arg
9186 struct ptnode
*head
;
9190 visit_node(pnode_t nodeid
, struct pta
*ap
)
9192 struct ptnode
**nextp
;
9193 int (*select
)(pnode_t
, void *, uint_t
);
9195 ASSERT(nodeid
!= OBP_NONODE
&& nodeid
!= OBP_BADNODE
);
9197 select
= ap
->bp
->create
.prom_branch_select
;
9201 if (select(nodeid
, ap
->bp
->arg
, 0) == DDI_SUCCESS
) {
9203 for (nextp
= &ap
->head
; *nextp
; nextp
= &(*nextp
)->next
)
9206 *nextp
= kmem_zalloc(sizeof (struct ptnode
), KM_SLEEP
);
9208 (*nextp
)->nodeid
= nodeid
;
9211 if ((ap
->flags
& DEVI_BRANCH_CHILD
) == DEVI_BRANCH_CHILD
)
9214 nodeid
= prom_childnode(nodeid
);
9215 while (nodeid
!= OBP_NONODE
&& nodeid
!= OBP_BADNODE
) {
9216 visit_node(nodeid
, ap
);
9217 nodeid
= prom_nextnode(nodeid
);
9222 * NOTE: The caller of this function must check for device contracts
9223 * or LDI callbacks against this dip before setting the dip offline.
9226 set_infant_dip_offline(dev_info_t
*dip
, void *arg
)
9228 char *path
= (char *)arg
;
9233 if (i_ddi_node_state(dip
) >= DS_ATTACHED
) {
9234 (void) ddi_pathname(dip
, path
);
9235 cmn_err(CE_WARN
, "Attempt to set offline flag on attached "
9237 return (DDI_FAILURE
);
9240 mutex_enter(&(DEVI(dip
)->devi_lock
));
9241 if (!DEVI_IS_DEVICE_OFFLINE(dip
))
9242 DEVI_SET_DEVICE_OFFLINE(dip
);
9243 mutex_exit(&(DEVI(dip
)->devi_lock
));
9245 return (DDI_SUCCESS
);
9248 typedef struct result
{
9254 dip_set_offline(dev_info_t
*dip
, void *arg
)
9257 result_t
*resp
= (result_t
*)arg
;
9263 * We stop the walk if e_ddi_offline_notify() returns
9264 * failure, because this implies that one or more consumers
9265 * (either LDI or contract based) has blocked the offline.
9266 * So there is no point in conitnuing the walk
9268 if (e_ddi_offline_notify(dip
) == DDI_FAILURE
) {
9269 resp
->result
= DDI_FAILURE
;
9270 return (DDI_WALK_TERMINATE
);
9274 * If set_infant_dip_offline() returns failure, it implies
9275 * that we failed to set a particular dip offline. This
9276 * does not imply that the offline as a whole should fail.
9277 * We want to do the best we can, so we continue the walk.
9279 if (set_infant_dip_offline(dip
, resp
->path
) == DDI_SUCCESS
)
9284 e_ddi_offline_finalize(dip
, end
);
9286 return (DDI_WALK_CONTINUE
);
9290 * The call to e_ddi_offline_notify() exists for the
9291 * unlikely error case that a branch we are trying to
9292 * create already exists and has device contracts or LDI
9293 * event callbacks against it.
9295 * We allow create to succeed for such branches only if
9296 * no constraints block the offline.
9299 branch_set_offline(dev_info_t
*dip
, char *path
)
9306 if (e_ddi_offline_notify(dip
) == DDI_FAILURE
) {
9307 return (DDI_FAILURE
);
9310 if (set_infant_dip_offline(dip
, path
) == DDI_SUCCESS
)
9315 e_ddi_offline_finalize(dip
, end
);
9317 if (end
== DDI_FAILURE
)
9318 return (DDI_FAILURE
);
9320 res
.result
= DDI_SUCCESS
;
9323 ndi_devi_enter(dip
, &circ
);
9324 ddi_walk_devs(ddi_get_child(dip
), dip_set_offline
, &res
);
9325 ndi_devi_exit(dip
, circ
);
9327 return (res
.result
);
9332 create_prom_branch(void *arg
, int has_changed
)
9339 struct pta
*ap
= arg
;
9344 ASSERT(ap
->fdip
== NULL
);
9345 ASSERT(ap
->pdip
&& ndi_dev_is_prom_node(ap
->pdip
));
9349 nodeid
= ddi_get_nodeid(ap
->pdip
);
9350 if (nodeid
== OBP_NONODE
|| nodeid
== OBP_BADNODE
) {
9351 cmn_err(CE_WARN
, "create_prom_branch: invalid "
9352 "nodeid: 0x%x", nodeid
);
9358 nodeid
= prom_childnode(nodeid
);
9359 while (nodeid
!= OBP_NONODE
&& nodeid
!= OBP_BADNODE
) {
9360 visit_node(nodeid
, ap
);
9361 nodeid
= prom_nextnode(nodeid
);
9364 if (ap
->head
== NULL
)
9367 path
= kmem_alloc(MAXPATHLEN
, KM_SLEEP
);
9369 while ((tnp
= ap
->head
) != NULL
) {
9370 ap
->head
= tnp
->next
;
9372 ndi_devi_enter(ap
->pdip
, &circ
);
9375 * Check if the branch already exists.
9378 dip
= e_ddi_nodeid_to_dip(tnp
->nodeid
);
9382 /* Parent is held busy, so release hold */
9385 cmn_err(CE_WARN
, "create_prom_branch: dip(%p) exists"
9386 " for nodeid 0x%x", (void *)dip
, tnp
->nodeid
);
9389 dip
= i_ddi_create_branch(ap
->pdip
, tnp
->nodeid
);
9392 kmem_free(tnp
, sizeof (struct ptnode
));
9395 * Hold the branch if it is not already held
9397 if (dip
&& !exists
) {
9398 e_ddi_branch_hold(dip
);
9401 ASSERT(dip
== NULL
|| e_ddi_branch_held(dip
));
9404 * Set all dips in the newly created branch offline so that
9405 * only a "configure" operation can attach
9408 if (dip
== NULL
|| branch_set_offline(dip
, path
)
9410 ndi_devi_exit(ap
->pdip
, circ
);
9415 ASSERT(ddi_get_parent(dip
) == ap
->pdip
);
9417 ndi_devi_exit(ap
->pdip
, circ
);
9419 if (ap
->flags
& DEVI_BRANCH_CONFIGURE
) {
9420 int error
= e_ddi_branch_configure(dip
, &ap
->fdip
, 0);
9421 if (error
&& rv
== 0)
9426 * Invoke devi_branch_callback() (if it exists) only for
9427 * newly created branches
9429 if (bp
->devi_branch_callback
&& !exists
)
9430 bp
->devi_branch_callback(dip
, bp
->arg
, 0);
9433 kmem_free(path
, MAXPATHLEN
);
9439 sid_node_create(dev_info_t
*pdip
, devi_branch_t
*bp
, dev_info_t
**rdipp
)
9446 static const char *noname
= "<none>";
9449 ASSERT(DEVI_BUSY_OWNED(pdip
));
9454 * Creating the root of a branch ?
9458 flags
= DEVI_BRANCH_ROOT
;
9461 ndi_devi_alloc_sleep(pdip
, (char *)noname
, DEVI_SID_NODEID
, &dip
);
9462 rv
= bp
->create
.sid_branch_create(dip
, bp
->arg
, flags
);
9464 nbuf
= kmem_alloc(OBP_MAXDRVNAME
, KM_SLEEP
);
9466 if (rv
== DDI_WALK_ERROR
) {
9467 cmn_err(CE_WARN
, "e_ddi_branch_create: Error setting"
9468 " properties on devinfo node %p", (void *)dip
);
9472 len
= OBP_MAXDRVNAME
;
9473 if (ddi_getlongprop_buf(DDI_DEV_T_ANY
, dip
,
9474 DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
, "name", nbuf
, &len
)
9475 != DDI_PROP_SUCCESS
) {
9476 cmn_err(CE_WARN
, "e_ddi_branch_create: devinfo node %p has"
9477 "no name property", (void *)dip
);
9481 ASSERT(i_ddi_node_state(dip
) == DS_PROTO
);
9482 if (ndi_devi_set_nodename(dip
, nbuf
, 0) != NDI_SUCCESS
) {
9483 cmn_err(CE_WARN
, "e_ddi_branch_create: cannot set name (%s)"
9484 " for devinfo node %p", nbuf
, (void *)dip
);
9488 kmem_free(nbuf
, OBP_MAXDRVNAME
);
9491 * Ignore bind failures just like boot does
9493 (void) ndi_devi_bind_driver(dip
, 0);
9496 case DDI_WALK_CONTINUE
:
9497 case DDI_WALK_PRUNESIB
:
9498 ndi_devi_enter(dip
, &circ
);
9500 i
= DDI_WALK_CONTINUE
;
9501 for (; i
== DDI_WALK_CONTINUE
; ) {
9502 i
= sid_node_create(dip
, bp
, NULL
);
9505 ASSERT(i
== DDI_WALK_ERROR
|| i
== DDI_WALK_PRUNESIB
);
9506 if (i
== DDI_WALK_ERROR
)
9509 * If PRUNESIB stop creating siblings
9510 * of dip's child. Subsequent walk behavior
9511 * is determined by rv returned by dip.
9514 ndi_devi_exit(dip
, circ
);
9516 case DDI_WALK_TERMINATE
:
9518 * Don't create children and ask our parent
9519 * to not create siblings either.
9521 rv
= DDI_WALK_PRUNESIB
;
9523 case DDI_WALK_PRUNECHILD
:
9525 * Don't create children, but ask parent to continue
9528 rv
= DDI_WALK_CONTINUE
;
9539 * Set device offline - only the "configure" op should cause an attach.
9540 * Note that it is safe to set the dip offline without checking
9541 * for either device contract or layered driver (LDI) based constraints
9542 * since there cannot be any contracts or LDI opens of this device.
9543 * This is because this node is a newly created dip with the parent busy
9544 * held, so no other thread can come in and attach this dip. A dip that
9545 * has never been attached cannot have contracts since by definition
9546 * a device contract (an agreement between a process and a device minor
9547 * node) can only be created against a device that has minor nodes
9548 * i.e is attached. Similarly an LDI open will only succeed if the
9549 * dip is attached. We assert below that the dip is not attached.
9551 ASSERT(i_ddi_node_state(dip
) < DS_ATTACHED
);
9552 path
= kmem_alloc(MAXPATHLEN
, KM_SLEEP
);
9553 ret
= set_infant_dip_offline(dip
, path
);
9554 ASSERT(ret
== DDI_SUCCESS
);
9555 kmem_free(path
, MAXPATHLEN
);
9559 (void) ndi_devi_free(dip
);
9560 kmem_free(nbuf
, OBP_MAXDRVNAME
);
9561 return (DDI_WALK_ERROR
);
9571 int rv
= 0, state
= DDI_WALK_CONTINUE
;
9574 while (state
== DDI_WALK_CONTINUE
) {
9577 ndi_devi_enter(pdip
, &circ
);
9579 state
= sid_node_create(pdip
, bp
, &rdip
);
9581 ndi_devi_exit(pdip
, circ
);
9582 ASSERT(state
== DDI_WALK_ERROR
);
9586 e_ddi_branch_hold(rdip
);
9588 ndi_devi_exit(pdip
, circ
);
9590 if (flags
& DEVI_BRANCH_CONFIGURE
) {
9591 int error
= e_ddi_branch_configure(rdip
, dipp
, 0);
9592 if (error
&& rv
== 0)
9597 * devi_branch_callback() is optional
9599 if (bp
->devi_branch_callback
)
9600 bp
->devi_branch_callback(rdip
, bp
->arg
, 0);
9603 ASSERT(state
== DDI_WALK_ERROR
|| state
== DDI_WALK_PRUNESIB
);
9605 return (state
== DDI_WALK_ERROR
? EIO
: rv
);
9609 e_ddi_branch_create(
9615 int prom_devi
, sid_devi
, error
;
9617 if (pdip
== NULL
|| bp
== NULL
|| bp
->type
== 0)
9620 prom_devi
= (bp
->type
== DEVI_BRANCH_PROM
) ? 1 : 0;
9621 sid_devi
= (bp
->type
== DEVI_BRANCH_SID
) ? 1 : 0;
9623 if (prom_devi
&& bp
->create
.prom_branch_select
== NULL
)
9625 else if (sid_devi
&& bp
->create
.sid_branch_create
== NULL
)
9627 else if (!prom_devi
&& !sid_devi
)
9630 if (flags
& DEVI_BRANCH_EVENT
)
9634 struct pta pta
= {0};
9640 error
= prom_tree_access(create_prom_branch
, &pta
, NULL
);
9645 ndi_rele_devi(pta
.fdip
);
9647 error
= create_sid_branch(pdip
, bp
, dipp
, flags
);
9654 e_ddi_branch_configure(dev_info_t
*rdip
, dev_info_t
**dipp
, uint_t flags
)
9663 if (rdip
== NULL
|| flags
!= 0 || (flags
& DEVI_BRANCH_EVENT
))
9666 pdip
= ddi_get_parent(rdip
);
9668 ndi_hold_devi(pdip
);
9670 if (!e_ddi_branch_held(rdip
)) {
9671 ndi_rele_devi(pdip
);
9672 cmn_err(CE_WARN
, "e_ddi_branch_configure: "
9673 "dip(%p) not held", (void *)rdip
);
9677 if (i_ddi_node_state(rdip
) < DS_INITIALIZED
) {
9679 * First attempt to bind a driver. If we fail, return
9680 * success (On some platforms, dips for some device
9681 * types (CPUs) may not have a driver)
9683 if (ndi_devi_bind_driver(rdip
, 0) != NDI_SUCCESS
) {
9684 ndi_rele_devi(pdip
);
9688 if (ddi_initchild(pdip
, rdip
) != DDI_SUCCESS
) {
9694 ASSERT(i_ddi_node_state(rdip
) >= DS_INITIALIZED
);
9696 devnm
= kmem_alloc(MAXNAMELEN
+ 1, KM_SLEEP
);
9698 (void) ddi_deviname(rdip
, devnm
);
9700 if ((rv
= ndi_devi_config_one(pdip
, devnm
+1, &rdip
,
9701 NDI_DEVI_ONLINE
| NDI_CONFIG
)) == NDI_SUCCESS
) {
9702 /* release hold from ndi_devi_config_one() */
9703 ndi_rele_devi(rdip
);
9706 kmem_free(devnm
, MAXNAMELEN
+ 1);
9708 if (rv
!= NDI_SUCCESS
&& dipp
&& rdip
) {
9709 ndi_hold_devi(rdip
);
9712 ndi_rele_devi(pdip
);
9713 return (ndi2errno(rv
));
9717 e_ddi_branch_hold(dev_info_t
*rdip
)
9719 if (e_ddi_branch_held(rdip
)) {
9720 cmn_err(CE_WARN
, "e_ddi_branch_hold: branch already held");
9724 mutex_enter(&DEVI(rdip
)->devi_lock
);
9725 if ((DEVI(rdip
)->devi_flags
& DEVI_BRANCH_HELD
) == 0) {
9726 DEVI(rdip
)->devi_flags
|= DEVI_BRANCH_HELD
;
9727 DEVI(rdip
)->devi_ref
++;
9729 ASSERT(DEVI(rdip
)->devi_ref
> 0);
9730 mutex_exit(&DEVI(rdip
)->devi_lock
);
9734 e_ddi_branch_held(dev_info_t
*rdip
)
9738 mutex_enter(&DEVI(rdip
)->devi_lock
);
9739 if ((DEVI(rdip
)->devi_flags
& DEVI_BRANCH_HELD
) &&
9740 DEVI(rdip
)->devi_ref
> 0) {
9743 mutex_exit(&DEVI(rdip
)->devi_lock
);
9749 e_ddi_branch_rele(dev_info_t
*rdip
)
9751 mutex_enter(&DEVI(rdip
)->devi_lock
);
9752 DEVI(rdip
)->devi_flags
&= ~DEVI_BRANCH_HELD
;
9753 DEVI(rdip
)->devi_ref
--;
9754 mutex_exit(&DEVI(rdip
)->devi_lock
);
9758 e_ddi_branch_unconfigure(
9775 pdip
= ddi_get_parent(rdip
);
9780 * Check if caller holds pdip busy - can cause deadlocks during
9783 if (DEVI_BUSY_OWNED(pdip
)) {
9784 cmn_err(CE_WARN
, "e_ddi_branch_unconfigure: failed: parent"
9785 " devinfo node(%p) is busy held", (void *)pdip
);
9789 destroy
= (flags
& DEVI_BRANCH_DESTROY
) ? 1 : 0;
9791 devnm
= kmem_alloc(MAXNAMELEN
+ 1, KM_SLEEP
);
9793 ndi_devi_enter(pdip
, &circ
);
9794 (void) ddi_deviname(rdip
, devnm
);
9795 ndi_devi_exit(pdip
, circ
);
9798 * ddi_deviname() returns a component name with / prepended.
9800 (void) devfs_clean(pdip
, devnm
+ 1, DV_CLEAN_FORCE
);
9802 ndi_devi_enter(pdip
, &circ
);
9805 * Recreate device name as it may have changed state (init/uninit)
9806 * when parent busy lock was dropped for devfs_clean()
9808 (void) ddi_deviname(rdip
, devnm
);
9810 if (!e_ddi_branch_held(rdip
)) {
9811 kmem_free(devnm
, MAXNAMELEN
+ 1);
9812 ndi_devi_exit(pdip
, circ
);
9813 cmn_err(CE_WARN
, "e_ddi_%s_branch: dip(%p) not held",
9814 destroy
? "destroy" : "unconfigure", (void *)rdip
);
9819 * Release hold on the branch. This is ok since we are holding the
9820 * parent busy. If rdip is not removed, we must do a hold on the
9821 * branch before returning.
9823 e_ddi_branch_rele(rdip
);
9825 nflags
= NDI_DEVI_OFFLINE
;
9826 if (destroy
|| (flags
& DEVI_BRANCH_DESTROY
)) {
9827 nflags
|= NDI_DEVI_REMOVE
;
9830 nflags
|= NDI_UNCONFIG
; /* uninit but don't remove */
9833 if (flags
& DEVI_BRANCH_EVENT
)
9834 nflags
|= NDI_POST_EVENT
;
9836 if (i_ddi_devi_attached(pdip
) &&
9837 (i_ddi_node_state(rdip
) >= DS_INITIALIZED
)) {
9838 rv
= ndi_devi_unconfig_one(pdip
, devnm
+1, dipp
, nflags
);
9840 rv
= e_ddi_devi_unconfig(rdip
, dipp
, nflags
);
9841 if (rv
== NDI_SUCCESS
) {
9842 ASSERT(!destroy
|| ddi_get_child(rdip
) == NULL
);
9843 rv
= ndi_devi_offline(rdip
, nflags
);
9847 if (!destroy
|| rv
!= NDI_SUCCESS
) {
9848 /* The dip still exists, so do a hold */
9849 e_ddi_branch_hold(rdip
);
9852 kmem_free(devnm
, MAXNAMELEN
+ 1);
9853 ndi_devi_exit(pdip
, circ
);
9854 return (ndi2errno(rv
));
9858 e_ddi_branch_destroy(dev_info_t
*rdip
, dev_info_t
**dipp
, uint_t flag
)
9860 return (e_ddi_branch_unconfigure(rdip
, dipp
,
9861 flag
|DEVI_BRANCH_DESTROY
));
9865 * Number of chains for hash table
9867 #define NUMCHAINS 17
9875 mod_hash_t
*dv_hash
;
9877 int (*callback
)(dev_info_t
*, void *, uint_t
);
9882 visit_dip(dev_info_t
*dip
, void *arg
)
9884 uintptr_t sbusy
, dvbusy
, ref
;
9885 struct devi_busy
*bsp
= arg
;
9887 ASSERT(bsp
->callback
);
9890 * A dip cannot be busy if its reference count is 0
9892 if ((ref
= e_ddi_devi_holdcnt(dip
)) == 0) {
9893 return (bsp
->callback(dip
, bsp
->arg
, 0));
9896 if (mod_hash_find(bsp
->dv_hash
, dip
, (mod_hash_val_t
*)&dvbusy
))
9900 * To catch device opens currently maintained on specfs common snodes.
9902 if (mod_hash_find(bsp
->s_hash
, dip
, (mod_hash_val_t
*)&sbusy
))
9906 if (ref
< sbusy
|| ref
< dvbusy
) {
9907 cmn_err(CE_WARN
, "dip(%p): sopen = %lu, dvopen = %lu "
9908 "dip ref = %lu\n", (void *)dip
, sbusy
, dvbusy
, ref
);
9912 dvbusy
= (sbusy
> dvbusy
) ? sbusy
: dvbusy
;
9914 return (bsp
->callback(dip
, bsp
->arg
, dvbusy
));
9918 visit_snode(struct snode
*sp
, void *arg
)
9923 struct devi_busy
*bsp
= arg
;
9928 * The stable lock is held. This prevents
9929 * the snode and its associated dip from
9933 count
= spec_devi_open_count(sp
, &dip
);
9936 return (DDI_WALK_CONTINUE
);
9940 if (mod_hash_remove(bsp
->s_hash
, dip
, (mod_hash_val_t
*)&sbusy
))
9945 if (mod_hash_insert(bsp
->s_hash
, dip
, (mod_hash_val_t
)sbusy
)) {
9946 cmn_err(CE_WARN
, "%s: s_hash insert failed: dip=0x%p, "
9947 "sbusy = %lu", "e_ddi_branch_referenced",
9948 (void *)dip
, sbusy
);
9951 bsp
->s_total
+= count
;
9953 return (DDI_WALK_CONTINUE
);
9957 visit_dvnode(struct dv_node
*dv
, void *arg
)
9962 struct devi_busy
*bsp
= arg
;
9964 ASSERT(dv
&& dv
->dv_devi
);
9968 mutex_enter(&vp
->v_lock
);
9969 count
= vp
->v_count
;
9970 mutex_exit(&vp
->v_lock
);
9975 if (mod_hash_remove(bsp
->dv_hash
, dv
->dv_devi
,
9976 (mod_hash_val_t
*)&dvbusy
))
9981 if (mod_hash_insert(bsp
->dv_hash
, dv
->dv_devi
,
9982 (mod_hash_val_t
)dvbusy
)) {
9983 cmn_err(CE_WARN
, "%s: dv_hash insert failed: dip=0x%p, "
9984 "dvbusy=%lu", "e_ddi_branch_referenced",
9985 (void *)dv
->dv_devi
, dvbusy
);
9988 bsp
->dv_total
+= count
;
9992 * Returns reference count on success or -1 on failure.
9995 e_ddi_branch_referenced(
9997 int (*callback
)(dev_info_t
*dip
, void *arg
, uint_t ref
),
10003 struct devi_busy bsa
= {0};
10007 path
= kmem_alloc(MAXPATHLEN
, KM_SLEEP
);
10009 ndi_hold_devi(rdip
);
10011 pdip
= ddi_get_parent(rdip
);
10016 * Check if caller holds pdip busy - can cause deadlocks during
10019 if (!e_ddi_branch_held(rdip
) || DEVI_BUSY_OWNED(pdip
)) {
10020 cmn_err(CE_WARN
, "e_ddi_branch_referenced: failed: "
10021 "devinfo branch(%p) not held or parent busy held",
10023 ndi_rele_devi(rdip
);
10024 kmem_free(path
, MAXPATHLEN
);
10028 ndi_devi_enter(pdip
, &circ
);
10029 (void) ddi_pathname(rdip
, path
);
10030 ndi_devi_exit(pdip
, circ
);
10032 bsa
.dv_hash
= mod_hash_create_ptrhash("dv_node busy hash", NUMCHAINS
,
10033 mod_hash_null_valdtor
, sizeof (struct dev_info
));
10035 bsa
.s_hash
= mod_hash_create_ptrhash("snode busy hash", NUMCHAINS
,
10036 mod_hash_null_valdtor
, sizeof (struct snode
));
10038 if (devfs_walk(path
, visit_dvnode
, &bsa
)) {
10039 cmn_err(CE_WARN
, "e_ddi_branch_referenced: "
10040 "devfs walk failed for: %s", path
);
10041 kmem_free(path
, MAXPATHLEN
);
10042 bsa
.s_total
= bsa
.dv_total
= -1;
10046 kmem_free(path
, MAXPATHLEN
);
10049 * Walk the snode table to detect device opens, which are currently
10050 * maintained on specfs common snodes.
10052 spec_snode_walk(visit_snode
, &bsa
);
10054 if (callback
== NULL
)
10057 bsa
.callback
= callback
;
10060 if (visit_dip(rdip
, &bsa
) == DDI_WALK_CONTINUE
) {
10061 ndi_devi_enter(rdip
, &circ
);
10062 ddi_walk_devs(ddi_get_child(rdip
), visit_dip
, &bsa
);
10063 ndi_devi_exit(rdip
, circ
);
10067 ndi_rele_devi(rdip
);
10068 mod_hash_destroy_ptrhash(bsa
.s_hash
);
10069 mod_hash_destroy_ptrhash(bsa
.dv_hash
);
10070 return (bsa
.s_total
> bsa
.dv_total
? bsa
.s_total
: bsa
.dv_total
);