4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2014 Garrett D'Amore <garrett@damore.org>
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/systm.h>
37 #include <sys/model.h>
42 #include <sys/t_lock.h>
47 #include <vm/seg_vn.h>
48 #include <vm/seg_dev.h>
50 #include <sys/cmn_err.h>
51 #include <sys/cpuvar.h>
52 #include <sys/debug.h>
53 #include <sys/autoconf.h>
54 #include <sys/sunddi.h>
55 #include <sys/esunddi.h>
56 #include <sys/sunndi.h>
57 #include <sys/kstat.h>
59 #include <sys/ddi_impldefs.h> /* include implementation structure defs */
60 #include <sys/ndi_impldefs.h> /* include prototypes */
61 #include <sys/ddi_periodic.h>
62 #include <sys/hwconf.h>
63 #include <sys/pathname.h>
64 #include <sys/modctl.h>
66 #include <sys/devctl.h>
67 #include <sys/callb.h>
68 #include <sys/cladm.h>
69 #include <sys/sysevent.h>
70 #include <sys/dacf_impl.h>
71 #include <sys/ddidevmap.h>
72 #include <sys/bootconf.h>
74 #include <sys/atomic.h>
75 #include <sys/promif.h>
76 #include <sys/instance.h>
77 #include <sys/sysevent/eventdefs.h>
79 #include <sys/project.h>
80 #include <sys/taskq.h>
81 #include <sys/devpolicy.h>
82 #include <sys/ctype.h>
86 #include <sys/clock_impl.h>
88 #include <sys/modhash.h>
89 #include <sys/sunldi_impl.h>
90 #include <sys/fs/dv_node.h>
91 #include <sys/fs/snode.h>
/* Floor scheduling priority for kernel service threads (dispatcher-defined). */
extern pri_t minclsyspri;

/* Resource controls charged when memory is locked down, per project and zone. */
extern rctl_hndl_t rc_project_locked_mem;
extern rctl_hndl_t rc_zone_locked_mem;

/* Non-zero enables verbose sunddi debug output. */
static int sunddi_debug = 0;
/* ddi_umem_unlock miscellaneous */

/* Starts the background thread that drains the deferred-unlock list. */
static void i_ddi_umem_unlock_thread_start(void);

static kmutex_t ddi_umem_unlock_mutex; /* unlock list mutex */
static kcondvar_t ddi_umem_unlock_cv; /* unlock list block/unblock */
static kthread_t *ddi_umem_unlock_thread;
/*
 * The ddi_umem_unlock FIFO list. NULL head pointer indicates empty list.
 */
static struct ddi_umem_cookie *ddi_umem_unlock_head = NULL;
static struct ddi_umem_cookie *ddi_umem_unlock_tail = NULL;
/*
 * DDI(Sun) Function and flag definitions:
 */

/*
 * Used to indicate which entries were chosen from a range.
 */
char *chosen_reg = "chosen-reg";

/*
 * Function used to ring system console bell
 */
void (*ddi_console_bell_func)(clock_t duration);
132 * Creating register mappings and handling interrupts:
136 * Generic ddi_map: Call parent to fulfill request...
140 ddi_map(dev_info_t
*dp
, ddi_map_req_t
*mp
, off_t offset
,
141 off_t len
, caddr_t
*addrp
)
146 pdip
= (dev_info_t
*)DEVI(dp
)->devi_parent
;
147 return ((DEVI(pdip
)->devi_ops
->devo_bus_ops
->bus_map
)(pdip
,
148 dp
, mp
, offset
, len
, addrp
));
152 * ddi_apply_range: (Called by nexi only.)
153 * Apply ranges in parent node dp, to child regspec rp...
157 ddi_apply_range(dev_info_t
*dp
, dev_info_t
*rdip
, struct regspec
*rp
)
159 return (i_ddi_apply_range(dp
, rdip
, rp
));
163 ddi_map_regs(dev_info_t
*dip
, uint_t rnumber
, caddr_t
*kaddrp
, off_t offset
,
177 * get the 'registers' or the 'reg' property.
178 * We look up the reg property as an array of
181 rc
= ddi_prop_lookup_int_array(DDI_DEV_T_ANY
, dip
,
182 DDI_PROP_DONTPASS
, "registers", (int **)®list
, &length
);
183 if (rc
!= DDI_PROP_SUCCESS
)
184 rc
= ddi_prop_lookup_int_array(DDI_DEV_T_ANY
, dip
,
185 DDI_PROP_DONTPASS
, "reg", (int **)®list
, &length
);
186 if (rc
== DDI_PROP_SUCCESS
) {
188 * point to the required entry.
190 reg
= reglist
[rnumber
];
195 * make a new property containing ONLY the required tuple.
197 if (ddi_prop_update_int_array(DDI_DEV_T_NONE
, dip
,
198 chosen_reg
, (int *)®
, (sizeof (reg
)/sizeof (int)))
199 != DDI_PROP_SUCCESS
) {
200 cmn_err(CE_WARN
, "%s%d: cannot create '%s' "
201 "property", DEVI(dip
)->devi_name
,
202 DEVI(dip
)->devi_instance
, chosen_reg
);
205 * free the memory allocated by
206 * ddi_prop_lookup_int_array ().
208 ddi_prop_free((void *)reglist
);
211 mr
.map_op
= DDI_MO_MAP_LOCKED
;
212 mr
.map_type
= DDI_MT_RNUMBER
;
213 mr
.map_obj
.rnumber
= rnumber
;
214 mr
.map_prot
= PROT_READ
| PROT_WRITE
;
215 mr
.map_flags
= DDI_MF_KERNEL_MAPPING
;
216 mr
.map_handlep
= NULL
;
217 mr
.map_vers
= DDI_MAP_VERSION
;
220 * Call my parent to map in my regs.
223 return (ddi_map(dip
, &mr
, offset
, len
, kaddrp
));
227 ddi_unmap_regs(dev_info_t
*dip
, uint_t rnumber
, caddr_t
*kaddrp
, off_t offset
,
232 mr
.map_op
= DDI_MO_UNMAP
;
233 mr
.map_type
= DDI_MT_RNUMBER
;
234 mr
.map_flags
= DDI_MF_KERNEL_MAPPING
;
235 mr
.map_prot
= PROT_READ
| PROT_WRITE
; /* who cares? */
236 mr
.map_obj
.rnumber
= rnumber
;
237 mr
.map_handlep
= NULL
;
238 mr
.map_vers
= DDI_MAP_VERSION
;
241 * Call my parent to unmap my regs.
244 (void) ddi_map(dip
, &mr
, offset
, len
, kaddrp
);
245 *kaddrp
= (caddr_t
)0;
247 (void) ddi_prop_remove(DDI_DEV_T_NONE
, dip
, chosen_reg
);
252 ddi_bus_map(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_map_req_t
*mp
,
253 off_t offset
, off_t len
, caddr_t
*vaddrp
)
255 return (i_ddi_bus_map(dip
, rdip
, mp
, offset
, len
, vaddrp
));
259 * nullbusmap: The/DDI default bus_map entry point for nexi
260 * not conforming to the reg/range paradigm (i.e. scsi, etc.)
261 * with no HAT/MMU layer to be programmed at this level.
263 * If the call is to map by rnumber, return an error,
264 * otherwise pass anything else up the tree to my parent.
267 nullbusmap(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_map_req_t
*mp
,
268 off_t offset
, off_t len
, caddr_t
*vaddrp
)
270 _NOTE(ARGUNUSED(rdip
))
271 if (mp
->map_type
== DDI_MT_RNUMBER
)
272 return (DDI_ME_UNSUPPORTED
);
274 return (ddi_map(dip
, mp
, offset
, len
, vaddrp
));
278 * ddi_rnumber_to_regspec: Not for use by leaf drivers.
279 * Only for use by nexi using the reg/range paradigm.
282 ddi_rnumber_to_regspec(dev_info_t
*dip
, int rnumber
)
284 return (i_ddi_rnumber_to_regspec(dip
, rnumber
));
289 * Note that we allow the dip to be nil because we may be called
290 * prior even to the instantiation of the devinfo tree itself - all
291 * regular leaf and nexus drivers should always use a non-nil dip!
293 * We treat peek in a somewhat cavalier fashion .. assuming that we'll
294 * simply get a synchronous fault as soon as we touch a missing address.
296 * Poke is rather more carefully handled because we might poke to a write
297 * buffer, "succeed", then only find some time later that we got an
298 * asynchronous fault that indicated that the address we were writing to
299 * was not really backed by hardware.
303 i_ddi_peekpoke(dev_info_t
*devi
, ddi_ctl_enum_t cmd
, size_t size
,
304 void *addr
, void *value_p
)
313 peekpoke_ctlops_t peekpoke_args
;
314 uint64_t dummy_result
;
317 /* Note: size is assumed to be correct; it is not checked. */
318 peekpoke_args
.size
= size
;
319 peekpoke_args
.dev_addr
= (uintptr_t)addr
;
320 peekpoke_args
.handle
= NULL
;
321 peekpoke_args
.repcount
= 1;
322 peekpoke_args
.flags
= 0;
324 if (cmd
== DDI_CTLOPS_POKE
) {
326 case sizeof (uint8_t):
327 peekpoke_value
.u8
= *(uint8_t *)value_p
;
329 case sizeof (uint16_t):
330 peekpoke_value
.u16
= *(uint16_t *)value_p
;
332 case sizeof (uint32_t):
333 peekpoke_value
.u32
= *(uint32_t *)value_p
;
335 case sizeof (uint64_t):
336 peekpoke_value
.u64
= *(uint64_t *)value_p
;
341 peekpoke_args
.host_addr
= (uintptr_t)&peekpoke_value
.u64
;
344 rval
= ddi_ctlops(devi
, devi
, cmd
, &peekpoke_args
,
347 rval
= peekpoke_mem(cmd
, &peekpoke_args
);
350 * A NULL value_p is permitted by ddi_peek(9F); discard the result.
352 if ((cmd
== DDI_CTLOPS_PEEK
) & (value_p
!= NULL
)) {
354 case sizeof (uint8_t):
355 *(uint8_t *)value_p
= peekpoke_value
.u8
;
357 case sizeof (uint16_t):
358 *(uint16_t *)value_p
= peekpoke_value
.u16
;
360 case sizeof (uint32_t):
361 *(uint32_t *)value_p
= peekpoke_value
.u32
;
363 case sizeof (uint64_t):
364 *(uint64_t *)value_p
= peekpoke_value
.u64
;
373 * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this.
374 * they shouldn't be, but the 9f manpage kind of pseudo exposes it.
377 ddi_peek(dev_info_t
*devi
, size_t size
, void *addr
, void *value_p
)
380 case sizeof (uint8_t):
381 case sizeof (uint16_t):
382 case sizeof (uint32_t):
383 case sizeof (uint64_t):
386 return (DDI_FAILURE
);
389 return (i_ddi_peekpoke(devi
, DDI_CTLOPS_PEEK
, size
, addr
, value_p
));
393 ddi_poke(dev_info_t
*devi
, size_t size
, void *addr
, void *value_p
)
396 case sizeof (uint8_t):
397 case sizeof (uint16_t):
398 case sizeof (uint32_t):
399 case sizeof (uint64_t):
402 return (DDI_FAILURE
);
405 return (i_ddi_peekpoke(devi
, DDI_CTLOPS_POKE
, size
, addr
, value_p
));
409 ddi_peek8(dev_info_t
*dip
, int8_t *addr
, int8_t *val_p
)
411 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_PEEK
, sizeof (*val_p
), addr
,
416 ddi_peek16(dev_info_t
*dip
, int16_t *addr
, int16_t *val_p
)
418 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_PEEK
, sizeof (*val_p
), addr
,
423 ddi_peek32(dev_info_t
*dip
, int32_t *addr
, int32_t *val_p
)
425 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_PEEK
, sizeof (*val_p
), addr
,
430 ddi_peek64(dev_info_t
*dip
, int64_t *addr
, int64_t *val_p
)
432 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_PEEK
, sizeof (*val_p
), addr
,
438 * We need to separate the old interfaces from the new ones and leave them
439 * in here for a while. Previous versions of the OS defined the new interfaces
440 * to the old interfaces. This way we can fix things up so that we can
441 * eventually remove these interfaces.
442 * e.g. A 3rd party module/driver using ddi_peek8 and built against S10
443 * or earlier will actually have a reference to ddi_peekc in the binary.
447 ddi_peekc(dev_info_t
*dip
, int8_t *addr
, int8_t *val_p
)
449 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_PEEK
, sizeof (*val_p
), addr
,
454 ddi_peeks(dev_info_t
*dip
, int16_t *addr
, int16_t *val_p
)
456 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_PEEK
, sizeof (*val_p
), addr
,
461 ddi_peekl(dev_info_t
*dip
, int32_t *addr
, int32_t *val_p
)
463 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_PEEK
, sizeof (*val_p
), addr
,
468 ddi_peekd(dev_info_t
*dip
, int64_t *addr
, int64_t *val_p
)
470 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_PEEK
, sizeof (*val_p
), addr
,
476 ddi_poke8(dev_info_t
*dip
, int8_t *addr
, int8_t val
)
478 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_POKE
, sizeof (val
), addr
, &val
));
482 ddi_poke16(dev_info_t
*dip
, int16_t *addr
, int16_t val
)
484 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_POKE
, sizeof (val
), addr
, &val
));
488 ddi_poke32(dev_info_t
*dip
, int32_t *addr
, int32_t val
)
490 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_POKE
, sizeof (val
), addr
, &val
));
494 ddi_poke64(dev_info_t
*dip
, int64_t *addr
, int64_t val
)
496 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_POKE
, sizeof (val
), addr
, &val
));
500 * We need to separate the old interfaces from the new ones and leave them
501 * in here for a while. Previous versions of the OS defined the new interfaces
502 * to the old interfaces. This way we can fix things up so that we can
503 * eventually remove these interfaces.
504 * e.g. A 3rd party module/driver using ddi_poke8 and built against S10
505 * or earlier will actually have a reference to ddi_pokec in the binary.
509 ddi_pokec(dev_info_t
*dip
, int8_t *addr
, int8_t val
)
511 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_POKE
, sizeof (val
), addr
, &val
));
515 ddi_pokes(dev_info_t
*dip
, int16_t *addr
, int16_t val
)
517 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_POKE
, sizeof (val
), addr
, &val
));
521 ddi_pokel(dev_info_t
*dip
, int32_t *addr
, int32_t val
)
523 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_POKE
, sizeof (val
), addr
, &val
));
527 ddi_poked(dev_info_t
*dip
, int64_t *addr
, int64_t val
)
529 return (i_ddi_peekpoke(dip
, DDI_CTLOPS_POKE
, sizeof (val
), addr
, &val
));
534 * ddi_peekpokeio() is used primarily by the mem drivers for moving
535 * data to and from uio structures via peek and poke. Note that we
536 * use "internal" routines ddi_peek and ddi_poke to make this go
537 * slightly faster, avoiding the call overhead ..
540 ddi_peekpokeio(dev_info_t
*devi
, struct uio
*uio
, enum uio_rw rw
,
541 caddr_t addr
, size_t len
, uint_t xfersize
)
548 if (xfersize
> sizeof (long))
549 xfersize
= sizeof (long);
552 if ((len
| (uintptr_t)addr
) & 1) {
553 sz
= sizeof (int8_t);
554 if (rw
== UIO_WRITE
) {
555 if ((o
= uwritec(uio
)) == -1)
556 return (DDI_FAILURE
);
557 if (ddi_poke8(devi
, (int8_t *)addr
,
558 (int8_t)o
) != DDI_SUCCESS
)
559 return (DDI_FAILURE
);
561 if (i_ddi_peekpoke(devi
, DDI_CTLOPS_PEEK
, sz
,
562 (int8_t *)addr
, &w8
) != DDI_SUCCESS
)
563 return (DDI_FAILURE
);
565 return (DDI_FAILURE
);
569 case sizeof (int64_t):
570 if (((len
| (uintptr_t)addr
) &
571 (sizeof (int64_t) - 1)) == 0) {
576 case sizeof (int32_t):
577 if (((len
| (uintptr_t)addr
) &
578 (sizeof (int32_t) - 1)) == 0) {
585 * This still assumes that we might have an
586 * I/O bus out there that permits 16-bit
587 * transfers (and that it would be upset by
588 * 32-bit transfers from such locations).
590 sz
= sizeof (int16_t);
594 if (rw
== UIO_READ
) {
595 if (i_ddi_peekpoke(devi
, DDI_CTLOPS_PEEK
, sz
,
596 addr
, &ibuffer
) != DDI_SUCCESS
)
597 return (DDI_FAILURE
);
600 if (uiomove(&ibuffer
, sz
, rw
, uio
))
601 return (DDI_FAILURE
);
603 if (rw
== UIO_WRITE
) {
604 if (i_ddi_peekpoke(devi
, DDI_CTLOPS_POKE
, sz
,
605 addr
, &ibuffer
) != DDI_SUCCESS
)
606 return (DDI_FAILURE
);
612 return (DDI_SUCCESS
);
616 * These routines are used by drivers that do layered ioctls
617 * On sparc, they're implemented in assembler to avoid spilling
618 * register windows in the common (copyin) case ..
620 #if !defined(__sparc)
622 ddi_copyin(const void *buf
, void *kernbuf
, size_t size
, int flags
)
625 return (kcopy(buf
, kernbuf
, size
) ? -1 : 0);
626 return (copyin(buf
, kernbuf
, size
));
630 ddi_copyout(const void *buf
, void *kernbuf
, size_t size
, int flags
)
633 return (kcopy(buf
, kernbuf
, size
) ? -1 : 0);
634 return (copyout(buf
, kernbuf
, size
));
636 #endif /* !__sparc */
639 * Conversions in nexus pagesize units. We don't duplicate the
640 * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
644 ddi_btop(dev_info_t
*dip
, unsigned long bytes
)
648 (void) ddi_ctlops(dip
, dip
, DDI_CTLOPS_BTOP
, &bytes
, &pages
);
653 ddi_btopr(dev_info_t
*dip
, unsigned long bytes
)
657 (void) ddi_ctlops(dip
, dip
, DDI_CTLOPS_BTOPR
, &bytes
, &pages
);
662 ddi_ptob(dev_info_t
*dip
, unsigned long pages
)
666 (void) ddi_ctlops(dip
, dip
, DDI_CTLOPS_PTOB
, &pages
, &bytes
);
671 ddi_enter_critical(void)
673 return ((uint_t
)spl7());
/*
 * ddi_exit_critical: restore the interrupt priority level saved by
 * ddi_enter_critical().
 *
 * NOTE(review): the body was not visible in the source under review;
 * restoring the level via splx() is the expected implementation —
 * confirm against the original.
 */
void
ddi_exit_critical(unsigned int spl)
{
    splx((int)spl);
}
683 * Nexus ctlops punter
686 #if !defined(__sparc)
688 * Request bus_ctl parent to handle a bus_ctl request
690 * (The sparc version is in sparc_ddi.s)
693 ddi_ctlops(dev_info_t
*d
, dev_info_t
*r
, ddi_ctl_enum_t op
, void *a
, void *v
)
698 return (DDI_FAILURE
);
700 if ((d
= (dev_info_t
*)DEVI(d
)->devi_bus_ctl
) == NULL
)
701 return (DDI_FAILURE
);
703 fp
= DEVI(d
)->devi_ops
->devo_bus_ops
->bus_ctl
;
704 return ((*fp
)(d
, r
, op
, a
, v
));
713 #if !defined(__sparc)
715 * Request bus_dma_ctl parent to fiddle with a dma request.
717 * (The sparc version is in sparc_subr.s)
720 ddi_dma_mctl(dev_info_t
*dip
, dev_info_t
*rdip
,
721 ddi_dma_handle_t handle
, enum ddi_dma_ctlops request
,
722 off_t
*offp
, size_t *lenp
, caddr_t
*objp
, uint_t flags
)
726 if (dip
!= ddi_root_node())
727 dip
= (dev_info_t
*)DEVI(dip
)->devi_bus_dma_ctl
;
728 fp
= DEVI(dip
)->devi_ops
->devo_bus_ops
->bus_dma_ctl
;
729 return ((*fp
) (dip
, rdip
, handle
, request
, offp
, lenp
, objp
, flags
));
734 * For all DMA control functions, call the DMA control
735 * routine and return status.
737 * Just plain assume that the parent is to be called.
738 * If a nexus driver or a thread outside the framework
739 * of a nexus driver or a leaf driver calls these functions,
740 * it is up to them to deal with the fact that the parent's
741 * bus_dma_ctl function will be the first one called.
744 #define HD ((ddi_dma_impl_t *)h)->dmai_rdip
747 * This routine is left in place to satisfy link dependencies
748 * for any 3rd party nexus drivers that rely on it. It is never
753 ddi_dma_map(dev_info_t
*dip
, dev_info_t
*rdip
,
754 struct ddi_dma_req
*dmareqp
, ddi_dma_handle_t
*handlep
)
756 return (DDI_FAILURE
);
759 #if !defined(__sparc)
762 * The SPARC versions of these routines are done in assembler to
763 * save register windows, so they're in sparc_subr.s.
767 ddi_dma_allochdl(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_dma_attr_t
*attr
,
768 int (*waitfp
)(caddr_t
), caddr_t arg
, ddi_dma_handle_t
*handlep
)
770 int (*funcp
)(dev_info_t
*, dev_info_t
*, ddi_dma_attr_t
*,
771 int (*)(caddr_t
), caddr_t
, ddi_dma_handle_t
*);
773 if (dip
!= ddi_root_node())
774 dip
= (dev_info_t
*)DEVI(dip
)->devi_bus_dma_allochdl
;
776 funcp
= DEVI(dip
)->devi_ops
->devo_bus_ops
->bus_dma_allochdl
;
777 return ((*funcp
)(dip
, rdip
, attr
, waitfp
, arg
, handlep
));
781 ddi_dma_freehdl(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_dma_handle_t handlep
)
783 int (*funcp
)(dev_info_t
*, dev_info_t
*, ddi_dma_handle_t
);
785 if (dip
!= ddi_root_node())
786 dip
= (dev_info_t
*)DEVI(dip
)->devi_bus_dma_allochdl
;
788 funcp
= DEVI(dip
)->devi_ops
->devo_bus_ops
->bus_dma_freehdl
;
789 return ((*funcp
)(dip
, rdip
, handlep
));
793 ddi_dma_bindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
794 ddi_dma_handle_t handle
, struct ddi_dma_req
*dmareq
,
795 ddi_dma_cookie_t
*cp
, uint_t
*ccountp
)
797 int (*funcp
)(dev_info_t
*, dev_info_t
*, ddi_dma_handle_t
,
798 struct ddi_dma_req
*, ddi_dma_cookie_t
*, uint_t
*);
800 if (dip
!= ddi_root_node())
801 dip
= (dev_info_t
*)DEVI(dip
)->devi_bus_dma_bindhdl
;
803 funcp
= DEVI(dip
)->devi_ops
->devo_bus_ops
->bus_dma_bindhdl
;
804 return ((*funcp
)(dip
, rdip
, handle
, dmareq
, cp
, ccountp
));
808 ddi_dma_unbindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
809 ddi_dma_handle_t handle
)
811 int (*funcp
)(dev_info_t
*, dev_info_t
*, ddi_dma_handle_t
);
813 if (dip
!= ddi_root_node())
814 dip
= (dev_info_t
*)DEVI(dip
)->devi_bus_dma_unbindhdl
;
816 funcp
= DEVI(dip
)->devi_ops
->devo_bus_ops
->bus_dma_unbindhdl
;
817 return ((*funcp
)(dip
, rdip
, handle
));
822 ddi_dma_flush(dev_info_t
*dip
, dev_info_t
*rdip
,
823 ddi_dma_handle_t handle
, off_t off
, size_t len
,
826 int (*funcp
)(dev_info_t
*, dev_info_t
*, ddi_dma_handle_t
,
827 off_t
, size_t, uint_t
);
829 if (dip
!= ddi_root_node())
830 dip
= (dev_info_t
*)DEVI(dip
)->devi_bus_dma_flush
;
832 funcp
= DEVI(dip
)->devi_ops
->devo_bus_ops
->bus_dma_flush
;
833 return ((*funcp
)(dip
, rdip
, handle
, off
, len
, cache_flags
));
837 ddi_dma_win(dev_info_t
*dip
, dev_info_t
*rdip
,
838 ddi_dma_handle_t handle
, uint_t win
, off_t
*offp
,
839 size_t *lenp
, ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
)
841 int (*funcp
)(dev_info_t
*, dev_info_t
*, ddi_dma_handle_t
,
842 uint_t
, off_t
*, size_t *, ddi_dma_cookie_t
*, uint_t
*);
844 if (dip
!= ddi_root_node())
845 dip
= (dev_info_t
*)DEVI(dip
)->devi_bus_dma_win
;
847 funcp
= DEVI(dip
)->devi_ops
->devo_bus_ops
->bus_dma_win
;
848 return ((*funcp
)(dip
, rdip
, handle
, win
, offp
, lenp
,
853 ddi_dma_sync(ddi_dma_handle_t h
, off_t o
, size_t l
, uint_t whom
)
855 ddi_dma_impl_t
*hp
= (ddi_dma_impl_t
*)h
;
856 dev_info_t
*dip
, *rdip
;
857 int (*funcp
)(dev_info_t
*, dev_info_t
*, ddi_dma_handle_t
, off_t
,
861 * the DMA nexus driver will set DMP_NOSYNC if the
862 * platform does not require any sync operation. For
863 * example if the memory is uncached or consistent
864 * and without any I/O write buffers involved.
866 if ((hp
->dmai_rflags
& DMP_NOSYNC
) == DMP_NOSYNC
)
867 return (DDI_SUCCESS
);
869 dip
= rdip
= hp
->dmai_rdip
;
870 if (dip
!= ddi_root_node())
871 dip
= (dev_info_t
*)DEVI(dip
)->devi_bus_dma_flush
;
872 funcp
= DEVI(dip
)->devi_ops
->devo_bus_ops
->bus_dma_flush
;
873 return ((*funcp
)(dip
, rdip
, h
, o
, l
, whom
));
877 ddi_dma_unbind_handle(ddi_dma_handle_t h
)
879 ddi_dma_impl_t
*hp
= (ddi_dma_impl_t
*)h
;
880 dev_info_t
*dip
, *rdip
;
881 int (*funcp
)(dev_info_t
*, dev_info_t
*, ddi_dma_handle_t
);
883 dip
= rdip
= hp
->dmai_rdip
;
884 if (dip
!= ddi_root_node())
885 dip
= (dev_info_t
*)DEVI(dip
)->devi_bus_dma_unbindhdl
;
886 funcp
= DEVI(rdip
)->devi_bus_dma_unbindfunc
;
887 return ((*funcp
)(dip
, rdip
, h
));
890 #endif /* !__sparc */
893 * DMA burst sizes, and transfer minimums
897 ddi_dma_burstsizes(ddi_dma_handle_t handle
)
899 ddi_dma_impl_t
*dimp
= (ddi_dma_impl_t
*)handle
;
904 return (dimp
->dmai_burstsizes
);
908 * Given two DMA attribute structures, apply the attributes
909 * of one to the other, following the rules of attributes
910 * and the wishes of the caller.
912 * The rules of DMA attribute structures are that you cannot
913 * make things *less* restrictive as you apply one set
914 * of attributes to another.
918 ddi_dma_attr_merge(ddi_dma_attr_t
*attr
, ddi_dma_attr_t
*mod
)
920 attr
->dma_attr_addr_lo
=
921 MAX(attr
->dma_attr_addr_lo
, mod
->dma_attr_addr_lo
);
922 attr
->dma_attr_addr_hi
=
923 MIN(attr
->dma_attr_addr_hi
, mod
->dma_attr_addr_hi
);
924 attr
->dma_attr_count_max
=
925 MIN(attr
->dma_attr_count_max
, mod
->dma_attr_count_max
);
926 attr
->dma_attr_align
=
927 MAX(attr
->dma_attr_align
, mod
->dma_attr_align
);
928 attr
->dma_attr_burstsizes
=
929 (uint_t
)(attr
->dma_attr_burstsizes
& mod
->dma_attr_burstsizes
);
930 attr
->dma_attr_minxfer
=
931 maxbit(attr
->dma_attr_minxfer
, mod
->dma_attr_minxfer
);
932 attr
->dma_attr_maxxfer
=
933 MIN(attr
->dma_attr_maxxfer
, mod
->dma_attr_maxxfer
);
934 attr
->dma_attr_seg
= MIN(attr
->dma_attr_seg
, mod
->dma_attr_seg
);
935 attr
->dma_attr_sgllen
= MIN((uint_t
)attr
->dma_attr_sgllen
,
936 (uint_t
)mod
->dma_attr_sgllen
);
937 attr
->dma_attr_granular
=
938 MAX(attr
->dma_attr_granular
, mod
->dma_attr_granular
);
942 * mmap/segmap interface:
946 * ddi_segmap: setup the default segment driver. Calls the drivers
947 * XXmmap routine to validate the range to be mapped.
948 * Return ENXIO of the range is not valid. Create
949 * a seg_dev segment that contains all of the
950 * necessary information and will reference the
951 * default segment driver routines. It returns zero
952 * on success or non-zero on failure.
955 ddi_segmap(dev_t dev
, off_t offset
, struct as
*asp
, caddr_t
*addrp
, off_t len
,
956 uint_t prot
, uint_t maxprot
, uint_t flags
, cred_t
*credp
)
958 extern int spec_segmap(dev_t
, off_t
, struct as
*, caddr_t
*,
959 off_t
, uint_t
, uint_t
, uint_t
, struct cred
*);
961 return (spec_segmap(dev
, offset
, asp
, addrp
, len
,
962 prot
, maxprot
, flags
, credp
));
966 * ddi_map_fault: Resolve mappings at fault time. Used by segment
967 * drivers. Allows each successive parent to resolve
968 * address translations and add its mappings to the
969 * mapping list supplied in the page structure. It
970 * returns zero on success or non-zero on failure.
974 ddi_map_fault(dev_info_t
*dip
, struct hat
*hat
, struct seg
*seg
,
975 caddr_t addr
, struct devpage
*dp
, pfn_t pfn
, uint_t prot
, uint_t lock
)
977 return (i_ddi_map_fault(dip
, dip
, hat
, seg
, addr
, dp
, pfn
, prot
, lock
));
981 * ddi_device_mapping_check: Called from ddi_segmap_setup.
982 * Invokes platform specific DDI to determine whether attributes specified
983 * in attr(9s) are valid for the region of memory that will be made
984 * available for direct access to user process via the mmap(2) system call.
987 ddi_device_mapping_check(dev_t dev
, ddi_device_acc_attr_t
*accattrp
,
988 uint_t rnumber
, uint_t
*hat_flags
)
990 ddi_acc_handle_t handle
;
997 * we use e_ddi_hold_devi_by_dev to search for the devi. We
998 * release it immediately since it should already be held by
1002 e_ddi_hold_devi_by_dev(dev
, E_DDI_HOLD_DEVI_NOATTACH
)) == NULL
)
1004 ddi_release_devi(dip
); /* for e_ddi_hold_devi_by_dev() */
1007 * Allocate and initialize the common elements of data
1010 handle
= impl_acc_hdl_alloc(KM_SLEEP
, NULL
);
1014 hp
= impl_acc_hdl_get(handle
);
1015 hp
->ah_vers
= VERS_ACCHDL
;
1017 hp
->ah_rnumber
= rnumber
;
1020 hp
->ah_acc
= *accattrp
;
1023 * Set up the mapping request and call to parent.
1025 mr
.map_op
= DDI_MO_MAP_HANDLE
;
1026 mr
.map_type
= DDI_MT_RNUMBER
;
1027 mr
.map_obj
.rnumber
= rnumber
;
1028 mr
.map_prot
= PROT_READ
| PROT_WRITE
;
1029 mr
.map_flags
= DDI_MF_KERNEL_MAPPING
;
1030 mr
.map_handlep
= hp
;
1031 mr
.map_vers
= DDI_MAP_VERSION
;
1032 result
= ddi_map(dip
, &mr
, 0, 0, NULL
);
1035 * Region must be mappable, pick up flags from the framework.
1037 *hat_flags
= hp
->ah_hat_flags
;
1039 impl_acc_hdl_free(handle
);
1042 * check for end result.
1044 if (result
!= DDI_SUCCESS
)
1051 * Property functions: See also, ddipropdefs.h.
1053 * These functions are the framework for the property functions,
1054 * i.e. they support software defined properties. All implementation
1055 * specific property handling (i.e.: self-identifying devices and
1056 * PROM defined properties are handled in the implementation specific
1057 * functions (defined in ddi_implfuncs.h).
1061 * nopropop: Shouldn't be called, right?
1064 nopropop(dev_t dev
, dev_info_t
*dip
, ddi_prop_op_t prop_op
, int mod_flags
,
1065 char *name
, caddr_t valuep
, int *lengthp
)
1067 _NOTE(ARGUNUSED(dev
, dip
, prop_op
, mod_flags
, name
, valuep
, lengthp
))
1068 return (DDI_PROP_NOT_FOUND
);
1071 #ifdef DDI_PROP_DEBUG
/* Non-zero enables ddi_prop debug messages; toggled via ddi_prop_debug(). */
int ddi_prop_debug_flag = 0;
1075 ddi_prop_debug(int enable
)
1077 int prev
= ddi_prop_debug_flag
;
1079 if ((enable
!= 0) || (prev
!= 0))
1080 printf("ddi_prop_debug: debugging %s\n",
1081 enable
? "enabled" : "disabled");
1082 ddi_prop_debug_flag
= enable
;
1086 #endif /* DDI_PROP_DEBUG */
1089 * Search a property list for a match, if found return pointer
1090 * to matching prop struct, else return NULL.
1094 i_ddi_prop_search(dev_t dev
, char *name
, uint_t flags
, ddi_prop_t
**list_head
)
1099 * find the property in child's devinfo:
1100 * Search order defined by this search function is first matching
1101 * property with input dev == DDI_DEV_T_ANY matching any dev or
1102 * dev == propp->prop_dev, name == propp->name, and the correct
1103 * data type as specified in the flags. If a DDI_DEV_T_NONE dev
1104 * value made it this far then it implies a DDI_DEV_T_ANY search.
1106 if (dev
== DDI_DEV_T_NONE
)
1107 dev
= DDI_DEV_T_ANY
;
1109 for (propp
= *list_head
; propp
!= NULL
; propp
= propp
->prop_next
) {
1111 if (!DDI_STRSAME(propp
->prop_name
, name
))
1114 if ((dev
!= DDI_DEV_T_ANY
) && (propp
->prop_dev
!= dev
))
1117 if (((propp
->prop_flags
& flags
) & DDI_PROP_TYPE_MASK
) == 0)
1123 return ((ddi_prop_t
*)0);
1127 * Search for property within devnames structures
1130 i_ddi_search_global_prop(dev_t dev
, char *name
, uint_t flags
)
1133 struct devnames
*dnp
;
1137 * Valid dev_t value is needed to index into the
1138 * correct devnames entry, therefore a dev_t
1139 * value of DDI_DEV_T_ANY is not appropriate.
1141 ASSERT(dev
!= DDI_DEV_T_ANY
);
1142 if (dev
== DDI_DEV_T_ANY
) {
1143 return ((ddi_prop_t
*)0);
1146 major
= getmajor(dev
);
1147 dnp
= &(devnamesp
[major
]);
1149 if (dnp
->dn_global_prop_ptr
== NULL
)
1150 return ((ddi_prop_t
*)0);
1152 LOCK_DEV_OPS(&dnp
->dn_lock
);
1154 for (propp
= dnp
->dn_global_prop_ptr
->prop_list
;
1156 propp
= (ddi_prop_t
*)propp
->prop_next
) {
1158 if (!DDI_STRSAME(propp
->prop_name
, name
))
1161 if ((!(flags
& DDI_PROP_ROOTNEX_GLOBAL
)) &&
1162 (!(flags
& LDI_DEV_T_ANY
)) && (propp
->prop_dev
!= dev
))
1165 if (((propp
->prop_flags
& flags
) & DDI_PROP_TYPE_MASK
) == 0)
1168 /* Property found, return it */
1169 UNLOCK_DEV_OPS(&dnp
->dn_lock
);
1173 UNLOCK_DEV_OPS(&dnp
->dn_lock
);
1174 return ((ddi_prop_t
*)0);
/* Warning format used when a property-value buffer cannot be allocated. */
static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";
1180 * ddi_prop_search_global:
1181 * Search the global property list within devnames
1182 * for the named property. Return the encoded value.
1185 i_ddi_prop_search_global(dev_t dev
, uint_t flags
, char *name
,
1186 void *valuep
, uint_t
*lengthp
)
1191 propp
= i_ddi_search_global_prop(dev
, name
, flags
);
1193 /* Property NOT found, bail */
1194 if (propp
== (ddi_prop_t
*)0)
1195 return (DDI_PROP_NOT_FOUND
);
1197 if (propp
->prop_flags
& DDI_PROP_UNDEF_IT
)
1198 return (DDI_PROP_UNDEFINED
);
1200 if ((buffer
= kmem_alloc(propp
->prop_len
,
1201 (flags
& DDI_PROP_CANSLEEP
) ? KM_SLEEP
: KM_NOSLEEP
)) == NULL
) {
1202 cmn_err(CE_CONT
, prop_no_mem_msg
, name
);
1203 return (DDI_PROP_NO_MEMORY
);
1207 * Return the encoded data
1209 *(caddr_t
*)valuep
= buffer
;
1210 *lengthp
= propp
->prop_len
;
1211 bcopy(propp
->prop_val
, buffer
, propp
->prop_len
);
1213 return (DDI_PROP_SUCCESS
);
1217 * ddi_prop_search_common: Lookup and return the encoded value
1220 ddi_prop_search_common(dev_t dev
, dev_info_t
*dip
, ddi_prop_op_t prop_op
,
1221 uint_t flags
, char *name
, void *valuep
, uint_t
*lengthp
)
1226 caddr_t prealloc
= NULL
;
1231 /*CONSTANTCONDITION*/
1234 mutex_enter(&(DEVI(dip
)->devi_lock
));
1238 * find the property in child's devinfo:
1240 * 1. driver defined properties
1241 * 2. system defined properties
1242 * 3. driver global properties
1243 * 4. boot defined properties
1246 propp
= i_ddi_prop_search(dev
, name
, flags
,
1247 &(DEVI(dip
)->devi_drv_prop_ptr
));
1248 if (propp
== NULL
) {
1249 propp
= i_ddi_prop_search(dev
, name
, flags
,
1250 &(DEVI(dip
)->devi_sys_prop_ptr
));
1252 if ((propp
== NULL
) && DEVI(dip
)->devi_global_prop_list
) {
1253 propp
= i_ddi_prop_search(dev
, name
, flags
,
1254 &DEVI(dip
)->devi_global_prop_list
->prop_list
);
1257 if (propp
== NULL
) {
1258 propp
= i_ddi_prop_search(dev
, name
, flags
,
1259 &(DEVI(dip
)->devi_hw_prop_ptr
));
1263 * Software property found?
1265 if (propp
!= (ddi_prop_t
*)0) {
1268 * If explicit undefine, return now.
1270 if (propp
->prop_flags
& DDI_PROP_UNDEF_IT
) {
1271 mutex_exit(&(DEVI(dip
)->devi_lock
));
1273 kmem_free(prealloc
, plength
);
1274 return (DDI_PROP_UNDEFINED
);
1278 * If we only want to know if it exists, return now
1280 if (prop_op
== PROP_EXISTS
) {
1281 mutex_exit(&(DEVI(dip
)->devi_lock
));
1282 ASSERT(prealloc
== NULL
);
1283 return (DDI_PROP_SUCCESS
);
1287 * If length only request or prop length == 0,
1288 * service request and return now.
1290 if ((prop_op
== PROP_LEN
) ||(propp
->prop_len
== 0)) {
1291 *lengthp
= propp
->prop_len
;
1294 * if prop_op is PROP_LEN_AND_VAL_ALLOC
1295 * that means prop_len is 0, so set valuep
1298 if (prop_op
== PROP_LEN_AND_VAL_ALLOC
)
1299 *(caddr_t
*)valuep
= NULL
;
1301 mutex_exit(&(DEVI(dip
)->devi_lock
));
1303 kmem_free(prealloc
, plength
);
1304 return (DDI_PROP_SUCCESS
);
1308 * If LEN_AND_VAL_ALLOC and the request can sleep,
1309 * drop the mutex, allocate the buffer, and go
1310 * through the loop again. If we already allocated
1311 * the buffer, and the size of the property changed,
1314 if ((prop_op
== PROP_LEN_AND_VAL_ALLOC
) &&
1315 (flags
& DDI_PROP_CANSLEEP
)) {
1316 if (prealloc
&& (propp
->prop_len
!= plength
)) {
1317 kmem_free(prealloc
, plength
);
1320 if (prealloc
== NULL
) {
1321 plength
= propp
->prop_len
;
1322 mutex_exit(&(DEVI(dip
)->devi_lock
));
1323 prealloc
= kmem_alloc(plength
,
1330 * Allocate buffer, if required. Either way,
1331 * set `buffer' variable.
1333 i
= *lengthp
; /* Get callers length */
1334 *lengthp
= propp
->prop_len
; /* Set callers length */
1338 case PROP_LEN_AND_VAL_ALLOC
:
1340 if (prealloc
== NULL
) {
1341 buffer
= kmem_alloc(propp
->prop_len
,
1347 if (buffer
== NULL
) {
1348 mutex_exit(&(DEVI(dip
)->devi_lock
));
1349 cmn_err(CE_CONT
, prop_no_mem_msg
, name
);
1350 return (DDI_PROP_NO_MEMORY
);
1352 /* Set callers buf ptr */
1353 *(caddr_t
*)valuep
= buffer
;
1356 case PROP_LEN_AND_VAL_BUF
:
1358 if (propp
->prop_len
> (i
)) {
1359 mutex_exit(&(DEVI(dip
)->devi_lock
));
1360 return (DDI_PROP_BUF_TOO_SMALL
);
1363 buffer
= valuep
; /* Get callers buf ptr */
1373 bcopy(propp
->prop_val
, buffer
, propp
->prop_len
);
1374 mutex_exit(&(DEVI(dip
)->devi_lock
));
1375 return (DDI_PROP_SUCCESS
);
1378 mutex_exit(&(DEVI(dip
)->devi_lock
));
1380 kmem_free(prealloc
, plength
);
1384 * Prop not found, call parent bus_ops to deal with possible
1385 * h/w layer (possible PROM defined props, etc.) and to
1386 * possibly ascend the hierarchy, if allowed by flags.
1388 pdip
= (dev_info_t
*)DEVI(dip
)->devi_parent
;
1391 * One last call for the root driver PROM props?
1393 if (dip
== ddi_root_node()) {
1394 return (ddi_bus_prop_op(dev
, dip
, dip
, prop_op
,
1395 flags
, name
, valuep
, (int *)lengthp
));
1399 * We may have been called to check for properties
1400 * within a single devinfo node that has no parent -
1405 (DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
)) ==
1406 (DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
));
1407 return (DDI_PROP_NOT_FOUND
);
1411 * Instead of recursing, we do iterative calls up the tree.
1412 * As a bit of optimization, skip the bus_op level if the
1413 * node is a s/w node and if the parent's bus_prop_op function
1414 * is `ddi_bus_prop_op', because we know that in this case,
1415 * this function does nothing.
1417 * 4225415: If the parent isn't attached, or the child
1418 * hasn't been named by the parent yet, use the default
1419 * ddi_bus_prop_op as a proxy for the parent. This
1420 * allows property lookups in any child/parent state to
1421 * include 'prom' and inherited properties, even when
1422 * there are no drivers attached to the child or parent.
1425 bop
= ddi_bus_prop_op
;
1426 if (i_ddi_devi_attached(pdip
) &&
1427 (i_ddi_node_state(dip
) >= DS_INITIALIZED
))
1428 bop
= DEVI(pdip
)->devi_ops
->devo_bus_ops
->bus_prop_op
;
1430 i
= DDI_PROP_NOT_FOUND
;
1432 if ((bop
!= ddi_bus_prop_op
) || ndi_dev_is_prom_node(dip
)) {
1433 i
= (*bop
)(dev
, pdip
, dip
, prop_op
,
1434 flags
| DDI_PROP_DONTPASS
,
1435 name
, valuep
, lengthp
);
1438 if ((flags
& DDI_PROP_DONTPASS
) ||
1439 (i
!= DDI_PROP_NOT_FOUND
))
1449 * ddi_prop_op: The basic property operator for drivers.
1451 * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
1458 * PROP_LEN_AND_VAL_BUF Pointer to callers buffer
1460 * PROP_LEN_AND_VAL_ALLOC Address of callers pointer (will be set to
1461 * address of allocated buffer, if successful)
1464 ddi_prop_op(dev_t dev
, dev_info_t
*dip
, ddi_prop_op_t prop_op
, int mod_flags
,
1465 char *name
, caddr_t valuep
, int *lengthp
)
1469 ASSERT((mod_flags
& DDI_PROP_TYPE_MASK
) == 0);
1472 * If this was originally an LDI prop lookup then we bail here.
1473 * The reason is that the LDI property lookup interfaces first call
1474 * a drivers prop_op() entry point to allow it to override
1475 * properties. But if we've made it here, then the driver hasn't
1476 * overriden any properties. We don't want to continue with the
1477 * property search here because we don't have any type inforamtion.
1478 * When we return failure, the LDI interfaces will then proceed to
1479 * call the typed property interfaces to look up the property.
1481 if (mod_flags
& DDI_PROP_DYNAMIC
)
1482 return (DDI_PROP_NOT_FOUND
);
1485 * check for pre-typed property consumer asking for typed property:
1486 * see e_ddi_getprop_int64.
1488 if (mod_flags
& DDI_PROP_CONSUMER_TYPED
)
1489 mod_flags
|= DDI_PROP_TYPE_INT64
;
1490 mod_flags
|= DDI_PROP_TYPE_ANY
;
1492 i
= ddi_prop_search_common(dev
, dip
, prop_op
,
1493 mod_flags
, name
, valuep
, (uint_t
*)lengthp
);
1494 if (i
== DDI_PROP_FOUND_1275
)
1495 return (DDI_PROP_SUCCESS
);
1500 * ddi_prop_op_nblocks_blksize: The basic property operator for drivers that
1501 * maintain size in number of blksize blocks. Provides a dynamic property
1502 * implementation for size oriented properties based on nblocks64 and blksize
1503 * values passed in by the driver. Fallback to ddi_prop_op if the nblocks64
1504 * is too large. This interface should not be used with a nblocks64 that
1505 * represents the driver's idea of how to represent unknown, if nblocks is
1506 * unknown use ddi_prop_op.
1509 ddi_prop_op_nblocks_blksize(dev_t dev
, dev_info_t
*dip
, ddi_prop_op_t prop_op
,
1510 int mod_flags
, char *name
, caddr_t valuep
, int *lengthp
,
1511 uint64_t nblocks64
, uint_t blksize
)
1516 /* convert block size to shift value */
1517 ASSERT(BIT_ONLYONESET(blksize
));
1518 blkshift
= highbit(blksize
) - 1;
1521 * There is no point in supporting nblocks64 values that don't have
1522 * an accurate uint64_t byte count representation.
1524 if (nblocks64
>= (UINT64_MAX
>> blkshift
))
1525 return (ddi_prop_op(dev
, dip
, prop_op
, mod_flags
,
1526 name
, valuep
, lengthp
));
1528 size64
= nblocks64
<< blkshift
;
1529 return (ddi_prop_op_size_blksize(dev
, dip
, prop_op
, mod_flags
,
1530 name
, valuep
, lengthp
, size64
, blksize
));
1534 * ddi_prop_op_nblocks: ddi_prop_op_nblocks_blksize with DEV_BSIZE blksize.
1537 ddi_prop_op_nblocks(dev_t dev
, dev_info_t
*dip
, ddi_prop_op_t prop_op
,
1538 int mod_flags
, char *name
, caddr_t valuep
, int *lengthp
, uint64_t nblocks64
)
1540 return (ddi_prop_op_nblocks_blksize(dev
, dip
, prop_op
,
1541 mod_flags
, name
, valuep
, lengthp
, nblocks64
, DEV_BSIZE
));
1545 * ddi_prop_op_size_blksize: The basic property operator for block drivers that
1546 * maintain size in bytes. Provides a of dynamic property implementation for
1547 * size oriented properties based on size64 value and blksize passed in by the
1548 * driver. Fallback to ddi_prop_op if the size64 is too large. This interface
1549 * should not be used with a size64 that represents the driver's idea of how
1550 * to represent unknown, if size is unknown use ddi_prop_op.
1552 * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
1553 * integers. While the most likely interface to request them ([bc]devi_size)
1554 * is declared int (signed) there is no enforcement of this, which means we
1555 * can't enforce limitations here without risking regression.
1558 ddi_prop_op_size_blksize(dev_t dev
, dev_info_t
*dip
, ddi_prop_op_t prop_op
,
1559 int mod_flags
, char *name
, caddr_t valuep
, int *lengthp
, uint64_t size64
,
1568 * This is a kludge to support capture of size(9P) pure dynamic
1569 * properties in snapshots for non-cmlb code (without exposing
1570 * i_ddi_prop_dyn changes). When everyone uses cmlb, this code
1571 * should be removed.
1573 if (i_ddi_prop_dyn_driver_get(dip
) == NULL
) {
1574 static i_ddi_prop_dyn_t prop_dyn_size
[] = {
1575 {"Size", DDI_PROP_TYPE_INT64
, S_IFCHR
},
1576 {"Nblocks", DDI_PROP_TYPE_INT64
, S_IFBLK
},
1579 i_ddi_prop_dyn_driver_set(dip
, prop_dyn_size
);
1582 /* convert block size to shift value */
1583 ASSERT(BIT_ONLYONESET(blksize
));
1584 blkshift
= highbit(blksize
) - 1;
1586 /* compute DEV_BSIZE nblocks value */
1587 nblocks64
= size64
>> blkshift
;
1589 /* get callers length, establish length of our dynamic properties */
1590 callers_length
= *lengthp
;
1592 if (strcmp(name
, "Nblocks") == 0)
1593 *lengthp
= sizeof (uint64_t);
1594 else if (strcmp(name
, "Size") == 0)
1595 *lengthp
= sizeof (uint64_t);
1596 else if ((strcmp(name
, "nblocks") == 0) && (nblocks64
< UINT_MAX
))
1597 *lengthp
= sizeof (uint32_t);
1598 else if ((strcmp(name
, "size") == 0) && (size64
< UINT_MAX
))
1599 *lengthp
= sizeof (uint32_t);
1600 else if ((strcmp(name
, "blksize") == 0) && (blksize
< UINT_MAX
))
1601 *lengthp
= sizeof (uint32_t);
1603 /* fallback to ddi_prop_op */
1604 return (ddi_prop_op(dev
, dip
, prop_op
, mod_flags
,
1605 name
, valuep
, lengthp
));
1608 /* service request for the length of the property */
1609 if (prop_op
== PROP_LEN
)
1610 return (DDI_PROP_SUCCESS
);
1613 case PROP_LEN_AND_VAL_ALLOC
:
1614 if ((buffer
= kmem_alloc(*lengthp
,
1615 (mod_flags
& DDI_PROP_CANSLEEP
) ?
1616 KM_SLEEP
: KM_NOSLEEP
)) == NULL
)
1617 return (DDI_PROP_NO_MEMORY
);
1619 *(caddr_t
*)valuep
= buffer
; /* set callers buf ptr */
1622 case PROP_LEN_AND_VAL_BUF
:
1623 /* the length of the property and the request must match */
1624 if (callers_length
!= *lengthp
)
1625 return (DDI_PROP_INVAL_ARG
);
1627 buffer
= valuep
; /* get callers buf ptr */
1631 return (DDI_PROP_INVAL_ARG
);
1634 /* transfer the value into the buffer */
1635 if (strcmp(name
, "Nblocks") == 0)
1636 *((uint64_t *)buffer
) = nblocks64
;
1637 else if (strcmp(name
, "Size") == 0)
1638 *((uint64_t *)buffer
) = size64
;
1639 else if (strcmp(name
, "nblocks") == 0)
1640 *((uint32_t *)buffer
) = (uint32_t)nblocks64
;
1641 else if (strcmp(name
, "size") == 0)
1642 *((uint32_t *)buffer
) = (uint32_t)size64
;
1643 else if (strcmp(name
, "blksize") == 0)
1644 *((uint32_t *)buffer
) = (uint32_t)blksize
;
1645 return (DDI_PROP_SUCCESS
);
1649 * ddi_prop_op_size: ddi_prop_op_size_blksize with DEV_BSIZE block size.
1652 ddi_prop_op_size(dev_t dev
, dev_info_t
*dip
, ddi_prop_op_t prop_op
,
1653 int mod_flags
, char *name
, caddr_t valuep
, int *lengthp
, uint64_t size64
)
1655 return (ddi_prop_op_size_blksize(dev
, dip
, prop_op
,
1656 mod_flags
, name
, valuep
, lengthp
, size64
, DEV_BSIZE
));
1660 * Variable length props...
1664 * ddi_getlongprop: Get variable length property len+val into a buffer
1665 * allocated by property provider via kmem_alloc. Requester
1666 * is responsible for freeing returned property via kmem_free.
1670 * dev_t: Input: dev_t of property.
1671 * dip: Input: dev_info_t pointer of child.
1672 * flags: Input: Possible flag modifiers are:
1673 * DDI_PROP_DONTPASS: Don't pass to parent if prop not found.
1674 * DDI_PROP_CANSLEEP: Memory allocation may sleep.
1675 * name: Input: name of property.
1676 * valuep: Output: Addr of callers buffer pointer.
1677 * lengthp:Output: *lengthp will contain prop length on exit.
1681 * DDI_PROP_SUCCESS: Prop found and returned.
1682 * DDI_PROP_NOT_FOUND: Prop not found
1683 * DDI_PROP_UNDEFINED: Prop explicitly undefined.
1684 * DDI_PROP_NO_MEMORY: Prop found, but unable to alloc mem.
1688 ddi_getlongprop(dev_t dev
, dev_info_t
*dip
, int flags
,
1689 char *name
, caddr_t valuep
, int *lengthp
)
1691 return (ddi_prop_op(dev
, dip
, PROP_LEN_AND_VAL_ALLOC
,
1692 flags
, name
, valuep
, lengthp
));
1697 * ddi_getlongprop_buf: Get long prop into pre-allocated callers
1698 * buffer. (no memory allocation by provider).
1700 * dev_t: Input: dev_t of property.
1701 * dip: Input: dev_info_t pointer of child.
1702 * flags: Input: DDI_PROP_DONTPASS or NULL
1703 * name: Input: name of property
1704 * valuep: Input: ptr to callers buffer.
1705 * lengthp:I/O: ptr to length of callers buffer on entry,
1706 * actual length of property on exit.
1710 * DDI_PROP_SUCCESS Prop found and returned
1711 * DDI_PROP_NOT_FOUND Prop not found
1712 * DDI_PROP_UNDEFINED Prop explicitly undefined.
1713 * DDI_PROP_BUF_TOO_SMALL Prop found, callers buf too small,
1714 * no value returned, but actual prop
1715 * length returned in *lengthp
1720 ddi_getlongprop_buf(dev_t dev
, dev_info_t
*dip
, int flags
,
1721 char *name
, caddr_t valuep
, int *lengthp
)
1723 return (ddi_prop_op(dev
, dip
, PROP_LEN_AND_VAL_BUF
,
1724 flags
, name
, valuep
, lengthp
));
1728 * Integer/boolean sized props.
1730 * Call is value only... returns found boolean or int sized prop value or
1731 * defvalue if prop not found or is wrong length or is explicitly undefined.
1732 * Only flag is DDI_PROP_DONTPASS...
1734 * By convention, this interface returns boolean (0) sized properties
1737 * This never returns an error, if property not found or specifically
1738 * undefined, the input `defvalue' is returned.
1742 ddi_getprop(dev_t dev
, dev_info_t
*dip
, int flags
, char *name
, int defvalue
)
1744 int propvalue
= defvalue
;
1745 int proplength
= sizeof (int);
1748 error
= ddi_prop_op(dev
, dip
, PROP_LEN_AND_VAL_BUF
,
1749 flags
, name
, (caddr_t
)&propvalue
, &proplength
);
1751 if ((error
== DDI_PROP_SUCCESS
) && (proplength
== 0))
1758 * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
1759 * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
1763 ddi_getproplen(dev_t dev
, dev_info_t
*dip
, int flags
, char *name
, int *lengthp
)
1765 return (ddi_prop_op(dev
, dip
, PROP_LEN
, flags
, name
, NULL
, lengthp
));
1769 * Allocate a struct prop_driver_data, along with 'size' bytes
1770 * for decoded property data. This structure is freed by
1771 * calling ddi_prop_free(9F).
1774 ddi_prop_decode_alloc(size_t size
, void (*prop_free
)(struct prop_driver_data
*))
1776 struct prop_driver_data
*pdd
;
1779 * Allocate a structure with enough memory to store the decoded data.
1781 pdd
= kmem_zalloc(sizeof (struct prop_driver_data
) + size
, KM_SLEEP
);
1782 pdd
->pdd_size
= (sizeof (struct prop_driver_data
) + size
);
1783 pdd
->pdd_prop_free
= prop_free
;
1786 * Return a pointer to the location to put the decoded data.
1788 return ((void *)((caddr_t
)pdd
+ sizeof (struct prop_driver_data
)));
1792 * Allocated the memory needed to store the encoded data in the property
1796 ddi_prop_encode_alloc(prop_handle_t
*ph
, size_t size
)
1799 * If size is zero, then set data to NULL and size to 0. This
1800 * is a boolean property.
1805 ph
->ph_cur_pos
= NULL
;
1806 ph
->ph_save_pos
= NULL
;
1808 if (ph
->ph_flags
== DDI_PROP_DONTSLEEP
) {
1809 ph
->ph_data
= kmem_zalloc(size
, KM_NOSLEEP
);
1810 if (ph
->ph_data
== NULL
)
1811 return (DDI_PROP_NO_MEMORY
);
1813 ph
->ph_data
= kmem_zalloc(size
, KM_SLEEP
);
1815 ph
->ph_cur_pos
= ph
->ph_data
;
1816 ph
->ph_save_pos
= ph
->ph_data
;
1818 return (DDI_PROP_SUCCESS
);
1822 * Free the space allocated by the lookup routines. Each lookup routine
1823 * returns a pointer to the decoded data to the driver. The driver then
1824 * passes this pointer back to us. This data actually lives in a struct
1825 * prop_driver_data. We use negative indexing to find the beginning of
1826 * the structure and then free the entire structure using the size and
1827 * the free routine stored in the structure.
1830 ddi_prop_free(void *datap
)
1832 struct prop_driver_data
*pdd
;
1837 pdd
= (struct prop_driver_data
*)
1838 ((caddr_t
)datap
- sizeof (struct prop_driver_data
));
1840 * Call the free routine to free it
1842 (*pdd
->pdd_prop_free
)(pdd
);
1846 * Free the data associated with an array of ints,
1847 * allocated with ddi_prop_decode_alloc().
1850 ddi_prop_free_ints(struct prop_driver_data
*pdd
)
1852 kmem_free(pdd
, pdd
->pdd_size
);
1856 * Free a single string property or a single string contained within
1857 * the argv style return value of an array of strings.
1860 ddi_prop_free_string(struct prop_driver_data
*pdd
)
1862 kmem_free(pdd
, pdd
->pdd_size
);
1867 * Free an array of strings.
1870 ddi_prop_free_strings(struct prop_driver_data
*pdd
)
1872 kmem_free(pdd
, pdd
->pdd_size
);
1876 * Free the data associated with an array of bytes.
1879 ddi_prop_free_bytes(struct prop_driver_data
*pdd
)
1881 kmem_free(pdd
, pdd
->pdd_size
);
1885 * Reset the current location pointer in the property handle to the
1886 * beginning of the data.
1889 ddi_prop_reset_pos(prop_handle_t
*ph
)
1891 ph
->ph_cur_pos
= ph
->ph_data
;
1892 ph
->ph_save_pos
= ph
->ph_data
;
1896 * Restore the current location pointer in the property handle to the
1900 ddi_prop_save_pos(prop_handle_t
*ph
)
1902 ph
->ph_save_pos
= ph
->ph_cur_pos
;
1906 * Save the location that the current location pointer is pointing to..
1909 ddi_prop_restore_pos(prop_handle_t
*ph
)
1911 ph
->ph_cur_pos
= ph
->ph_save_pos
;
1915 * Property encode/decode functions
1919 * Decode a single integer property
1922 ddi_prop_fm_decode_int(prop_handle_t
*ph
, void *data
, uint_t
*nelements
)
1928 * If there is nothing to decode return an error
1930 if (ph
->ph_size
== 0)
1931 return (DDI_PROP_END_OF_DATA
);
1934 * Decode the property as a single integer and return it
1935 * in data if we were able to decode it.
1937 i
= DDI_PROP_INT(ph
, DDI_PROP_CMD_DECODE
, &tmp
);
1938 if (i
< DDI_PROP_RESULT_OK
) {
1940 case DDI_PROP_RESULT_EOF
:
1941 return (DDI_PROP_END_OF_DATA
);
1943 case DDI_PROP_RESULT_ERROR
:
1944 return (DDI_PROP_CANNOT_DECODE
);
1950 return (DDI_PROP_SUCCESS
);
1954 * Decode a single 64 bit integer property
1957 ddi_prop_fm_decode_int64(prop_handle_t
*ph
, void *data
, uint_t
*nelements
)
1963 * If there is nothing to decode return an error
1965 if (ph
->ph_size
== 0)
1966 return (DDI_PROP_END_OF_DATA
);
1969 * Decode the property as a single integer and return it
1970 * in data if we were able to decode it.
1972 i
= DDI_PROP_INT64(ph
, DDI_PROP_CMD_DECODE
, &tmp
);
1973 if (i
< DDI_PROP_RESULT_OK
) {
1975 case DDI_PROP_RESULT_EOF
:
1976 return (DDI_PROP_END_OF_DATA
);
1978 case DDI_PROP_RESULT_ERROR
:
1979 return (DDI_PROP_CANNOT_DECODE
);
1983 *(int64_t *)data
= tmp
;
1985 return (DDI_PROP_SUCCESS
);
1989 * Decode an array of integers property
1992 ddi_prop_fm_decode_ints(prop_handle_t
*ph
, void *data
, uint_t
*nelements
)
2001 * Figure out how many array elements there are by going through the
2002 * data without decoding it first and counting.
2005 i
= DDI_PROP_INT(ph
, DDI_PROP_CMD_SKIP
, NULL
);
2012 * If there are no elements return an error
2015 return (DDI_PROP_END_OF_DATA
);
2018 * If we cannot skip through the data, we cannot decode it
2020 if (i
== DDI_PROP_RESULT_ERROR
)
2021 return (DDI_PROP_CANNOT_DECODE
);
2024 * Reset the data pointer to the beginning of the encoded data
2026 ddi_prop_reset_pos(ph
);
2029 * Allocated memory to store the decoded value in.
2031 intp
= ddi_prop_decode_alloc((cnt
* sizeof (int)),
2032 ddi_prop_free_ints
);
2035 * Decode each element and place it in the space we just allocated
2038 for (n
= 0; n
< cnt
; n
++, tmp
++) {
2039 i
= DDI_PROP_INT(ph
, DDI_PROP_CMD_DECODE
, tmp
);
2040 if (i
< DDI_PROP_RESULT_OK
) {
2042 * Free the space we just allocated
2043 * and return an error.
2045 ddi_prop_free(intp
);
2047 case DDI_PROP_RESULT_EOF
:
2048 return (DDI_PROP_END_OF_DATA
);
2050 case DDI_PROP_RESULT_ERROR
:
2051 return (DDI_PROP_CANNOT_DECODE
);
2057 *(int **)data
= intp
;
2059 return (DDI_PROP_SUCCESS
);
2063 * Decode a 64 bit integer array property
2066 ddi_prop_fm_decode_int64_array(prop_handle_t
*ph
, void *data
, uint_t
*nelements
)
2075 * Count the number of array elements by going
2076 * through the data without decoding it.
2079 i
= DDI_PROP_INT64(ph
, DDI_PROP_CMD_SKIP
, NULL
);
2086 * If there are no elements return an error
2089 return (DDI_PROP_END_OF_DATA
);
2092 * If we cannot skip through the data, we cannot decode it
2094 if (i
== DDI_PROP_RESULT_ERROR
)
2095 return (DDI_PROP_CANNOT_DECODE
);
2098 * Reset the data pointer to the beginning of the encoded data
2100 ddi_prop_reset_pos(ph
);
2103 * Allocate memory to store the decoded value.
2105 intp
= ddi_prop_decode_alloc((cnt
* sizeof (int64_t)),
2106 ddi_prop_free_ints
);
2109 * Decode each element and place it in the space allocated
2112 for (n
= 0; n
< cnt
; n
++, tmp
++) {
2113 i
= DDI_PROP_INT64(ph
, DDI_PROP_CMD_DECODE
, tmp
);
2114 if (i
< DDI_PROP_RESULT_OK
) {
2116 * Free the space we just allocated
2117 * and return an error.
2119 ddi_prop_free(intp
);
2121 case DDI_PROP_RESULT_EOF
:
2122 return (DDI_PROP_END_OF_DATA
);
2124 case DDI_PROP_RESULT_ERROR
:
2125 return (DDI_PROP_CANNOT_DECODE
);
2131 *(int64_t **)data
= intp
;
2133 return (DDI_PROP_SUCCESS
);
2137 * Encode an array of integers property (Can be one element)
2140 ddi_prop_fm_encode_ints(prop_handle_t
*ph
, void *data
, uint_t nelements
)
2148 * If there is no data, we cannot do anything
2151 return (DDI_PROP_CANNOT_ENCODE
);
2154 * Get the size of an encoded int.
2156 size
= DDI_PROP_INT(ph
, DDI_PROP_CMD_GET_ESIZE
, NULL
);
2158 if (size
< DDI_PROP_RESULT_OK
) {
2160 case DDI_PROP_RESULT_EOF
:
2161 return (DDI_PROP_END_OF_DATA
);
2163 case DDI_PROP_RESULT_ERROR
:
2164 return (DDI_PROP_CANNOT_ENCODE
);
2169 * Allocate space in the handle to store the encoded int.
2171 if (ddi_prop_encode_alloc(ph
, size
* nelements
) !=
2173 return (DDI_PROP_NO_MEMORY
);
2176 * Encode the array of ints.
2179 for (cnt
= 0; cnt
< nelements
; cnt
++, tmp
++) {
2180 i
= DDI_PROP_INT(ph
, DDI_PROP_CMD_ENCODE
, tmp
);
2181 if (i
< DDI_PROP_RESULT_OK
) {
2183 case DDI_PROP_RESULT_EOF
:
2184 return (DDI_PROP_END_OF_DATA
);
2186 case DDI_PROP_RESULT_ERROR
:
2187 return (DDI_PROP_CANNOT_ENCODE
);
2192 return (DDI_PROP_SUCCESS
);
2197 * Encode a 64 bit integer array property
2200 ddi_prop_fm_encode_int64(prop_handle_t
*ph
, void *data
, uint_t nelements
)
2208 * If there is no data, we cannot do anything
2211 return (DDI_PROP_CANNOT_ENCODE
);
2214 * Get the size of an encoded 64 bit int.
2216 size
= DDI_PROP_INT64(ph
, DDI_PROP_CMD_GET_ESIZE
, NULL
);
2218 if (size
< DDI_PROP_RESULT_OK
) {
2220 case DDI_PROP_RESULT_EOF
:
2221 return (DDI_PROP_END_OF_DATA
);
2223 case DDI_PROP_RESULT_ERROR
:
2224 return (DDI_PROP_CANNOT_ENCODE
);
2229 * Allocate space in the handle to store the encoded int.
2231 if (ddi_prop_encode_alloc(ph
, size
* nelements
) !=
2233 return (DDI_PROP_NO_MEMORY
);
2236 * Encode the array of ints.
2238 tmp
= (int64_t *)data
;
2239 for (cnt
= 0; cnt
< nelements
; cnt
++, tmp
++) {
2240 i
= DDI_PROP_INT64(ph
, DDI_PROP_CMD_ENCODE
, tmp
);
2241 if (i
< DDI_PROP_RESULT_OK
) {
2243 case DDI_PROP_RESULT_EOF
:
2244 return (DDI_PROP_END_OF_DATA
);
2246 case DDI_PROP_RESULT_ERROR
:
2247 return (DDI_PROP_CANNOT_ENCODE
);
2252 return (DDI_PROP_SUCCESS
);
2256 * Decode a single string property
2259 ddi_prop_fm_decode_string(prop_handle_t
*ph
, void *data
, uint_t
*nelements
)
2267 * If there is nothing to decode return an error
2269 if (ph
->ph_size
== 0)
2270 return (DDI_PROP_END_OF_DATA
);
2273 * Get the decoded size of the encoded string.
2275 size
= DDI_PROP_STR(ph
, DDI_PROP_CMD_GET_DSIZE
, NULL
);
2276 if (size
< DDI_PROP_RESULT_OK
) {
2278 case DDI_PROP_RESULT_EOF
:
2279 return (DDI_PROP_END_OF_DATA
);
2281 case DDI_PROP_RESULT_ERROR
:
2282 return (DDI_PROP_CANNOT_DECODE
);
2287 * Allocated memory to store the decoded value in.
2289 str
= ddi_prop_decode_alloc((size_t)size
, ddi_prop_free_string
);
2291 ddi_prop_reset_pos(ph
);
2294 * Decode the str and place it in the space we just allocated
2297 i
= DDI_PROP_STR(ph
, DDI_PROP_CMD_DECODE
, tmp
);
2298 if (i
< DDI_PROP_RESULT_OK
) {
2300 * Free the space we just allocated
2301 * and return an error.
2305 case DDI_PROP_RESULT_EOF
:
2306 return (DDI_PROP_END_OF_DATA
);
2308 case DDI_PROP_RESULT_ERROR
:
2309 return (DDI_PROP_CANNOT_DECODE
);
2313 *(char **)data
= str
;
2316 return (DDI_PROP_SUCCESS
);
2320 * Decode an array of strings.
2323 ddi_prop_fm_decode_strings(prop_handle_t
*ph
, void *data
, uint_t
*nelements
)
2335 * Figure out how many array elements there are by going through the
2336 * data without decoding it first and counting.
2339 i
= DDI_PROP_STR(ph
, DDI_PROP_CMD_SKIP
, NULL
);
2346 * If there are no elements return an error
2349 return (DDI_PROP_END_OF_DATA
);
2352 * If we cannot skip through the data, we cannot decode it
2354 if (i
== DDI_PROP_RESULT_ERROR
)
2355 return (DDI_PROP_CANNOT_DECODE
);
2358 * Reset the data pointer to the beginning of the encoded data
2360 ddi_prop_reset_pos(ph
);
2363 * Figure out how much memory we need for the sum total
2365 nbytes
= (cnt
+ 1) * sizeof (char *);
2367 for (n
= 0; n
< cnt
; n
++) {
2369 * Get the decoded size of the current encoded string.
2371 size
= DDI_PROP_STR(ph
, DDI_PROP_CMD_GET_DSIZE
, NULL
);
2372 if (size
< DDI_PROP_RESULT_OK
) {
2374 case DDI_PROP_RESULT_EOF
:
2375 return (DDI_PROP_END_OF_DATA
);
2377 case DDI_PROP_RESULT_ERROR
:
2378 return (DDI_PROP_CANNOT_DECODE
);
2386 * Allocate memory in which to store the decoded strings.
2388 strs
= ddi_prop_decode_alloc(nbytes
, ddi_prop_free_strings
);
2391 * Set up pointers for each string by figuring out yet
2392 * again how long each string is.
2394 ddi_prop_reset_pos(ph
);
2395 ptr
= (caddr_t
)strs
+ ((cnt
+ 1) * sizeof (char *));
2396 for (tmp
= strs
, n
= 0; n
< cnt
; n
++, tmp
++) {
2398 * Get the decoded size of the current encoded string.
2400 size
= DDI_PROP_STR(ph
, DDI_PROP_CMD_GET_DSIZE
, NULL
);
2401 if (size
< DDI_PROP_RESULT_OK
) {
2402 ddi_prop_free(strs
);
2404 case DDI_PROP_RESULT_EOF
:
2405 return (DDI_PROP_END_OF_DATA
);
2407 case DDI_PROP_RESULT_ERROR
:
2408 return (DDI_PROP_CANNOT_DECODE
);
2417 * String array is terminated by a NULL
2422 * Finally, we can decode each string
2424 ddi_prop_reset_pos(ph
);
2425 for (tmp
= strs
, n
= 0; n
< cnt
; n
++, tmp
++) {
2426 i
= DDI_PROP_STR(ph
, DDI_PROP_CMD_DECODE
, *tmp
);
2427 if (i
< DDI_PROP_RESULT_OK
) {
2429 * Free the space we just allocated
2430 * and return an error
2432 ddi_prop_free(strs
);
2434 case DDI_PROP_RESULT_EOF
:
2435 return (DDI_PROP_END_OF_DATA
);
2437 case DDI_PROP_RESULT_ERROR
:
2438 return (DDI_PROP_CANNOT_DECODE
);
2443 *(char ***)data
= strs
;
2446 return (DDI_PROP_SUCCESS
);
2453 ddi_prop_fm_encode_string(prop_handle_t
*ph
, void *data
, uint_t nelements
)
2460 * If there is no data, we cannot do anything
2463 return (DDI_PROP_CANNOT_ENCODE
);
2466 * Get the size of the encoded string.
2468 tmp
= (char **)data
;
2469 size
= DDI_PROP_STR(ph
, DDI_PROP_CMD_GET_ESIZE
, *tmp
);
2470 if (size
< DDI_PROP_RESULT_OK
) {
2472 case DDI_PROP_RESULT_EOF
:
2473 return (DDI_PROP_END_OF_DATA
);
2475 case DDI_PROP_RESULT_ERROR
:
2476 return (DDI_PROP_CANNOT_ENCODE
);
2481 * Allocate space in the handle to store the encoded string.
2483 if (ddi_prop_encode_alloc(ph
, size
) != DDI_PROP_SUCCESS
)
2484 return (DDI_PROP_NO_MEMORY
);
2486 ddi_prop_reset_pos(ph
);
2489 * Encode the string.
2491 tmp
= (char **)data
;
2492 i
= DDI_PROP_STR(ph
, DDI_PROP_CMD_ENCODE
, *tmp
);
2493 if (i
< DDI_PROP_RESULT_OK
) {
2495 case DDI_PROP_RESULT_EOF
:
2496 return (DDI_PROP_END_OF_DATA
);
2498 case DDI_PROP_RESULT_ERROR
:
2499 return (DDI_PROP_CANNOT_ENCODE
);
2503 return (DDI_PROP_SUCCESS
);
2508 * Encode an array of strings.
2511 ddi_prop_fm_encode_strings(prop_handle_t
*ph
, void *data
, uint_t nelements
)
2520 * If there is no data, we cannot do anything
2523 return (DDI_PROP_CANNOT_ENCODE
);
2526 * Get the total size required to encode all the strings.
2529 tmp
= (char **)data
;
2530 for (cnt
= 0; cnt
< nelements
; cnt
++, tmp
++) {
2531 size
= DDI_PROP_STR(ph
, DDI_PROP_CMD_GET_ESIZE
, *tmp
);
2532 if (size
< DDI_PROP_RESULT_OK
) {
2534 case DDI_PROP_RESULT_EOF
:
2535 return (DDI_PROP_END_OF_DATA
);
2537 case DDI_PROP_RESULT_ERROR
:
2538 return (DDI_PROP_CANNOT_ENCODE
);
2541 total_size
+= (uint_t
)size
;
2545 * Allocate space in the handle to store the encoded strings.
2547 if (ddi_prop_encode_alloc(ph
, total_size
) != DDI_PROP_SUCCESS
)
2548 return (DDI_PROP_NO_MEMORY
);
2550 ddi_prop_reset_pos(ph
);
2553 * Encode the array of strings.
2555 tmp
= (char **)data
;
2556 for (cnt
= 0; cnt
< nelements
; cnt
++, tmp
++) {
2557 i
= DDI_PROP_STR(ph
, DDI_PROP_CMD_ENCODE
, *tmp
);
2558 if (i
< DDI_PROP_RESULT_OK
) {
2560 case DDI_PROP_RESULT_EOF
:
2561 return (DDI_PROP_END_OF_DATA
);
2563 case DDI_PROP_RESULT_ERROR
:
2564 return (DDI_PROP_CANNOT_ENCODE
);
2569 return (DDI_PROP_SUCCESS
);
2574 * Decode an array of bytes.
2577 ddi_prop_fm_decode_bytes(prop_handle_t
*ph
, void *data
, uint_t
*nelements
)
2584 * If there are no elements return an error
2586 if (ph
->ph_size
== 0)
2587 return (DDI_PROP_END_OF_DATA
);
2590 * Get the size of the encoded array of bytes.
2592 nbytes
= DDI_PROP_BYTES(ph
, DDI_PROP_CMD_GET_DSIZE
,
2594 if (nbytes
< DDI_PROP_RESULT_OK
) {
2596 case DDI_PROP_RESULT_EOF
:
2597 return (DDI_PROP_END_OF_DATA
);
2599 case DDI_PROP_RESULT_ERROR
:
2600 return (DDI_PROP_CANNOT_DECODE
);
2605 * Allocated memory to store the decoded value in.
2607 tmp
= ddi_prop_decode_alloc(nbytes
, ddi_prop_free_bytes
);
2610 * Decode each element and place it in the space we just allocated
2612 i
= DDI_PROP_BYTES(ph
, DDI_PROP_CMD_DECODE
, tmp
, nbytes
);
2613 if (i
< DDI_PROP_RESULT_OK
) {
2615 * Free the space we just allocated
2616 * and return an error
2620 case DDI_PROP_RESULT_EOF
:
2621 return (DDI_PROP_END_OF_DATA
);
2623 case DDI_PROP_RESULT_ERROR
:
2624 return (DDI_PROP_CANNOT_DECODE
);
2628 *(uchar_t
**)data
= tmp
;
2629 *nelements
= nbytes
;
2631 return (DDI_PROP_SUCCESS
);
2635 * Encode an array of bytes.
2638 ddi_prop_fm_encode_bytes(prop_handle_t
*ph
, void *data
, uint_t nelements
)
2644 * If there are no elements, then this is a boolean property,
2645 * so just create a property handle with no data and return.
2647 if (nelements
== 0) {
2648 (void) ddi_prop_encode_alloc(ph
, 0);
2649 return (DDI_PROP_SUCCESS
);
2653 * Get the size of the encoded array of bytes.
2655 size
= DDI_PROP_BYTES(ph
, DDI_PROP_CMD_GET_ESIZE
, (uchar_t
*)data
,
2657 if (size
< DDI_PROP_RESULT_OK
) {
2659 case DDI_PROP_RESULT_EOF
:
2660 return (DDI_PROP_END_OF_DATA
);
2662 case DDI_PROP_RESULT_ERROR
:
2663 return (DDI_PROP_CANNOT_DECODE
);
2668 * Allocate space in the handle to store the encoded bytes.
2670 if (ddi_prop_encode_alloc(ph
, (uint_t
)size
) != DDI_PROP_SUCCESS
)
2671 return (DDI_PROP_NO_MEMORY
);
2674 * Encode the array of bytes.
2676 i
= DDI_PROP_BYTES(ph
, DDI_PROP_CMD_ENCODE
, (uchar_t
*)data
,
2678 if (i
< DDI_PROP_RESULT_OK
) {
2680 case DDI_PROP_RESULT_EOF
:
2681 return (DDI_PROP_END_OF_DATA
);
2683 case DDI_PROP_RESULT_ERROR
:
2684 return (DDI_PROP_CANNOT_ENCODE
);
2688 return (DDI_PROP_SUCCESS
);
/*
 * OBP 1275 integer, string and byte operators.
 *
 * DDI_PROP_CMD_DECODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot decode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was decoded
 *
 * DDI_PROP_CMD_ENCODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot encode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was encoded
 *
 * DDI_PROP_CMD_SKIP:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot skip the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was skipped
 *
 * DDI_PROP_CMD_GET_ESIZE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot get encoded size
 *	DDI_PROP_RESULT_EOF:		end of data
 *	> 0:				the encoded size
 *
 * DDI_PROP_CMD_GET_DSIZE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot get decoded size
 *	DDI_PROP_RESULT_EOF:		end of data
 *	> 0:				the decoded size
 */
2726 * OBP 1275 integer operator
2728 * OBP properties are a byte stream of data, so integers may not be
2729 * properly aligned. Therefore we need to copy them one byte at a time.
2732 ddi_prop_1275_int(prop_handle_t
*ph
, uint_t cmd
, int *data
)
2737 case DDI_PROP_CMD_DECODE
:
2739 * Check that there is encoded data
2741 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0)
2742 return (DDI_PROP_RESULT_ERROR
);
2743 if (ph
->ph_flags
& PH_FROM_PROM
) {
2744 i
= MIN(ph
->ph_size
, PROP_1275_INT_SIZE
);
2745 if ((int *)ph
->ph_cur_pos
> ((int *)ph
->ph_data
+
2747 return (DDI_PROP_RESULT_ERROR
);
2749 if (ph
->ph_size
< sizeof (int) ||
2750 ((int *)ph
->ph_cur_pos
> ((int *)ph
->ph_data
+
2751 ph
->ph_size
- sizeof (int))))
2752 return (DDI_PROP_RESULT_ERROR
);
2756 * Copy the integer, using the implementation-specific
2757 * copy function if the property is coming from the PROM.
2759 if (ph
->ph_flags
& PH_FROM_PROM
) {
2760 *data
= impl_ddi_prop_int_from_prom(
2761 (uchar_t
*)ph
->ph_cur_pos
,
2762 (ph
->ph_size
< PROP_1275_INT_SIZE
) ?
2763 ph
->ph_size
: PROP_1275_INT_SIZE
);
2765 bcopy(ph
->ph_cur_pos
, data
, sizeof (int));
2769 * Move the current location to the start of the next
2770 * bit of undecoded data.
2772 ph
->ph_cur_pos
= (uchar_t
*)ph
->ph_cur_pos
+
2774 return (DDI_PROP_RESULT_OK
);
2776 case DDI_PROP_CMD_ENCODE
:
2778 * Check that there is room to encoded the data
2780 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0 ||
2781 ph
->ph_size
< PROP_1275_INT_SIZE
||
2782 ((int *)ph
->ph_cur_pos
> ((int *)ph
->ph_data
+
2783 ph
->ph_size
- sizeof (int))))
2784 return (DDI_PROP_RESULT_ERROR
);
2787 * Encode the integer into the byte stream one byte at a
2790 bcopy(data
, ph
->ph_cur_pos
, sizeof (int));
2793 * Move the current location to the start of the next bit of
2794 * space where we can store encoded data.
2796 ph
->ph_cur_pos
= (uchar_t
*)ph
->ph_cur_pos
+ PROP_1275_INT_SIZE
;
2797 return (DDI_PROP_RESULT_OK
);
2799 case DDI_PROP_CMD_SKIP
:
2801 * Check that there is encoded data
2803 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0 ||
2804 ph
->ph_size
< PROP_1275_INT_SIZE
)
2805 return (DDI_PROP_RESULT_ERROR
);
2808 if ((caddr_t
)ph
->ph_cur_pos
==
2809 (caddr_t
)ph
->ph_data
+ ph
->ph_size
) {
2810 return (DDI_PROP_RESULT_EOF
);
2811 } else if ((caddr_t
)ph
->ph_cur_pos
>
2812 (caddr_t
)ph
->ph_data
+ ph
->ph_size
) {
2813 return (DDI_PROP_RESULT_EOF
);
2817 * Move the current location to the start of the next bit of
2820 ph
->ph_cur_pos
= (uchar_t
*)ph
->ph_cur_pos
+ PROP_1275_INT_SIZE
;
2821 return (DDI_PROP_RESULT_OK
);
2823 case DDI_PROP_CMD_GET_ESIZE
:
2825 * Return the size of an encoded integer on OBP
2827 return (PROP_1275_INT_SIZE
);
2829 case DDI_PROP_CMD_GET_DSIZE
:
2831 * Return the size of a decoded integer on the system.
2833 return (sizeof (int));
2837 panic("ddi_prop_1275_int: %x impossible", cmd
);
2840 return (DDI_PROP_RESULT_ERROR
);
2846 * 64 bit integer operator.
2848 * This is an extension, defined by Sun, to the 1275 integer
2849 * operator. This routine handles the encoding/decoding of
2850 * 64 bit integer properties.
2853 ddi_prop_int64_op(prop_handle_t
*ph
, uint_t cmd
, int64_t *data
)
2857 case DDI_PROP_CMD_DECODE
:
2859 * Check that there is encoded data
2861 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0)
2862 return (DDI_PROP_RESULT_ERROR
);
2863 if (ph
->ph_flags
& PH_FROM_PROM
) {
2864 return (DDI_PROP_RESULT_ERROR
);
2866 if (ph
->ph_size
< sizeof (int64_t) ||
2867 ((int64_t *)ph
->ph_cur_pos
>
2868 ((int64_t *)ph
->ph_data
+
2869 ph
->ph_size
- sizeof (int64_t))))
2870 return (DDI_PROP_RESULT_ERROR
);
2873 * Copy the integer, using the implementation-specific
2874 * copy function if the property is coming from the PROM.
2876 if (ph
->ph_flags
& PH_FROM_PROM
) {
2877 return (DDI_PROP_RESULT_ERROR
);
2879 bcopy(ph
->ph_cur_pos
, data
, sizeof (int64_t));
2883 * Move the current location to the start of the next
2884 * bit of undecoded data.
2886 ph
->ph_cur_pos
= (uchar_t
*)ph
->ph_cur_pos
+
2888 return (DDI_PROP_RESULT_OK
);
2890 case DDI_PROP_CMD_ENCODE
:
2892 * Check that there is room to encoded the data
2894 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0 ||
2895 ph
->ph_size
< sizeof (int64_t) ||
2896 ((int64_t *)ph
->ph_cur_pos
> ((int64_t *)ph
->ph_data
+
2897 ph
->ph_size
- sizeof (int64_t))))
2898 return (DDI_PROP_RESULT_ERROR
);
2901 * Encode the integer into the byte stream one byte at a
2904 bcopy(data
, ph
->ph_cur_pos
, sizeof (int64_t));
2907 * Move the current location to the start of the next bit of
2908 * space where we can store encoded data.
2910 ph
->ph_cur_pos
= (uchar_t
*)ph
->ph_cur_pos
+
2912 return (DDI_PROP_RESULT_OK
);
2914 case DDI_PROP_CMD_SKIP
:
2916 * Check that there is encoded data
2918 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0 ||
2919 ph
->ph_size
< sizeof (int64_t))
2920 return (DDI_PROP_RESULT_ERROR
);
2922 if ((caddr_t
)ph
->ph_cur_pos
==
2923 (caddr_t
)ph
->ph_data
+ ph
->ph_size
) {
2924 return (DDI_PROP_RESULT_EOF
);
2925 } else if ((caddr_t
)ph
->ph_cur_pos
>
2926 (caddr_t
)ph
->ph_data
+ ph
->ph_size
) {
2927 return (DDI_PROP_RESULT_EOF
);
2931 * Move the current location to the start of
2932 * the next bit of undecoded data.
2934 ph
->ph_cur_pos
= (uchar_t
*)ph
->ph_cur_pos
+
2936 return (DDI_PROP_RESULT_OK
);
2938 case DDI_PROP_CMD_GET_ESIZE
:
2940 * Return the size of an encoded integer on OBP
2942 return (sizeof (int64_t));
2944 case DDI_PROP_CMD_GET_DSIZE
:
2946 * Return the size of a decoded integer on the system.
2948 return (sizeof (int64_t));
2952 panic("ddi_prop_int64_op: %x impossible", cmd
);
2955 return (DDI_PROP_RESULT_ERROR
);
2961 * OBP 1275 string operator.
2963 * OBP strings are NULL terminated.
2966 ddi_prop_1275_string(prop_handle_t
*ph
, uint_t cmd
, char *data
)
2973 case DDI_PROP_CMD_DECODE
:
2975 * Check that there is encoded data
2977 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0) {
2978 return (DDI_PROP_RESULT_ERROR
);
2982 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
2983 * how to NULL terminate result.
2985 p
= (char *)ph
->ph_cur_pos
;
2986 end
= (char *)ph
->ph_data
+ ph
->ph_size
;
2988 return (DDI_PROP_RESULT_EOF
);
2992 if (*p
++ == 0) { /* NULL from OBP */
2994 return (DDI_PROP_RESULT_OK
);
2999 * If OBP did not NULL terminate string, which happens
3000 * (at least) for 'true'/'false' boolean values, account for
3001 * the space and store null termination on decode.
3005 return (DDI_PROP_RESULT_OK
);
3007 case DDI_PROP_CMD_ENCODE
:
3009 * Check that there is room to encoded the data
3011 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0) {
3012 return (DDI_PROP_RESULT_ERROR
);
3015 n
= strlen(data
) + 1;
3016 if ((char *)ph
->ph_cur_pos
> ((char *)ph
->ph_data
+
3018 return (DDI_PROP_RESULT_ERROR
);
3022 * Copy the NULL terminated string
3024 bcopy(data
, ph
->ph_cur_pos
, n
);
3027 * Move the current location to the start of the next bit of
3028 * space where we can store encoded data.
3030 ph
->ph_cur_pos
= (char *)ph
->ph_cur_pos
+ n
;
3031 return (DDI_PROP_RESULT_OK
);
3033 case DDI_PROP_CMD_SKIP
:
3035 * Check that there is encoded data
3037 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0) {
3038 return (DDI_PROP_RESULT_ERROR
);
3042 * Return the string length plus one for the NULL
3043 * We know the size of the property, we need to
3044 * ensure that the string is properly formatted,
3045 * since we may be looking up random OBP data.
3047 p
= (char *)ph
->ph_cur_pos
;
3048 end
= (char *)ph
->ph_data
+ ph
->ph_size
;
3050 return (DDI_PROP_RESULT_EOF
);
3053 if (*p
++ == 0) { /* NULL from OBP */
3055 return (DDI_PROP_RESULT_OK
);
3060 * Accommodate the fact that OBP does not always NULL
3061 * terminate strings.
3064 return (DDI_PROP_RESULT_OK
);
3066 case DDI_PROP_CMD_GET_ESIZE
:
3068 * Return the size of the encoded string on OBP.
3070 return (strlen(data
) + 1);
3072 case DDI_PROP_CMD_GET_DSIZE
:
3074 * Return the string length plus one for the NULL.
3075 * We know the size of the property, we need to
3076 * ensure that the string is properly formatted,
3077 * since we may be looking up random OBP data.
3079 p
= (char *)ph
->ph_cur_pos
;
3080 end
= (char *)ph
->ph_data
+ ph
->ph_size
;
3082 return (DDI_PROP_RESULT_EOF
);
3084 for (n
= 0; p
< end
; n
++) {
3085 if (*p
++ == 0) { /* NULL from OBP */
3092 * If OBP did not NULL terminate string, which happens for
3093 * 'true'/'false' boolean values, account for the space
3094 * to store null termination here.
3101 panic("ddi_prop_1275_string: %x impossible", cmd
);
3104 return (DDI_PROP_RESULT_ERROR
);
3110 * OBP 1275 byte operator
3112 * Caller must specify the number of bytes to get. OBP encodes bytes
3113 * as a byte so there is a 1-to-1 translation.
3116 ddi_prop_1275_bytes(prop_handle_t
*ph
, uint_t cmd
, uchar_t
*data
,
3120 case DDI_PROP_CMD_DECODE
:
3122 * Check that there is encoded data
3124 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0 ||
3125 ph
->ph_size
< nelements
||
3126 ((char *)ph
->ph_cur_pos
> ((char *)ph
->ph_data
+
3127 ph
->ph_size
- nelements
)))
3128 return (DDI_PROP_RESULT_ERROR
);
3131 * Copy out the bytes
3133 bcopy(ph
->ph_cur_pos
, data
, nelements
);
3136 * Move the current location
3138 ph
->ph_cur_pos
= (char *)ph
->ph_cur_pos
+ nelements
;
3139 return (DDI_PROP_RESULT_OK
);
3141 case DDI_PROP_CMD_ENCODE
:
3143 * Check that there is room to encode the data
3145 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0 ||
3146 ph
->ph_size
< nelements
||
3147 ((char *)ph
->ph_cur_pos
> ((char *)ph
->ph_data
+
3148 ph
->ph_size
- nelements
)))
3149 return (DDI_PROP_RESULT_ERROR
);
3154 bcopy(data
, ph
->ph_cur_pos
, nelements
);
3157 * Move the current location to the start of the next bit of
3158 * space where we can store encoded data.
3160 ph
->ph_cur_pos
= (char *)ph
->ph_cur_pos
+ nelements
;
3161 return (DDI_PROP_RESULT_OK
);
3163 case DDI_PROP_CMD_SKIP
:
3165 * Check that there is encoded data
3167 if (ph
->ph_cur_pos
== NULL
|| ph
->ph_size
== 0 ||
3168 ph
->ph_size
< nelements
)
3169 return (DDI_PROP_RESULT_ERROR
);
3171 if ((char *)ph
->ph_cur_pos
> ((char *)ph
->ph_data
+
3172 ph
->ph_size
- nelements
))
3173 return (DDI_PROP_RESULT_EOF
);
3176 * Move the current location
3178 ph
->ph_cur_pos
= (char *)ph
->ph_cur_pos
+ nelements
;
3179 return (DDI_PROP_RESULT_OK
);
3181 case DDI_PROP_CMD_GET_ESIZE
:
3183 * The size in bytes of the encoded size is the
3184 * same as the decoded size provided by the caller.
3188 case DDI_PROP_CMD_GET_DSIZE
:
3190 * Just return the number of bytes specified by the caller.
3196 panic("ddi_prop_1275_bytes: %x impossible", cmd
);
3199 return (DDI_PROP_RESULT_ERROR
);
3205 * Used for properties that come from the OBP, hardware configuration files,
3206 * or that are created by calls to ddi_prop_update(9F).
3208 static struct prop_handle_ops prop_1275_ops
= {
3210 ddi_prop_1275_string
,
3211 ddi_prop_1275_bytes
,
/*
 * Interface to create/modify a managed property on child's behalf...
 * Flags interpreted are:
 *	DDI_PROP_CANSLEEP:	Allow memory allocation to sleep.
 *	DDI_PROP_SYSTEM_DEF:	Manipulate system list rather than driver list.
 *
 * Use same dev_t when modifying or undefining a property.
 * Search for properties with DDI_DEV_T_ANY to match first named
 * property on the list.
 *
 * Properties are stored LIFO and subsequently will match the first
 * `matching' instance.
 */

/*
 * ddi_prop_add:	Add a software defined property
 */

/*
 * define to get a new ddi_prop_t.
 * km_flags are KM_SLEEP or KM_NOSLEEP.
 */

#define	DDI_NEW_PROP_T(km_flags)	\
	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))
3243 ddi_prop_add(dev_t dev
, dev_info_t
*dip
, int flags
,
3244 char *name
, caddr_t value
, int length
)
3246 ddi_prop_t
*new_propp
, *propp
;
3247 ddi_prop_t
**list_head
= &(DEVI(dip
)->devi_drv_prop_ptr
);
3248 int km_flags
= KM_NOSLEEP
;
3252 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
3255 if (dev
== DDI_DEV_T_ANY
|| name
== (char *)0 || strlen(name
) == 0)
3256 return (DDI_PROP_INVAL_ARG
);
3258 if (flags
& DDI_PROP_CANSLEEP
)
3259 km_flags
= KM_SLEEP
;
3261 if (flags
& DDI_PROP_SYSTEM_DEF
)
3262 list_head
= &(DEVI(dip
)->devi_sys_prop_ptr
);
3263 else if (flags
& DDI_PROP_HW_DEF
)
3264 list_head
= &(DEVI(dip
)->devi_hw_prop_ptr
);
3266 if ((new_propp
= DDI_NEW_PROP_T(km_flags
)) == NULL
) {
3267 cmn_err(CE_CONT
, prop_no_mem_msg
, name
);
3268 return (DDI_PROP_NO_MEMORY
);
3272 * If dev is major number 0, then we need to do a ddi_name_to_major
3273 * to get the real major number for the device. This needs to be
3274 * done because some drivers need to call ddi_prop_create in their
3275 * attach routines but they don't have a dev. By creating the dev
3276 * ourself if the major number is 0, drivers will not have to know what
3277 * their major number. They can just create a dev with major number
3278 * 0 and pass it in. For device 0, we will be doing a little extra
3279 * work by recreating the same dev that we already have, but its the
3280 * price you pay :-).
3282 * This fixes bug #1098060.
3284 if (getmajor(dev
) == DDI_MAJOR_T_UNKNOWN
) {
3285 new_propp
->prop_dev
=
3286 makedevice(ddi_name_to_major(DEVI(dip
)->devi_binding_name
),
3289 new_propp
->prop_dev
= dev
;
3292 * Allocate space for property name and copy it in...
3295 name_buf_len
= strlen(name
) + 1;
3296 new_propp
->prop_name
= kmem_alloc(name_buf_len
, km_flags
);
3297 if (new_propp
->prop_name
== 0) {
3298 kmem_free(new_propp
, sizeof (ddi_prop_t
));
3299 cmn_err(CE_CONT
, prop_no_mem_msg
, name
);
3300 return (DDI_PROP_NO_MEMORY
);
3302 bcopy(name
, new_propp
->prop_name
, name_buf_len
);
3305 * Set the property type
3307 new_propp
->prop_flags
= flags
& DDI_PROP_TYPE_MASK
;
3310 * Set length and value ONLY if not an explicit property undefine:
3311 * NOTE: value and length are zero for explicit undefines.
3314 if (flags
& DDI_PROP_UNDEF_IT
) {
3315 new_propp
->prop_flags
|= DDI_PROP_UNDEF_IT
;
3317 if ((new_propp
->prop_len
= length
) != 0) {
3318 new_propp
->prop_val
= kmem_alloc(length
, km_flags
);
3319 if (new_propp
->prop_val
== 0) {
3320 kmem_free(new_propp
->prop_name
, name_buf_len
);
3321 kmem_free(new_propp
, sizeof (ddi_prop_t
));
3322 cmn_err(CE_CONT
, prop_no_mem_msg
, name
);
3323 return (DDI_PROP_NO_MEMORY
);
3325 bcopy(value
, new_propp
->prop_val
, length
);
3330 * Link property into beginning of list. (Properties are LIFO order.)
3333 mutex_enter(&(DEVI(dip
)->devi_lock
));
3335 new_propp
->prop_next
= propp
;
3336 *list_head
= new_propp
;
3337 mutex_exit(&(DEVI(dip
)->devi_lock
));
3338 return (DDI_PROP_SUCCESS
);
3343 * ddi_prop_change: Modify a software managed property value
3345 * Set new length and value if found.
3346 * returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
3347 * input name is the NULL string.
3348 * returns DDI_PROP_NO_MEMORY if unable to allocate memory
3350 * Note: an undef can be modified to be a define,
3351 * (you can't go the other way.)
3355 ddi_prop_change(dev_t dev
, dev_info_t
*dip
, int flags
,
3356 char *name
, caddr_t value
, int length
)
3359 ddi_prop_t
**ppropp
;
3362 if ((dev
== DDI_DEV_T_ANY
) || (name
== NULL
) || (strlen(name
) == 0))
3363 return (DDI_PROP_INVAL_ARG
);
3366 * Preallocate buffer, even if we don't need it...
3369 p
= kmem_alloc(length
, (flags
& DDI_PROP_CANSLEEP
) ?
3370 KM_SLEEP
: KM_NOSLEEP
);
3372 cmn_err(CE_CONT
, prop_no_mem_msg
, name
);
3373 return (DDI_PROP_NO_MEMORY
);
3378 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
3379 * number, a real dev_t value should be created based upon the dip's
3380 * binding driver. See ddi_prop_add...
3382 if (getmajor(dev
) == DDI_MAJOR_T_UNKNOWN
)
3384 ddi_name_to_major(DEVI(dip
)->devi_binding_name
),
3388 * Check to see if the property exists. If so we modify it.
3389 * Else we create it by calling ddi_prop_add().
3391 mutex_enter(&(DEVI(dip
)->devi_lock
));
3392 ppropp
= &DEVI(dip
)->devi_drv_prop_ptr
;
3393 if (flags
& DDI_PROP_SYSTEM_DEF
)
3394 ppropp
= &DEVI(dip
)->devi_sys_prop_ptr
;
3395 else if (flags
& DDI_PROP_HW_DEF
)
3396 ppropp
= &DEVI(dip
)->devi_hw_prop_ptr
;
3398 if ((propp
= i_ddi_prop_search(dev
, name
, flags
, ppropp
)) != NULL
) {
3400 * Need to reallocate buffer? If so, do it
3401 * carefully (reuse same space if new prop
3402 * is same size and non-NULL sized).
3405 bcopy(value
, p
, length
);
3407 if (propp
->prop_len
!= 0)
3408 kmem_free(propp
->prop_val
, propp
->prop_len
);
3410 propp
->prop_len
= length
;
3411 propp
->prop_val
= p
;
3412 propp
->prop_flags
&= ~DDI_PROP_UNDEF_IT
;
3413 mutex_exit(&(DEVI(dip
)->devi_lock
));
3414 return (DDI_PROP_SUCCESS
);
3417 mutex_exit(&(DEVI(dip
)->devi_lock
));
3419 kmem_free(p
, length
);
3421 return (ddi_prop_add(dev
, dip
, flags
, name
, value
, length
));
3425 * Common update routine used to update and encode a property. Creates
3426 * a property handle, calls the property encode routine, figures out if
3427 * the property already exists and updates if it does. Otherwise it
3428 * creates if it does not exist.
3431 ddi_prop_update_common(dev_t match_dev
, dev_info_t
*dip
, int flags
,
3432 char *name
, void *data
, uint_t nelements
,
3433 int (*prop_create
)(prop_handle_t
*, void *data
, uint_t nelements
))
3440 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3443 if (match_dev
== DDI_DEV_T_ANY
|| name
== NULL
|| strlen(name
) == 0)
3444 return (DDI_PROP_INVAL_ARG
);
3450 ph
.ph_cur_pos
= NULL
;
3451 ph
.ph_save_pos
= NULL
;
3453 ph
.ph_ops
= &prop_1275_ops
;
3457 * For compatibility with the old interfaces. The old interfaces
3458 * didn't sleep by default and slept when the flag was set. These
3459 * interfaces to the opposite. So the old interfaces now set the
3460 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
3463 * Blocked data or unblocked data allocation
3464 * for ph.ph_data in ddi_prop_encode_alloc()
3466 if (flags
& DDI_PROP_DONTSLEEP
) {
3468 ph
.ph_flags
= DDI_PROP_DONTSLEEP
;
3470 ourflags
= flags
| DDI_PROP_CANSLEEP
;
3471 ph
.ph_flags
= DDI_PROP_CANSLEEP
;
3475 * Encode the data and store it in the property handle by
3476 * calling the prop_encode routine.
3478 if ((rval
= (*prop_create
)(&ph
, data
, nelements
)) !=
3480 if (rval
== DDI_PROP_NO_MEMORY
)
3481 cmn_err(CE_CONT
, prop_no_mem_msg
, name
);
3482 if (ph
.ph_size
!= 0)
3483 kmem_free(ph
.ph_data
, ph
.ph_size
);
3488 * The old interfaces use a stacking approach to creating
3489 * properties. If we are being called from the old interfaces,
3490 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
3491 * create without checking.
3493 if (flags
& DDI_PROP_STACK_CREATE
) {
3494 rval
= ddi_prop_add(match_dev
, dip
,
3495 ourflags
, name
, ph
.ph_data
, ph
.ph_size
);
3497 rval
= ddi_prop_change(match_dev
, dip
,
3498 ourflags
, name
, ph
.ph_data
, ph
.ph_size
);
3502 * Free the encoded data allocated in the prop_encode routine.
3504 if (ph
.ph_size
!= 0)
3505 kmem_free(ph
.ph_data
, ph
.ph_size
);
3512 * ddi_prop_create: Define a managed property:
3513 * See above for details.
3517 ddi_prop_create(dev_t dev
, dev_info_t
*dip
, int flag
,
3518 char *name
, caddr_t value
, int length
)
3520 if (!(flag
& DDI_PROP_CANSLEEP
)) {
3521 flag
|= DDI_PROP_DONTSLEEP
;
3522 #ifdef DDI_PROP_DEBUG
3524 cmn_err(CE_NOTE
, "!ddi_prop_create: interface obsolete,"
3525 "use ddi_prop_update (prop = %s, node = %s%d)",
3526 name
, ddi_driver_name(dip
), ddi_get_instance(dip
));
3527 #endif /* DDI_PROP_DEBUG */
3529 flag
&= ~DDI_PROP_SYSTEM_DEF
;
3530 flag
|= DDI_PROP_STACK_CREATE
| DDI_PROP_TYPE_ANY
;
3531 return (ddi_prop_update_common(dev
, dip
, flag
, name
,
3532 value
, length
, ddi_prop_fm_encode_bytes
));
3536 e_ddi_prop_create(dev_t dev
, dev_info_t
*dip
, int flag
,
3537 char *name
, caddr_t value
, int length
)
3539 if (!(flag
& DDI_PROP_CANSLEEP
))
3540 flag
|= DDI_PROP_DONTSLEEP
;
3541 flag
|= DDI_PROP_SYSTEM_DEF
| DDI_PROP_STACK_CREATE
| DDI_PROP_TYPE_ANY
;
3542 return (ddi_prop_update_common(dev
, dip
, flag
,
3543 name
, value
, length
, ddi_prop_fm_encode_bytes
));
3547 ddi_prop_modify(dev_t dev
, dev_info_t
*dip
, int flag
,
3548 char *name
, caddr_t value
, int length
)
3550 ASSERT((flag
& DDI_PROP_TYPE_MASK
) == 0);
3553 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3556 if (dev
== DDI_DEV_T_ANY
|| name
== NULL
|| strlen(name
) == 0)
3557 return (DDI_PROP_INVAL_ARG
);
3559 if (!(flag
& DDI_PROP_CANSLEEP
))
3560 flag
|= DDI_PROP_DONTSLEEP
;
3561 flag
&= ~DDI_PROP_SYSTEM_DEF
;
3562 if (ddi_prop_exists(dev
, dip
, (flag
| DDI_PROP_NOTPROM
), name
) == 0)
3563 return (DDI_PROP_NOT_FOUND
);
3565 return (ddi_prop_update_common(dev
, dip
,
3566 (flag
| DDI_PROP_TYPE_BYTE
), name
,
3567 value
, length
, ddi_prop_fm_encode_bytes
));
3571 e_ddi_prop_modify(dev_t dev
, dev_info_t
*dip
, int flag
,
3572 char *name
, caddr_t value
, int length
)
3574 ASSERT((flag
& DDI_PROP_TYPE_MASK
) == 0);
3577 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3580 if (dev
== DDI_DEV_T_ANY
|| name
== NULL
|| strlen(name
) == 0)
3581 return (DDI_PROP_INVAL_ARG
);
3583 if (ddi_prop_exists(dev
, dip
, (flag
| DDI_PROP_SYSTEM_DEF
), name
) == 0)
3584 return (DDI_PROP_NOT_FOUND
);
3586 if (!(flag
& DDI_PROP_CANSLEEP
))
3587 flag
|= DDI_PROP_DONTSLEEP
;
3588 return (ddi_prop_update_common(dev
, dip
,
3589 (flag
| DDI_PROP_SYSTEM_DEF
| DDI_PROP_TYPE_BYTE
),
3590 name
, value
, length
, ddi_prop_fm_encode_bytes
));
3595 * Common lookup routine used to lookup and decode a property.
3596 * Creates a property handle, searches for the raw encoded data,
3597 * fills in the handle, and calls the property decode functions
3600 * This routine is not static because ddi_bus_prop_op() which lives in
3601 * ddi_impl.c calls it. No driver should be calling this routine.
3604 ddi_prop_lookup_common(dev_t match_dev
, dev_info_t
*dip
,
3605 uint_t flags
, char *name
, void *data
, uint_t
*nelements
,
3606 int (*prop_decoder
)(prop_handle_t
*, void *data
, uint_t
*nelements
))
3612 if ((match_dev
== DDI_DEV_T_NONE
) ||
3613 (name
== NULL
) || (strlen(name
) == 0))
3614 return (DDI_PROP_INVAL_ARG
);
3616 ourflags
= (flags
& DDI_PROP_DONTSLEEP
) ? flags
:
3617 flags
| DDI_PROP_CANSLEEP
;
3620 * Get the encoded data
3622 bzero(&ph
, sizeof (prop_handle_t
));
3624 if ((flags
& DDI_UNBND_DLPI2
) || (flags
& DDI_PROP_ROOTNEX_GLOBAL
)) {
3626 * For rootnex and unbound dlpi style-2 devices, index into
3627 * the devnames' array and search the global
3630 ourflags
&= ~DDI_UNBND_DLPI2
;
3631 rval
= i_ddi_prop_search_global(match_dev
,
3632 ourflags
, name
, &ph
.ph_data
, &ph
.ph_size
);
3634 rval
= ddi_prop_search_common(match_dev
, dip
,
3635 PROP_LEN_AND_VAL_ALLOC
, ourflags
, name
,
3636 &ph
.ph_data
, &ph
.ph_size
);
3640 if (rval
!= DDI_PROP_SUCCESS
&& rval
!= DDI_PROP_FOUND_1275
) {
3641 ASSERT(ph
.ph_data
== NULL
);
3642 ASSERT(ph
.ph_size
== 0);
3647 * If the encoded data came from a OBP or software
3648 * use the 1275 OBP decode/encode routines.
3650 ph
.ph_cur_pos
= ph
.ph_data
;
3651 ph
.ph_save_pos
= ph
.ph_data
;
3652 ph
.ph_ops
= &prop_1275_ops
;
3653 ph
.ph_flags
= (rval
== DDI_PROP_FOUND_1275
) ? PH_FROM_PROM
: 0;
3655 rval
= (*prop_decoder
)(&ph
, data
, nelements
);
3658 * Free the encoded data
3660 if (ph
.ph_size
!= 0)
3661 kmem_free(ph
.ph_data
, ph
.ph_size
);
3667 * Lookup and return an array of composite properties. The driver must
3668 * provide the decode routine.
3671 ddi_prop_lookup(dev_t match_dev
, dev_info_t
*dip
,
3672 uint_t flags
, char *name
, void *data
, uint_t
*nelements
,
3673 int (*prop_decoder
)(prop_handle_t
*, void *data
, uint_t
*nelements
))
3675 return (ddi_prop_lookup_common(match_dev
, dip
,
3676 (flags
| DDI_PROP_TYPE_COMPOSITE
), name
,
3677 data
, nelements
, prop_decoder
));
3681 * Return 1 if a property exists (no type checking done).
3682 * Return 0 if it does not exist.
3685 ddi_prop_exists(dev_t match_dev
, dev_info_t
*dip
, uint_t flags
, char *name
)
3690 i
= ddi_prop_search_common(match_dev
, dip
, PROP_EXISTS
,
3691 flags
| DDI_PROP_TYPE_MASK
, name
, NULL
, &x
);
3692 return (i
== DDI_PROP_SUCCESS
|| i
== DDI_PROP_FOUND_1275
);
3697 * Update an array of composite properties. The driver must
3698 * provide the encode routine.
3701 ddi_prop_update(dev_t match_dev
, dev_info_t
*dip
,
3702 char *name
, void *data
, uint_t nelements
,
3703 int (*prop_create
)(prop_handle_t
*, void *data
, uint_t nelements
))
3705 return (ddi_prop_update_common(match_dev
, dip
, DDI_PROP_TYPE_COMPOSITE
,
3706 name
, data
, nelements
, prop_create
));
3710 * Get a single integer or boolean property and return it.
3711 * If the property does not exists, or cannot be decoded,
3712 * then return the defvalue passed in.
3714 * This routine always succeeds.
3717 ddi_prop_get_int(dev_t match_dev
, dev_info_t
*dip
, uint_t flags
,
3718 char *name
, int defvalue
)
3724 if (flags
& ~(DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
3725 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
| DDI_PROP_ROOTNEX_GLOBAL
)) {
3728 cmn_err(CE_WARN
, "ddi_prop_get_int: invalid flag"
3729 " 0x%x (prop = %s, node = %s%d)", flags
,
3730 name
, ddi_driver_name(dip
), ddi_get_instance(dip
));
3733 flags
&= DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
3734 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
;
3737 if ((rval
= ddi_prop_lookup_common(match_dev
, dip
,
3738 (flags
| DDI_PROP_TYPE_INT
), name
, &data
, &nelements
,
3739 ddi_prop_fm_decode_int
)) != DDI_PROP_SUCCESS
) {
3740 if (rval
== DDI_PROP_END_OF_DATA
)
3749 * Get a single 64 bit integer or boolean property and return it.
3750 * If the property does not exists, or cannot be decoded,
3751 * then return the defvalue passed in.
3753 * This routine always succeeds.
3756 ddi_prop_get_int64(dev_t match_dev
, dev_info_t
*dip
, uint_t flags
,
3757 char *name
, int64_t defvalue
)
3763 if (flags
& ~(DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
3764 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
| DDI_PROP_ROOTNEX_GLOBAL
)) {
3767 cmn_err(CE_WARN
, "ddi_prop_get_int64: invalid flag"
3768 " 0x%x (prop = %s, node = %s%d)", flags
,
3769 name
, ddi_driver_name(dip
), ddi_get_instance(dip
));
3772 return (DDI_PROP_INVAL_ARG
);
3775 if ((rval
= ddi_prop_lookup_common(match_dev
, dip
,
3776 (flags
| DDI_PROP_TYPE_INT64
| DDI_PROP_NOTPROM
),
3777 name
, &data
, &nelements
, ddi_prop_fm_decode_int64
))
3778 != DDI_PROP_SUCCESS
) {
3779 if (rval
== DDI_PROP_END_OF_DATA
)
3788 * Get an array of integer property
3791 ddi_prop_lookup_int_array(dev_t match_dev
, dev_info_t
*dip
, uint_t flags
,
3792 char *name
, int **data
, uint_t
*nelements
)
3794 if (flags
& ~(DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
3795 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
| DDI_PROP_ROOTNEX_GLOBAL
)) {
3798 cmn_err(CE_WARN
, "ddi_prop_lookup_int_array: "
3799 "invalid flag 0x%x (prop = %s, node = %s%d)",
3800 flags
, name
, ddi_driver_name(dip
),
3801 ddi_get_instance(dip
));
3804 flags
&= DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
3805 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
;
3808 return (ddi_prop_lookup_common(match_dev
, dip
,
3809 (flags
| DDI_PROP_TYPE_INT
), name
, data
,
3810 nelements
, ddi_prop_fm_decode_ints
));
3814 * Get an array of 64 bit integer properties
3817 ddi_prop_lookup_int64_array(dev_t match_dev
, dev_info_t
*dip
, uint_t flags
,
3818 char *name
, int64_t **data
, uint_t
*nelements
)
3820 if (flags
& ~(DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
3821 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
| DDI_PROP_ROOTNEX_GLOBAL
)) {
3824 cmn_err(CE_WARN
, "ddi_prop_lookup_int64_array: "
3825 "invalid flag 0x%x (prop = %s, node = %s%d)",
3826 flags
, name
, ddi_driver_name(dip
),
3827 ddi_get_instance(dip
));
3830 return (DDI_PROP_INVAL_ARG
);
3833 return (ddi_prop_lookup_common(match_dev
, dip
,
3834 (flags
| DDI_PROP_TYPE_INT64
| DDI_PROP_NOTPROM
),
3835 name
, data
, nelements
, ddi_prop_fm_decode_int64_array
));
3839 * Update a single integer property. If the property exists on the drivers
3840 * property list it updates, else it creates it.
3843 ddi_prop_update_int(dev_t match_dev
, dev_info_t
*dip
,
3844 char *name
, int data
)
3846 return (ddi_prop_update_common(match_dev
, dip
, DDI_PROP_TYPE_INT
,
3847 name
, &data
, 1, ddi_prop_fm_encode_ints
));
3851 * Update a single 64 bit integer property.
3852 * Update the driver property list if it exists, else create it.
3855 ddi_prop_update_int64(dev_t match_dev
, dev_info_t
*dip
,
3856 char *name
, int64_t data
)
3858 return (ddi_prop_update_common(match_dev
, dip
, DDI_PROP_TYPE_INT64
,
3859 name
, &data
, 1, ddi_prop_fm_encode_int64
));
3863 e_ddi_prop_update_int(dev_t match_dev
, dev_info_t
*dip
,
3864 char *name
, int data
)
3866 return (ddi_prop_update_common(match_dev
, dip
,
3867 DDI_PROP_SYSTEM_DEF
| DDI_PROP_TYPE_INT
,
3868 name
, &data
, 1, ddi_prop_fm_encode_ints
));
3872 e_ddi_prop_update_int64(dev_t match_dev
, dev_info_t
*dip
,
3873 char *name
, int64_t data
)
3875 return (ddi_prop_update_common(match_dev
, dip
,
3876 DDI_PROP_SYSTEM_DEF
| DDI_PROP_TYPE_INT64
,
3877 name
, &data
, 1, ddi_prop_fm_encode_int64
));
3881 * Update an array of integer property. If the property exists on the drivers
3882 * property list it updates, else it creates it.
3885 ddi_prop_update_int_array(dev_t match_dev
, dev_info_t
*dip
,
3886 char *name
, int *data
, uint_t nelements
)
3888 return (ddi_prop_update_common(match_dev
, dip
, DDI_PROP_TYPE_INT
,
3889 name
, data
, nelements
, ddi_prop_fm_encode_ints
));
3893 * Update an array of 64 bit integer properties.
3894 * Update the driver property list if it exists, else create it.
3897 ddi_prop_update_int64_array(dev_t match_dev
, dev_info_t
*dip
,
3898 char *name
, int64_t *data
, uint_t nelements
)
3900 return (ddi_prop_update_common(match_dev
, dip
, DDI_PROP_TYPE_INT64
,
3901 name
, data
, nelements
, ddi_prop_fm_encode_int64
));
3905 e_ddi_prop_update_int64_array(dev_t match_dev
, dev_info_t
*dip
,
3906 char *name
, int64_t *data
, uint_t nelements
)
3908 return (ddi_prop_update_common(match_dev
, dip
,
3909 DDI_PROP_SYSTEM_DEF
| DDI_PROP_TYPE_INT64
,
3910 name
, data
, nelements
, ddi_prop_fm_encode_int64
));
3914 e_ddi_prop_update_int_array(dev_t match_dev
, dev_info_t
*dip
,
3915 char *name
, int *data
, uint_t nelements
)
3917 return (ddi_prop_update_common(match_dev
, dip
,
3918 DDI_PROP_SYSTEM_DEF
| DDI_PROP_TYPE_INT
,
3919 name
, data
, nelements
, ddi_prop_fm_encode_ints
));
3923 * Get a single string property.
3926 ddi_prop_lookup_string(dev_t match_dev
, dev_info_t
*dip
, uint_t flags
,
3927 char *name
, char **data
)
3931 if (flags
& ~(DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
3932 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
| DDI_PROP_ROOTNEX_GLOBAL
)) {
3935 cmn_err(CE_WARN
, "%s: invalid flag 0x%x "
3936 "(prop = %s, node = %s%d); invalid bits ignored",
3937 "ddi_prop_lookup_string", flags
, name
,
3938 ddi_driver_name(dip
), ddi_get_instance(dip
));
3941 flags
&= DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
3942 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
;
3945 return (ddi_prop_lookup_common(match_dev
, dip
,
3946 (flags
| DDI_PROP_TYPE_STRING
), name
, data
,
3947 &x
, ddi_prop_fm_decode_string
));
3951 * Get an array of strings property.
3954 ddi_prop_lookup_string_array(dev_t match_dev
, dev_info_t
*dip
, uint_t flags
,
3955 char *name
, char ***data
, uint_t
*nelements
)
3957 if (flags
& ~(DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
3958 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
| DDI_PROP_ROOTNEX_GLOBAL
)) {
3961 cmn_err(CE_WARN
, "ddi_prop_lookup_string_array: "
3962 "invalid flag 0x%x (prop = %s, node = %s%d)",
3963 flags
, name
, ddi_driver_name(dip
),
3964 ddi_get_instance(dip
));
3967 flags
&= DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
3968 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
;
3971 return (ddi_prop_lookup_common(match_dev
, dip
,
3972 (flags
| DDI_PROP_TYPE_STRING
), name
, data
,
3973 nelements
, ddi_prop_fm_decode_strings
));
3977 * Update a single string property.
3980 ddi_prop_update_string(dev_t match_dev
, dev_info_t
*dip
,
3981 char *name
, char *data
)
3983 return (ddi_prop_update_common(match_dev
, dip
,
3984 DDI_PROP_TYPE_STRING
, name
, &data
, 1,
3985 ddi_prop_fm_encode_string
));
3989 e_ddi_prop_update_string(dev_t match_dev
, dev_info_t
*dip
,
3990 char *name
, char *data
)
3992 return (ddi_prop_update_common(match_dev
, dip
,
3993 DDI_PROP_SYSTEM_DEF
| DDI_PROP_TYPE_STRING
,
3994 name
, &data
, 1, ddi_prop_fm_encode_string
));
3999 * Update an array of strings property.
4002 ddi_prop_update_string_array(dev_t match_dev
, dev_info_t
*dip
,
4003 char *name
, char **data
, uint_t nelements
)
4005 return (ddi_prop_update_common(match_dev
, dip
,
4006 DDI_PROP_TYPE_STRING
, name
, data
, nelements
,
4007 ddi_prop_fm_encode_strings
));
4011 e_ddi_prop_update_string_array(dev_t match_dev
, dev_info_t
*dip
,
4012 char *name
, char **data
, uint_t nelements
)
4014 return (ddi_prop_update_common(match_dev
, dip
,
4015 DDI_PROP_SYSTEM_DEF
| DDI_PROP_TYPE_STRING
,
4016 name
, data
, nelements
,
4017 ddi_prop_fm_encode_strings
));
4022 * Get an array of bytes property.
4025 ddi_prop_lookup_byte_array(dev_t match_dev
, dev_info_t
*dip
, uint_t flags
,
4026 char *name
, uchar_t
**data
, uint_t
*nelements
)
4028 if (flags
& ~(DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
4029 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
| DDI_PROP_ROOTNEX_GLOBAL
)) {
4032 cmn_err(CE_WARN
, "ddi_prop_lookup_byte_array: "
4033 " invalid flag 0x%x (prop = %s, node = %s%d)",
4034 flags
, name
, ddi_driver_name(dip
),
4035 ddi_get_instance(dip
));
4038 flags
&= DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
|
4039 LDI_DEV_T_ANY
| DDI_UNBND_DLPI2
;
4042 return (ddi_prop_lookup_common(match_dev
, dip
,
4043 (flags
| DDI_PROP_TYPE_BYTE
), name
, data
,
4044 nelements
, ddi_prop_fm_decode_bytes
));
4048 * Update an array of bytes property.
4051 ddi_prop_update_byte_array(dev_t match_dev
, dev_info_t
*dip
,
4052 char *name
, uchar_t
*data
, uint_t nelements
)
4055 return (DDI_PROP_INVAL_ARG
);
4057 return (ddi_prop_update_common(match_dev
, dip
, DDI_PROP_TYPE_BYTE
,
4058 name
, data
, nelements
, ddi_prop_fm_encode_bytes
));
4063 e_ddi_prop_update_byte_array(dev_t match_dev
, dev_info_t
*dip
,
4064 char *name
, uchar_t
*data
, uint_t nelements
)
4067 return (DDI_PROP_INVAL_ARG
);
4069 return (ddi_prop_update_common(match_dev
, dip
,
4070 DDI_PROP_SYSTEM_DEF
| DDI_PROP_TYPE_BYTE
,
4071 name
, data
, nelements
, ddi_prop_fm_encode_bytes
));
4076 * ddi_prop_remove_common: Undefine a managed property:
4077 * Input dev_t must match dev_t when defined.
4078 * Returns DDI_PROP_NOT_FOUND, possibly.
4079 * DDI_PROP_INVAL_ARG is also possible if dev is
4080 * DDI_DEV_T_ANY or incoming name is the NULL string.
4083 ddi_prop_remove_common(dev_t dev
, dev_info_t
*dip
, char *name
, int flag
)
4085 ddi_prop_t
**list_head
= &(DEVI(dip
)->devi_drv_prop_ptr
);
4087 ddi_prop_t
*lastpropp
= NULL
;
4089 if ((dev
== DDI_DEV_T_ANY
) || (name
== (char *)0) ||
4090 (strlen(name
) == 0)) {
4091 return (DDI_PROP_INVAL_ARG
);
4094 if (flag
& DDI_PROP_SYSTEM_DEF
)
4095 list_head
= &(DEVI(dip
)->devi_sys_prop_ptr
);
4096 else if (flag
& DDI_PROP_HW_DEF
)
4097 list_head
= &(DEVI(dip
)->devi_hw_prop_ptr
);
4099 mutex_enter(&(DEVI(dip
)->devi_lock
));
4101 for (propp
= *list_head
; propp
!= NULL
; propp
= propp
->prop_next
) {
4102 if (DDI_STRSAME(propp
->prop_name
, name
) &&
4103 (dev
== propp
->prop_dev
)) {
4105 * Unlink this propp allowing for it to
4106 * be first in the list:
4109 if (lastpropp
== NULL
)
4110 *list_head
= propp
->prop_next
;
4112 lastpropp
->prop_next
= propp
->prop_next
;
4114 mutex_exit(&(DEVI(dip
)->devi_lock
));
4117 * Free memory and return...
4119 kmem_free(propp
->prop_name
,
4120 strlen(propp
->prop_name
) + 1);
4121 if (propp
->prop_len
!= 0)
4122 kmem_free(propp
->prop_val
, propp
->prop_len
);
4123 kmem_free(propp
, sizeof (ddi_prop_t
));
4124 return (DDI_PROP_SUCCESS
);
4128 mutex_exit(&(DEVI(dip
)->devi_lock
));
4129 return (DDI_PROP_NOT_FOUND
);
4133 ddi_prop_remove(dev_t dev
, dev_info_t
*dip
, char *name
)
4135 return (ddi_prop_remove_common(dev
, dip
, name
, 0));
4139 e_ddi_prop_remove(dev_t dev
, dev_info_t
*dip
, char *name
)
4141 return (ddi_prop_remove_common(dev
, dip
, name
, DDI_PROP_SYSTEM_DEF
));
4145 * e_ddi_prop_list_delete: remove a list of properties
4146 * Note that the caller needs to provide the required protection
4147 * (eg. devi_lock if these properties are still attached to a devi)
4150 e_ddi_prop_list_delete(ddi_prop_t
*props
)
4152 i_ddi_prop_list_delete(props
);
4156 * ddi_prop_remove_all_common:
4157 * Used before unloading a driver to remove
4158 * all properties. (undefines all dev_t's props.)
4159 * Also removes `explicitly undefined' props.
4160 * No errors possible.
4163 ddi_prop_remove_all_common(dev_info_t
*dip
, int flag
)
4165 ddi_prop_t
**list_head
;
4167 mutex_enter(&(DEVI(dip
)->devi_lock
));
4168 if (flag
& DDI_PROP_SYSTEM_DEF
) {
4169 list_head
= &(DEVI(dip
)->devi_sys_prop_ptr
);
4170 } else if (flag
& DDI_PROP_HW_DEF
) {
4171 list_head
= &(DEVI(dip
)->devi_hw_prop_ptr
);
4173 list_head
= &(DEVI(dip
)->devi_drv_prop_ptr
);
4175 i_ddi_prop_list_delete(*list_head
);
4177 mutex_exit(&(DEVI(dip
)->devi_lock
));
4182 * ddi_prop_remove_all: Remove all driver prop definitions.
4186 ddi_prop_remove_all(dev_info_t
*dip
)
4188 i_ddi_prop_dyn_driver_set(dip
, NULL
);
4189 ddi_prop_remove_all_common(dip
, 0);
4193 * e_ddi_prop_remove_all: Remove all system prop definitions.
4197 e_ddi_prop_remove_all(dev_info_t
*dip
)
4199 ddi_prop_remove_all_common(dip
, (int)DDI_PROP_SYSTEM_DEF
);
4204 * ddi_prop_undefine: Explicitly undefine a property. Property
4205 * searches which match this property return
4206 * the error code DDI_PROP_UNDEFINED.
4208 * Use ddi_prop_remove to negate effect of
4211 * See above for error returns.
4215 ddi_prop_undefine(dev_t dev
, dev_info_t
*dip
, int flag
, char *name
)
4217 if (!(flag
& DDI_PROP_CANSLEEP
))
4218 flag
|= DDI_PROP_DONTSLEEP
;
4219 flag
|= DDI_PROP_STACK_CREATE
| DDI_PROP_UNDEF_IT
| DDI_PROP_TYPE_ANY
;
4220 return (ddi_prop_update_common(dev
, dip
, flag
,
4221 name
, NULL
, 0, ddi_prop_fm_encode_bytes
));
4225 e_ddi_prop_undefine(dev_t dev
, dev_info_t
*dip
, int flag
, char *name
)
4227 if (!(flag
& DDI_PROP_CANSLEEP
))
4228 flag
|= DDI_PROP_DONTSLEEP
;
4229 flag
|= DDI_PROP_SYSTEM_DEF
| DDI_PROP_STACK_CREATE
|
4230 DDI_PROP_UNDEF_IT
| DDI_PROP_TYPE_ANY
;
4231 return (ddi_prop_update_common(dev
, dip
, flag
,
4232 name
, NULL
, 0, ddi_prop_fm_encode_bytes
));
4236 * Support for gathering dynamic properties in devinfo snapshot.
4239 i_ddi_prop_dyn_driver_set(dev_info_t
*dip
, i_ddi_prop_dyn_t
*dp
)
4241 DEVI(dip
)->devi_prop_dyn_driver
= dp
;
4245 i_ddi_prop_dyn_driver_get(dev_info_t
*dip
)
4247 return (DEVI(dip
)->devi_prop_dyn_driver
);
4251 i_ddi_prop_dyn_parent_set(dev_info_t
*dip
, i_ddi_prop_dyn_t
*dp
)
4253 DEVI(dip
)->devi_prop_dyn_parent
= dp
;
4257 i_ddi_prop_dyn_parent_get(dev_info_t
*dip
)
4259 return (DEVI(dip
)->devi_prop_dyn_parent
);
4263 i_ddi_prop_dyn_cache_invalidate(dev_info_t
*dip
, i_ddi_prop_dyn_t
*dp
)
4265 /* for now we invalidate the entire cached snapshot */
4267 i_ddi_di_cache_invalidate();
4272 ddi_prop_cache_invalidate(dev_t dev
, dev_info_t
*dip
, char *name
, int flags
)
4274 /* for now we invalidate the entire cached snapshot */
4275 i_ddi_di_cache_invalidate();
4280 * Code to search hardware layer (PROM), if it exists, on behalf of child.
4282 * if input dip != child_dip, then call is on behalf of child
4283 * to search PROM, do it via ddi_prop_search_common() and ascend only
4286 * if input dip == ch_dip (child_dip), call is on behalf of root driver,
4287 * to search for PROM defined props only.
4289 * Note that the PROM search is done only if the requested dev
4290 * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
4291 * have no associated dev, thus are automatically associated with
4294 * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
4296 * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
4297 * that the property resides in the prom.
4300 impl_ddi_bus_prop_op(dev_t dev
, dev_info_t
*dip
, dev_info_t
*ch_dip
,
4301 ddi_prop_op_t prop_op
, int mod_flags
,
4302 char *name
, caddr_t valuep
, int *lengthp
)
4308 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
4309 * look in caller's PROM if it's a self identifying device...
4311 * Note that this is very similar to ddi_prop_op, but we
4312 * search the PROM instead of the s/w defined properties,
4313 * and we are called on by the parent driver to do this for
4317 if (((dev
== DDI_DEV_T_NONE
) || (dev
== DDI_DEV_T_ANY
)) &&
4318 ndi_dev_is_prom_node(ch_dip
) &&
4319 ((mod_flags
& DDI_PROP_NOTPROM
) == 0)) {
4320 len
= prom_getproplen((pnode_t
)DEVI(ch_dip
)->devi_nodeid
, name
);
4322 return (DDI_PROP_NOT_FOUND
);
4326 * If exists only request, we're done
4328 if (prop_op
== PROP_EXISTS
) {
4329 return (DDI_PROP_FOUND_1275
);
4333 * If length only request or prop length == 0, get out
4335 if ((prop_op
== PROP_LEN
) || (len
== 0)) {
4337 return (DDI_PROP_FOUND_1275
);
4341 * Allocate buffer if required... (either way `buffer'
4342 * is receiving address).
4347 case PROP_LEN_AND_VAL_ALLOC
:
4349 buffer
= kmem_alloc((size_t)len
,
4350 mod_flags
& DDI_PROP_CANSLEEP
?
4351 KM_SLEEP
: KM_NOSLEEP
);
4352 if (buffer
== NULL
) {
4353 return (DDI_PROP_NO_MEMORY
);
4355 *(caddr_t
*)valuep
= buffer
;
4358 case PROP_LEN_AND_VAL_BUF
:
4360 if (len
> (*lengthp
)) {
4362 return (DDI_PROP_BUF_TOO_SMALL
);
4373 * Call the PROM function to do the copy.
4375 (void) prom_getprop((pnode_t
)DEVI(ch_dip
)->devi_nodeid
,
4378 *lengthp
= len
; /* return the actual length to the caller */
4379 (void) impl_fix_props(dip
, ch_dip
, name
, len
, buffer
);
4380 return (DDI_PROP_FOUND_1275
);
4383 return (DDI_PROP_NOT_FOUND
);
4387 * The ddi_bus_prop_op default bus nexus prop op function.
4389 * Code to search hardware layer (PROM), if it exists,
4390 * on behalf of child, then, if appropriate, ascend and check
4391 * my own software defined properties...
4394 ddi_bus_prop_op(dev_t dev
, dev_info_t
*dip
, dev_info_t
*ch_dip
,
4395 ddi_prop_op_t prop_op
, int mod_flags
,
4396 char *name
, caddr_t valuep
, int *lengthp
)
4400 error
= impl_ddi_bus_prop_op(dev
, dip
, ch_dip
, prop_op
, mod_flags
,
4401 name
, valuep
, lengthp
);
4403 if (error
== DDI_PROP_SUCCESS
|| error
== DDI_PROP_FOUND_1275
||
4404 error
== DDI_PROP_BUF_TOO_SMALL
)
4407 if (error
== DDI_PROP_NO_MEMORY
) {
4408 cmn_err(CE_CONT
, prop_no_mem_msg
, name
);
4409 return (DDI_PROP_NO_MEMORY
);
4413 * Check the 'options' node as a last resort
4415 if ((mod_flags
& DDI_PROP_DONTPASS
) != 0)
4416 return (DDI_PROP_NOT_FOUND
);
4418 if (ch_dip
== ddi_root_node()) {
4420 * As a last resort, when we've reached
4421 * the top and still haven't found the
4422 * property, see if the desired property
4423 * is attached to the options node.
4425 * The options dip is attached right after boot.
4427 ASSERT(options_dip
!= NULL
);
4429 * Force the "don't pass" flag to *just* see
4430 * what the options node has to offer.
4432 return (ddi_prop_search_common(dev
, options_dip
, prop_op
,
4433 mod_flags
|DDI_PROP_DONTPASS
, name
, valuep
,
4434 (uint_t
*)lengthp
));
4438 * Otherwise, continue search with parent's s/w defined properties...
4439 * NOTE: Using `dip' in following call increments the level.
4442 return (ddi_prop_search_common(dev
, dip
, prop_op
, mod_flags
,
4443 name
, valuep
, (uint_t
*)lengthp
));
4447 * External property functions used by other parts of the kernel...
4451 * e_ddi_getlongprop: See comments for ddi_get_longprop.
4455 e_ddi_getlongprop(dev_t dev
, vtype_t type
, char *name
, int flags
,
4456 caddr_t valuep
, int *lengthp
)
4458 _NOTE(ARGUNUSED(type
))
4460 ddi_prop_op_t prop_op
= PROP_LEN_AND_VAL_ALLOC
;
4463 if ((devi
= e_ddi_hold_devi_by_dev(dev
, 0)) == NULL
)
4464 return (DDI_PROP_NOT_FOUND
);
4466 error
= cdev_prop_op(dev
, devi
, prop_op
, flags
, name
, valuep
, lengthp
);
4467 ddi_release_devi(devi
);
4472 * e_ddi_getlongprop_buf: See comments for ddi_getlongprop_buf.
4476 e_ddi_getlongprop_buf(dev_t dev
, vtype_t type
, char *name
, int flags
,
4477 caddr_t valuep
, int *lengthp
)
4479 _NOTE(ARGUNUSED(type
))
4481 ddi_prop_op_t prop_op
= PROP_LEN_AND_VAL_BUF
;
4484 if ((devi
= e_ddi_hold_devi_by_dev(dev
, 0)) == NULL
)
4485 return (DDI_PROP_NOT_FOUND
);
4487 error
= cdev_prop_op(dev
, devi
, prop_op
, flags
, name
, valuep
, lengthp
);
4488 ddi_release_devi(devi
);
4493 * e_ddi_getprop: See comments for ddi_getprop.
4496 e_ddi_getprop(dev_t dev
, vtype_t type
, char *name
, int flags
, int defvalue
)
4498 _NOTE(ARGUNUSED(type
))
4500 ddi_prop_op_t prop_op
= PROP_LEN_AND_VAL_BUF
;
4501 int propvalue
= defvalue
;
4502 int proplength
= sizeof (int);
4505 if ((devi
= e_ddi_hold_devi_by_dev(dev
, 0)) == NULL
)
4508 error
= cdev_prop_op(dev
, devi
, prop_op
,
4509 flags
, name
, (caddr_t
)&propvalue
, &proplength
);
4510 ddi_release_devi(devi
);
4512 if ((error
== DDI_PROP_SUCCESS
) && (proplength
== 0))
4519 * e_ddi_getprop_int64:
4521 * This is a typed interfaces, but predates typed properties. With the
4522 * introduction of typed properties the framework tries to ensure
4523 * consistent use of typed interfaces. This is why TYPE_INT64 is not
4524 * part of TYPE_ANY. E_ddi_getprop_int64 is a special case where a
4525 * typed interface invokes legacy (non-typed) interfaces:
4526 * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)). In this case the
4527 * fact that TYPE_INT64 is not part of TYPE_ANY matters. To support
4528 * this type of lookup as a single operation we invoke the legacy
4529 * non-typed interfaces with the special CONSUMER_TYPED bit set. The
4530 * framework ddi_prop_op(9F) implementation is expected to check for
4531 * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
4532 * (currently TYPE_INT64).
4535 e_ddi_getprop_int64(dev_t dev
, vtype_t type
, char *name
,
4536 int flags
, int64_t defvalue
)
4538 _NOTE(ARGUNUSED(type
))
4540 ddi_prop_op_t prop_op
= PROP_LEN_AND_VAL_BUF
;
4541 int64_t propvalue
= defvalue
;
4542 int proplength
= sizeof (propvalue
);
4545 if ((devi
= e_ddi_hold_devi_by_dev(dev
, 0)) == NULL
)
4548 error
= cdev_prop_op(dev
, devi
, prop_op
, flags
|
4549 DDI_PROP_CONSUMER_TYPED
, name
, (caddr_t
)&propvalue
, &proplength
);
4550 ddi_release_devi(devi
);
4552 if ((error
== DDI_PROP_SUCCESS
) && (proplength
== 0))
4559 * e_ddi_getproplen: See comments for ddi_getproplen.
4562 e_ddi_getproplen(dev_t dev
, vtype_t type
, char *name
, int flags
, int *lengthp
)
4564 _NOTE(ARGUNUSED(type
))
4566 ddi_prop_op_t prop_op
= PROP_LEN
;
4569 if ((devi
= e_ddi_hold_devi_by_dev(dev
, 0)) == NULL
)
4570 return (DDI_PROP_NOT_FOUND
);
4572 error
= cdev_prop_op(dev
, devi
, prop_op
, flags
, name
, NULL
, lengthp
);
4573 ddi_release_devi(devi
);
4578 * Routines to get at elements of the dev_info structure
4582 * ddi_binding_name: Return the driver binding name of the devinfo node
4583 * This is the name the OS used to bind the node to a driver.
4586 ddi_binding_name(dev_info_t
*dip
)
4588 return (DEVI(dip
)->devi_binding_name
);
4592 * ddi_driver_major: Return the major number of the driver that
4593 * the supplied devinfo is bound to. If not yet bound,
4596 * When used by the driver bound to 'devi', this
4597 * function will reliably return the driver major number.
4598 * Other ways of determining the driver major number, such as
4599 * major = ddi_name_to_major(ddi_get_name(devi));
4600 * major = ddi_name_to_major(ddi_binding_name(devi));
4601 * can return a different result as the driver/alias binding
4602 * can change dynamically, and thus should be avoided.
4605 ddi_driver_major(dev_info_t
*devi
)
4607 return (DEVI(devi
)->devi_major
);
4611 * ddi_driver_name: Return the normalized driver name. this is the
4612 * actual driver name
4615 ddi_driver_name(dev_info_t
*devi
)
4619 if ((major
= ddi_driver_major(devi
)) != DDI_MAJOR_T_NONE
)
4620 return (ddi_major_to_name(major
));
4622 return (ddi_node_name(devi
));
4626 * i_ddi_set_binding_name: Set binding name.
4628 * Set the binding name to the given name.
4629 * This routine is for use by the ddi implementation, not by drivers.
4632 i_ddi_set_binding_name(dev_info_t
*dip
, char *name
)
4634 DEVI(dip
)->devi_binding_name
= name
;
4639 * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
4640 * the implementation has used to bind the node to a driver.
4643 ddi_get_name(dev_info_t
*dip
)
4645 return (DEVI(dip
)->devi_binding_name
);
4649 * ddi_node_name: Return the name property of the devinfo node
4650 * This may differ from ddi_binding_name if the node name
4651 * does not define a binding to a driver (i.e. generic names).
4654 ddi_node_name(dev_info_t
*dip
)
4656 return (DEVI(dip
)->devi_node_name
);
4661 * ddi_get_nodeid: Get nodeid stored in dev_info structure.
4664 ddi_get_nodeid(dev_info_t
*dip
)
4666 return (DEVI(dip
)->devi_nodeid
);
4670 ddi_get_instance(dev_info_t
*dip
)
4672 return (DEVI(dip
)->devi_instance
);
4676 ddi_get_driver(dev_info_t
*dip
)
4678 return (DEVI(dip
)->devi_ops
);
4682 ddi_set_driver(dev_info_t
*dip
, struct dev_ops
*devo
)
4684 DEVI(dip
)->devi_ops
= devo
;
4688 * ddi_set_driver_private/ddi_get_driver_private:
4689 * Get/set device driver private data in devinfo.
4692 ddi_set_driver_private(dev_info_t
*dip
, void *data
)
4694 DEVI(dip
)->devi_driver_data
= data
;
4698 ddi_get_driver_private(dev_info_t
*dip
)
4700 return (DEVI(dip
)->devi_driver_data
);
4704 * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
4708 ddi_get_parent(dev_info_t
*dip
)
4710 return ((dev_info_t
*)DEVI(dip
)->devi_parent
);
4714 ddi_get_child(dev_info_t
*dip
)
4716 return ((dev_info_t
*)DEVI(dip
)->devi_child
);
4720 ddi_get_next_sibling(dev_info_t
*dip
)
4722 return ((dev_info_t
*)DEVI(dip
)->devi_sibling
);
4726 ddi_get_next(dev_info_t
*dip
)
4728 return ((dev_info_t
*)DEVI(dip
)->devi_next
);
4732 ddi_set_next(dev_info_t
*dip
, dev_info_t
*nextdip
)
4734 DEVI(dip
)->devi_next
= DEVI(nextdip
);
4738 * ddi_root_node: Return root node of devinfo tree
4744 extern dev_info_t
*top_devinfo
;
4746 return (top_devinfo
);
4750 * Miscellaneous functions:
4754 * Implementation specific hooks
4758 ddi_report_dev(dev_info_t
*d
)
4762 (void) ddi_ctlops(d
, d
, DDI_CTLOPS_REPORTDEV
, (void *)0, (void *)0);
4765 * If this devinfo node has cb_ops, it's implicitly accessible from
4766 * userland, so we print its full name together with the instance
4767 * number 'abbreviation' that the driver may use internally.
4769 if (DEVI(d
)->devi_ops
->devo_cb_ops
!= (struct cb_ops
*)0 &&
4770 (b
= kmem_zalloc(MAXPATHLEN
, KM_NOSLEEP
))) {
4771 cmn_err(CE_CONT
, "?%s%d is %s\n",
4772 ddi_driver_name(d
), ddi_get_instance(d
),
4773 ddi_pathname(d
, b
));
4774 kmem_free(b
, MAXPATHLEN
);
4779 * ddi_ctlops() is described in the assembler not to buy a new register
4780 * window when it's called and can reduce cost in climbing the device tree
4781 * without using the tail call optimization.
4784 ddi_dev_regsize(dev_info_t
*dev
, uint_t rnumber
, off_t
*result
)
4788 ret
= ddi_ctlops(dev
, dev
, DDI_CTLOPS_REGSIZE
,
4789 (void *)&rnumber
, (void *)result
);
4791 return (ret
== DDI_SUCCESS
? DDI_SUCCESS
: DDI_FAILURE
);
4795 ddi_dev_nregs(dev_info_t
*dev
, int *result
)
4797 return (ddi_ctlops(dev
, dev
, DDI_CTLOPS_NREGS
, 0, (void *)result
));
4801 ddi_dev_is_sid(dev_info_t
*d
)
4803 return (ddi_ctlops(d
, d
, DDI_CTLOPS_SIDDEV
, (void *)0, (void *)0));
4807 ddi_slaveonly(dev_info_t
*d
)
4809 return (ddi_ctlops(d
, d
, DDI_CTLOPS_SLAVEONLY
, (void *)0, (void *)0));
4813 ddi_dev_affinity(dev_info_t
*a
, dev_info_t
*b
)
4815 return (ddi_ctlops(a
, a
, DDI_CTLOPS_AFFINITY
, (void *)b
, (void *)0));
4819 ddi_streams_driver(dev_info_t
*dip
)
4821 if (i_ddi_devi_attached(dip
) &&
4822 (DEVI(dip
)->devi_ops
->devo_cb_ops
!= NULL
) &&
4823 (DEVI(dip
)->devi_ops
->devo_cb_ops
->cb_str
!= NULL
))
4824 return (DDI_SUCCESS
);
4825 return (DDI_FAILURE
);
4829 * callback free list
4832 static int ncallbacks
;
4833 static int nc_low
= 170;
4834 static int nc_med
= 512;
4835 static int nc_high
= 2048;
4836 static struct ddi_callback
*callbackq
;
4837 static struct ddi_callback
*callbackqfree
;
4840 * set/run callback lists
4843 kstat_named_t cb_asked
;
4844 kstat_named_t cb_new
;
4845 kstat_named_t cb_run
;
4846 kstat_named_t cb_delete
;
4847 kstat_named_t cb_maxreq
;
4848 kstat_named_t cb_maxlist
;
4849 kstat_named_t cb_alloc
;
4850 kstat_named_t cb_runouts
;
4851 kstat_named_t cb_L2
;
4852 kstat_named_t cb_grow
;
4854 {"asked", KSTAT_DATA_UINT32
},
4855 {"new", KSTAT_DATA_UINT32
},
4856 {"run", KSTAT_DATA_UINT32
},
4857 {"delete", KSTAT_DATA_UINT32
},
4858 {"maxreq", KSTAT_DATA_UINT32
},
4859 {"maxlist", KSTAT_DATA_UINT32
},
4860 {"alloc", KSTAT_DATA_UINT32
},
4861 {"runouts", KSTAT_DATA_UINT32
},
4862 {"L2", KSTAT_DATA_UINT32
},
4863 {"grow", KSTAT_DATA_UINT32
},
4866 #define nc_asked cb_asked.value.ui32
4867 #define nc_new cb_new.value.ui32
4868 #define nc_run cb_run.value.ui32
4869 #define nc_delete cb_delete.value.ui32
4870 #define nc_maxreq cb_maxreq.value.ui32
4871 #define nc_maxlist cb_maxlist.value.ui32
4872 #define nc_alloc cb_alloc.value.ui32
4873 #define nc_runouts cb_runouts.value.ui32
4874 #define nc_L2 cb_L2.value.ui32
4875 #define nc_grow cb_grow.value.ui32
4877 static kmutex_t ddi_callback_mutex
;
4880 * callbacks are handled using a L1/L2 cache. The L1 cache
4881 * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
4882 * we can't get callbacks from the L1 cache [because pageout is doing
4883 * I/O at the time freemem is 0], we allocate callbacks out of the
4884 * L2 cache. The L2 cache is static and depends on the memory size.
4885 * [We might also count the number of devices at probe time and
4886 * allocate one structure per device and adjust for deferred attach]
4889 impl_ddi_callback_init(void)
4895 physmegs
= physmem
>> (20 - PAGESHIFT
);
4896 if (physmegs
< 48) {
4897 ncallbacks
= nc_low
;
4898 } else if (physmegs
< 128) {
4899 ncallbacks
= nc_med
;
4901 ncallbacks
= nc_high
;
4907 callbackq
= kmem_zalloc(
4908 ncallbacks
* sizeof (struct ddi_callback
), KM_SLEEP
);
4909 for (i
= 0; i
< ncallbacks
-1; i
++)
4910 callbackq
[i
].c_nfree
= &callbackq
[i
+1];
4911 callbackqfree
= callbackq
;
4914 if (ksp
= kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED
,
4915 sizeof (cbstats
) / sizeof (kstat_named_t
), KSTAT_FLAG_VIRTUAL
)) {
4916 ksp
->ks_data
= (void *) &cbstats
;
4923 callback_insert(int (*funcp
)(caddr_t
), caddr_t arg
, uintptr_t *listid
,
4926 struct ddi_callback
*list
, *marker
, *new;
4927 size_t size
= sizeof (struct ddi_callback
);
4929 list
= marker
= (struct ddi_callback
*)*listid
;
4930 while (list
!= NULL
) {
4931 if (list
->c_call
== funcp
&& list
->c_arg
== arg
) {
4932 list
->c_count
+= count
;
4936 list
= list
->c_nlist
;
4938 new = kmem_alloc(size
, KM_NOSLEEP
);
4940 new = callbackqfree
;
4942 new = kmem_alloc_tryhard(sizeof (struct ddi_callback
),
4943 &size
, KM_NOSLEEP
| KM_PANIC
);
4946 callbackqfree
= new->c_nfree
;
4950 if (marker
!= NULL
) {
4951 marker
->c_nlist
= new;
4953 *listid
= (uintptr_t)new;
4956 new->c_nlist
= NULL
;
4957 new->c_call
= funcp
;
4959 new->c_count
= count
;
4962 if (cbstats
.nc_alloc
> cbstats
.nc_maxlist
)
4963 cbstats
.nc_maxlist
= cbstats
.nc_alloc
;
4967 ddi_set_callback(int (*funcp
)(caddr_t
), caddr_t arg
, uintptr_t *listid
)
4969 mutex_enter(&ddi_callback_mutex
);
4971 if ((cbstats
.nc_asked
- cbstats
.nc_run
) > cbstats
.nc_maxreq
)
4972 cbstats
.nc_maxreq
= (cbstats
.nc_asked
- cbstats
.nc_run
);
4973 (void) callback_insert(funcp
, arg
, listid
, 1);
4974 mutex_exit(&ddi_callback_mutex
);
4978 real_callback_run(void *Queue
)
4980 int (*funcp
)(caddr_t
);
4984 struct ddi_callback
*list
, *marker
;
4985 int check_pending
= 1;
4989 mutex_enter(&ddi_callback_mutex
);
4991 list
= (struct ddi_callback
*)*listid
;
4993 mutex_exit(&ddi_callback_mutex
);
4996 if (check_pending
) {
4998 while (marker
!= NULL
) {
4999 pending
+= marker
->c_count
;
5000 marker
= marker
->c_nlist
;
5004 ASSERT(pending
> 0);
5005 ASSERT(list
->c_count
> 0);
5006 funcp
= list
->c_call
;
5008 count
= list
->c_count
;
5009 *(uintptr_t *)Queue
= (uintptr_t)list
->c_nlist
;
5010 if (list
>= &callbackq
[0] &&
5011 list
<= &callbackq
[ncallbacks
-1]) {
5012 list
->c_nfree
= callbackqfree
;
5013 callbackqfree
= list
;
5015 kmem_free(list
, list
->c_size
);
5017 cbstats
.nc_delete
++;
5019 mutex_exit(&ddi_callback_mutex
);
5022 if ((rval
= (*funcp
)(arg
)) == 0) {
5024 mutex_enter(&ddi_callback_mutex
);
5025 (void) callback_insert(funcp
, arg
, listid
,
5027 cbstats
.nc_runouts
++;
5030 mutex_enter(&ddi_callback_mutex
);
5033 mutex_exit(&ddi_callback_mutex
);
5034 } while (rval
!= 0 && (--count
> 0));
5035 } while (pending
> 0);
5039 ddi_run_callback(uintptr_t *listid
)
5041 softcall(real_callback_run
, listid
);
5046 * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval,
5050 * Solaris DDI specific (Solaris DDI)
5053 * func: the callback function
5055 * The callback function will be invoked. The function is invoked
5056 * in kernel context if the argument level passed is the zero.
5057 * Otherwise it's invoked in interrupt context at the specified
5060 * arg: the argument passed to the callback function
5062 * interval: interval time
5064 * level : callback interrupt level
5066 * If the value is the zero, the callback function is invoked
5067 * in kernel context. If the value is more than the zero, but
5068 * less than or equal to ten, the callback function is invoked in
5069 * interrupt context at the specified interrupt level, which may
5070 * be used for real time applications.
5072 * This value must be in range of 0-10, which can be a numeric
5073 * number or a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10).
5076 * ddi_periodic_add(9F) schedules the specified function to be
5077 * periodically invoked in the interval time.
5079 * As well as timeout(9F), the exact time interval over which the function
5080 * takes effect cannot be guaranteed, but the value given is a close
5083 * Drivers waiting on behalf of processes with real-time constraints must
5084 * pass non-zero value with the level argument to ddi_periodic_add(9F).
5087 * ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t),
5088 * which must be used for ddi_periodic_delete(9F) to specify the request.
5091 * ddi_periodic_add(9F) can be called in user or kernel context, but
5092 * it cannot be called in interrupt context, which is different from
5096 ddi_periodic_add(void (*func
)(void *), void *arg
, hrtime_t interval
, int level
)
5099 * Sanity check of the argument level.
5101 if (level
< DDI_IPL_0
|| level
> DDI_IPL_10
)
5103 "ddi_periodic_add: invalid interrupt level (%d).", level
);
5106 * Sanity check of the context. ddi_periodic_add() cannot be
5107 * called in either interrupt context or high interrupt context.
5109 if (servicing_interrupt())
5111 "ddi_periodic_add: called in (high) interrupt context.");
5113 return ((ddi_periodic_t
)i_timeout(func
, arg
, interval
, level
));
5118 * ddi_periodic_delete(ddi_periodic_t req)
5121 * Solaris DDI specific (Solaris DDI)
5124 * req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned
5128 * ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request
5129 * previously requested.
5131 * ddi_periodic_delete(9F) will not return until the pending request
5132 * is canceled or executed.
5134 * As well as untimeout(9F), calling ddi_periodic_delete(9F) for a
5135 * timeout which is either running on another CPU, or has already
5136 * completed causes no problems. However, unlike untimeout(9F), there is
5137 * no restrictions on the lock which might be held across the call to
5138 * ddi_periodic_delete(9F).
5140 * Drivers should be structured with the understanding that the arrival of
5141 * both an interrupt and a timeout for that interrupt can occasionally
5142 * occur, in either order.
5145 * ddi_periodic_delete(9F) can be called in user or kernel context, but
5146 * it cannot be called in interrupt context, which is different from
5150 ddi_periodic_delete(ddi_periodic_t req
)
5153 * Sanity check of the context. ddi_periodic_delete() cannot be
5154 * called in either interrupt context or high interrupt context.
5156 if (servicing_interrupt())
5158 "ddi_periodic_delete: called in (high) interrupt context.");
5160 i_untimeout((timeout_t
)req
);
5164 nodevinfo(dev_t dev
, int otyp
)
5166 _NOTE(ARGUNUSED(dev
, otyp
))
5167 return ((dev_info_t
*)0);
5171 * A driver should support its own getinfo(9E) entry point. This function
5172 * is provided as a convenience for ON drivers that don't expect their
5173 * getinfo(9E) entry point to be called. A driver that uses this must not
5174 * call ddi_create_minor_node.
5177 ddi_no_info(dev_info_t
*dip
, ddi_info_cmd_t infocmd
, void *arg
, void **result
)
5179 _NOTE(ARGUNUSED(dip
, infocmd
, arg
, result
))
5180 return (DDI_FAILURE
);
5184 * A driver should support its own getinfo(9E) entry point. This function
5185 * is provided as a convenience for ON drivers that where the minor number
5186 * is the instance. Drivers that do not have 1:1 mapping must implement
5187 * their own getinfo(9E) function.
5190 ddi_getinfo_1to1(dev_info_t
*dip
, ddi_info_cmd_t infocmd
,
5191 void *arg
, void **result
)
5193 _NOTE(ARGUNUSED(dip
))
5196 if (infocmd
!= DDI_INFO_DEVT2INSTANCE
)
5197 return (DDI_FAILURE
);
5199 instance
= getminor((dev_t
)(uintptr_t)arg
);
5200 *result
= (void *)(uintptr_t)instance
;
5201 return (DDI_SUCCESS
);
5205 ddifail(dev_info_t
*devi
, ddi_attach_cmd_t cmd
)
5207 _NOTE(ARGUNUSED(devi
, cmd
))
5208 return (DDI_FAILURE
);
5212 ddi_no_dma_map(dev_info_t
*dip
, dev_info_t
*rdip
,
5213 struct ddi_dma_req
*dmareqp
, ddi_dma_handle_t
*handlep
)
5215 _NOTE(ARGUNUSED(dip
, rdip
, dmareqp
, handlep
))
5216 return (DDI_DMA_NOMAPPING
);
5220 ddi_no_dma_allochdl(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_dma_attr_t
*attr
,
5221 int (*waitfp
)(caddr_t
), caddr_t arg
, ddi_dma_handle_t
*handlep
)
5223 _NOTE(ARGUNUSED(dip
, rdip
, attr
, waitfp
, arg
, handlep
))
5224 return (DDI_DMA_BADATTR
);
5228 ddi_no_dma_freehdl(dev_info_t
*dip
, dev_info_t
*rdip
,
5229 ddi_dma_handle_t handle
)
5231 _NOTE(ARGUNUSED(dip
, rdip
, handle
))
5232 return (DDI_FAILURE
);
5236 ddi_no_dma_bindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
5237 ddi_dma_handle_t handle
, struct ddi_dma_req
*dmareq
,
5238 ddi_dma_cookie_t
*cp
, uint_t
*ccountp
)
5240 _NOTE(ARGUNUSED(dip
, rdip
, handle
, dmareq
, cp
, ccountp
))
5241 return (DDI_DMA_NOMAPPING
);
5245 ddi_no_dma_unbindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
5246 ddi_dma_handle_t handle
)
5248 _NOTE(ARGUNUSED(dip
, rdip
, handle
))
5249 return (DDI_FAILURE
);
5253 ddi_no_dma_flush(dev_info_t
*dip
, dev_info_t
*rdip
,
5254 ddi_dma_handle_t handle
, off_t off
, size_t len
,
5257 _NOTE(ARGUNUSED(dip
, rdip
, handle
, off
, len
, cache_flags
))
5258 return (DDI_FAILURE
);
5262 ddi_no_dma_win(dev_info_t
*dip
, dev_info_t
*rdip
,
5263 ddi_dma_handle_t handle
, uint_t win
, off_t
*offp
,
5264 size_t *lenp
, ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
)
5266 _NOTE(ARGUNUSED(dip
, rdip
, handle
, win
, offp
, lenp
, cookiep
, ccountp
))
5267 return (DDI_FAILURE
);
5271 ddi_no_dma_mctl(dev_info_t
*dip
, dev_info_t
*rdip
,
5272 ddi_dma_handle_t handle
, enum ddi_dma_ctlops request
,
5273 off_t
*offp
, size_t *lenp
, caddr_t
*objp
, uint_t flags
)
5275 _NOTE(ARGUNUSED(dip
, rdip
, handle
, request
, offp
, lenp
, objp
, flags
))
5276 return (DDI_FAILURE
);
5284 nochpoll(dev_t dev
, short events
, int anyyet
, short *reventsp
,
5285 struct pollhead
**pollhdrp
)
5287 _NOTE(ARGUNUSED(dev
, events
, anyyet
, reventsp
, pollhdrp
))
5300 return ((clock_t)lbolt_hybrid());
5304 ddi_get_lbolt64(void)
5306 return (lbolt_hybrid());
5314 if ((now
= gethrestime_sec()) == 0) {
5316 mutex_enter(&tod_lock
);
5318 mutex_exit(&tod_lock
);
5328 return (ttoproc(curthread
)->p_pid
);
5332 ddi_get_kt_did(void)
5334 return (curthread
->t_did
);
5338 * This function returns B_TRUE if the caller can reasonably expect that a call
5339 * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
5340 * by user-level signal. If it returns B_FALSE, then the caller should use
5341 * other means to make certain that the wait will not hang "forever."
5343 * It does not check the signal mask, nor for reception of any particular
5346 * Currently, a thread can receive a signal if it's not a kernel thread and it
5347 * is not in the middle of exit(2) tear-down. Threads that are in that
5348 * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
5349 * cv_timedwait, and qwait_sig to qwait.
5352 ddi_can_receive_sig(void)
5356 if (curthread
->t_proc_flag
& TP_LWPEXIT
)
5358 if ((pp
= ttoproc(curthread
)) == NULL
)
5360 return (pp
->p_as
!= &kas
);
5364 * Swap bytes in 16-bit [half-]words
5367 swab(void *src
, void *dst
, size_t nbytes
)
5369 uchar_t
*pf
= (uchar_t
*)src
;
5370 uchar_t
*pt
= (uchar_t
*)dst
;
5374 nshorts
= nbytes
>> 1;
5376 while (--nshorts
>= 0) {
5384 ddi_append_minor_node(dev_info_t
*ddip
, struct ddi_minor_data
*dmdp
)
5387 struct ddi_minor_data
*dp
;
5389 ndi_devi_enter(ddip
, &circ
);
5390 if ((dp
= DEVI(ddip
)->devi_minor
) == (struct ddi_minor_data
*)NULL
) {
5391 DEVI(ddip
)->devi_minor
= dmdp
;
5393 while (dp
->next
!= (struct ddi_minor_data
*)NULL
)
5397 ndi_devi_exit(ddip
, circ
);
5401 * Part of the obsolete SunCluster DDI Hooks.
5402 * Keep for binary compatibility
5405 ddi_getiminor(dev_t dev
)
5407 return (getminor(dev
));
5411 i_log_devfs_minor_create(dev_info_t
*dip
, char *minor_name
)
5416 char *pathname
, *class_name
;
5417 sysevent_t
*ev
= NULL
;
5419 sysevent_value_t se_val
;
5420 sysevent_attr_list_t
*ev_attr_list
= NULL
;
5422 /* determine interrupt context */
5423 se_flag
= (servicing_interrupt()) ? SE_NOSLEEP
: SE_SLEEP
;
5424 kmem_flag
= (se_flag
== SE_SLEEP
) ? KM_SLEEP
: KM_NOSLEEP
;
5426 i_ddi_di_cache_invalidate();
5429 if ((se_flag
== SE_NOSLEEP
) && sunddi_debug
) {
5430 cmn_err(CE_CONT
, "ddi_create_minor_node: called from "
5431 "interrupt level by driver %s",
5432 ddi_driver_name(dip
));
5436 ev
= sysevent_alloc(EC_DEVFS
, ESC_DEVFS_MINOR_CREATE
, EP_DDI
, se_flag
);
5441 pathname
= kmem_alloc(MAXPATHLEN
, kmem_flag
);
5442 if (pathname
== NULL
) {
5447 (void) ddi_pathname(dip
, pathname
);
5448 ASSERT(strlen(pathname
));
5449 se_val
.value_type
= SE_DATA_TYPE_STRING
;
5450 se_val
.value
.sv_string
= pathname
;
5451 if (sysevent_add_attr(&ev_attr_list
, DEVFS_PATHNAME
,
5452 &se_val
, se_flag
) != 0) {
5453 kmem_free(pathname
, MAXPATHLEN
);
5457 kmem_free(pathname
, MAXPATHLEN
);
5459 /* add the device class attribute */
5460 if ((class_name
= i_ddi_devi_class(dip
)) != NULL
) {
5461 se_val
.value_type
= SE_DATA_TYPE_STRING
;
5462 se_val
.value
.sv_string
= class_name
;
5463 if (sysevent_add_attr(&ev_attr_list
,
5464 DEVFS_DEVI_CLASS
, &se_val
, SE_SLEEP
) != 0) {
5465 sysevent_free_attr(ev_attr_list
);
5471 * allow for NULL minor names
5473 if (minor_name
!= NULL
) {
5474 se_val
.value
.sv_string
= minor_name
;
5475 if (sysevent_add_attr(&ev_attr_list
, DEVFS_MINOR_NAME
,
5476 &se_val
, se_flag
) != 0) {
5477 sysevent_free_attr(ev_attr_list
);
5483 if (sysevent_attach_attributes(ev
, ev_attr_list
) != 0) {
5484 sysevent_free_attr(ev_attr_list
);
5489 if ((se_err
= log_sysevent(ev
, se_flag
, &eid
)) != 0) {
5490 if (se_err
== SE_NO_TRANSPORT
) {
5491 cmn_err(CE_WARN
, "/devices or /dev may not be current "
5492 "for driver %s (%s). Run devfsadm -i %s",
5493 ddi_driver_name(dip
), "syseventd not responding",
5494 ddi_driver_name(dip
));
5502 return (DDI_SUCCESS
);
5504 cmn_err(CE_WARN
, "/devices or /dev may not be current "
5505 "for driver %s. Run devfsadm -i %s",
5506 ddi_driver_name(dip
), ddi_driver_name(dip
));
5507 return (DDI_SUCCESS
);
5511 * failing to remove a minor node is not of interest
5512 * therefore we do not generate an error message
5515 i_log_devfs_minor_remove(dev_info_t
*dip
, char *minor_name
)
5517 char *pathname
, *class_name
;
5520 sysevent_value_t se_val
;
5521 sysevent_attr_list_t
*ev_attr_list
= NULL
;
5524 * only log ddi_remove_minor_node() calls outside the scope
5525 * of attach/detach reconfigurations and when the dip is
5526 * still initialized.
5528 if (DEVI_IS_ATTACHING(dip
) || DEVI_IS_DETACHING(dip
) ||
5529 (i_ddi_node_state(dip
) < DS_INITIALIZED
)) {
5530 return (DDI_SUCCESS
);
5533 i_ddi_di_cache_invalidate();
5535 ev
= sysevent_alloc(EC_DEVFS
, ESC_DEVFS_MINOR_REMOVE
, EP_DDI
, SE_SLEEP
);
5537 return (DDI_SUCCESS
);
5540 pathname
= kmem_alloc(MAXPATHLEN
, KM_SLEEP
);
5541 if (pathname
== NULL
) {
5543 return (DDI_SUCCESS
);
5546 (void) ddi_pathname(dip
, pathname
);
5547 ASSERT(strlen(pathname
));
5548 se_val
.value_type
= SE_DATA_TYPE_STRING
;
5549 se_val
.value
.sv_string
= pathname
;
5550 if (sysevent_add_attr(&ev_attr_list
, DEVFS_PATHNAME
,
5551 &se_val
, SE_SLEEP
) != 0) {
5552 kmem_free(pathname
, MAXPATHLEN
);
5554 return (DDI_SUCCESS
);
5557 kmem_free(pathname
, MAXPATHLEN
);
5560 * allow for NULL minor names
5562 if (minor_name
!= NULL
) {
5563 se_val
.value
.sv_string
= minor_name
;
5564 if (sysevent_add_attr(&ev_attr_list
, DEVFS_MINOR_NAME
,
5565 &se_val
, SE_SLEEP
) != 0) {
5566 sysevent_free_attr(ev_attr_list
);
5571 if ((class_name
= i_ddi_devi_class(dip
)) != NULL
) {
5572 /* add the device class, driver name and instance attributes */
5574 se_val
.value_type
= SE_DATA_TYPE_STRING
;
5575 se_val
.value
.sv_string
= class_name
;
5576 if (sysevent_add_attr(&ev_attr_list
,
5577 DEVFS_DEVI_CLASS
, &se_val
, SE_SLEEP
) != 0) {
5578 sysevent_free_attr(ev_attr_list
);
5582 se_val
.value_type
= SE_DATA_TYPE_STRING
;
5583 se_val
.value
.sv_string
= (char *)ddi_driver_name(dip
);
5584 if (sysevent_add_attr(&ev_attr_list
,
5585 DEVFS_DRIVER_NAME
, &se_val
, SE_SLEEP
) != 0) {
5586 sysevent_free_attr(ev_attr_list
);
5590 se_val
.value_type
= SE_DATA_TYPE_INT32
;
5591 se_val
.value
.sv_int32
= ddi_get_instance(dip
);
5592 if (sysevent_add_attr(&ev_attr_list
,
5593 DEVFS_INSTANCE
, &se_val
, SE_SLEEP
) != 0) {
5594 sysevent_free_attr(ev_attr_list
);
5600 if (sysevent_attach_attributes(ev
, ev_attr_list
) != 0) {
5601 sysevent_free_attr(ev_attr_list
);
5603 (void) log_sysevent(ev
, SE_SLEEP
, &eid
);
5607 return (DDI_SUCCESS
);
5611 * Derive the device class of the node.
5612 * Device class names aren't defined yet. Until this is done we use
5613 * devfs event subclass names as device class names.
5616 derive_devi_class(dev_info_t
*dip
, char *node_type
, int flag
)
5618 int rv
= DDI_SUCCESS
;
5620 if (i_ddi_devi_class(dip
) == NULL
) {
5621 if (strncmp(node_type
, DDI_NT_BLOCK
,
5622 sizeof (DDI_NT_BLOCK
) - 1) == 0 &&
5623 (node_type
[sizeof (DDI_NT_BLOCK
) - 1] == '\0' ||
5624 node_type
[sizeof (DDI_NT_BLOCK
) - 1] == ':') &&
5625 strcmp(node_type
, DDI_NT_FD
) != 0) {
5627 rv
= i_ddi_set_devi_class(dip
, ESC_DISK
, flag
);
5629 } else if (strncmp(node_type
, DDI_NT_NET
,
5630 sizeof (DDI_NT_NET
) - 1) == 0 &&
5631 (node_type
[sizeof (DDI_NT_NET
) - 1] == '\0' ||
5632 node_type
[sizeof (DDI_NT_NET
) - 1] == ':')) {
5634 rv
= i_ddi_set_devi_class(dip
, ESC_NETWORK
, flag
);
5636 } else if (strncmp(node_type
, DDI_NT_PRINTER
,
5637 sizeof (DDI_NT_PRINTER
) - 1) == 0 &&
5638 (node_type
[sizeof (DDI_NT_PRINTER
) - 1] == '\0' ||
5639 node_type
[sizeof (DDI_NT_PRINTER
) - 1] == ':')) {
5641 rv
= i_ddi_set_devi_class(dip
, ESC_PRINTER
, flag
);
5643 } else if (strncmp(node_type
, DDI_PSEUDO
,
5644 sizeof (DDI_PSEUDO
) -1) == 0 &&
5645 (strncmp(ESC_LOFI
, ddi_node_name(dip
),
5646 sizeof (ESC_LOFI
) -1) == 0)) {
5647 rv
= i_ddi_set_devi_class(dip
, ESC_LOFI
, flag
);
5655 * Check compliance with PSARC 2003/375:
5657 * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
5658 * exceed IFNAMSIZ (16) characters in length.
5661 verify_name(char *name
)
5663 size_t len
= strlen(name
);
5666 if (len
== 0 || len
> IFNAMSIZ
)
5669 for (cp
= name
; *cp
!= '\0'; cp
++) {
5670 if (!isalnum(*cp
) && *cp
!= '_')
5678 * ddi_create_minor_common: Create a ddi_minor_data structure and
5679 * attach it to the given devinfo node.
5683 ddi_create_minor_common(dev_info_t
*dip
, char *name
, int spec_type
,
5684 minor_t minor_num
, char *node_type
, int flag
, ddi_minor_type mtype
,
5685 const char *read_priv
, const char *write_priv
, mode_t priv_mode
)
5687 struct ddi_minor_data
*dmdp
;
5690 if (spec_type
!= S_IFCHR
&& spec_type
!= S_IFBLK
)
5691 return (DDI_FAILURE
);
5694 return (DDI_FAILURE
);
5697 * Log a message if the minor number the driver is creating
5698 * is not expressible on the on-disk filesystem (currently
5699 * this is limited to 18 bits both by UFS). The device can
5700 * be opened via devfs, but not by device special files created
5703 if (minor_num
> L_MAXMIN32
) {
5705 "%s%d:%s minor 0x%x too big for 32-bit applications",
5706 ddi_driver_name(dip
), ddi_get_instance(dip
),
5708 return (DDI_FAILURE
);
5711 /* dip must be bound and attached */
5712 major
= ddi_driver_major(dip
);
5713 ASSERT(major
!= DDI_MAJOR_T_NONE
);
5716 * Default node_type to DDI_PSEUDO and issue notice in debug mode
5718 if (node_type
== NULL
) {
5719 node_type
= DDI_PSEUDO
;
5720 NDI_CONFIG_DEBUG((CE_NOTE
, "!illegal node_type NULL for %s%d "
5721 " minor node %s; default to DDI_PSEUDO",
5722 ddi_driver_name(dip
), ddi_get_instance(dip
), name
));
5726 * If the driver is a network driver, ensure that the name falls within
5727 * the interface naming constraints specified by PSARC/2003/375.
5729 if (strcmp(node_type
, DDI_NT_NET
) == 0) {
5730 if (!verify_name(name
))
5731 return (DDI_FAILURE
);
5733 if (mtype
== DDM_MINOR
) {
5734 struct devnames
*dnp
= &devnamesp
[major
];
5736 /* Mark driver as a network driver */
5737 LOCK_DEV_OPS(&dnp
->dn_lock
);
5738 dnp
->dn_flags
|= DN_NETWORK_DRIVER
;
5741 * If this minor node is created during the device
5742 * attachment, this is a physical network device.
5743 * Mark the driver as a physical network driver.
5745 if (DEVI_IS_ATTACHING(dip
))
5746 dnp
->dn_flags
|= DN_NETWORK_PHYSDRIVER
;
5747 UNLOCK_DEV_OPS(&dnp
->dn_lock
);
5751 if (mtype
== DDM_MINOR
) {
5752 if (derive_devi_class(dip
, node_type
, KM_NOSLEEP
) !=
5754 return (DDI_FAILURE
);
5758 * Take care of minor number information for the node.
5761 if ((dmdp
= kmem_zalloc(sizeof (struct ddi_minor_data
),
5762 KM_NOSLEEP
)) == NULL
) {
5763 return (DDI_FAILURE
);
5765 if ((dmdp
->ddm_name
= i_ddi_strdup(name
, KM_NOSLEEP
)) == NULL
) {
5766 kmem_free(dmdp
, sizeof (struct ddi_minor_data
));
5767 return (DDI_FAILURE
);
5770 dmdp
->ddm_dev
= makedevice(major
, minor_num
);
5771 dmdp
->ddm_spec_type
= spec_type
;
5772 dmdp
->ddm_node_type
= node_type
;
5774 if (flag
& CLONE_DEV
) {
5775 dmdp
->type
= DDM_ALIAS
;
5776 dmdp
->ddm_dev
= makedevice(ddi_driver_major(clone_dip
), major
);
5778 if (flag
& PRIVONLY_DEV
) {
5779 dmdp
->ddm_flags
|= DM_NO_FSPERM
;
5781 if (read_priv
|| write_priv
) {
5782 dmdp
->ddm_node_priv
=
5783 devpolicy_priv_by_name(read_priv
, write_priv
);
5785 dmdp
->ddm_priv_mode
= priv_mode
;
5787 ddi_append_minor_node(dip
, dmdp
);
5790 * only log ddi_create_minor_node() calls which occur
5791 * outside the scope of attach(9e)/detach(9e) reconfigurations
5793 if (!(DEVI_IS_ATTACHING(dip
) || DEVI_IS_DETACHING(dip
)) &&
5794 mtype
!= DDM_INTERNAL_PATH
) {
5795 (void) i_log_devfs_minor_create(dip
, name
);
5799 * Check if any dacf rules match the creation of this minor node
5801 dacfc_match_create_minor(name
, node_type
, dip
, dmdp
, flag
);
5802 return (DDI_SUCCESS
);
5806 ddi_create_minor_node(dev_info_t
*dip
, char *name
, int spec_type
,
5807 minor_t minor_num
, char *node_type
, int flag
)
5809 return (ddi_create_minor_common(dip
, name
, spec_type
, minor_num
,
5810 node_type
, flag
, DDM_MINOR
, NULL
, NULL
, 0));
5814 ddi_create_priv_minor_node(dev_info_t
*dip
, char *name
, int spec_type
,
5815 minor_t minor_num
, char *node_type
, int flag
,
5816 const char *rdpriv
, const char *wrpriv
, mode_t priv_mode
)
5818 return (ddi_create_minor_common(dip
, name
, spec_type
, minor_num
,
5819 node_type
, flag
, DDM_MINOR
, rdpriv
, wrpriv
, priv_mode
));
5823 ddi_create_default_minor_node(dev_info_t
*dip
, char *name
, int spec_type
,
5824 minor_t minor_num
, char *node_type
, int flag
)
5826 return (ddi_create_minor_common(dip
, name
, spec_type
, minor_num
,
5827 node_type
, flag
, DDM_DEFAULT
, NULL
, NULL
, 0));
5831 * Internal (non-ddi) routine for drivers to export names known
5832 * to the kernel (especially ddi_pathname_to_dev_t and friends)
5833 * but not exported externally to /dev
5836 ddi_create_internal_pathname(dev_info_t
*dip
, char *name
, int spec_type
,
5839 return (ddi_create_minor_common(dip
, name
, spec_type
, minor_num
,
5840 "internal", 0, DDM_INTERNAL_PATH
, NULL
, NULL
, 0));
5844 ddi_remove_minor_node(dev_info_t
*dip
, char *name
)
5847 struct ddi_minor_data
*dmdp
, *dmdp1
;
5848 struct ddi_minor_data
**dmdp_prev
;
5850 ndi_devi_enter(dip
, &circ
);
5851 dmdp_prev
= &DEVI(dip
)->devi_minor
;
5852 dmdp
= DEVI(dip
)->devi_minor
;
5853 while (dmdp
!= NULL
) {
5855 if ((name
== NULL
|| (dmdp
->ddm_name
!= NULL
&&
5856 strcmp(name
, dmdp
->ddm_name
) == 0))) {
5857 if (dmdp
->ddm_name
!= NULL
) {
5858 if (dmdp
->type
!= DDM_INTERNAL_PATH
)
5859 (void) i_log_devfs_minor_remove(dip
,
5861 kmem_free(dmdp
->ddm_name
,
5862 strlen(dmdp
->ddm_name
) + 1);
5865 * Release device privilege, if any.
5866 * Release dacf client data associated with this minor
5867 * node by storing NULL.
5869 if (dmdp
->ddm_node_priv
)
5870 dpfree(dmdp
->ddm_node_priv
);
5871 dacf_store_info((dacf_infohdl_t
)dmdp
, NULL
);
5872 kmem_free(dmdp
, sizeof (struct ddi_minor_data
));
5875 * OK, we found it, so get out now -- if we drive on,
5876 * we will strcmp against garbage. See 1139209.
5881 dmdp_prev
= &dmdp
->next
;
5885 ndi_devi_exit(dip
, circ
);
5892 return (panicstr
!= NULL
);
5897 * Find first bit set in a mask (returned counting from 1 up)
5907 * Find last bit set. Take mask and clear
5908 * all but the most significant bit, and
5909 * then let ffs do the rest of the work.
5911 * Algorithm courtesy of Steve Chessin.
5920 if ((nx
= (mask
& (mask
- 1))) == 0)
5928 * The ddi_soft_state_* routines comprise generic storage management utilities
5929 * for driver soft state structures (in "the old days," this was done with
5930 * statically sized array - big systems and dynamic loading and unloading
5931 * make heap allocation more attractive).
5935 * Allocate a set of pointers to 'n_items' objects of size 'size'
5936 * bytes. Each pointer is initialized to nil.
5938 * The 'size' and 'n_items' values are stashed in the opaque
5939 * handle returned to the caller.
5941 * This implementation interprets 'set of pointers' to mean 'array
5942 * of pointers' but note that nothing in the interface definition
5943 * precludes an implementation that uses, for example, a linked list.
5944 * However there should be a small efficiency gain from using an array
5947 * NOTE As an optimization, we make our growable array allocations in
5948 * powers of two (bytes), since that's how much kmem_alloc (currently)
5949 * gives us anyway. It should save us some free/realloc's ..
5951 * As a further optimization, we make the growable array start out
5952 * with MIN_N_ITEMS in it.
5955 #define MIN_N_ITEMS 8 /* 8 void *'s == 32 bytes */
5958 ddi_soft_state_init(void **state_p
, size_t size
, size_t n_items
)
5960 i_ddi_soft_state
*ss
;
5962 if (state_p
== NULL
|| size
== 0)
5965 ss
= kmem_zalloc(sizeof (*ss
), KM_SLEEP
);
5966 mutex_init(&ss
->lock
, NULL
, MUTEX_DRIVER
, NULL
);
5969 if (n_items
< MIN_N_ITEMS
)
5970 ss
->n_items
= MIN_N_ITEMS
;
5974 if ((bitlog
= ddi_fls(n_items
)) == ddi_ffs(n_items
))
5976 ss
->n_items
= 1 << bitlog
;
5979 ASSERT(ss
->n_items
>= n_items
);
5981 ss
->array
= kmem_zalloc(ss
->n_items
* sizeof (void *), KM_SLEEP
);
5988 * Allocate a state structure of size 'size' to be associated
5991 * In this implementation, the array is extended to
5992 * allow the requested offset, if needed.
5995 ddi_soft_state_zalloc(void *state
, int item
)
5997 i_ddi_soft_state
*ss
= (i_ddi_soft_state
*)state
;
6001 if ((state
== NULL
) || (item
< 0))
6002 return (DDI_FAILURE
);
6004 mutex_enter(&ss
->lock
);
6005 if (ss
->size
== 0) {
6006 mutex_exit(&ss
->lock
);
6007 cmn_err(CE_WARN
, "ddi_soft_state_zalloc: bad handle: %s",
6008 mod_containing_pc(caller()));
6009 return (DDI_FAILURE
);
6012 array
= ss
->array
; /* NULL if ss->n_items == 0 */
6013 ASSERT(ss
->n_items
!= 0 && array
!= NULL
);
6016 * refuse to tread on an existing element
6018 if (item
< ss
->n_items
&& array
[item
] != NULL
) {
6019 mutex_exit(&ss
->lock
);
6020 return (DDI_FAILURE
);
6024 * Allocate a new element to plug in
6026 new_element
= kmem_zalloc(ss
->size
, KM_SLEEP
);
6029 * Check if the array is big enough, if not, grow it.
6031 if (item
>= ss
->n_items
) {
6034 struct i_ddi_soft_state
*dirty
;
6037 * Allocate a new array of the right length, copy
6038 * all the old pointers to the new array, then
6039 * if it exists at all, put the old array on the
6042 * Note that we can't kmem_free() the old array.
6044 * Why -- well the 'get' operation is 'mutex-free', so we
6045 * can't easily catch a suspended thread that is just about
6046 * to dereference the array we just grew out of. So we
6047 * cons up a header and put it on a list of 'dirty'
6048 * pointer arrays. (Dirty in the sense that there may
6049 * be suspended threads somewhere that are in the middle
6050 * of referencing them). Fortunately, we -can- garbage
6051 * collect it all at ddi_soft_state_fini time.
6053 new_n_items
= ss
->n_items
;
6054 while (new_n_items
< (1 + item
))
6055 new_n_items
<<= 1; /* double array size .. */
6057 ASSERT(new_n_items
>= (1 + item
)); /* sanity check! */
6059 new_array
= kmem_zalloc(new_n_items
* sizeof (void *),
6062 * Copy the pointers into the new array
6064 bcopy(array
, new_array
, ss
->n_items
* sizeof (void *));
6067 * Save the old array on the dirty list
6069 dirty
= kmem_zalloc(sizeof (*dirty
), KM_SLEEP
);
6070 dirty
->array
= ss
->array
;
6071 dirty
->n_items
= ss
->n_items
;
6072 dirty
->next
= ss
->next
;
6075 ss
->array
= (array
= new_array
);
6076 ss
->n_items
= new_n_items
;
6079 ASSERT(array
!= NULL
&& item
< ss
->n_items
&& array
[item
] == NULL
);
6081 array
[item
] = new_element
;
6083 mutex_exit(&ss
->lock
);
6084 return (DDI_SUCCESS
);
6088 * Fetch a pointer to the allocated soft state structure.
6090 * This is designed to be cheap.
6092 * There's an argument that there should be more checking for
6093 * nil pointers and out of bounds on the array.. but we do a lot
6094 * of that in the alloc/free routines.
6096 * An array has the convenience that we don't need to lock read-access
6097 * to it c.f. a linked list. However our "expanding array" strategy
6098 * means that we should hold a readers lock on the i_ddi_soft_state
6101 * However, from a performance viewpoint, we need to do it without
6102 * any locks at all -- this also makes it a leaf routine. The algorithm
6103 * is 'lock-free' because we only discard the pointer arrays at
6104 * ddi_soft_state_fini() time.
6107 ddi_get_soft_state(void *state
, int item
)
6109 i_ddi_soft_state
*ss
= (i_ddi_soft_state
*)state
;
6111 ASSERT((ss
!= NULL
) && (item
>= 0));
6113 if (item
< ss
->n_items
&& ss
->array
!= NULL
)
6114 return (ss
->array
[item
]);
6119 * Free the state structure corresponding to 'item.' Freeing an
6120 * element that has either gone or was never allocated is not
6121 * considered an error. Note that we free the state structure, but
6122 * we don't shrink our pointer array, or discard 'dirty' arrays,
6123 * since even a few pointers don't really waste too much memory.
6125 * Passing an item number that is out of bounds, or a null pointer will
6126 * provoke an error message.
6129 ddi_soft_state_free(void *state
, int item
)
6131 i_ddi_soft_state
*ss
= (i_ddi_soft_state
*)state
;
6134 static char msg
[] = "ddi_soft_state_free:";
6137 cmn_err(CE_WARN
, "%s null handle: %s",
6138 msg
, mod_containing_pc(caller()));
6144 mutex_enter(&ss
->lock
);
6146 if ((array
= ss
->array
) == NULL
|| ss
->size
== 0) {
6147 cmn_err(CE_WARN
, "%s bad handle: %s",
6148 msg
, mod_containing_pc(caller()));
6149 } else if (item
< 0 || item
>= ss
->n_items
) {
6150 cmn_err(CE_WARN
, "%s item %d not in range [0..%lu]: %s",
6151 msg
, item
, ss
->n_items
- 1, mod_containing_pc(caller()));
6152 } else if (array
[item
] != NULL
) {
6153 element
= array
[item
];
6157 mutex_exit(&ss
->lock
);
6160 kmem_free(element
, ss
->size
);
6164 * Free the entire set of pointers, and any
6165 * soft state structures contained therein.
6167 * Note that we don't grab the ss->lock mutex, even though
6168 * we're inspecting the various fields of the data structure.
6170 * There is an implicit assumption that this routine will
6171 * never run concurrently with any of the above on this
6172 * particular state structure i.e. by the time the driver
6173 * calls this routine, there should be no other threads
6174 * running in the driver.
6177 ddi_soft_state_fini(void **state_p
)
6179 i_ddi_soft_state
*ss
, *dirty
;
6181 static char msg
[] = "ddi_soft_state_fini:";
6183 if (state_p
== NULL
||
6184 (ss
= (i_ddi_soft_state
*)(*state_p
)) == NULL
) {
6185 cmn_err(CE_WARN
, "%s null handle: %s",
6186 msg
, mod_containing_pc(caller()));
6190 if (ss
->size
== 0) {
6191 cmn_err(CE_WARN
, "%s bad handle: %s",
6192 msg
, mod_containing_pc(caller()));
6196 if (ss
->n_items
> 0) {
6197 for (item
= 0; item
< ss
->n_items
; item
++)
6198 ddi_soft_state_free(ss
, item
);
6199 kmem_free(ss
->array
, ss
->n_items
* sizeof (void *));
6203 * Now delete any dirty arrays from previous 'grow' operations
6205 for (dirty
= ss
->next
; dirty
; dirty
= ss
->next
) {
6206 ss
->next
= dirty
->next
;
6207 kmem_free(dirty
->array
, dirty
->n_items
* sizeof (void *));
6208 kmem_free(dirty
, sizeof (*dirty
));
6211 mutex_destroy(&ss
->lock
);
6212 kmem_free(ss
, sizeof (*ss
));
6217 #define SS_N_ITEMS_PER_HASH 16
6218 #define SS_MIN_HASH_SZ 16
6219 #define SS_MAX_HASH_SZ 4096
6222 ddi_soft_state_bystr_init(ddi_soft_state_bystr
**state_p
, size_t size
,
6225 i_ddi_soft_state_bystr
*sss
;
6228 ASSERT(state_p
&& size
&& n_items
);
6229 if ((state_p
== NULL
) || (size
== 0) || (n_items
== 0))
6232 /* current implementation is based on hash, convert n_items to hash */
6233 hash_sz
= n_items
/ SS_N_ITEMS_PER_HASH
;
6234 if (hash_sz
< SS_MIN_HASH_SZ
)
6235 hash_sz
= SS_MIN_HASH_SZ
;
6236 else if (hash_sz
> SS_MAX_HASH_SZ
)
6237 hash_sz
= SS_MAX_HASH_SZ
;
6239 /* allocate soft_state pool */
6240 sss
= kmem_zalloc(sizeof (*sss
), KM_SLEEP
);
6241 sss
->ss_size
= size
;
6242 sss
->ss_mod_hash
= mod_hash_create_strhash("soft_state_bystr",
6243 hash_sz
, mod_hash_null_valdtor
);
6244 *state_p
= (ddi_soft_state_bystr
*)sss
;
6249 ddi_soft_state_bystr_zalloc(ddi_soft_state_bystr
*state
, const char *str
)
6251 i_ddi_soft_state_bystr
*sss
= (i_ddi_soft_state_bystr
*)state
;
6255 ASSERT(sss
&& str
&& sss
->ss_mod_hash
);
6256 if ((sss
== NULL
) || (str
== NULL
) || (sss
->ss_mod_hash
== NULL
))
6257 return (DDI_FAILURE
);
6258 sso
= kmem_zalloc(sss
->ss_size
, KM_SLEEP
);
6259 dup_str
= i_ddi_strdup((char *)str
, KM_SLEEP
);
6260 if (mod_hash_insert(sss
->ss_mod_hash
,
6261 (mod_hash_key_t
)dup_str
, (mod_hash_val_t
)sso
) == 0)
6262 return (DDI_SUCCESS
);
6265 * The only error from an strhash insert is caused by a duplicate key.
6266 * We refuse to tread on an existing elements, so free and fail.
6268 kmem_free(dup_str
, strlen(dup_str
) + 1);
6269 kmem_free(sso
, sss
->ss_size
);
6270 return (DDI_FAILURE
);
6274 ddi_soft_state_bystr_get(ddi_soft_state_bystr
*state
, const char *str
)
6276 i_ddi_soft_state_bystr
*sss
= (i_ddi_soft_state_bystr
*)state
;
6279 ASSERT(sss
&& str
&& sss
->ss_mod_hash
);
6280 if ((sss
== NULL
) || (str
== NULL
) || (sss
->ss_mod_hash
== NULL
))
6283 if (mod_hash_find(sss
->ss_mod_hash
,
6284 (mod_hash_key_t
)str
, (mod_hash_val_t
*)&sso
) == 0)
6290 ddi_soft_state_bystr_free(ddi_soft_state_bystr
*state
, const char *str
)
6292 i_ddi_soft_state_bystr
*sss
= (i_ddi_soft_state_bystr
*)state
;
6295 ASSERT(sss
&& str
&& sss
->ss_mod_hash
);
6296 if ((sss
== NULL
) || (str
== NULL
) || (sss
->ss_mod_hash
== NULL
))
6299 (void) mod_hash_remove(sss
->ss_mod_hash
,
6300 (mod_hash_key_t
)str
, (mod_hash_val_t
*)&sso
);
6301 kmem_free(sso
, sss
->ss_size
);
6305 ddi_soft_state_bystr_fini(ddi_soft_state_bystr
**state_p
)
6307 i_ddi_soft_state_bystr
*sss
;
6310 if (state_p
== NULL
)
6313 sss
= (i_ddi_soft_state_bystr
*)(*state_p
);
6317 ASSERT(sss
->ss_mod_hash
);
6318 if (sss
->ss_mod_hash
) {
6319 mod_hash_destroy_strhash(sss
->ss_mod_hash
);
6320 sss
->ss_mod_hash
= NULL
;
6323 kmem_free(sss
, sizeof (*sss
));
6328 * The ddi_strid_* routines provide string-to-index management utilities.
6330 /* allocate and initialize an strid set */
6332 ddi_strid_init(ddi_strid
**strid_p
, int n_items
)
6337 if (strid_p
== NULL
)
6338 return (DDI_FAILURE
);
6340 /* current implementation is based on hash, convert n_items to hash */
6341 hash_sz
= n_items
/ SS_N_ITEMS_PER_HASH
;
6342 if (hash_sz
< SS_MIN_HASH_SZ
)
6343 hash_sz
= SS_MIN_HASH_SZ
;
6344 else if (hash_sz
> SS_MAX_HASH_SZ
)
6345 hash_sz
= SS_MAX_HASH_SZ
;
6347 ss
= kmem_alloc(sizeof (*ss
), KM_SLEEP
);
6348 ss
->strid_chunksz
= n_items
;
6349 ss
->strid_spacesz
= n_items
;
6350 ss
->strid_space
= id_space_create("strid", 1, n_items
);
6351 ss
->strid_bystr
= mod_hash_create_strhash("strid_bystr", hash_sz
,
6352 mod_hash_null_valdtor
);
6353 ss
->strid_byid
= mod_hash_create_idhash("strid_byid", hash_sz
,
6354 mod_hash_null_valdtor
);
6355 *strid_p
= (ddi_strid
*)ss
;
6356 return (DDI_SUCCESS
);
6359 /* allocate an id mapping within the specified set for str, return id */
6361 i_ddi_strid_alloc(ddi_strid
*strid
, char *str
)
6363 i_ddi_strid
*ss
= (i_ddi_strid
*)strid
;
6368 if ((ss
== NULL
) || (str
== NULL
))
6372 * Allocate an id using VM_FIRSTFIT in order to keep allocated id
6373 * range as compressed as possible. This is important to minimize
6374 * the amount of space used when the id is used as a ddi_soft_state
6375 * index by the caller.
6377 * If the id list is exhausted, increase the size of the list
6378 * by the chuck size specified in ddi_strid_init and reattempt
6381 if ((id
= id_allocff_nosleep(ss
->strid_space
)) == (id_t
)-1) {
6382 id_space_extend(ss
->strid_space
, ss
->strid_spacesz
,
6383 ss
->strid_spacesz
+ ss
->strid_chunksz
);
6384 ss
->strid_spacesz
+= ss
->strid_chunksz
;
6385 if ((id
= id_allocff_nosleep(ss
->strid_space
)) == (id_t
)-1)
6390 * NOTE: since we create and destroy in unison we can save space by
6391 * using bystr key as the byid value. This means destroy must occur
6392 * in (byid, bystr) order.
6394 s
= i_ddi_strdup(str
, KM_SLEEP
);
6395 if (mod_hash_insert(ss
->strid_bystr
, (mod_hash_key_t
)s
,
6396 (mod_hash_val_t
)(intptr_t)id
) != 0) {
6397 ddi_strid_free(strid
, id
);
6400 if (mod_hash_insert(ss
->strid_byid
, (mod_hash_key_t
)(intptr_t)id
,
6401 (mod_hash_val_t
)s
) != 0) {
6402 ddi_strid_free(strid
, id
);
6406 /* NOTE: s if freed on mod_hash_destroy by mod_hash_strval_dtor */
6410 /* allocate an id mapping within the specified set for str, return id */
6412 ddi_strid_alloc(ddi_strid
*strid
, char *str
)
6414 return (i_ddi_strid_alloc(strid
, str
));
6417 /* return the id within the specified strid given the str */
6419 ddi_strid_str2id(ddi_strid
*strid
, char *str
)
6421 i_ddi_strid
*ss
= (i_ddi_strid
*)strid
;
6426 if (ss
&& str
&& (mod_hash_find(ss
->strid_bystr
,
6427 (mod_hash_key_t
)str
, &hv
) == 0))
6428 id
= (int)(intptr_t)hv
;
6432 /* return str within the specified strid given the id */
6434 ddi_strid_id2str(ddi_strid
*strid
, id_t id
)
6436 i_ddi_strid
*ss
= (i_ddi_strid
*)strid
;
6440 ASSERT(ss
&& id
> 0);
6441 if (ss
&& (id
> 0) && (mod_hash_find(ss
->strid_byid
,
6442 (mod_hash_key_t
)(uintptr_t)id
, &hv
) == 0))
6447 /* free the id mapping within the specified strid */
6449 ddi_strid_free(ddi_strid
*strid
, id_t id
)
6451 i_ddi_strid
*ss
= (i_ddi_strid
*)strid
;
6454 ASSERT(ss
&& id
> 0);
6455 if ((ss
== NULL
) || (id
<= 0))
6458 /* bystr key is byid value: destroy order must be (byid, bystr) */
6459 str
= ddi_strid_id2str(strid
, id
);
6460 (void) mod_hash_destroy(ss
->strid_byid
, (mod_hash_key_t
)(uintptr_t)id
);
6461 id_free(ss
->strid_space
, id
);
6464 (void) mod_hash_destroy(ss
->strid_bystr
, (mod_hash_key_t
)str
);
6467 /* destroy the strid set */
6469 ddi_strid_fini(ddi_strid
**strid_p
)
6474 if (strid_p
== NULL
)
6477 ss
= (i_ddi_strid
*)(*strid_p
);
6481 /* bystr key is byid value: destroy order must be (byid, bystr) */
6483 mod_hash_destroy_hash(ss
->strid_byid
);
6485 mod_hash_destroy_hash(ss
->strid_bystr
);
6486 if (ss
->strid_space
)
6487 id_space_destroy(ss
->strid_space
);
6488 kmem_free(ss
, sizeof (*ss
));
6493 * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
6494 * Storage is double buffered to prevent updates during devi_addr use -
6495 * double buffering is adaquate for reliable ddi_deviname() consumption.
6496 * The double buffer is not freed until dev_info structure destruction
6497 * (by i_ddi_free_node).
6500 ddi_set_name_addr(dev_info_t
*dip
, char *name
)
6502 char *buf
= DEVI(dip
)->devi_addr_buf
;
6506 buf
= kmem_zalloc(2 * MAXNAMELEN
, KM_SLEEP
);
6507 DEVI(dip
)->devi_addr_buf
= buf
;
6511 ASSERT(strlen(name
) < MAXNAMELEN
);
6512 newaddr
= (DEVI(dip
)->devi_addr
== buf
) ?
6513 (buf
+ MAXNAMELEN
) : buf
;
6514 (void) strlcpy(newaddr
, name
, MAXNAMELEN
);
6518 DEVI(dip
)->devi_addr
= newaddr
;
6522 ddi_get_name_addr(dev_info_t
*dip
)
6524 return (DEVI(dip
)->devi_addr
);
6528 ddi_set_parent_data(dev_info_t
*dip
, void *pd
)
6530 DEVI(dip
)->devi_parent_data
= pd
;
6534 ddi_get_parent_data(dev_info_t
*dip
)
6536 return (DEVI(dip
)->devi_parent_data
);
6540 * ddi_name_to_major: returns the major number of a named module,
6541 * derived from the current driver alias binding.
6543 * Caveat: drivers should avoid the use of this function, in particular
6544 * together with ddi_get_name/ddi_binding name, as per
6545 * major = ddi_name_to_major(ddi_get_name(devi));
6546 * ddi_name_to_major() relies on the state of the device/alias binding,
6547 * which can and does change dynamically as aliases are administered
6548 * over time. An attached device instance cannot rely on the major
6549 * number returned by ddi_name_to_major() to match its own major number.
6551 * For driver use, ddi_driver_major() reliably returns the major number
6552 * for the module to which the device was bound at attach time over
6553 * the life of the instance.
6554 * major = ddi_driver_major(dev_info_t *)
6557 ddi_name_to_major(char *name
)
6559 return (mod_name_to_major(name
));
6563 * ddi_major_to_name: Returns the module name bound to a major number.
6566 ddi_major_to_name(major_t major
)
6568 return (mod_major_to_name(major
));
6572 * Return the name of the devinfo node pointed at by 'dip' in the buffer
6573 * pointed at by 'name.' A devinfo node is named as a result of calling
6576 * Note: the driver must be held before calling this function!
6579 ddi_deviname(dev_info_t
*dip
, char *name
)
6584 if (dip
== ddi_root_node()) {
6589 if (i_ddi_node_state(dip
) < DS_BOUND
) {
6593 * Use ddi_get_name_addr() without checking state so we get
6594 * a unit-address if we are called after ddi_set_name_addr()
6595 * by nexus DDI_CTL_INITCHILD code, but before completing
6596 * node promotion to DS_INITIALIZED. We currently have
6597 * two situations where we are called in this state:
6598 * o For framework processing of a path-oriented alias.
6599 * o If a SCSA nexus driver calls ddi_devid_register()
6600 * from it's tran_tgt_init(9E) implementation.
6602 addrname
= ddi_get_name_addr(dip
);
6603 if (addrname
== NULL
)
6607 if (*addrname
== '\0') {
6608 (void) sprintf(name
, "/%s", ddi_node_name(dip
));
6610 (void) sprintf(name
, "/%s@%s", ddi_node_name(dip
), addrname
);
6617 * Spits out the name of device node, typically name@addr, for a given node,
6618 * using the driver name, not the nodename.
6620 * Used by match_parent. Not to be used elsewhere.
6623 i_ddi_parname(dev_info_t
*dip
, char *name
)
6627 if (dip
== ddi_root_node()) {
6632 ASSERT(i_ddi_node_state(dip
) >= DS_INITIALIZED
);
6634 if (*(addrname
= ddi_get_name_addr(dip
)) == '\0')
6635 (void) sprintf(name
, "%s", ddi_binding_name(dip
));
6637 (void) sprintf(name
, "%s@%s", ddi_binding_name(dip
), addrname
);
6642 pathname_work(dev_info_t
*dip
, char *path
)
6646 if (dip
== ddi_root_node()) {
6650 (void) pathname_work(ddi_get_parent(dip
), path
);
6651 bp
= path
+ strlen(path
);
6652 (void) ddi_deviname(dip
, bp
);
6657 ddi_pathname(dev_info_t
*dip
, char *path
)
6659 return (pathname_work(dip
, path
));
6663 ddi_pathname_minor(struct ddi_minor_data
*dmdp
, char *path
)
6665 if (dmdp
->dip
== NULL
)
6668 (void) ddi_pathname(dmdp
->dip
, path
);
6669 if (dmdp
->ddm_name
) {
6670 (void) strcat(path
, ":");
6671 (void) strcat(path
, dmdp
->ddm_name
);
6678 pathname_work_obp(dev_info_t
*dip
, char *path
)
6684 * look up the "obp-path" property, return the path if it exists
6686 if (ddi_prop_lookup_string(DDI_DEV_T_ANY
, dip
, DDI_PROP_DONTPASS
,
6687 "obp-path", &obp_path
) == DDI_PROP_SUCCESS
) {
6688 (void) strcpy(path
, obp_path
);
6689 ddi_prop_free(obp_path
);
6694 * stop at root, no obp path
6696 if (dip
== ddi_root_node()) {
6700 obp_path
= pathname_work_obp(ddi_get_parent(dip
), path
);
6701 if (obp_path
== NULL
)
6705 * append our component to parent's obp path
6707 bp
= path
+ strlen(path
);
6708 if (*(bp
- 1) != '/')
6709 (void) strcat(bp
++, "/");
6710 (void) ddi_deviname(dip
, bp
);
6715 * return the 'obp-path' based path for the given node, or NULL if the node
6716 * does not have a different obp path. NOTE: Unlike ddi_pathname, this
6717 * function can't be called from interrupt context (since we need to
6718 * lookup a string property).
6721 ddi_pathname_obp(dev_info_t
*dip
, char *path
)
6723 ASSERT(!servicing_interrupt());
6724 if (dip
== NULL
|| path
== NULL
)
6727 /* split work into a separate function to aid debugging */
6728 return (pathname_work_obp(dip
, path
));
6732 ddi_pathname_obp_set(dev_info_t
*dip
, char *component
)
6735 char *obp_path
= NULL
;
6736 int rc
= DDI_FAILURE
;
6739 return (DDI_FAILURE
);
6741 obp_path
= kmem_zalloc(MAXPATHLEN
, KM_SLEEP
);
6743 pdip
= ddi_get_parent(dip
);
6745 if (ddi_pathname_obp(pdip
, obp_path
) == NULL
) {
6746 (void) ddi_pathname(pdip
, obp_path
);
6750 (void) strncat(obp_path
, "/", MAXPATHLEN
);
6751 (void) strncat(obp_path
, component
, MAXPATHLEN
);
6753 rc
= ndi_prop_update_string(DDI_DEV_T_NONE
, dip
, "obp-path",
6757 kmem_free(obp_path
, MAXPATHLEN
);
6763 * Given a dev_t, return the pathname of the corresponding device in the
6764 * buffer pointed at by "path." The buffer is assumed to be large enough
6765 * to hold the pathname of the device (MAXPATHLEN).
6767 * The pathname of a device is the pathname of the devinfo node to which
6768 * the device "belongs," concatenated with the character ':' and the name
6769 * of the minor node corresponding to the dev_t. If spec_type is 0 then
6770 * just the pathname of the devinfo node is returned without driving attach
6771 * of that node. For a non-zero spec_type, an attach is performed and a
6772 * search of the minor list occurs.
6774 * It is possible that the path associated with the dev_t is not
6775 * currently available in the devinfo tree. In order to have a
6776 * dev_t, a device must have been discovered before, which means
6777 * that the path is always in the instance tree. The one exception
6778 * to this is if the dev_t is associated with a pseudo driver, in
6779 * which case the device must exist on the pseudo branch of the
6780 * devinfo tree as a result of parsing .conf files.
6783 ddi_dev_pathname(dev_t devt
, int spec_type
, char *path
)
6786 major_t major
= getmajor(devt
);
6792 if (major
>= devcnt
)
6794 if (major
== clone_major
) {
6795 /* clone has no minor nodes, manufacture the path here */
6796 if ((drvname
= ddi_major_to_name(getminor(devt
))) == NULL
)
6799 (void) snprintf(path
, MAXPATHLEN
, "%s:%s", CLONE_PATH
, drvname
);
6800 return (DDI_SUCCESS
);
6803 /* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
6804 if ((instance
= dev_to_instance(devt
)) == -1)
6807 /* reconstruct the path given the major/instance */
6808 if (e_ddi_majorinstance_to_path(major
, instance
, path
) != DDI_SUCCESS
)
6811 /* if spec_type given we must drive attach and search minor nodes */
6812 if ((spec_type
== S_IFCHR
) || (spec_type
== S_IFBLK
)) {
6813 /* attach the path so we can search minors */
6814 if ((dip
= e_ddi_hold_devi_by_path(path
, 0)) == NULL
)
6817 /* Add minorname to path. */
6818 ndi_devi_enter(dip
, &circ
);
6819 minorname
= i_ddi_devtspectype_to_minorname(dip
,
6822 (void) strcat(path
, ":");
6823 (void) strcat(path
, minorname
);
6825 ndi_devi_exit(dip
, circ
);
6826 ddi_release_devi(dip
);
6827 if (minorname
== NULL
)
6830 ASSERT(strlen(path
) < MAXPATHLEN
);
6831 return (DDI_SUCCESS
);
6834 return (DDI_FAILURE
);
6838 * Given a major number and an instance, return the path.
6839 * This interface does NOT drive attach.
6842 e_ddi_majorinstance_to_path(major_t major
, int instance
, char *path
)
6844 struct devnames
*dnp
;
6847 if ((major
>= devcnt
) || (instance
== -1)) {
6849 return (DDI_FAILURE
);
6852 /* look for the major/instance in the instance tree */
6853 if (e_ddi_instance_majorinstance_to_path(major
, instance
,
6854 path
) == DDI_SUCCESS
) {
6855 ASSERT(strlen(path
) < MAXPATHLEN
);
6856 return (DDI_SUCCESS
);
6860 * Not in instance tree, find the instance on the per driver list and
6861 * construct path to instance via ddi_pathname(). This is how paths
6862 * down the 'pseudo' branch are constructed.
6864 dnp
= &(devnamesp
[major
]);
6865 LOCK_DEV_OPS(&(dnp
->dn_lock
));
6866 for (dip
= dnp
->dn_head
; dip
;
6867 dip
= (dev_info_t
*)DEVI(dip
)->devi_next
) {
6868 /* Skip if instance does not match. */
6869 if (DEVI(dip
)->devi_instance
!= instance
)
6873 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
6874 * node demotion, so it is not an effective way of ensuring
6875 * that the ddi_pathname result has a unit-address. Instead,
6876 * we reverify the node state after calling ddi_pathname().
6878 if (i_ddi_node_state(dip
) >= DS_INITIALIZED
) {
6879 (void) ddi_pathname(dip
, path
);
6880 if (i_ddi_node_state(dip
) < DS_INITIALIZED
)
6882 UNLOCK_DEV_OPS(&(dnp
->dn_lock
));
6883 ASSERT(strlen(path
) < MAXPATHLEN
);
6884 return (DDI_SUCCESS
);
6887 UNLOCK_DEV_OPS(&(dnp
->dn_lock
));
6889 /* can't reconstruct the path */
6891 return (DDI_FAILURE
);
6894 #define GLD_DRIVER_PPA "SUNW,gld_v0_ppa"
6897 * Given the dip for a network interface return the ppa for that interface.
6899 * In all cases except GLD v0 drivers, the ppa == instance.
6900 * In the case of GLD v0 drivers, the ppa is equal to the attach order.
6901 * So for these drivers when the attach routine calls gld_register(),
6902 * the GLD framework creates an integer property called "gld_driver_ppa"
6903 * that can be queried here.
6905 * The only time this function is used is when a system is booting over nfs.
6906 * In this case the system has to resolve the pathname of the boot device
6910 i_ddi_devi_get_ppa(dev_info_t
*dip
)
6912 return (ddi_prop_get_int(DDI_DEV_T_ANY
, dip
,
6913 DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
,
6914 GLD_DRIVER_PPA
, ddi_get_instance(dip
)));
6918 * i_ddi_devi_set_ppa() should only be called from gld_register()
6919 * and only for GLD v0 drivers
6922 i_ddi_devi_set_ppa(dev_info_t
*dip
, int ppa
)
6924 (void) e_ddi_prop_update_int(DDI_DEV_T_NONE
, dip
, GLD_DRIVER_PPA
, ppa
);
6929 * Private DDI Console bell functions.
6932 ddi_ring_console_bell(clock_t duration
)
6934 if (ddi_console_bell_func
!= NULL
)
6935 (*ddi_console_bell_func
)(duration
);
6939 ddi_set_console_bell(void (*bellfunc
)(clock_t duration
))
6941 ddi_console_bell_func
= bellfunc
;
6945 ddi_dma_alloc_handle(dev_info_t
*dip
, ddi_dma_attr_t
*attr
,
6946 int (*waitfp
)(caddr_t
), caddr_t arg
, ddi_dma_handle_t
*handlep
)
6948 int (*funcp
)() = ddi_dma_allochdl
;
6949 ddi_dma_attr_t dma_attr
;
6950 struct bus_ops
*bop
;
6952 if (attr
== (ddi_dma_attr_t
*)0)
6953 return (DDI_DMA_BADATTR
);
6957 bop
= DEVI(dip
)->devi_ops
->devo_bus_ops
;
6958 if (bop
&& bop
->bus_dma_allochdl
)
6959 funcp
= bop
->bus_dma_allochdl
;
6961 return ((*funcp
)(dip
, dip
, &dma_attr
, waitfp
, arg
, handlep
));
6965 ddi_dma_free_handle(ddi_dma_handle_t
*handlep
)
6967 ddi_dma_handle_t h
= *handlep
;
6968 (void) ddi_dma_freehdl(HD
, HD
, h
);
6971 static uintptr_t dma_mem_list_id
= 0;
6975 ddi_dma_mem_alloc(ddi_dma_handle_t handle
, size_t length
,
6976 ddi_device_acc_attr_t
*accattrp
, uint_t flags
,
6977 int (*waitfp
)(caddr_t
), caddr_t arg
, caddr_t
*kaddrp
,
6978 size_t *real_length
, ddi_acc_handle_t
*handlep
)
6980 ddi_dma_impl_t
*hp
= (ddi_dma_impl_t
*)handle
;
6981 dev_info_t
*dip
= hp
->dmai_rdip
;
6983 ddi_dma_attr_t
*attrp
= &hp
->dmai_attr
;
6984 uint_t sleepflag
, xfermodes
;
6988 if (waitfp
== DDI_DMA_SLEEP
)
6989 fp
= (int (*)())KM_SLEEP
;
6990 else if (waitfp
== DDI_DMA_DONTWAIT
)
6991 fp
= (int (*)())KM_NOSLEEP
;
6994 *handlep
= impl_acc_hdl_alloc(fp
, arg
);
6995 if (*handlep
== NULL
)
6996 return (DDI_FAILURE
);
6998 /* check if the cache attributes are supported */
6999 if (i_ddi_check_cache_attr(flags
) == B_FALSE
)
7000 return (DDI_FAILURE
);
7003 * Transfer the meaningful bits to xfermodes.
7004 * Double-check if the 3rd party driver correctly sets the bits.
7005 * If not, set DDI_DMA_STREAMING to keep compatibility.
7007 xfermodes
= flags
& (DDI_DMA_CONSISTENT
| DDI_DMA_STREAMING
);
7008 if (xfermodes
== 0) {
7009 xfermodes
= DDI_DMA_STREAMING
;
7013 * initialize the common elements of data access handle
7015 ap
= impl_acc_hdl_get(*handlep
);
7016 ap
->ah_vers
= VERS_ACCHDL
;
7020 ap
->ah_xfermodes
= flags
;
7021 ap
->ah_acc
= *accattrp
;
7023 sleepflag
= ((waitfp
== DDI_DMA_SLEEP
) ? 1 : 0);
7024 if (xfermodes
== DDI_DMA_CONSISTENT
) {
7025 rval
= i_ddi_mem_alloc(dip
, attrp
, length
, sleepflag
,
7026 flags
, accattrp
, kaddrp
, NULL
, ap
);
7027 *real_length
= length
;
7029 rval
= i_ddi_mem_alloc(dip
, attrp
, length
, sleepflag
,
7030 flags
, accattrp
, kaddrp
, real_length
, ap
);
7032 if (rval
== DDI_SUCCESS
) {
7033 ap
->ah_len
= (off_t
)(*real_length
);
7034 ap
->ah_addr
= *kaddrp
;
7036 impl_acc_hdl_free(*handlep
);
7037 *handlep
= (ddi_acc_handle_t
)NULL
;
7038 if (waitfp
!= DDI_DMA_SLEEP
&& waitfp
!= DDI_DMA_DONTWAIT
) {
7039 ddi_set_callback(waitfp
, arg
, &dma_mem_list_id
);
7047 ddi_dma_mem_free(ddi_acc_handle_t
*handlep
)
7051 ap
= impl_acc_hdl_get(*handlep
);
7054 i_ddi_mem_free((caddr_t
)ap
->ah_addr
, ap
);
7059 impl_acc_hdl_free(*handlep
);
7060 *handlep
= (ddi_acc_handle_t
)NULL
;
7062 if (dma_mem_list_id
!= 0) {
7063 ddi_run_callback(&dma_mem_list_id
);
7068 ddi_dma_buf_bind_handle(ddi_dma_handle_t handle
, struct buf
*bp
,
7069 uint_t flags
, int (*waitfp
)(caddr_t
), caddr_t arg
,
7070 ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
)
7072 ddi_dma_impl_t
*hp
= (ddi_dma_impl_t
*)handle
;
7073 dev_info_t
*dip
, *rdip
;
7074 struct ddi_dma_req dmareq
;
7077 dmareq
.dmar_flags
= flags
;
7078 dmareq
.dmar_fp
= waitfp
;
7079 dmareq
.dmar_arg
= arg
;
7080 dmareq
.dmar_object
.dmao_size
= (uint_t
)bp
->b_bcount
;
7082 if (bp
->b_flags
& B_PAGEIO
) {
7083 dmareq
.dmar_object
.dmao_type
= DMA_OTYP_PAGES
;
7084 dmareq
.dmar_object
.dmao_obj
.pp_obj
.pp_pp
= bp
->b_pages
;
7085 dmareq
.dmar_object
.dmao_obj
.pp_obj
.pp_offset
=
7086 (uint_t
)(((uintptr_t)bp
->b_un
.b_addr
) & MMU_PAGEOFFSET
);
7088 dmareq
.dmar_object
.dmao_obj
.virt_obj
.v_addr
= bp
->b_un
.b_addr
;
7089 if (bp
->b_flags
& B_SHADOW
) {
7090 dmareq
.dmar_object
.dmao_obj
.virt_obj
.v_priv
=
7092 dmareq
.dmar_object
.dmao_type
= DMA_OTYP_BUFVADDR
;
7094 dmareq
.dmar_object
.dmao_type
=
7095 (bp
->b_flags
& (B_PHYS
| B_REMAPPED
)) ?
7096 DMA_OTYP_BUFVADDR
: DMA_OTYP_VADDR
;
7097 dmareq
.dmar_object
.dmao_obj
.virt_obj
.v_priv
= NULL
;
7101 * If the buffer has no proc pointer, or the proc
7102 * struct has the kernel address space, or the buffer has
7103 * been marked B_REMAPPED (meaning that it is now
7104 * mapped into the kernel's address space), then
7105 * the address space is kas (kernel address space).
7107 if ((bp
->b_proc
== NULL
) || (bp
->b_proc
->p_as
== &kas
) ||
7108 (bp
->b_flags
& B_REMAPPED
)) {
7109 dmareq
.dmar_object
.dmao_obj
.virt_obj
.v_as
= 0;
7111 dmareq
.dmar_object
.dmao_obj
.virt_obj
.v_as
=
7116 dip
= rdip
= hp
->dmai_rdip
;
7117 if (dip
!= ddi_root_node())
7118 dip
= (dev_info_t
*)DEVI(dip
)->devi_bus_dma_bindhdl
;
7119 funcp
= DEVI(rdip
)->devi_bus_dma_bindfunc
;
7120 return ((*funcp
)(dip
, rdip
, handle
, &dmareq
, cookiep
, ccountp
));
7124 ddi_dma_addr_bind_handle(ddi_dma_handle_t handle
, struct as
*as
,
7125 caddr_t addr
, size_t len
, uint_t flags
, int (*waitfp
)(caddr_t
),
7126 caddr_t arg
, ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
)
7128 ddi_dma_impl_t
*hp
= (ddi_dma_impl_t
*)handle
;
7129 dev_info_t
*dip
, *rdip
;
7130 struct ddi_dma_req dmareq
;
7133 if (len
== (uint_t
)0) {
7134 return (DDI_DMA_NOMAPPING
);
7136 dmareq
.dmar_flags
= flags
;
7137 dmareq
.dmar_fp
= waitfp
;
7138 dmareq
.dmar_arg
= arg
;
7139 dmareq
.dmar_object
.dmao_size
= len
;
7140 dmareq
.dmar_object
.dmao_type
= DMA_OTYP_VADDR
;
7141 dmareq
.dmar_object
.dmao_obj
.virt_obj
.v_as
= as
;
7142 dmareq
.dmar_object
.dmao_obj
.virt_obj
.v_addr
= addr
;
7143 dmareq
.dmar_object
.dmao_obj
.virt_obj
.v_priv
= NULL
;
7145 dip
= rdip
= hp
->dmai_rdip
;
7146 if (dip
!= ddi_root_node())
7147 dip
= (dev_info_t
*)DEVI(dip
)->devi_bus_dma_bindhdl
;
7148 funcp
= DEVI(rdip
)->devi_bus_dma_bindfunc
;
7149 return ((*funcp
)(dip
, rdip
, handle
, &dmareq
, cookiep
, ccountp
));
7153 ddi_dma_nextcookie(ddi_dma_handle_t handle
, ddi_dma_cookie_t
*cookiep
)
7155 ddi_dma_impl_t
*hp
= (ddi_dma_impl_t
*)handle
;
7156 ddi_dma_cookie_t
*cp
;
7158 cp
= hp
->dmai_cookie
;
7161 cookiep
->dmac_notused
= cp
->dmac_notused
;
7162 cookiep
->dmac_type
= cp
->dmac_type
;
7163 cookiep
->dmac_address
= cp
->dmac_address
;
7164 cookiep
->dmac_size
= cp
->dmac_size
;
7169 ddi_dma_numwin(ddi_dma_handle_t handle
, uint_t
*nwinp
)
7171 ddi_dma_impl_t
*hp
= (ddi_dma_impl_t
*)handle
;
7172 if ((hp
->dmai_rflags
& DDI_DMA_PARTIAL
) == 0) {
7173 return (DDI_FAILURE
);
7175 *nwinp
= hp
->dmai_nwin
;
7176 return (DDI_SUCCESS
);
7181 ddi_dma_getwin(ddi_dma_handle_t h
, uint_t win
, off_t
*offp
,
7182 size_t *lenp
, ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
)
7184 int (*funcp
)() = ddi_dma_win
;
7185 struct bus_ops
*bop
;
7187 bop
= DEVI(HD
)->devi_ops
->devo_bus_ops
;
7188 if (bop
&& bop
->bus_dma_win
)
7189 funcp
= bop
->bus_dma_win
;
7191 return ((*funcp
)(HD
, HD
, h
, win
, offp
, lenp
, cookiep
, ccountp
));
7195 ddi_dma_set_sbus64(ddi_dma_handle_t h
, ulong_t burstsizes
)
7197 return (ddi_dma_mctl(HD
, HD
, h
, DDI_DMA_SET_SBUS64
, 0,
7198 &burstsizes
, 0, 0));
7202 i_ddi_dma_fault_check(ddi_dma_impl_t
*hp
)
7204 return (hp
->dmai_fault
);
7208 ddi_check_dma_handle(ddi_dma_handle_t handle
)
7210 ddi_dma_impl_t
*hp
= (ddi_dma_impl_t
*)handle
;
7211 int (*check
)(ddi_dma_impl_t
*);
7213 if ((check
= hp
->dmai_fault_check
) == NULL
)
7214 check
= i_ddi_dma_fault_check
;
7216 return (((*check
)(hp
) == DDI_SUCCESS
) ? DDI_SUCCESS
: DDI_FAILURE
);
7220 i_ddi_dma_set_fault(ddi_dma_handle_t handle
)
7222 ddi_dma_impl_t
*hp
= (ddi_dma_impl_t
*)handle
;
7223 void (*notify
)(ddi_dma_impl_t
*);
7225 if (!hp
->dmai_fault
) {
7227 if ((notify
= hp
->dmai_fault_notify
) != NULL
)
7233 i_ddi_dma_clr_fault(ddi_dma_handle_t handle
)
7235 ddi_dma_impl_t
*hp
= (ddi_dma_impl_t
*)handle
;
7236 void (*notify
)(ddi_dma_impl_t
*);
7238 if (hp
->dmai_fault
) {
7240 if ((notify
= hp
->dmai_fault_notify
) != NULL
)
7246 * register mapping routines.
7249 ddi_regs_map_setup(dev_info_t
*dip
, uint_t rnumber
, caddr_t
*addrp
,
7250 offset_t offset
, offset_t len
, ddi_device_acc_attr_t
*accattrp
,
7251 ddi_acc_handle_t
*handle
)
7258 * Allocate and initialize the common elements of data access handle.
7260 *handle
= impl_acc_hdl_alloc(KM_SLEEP
, NULL
);
7261 hp
= impl_acc_hdl_get(*handle
);
7262 hp
->ah_vers
= VERS_ACCHDL
;
7264 hp
->ah_rnumber
= rnumber
;
7265 hp
->ah_offset
= offset
;
7267 hp
->ah_acc
= *accattrp
;
7270 * Set up the mapping request and call to parent.
7272 mr
.map_op
= DDI_MO_MAP_LOCKED
;
7273 mr
.map_type
= DDI_MT_RNUMBER
;
7274 mr
.map_obj
.rnumber
= rnumber
;
7275 mr
.map_prot
= PROT_READ
| PROT_WRITE
;
7276 mr
.map_flags
= DDI_MF_KERNEL_MAPPING
;
7277 mr
.map_handlep
= hp
;
7278 mr
.map_vers
= DDI_MAP_VERSION
;
7279 result
= ddi_map(dip
, &mr
, offset
, len
, addrp
);
7282 * check for end result
7284 if (result
!= DDI_SUCCESS
) {
7285 impl_acc_hdl_free(*handle
);
7286 *handle
= (ddi_acc_handle_t
)NULL
;
7288 hp
->ah_addr
= *addrp
;
7295 ddi_regs_map_free(ddi_acc_handle_t
*handlep
)
7300 hp
= impl_acc_hdl_get(*handlep
);
7303 mr
.map_op
= DDI_MO_UNMAP
;
7304 mr
.map_type
= DDI_MT_RNUMBER
;
7305 mr
.map_obj
.rnumber
= hp
->ah_rnumber
;
7306 mr
.map_prot
= PROT_READ
| PROT_WRITE
;
7307 mr
.map_flags
= DDI_MF_KERNEL_MAPPING
;
7308 mr
.map_handlep
= hp
;
7309 mr
.map_vers
= DDI_MAP_VERSION
;
7312 * Call my parent to unmap my regs.
7314 (void) ddi_map(hp
->ah_dip
, &mr
, hp
->ah_offset
,
7315 hp
->ah_len
, &hp
->ah_addr
);
7319 impl_acc_hdl_free(*handlep
);
7320 *handlep
= (ddi_acc_handle_t
)NULL
;
7324 ddi_device_zero(ddi_acc_handle_t handle
, caddr_t dev_addr
, size_t bytecount
,
7325 ssize_t dev_advcnt
, uint_t dev_datasz
)
7332 /* check for total byte count is multiple of data transfer size */
7333 if (bytecount
!= ((bytecount
/ dev_datasz
) * dev_datasz
))
7334 return (DDI_FAILURE
);
7336 switch (dev_datasz
) {
7337 case DDI_DATA_SZ01_ACC
:
7338 for (b
= (uint8_t *)dev_addr
;
7339 bytecount
!= 0; bytecount
-= 1, b
+= dev_advcnt
)
7340 ddi_put8(handle
, b
, 0);
7342 case DDI_DATA_SZ02_ACC
:
7343 for (w
= (uint16_t *)dev_addr
;
7344 bytecount
!= 0; bytecount
-= 2, w
+= dev_advcnt
)
7345 ddi_put16(handle
, w
, 0);
7347 case DDI_DATA_SZ04_ACC
:
7348 for (l
= (uint32_t *)dev_addr
;
7349 bytecount
!= 0; bytecount
-= 4, l
+= dev_advcnt
)
7350 ddi_put32(handle
, l
, 0);
7352 case DDI_DATA_SZ08_ACC
:
7353 for (ll
= (uint64_t *)dev_addr
;
7354 bytecount
!= 0; bytecount
-= 8, ll
+= dev_advcnt
)
7355 ddi_put64(handle
, ll
, 0x0ll
);
7358 return (DDI_FAILURE
);
7360 return (DDI_SUCCESS
);
7365 ddi_acc_handle_t src_handle
, caddr_t src_addr
, ssize_t src_advcnt
,
7366 ddi_acc_handle_t dest_handle
, caddr_t dest_addr
, ssize_t dest_advcnt
,
7367 size_t bytecount
, uint_t dev_datasz
)
7369 uint8_t *b_src
, *b_dst
;
7370 uint16_t *w_src
, *w_dst
;
7371 uint32_t *l_src
, *l_dst
;
7372 uint64_t *ll_src
, *ll_dst
;
7374 /* check for total byte count is multiple of data transfer size */
7375 if (bytecount
!= ((bytecount
/ dev_datasz
) * dev_datasz
))
7376 return (DDI_FAILURE
);
7378 switch (dev_datasz
) {
7379 case DDI_DATA_SZ01_ACC
:
7380 b_src
= (uint8_t *)src_addr
;
7381 b_dst
= (uint8_t *)dest_addr
;
7383 for (; bytecount
!= 0; bytecount
-= 1) {
7384 ddi_put8(dest_handle
, b_dst
,
7385 ddi_get8(src_handle
, b_src
));
7386 b_dst
+= dest_advcnt
;
7387 b_src
+= src_advcnt
;
7390 case DDI_DATA_SZ02_ACC
:
7391 w_src
= (uint16_t *)src_addr
;
7392 w_dst
= (uint16_t *)dest_addr
;
7394 for (; bytecount
!= 0; bytecount
-= 2) {
7395 ddi_put16(dest_handle
, w_dst
,
7396 ddi_get16(src_handle
, w_src
));
7397 w_dst
+= dest_advcnt
;
7398 w_src
+= src_advcnt
;
7401 case DDI_DATA_SZ04_ACC
:
7402 l_src
= (uint32_t *)src_addr
;
7403 l_dst
= (uint32_t *)dest_addr
;
7405 for (; bytecount
!= 0; bytecount
-= 4) {
7406 ddi_put32(dest_handle
, l_dst
,
7407 ddi_get32(src_handle
, l_src
));
7408 l_dst
+= dest_advcnt
;
7409 l_src
+= src_advcnt
;
7412 case DDI_DATA_SZ08_ACC
:
7413 ll_src
= (uint64_t *)src_addr
;
7414 ll_dst
= (uint64_t *)dest_addr
;
7416 for (; bytecount
!= 0; bytecount
-= 8) {
7417 ddi_put64(dest_handle
, ll_dst
,
7418 ddi_get64(src_handle
, ll_src
));
7419 ll_dst
+= dest_advcnt
;
7420 ll_src
+= src_advcnt
;
7424 return (DDI_FAILURE
);
7426 return (DDI_SUCCESS
);
7429 #define swap16(value) \
7430 ((((value) & 0xff) << 8) | ((value) >> 8))
7432 #define swap32(value) \
7433 (((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
7434 (uint32_t)swap16((uint16_t)((value) >> 16)))
7436 #define swap64(value) \
7437 (((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \
7439 (uint64_t)swap32((uint32_t)((value) >> 32)))
7442 ddi_swap16(uint16_t value
)
7444 return (swap16(value
));
7448 ddi_swap32(uint32_t value
)
7450 return (swap32(value
));
7454 ddi_swap64(uint64_t value
)
7456 return (swap64(value
));
7460 * Convert a binding name to a driver name.
7461 * A binding name is the name used to determine the driver for a
7462 * device - it may be either an alias for the driver or the name
7463 * of the driver itself.
7466 i_binding_to_drv_name(char *bname
)
7470 ASSERT(bname
!= NULL
);
7472 if ((major_no
= ddi_name_to_major(bname
)) == -1)
7474 return (ddi_major_to_name(major_no
));
7478 * Search for minor name that has specified dev_t and spec_type.
7479 * If spec_type is zero then any dev_t match works. Since we
7480 * are returning a pointer to the minor name string, we require the
7481 * caller to do the locking.
7484 i_ddi_devtspectype_to_minorname(dev_info_t
*dip
, dev_t dev
, int spec_type
)
7486 struct ddi_minor_data
*dmdp
;
7489 * The did layered driver currently intentionally returns a
7490 * devinfo ptr for an underlying sd instance based on a did
7491 * dev_t. In this case it is not an error.
7493 * The did layered driver is associated with Sun Cluster.
7495 ASSERT((ddi_driver_major(dip
) == getmajor(dev
)) ||
7496 (strcmp(ddi_major_to_name(getmajor(dev
)), "did") == 0));
7498 ASSERT(DEVI_BUSY_OWNED(dip
));
7499 for (dmdp
= DEVI(dip
)->devi_minor
; dmdp
; dmdp
= dmdp
->next
) {
7500 if (((dmdp
->type
== DDM_MINOR
) ||
7501 (dmdp
->type
== DDM_INTERNAL_PATH
) ||
7502 (dmdp
->type
== DDM_DEFAULT
)) &&
7503 (dmdp
->ddm_dev
== dev
) &&
7504 ((((spec_type
& (S_IFCHR
|S_IFBLK
))) == 0) ||
7505 (dmdp
->ddm_spec_type
== spec_type
)))
7506 return (dmdp
->ddm_name
);
7513 * Find the devt and spectype of the specified minor_name.
7514 * Return DDI_FAILURE if minor_name not found. Since we are
7515 * returning everything via arguments we can do the locking.
7518 i_ddi_minorname_to_devtspectype(dev_info_t
*dip
, char *minor_name
,
7519 dev_t
*devtp
, int *spectypep
)
7522 struct ddi_minor_data
*dmdp
;
7524 /* deal with clone minor nodes */
7525 if (dip
== clone_dip
) {
7528 * Make sure minor_name is a STREAMS driver.
7529 * We load the driver but don't attach to any instances.
7532 major
= ddi_name_to_major(minor_name
);
7533 if (major
== DDI_MAJOR_T_NONE
)
7534 return (DDI_FAILURE
);
7536 if (ddi_hold_driver(major
) == NULL
)
7537 return (DDI_FAILURE
);
7539 if (STREAMSTAB(major
) == NULL
) {
7540 ddi_rele_driver(major
);
7541 return (DDI_FAILURE
);
7543 ddi_rele_driver(major
);
7546 *devtp
= makedevice(clone_major
, (minor_t
)major
);
7549 *spectypep
= S_IFCHR
;
7551 return (DDI_SUCCESS
);
7554 ndi_devi_enter(dip
, &circ
);
7555 for (dmdp
= DEVI(dip
)->devi_minor
; dmdp
; dmdp
= dmdp
->next
) {
7556 if (((dmdp
->type
!= DDM_MINOR
) &&
7557 (dmdp
->type
!= DDM_INTERNAL_PATH
) &&
7558 (dmdp
->type
!= DDM_DEFAULT
)) ||
7559 strcmp(minor_name
, dmdp
->ddm_name
))
7563 *devtp
= dmdp
->ddm_dev
;
7566 *spectypep
= dmdp
->ddm_spec_type
;
7568 ndi_devi_exit(dip
, circ
);
7569 return (DDI_SUCCESS
);
7571 ndi_devi_exit(dip
, circ
);
7573 return (DDI_FAILURE
);
7576 static kmutex_t devid_gen_mutex
;
7577 static short devid_gen_number
;
7581 static int devid_register_corrupt
= 0;
7582 static int devid_register_corrupt_major
= 0;
7583 static int devid_register_corrupt_hint
= 0;
7584 static int devid_register_corrupt_hint_major
= 0;
7586 static int devid_lyr_debug
= 0;
7588 #define DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs) \
7589 if (devid_lyr_debug) \
7590 ddi_debug_devid_devts(msg, ndevs, devs)
7594 #define DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)
7602 ddi_debug_devid_devts(char *msg
, int ndevs
, dev_t
*devs
)
7606 cmn_err(CE_CONT
, "%s:\n", msg
);
7607 for (i
= 0; i
< ndevs
; i
++) {
7608 cmn_err(CE_CONT
, " 0x%lx\n", devs
[i
]);
7613 ddi_debug_devid_paths(char *msg
, int npaths
, char **paths
)
7617 cmn_err(CE_CONT
, "%s:\n", msg
);
7618 for (i
= 0; i
< npaths
; i
++) {
7619 cmn_err(CE_CONT
, " %s\n", paths
[i
]);
7624 ddi_debug_devid_devts_per_path(char *path
, int ndevs
, dev_t
*devs
)
7628 cmn_err(CE_CONT
, "dev_ts per path %s\n", path
);
7629 for (i
= 0; i
< ndevs
; i
++) {
7630 cmn_err(CE_CONT
, " 0x%lx\n", devs
[i
]);
7637 * Register device id into DDI framework.
7638 * Must be called when the driver is bound.
7641 i_ddi_devid_register(dev_info_t
*dip
, ddi_devid_t devid
)
7643 impl_devid_t
*i_devid
= (impl_devid_t
*)devid
;
7645 const char *driver_name
;
7649 if ((dip
== NULL
) ||
7650 ((major
= ddi_driver_major(dip
)) == DDI_MAJOR_T_NONE
))
7651 return (DDI_FAILURE
);
7653 /* verify that the devid is valid */
7654 if (ddi_devid_valid(devid
) != DDI_SUCCESS
)
7655 return (DDI_FAILURE
);
7657 /* Updating driver name hint in devid */
7658 driver_name
= ddi_driver_name(dip
);
7659 driver_len
= strlen(driver_name
);
7660 if (driver_len
> DEVID_HINT_SIZE
) {
7661 /* Pick up last four characters of driver name */
7662 driver_name
+= driver_len
- DEVID_HINT_SIZE
;
7663 driver_len
= DEVID_HINT_SIZE
;
7665 bzero(i_devid
->did_driver
, DEVID_HINT_SIZE
);
7666 bcopy(driver_name
, i_devid
->did_driver
, driver_len
);
7669 /* Corrupt the devid for testing. */
7670 if (devid_register_corrupt
)
7671 i_devid
->did_id
[0] += devid_register_corrupt
;
7672 if (devid_register_corrupt_major
&&
7673 (major
== devid_register_corrupt_major
))
7674 i_devid
->did_id
[0] += 1;
7675 if (devid_register_corrupt_hint
)
7676 i_devid
->did_driver
[0] += devid_register_corrupt_hint
;
7677 if (devid_register_corrupt_hint_major
&&
7678 (major
== devid_register_corrupt_hint_major
))
7679 i_devid
->did_driver
[0] += 1;
7682 /* encode the devid as a string */
7683 if ((devid_str
= ddi_devid_str_encode(devid
, NULL
)) == NULL
)
7684 return (DDI_FAILURE
);
7686 /* add string as a string property */
7687 if (ndi_prop_update_string(DDI_DEV_T_NONE
, dip
,
7688 DEVID_PROP_NAME
, devid_str
) != DDI_SUCCESS
) {
7689 cmn_err(CE_WARN
, "%s%d: devid property update failed",
7690 ddi_driver_name(dip
), ddi_get_instance(dip
));
7691 ddi_devid_str_free(devid_str
);
7692 return (DDI_FAILURE
);
7695 /* keep pointer to devid string for interrupt context fma code */
7696 if (DEVI(dip
)->devi_devid_str
)
7697 ddi_devid_str_free(DEVI(dip
)->devi_devid_str
);
7698 DEVI(dip
)->devi_devid_str
= devid_str
;
7699 return (DDI_SUCCESS
);
7703 ddi_devid_register(dev_info_t
*dip
, ddi_devid_t devid
)
7707 rval
= i_ddi_devid_register(dip
, devid
);
7708 if (rval
== DDI_SUCCESS
) {
7710 * Register devid in devid-to-path cache
7712 if (e_devid_cache_register(dip
, devid
) == DDI_SUCCESS
) {
7713 mutex_enter(&DEVI(dip
)->devi_lock
);
7714 DEVI(dip
)->devi_flags
|= DEVI_CACHED_DEVID
;
7715 mutex_exit(&DEVI(dip
)->devi_lock
);
7716 } else if (ddi_get_name_addr(dip
)) {
7718 * We only expect cache_register DDI_FAILURE when we
7719 * can't form the full path because of NULL devi_addr.
7721 cmn_err(CE_WARN
, "%s%d: failed to cache devid",
7722 ddi_driver_name(dip
), ddi_get_instance(dip
));
7725 cmn_err(CE_WARN
, "%s%d: failed to register devid",
7726 ddi_driver_name(dip
), ddi_get_instance(dip
));
7732 * Remove (unregister) device id from DDI framework.
7733 * Must be called when device is detached.
7736 i_ddi_devid_unregister(dev_info_t
*dip
)
7738 if (DEVI(dip
)->devi_devid_str
) {
7739 ddi_devid_str_free(DEVI(dip
)->devi_devid_str
);
7740 DEVI(dip
)->devi_devid_str
= NULL
;
7743 /* remove the devid property */
7744 (void) ndi_prop_remove(DDI_DEV_T_NONE
, dip
, DEVID_PROP_NAME
);
7748 ddi_devid_unregister(dev_info_t
*dip
)
7750 mutex_enter(&DEVI(dip
)->devi_lock
);
7751 DEVI(dip
)->devi_flags
&= ~DEVI_CACHED_DEVID
;
7752 mutex_exit(&DEVI(dip
)->devi_lock
);
7753 e_devid_cache_unregister(dip
);
7754 i_ddi_devid_unregister(dip
);
7758 * Allocate and initialize a device id.
7763 ushort_t devid_type
,
7766 ddi_devid_t
*ret_devid
)
7768 impl_devid_t
*i_devid
;
7769 int sz
= sizeof (*i_devid
) + nbytes
- sizeof (char);
7771 const char *driver_name
;
7773 switch (devid_type
) {
7774 case DEVID_SCSI3_WWN
:
7776 case DEVID_SCSI_SERIAL
:
7778 case DEVID_ATA_SERIAL
:
7782 return (DDI_FAILURE
);
7784 return (DDI_FAILURE
);
7788 return (DDI_FAILURE
);
7790 return (DDI_FAILURE
);
7791 nbytes
= sizeof (int) +
7792 sizeof (struct timeval32
) + sizeof (short);
7796 return (DDI_FAILURE
);
7799 if ((i_devid
= kmem_zalloc(sz
, KM_SLEEP
)) == NULL
)
7800 return (DDI_FAILURE
);
7802 i_devid
->did_magic_hi
= DEVID_MAGIC_MSB
;
7803 i_devid
->did_magic_lo
= DEVID_MAGIC_LSB
;
7804 i_devid
->did_rev_hi
= DEVID_REV_MSB
;
7805 i_devid
->did_rev_lo
= DEVID_REV_LSB
;
7806 DEVID_FORMTYPE(i_devid
, devid_type
);
7807 DEVID_FORMLEN(i_devid
, nbytes
);
7809 /* Fill in driver name hint */
7810 driver_name
= ddi_driver_name(dip
);
7811 driver_len
= strlen(driver_name
);
7812 if (driver_len
> DEVID_HINT_SIZE
) {
7813 /* Pick up last four characters of driver name */
7814 driver_name
+= driver_len
- DEVID_HINT_SIZE
;
7815 driver_len
= DEVID_HINT_SIZE
;
7818 bcopy(driver_name
, i_devid
->did_driver
, driver_len
);
7820 /* Fill in id field */
7821 if (devid_type
== DEVID_FAB
) {
7824 struct timeval32 timestamp32
;
7829 /* increase the generation number */
7830 mutex_enter(&devid_gen_mutex
);
7831 gen
= devid_gen_number
++;
7832 mutex_exit(&devid_gen_mutex
);
7834 cp
= i_devid
->did_id
;
7836 /* Fill in host id (big-endian byte ordering) */
7837 hostid
= zone_get_hostid(NULL
);
7838 *cp
++ = hibyte(hiword(hostid
));
7839 *cp
++ = lobyte(hiword(hostid
));
7840 *cp
++ = hibyte(loword(hostid
));
7841 *cp
++ = lobyte(loword(hostid
));
7844 * Fill in timestamp (big-endian byte ordering)
7846 * (Note that the format may have to be changed
7847 * before 2038 comes around, though it's arguably
7848 * unique enough as it is..)
7850 uniqtime32(×tamp32
);
7851 ip
= (int *)×tamp32
;
7853 i
< sizeof (timestamp32
) / sizeof (int); i
++, ip
++) {
7856 *cp
++ = hibyte(hiword(val
));
7857 *cp
++ = lobyte(hiword(val
));
7858 *cp
++ = hibyte(loword(val
));
7859 *cp
++ = lobyte(loword(val
));
7862 /* fill in the generation number */
7863 *cp
++ = hibyte(gen
);
7864 *cp
++ = lobyte(gen
);
7866 bcopy(id
, i_devid
->did_id
, nbytes
);
7868 /* return device id */
7869 *ret_devid
= (ddi_devid_t
)i_devid
;
7870 return (DDI_SUCCESS
);
7874 ddi_devid_get(dev_info_t
*dip
, ddi_devid_t
*ret_devid
)
7876 return (i_ddi_devi_get_devid(DDI_DEV_T_ANY
, dip
, ret_devid
));
7880 i_ddi_devi_get_devid(dev_t dev
, dev_info_t
*dip
, ddi_devid_t
*ret_devid
)
7884 ASSERT(dev
!= DDI_DEV_T_NONE
);
7886 /* look up the property, devt specific first */
7887 if (ddi_prop_lookup_string(dev
, dip
, DDI_PROP_DONTPASS
,
7888 DEVID_PROP_NAME
, &devidstr
) != DDI_PROP_SUCCESS
) {
7889 if ((dev
== DDI_DEV_T_ANY
) ||
7890 (ddi_prop_lookup_string(DDI_DEV_T_ANY
, dip
,
7891 DDI_PROP_DONTPASS
, DEVID_PROP_NAME
, &devidstr
) !=
7892 DDI_PROP_SUCCESS
)) {
7893 return (DDI_FAILURE
);
7897 /* convert to binary form */
7898 if (ddi_devid_str_decode(devidstr
, ret_devid
, NULL
) == -1) {
7899 ddi_prop_free(devidstr
);
7900 return (DDI_FAILURE
);
7902 ddi_prop_free(devidstr
);
7903 return (DDI_SUCCESS
);
7907 * Return a copy of the device id for dev_t
7910 ddi_lyr_get_devid(dev_t dev
, ddi_devid_t
*ret_devid
)
7916 if ((dip
= e_ddi_hold_devi_by_dev(dev
, 0)) == NULL
)
7917 return (DDI_FAILURE
);
7919 rval
= i_ddi_devi_get_devid(dev
, dip
, ret_devid
);
7921 ddi_release_devi(dip
); /* e_ddi_hold_devi_by_dev() */
7926 * Return a copy of the minor name for dev_t and spec_type
7929 ddi_lyr_get_minor_name(dev_t dev
, int spec_type
, char **minor_name
)
7937 if ((dip
= e_ddi_hold_devi_by_dev(dev
, 0)) == NULL
) {
7939 return (DDI_FAILURE
);
7942 /* Find the minor name and copy into max size buf */
7943 buf
= kmem_alloc(MAXNAMELEN
, KM_SLEEP
);
7944 ndi_devi_enter(dip
, &circ
);
7945 nm
= i_ddi_devtspectype_to_minorname(dip
, dev
, spec_type
);
7947 (void) strcpy(buf
, nm
);
7948 ndi_devi_exit(dip
, circ
);
7949 ddi_release_devi(dip
); /* e_ddi_hold_devi_by_dev() */
7952 /* duplicate into min size buf for return result */
7953 *minor_name
= i_ddi_strdup(buf
, KM_SLEEP
);
7960 /* free max size buf and return */
7961 kmem_free(buf
, MAXNAMELEN
);
7966 ddi_lyr_devid_to_devlist(
7972 ASSERT(ddi_devid_valid(devid
) == DDI_SUCCESS
);
7974 if (e_devid_cache_to_devt_list(devid
, minor_name
,
7975 retndevs
, retdevs
) == DDI_SUCCESS
) {
7976 ASSERT(*retndevs
> 0);
7977 DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
7978 *retndevs
, *retdevs
);
7979 return (DDI_SUCCESS
);
7982 if (e_ddi_devid_discovery(devid
) == DDI_FAILURE
) {
7983 return (DDI_FAILURE
);
7986 if (e_devid_cache_to_devt_list(devid
, minor_name
,
7987 retndevs
, retdevs
) == DDI_SUCCESS
) {
7988 ASSERT(*retndevs
> 0);
7989 DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
7990 *retndevs
, *retdevs
);
7991 return (DDI_SUCCESS
);
7994 return (DDI_FAILURE
);
7998 ddi_lyr_free_devlist(dev_t
*devlist
, int ndevs
)
8000 kmem_free(devlist
, sizeof (dev_t
) * ndevs
);
8004 * Note: This will need to be fixed if we ever allow processes to
8005 * have more than one data model per exec.
8008 ddi_mmap_get_model(void)
8010 return (get_udatamodel());
8014 ddi_model_convert_from(model_t model
)
8016 return ((model
& DDI_MODEL_MASK
) & ~DDI_MODEL_NATIVE
);
8020 * ddi interfaces managing storage and retrieval of eventcookies.
8024 * Invoke bus nexus driver's implementation of the
8025 * (*bus_remove_eventcall)() interface to remove a registered
8026 * callback handler for "event".
8029 ddi_remove_event_handler(ddi_callback_id_t id
)
8031 ndi_event_callbacks_t
*cb
= (ndi_event_callbacks_t
*)id
;
8036 return (DDI_FAILURE
);
8039 ddip
= NDI_EVENT_DDIP(cb
->ndi_evtcb_cookie
);
8040 return (ndi_busop_remove_eventcall(ddip
, id
));
8044 * Invoke bus nexus driver's implementation of the
8045 * (*bus_add_eventcall)() interface to register a callback handler
8049 ddi_add_event_handler(dev_info_t
*dip
, ddi_eventcookie_t event
,
8050 void (*handler
)(dev_info_t
*, ddi_eventcookie_t
, void *, void *),
8051 void *arg
, ddi_callback_id_t
*id
)
8053 return (ndi_busop_add_eventcall(dip
, dip
, event
, handler
, arg
, id
));
8058 * Return a handle for event "name" by calling up the device tree
8059 * hierarchy via (*bus_get_eventcookie)() interface until claimed
8060 * by a bus nexus or top of dev_info tree is reached.
8063 ddi_get_eventcookie(dev_info_t
*dip
, char *name
,
8064 ddi_eventcookie_t
*event_cookiep
)
8066 return (ndi_busop_get_eventcookie(dip
, dip
,
8067 name
, event_cookiep
));
8071 * This procedure is provided as the general callback function when
8072 * umem_lockmemory calls as_add_callback for long term memory locking.
8073 * When as_unmap, as_setprot, or as_free encounter segments which have
8074 * locked memory, this callback will be invoked.
8077 umem_lock_undo(struct as
*as
, void *arg
, uint_t event
)
8079 _NOTE(ARGUNUSED(as
, event
))
8080 struct ddi_umem_cookie
*cp
= (struct ddi_umem_cookie
*)arg
;
8083 * Call the cleanup function. Decrement the cookie reference
8084 * count, if it goes to zero, return the memory for the cookie.
8085 * The i_ddi_umem_unlock for this cookie may or may not have been
8086 * called already. It is the responsibility of the caller of
8087 * umem_lockmemory to handle the case of the cleanup routine
8088 * being called after a ddi_umem_unlock for the cookie
8092 (*cp
->callbacks
.cbo_umem_lock_cleanup
)((ddi_umem_cookie_t
)cp
);
8094 /* remove the cookie if reference goes to zero */
8095 if (atomic_dec_ulong_nv((ulong_t
*)(&(cp
->cook_refcnt
))) == 0) {
8096 kmem_free(cp
, sizeof (struct ddi_umem_cookie
));
8101 * The following two Consolidation Private routines provide generic
8102 * interfaces to increase/decrease the amount of device-locked memory.
8104 * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
8105 * must be called every time i_ddi_incr_locked_memory() is called.
8109 i_ddi_incr_locked_memory(proc_t
*procp
, rctl_qty_t inc
)
8111 ASSERT(procp
!= NULL
);
8112 mutex_enter(&procp
->p_lock
);
8113 if (rctl_incr_locked_mem(procp
, NULL
, inc
, 1)) {
8114 mutex_exit(&procp
->p_lock
);
8117 mutex_exit(&procp
->p_lock
);
8122 * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
8123 * must be called every time i_ddi_decr_locked_memory() is called.
8127 i_ddi_decr_locked_memory(proc_t
*procp
, rctl_qty_t dec
)
8129 ASSERT(procp
!= NULL
);
8130 mutex_enter(&procp
->p_lock
);
8131 rctl_decr_locked_mem(procp
, NULL
, dec
, 1);
8132 mutex_exit(&procp
->p_lock
);
8136 * The cookie->upd_max_lock_rctl flag is used to determine if we should
8137 * charge device locked memory to the max-locked-memory rctl. Tracking
8138 * device locked memory causes the rctl locks to get hot under high-speed
8139 * I/O such as RDSv3 over IB. If there is no max-locked-memory rctl limit,
8140 * we bypass charging the locked memory to the rctl altogether. The cookie's
8141 * flag tells us if the rctl value should be updated when unlocking the memory,
8142 * in case the rctl gets changed after the memory was locked. Any device
8143 * locked memory in that rare case will not be counted toward the rctl limit.
8145 * When tracking the locked memory, the kproject_t parameter is always NULL
8146 * in the code paths:
8147 * i_ddi_incr_locked_memory -> rctl_incr_locked_mem
8148 * i_ddi_decr_locked_memory -> rctl_decr_locked_mem
8149 * Thus, we always use the tk_proj member to check the projp setting.
8152 init_lockedmem_rctl_flag(struct ddi_umem_cookie
*cookie
)
8163 projp
= p
->p_task
->tk_proj
;
8168 if (zonep
->zone_locked_mem_ctl
== UINT64_MAX
&&
8169 projp
->kpj_data
.kpd_locked_mem_ctl
== UINT64_MAX
)
8170 cookie
->upd_max_lock_rctl
= 0;
8172 cookie
->upd_max_lock_rctl
= 1;
8176 * This routine checks if the max-locked-memory resource ctl is
8177 * exceeded, if not increments it, grabs a hold on the project.
8178 * Returns 0 if successful otherwise returns error code
8181 umem_incr_devlockmem(struct ddi_umem_cookie
*cookie
)
8187 if (cookie
->upd_max_lock_rctl
== 0)
8190 procp
= cookie
->procp
;
8193 if ((ret
= i_ddi_incr_locked_memory(procp
,
8194 cookie
->size
)) != 0) {
8201 * Decrements the max-locked-memory resource ctl and releases
8202 * the hold on the project that was acquired during umem_incr_devlockmem
8205 umem_decr_devlockmem(struct ddi_umem_cookie
*cookie
)
8209 if (cookie
->upd_max_lock_rctl
== 0)
8212 proc
= (proc_t
*)cookie
->procp
;
8216 i_ddi_decr_locked_memory(proc
, cookie
->size
);
8220 * A consolidation private function which is essentially equivalent to
8221 * ddi_umem_lock but with the addition of arguments ops_vector and procp.
8222 * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
8223 * the ops_vector is valid.
8225 * Lock the virtual address range in the current process and create a
8226 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8227 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8230 * Note: The resource control accounting currently uses a full charge model
8231 * in other words attempts to lock the same/overlapping areas of memory
8232 * will deduct the full size of the buffer from the projects running
8233 * counter for the device locked memory.
8235 * addr, size should be PAGESIZE aligned
8237 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8238 * identifies whether the locked memory will be read or written or both
8239 * DDI_UMEMLOCK_LONGTERM must be set when the locking will
8240 * be maintained for an indefinitely long period (essentially permanent),
8241 * rather than for what would be required for a typical I/O completion.
8242 * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
8243 * if the memory pertains to a regular file which is mapped MAP_SHARED.
8244 * This is to prevent a deadlock if a file truncation is attempted after
8245 * after the locking is done.
8247 * Returns 0 on success
8248 * EINVAL - for invalid parameters
8249 * EPERM, ENOMEM and other error codes returned by as_pagelock
8250 * ENOMEM - is returned if the current request to lock memory exceeds
8251 * *.max-locked-memory resource control value.
8252 * EFAULT - memory pertains to a regular file mapped shared and
8253 * and DDI_UMEMLOCK_LONGTERM flag is set
8254 * EAGAIN - could not start the ddi_umem_unlock list processing thread
8257 umem_lockmemory(caddr_t addr
, size_t len
, int flags
, ddi_umem_cookie_t
*cookie
,
8258 struct umem_callback_ops
*ops_vector
,
8262 struct ddi_umem_cookie
*p
;
8263 void (*driver_callback
)() = NULL
;
8268 /* Allow device drivers to not have to reference "curproc" */
8272 *cookie
= NULL
; /* in case of any error return */
8274 /* These are the only three valid flags */
8275 if ((flags
& ~(DDI_UMEMLOCK_READ
| DDI_UMEMLOCK_WRITE
|
8276 DDI_UMEMLOCK_LONGTERM
)) != 0)
8279 /* At least one (can be both) of the two access flags must be set */
8280 if ((flags
& (DDI_UMEMLOCK_READ
| DDI_UMEMLOCK_WRITE
)) == 0)
8283 /* addr and len must be page-aligned */
8284 if (((uintptr_t)addr
& PAGEOFFSET
) != 0)
8287 if ((len
& PAGEOFFSET
) != 0)
8291 * For longterm locking a driver callback must be specified; if
8292 * not longterm then a callback is optional.
8294 if (ops_vector
!= NULL
) {
8295 if (ops_vector
->cbo_umem_callback_version
!=
8296 UMEM_CALLBACK_VERSION
)
8299 driver_callback
= ops_vector
->cbo_umem_lock_cleanup
;
8301 if ((driver_callback
== NULL
) && (flags
& DDI_UMEMLOCK_LONGTERM
))
8305 * Call i_ddi_umem_unlock_thread_start if necessary. It will
8306 * be called on first ddi_umem_lock or umem_lockmemory call.
8308 if (ddi_umem_unlock_thread
== NULL
)
8309 i_ddi_umem_unlock_thread_start();
8311 /* Allocate memory for the cookie */
8312 p
= kmem_zalloc(sizeof (struct ddi_umem_cookie
), KM_SLEEP
);
8314 /* Convert the flags to seg_rw type */
8315 if (flags
& DDI_UMEMLOCK_WRITE
) {
8316 p
->s_flags
= S_WRITE
;
8318 p
->s_flags
= S_READ
;
8321 /* Store procp in cookie for later iosetup/unlock */
8322 p
->procp
= (void *)procp
;
8325 * Store the struct as pointer in cookie for later use by
8326 * ddi_umem_unlock. The proc->p_as will be stale if ddi_umem_unlock
8327 * is called after relvm is called.
8332 * The size field is needed for lockmem accounting.
8335 init_lockedmem_rctl_flag(p
);
8337 if (umem_incr_devlockmem(p
) != 0) {
8339 * The requested memory cannot be locked
8341 kmem_free(p
, sizeof (struct ddi_umem_cookie
));
8342 *cookie
= (ddi_umem_cookie_t
)NULL
;
8346 /* Lock the pages corresponding to addr, len in memory */
8347 error
= as_pagelock(as
, &(p
->pparray
), addr
, len
, p
->s_flags
);
8349 umem_decr_devlockmem(p
);
8350 kmem_free(p
, sizeof (struct ddi_umem_cookie
));
8351 *cookie
= (ddi_umem_cookie_t
)NULL
;
8356 * For longterm locking the addr must pertain to a seg_vn segment or
8357 * or a seg_spt segment.
8358 * If the segment pertains to a regular file, it cannot be
8359 * mapped MAP_SHARED.
8360 * This is to prevent a deadlock if a file truncation is attempted
8361 * after the locking is done.
8362 * Doing this after as_pagelock guarantees persistence of the as; if
8363 * an unacceptable segment is found, the cleanup includes calling
8364 * as_pageunlock before returning EFAULT.
8366 * segdev is allowed here as it is already locked. This allows
8367 * for memory exported by drivers through mmap() (which is already
8368 * locked) to be allowed for LONGTERM.
8370 if (flags
& DDI_UMEMLOCK_LONGTERM
) {
8371 extern struct seg_ops segspt_shmops
;
8372 extern struct seg_ops segdev_ops
;
8373 AS_LOCK_ENTER(as
, RW_READER
);
8374 for (seg
= as_segat(as
, addr
); ; seg
= AS_SEGNEXT(as
, seg
)) {
8375 if (seg
== NULL
|| seg
->s_base
> addr
+ len
)
8377 if (seg
->s_ops
== &segdev_ops
)
8379 if (((seg
->s_ops
!= &segvn_ops
) &&
8380 (seg
->s_ops
!= &segspt_shmops
)) ||
8381 ((SEGOP_GETVP(seg
, addr
, &vp
) == 0 &&
8382 vp
!= NULL
&& vp
->v_type
== VREG
) &&
8383 (SEGOP_GETTYPE(seg
, addr
) & MAP_SHARED
))) {
8384 as_pageunlock(as
, p
->pparray
,
8385 addr
, len
, p
->s_flags
);
8387 umem_decr_devlockmem(p
);
8388 kmem_free(p
, sizeof (struct ddi_umem_cookie
));
8389 *cookie
= (ddi_umem_cookie_t
)NULL
;
8397 /* Initialize the fields in the ddi_umem_cookie */
8399 p
->type
= UMEM_LOCKED
;
8400 if (driver_callback
!= NULL
) {
8401 /* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
8403 p
->callbacks
= *ops_vector
;
8405 /* only i_ddi_umme_unlock needs the cookie */
8409 *cookie
= (ddi_umem_cookie_t
)p
;
8412 * If a driver callback was specified, add an entry to the
8413 * as struct callback list. The as_pagelock above guarantees
8414 * the persistence of as.
8416 if (driver_callback
) {
8417 error
= as_add_callback(as
, umem_lock_undo
, p
, AS_ALL_EVENT
,
8418 addr
, len
, KM_SLEEP
);
8420 as_pageunlock(as
, p
->pparray
,
8421 addr
, len
, p
->s_flags
);
8422 umem_decr_devlockmem(p
);
8423 kmem_free(p
, sizeof (struct ddi_umem_cookie
));
8424 *cookie
= (ddi_umem_cookie_t
)NULL
;
8431 * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
8432 * the cookie. Called from i_ddi_umem_unlock_thread.
8436 i_ddi_umem_unlock(struct ddi_umem_cookie
*p
)
8441 * There is no way to determine whether a callback to
8442 * umem_lock_undo was registered via as_add_callback.
8443 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
8444 * a valid callback function structure.) as_delete_callback
8445 * is called to delete a possible registered callback. If the
8446 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
8447 * indicates that there was a callback registered, and that is was
8448 * successfully deleted. Thus, the cookie reference count
8449 * will never be decremented by umem_lock_undo. Just return the
8450 * memory for the cookie, since both users of the cookie are done.
8451 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
8452 * never registered. A return of AS_CALLBACK_DELETE_DEFERRED
8453 * indicates that callback processing is taking place and, and
8454 * umem_lock_undo is, or will be, executing, and thus decrementing
8455 * the cookie reference count when it is complete.
8457 * This needs to be done before as_pageunlock so that the
8458 * persistence of as is guaranteed because of the locked pages.
8461 rc
= as_delete_callback(p
->asp
, p
);
8465 * The proc->p_as will be stale if i_ddi_umem_unlock is called
8466 * after relvm is called so use p->asp.
8468 as_pageunlock(p
->asp
, p
->pparray
, p
->cvaddr
, p
->size
, p
->s_flags
);
8471 * Now that we have unlocked the memory decrement the
8472 * *.max-locked-memory rctl
8474 umem_decr_devlockmem(p
);
8476 if (rc
== AS_CALLBACK_DELETED
) {
8477 /* umem_lock_undo will not happen, return the cookie memory */
8478 ASSERT(p
->cook_refcnt
== 2);
8479 kmem_free(p
, sizeof (struct ddi_umem_cookie
));
8482 * umem_undo_lock may happen if as_delete_callback returned
8483 * AS_CALLBACK_DELETE_DEFERRED. In that case, decrement the
8484 * reference count, atomically, and return the cookie
8485 * memory if the reference count goes to zero. The only
8486 * other value for rc is AS_CALLBACK_NOTFOUND. In that
8487 * case, just return the cookie memory.
8489 if ((rc
!= AS_CALLBACK_DELETE_DEFERRED
) ||
8490 (atomic_dec_ulong_nv((ulong_t
*)(&(p
->cook_refcnt
)))
8492 kmem_free(p
, sizeof (struct ddi_umem_cookie
));
8498 * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
8500 * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
8501 * until it is empty. Then, wait for more to be added. This thread is awoken
8502 * via calls to ddi_umem_unlock.
8506 i_ddi_umem_unlock_thread(void)
8508 struct ddi_umem_cookie
*ret_cookie
;
8509 callb_cpr_t cprinfo
;
8511 /* process the ddi_umem_unlock list */
8512 CALLB_CPR_INIT(&cprinfo
, &ddi_umem_unlock_mutex
,
8513 callb_generic_cpr
, "unlock_thread");
8515 mutex_enter(&ddi_umem_unlock_mutex
);
8516 if (ddi_umem_unlock_head
!= NULL
) { /* list not empty */
8517 ret_cookie
= ddi_umem_unlock_head
;
8518 /* take if off the list */
8519 if ((ddi_umem_unlock_head
=
8520 ddi_umem_unlock_head
->unl_forw
) == NULL
) {
8521 ddi_umem_unlock_tail
= NULL
;
8523 mutex_exit(&ddi_umem_unlock_mutex
);
8524 /* unlock the pages in this cookie */
8525 (void) i_ddi_umem_unlock(ret_cookie
);
8526 } else { /* list is empty, wait for next ddi_umem_unlock */
8527 CALLB_CPR_SAFE_BEGIN(&cprinfo
);
8528 cv_wait(&ddi_umem_unlock_cv
, &ddi_umem_unlock_mutex
);
8529 CALLB_CPR_SAFE_END(&cprinfo
, &ddi_umem_unlock_mutex
);
8530 mutex_exit(&ddi_umem_unlock_mutex
);
8533 /* ddi_umem_unlock_thread does not exit */
8538 * Start the thread that will process the ddi_umem_unlock list if it is
8539 * not already started (i_ddi_umem_unlock_thread).
8542 i_ddi_umem_unlock_thread_start(void)
8544 mutex_enter(&ddi_umem_unlock_mutex
);
8545 if (ddi_umem_unlock_thread
== NULL
) {
8546 ddi_umem_unlock_thread
= thread_create(NULL
, 0,
8547 i_ddi_umem_unlock_thread
, NULL
, 0, &p0
,
8548 TS_RUN
, minclsyspri
);
8550 mutex_exit(&ddi_umem_unlock_mutex
);
8554 * Lock the virtual address range in the current process and create a
8555 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8556 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8559 * Note: The resource control accounting currently uses a full charge model
8560 * in other words attempts to lock the same/overlapping areas of memory
8561 * will deduct the full size of the buffer from the projects running
8562 * counter for the device locked memory. This applies to umem_lockmemory too.
8564 * addr, size should be PAGESIZE aligned
8565 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8566 * identifies whether the locked memory will be read or written or both
8568 * Returns 0 on success
8569 * EINVAL - for invalid parameters
8570 * EPERM, ENOMEM and other error codes returned by as_pagelock
8571 * ENOMEM - is returned if the current request to lock memory exceeds
8572 * *.max-locked-memory resource control value.
8573 * EAGAIN - could not start the ddi_umem_unlock list processing thread
8576 ddi_umem_lock(caddr_t addr
, size_t len
, int flags
, ddi_umem_cookie_t
*cookie
)
8579 struct ddi_umem_cookie
*p
;
8581 *cookie
= NULL
; /* in case of any error return */
8583 /* These are the only two valid flags */
8584 if ((flags
& ~(DDI_UMEMLOCK_READ
| DDI_UMEMLOCK_WRITE
)) != 0) {
8588 /* At least one of the two flags (or both) must be set */
8589 if ((flags
& (DDI_UMEMLOCK_READ
| DDI_UMEMLOCK_WRITE
)) == 0) {
8593 /* addr and len must be page-aligned */
8594 if (((uintptr_t)addr
& PAGEOFFSET
) != 0) {
8598 if ((len
& PAGEOFFSET
) != 0) {
8603 * Call i_ddi_umem_unlock_thread_start if necessary. It will
8604 * be called on first ddi_umem_lock or umem_lockmemory call.
8606 if (ddi_umem_unlock_thread
== NULL
)
8607 i_ddi_umem_unlock_thread_start();
8609 /* Allocate memory for the cookie */
8610 p
= kmem_zalloc(sizeof (struct ddi_umem_cookie
), KM_SLEEP
);
8612 /* Convert the flags to seg_rw type */
8613 if (flags
& DDI_UMEMLOCK_WRITE
) {
8614 p
->s_flags
= S_WRITE
;
8616 p
->s_flags
= S_READ
;
8619 /* Store curproc in cookie for later iosetup/unlock */
8620 p
->procp
= (void *)curproc
;
8623 * Store the struct as pointer in cookie for later use by
8624 * ddi_umem_unlock. The proc->p_as will be stale if ddi_umem_unlock
8625 * is called after relvm is called.
8627 p
->asp
= curproc
->p_as
;
8629 * The size field is needed for lockmem accounting.
8632 init_lockedmem_rctl_flag(p
);
8634 if (umem_incr_devlockmem(p
) != 0) {
8636 * The requested memory cannot be locked
8638 kmem_free(p
, sizeof (struct ddi_umem_cookie
));
8639 *cookie
= (ddi_umem_cookie_t
)NULL
;
8643 /* Lock the pages corresponding to addr, len in memory */
8644 error
= as_pagelock(((proc_t
*)p
->procp
)->p_as
, &(p
->pparray
),
8645 addr
, len
, p
->s_flags
);
8647 umem_decr_devlockmem(p
);
8648 kmem_free(p
, sizeof (struct ddi_umem_cookie
));
8649 *cookie
= (ddi_umem_cookie_t
)NULL
;
8653 /* Initialize the fields in the ddi_umem_cookie */
8655 p
->type
= UMEM_LOCKED
;
8658 *cookie
= (ddi_umem_cookie_t
)p
;
8663 * Add the cookie to the ddi_umem_unlock list. Pages will be
8664 * unlocked by i_ddi_umem_unlock_thread.
8668 ddi_umem_unlock(ddi_umem_cookie_t cookie
)
8670 struct ddi_umem_cookie
*p
= (struct ddi_umem_cookie
*)cookie
;
8672 ASSERT(p
->type
== UMEM_LOCKED
);
8673 ASSERT(CPU_ON_INTR(CPU
) == 0); /* cannot be high level */
8674 ASSERT(ddi_umem_unlock_thread
!= NULL
);
8676 p
->unl_forw
= (struct ddi_umem_cookie
*)NULL
; /* end of list */
8678 * Queue the unlock request and notify i_ddi_umem_unlock thread
8679 * if it's called in the interrupt context. Otherwise, unlock pages
8682 if (servicing_interrupt()) {
8683 /* queue the unlock request and notify the thread */
8684 mutex_enter(&ddi_umem_unlock_mutex
);
8685 if (ddi_umem_unlock_head
== NULL
) {
8686 ddi_umem_unlock_head
= ddi_umem_unlock_tail
= p
;
8687 cv_broadcast(&ddi_umem_unlock_cv
);
8689 ddi_umem_unlock_tail
->unl_forw
= p
;
8690 ddi_umem_unlock_tail
= p
;
8692 mutex_exit(&ddi_umem_unlock_mutex
);
8694 /* unlock the pages right away */
8695 (void) i_ddi_umem_unlock(p
);
8700 * Create a buf structure from a ddi_umem_cookie
8701 * cookie - is a ddi_umem_cookie for from ddi_umem_lock and ddi_umem_alloc
8702 * (only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
8703 * off, len - identifies the portion of the memory represented by the cookie
8704 * that the buf points to.
8705 * NOTE: off, len need to follow the alignment/size restrictions of the
8706 * device (dev) that this buf will be passed to. Some devices
8707 * will accept unrestricted alignment/size, whereas others (such as
8708 * st) require some block-size alignment/size. It is the caller's
8709 * responsibility to ensure that the alignment/size restrictions
8710 * are met (we cannot assert as we do not know the restrictions)
8712 * direction - is one of B_READ or B_WRITE and needs to be compatible with
8713 * the flags used in ddi_umem_lock
8715 * The following three arguments are used to initialize fields in the
8716 * buf structure and are uninterpreted by this routine.
8722 * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
8724 * Returns a buf structure pointer on success (to be freed by freerbuf)
8725 * NULL on any parameter error or memory alloc failure
8729 ddi_umem_iosetup(ddi_umem_cookie_t cookie
, off_t off
, size_t len
,
8730 int direction
, dev_t dev
, daddr_t blkno
,
8731 int (*iodone
)(struct buf
*), int sleepflag
)
8733 struct ddi_umem_cookie
*p
= (struct ddi_umem_cookie
*)cookie
;
8737 * check for valid cookie offset, len
8739 if ((off
+ len
) > p
->size
) {
8743 if (len
> p
->size
) {
8747 /* direction has to be one of B_READ or B_WRITE */
8748 if ((direction
!= B_READ
) && (direction
!= B_WRITE
)) {
8752 /* These are the only two valid sleepflags */
8753 if ((sleepflag
!= DDI_UMEM_SLEEP
) && (sleepflag
!= DDI_UMEM_NOSLEEP
)) {
8758 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
8760 if ((p
->type
!= UMEM_LOCKED
) && (p
->type
!= KMEM_NON_PAGEABLE
)) {
8764 /* If type is KMEM_NON_PAGEABLE procp is NULL */
8765 ASSERT((p
->type
== KMEM_NON_PAGEABLE
) ?
8766 (p
->procp
== NULL
) : (p
->procp
!= NULL
));
8768 bp
= kmem_alloc(sizeof (struct buf
), sleepflag
);
8774 bp
->b_flags
= B_BUSY
| B_PHYS
| direction
;
8776 bp
->b_lblkno
= blkno
;
8777 bp
->b_iodone
= iodone
;
8779 bp
->b_proc
= (proc_t
*)p
->procp
;
8780 ASSERT(((uintptr_t)(p
->cvaddr
) & PAGEOFFSET
) == 0);
8781 bp
->b_un
.b_addr
= (caddr_t
)((uintptr_t)(p
->cvaddr
) + off
);
8782 if (p
->pparray
!= NULL
) {
8783 bp
->b_flags
|= B_SHADOW
;
8784 ASSERT(((uintptr_t)(p
->cvaddr
) & PAGEOFFSET
) == 0);
8785 bp
->b_shadow
= p
->pparray
+ btop(off
);
8791 * Fault-handling and related routines
8795 ddi_get_devstate(dev_info_t
*dip
)
8797 if (DEVI_IS_DEVICE_OFFLINE(dip
))
8798 return (DDI_DEVSTATE_OFFLINE
);
8799 else if (DEVI_IS_DEVICE_DOWN(dip
) || DEVI_IS_BUS_DOWN(dip
))
8800 return (DDI_DEVSTATE_DOWN
);
8801 else if (DEVI_IS_BUS_QUIESCED(dip
))
8802 return (DDI_DEVSTATE_QUIESCED
);
8803 else if (DEVI_IS_DEVICE_DEGRADED(dip
))
8804 return (DDI_DEVSTATE_DEGRADED
);
8806 return (DDI_DEVSTATE_UP
);
8810 ddi_dev_report_fault(dev_info_t
*dip
, ddi_fault_impact_t impact
,
8811 ddi_fault_location_t location
, const char *message
)
8813 struct ddi_fault_event_data fd
;
8814 ddi_eventcookie_t ec
;
8817 * Assemble all the information into a fault-event-data structure
8820 fd
.f_impact
= impact
;
8821 fd
.f_location
= location
;
8822 fd
.f_message
= message
;
8823 fd
.f_oldstate
= ddi_get_devstate(dip
);
8826 * Get eventcookie from defining parent.
8828 if (ddi_get_eventcookie(dip
, DDI_DEVI_FAULT_EVENT
, &ec
) !=
8832 (void) ndi_post_event(dip
, dip
, ec
, &fd
);
8836 i_ddi_devi_class(dev_info_t
*dip
)
8838 return (DEVI(dip
)->devi_device_class
);
8842 i_ddi_set_devi_class(dev_info_t
*dip
, char *devi_class
, int flag
)
8844 struct dev_info
*devi
= DEVI(dip
);
8846 mutex_enter(&devi
->devi_lock
);
8848 if (devi
->devi_device_class
)
8849 kmem_free(devi
->devi_device_class
,
8850 strlen(devi
->devi_device_class
) + 1);
8852 if ((devi
->devi_device_class
= i_ddi_strdup(devi_class
, flag
))
8854 mutex_exit(&devi
->devi_lock
);
8855 return (DDI_SUCCESS
);
8858 mutex_exit(&devi
->devi_lock
);
8860 return (DDI_FAILURE
);
8865 * Task Queues DDI interfaces.
8870 ddi_taskq_create(dev_info_t
*dip
, const char *name
, int nthreads
,
8871 pri_t pri
, uint_t cflags
)
8873 char full_name
[TASKQ_NAMELEN
];
8874 const char *tq_name
;
8880 nodeid
= ddi_get_instance(dip
);
8885 (void) snprintf(full_name
, sizeof (full_name
), "%s_%s",
8886 ddi_driver_name(dip
), name
);
8888 tq_name
= full_name
;
8891 return ((ddi_taskq_t
*)taskq_create_instance(tq_name
, nodeid
, nthreads
,
8892 pri
== TASKQ_DEFAULTPRI
? minclsyspri
: pri
,
8893 nthreads
, INT_MAX
, TASKQ_PREPOPULATE
));
8897 ddi_taskq_destroy(ddi_taskq_t
*tq
)
8899 taskq_destroy((taskq_t
*)tq
);
8903 ddi_taskq_dispatch(ddi_taskq_t
*tq
, void (* func
)(void *),
8904 void *arg
, uint_t dflags
)
8906 taskqid_t id
= taskq_dispatch((taskq_t
*)tq
, func
, arg
,
8907 dflags
== DDI_SLEEP
? TQ_SLEEP
: TQ_NOSLEEP
);
8909 return (id
!= 0 ? DDI_SUCCESS
: DDI_FAILURE
);
8913 ddi_taskq_wait(ddi_taskq_t
*tq
)
8915 taskq_wait((taskq_t
*)tq
);
8919 ddi_taskq_suspend(ddi_taskq_t
*tq
)
8921 taskq_suspend((taskq_t
*)tq
);
8925 ddi_taskq_suspended(ddi_taskq_t
*tq
)
8927 return (taskq_suspended((taskq_t
*)tq
));
8931 ddi_taskq_resume(ddi_taskq_t
*tq
)
8933 taskq_resume((taskq_t
*)tq
);
8945 boolean_t nonum
= B_TRUE
;
8949 for (p
= ifname
+ l
; p
!= ifname
; l
--) {
8952 (void) strlcpy(alnum
, ifname
, l
+ 1);
8953 if (ddi_strtoul(p
+ 1, NULL
, 10, &num
) != 0)
8954 return (DDI_FAILURE
);
8959 if (l
== 0 || nonum
)
8960 return (DDI_FAILURE
);
8963 return (DDI_SUCCESS
);
8967 * Default initialization function for drivers that don't need to quiesce.
8971 ddi_quiesce_not_needed(dev_info_t
*dip
)
8973 return (DDI_SUCCESS
);
8977 * Initialization function for drivers that should implement quiesce()
8982 ddi_quiesce_not_supported(dev_info_t
*dip
)
8984 return (DDI_FAILURE
);
8988 ddi_strdup(const char *str
, int flag
)
8993 ASSERT(str
!= NULL
);
8994 ASSERT((flag
== KM_SLEEP
) || (flag
== KM_NOSLEEP
));
8997 if ((ptr
= kmem_alloc(n
+ 1, flag
)) == NULL
)
8999 bcopy(str
, ptr
, n
+ 1);
9004 strdup(const char *str
)
9006 return (ddi_strdup(str
, KM_SLEEP
));
9012 ASSERT(str
!= NULL
);
9013 kmem_free(str
, strlen(str
) + 1);
9017 * Generic DDI callback interfaces.
9021 ddi_cb_register(dev_info_t
*dip
, ddi_cb_flags_t flags
, ddi_cb_func_t cbfunc
,
9022 void *arg1
, void *arg2
, ddi_cb_handle_t
*ret_hdlp
)
9026 ASSERT(dip
!= NULL
);
9027 ASSERT(DDI_CB_FLAG_VALID(flags
));
9028 ASSERT(cbfunc
!= NULL
);
9029 ASSERT(ret_hdlp
!= NULL
);
9031 /* Sanity check the context */
9032 ASSERT(!servicing_interrupt());
9033 if (servicing_interrupt())
9034 return (DDI_FAILURE
);
9036 /* Validate parameters */
9037 if ((dip
== NULL
) || !DDI_CB_FLAG_VALID(flags
) ||
9038 (cbfunc
== NULL
) || (ret_hdlp
== NULL
))
9039 return (DDI_EINVAL
);
9041 /* Check for previous registration */
9042 if (DEVI(dip
)->devi_cb_p
!= NULL
)
9043 return (DDI_EALREADY
);
9045 /* Allocate and initialize callback */
9046 cbp
= kmem_zalloc(sizeof (ddi_cb_t
), KM_SLEEP
);
9048 cbp
->cb_func
= cbfunc
;
9049 cbp
->cb_arg1
= arg1
;
9050 cbp
->cb_arg2
= arg2
;
9051 cbp
->cb_flags
= flags
;
9052 DEVI(dip
)->devi_cb_p
= cbp
;
9054 /* If adding an IRM callback, notify IRM */
9055 if (flags
& DDI_CB_FLAG_INTR
)
9056 i_ddi_irm_set_cb(dip
, B_TRUE
);
9058 *ret_hdlp
= (ddi_cb_handle_t
)&(DEVI(dip
)->devi_cb_p
);
9059 return (DDI_SUCCESS
);
9063 ddi_cb_unregister(ddi_cb_handle_t hdl
)
9068 ASSERT(hdl
!= NULL
);
9070 /* Sanity check the context */
9071 ASSERT(!servicing_interrupt());
9072 if (servicing_interrupt())
9073 return (DDI_FAILURE
);
9075 /* Validate parameters */
9076 if ((hdl
== NULL
) || ((cbp
= *(ddi_cb_t
**)hdl
) == NULL
) ||
9077 ((dip
= cbp
->cb_dip
) == NULL
))
9078 return (DDI_EINVAL
);
9080 /* If removing an IRM callback, notify IRM */
9081 if (cbp
->cb_flags
& DDI_CB_FLAG_INTR
)
9082 i_ddi_irm_set_cb(dip
, B_FALSE
);
9084 /* Destroy the callback */
9085 kmem_free(cbp
, sizeof (ddi_cb_t
));
9086 DEVI(dip
)->devi_cb_p
= NULL
;
9088 return (DDI_SUCCESS
);
9092 * Platform independent DR routines
9124 * Prom tree node list
9128 struct ptnode
*next
;
9132 * Prom tree walk arg
9139 struct ptnode
*head
;
9143 visit_node(pnode_t nodeid
, struct pta
*ap
)
9145 struct ptnode
**nextp
;
9146 int (*select
)(pnode_t
, void *, uint_t
);
9148 ASSERT(nodeid
!= OBP_NONODE
&& nodeid
!= OBP_BADNODE
);
9150 select
= ap
->bp
->create
.prom_branch_select
;
9154 if (select(nodeid
, ap
->bp
->arg
, 0) == DDI_SUCCESS
) {
9156 for (nextp
= &ap
->head
; *nextp
; nextp
= &(*nextp
)->next
)
9159 *nextp
= kmem_zalloc(sizeof (struct ptnode
), KM_SLEEP
);
9161 (*nextp
)->nodeid
= nodeid
;
9164 if ((ap
->flags
& DEVI_BRANCH_CHILD
) == DEVI_BRANCH_CHILD
)
9167 nodeid
= prom_childnode(nodeid
);
9168 while (nodeid
!= OBP_NONODE
&& nodeid
!= OBP_BADNODE
) {
9169 visit_node(nodeid
, ap
);
9170 nodeid
= prom_nextnode(nodeid
);
9175 * NOTE: The caller of this function must check for device contracts
9176 * or LDI callbacks against this dip before setting the dip offline.
9179 set_infant_dip_offline(dev_info_t
*dip
, void *arg
)
9181 char *path
= (char *)arg
;
9186 if (i_ddi_node_state(dip
) >= DS_ATTACHED
) {
9187 (void) ddi_pathname(dip
, path
);
9188 cmn_err(CE_WARN
, "Attempt to set offline flag on attached "
9190 return (DDI_FAILURE
);
9193 mutex_enter(&(DEVI(dip
)->devi_lock
));
9194 if (!DEVI_IS_DEVICE_OFFLINE(dip
))
9195 DEVI_SET_DEVICE_OFFLINE(dip
);
9196 mutex_exit(&(DEVI(dip
)->devi_lock
));
9198 return (DDI_SUCCESS
);
/*
 * Walk result accumulator for dip_set_offline().
 * NOTE(review): fields reconstructed from usage (resp->result, resp->path,
 * res.result, res.path) — confirm against the original sunddi.c.
 */
typedef struct result {
	char	*path;		/* scratch pathname buffer */
	int	result;		/* overall DDI_SUCCESS/DDI_FAILURE */
} result_t;
9207 dip_set_offline(dev_info_t
*dip
, void *arg
)
9210 result_t
*resp
= (result_t
*)arg
;
9216 * We stop the walk if e_ddi_offline_notify() returns
9217 * failure, because this implies that one or more consumers
9218 * (either LDI or contract based) has blocked the offline.
9219 * So there is no point in conitnuing the walk
9221 if (e_ddi_offline_notify(dip
) == DDI_FAILURE
) {
9222 resp
->result
= DDI_FAILURE
;
9223 return (DDI_WALK_TERMINATE
);
9227 * If set_infant_dip_offline() returns failure, it implies
9228 * that we failed to set a particular dip offline. This
9229 * does not imply that the offline as a whole should fail.
9230 * We want to do the best we can, so we continue the walk.
9232 if (set_infant_dip_offline(dip
, resp
->path
) == DDI_SUCCESS
)
9237 e_ddi_offline_finalize(dip
, end
);
9239 return (DDI_WALK_CONTINUE
);
9243 * The call to e_ddi_offline_notify() exists for the
9244 * unlikely error case that a branch we are trying to
9245 * create already exists and has device contracts or LDI
9246 * event callbacks against it.
9248 * We allow create to succeed for such branches only if
9249 * no constraints block the offline.
9252 branch_set_offline(dev_info_t
*dip
, char *path
)
9259 if (e_ddi_offline_notify(dip
) == DDI_FAILURE
) {
9260 return (DDI_FAILURE
);
9263 if (set_infant_dip_offline(dip
, path
) == DDI_SUCCESS
)
9268 e_ddi_offline_finalize(dip
, end
);
9270 if (end
== DDI_FAILURE
)
9271 return (DDI_FAILURE
);
9273 res
.result
= DDI_SUCCESS
;
9276 ndi_devi_enter(dip
, &circ
);
9277 ddi_walk_devs(ddi_get_child(dip
), dip_set_offline
, &res
);
9278 ndi_devi_exit(dip
, circ
);
9280 return (res
.result
);
9285 create_prom_branch(void *arg
, int has_changed
)
9292 struct pta
*ap
= arg
;
9297 ASSERT(ap
->fdip
== NULL
);
9298 ASSERT(ap
->pdip
&& ndi_dev_is_prom_node(ap
->pdip
));
9302 nodeid
= ddi_get_nodeid(ap
->pdip
);
9303 if (nodeid
== OBP_NONODE
|| nodeid
== OBP_BADNODE
) {
9304 cmn_err(CE_WARN
, "create_prom_branch: invalid "
9305 "nodeid: 0x%x", nodeid
);
9311 nodeid
= prom_childnode(nodeid
);
9312 while (nodeid
!= OBP_NONODE
&& nodeid
!= OBP_BADNODE
) {
9313 visit_node(nodeid
, ap
);
9314 nodeid
= prom_nextnode(nodeid
);
9317 if (ap
->head
== NULL
)
9320 path
= kmem_alloc(MAXPATHLEN
, KM_SLEEP
);
9322 while ((tnp
= ap
->head
) != NULL
) {
9323 ap
->head
= tnp
->next
;
9325 ndi_devi_enter(ap
->pdip
, &circ
);
9328 * Check if the branch already exists.
9331 dip
= e_ddi_nodeid_to_dip(tnp
->nodeid
);
9335 /* Parent is held busy, so release hold */
9338 cmn_err(CE_WARN
, "create_prom_branch: dip(%p) exists"
9339 " for nodeid 0x%x", (void *)dip
, tnp
->nodeid
);
9342 dip
= i_ddi_create_branch(ap
->pdip
, tnp
->nodeid
);
9345 kmem_free(tnp
, sizeof (struct ptnode
));
9348 * Hold the branch if it is not already held
9350 if (dip
&& !exists
) {
9351 e_ddi_branch_hold(dip
);
9354 ASSERT(dip
== NULL
|| e_ddi_branch_held(dip
));
9357 * Set all dips in the newly created branch offline so that
9358 * only a "configure" operation can attach
9361 if (dip
== NULL
|| branch_set_offline(dip
, path
)
9363 ndi_devi_exit(ap
->pdip
, circ
);
9368 ASSERT(ddi_get_parent(dip
) == ap
->pdip
);
9370 ndi_devi_exit(ap
->pdip
, circ
);
9372 if (ap
->flags
& DEVI_BRANCH_CONFIGURE
) {
9373 int error
= e_ddi_branch_configure(dip
, &ap
->fdip
, 0);
9374 if (error
&& rv
== 0)
9379 * Invoke devi_branch_callback() (if it exists) only for
9380 * newly created branches
9382 if (bp
->devi_branch_callback
&& !exists
)
9383 bp
->devi_branch_callback(dip
, bp
->arg
, 0);
9386 kmem_free(path
, MAXPATHLEN
);
9392 sid_node_create(dev_info_t
*pdip
, devi_branch_t
*bp
, dev_info_t
**rdipp
)
9399 static const char *noname
= "<none>";
9402 ASSERT(DEVI_BUSY_OWNED(pdip
));
9407 * Creating the root of a branch ?
9411 flags
= DEVI_BRANCH_ROOT
;
9414 ndi_devi_alloc_sleep(pdip
, (char *)noname
, DEVI_SID_NODEID
, &dip
);
9415 rv
= bp
->create
.sid_branch_create(dip
, bp
->arg
, flags
);
9417 nbuf
= kmem_alloc(OBP_MAXDRVNAME
, KM_SLEEP
);
9419 if (rv
== DDI_WALK_ERROR
) {
9420 cmn_err(CE_WARN
, "e_ddi_branch_create: Error setting"
9421 " properties on devinfo node %p", (void *)dip
);
9425 len
= OBP_MAXDRVNAME
;
9426 if (ddi_getlongprop_buf(DDI_DEV_T_ANY
, dip
,
9427 DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
, "name", nbuf
, &len
)
9428 != DDI_PROP_SUCCESS
) {
9429 cmn_err(CE_WARN
, "e_ddi_branch_create: devinfo node %p has"
9430 "no name property", (void *)dip
);
9434 ASSERT(i_ddi_node_state(dip
) == DS_PROTO
);
9435 if (ndi_devi_set_nodename(dip
, nbuf
, 0) != NDI_SUCCESS
) {
9436 cmn_err(CE_WARN
, "e_ddi_branch_create: cannot set name (%s)"
9437 " for devinfo node %p", nbuf
, (void *)dip
);
9441 kmem_free(nbuf
, OBP_MAXDRVNAME
);
9444 * Ignore bind failures just like boot does
9446 (void) ndi_devi_bind_driver(dip
, 0);
9449 case DDI_WALK_CONTINUE
:
9450 case DDI_WALK_PRUNESIB
:
9451 ndi_devi_enter(dip
, &circ
);
9453 i
= DDI_WALK_CONTINUE
;
9454 for (; i
== DDI_WALK_CONTINUE
; ) {
9455 i
= sid_node_create(dip
, bp
, NULL
);
9458 ASSERT(i
== DDI_WALK_ERROR
|| i
== DDI_WALK_PRUNESIB
);
9459 if (i
== DDI_WALK_ERROR
)
9462 * If PRUNESIB stop creating siblings
9463 * of dip's child. Subsequent walk behavior
9464 * is determined by rv returned by dip.
9467 ndi_devi_exit(dip
, circ
);
9469 case DDI_WALK_TERMINATE
:
9471 * Don't create children and ask our parent
9472 * to not create siblings either.
9474 rv
= DDI_WALK_PRUNESIB
;
9476 case DDI_WALK_PRUNECHILD
:
9478 * Don't create children, but ask parent to continue
9481 rv
= DDI_WALK_CONTINUE
;
9492 * Set device offline - only the "configure" op should cause an attach.
9493 * Note that it is safe to set the dip offline without checking
9494 * for either device contract or layered driver (LDI) based constraints
9495 * since there cannot be any contracts or LDI opens of this device.
9496 * This is because this node is a newly created dip with the parent busy
9497 * held, so no other thread can come in and attach this dip. A dip that
9498 * has never been attached cannot have contracts since by definition
9499 * a device contract (an agreement between a process and a device minor
9500 * node) can only be created against a device that has minor nodes
9501 * i.e is attached. Similarly an LDI open will only succeed if the
9502 * dip is attached. We assert below that the dip is not attached.
9504 ASSERT(i_ddi_node_state(dip
) < DS_ATTACHED
);
9505 path
= kmem_alloc(MAXPATHLEN
, KM_SLEEP
);
9506 ret
= set_infant_dip_offline(dip
, path
);
9507 ASSERT(ret
== DDI_SUCCESS
);
9508 kmem_free(path
, MAXPATHLEN
);
9512 (void) ndi_devi_free(dip
);
9513 kmem_free(nbuf
, OBP_MAXDRVNAME
);
9514 return (DDI_WALK_ERROR
);
9524 int rv
= 0, state
= DDI_WALK_CONTINUE
;
9527 while (state
== DDI_WALK_CONTINUE
) {
9530 ndi_devi_enter(pdip
, &circ
);
9532 state
= sid_node_create(pdip
, bp
, &rdip
);
9534 ndi_devi_exit(pdip
, circ
);
9535 ASSERT(state
== DDI_WALK_ERROR
);
9539 e_ddi_branch_hold(rdip
);
9541 ndi_devi_exit(pdip
, circ
);
9543 if (flags
& DEVI_BRANCH_CONFIGURE
) {
9544 int error
= e_ddi_branch_configure(rdip
, dipp
, 0);
9545 if (error
&& rv
== 0)
9550 * devi_branch_callback() is optional
9552 if (bp
->devi_branch_callback
)
9553 bp
->devi_branch_callback(rdip
, bp
->arg
, 0);
9556 ASSERT(state
== DDI_WALK_ERROR
|| state
== DDI_WALK_PRUNESIB
);
9558 return (state
== DDI_WALK_ERROR
? EIO
: rv
);
9562 e_ddi_branch_create(
9568 int prom_devi
, sid_devi
, error
;
9570 if (pdip
== NULL
|| bp
== NULL
|| bp
->type
== 0)
9573 prom_devi
= (bp
->type
== DEVI_BRANCH_PROM
) ? 1 : 0;
9574 sid_devi
= (bp
->type
== DEVI_BRANCH_SID
) ? 1 : 0;
9576 if (prom_devi
&& bp
->create
.prom_branch_select
== NULL
)
9578 else if (sid_devi
&& bp
->create
.sid_branch_create
== NULL
)
9580 else if (!prom_devi
&& !sid_devi
)
9583 if (flags
& DEVI_BRANCH_EVENT
)
9587 struct pta pta
= {0};
9593 error
= prom_tree_access(create_prom_branch
, &pta
, NULL
);
9598 ndi_rele_devi(pta
.fdip
);
9600 error
= create_sid_branch(pdip
, bp
, dipp
, flags
);
9607 e_ddi_branch_configure(dev_info_t
*rdip
, dev_info_t
**dipp
, uint_t flags
)
9616 if (rdip
== NULL
|| flags
!= 0 || (flags
& DEVI_BRANCH_EVENT
))
9619 pdip
= ddi_get_parent(rdip
);
9621 ndi_hold_devi(pdip
);
9623 if (!e_ddi_branch_held(rdip
)) {
9624 ndi_rele_devi(pdip
);
9625 cmn_err(CE_WARN
, "e_ddi_branch_configure: "
9626 "dip(%p) not held", (void *)rdip
);
9630 if (i_ddi_node_state(rdip
) < DS_INITIALIZED
) {
9632 * First attempt to bind a driver. If we fail, return
9633 * success (On some platforms, dips for some device
9634 * types (CPUs) may not have a driver)
9636 if (ndi_devi_bind_driver(rdip
, 0) != NDI_SUCCESS
) {
9637 ndi_rele_devi(pdip
);
9641 if (ddi_initchild(pdip
, rdip
) != DDI_SUCCESS
) {
9647 ASSERT(i_ddi_node_state(rdip
) >= DS_INITIALIZED
);
9649 devnm
= kmem_alloc(MAXNAMELEN
+ 1, KM_SLEEP
);
9651 (void) ddi_deviname(rdip
, devnm
);
9653 if ((rv
= ndi_devi_config_one(pdip
, devnm
+1, &rdip
,
9654 NDI_DEVI_ONLINE
| NDI_CONFIG
)) == NDI_SUCCESS
) {
9655 /* release hold from ndi_devi_config_one() */
9656 ndi_rele_devi(rdip
);
9659 kmem_free(devnm
, MAXNAMELEN
+ 1);
9661 if (rv
!= NDI_SUCCESS
&& dipp
&& rdip
) {
9662 ndi_hold_devi(rdip
);
9665 ndi_rele_devi(pdip
);
9666 return (ndi2errno(rv
));
9670 e_ddi_branch_hold(dev_info_t
*rdip
)
9672 if (e_ddi_branch_held(rdip
)) {
9673 cmn_err(CE_WARN
, "e_ddi_branch_hold: branch already held");
9677 mutex_enter(&DEVI(rdip
)->devi_lock
);
9678 if ((DEVI(rdip
)->devi_flags
& DEVI_BRANCH_HELD
) == 0) {
9679 DEVI(rdip
)->devi_flags
|= DEVI_BRANCH_HELD
;
9680 DEVI(rdip
)->devi_ref
++;
9682 ASSERT(DEVI(rdip
)->devi_ref
> 0);
9683 mutex_exit(&DEVI(rdip
)->devi_lock
);
9687 e_ddi_branch_held(dev_info_t
*rdip
)
9691 mutex_enter(&DEVI(rdip
)->devi_lock
);
9692 if ((DEVI(rdip
)->devi_flags
& DEVI_BRANCH_HELD
) &&
9693 DEVI(rdip
)->devi_ref
> 0) {
9696 mutex_exit(&DEVI(rdip
)->devi_lock
);
9702 e_ddi_branch_rele(dev_info_t
*rdip
)
9704 mutex_enter(&DEVI(rdip
)->devi_lock
);
9705 DEVI(rdip
)->devi_flags
&= ~DEVI_BRANCH_HELD
;
9706 DEVI(rdip
)->devi_ref
--;
9707 mutex_exit(&DEVI(rdip
)->devi_lock
);
9711 e_ddi_branch_unconfigure(
9728 pdip
= ddi_get_parent(rdip
);
9733 * Check if caller holds pdip busy - can cause deadlocks during
9736 if (DEVI_BUSY_OWNED(pdip
)) {
9737 cmn_err(CE_WARN
, "e_ddi_branch_unconfigure: failed: parent"
9738 " devinfo node(%p) is busy held", (void *)pdip
);
9742 destroy
= (flags
& DEVI_BRANCH_DESTROY
) ? 1 : 0;
9744 devnm
= kmem_alloc(MAXNAMELEN
+ 1, KM_SLEEP
);
9746 ndi_devi_enter(pdip
, &circ
);
9747 (void) ddi_deviname(rdip
, devnm
);
9748 ndi_devi_exit(pdip
, circ
);
9751 * ddi_deviname() returns a component name with / prepended.
9753 (void) devfs_clean(pdip
, devnm
+ 1, DV_CLEAN_FORCE
);
9755 ndi_devi_enter(pdip
, &circ
);
9758 * Recreate device name as it may have changed state (init/uninit)
9759 * when parent busy lock was dropped for devfs_clean()
9761 (void) ddi_deviname(rdip
, devnm
);
9763 if (!e_ddi_branch_held(rdip
)) {
9764 kmem_free(devnm
, MAXNAMELEN
+ 1);
9765 ndi_devi_exit(pdip
, circ
);
9766 cmn_err(CE_WARN
, "e_ddi_%s_branch: dip(%p) not held",
9767 destroy
? "destroy" : "unconfigure", (void *)rdip
);
9772 * Release hold on the branch. This is ok since we are holding the
9773 * parent busy. If rdip is not removed, we must do a hold on the
9774 * branch before returning.
9776 e_ddi_branch_rele(rdip
);
9778 nflags
= NDI_DEVI_OFFLINE
;
9779 if (destroy
|| (flags
& DEVI_BRANCH_DESTROY
)) {
9780 nflags
|= NDI_DEVI_REMOVE
;
9783 nflags
|= NDI_UNCONFIG
; /* uninit but don't remove */
9786 if (flags
& DEVI_BRANCH_EVENT
)
9787 nflags
|= NDI_POST_EVENT
;
9789 if (i_ddi_devi_attached(pdip
) &&
9790 (i_ddi_node_state(rdip
) >= DS_INITIALIZED
)) {
9791 rv
= ndi_devi_unconfig_one(pdip
, devnm
+1, dipp
, nflags
);
9793 rv
= e_ddi_devi_unconfig(rdip
, dipp
, nflags
);
9794 if (rv
== NDI_SUCCESS
) {
9795 ASSERT(!destroy
|| ddi_get_child(rdip
) == NULL
);
9796 rv
= ndi_devi_offline(rdip
, nflags
);
9800 if (!destroy
|| rv
!= NDI_SUCCESS
) {
9801 /* The dip still exists, so do a hold */
9802 e_ddi_branch_hold(rdip
);
9805 kmem_free(devnm
, MAXNAMELEN
+ 1);
9806 ndi_devi_exit(pdip
, circ
);
9807 return (ndi2errno(rv
));
9811 e_ddi_branch_destroy(dev_info_t
*rdip
, dev_info_t
**dipp
, uint_t flag
)
9813 return (e_ddi_branch_unconfigure(rdip
, dipp
,
9814 flag
|DEVI_BRANCH_DESTROY
));
9818 * Number of chains for hash table
9820 #define NUMCHAINS 17
9828 mod_hash_t
*dv_hash
;
9830 int (*callback
)(dev_info_t
*, void *, uint_t
);
9835 visit_dip(dev_info_t
*dip
, void *arg
)
9837 uintptr_t sbusy
, dvbusy
, ref
;
9838 struct devi_busy
*bsp
= arg
;
9840 ASSERT(bsp
->callback
);
9843 * A dip cannot be busy if its reference count is 0
9845 if ((ref
= e_ddi_devi_holdcnt(dip
)) == 0) {
9846 return (bsp
->callback(dip
, bsp
->arg
, 0));
9849 if (mod_hash_find(bsp
->dv_hash
, dip
, (mod_hash_val_t
*)&dvbusy
))
9853 * To catch device opens currently maintained on specfs common snodes.
9855 if (mod_hash_find(bsp
->s_hash
, dip
, (mod_hash_val_t
*)&sbusy
))
9859 if (ref
< sbusy
|| ref
< dvbusy
) {
9860 cmn_err(CE_WARN
, "dip(%p): sopen = %lu, dvopen = %lu "
9861 "dip ref = %lu\n", (void *)dip
, sbusy
, dvbusy
, ref
);
9865 dvbusy
= (sbusy
> dvbusy
) ? sbusy
: dvbusy
;
9867 return (bsp
->callback(dip
, bsp
->arg
, dvbusy
));
9871 visit_snode(struct snode
*sp
, void *arg
)
9876 struct devi_busy
*bsp
= arg
;
9881 * The stable lock is held. This prevents
9882 * the snode and its associated dip from
9886 count
= spec_devi_open_count(sp
, &dip
);
9889 return (DDI_WALK_CONTINUE
);
9893 if (mod_hash_remove(bsp
->s_hash
, dip
, (mod_hash_val_t
*)&sbusy
))
9898 if (mod_hash_insert(bsp
->s_hash
, dip
, (mod_hash_val_t
)sbusy
)) {
9899 cmn_err(CE_WARN
, "%s: s_hash insert failed: dip=0x%p, "
9900 "sbusy = %lu", "e_ddi_branch_referenced",
9901 (void *)dip
, sbusy
);
9904 bsp
->s_total
+= count
;
9906 return (DDI_WALK_CONTINUE
);
9910 visit_dvnode(struct dv_node
*dv
, void *arg
)
9915 struct devi_busy
*bsp
= arg
;
9917 ASSERT(dv
&& dv
->dv_devi
);
9921 mutex_enter(&vp
->v_lock
);
9922 count
= vp
->v_count
;
9923 mutex_exit(&vp
->v_lock
);
9928 if (mod_hash_remove(bsp
->dv_hash
, dv
->dv_devi
,
9929 (mod_hash_val_t
*)&dvbusy
))
9934 if (mod_hash_insert(bsp
->dv_hash
, dv
->dv_devi
,
9935 (mod_hash_val_t
)dvbusy
)) {
9936 cmn_err(CE_WARN
, "%s: dv_hash insert failed: dip=0x%p, "
9937 "dvbusy=%lu", "e_ddi_branch_referenced",
9938 (void *)dv
->dv_devi
, dvbusy
);
9941 bsp
->dv_total
+= count
;
9945 * Returns reference count on success or -1 on failure.
9948 e_ddi_branch_referenced(
9950 int (*callback
)(dev_info_t
*dip
, void *arg
, uint_t ref
),
9956 struct devi_busy bsa
= {0};
9960 path
= kmem_alloc(MAXPATHLEN
, KM_SLEEP
);
9962 ndi_hold_devi(rdip
);
9964 pdip
= ddi_get_parent(rdip
);
9969 * Check if caller holds pdip busy - can cause deadlocks during
9972 if (!e_ddi_branch_held(rdip
) || DEVI_BUSY_OWNED(pdip
)) {
9973 cmn_err(CE_WARN
, "e_ddi_branch_referenced: failed: "
9974 "devinfo branch(%p) not held or parent busy held",
9976 ndi_rele_devi(rdip
);
9977 kmem_free(path
, MAXPATHLEN
);
9981 ndi_devi_enter(pdip
, &circ
);
9982 (void) ddi_pathname(rdip
, path
);
9983 ndi_devi_exit(pdip
, circ
);
9985 bsa
.dv_hash
= mod_hash_create_ptrhash("dv_node busy hash", NUMCHAINS
,
9986 mod_hash_null_valdtor
, sizeof (struct dev_info
));
9988 bsa
.s_hash
= mod_hash_create_ptrhash("snode busy hash", NUMCHAINS
,
9989 mod_hash_null_valdtor
, sizeof (struct snode
));
9991 if (devfs_walk(path
, visit_dvnode
, &bsa
)) {
9992 cmn_err(CE_WARN
, "e_ddi_branch_referenced: "
9993 "devfs walk failed for: %s", path
);
9994 kmem_free(path
, MAXPATHLEN
);
9995 bsa
.s_total
= bsa
.dv_total
= -1;
9999 kmem_free(path
, MAXPATHLEN
);
10002 * Walk the snode table to detect device opens, which are currently
10003 * maintained on specfs common snodes.
10005 spec_snode_walk(visit_snode
, &bsa
);
10007 if (callback
== NULL
)
10010 bsa
.callback
= callback
;
10013 if (visit_dip(rdip
, &bsa
) == DDI_WALK_CONTINUE
) {
10014 ndi_devi_enter(rdip
, &circ
);
10015 ddi_walk_devs(ddi_get_child(rdip
), visit_dip
, &bsa
);
10016 ndi_devi_exit(rdip
, circ
);
10020 ndi_rele_devi(rdip
);
10021 mod_hash_destroy_ptrhash(bsa
.s_hash
);
10022 mod_hash_destroy_ptrhash(bsa
.dv_hash
);
10023 return (bsa
.s_total
> bsa
.dv_total
? bsa
.s_total
: bsa
.dv_total
);