4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
30 #include <sys/types.h>
31 #include <sys/sysmacros.h>
33 #include <sys/errno.h>
34 #include <sys/modctl.h>
39 #include <sys/cpuvar.h>
40 #include <sys/ddi_impldefs.h>
42 #include <sys/fm/protocol.h>
43 #include <sys/fm/util.h>
44 #include <sys/fm/io/ddi.h>
45 #include <sys/sysevent/eventdefs.h>
46 #include <sys/sunddi.h>
47 #include <sys/sunndi.h>
48 #include <sys/debug.h>
51 #include <sys/bofi_impl.h>
54 * Testing the resilience of a hardened device driver requires a suitably wide
55 * range of different types of "typical" hardware faults to be injected,
56 * preferably in a controlled and repeatable fashion. This is not in general
57 * possible via hardware, so the "fault injection test harness" is provided.
58 * This works by intercepting calls from the driver to various DDI routines,
59 * and then corrupting the result of those DDI routine calls as if the
60 * hardware had caused the corruption.
62 * Conceptually, the bofi driver consists of two parts:
64 * A driver interface that supports a number of ioctls which allow error
65 * definitions ("errdefs") to be defined and subsequently managed. The
66 * driver is a clone driver, so each open will create a separate
67 * invocation. Any errdefs created by using ioctls to that invocation
68 * will automatically be deleted when that invocation is closed.
70 * Intercept routines: When the bofi driver is attached, it edits the
71 * bus_ops structure of the bus nexus specified by the "bofi-nexus"
72 * field in the "bofi.conf" file, thus allowing the
73 * bofi driver to intercept various ddi functions. These intercept
74 * routines primarily carry out fault injections based on the errdefs
75 * created for that device.
77 * Faults can be injected into:
79 * DMA (corrupting data for DMA to/from memory areas defined by
80 * ddi_dma_setup(), ddi_dma_bind_handle(), etc)
82 * Physical IO (corrupting data sent/received via ddi_get8(), ddi_put8(),
85 * Interrupts (generating spurious interrupts, losing interrupts,
86 * delaying interrupts).
88 * By default, ddi routines called from all drivers will be intercepted
89 * and faults potentially injected. However, the "bofi-to-test" field in
90 * the "bofi.conf" file can be set to a space-separated list of drivers to
91 * test (or by preceding each driver name in the list with an "!", a list
92 * of drivers not to test).
94 * In addition to fault injection, the bofi driver does a number of static
95 * checks which are controlled by properties in the "bofi.conf" file.
97 * "bofi-ddi-check" - if set will validate that there are no PIO access
98 * other than those using the DDI routines (ddi_get8(), ddi_put8(), etc).
100 * "bofi-range-check" - if set to values 1 (warning) or 2 (panic), will
101 * validate that calls to ddi_get8(), ddi_put8(), etc are not made
102 * specifying addresses outside the range of the access_handle.
104 * "bofi-sync-check" - if set will validate that calls to ddi_dma_sync()
105 * are being made correctly.
108 extern void *bp_mapin_common(struct buf
*, int);
110 static int bofi_ddi_check
;
111 static int bofi_sync_check
;
112 static int bofi_range_check
;
114 static struct bofi_link bofi_link_array
[BOFI_NLINKS
], *bofi_link_freelist
;
116 #define LLSZMASK (sizeof (uint64_t)-1)
118 #define HDL_HASH_TBL_SIZE 64
119 static struct bofi_shadow hhash_table
[HDL_HASH_TBL_SIZE
];
120 static struct bofi_shadow dhash_table
[HDL_HASH_TBL_SIZE
];
121 #define HDL_DHASH(x) \
122 (&dhash_table[((uintptr_t)(x) >> 3) & (HDL_HASH_TBL_SIZE-1)])
123 #define HDL_HHASH(x) \
124 (&hhash_table[((uintptr_t)(x) >> 5) & (HDL_HASH_TBL_SIZE-1)])
126 static struct bofi_shadow shadow_list
;
127 static struct bofi_errent
*errent_listp
;
129 static char driver_list
[NAMESIZE
];
130 static int driver_list_size
;
131 static int driver_list_neg
;
132 static char nexus_name
[NAMESIZE
];
134 static int initialized
= 0;
137 static int clone_tab
[NCLONES
];
139 static dev_info_t
*our_dip
;
141 static kmutex_t bofi_mutex
;
142 static kmutex_t clone_tab_mutex
;
143 static kmutex_t bofi_low_mutex
;
144 static ddi_iblock_cookie_t bofi_low_cookie
;
145 static uint_t
bofi_signal(caddr_t arg
);
146 static int bofi_getinfo(dev_info_t
*, ddi_info_cmd_t
, void *, void **);
147 static int bofi_attach(dev_info_t
*, ddi_attach_cmd_t
);
148 static int bofi_detach(dev_info_t
*, ddi_detach_cmd_t
);
149 static int bofi_open(dev_t
*, int, int, cred_t
*);
150 static int bofi_close(dev_t
, int, int, cred_t
*);
151 static int bofi_ioctl(dev_t
, int, intptr_t, int, cred_t
*, int *);
152 static int bofi_errdef_alloc(struct bofi_errdef
*, char *,
153 struct bofi_errent
*);
154 static int bofi_errdef_free(struct bofi_errent
*);
155 static void bofi_start(struct bofi_errctl
*, char *);
156 static void bofi_stop(struct bofi_errctl
*, char *);
157 static void bofi_broadcast(struct bofi_errctl
*, char *);
158 static void bofi_clear_acc_chk(struct bofi_errctl
*, char *);
159 static void bofi_clear_errors(struct bofi_errctl
*, char *);
160 static void bofi_clear_errdefs(struct bofi_errctl
*, char *);
161 static int bofi_errdef_check(struct bofi_errstate
*,
162 struct acc_log_elem
**);
163 static int bofi_errdef_check_w(struct bofi_errstate
*,
164 struct acc_log_elem
**);
165 static int bofi_map(dev_info_t
*, dev_info_t
*, ddi_map_req_t
*,
166 off_t
, off_t
, caddr_t
*);
167 static int bofi_dma_allochdl(dev_info_t
*, dev_info_t
*,
168 ddi_dma_attr_t
*, int (*)(caddr_t
), caddr_t
,
170 static int bofi_dma_freehdl(dev_info_t
*, dev_info_t
*,
172 static int bofi_dma_bindhdl(dev_info_t
*, dev_info_t
*,
173 ddi_dma_handle_t
, struct ddi_dma_req
*, ddi_dma_cookie_t
*,
175 static int bofi_dma_unbindhdl(dev_info_t
*, dev_info_t
*,
177 static int bofi_dma_flush(dev_info_t
*, dev_info_t
*, ddi_dma_handle_t
,
178 off_t
, size_t, uint_t
);
179 static int bofi_dma_ctl(dev_info_t
*, dev_info_t
*, ddi_dma_handle_t
,
180 enum ddi_dma_ctlops
, off_t
*, size_t *, caddr_t
*, uint_t
);
181 static int bofi_dma_win(dev_info_t
*, dev_info_t
*, ddi_dma_handle_t
,
182 uint_t
, off_t
*, size_t *, ddi_dma_cookie_t
*, uint_t
*);
183 static int bofi_intr_ops(dev_info_t
*dip
, dev_info_t
*rdip
,
184 ddi_intr_op_t intr_op
, ddi_intr_handle_impl_t
*hdlp
,
186 static int bofi_fm_ereport_callback(sysevent_t
*ev
, void *cookie
);
188 evchan_t
*bofi_error_chan
;
190 #define FM_SIMULATED_DMA "simulated.dma"
191 #define FM_SIMULATED_PIO "simulated.pio"
194 static void bofi_dvma_kaddr_load(ddi_dma_handle_t
, caddr_t
, uint_t
,
195 uint_t
, ddi_dma_cookie_t
*);
196 static void bofi_dvma_unload(ddi_dma_handle_t
, uint_t
, uint_t
);
197 static void bofi_dvma_sync(ddi_dma_handle_t
, uint_t
, uint_t
);
198 static void bofi_dvma_reserve(dev_info_t
*, ddi_dma_handle_t
);
200 static int driver_under_test(dev_info_t
*);
201 static int bofi_check_acc_hdl(ddi_acc_impl_t
*);
202 static int bofi_check_dma_hdl(ddi_dma_impl_t
*);
203 static int bofi_post_event(dev_info_t
*dip
, dev_info_t
*rdip
,
204 ddi_eventcookie_t eventhdl
, void *impl_data
);
206 static struct bus_ops bofi_bus_ops
= {
223 ndi_busop_get_eventcookie
,
224 ndi_busop_add_eventcall
,
225 ndi_busop_remove_eventcall
,
238 static struct cb_ops bofi_cb_ops
= {
239 bofi_open
, /* open */
240 bofi_close
, /* close */
241 nodev
, /* strategy */
246 bofi_ioctl
, /* ioctl */
250 nochpoll
, /* chpoll */
251 ddi_prop_op
, /* prop_op */
252 NULL
, /* for STREAMS drivers */
253 D_MP
, /* driver compatibility flag */
254 CB_REV
, /* cb_ops revision */
259 static struct dev_ops bofi_ops
= {
260 DEVO_REV
, /* driver build version */
261 0, /* device reference count */
269 (struct bus_ops
*)NULL
,
271 ddi_quiesce_not_needed
, /* quiesce */
274 /* module configuration stuff */
277 static struct modldrv modldrv
= {
283 static struct modlinkage modlinkage
= {
289 static struct bus_ops save_bus_ops
;
292 static struct dvma_ops bofi_dvma_ops
= {
294 bofi_dvma_kaddr_load
,
301 * support routine - map user page into kernel virtual
304 dmareq_mapin(offset_t len
, caddr_t addr
, struct as
*as
, int flag
)
310 * mock up a buf structure so we can call bp_mapin_common()
312 buf
.b_flags
= B_PHYS
;
313 buf
.b_un
.b_addr
= (caddr_t
)addr
;
314 buf
.b_bcount
= (size_t)len
;
317 return (bp_mapin_common(&buf
, flag
));
322 * support routine - map page chain into kernel virtual
325 dmareq_pp_mapin(offset_t len
, uint_t offset
, page_t
*pp
, int flag
)
330 * mock up a buf structure so we can call bp_mapin_common()
332 buf
.b_flags
= B_PAGEIO
;
333 buf
.b_un
.b_addr
= (caddr_t
)(uintptr_t)offset
;
334 buf
.b_bcount
= (size_t)len
;
336 return (bp_mapin_common(&buf
, flag
));
341 * support routine - map page array into kernel virtual
344 dmareq_pplist_mapin(uint_t len
, caddr_t addr
, page_t
**pplist
, struct as
*as
,
351 * mock up a buf structure so we can call bp_mapin_common()
353 buf
.b_flags
= B_PHYS
|B_SHADOW
;
354 buf
.b_un
.b_addr
= addr
;
356 buf
.b_shadow
= pplist
;
359 return (bp_mapin_common(&buf
, flag
));
364 * support routine - map dmareq into kernel virtual if not already
365 * fills in *lenp with length
366 * *mapaddr will be new kernel virtual address - or null if no mapping needed
369 ddi_dmareq_mapin(struct ddi_dma_req
*dmareqp
, caddr_t
*mapaddrp
,
372 int sleep
= (dmareqp
->dmar_fp
== DDI_DMA_SLEEP
) ? VM_SLEEP
: VM_NOSLEEP
;
374 *lenp
= dmareqp
->dmar_object
.dmao_size
;
375 if (dmareqp
->dmar_object
.dmao_type
== DMA_OTYP_PAGES
) {
376 *mapaddrp
= dmareq_pp_mapin(dmareqp
->dmar_object
.dmao_size
,
377 dmareqp
->dmar_object
.dmao_obj
.pp_obj
.pp_offset
,
378 dmareqp
->dmar_object
.dmao_obj
.pp_obj
.pp_pp
, sleep
);
380 } else if (dmareqp
->dmar_object
.dmao_obj
.virt_obj
.v_priv
!= NULL
) {
381 *mapaddrp
= dmareq_pplist_mapin(dmareqp
->dmar_object
.dmao_size
,
382 dmareqp
->dmar_object
.dmao_obj
.virt_obj
.v_addr
,
383 dmareqp
->dmar_object
.dmao_obj
.virt_obj
.v_priv
,
384 dmareqp
->dmar_object
.dmao_obj
.virt_obj
.v_as
, sleep
);
386 } else if (dmareqp
->dmar_object
.dmao_obj
.virt_obj
.v_as
== &kas
) {
388 return (dmareqp
->dmar_object
.dmao_obj
.virt_obj
.v_addr
);
389 } else if (dmareqp
->dmar_object
.dmao_obj
.virt_obj
.v_as
== NULL
) {
391 return (dmareqp
->dmar_object
.dmao_obj
.virt_obj
.v_addr
);
393 *mapaddrp
= dmareq_mapin(dmareqp
->dmar_object
.dmao_size
,
394 dmareqp
->dmar_object
.dmao_obj
.virt_obj
.v_addr
,
395 dmareqp
->dmar_object
.dmao_obj
.virt_obj
.v_as
, sleep
);
402 * support routine - free off kernel virtual mapping as allocated by
406 ddi_dmareq_mapout(caddr_t addr
, offset_t len
, int map_flags
, page_t
*pp
,
414 * mock up a buf structure
416 buf
.b_flags
= B_REMAPPED
| map_flags
;
417 buf
.b_un
.b_addr
= addr
;
418 buf
.b_bcount
= (size_t)len
;
420 buf
.b_shadow
= pplist
;
434 * reset the bus_ops structure of the specified nexus to point to
435 * the original values in the save_bus_ops structure.
437 * Note that both this routine and modify_bus_ops() rely on the current
438 * behavior of the framework in that nexus drivers are not unloadable
443 reset_bus_ops(char *name
, struct bus_ops
*bop
)
450 mutex_enter(&mod_lock
);
452 * find specified module
456 if (strcmp(name
, modp
->mod_modname
) == 0) {
457 if (!modp
->mod_linkage
) {
458 mutex_exit(&mod_lock
);
461 mp
= modp
->mod_linkage
->ml_linkage
[0];
462 if (!mp
|| !mp
->drv_dev_ops
) {
463 mutex_exit(&mod_lock
);
466 ops
= mp
->drv_dev_ops
;
467 bp
= ops
->devo_bus_ops
;
469 mutex_exit(&mod_lock
);
472 if (ops
->devo_refcnt
> 0) {
474 * As long as devices are active with modified
475 * bus ops bofi must not go away. There may be
476 * drivers with modified access or dma handles.
478 mutex_exit(&mod_lock
);
481 cmn_err(CE_NOTE
, "bofi reset bus_ops for %s",
483 bp
->bus_intr_op
= bop
->bus_intr_op
;
484 bp
->bus_post_event
= bop
->bus_post_event
;
485 bp
->bus_map
= bop
->bus_map
;
486 bp
->bus_dma_map
= bop
->bus_dma_map
;
487 bp
->bus_dma_allochdl
= bop
->bus_dma_allochdl
;
488 bp
->bus_dma_freehdl
= bop
->bus_dma_freehdl
;
489 bp
->bus_dma_bindhdl
= bop
->bus_dma_bindhdl
;
490 bp
->bus_dma_unbindhdl
= bop
->bus_dma_unbindhdl
;
491 bp
->bus_dma_flush
= bop
->bus_dma_flush
;
492 bp
->bus_dma_win
= bop
->bus_dma_win
;
493 bp
->bus_dma_ctl
= bop
->bus_dma_ctl
;
494 mutex_exit(&mod_lock
);
497 } while ((modp
= modp
->mod_next
) != &modules
);
498 mutex_exit(&mod_lock
);
503 * modify the bus_ops structure of the specified nexus to point to bofi
504 * routines, saving the original values in the save_bus_ops structure
508 modify_bus_ops(char *name
, struct bus_ops
*bop
)
515 if (ddi_name_to_major(name
) == -1)
518 mutex_enter(&mod_lock
);
520 * find specified module
524 if (strcmp(name
, modp
->mod_modname
) == 0) {
525 if (!modp
->mod_linkage
) {
526 mutex_exit(&mod_lock
);
529 mp
= modp
->mod_linkage
->ml_linkage
[0];
530 if (!mp
|| !mp
->drv_dev_ops
) {
531 mutex_exit(&mod_lock
);
534 ops
= mp
->drv_dev_ops
;
535 bp
= ops
->devo_bus_ops
;
537 mutex_exit(&mod_lock
);
540 if (ops
->devo_refcnt
== 0) {
542 * If there is no device active for this
543 * module then there is nothing to do for bofi.
545 mutex_exit(&mod_lock
);
548 cmn_err(CE_NOTE
, "bofi modify bus_ops for %s",
551 bp
->bus_intr_op
= bop
->bus_intr_op
;
552 bp
->bus_post_event
= bop
->bus_post_event
;
553 bp
->bus_map
= bop
->bus_map
;
554 bp
->bus_dma_map
= bop
->bus_dma_map
;
555 bp
->bus_dma_allochdl
= bop
->bus_dma_allochdl
;
556 bp
->bus_dma_freehdl
= bop
->bus_dma_freehdl
;
557 bp
->bus_dma_bindhdl
= bop
->bus_dma_bindhdl
;
558 bp
->bus_dma_unbindhdl
= bop
->bus_dma_unbindhdl
;
559 bp
->bus_dma_flush
= bop
->bus_dma_flush
;
560 bp
->bus_dma_win
= bop
->bus_dma_win
;
561 bp
->bus_dma_ctl
= bop
->bus_dma_ctl
;
562 mutex_exit(&mod_lock
);
565 } while ((modp
= modp
->mod_next
) != &modules
);
566 mutex_exit(&mod_lock
);
576 e
= ddi_soft_state_init(&statep
, sizeof (struct bofi_errent
), 1);
579 if ((e
= mod_install(&modlinkage
)) != 0)
580 ddi_soft_state_fini(&statep
);
590 if ((e
= mod_remove(&modlinkage
)) != 0)
592 ddi_soft_state_fini(&statep
);
598 _info(struct modinfo
*modinfop
)
600 return (mod_info(&modlinkage
, modinfop
));
605 bofi_attach(dev_info_t
*dip
, ddi_attach_cmd_t cmd
)
615 if (cmd
!= DDI_ATTACH
)
616 return (DDI_FAILURE
);
618 * only one instance - but we clone using the open routine
620 if (ddi_get_instance(dip
) > 0)
621 return (DDI_FAILURE
);
624 if ((name
= ddi_get_name(dip
)) == NULL
)
625 return (DDI_FAILURE
);
626 (void) snprintf(buf
, sizeof (buf
), "%s,ctl", name
);
627 if (ddi_create_minor_node(dip
, buf
, S_IFCHR
, 0,
628 DDI_PSEUDO
, NULL
) == DDI_FAILURE
)
629 return (DDI_FAILURE
);
631 if (ddi_get_soft_iblock_cookie(dip
, DDI_SOFTINT_MED
,
632 &bofi_low_cookie
) != DDI_SUCCESS
) {
633 ddi_remove_minor_node(dip
, buf
);
634 return (DDI_FAILURE
); /* fail attach */
637 * get nexus name (from conf file)
639 if (ddi_prop_op(DDI_DEV_T_ANY
, dip
, PROP_LEN_AND_VAL_BUF
, 0,
640 "bofi-nexus", nexus_name
, &size
) != DDI_PROP_SUCCESS
) {
641 ddi_remove_minor_node(dip
, buf
);
642 return (DDI_FAILURE
);
645 * get whether to do dma map kmem private checking
647 if ((bofi_range_check
= ddi_prop_lookup_string(DDI_DEV_T_ANY
,
648 dip
, 0, "bofi-range-check", &ptr
)) != DDI_PROP_SUCCESS
)
649 bofi_range_check
= 0;
650 else if (strcmp(ptr
, "panic") == 0)
651 bofi_range_check
= 2;
652 else if (strcmp(ptr
, "warn") == 0)
653 bofi_range_check
= 1;
655 bofi_range_check
= 0;
659 * get whether to prevent direct access to register
661 if ((bofi_ddi_check
= ddi_prop_lookup_string(DDI_DEV_T_ANY
,
662 dip
, 0, "bofi-ddi-check", &ptr
)) != DDI_PROP_SUCCESS
)
664 else if (strcmp(ptr
, "on") == 0)
671 * get whether to do copy on ddi_dma_sync
673 if ((bofi_sync_check
= ddi_prop_lookup_string(DDI_DEV_T_ANY
,
674 dip
, 0, "bofi-sync-check", &ptr
)) != DDI_PROP_SUCCESS
)
676 else if (strcmp(ptr
, "on") == 0)
683 * get driver-under-test names (from conf file)
686 if (ddi_prop_op(DDI_DEV_T_ANY
, dip
, PROP_LEN_AND_VAL_BUF
, 0,
687 "bofi-to-test", driver_list
, &size
) != DDI_PROP_SUCCESS
)
690 * and convert into a sequence of strings
694 driver_list_size
= strlen(driver_list
);
695 for (i
= 0; i
< driver_list_size
; i
++) {
696 if (driver_list
[i
] == ' ') {
697 driver_list
[i
] = '\0';
699 } else if (new_string
) {
700 if (driver_list
[i
] != '!')
706 * initialize mutex, lists
708 mutex_init(&clone_tab_mutex
, NULL
, MUTEX_DRIVER
,
711 * fake up iblock cookie - need to protect ourselves
712 * against drivers that use hilevel interrupts
717 mutex_init(&bofi_mutex
, NULL
, MUTEX_SPIN
, (void *)(uintptr_t)s
);
718 mutex_init(&bofi_low_mutex
, NULL
, MUTEX_DRIVER
,
719 (void *)bofi_low_cookie
);
720 shadow_list
.next
= &shadow_list
;
721 shadow_list
.prev
= &shadow_list
;
722 for (i
= 0; i
< HDL_HASH_TBL_SIZE
; i
++) {
723 hhash_table
[i
].hnext
= &hhash_table
[i
];
724 hhash_table
[i
].hprev
= &hhash_table
[i
];
725 dhash_table
[i
].dnext
= &dhash_table
[i
];
726 dhash_table
[i
].dprev
= &dhash_table
[i
];
728 for (i
= 1; i
< BOFI_NLINKS
; i
++)
729 bofi_link_array
[i
].link
= &bofi_link_array
[i
-1];
730 bofi_link_freelist
= &bofi_link_array
[BOFI_NLINKS
- 1];
732 * overlay bus_ops structure
734 if (modify_bus_ops(nexus_name
, &bofi_bus_ops
) == 0) {
735 ddi_remove_minor_node(dip
, buf
);
736 mutex_destroy(&clone_tab_mutex
);
737 mutex_destroy(&bofi_mutex
);
738 mutex_destroy(&bofi_low_mutex
);
739 return (DDI_FAILURE
);
741 if (sysevent_evc_bind(FM_ERROR_CHAN
, &bofi_error_chan
, 0) == 0)
742 (void) sysevent_evc_subscribe(bofi_error_chan
, "bofi",
743 EC_FM
, bofi_fm_ereport_callback
, NULL
, 0);
746 * save dip for getinfo
752 return (DDI_SUCCESS
);
757 bofi_detach(dev_info_t
*dip
, ddi_detach_cmd_t cmd
)
762 if (cmd
!= DDI_DETACH
)
763 return (DDI_FAILURE
);
764 if (ddi_get_instance(dip
) > 0)
765 return (DDI_FAILURE
);
766 if ((name
= ddi_get_name(dip
)) == NULL
)
767 return (DDI_FAILURE
);
768 (void) snprintf(buf
, sizeof (buf
), "%s,ctl", name
);
769 mutex_enter(&bofi_low_mutex
);
770 mutex_enter(&bofi_mutex
);
772 * make sure test bofi is no longer in use
774 if (shadow_list
.next
!= &shadow_list
|| errent_listp
!= NULL
) {
775 mutex_exit(&bofi_mutex
);
776 mutex_exit(&bofi_low_mutex
);
777 return (DDI_FAILURE
);
779 mutex_exit(&bofi_mutex
);
780 mutex_exit(&bofi_low_mutex
);
783 * restore bus_ops structure
785 if (reset_bus_ops(nexus_name
, &save_bus_ops
) == 0)
786 return (DDI_FAILURE
);
788 (void) sysevent_evc_unbind(bofi_error_chan
);
790 mutex_destroy(&clone_tab_mutex
);
791 mutex_destroy(&bofi_mutex
);
792 mutex_destroy(&bofi_low_mutex
);
793 ddi_remove_minor_node(dip
, buf
);
796 return (DDI_SUCCESS
);
802 bofi_getinfo(dev_info_t
*dip
, ddi_info_cmd_t cmd
, void *arg
, void **result
)
804 dev_t dev
= (dev_t
)arg
;
805 int minor
= (int)getminor(dev
);
809 case DDI_INFO_DEVT2DEVINFO
:
810 if (minor
!= 0 || our_dip
== NULL
) {
811 *result
= (void *)NULL
;
812 retval
= DDI_FAILURE
;
814 *result
= (void *)our_dip
;
815 retval
= DDI_SUCCESS
;
818 case DDI_INFO_DEVT2INSTANCE
:
820 retval
= DDI_SUCCESS
;
823 retval
= DDI_FAILURE
;
831 bofi_open(dev_t
*devp
, int flag
, int otyp
, cred_t
*credp
)
833 int minor
= (int)getminor(*devp
);
834 struct bofi_errent
*softc
;
837 * only allow open on minor=0 - the clone device
842 * fail if not attached
847 * find a free slot and grab it
849 mutex_enter(&clone_tab_mutex
);
850 for (minor
= 1; minor
< NCLONES
; minor
++) {
851 if (clone_tab
[minor
] == 0) {
852 clone_tab
[minor
] = 1;
856 mutex_exit(&clone_tab_mutex
);
857 if (minor
== NCLONES
)
860 * soft state structure for this clone is used to maintain a list
861 * of allocated errdefs so they can be freed on close
863 if (ddi_soft_state_zalloc(statep
, minor
) != DDI_SUCCESS
) {
864 mutex_enter(&clone_tab_mutex
);
865 clone_tab
[minor
] = 0;
866 mutex_exit(&clone_tab_mutex
);
869 softc
= ddi_get_soft_state(statep
, minor
);
870 softc
->cnext
= softc
;
871 softc
->cprev
= softc
;
873 *devp
= makedevice(getmajor(*devp
), minor
);
880 bofi_close(dev_t dev
, int flag
, int otyp
, cred_t
*credp
)
882 int minor
= (int)getminor(dev
);
883 struct bofi_errent
*softc
;
884 struct bofi_errent
*ep
, *next_ep
;
886 softc
= ddi_get_soft_state(statep
, minor
);
890 * find list of errdefs and free them off
892 for (ep
= softc
->cnext
; ep
!= softc
; ) {
894 (void) bofi_errdef_free(ep
);
898 * free clone tab slot
900 mutex_enter(&clone_tab_mutex
);
901 clone_tab
[minor
] = 0;
902 mutex_exit(&clone_tab_mutex
);
904 ddi_soft_state_free(statep
, minor
);
911 bofi_ioctl(dev_t dev
, int cmd
, intptr_t arg
, int mode
, cred_t
*credp
,
914 struct bofi_errent
*softc
;
915 int minor
= (int)getminor(dev
);
916 struct bofi_errdef errdef
;
917 struct bofi_errctl errctl
;
918 struct bofi_errstate errstate
;
920 struct bofi_get_handles get_handles
;
921 struct bofi_get_hdl_info hdl_info
;
922 struct handle_info
*hdlip
;
923 struct handle_info
*hib
;
928 int req_count
, count
, err
;
930 struct bofi_shadow
*hp
;
932 struct bofi_shadow
*hhashp
;
938 * add a new error definition
940 #ifdef _MULTI_DATAMODEL
941 switch (ddi_model_convert_from(mode
& FMODELS
)) {
942 case DDI_MODEL_ILP32
:
945 * For use when a 32 bit app makes a call into a
948 struct bofi_errdef32 errdef_32
;
950 if (ddi_copyin((void *)arg
, &errdef_32
,
951 sizeof (struct bofi_errdef32
), mode
)) {
954 errdef
.namesize
= errdef_32
.namesize
;
955 (void) strncpy(errdef
.name
, errdef_32
.name
, NAMESIZE
);
956 errdef
.instance
= errdef_32
.instance
;
957 errdef
.rnumber
= errdef_32
.rnumber
;
958 errdef
.offset
= errdef_32
.offset
;
959 errdef
.len
= errdef_32
.len
;
960 errdef
.access_type
= errdef_32
.access_type
;
961 errdef
.access_count
= errdef_32
.access_count
;
962 errdef
.fail_count
= errdef_32
.fail_count
;
963 errdef
.acc_chk
= errdef_32
.acc_chk
;
964 errdef
.optype
= errdef_32
.optype
;
965 errdef
.operand
= errdef_32
.operand
;
966 errdef
.log
.logsize
= errdef_32
.log
.logsize
;
967 errdef
.log
.entries
= errdef_32
.log
.entries
;
968 errdef
.log
.flags
= errdef_32
.log
.flags
;
969 errdef
.log
.wrapcnt
= errdef_32
.log
.wrapcnt
;
970 errdef
.log
.start_time
= errdef_32
.log
.start_time
;
971 errdef
.log
.stop_time
= errdef_32
.log
.stop_time
;
973 (caddr_t
)(uintptr_t)errdef_32
.log
.logbase
;
974 errdef
.errdef_handle
= errdef_32
.errdef_handle
;
978 if (ddi_copyin((void *)arg
, &errdef
,
979 sizeof (struct bofi_errdef
), mode
))
983 #else /* ! _MULTI_DATAMODEL */
984 if (ddi_copyin((void *)arg
, &errdef
,
985 sizeof (struct bofi_errdef
), mode
) != 0)
987 #endif /* _MULTI_DATAMODEL */
991 if (errdef
.fail_count
== 0)
993 if (errdef
.optype
!= 0) {
994 if (errdef
.access_type
& BOFI_INTR
&&
995 errdef
.optype
!= BOFI_DELAY_INTR
&&
996 errdef
.optype
!= BOFI_LOSE_INTR
&&
997 errdef
.optype
!= BOFI_EXTRA_INTR
)
999 if ((errdef
.access_type
& (BOFI_DMA_RW
|BOFI_PIO_R
)) &&
1000 errdef
.optype
== BOFI_NO_TRANSFER
)
1002 if ((errdef
.access_type
& (BOFI_PIO_RW
)) &&
1003 errdef
.optype
!= BOFI_EQUAL
&&
1004 errdef
.optype
!= BOFI_OR
&&
1005 errdef
.optype
!= BOFI_XOR
&&
1006 errdef
.optype
!= BOFI_AND
&&
1007 errdef
.optype
!= BOFI_NO_TRANSFER
)
1011 * find softstate for this clone, so we can tag
1012 * new errdef on to it
1014 softc
= ddi_get_soft_state(statep
, minor
);
1020 if (errdef
.namesize
> NAMESIZE
)
1022 namep
= kmem_zalloc(errdef
.namesize
+1, KM_SLEEP
);
1023 (void) strncpy(namep
, errdef
.name
, errdef
.namesize
);
1025 if (bofi_errdef_alloc(&errdef
, namep
, softc
) != DDI_SUCCESS
) {
1026 (void) bofi_errdef_free((struct bofi_errent
*)
1027 (uintptr_t)errdef
.errdef_handle
);
1028 kmem_free(namep
, errdef
.namesize
+1);
1032 * copy out errdef again, including filled in errdef_handle
1034 #ifdef _MULTI_DATAMODEL
1035 switch (ddi_model_convert_from(mode
& FMODELS
)) {
1036 case DDI_MODEL_ILP32
:
1039 * For use when a 32 bit app makes a call into a
1042 struct bofi_errdef32 errdef_32
;
1044 errdef_32
.namesize
= errdef
.namesize
;
1045 (void) strncpy(errdef_32
.name
, errdef
.name
, NAMESIZE
);
1046 errdef_32
.instance
= errdef
.instance
;
1047 errdef_32
.rnumber
= errdef
.rnumber
;
1048 errdef_32
.offset
= errdef
.offset
;
1049 errdef_32
.len
= errdef
.len
;
1050 errdef_32
.access_type
= errdef
.access_type
;
1051 errdef_32
.access_count
= errdef
.access_count
;
1052 errdef_32
.fail_count
= errdef
.fail_count
;
1053 errdef_32
.acc_chk
= errdef
.acc_chk
;
1054 errdef_32
.optype
= errdef
.optype
;
1055 errdef_32
.operand
= errdef
.operand
;
1056 errdef_32
.log
.logsize
= errdef
.log
.logsize
;
1057 errdef_32
.log
.entries
= errdef
.log
.entries
;
1058 errdef_32
.log
.flags
= errdef
.log
.flags
;
1059 errdef_32
.log
.wrapcnt
= errdef
.log
.wrapcnt
;
1060 errdef_32
.log
.start_time
= errdef
.log
.start_time
;
1061 errdef_32
.log
.stop_time
= errdef
.log
.stop_time
;
1062 errdef_32
.log
.logbase
=
1063 (caddr32_t
)(uintptr_t)errdef
.log
.logbase
;
1064 errdef_32
.errdef_handle
= errdef
.errdef_handle
;
1065 if (ddi_copyout(&errdef_32
, (void *)arg
,
1066 sizeof (struct bofi_errdef32
), mode
) != 0) {
1067 (void) bofi_errdef_free((struct bofi_errent
*)
1068 errdef
.errdef_handle
);
1069 kmem_free(namep
, errdef
.namesize
+1);
1074 case DDI_MODEL_NONE
:
1075 if (ddi_copyout(&errdef
, (void *)arg
,
1076 sizeof (struct bofi_errdef
), mode
) != 0) {
1077 (void) bofi_errdef_free((struct bofi_errent
*)
1078 errdef
.errdef_handle
);
1079 kmem_free(namep
, errdef
.namesize
+1);
1084 #else /* ! _MULTI_DATAMODEL */
1085 if (ddi_copyout(&errdef
, (void *)arg
,
1086 sizeof (struct bofi_errdef
), mode
) != 0) {
1087 (void) bofi_errdef_free((struct bofi_errent
*)
1088 (uintptr_t)errdef
.errdef_handle
);
1089 kmem_free(namep
, errdef
.namesize
+1);
1092 #endif /* _MULTI_DATAMODEL */
1096 * delete existing errdef
1098 if (ddi_copyin((void *)arg
, &ed_handle
,
1099 sizeof (void *), mode
) != 0)
1101 return (bofi_errdef_free((struct bofi_errent
*)ed_handle
));
1104 * start all errdefs corresponding to
1105 * this name and instance
1107 if (ddi_copyin((void *)arg
, &errctl
,
1108 sizeof (struct bofi_errctl
), mode
) != 0)
1113 if (errctl
.namesize
> NAMESIZE
)
1115 namep
= kmem_zalloc(errctl
.namesize
+1, KM_SLEEP
);
1116 (void) strncpy(namep
, errctl
.name
, errctl
.namesize
);
1117 bofi_start(&errctl
, namep
);
1118 kmem_free(namep
, errctl
.namesize
+1);
1122 * stop all errdefs corresponding to
1123 * this name and instance
1125 if (ddi_copyin((void *)arg
, &errctl
,
1126 sizeof (struct bofi_errctl
), mode
) != 0)
1131 if (errctl
.namesize
> NAMESIZE
)
1133 namep
= kmem_zalloc(errctl
.namesize
+1, KM_SLEEP
);
1134 (void) strncpy(namep
, errctl
.name
, errctl
.namesize
);
1135 bofi_stop(&errctl
, namep
);
1136 kmem_free(namep
, errctl
.namesize
+1);
1138 case BOFI_BROADCAST
:
1140 * wakeup all errdefs corresponding to
1141 * this name and instance
1143 if (ddi_copyin((void *)arg
, &errctl
,
1144 sizeof (struct bofi_errctl
), mode
) != 0)
1149 if (errctl
.namesize
> NAMESIZE
)
1151 namep
= kmem_zalloc(errctl
.namesize
+1, KM_SLEEP
);
1152 (void) strncpy(namep
, errctl
.name
, errctl
.namesize
);
1153 bofi_broadcast(&errctl
, namep
);
1154 kmem_free(namep
, errctl
.namesize
+1);
1156 case BOFI_CLEAR_ACC_CHK
:
1158 * clear "acc_chk" for all errdefs corresponding to
1159 * this name and instance
1161 if (ddi_copyin((void *)arg
, &errctl
,
1162 sizeof (struct bofi_errctl
), mode
) != 0)
1167 if (errctl
.namesize
> NAMESIZE
)
1169 namep
= kmem_zalloc(errctl
.namesize
+1, KM_SLEEP
);
1170 (void) strncpy(namep
, errctl
.name
, errctl
.namesize
);
1171 bofi_clear_acc_chk(&errctl
, namep
);
1172 kmem_free(namep
, errctl
.namesize
+1);
1174 case BOFI_CLEAR_ERRORS
:
1176 * set "fail_count" to 0 for all errdefs corresponding to
1177 * this name and instance whose "access_count"
1180 if (ddi_copyin((void *)arg
, &errctl
,
1181 sizeof (struct bofi_errctl
), mode
) != 0)
1186 if (errctl
.namesize
> NAMESIZE
)
1188 namep
= kmem_zalloc(errctl
.namesize
+1, KM_SLEEP
);
1189 (void) strncpy(namep
, errctl
.name
, errctl
.namesize
);
1190 bofi_clear_errors(&errctl
, namep
);
1191 kmem_free(namep
, errctl
.namesize
+1);
1193 case BOFI_CLEAR_ERRDEFS
:
1195 * set "access_count" and "fail_count" to 0 for all errdefs
1196 * corresponding to this name and instance
1198 if (ddi_copyin((void *)arg
, &errctl
,
1199 sizeof (struct bofi_errctl
), mode
) != 0)
1204 if (errctl
.namesize
> NAMESIZE
)
1206 namep
= kmem_zalloc(errctl
.namesize
+1, KM_SLEEP
);
1207 (void) strncpy(namep
, errctl
.name
, errctl
.namesize
);
1208 bofi_clear_errdefs(&errctl
, namep
);
1209 kmem_free(namep
, errctl
.namesize
+1);
1211 case BOFI_CHK_STATE
:
1213 struct acc_log_elem
*klg
;
1216 * get state for this errdef - read in dummy errstate
1217 * with just the errdef_handle filled in
1219 #ifdef _MULTI_DATAMODEL
1220 switch (ddi_model_convert_from(mode
& FMODELS
)) {
1221 case DDI_MODEL_ILP32
:
1224 * For use when a 32 bit app makes a call into a
1227 struct bofi_errstate32 errstate_32
;
1229 if (ddi_copyin((void *)arg
, &errstate_32
,
1230 sizeof (struct bofi_errstate32
), mode
) != 0) {
1233 errstate
.fail_time
= errstate_32
.fail_time
;
1234 errstate
.msg_time
= errstate_32
.msg_time
;
1235 errstate
.access_count
= errstate_32
.access_count
;
1236 errstate
.fail_count
= errstate_32
.fail_count
;
1237 errstate
.acc_chk
= errstate_32
.acc_chk
;
1238 errstate
.errmsg_count
= errstate_32
.errmsg_count
;
1239 (void) strncpy(errstate
.buffer
, errstate_32
.buffer
,
1241 errstate
.severity
= errstate_32
.severity
;
1242 errstate
.log
.logsize
= errstate_32
.log
.logsize
;
1243 errstate
.log
.entries
= errstate_32
.log
.entries
;
1244 errstate
.log
.flags
= errstate_32
.log
.flags
;
1245 errstate
.log
.wrapcnt
= errstate_32
.log
.wrapcnt
;
1246 errstate
.log
.start_time
= errstate_32
.log
.start_time
;
1247 errstate
.log
.stop_time
= errstate_32
.log
.stop_time
;
1248 errstate
.log
.logbase
=
1249 (caddr_t
)(uintptr_t)errstate_32
.log
.logbase
;
1250 errstate
.errdef_handle
= errstate_32
.errdef_handle
;
1253 case DDI_MODEL_NONE
:
1254 if (ddi_copyin((void *)arg
, &errstate
,
1255 sizeof (struct bofi_errstate
), mode
) != 0)
1259 #else /* ! _MULTI_DATAMODEL */
1260 if (ddi_copyin((void *)arg
, &errstate
,
1261 sizeof (struct bofi_errstate
), mode
) != 0)
1263 #endif /* _MULTI_DATAMODEL */
1264 if ((retval
= bofi_errdef_check(&errstate
, &klg
)) == EINVAL
)
1267 * copy out real errstate structure
1269 uls
= errstate
.log
.logsize
;
1270 if (errstate
.log
.entries
> uls
&& uls
)
1271 /* insufficient user memory */
1272 errstate
.log
.entries
= uls
;
1273 /* always pass back a time */
1274 if (errstate
.log
.stop_time
== 0ul)
1275 (void) drv_getparm(TIME
, &(errstate
.log
.stop_time
));
1277 #ifdef _MULTI_DATAMODEL
1278 switch (ddi_model_convert_from(mode
& FMODELS
)) {
1279 case DDI_MODEL_ILP32
:
1282 * For use when a 32 bit app makes a call into a
1285 struct bofi_errstate32 errstate_32
;
1287 errstate_32
.fail_time
= errstate
.fail_time
;
1288 errstate_32
.msg_time
= errstate
.msg_time
;
1289 errstate_32
.access_count
= errstate
.access_count
;
1290 errstate_32
.fail_count
= errstate
.fail_count
;
1291 errstate_32
.acc_chk
= errstate
.acc_chk
;
1292 errstate_32
.errmsg_count
= errstate
.errmsg_count
;
1293 (void) strncpy(errstate_32
.buffer
, errstate
.buffer
,
1295 errstate_32
.severity
= errstate
.severity
;
1296 errstate_32
.log
.logsize
= errstate
.log
.logsize
;
1297 errstate_32
.log
.entries
= errstate
.log
.entries
;
1298 errstate_32
.log
.flags
= errstate
.log
.flags
;
1299 errstate_32
.log
.wrapcnt
= errstate
.log
.wrapcnt
;
1300 errstate_32
.log
.start_time
= errstate
.log
.start_time
;
1301 errstate_32
.log
.stop_time
= errstate
.log
.stop_time
;
1302 errstate_32
.log
.logbase
=
1303 (caddr32_t
)(uintptr_t)errstate
.log
.logbase
;
1304 errstate_32
.errdef_handle
= errstate
.errdef_handle
;
1305 if (ddi_copyout(&errstate_32
, (void *)arg
,
1306 sizeof (struct bofi_errstate32
), mode
) != 0)
1310 case DDI_MODEL_NONE
:
1311 if (ddi_copyout(&errstate
, (void *)arg
,
1312 sizeof (struct bofi_errstate
), mode
) != 0)
1316 #else /* ! _MULTI_DATAMODEL */
1317 if (ddi_copyout(&errstate
, (void *)arg
,
1318 sizeof (struct bofi_errstate
), mode
) != 0)
1320 #endif /* _MULTI_DATAMODEL */
1321 if (uls
&& errstate
.log
.entries
&&
1322 ddi_copyout(klg
, errstate
.log
.logbase
,
1323 errstate
.log
.entries
* sizeof (struct acc_log_elem
),
1329 case BOFI_CHK_STATE_W
:
1331 struct acc_log_elem
*klg
;
1334 * get state for this errdef - read in dummy errstate
1335 * with just the errdef_handle filled in. Then wait for
1336 * a ddi_report_fault message to come back
1338 #ifdef _MULTI_DATAMODEL
1339 switch (ddi_model_convert_from(mode
& FMODELS
)) {
1340 case DDI_MODEL_ILP32
:
1343 * For use when a 32 bit app makes a call into a
1346 struct bofi_errstate32 errstate_32
;
1348 if (ddi_copyin((void *)arg
, &errstate_32
,
1349 sizeof (struct bofi_errstate32
), mode
) != 0) {
1352 errstate
.fail_time
= errstate_32
.fail_time
;
1353 errstate
.msg_time
= errstate_32
.msg_time
;
1354 errstate
.access_count
= errstate_32
.access_count
;
1355 errstate
.fail_count
= errstate_32
.fail_count
;
1356 errstate
.acc_chk
= errstate_32
.acc_chk
;
1357 errstate
.errmsg_count
= errstate_32
.errmsg_count
;
1358 (void) strncpy(errstate
.buffer
, errstate_32
.buffer
,
1360 errstate
.severity
= errstate_32
.severity
;
1361 errstate
.log
.logsize
= errstate_32
.log
.logsize
;
1362 errstate
.log
.entries
= errstate_32
.log
.entries
;
1363 errstate
.log
.flags
= errstate_32
.log
.flags
;
1364 errstate
.log
.wrapcnt
= errstate_32
.log
.wrapcnt
;
1365 errstate
.log
.start_time
= errstate_32
.log
.start_time
;
1366 errstate
.log
.stop_time
= errstate_32
.log
.stop_time
;
1367 errstate
.log
.logbase
=
1368 (caddr_t
)(uintptr_t)errstate_32
.log
.logbase
;
1369 errstate
.errdef_handle
= errstate_32
.errdef_handle
;
1372 case DDI_MODEL_NONE
:
1373 if (ddi_copyin((void *)arg
, &errstate
,
1374 sizeof (struct bofi_errstate
), mode
) != 0)
1378 #else /* ! _MULTI_DATAMODEL */
1379 if (ddi_copyin((void *)arg
, &errstate
,
1380 sizeof (struct bofi_errstate
), mode
) != 0)
1382 #endif /* _MULTI_DATAMODEL */
1383 if ((retval
= bofi_errdef_check_w(&errstate
, &klg
)) == EINVAL
)
1386 * copy out real errstate structure
1388 uls
= errstate
.log
.logsize
;
1389 uls
= errstate
.log
.logsize
;
1390 if (errstate
.log
.entries
> uls
&& uls
)
1391 /* insufficient user memory */
1392 errstate
.log
.entries
= uls
;
1393 /* always pass back a time */
1394 if (errstate
.log
.stop_time
== 0ul)
1395 (void) drv_getparm(TIME
, &(errstate
.log
.stop_time
));
1397 #ifdef _MULTI_DATAMODEL
1398 switch (ddi_model_convert_from(mode
& FMODELS
)) {
1399 case DDI_MODEL_ILP32
:
1402 * For use when a 32 bit app makes a call into a
1405 struct bofi_errstate32 errstate_32
;
1407 errstate_32
.fail_time
= errstate
.fail_time
;
1408 errstate_32
.msg_time
= errstate
.msg_time
;
1409 errstate_32
.access_count
= errstate
.access_count
;
1410 errstate_32
.fail_count
= errstate
.fail_count
;
1411 errstate_32
.acc_chk
= errstate
.acc_chk
;
1412 errstate_32
.errmsg_count
= errstate
.errmsg_count
;
1413 (void) strncpy(errstate_32
.buffer
, errstate
.buffer
,
1415 errstate_32
.severity
= errstate
.severity
;
1416 errstate_32
.log
.logsize
= errstate
.log
.logsize
;
1417 errstate_32
.log
.entries
= errstate
.log
.entries
;
1418 errstate_32
.log
.flags
= errstate
.log
.flags
;
1419 errstate_32
.log
.wrapcnt
= errstate
.log
.wrapcnt
;
1420 errstate_32
.log
.start_time
= errstate
.log
.start_time
;
1421 errstate_32
.log
.stop_time
= errstate
.log
.stop_time
;
1422 errstate_32
.log
.logbase
=
1423 (caddr32_t
)(uintptr_t)errstate
.log
.logbase
;
1424 errstate_32
.errdef_handle
= errstate
.errdef_handle
;
1425 if (ddi_copyout(&errstate_32
, (void *)arg
,
1426 sizeof (struct bofi_errstate32
), mode
) != 0)
1430 case DDI_MODEL_NONE
:
1431 if (ddi_copyout(&errstate
, (void *)arg
,
1432 sizeof (struct bofi_errstate
), mode
) != 0)
1436 #else /* ! _MULTI_DATAMODEL */
1437 if (ddi_copyout(&errstate
, (void *)arg
,
1438 sizeof (struct bofi_errstate
), mode
) != 0)
1440 #endif /* _MULTI_DATAMODEL */
1442 if (uls
&& errstate
.log
.entries
&&
1443 ddi_copyout(klg
, errstate
.log
.logbase
,
1444 errstate
.log
.entries
* sizeof (struct acc_log_elem
),
1450 case BOFI_GET_HANDLES
:
1452 * display existing handles
1454 #ifdef _MULTI_DATAMODEL
1455 switch (ddi_model_convert_from(mode
& FMODELS
)) {
1456 case DDI_MODEL_ILP32
:
1459 * For use when a 32 bit app makes a call into a
1462 struct bofi_get_handles32 get_handles_32
;
1464 if (ddi_copyin((void *)arg
, &get_handles_32
,
1465 sizeof (get_handles_32
), mode
) != 0) {
1468 get_handles
.namesize
= get_handles_32
.namesize
;
1469 (void) strncpy(get_handles
.name
, get_handles_32
.name
,
1471 get_handles
.instance
= get_handles_32
.instance
;
1472 get_handles
.count
= get_handles_32
.count
;
1473 get_handles
.buffer
=
1474 (caddr_t
)(uintptr_t)get_handles_32
.buffer
;
1477 case DDI_MODEL_NONE
:
1478 if (ddi_copyin((void *)arg
, &get_handles
,
1479 sizeof (get_handles
), mode
) != 0)
1483 #else /* ! _MULTI_DATAMODEL */
1484 if (ddi_copyin((void *)arg
, &get_handles
,
1485 sizeof (get_handles
), mode
) != 0)
1487 #endif /* _MULTI_DATAMODEL */
1491 if (get_handles
.namesize
> NAMESIZE
)
1493 namep
= kmem_zalloc(get_handles
.namesize
+1, KM_SLEEP
);
1494 (void) strncpy(namep
, get_handles
.name
, get_handles
.namesize
);
1495 req_count
= get_handles
.count
;
1496 bufptr
= buffer
= kmem_zalloc(req_count
, KM_SLEEP
);
1497 endbuf
= bufptr
+ req_count
;
1499 * display existing handles
1501 mutex_enter(&bofi_low_mutex
);
1502 mutex_enter(&bofi_mutex
);
1503 for (i
= 0; i
< HDL_HASH_TBL_SIZE
; i
++) {
1504 hhashp
= &hhash_table
[i
];
1505 for (hp
= hhashp
->hnext
; hp
!= hhashp
; hp
= hp
->hnext
) {
1506 if (!driver_under_test(hp
->dip
))
1508 if (ddi_name_to_major(ddi_get_name(hp
->dip
)) !=
1509 ddi_name_to_major(namep
))
1511 if (hp
->instance
!= get_handles
.instance
)
1514 * print information per handle - note that
1515 * DMA* means an unbound DMA handle
1517 (void) snprintf(bufptr
, (size_t)(endbuf
-bufptr
),
1518 " %s %d %s ", hp
->name
, hp
->instance
,
1519 (hp
->type
== BOFI_INT_HDL
) ? "INTR" :
1520 (hp
->type
== BOFI_ACC_HDL
) ? "PIO" :
1521 (hp
->type
== BOFI_DMA_HDL
) ? "DMA" :
1522 (hp
->hparrayp
!= NULL
) ? "DVMA" : "DMA*");
1523 bufptr
+= strlen(bufptr
);
1524 if (hp
->type
== BOFI_ACC_HDL
) {
1525 if (hp
->len
== INT_MAX
- hp
->offset
)
1526 (void) snprintf(bufptr
,
1527 (size_t)(endbuf
-bufptr
),
1528 "reg set %d off 0x%llx\n",
1529 hp
->rnumber
, hp
->offset
);
1531 (void) snprintf(bufptr
,
1532 (size_t)(endbuf
-bufptr
),
1533 "reg set %d off 0x%llx"
1535 hp
->rnumber
, hp
->offset
,
1537 } else if (hp
->type
== BOFI_DMA_HDL
)
1538 (void) snprintf(bufptr
,
1539 (size_t)(endbuf
-bufptr
),
1540 "handle no %d len 0x%llx"
1541 " addr 0x%p\n", hp
->rnumber
,
1542 hp
->len
, (void *)hp
->addr
);
1543 else if (hp
->type
== BOFI_NULL
&&
1544 hp
->hparrayp
== NULL
)
1545 (void) snprintf(bufptr
,
1546 (size_t)(endbuf
-bufptr
),
1547 "handle no %d\n", hp
->rnumber
);
1549 (void) snprintf(bufptr
,
1550 (size_t)(endbuf
-bufptr
), "\n");
1551 bufptr
+= strlen(bufptr
);
1554 mutex_exit(&bofi_mutex
);
1555 mutex_exit(&bofi_low_mutex
);
1556 err
= ddi_copyout(buffer
, get_handles
.buffer
, req_count
, mode
);
1557 kmem_free(namep
, get_handles
.namesize
+1);
1558 kmem_free(buffer
, req_count
);
1563 case BOFI_GET_HANDLE_INFO
:
1565 * display existing handles
1567 #ifdef _MULTI_DATAMODEL
1568 switch (ddi_model_convert_from(mode
& FMODELS
)) {
1569 case DDI_MODEL_ILP32
:
1572 * For use when a 32 bit app makes a call into a
1575 struct bofi_get_hdl_info32 hdl_info_32
;
1577 if (ddi_copyin((void *)arg
, &hdl_info_32
,
1578 sizeof (hdl_info_32
), mode
)) {
1581 hdl_info
.namesize
= hdl_info_32
.namesize
;
1582 (void) strncpy(hdl_info
.name
, hdl_info_32
.name
,
1584 hdl_info
.count
= hdl_info_32
.count
;
1585 hdl_info
.hdli
= (caddr_t
)(uintptr_t)hdl_info_32
.hdli
;
1588 case DDI_MODEL_NONE
:
1589 if (ddi_copyin((void *)arg
, &hdl_info
,
1590 sizeof (hdl_info
), mode
))
1594 #else /* ! _MULTI_DATAMODEL */
1595 if (ddi_copyin((void *)arg
, &hdl_info
,
1596 sizeof (hdl_info
), mode
))
1598 #endif /* _MULTI_DATAMODEL */
1599 if (hdl_info
.namesize
> NAMESIZE
)
1601 namep
= kmem_zalloc(hdl_info
.namesize
+ 1, KM_SLEEP
);
1602 (void) strncpy(namep
, hdl_info
.name
, hdl_info
.namesize
);
1603 req_count
= hdl_info
.count
;
1604 count
= hdl_info
.count
= 0; /* the actual no of handles */
1605 if (req_count
> 0) {
1607 kmem_zalloc(req_count
* sizeof (struct handle_info
),
1611 req_count
= hdl_info
.count
= 0;
1615 * display existing handles
1617 mutex_enter(&bofi_low_mutex
);
1618 mutex_enter(&bofi_mutex
);
1619 for (i
= 0; i
< HDL_HASH_TBL_SIZE
; i
++) {
1620 hhashp
= &hhash_table
[i
];
1621 for (hp
= hhashp
->hnext
; hp
!= hhashp
; hp
= hp
->hnext
) {
1622 if (!driver_under_test(hp
->dip
) ||
1623 ddi_name_to_major(ddi_get_name(hp
->dip
)) !=
1624 ddi_name_to_major(namep
) ||
1625 ++(hdl_info
.count
) > req_count
||
1629 hdlip
->instance
= hp
->instance
;
1630 hdlip
->rnumber
= hp
->rnumber
;
1633 hdlip
->access_type
= BOFI_PIO_RW
;
1634 hdlip
->offset
= hp
->offset
;
1635 hdlip
->len
= hp
->len
;
1638 hdlip
->access_type
= 0;
1639 if (hp
->flags
& DDI_DMA_WRITE
)
1640 hdlip
->access_type
|=
1642 if (hp
->flags
& DDI_DMA_READ
)
1643 hdlip
->access_type
|=
1645 hdlip
->len
= hp
->len
;
1646 hdlip
->addr_cookie
=
1647 (uint64_t)(uintptr_t)hp
->addr
;
1650 hdlip
->access_type
= BOFI_INTR
;
1653 hdlip
->access_type
= 0;
1660 mutex_exit(&bofi_mutex
);
1661 mutex_exit(&bofi_low_mutex
);
1663 #ifdef _MULTI_DATAMODEL
1664 switch (ddi_model_convert_from(mode
& FMODELS
)) {
1665 case DDI_MODEL_ILP32
:
1668 * For use when a 32 bit app makes a call into a
1671 struct bofi_get_hdl_info32 hdl_info_32
;
1673 hdl_info_32
.namesize
= hdl_info
.namesize
;
1674 (void) strncpy(hdl_info_32
.name
, hdl_info
.name
,
1676 hdl_info_32
.count
= hdl_info
.count
;
1677 hdl_info_32
.hdli
= (caddr32_t
)(uintptr_t)hdl_info
.hdli
;
1678 if (ddi_copyout(&hdl_info_32
, (void *)arg
,
1679 sizeof (hdl_info_32
), mode
) != 0) {
1680 kmem_free(namep
, hdl_info
.namesize
+1);
1683 req_count
* sizeof (*hib
));
1688 case DDI_MODEL_NONE
:
1689 if (ddi_copyout(&hdl_info
, (void *)arg
,
1690 sizeof (hdl_info
), mode
) != 0) {
1691 kmem_free(namep
, hdl_info
.namesize
+1);
1694 req_count
* sizeof (*hib
));
1699 #else /* ! _MULTI_DATAMODEL */
1700 if (ddi_copyout(&hdl_info
, (void *)arg
,
1701 sizeof (hdl_info
), mode
) != 0) {
1702 kmem_free(namep
, hdl_info
.namesize
+1);
1704 kmem_free(hib
, req_count
* sizeof (*hib
));
1707 #endif /* ! _MULTI_DATAMODEL */
1709 if (ddi_copyout(hib
, hdl_info
.hdli
,
1710 count
* sizeof (*hib
), mode
) != 0) {
1711 kmem_free(namep
, hdl_info
.namesize
+1);
1714 req_count
* sizeof (*hib
));
1718 kmem_free(namep
, hdl_info
.namesize
+1);
1720 kmem_free(hib
, req_count
* sizeof (*hib
));
1729 * add a new error definition
1732 bofi_errdef_alloc(struct bofi_errdef
*errdefp
, char *namep
,
1733 struct bofi_errent
*softc
)
1735 struct bofi_errent
*ep
;
1736 struct bofi_shadow
*hp
;
1737 struct bofi_link
*lp
;
1740 * allocate errdef structure and put on in-use list
1742 ep
= kmem_zalloc(sizeof (struct bofi_errent
), KM_SLEEP
);
1743 ep
->errdef
= *errdefp
;
1745 ep
->errdef
.errdef_handle
= (uint64_t)(uintptr_t)ep
;
1746 ep
->errstate
.severity
= DDI_SERVICE_RESTORED
;
1747 ep
->errstate
.errdef_handle
= (uint64_t)(uintptr_t)ep
;
1748 cv_init(&ep
->cv
, NULL
, CV_DRIVER
, NULL
);
1750 * allocate space for logging
1752 ep
->errdef
.log
.entries
= 0;
1753 ep
->errdef
.log
.wrapcnt
= 0;
1754 if (ep
->errdef
.access_type
& BOFI_LOG
)
1755 ep
->logbase
= kmem_alloc(sizeof (struct acc_log_elem
) *
1756 ep
->errdef
.log
.logsize
, KM_SLEEP
);
1760 * put on in-use list
1762 mutex_enter(&bofi_low_mutex
);
1763 mutex_enter(&bofi_mutex
);
1764 ep
->next
= errent_listp
;
1767 * and add it to the per-clone list
1769 ep
->cnext
= softc
->cnext
;
1770 softc
->cnext
->cprev
= ep
;
1775 * look for corresponding shadow handle structures and if we find any
1776 * tag this errdef structure on to their link lists.
1778 for (hp
= shadow_list
.next
; hp
!= &shadow_list
; hp
= hp
->next
) {
1779 if (ddi_name_to_major(hp
->name
) == ddi_name_to_major(namep
) &&
1780 hp
->instance
== errdefp
->instance
&&
1781 (((errdefp
->access_type
& BOFI_DMA_RW
) &&
1782 (ep
->errdef
.rnumber
== -1 ||
1783 hp
->rnumber
== ep
->errdef
.rnumber
) &&
1784 hp
->type
== BOFI_DMA_HDL
&&
1785 (((uintptr_t)(hp
->addr
+ ep
->errdef
.offset
+
1786 ep
->errdef
.len
) & ~LLSZMASK
) >
1787 ((uintptr_t)((hp
->addr
+ ep
->errdef
.offset
) +
1788 LLSZMASK
) & ~LLSZMASK
))) ||
1789 ((errdefp
->access_type
& BOFI_INTR
) &&
1790 hp
->type
== BOFI_INT_HDL
) ||
1791 ((errdefp
->access_type
& BOFI_PIO_RW
) &&
1792 hp
->type
== BOFI_ACC_HDL
&&
1793 (errdefp
->rnumber
== -1 ||
1794 hp
->rnumber
== errdefp
->rnumber
) &&
1795 (errdefp
->len
== 0 ||
1796 hp
->offset
< errdefp
->offset
+ errdefp
->len
) &&
1797 hp
->offset
+ hp
->len
> errdefp
->offset
))) {
1798 lp
= bofi_link_freelist
;
1800 bofi_link_freelist
= lp
->link
;
1802 lp
->link
= hp
->link
;
1807 errdefp
->errdef_handle
= (uint64_t)(uintptr_t)ep
;
1808 mutex_exit(&bofi_mutex
);
1809 mutex_exit(&bofi_low_mutex
);
1810 ep
->softintr_id
= NULL
;
1811 return (ddi_add_softintr(our_dip
, DDI_SOFTINT_MED
, &ep
->softintr_id
,
1812 NULL
, NULL
, bofi_signal
, (caddr_t
)&ep
->errdef
));
1817 * delete existing errdef
1820 bofi_errdef_free(struct bofi_errent
*ep
)
1822 struct bofi_errent
*hep
, *prev_hep
;
1823 struct bofi_link
*lp
, *prev_lp
, *next_lp
;
1824 struct bofi_shadow
*hp
;
1826 mutex_enter(&bofi_low_mutex
);
1827 mutex_enter(&bofi_mutex
);
1829 * don't just assume its a valid ep - check that its on the
1833 for (hep
= errent_listp
; hep
!= NULL
; ) {
1840 mutex_exit(&bofi_mutex
);
1841 mutex_exit(&bofi_low_mutex
);
1845 * found it - delete from in-use list
1849 prev_hep
->next
= hep
->next
;
1851 errent_listp
= hep
->next
;
1853 * and take it off the per-clone list
1855 hep
->cnext
->cprev
= hep
->cprev
;
1856 hep
->cprev
->cnext
= hep
->cnext
;
1858 * see if we are on any shadow handle link lists - and if we
1859 * are then take us off
1861 for (hp
= shadow_list
.next
; hp
!= &shadow_list
; hp
= hp
->next
) {
1863 for (lp
= hp
->link
; lp
!= NULL
; ) {
1864 if (lp
->errentp
== ep
) {
1866 prev_lp
->link
= lp
->link
;
1868 hp
->link
= lp
->link
;
1870 lp
->link
= bofi_link_freelist
;
1871 bofi_link_freelist
= lp
;
1879 mutex_exit(&bofi_mutex
);
1880 mutex_exit(&bofi_low_mutex
);
1882 cv_destroy(&ep
->cv
);
1883 kmem_free(ep
->name
, ep
->errdef
.namesize
+1);
1884 if ((ep
->errdef
.access_type
& BOFI_LOG
) &&
1885 ep
->errdef
.log
.logsize
&& ep
->logbase
) /* double check */
1886 kmem_free(ep
->logbase
,
1887 sizeof (struct acc_log_elem
) * ep
->errdef
.log
.logsize
);
1889 if (ep
->softintr_id
)
1890 ddi_remove_softintr(ep
->softintr_id
);
1891 kmem_free(ep
, sizeof (struct bofi_errent
));
1897 * start all errdefs corresponding to this name and instance
1900 bofi_start(struct bofi_errctl
*errctlp
, char *namep
)
1902 struct bofi_errent
*ep
;
1905 * look for any errdefs with matching name and instance
1907 mutex_enter(&bofi_low_mutex
);
1908 for (ep
= errent_listp
; ep
!= NULL
; ep
= ep
->next
)
1909 if (strncmp(namep
, ep
->name
, NAMESIZE
) == 0 &&
1910 errctlp
->instance
== ep
->errdef
.instance
) {
1911 ep
->state
|= BOFI_DEV_ACTIVE
;
1912 (void) drv_getparm(TIME
, &(ep
->errdef
.log
.start_time
));
1913 ep
->errdef
.log
.stop_time
= 0ul;
1915 mutex_exit(&bofi_low_mutex
);
1920 * stop all errdefs corresponding to this name and instance
1923 bofi_stop(struct bofi_errctl
*errctlp
, char *namep
)
1925 struct bofi_errent
*ep
;
1928 * look for any errdefs with matching name and instance
1930 mutex_enter(&bofi_low_mutex
);
1931 for (ep
= errent_listp
; ep
!= NULL
; ep
= ep
->next
)
1932 if (strncmp(namep
, ep
->name
, NAMESIZE
) == 0 &&
1933 errctlp
->instance
== ep
->errdef
.instance
) {
1934 ep
->state
&= ~BOFI_DEV_ACTIVE
;
1935 if (ep
->errdef
.log
.stop_time
== 0ul)
1936 (void) drv_getparm(TIME
,
1937 &(ep
->errdef
.log
.stop_time
));
1939 mutex_exit(&bofi_low_mutex
);
1944 * wake up any thread waiting on this errdefs
1947 bofi_signal(caddr_t arg
)
1949 struct bofi_errdef
*edp
= (struct bofi_errdef
*)arg
;
1950 struct bofi_errent
*hep
;
1951 struct bofi_errent
*ep
=
1952 (struct bofi_errent
*)(uintptr_t)edp
->errdef_handle
;
1954 mutex_enter(&bofi_low_mutex
);
1955 for (hep
= errent_listp
; hep
!= NULL
; ) {
1961 mutex_exit(&bofi_low_mutex
);
1962 return (DDI_INTR_UNCLAIMED
);
1964 if ((ep
->errdef
.access_type
& BOFI_LOG
) &&
1965 (edp
->log
.flags
& BOFI_LOG_FULL
)) {
1966 edp
->log
.stop_time
= bofi_gettime();
1967 ep
->state
|= BOFI_NEW_MESSAGE
;
1968 if (ep
->state
& BOFI_MESSAGE_WAIT
)
1969 cv_broadcast(&ep
->cv
);
1970 ep
->state
&= ~BOFI_MESSAGE_WAIT
;
1972 if (ep
->errstate
.msg_time
!= 0) {
1973 ep
->state
|= BOFI_NEW_MESSAGE
;
1974 if (ep
->state
& BOFI_MESSAGE_WAIT
)
1975 cv_broadcast(&ep
->cv
);
1976 ep
->state
&= ~BOFI_MESSAGE_WAIT
;
1978 mutex_exit(&bofi_low_mutex
);
1979 return (DDI_INTR_CLAIMED
);
1984 * wake up all errdefs corresponding to this name and instance
1987 bofi_broadcast(struct bofi_errctl
*errctlp
, char *namep
)
1989 struct bofi_errent
*ep
;
1992 * look for any errdefs with matching name and instance
1994 mutex_enter(&bofi_low_mutex
);
1995 for (ep
= errent_listp
; ep
!= NULL
; ep
= ep
->next
)
1996 if (strncmp(namep
, ep
->name
, NAMESIZE
) == 0 &&
1997 errctlp
->instance
== ep
->errdef
.instance
) {
2001 ep
->state
|= BOFI_NEW_MESSAGE
;
2002 if (ep
->state
& BOFI_MESSAGE_WAIT
)
2003 cv_broadcast(&ep
->cv
);
2004 ep
->state
&= ~BOFI_MESSAGE_WAIT
;
2006 mutex_exit(&bofi_low_mutex
);
2011 * clear "acc_chk" for all errdefs corresponding to this name and instance
2015 bofi_clear_acc_chk(struct bofi_errctl
*errctlp
, char *namep
)
2017 struct bofi_errent
*ep
;
2020 * look for any errdefs with matching name and instance
2022 mutex_enter(&bofi_low_mutex
);
2023 for (ep
= errent_listp
; ep
!= NULL
; ep
= ep
->next
)
2024 if (strncmp(namep
, ep
->name
, NAMESIZE
) == 0 &&
2025 errctlp
->instance
== ep
->errdef
.instance
) {
2026 mutex_enter(&bofi_mutex
);
2027 if (ep
->errdef
.access_count
== 0 &&
2028 ep
->errdef
.fail_count
== 0)
2029 ep
->errdef
.acc_chk
= 0;
2030 mutex_exit(&bofi_mutex
);
2034 ep
->state
|= BOFI_NEW_MESSAGE
;
2035 if (ep
->state
& BOFI_MESSAGE_WAIT
)
2036 cv_broadcast(&ep
->cv
);
2037 ep
->state
&= ~BOFI_MESSAGE_WAIT
;
2039 mutex_exit(&bofi_low_mutex
);
2044 * set "fail_count" to 0 for all errdefs corresponding to this name and instance
2045 * whose "access_count" has expired, set "acc_chk" to 0 and wake them up.
2048 bofi_clear_errors(struct bofi_errctl
*errctlp
, char *namep
)
2050 struct bofi_errent
*ep
;
2053 * look for any errdefs with matching name and instance
2055 mutex_enter(&bofi_low_mutex
);
2056 for (ep
= errent_listp
; ep
!= NULL
; ep
= ep
->next
)
2057 if (strncmp(namep
, ep
->name
, NAMESIZE
) == 0 &&
2058 errctlp
->instance
== ep
->errdef
.instance
) {
2059 mutex_enter(&bofi_mutex
);
2060 if (ep
->errdef
.access_count
== 0) {
2061 ep
->errdef
.acc_chk
= 0;
2062 ep
->errdef
.fail_count
= 0;
2063 mutex_exit(&bofi_mutex
);
2064 if (ep
->errdef
.log
.stop_time
== 0ul)
2065 (void) drv_getparm(TIME
,
2066 &(ep
->errdef
.log
.stop_time
));
2068 mutex_exit(&bofi_mutex
);
2072 ep
->state
|= BOFI_NEW_MESSAGE
;
2073 if (ep
->state
& BOFI_MESSAGE_WAIT
)
2074 cv_broadcast(&ep
->cv
);
2075 ep
->state
&= ~BOFI_MESSAGE_WAIT
;
2077 mutex_exit(&bofi_low_mutex
);
2082 * set "access_count" and "fail_count" to 0 for all errdefs corresponding to
2083 * this name and instance, set "acc_chk" to 0, and wake them up.
2086 bofi_clear_errdefs(struct bofi_errctl
*errctlp
, char *namep
)
2088 struct bofi_errent
*ep
;
2091 * look for any errdefs with matching name and instance
2093 mutex_enter(&bofi_low_mutex
);
2094 for (ep
= errent_listp
; ep
!= NULL
; ep
= ep
->next
)
2095 if (strncmp(namep
, ep
->name
, NAMESIZE
) == 0 &&
2096 errctlp
->instance
== ep
->errdef
.instance
) {
2097 mutex_enter(&bofi_mutex
);
2098 ep
->errdef
.acc_chk
= 0;
2099 ep
->errdef
.access_count
= 0;
2100 ep
->errdef
.fail_count
= 0;
2101 mutex_exit(&bofi_mutex
);
2102 if (ep
->errdef
.log
.stop_time
== 0ul)
2103 (void) drv_getparm(TIME
,
2104 &(ep
->errdef
.log
.stop_time
));
2108 ep
->state
|= BOFI_NEW_MESSAGE
;
2109 if (ep
->state
& BOFI_MESSAGE_WAIT
)
2110 cv_broadcast(&ep
->cv
);
2111 ep
->state
&= ~BOFI_MESSAGE_WAIT
;
2113 mutex_exit(&bofi_low_mutex
);
2118 * get state for this errdef
2121 bofi_errdef_check(struct bofi_errstate
*errstatep
, struct acc_log_elem
**logpp
)
2123 struct bofi_errent
*hep
;
2124 struct bofi_errent
*ep
;
2126 ep
= (struct bofi_errent
*)(uintptr_t)errstatep
->errdef_handle
;
2127 mutex_enter(&bofi_low_mutex
);
2129 * don't just assume its a valid ep - check that its on the
2132 for (hep
= errent_listp
; hep
!= NULL
; hep
= hep
->next
)
2136 mutex_exit(&bofi_low_mutex
);
2139 mutex_enter(&bofi_mutex
);
2140 ep
->errstate
.access_count
= ep
->errdef
.access_count
;
2141 ep
->errstate
.fail_count
= ep
->errdef
.fail_count
;
2142 ep
->errstate
.acc_chk
= ep
->errdef
.acc_chk
;
2143 ep
->errstate
.log
= ep
->errdef
.log
;
2144 *logpp
= ep
->logbase
;
2145 *errstatep
= ep
->errstate
;
2146 mutex_exit(&bofi_mutex
);
2147 mutex_exit(&bofi_low_mutex
);
2153 * Wait for a ddi_report_fault message to come back for this errdef
2154 * Then return state for this errdef.
2155 * fault report is intercepted by bofi_post_event, which triggers
2156 * bofi_signal via a softint, which will wake up this routine if
2160 bofi_errdef_check_w(struct bofi_errstate
*errstatep
,
2161 struct acc_log_elem
**logpp
)
2163 struct bofi_errent
*hep
;
2164 struct bofi_errent
*ep
;
2167 ep
= (struct bofi_errent
*)(uintptr_t)errstatep
->errdef_handle
;
2168 mutex_enter(&bofi_low_mutex
);
2171 * don't just assume its a valid ep - check that its on the
2174 for (hep
= errent_listp
; hep
!= NULL
; hep
= hep
->next
)
2178 mutex_exit(&bofi_low_mutex
);
2182 * wait for ddi_report_fault for the devinfo corresponding
2185 if (rval
== 0 && !(ep
->state
& BOFI_NEW_MESSAGE
)) {
2186 ep
->state
|= BOFI_MESSAGE_WAIT
;
2187 if (cv_wait_sig(&ep
->cv
, &bofi_low_mutex
) == 0) {
2188 if (!(ep
->state
& BOFI_NEW_MESSAGE
))
2193 ep
->state
&= ~BOFI_NEW_MESSAGE
;
2195 * we either didn't need to sleep, we've been woken up or we've been
2196 * signaled - either way return state now
2198 mutex_enter(&bofi_mutex
);
2199 ep
->errstate
.access_count
= ep
->errdef
.access_count
;
2200 ep
->errstate
.fail_count
= ep
->errdef
.fail_count
;
2201 ep
->errstate
.acc_chk
= ep
->errdef
.acc_chk
;
2202 ep
->errstate
.log
= ep
->errdef
.log
;
2203 *logpp
= ep
->logbase
;
2204 *errstatep
= ep
->errstate
;
2205 mutex_exit(&bofi_mutex
);
2206 mutex_exit(&bofi_low_mutex
);
2212 * support routine - check if requested driver is defined as under test in the
2216 driver_under_test(dev_info_t
*rdip
)
2222 rname
= ddi_get_name(rdip
);
2223 rmaj
= ddi_name_to_major(rname
);
2226 * Enforce the user to specifically request the following drivers.
2228 for (i
= 0; i
< driver_list_size
; i
+= (1 + strlen(&driver_list
[i
]))) {
2229 if (driver_list_neg
== 0) {
2230 if (rmaj
== ddi_name_to_major(&driver_list
[i
]))
2233 if (rmaj
== ddi_name_to_major(&driver_list
[i
+1]))
2237 if (driver_list_neg
== 0)
2246 log_acc_event(struct bofi_errent
*ep
, uint_t at
, offset_t offset
, off_t len
,
2247 size_t repcount
, uint64_t *valuep
)
2249 struct bofi_errdef
*edp
= &(ep
->errdef
);
2250 struct acc_log
*log
= &edp
->log
;
2252 ASSERT(log
!= NULL
);
2253 ASSERT(MUTEX_HELD(&bofi_mutex
));
2255 if (log
->flags
& BOFI_LOG_REPIO
)
2257 else if (repcount
== 0 && edp
->access_count
> 0 &&
2258 (log
->flags
& BOFI_LOG_FULL
) == 0)
2259 edp
->access_count
+= 1;
2261 if (repcount
&& log
->entries
< log
->logsize
) {
2262 struct acc_log_elem
*elem
= ep
->logbase
+ log
->entries
;
2264 if (log
->flags
& BOFI_LOG_TIMESTAMP
)
2265 elem
->access_time
= bofi_gettime();
2266 elem
->access_type
= at
;
2267 elem
->offset
= offset
;
2268 elem
->value
= valuep
? *valuep
: 0ll;
2270 elem
->repcount
= repcount
;
2272 if (log
->entries
== log
->logsize
) {
2273 log
->flags
|= BOFI_LOG_FULL
;
2274 ddi_trigger_softintr(((struct bofi_errent
*)
2275 (uintptr_t)edp
->errdef_handle
)->softintr_id
);
2278 if ((log
->flags
& BOFI_LOG_WRAP
) && edp
->access_count
<= 1) {
2280 edp
->access_count
= log
->logsize
;
2281 log
->entries
= 0; /* wrap back to the start */
2287 * got a condition match on dma read/write - check counts and corrupt
2290 * bofi_mutex always held when this is called.
2293 do_dma_corrupt(struct bofi_shadow
*hp
, struct bofi_errent
*ep
,
2294 uint_t synctype
, off_t off
, off_t length
)
2302 ddi_dma_impl_t
*hdlp
;
2305 ASSERT(MUTEX_HELD(&bofi_mutex
));
2306 if ((ep
->errdef
.access_count
||
2307 ep
->errdef
.fail_count
) &&
2308 (ep
->errdef
.access_type
& BOFI_LOG
)) {
2311 if (synctype
== DDI_DMA_SYNC_FORDEV
)
2313 else if (synctype
== DDI_DMA_SYNC_FORCPU
||
2314 synctype
== DDI_DMA_SYNC_FORKERNEL
)
2318 if ((off
<= ep
->errdef
.offset
&&
2319 off
+ length
> ep
->errdef
.offset
) ||
2320 (off
> ep
->errdef
.offset
&&
2321 off
< ep
->errdef
.offset
+ ep
->errdef
.len
)) {
2322 logaddr
= (caddr_t
)((uintptr_t)(hp
->addr
+
2323 off
+ LLSZMASK
) & ~LLSZMASK
);
2325 log_acc_event(ep
, atype
, logaddr
- hp
->addr
,
2329 if (ep
->errdef
.access_count
> 1) {
2330 ep
->errdef
.access_count
--;
2331 } else if (ep
->errdef
.fail_count
> 0) {
2332 ep
->errdef
.fail_count
--;
2333 ep
->errdef
.access_count
= 0;
2335 * OK do the corruption
2337 if (ep
->errstate
.fail_time
== 0)
2338 ep
->errstate
.fail_time
= bofi_gettime();
2340 * work out how much to corrupt
2342 * Make sure endaddr isn't greater than hp->addr + hp->len.
2343 * If endaddr becomes less than addr len becomes negative
2344 * and the following loop isn't entered.
2346 addr
= (uint64_t *)((uintptr_t)((hp
->addr
+
2347 ep
->errdef
.offset
) + LLSZMASK
) & ~LLSZMASK
);
2348 endaddr
= (uint64_t *)((uintptr_t)(hp
->addr
+ min(hp
->len
,
2349 ep
->errdef
.offset
+ ep
->errdef
.len
)) & ~LLSZMASK
);
2350 len
= endaddr
- addr
;
2351 operand
= ep
->errdef
.operand
;
2352 hdlp
= (ddi_dma_impl_t
*)(hp
->hdl
.dma_handle
);
2353 errp
= &hdlp
->dmai_error
;
2354 if (ep
->errdef
.acc_chk
& 2) {
2356 char buf
[FM_MAX_CLASS
];
2358 errp
->err_status
= DDI_FM_NONFATAL
;
2359 (void) snprintf(buf
, FM_MAX_CLASS
, FM_SIMULATED_DMA
);
2360 ena
= fm_ena_generate(0, FM_ENA_FMT1
);
2361 ddi_fm_ereport_post(hp
->dip
, buf
, ena
,
2362 DDI_NOSLEEP
, FM_VERSION
, DATA_TYPE_UINT8
,
2363 FM_EREPORT_VERS0
, NULL
);
2365 switch (ep
->errdef
.optype
) {
2367 for (i
= 0; i
< len
; i
++)
2368 *(addr
+ i
) = operand
;
2371 for (i
= 0; i
< len
; i
++)
2372 *(addr
+ i
) &= operand
;
2375 for (i
= 0; i
< len
; i
++)
2376 *(addr
+ i
) |= operand
;
2379 for (i
= 0; i
< len
; i
++)
2380 *(addr
+ i
) ^= operand
;
2390 static uint64_t do_bofi_rd8(struct bofi_shadow
*, caddr_t
);
2391 static uint64_t do_bofi_rd16(struct bofi_shadow
*, caddr_t
);
2392 static uint64_t do_bofi_rd32(struct bofi_shadow
*, caddr_t
);
2393 static uint64_t do_bofi_rd64(struct bofi_shadow
*, caddr_t
);
2397 * check all errdefs linked to this shadow handle. If we've got a condition
2398 * match check counts and corrupt data if necessary
2400 * bofi_mutex always held when this is called.
2402 * because of possibility of BOFI_NO_TRANSFER, we couldn't get data
2403 * from io-space before calling this, so we pass in the func to do the
2404 * transfer as a parameter.
2407 do_pior_corrupt(struct bofi_shadow
*hp
, caddr_t addr
,
2408 uint64_t (*func
)(), size_t repcount
, size_t accsize
)
2410 struct bofi_errent
*ep
;
2411 struct bofi_link
*lp
;
2416 uint64_t get_val
, gv
;
2417 ddi_acc_impl_t
*hdlp
;
2420 ASSERT(MUTEX_HELD(&bofi_mutex
));
2422 * check through all errdefs associated with this shadow handle
2424 for (lp
= hp
->link
; lp
!= NULL
; lp
= lp
->link
) {
2426 if (ep
->errdef
.len
== 0)
2429 minlen
= min(hp
->len
, ep
->errdef
.len
);
2430 base
= addr
- hp
->addr
- ep
->errdef
.offset
+ hp
->offset
;
2431 if ((ep
->errdef
.access_type
& BOFI_PIO_R
) &&
2432 (ep
->state
& BOFI_DEV_ACTIVE
) &&
2433 base
>= 0 && base
< minlen
) {
2435 * condition match for pio read
2437 if (ep
->errdef
.access_count
> 1) {
2438 ep
->errdef
.access_count
--;
2439 if (done_get
== 0) {
2441 gv
= get_val
= func(hp
, addr
);
2443 if (ep
->errdef
.access_type
& BOFI_LOG
) {
2444 log_acc_event(ep
, BOFI_PIO_R
,
2446 accsize
, repcount
, &gv
);
2448 } else if (ep
->errdef
.fail_count
> 0) {
2449 ep
->errdef
.fail_count
--;
2450 ep
->errdef
.access_count
= 0;
2454 if (ep
->errstate
.fail_time
== 0)
2455 ep
->errstate
.fail_time
= bofi_gettime();
2456 operand
= ep
->errdef
.operand
;
2457 if (done_get
== 0) {
2458 if (ep
->errdef
.optype
==
2461 * no transfer - bomb out
2465 gv
= get_val
= func(hp
, addr
);
2468 if (ep
->errdef
.access_type
& BOFI_LOG
) {
2469 log_acc_event(ep
, BOFI_PIO_R
,
2471 accsize
, repcount
, &gv
);
2473 hdlp
= (ddi_acc_impl_t
*)(hp
->hdl
.acc_handle
);
2474 errp
= hdlp
->ahi_err
;
2475 if (ep
->errdef
.acc_chk
& 1) {
2477 char buf
[FM_MAX_CLASS
];
2479 errp
->err_status
= DDI_FM_NONFATAL
;
2480 (void) snprintf(buf
, FM_MAX_CLASS
,
2482 ena
= fm_ena_generate(0, FM_ENA_FMT1
);
2483 ddi_fm_ereport_post(hp
->dip
, buf
, ena
,
2484 DDI_NOSLEEP
, FM_VERSION
,
2485 DATA_TYPE_UINT8
, FM_EREPORT_VERS0
,
2488 switch (ep
->errdef
.optype
) {
2509 return (func(hp
, addr
));
/*
 * check all errdefs linked to this shadow handle. If we've got a condition
 * match check counts and corrupt data if necessary
 *
 * bofi_mutex always held when this is called.
 *
 * because of possibility of BOFI_NO_TRANSFER, we return 0 if no data
 * is to be written out to io-space, 1 otherwise
 *
 * NOTE(review): this excerpt drops interior lines; elided statements
 * (declarations, case labels, break/return statements, closing braces)
 * are reconstructed from context — verify against the full source file.
 */
static int
do_piow_corrupt(struct bofi_shadow *hp, caddr_t addr, uint64_t *valuep,
    size_t size, size_t repcount)
{
	struct bofi_errent *ep;
	struct bofi_link *lp;
	uintptr_t minlen;	/* NOTE(review): decl elided in excerpt */
	intptr_t base;		/* NOTE(review): decl elided in excerpt */
	uint64_t v = *valuep;	/* original value, kept for access logging */
	ddi_acc_impl_t *hdlp;
	ndi_err_t *errp;	/* NOTE(review): decl elided in excerpt */

	ASSERT(MUTEX_HELD(&bofi_mutex));
	/*
	 * check through all errdefs associated with this shadow handle
	 */
	for (lp = hp->link; lp != NULL; lp = lp->link) {
		ep = lp->errentp;	/* NOTE(review): elided in excerpt */
		/* errdef.len == 0 means "whole mapping" */
		if (ep->errdef.len == 0)
			minlen = hp->len;
		else
			minlen = min(hp->len, ep->errdef.len);
		/* offset of this access relative to the errdef's window */
		base = (caddr_t)addr - hp->addr - ep->errdef.offset + hp->offset;
		if ((ep->errdef.access_type & BOFI_PIO_W) &&
		    (ep->state & BOFI_DEV_ACTIVE) &&
		    base >= 0 && base < minlen) {
			/*
			 * condition match for pio write
			 */
			if (ep->errdef.access_count > 1) {
				/* still counting down - log, don't corrupt */
				ep->errdef.access_count--;
				if (ep->errdef.access_type & BOFI_LOG)
					log_acc_event(ep, BOFI_PIO_W,
					    addr - hp->addr, size,
					    repcount, &v);
			} else if (ep->errdef.fail_count > 0) {
				ep->errdef.fail_count--;
				ep->errdef.access_count = 0;
				if (ep->errdef.access_type & BOFI_LOG)
					log_acc_event(ep, BOFI_PIO_W,
					    addr - hp->addr, size,
					    repcount, &v);
				/*
				 * OK do corruption - record first-failure
				 * time, then optionally post a simulated
				 * FMA ereport before mangling the value.
				 */
				if (ep->errstate.fail_time == 0)
					ep->errstate.fail_time = bofi_gettime();
				hdlp = (ddi_acc_impl_t *)(hp->hdl.acc_handle);
				errp = hdlp->ahi_err;
				if (ep->errdef.acc_chk & 1) {
					uint64_t ena;
					char buf[FM_MAX_CLASS];

					errp->err_status = DDI_FM_NONFATAL;
					/* NOTE(review): class-string argument
					 * of snprintf elided in excerpt */
					(void) snprintf(buf, FM_MAX_CLASS,
					    FM_SIMULATED_PIO);
					ena = fm_ena_generate(0, FM_ENA_FMT1);
					/* NOTE(review): trailing NULL arg
					 * elided in excerpt */
					ddi_fm_ereport_post(hp->dip, buf, ena,
					    DDI_NOSLEEP, FM_VERSION,
					    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
					    NULL);
				}
				/*
				 * apply the errdef's operator to the value
				 * about to be written out
				 */
				switch (ep->errdef.optype) {
				case BOFI_EQUAL:
					*valuep = ep->errdef.operand;
					break;
				case BOFI_AND:
					*valuep &= ep->errdef.operand;
					break;
				case BOFI_OR:
					*valuep |= ep->errdef.operand;
					break;
				case BOFI_XOR:
					*valuep ^= ep->errdef.operand;
					break;
				case BOFI_NO_TRANSFER:
					/*
					 * no transfer - bomb out
					 */
					return (0);
				default:
					break;
				}
			}
		}
	}
	return (1);
}
/*
 * Pass-through 8-bit read: invoke the nexus's saved ahi_get8 on the real
 * address. Used as the "func" callback handed to do_pior_corrupt().
 * NOTE(review): return-type line and braces elided in this excerpt;
 * reconstructed from context.
 */
static uint64_t
do_bofi_rd8(struct bofi_shadow *hp, caddr_t addr)
{
	return (hp->save.acc.ahi_get8(&hp->save.acc, (uint8_t *)addr));
}
/*
 * Common sanity checks for the ddi_get() intercepts: when bofi_ddi_check is
 * set, rebase the spurious address (the +64 handed out by bofi_map) back to
 * the real mapping; when bofi_range_check is set, warn (or panic if == 2)
 * on accesses outside the shadow handle's range. NOTE(review): the macro's
 * tail ("return (0); }") is elided in this excerpt. The message here has no
 * trailing "\n" unlike BOFI_WRITE_TESTS — harmless, cmn_err adds one.
 */
#define BOFI_READ_CHECKS(type) \
	if (bofi_ddi_check) \
		addr = (type *)((uintptr_t)addr - 64 + hp->addr); \
	if (bofi_range_check && ((caddr_t)addr < hp->addr || \
	    (caddr_t)addr - hp->addr >= hp->len)) { \
		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
		    "ddi_get() out of range addr %p not in %p/%llx", \
		    (void *)addr, (void *)hp->addr, hp->len); \
		return (0); \
	}
/*
 * our getb() routine - use tryenter
 *
 * If no errdefs are linked, or bofi_mutex is contended (tryenter fails,
 * e.g. interrupt context re-entry), fall straight through to the saved
 * nexus ahi_get8. NOTE(review): return-type line, retval declaration and
 * the trailing "1);"/"return (retval);" lines are elided in this excerpt.
 */
static uint8_t
bofi_rd8(ddi_acc_impl_t *handle, uint8_t *addr)
{
	struct bofi_shadow *hp;
	uint8_t retval;

	hp = handle->ahi_common.ah_bus_private;
	BOFI_READ_CHECKS(uint8_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex))
		return (hp->save.acc.ahi_get8(&hp->save.acc, addr));
	retval = (uint8_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd8, 1,
	    1);
	mutex_exit(&bofi_mutex);
	return (retval);
}
/*
 * Pass-through 16-bit read via the saved nexus ahi_get16; callback for
 * do_pior_corrupt(). NOTE(review): return-type line and braces elided in
 * this excerpt; reconstructed from context.
 */
static uint64_t
do_bofi_rd16(struct bofi_shadow *hp, caddr_t addr)
{
	return (hp->save.acc.ahi_get16(&hp->save.acc, (uint16_t *)addr));
}
/*
 * our getw() routine - use tryenter
 *
 * Mirrors bofi_rd8 for 16-bit accesses (access size 2 passed to
 * do_pior_corrupt). NOTE(review): return-type line, retval declaration and
 * trailing "2);"/"return (retval);" lines are elided in this excerpt.
 */
static uint16_t
bofi_rd16(ddi_acc_impl_t *handle, uint16_t *addr)
{
	struct bofi_shadow *hp;
	uint16_t retval;

	hp = handle->ahi_common.ah_bus_private;
	BOFI_READ_CHECKS(uint16_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex))
		return (hp->save.acc.ahi_get16(&hp->save.acc, addr));
	retval = (uint16_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd16, 1,
	    2);
	mutex_exit(&bofi_mutex);
	return (retval);
}
2681 do_bofi_rd32(struct bofi_shadow
*hp
, caddr_t addr
)
2683 return (hp
->save
.acc
.ahi_get32(&hp
->save
.acc
, (uint32_t *)addr
));
/*
 * our getl() routine - use tryenter
 *
 * Mirrors bofi_rd8 for 32-bit accesses (access size 4). NOTE(review):
 * return-type line, retval declaration and trailing "4);"/"return
 * (retval);" lines are elided in this excerpt.
 */
static uint32_t
bofi_rd32(ddi_acc_impl_t *handle, uint32_t *addr)
{
	struct bofi_shadow *hp;
	uint32_t retval;

	hp = handle->ahi_common.ah_bus_private;
	BOFI_READ_CHECKS(uint32_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex))
		return (hp->save.acc.ahi_get32(&hp->save.acc, addr));
	retval = (uint32_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd32, 1,
	    4);
	mutex_exit(&bofi_mutex);
	return (retval);
}
/*
 * Pass-through 64-bit read via the saved nexus ahi_get64; callback for
 * do_pior_corrupt(). NOTE(review): return-type line and braces elided in
 * this excerpt; reconstructed from context.
 */
static uint64_t
do_bofi_rd64(struct bofi_shadow *hp, caddr_t addr)
{
	return (hp->save.acc.ahi_get64(&hp->save.acc, (uint64_t *)addr));
}
/*
 * our getll() routine - use tryenter
 *
 * Mirrors bofi_rd8 for 64-bit accesses (access size 8). NOTE(review):
 * return-type line, retval declaration and trailing "8);"/"return
 * (retval);" lines are elided in this excerpt.
 */
static uint64_t
bofi_rd64(ddi_acc_impl_t *handle, uint64_t *addr)
{
	struct bofi_shadow *hp;
	uint64_t retval;

	hp = handle->ahi_common.ah_bus_private;
	BOFI_READ_CHECKS(uint64_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex))
		return (hp->save.acc.ahi_get64(&hp->save.acc, addr));
	retval = (uint64_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd64, 1,
	    8);
	mutex_exit(&bofi_mutex);
	return (retval);
}
/*
 * Common sanity checks for the ddi_put() intercepts; same rebasing and
 * range-check logic as BOFI_READ_CHECKS but returns void on a range
 * violation. NOTE(review): the macro's tail ("return; }") is elided in
 * this excerpt.
 */
#define BOFI_WRITE_TESTS(type) \
	if (bofi_ddi_check) \
		addr = (type *)((uintptr_t)addr - 64 + hp->addr); \
	if (bofi_range_check && ((caddr_t)addr < hp->addr || \
	    (caddr_t)addr - hp->addr >= hp->len)) { \
		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
		    "ddi_put() out of range addr %p not in %p/%llx\n", \
		    (void *)addr, (void *)hp->addr, hp->len); \
		return; \
	}
/*
 * our putb() routine - use tryenter
 *
 * Widen the value to 64 bits so do_piow_corrupt can apply any operator;
 * only perform the real ahi_put8 if do_piow_corrupt says the transfer
 * should go ahead (it returns 0 for BOFI_NO_TRANSFER). NOTE(review):
 * return-type line, "return;" in the tryenter-failure arm, and closing
 * braces are elided in this excerpt.
 */
static void
bofi_wr8(ddi_acc_impl_t *handle, uint8_t *addr, uint8_t value)
{
	struct bofi_shadow *hp;
	uint64_t llvalue = value;

	hp = handle->ahi_common.ah_bus_private;
	BOFI_WRITE_TESTS(uint8_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_put8(&hp->save.acc, addr, (uint8_t)llvalue);
		return;
	}
	if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 1, 1))
		hp->save.acc.ahi_put8(&hp->save.acc, addr, (uint8_t)llvalue);
	mutex_exit(&bofi_mutex);
}
/*
 * our putw() routine - use tryenter
 *
 * Mirrors bofi_wr8 for 16-bit writes (size 2 to do_piow_corrupt).
 * NOTE(review): return-type line, "return;" in the tryenter-failure arm,
 * and closing braces are elided in this excerpt.
 */
static void
bofi_wr16(ddi_acc_impl_t *handle, uint16_t *addr, uint16_t value)
{
	struct bofi_shadow *hp;
	uint64_t llvalue = value;

	hp = handle->ahi_common.ah_bus_private;
	BOFI_WRITE_TESTS(uint16_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_put16(&hp->save.acc, addr, (uint16_t)llvalue);
		return;
	}
	if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 2, 1))
		hp->save.acc.ahi_put16(&hp->save.acc, addr, (uint16_t)llvalue);
	mutex_exit(&bofi_mutex);
}
/*
 * our putl() routine - use tryenter
 *
 * Mirrors bofi_wr8 for 32-bit writes (size 4 to do_piow_corrupt).
 * NOTE(review): return-type line, "return;" in the tryenter-failure arm,
 * and closing braces are elided in this excerpt.
 */
static void
bofi_wr32(ddi_acc_impl_t *handle, uint32_t *addr, uint32_t value)
{
	struct bofi_shadow *hp;
	uint64_t llvalue = value;

	hp = handle->ahi_common.ah_bus_private;
	BOFI_WRITE_TESTS(uint32_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_put32(&hp->save.acc, addr, (uint32_t)llvalue);
		return;
	}
	if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 4, 1))
		hp->save.acc.ahi_put32(&hp->save.acc, addr, (uint32_t)llvalue);
	mutex_exit(&bofi_mutex);
}
/*
 * our putll() routine - use tryenter
 *
 * Mirrors bofi_wr8 for 64-bit writes (size 8 to do_piow_corrupt).
 * NOTE(review): return-type line, "return;" in the tryenter-failure arm,
 * and closing braces are elided in this excerpt.
 */
static void
bofi_wr64(ddi_acc_impl_t *handle, uint64_t *addr, uint64_t value)
{
	struct bofi_shadow *hp;
	uint64_t llvalue = value;

	hp = handle->ahi_common.ah_bus_private;
	BOFI_WRITE_TESTS(uint64_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_put64(&hp->save.acc, addr, (uint64_t)llvalue);
		return;
	}
	if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 8, 1))
		hp->save.acc.ahi_put64(&hp->save.acc, addr, (uint64_t)llvalue);
	mutex_exit(&bofi_mutex);
}
/*
 * Range/rebase checks for the ddi_rep_get() intercepts. If the whole
 * repcount run doesn't fit but the start address is in range, the
 * repcount is clipped to the end of the mapping rather than bailing out.
 * NOTE(review): the "return;" line (taken when even the start address is
 * out of range) and the closing "}" are elided in this excerpt.
 */
#define BOFI_REP_READ_TESTS(type) \
	if (bofi_ddi_check) \
		dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \
	if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \
	    (caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \
		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
		    "ddi_rep_get() out of range addr %p not in %p/%llx\n", \
		    (void *)dev_addr, (void *)hp->addr, hp->len); \
		if ((caddr_t)dev_addr < hp->addr || \
		    (caddr_t)dev_addr - hp->addr >= hp->len) \
			return; \
		repcount = (type *)(hp->addr + hp->len) - dev_addr; \
	}
/*
 * our rep_getb() routine - use tryenter
 *
 * Reads each element via do_pior_corrupt; "i ? 0 : repcount" tells the
 * corruption logic the run length only on the first element so an errdef's
 * access count is charged once per rep operation, not per element.
 * NOTE(review): return-type line, local declarations (i, addr), the
 * "repcount, flags); return;" tail of the fallback arm and closing braces
 * are elided in this excerpt.
 */
static void
bofi_rep_rd8(ddi_acc_impl_t *handle, uint8_t *host_addr, uint8_t *dev_addr,
    size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp;
	int i;
	uint8_t *addr;

	hp = handle->ahi_common.ah_bus_private;
	BOFI_REP_READ_TESTS(uint8_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_get8(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (i = 0; i < repcount; i++) {
		/* DDI_DEV_AUTOINCR steps through device memory; else fixed */
		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
		*(host_addr + i) = (uint8_t)do_pior_corrupt(hp, (caddr_t)addr,
		    do_bofi_rd8, i ? 0 : repcount, 1);
	}
	mutex_exit(&bofi_mutex);
}
/*
 * our rep_getw() routine - use tryenter
 *
 * Mirrors bofi_rep_rd8 for 16-bit elements (access size 2). NOTE(review):
 * return-type line, local declarations, fallback-arm tail and closing
 * braces are elided in this excerpt.
 */
static void
bofi_rep_rd16(ddi_acc_impl_t *handle, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp;
	int i;
	uint16_t *addr;

	hp = handle->ahi_common.ah_bus_private;
	BOFI_REP_READ_TESTS(uint16_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_get16(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (i = 0; i < repcount; i++) {
		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
		*(host_addr + i) = (uint16_t)do_pior_corrupt(hp, (caddr_t)addr,
		    do_bofi_rd16, i ? 0 : repcount, 2);
	}
	mutex_exit(&bofi_mutex);
}
/*
 * our rep_getl() routine - use tryenter
 *
 * Mirrors bofi_rep_rd8 for 32-bit elements (access size 4). NOTE(review):
 * return-type line, local declarations, fallback-arm tail and closing
 * braces are elided in this excerpt.
 */
static void
bofi_rep_rd32(ddi_acc_impl_t *handle, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp;
	int i;
	uint32_t *addr;

	hp = handle->ahi_common.ah_bus_private;
	BOFI_REP_READ_TESTS(uint32_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_get32(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (i = 0; i < repcount; i++) {
		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
		*(host_addr + i) = (uint32_t)do_pior_corrupt(hp, (caddr_t)addr,
		    do_bofi_rd32, i ? 0 : repcount, 4);
	}
	mutex_exit(&bofi_mutex);
}
/*
 * our rep_getll() routine - use tryenter
 *
 * Mirrors bofi_rep_rd8 for 64-bit elements (access size 8). NOTE(review):
 * return-type line, local declarations, fallback-arm tail and closing
 * braces are elided in this excerpt.
 */
static void
bofi_rep_rd64(ddi_acc_impl_t *handle, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp;
	int i;
	uint64_t *addr;

	hp = handle->ahi_common.ah_bus_private;
	BOFI_REP_READ_TESTS(uint64_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_get64(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (i = 0; i < repcount; i++) {
		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
		*(host_addr + i) = (uint64_t)do_pior_corrupt(hp, (caddr_t)addr,
		    do_bofi_rd64, i ? 0 : repcount, 8);
	}
	mutex_exit(&bofi_mutex);
}
/*
 * Range/rebase checks for the ddi_rep_put() intercepts; same clip-or-bail
 * behavior as BOFI_REP_READ_TESTS. NOTE(review): the "return;" line and
 * the closing "}" are elided in this excerpt.
 */
#define BOFI_REP_WRITE_TESTS(type) \
	if (bofi_ddi_check) \
		dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \
	if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \
	    (caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \
		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
		    "ddi_rep_put() out of range addr %p not in %p/%llx\n", \
		    (void *)dev_addr, (void *)hp->addr, hp->len); \
		if ((caddr_t)dev_addr < hp->addr || \
		    (caddr_t)dev_addr - hp->addr >= hp->len) \
			return; \
		repcount = (type *)(hp->addr + hp->len) - dev_addr; \
	}
/*
 * our rep_putb() routine - use tryenter
 *
 * Each element is widened to 64 bits and offered to do_piow_corrupt;
 * "i ? 0 : repcount" charges the errdef's access count once per rep
 * operation. The real ahi_put8 is skipped for BOFI_NO_TRANSFER.
 * NOTE(review): return-type line, local declarations (i, llvalue, addr),
 * the fallback-arm tail, the "repcount))" continuation and closing braces
 * are elided in this excerpt.
 */
static void
bofi_rep_wr8(ddi_acc_impl_t *handle, uint8_t *host_addr, uint8_t *dev_addr,
    size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp;
	uint64_t llvalue;
	int i;
	uint8_t *addr;

	hp = handle->ahi_common.ah_bus_private;
	BOFI_REP_WRITE_TESTS(uint8_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_put8(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (i = 0; i < repcount; i++) {
		llvalue = *(host_addr + i);
		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
		if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 1, i ? 0 :
		    repcount))
			hp->save.acc.ahi_put8(&hp->save.acc, addr,
			    (uint8_t)llvalue);
	}
	mutex_exit(&bofi_mutex);
}
/*
 * our rep_putw() routine - use tryenter
 *
 * Mirrors bofi_rep_wr8 for 16-bit elements (size 2). NOTE(review):
 * return-type line, local declarations, fallback-arm tail and closing
 * braces are elided in this excerpt.
 */
static void
bofi_rep_wr16(ddi_acc_impl_t *handle, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp;
	uint64_t llvalue;
	int i;
	uint16_t *addr;

	hp = handle->ahi_common.ah_bus_private;
	BOFI_REP_WRITE_TESTS(uint16_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_put16(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (i = 0; i < repcount; i++) {
		llvalue = *(host_addr + i);
		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
		if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 2, i ? 0 :
		    repcount))
			hp->save.acc.ahi_put16(&hp->save.acc, addr,
			    (uint16_t)llvalue);
	}
	mutex_exit(&bofi_mutex);
}
/*
 * our rep_putl() routine - use tryenter
 *
 * Mirrors bofi_rep_wr8 for 32-bit elements (size 4). NOTE(review):
 * return-type line, local declarations, fallback-arm tail and closing
 * braces are elided in this excerpt.
 */
static void
bofi_rep_wr32(ddi_acc_impl_t *handle, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp;
	uint64_t llvalue;
	int i;
	uint32_t *addr;

	hp = handle->ahi_common.ah_bus_private;
	BOFI_REP_WRITE_TESTS(uint32_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_put32(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (i = 0; i < repcount; i++) {
		llvalue = *(host_addr + i);
		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
		if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 4, i ? 0 :
		    repcount))
			hp->save.acc.ahi_put32(&hp->save.acc, addr,
			    (uint32_t)llvalue);
	}
	mutex_exit(&bofi_mutex);
}
/*
 * our rep_putll() routine - use tryenter
 *
 * Mirrors bofi_rep_wr8 for 64-bit elements (size 8). NOTE(review):
 * return-type line, local declarations, fallback-arm tail and closing
 * braces are elided in this excerpt.
 */
static void
bofi_rep_wr64(ddi_acc_impl_t *handle, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp;
	uint64_t llvalue;
	int i;
	uint64_t *addr;

	hp = handle->ahi_common.ah_bus_private;
	BOFI_REP_WRITE_TESTS(uint64_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_put64(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (i = 0; i < repcount; i++) {
		llvalue = *(host_addr + i);
		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
		if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 8, i ? 0 :
		    repcount))
			hp->save.acc.ahi_put64(&hp->save.acc, addr,
			    (uint64_t)llvalue);
	}
	mutex_exit(&bofi_mutex);
}
/*
 * our ddi_map routine
 *
 * Intercepts DDI_MO_MAP_LOCKED (ddi_regs_map_setup) to wrap the access
 * handle with a shadow, and DDI_MO_UNMAP (ddi_regs_map_free) to tear it
 * down; everything else falls through to the saved nexus bus_map.
 * NOTE(review): this excerpt drops many interior lines (case labels,
 * save/restore of the handle, the spurious-address branch, list-link
 * tails, unmap path guards); the elided statements are reconstructed from
 * context and flagged where non-obvious — verify against the full source.
 */
static int
bofi_map(dev_info_t *dip, dev_info_t *rdip,
    ddi_map_req_t *reqp, off_t offset, off_t len, caddr_t *vaddrp)
{
	ddi_acc_impl_t *ap;	/* NOTE(review): decl elided in excerpt */
	struct bofi_shadow *hp;
	struct bofi_errent *ep;
	struct bofi_link   *lp, *next_lp;
	int retval;		/* NOTE(review): decl elided in excerpt */
	struct bofi_shadow *dhashp;
	struct bofi_shadow *hhashp;

	switch (reqp->map_op) {
	case DDI_MO_MAP_LOCKED:
		/*
		 * for this case get nexus to do real work first
		 */
		retval = save_bus_ops.bus_map(dip, rdip, reqp, offset, len,
		    vaddrp);
		if (retval != DDI_SUCCESS)
			return (retval);

		ap = (ddi_acc_impl_t *)reqp->map_handlep;
		if (ap == NULL)
			return (DDI_SUCCESS);
		/*
		 * if driver_list is set, only intercept those drivers
		 */
		if (!driver_under_test(ap->ahi_common.ah_dip))
			return (DDI_SUCCESS);

		/*
		 * support for ddi_regs_map_setup()
		 * - allocate shadow handle structure and fill it in
		 */
		hp = kmem_zalloc(sizeof (struct bofi_shadow), KM_SLEEP);
		(void) strncpy(hp->name, ddi_get_name(ap->ahi_common.ah_dip),
		    NAMESIZE);
		hp->instance = ddi_get_instance(ap->ahi_common.ah_dip);
		hp->dip = ap->ahi_common.ah_dip;
		hp->addr = *vaddrp;	/* NOTE(review): elided in excerpt */
		/*
		 * return spurious value to catch direct access to registers
		 */
		if (bofi_ddi_check)	/* NOTE(review): guard elided */
			*vaddrp = (caddr_t)64;
		hp->rnumber = ((ddi_acc_hdl_t *)ap)->ah_rnumber;
		hp->offset = offset;
		if (len == 0)		/* NOTE(review): guard elided */
			hp->len = INT_MAX - offset;
		else
			hp->len = min(len, INT_MAX - offset);
		hp->hdl.acc_handle = (ddi_acc_handle_t)ap;
		hp->link = NULL;	/* NOTE(review): elided in excerpt */
		hp->type = BOFI_ACC_HDL;
		/*
		 * save existing function pointers and plug in our own
		 */
		hp->save.acc = *ap;	/* NOTE(review): elided in excerpt */
		ap->ahi_get8 = bofi_rd8;
		ap->ahi_get16 = bofi_rd16;
		ap->ahi_get32 = bofi_rd32;
		ap->ahi_get64 = bofi_rd64;
		ap->ahi_put8 = bofi_wr8;
		ap->ahi_put16 = bofi_wr16;
		ap->ahi_put32 = bofi_wr32;
		ap->ahi_put64 = bofi_wr64;
		ap->ahi_rep_get8 = bofi_rep_rd8;
		ap->ahi_rep_get16 = bofi_rep_rd16;
		ap->ahi_rep_get32 = bofi_rep_rd32;
		ap->ahi_rep_get64 = bofi_rep_rd64;
		ap->ahi_rep_put8 = bofi_rep_wr8;
		ap->ahi_rep_put16 = bofi_rep_wr16;
		ap->ahi_rep_put32 = bofi_rep_wr32;
		ap->ahi_rep_put64 = bofi_rep_wr64;
		ap->ahi_fault_check = bofi_check_acc_hdl;
#if defined(__sparc)
#else
		/* force accesses through our intercepts, not direct loads */
		ap->ahi_acc_attr &= ~DDI_ACCATTR_DIRECT;
#endif
		/*
		 * stick in a pointer to our shadow handle
		 */
		ap->ahi_common.ah_bus_private = hp;
		/*
		 * add to dhash, hhash and inuse lists
		 */
		mutex_enter(&bofi_low_mutex);
		mutex_enter(&bofi_mutex);
		hp->next = shadow_list.next;
		shadow_list.next->prev = hp;
		hp->prev = &shadow_list;
		shadow_list.next = hp;
		hhashp = HDL_HHASH(ap);
		hp->hnext = hhashp->hnext;
		hhashp->hnext->hprev = hp;
		hp->hprev = hhashp;	/* NOTE(review): elided in excerpt */
		hhashp->hnext = hp;	/* NOTE(review): elided in excerpt */
		dhashp = HDL_DHASH(hp->dip);
		hp->dnext = dhashp->dnext;
		dhashp->dnext->dprev = hp;
		hp->dprev = dhashp;	/* NOTE(review): elided in excerpt */
		dhashp->dnext = hp;	/* NOTE(review): elided in excerpt */
		/*
		 * chain on any pre-existing errdefs that apply to this
		 * acc_handle
		 */
		for (ep = errent_listp; ep != NULL; ep = ep->next) {
			if (ddi_name_to_major(hp->name) ==
			    ddi_name_to_major(ep->name) &&
			    hp->instance == ep->errdef.instance &&
			    (ep->errdef.access_type & BOFI_PIO_RW) &&
			    (ep->errdef.rnumber == -1 ||
			    hp->rnumber == ep->errdef.rnumber) &&
			    (ep->errdef.len == 0 ||
			    offset < ep->errdef.offset + ep->errdef.len) &&
			    offset + hp->len > ep->errdef.offset) {
				lp = bofi_link_freelist;
				if (lp != NULL) {
					bofi_link_freelist = lp->link;
					lp->errentp = ep;
					lp->link = hp->link;
					hp->link = lp;
				}
			}
		}
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		return (DDI_SUCCESS);
	case DDI_MO_UNMAP:	/* NOTE(review): label elided in excerpt */

		ap = (ddi_acc_impl_t *)reqp->map_handlep;
		if (ap == NULL)
			break;	/* NOTE(review): elided in excerpt */
		/*
		 * support for ddi_regs_map_free()
		 * - check we really have a shadow handle for this one
		 */
		mutex_enter(&bofi_low_mutex);
		mutex_enter(&bofi_mutex);
		hhashp = HDL_HHASH(ap);
		for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
			if (hp->hdl.acc_handle == (ddi_acc_handle_t)ap)
				break;
		if (hp == hhashp) {	/* not found - nothing to undo */
			mutex_exit(&bofi_mutex);
			mutex_exit(&bofi_low_mutex);
			break;
		}
		/*
		 * got a shadow handle - restore original pointers
		 */
		*ap = hp->save.acc;	/* NOTE(review): elided in excerpt */
		*vaddrp = hp->addr;	/* NOTE(review): elided in excerpt */
		/*
		 * remove from dhash, hhash and inuse lists
		 */
		hp->hnext->hprev = hp->hprev;
		hp->hprev->hnext = hp->hnext;
		hp->dnext->dprev = hp->dprev;
		hp->dprev->dnext = hp->dnext;
		hp->next->prev = hp->prev;
		hp->prev->next = hp->next;
		/*
		 * free any errdef link structures tagged onto the shadow handle
		 */
		for (lp = hp->link; lp != NULL; ) {
			next_lp = lp->link;	/* NOTE(review): elided */
			lp->link = bofi_link_freelist;
			bofi_link_freelist = lp;
			lp = next_lp;		/* NOTE(review): elided */
		}
		hp->link = NULL;		/* NOTE(review): elided */
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		/*
		 * finally delete shadow handle
		 */
		kmem_free(hp, sizeof (struct bofi_shadow));
		break;
	default:
		break;
	}
	return (save_bus_ops.bus_map(dip, rdip, reqp, offset, len, vaddrp));
}
/*
 * chain any pre-existing errdefs on to newly created dma handle
 * if required call do_dma_corrupt() to corrupt data
 *
 * The LLSZMASK arithmetic checks that the errdef's window overlaps at
 * least one full long-long-aligned chunk of the mapping (DMA corruption
 * works on 64-bit chunks). NOTE(review): interior lines (link-on
 * statements, closing braces) are elided in this excerpt and
 * reconstructed from context.
 */
static void
chain_on_errdefs(struct bofi_shadow *hp)
{
	struct bofi_errent *ep;
	struct bofi_link   *lp;

	ASSERT(MUTEX_HELD(&bofi_mutex));
	/*
	 * chain on any pre-existing errdefs that apply to this dma_handle
	 */
	for (ep = errent_listp; ep != NULL; ep = ep->next) {
		if (ddi_name_to_major(hp->name) ==
		    ddi_name_to_major(ep->name) &&
		    hp->instance == ep->errdef.instance &&
		    (ep->errdef.rnumber == -1 ||
		    hp->rnumber == ep->errdef.rnumber) &&
		    ((ep->errdef.access_type & BOFI_DMA_RW) &&
		    (((uintptr_t)(hp->addr + ep->errdef.offset +
		    ep->errdef.len) & ~LLSZMASK) >
		    ((uintptr_t)((hp->addr + ep->errdef.offset) +
		    LLSZMASK) & ~LLSZMASK)))) {
			/*
			 * got a match - link it on
			 */
			lp = bofi_link_freelist;
			if (lp != NULL) {	/* NOTE(review): elided */
				bofi_link_freelist = lp->link;
				lp->errentp = ep;	/* NOTE(review): elided */
				lp->link = hp->link;
				hp->link = lp;		/* NOTE(review): elided */
				if ((ep->errdef.access_type & BOFI_DMA_W) &&
				    (hp->flags & DDI_DMA_WRITE) &&
				    (ep->state & BOFI_DEV_ACTIVE)) {
					do_dma_corrupt(hp, ep,
					    DDI_DMA_SYNC_FORDEV,
					    0, hp->len);	/* elided */
				}
			}
		}
	}
}
/*
 * need to do copy byte-by-byte in case one of pages is little-endian
 *
 * NOTE(review): the function body (original lines ~3328-3335) is entirely
 * elided in this excerpt — presumably a simple byte-wise copy loop from
 * "from" to "to"; confirm against the full source.
 */
static void
xbcopy(void *from, void *to, u_longlong_t len)
{
	/* body elided in this excerpt */
}
/*
 * our ddi_dma_allochdl routine
 *
 * Allocates a shadow for the new DMA handle, lets the nexus allocate the
 * real one, hooks the devinfo's cached bind/unbind functions, assigns a
 * unique pseudo-rnumber and links the shadow onto the hash/inuse lists.
 * NOTE(review): this excerpt drops interior lines (declarations,
 * allocation-failure branch, list-link tails, final return); elided
 * statements are reconstructed from context — verify against full source.
 */
static int
bofi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	int retval = DDI_DMA_NORESOURCES;
	struct bofi_shadow *hp, *xhp;
	ddi_dma_impl_t *mp;	/* NOTE(review): decl elided in excerpt */
	struct bofi_shadow *dhashp;
	struct bofi_shadow *hhashp;

	/*
	 * if driver_list is set, only intercept those drivers
	 */
	if (!driver_under_test(rdip))
		return (save_bus_ops.bus_dma_allochdl(dip, rdip, attrp,
		    waitfp, arg, handlep));

	/*
	 * allocate shadow handle structure and fill it in
	 */
	hp = kmem_zalloc(sizeof (struct bofi_shadow),
	    ((waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP));
	if (hp == NULL) {	/* NOTE(review): guard elided in excerpt */
		/*
		 * what to do here? Wait a bit and try again
		 */
		if (waitfp != DDI_DMA_DONTWAIT)
			(void) timeout((void (*)())waitfp, arg, 10);
		return (retval);	/* NOTE(review): elided in excerpt */
	}
	(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
	hp->instance = ddi_get_instance(rdip);
	hp->dip = rdip;		/* NOTE(review): elided in excerpt */
	hp->link = NULL;	/* NOTE(review): elided in excerpt */
	hp->type = BOFI_NULL;	/* not bound yet */
	/*
	 * call nexus to do the real work
	 */
	retval = save_bus_ops.bus_dma_allochdl(dip, rdip, attrp, waitfp, arg,
	    handlep);	/* NOTE(review): trailing arg elided in excerpt */
	if (retval != DDI_SUCCESS) {
		kmem_free(hp, sizeof (struct bofi_shadow));
		return (retval);	/* NOTE(review): elided in excerpt */
	}
	/*
	 * now point set dma_handle to point to real handle
	 */
	hp->hdl.dma_handle = *handlep;
	mp = (ddi_dma_impl_t *)*handlep;
	mp->dmai_fault_check = bofi_check_dma_hdl;
	/*
	 * bind and unbind are cached in devinfo - must overwrite them
	 * - note that our bind and unbind are quite happy dealing with
	 * any handles for this devinfo that were previously allocated
	 */
	if (save_bus_ops.bus_dma_bindhdl == DEVI(rdip)->devi_bus_dma_bindfunc)
		DEVI(rdip)->devi_bus_dma_bindfunc = bofi_dma_bindhdl;
	if (save_bus_ops.bus_dma_unbindhdl ==
	    DEVI(rdip)->devi_bus_dma_unbindfunc)
		DEVI(rdip)->devi_bus_dma_unbindfunc = bofi_dma_unbindhdl;
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	/*
	 * get an "rnumber" for this handle - really just seeking to
	 * get a unique number - generally only care for early allocated
	 * handles - so we get as far as INT_MAX, just stay there
	 */
	dhashp = HDL_DHASH(hp->dip);
	for (xhp = dhashp->dnext; xhp != dhashp; xhp = xhp->dnext)
		if (ddi_name_to_major(xhp->name) ==
		    ddi_name_to_major(hp->name) &&
		    xhp->instance == hp->instance &&
		    (xhp->type == BOFI_DMA_HDL ||
		    xhp->type == BOFI_NULL))
			if (xhp->rnumber >= maxrnumber) {
				if (xhp->rnumber == INT_MAX)
					maxrnumber = INT_MAX;
				else	/* NOTE(review): elided in excerpt */
					maxrnumber = xhp->rnumber + 1;
			}
	hp->rnumber = maxrnumber;
	/*
	 * add to dhash, hhash and inuse lists
	 */
	hp->next = shadow_list.next;
	shadow_list.next->prev = hp;
	hp->prev = &shadow_list;
	shadow_list.next = hp;
	hhashp = HDL_HHASH(*handlep);
	hp->hnext = hhashp->hnext;
	hhashp->hnext->hprev = hp;
	hp->hprev = hhashp;	/* NOTE(review): elided in excerpt */
	hhashp->hnext = hp;	/* NOTE(review): elided in excerpt */
	dhashp = HDL_DHASH(hp->dip);
	hp->dnext = dhashp->dnext;
	dhashp->dnext->dprev = hp;
	hp->dprev = dhashp;	/* NOTE(review): elided in excerpt */
	dhashp->dnext = hp;	/* NOTE(review): elided in excerpt */
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	return (retval);	/* NOTE(review): elided in excerpt */
}
/*
 * our ddi_dma_freehdl routine
 *
 * Finds any shadow for the handle, lets the nexus free the real handle,
 * then (if a shadow existed) panics if it was still bound, unlinks it and
 * frees it. NOTE(review): declarations, not-found guards, returns and
 * closing braces are elided in this excerpt and reconstructed from
 * context.
 */
static int
bofi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	int retval;	/* NOTE(review): decl elided in excerpt */
	struct bofi_shadow *hp;
	struct bofi_shadow *hhashp;

	/*
	 * find shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(handle);
	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
		if (hp->hdl.dma_handle == handle)
			break;	/* NOTE(review): elided in excerpt */
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	/*
	 * call nexus to do the real work
	 */
	retval = save_bus_ops.bus_dma_freehdl(dip, rdip, handle);
	if (retval != DDI_SUCCESS) {
		return (retval);	/* NOTE(review): elided in excerpt */
	}
	/*
	 * did we really have a shadow for this handle
	 */
	if (hp == hhashp)	/* NOTE(review): guard elided in excerpt */
		return (retval);
	/*
	 * yes we have - see if it's still bound
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	if (hp->type != BOFI_NULL)
		panic("driver freeing bound dma_handle");
	/*
	 * remove from dhash, hhash and inuse lists
	 */
	hp->hnext->hprev = hp->hprev;
	hp->hprev->hnext = hp->hnext;
	hp->dnext->dprev = hp->dprev;
	hp->dprev->dnext = hp->dnext;
	hp->next->prev = hp->prev;
	hp->prev->next = hp->next;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);

	kmem_free(hp, sizeof (struct bofi_shadow));
	return (retval);	/* NOTE(review): elided in excerpt */
}
/*
 * our ddi_dma_bindhdl routine
 *
 * If the handle has a shadow: record the mapping, optionally (when
 * bofi_sync_check) substitute a page-aligned kernel copy of the buffer so
 * implicit/explicit ddi_dma_sync can be modelled, bind via the nexus, then
 * mark the shadow BOFI_DMA_HDL and chain on matching errdefs (which may
 * corrupt immediately - bind implies sync-for-dev). NOTE(review): this
 * excerpt drops many interior lines (not-found fallthrough, error labels,
 * cookiep/ccountp argument tails, cleanup path); the reconstruction below
 * is flagged where guessed — verify against the full source.
 */
static int
bofi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareqp,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	int retval = DDI_DMA_NORESOURCES;
	auto struct ddi_dma_req dmareq;
	struct bofi_shadow *hp;
	struct bofi_shadow *hhashp;
	ddi_dma_impl_t *mp;	/* NOTE(review): decl elided in excerpt */
	unsigned long pagemask = ddi_ptob(rdip, 1) - 1;

	/*
	 * check we really have a shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(handle);
	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
		if (hp->hdl.dma_handle == handle)
			break;	/* NOTE(review): elided in excerpt */
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	if (hp == hhashp) {	/* NOTE(review): guard elided in excerpt */
		/*
		 * no we don't - just call nexus to do the real work
		 */
		return save_bus_ops.bus_dma_bindhdl(dip, rdip, handle, dmareqp,
		    cookiep, ccountp);	/* NOTE(review): tail elided */
	}
	/*
	 * yes we have - see if it's already bound
	 */
	if (hp->type != BOFI_NULL)
		return (DDI_DMA_INUSE);

	hp->flags = dmareqp->dmar_flags;
	if (dmareqp->dmar_object.dmao_type == DMA_OTYP_PAGES) {
		hp->map_flags = B_PAGEIO;
		hp->map_pp = dmareqp->dmar_object.dmao_obj.pp_obj.pp_pp;
	} else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_priv != NULL) {
		hp->map_flags = B_SHADOW;
		hp->map_pplist = dmareqp->dmar_object.dmao_obj.virt_obj.v_priv;
	} else {	/* NOTE(review): else-arm elided in excerpt */
		hp->map_flags = 0;
	}
	/*
	 * get a kernel virtual mapping
	 */
	hp->addr = ddi_dmareq_mapin(dmareqp, &hp->mapaddr, &hp->len);
	if (hp->addr == NULL)
		goto error;	/* NOTE(review): target elided in excerpt */
	if (bofi_sync_check) {
		/*
		 * Take a copy and pass pointers to this up to nexus instead.
		 * Data will be copied from the original on explicit
		 * and implicit ddi_dma_sync()
		 *
		 * - maintain page alignment because some devices assume it.
		 */
		hp->origaddr = hp->addr;
		hp->allocaddr = ddi_umem_alloc(
		    ((uintptr_t)hp->addr & pagemask) + hp->len,
		    (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP,
		    &hp->umem_cookie);	/* NOTE(review): arg elided */
		if (hp->allocaddr == NULL)
			goto error;	/* NOTE(review): elided in excerpt */
		hp->addr = hp->allocaddr + ((uintptr_t)hp->addr & pagemask);
		if (dmareqp->dmar_flags & DDI_DMA_WRITE)
			xbcopy(hp->origaddr, hp->addr, hp->len);
		dmareq = *dmareqp;	/* NOTE(review): elided in excerpt */
		dmareq.dmar_object.dmao_size = hp->len;
		dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
		dmareq.dmar_object.dmao_obj.virt_obj.v_as = &kas;
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = hp->addr;
		dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		dmareqp = &dmareq;	/* NOTE(review): elided in excerpt */
	}
	/*
	 * call nexus to do the real work
	 */
	retval = save_bus_ops.bus_dma_bindhdl(dip, rdip, handle, dmareqp,
	    cookiep, ccountp);	/* NOTE(review): tail elided in excerpt */
	if (retval != DDI_SUCCESS)
		goto error2;	/* NOTE(review): elided in excerpt */
	/*
	 * unset DMP_NOSYNC so the framework always calls our sync routine
	 */
	mp = (ddi_dma_impl_t *)handle;
	mp->dmai_rflags &= ~DMP_NOSYNC;
	/*
	 * chain on any pre-existing errdefs that apply to this
	 * acc_handle and corrupt if required (as there is an implicit
	 * ddi_dma_sync() in this call)
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hp->type = BOFI_DMA_HDL;
	chain_on_errdefs(hp);
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	return (retval);	/* NOTE(review): elided in excerpt */

error:	/* NOTE(review): label elided in excerpt */
	if (dmareqp->dmar_fp != DDI_DMA_DONTWAIT) {
		/*
		 * what to do here? Wait a bit and try again
		 */
		(void) timeout((void (*)())dmareqp->dmar_fp,
		    dmareqp->dmar_arg, 10);
	}
error2:	/* NOTE(review): label elided in excerpt */
	if (hp) {	/* NOTE(review): cleanup guards elided in excerpt */
		ddi_dmareq_mapout(hp->mapaddr, hp->len, hp->map_flags,
		    hp->map_pp, hp->map_pplist);
		if (bofi_sync_check && hp->allocaddr)
			ddi_umem_free(hp->umem_cookie);
		hp->mapaddr = NULL;	/* NOTE(review): elided in excerpt */
		hp->allocaddr = NULL;
		hp->origaddr = NULL;
	}
	return (retval);	/* NOTE(review): elided in excerpt */
}
/*
 * our ddi_dma_unbindhdl routine
 *
 * Unbind via the nexus first, then release the shadow's errdef links
 * (performing the implicit sync-for-cpu corruption on read mappings),
 * mark the shadow unbound and undo the bofi_sync_check copy/mapping.
 * NOTE(review): declarations, not-found guard, loop-advance statements
 * and closing braces are elided in this excerpt and reconstructed from
 * context.
 */
static int
bofi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	struct bofi_link *lp, *next_lp;
	struct bofi_errent *ep;
	int retval;	/* NOTE(review): decl elided in excerpt */
	struct bofi_shadow *hp;
	struct bofi_shadow *hhashp;

	/*
	 * call nexus to do the real work
	 */
	retval = save_bus_ops.bus_dma_unbindhdl(dip, rdip, handle);
	if (retval != DDI_SUCCESS)
		return (retval);	/* NOTE(review): elided in excerpt */
	/*
	 * check we really have a shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(handle);
	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
		if (hp->hdl.dma_handle == handle)
			break;	/* NOTE(review): elided in excerpt */
	if (hp == hhashp) {	/* NOTE(review): guard elided in excerpt */
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		return (retval);
	}
	/*
	 * yes we have - see if it's already unbound
	 */
	if (hp->type == BOFI_NULL)
		panic("driver unbinding unbound dma_handle");
	/*
	 * free any errdef link structures tagged on to this
	 * dma handle
	 */
	for (lp = hp->link; lp != NULL; ) {
		next_lp = lp->link;	/* NOTE(review): elided in excerpt */
		/*
		 * there is an implicit sync_for_cpu on free -
		 * may need to corrupt
		 */
		ep = lp->errentp;	/* NOTE(review): elided in excerpt */
		if ((ep->errdef.access_type & BOFI_DMA_R) &&
		    (hp->flags & DDI_DMA_READ) &&
		    (ep->state & BOFI_DEV_ACTIVE)) {
			do_dma_corrupt(hp, ep, DDI_DMA_SYNC_FORCPU, 0, hp->len);
		}
		lp->link = bofi_link_freelist;
		bofi_link_freelist = lp;
		lp = next_lp;		/* NOTE(review): elided in excerpt */
	}
	hp->link = NULL;		/* NOTE(review): elided in excerpt */
	hp->type = BOFI_NULL;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);

	if (bofi_sync_check && (hp->flags & DDI_DMA_READ))
		/*
		 * implicit sync_for_cpu - copy data back
		 */
		if (hp->allocaddr)	/* NOTE(review): guard elided */
			xbcopy(hp->addr, hp->origaddr, hp->len);
	ddi_dmareq_mapout(hp->mapaddr, hp->len, hp->map_flags,
	    hp->map_pp, hp->map_pplist);
	if (bofi_sync_check && hp->allocaddr)
		ddi_umem_free(hp->umem_cookie);
	hp->mapaddr = NULL;	/* NOTE(review): elided in excerpt */
	hp->allocaddr = NULL;
	hp->origaddr = NULL;
	return (retval);	/* NOTE(review): elided in excerpt */
}
/*
 * our ddi_dma_sync routine
 *
 * For FORCPU/FORKERNEL the nexus sync runs first (device data must land
 * before we corrupt/copy); for FORDEV it runs last. Between those, any
 * matching active errdefs get a chance to corrupt the window, and when
 * bofi_sync_check is set data is shuttled between the driver's original
 * buffer and our substitute copy. NOTE(review): declarations, not-found
 * guard, argument tails ("len, flags") and closing braces are elided in
 * this excerpt and reconstructed from context.
 */
static int
bofi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len, uint_t flags)
{
	struct bofi_link *lp;
	struct bofi_errent *ep;
	struct bofi_shadow *hp;
	struct bofi_shadow *hhashp;
	int retval;	/* NOTE(review): decl elided in excerpt */

	if (flags == DDI_DMA_SYNC_FORCPU || flags == DDI_DMA_SYNC_FORKERNEL) {
		/*
		 * in this case get nexus driver to do sync first
		 */
		retval = save_bus_ops.bus_dma_flush(dip, rdip, handle, off,
		    len, flags);	/* NOTE(review): tail elided */
		if (retval != DDI_SUCCESS)
			return (retval);	/* NOTE(review): elided */
	}
	/*
	 * check we really have a shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(handle);
	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
		if (hp->hdl.dma_handle == handle &&
		    hp->type == BOFI_DMA_HDL)
			break;	/* NOTE(review): elided in excerpt */
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	if (hp != hhashp) {	/* NOTE(review): guard elided in excerpt */
		/*
		 * yes - do we need to copy data from original
		 */
		if (bofi_sync_check && flags == DDI_DMA_SYNC_FORDEV)
			if (hp->allocaddr)	/* NOTE(review): elided */
				xbcopy(hp->origaddr+off, hp->addr+off,
				    len ? len : (hp->len - off));
		/*
		 * yes - check if we need to corrupt the data
		 */
		mutex_enter(&bofi_low_mutex);
		mutex_enter(&bofi_mutex);
		for (lp = hp->link; lp != NULL; lp = lp->link) {
			ep = lp->errentp;	/* NOTE(review): elided */
			if ((((ep->errdef.access_type & BOFI_DMA_R) &&
			    (flags == DDI_DMA_SYNC_FORCPU ||
			    flags == DDI_DMA_SYNC_FORKERNEL)) ||
			    ((ep->errdef.access_type & BOFI_DMA_W) &&
			    (flags == DDI_DMA_SYNC_FORDEV))) &&
			    (ep->state & BOFI_DEV_ACTIVE)) {
				do_dma_corrupt(hp, ep, flags, off,
				    len ? len : (hp->len - off));
			}
		}
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		/*
		 * do we need to copy data to original
		 */
		if (bofi_sync_check && (flags == DDI_DMA_SYNC_FORCPU ||
		    flags == DDI_DMA_SYNC_FORKERNEL))
			if (hp->allocaddr)	/* NOTE(review): elided */
				xbcopy(hp->addr+off, hp->origaddr+off,
				    len ? len : (hp->len - off));
	}
	if (flags == DDI_DMA_SYNC_FORDEV)
		/*
		 * in this case get nexus driver to do sync last
		 */
		retval = save_bus_ops.bus_dma_flush(dip, rdip, handle, off,
		    len, flags);	/* NOTE(review): tail elided */
	return (retval);	/* NOTE(review): elided in excerpt */
}
/*
 * our dma_win routine
 *
 * Let the nexus move the DMA window, then - if we shadow this handle -
 * clear DMP_NOSYNC so the framework keeps routing ddi_dma_sync through
 * our flush routine for the new window. NOTE(review): declarations,
 * found-guard and return lines are elided in this excerpt and
 * reconstructed from context.
 */
static int
bofi_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	struct bofi_shadow *hp;
	struct bofi_shadow *hhashp;
	int retval;		/* NOTE(review): decl elided in excerpt */
	ddi_dma_impl_t *mp;	/* NOTE(review): decl elided in excerpt */

	/*
	 * call nexus to do the real work
	 */
	retval = save_bus_ops.bus_dma_win(dip, rdip, handle, win, offp, lenp,
	    cookiep, ccountp);	/* NOTE(review): tail elided in excerpt */
	if (retval != DDI_SUCCESS)
		return (retval);	/* NOTE(review): elided in excerpt */
	/*
	 * check we really have a shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(handle);
	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
		if (hp->hdl.dma_handle == handle)
			break;	/* NOTE(review): elided in excerpt */
	if (hp != hhashp) {	/* NOTE(review): guard elided in excerpt */
		/*
		 * yes - make sure DMP_NOSYNC is unset
		 */
		mp = (ddi_dma_impl_t *)handle;
		mp->dmai_rflags &= ~DMP_NOSYNC;
	}
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	return (retval);	/* NOTE(review): elided in excerpt */
}
/*
 * our dma_ctl routine
 *
 * Runs the real bus_dma_ctl first, then (for intercepted drivers) handles
 * the sparc dvma reserve/release bookkeeping on shadow handles: a dummy
 * shadow holds an array of per-index shadows (hparrayp) created by
 * bofi_dvma_reserve, all torn down on DDI_DMA_RELEASE. NOTE(review): this
 * excerpt drops many interior lines (declarations, not-found guard,
 * dummyhp lookup, switch framing, returns); the reconstruction is flagged
 * where guessed — verify against the full source.
 */
static int
bofi_dma_ctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	struct bofi_shadow *hp;
	struct bofi_shadow *hhashp;
	int retval;	/* NOTE(review): decl elided in excerpt */
	int i;		/* NOTE(review): decl elided in excerpt */
	struct bofi_shadow *dummyhp;

	/*
	 * get nexus to do real work
	 */
	retval = save_bus_ops.bus_dma_ctl(dip, rdip, handle, request, offp,
	    lenp, objp, flags);	/* NOTE(review): tail elided in excerpt */
	if (retval != DDI_SUCCESS)
		return (retval);	/* NOTE(review): elided in excerpt */
	/*
	 * if driver_list is set, only intercept those drivers
	 */
	if (!driver_under_test(rdip))
		return (DDI_SUCCESS);

#if defined(__sparc)
	/*
	 * check if this is a dvma_reserve - that one's like a
	 * dma_allochdl and needs to be handled separately
	 */
	if (request == DDI_DMA_RESERVE) {
		bofi_dvma_reserve(rdip, *(ddi_dma_handle_t *)objp);
		return (DDI_SUCCESS);
	}
#endif
	/*
	 * check we really have a shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(handle);
	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
		if (hp->hdl.dma_handle == handle)
			break;	/* NOTE(review): elided in excerpt */
	if (hp == hhashp) {	/* NOTE(review): guard elided in excerpt */
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		return (retval);
	}
	/*
	 * yes we have - see what kind of command this is
	 */
	switch (request) {	/* NOTE(review): elided in excerpt */
	case DDI_DMA_RELEASE:
		/*
		 * dvma release - release dummy handle and all the index
		 * handles
		 */
		dummyhp = hp;	/* NOTE(review): lookup elided in excerpt */
		dummyhp->hnext->hprev = dummyhp->hprev;
		dummyhp->hprev->hnext = dummyhp->hnext;
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		for (i = 0; i < dummyhp->len; i++) {
			hp = dummyhp->hparrayp[i];
			/*
			 * chek none of the index handles were still loaded
			 */
			if (hp->type != BOFI_NULL)
				panic("driver releasing loaded dvma");
			/*
			 * remove from dhash and inuse lists
			 */
			mutex_enter(&bofi_low_mutex);
			mutex_enter(&bofi_mutex);
			hp->dnext->dprev = hp->dprev;
			hp->dprev->dnext = hp->dnext;
			hp->next->prev = hp->prev;
			hp->prev->next = hp->next;
			mutex_exit(&bofi_mutex);
			mutex_exit(&bofi_low_mutex);

			if (bofi_sync_check && hp->allocaddr)
				ddi_umem_free(hp->umem_cookie);
			kmem_free(hp, sizeof (struct bofi_shadow));
		}
		kmem_free(dummyhp->hparrayp, dummyhp->len *
		    sizeof (struct bofi_shadow *));
		kmem_free(dummyhp, sizeof (struct bofi_shadow));
		return (retval);	/* NOTE(review): elided in excerpt */
	default:	/* NOTE(review): elided in excerpt */
		break;
	}
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	return (retval);	/* NOTE(review): elided in excerpt */
}
3930 #if defined(__sparc)
3932 * dvma reserve case from bofi_dma_ctl()
3935 bofi_dvma_reserve(dev_info_t
*rdip
, ddi_dma_handle_t handle
)
3937 struct bofi_shadow
*hp
;
3938 struct bofi_shadow
*dummyhp
;
3939 struct bofi_shadow
*dhashp
;
3940 struct bofi_shadow
*hhashp
;
3942 struct fast_dvma
*nexus_private
;
3945 mp
= (ddi_dma_impl_t
*)handle
;
3946 count
= mp
->dmai_ndvmapages
;
3948 * allocate dummy shadow handle structure
3950 dummyhp
= kmem_zalloc(sizeof (*dummyhp
), KM_SLEEP
);
3951 if (mp
->dmai_rflags
& DMP_BYPASSNEXUS
) {
3953 * overlay our routines over the nexus's dvma routines
3955 nexus_private
= (struct fast_dvma
*)mp
->dmai_nexus_private
;
3956 dummyhp
->save
.dvma_ops
= *(nexus_private
->ops
);
3957 nexus_private
->ops
= &bofi_dvma_ops
;
3960 * now fill in the dummy handle. This just gets put on hhash queue
3961 * so our dvma routines can find and index off to the handle they
3964 (void) strncpy(dummyhp
->name
, ddi_get_name(rdip
), NAMESIZE
);
3965 dummyhp
->instance
= ddi_get_instance(rdip
);
3966 dummyhp
->rnumber
= -1;
3967 dummyhp
->dip
= rdip
;
3968 dummyhp
->len
= count
;
3969 dummyhp
->hdl
.dma_handle
= handle
;
3970 dummyhp
->link
= NULL
;
3971 dummyhp
->type
= BOFI_NULL
;
3973 * allocate space for real handles
3975 dummyhp
->hparrayp
= kmem_alloc(count
*
3976 sizeof (struct bofi_shadow
*), KM_SLEEP
);
3977 for (i
= 0; i
< count
; i
++) {
3979 * allocate shadow handle structures and fill them in
3981 hp
= kmem_zalloc(sizeof (*hp
), KM_SLEEP
);
3982 (void) strncpy(hp
->name
, ddi_get_name(rdip
), NAMESIZE
);
3983 hp
->instance
= ddi_get_instance(rdip
);
3986 hp
->hdl
.dma_handle
= 0;
3988 hp
->type
= BOFI_NULL
;
3989 if (bofi_sync_check
) {
3990 unsigned long pagemask
= ddi_ptob(rdip
, 1) - 1;
3992 * Take a copy and set this to be hp->addr
3993 * Data will be copied to and from the original on
3994 * explicit and implicit ddi_dma_sync()
3996 * - maintain page alignment because some devices
3999 hp
->allocaddr
= ddi_umem_alloc(
4000 ((int)(uintptr_t)hp
->addr
& pagemask
)
4002 KM_SLEEP
, &hp
->umem_cookie
);
4003 hp
->addr
= hp
->allocaddr
+
4004 ((int)(uintptr_t)hp
->addr
& pagemask
);
4007 * add to dhash and inuse lists.
4008 * these don't go on hhash queue.
4010 mutex_enter(&bofi_low_mutex
);
4011 mutex_enter(&bofi_mutex
);
4012 hp
->next
= shadow_list
.next
;
4013 shadow_list
.next
->prev
= hp
;
4014 hp
->prev
= &shadow_list
;
4015 shadow_list
.next
= hp
;
4016 dhashp
= HDL_DHASH(hp
->dip
);
4017 hp
->dnext
= dhashp
->dnext
;
4018 dhashp
->dnext
->dprev
= hp
;
4021 dummyhp
->hparrayp
[i
] = hp
;
4022 mutex_exit(&bofi_mutex
);
4023 mutex_exit(&bofi_low_mutex
);
4026 * add dummy handle to hhash list only
4028 mutex_enter(&bofi_low_mutex
);
4029 mutex_enter(&bofi_mutex
);
4030 hhashp
= HDL_HHASH(handle
);
4031 dummyhp
->hnext
= hhashp
->hnext
;
4032 hhashp
->hnext
->hprev
= dummyhp
;
4033 dummyhp
->hprev
= hhashp
;
4034 hhashp
->hnext
= dummyhp
;
4035 mutex_exit(&bofi_mutex
);
4036 mutex_exit(&bofi_low_mutex
);
4040 * our dvma_kaddr_load()
4043 bofi_dvma_kaddr_load(ddi_dma_handle_t h
, caddr_t a
, uint_t len
, uint_t index
,
4044 ddi_dma_cookie_t
*cp
)
4046 struct bofi_shadow
*dummyhp
;
4047 struct bofi_shadow
*hp
;
4048 struct bofi_shadow
*hhashp
;
4049 struct bofi_errent
*ep
;
4050 struct bofi_link
*lp
;
4053 * check we really have a dummy shadow for this handle
4055 mutex_enter(&bofi_low_mutex
);
4056 mutex_enter(&bofi_mutex
);
4057 hhashp
= HDL_HHASH(h
);
4058 for (dummyhp
= hhashp
->hnext
; dummyhp
!= hhashp
;
4059 dummyhp
= dummyhp
->hnext
)
4060 if (dummyhp
->hdl
.dma_handle
== h
)
4062 mutex_exit(&bofi_mutex
);
4063 mutex_exit(&bofi_low_mutex
);
4064 if (dummyhp
== hhashp
) {
4066 * no dummy shadow - panic
4068 panic("driver dvma_kaddr_load with no reserve");
4074 hp
= dummyhp
->hparrayp
[index
];
4076 * check its not already loaded
4078 if (hp
->type
!= BOFI_NULL
)
4079 panic("driver loading loaded dvma");
4081 * if were doing copying, just need to change origaddr and get
4082 * nexus to map hp->addr again
4083 * if not, set hp->addr to new address.
4084 * - note these are always kernel virtual addresses - no need to map
4086 if (bofi_sync_check
&& hp
->allocaddr
) {
4093 * get nexus to do the real work
4095 dummyhp
->save
.dvma_ops
.dvma_kaddr_load(h
, a
, len
, index
, cp
);
4097 * chain on any pre-existing errdefs that apply to this dma_handle
4098 * no need to corrupt - there's no implicit dma_sync on this one
4100 mutex_enter(&bofi_low_mutex
);
4101 mutex_enter(&bofi_mutex
);
4102 hp
->type
= BOFI_DMA_HDL
;
4103 for (ep
= errent_listp
; ep
!= NULL
; ep
= ep
->next
) {
4104 if (ddi_name_to_major(hp
->name
) ==
4105 ddi_name_to_major(ep
->name
) &&
4106 hp
->instance
== ep
->errdef
.instance
&&
4107 (ep
->errdef
.rnumber
== -1 ||
4108 hp
->rnumber
== ep
->errdef
.rnumber
) &&
4109 ((ep
->errdef
.access_type
& BOFI_DMA_RW
) &&
4110 (((uintptr_t)(hp
->addr
+ ep
->errdef
.offset
+
4111 ep
->errdef
.len
) & ~LLSZMASK
) >
4112 ((uintptr_t)((hp
->addr
+ ep
->errdef
.offset
) +
4113 LLSZMASK
) & ~LLSZMASK
)))) {
4114 lp
= bofi_link_freelist
;
4116 bofi_link_freelist
= lp
->link
;
4118 lp
->link
= hp
->link
;
4123 mutex_exit(&bofi_mutex
);
4124 mutex_exit(&bofi_low_mutex
);
4131 bofi_dvma_unload(ddi_dma_handle_t h
, uint_t index
, uint_t view
)
4133 struct bofi_link
*lp
, *next_lp
;
4134 struct bofi_errent
*ep
;
4135 struct bofi_shadow
*dummyhp
;
4136 struct bofi_shadow
*hp
;
4137 struct bofi_shadow
*hhashp
;
4140 * check we really have a dummy shadow for this handle
4142 mutex_enter(&bofi_low_mutex
);
4143 mutex_enter(&bofi_mutex
);
4144 hhashp
= HDL_HHASH(h
);
4145 for (dummyhp
= hhashp
->hnext
; dummyhp
!= hhashp
;
4146 dummyhp
= dummyhp
->hnext
)
4147 if (dummyhp
->hdl
.dma_handle
== h
)
4149 mutex_exit(&bofi_mutex
);
4150 mutex_exit(&bofi_low_mutex
);
4151 if (dummyhp
== hhashp
) {
4153 * no dummy shadow - panic
4155 panic("driver dvma_unload with no reserve");
4157 dummyhp
->save
.dvma_ops
.dvma_unload(h
, index
, view
);
4161 hp
= dummyhp
->hparrayp
[index
];
4163 * check its not already unloaded
4165 if (hp
->type
== BOFI_NULL
)
4166 panic("driver unloading unloaded dvma");
4168 * free any errdef link structures tagged on to this
4169 * shadow handle - do corruption if necessary
4171 mutex_enter(&bofi_low_mutex
);
4172 mutex_enter(&bofi_mutex
);
4173 for (lp
= hp
->link
; lp
!= NULL
; ) {
4176 if ((ep
->errdef
.access_type
& BOFI_DMA_R
) &&
4177 (view
== DDI_DMA_SYNC_FORCPU
||
4178 view
== DDI_DMA_SYNC_FORKERNEL
) &&
4179 (ep
->state
& BOFI_DEV_ACTIVE
)) {
4180 do_dma_corrupt(hp
, ep
, view
, 0, hp
->len
);
4182 lp
->link
= bofi_link_freelist
;
4183 bofi_link_freelist
= lp
;
4187 hp
->type
= BOFI_NULL
;
4188 mutex_exit(&bofi_mutex
);
4189 mutex_exit(&bofi_low_mutex
);
4191 * if there is an explicit sync_for_cpu, then do copy to original
4193 if (bofi_sync_check
&&
4194 (view
== DDI_DMA_SYNC_FORCPU
|| view
== DDI_DMA_SYNC_FORKERNEL
))
4196 xbcopy(hp
->addr
, hp
->origaddr
, hp
->len
);
4203 bofi_dvma_sync(ddi_dma_handle_t h
, uint_t index
, uint_t view
)
4205 struct bofi_link
*lp
;
4206 struct bofi_errent
*ep
;
4207 struct bofi_shadow
*hp
;
4208 struct bofi_shadow
*dummyhp
;
4209 struct bofi_shadow
*hhashp
;
4212 * check we really have a dummy shadow for this handle
4214 mutex_enter(&bofi_low_mutex
);
4215 mutex_enter(&bofi_mutex
);
4216 hhashp
= HDL_HHASH(h
);
4217 for (dummyhp
= hhashp
->hnext
; dummyhp
!= hhashp
;
4218 dummyhp
= dummyhp
->hnext
)
4219 if (dummyhp
->hdl
.dma_handle
== h
)
4221 mutex_exit(&bofi_mutex
);
4222 mutex_exit(&bofi_low_mutex
);
4223 if (dummyhp
== hhashp
) {
4225 * no dummy shadow - panic
4227 panic("driver dvma_sync with no reserve");
4232 hp
= dummyhp
->hparrayp
[index
];
4234 * check its already loaded
4236 if (hp
->type
== BOFI_NULL
)
4237 panic("driver syncing unloaded dvma");
4238 if (view
== DDI_DMA_SYNC_FORCPU
|| view
== DDI_DMA_SYNC_FORKERNEL
)
4240 * in this case do sync first
4242 dummyhp
->save
.dvma_ops
.dvma_sync(h
, index
, view
);
4244 * if there is an explicit sync_for_dev, then do copy from original
4246 if (bofi_sync_check
&& view
== DDI_DMA_SYNC_FORDEV
) {
4248 xbcopy(hp
->origaddr
, hp
->addr
, hp
->len
);
4251 * do corruption if necessary
4253 mutex_enter(&bofi_low_mutex
);
4254 mutex_enter(&bofi_mutex
);
4255 for (lp
= hp
->link
; lp
!= NULL
; lp
= lp
->link
) {
4257 if ((((ep
->errdef
.access_type
& BOFI_DMA_R
) &&
4258 (view
== DDI_DMA_SYNC_FORCPU
||
4259 view
== DDI_DMA_SYNC_FORKERNEL
)) ||
4260 ((ep
->errdef
.access_type
& BOFI_DMA_W
) &&
4261 (view
== DDI_DMA_SYNC_FORDEV
))) &&
4262 (ep
->state
& BOFI_DEV_ACTIVE
)) {
4263 do_dma_corrupt(hp
, ep
, view
, 0, hp
->len
);
4266 mutex_exit(&bofi_mutex
);
4267 mutex_exit(&bofi_low_mutex
);
4269 * if there is an explicit sync_for_cpu, then do copy to original
4271 if (bofi_sync_check
&&
4272 (view
== DDI_DMA_SYNC_FORCPU
|| view
== DDI_DMA_SYNC_FORKERNEL
)) {
4274 xbcopy(hp
->addr
, hp
->origaddr
, hp
->len
);
4276 if (view
== DDI_DMA_SYNC_FORDEV
)
4278 * in this case do sync last
4280 dummyhp
->save
.dvma_ops
.dvma_sync(h
, index
, view
);
4285 * bofi intercept routine - gets called instead of users interrupt routine
4288 bofi_intercept_intr(caddr_t xp
, caddr_t arg2
)
4290 struct bofi_errent
*ep
;
4291 struct bofi_link
*lp
;
4292 struct bofi_shadow
*hp
;
4295 uint_t retval
= DDI_INTR_UNCLAIMED
;
4297 int unclaimed_counter
= 0;
4298 int jabber_detected
= 0;
4300 hp
= (struct bofi_shadow
*)xp
;
4302 * check if nothing to do
4304 if (hp
->link
== NULL
)
4305 return (hp
->save
.intr
.int_handler
4306 (hp
->save
.intr
.int_handler_arg1
, arg2
));
4307 mutex_enter(&bofi_mutex
);
4309 * look for any errdefs
4311 for (lp
= hp
->link
; lp
!= NULL
; lp
= lp
->link
) {
4313 if (ep
->state
& BOFI_DEV_ACTIVE
) {
4317 if ((ep
->errdef
.access_count
||
4318 ep
->errdef
.fail_count
) &&
4319 (ep
->errdef
.access_type
& BOFI_LOG
))
4320 log_acc_event(ep
, BOFI_INTR
, 0, 0, 1, 0);
4321 if (ep
->errdef
.access_count
> 1) {
4322 ep
->errdef
.access_count
--;
4323 } else if (ep
->errdef
.fail_count
> 0) {
4324 ep
->errdef
.fail_count
--;
4325 ep
->errdef
.access_count
= 0;
4327 * OK do "corruption"
4329 if (ep
->errstate
.fail_time
== 0)
4330 ep
->errstate
.fail_time
= bofi_gettime();
4331 switch (ep
->errdef
.optype
) {
4332 case BOFI_DELAY_INTR
:
4335 (ep
->errdef
.operand
);
4338 case BOFI_LOSE_INTR
:
4341 case BOFI_EXTRA_INTR
:
4342 intr_count
+= ep
->errdef
.operand
;
4350 mutex_exit(&bofi_mutex
);
4352 * send extra or fewer interrupts as requested
4354 for (i
= 0; i
< intr_count
; i
++) {
4355 result
= hp
->save
.intr
.int_handler
4356 (hp
->save
.intr
.int_handler_arg1
, arg2
);
4357 if (result
== DDI_INTR_CLAIMED
)
4358 unclaimed_counter
>>= 1;
4359 else if (++unclaimed_counter
>= 20)
4360 jabber_detected
= 1;
4365 * if more than 1000 spurious interrupts requested and
4366 * jabber not detected - give warning
4368 if (intr_count
> 1000 && !jabber_detected
)
4369 panic("undetected interrupt jabber: %s%d",
4370 hp
->name
, hp
->instance
);
4372 * return first response - or "unclaimed" if none
4379 * our ddi_check_acc_hdl
4383 bofi_check_acc_hdl(ddi_acc_impl_t
*handle
)
4385 struct bofi_shadow
*hp
;
4386 struct bofi_link
*lp
;
4389 hp
= handle
->ahi_common
.ah_bus_private
;
4390 if (!hp
->link
|| !mutex_tryenter(&bofi_mutex
)) {
4393 for (lp
= hp
->link
; lp
!= NULL
; lp
= lp
->link
) {
4395 * OR in error state from all associated
4398 if (lp
->errentp
->errdef
.access_count
== 0 &&
4399 (lp
->errentp
->state
& BOFI_DEV_ACTIVE
)) {
4400 result
= (lp
->errentp
->errdef
.acc_chk
& 1);
4403 mutex_exit(&bofi_mutex
);
4408 * our ddi_check_dma_hdl
4412 bofi_check_dma_hdl(ddi_dma_impl_t
*handle
)
4414 struct bofi_shadow
*hp
;
4415 struct bofi_link
*lp
;
4416 struct bofi_shadow
*hhashp
;
4419 if (!mutex_tryenter(&bofi_mutex
)) {
4422 hhashp
= HDL_HHASH(handle
);
4423 for (hp
= hhashp
->hnext
; hp
!= hhashp
; hp
= hp
->hnext
)
4424 if (hp
->hdl
.dma_handle
== (ddi_dma_handle_t
)handle
)
4427 mutex_exit(&bofi_mutex
);
4431 mutex_exit(&bofi_mutex
);
4434 for (lp
= hp
->link
; lp
!= NULL
; lp
= lp
->link
) {
4436 * OR in error state from all associated
4439 if (lp
->errentp
->errdef
.access_count
== 0 &&
4440 (lp
->errentp
->state
& BOFI_DEV_ACTIVE
)) {
4441 result
= ((lp
->errentp
->errdef
.acc_chk
& 2) ? 1 : 0);
4444 mutex_exit(&bofi_mutex
);
4451 bofi_post_event(dev_info_t
*dip
, dev_info_t
*rdip
,
4452 ddi_eventcookie_t eventhdl
, void *impl_data
)
4454 ddi_eventcookie_t ec
;
4455 struct ddi_fault_event_data
*arg
;
4456 struct bofi_errent
*ep
;
4457 struct bofi_shadow
*hp
;
4458 struct bofi_shadow
*dhashp
;
4459 struct bofi_link
*lp
;
4462 if (ddi_get_eventcookie(dip
, DDI_DEVI_FAULT_EVENT
, &ec
) != DDI_SUCCESS
)
4463 return (DDI_FAILURE
);
4466 return (save_bus_ops
.bus_post_event(dip
, rdip
, eventhdl
,
4469 arg
= (struct ddi_fault_event_data
*)impl_data
;
4470 mutex_enter(&bofi_mutex
);
4472 * find shadow handles with appropriate dev_infos
4473 * and set error reported on all associated errdef structures
4475 dhashp
= HDL_DHASH(arg
->f_dip
);
4476 for (hp
= dhashp
->dnext
; hp
!= dhashp
; hp
= hp
->dnext
) {
4477 if (hp
->dip
== arg
->f_dip
) {
4478 for (lp
= hp
->link
; lp
!= NULL
; lp
= lp
->link
) {
4480 ep
->errstate
.errmsg_count
++;
4481 if ((ep
->errstate
.msg_time
== NULL
||
4482 ep
->errstate
.severity
> arg
->f_impact
) &&
4483 (ep
->state
& BOFI_DEV_ACTIVE
)) {
4484 ep
->errstate
.msg_time
= bofi_gettime();
4485 ep
->errstate
.severity
= arg
->f_impact
;
4486 (void) strncpy(ep
->errstate
.buffer
,
4487 arg
->f_message
, ERRMSGSIZE
);
4488 ddi_trigger_softintr(ep
->softintr_id
);
4493 mutex_exit(&bofi_mutex
);
4494 return (save_bus_ops
.bus_post_event(dip
, rdip
, eventhdl
, impl_data
));
4499 bofi_fm_ereport_callback(sysevent_t
*ev
, void *cookie
)
4506 ddi_fault_impact_t impact
;
4507 struct bofi_errent
*ep
;
4508 struct bofi_shadow
*hp
;
4509 struct bofi_link
*lp
;
4510 char service_class
[FM_MAX_CLASS
];
4511 char hppath
[MAXPATHLEN
];
4512 int service_ereport
= 0;
4514 (void) sysevent_get_attr_list(ev
, &nvlist
);
4515 (void) nvlist_lookup_string(nvlist
, FM_CLASS
, &class);
4516 if (nvlist_lookup_nvlist(nvlist
, FM_EREPORT_DETECTOR
, &detector
) == 0)
4517 (void) nvlist_lookup_string(detector
, FM_FMRI_DEV_PATH
, &path
);
4519 (void) snprintf(service_class
, FM_MAX_CLASS
, "%s.%s.%s.",
4520 FM_EREPORT_CLASS
, DDI_IO_CLASS
, DDI_FM_SERVICE_IMPACT
);
4521 if (strncmp(class, service_class
, strlen(service_class
) - 1) == 0)
4522 service_ereport
= 1;
4524 mutex_enter(&bofi_mutex
);
4526 * find shadow handles with appropriate dev_infos
4527 * and set error reported on all associated errdef structures
4529 for (hp
= shadow_list
.next
; hp
!= &shadow_list
; hp
= hp
->next
) {
4530 (void) ddi_pathname(hp
->dip
, hppath
);
4531 if (strcmp(path
, hppath
) != 0)
4533 for (lp
= hp
->link
; lp
!= NULL
; lp
= lp
->link
) {
4535 ep
->errstate
.errmsg_count
++;
4536 if (!(ep
->state
& BOFI_DEV_ACTIVE
))
4538 if (ep
->errstate
.msg_time
!= NULL
)
4540 if (service_ereport
) {
4541 ptr
= class + strlen(service_class
);
4542 if (strcmp(ptr
, DDI_FM_SERVICE_LOST
) == 0)
4543 impact
= DDI_SERVICE_LOST
;
4544 else if (strcmp(ptr
,
4545 DDI_FM_SERVICE_DEGRADED
) == 0)
4546 impact
= DDI_SERVICE_DEGRADED
;
4547 else if (strcmp(ptr
,
4548 DDI_FM_SERVICE_RESTORED
) == 0)
4549 impact
= DDI_SERVICE_RESTORED
;
4551 impact
= DDI_SERVICE_UNAFFECTED
;
4552 if (ep
->errstate
.severity
> impact
)
4553 ep
->errstate
.severity
= impact
;
4554 } else if (ep
->errstate
.buffer
[0] == '\0') {
4555 (void) strncpy(ep
->errstate
.buffer
, class,
4558 if (ep
->errstate
.buffer
[0] != '\0' &&
4559 ep
->errstate
.severity
< DDI_SERVICE_RESTORED
) {
4560 ep
->errstate
.msg_time
= bofi_gettime();
4561 ddi_trigger_softintr(ep
->softintr_id
);
4565 nvlist_free(nvlist
);
4566 mutex_exit(&bofi_mutex
);
4571 * our intr_ops routine
4574 bofi_intr_ops(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_intr_op_t intr_op
,
4575 ddi_intr_handle_impl_t
*hdlp
, void *result
)
4578 struct bofi_shadow
*hp
;
4579 struct bofi_shadow
*dhashp
;
4580 struct bofi_shadow
*hhashp
;
4581 struct bofi_errent
*ep
;
4582 struct bofi_link
*lp
, *next_lp
;
4585 case DDI_INTROP_ADDISR
:
4587 * if driver_list is set, only intercept those drivers
4589 if (!driver_under_test(rdip
))
4590 return (save_bus_ops
.bus_intr_op(dip
, rdip
,
4591 intr_op
, hdlp
, result
));
4593 * allocate shadow handle structure and fill in
4595 hp
= kmem_zalloc(sizeof (struct bofi_shadow
), KM_SLEEP
);
4596 (void) strncpy(hp
->name
, ddi_get_name(rdip
), NAMESIZE
);
4597 hp
->instance
= ddi_get_instance(rdip
);
4598 hp
->save
.intr
.int_handler
= hdlp
->ih_cb_func
;
4599 hp
->save
.intr
.int_handler_arg1
= hdlp
->ih_cb_arg1
;
4600 hdlp
->ih_cb_func
= (ddi_intr_handler_t
*)bofi_intercept_intr
;
4601 hdlp
->ih_cb_arg1
= (caddr_t
)hp
;
4602 hp
->bofi_inum
= hdlp
->ih_inum
;
4605 hp
->type
= BOFI_INT_HDL
;
4607 * save whether hilevel or not
4610 if (hdlp
->ih_pri
>= ddi_intr_get_hilevel_pri())
4616 * call nexus to do real work, but specifying our handler, and
4617 * our shadow handle as argument
4619 retval
= save_bus_ops
.bus_intr_op(dip
, rdip
,
4620 intr_op
, hdlp
, result
);
4621 if (retval
!= DDI_SUCCESS
) {
4622 kmem_free(hp
, sizeof (struct bofi_shadow
));
4626 * add to dhash, hhash and inuse lists
4628 mutex_enter(&bofi_low_mutex
);
4629 mutex_enter(&bofi_mutex
);
4630 hp
->next
= shadow_list
.next
;
4631 shadow_list
.next
->prev
= hp
;
4632 hp
->prev
= &shadow_list
;
4633 shadow_list
.next
= hp
;
4634 hhashp
= HDL_HHASH(hdlp
->ih_inum
);
4635 hp
->hnext
= hhashp
->hnext
;
4636 hhashp
->hnext
->hprev
= hp
;
4639 dhashp
= HDL_DHASH(hp
->dip
);
4640 hp
->dnext
= dhashp
->dnext
;
4641 dhashp
->dnext
->dprev
= hp
;
4645 * chain on any pre-existing errdefs that apply to this
4648 for (ep
= errent_listp
; ep
!= NULL
; ep
= ep
->next
) {
4649 if (ddi_name_to_major(hp
->name
) ==
4650 ddi_name_to_major(ep
->name
) &&
4651 hp
->instance
== ep
->errdef
.instance
&&
4652 (ep
->errdef
.access_type
& BOFI_INTR
)) {
4653 lp
= bofi_link_freelist
;
4655 bofi_link_freelist
= lp
->link
;
4657 lp
->link
= hp
->link
;
4662 mutex_exit(&bofi_mutex
);
4663 mutex_exit(&bofi_low_mutex
);
4665 case DDI_INTROP_REMISR
:
4667 * call nexus routine first
4669 retval
= save_bus_ops
.bus_intr_op(dip
, rdip
,
4670 intr_op
, hdlp
, result
);
4672 * find shadow handle
4674 mutex_enter(&bofi_low_mutex
);
4675 mutex_enter(&bofi_mutex
);
4676 hhashp
= HDL_HHASH(hdlp
->ih_inum
);
4677 for (hp
= hhashp
->hnext
; hp
!= hhashp
; hp
= hp
->hnext
) {
4678 if (hp
->dip
== rdip
&&
4679 hp
->type
== BOFI_INT_HDL
&&
4680 hp
->bofi_inum
== hdlp
->ih_inum
) {
4685 mutex_exit(&bofi_mutex
);
4686 mutex_exit(&bofi_low_mutex
);
4690 * found one - remove from dhash, hhash and inuse lists
4692 hp
->hnext
->hprev
= hp
->hprev
;
4693 hp
->hprev
->hnext
= hp
->hnext
;
4694 hp
->dnext
->dprev
= hp
->dprev
;
4695 hp
->dprev
->dnext
= hp
->dnext
;
4696 hp
->next
->prev
= hp
->prev
;
4697 hp
->prev
->next
= hp
->next
;
4699 * free any errdef link structures
4700 * tagged on to this shadow handle
4702 for (lp
= hp
->link
; lp
!= NULL
; ) {
4704 lp
->link
= bofi_link_freelist
;
4705 bofi_link_freelist
= lp
;
4709 mutex_exit(&bofi_mutex
);
4710 mutex_exit(&bofi_low_mutex
);
4711 kmem_free(hp
, sizeof (struct bofi_shadow
));
4714 return (save_bus_ops
.bus_intr_op(dip
, rdip
,
4715 intr_op
, hdlp
, result
));