1 /*
2 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
3 */
5 /*
6 * Copyright (c) 2018, Joyent, Inc.
7 * Copyright 2005-08 Adaptec, Inc.
8 * Copyright (c) 2005-08 Adaptec Inc., Achim Leubner
9 * Copyright (c) 2000 Michael Smith
10 * Copyright (c) 2001 Scott Long
11 * Copyright (c) 2000 BSDi
12 * All rights reserved.
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
35 #include <sys/modctl.h>
36 #include <sys/conf.h>
37 #include <sys/cmn_err.h>
38 #include <sys/ddi.h>
39 #include <sys/devops.h>
40 #include <sys/pci.h>
41 #include <sys/types.h>
42 #include <sys/ddidmareq.h>
43 #include <sys/scsi/scsi.h>
44 #include <sys/ksynch.h>
45 #include <sys/sunddi.h>
46 #include <sys/byteorder.h>
47 #include "aac_regs.h"
48 #include "aac.h"
51 * FMA header files
53 #include <sys/ddifm.h>
54 #include <sys/fm/protocol.h>
55 #include <sys/fm/util.h>
56 #include <sys/fm/io/ddi.h>
59 * For minor nodes created by the SCSA framework, minor numbers are
60 * formed by left-shifting the instance by INST_MINOR_SHIFT and ORing in
61 * a number less than 64.
63 * To support cfgadm, the driver needs to conform to the SCSA framework
64 * by creating devctl/scsi and driver-specific minor nodes in SCSA
65 * format, and calling scsi_hba_xxx() functions accordingly.
68 #define AAC_MINOR 32
69 #define INST2AAC(x) (((x) << INST_MINOR_SHIFT) | AAC_MINOR)
70 #define AAC_SCSA_MINOR(x) ((x) & TRAN_MINOR_MASK)
71 #define AAC_IS_SCSA_NODE(x) ((x) == DEVCTL_MINOR || (x) == SCSI_MINOR)
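/*
 * Illustrative decoding of the layout above (a reading of these macros,
 * not of the framework headers): for instance n, the driver's own "aac"
 * ioctl node gets minor number (n << INST_MINOR_SHIFT) | AAC_MINOR, so
 * MINOR2INST() can recover n, while the devctl/scsi nodes created for
 * cfgadm keep their low bits under TRAN_MINOR_MASK, letting
 * AAC_SCSA_MINOR() and AAC_IS_SCSA_NODE() tell the SCSA nodes apart from
 * the driver-private one.
 */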
73 #define SD2TRAN(sd) ((sd)->sd_address.a_hba_tran)
74 #define AAC_TRAN2SOFTS(tran) ((struct aac_softstate *)(tran)->tran_hba_private)
75 #define AAC_DIP2TRAN(dip) ((scsi_hba_tran_t *)ddi_get_driver_private(dip))
76 #define AAC_DIP2SOFTS(dip) (AAC_TRAN2SOFTS(AAC_DIP2TRAN(dip)))
77 #define SD2AAC(sd) (AAC_TRAN2SOFTS(SD2TRAN(sd)))
78 #define AAC_PD(t) ((t) - AAC_MAX_LD)
79 #define AAC_DEV(softs, t) (((t) < AAC_MAX_LD) ? \
80 &(softs)->containers[(t)].dev : \
81 ((t) < AAC_MAX_DEV(softs)) ? \
82 &(softs)->nondasds[AAC_PD(t)].dev : NULL)
83 #define AAC_DEVCFG_BEGIN(softs, tgt) \
84 aac_devcfg((softs), (tgt), 1)
85 #define AAC_DEVCFG_END(softs, tgt) \
86 aac_devcfg((softs), (tgt), 0)
87 #define PKT2AC(pkt) ((struct aac_cmd *)(pkt)->pkt_ha_private)
88 #define AAC_BUSYWAIT(cond, timeout /* in millisecond */) { \
89 if (!(cond)) { \
90 int count = (timeout) * 10; \
91 while (count) { \
92 drv_usecwait(100); \
93 if (cond) \
94 break; \
95 count--; \
96 } \
97 (timeout) = (count + 9) / 10; \
98 } \
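/*
 * A minimal usage sketch for AAC_BUSYWAIT() (hypothetical caller, not a
 * call site in this file): poll `cond' for up to `timeout' milliseconds;
 * on return `timeout' holds the milliseconds left, so zero means the
 * condition never became true within the allotted time:
 *
 *	int timeout = 1000;
 *	AAC_BUSYWAIT(ctlr_is_ready(softs), timeout);
 *	if (timeout == 0)
 *		return (AACERR);
 */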
101 #define AAC_SENSE_DATA_DESCR_LEN \
102 (sizeof (struct scsi_descr_sense_hdr) + \
103 sizeof (struct scsi_information_sense_descr))
104 #define AAC_ARQ64_LENGTH \
105 (sizeof (struct scsi_arq_status) + \
106 AAC_SENSE_DATA_DESCR_LEN - SENSE_LENGTH)
108 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */
109 #define AAC_GETGXADDR(cmdlen, cdbp) \
110 ((cmdlen == 6) ? GETG0ADDR(cdbp) : \
111 (cmdlen == 10) ? (uint32_t)GETG1ADDR(cdbp) : \
112 ((uint64_t)GETG4ADDR(cdbp) << 32) | (uint32_t)GETG4ADDRTL(cdbp))
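/*
 * For reference, this summarizes the standard SCSI CDB groups handled by
 * AAC_GETGXADDR() (background, not additional behavior): 6-byte Group 0
 * CDBs carry a 21-bit LBA, 10-byte Group 1 CDBs a 32-bit LBA, and 16-byte
 * Group 4 CDBs a 64-bit LBA that is assembled from the upper 32 bits
 * (GETG4ADDR) and the lower 32 bits (GETG4ADDRTL), hence the cast noted
 * above.
 */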
114 #define AAC_CDB_INQUIRY_CMDDT 0x02
115 #define AAC_CDB_INQUIRY_EVPD 0x01
116 #define AAC_VPD_PAGE_CODE 1
117 #define AAC_VPD_PAGE_LENGTH 3
118 #define AAC_VPD_PAGE_DATA 4
119 #define AAC_VPD_ID_CODESET 0
120 #define AAC_VPD_ID_TYPE 1
121 #define AAC_VPD_ID_LENGTH 3
122 #define AAC_VPD_ID_DATA 4
124 #define AAC_SCSI_RPTLUNS_HEAD_SIZE 0x08
125 #define AAC_SCSI_RPTLUNS_ADDR_SIZE 0x08
126 #define AAC_SCSI_RPTLUNS_ADDR_MASK 0xC0
127 /* 00b - peripheral device addressing method */
128 #define AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL 0x00
129 /* 01b - flat space addressing method */
130 #define AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE 0x40
131 /* 10b - logical unit addressing method */
132 #define AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT 0x80
134 /* Return the size of FIB with data part type data_type */
135 #define AAC_FIB_SIZEOF(data_type) \
136 (sizeof (struct aac_fib_header) + sizeof (data_type))
137 /* Return the container size defined in mir */
138 #define AAC_MIR_SIZE(softs, acc, mir) \
139 (((softs)->flags & AAC_FLAGS_LBA_64BIT) ? \
140 (uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity) + \
141 ((uint64_t)ddi_get32((acc), &(mir)->MntObj.CapacityHigh) << 32) : \
142 (uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity))
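/*
 * Worked example for AAC_MIR_SIZE() (illustrative values only): with
 * AAC_FLAGS_LBA_64BIT set, Capacity == 0 and CapacityHigh == 1 give a
 * container size of 1ULL << 32 blocks; without the flag only the low
 * 32-bit Capacity word is used, so reported sizes cap at 2^32 - 1 blocks.
 */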
144 /* The last entry of aac_cards[] is for unknown cards */
145 #define AAC_UNKNOWN_CARD \
146 (sizeof (aac_cards) / sizeof (struct aac_card_type) - 1)
147 #define CARD_IS_UNKNOWN(i) (i == AAC_UNKNOWN_CARD)
148 #define BUF_IS_READ(bp) ((bp)->b_flags & B_READ)
149 #define AAC_IS_Q_EMPTY(q) ((q)->q_head == NULL)
150 #define AAC_CMDQ(acp) (!((acp)->flags & AAC_CMD_SYNC))
152 #define PCI_MEM_GET32(softs, off) \
153 ddi_get32((softs)->pci_mem_handle, \
154 (void *)((softs)->pci_mem_base_vaddr + (off)))
155 #define PCI_MEM_PUT32(softs, off, val) \
156 ddi_put32((softs)->pci_mem_handle, \
157 (void *)((softs)->pci_mem_base_vaddr + (off)), \
158 (uint32_t)(val))
159 #define PCI_MEM_GET16(softs, off) \
160 ddi_get16((softs)->pci_mem_handle, \
161 (void *)((softs)->pci_mem_base_vaddr + (off)))
162 #define PCI_MEM_PUT16(softs, off, val) \
163 ddi_put16((softs)->pci_mem_handle, \
164 (void *)((softs)->pci_mem_base_vaddr + (off)), (uint16_t)(val))
165 /* Write host data at valp to device mem[off] repeatedly count times */
166 #define PCI_MEM_REP_PUT8(softs, off, valp, count) \
167 ddi_rep_put8((softs)->pci_mem_handle, (uint8_t *)(valp), \
168 (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
169 count, DDI_DEV_AUTOINCR)
170 /* Read device data at mem[off] to host addr valp repeatedly count times */
171 #define PCI_MEM_REP_GET8(softs, off, valp, count) \
172 ddi_rep_get8((softs)->pci_mem_handle, (uint8_t *)(valp), \
173 (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
174 count, DDI_DEV_AUTOINCR)
175 #define AAC_GET_FIELD8(acc, d, s, field) \
176 (d)->field = ddi_get8(acc, (uint8_t *)&(s)->field)
177 #define AAC_GET_FIELD32(acc, d, s, field) \
178 (d)->field = ddi_get32(acc, (uint32_t *)&(s)->field)
179 #define AAC_GET_FIELD64(acc, d, s, field) \
180 (d)->field = ddi_get64(acc, (uint64_t *)&(s)->field)
181 #define AAC_REP_GET_FIELD8(acc, d, s, field, r) \
182 ddi_rep_get8((acc), (uint8_t *)&(d)->field, \
183 (uint8_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)
184 #define AAC_REP_GET_FIELD32(acc, d, s, field, r) \
185 ddi_rep_get32((acc), (uint32_t *)&(d)->field, \
186 (uint32_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)
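/*
 * Note on the accessor macros above (an observation about the pattern,
 * not extra functionality): all register and shared-memory access is
 * funneled through DDI access handles instead of raw pointer
 * dereferences, which is what allows aac_check_acc_handle() and
 * aac_check_dma_handle() to report faulted transactions to FMA later on.
 */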
188 #define AAC_ENABLE_INTR(softs) { \
189 if (softs->flags & AAC_FLAGS_NEW_COMM) \
190 PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_NEW); \
191 else \
192 PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_BITS); \
193 softs->state |= AAC_STATE_INTR; \
196 #define AAC_DISABLE_INTR(softs) { \
197 PCI_MEM_PUT32(softs, AAC_OIMR, ~0); \
198 softs->state &= ~AAC_STATE_INTR; \
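/*
 * Reading of the OIMR writes above (inferred from the code, not from
 * firmware documentation): a set bit in the outbound interrupt mask
 * register masks that source, so writing the complement of the doorbell
 * bits unmasks exactly those interrupts, and writing ~0 masks them all.
 */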
200 #define AAC_STATUS_CLR(softs, mask) PCI_MEM_PUT32(softs, AAC_ODBR, mask)
201 #define AAC_STATUS_GET(softs) PCI_MEM_GET32(softs, AAC_ODBR)
202 #define AAC_NOTIFY(softs, val) PCI_MEM_PUT32(softs, AAC_IDBR, val)
203 #define AAC_OUTB_GET(softs) PCI_MEM_GET32(softs, AAC_OQUE)
204 #define AAC_OUTB_SET(softs, val) PCI_MEM_PUT32(softs, AAC_OQUE, val)
205 #define AAC_FWSTATUS_GET(softs) \
206 ((softs)->aac_if.aif_get_fwstatus(softs))
207 #define AAC_MAILBOX_GET(softs, mb) \
208 ((softs)->aac_if.aif_get_mailbox((softs), (mb)))
209 #define AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3) \
210 ((softs)->aac_if.aif_set_mailbox((softs), (cmd), \
211 (arg0), (arg1), (arg2), (arg3)))
213 #define AAC_MGT_SLOT_NUM 2
214 #define AAC_THROTTLE_DRAIN -1
216 #define AAC_QUIESCE_TICK 1 /* 1 second */
217 #define AAC_QUIESCE_TIMEOUT 180 /* 180 seconds */
218 #define AAC_DEFAULT_TICK 10 /* 10 seconds */
219 #define AAC_SYNC_TICK (30*60) /* 30 minutes */
221 /* Poll time for aac_do_poll_io() */
222 #define AAC_POLL_TIME 60 /* 60 seconds */
224 /* IOP reset */
225 #define AAC_IOP_RESET_SUCCEED 0 /* IOP reset succeeded */
226 #define AAC_IOP_RESET_FAILED -1 /* IOP reset failed */
227 #define AAC_IOP_RESET_ABNORMAL -2 /* Reset operation abnormal */
230 * Hardware access functions
232 static int aac_rx_get_fwstatus(struct aac_softstate *);
233 static int aac_rx_get_mailbox(struct aac_softstate *, int);
234 static void aac_rx_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
235 uint32_t, uint32_t, uint32_t);
236 static int aac_rkt_get_fwstatus(struct aac_softstate *);
237 static int aac_rkt_get_mailbox(struct aac_softstate *, int);
238 static void aac_rkt_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
239 uint32_t, uint32_t, uint32_t);
242 * SCSA function prototypes
244 static int aac_attach(dev_info_t *, ddi_attach_cmd_t);
245 static int aac_detach(dev_info_t *, ddi_detach_cmd_t);
246 static int aac_reset(dev_info_t *, ddi_reset_cmd_t);
247 static int aac_quiesce(dev_info_t *);
248 static int aac_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
251 * Interrupt handler functions
253 static int aac_query_intrs(struct aac_softstate *, int);
254 static int aac_add_intrs(struct aac_softstate *);
255 static void aac_remove_intrs(struct aac_softstate *);
256 static int aac_enable_intrs(struct aac_softstate *);
257 static int aac_disable_intrs(struct aac_softstate *);
258 static uint_t aac_intr_old(caddr_t);
259 static uint_t aac_intr_new(caddr_t);
260 static uint_t aac_softintr(caddr_t);
263 * Internal functions in attach
265 static int aac_check_card_type(struct aac_softstate *);
266 static int aac_check_firmware(struct aac_softstate *);
267 static int aac_common_attach(struct aac_softstate *);
268 static void aac_common_detach(struct aac_softstate *);
269 static int aac_probe_containers(struct aac_softstate *);
270 static int aac_alloc_comm_space(struct aac_softstate *);
271 static int aac_setup_comm_space(struct aac_softstate *);
272 static void aac_free_comm_space(struct aac_softstate *);
273 static int aac_hba_setup(struct aac_softstate *);
276 * Sync FIB operation functions
278 int aac_sync_mbcommand(struct aac_softstate *, uint32_t, uint32_t,
279 uint32_t, uint32_t, uint32_t, uint32_t *);
280 static int aac_sync_fib(struct aac_softstate *, uint16_t, uint16_t);
283 * Command queue operation functions
285 static void aac_cmd_initq(struct aac_cmd_queue *);
286 static void aac_cmd_enqueue(struct aac_cmd_queue *, struct aac_cmd *);
287 static struct aac_cmd *aac_cmd_dequeue(struct aac_cmd_queue *);
288 static void aac_cmd_delete(struct aac_cmd_queue *, struct aac_cmd *);
291 * FIB queue operation functions
293 static int aac_fib_enqueue(struct aac_softstate *, int, uint32_t, uint32_t);
294 static int aac_fib_dequeue(struct aac_softstate *, int, int *);
297 * Slot operation functions
299 static int aac_create_slots(struct aac_softstate *);
300 static void aac_destroy_slots(struct aac_softstate *);
301 static void aac_alloc_fibs(struct aac_softstate *);
302 static void aac_destroy_fibs(struct aac_softstate *);
303 static struct aac_slot *aac_get_slot(struct aac_softstate *);
304 static void aac_release_slot(struct aac_softstate *, struct aac_slot *);
305 static int aac_alloc_fib(struct aac_softstate *, struct aac_slot *);
306 static void aac_free_fib(struct aac_slot *);
309 * Internal functions
311 static void aac_cmd_fib_header(struct aac_softstate *, struct aac_cmd *,
312 uint16_t);
313 static void aac_cmd_fib_rawio(struct aac_softstate *, struct aac_cmd *);
314 static void aac_cmd_fib_brw64(struct aac_softstate *, struct aac_cmd *);
315 static void aac_cmd_fib_brw(struct aac_softstate *, struct aac_cmd *);
316 static void aac_cmd_fib_sync(struct aac_softstate *, struct aac_cmd *);
317 static void aac_cmd_fib_scsi32(struct aac_softstate *, struct aac_cmd *);
318 static void aac_cmd_fib_scsi64(struct aac_softstate *, struct aac_cmd *);
319 static void aac_cmd_fib_startstop(struct aac_softstate *, struct aac_cmd *);
320 static void aac_start_waiting_io(struct aac_softstate *);
321 static void aac_drain_comp_q(struct aac_softstate *);
322 int aac_do_io(struct aac_softstate *, struct aac_cmd *);
323 static int aac_sync_fib_slot_bind(struct aac_softstate *, struct aac_cmd *);
324 static void aac_sync_fib_slot_release(struct aac_softstate *, struct aac_cmd *);
325 static void aac_start_io(struct aac_softstate *, struct aac_cmd *);
326 static int aac_do_poll_io(struct aac_softstate *, struct aac_cmd *);
327 static int aac_do_sync_io(struct aac_softstate *, struct aac_cmd *);
328 static int aac_send_command(struct aac_softstate *, struct aac_slot *);
329 static void aac_cmd_timeout(struct aac_softstate *, struct aac_cmd *);
330 static int aac_dma_sync_ac(struct aac_cmd *);
331 static int aac_shutdown(struct aac_softstate *);
332 static int aac_reset_adapter(struct aac_softstate *);
333 static int aac_do_quiesce(struct aac_softstate *softs);
334 static int aac_do_unquiesce(struct aac_softstate *softs);
335 static void aac_unhold_bus(struct aac_softstate *, int);
336 static void aac_set_throttle(struct aac_softstate *, struct aac_device *,
337 int, int);
340 * Adapter Initiated FIB handling function
342 static void aac_save_aif(struct aac_softstate *, ddi_acc_handle_t,
343 struct aac_fib *, int);
344 static int aac_handle_aif(struct aac_softstate *, struct aac_aif_command *);
347 * Event handling related functions
349 static void aac_timer(void *);
350 static void aac_event_thread(struct aac_softstate *);
351 static void aac_event_disp(struct aac_softstate *, int);
354 * IOCTL interface related functions
356 static int aac_open(dev_t *, int, int, cred_t *);
357 static int aac_close(dev_t, int, int, cred_t *);
358 static int aac_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
359 extern int aac_do_ioctl(struct aac_softstate *, dev_t, int, intptr_t, int);
362 * FMA Prototypes
364 static void aac_fm_init(struct aac_softstate *);
365 static void aac_fm_fini(struct aac_softstate *);
366 static int aac_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
367 int aac_check_acc_handle(ddi_acc_handle_t);
368 int aac_check_dma_handle(ddi_dma_handle_t);
369 void aac_fm_ereport(struct aac_softstate *, char *);
372 * Auto enumeration functions
374 static dev_info_t *aac_find_child(struct aac_softstate *, uint16_t, uint8_t);
375 static int aac_tran_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
376 void *, dev_info_t **);
377 static int aac_handle_dr(struct aac_softstate *, int, int, int);
379 extern pri_t minclsyspri;
381 #ifdef DEBUG
383 * UART debug output support
386 #define AAC_PRINT_BUFFER_SIZE 512
387 #define AAC_PRINT_TIMEOUT 250 /* 1/4 sec. = 250 msec. */
389 #define AAC_FW_DBG_STRLEN_OFFSET 0x00
390 #define AAC_FW_DBG_FLAGS_OFFSET 0x04
391 #define AAC_FW_DBG_BLED_OFFSET 0x08
393 static int aac_get_fw_debug_buffer(struct aac_softstate *);
394 static void aac_print_scmd(struct aac_softstate *, struct aac_cmd *);
395 static void aac_print_aif(struct aac_softstate *, struct aac_aif_command *);
397 static char aac_prt_buf[AAC_PRINT_BUFFER_SIZE];
398 static char aac_fmt[] = " %s";
399 static char aac_fmt_header[] = " %s.%d: %s";
400 static kmutex_t aac_prt_mutex;
403 * Debug flags to be put into the softstate flags field
404 * when initialized
406 uint32_t aac_debug_flags =
407 /* AACDB_FLAGS_KERNEL_PRINT | */
408 /* AACDB_FLAGS_FW_PRINT | */
409 /* AACDB_FLAGS_MISC | */
410 /* AACDB_FLAGS_FUNC1 | */
411 /* AACDB_FLAGS_FUNC2 | */
412 /* AACDB_FLAGS_SCMD | */
413 /* AACDB_FLAGS_AIF | */
414 /* AACDB_FLAGS_FIB | */
415 /* AACDB_FLAGS_IOCTL | */
417 uint32_t aac_debug_fib_flags =
418 /* AACDB_FLAGS_FIB_RW | */
419 /* AACDB_FLAGS_FIB_IOCTL | */
420 /* AACDB_FLAGS_FIB_SRB | */
421 /* AACDB_FLAGS_FIB_SYNC | */
422 /* AACDB_FLAGS_FIB_HEADER | */
423 /* AACDB_FLAGS_FIB_TIMEOUT | */
426 #endif /* DEBUG */
428 static struct cb_ops aac_cb_ops = {
429 aac_open, /* open */
430 aac_close, /* close */
431 nodev, /* strategy */
432 nodev, /* print */
433 nodev, /* dump */
434 nodev, /* read */
435 nodev, /* write */
436 aac_ioctl, /* ioctl */
437 nodev, /* devmap */
438 nodev, /* mmap */
439 nodev, /* segmap */
440 nochpoll, /* poll */
441 ddi_prop_op, /* cb_prop_op */
442 NULL, /* streamtab */
443 D_64BIT | D_NEW | D_MP | D_HOTPLUG, /* cb_flag */
444 CB_REV, /* cb_rev */
445 nodev, /* async I/O read entry point */
446 nodev /* async I/O write entry point */
449 static struct dev_ops aac_dev_ops = {
450 DEVO_REV,
452 aac_getinfo,
453 nulldev,
454 nulldev,
455 aac_attach,
456 aac_detach,
457 aac_reset,
458 &aac_cb_ops,
459 NULL,
460 NULL,
461 aac_quiesce,
464 static struct modldrv aac_modldrv = {
465 &mod_driverops,
466 "AAC Driver " AAC_DRIVER_VERSION,
467 &aac_dev_ops,
470 static struct modlinkage aac_modlinkage = {
471 MODREV_1,
472 &aac_modldrv,
473 NULL
476 static struct aac_softstate *aac_softstatep;
479 * Supported card list
480 * ordered by vendor id, subvendor id, subdevice id, and device id
482 static struct aac_card_type aac_cards[] = {
483 {0x1028, 0x1, 0x1028, 0x1, AAC_HWIF_I960RX,
484 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
485 "Dell", "PERC 3/Di"},
486 {0x1028, 0x2, 0x1028, 0x2, AAC_HWIF_I960RX,
487 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
488 "Dell", "PERC 3/Di"},
489 {0x1028, 0x3, 0x1028, 0x3, AAC_HWIF_I960RX,
490 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
491 "Dell", "PERC 3/Si"},
492 {0x1028, 0x8, 0x1028, 0xcf, AAC_HWIF_I960RX,
493 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
494 "Dell", "PERC 3/Di"},
495 {0x1028, 0x4, 0x1028, 0xd0, AAC_HWIF_I960RX,
496 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
497 "Dell", "PERC 3/Si"},
498 {0x1028, 0x2, 0x1028, 0xd1, AAC_HWIF_I960RX,
499 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
500 "Dell", "PERC 3/Di"},
501 {0x1028, 0x2, 0x1028, 0xd9, AAC_HWIF_I960RX,
502 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
503 "Dell", "PERC 3/Di"},
504 {0x1028, 0xa, 0x1028, 0x106, AAC_HWIF_I960RX,
505 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
506 "Dell", "PERC 3/Di"},
507 {0x1028, 0xa, 0x1028, 0x11b, AAC_HWIF_I960RX,
508 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
509 "Dell", "PERC 3/Di"},
510 {0x1028, 0xa, 0x1028, 0x121, AAC_HWIF_I960RX,
511 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
512 "Dell", "PERC 3/Di"},
513 {0x9005, 0x285, 0x1028, 0x287, AAC_HWIF_I960RX,
514 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
515 "Dell", "PERC 320/DC"},
516 {0x9005, 0x285, 0x1028, 0x291, AAC_HWIF_I960RX,
517 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Dell", "CERC SR2"},
519 {0x9005, 0x285, 0x1014, 0x2f2, AAC_HWIF_I960RX,
520 0, AAC_TYPE_SCSI, "IBM", "ServeRAID 8i"},
521 {0x9005, 0x285, 0x1014, 0x34d, AAC_HWIF_I960RX,
522 0, AAC_TYPE_SAS, "IBM", "ServeRAID 8s"},
523 {0x9005, 0x286, 0x1014, 0x9580, AAC_HWIF_RKT,
524 0, AAC_TYPE_SAS, "IBM", "ServeRAID 8k"},
526 {0x9005, 0x285, 0x103c, 0x3227, AAC_HWIF_I960RX,
527 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},
528 {0x9005, 0x285, 0xe11, 0x295, AAC_HWIF_I960RX,
529 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},
531 {0x9005, 0x285, 0x9005, 0x285, AAC_HWIF_I960RX,
532 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
533 "Adaptec", "2200S"},
534 {0x9005, 0x285, 0x9005, 0x286, AAC_HWIF_I960RX,
535 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
536 "Adaptec", "2120S"},
537 {0x9005, 0x285, 0x9005, 0x287, AAC_HWIF_I960RX,
538 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
539 "Adaptec", "2200S"},
540 {0x9005, 0x285, 0x9005, 0x288, AAC_HWIF_I960RX,
541 0, AAC_TYPE_SCSI, "Adaptec", "3230S"},
542 {0x9005, 0x285, 0x9005, 0x289, AAC_HWIF_I960RX,
543 0, AAC_TYPE_SCSI, "Adaptec", "3240S"},
544 {0x9005, 0x285, 0x9005, 0x28a, AAC_HWIF_I960RX,
545 0, AAC_TYPE_SCSI, "Adaptec", "2020ZCR"},
546 {0x9005, 0x285, 0x9005, 0x28b, AAC_HWIF_I960RX,
547 0, AAC_TYPE_SCSI, "Adaptec", "2025ZCR"},
548 {0x9005, 0x286, 0x9005, 0x28c, AAC_HWIF_RKT,
549 0, AAC_TYPE_SCSI, "Adaptec", "2230S"},
550 {0x9005, 0x286, 0x9005, 0x28d, AAC_HWIF_RKT,
551 0, AAC_TYPE_SCSI, "Adaptec", "2130S"},
552 {0x9005, 0x285, 0x9005, 0x28e, AAC_HWIF_I960RX,
553 0, AAC_TYPE_SATA, "Adaptec", "2020SA"},
554 {0x9005, 0x285, 0x9005, 0x28f, AAC_HWIF_I960RX,
555 0, AAC_TYPE_SATA, "Adaptec", "2025SA"},
556 {0x9005, 0x285, 0x9005, 0x290, AAC_HWIF_I960RX,
557 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2410SA"},
558 {0x9005, 0x285, 0x9005, 0x292, AAC_HWIF_I960RX,
559 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2810SA"},
560 {0x9005, 0x285, 0x9005, 0x293, AAC_HWIF_I960RX,
561 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "21610SA"},
562 {0x9005, 0x285, 0x9005, 0x294, AAC_HWIF_I960RX,
563 0, AAC_TYPE_SATA, "Adaptec", "2026ZCR"},
564 {0x9005, 0x285, 0x9005, 0x296, AAC_HWIF_I960RX,
565 0, AAC_TYPE_SCSI, "Adaptec", "2240S"},
566 {0x9005, 0x285, 0x9005, 0x297, AAC_HWIF_I960RX,
567 0, AAC_TYPE_SAS, "Adaptec", "4005SAS"},
568 {0x9005, 0x285, 0x9005, 0x298, AAC_HWIF_I960RX,
569 0, AAC_TYPE_SAS, "Adaptec", "RAID 4000"},
570 {0x9005, 0x285, 0x9005, 0x299, AAC_HWIF_I960RX,
571 0, AAC_TYPE_SAS, "Adaptec", "4800SAS"},
572 {0x9005, 0x285, 0x9005, 0x29a, AAC_HWIF_I960RX,
573 0, AAC_TYPE_SAS, "Adaptec", "4805SAS"},
574 {0x9005, 0x286, 0x9005, 0x29b, AAC_HWIF_RKT,
575 0, AAC_TYPE_SATA, "Adaptec", "2820SA"},
576 {0x9005, 0x286, 0x9005, 0x29c, AAC_HWIF_RKT,
577 0, AAC_TYPE_SATA, "Adaptec", "2620SA"},
578 {0x9005, 0x286, 0x9005, 0x29d, AAC_HWIF_RKT,
579 0, AAC_TYPE_SATA, "Adaptec", "2420SA"},
580 {0x9005, 0x286, 0x9005, 0x29e, AAC_HWIF_RKT,
581 0, AAC_TYPE_SATA, "ICP", "9024RO"},
582 {0x9005, 0x286, 0x9005, 0x29f, AAC_HWIF_RKT,
583 0, AAC_TYPE_SATA, "ICP", "9014RO"},
584 {0x9005, 0x286, 0x9005, 0x2a0, AAC_HWIF_RKT,
585 0, AAC_TYPE_SATA, "ICP", "9047MA"},
586 {0x9005, 0x286, 0x9005, 0x2a1, AAC_HWIF_RKT,
587 0, AAC_TYPE_SATA, "ICP", "9087MA"},
588 {0x9005, 0x285, 0x9005, 0x2a4, AAC_HWIF_I960RX,
589 0, AAC_TYPE_SAS, "ICP", "9085LI"},
590 {0x9005, 0x285, 0x9005, 0x2a5, AAC_HWIF_I960RX,
591 0, AAC_TYPE_SAS, "ICP", "5085BR"},
592 {0x9005, 0x286, 0x9005, 0x2a6, AAC_HWIF_RKT,
593 0, AAC_TYPE_SATA, "ICP", "9067MA"},
594 {0x9005, 0x285, 0x9005, 0x2b5, AAC_HWIF_I960RX,
595 0, AAC_TYPE_SAS, "Adaptec", "RAID 5445"},
596 {0x9005, 0x285, 0x9005, 0x2b6, AAC_HWIF_I960RX,
597 0, AAC_TYPE_SAS, "Adaptec", "RAID 5805"},
598 {0x9005, 0x285, 0x9005, 0x2b7, AAC_HWIF_I960RX,
599 0, AAC_TYPE_SAS, "Adaptec", "RAID 5085"},
600 {0x9005, 0x285, 0x9005, 0x2b8, AAC_HWIF_I960RX,
601 0, AAC_TYPE_SAS, "ICP", "RAID ICP5445SL"},
602 {0x9005, 0x285, 0x9005, 0x2b9, AAC_HWIF_I960RX,
603 0, AAC_TYPE_SAS, "ICP", "RAID ICP5085SL"},
604 {0x9005, 0x285, 0x9005, 0x2ba, AAC_HWIF_I960RX,
605 0, AAC_TYPE_SAS, "ICP", "RAID ICP5805SL"},
607 {0, 0, 0, 0, AAC_HWIF_UNKNOWN,
608 0, AAC_TYPE_UNKNOWN, "Unknown", "AAC card"},
612 * Hardware access functions for i960 based cards
614 static struct aac_interface aac_rx_interface = {
615 aac_rx_get_fwstatus,
616 aac_rx_get_mailbox,
617 aac_rx_set_mailbox
621 * Hardware access functions for Rocket based cards
623 static struct aac_interface aac_rkt_interface = {
624 aac_rkt_get_fwstatus,
625 aac_rkt_get_mailbox,
626 aac_rkt_set_mailbox
629 ddi_device_acc_attr_t aac_acc_attr = {
630 DDI_DEVICE_ATTR_V1,
631 DDI_STRUCTURE_LE_ACC,
632 DDI_STRICTORDER_ACC,
633 DDI_DEFAULT_ACC
636 static struct {
637 int size;
638 int notify;
639 } aac_qinfo[] = {
640 {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL},
641 {AAC_HOST_HIGH_CMD_ENTRIES, 0},
642 {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY},
643 {AAC_ADAP_HIGH_CMD_ENTRIES, 0},
644 {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL},
645 {AAC_HOST_HIGH_RESP_ENTRIES, 0},
646 {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY},
647 {AAC_ADAP_HIGH_RESP_ENTRIES, 0}
651 * Default aac dma attributes
653 static ddi_dma_attr_t aac_dma_attr = {
654 DMA_ATTR_V0,
655 0, /* lowest usable address */
656 0xffffffffull, /* high DMA address range */
657 0xffffffffull, /* DMA counter register */
658 AAC_DMA_ALIGN, /* DMA address alignment */
659 1, /* DMA burstsizes */
660 1, /* min effective DMA size */
661 0xffffffffull, /* max DMA xfer size */
662 0xffffffffull, /* segment boundary */
663 1, /* s/g list length */
664 AAC_BLK_SIZE, /* granularity of device */
665 0 /* DMA transfer flags */
668 static int aac_tick = AAC_DEFAULT_TICK; /* tick for the internal timer */
669 static uint32_t aac_timebase = 0; /* internal timer in seconds */
672 _init(void)
674 int rval = 0;
676 #ifdef DEBUG
677 mutex_init(&aac_prt_mutex, NULL, MUTEX_DRIVER, NULL);
678 #endif
679 DBCALLED(NULL, 1);
681 if ((rval = ddi_soft_state_init((void *)&aac_softstatep,
682 sizeof (struct aac_softstate), 0)) != 0)
683 goto error;
685 if ((rval = scsi_hba_init(&aac_modlinkage)) != 0) {
686 ddi_soft_state_fini((void *)&aac_softstatep);
687 goto error;
690 if ((rval = mod_install(&aac_modlinkage)) != 0) {
691 ddi_soft_state_fini((void *)&aac_softstatep);
692 scsi_hba_fini(&aac_modlinkage);
693 goto error;
695 return (rval);
697 error:
698 AACDB_PRINT(NULL, CE_WARN, "Mod init error!");
699 #ifdef DEBUG
700 mutex_destroy(&aac_prt_mutex);
701 #endif
702 return (rval);
706 _info(struct modinfo *modinfop)
708 DBCALLED(NULL, 1);
709 return (mod_info(&aac_modlinkage, modinfop));
713 * An HBA driver cannot be unloaded unless you reboot,
714 * so this function will be of no use.
717 _fini(void)
719 int rval;
721 DBCALLED(NULL, 1);
723 if ((rval = mod_remove(&aac_modlinkage)) != 0)
724 goto error;
726 scsi_hba_fini(&aac_modlinkage);
727 ddi_soft_state_fini((void *)&aac_softstatep);
728 #ifdef DEBUG
729 mutex_destroy(&aac_prt_mutex);
730 #endif
731 return (0);
733 error:
734 AACDB_PRINT(NULL, CE_WARN, "AAC is busy, cannot unload!");
735 return (rval);
738 static int
739 aac_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
741 int instance, i;
742 struct aac_softstate *softs = NULL;
743 int attach_state = 0;
744 char *data;
746 DBCALLED(NULL, 1);
748 switch (cmd) {
749 case DDI_ATTACH:
750 break;
751 case DDI_RESUME:
752 return (DDI_FAILURE);
753 default:
754 return (DDI_FAILURE);
757 instance = ddi_get_instance(dip);
759 /* Get soft state */
760 if (ddi_soft_state_zalloc(aac_softstatep, instance) != DDI_SUCCESS) {
761 AACDB_PRINT(softs, CE_WARN, "Cannot alloc soft state");
762 goto error;
764 softs = ddi_get_soft_state(aac_softstatep, instance);
765 attach_state |= AAC_ATTACH_SOFTSTATE_ALLOCED;
767 softs->instance = instance;
768 softs->devinfo_p = dip;
769 softs->buf_dma_attr = softs->addr_dma_attr = aac_dma_attr;
770 softs->addr_dma_attr.dma_attr_granular = 1;
771 softs->acc_attr = aac_acc_attr;
772 softs->reg_attr = aac_acc_attr;
773 softs->card = AAC_UNKNOWN_CARD;
774 #ifdef DEBUG
775 softs->debug_flags = aac_debug_flags;
776 softs->debug_fib_flags = aac_debug_fib_flags;
777 #endif
779 /* Initialize FMA */
780 aac_fm_init(softs);
782 /* Check the card type */
783 if (aac_check_card_type(softs) == AACERR) {
784 AACDB_PRINT(softs, CE_WARN, "Card not supported");
785 goto error;
787 /* We have found the right card and everything is OK */
788 attach_state |= AAC_ATTACH_CARD_DETECTED;
790 /* Map PCI mem space */
791 if (ddi_regs_map_setup(dip, 1,
792 (caddr_t *)&softs->pci_mem_base_vaddr, 0,
793 softs->map_size_min, &softs->reg_attr,
794 &softs->pci_mem_handle) != DDI_SUCCESS)
795 goto error;
797 softs->map_size = softs->map_size_min;
798 attach_state |= AAC_ATTACH_PCI_MEM_MAPPED;
800 AAC_DISABLE_INTR(softs);
802 /* Init mutexes and condvars */
803 mutex_init(&softs->io_lock, NULL, MUTEX_DRIVER,
804 DDI_INTR_PRI(softs->intr_pri));
805 mutex_init(&softs->q_comp_mutex, NULL, MUTEX_DRIVER,
806 DDI_INTR_PRI(softs->intr_pri));
807 mutex_init(&softs->time_mutex, NULL, MUTEX_DRIVER,
808 DDI_INTR_PRI(softs->intr_pri));
809 mutex_init(&softs->ev_lock, NULL, MUTEX_DRIVER,
810 DDI_INTR_PRI(softs->intr_pri));
811 mutex_init(&softs->aifq_mutex, NULL,
812 MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri));
813 cv_init(&softs->event, NULL, CV_DRIVER, NULL);
814 cv_init(&softs->sync_fib_cv, NULL, CV_DRIVER, NULL);
815 cv_init(&softs->drain_cv, NULL, CV_DRIVER, NULL);
816 cv_init(&softs->event_wait_cv, NULL, CV_DRIVER, NULL);
817 cv_init(&softs->event_disp_cv, NULL, CV_DRIVER, NULL);
818 cv_init(&softs->aifq_cv, NULL, CV_DRIVER, NULL);
819 attach_state |= AAC_ATTACH_KMUTEX_INITED;
821 /* Init the cmd queues */
822 for (i = 0; i < AAC_CMDQ_NUM; i++)
823 aac_cmd_initq(&softs->q_wait[i]);
824 aac_cmd_initq(&softs->q_busy);
825 aac_cmd_initq(&softs->q_comp);
827 /* Check for legacy device naming support */
828 softs->legacy = 1; /* default to use legacy name */
829 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
830 "legacy-name-enable", &data) == DDI_SUCCESS)) {
831 if (strcmp(data, "no") == 0) {
832 AACDB_PRINT(softs, CE_NOTE, "legacy-name disabled");
833 softs->legacy = 0;
835 ddi_prop_free(data);
839 * Everything has been set up by this point,
840 * so now do the common attach.
842 mutex_enter(&softs->io_lock);
843 if (aac_common_attach(softs) == AACERR) {
844 mutex_exit(&softs->io_lock);
845 goto error;
847 mutex_exit(&softs->io_lock);
848 attach_state |= AAC_ATTACH_COMM_SPACE_SETUP;
850 /* Check for buf breakup support */
851 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
852 "breakup-enable", &data) == DDI_SUCCESS)) {
853 if (strcmp(data, "yes") == 0) {
854 AACDB_PRINT(softs, CE_NOTE, "buf breakup enabled");
855 softs->flags |= AAC_FLAGS_BRKUP;
857 ddi_prop_free(data);
859 softs->dma_max = softs->buf_dma_attr.dma_attr_maxxfer;
860 if (softs->flags & AAC_FLAGS_BRKUP) {
861 softs->dma_max = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
862 DDI_PROP_DONTPASS, "dma-max", softs->dma_max);
865 if (aac_hba_setup(softs) != AACOK)
866 goto error;
867 attach_state |= AAC_ATTACH_SCSI_TRAN_SETUP;
869 /* Create devctl/scsi nodes for cfgadm */
870 if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
871 INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
872 AACDB_PRINT(softs, CE_WARN, "failed to create devctl node");
873 goto error;
875 attach_state |= AAC_ATTACH_CREATE_DEVCTL;
877 if (ddi_create_minor_node(dip, "scsi", S_IFCHR, INST2SCSI(instance),
878 DDI_NT_SCSI_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
879 AACDB_PRINT(softs, CE_WARN, "failed to create scsi node");
880 goto error;
882 attach_state |= AAC_ATTACH_CREATE_SCSI;
884 /* Create aac node for app. to issue ioctls */
885 if (ddi_create_minor_node(dip, "aac", S_IFCHR, INST2AAC(instance),
886 DDI_PSEUDO, 0) != DDI_SUCCESS) {
887 AACDB_PRINT(softs, CE_WARN, "failed to create aac node");
888 goto error;
891 /* Common attach is OK, so we are attached! */
892 softs->state |= AAC_STATE_RUN;
894 /* Create event thread */
895 softs->fibctx_p = &softs->aifctx;
896 if ((softs->event_thread = thread_create(NULL, 0, aac_event_thread,
897 softs, 0, &p0, TS_RUN, minclsyspri)) == NULL) {
898 AACDB_PRINT(softs, CE_WARN, "aif thread create failed");
899 softs->state &= ~AAC_STATE_RUN;
900 goto error;
903 aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
905 /* Create a thread for command timeout */
906 softs->timeout_id = timeout(aac_timer, (void *)softs,
907 (aac_tick * drv_usectohz(1000000)));
909 /* Attach is complete, report the device */
910 ddi_report_dev(dip);
911 AACDB_PRINT(softs, CE_NOTE, "aac attached ok");
912 return (DDI_SUCCESS);
914 error:
915 if (attach_state & AAC_ATTACH_CREATE_SCSI)
916 ddi_remove_minor_node(dip, "scsi");
917 if (attach_state & AAC_ATTACH_CREATE_DEVCTL)
918 ddi_remove_minor_node(dip, "devctl");
919 if (attach_state & AAC_ATTACH_COMM_SPACE_SETUP)
920 aac_common_detach(softs);
921 if (attach_state & AAC_ATTACH_SCSI_TRAN_SETUP) {
922 (void) scsi_hba_detach(dip);
923 scsi_hba_tran_free(AAC_DIP2TRAN(dip));
925 if (attach_state & AAC_ATTACH_KMUTEX_INITED) {
926 mutex_destroy(&softs->io_lock);
927 mutex_destroy(&softs->q_comp_mutex);
928 mutex_destroy(&softs->time_mutex);
929 mutex_destroy(&softs->ev_lock);
930 mutex_destroy(&softs->aifq_mutex);
931 cv_destroy(&softs->event);
932 cv_destroy(&softs->sync_fib_cv);
933 cv_destroy(&softs->drain_cv);
934 cv_destroy(&softs->event_wait_cv);
935 cv_destroy(&softs->event_disp_cv);
936 cv_destroy(&softs->aifq_cv);
938 if (attach_state & AAC_ATTACH_PCI_MEM_MAPPED)
939 ddi_regs_map_free(&softs->pci_mem_handle);
940 aac_fm_fini(softs);
941 if (attach_state & AAC_ATTACH_CARD_DETECTED)
942 softs->card = AACERR;
943 if (attach_state & AAC_ATTACH_SOFTSTATE_ALLOCED)
944 ddi_soft_state_free(aac_softstatep, instance);
945 return (DDI_FAILURE);
948 static int
949 aac_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
951 scsi_hba_tran_t *tran = AAC_DIP2TRAN(dip);
952 struct aac_softstate *softs = AAC_TRAN2SOFTS(tran);
954 DBCALLED(softs, 1);
956 switch (cmd) {
957 case DDI_DETACH:
958 break;
959 case DDI_SUSPEND:
960 return (DDI_FAILURE);
961 default:
962 return (DDI_FAILURE);
965 mutex_enter(&softs->io_lock);
966 AAC_DISABLE_INTR(softs);
967 softs->state = AAC_STATE_STOPPED;
969 ddi_remove_minor_node(dip, "aac");
970 ddi_remove_minor_node(dip, "scsi");
971 ddi_remove_minor_node(dip, "devctl");
972 mutex_exit(&softs->io_lock);
974 aac_common_detach(softs);
976 mutex_enter(&softs->io_lock);
977 (void) scsi_hba_detach(dip);
978 scsi_hba_tran_free(tran);
979 mutex_exit(&softs->io_lock);
981 /* Stop timer */
982 mutex_enter(&softs->time_mutex);
983 if (softs->timeout_id) {
984 timeout_id_t tid = softs->timeout_id;
985 softs->timeout_id = 0;
987 mutex_exit(&softs->time_mutex);
988 (void) untimeout(tid);
989 mutex_enter(&softs->time_mutex);
991 mutex_exit(&softs->time_mutex);
993 /* Destroy event thread */
994 mutex_enter(&softs->ev_lock);
995 cv_signal(&softs->event_disp_cv);
996 cv_wait(&softs->event_wait_cv, &softs->ev_lock);
997 mutex_exit(&softs->ev_lock);
999 cv_destroy(&softs->aifq_cv);
1000 cv_destroy(&softs->event_disp_cv);
1001 cv_destroy(&softs->event_wait_cv);
1002 cv_destroy(&softs->drain_cv);
1003 cv_destroy(&softs->sync_fib_cv);
1004 cv_destroy(&softs->event);
1005 mutex_destroy(&softs->aifq_mutex);
1006 mutex_destroy(&softs->ev_lock);
1007 mutex_destroy(&softs->time_mutex);
1008 mutex_destroy(&softs->q_comp_mutex);
1009 mutex_destroy(&softs->io_lock);
1011 ddi_regs_map_free(&softs->pci_mem_handle);
1012 aac_fm_fini(softs);
1013 softs->hwif = AAC_HWIF_UNKNOWN;
1014 softs->card = AAC_UNKNOWN_CARD;
1015 ddi_soft_state_free(aac_softstatep, ddi_get_instance(dip));
1017 return (DDI_SUCCESS);
1020 /*ARGSUSED*/
1021 static int
1022 aac_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1024 struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
1026 DBCALLED(softs, 1);
1028 mutex_enter(&softs->io_lock);
1029 AAC_DISABLE_INTR(softs);
1030 (void) aac_shutdown(softs);
1031 mutex_exit(&softs->io_lock);
1033 return (DDI_SUCCESS);
1037 * quiesce(9E) entry point.
1039 * This function is called when the system is single-threaded at high
1040 * PIL with preemption disabled. Therefore, this function must not
1041 * block.
1043 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1044 * DDI_FAILURE indicates an error condition and should almost never happen.
1046 static int
1047 aac_quiesce(dev_info_t *dip)
1049 struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
1051 if (softs == NULL)
1052 return (DDI_FAILURE);
1054 _NOTE(ASSUMING_PROTECTED(softs->state))
1055 AAC_DISABLE_INTR(softs);
1057 return (DDI_SUCCESS);
1060 /* ARGSUSED */
1061 static int
1062 aac_getinfo(dev_info_t *self, ddi_info_cmd_t infocmd, void *arg,
1063 void **result)
1065 int error = DDI_SUCCESS;
1067 switch (infocmd) {
1068 case DDI_INFO_DEVT2INSTANCE:
1069 *result = (void *)(intptr_t)(MINOR2INST(getminor((dev_t)arg)));
1070 break;
1071 default:
1072 error = DDI_FAILURE;
1074 return (error);
1078 * Bring the controller down to a dormant state and detach all child devices.
1079 * This function is called before detach or system shutdown.
1080 * Note: we can assume that the q_wait on the controller is empty, as we
1081 * won't allow shutdown if any device is open.
1083 static int
1084 aac_shutdown(struct aac_softstate *softs)
1086 ddi_acc_handle_t acc;
1087 struct aac_close_command *cc;
1088 int rval;
1090 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
1091 acc = softs->sync_ac.slotp->fib_acc_handle;
1093 cc = (struct aac_close_command *)&softs->sync_ac.slotp->fibp->data[0];
1095 ddi_put32(acc, &cc->Command, VM_CloseAll);
1096 ddi_put32(acc, &cc->ContainerId, 0xfffffffful);
1098 /* Flush all caches, set FW to write through mode */
1099 rval = aac_sync_fib(softs, ContainerCommand,
1100 AAC_FIB_SIZEOF(struct aac_close_command));
1101 aac_sync_fib_slot_release(softs, &softs->sync_ac);
1103 AACDB_PRINT(softs, CE_NOTE,
1104 "shutting down aac %s", (rval == AACOK) ? "ok" : "fail");
1105 return (rval);
1108 static uint_t
1109 aac_softintr(caddr_t arg)
1111 struct aac_softstate *softs = (void *)arg;
1113 if (!AAC_IS_Q_EMPTY(&softs->q_comp)) {
1114 aac_drain_comp_q(softs);
1116 return (DDI_INTR_CLAIMED);
1120 * Setup auto sense data for pkt
1122 static void
1123 aac_set_arq_data(struct scsi_pkt *pkt, uchar_t key,
1124 uchar_t add_code, uchar_t qual_code, uint64_t info)
1126 struct scsi_arq_status *arqstat = (void *)(pkt->pkt_scbp);
1128 *pkt->pkt_scbp = STATUS_CHECK; /* CHECK CONDITION */
1129 pkt->pkt_state |= STATE_ARQ_DONE;
1131 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
1132 arqstat->sts_rqpkt_reason = CMD_CMPLT;
1133 arqstat->sts_rqpkt_resid = 0;
1134 arqstat->sts_rqpkt_state =
1135 STATE_GOT_BUS |
1136 STATE_GOT_TARGET |
1137 STATE_SENT_CMD |
1138 STATE_XFERRED_DATA;
1139 arqstat->sts_rqpkt_statistics = 0;
1141 if (info <= 0xfffffffful) {
1142 arqstat->sts_sensedata.es_valid = 1;
1143 arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
1144 arqstat->sts_sensedata.es_code = CODE_FMT_FIXED_CURRENT;
1145 arqstat->sts_sensedata.es_key = key;
1146 arqstat->sts_sensedata.es_add_code = add_code;
1147 arqstat->sts_sensedata.es_qual_code = qual_code;
1149 arqstat->sts_sensedata.es_info_1 = (info >> 24) & 0xFF;
1150 arqstat->sts_sensedata.es_info_2 = (info >> 16) & 0xFF;
1151 arqstat->sts_sensedata.es_info_3 = (info >> 8) & 0xFF;
1152 arqstat->sts_sensedata.es_info_4 = info & 0xFF;
1153 } else { /* 64-bit LBA */
1154 struct scsi_descr_sense_hdr *dsp;
1155 struct scsi_information_sense_descr *isd;
1157 dsp = (struct scsi_descr_sense_hdr *)&arqstat->sts_sensedata;
1158 dsp->ds_class = CLASS_EXTENDED_SENSE;
1159 dsp->ds_code = CODE_FMT_DESCR_CURRENT;
1160 dsp->ds_key = key;
1161 dsp->ds_add_code = add_code;
1162 dsp->ds_qual_code = qual_code;
1163 dsp->ds_addl_sense_length =
1164 sizeof (struct scsi_information_sense_descr);
1166 isd = (struct scsi_information_sense_descr *)(dsp+1);
1167 isd->isd_descr_type = DESCR_INFORMATION;
1168 isd->isd_valid = 1;
1169 isd->isd_information[0] = (info >> 56) & 0xFF;
1170 isd->isd_information[1] = (info >> 48) & 0xFF;
1171 isd->isd_information[2] = (info >> 40) & 0xFF;
1172 isd->isd_information[3] = (info >> 32) & 0xFF;
1173 isd->isd_information[4] = (info >> 24) & 0xFF;
1174 isd->isd_information[5] = (info >> 16) & 0xFF;
1175 isd->isd_information[6] = (info >> 8) & 0xFF;
1176 isd->isd_information[7] = (info) & 0xFF;
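/*
 * The branch above follows the SCSI sense-data formats (summary, not new
 * behavior): an information value that fits in 32 bits is returned as
 * fixed-format sense data, while a larger 64-bit LBA requires
 * descriptor-format sense with an information sense-data descriptor,
 * which is why AAC_ARQ64_LENGTH budgets extra room for the descriptor
 * header and the information descriptor.
 */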
1181 * Setup auto sense data for HARDWARE ERROR
1183 static void
1184 aac_set_arq_data_hwerr(struct aac_cmd *acp)
1186 union scsi_cdb *cdbp;
1187 uint64_t err_blkno;
1189 cdbp = (void *)acp->pkt->pkt_cdbp;
1190 err_blkno = AAC_GETGXADDR(acp->cmdlen, cdbp);
1191 aac_set_arq_data(acp->pkt, KEY_HARDWARE_ERROR, 0x00, 0x00, err_blkno);
1195 * Send a command to the adapter via the New Comm. interface
1197 static int
1198 aac_send_command(struct aac_softstate *softs, struct aac_slot *slotp)
1200 uint32_t index, device;
1202 index = PCI_MEM_GET32(softs, AAC_IQUE);
1203 if (index == 0xffffffffUL) {
1204 index = PCI_MEM_GET32(softs, AAC_IQUE);
1205 if (index == 0xffffffffUL)
1206 return (AACERR);
1209 device = index;
1210 PCI_MEM_PUT32(softs, device,
1211 (uint32_t)(slotp->fib_phyaddr & 0xfffffffful));
1212 device += 4;
1213 PCI_MEM_PUT32(softs, device, (uint32_t)(slotp->fib_phyaddr >> 32));
1214 device += 4;
1215 PCI_MEM_PUT32(softs, device, slotp->acp->fib_size);
1216 PCI_MEM_PUT32(softs, AAC_IQUE, index);
1217 return (AACOK);
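/*
 * Restating aac_send_command() above (assuming the New Comm. register
 * layout from aac_regs.h): read a free queue offset from AAC_IQUE
 * (retrying once on 0xffffffff), write the 64-bit FIB physical address
 * and the FIB size at that offset in mapped PCI memory, then write the
 * offset back to AAC_IQUE to hand the FIB to the firmware.
 */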
1220 static void
1221 aac_end_io(struct aac_softstate *softs, struct aac_cmd *acp)
1223 struct aac_device *dvp = acp->dvp;
1224 int q = AAC_CMDQ(acp);
1226 if (acp->slotp) { /* outstanding cmd */
1227 if (!(acp->flags & AAC_CMD_IN_SYNC_SLOT)) {
1228 aac_release_slot(softs, acp->slotp);
1229 acp->slotp = NULL;
1231 if (dvp) {
1232 dvp->ncmds[q]--;
1233 if (dvp->throttle[q] == AAC_THROTTLE_DRAIN &&
1234 dvp->ncmds[q] == 0 && q == AAC_CMDQ_ASYNC)
1235 aac_set_throttle(softs, dvp, q,
1236 softs->total_slots);
1238 * Setup auto sense data for UNIT ATTENTION
1239 * Each lun should generate a unit attention
1240 * condition when reset.
1241 * Phys. drives are treated as logical ones
1242 * during error recovery.
1244 if (dvp->type == AAC_DEV_LD) {
1245 struct aac_container *ctp =
1246 (struct aac_container *)dvp;
1247 if (ctp->reset == 0)
1248 goto noreset;
1250 AACDB_PRINT(softs, CE_NOTE,
1251 "Unit attention: reset");
1252 ctp->reset = 0;
1253 aac_set_arq_data(acp->pkt, KEY_UNIT_ATTENTION,
1254 0x29, 0x02, 0);
1257 noreset:
1258 softs->bus_ncmds[q]--;
1259 aac_cmd_delete(&softs->q_busy, acp);
1260 } else { /* cmd in waiting queue */
1261 aac_cmd_delete(&softs->q_wait[q], acp);
1264 if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) { /* async IO */
1265 mutex_enter(&softs->q_comp_mutex);
1266 aac_cmd_enqueue(&softs->q_comp, acp);
1267 mutex_exit(&softs->q_comp_mutex);
1268 } else if (acp->flags & AAC_CMD_NO_CB) { /* sync IO */
1269 cv_broadcast(&softs->event);
1273 static void
1274 aac_handle_io(struct aac_softstate *softs, int index)
1276 struct aac_slot *slotp;
1277 struct aac_cmd *acp;
1278 uint32_t fast;
1280 fast = index & AAC_SENDERADDR_MASK_FAST_RESPONSE;
1281 index >>= 2;
1283 /* Make sure firmware reported index is valid */
1284 ASSERT(index >= 0 && index < softs->total_slots);
1285 slotp = &softs->io_slot[index];
1286 ASSERT(slotp->index == index);
1287 acp = slotp->acp;
1289 if (acp == NULL || acp->slotp != slotp) {
1290 cmn_err(CE_WARN,
1291 "Firmware error: invalid slot index received from FW");
1292 return;
1295 acp->flags |= AAC_CMD_CMPLT;
1296 (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1298 if (aac_check_dma_handle(slotp->fib_dma_handle) == DDI_SUCCESS) {
1300 * For fast response IO, the firmware does not return any FIB
1301 * data, so we need to fill in the FIB status and state so that
1302 * FIB users can handle it correctly.
1304 if (fast) {
1305 uint32_t state;
1307 state = ddi_get32(slotp->fib_acc_handle,
1308 &slotp->fibp->Header.XferState);
1310 * Update state for CPU not for device, no DMA sync
1311 * needed
1313 ddi_put32(slotp->fib_acc_handle,
1314 &slotp->fibp->Header.XferState,
1315 state | AAC_FIBSTATE_DONEADAP);
1316 ddi_put32(slotp->fib_acc_handle,
1317 (void *)&slotp->fibp->data[0], ST_OK);
1320 /* Handle completed ac */
1321 acp->ac_comp(softs, acp);
1322 } else {
1323 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1324 acp->flags |= AAC_CMD_ERR;
1325 if (acp->pkt) {
1326 acp->pkt->pkt_reason = CMD_TRAN_ERR;
1327 acp->pkt->pkt_statistics = 0;
1330 aac_end_io(softs, acp);
1334 * Interrupt handler for New Comm. interface
1335 * The New Comm. interface uses a different mechanism for interrupts. There are
1336 * no explicit message queues; the driver only needs to access the mapped PCI
1337 * mem space to find the completed FIB or AIF.
1339 static int
1340 aac_process_intr_new(struct aac_softstate *softs)
1342 uint32_t index;
1344 index = AAC_OUTB_GET(softs);
1345 if (index == 0xfffffffful)
1346 index = AAC_OUTB_GET(softs);
1347 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
1348 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1349 return (0);
1351 if (index != 0xfffffffful) {
1352 do {
1353 if ((index & AAC_SENDERADDR_MASK_AIF) == 0) {
1354 aac_handle_io(softs, index);
1355 } else if (index != 0xfffffffeul) {
1356 struct aac_fib *fibp; /* FIB in AIF queue */
1357 uint16_t fib_size;
1360 * 0xfffffffe means that the controller wants
1361 * more work, ignore it for now. Otherwise,
1362 * AIF received.
1364 index &= ~2;
1366 fibp = (struct aac_fib *)(softs-> \
1367 pci_mem_base_vaddr + index);
1368 fib_size = PCI_MEM_GET16(softs, index + \
1369 offsetof(struct aac_fib, Header.Size));
1371 aac_save_aif(softs, softs->pci_mem_handle,
1372 fibp, fib_size);
1375 * AIF memory is owned by the adapter, so let it
1376 * know that we are done with it.
1378 AAC_OUTB_SET(softs, index);
1379 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
1382 index = AAC_OUTB_GET(softs);
1383 } while (index != 0xfffffffful);
1386 * Process waiting cmds before starting new ones to
1387 * ensure first IOs are serviced first.
1389 aac_start_waiting_io(softs);
1390 return (AAC_DB_COMMAND_READY);
1391 } else {
1392 return (0);
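/*
 * How the outbound value is interpreted (a reading of the code above):
 * the low bits of the value carry flags, so AAC_SENDERADDR_MASK_AIF
 * distinguishes AIFs from I/O completions, AAC_SENDERADDR_MASK_FAST_RESPONSE
 * marks a completion the firmware answered without returning FIB data,
 * and aac_handle_io() shifts the value right by two to obtain the slot
 * index; for AIFs the value (with bit 1 cleared) is an offset into the
 * mapped PCI memory where the FIB resides, and 0xfffffffe is merely a
 * "wants more work" hint from the controller.
 */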
1396 static uint_t
1397 aac_intr_new(caddr_t arg)
1399 struct aac_softstate *softs = (void *)arg;
1400 uint_t rval;
1402 mutex_enter(&softs->io_lock);
1403 if (aac_process_intr_new(softs))
1404 rval = DDI_INTR_CLAIMED;
1405 else
1406 rval = DDI_INTR_UNCLAIMED;
1407 mutex_exit(&softs->io_lock);
1409 aac_drain_comp_q(softs);
1410 return (rval);
1414 * Interrupt handler for old interface
1415 * Explicit message queues are used to send FIBs to and get completed FIBs from
1416 * the adapter. The driver and adapter maintain the queues in a producer/consumer
1417 * manner. The driver has to query the queues to find the completed FIB.
1419 static int
1420 aac_process_intr_old(struct aac_softstate *softs)
1422 uint16_t status;
1424 status = AAC_STATUS_GET(softs);
1425 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
1426 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1427 return (DDI_INTR_UNCLAIMED);
1429 if (status & AAC_DB_RESPONSE_READY) {
1430 int slot_idx;
1432 /* ACK the intr */
1433 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
1434 (void) AAC_STATUS_GET(softs);
1435 while (aac_fib_dequeue(softs, AAC_HOST_NORM_RESP_Q,
1436 &slot_idx) == AACOK)
1437 aac_handle_io(softs, slot_idx);
1440 * Process waiting cmds before starting new ones to
1441 * ensure first IOs are serviced first.
1443 aac_start_waiting_io(softs);
1444 return (AAC_DB_RESPONSE_READY);
1445 } else if (status & AAC_DB_COMMAND_READY) {
1446 int aif_idx;
1448 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_READY);
1449 (void) AAC_STATUS_GET(softs);
1450 if (aac_fib_dequeue(softs, AAC_HOST_NORM_CMD_Q, &aif_idx) ==
1451 AACOK) {
1452 ddi_acc_handle_t acc = softs->comm_space_acc_handle;
1453 struct aac_fib *fibp; /* FIB in communication space */
1454 uint16_t fib_size;
1455 uint32_t fib_xfer_state;
1456 uint32_t addr, size;
1458 ASSERT((aif_idx >= 0) && (aif_idx < AAC_ADAPTER_FIBS));
1460 #define AAC_SYNC_AIF(softs, aif_idx, type) \
1461 { (void) ddi_dma_sync((softs)->comm_space_dma_handle, \
1462 offsetof(struct aac_comm_space, \
1463 adapter_fibs[(aif_idx)]), AAC_FIB_SIZE, \
1464 (type)); }
1466 /* Copy AIF from adapter to the empty AIF slot */
1467 AAC_SYNC_AIF(softs, aif_idx, DDI_DMA_SYNC_FORCPU);
1468 fibp = &softs->comm_space->adapter_fibs[aif_idx];
1469 fib_size = ddi_get16(acc, &fibp->Header.Size);
1471 aac_save_aif(softs, acc, fibp, fib_size);
1473 /* Complete AIF back to adapter with good status */
1474 fib_xfer_state = LE_32(fibp->Header.XferState);
1475 if (fib_xfer_state & AAC_FIBSTATE_FROMADAP) {
1476 ddi_put32(acc, &fibp->Header.XferState,
1477 fib_xfer_state | AAC_FIBSTATE_DONEHOST);
1478 ddi_put32(acc, (void *)&fibp->data[0], ST_OK);
1479 if (fib_size > AAC_FIB_SIZE)
1480 ddi_put16(acc, &fibp->Header.Size,
1481 AAC_FIB_SIZE);
1482 AAC_SYNC_AIF(softs, aif_idx,
1483 DDI_DMA_SYNC_FORDEV);
1486 /* Put the AIF response on the response queue */
1487 addr = ddi_get32(acc,
1488 &softs->comm_space->adapter_fibs[aif_idx]. \
1489 Header.SenderFibAddress);
1490 size = (uint32_t)ddi_get16(acc,
1491 &softs->comm_space->adapter_fibs[aif_idx]. \
1492 Header.Size);
1493 ddi_put32(acc,
1494 &softs->comm_space->adapter_fibs[aif_idx]. \
1495 Header.ReceiverFibAddress, addr);
1496 if (aac_fib_enqueue(softs, AAC_ADAP_NORM_RESP_Q,
1497 addr, size) == AACERR)
1498 cmn_err(CE_NOTE, "!AIF ack failed");
1500 return (AAC_DB_COMMAND_READY);
1501 } else if (status & AAC_DB_PRINTF_READY) {
1502 /* ACK the intr */
1503 AAC_STATUS_CLR(softs, AAC_DB_PRINTF_READY);
1504 (void) AAC_STATUS_GET(softs);
1505 (void) ddi_dma_sync(softs->comm_space_dma_handle,
1506 offsetof(struct aac_comm_space, adapter_print_buf),
1507 AAC_ADAPTER_PRINT_BUFSIZE, DDI_DMA_SYNC_FORCPU);
1508 if (aac_check_dma_handle(softs->comm_space_dma_handle) ==
1509 DDI_SUCCESS)
1510 cmn_err(CE_NOTE, "MSG From Adapter: %s",
1511 softs->comm_space->adapter_print_buf);
1512 else
1513 ddi_fm_service_impact(softs->devinfo_p,
1514 DDI_SERVICE_UNAFFECTED);
1515 AAC_NOTIFY(softs, AAC_DB_PRINTF_READY);
1516 return (AAC_DB_PRINTF_READY);
1517 } else if (status & AAC_DB_COMMAND_NOT_FULL) {
1519 * Without these two condition statements, the OS could hang
1520 * after a while, especially if there are a lot of AIFs to
1521 * handle, for instance if a drive is pulled from an array
1522 * under heavy load.
1524 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
1525 return (AAC_DB_COMMAND_NOT_FULL);
1526 } else if (status & AAC_DB_RESPONSE_NOT_FULL) {
1527 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
1528 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_NOT_FULL);
1529 return (AAC_DB_RESPONSE_NOT_FULL);
1530 } else {
1531 return (0);
1535 static uint_t
1536 aac_intr_old(caddr_t arg)
1538 struct aac_softstate *softs = (void *)arg;
1539 int rval;
1541 mutex_enter(&softs->io_lock);
1542 if (aac_process_intr_old(softs))
1543 rval = DDI_INTR_CLAIMED;
1544 else
1545 rval = DDI_INTR_UNCLAIMED;
1546 mutex_exit(&softs->io_lock);
1548 aac_drain_comp_q(softs);
1549 return (rval);
1553 * Query FIXED or MSI interrupts
1555 static int
1556 aac_query_intrs(struct aac_softstate *softs, int intr_type)
1558 dev_info_t *dip = softs->devinfo_p;
1559 int avail, actual, count;
1560 int i, flag, ret;
1562 AACDB_PRINT(softs, CE_NOTE,
1563 "aac_query_intrs:interrupt type 0x%x", intr_type);
1565 /* Get number of interrupts */
1566 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
1567 if ((ret != DDI_SUCCESS) || (count == 0)) {
1568 AACDB_PRINT(softs, CE_WARN,
1569 "ddi_intr_get_nintrs() failed, ret %d count %d",
1570 ret, count);
1571 return (DDI_FAILURE);
1574 /* Get number of available interrupts */
1575 ret = ddi_intr_get_navail(dip, intr_type, &avail);
1576 if ((ret != DDI_SUCCESS) || (avail == 0)) {
1577 AACDB_PRINT(softs, CE_WARN,
1578 "ddi_intr_get_navail() failed, ret %d avail %d",
1579 ret, avail);
1580 return (DDI_FAILURE);
1583 AACDB_PRINT(softs, CE_NOTE,
1584 "ddi_intr_get_nvail returned %d, navail() returned %d",
1585 count, avail);
1587 /* Allocate an array of interrupt handles */
1588 softs->intr_size = count * sizeof (ddi_intr_handle_t);
1589 softs->htable = kmem_alloc(softs->intr_size, KM_SLEEP);
1591 if (intr_type == DDI_INTR_TYPE_MSI) {
1592 count = 1; /* only one vector needed by now */
1593 flag = DDI_INTR_ALLOC_STRICT;
1594 } else { /* must be DDI_INTR_TYPE_FIXED */
1595 flag = DDI_INTR_ALLOC_NORMAL;
1598 /* Call ddi_intr_alloc() */
1599 ret = ddi_intr_alloc(dip, softs->htable, intr_type, 0,
1600 count, &actual, flag);
1602 if ((ret != DDI_SUCCESS) || (actual == 0)) {
1603 AACDB_PRINT(softs, CE_WARN,
1604 "ddi_intr_alloc() failed, ret = %d", ret);
1605 actual = 0;
1606 goto error;
1609 if (actual < count) {
1610 AACDB_PRINT(softs, CE_NOTE,
1611 "Requested: %d, Received: %d", count, actual);
1612 goto error;
1615 softs->intr_cnt = actual;
1617 /* Get priority for first msi, assume remaining are all the same */
1618 if ((ret = ddi_intr_get_pri(softs->htable[0],
1619 &softs->intr_pri)) != DDI_SUCCESS) {
1620 AACDB_PRINT(softs, CE_WARN,
1621 "ddi_intr_get_pri() failed, ret = %d", ret);
1622 goto error;
1625 /* Test for high level mutex */
1626 if (softs->intr_pri >= ddi_intr_get_hilevel_pri()) {
1627 AACDB_PRINT(softs, CE_WARN,
1628 "aac_query_intrs: Hi level interrupt not supported");
1629 goto error;
1632 return (DDI_SUCCESS);
1634 error:
1635 /* Free already allocated intr */
1636 for (i = 0; i < actual; i++)
1637 (void) ddi_intr_free(softs->htable[i]);
1639 kmem_free(softs->htable, softs->intr_size);
1640 return (DDI_FAILURE);
1645 * Register FIXED or MSI interrupts, and enable them
1647 static int
1648 aac_add_intrs(struct aac_softstate *softs)
1650 int i, ret;
1651 int actual;
1652 ddi_intr_handler_t *aac_intr;
1654 actual = softs->intr_cnt;
1655 aac_intr = (ddi_intr_handler_t *)((softs->flags & AAC_FLAGS_NEW_COMM) ?
1656 aac_intr_new : aac_intr_old);
1658 /* Call ddi_intr_add_handler() */
1659 for (i = 0; i < actual; i++) {
1660 if ((ret = ddi_intr_add_handler(softs->htable[i],
1661 aac_intr, (caddr_t)softs, NULL)) != DDI_SUCCESS) {
1662 cmn_err(CE_WARN,
1663 "ddi_intr_add_handler() failed ret = %d", ret);
1665 /* Free already allocated intr */
1666 for (i = 0; i < actual; i++)
1667 (void) ddi_intr_free(softs->htable[i]);
1669 kmem_free(softs->htable, softs->intr_size);
1670 return (DDI_FAILURE);
1674 if ((ret = ddi_intr_get_cap(softs->htable[0], &softs->intr_cap))
1675 != DDI_SUCCESS) {
1676 cmn_err(CE_WARN, "ddi_intr_get_cap() failed, ret = %d", ret);
1678 /* Free already allocated intr */
1679 for (i = 0; i < actual; i++)
1680 (void) ddi_intr_free(softs->htable[i]);
1682 kmem_free(softs->htable, softs->intr_size);
1683 return (DDI_FAILURE);
1686 return (DDI_SUCCESS);
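/*
 * Taken together, attach-time interrupt setup follows the usual DDI
 * sequence (a summary of aac_query_intrs()/aac_add_intrs(), not a
 * separate code path): ddi_intr_get_nintrs() and ddi_intr_get_navail()
 * size the handle table, ddi_intr_alloc() allocates the vectors (a single
 * vector for MSI), ddi_intr_get_pri() supplies the priority used to
 * initialize the driver mutexes, ddi_intr_add_handler() registers
 * aac_intr_new() or aac_intr_old(), and aac_enable_intrs() later enables
 * them either per vector or block-wise when DDI_INTR_FLAG_BLOCK is set.
 */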
1690 * Unregister FIXED or MSI interrupts
1692 static void
1693 aac_remove_intrs(struct aac_softstate *softs)
1695 int i;
1697 /* Disable all interrupts */
1698 (void) aac_disable_intrs(softs);
1699 /* Call ddi_intr_remove_handler() */
1700 for (i = 0; i < softs->intr_cnt; i++) {
1701 (void) ddi_intr_remove_handler(softs->htable[i]);
1702 (void) ddi_intr_free(softs->htable[i]);
1705 kmem_free(softs->htable, softs->intr_size);
1708 static int
1709 aac_enable_intrs(struct aac_softstate *softs)
1711 int rval = AACOK;
1713 if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) {
1714 /* for MSI block enable */
1715 if (ddi_intr_block_enable(softs->htable, softs->intr_cnt) !=
1716 DDI_SUCCESS)
1717 rval = AACERR;
1718 } else {
1719 int i;
1721 /* Call ddi_intr_enable() for legacy/MSI non block enable */
1722 for (i = 0; i < softs->intr_cnt; i++) {
1723 if (ddi_intr_enable(softs->htable[i]) != DDI_SUCCESS)
1724 rval = AACERR;
1727 return (rval);
1730 static int
1731 aac_disable_intrs(struct aac_softstate *softs)
1733 int rval = AACOK;
1735 if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) {
1736 /* Call ddi_intr_block_disable() */
1737 if (ddi_intr_block_disable(softs->htable, softs->intr_cnt) !=
1738 DDI_SUCCESS)
1739 rval = AACERR;
1740 } else {
1741 int i;
1743 for (i = 0; i < softs->intr_cnt; i++) {
1744 if (ddi_intr_disable(softs->htable[i]) != DDI_SUCCESS)
1745 rval = AACERR;
1748 return (rval);
1752 * Set pkt_reason and OR in pkt_statistics flag
1754 static void
1755 aac_set_pkt_reason(struct aac_softstate *softs, struct aac_cmd *acp,
1756 uchar_t reason, uint_t stat)
1758 _NOTE(ARGUNUSED(softs))
1760 if (acp->pkt->pkt_reason == CMD_CMPLT)
1761 acp->pkt->pkt_reason = reason;
1762 acp->pkt->pkt_statistics |= stat;
1766 * Handle a finished pkt of soft SCMD
1768 static void
1769 aac_soft_callback(struct aac_softstate *softs, struct aac_cmd *acp)
1771 ASSERT(acp->pkt);
1773 acp->flags |= AAC_CMD_CMPLT;
1775 acp->pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET | \
1776 STATE_SENT_CMD | STATE_GOT_STATUS;
1777 if (acp->pkt->pkt_state & STATE_XFERRED_DATA)
1778 acp->pkt->pkt_resid = 0;
1780 /* AAC_CMD_NO_INTR means no complete callback */
1781 if (!(acp->flags & AAC_CMD_NO_INTR)) {
1782 mutex_enter(&softs->q_comp_mutex);
1783 aac_cmd_enqueue(&softs->q_comp, acp);
1784 mutex_exit(&softs->q_comp_mutex);
1785 ddi_trigger_softintr(softs->softint_id);
1790 * Handlers for completed IOs, common to aac_intr_new() and aac_intr_old()
1794 * Handle completed logical device IO command
1796 /*ARGSUSED*/
1797 static void
1798 aac_ld_complete(struct aac_softstate *softs, struct aac_cmd *acp)
1800 struct aac_slot *slotp = acp->slotp;
1801 struct aac_blockread_response *resp;
1802 uint32_t status;
1804 ASSERT(!(acp->flags & AAC_CMD_SYNC));
1805 ASSERT(!(acp->flags & AAC_CMD_NO_CB));
1807 acp->pkt->pkt_state |= STATE_GOT_STATUS;
1810 * block_read and block_write have similar response headers, so use the
1811 * blockread response for both.
1813 resp = (struct aac_blockread_response *)&slotp->fibp->data[0];
1814 status = ddi_get32(slotp->fib_acc_handle, &resp->Status);
1815 if (status == ST_OK) {
1816 acp->pkt->pkt_resid = 0;
1817 acp->pkt->pkt_state |= STATE_XFERRED_DATA;
1818 } else {
1819 aac_set_arq_data_hwerr(acp);
1824 * Handle completed phys. device IO command
1826 static void
1827 aac_pd_complete(struct aac_softstate *softs, struct aac_cmd *acp)
1829 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
1830 struct aac_fib *fibp = acp->slotp->fibp;
1831 struct scsi_pkt *pkt = acp->pkt;
1832 struct aac_srb_reply *resp;
1833 uint32_t resp_status;
1835 ASSERT(!(acp->flags & AAC_CMD_SYNC));
1836 ASSERT(!(acp->flags & AAC_CMD_NO_CB));
1838 resp = (struct aac_srb_reply *)&fibp->data[0];
1839 resp_status = ddi_get32(acc, &resp->status);
1841 /* First check FIB status */
1842 if (resp_status == ST_OK) {
1843 uint32_t scsi_status;
1844 uint32_t srb_status;
1845 uint32_t data_xfer_length;
1847 scsi_status = ddi_get32(acc, &resp->scsi_status);
1848 srb_status = ddi_get32(acc, &resp->srb_status);
1849 data_xfer_length = ddi_get32(acc, &resp->data_xfer_length);
1851 *pkt->pkt_scbp = (uint8_t)scsi_status;
1852 pkt->pkt_state |= STATE_GOT_STATUS;
1853 if (scsi_status == STATUS_GOOD) {
1854 uchar_t cmd = ((union scsi_cdb *)(void *)
1855 (pkt->pkt_cdbp))->scc_cmd;
1857 /* Next check SRB status */
1858 switch (srb_status & 0x3f) {
1859 case SRB_STATUS_DATA_OVERRUN:
1860 AACDB_PRINT(softs, CE_NOTE, "DATA_OVERRUN: " \
1861 "scmd=%d, xfer=%d, buflen=%d",
1862 (uint32_t)cmd, data_xfer_length,
1863 acp->bcount);
1865 switch (cmd) {
1866 case SCMD_READ:
1867 case SCMD_WRITE:
1868 case SCMD_READ_G1:
1869 case SCMD_WRITE_G1:
1870 case SCMD_READ_G4:
1871 case SCMD_WRITE_G4:
1872 case SCMD_READ_G5:
1873 case SCMD_WRITE_G5:
1874 aac_set_pkt_reason(softs, acp,
1875 CMD_DATA_OVR, 0);
1876 break;
1878 /*FALLTHRU*/
1879 case SRB_STATUS_ERROR_RECOVERY:
1880 case SRB_STATUS_PENDING:
1881 case SRB_STATUS_SUCCESS:
1883 * pkt_resid should only be calculated if the
1884 * status is ERROR_RECOVERY/PENDING/SUCCESS/
1885 * OVERRUN/UNDERRUN
1887 if (data_xfer_length) {
1888 pkt->pkt_state |= STATE_XFERRED_DATA;
1889 pkt->pkt_resid = acp->bcount - \
1890 data_xfer_length;
1891 ASSERT(pkt->pkt_resid >= 0);
1893 break;
1894 case SRB_STATUS_ABORTED:
1895 AACDB_PRINT(softs, CE_NOTE,
1896 "SRB_STATUS_ABORTED, xfer=%d, resid=%d",
1897 data_xfer_length, pkt->pkt_resid);
1898 aac_set_pkt_reason(softs, acp, CMD_ABORTED,
1899 STAT_ABORTED);
1900 break;
1901 case SRB_STATUS_ABORT_FAILED:
1902 AACDB_PRINT(softs, CE_NOTE,
1903 "SRB_STATUS_ABORT_FAILED, xfer=%d, " \
1904 "resid=%d", data_xfer_length,
1905 pkt->pkt_resid);
1906 aac_set_pkt_reason(softs, acp, CMD_ABORT_FAIL,
1908 break;
1909 case SRB_STATUS_PARITY_ERROR:
1910 AACDB_PRINT(softs, CE_NOTE,
1911 "SRB_STATUS_PARITY_ERROR, xfer=%d, " \
1912 "resid=%d", data_xfer_length,
1913 pkt->pkt_resid);
1914 aac_set_pkt_reason(softs, acp, CMD_PER_FAIL, 0);
1915 break;
1916 case SRB_STATUS_NO_DEVICE:
1917 case SRB_STATUS_INVALID_PATH_ID:
1918 case SRB_STATUS_INVALID_TARGET_ID:
1919 case SRB_STATUS_INVALID_LUN:
1920 case SRB_STATUS_SELECTION_TIMEOUT:
1921 #ifdef DEBUG
1922 if (AAC_DEV_IS_VALID(acp->dvp)) {
1923 AACDB_PRINT(softs, CE_NOTE,
1924 "SRB_STATUS_NO_DEVICE(%d), " \
1925 "xfer=%d, resid=%d ",
1926 srb_status & 0x3f,
1927 data_xfer_length, pkt->pkt_resid);
1929 #endif
1930 aac_set_pkt_reason(softs, acp, CMD_DEV_GONE, 0);
1931 break;
1932 case SRB_STATUS_COMMAND_TIMEOUT:
1933 case SRB_STATUS_TIMEOUT:
1934 AACDB_PRINT(softs, CE_NOTE,
1935 "SRB_STATUS_COMMAND_TIMEOUT, xfer=%d, " \
1936 "resid=%d", data_xfer_length,
1937 pkt->pkt_resid);
1938 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT,
1939 STAT_TIMEOUT);
1940 break;
1941 case SRB_STATUS_BUS_RESET:
1942 AACDB_PRINT(softs, CE_NOTE,
1943 "SRB_STATUS_BUS_RESET, xfer=%d, " \
1944 "resid=%d", data_xfer_length,
1945 pkt->pkt_resid);
1946 aac_set_pkt_reason(softs, acp, CMD_RESET,
1947 STAT_BUS_RESET);
1948 break;
1949 default:
1950 AACDB_PRINT(softs, CE_NOTE, "srb_status=%d, " \
1951 "xfer=%d, resid=%d", srb_status & 0x3f,
1952 data_xfer_length, pkt->pkt_resid);
1953 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
1954 break;
1956 } else if (scsi_status == STATUS_CHECK) {
1957 /* CHECK CONDITION */
1958 struct scsi_arq_status *arqstat =
1959 (void *)(pkt->pkt_scbp);
1960 uint32_t sense_data_size;
1962 pkt->pkt_state |= STATE_ARQ_DONE;
1964 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
1965 arqstat->sts_rqpkt_reason = CMD_CMPLT;
1966 arqstat->sts_rqpkt_resid = 0;
1967 arqstat->sts_rqpkt_state =
1968 STATE_GOT_BUS |
1969 STATE_GOT_TARGET |
1970 STATE_SENT_CMD |
1971 STATE_XFERRED_DATA;
1972 arqstat->sts_rqpkt_statistics = 0;
1974 sense_data_size = ddi_get32(acc,
1975 &resp->sense_data_size);
1976 ASSERT(sense_data_size <= AAC_SENSE_BUFFERSIZE);
1977 AACDB_PRINT(softs, CE_NOTE,
1978 "CHECK CONDITION: sense len=%d, xfer len=%d",
1979 sense_data_size, data_xfer_length);
1981 if (sense_data_size > SENSE_LENGTH)
1982 sense_data_size = SENSE_LENGTH;
1983 ddi_rep_get8(acc, (uint8_t *)&arqstat->sts_sensedata,
1984 (uint8_t *)resp->sense_data, sense_data_size,
1985 DDI_DEV_AUTOINCR);
1986 } else {
1987 AACDB_PRINT(softs, CE_WARN, "invalid scsi status: " \
1988 "scsi_status=%d, srb_status=%d",
1989 scsi_status, srb_status);
1990 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
1992 } else {
1993 AACDB_PRINT(softs, CE_NOTE, "SRB failed: fib status %d",
1994 resp_status);
1995 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
2000 * Handle completed IOCTL command
2002 /*ARGSUSED*/
2003 void
2004 aac_ioctl_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2006 struct aac_slot *slotp = acp->slotp;
2009 * NOTE: Both aac_ioctl_send_fib() and aac_send_raw_srb()
2010 * may wait on softs->event, so use cv_broadcast() instead
2011 * of cv_signal().
2013 ASSERT(acp->flags & AAC_CMD_SYNC);
2014 ASSERT(acp->flags & AAC_CMD_NO_CB);
2016 /* Get the size of the response FIB from its FIB.Header.Size field */
2017 acp->fib_size = ddi_get16(slotp->fib_acc_handle,
2018 &slotp->fibp->Header.Size);
2020 ASSERT(acp->fib_size <= softs->aac_max_fib_size);
2021 ddi_rep_get8(slotp->fib_acc_handle, (uint8_t *)acp->fibp,
2022 (uint8_t *)slotp->fibp, acp->fib_size, DDI_DEV_AUTOINCR);
2026 * Handle completed sync fib command
2028 /*ARGSUSED*/
2029 void
2030 aac_sync_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2035 * Handle completed Flush command
2037 /*ARGSUSED*/
2038 static void
2039 aac_synccache_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2041 struct aac_slot *slotp = acp->slotp;
2042 ddi_acc_handle_t acc = slotp->fib_acc_handle;
2043 struct aac_synchronize_reply *resp;
2044 uint32_t status;
2046 ASSERT(!(acp->flags & AAC_CMD_SYNC));
2048 acp->pkt->pkt_state |= STATE_GOT_STATUS;
2050 resp = (struct aac_synchronize_reply *)&slotp->fibp->data[0];
2051 status = ddi_get32(acc, &resp->Status);
2052 if (status != CT_OK)
2053 aac_set_arq_data_hwerr(acp);
2056 /*ARGSUSED*/
2057 static void
2058 aac_startstop_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2060 struct aac_slot *slotp = acp->slotp;
2061 ddi_acc_handle_t acc = slotp->fib_acc_handle;
2062 struct aac_Container_resp *resp;
2063 uint32_t status;
2065 ASSERT(!(acp->flags & AAC_CMD_SYNC));
2067 acp->pkt->pkt_state |= STATE_GOT_STATUS;
2069 resp = (struct aac_Container_resp *)&slotp->fibp->data[0];
2070 status = ddi_get32(acc, &resp->Status);
2071 if (status != 0) {
2072 AACDB_PRINT(softs, CE_WARN, "Cannot start/stop a unit");
2073 aac_set_arq_data_hwerr(acp);
2078 * Access PCI space to see if the driver can support the card
2080 static int
2081 aac_check_card_type(struct aac_softstate *softs)
2083 ddi_acc_handle_t pci_config_handle;
2084 int card_index;
2085 uint32_t pci_cmd;
2087 /* Map pci configuration space */
2088 if ((pci_config_setup(softs->devinfo_p, &pci_config_handle)) !=
2089 DDI_SUCCESS) {
2090 AACDB_PRINT(softs, CE_WARN, "Cannot setup pci config space");
2091 return (AACERR);
2094 softs->vendid = pci_config_get16(pci_config_handle, PCI_CONF_VENID);
2095 softs->devid = pci_config_get16(pci_config_handle, PCI_CONF_DEVID);
2096 softs->subvendid = pci_config_get16(pci_config_handle,
2097 PCI_CONF_SUBVENID);
2098 softs->subsysid = pci_config_get16(pci_config_handle,
2099 PCI_CONF_SUBSYSID);
2101 card_index = 0;
2102 while (!CARD_IS_UNKNOWN(card_index)) {
2103 if ((aac_cards[card_index].vendor == softs->vendid) &&
2104 (aac_cards[card_index].device == softs->devid) &&
2105 (aac_cards[card_index].subvendor == softs->subvendid) &&
2106 (aac_cards[card_index].subsys == softs->subsysid)) {
2107 break;
2109 card_index++;
2112 softs->card = card_index;
2113 softs->hwif = aac_cards[card_index].hwif;
2116 * Unknown aac card:
2117 * do a generic match based on the VendorID and DeviceID to
2118 * support new cards in the aac family
2120 if (CARD_IS_UNKNOWN(card_index)) {
2121 if (softs->vendid != 0x9005) {
2122 AACDB_PRINT(softs, CE_WARN,
2123 "Unknown vendor 0x%x", softs->vendid);
2124 goto error;
2126 switch (softs->devid) {
2127 case 0x285:
2128 softs->hwif = AAC_HWIF_I960RX;
2129 break;
2130 case 0x286:
2131 softs->hwif = AAC_HWIF_RKT;
2132 break;
2133 default:
2134 AACDB_PRINT(softs, CE_WARN,
2135 "Unknown device \"pci9005,%x\"", softs->devid);
2136 goto error;
2140 /* Set hardware dependent interface */
2141 switch (softs->hwif) {
2142 case AAC_HWIF_I960RX:
2143 softs->aac_if = aac_rx_interface;
2144 softs->map_size_min = AAC_MAP_SIZE_MIN_RX;
2145 break;
2146 case AAC_HWIF_RKT:
2147 softs->aac_if = aac_rkt_interface;
2148 softs->map_size_min = AAC_MAP_SIZE_MIN_RKT;
2149 break;
2150 default:
2151 AACDB_PRINT(softs, CE_WARN,
2152 "Unknown hardware interface %d", softs->hwif);
2153 goto error;
2156 /* Set card names */
2157 (void) strncpy(softs->vendor_name, aac_cards[card_index].vid,
2158 AAC_VENDOR_LEN);
2159 (void) strncpy(softs->product_name, aac_cards[card_index].desc,
2160 AAC_PRODUCT_LEN);
2162 /* Set up quirks */
2163 softs->flags = aac_cards[card_index].quirks;
2165 /* Force the busmaster enable bit on */
2166 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM);
2167 if ((pci_cmd & PCI_COMM_ME) == 0) {
2168 pci_cmd |= PCI_COMM_ME;
2169 pci_config_put16(pci_config_handle, PCI_CONF_COMM, pci_cmd);
2170 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM);
2171 if ((pci_cmd & PCI_COMM_ME) == 0) {
2172 cmn_err(CE_CONT, "?Cannot enable busmaster bit");
2173 goto error;
2177 /* Set memory base to map */
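/* Mask off the low BAR flag bits to get the physical base address of BAR0 */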
2178 softs->pci_mem_base_paddr = 0xfffffff0UL & \
2179 pci_config_get32(pci_config_handle, PCI_CONF_BASE0);
2181 pci_config_teardown(&pci_config_handle);
2183 return (AACOK); /* card type detected */
2184 error:
2185 pci_config_teardown(&pci_config_handle);
2186 return (AACERR); /* no matched card found */
2190 * Do the usual interrupt handler setup stuff.
2192 static int
2193 aac_register_intrs(struct aac_softstate *softs)
2195 dev_info_t *dip;
2196 int intr_types;
2198 ASSERT(softs->devinfo_p);
2199 dip = softs->devinfo_p;
2201 /* Get the type of device interrupts */
2202 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
2203 AACDB_PRINT(softs, CE_WARN,
2204 "ddi_intr_get_supported_types() failed");
2205 return (AACERR);
2207 AACDB_PRINT(softs, CE_NOTE,
2208 "ddi_intr_get_supported_types() ret: 0x%x", intr_types);
2210 /* Query interrupts, and alloc/init all needed structures */
2211 if (intr_types & DDI_INTR_TYPE_MSI) {
2212 if (aac_query_intrs(softs, DDI_INTR_TYPE_MSI)
2213 != DDI_SUCCESS) {
2214 AACDB_PRINT(softs, CE_WARN,
2215 "MSI interrupt query failed");
2216 return (AACERR);
2218 softs->intr_type = DDI_INTR_TYPE_MSI;
2219 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
2220 if (aac_query_intrs(softs, DDI_INTR_TYPE_FIXED)
2221 != DDI_SUCCESS) {
2222 AACDB_PRINT(softs, CE_WARN,
2223 "FIXED interrupt query failed");
2224 return (AACERR);
2226 softs->intr_type = DDI_INTR_TYPE_FIXED;
2227 } else {
2228 AACDB_PRINT(softs, CE_WARN,
2229 "Device cannot suppport both FIXED and MSI interrupts");
2230 return (AACERR);
2233 /* Connect interrupt handlers */
2234 if (aac_add_intrs(softs) != DDI_SUCCESS) {
2235 AACDB_PRINT(softs, CE_WARN,
2236 "Interrupt registration failed, intr type: %s",
2237 softs->intr_type == DDI_INTR_TYPE_MSI ? "MSI" : "FIXED");
2238 return (AACERR);
2240 (void) aac_enable_intrs(softs);
2242 if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &softs->softint_id,
2243 NULL, NULL, aac_softintr, (caddr_t)softs) != DDI_SUCCESS) {
2244 AACDB_PRINT(softs, CE_WARN,
2245 "Can not setup soft interrupt handler!");
2246 aac_remove_intrs(softs);
2247 return (AACERR);
2250 return (AACOK);
2253 static void
2254 aac_unregister_intrs(struct aac_softstate *softs)
2256 aac_remove_intrs(softs);
2257 ddi_remove_softintr(softs->softint_id);
2261 * Check the firmware to determine the features to support and the FIB
2262 * parameters to use.
2264 static int
2265 aac_check_firmware(struct aac_softstate *softs)
2267 uint32_t options;
2268 uint32_t atu_size;
2269 ddi_acc_handle_t pci_handle;
2270 uint8_t *data;
2271 uint32_t max_fibs;
2272 uint32_t max_fib_size;
2273 uint32_t sg_tablesize;
2274 uint32_t max_sectors;
2275 uint32_t status;
2277 /* Get supported options */
2278 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETINFO, 0, 0, 0, 0,
2279 &status)) != AACOK) {
2280 if (status != SRB_STATUS_INVALID_REQUEST) {
2281 cmn_err(CE_CONT,
2282 "?Fatal error: request adapter info error");
2283 return (AACERR);
2285 options = 0;
2286 atu_size = 0;
2287 } else {
2288 options = AAC_MAILBOX_GET(softs, 1);
2289 atu_size = AAC_MAILBOX_GET(softs, 2);
2292 if (softs->state & AAC_STATE_RESET) {
2293 if ((softs->support_opt == options) &&
2294 (softs->atu_size == atu_size))
2295 return (AACOK);
2297 cmn_err(CE_WARN,
2298 "?Fatal error: firmware changed, system needs reboot");
2299 return (AACERR);
2303 * The following critical settings are initialized only once during
2304 * driver attachment.
2306 softs->support_opt = options;
2307 softs->atu_size = atu_size;
2309 /* Process supported options */
2310 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
2311 (softs->flags & AAC_FLAGS_NO4GB) == 0) {
2312 AACDB_PRINT(softs, CE_NOTE, "!Enable FIB map 4GB window");
2313 softs->flags |= AAC_FLAGS_4GB_WINDOW;
2314 } else {
2316 * Quirk AAC_FLAGS_NO4GB is for FIB address and thus comm space
2317 * only. IO is handled by the DMA engine which does not suffer
2318 * from the ATU window programming workarounds necessary for
2319 * CPU copy operations.
2321 softs->addr_dma_attr.dma_attr_addr_lo = 0x2000ull;
2322 softs->addr_dma_attr.dma_attr_addr_hi = 0x7fffffffull;
2325 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0) {
2326 AACDB_PRINT(softs, CE_NOTE, "!Enable SG map 64-bit address");
2327 softs->buf_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
2328 softs->buf_dma_attr.dma_attr_seg = 0xffffffffffffffffull;
2329 softs->flags |= AAC_FLAGS_SG_64BIT;
2332 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) {
2333 softs->flags |= AAC_FLAGS_ARRAY_64BIT;
2334 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array size");
2337 if (options & AAC_SUPPORTED_NONDASD) {
2338 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, softs->devinfo_p, 0,
2339 "nondasd-enable", (char **)&data) == DDI_SUCCESS)) {
2340 if (strcmp((char *)data, "yes") == 0) {
2341 AACDB_PRINT(softs, CE_NOTE,
2342 "!Enable Non-DASD access");
2343 softs->flags |= AAC_FLAGS_NONDASD;
2345 ddi_prop_free(data);
2349 /* Read preferred settings */
2350 max_fib_size = 0;
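/*
 * Mailbox word 1 packs the max FIB size in its low 16 bits and half the
 * max sector count in its high 16 bits; word 2 carries the SG table size
 * in its high 16 bits; word 3 carries the max FIB count in its low 16 bits.
 */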
2351 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETCOMMPREF,
2352 0, 0, 0, 0, NULL)) == AACOK) {
2353 options = AAC_MAILBOX_GET(softs, 1);
2354 max_fib_size = (options & 0xffff);
2355 max_sectors = (options >> 16) << 1;
2356 options = AAC_MAILBOX_GET(softs, 2);
2357 sg_tablesize = (options >> 16);
2358 options = AAC_MAILBOX_GET(softs, 3);
2359 max_fibs = (options & 0xffff);
2362 /* Enable new comm. and rawio at the same time */
2363 if ((softs->support_opt & AAC_SUPPORTED_NEW_COMM) &&
2364 (max_fib_size != 0)) {
2365 /* read out and save PCI MBR */
2366 if ((atu_size > softs->map_size) &&
2367 (ddi_regs_map_setup(softs->devinfo_p, 1,
2368 (caddr_t *)&data, 0, atu_size, &softs->reg_attr,
2369 &pci_handle) == DDI_SUCCESS)) {
2370 ddi_regs_map_free(&softs->pci_mem_handle);
2371 softs->pci_mem_handle = pci_handle;
2372 softs->pci_mem_base_vaddr = data;
2373 softs->map_size = atu_size;
2375 if (atu_size == softs->map_size) {
2376 softs->flags |= AAC_FLAGS_NEW_COMM;
2377 AACDB_PRINT(softs, CE_NOTE,
2378 "!Enable New Comm. interface");
2382 /* Set FIB parameters */
2383 if (softs->flags & AAC_FLAGS_NEW_COMM) {
2384 softs->aac_max_fibs = max_fibs;
2385 softs->aac_max_fib_size = max_fib_size;
2386 softs->aac_max_sectors = max_sectors;
2387 softs->aac_sg_tablesize = sg_tablesize;
2389 softs->flags |= AAC_FLAGS_RAW_IO;
2390 AACDB_PRINT(softs, CE_NOTE, "!Enable RawIO");
2391 } else {
2392 softs->aac_max_fibs =
2393 (softs->flags & AAC_FLAGS_256FIBS) ? 256 : 512;
2394 softs->aac_max_fib_size = AAC_FIB_SIZE;
2395 softs->aac_max_sectors = 128; /* 64K */
2396 if (softs->flags & AAC_FLAGS_17SG)
2397 softs->aac_sg_tablesize = 17;
2398 else if (softs->flags & AAC_FLAGS_34SG)
2399 softs->aac_sg_tablesize = 34;
2400 else if (softs->flags & AAC_FLAGS_SG_64BIT)
2401 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE -
2402 sizeof (struct aac_blockwrite64) +
2403 sizeof (struct aac_sg_entry64)) /
2404 sizeof (struct aac_sg_entry64);
2405 else
2406 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE -
2407 sizeof (struct aac_blockwrite) +
2408 sizeof (struct aac_sg_entry)) /
2409 sizeof (struct aac_sg_entry);
2412 if ((softs->flags & AAC_FLAGS_RAW_IO) &&
2413 (softs->flags & AAC_FLAGS_ARRAY_64BIT)) {
2414 softs->flags |= AAC_FLAGS_LBA_64BIT;
2415 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array");
2417 softs->buf_dma_attr.dma_attr_sgllen = softs->aac_sg_tablesize;
2418 softs->buf_dma_attr.dma_attr_maxxfer = softs->aac_max_sectors << 9;
2420 * 64K maximum segment size in scatter gather list is controlled by
2421 * the NEW_COMM bit in the adapter information. If not set, the card
2422 * can only accept a maximum of 64K. It is not recommended to permit
2423 * more than 128KB of total transfer size to the adapters because
2424 * performance is negatively impacted.
2426 * For new comm, segment size equals max xfer size. For old comm,
2427 * we use 64K for both.
2429 softs->buf_dma_attr.dma_attr_count_max =
2430 softs->buf_dma_attr.dma_attr_maxxfer - 1;
2432 /* Setup FIB operations */
2433 if (softs->flags & AAC_FLAGS_RAW_IO)
2434 softs->aac_cmd_fib = aac_cmd_fib_rawio;
2435 else if (softs->flags & AAC_FLAGS_SG_64BIT)
2436 softs->aac_cmd_fib = aac_cmd_fib_brw64;
2437 else
2438 softs->aac_cmd_fib = aac_cmd_fib_brw;
2439 softs->aac_cmd_fib_scsi = (softs->flags & AAC_FLAGS_SG_64BIT) ? \
2440 aac_cmd_fib_scsi64 : aac_cmd_fib_scsi32;
2442 /* 64-bit LBA needs descriptor format sense data */
2443 softs->slen = sizeof (struct scsi_arq_status);
2444 if ((softs->flags & AAC_FLAGS_LBA_64BIT) &&
2445 softs->slen < AAC_ARQ64_LENGTH)
2446 softs->slen = AAC_ARQ64_LENGTH;
2448 AACDB_PRINT(softs, CE_NOTE,
2449 "!max_fibs %d max_fibsize 0x%x max_sectors %d max_sg %d",
2450 softs->aac_max_fibs, softs->aac_max_fib_size,
2451 softs->aac_max_sectors, softs->aac_sg_tablesize);
2453 return (AACOK);
2456 static void
2457 aac_fsa_rev(struct aac_softstate *softs, struct FsaRev *fsarev0,
2458 struct FsaRev *fsarev1)
2460 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle;
2462 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.dash);
2463 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.type);
2464 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.minor);
2465 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.major);
2466 AAC_GET_FIELD32(acc, fsarev1, fsarev0, buildNumber);
2470 * The following function comes from Adaptec:
2472 * Query adapter information and supplement adapter information
2474 static int
2475 aac_get_adapter_info(struct aac_softstate *softs,
2476 struct aac_adapter_info *ainfr, struct aac_supplement_adapter_info *sinfr)
2478 struct aac_cmd *acp = &softs->sync_ac;
2479 ddi_acc_handle_t acc;
2480 struct aac_fib *fibp;
2481 struct aac_adapter_info *ainfp;
2482 struct aac_supplement_adapter_info *sinfp;
2483 int rval;
2485 (void) aac_sync_fib_slot_bind(softs, acp);
2486 acc = acp->slotp->fib_acc_handle;
2487 fibp = acp->slotp->fibp;
2489 ddi_put8(acc, &fibp->data[0], 0);
2490 if (aac_sync_fib(softs, RequestAdapterInfo,
2491 AAC_FIB_SIZEOF(struct aac_adapter_info)) != AACOK) {
2492 AACDB_PRINT(softs, CE_WARN, "RequestAdapterInfo failed");
2493 rval = AACERR;
2494 goto finish;
2496 ainfp = (struct aac_adapter_info *)fibp->data;
2497 if (ainfr) {
2498 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions);
2499 AAC_GET_FIELD32(acc, ainfr, ainfp, PlatformBase);
2500 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuArchitecture);
2501 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuVariant);
2502 AAC_GET_FIELD32(acc, ainfr, ainfp, ClockSpeed);
2503 AAC_GET_FIELD32(acc, ainfr, ainfp, ExecutionMem);
2504 AAC_GET_FIELD32(acc, ainfr, ainfp, BufferMem);
2505 AAC_GET_FIELD32(acc, ainfr, ainfp, TotalMem);
2506 aac_fsa_rev(softs, &ainfp->KernelRevision,
2507 &ainfr->KernelRevision);
2508 aac_fsa_rev(softs, &ainfp->MonitorRevision,
2509 &ainfr->MonitorRevision);
2510 aac_fsa_rev(softs, &ainfp->HardwareRevision,
2511 &ainfr->HardwareRevision);
2512 aac_fsa_rev(softs, &ainfp->BIOSRevision,
2513 &ainfr->BIOSRevision);
2514 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusteringEnabled);
2515 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusterChannelMask);
2516 AAC_GET_FIELD64(acc, ainfr, ainfp, SerialNumber);
2517 AAC_GET_FIELD32(acc, ainfr, ainfp, batteryPlatform);
2518 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions);
2519 AAC_GET_FIELD32(acc, ainfr, ainfp, OemVariant);
2521 if (sinfr) {
2522 if (!(softs->support_opt &
2523 AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO)) {
2524 AACDB_PRINT(softs, CE_WARN,
2525 "SupplementAdapterInfo not supported");
2526 rval = AACERR;
2527 goto finish;
2529 ddi_put8(acc, &fibp->data[0], 0);
2530 if (aac_sync_fib(softs, RequestSupplementAdapterInfo,
2531 AAC_FIB_SIZEOF(struct aac_supplement_adapter_info))
2532 != AACOK) {
2533 AACDB_PRINT(softs, CE_WARN,
2534 "RequestSupplementAdapterInfo failed");
2535 rval = AACERR;
2536 goto finish;
2538 sinfp = (struct aac_supplement_adapter_info *)fibp->data;
2539 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, AdapterTypeText[0], 17+1);
2540 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, Pad[0], 2);
2541 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashMemoryByteSize);
2542 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashImageId);
2543 AAC_GET_FIELD32(acc, sinfr, sinfp, MaxNumberPorts);
2544 AAC_GET_FIELD32(acc, sinfr, sinfp, Version);
2545 AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits);
2546 AAC_GET_FIELD8(acc, sinfr, sinfp, SlotNumber);
2547 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, ReservedPad0[0], 3);
2548 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, BuildDate[0], 12);
2549 AAC_GET_FIELD32(acc, sinfr, sinfp, CurrentNumberPorts);
2550 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, VpdInfo,
2551 sizeof (struct vpd_info));
2552 aac_fsa_rev(softs, &sinfp->FlashFirmwareRevision,
2553 &sinfr->FlashFirmwareRevision);
2554 AAC_GET_FIELD32(acc, sinfr, sinfp, RaidTypeMorphOptions);
2555 aac_fsa_rev(softs, &sinfp->FlashFirmwareBootRevision,
2556 &sinfr->FlashFirmwareBootRevision);
2557 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgPcbaSerialNo,
2558 MFG_PCBA_SERIAL_NUMBER_WIDTH);
2559 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgWWNName[0],
2560 MFG_WWN_WIDTH);
2561 AAC_GET_FIELD32(acc, sinfr, sinfp, SupportedOptions2);
2562 AAC_GET_FIELD32(acc, sinfr, sinfp, ExpansionFlag);
2563 if (sinfr->ExpansionFlag == 1) {
2564 AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits3);
2565 AAC_GET_FIELD32(acc, sinfr, sinfp,
2566 SupportedPerformanceMode);
2567 AAC_REP_GET_FIELD32(acc, sinfr, sinfp,
2568 ReservedGrowth[0], 80);
2571 rval = AACOK;
2572 finish:
2573 aac_sync_fib_slot_release(softs, acp);
2574 return (rval);
2577 static int
2578 aac_get_bus_info(struct aac_softstate *softs, uint32_t *bus_max,
2579 uint32_t *tgt_max)
2581 struct aac_cmd *acp = &softs->sync_ac;
2582 ddi_acc_handle_t acc;
2583 struct aac_fib *fibp;
2584 struct aac_ctcfg *c_cmd;
2585 struct aac_ctcfg_resp *c_resp;
2586 uint32_t scsi_method_id;
2587 struct aac_bus_info *cmd;
2588 struct aac_bus_info_response *resp;
2589 int rval;
2591 (void) aac_sync_fib_slot_bind(softs, acp);
2592 acc = acp->slotp->fib_acc_handle;
2593 fibp = acp->slotp->fibp;
2595 /* Detect MethodId */
2596 c_cmd = (struct aac_ctcfg *)&fibp->data[0];
2597 ddi_put32(acc, &c_cmd->Command, VM_ContainerConfig);
2598 ddi_put32(acc, &c_cmd->cmd, CT_GET_SCSI_METHOD);
2599 ddi_put32(acc, &c_cmd->param, 0);
2600 rval = aac_sync_fib(softs, ContainerCommand,
2601 AAC_FIB_SIZEOF(struct aac_ctcfg));
2602 c_resp = (struct aac_ctcfg_resp *)&fibp->data[0];
2603 if (rval != AACOK || ddi_get32(acc, &c_resp->Status) != 0) {
2604 AACDB_PRINT(softs, CE_WARN,
2605 "VM_ContainerConfig command fail");
2606 rval = AACERR;
2607 goto finish;
2609 scsi_method_id = ddi_get32(acc, &c_resp->param);
2611 /* Detect phys. bus count and max. target id first */
2612 cmd = (struct aac_bus_info *)&fibp->data[0];
2613 ddi_put32(acc, &cmd->Command, VM_Ioctl);
2614 ddi_put32(acc, &cmd->ObjType, FT_DRIVE); /* physical drive */
2615 ddi_put32(acc, &cmd->MethodId, scsi_method_id);
2616 ddi_put32(acc, &cmd->ObjectId, 0);
2617 ddi_put32(acc, &cmd->CtlCmd, GetBusInfo);
2619 * For VM_Ioctl, the firmware uses the Header.Size filled in by the
2620 * driver as the size to be returned. Therefore the driver has to use
2621 * sizeof (struct aac_bus_info_response) because it is greater than
2622 * sizeof (struct aac_bus_info).
2624 rval = aac_sync_fib(softs, ContainerCommand,
2625 AAC_FIB_SIZEOF(struct aac_bus_info_response));
2626 resp = (struct aac_bus_info_response *)cmd;
2628 /* Scan all coordinates with INQUIRY */
2629 if ((rval != AACOK) || (ddi_get32(acc, &resp->Status) != 0)) {
2630 AACDB_PRINT(softs, CE_WARN, "GetBusInfo command failed");
2631 rval = AACERR;
2632 goto finish;
2634 *bus_max = ddi_get32(acc, &resp->BusCount);
2635 *tgt_max = ddi_get32(acc, &resp->TargetsPerBus);
2637 finish:
2638 aac_sync_fib_slot_release(softs, acp);
2639 return (rval);
2643 * The following function comes from Adaptec:
2645 * Routine to be called during initialization of communications with
2646 * the adapter to handle possible adapter configuration issues. When
2647 * the adapter first boots up, it examines attached drives, etc., and
2648 * potentially comes up with a new or revised configuration (relative to
2649 * what's stored in its NVRAM). Additionally it may discover problems
2650 * that make the current physical configuration unworkable (currently
2651 * applicable only to cluster configuration issues).
2653 * If there are no configuration issues or the issues are considered
2654 * trivial by the adapter, it will set its configuration status to
2655 * "FSACT_CONTINUE" and execute the "commit configuration" action
2656 * automatically on its own.
2658 * However, if there are non-trivial issues, the adapter will set its
2659 * internal configuration status to "FSACT_PAUSE" or "FSACT_ABORT"
2660 * and wait for some agent on the host to issue the "\ContainerCommand
2661 * \VM_ContainerConfig\CT_COMMIT_CONFIG" FIB command to cause the
2662 * adapter to commit the new/updated configuration and enable
2663 * un-inhibited operation. The host agent should first issue the
2664 * "\ContainerCommand\VM_ContainerConfig\CT_GET_CONFIG_STATUS" FIB
2665 * command to obtain information about config issues detected by
2666 * the adapter.
2668 * Normally the adapter's PC BIOS will execute on the host following
2669 * adapter poweron and reset and will be responsible for querying the
2670 * adapter with CT_GET_CONFIG_STATUS and issuing the CT_COMMIT_CONFIG
2671 * command if appropriate.
2673 * However, with the introduction of IOP reset support, the adapter may
2674 * boot up without the benefit of the adapter's PC BIOS host agent.
2675 * This routine is intended to take care of these issues in situations
2676 * where BIOS doesn't execute following adapter poweron or reset. The
2677 * CT_COMMIT_CONFIG command is a no-op if it's already been issued, so
2678 * there is no harm in doing this when it's already been done.
2680 static int
2681 aac_handle_adapter_config_issues(struct aac_softstate *softs)
2683 struct aac_cmd *acp = &softs->sync_ac;
2684 ddi_acc_handle_t acc;
2685 struct aac_fib *fibp;
2686 struct aac_Container *cmd;
2687 struct aac_Container_resp *resp;
2688 struct aac_cf_status_header *cfg_sts_hdr;
2689 uint32_t resp_status;
2690 uint32_t ct_status;
2691 uint32_t cfg_stat_action;
2692 int rval;
2694 (void) aac_sync_fib_slot_bind(softs, acp);
2695 acc = acp->slotp->fib_acc_handle;
2696 fibp = acp->slotp->fibp;
2698 /* Get adapter config status */
2699 cmd = (struct aac_Container *)&fibp->data[0];
2701 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE);
2702 ddi_put32(acc, &cmd->Command, VM_ContainerConfig);
2703 ddi_put32(acc, &cmd->CTCommand.command, CT_GET_CONFIG_STATUS);
2704 ddi_put32(acc, &cmd->CTCommand.param[CNT_SIZE],
2705 sizeof (struct aac_cf_status_header));
2706 rval = aac_sync_fib(softs, ContainerCommand,
2707 AAC_FIB_SIZEOF(struct aac_Container));
2708 resp = (struct aac_Container_resp *)cmd;
2709 cfg_sts_hdr = (struct aac_cf_status_header *)resp->CTResponse.data;
2711 resp_status = ddi_get32(acc, &resp->Status);
2712 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]);
2713 if ((rval == AACOK) && (resp_status == 0) && (ct_status == CT_OK)) {
2714 cfg_stat_action = ddi_get32(acc, &cfg_sts_hdr->action);
2716 /* Commit configuration if it's reasonable to do so. */
2717 if (cfg_stat_action <= CFACT_PAUSE) {
2718 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE);
2719 ddi_put32(acc, &cmd->Command, VM_ContainerConfig);
2720 ddi_put32(acc, &cmd->CTCommand.command,
2721 CT_COMMIT_CONFIG);
2722 rval = aac_sync_fib(softs, ContainerCommand,
2723 AAC_FIB_SIZEOF(struct aac_Container));
2725 resp_status = ddi_get32(acc, &resp->Status);
2726 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]);
2727 if ((rval == AACOK) && (resp_status == 0) &&
2728 (ct_status == CT_OK))
2729 /* Successful completion */
2730 rval = AACMPE_OK;
2731 else
2732 /* Auto-commit aborted due to error(s). */
2733 rval = AACMPE_COMMIT_CONFIG;
2734 } else {
2736 * Auto-commit aborted due to adapter indicating
2737 * configuration issue(s) too dangerous to auto-commit.
2739 rval = AACMPE_CONFIG_STATUS;
2741 } else {
2742 cmn_err(CE_WARN, "!Configuration issue, auto-commit aborted");
2743 rval = AACMPE_CONFIG_STATUS;
2746 aac_sync_fib_slot_release(softs, acp);
2747 return (rval);
2751 * Hardware initialization and resource allocation
2753 static int
2754 aac_common_attach(struct aac_softstate *softs)
2756 uint32_t status;
2757 int i;
2758 struct aac_supplement_adapter_info sinf;
2760 DBCALLED(softs, 1);
2763 * Do a little check here to make sure there aren't any outstanding
2764 * FIBs in the message queue. At this point there should not be and
2765 * if there are they are probably left over from another instance of
2766 * the driver like when the system crashes and the crash dump driver
2767 * gets loaded.
2769 while (AAC_OUTB_GET(softs) != 0xfffffffful)
2773 * Wait for the card to complete booting up before doing anything that
2774 * attempts to communicate with it.
2776 status = AAC_FWSTATUS_GET(softs);
2777 if (status == AAC_SELF_TEST_FAILED || status == AAC_KERNEL_PANIC)
2778 goto error;
2779 i = AAC_FWUP_TIMEOUT * 1000; /* set timeout */
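/*
 * AAC_BUSYWAIT polls the condition while counting i down, so i == 0 on
 * exit means the firmware never reported KERNEL_UP_AND_RUNNING in time.
 */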
2780 AAC_BUSYWAIT(AAC_FWSTATUS_GET(softs) & AAC_KERNEL_UP_AND_RUNNING, i);
2781 if (i == 0) {
2782 cmn_err(CE_CONT, "?Fatal error: controller not ready");
2783 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2784 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2785 goto error;
2788 /* Read and set card supported options and settings */
2789 if (aac_check_firmware(softs) == AACERR) {
2790 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2791 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2792 goto error;
2795 /* Add interrupt handlers */
2796 if (aac_register_intrs(softs) == AACERR) {
2797 cmn_err(CE_CONT,
2798 "?Fatal error: interrupts register failed");
2799 goto error;
2802 /* Setup communication space with the card */
2803 if (softs->comm_space_dma_handle == NULL) {
2804 if (aac_alloc_comm_space(softs) != AACOK)
2805 goto error;
2807 if (aac_setup_comm_space(softs) != AACOK) {
2808 cmn_err(CE_CONT, "?Setup communication space failed");
2809 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2810 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2811 goto error;
2814 #ifdef DEBUG
2815 if (aac_get_fw_debug_buffer(softs) != AACOK)
2816 cmn_err(CE_CONT, "?firmware UART trace not supported");
2817 #endif
2819 /* Allocate slots */
2820 if ((softs->total_slots == 0) && (aac_create_slots(softs) != AACOK)) {
2821 cmn_err(CE_CONT, "?Fatal error: slots allocate failed");
2822 goto error;
2824 AACDB_PRINT(softs, CE_NOTE, "%d slots allocated", softs->total_slots);
2826 /* Allocate FIBs */
2827 if (softs->total_fibs < softs->total_slots) {
2828 aac_alloc_fibs(softs);
2829 if (softs->total_fibs == 0)
2830 goto error;
2831 AACDB_PRINT(softs, CE_NOTE, "%d fibs allocated",
2832 softs->total_fibs);
2835 AAC_STATUS_CLR(softs, ~0); /* Clear out all interrupts */
2836 AAC_ENABLE_INTR(softs); /* Enable the interrupts we can handle */
2838 if (aac_get_adapter_info(softs, NULL, &sinf) == AACOK) {
2839 softs->feature_bits = sinf.FeatureBits;
2840 softs->support_opt2 = sinf.SupportedOptions2;
2842 /* Get adapter names */
2843 if (CARD_IS_UNKNOWN(softs->card)) {
2844 char *p, *p0, *p1;
2847 * Now find the controller name in supp_adapter_info->
2848 * AdapterTypeText. Use the first word as the vendor
2849 * and the other words as the product name.
2851 AACDB_PRINT(softs, CE_NOTE, "sinf.AdapterTypeText = "
2852 "\"%s\"", sinf.AdapterTypeText);
2853 p = sinf.AdapterTypeText;
2854 p0 = p1 = NULL;
2855 /* Skip leading spaces */
2856 while (*p && (*p == ' ' || *p == '\t'))
2857 p++;
2858 p0 = p;
2859 while (*p && (*p != ' ' && *p != '\t'))
2860 p++;
2861 /* Remove middle spaces */
2862 while (*p && (*p == ' ' || *p == '\t'))
2863 *p++ = 0;
2864 p1 = p;
2865 /* Remove trailing spaces */
2866 p = p1 + strlen(p1) - 1;
2867 while (p > p1 && (*p == ' ' || *p == '\t'))
2868 *p-- = 0;
2869 if (*p0 && *p1) {
2870 (void) strncpy(softs->vendor_name, p0,
2871 AAC_VENDOR_LEN);
2872 (void) strncpy(softs->product_name, p1,
2873 AAC_PRODUCT_LEN);
2874 } else {
2875 cmn_err(CE_WARN,
2876 "?adapter name mis-formatted\n");
2877 if (*p0)
2878 (void) strncpy(softs->product_name,
2879 p0, AAC_PRODUCT_LEN);
2882 } else {
2883 cmn_err(CE_CONT, "?Query adapter information failed");
2887 cmn_err(CE_NOTE,
2888 "!aac driver %d.%02d.%02d-%d, found card: " \
2889 "%s %s(pci0x%x.%x.%x.%x) at 0x%x",
2890 AAC_DRIVER_MAJOR_VERSION,
2891 AAC_DRIVER_MINOR_VERSION,
2892 AAC_DRIVER_BUGFIX_LEVEL,
2893 AAC_DRIVER_BUILD,
2894 softs->vendor_name, softs->product_name,
2895 softs->vendid, softs->devid, softs->subvendid, softs->subsysid,
2896 softs->pci_mem_base_paddr);
2898 /* Perform acceptance of adapter-detected config changes if possible */
2899 if (aac_handle_adapter_config_issues(softs) != AACMPE_OK) {
2900 cmn_err(CE_CONT, "?Handle adapter config issues failed");
2901 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2902 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2903 goto error;
2906 /* Setup containers (logical devices) */
2907 if (aac_probe_containers(softs) != AACOK) {
2908 cmn_err(CE_CONT, "?Fatal error: get container info error");
2909 goto error;
2912 /* Check for JBOD support. Disabled by default */
2913 char *data;
2914 if (softs->feature_bits & AAC_FEATURE_SUPPORTED_JBOD) {
2915 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, softs->devinfo_p,
2916 0, "jbod-enable", &data) == DDI_SUCCESS)) {
2917 if (strcmp(data, "yes") == 0) {
2918 AACDB_PRINT(softs, CE_NOTE,
2919 "Enable JBOD access");
2920 softs->flags |= AAC_FLAGS_JBOD;
2922 ddi_prop_free(data);
2926 /* Setup phys. devices */
2927 if (softs->flags & (AAC_FLAGS_NONDASD | AAC_FLAGS_JBOD)) {
2928 uint32_t bus_max, tgt_max;
2929 uint32_t bus, tgt;
2930 int index;
2932 if (aac_get_bus_info(softs, &bus_max, &tgt_max) != AACOK) {
2933 cmn_err(CE_CONT, "?Fatal error: get bus info error");
2934 goto error;
2936 AACDB_PRINT(softs, CE_NOTE, "bus_max=%d, tgt_max=%d",
2937 bus_max, tgt_max);
2938 if (bus_max != softs->bus_max || tgt_max != softs->tgt_max) {
2939 if (softs->state & AAC_STATE_RESET) {
2940 cmn_err(CE_WARN,
2941 "?Fatal error: bus map changed");
2942 goto error;
2944 softs->bus_max = bus_max;
2945 softs->tgt_max = tgt_max;
2946 if (softs->nondasds) {
2947 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
2948 sizeof (struct aac_nondasd));
2950 softs->nondasds = kmem_zalloc(AAC_MAX_PD(softs) * \
2951 sizeof (struct aac_nondasd), KM_SLEEP);
2953 index = 0;
2954 for (bus = 0; bus < softs->bus_max; bus++) {
2955 for (tgt = 0; tgt < softs->tgt_max; tgt++) {
2956 struct aac_nondasd *dvp =
2957 &softs->nondasds[index++];
2958 dvp->dev.type = AAC_DEV_PD;
2959 dvp->bus = bus;
2960 dvp->tid = tgt;
2966 /* Check dma & acc handles allocated in attach */
2967 if (aac_check_dma_handle(softs->comm_space_dma_handle) != DDI_SUCCESS) {
2968 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2969 goto error;
2972 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
2973 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2974 goto error;
2977 for (i = 0; i < softs->total_slots; i++) {
2978 if (aac_check_dma_handle(softs->io_slot[i].fib_dma_handle) !=
2979 DDI_SUCCESS) {
2980 ddi_fm_service_impact(softs->devinfo_p,
2981 DDI_SERVICE_LOST);
2982 goto error;
2986 return (AACOK);
2987 error:
2988 if (softs->state & AAC_STATE_RESET)
2989 return (AACERR);
2990 if (softs->nondasds) {
2991 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
2992 sizeof (struct aac_nondasd));
2993 softs->nondasds = NULL;
2995 if (softs->total_fibs > 0)
2996 aac_destroy_fibs(softs);
2997 if (softs->total_slots > 0)
2998 aac_destroy_slots(softs);
2999 if (softs->comm_space_dma_handle)
3000 aac_free_comm_space(softs);
3001 return (AACERR);
3005 * Hardware shutdown and resource release
3007 static void
3008 aac_common_detach(struct aac_softstate *softs)
3010 DBCALLED(softs, 1);
3012 aac_unregister_intrs(softs);
3014 mutex_enter(&softs->io_lock);
3015 (void) aac_shutdown(softs);
3017 if (softs->nondasds) {
3018 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
3019 sizeof (struct aac_nondasd));
3020 softs->nondasds = NULL;
3022 aac_destroy_fibs(softs);
3023 aac_destroy_slots(softs);
3024 aac_free_comm_space(softs);
3025 mutex_exit(&softs->io_lock);
3029 * Send a synchronous command to the controller and wait for a result.
3030 * Indicate if the controller completed the command with an error status.
3033 aac_sync_mbcommand(struct aac_softstate *softs, uint32_t cmd,
3034 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3,
3035 uint32_t *statusp)
3037 int timeout;
3038 uint32_t status;
3040 if (statusp != NULL)
3041 *statusp = SRB_STATUS_SUCCESS;
3043 /* Fill in mailbox */
3044 AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3);
3046 /* Ensure the sync command doorbell flag is cleared */
3047 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND);
3049 /* Then set it to signal the adapter */
3050 AAC_NOTIFY(softs, AAC_DB_SYNC_COMMAND);
3052 /* Spin waiting for the command to complete */
3053 timeout = AAC_IMMEDIATE_TIMEOUT * 1000;
3054 AAC_BUSYWAIT(AAC_STATUS_GET(softs) & AAC_DB_SYNC_COMMAND, timeout);
3055 if (!timeout) {
3056 AACDB_PRINT(softs, CE_WARN,
3057 "Sync command timed out after %d seconds (0x%x)!",
3058 AAC_IMMEDIATE_TIMEOUT, AAC_FWSTATUS_GET(softs));
3059 return (AACERR);
3062 /* Clear the completion flag */
3063 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND);
3065 /* Get the command status */
3066 status = AAC_MAILBOX_GET(softs, 0);
3067 if (statusp != NULL)
3068 *statusp = status;
3069 if (status != SRB_STATUS_SUCCESS) {
3070 AACDB_PRINT(softs, CE_WARN,
3071 "Sync command fail: status = 0x%x", status);
3072 return (AACERR);
3075 return (AACOK);
3079 * Send a synchronous FIB to the adapter and wait for its completion
3081 static int
3082 aac_sync_fib(struct aac_softstate *softs, uint16_t cmd, uint16_t fibsize)
3084 struct aac_cmd *acp = &softs->sync_ac;
3086 acp->flags = AAC_CMD_SYNC | AAC_CMD_IN_SYNC_SLOT;
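/*
 * If interrupts are not yet enabled the FIB is polled for completion;
 * otherwise it completes through the interrupt path with no callback.
 */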
3087 if (softs->state & AAC_STATE_INTR)
3088 acp->flags |= AAC_CMD_NO_CB;
3089 else
3090 acp->flags |= AAC_CMD_NO_INTR;
3092 acp->ac_comp = aac_sync_complete;
3093 acp->timeout = AAC_SYNC_TIMEOUT;
3094 acp->fib_size = fibsize;
3097 * Only the sync FIB header needs to be set up here; the caller should
3098 * have initialized the FIB data
3100 aac_cmd_fib_header(softs, acp, cmd);
3102 (void) ddi_dma_sync(acp->slotp->fib_dma_handle, 0, fibsize,
3103 DDI_DMA_SYNC_FORDEV);
3105 aac_start_io(softs, acp);
3107 if (softs->state & AAC_STATE_INTR)
3108 return (aac_do_sync_io(softs, acp));
3109 else
3110 return (aac_do_poll_io(softs, acp));
3113 static void
3114 aac_cmd_initq(struct aac_cmd_queue *q)
3116 q->q_head = NULL;
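/* Point q_tail back at q_head so enqueueing into an empty queue needs no special case */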
3117 q->q_tail = (struct aac_cmd *)&q->q_head;
3121 * Remove a cmd from the head of q
3123 static struct aac_cmd *
3124 aac_cmd_dequeue(struct aac_cmd_queue *q)
3126 struct aac_cmd *acp;
3128 _NOTE(ASSUMING_PROTECTED(*q))
3130 if ((acp = q->q_head) != NULL) {
3131 if ((q->q_head = acp->next) != NULL)
3132 acp->next = NULL;
3133 else
3134 q->q_tail = (struct aac_cmd *)&q->q_head;
3135 acp->prev = NULL;
3137 return (acp);
3141 * Add a cmd to the tail of q
3143 static void
3144 aac_cmd_enqueue(struct aac_cmd_queue *q, struct aac_cmd *acp)
3146 ASSERT(acp->next == NULL);
3147 acp->prev = q->q_tail;
3148 q->q_tail->next = acp;
3149 q->q_tail = acp;
3153 * Remove the cmd ac from q
3155 static void
3156 aac_cmd_delete(struct aac_cmd_queue *q, struct aac_cmd *acp)
3158 if (acp->prev) {
3159 if ((acp->prev->next = acp->next) != NULL) {
3160 acp->next->prev = acp->prev;
3161 acp->next = NULL;
3162 } else {
3163 q->q_tail = acp->prev;
3165 acp->prev = NULL;
3167 /* ac is not in the queue */
3171 * Atomically insert an entry into the nominated queue, returns 0 on success or
3172 * AACERR if the queue is full.
3174 * Note: it would be more efficient to defer notifying the controller in
3175 * the case where we may be inserting several entries in rapid succession,
3176 * but implementing this usefully may be difficult (it would involve a
3177 * separate queue/notify interface).
3179 static int
3180 aac_fib_enqueue(struct aac_softstate *softs, int queue, uint32_t fib_addr,
3181 uint32_t fib_size)
3183 ddi_dma_handle_t dma = softs->comm_space_dma_handle;
3184 ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3185 uint32_t pi, ci;
3187 DBCALLED(softs, 2);
3189 ASSERT(queue == AAC_ADAP_NORM_CMD_Q || queue == AAC_ADAP_NORM_RESP_Q);
3191 /* Get the producer/consumer indices */
3192 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \
3193 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2,
3194 DDI_DMA_SYNC_FORCPU);
3195 if (aac_check_dma_handle(dma) != DDI_SUCCESS) {
3196 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
3197 return (AACERR);
3200 pi = ddi_get32(acc,
3201 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]);
3202 ci = ddi_get32(acc,
3203 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]);
3206 * Wrap the producer index first, before checking whether the queue
3207 * is full
3209 if (pi >= aac_qinfo[queue].size)
3210 pi = 0;
3212 /* XXX queue full */
3213 if ((pi + 1) == ci)
3214 return (AACERR);
3216 /* Fill in queue entry */
3217 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_size), fib_size);
3218 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_addr), fib_addr);
3219 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \
3220 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry),
3221 DDI_DMA_SYNC_FORDEV);
3223 /* Update producer index */
3224 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX],
3225 pi + 1);
3226 (void) ddi_dma_sync(dma,
3227 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX] - \
3228 (uintptr_t)softs->comm_space, sizeof (uint32_t),
3229 DDI_DMA_SYNC_FORDEV);
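/* Notify the adapter that a new entry is on its queue, if this queue uses a doorbell */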
3231 if (aac_qinfo[queue].notify != 0)
3232 AAC_NOTIFY(softs, aac_qinfo[queue].notify);
3233 return (AACOK);
3237 * Atomically remove one entry from the nominated queue, returns 0 on
3238 * success or AACERR if the queue is empty.
3240 static int
3241 aac_fib_dequeue(struct aac_softstate *softs, int queue, int *idxp)
3243 ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3244 ddi_dma_handle_t dma = softs->comm_space_dma_handle;
3245 uint32_t pi, ci;
3246 int unfull = 0;
3248 DBCALLED(softs, 2);
3250 ASSERT(idxp);
3252 /* Get the producer/consumer indices */
3253 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \
3254 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2,
3255 DDI_DMA_SYNC_FORCPU);
3256 pi = ddi_get32(acc,
3257 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]);
3258 ci = ddi_get32(acc,
3259 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]);
3261 /* Check for queue empty */
3262 if (ci == pi)
3263 return (AACERR);
3265 if (pi >= aac_qinfo[queue].size)
3266 pi = 0;
3268 /* Check for queue full */
3269 if (ci == pi + 1)
3270 unfull = 1;
3273 * The controller does not wrap the queue,
3274 * so we have to do it ourselves
3276 if (ci >= aac_qinfo[queue].size)
3277 ci = 0;
3279 /* Fetch the entry */
3280 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \
3281 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry),
3282 DDI_DMA_SYNC_FORCPU);
3283 if (aac_check_dma_handle(dma) != DDI_SUCCESS) {
3284 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
3285 return (AACERR);
3288 switch (queue) {
3289 case AAC_HOST_NORM_RESP_Q:
3290 case AAC_HOST_HIGH_RESP_Q:
3291 *idxp = ddi_get32(acc,
3292 &(softs->qentries[queue] + ci)->aq_fib_addr);
3293 break;
3295 case AAC_HOST_NORM_CMD_Q:
3296 case AAC_HOST_HIGH_CMD_Q:
3297 *idxp = ddi_get32(acc,
3298 &(softs->qentries[queue] + ci)->aq_fib_addr) / AAC_FIB_SIZE;
3299 break;
3301 default:
3302 cmn_err(CE_NOTE, "!Invalid queue in aac_fib_dequeue()");
3303 return (AACERR);
3306 /* Update consumer index */
3307 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX],
3308 ci + 1);
3309 (void) ddi_dma_sync(dma,
3310 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX] - \
3311 (uintptr_t)softs->comm_space, sizeof (uint32_t),
3312 DDI_DMA_SYNC_FORDEV);
3314 if (unfull && aac_qinfo[queue].notify != 0)
3315 AAC_NOTIFY(softs, aac_qinfo[queue].notify);
3316 return (AACOK);
3319 static struct aac_mntinforesp *
3320 aac_get_mntinfo(struct aac_softstate *softs, int cid)
3322 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle;
3323 struct aac_fib *fibp = softs->sync_ac.slotp->fibp;
3324 struct aac_mntinfo *mi = (struct aac_mntinfo *)&fibp->data[0];
3325 struct aac_mntinforesp *mir;
3327 ddi_put32(acc, &mi->Command, /* Use 64-bit LBA if enabled */
3328 (softs->flags & AAC_FLAGS_LBA_64BIT) ?
3329 VM_NameServe64 : VM_NameServe);
3330 ddi_put32(acc, &mi->MntType, FT_FILESYS);
3331 ddi_put32(acc, &mi->MntCount, cid);
3333 if (aac_sync_fib(softs, ContainerCommand,
3334 AAC_FIB_SIZEOF(struct aac_mntinfo)) == AACERR) {
3335 AACDB_PRINT(softs, CE_WARN, "Error probing container %d", cid);
3336 return (NULL);
3339 mir = (struct aac_mntinforesp *)&fibp->data[0];
3340 if (ddi_get32(acc, &mir->Status) == ST_OK)
3341 return (mir);
3342 return (NULL);
3345 static int
3346 aac_get_container_count(struct aac_softstate *softs, int *count)
3348 ddi_acc_handle_t acc;
3349 struct aac_mntinforesp *mir;
3350 int rval;
3352 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
3353 acc = softs->sync_ac.slotp->fib_acc_handle;
3355 if ((mir = aac_get_mntinfo(softs, 0)) == NULL) {
3356 rval = AACERR;
3357 goto finish;
3359 *count = ddi_get32(acc, &mir->MntRespCount);
3360 if (*count > AAC_MAX_LD) {
3361 AACDB_PRINT(softs, CE_CONT,
3362 "container count(%d) > AAC_MAX_LD", *count);
3363 rval = AACERR;
3364 goto finish;
3366 rval = AACOK;
3368 finish:
3369 aac_sync_fib_slot_release(softs, &softs->sync_ac);
3370 return (rval);
3373 static int
3374 aac_get_container_uid(struct aac_softstate *softs, uint32_t cid, uint32_t *uid)
3376 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle;
3377 struct aac_Container *ct = (struct aac_Container *) \
3378 &softs->sync_ac.slotp->fibp->data[0];
3380 bzero(ct, sizeof (*ct) - CT_PACKET_SIZE);
3381 ddi_put32(acc, &ct->Command, VM_ContainerConfig);
3382 ddi_put32(acc, &ct->CTCommand.command, CT_CID_TO_32BITS_UID);
3383 ddi_put32(acc, &ct->CTCommand.param[0], cid);
3385 if (aac_sync_fib(softs, ContainerCommand,
3386 AAC_FIB_SIZEOF(struct aac_Container)) == AACERR)
3387 return (AACERR);
3388 if (ddi_get32(acc, &ct->CTCommand.param[0]) != CT_OK)
3389 return (AACERR);
3391 *uid = ddi_get32(acc, &ct->CTCommand.param[1]);
3392 return (AACOK);
3396 * Request information of the container cid
3398 static struct aac_mntinforesp *
3399 aac_get_container_info(struct aac_softstate *softs, int cid)
3401 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle;
3402 struct aac_mntinforesp *mir;
3403 int rval_uid;
3404 uint32_t uid;
3406 /* Get container UID first so that it will not overwrite mntinfo */
3407 rval_uid = aac_get_container_uid(softs, cid, &uid);
3409 /* Get container basic info */
3410 if ((mir = aac_get_mntinfo(softs, cid)) == NULL) {
3411 AACDB_PRINT(softs, CE_CONT,
3412 "query container %d info failed", cid);
3413 return (NULL);
3415 if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE)
3416 return (mir);
3417 if (rval_uid != AACOK) {
3418 AACDB_PRINT(softs, CE_CONT,
3419 "query container %d uid failed", cid);
3420 return (NULL);
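/*
 * mir->Status has already been checked to be ST_OK, so reuse the field
 * to hand the container UID back to the caller.
 */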
3423 ddi_put32(acc, &mir->Status, uid);
3424 return (mir);
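/*
 * Probe a single container and map the result to a configuration event
 * (add, delete, change, or no change).
 */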
3427 static enum aac_cfg_event
3428 aac_probe_container(struct aac_softstate *softs, uint32_t cid)
3430 enum aac_cfg_event event = AAC_CFG_NULL_NOEXIST;
3431 struct aac_container *dvp = &softs->containers[cid];
3432 struct aac_mntinforesp *mir;
3433 ddi_acc_handle_t acc;
3435 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
3436 acc = softs->sync_ac.slotp->fib_acc_handle;
3438 /* Get container basic info */
3439 if ((mir = aac_get_container_info(softs, cid)) == NULL) {
3440 /* AAC_CFG_NULL_NOEXIST */
3441 goto finish;
3444 if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE) {
3445 if (AAC_DEV_IS_VALID(&dvp->dev)) {
3446 AACDB_PRINT(softs, CE_NOTE,
3447 ">>> Container %d deleted", cid);
3448 dvp->dev.flags &= ~AAC_DFLAG_VALID;
3449 event = AAC_CFG_DELETE;
3451 /* AAC_CFG_NULL_NOEXIST */
3452 } else {
3453 uint64_t size;
3454 uint32_t uid;
3456 event = AAC_CFG_NULL_EXIST;
3458 size = AAC_MIR_SIZE(softs, acc, mir);
3459 uid = ddi_get32(acc, &mir->Status);
3460 if (AAC_DEV_IS_VALID(&dvp->dev)) {
3461 if (dvp->uid != uid) {
3462 AACDB_PRINT(softs, CE_WARN,
3463 ">>> Container %u uid changed to %d",
3464 cid, uid);
3465 dvp->uid = uid;
3466 event = AAC_CFG_CHANGE;
3468 if (dvp->size != size) {
3469 AACDB_PRINT(softs, CE_NOTE,
3470 ">>> Container %u size changed to %"PRIu64,
3471 cid, size);
3472 dvp->size = size;
3473 event = AAC_CFG_CHANGE;
3475 } else { /* Init new container */
3476 AACDB_PRINT(softs, CE_NOTE,
3477 ">>> Container %d added: " \
3478 "size=0x%x.%08x, type=%d, name=%s",
3479 cid,
3480 ddi_get32(acc, &mir->MntObj.CapacityHigh),
3481 ddi_get32(acc, &mir->MntObj.Capacity),
3482 ddi_get32(acc, &mir->MntObj.VolType),
3483 mir->MntObj.FileSystemName);
3484 dvp->dev.flags |= AAC_DFLAG_VALID;
3485 dvp->dev.type = AAC_DEV_LD;
3487 dvp->cid = cid;
3488 dvp->uid = uid;
3489 dvp->size = size;
3490 dvp->locked = 0;
3491 dvp->deleted = 0;
3493 event = AAC_CFG_ADD;
3497 finish:
3498 aac_sync_fib_slot_release(softs, &softs->sync_ac);
3499 return (event);
3503 * Do a rescan of all the possible containers and update the container list
3504 * with newly online/offline containers, and prepare for autoconfiguration.
3506 static int
3507 aac_probe_containers(struct aac_softstate *softs)
3509 int i, count, total;
3511 /* Loop over possible containers */
3512 count = softs->container_count;
3513 if (aac_get_container_count(softs, &count) == AACERR)
3514 return (AACERR);
3516 for (i = total = 0; i < count; i++) {
3517 enum aac_cfg_event event = aac_probe_container(softs, i);
3518 if ((event != AAC_CFG_NULL_NOEXIST) &&
3519 (event != AAC_CFG_NULL_EXIST)) {
3520 (void) aac_handle_dr(softs, i, -1, event);
3521 total++;
3525 if (count < softs->container_count) {
3526 struct aac_container *dvp;
3528 for (dvp = &softs->containers[count];
3529 dvp < &softs->containers[softs->container_count]; dvp++) {
3530 if (!AAC_DEV_IS_VALID(&dvp->dev))
3531 continue;
3532 AACDB_PRINT(softs, CE_NOTE, ">>> Container %d deleted",
3533 dvp->cid);
3534 dvp->dev.flags &= ~AAC_DFLAG_VALID;
3535 (void) aac_handle_dr(softs, dvp->cid, -1,
3536 AAC_CFG_DELETE);
3540 softs->container_count = count;
3541 AACDB_PRINT(softs, CE_CONT, "?Total %d container(s) found", total);
3542 return (AACOK);
3545 static int
3546 aac_probe_jbod(struct aac_softstate *softs, int tgt, int event)
3548 ASSERT(AAC_MAX_LD <= tgt);
3549 ASSERT(tgt < AAC_MAX_DEV(softs));
3550 struct aac_device *dvp;
3551 dvp = AAC_DEV(softs, tgt);
3553 switch (event) {
3554 case AAC_CFG_ADD:
3555 AACDB_PRINT(softs, CE_NOTE,
3556 ">>> Jbod %d added", tgt - AAC_MAX_LD);
3557 dvp->flags |= AAC_DFLAG_VALID;
3558 dvp->type = AAC_DEV_PD;
3559 break;
3560 case AAC_CFG_DELETE:
3561 AACDB_PRINT(softs, CE_NOTE,
3562 ">>> Jbod %d deleted", tgt - AAC_MAX_LD);
3563 dvp->flags &= ~AAC_DFLAG_VALID;
3564 break;
3565 default:
3566 return (AACERR);
3568 (void) aac_handle_dr(softs, tgt, 0, event);
3569 return (AACOK);
3572 static int
3573 aac_alloc_comm_space(struct aac_softstate *softs)
3575 size_t rlen;
3576 ddi_dma_cookie_t cookie;
3577 uint_t cookien;
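/*
 * Standard DDI DMA setup: allocate a handle, allocate consistent memory,
 * then bind it to obtain the device-visible address of the comm. space.
 */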
3579 /* Allocate DMA for comm. space */
3580 if (ddi_dma_alloc_handle(
3581 softs->devinfo_p,
3582 &softs->addr_dma_attr,
3583 DDI_DMA_SLEEP,
3584 NULL,
3585 &softs->comm_space_dma_handle) != DDI_SUCCESS) {
3586 AACDB_PRINT(softs, CE_WARN,
3587 "Cannot alloc dma handle for communication area");
3588 goto error;
3590 if (ddi_dma_mem_alloc(
3591 softs->comm_space_dma_handle,
3592 sizeof (struct aac_comm_space),
3593 &softs->acc_attr,
3594 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3595 DDI_DMA_SLEEP,
3596 NULL,
3597 (caddr_t *)&softs->comm_space,
3598 &rlen,
3599 &softs->comm_space_acc_handle) != DDI_SUCCESS) {
3600 AACDB_PRINT(softs, CE_WARN,
3601 "Cannot alloc mem for communication area");
3602 goto error;
3604 if (ddi_dma_addr_bind_handle(
3605 softs->comm_space_dma_handle,
3606 NULL,
3607 (caddr_t)softs->comm_space,
3608 sizeof (struct aac_comm_space),
3609 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3610 DDI_DMA_SLEEP,
3611 NULL,
3612 &cookie,
3613 &cookien) != DDI_DMA_MAPPED) {
3614 AACDB_PRINT(softs, CE_WARN,
3615 "DMA bind failed for communication area");
3616 goto error;
3618 softs->comm_space_phyaddr = cookie.dmac_address;
3620 return (AACOK);
3621 error:
3622 if (softs->comm_space_acc_handle) {
3623 ddi_dma_mem_free(&softs->comm_space_acc_handle);
3624 softs->comm_space_acc_handle = NULL;
3626 if (softs->comm_space_dma_handle) {
3627 ddi_dma_free_handle(&softs->comm_space_dma_handle);
3628 softs->comm_space_dma_handle = NULL;
3630 return (AACERR);
3633 static void
3634 aac_free_comm_space(struct aac_softstate *softs)
3637 (void) ddi_dma_unbind_handle(softs->comm_space_dma_handle);
3638 ddi_dma_mem_free(&softs->comm_space_acc_handle);
3639 softs->comm_space_acc_handle = NULL;
3640 ddi_dma_free_handle(&softs->comm_space_dma_handle);
3641 softs->comm_space_dma_handle = NULL;
3642 softs->comm_space_phyaddr = (uintptr_t)NULL;
3646 * Initialize the data structures that are required for the communication
3647 * interface to operate
3649 static int
3650 aac_setup_comm_space(struct aac_softstate *softs)
3652 ddi_dma_handle_t dma = softs->comm_space_dma_handle;
3653 ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3654 uint32_t comm_space_phyaddr;
3655 struct aac_adapter_init *initp;
3656 int qoffset;
3658 comm_space_phyaddr = softs->comm_space_phyaddr;
3660 /* Setup adapter init struct */
3661 initp = &softs->comm_space->init_data;
3662 bzero(initp, sizeof (struct aac_adapter_init));
3664 ddi_put32(acc, &initp->InitStructRevision, AAC_INIT_STRUCT_REVISION);
3665 ddi_put32(acc, &initp->HostElapsedSeconds, ddi_get_time());
3667 /* Setup new/old comm. specific data */
3668 if (softs->flags & AAC_FLAGS_RAW_IO) {
3669 uint32_t init_flags = 0;
3671 if (softs->flags & AAC_FLAGS_NEW_COMM)
3672 init_flags |= AAC_INIT_FLAGS_NEW_COMM_SUPPORTED;
3673 /* AAC_SUPPORTED_POWER_MANAGEMENT */
3674 init_flags |= AAC_INIT_FLAGS_DRIVER_SUPPORTS_PM;
3675 init_flags |= AAC_INIT_FLAGS_DRIVER_USES_UTC_TIME;
3677 ddi_put32(acc, &initp->InitStructRevision,
3678 AAC_INIT_STRUCT_REVISION_4);
3679 ddi_put32(acc, &initp->InitFlags, init_flags);
3680 /* Setup the preferred settings */
3681 ddi_put32(acc, &initp->MaxIoCommands, softs->aac_max_fibs);
3682 ddi_put32(acc, &initp->MaxIoSize,
3683 (softs->aac_max_sectors << 9));
3684 ddi_put32(acc, &initp->MaxFibSize, softs->aac_max_fib_size);
3685 } else {
3687 * Tells the adapter about the physical location of various
3688 * important shared data structures
3690 ddi_put32(acc, &initp->AdapterFibsPhysicalAddress,
3691 comm_space_phyaddr + \
3692 offsetof(struct aac_comm_space, adapter_fibs));
3693 ddi_put32(acc, &initp->AdapterFibsVirtualAddress, 0);
3694 ddi_put32(acc, &initp->AdapterFibAlign, AAC_FIB_SIZE);
3695 ddi_put32(acc, &initp->AdapterFibsSize,
3696 AAC_ADAPTER_FIBS * AAC_FIB_SIZE);
3697 ddi_put32(acc, &initp->PrintfBufferAddress,
3698 comm_space_phyaddr + \
3699 offsetof(struct aac_comm_space, adapter_print_buf));
3700 ddi_put32(acc, &initp->PrintfBufferSize,
3701 AAC_ADAPTER_PRINT_BUFSIZE);
3702 ddi_put32(acc, &initp->MiniPortRevision,
3703 AAC_INIT_STRUCT_MINIPORT_REVISION);
3704 ddi_put32(acc, &initp->HostPhysMemPages, AAC_MAX_PFN);
3706 qoffset = (comm_space_phyaddr + \
3707 offsetof(struct aac_comm_space, qtable)) % \
3708 AAC_QUEUE_ALIGN;
3709 if (qoffset)
3710 qoffset = AAC_QUEUE_ALIGN - qoffset;
3711 softs->qtablep = (struct aac_queue_table *) \
3712 ((char *)&softs->comm_space->qtable + qoffset);
3713 ddi_put32(acc, &initp->CommHeaderAddress, comm_space_phyaddr + \
3714 offsetof(struct aac_comm_space, qtable) + qoffset);
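	/*
	 * Illustrative alignment example (AAC_QUEUE_ALIGN assumed to be a
	 * power of two, e.g. 64): if the physical address of the qtable
	 * field ends in 0x30, the first modulo gives qoffset = 0x30, the
	 * adjustment above turns it into 64 - 0x30 = 0x10, and both the
	 * virtual pointer and the address handed to the adapter are bumped
	 * by 0x10 so the queue table starts on the next aligned boundary.
	 */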
3716 /* Init queue table */
3717 ddi_put32(acc, &softs->qtablep-> \
3718 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_PRODUCER_INDEX],
3719 AAC_HOST_NORM_CMD_ENTRIES);
3720 ddi_put32(acc, &softs->qtablep-> \
3721 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_CONSUMER_INDEX],
3722 AAC_HOST_NORM_CMD_ENTRIES);
3723 ddi_put32(acc, &softs->qtablep-> \
3724 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_PRODUCER_INDEX],
3725 AAC_HOST_HIGH_CMD_ENTRIES);
3726 ddi_put32(acc, &softs->qtablep-> \
3727 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_CONSUMER_INDEX],
3728 AAC_HOST_HIGH_CMD_ENTRIES);
3729 ddi_put32(acc, &softs->qtablep-> \
3730 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_PRODUCER_INDEX],
3731 AAC_ADAP_NORM_CMD_ENTRIES);
3732 ddi_put32(acc, &softs->qtablep-> \
3733 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_CONSUMER_INDEX],
3734 AAC_ADAP_NORM_CMD_ENTRIES);
3735 ddi_put32(acc, &softs->qtablep-> \
3736 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_PRODUCER_INDEX],
3737 AAC_ADAP_HIGH_CMD_ENTRIES);
3738 ddi_put32(acc, &softs->qtablep-> \
3739 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_CONSUMER_INDEX],
3740 AAC_ADAP_HIGH_CMD_ENTRIES);
3741 ddi_put32(acc, &softs->qtablep-> \
3742 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_PRODUCER_INDEX],
3743 AAC_HOST_NORM_RESP_ENTRIES);
3744 ddi_put32(acc, &softs->qtablep-> \
3745 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_CONSUMER_INDEX],
3746 AAC_HOST_NORM_RESP_ENTRIES);
3747 ddi_put32(acc, &softs->qtablep-> \
3748 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_PRODUCER_INDEX],
3749 AAC_HOST_HIGH_RESP_ENTRIES);
3750 ddi_put32(acc, &softs->qtablep-> \
3751 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_CONSUMER_INDEX],
3752 AAC_HOST_HIGH_RESP_ENTRIES);
3753 ddi_put32(acc, &softs->qtablep-> \
3754 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_PRODUCER_INDEX],
3755 AAC_ADAP_NORM_RESP_ENTRIES);
3756 ddi_put32(acc, &softs->qtablep-> \
3757 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_CONSUMER_INDEX],
3758 AAC_ADAP_NORM_RESP_ENTRIES);
3759 ddi_put32(acc, &softs->qtablep-> \
3760 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_PRODUCER_INDEX],
3761 AAC_ADAP_HIGH_RESP_ENTRIES);
3762 ddi_put32(acc, &softs->qtablep-> \
3763 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_CONSUMER_INDEX],
3764 AAC_ADAP_HIGH_RESP_ENTRIES);
3766 /* Init queue entries */
3767 softs->qentries[AAC_HOST_NORM_CMD_Q] =
3768 &softs->qtablep->qt_HostNormCmdQueue[0];
3769 softs->qentries[AAC_HOST_HIGH_CMD_Q] =
3770 &softs->qtablep->qt_HostHighCmdQueue[0];
3771 softs->qentries[AAC_ADAP_NORM_CMD_Q] =
3772 &softs->qtablep->qt_AdapNormCmdQueue[0];
3773 softs->qentries[AAC_ADAP_HIGH_CMD_Q] =
3774 &softs->qtablep->qt_AdapHighCmdQueue[0];
3775 softs->qentries[AAC_HOST_NORM_RESP_Q] =
3776 &softs->qtablep->qt_HostNormRespQueue[0];
3777 softs->qentries[AAC_HOST_HIGH_RESP_Q] =
3778 &softs->qtablep->qt_HostHighRespQueue[0];
3779 softs->qentries[AAC_ADAP_NORM_RESP_Q] =
3780 &softs->qtablep->qt_AdapNormRespQueue[0];
3781 softs->qentries[AAC_ADAP_HIGH_RESP_Q] =
3782 &softs->qtablep->qt_AdapHighRespQueue[0];
3784 (void) ddi_dma_sync(dma, 0, 0, DDI_DMA_SYNC_FORDEV);
3786 /* Send init structure to the card */
3787 if (aac_sync_mbcommand(softs, AAC_MONKER_INITSTRUCT,
3788 comm_space_phyaddr + \
3789 offsetof(struct aac_comm_space, init_data),
3790 0, 0, 0, NULL) == AACERR) {
3791 AACDB_PRINT(softs, CE_WARN,
3792 "Cannot send init structure to adapter");
3793 return (AACERR);
3796 return (AACOK);
3799 static uchar_t *
3800 aac_vendor_id(struct aac_softstate *softs, uchar_t *buf)
3802 (void) memset(buf, ' ', AAC_VENDOR_LEN);
3803 bcopy(softs->vendor_name, buf, strlen(softs->vendor_name));
3804 return (buf + AAC_VENDOR_LEN);
3807 static uchar_t *
3808 aac_product_id(struct aac_softstate *softs, uchar_t *buf)
3810 (void) memset(buf, ' ', AAC_PRODUCT_LEN);
3811 bcopy(softs->product_name, buf, strlen(softs->product_name));
3812 return (buf + AAC_PRODUCT_LEN);
3816 * Construct unit serial number from container uid
3818 static uchar_t *
3819 aac_lun_serialno(struct aac_softstate *softs, int tgt, uchar_t *buf)
3821 int i, d;
3822 uint32_t uid;
3824 ASSERT(tgt >= 0 && tgt < AAC_MAX_LD);
3826 uid = softs->containers[tgt].uid;
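	/*
	 * Illustrative example: a uid of 0x0012ABCD is emitted by the loop
	 * below as the eight ASCII characters "0012ABCD", most significant
	 * nibble first, and the returned pointer ends up just past them.
	 */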
3827 for (i = 7; i >= 0; i--) {
3828 d = uid & 0xf;
3829 buf[i] = d > 9 ? 'A' + (d - 0xa) : '0' + d;
3830 uid >>= 4;
3832 return (buf + 8);
3836 * SPC-3 7.5 INQUIRY command implementation
3838 static void
3839 aac_inquiry(struct aac_softstate *softs, struct scsi_pkt *pkt,
3840 union scsi_cdb *cdbp, struct buf *bp)
3842 int tgt = pkt->pkt_address.a_target;
3843 char *b_addr = NULL;
3844 uchar_t page = cdbp->cdb_opaque[2];
3846 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_CMDDT) {
3847 /* Command Support Data is not supported */
3848 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x00, 0);
3849 return;
3852 if (bp && bp->b_un.b_addr && bp->b_bcount) {
3853 if (bp->b_flags & (B_PHYS | B_PAGEIO))
3854 bp_mapin(bp);
3855 b_addr = bp->b_un.b_addr;
3858 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_EVPD) {
3859 uchar_t *vpdp = (uchar_t *)b_addr;
3860 uchar_t *idp, *sp;
3862 /* SPC-3 8.4 Vital product data parameters */
3863 switch (page) {
3864 case 0x00:
3865 /* Supported VPD pages */
3866 if (vpdp == NULL ||
3867 bp->b_bcount < (AAC_VPD_PAGE_DATA + 3))
3868 return;
3869 bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3870 vpdp[AAC_VPD_PAGE_CODE] = 0x00;
3871 vpdp[AAC_VPD_PAGE_LENGTH] = 3;
3873 vpdp[AAC_VPD_PAGE_DATA] = 0x00;
3874 vpdp[AAC_VPD_PAGE_DATA + 1] = 0x80;
3875 vpdp[AAC_VPD_PAGE_DATA + 2] = 0x83;
3877 pkt->pkt_state |= STATE_XFERRED_DATA;
3878 break;
3880 case 0x80:
3881 /* Unit serial number page */
3882 if (vpdp == NULL ||
3883 bp->b_bcount < (AAC_VPD_PAGE_DATA + 8))
3884 return;
3885 bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3886 vpdp[AAC_VPD_PAGE_CODE] = 0x80;
3887 vpdp[AAC_VPD_PAGE_LENGTH] = 8;
3889 sp = &vpdp[AAC_VPD_PAGE_DATA];
3890 (void) aac_lun_serialno(softs, tgt, sp);
3892 pkt->pkt_state |= STATE_XFERRED_DATA;
3893 break;
3895 case 0x83:
3896 /* Device identification page */
3897 if (vpdp == NULL ||
3898 bp->b_bcount < (AAC_VPD_PAGE_DATA + 32))
3899 return;
3900 bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3901 vpdp[AAC_VPD_PAGE_CODE] = 0x83;
3903 idp = &vpdp[AAC_VPD_PAGE_DATA];
3904 bzero(idp, AAC_VPD_ID_LENGTH);
3905 idp[AAC_VPD_ID_CODESET] = 0x02;
3906 idp[AAC_VPD_ID_TYPE] = 0x01;
3909 * SPC-3 Table 111 - Identifier type
3910 * One recommended method of constructing the remainder
3911 * of the identifier field is to concatenate the product
3912 * identification field from the standard INQUIRY data
3913 * field and the product serial number field from the
3914 * unit serial number page.
3916 sp = &idp[AAC_VPD_ID_DATA];
3917 sp = aac_vendor_id(softs, sp);
3918 sp = aac_product_id(softs, sp);
3919 sp = aac_lun_serialno(softs, tgt, sp);
3920 idp[AAC_VPD_ID_LENGTH] = (uintptr_t)sp - \
3921 (uintptr_t)&idp[AAC_VPD_ID_DATA];
3923 vpdp[AAC_VPD_PAGE_LENGTH] = (uintptr_t)sp - \
3924 (uintptr_t)&vpdp[AAC_VPD_PAGE_DATA];
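			/*
			 * Illustrative layout, assuming the standard 8-byte
			 * vendor and 16-byte product INQUIRY fields: the
			 * identifier built above is vendor (8) + product (16)
			 * + serial number (8) = 32 bytes, matching the
			 * AAC_VPD_PAGE_DATA + 32 length check at the top of
			 * this case.
			 */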
3925 pkt->pkt_state |= STATE_XFERRED_DATA;
3926 break;
3928 default:
3929 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST,
3930 0x24, 0x00, 0);
3931 break;
3933 } else {
3934 struct scsi_inquiry *inqp = (struct scsi_inquiry *)b_addr;
3935 size_t len = sizeof (struct scsi_inquiry);
3937 if (page != 0) {
3938 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST,
3939 0x24, 0x00, 0);
3940 return;
3942 if (inqp == NULL || bp->b_bcount < len)
3943 return;
3945 bzero(inqp, len);
3946 inqp->inq_len = AAC_ADDITIONAL_LEN;
3947 inqp->inq_ansi = AAC_ANSI_VER;
3948 inqp->inq_rdf = AAC_RESP_DATA_FORMAT;
3949 (void) aac_vendor_id(softs, (uchar_t *)inqp->inq_vid);
3950 (void) aac_product_id(softs, (uchar_t *)inqp->inq_pid);
3951 bcopy("V1.0", inqp->inq_revision, 4);
3952 inqp->inq_cmdque = 1; /* enable tagged-queuing */
3954 * For the "sd-max-xfer-size" property, which may impact performance
3955 * when the number of IO threads increases.
3957 inqp->inq_wbus32 = 1;
3959 pkt->pkt_state |= STATE_XFERRED_DATA;
3964 * SPC-3 7.10 MODE SENSE command implementation
3966 static void
3967 aac_mode_sense(struct aac_softstate *softs, struct scsi_pkt *pkt,
3968 union scsi_cdb *cdbp, struct buf *bp, int capacity)
3970 uchar_t pagecode;
3971 struct mode_header *headerp;
3972 struct mode_header_g1 *g1_headerp;
3973 unsigned int ncyl;
3974 caddr_t sense_data;
3975 caddr_t next_page;
3976 size_t sdata_size;
3977 size_t pages_size;
3978 int unsupport_page = 0;
3980 ASSERT(cdbp->scc_cmd == SCMD_MODE_SENSE ||
3981 cdbp->scc_cmd == SCMD_MODE_SENSE_G1);
3983 if (!(bp && bp->b_un.b_addr && bp->b_bcount))
3984 return;
3986 if (bp->b_flags & (B_PHYS | B_PAGEIO))
3987 bp_mapin(bp);
3988 pkt->pkt_state |= STATE_XFERRED_DATA;
3989 pagecode = cdbp->cdb_un.sg.scsi[0] & 0x3F;
3991 /* calculate the size of the needed buffer */
3992 if (cdbp->scc_cmd == SCMD_MODE_SENSE)
3993 sdata_size = MODE_HEADER_LENGTH;
3994 else /* must be SCMD_MODE_SENSE_G1 */
3995 sdata_size = MODE_HEADER_LENGTH_G1;
3997 pages_size = 0;
3998 switch (pagecode) {
3999 case SD_MODE_SENSE_PAGE3_CODE:
4000 pages_size += sizeof (struct mode_format);
4001 break;
4003 case SD_MODE_SENSE_PAGE4_CODE:
4004 pages_size += sizeof (struct mode_geometry);
4005 break;
4007 case MODEPAGE_CTRL_MODE:
4008 if (softs->flags & AAC_FLAGS_LBA_64BIT) {
4009 pages_size += sizeof (struct mode_control_scsi3);
4010 } else {
4011 unsupport_page = 1;
4013 break;
4015 case MODEPAGE_ALLPAGES:
4016 if (softs->flags & AAC_FLAGS_LBA_64BIT) {
4017 pages_size += sizeof (struct mode_format) +
4018 sizeof (struct mode_geometry) +
4019 sizeof (struct mode_control_scsi3);
4020 } else {
4021 pages_size += sizeof (struct mode_format) +
4022 sizeof (struct mode_geometry);
4024 break;
4026 default:
4027 /* unsupported pages */
4028 unsupport_page = 1;
4031 /* allocate a buffer to hold the sense data */
4032 sdata_size += pages_size;
4033 sense_data = kmem_zalloc(sdata_size, KM_SLEEP);
4035 if (cdbp->scc_cmd == SCMD_MODE_SENSE) {
4036 headerp = (struct mode_header *)sense_data;
4037 headerp->length = MODE_HEADER_LENGTH + pages_size -
4038 sizeof (headerp->length);
4039 headerp->bdesc_length = 0;
4040 next_page = sense_data + sizeof (struct mode_header);
4041 } else {
4042 g1_headerp = (void *)sense_data;
4043 g1_headerp->length = BE_16(MODE_HEADER_LENGTH_G1 + pages_size -
4044 sizeof (g1_headerp->length));
4045 g1_headerp->bdesc_length = 0;
4046 next_page = sense_data + sizeof (struct mode_header_g1);
4049 if (unsupport_page)
4050 goto finish;
4052 if (pagecode == SD_MODE_SENSE_PAGE3_CODE ||
4053 pagecode == MODEPAGE_ALLPAGES) {
4054 /* SBC-3 7.1.3.3 Format device page */
4055 struct mode_format *page3p;
4057 page3p = (void *)next_page;
4058 page3p->mode_page.code = SD_MODE_SENSE_PAGE3_CODE;
4059 page3p->mode_page.length = sizeof (struct mode_format);
4060 page3p->data_bytes_sect = BE_16(AAC_SECTOR_SIZE);
4061 page3p->sect_track = BE_16(AAC_SECTORS_PER_TRACK);
4063 next_page += sizeof (struct mode_format);
4066 if (pagecode == SD_MODE_SENSE_PAGE4_CODE ||
4067 pagecode == MODEPAGE_ALLPAGES) {
4068 /* SBC-3 7.1.3.8 Rigid disk device geometry page */
4069 struct mode_geometry *page4p;
4071 page4p = (void *)next_page;
4072 page4p->mode_page.code = SD_MODE_SENSE_PAGE4_CODE;
4073 page4p->mode_page.length = sizeof (struct mode_geometry);
4074 page4p->heads = AAC_NUMBER_OF_HEADS;
4075 page4p->rpm = BE_16(AAC_ROTATION_SPEED);
4076 ncyl = capacity / (AAC_NUMBER_OF_HEADS * AAC_SECTORS_PER_TRACK);
4077 page4p->cyl_lb = ncyl & 0xff;
4078 page4p->cyl_mb = (ncyl >> 8) & 0xff;
4079 page4p->cyl_ub = (ncyl >> 16) & 0xff;
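		/*
		 * Illustrative example (head/sector constants assumed to be
		 * the common 255 and 63): a container of 62914560 blocks
		 * gives ncyl = 62914560 / (255 * 63) = 3916 = 0xF4C, which
		 * is split into cyl_lb = 0x4C, cyl_mb = 0x0F, cyl_ub = 0x00.
		 */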
4081 next_page += sizeof (struct mode_geometry);
4084 if ((pagecode == MODEPAGE_CTRL_MODE || pagecode == MODEPAGE_ALLPAGES) &&
4085 softs->flags & AAC_FLAGS_LBA_64BIT) {
4086 /* 64-bit LBA needs large sense data */
4087 struct mode_control_scsi3 *mctl;
4089 mctl = (void *)next_page;
4090 mctl->mode_page.code = MODEPAGE_CTRL_MODE;
4091 mctl->mode_page.length =
4092 sizeof (struct mode_control_scsi3) -
4093 sizeof (struct mode_page);
4094 mctl->d_sense = 1;
4097 finish:
4098 /* copyout the valid data. */
4099 bcopy(sense_data, bp->b_un.b_addr, min(sdata_size, bp->b_bcount));
4100 kmem_free(sense_data, sdata_size);
4103 static int
4104 aac_name_node(dev_info_t *dip, char *name, int len)
4106 int tgt, lun;
4108 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4109 DDI_PROP_DONTPASS, "target", -1);
4110 if (tgt == -1)
4111 return (DDI_FAILURE);
4112 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4113 DDI_PROP_DONTPASS, "lun", -1);
4114 if (lun == -1)
4115 return (DDI_FAILURE);
4117 (void) snprintf(name, len, "%x,%x", tgt, lun);
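	/*
	 * Example: a child node with "target" = 2 and "lun" = 0 gets the
	 * unit-address name "2,0" from the "%x,%x" format above.
	 */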
4118 return (DDI_SUCCESS);
4121 /*ARGSUSED*/
4122 static int
4123 aac_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
4124 scsi_hba_tran_t *tran, struct scsi_device *sd)
4126 struct aac_softstate *softs = AAC_TRAN2SOFTS(tran);
4127 #if defined(DEBUG)
4128 int ctl = ddi_get_instance(softs->devinfo_p);
4129 #endif
4130 uint16_t tgt = sd->sd_address.a_target;
4131 uint8_t lun = sd->sd_address.a_lun;
4132 struct aac_device *dvp;
4134 DBCALLED(softs, 2);
4136 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
4138 * If no persistent node exists, we don't allow a .conf node
4139 * to be created.
4141 if (aac_find_child(softs, tgt, lun) != NULL) {
4142 if (ndi_merge_node(tgt_dip, aac_name_node) !=
4143 DDI_SUCCESS)
4144 /* Create this .conf node */
4145 return (DDI_SUCCESS);
4147 return (DDI_FAILURE);
4151 * Only support containers/physical devices that have been
4152 * detected and are valid
4154 mutex_enter(&softs->io_lock);
4155 if (tgt >= AAC_MAX_DEV(softs)) {
4156 AACDB_PRINT_TRAN(softs,
4157 "aac_tran_tgt_init: c%dt%dL%d out", ctl, tgt, lun);
4158 mutex_exit(&softs->io_lock);
4159 return (DDI_FAILURE);
4162 if (tgt < AAC_MAX_LD) {
4163 dvp = (struct aac_device *)&softs->containers[tgt];
4164 if (lun != 0 || !AAC_DEV_IS_VALID(dvp)) {
4165 AACDB_PRINT_TRAN(softs, "aac_tran_tgt_init: c%dt%dL%d",
4166 ctl, tgt, lun);
4167 mutex_exit(&softs->io_lock);
4168 return (DDI_FAILURE);
4171 * Save the tgt_dip for the given target if one doesn't exist
4172 * already. Dips for non-existent tgts will be cleared in
4173 * tgt_free.
4175 if (softs->containers[tgt].dev.dip == NULL &&
4176 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0)
4177 softs->containers[tgt].dev.dip = tgt_dip;
4178 } else {
4179 dvp = (struct aac_device *)&softs->nondasds[AAC_PD(tgt)];
4181 * Save the tgt_dip for the given target if one doesn't exist
4182 * already. Dips for non-existent tgts will be cleared in
4183 * tgt_free.
4186 if (softs->nondasds[AAC_PD(tgt)].dev.dip == NULL &&
4187 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0)
4188 softs->nondasds[AAC_PD(tgt)].dev.dip = tgt_dip;
4191 if (softs->flags & AAC_FLAGS_BRKUP) {
4192 if (ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
4193 "buf_break", 1) != DDI_PROP_SUCCESS) {
4194 cmn_err(CE_CONT, "unable to create "
4195 "property for t%dL%d (buf_break)", tgt, lun);
4199 AACDB_PRINT(softs, CE_NOTE,
4200 "aac_tran_tgt_init: c%dt%dL%d ok (%s)", ctl, tgt, lun,
4201 (dvp->type == AAC_DEV_PD) ? "pd" : "ld");
4202 mutex_exit(&softs->io_lock);
4203 return (DDI_SUCCESS);
4206 static void
4207 aac_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
4208 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
4210 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran))
4212 struct aac_softstate *softs = SD2AAC(sd);
4213 int tgt = sd->sd_address.a_target;
4215 mutex_enter(&softs->io_lock);
4216 if (tgt < AAC_MAX_LD) {
4217 if (softs->containers[tgt].dev.dip == tgt_dip)
4218 softs->containers[tgt].dev.dip = NULL;
4219 } else {
4220 if (softs->nondasds[AAC_PD(tgt)].dev.dip == tgt_dip)
4221 softs->nondasds[AAC_PD(tgt)].dev.dip = NULL;
4222 softs->nondasds[AAC_PD(tgt)].dev.flags &= ~AAC_DFLAG_VALID;
4224 mutex_exit(&softs->io_lock);
4228 * Check if the firmware is Up And Running. If it is in the Kernel Panic
4229 * state, (BlinkLED code + 1) is returned.
4230 * 0 -- firmware up and running
4231 * -1 -- firmware dead
4232 * >0 -- firmware kernel panic
4234 static int
4235 aac_check_adapter_health(struct aac_softstate *softs)
4237 int rval;
4239 rval = PCI_MEM_GET32(softs, AAC_OMR0);
4241 if (rval & AAC_KERNEL_UP_AND_RUNNING) {
4242 rval = 0;
4243 } else if (rval & AAC_KERNEL_PANIC) {
4244 cmn_err(CE_WARN, "firmware panic");
4245 rval = ((rval >> 16) & 0xff) + 1; /* avoid 0 as return value */
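		/*
		 * Illustrative example (register value assumed): a BlinkLED
		 * code of 0x05 in bits 23:16 of AAC_OMR0 makes this routine
		 * return 0x05 + 1 = 6, which callers can tell apart from the
		 * 0 (healthy) and -1 (dead) cases.
		 */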
4246 } else {
4247 cmn_err(CE_WARN, "firmware dead");
4248 rval = -1;
4250 return (rval);
4253 static void
4254 aac_abort_iocmd(struct aac_softstate *softs, struct aac_cmd *acp,
4255 uchar_t reason)
4257 acp->flags |= AAC_CMD_ABORT;
4259 if (acp->pkt) {
4260 if (acp->slotp) { /* outstanding cmd */
4261 acp->pkt->pkt_state |= STATE_GOT_STATUS;
4264 switch (reason) {
4265 case CMD_TIMEOUT:
4266 AACDB_PRINT(softs, CE_NOTE, "CMD_TIMEOUT: acp=0x%p",
4267 acp);
4268 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT,
4269 STAT_TIMEOUT | STAT_BUS_RESET);
4270 break;
4271 case CMD_RESET:
4272 /* aac supports only RESET_ALL */
4273 AACDB_PRINT(softs, CE_NOTE, "CMD_RESET: acp=0x%p", acp);
4274 aac_set_pkt_reason(softs, acp, CMD_RESET,
4275 STAT_BUS_RESET);
4276 break;
4277 case CMD_ABORTED:
4278 AACDB_PRINT(softs, CE_NOTE, "CMD_ABORTED: acp=0x%p",
4279 acp);
4280 aac_set_pkt_reason(softs, acp, CMD_ABORTED,
4281 STAT_ABORTED);
4282 break;
4285 aac_end_io(softs, acp);
4289 * Abort all the pending commands of type iocmd or just the command pkt
4290 * corresponding to pkt
4292 static void
4293 aac_abort_iocmds(struct aac_softstate *softs, int iocmd, struct scsi_pkt *pkt,
4294 int reason)
4296 struct aac_cmd *ac_arg, *acp;
4297 int i;
4299 if (pkt == NULL) {
4300 ac_arg = NULL;
4301 } else {
4302 ac_arg = PKT2AC(pkt);
4303 iocmd = (ac_arg->flags & AAC_CMD_SYNC) ?
4304 AAC_IOCMD_SYNC : AAC_IOCMD_ASYNC;
4308 * a) outstanding commands on the controller
4309 * Note: should abort outstanding commands only after one
4310 * IOP reset has been done.
4312 if (iocmd & AAC_IOCMD_OUTSTANDING) {
4313 struct aac_cmd *acp;
4315 for (i = 0; i < AAC_MAX_LD; i++) {
4316 if (AAC_DEV_IS_VALID(&softs->containers[i].dev))
4317 softs->containers[i].reset = 1;
4319 while ((acp = softs->q_busy.q_head) != NULL)
4320 aac_abort_iocmd(softs, acp, reason);
4323 /* b) commands in the waiting queues */
4324 for (i = 0; i < AAC_CMDQ_NUM; i++) {
4325 if (iocmd & (1 << i)) {
4326 if (ac_arg) {
4327 aac_abort_iocmd(softs, ac_arg, reason);
4328 } else {
4329 while ((acp = softs->q_wait[i].q_head) != NULL)
4330 aac_abort_iocmd(softs, acp, reason);
4337 * The draining thread is shared among quiesce threads. It terminates
4338 * when the adapter is quiesced or stopped by aac_stop_drain().
4340 static void
4341 aac_check_drain(void *arg)
4343 struct aac_softstate *softs = arg;
4345 mutex_enter(&softs->io_lock);
4346 if (softs->ndrains) {
4347 softs->drain_timeid = 0;
4349 * If both ASYNC and SYNC bus throttle are held,
4350 * wake up threads only when both are drained out.
4352 if ((softs->bus_throttle[AAC_CMDQ_ASYNC] > 0 ||
4353 softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) &&
4354 (softs->bus_throttle[AAC_CMDQ_SYNC] > 0 ||
4355 softs->bus_ncmds[AAC_CMDQ_SYNC] == 0))
4356 cv_broadcast(&softs->drain_cv);
4357 else
4358 softs->drain_timeid = timeout(aac_check_drain, softs,
4359 AAC_QUIESCE_TICK * drv_usectohz(1000000));
4361 mutex_exit(&softs->io_lock);
4365 * If not draining the outstanding cmds, drain them. Otherwise,
4366 * only update ndrains.
4368 static void
4369 aac_start_drain(struct aac_softstate *softs)
4371 if (softs->ndrains == 0) {
4372 ASSERT(softs->drain_timeid == 0);
4373 softs->drain_timeid = timeout(aac_check_drain, softs,
4374 AAC_QUIESCE_TICK * drv_usectohz(1000000));
4376 softs->ndrains++;
4380 * Stop the draining thread when no other threads use it any longer.
4381 * Side effect: io_lock may be released in the middle.
4383 static void
4384 aac_stop_drain(struct aac_softstate *softs)
4386 softs->ndrains--;
4387 if (softs->ndrains == 0) {
4388 if (softs->drain_timeid != 0) {
4389 timeout_id_t tid = softs->drain_timeid;
4391 softs->drain_timeid = 0;
4392 mutex_exit(&softs->io_lock);
4393 (void) untimeout(tid);
4394 mutex_enter(&softs->io_lock);
4400 * The following function comes from Adaptec:
4402 * Once an IOP reset is done, the driver basically has to re-initialize the
4403 * card as if coming up from a cold boot, and the driver is responsible for
4404 * any IO that is outstanding to the adapter at the time of the IOP RESET.
4405 * Prepare for the IOP RESET by making the init code modular, with the
4406 * ability to call it from multiple places.
4408 static int
4409 aac_reset_adapter(struct aac_softstate *softs)
4411 int health;
4412 uint32_t status;
4413 int rval = AAC_IOP_RESET_FAILED;
4415 DBCALLED(softs, 1);
4417 ASSERT(softs->state & AAC_STATE_RESET);
4419 ddi_fm_acc_err_clear(softs->pci_mem_handle, DDI_FME_VER0);
4420 /* Disable interrupt */
4421 AAC_DISABLE_INTR(softs);
4423 health = aac_check_adapter_health(softs);
4424 if (health == -1) {
4425 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
4426 goto finish;
4428 if (health == 0) /* flush drives if possible */
4429 (void) aac_shutdown(softs);
4431 /* Execute IOP reset */
4432 if ((aac_sync_mbcommand(softs, AAC_IOP_RESET, 0, 0, 0, 0,
4433 &status)) != AACOK) {
4434 ddi_acc_handle_t acc;
4435 struct aac_fib *fibp;
4436 struct aac_pause_command *pc;
4438 if ((status & 0xf) == 0xf) {
4439 uint32_t wait_count;
4442 * Sunrise Lake has dual cores and we must drag the
4443 * other core with us to reset simultaneously. There
4444 * are 2 bits in the Inbound Reset Control and Status
4445 * Register (offset 0x38) of the Sunrise Lake to reset
4446 * the chip without clearing out the PCI configuration
4447 * info (COMMAND & BARS).
4449 PCI_MEM_PUT32(softs, AAC_IRCSR, AAC_IRCSR_CORES_RST);
4452 * We need to wait for 5 seconds before accessing the MU
4453 * again: 10000 * 100us = 1,000,000us = 1000ms = 1s
4455 wait_count = 5 * 10000;
4456 while (wait_count) {
4457 drv_usecwait(100); /* delay 100 microseconds */
4458 wait_count--;
4460 } else {
4461 if (status == SRB_STATUS_INVALID_REQUEST)
4462 cmn_err(CE_WARN, "!IOP_RESET not supported");
4463 else /* probably timeout */
4464 cmn_err(CE_WARN, "!IOP_RESET failed");
4466 /* Unwind aac_shutdown() */
4467 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
4468 acc = softs->sync_ac.slotp->fib_acc_handle;
4470 fibp = softs->sync_ac.slotp->fibp;
4471 pc = (struct aac_pause_command *)&fibp->data[0];
4473 bzero(pc, sizeof (*pc));
4474 ddi_put32(acc, &pc->Command, VM_ContainerConfig);
4475 ddi_put32(acc, &pc->Type, CT_PAUSE_IO);
4476 ddi_put32(acc, &pc->Timeout, 1);
4477 ddi_put32(acc, &pc->Min, 1);
4478 ddi_put32(acc, &pc->NoRescan, 1);
4480 (void) aac_sync_fib(softs, ContainerCommand,
4481 AAC_FIB_SIZEOF(struct aac_pause_command));
4482 aac_sync_fib_slot_release(softs, &softs->sync_ac);
4484 if (aac_check_adapter_health(softs) != 0)
4485 ddi_fm_service_impact(softs->devinfo_p,
4486 DDI_SERVICE_LOST);
4487 else
4489 * IOP reset not supported or IOP not reset
4491 rval = AAC_IOP_RESET_ABNORMAL;
4492 goto finish;
4497 * Re-read and renegotiate the FIB parameters, as one of the actions
4498 * that can result from an IOP reset is the running of a new firmware
4499 * image.
4501 if (aac_common_attach(softs) != AACOK)
4502 goto finish;
4504 rval = AAC_IOP_RESET_SUCCEED;
4506 finish:
4507 AAC_ENABLE_INTR(softs);
4508 return (rval);
4511 static void
4512 aac_set_throttle(struct aac_softstate *softs, struct aac_device *dvp, int q,
4513 int throttle)
4516 * If the bus is draining/quiesced, no changes to the throttles
4517 * are allowed. All throttles should have been set to 0.
4519 if ((softs->state & AAC_STATE_QUIESCED) || softs->ndrains)
4520 return;
4521 dvp->throttle[q] = throttle;
4524 static void
4525 aac_hold_bus(struct aac_softstate *softs, int iocmds)
4527 int i, q;
4529 /* Hold bus by holding every device on the bus */
4530 for (q = 0; q < AAC_CMDQ_NUM; q++) {
4531 if (iocmds & (1 << q)) {
4532 softs->bus_throttle[q] = 0;
4533 for (i = 0; i < AAC_MAX_LD; i++)
4534 aac_set_throttle(softs,
4535 &softs->containers[i].dev, q, 0);
4536 for (i = 0; i < AAC_MAX_PD(softs); i++)
4537 aac_set_throttle(softs,
4538 &softs->nondasds[i].dev, q, 0);
4543 static void
4544 aac_unhold_bus(struct aac_softstate *softs, int iocmds)
4546 int i, q, max_throttle;
4548 for (q = 0; q < AAC_CMDQ_NUM; q++) {
4549 if (iocmds & (1 << q)) {
4551 * Should not unhold the AAC_IOCMD_ASYNC bus if it has been
4552 * quiesced or is being drained by quiesce
4553 * threads.
4555 if (q == AAC_CMDQ_ASYNC && ((softs->state &
4556 AAC_STATE_QUIESCED) || softs->ndrains))
4557 continue;
4558 if (q == AAC_CMDQ_ASYNC)
4559 max_throttle = softs->total_slots -
4560 AAC_MGT_SLOT_NUM;
4561 else
4562 max_throttle = softs->total_slots - 1;
4563 softs->bus_throttle[q] = max_throttle;
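			/*
			 * Illustrative note (rationale assumed): with N total
			 * slots the async queue is throttled to
			 * N - AAC_MGT_SLOT_NUM, leaving slots for management
			 * commands, while the sync queue keeps N - 1.
			 */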
4564 for (i = 0; i < AAC_MAX_LD; i++)
4565 aac_set_throttle(softs,
4566 &softs->containers[i].dev,
4567 q, max_throttle);
4568 for (i = 0; i < AAC_MAX_PD(softs); i++)
4569 aac_set_throttle(softs, &softs->nondasds[i].dev,
4570 q, max_throttle);
4575 static int
4576 aac_do_reset(struct aac_softstate *softs)
4578 int health;
4579 int rval;
4581 softs->state |= AAC_STATE_RESET;
4582 health = aac_check_adapter_health(softs);
4585 * Hold off new io commands and wait for all outstanding io
4586 * commands to complete.
4588 if (health == 0) {
4589 int sync_cmds = softs->bus_ncmds[AAC_CMDQ_SYNC];
4590 int async_cmds = softs->bus_ncmds[AAC_CMDQ_ASYNC];
4592 if (sync_cmds == 0 && async_cmds == 0) {
4593 rval = AAC_IOP_RESET_SUCCEED;
4594 goto finish;
4597 * Give the adapter up to AAC_QUIESCE_TIMEOUT more seconds
4598 * to complete the outstanding io commands
4600 int timeout = AAC_QUIESCE_TIMEOUT * 1000 * 10;
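		/*
		 * The poll loop below waits 100us per iteration, so
		 * AAC_QUIESCE_TIMEOUT seconds corresponds to
		 * AAC_QUIESCE_TIMEOUT * 10000 iterations (the 1000 * 10
		 * factor above).
		 */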
4601 int (*intr_handler)(struct aac_softstate *);
4603 aac_hold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
4605 * Poll the adapter ourselves in case interrupts are disabled,
4606 * and to avoid releasing the io_lock.
4608 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ?
4609 aac_process_intr_new : aac_process_intr_old;
4610 while ((softs->bus_ncmds[AAC_CMDQ_SYNC] ||
4611 softs->bus_ncmds[AAC_CMDQ_ASYNC]) && timeout) {
4612 drv_usecwait(100);
4613 (void) intr_handler(softs);
4614 timeout--;
4616 aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
4618 if (softs->bus_ncmds[AAC_CMDQ_SYNC] == 0 &&
4619 softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) {
4620 /* Cmds drained out */
4621 rval = AAC_IOP_RESET_SUCCEED;
4622 goto finish;
4623 } else if (softs->bus_ncmds[AAC_CMDQ_SYNC] < sync_cmds ||
4624 softs->bus_ncmds[AAC_CMDQ_ASYNC] < async_cmds) {
4625 /* Cmds not drained out, adapter overloaded */
4626 rval = AAC_IOP_RESET_ABNORMAL;
4627 goto finish;
4632 * If a longer wait still can't drain the outstanding io
4633 * commands, do an IOP reset.
4635 if ((rval = aac_reset_adapter(softs)) == AAC_IOP_RESET_FAILED)
4636 softs->state |= AAC_STATE_DEAD;
4638 finish:
4639 softs->state &= ~AAC_STATE_RESET;
4640 return (rval);
4643 static int
4644 aac_tran_reset(struct scsi_address *ap, int level)
4646 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4647 int rval;
4649 DBCALLED(softs, 1);
4651 if (level != RESET_ALL) {
4652 cmn_err(CE_NOTE, "!reset target/lun not supported");
4653 return (0);
4656 mutex_enter(&softs->io_lock);
4657 switch (rval = aac_do_reset(softs)) {
4658 case AAC_IOP_RESET_SUCCEED:
4659 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING | AAC_IOCMD_ASYNC,
4660 NULL, CMD_RESET);
4661 aac_start_waiting_io(softs);
4662 break;
4663 case AAC_IOP_RESET_FAILED:
4664 /* Abort IOCTL cmds when adapter is dead */
4665 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_RESET);
4666 break;
4667 case AAC_IOP_RESET_ABNORMAL:
4668 aac_start_waiting_io(softs);
4670 mutex_exit(&softs->io_lock);
4672 aac_drain_comp_q(softs);
4673 return (rval == 0);
4676 static int
4677 aac_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
4679 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4681 DBCALLED(softs, 1);
4683 mutex_enter(&softs->io_lock);
4684 aac_abort_iocmds(softs, 0, pkt, CMD_ABORTED);
4685 mutex_exit(&softs->io_lock);
4687 aac_drain_comp_q(softs);
4688 return (1);
4691 void
4692 aac_free_dmamap(struct aac_cmd *acp)
4694 /* Free dma mapping */
4695 if (acp->flags & AAC_CMD_DMA_VALID) {
4696 ASSERT(acp->buf_dma_handle);
4697 (void) ddi_dma_unbind_handle(acp->buf_dma_handle);
4698 acp->flags &= ~AAC_CMD_DMA_VALID;
4701 if (acp->abp != NULL) { /* free non-aligned buf DMA */
4702 ASSERT(acp->buf_dma_handle);
4703 if ((acp->flags & AAC_CMD_BUF_WRITE) == 0 && acp->bp)
4704 ddi_rep_get8(acp->abh, (uint8_t *)acp->bp->b_un.b_addr,
4705 (uint8_t *)acp->abp, acp->bp->b_bcount,
4706 DDI_DEV_AUTOINCR);
4707 ddi_dma_mem_free(&acp->abh);
4708 acp->abp = NULL;
4711 if (acp->buf_dma_handle) {
4712 ddi_dma_free_handle(&acp->buf_dma_handle);
4713 acp->buf_dma_handle = NULL;
4717 static void
4718 aac_unknown_scmd(struct aac_softstate *softs, struct aac_cmd *acp)
4720 AACDB_PRINT(softs, CE_CONT, "SCMD 0x%x not supported",
4721 ((union scsi_cdb *)(void *)acp->pkt->pkt_cdbp)->scc_cmd);
4722 aac_free_dmamap(acp);
4723 aac_set_arq_data(acp->pkt, KEY_ILLEGAL_REQUEST, 0x20, 0x00, 0);
4724 aac_soft_callback(softs, acp);
4728 * Handle command to logical device
4730 static int
4731 aac_tran_start_ld(struct aac_softstate *softs, struct aac_cmd *acp)
4733 struct aac_container *dvp;
4734 struct scsi_pkt *pkt;
4735 union scsi_cdb *cdbp;
4736 struct buf *bp;
4737 int rval;
4739 dvp = (struct aac_container *)acp->dvp;
4740 pkt = acp->pkt;
4741 cdbp = (void *)pkt->pkt_cdbp;
4742 bp = acp->bp;
4744 switch (cdbp->scc_cmd) {
4745 case SCMD_INQUIRY: /* inquiry */
4746 aac_free_dmamap(acp);
4747 aac_inquiry(softs, pkt, cdbp, bp);
4748 aac_soft_callback(softs, acp);
4749 rval = TRAN_ACCEPT;
4750 break;
4752 case SCMD_READ_CAPACITY: /* read capacity */
4753 if (bp && bp->b_un.b_addr && bp->b_bcount) {
4754 struct scsi_capacity cap;
4755 uint64_t last_lba;
4757 /* check 64-bit LBA */
4758 last_lba = dvp->size - 1;
4759 if (last_lba > 0xffffffffull) {
4760 cap.capacity = 0xfffffffful;
4761 } else {
4762 cap.capacity = BE_32(last_lba);
4764 cap.lbasize = BE_32(AAC_SECTOR_SIZE);
4766 aac_free_dmamap(acp);
4767 if (bp->b_flags & (B_PHYS|B_PAGEIO))
4768 bp_mapin(bp);
4769 bcopy(&cap, bp->b_un.b_addr, min(bp->b_bcount, 8));
4770 pkt->pkt_state |= STATE_XFERRED_DATA;
4772 aac_soft_callback(softs, acp);
4773 rval = TRAN_ACCEPT;
4774 break;
4776 case SCMD_SVC_ACTION_IN_G4: /* read capacity 16 */
4777 /* Check if containers need 64-bit LBA support */
4778 if (cdbp->cdb_opaque[1] == SSVC_ACTION_READ_CAPACITY_G4) {
4779 if (bp && bp->b_un.b_addr && bp->b_bcount) {
4780 struct scsi_capacity_16 cap16;
4781 int cap_len = sizeof (struct scsi_capacity_16);
4783 bzero(&cap16, cap_len);
4784 cap16.sc_capacity = BE_64(dvp->size - 1);
4785 cap16.sc_lbasize = BE_32(AAC_SECTOR_SIZE);
4787 aac_free_dmamap(acp);
4788 if (bp->b_flags & (B_PHYS | B_PAGEIO))
4789 bp_mapin(bp);
4790 bcopy(&cap16, bp->b_un.b_addr,
4791 min(bp->b_bcount, cap_len));
4792 pkt->pkt_state |= STATE_XFERRED_DATA;
4794 aac_soft_callback(softs, acp);
4795 } else {
4796 aac_unknown_scmd(softs, acp);
4798 rval = TRAN_ACCEPT;
4799 break;
4801 case SCMD_READ_G4: /* read_16 */
4802 case SCMD_WRITE_G4: /* write_16 */
4803 if (softs->flags & AAC_FLAGS_RAW_IO) {
4804 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */
4805 acp->blkno = ((uint64_t) \
4806 GETG4ADDR(cdbp) << 32) | \
4807 (uint32_t)GETG4ADDRTL(cdbp);
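			/*
			 * Illustrative example: a READ(16) for LBA
			 * 0x0000000180000000 carries 0x00000001 in the upper
			 * address bytes and 0x80000000 in the lower ones; the
			 * uint32_t cast above keeps the int32_t low half from
			 * being sign-extended before the OR.
			 */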
4808 goto do_io;
4810 AACDB_PRINT(softs, CE_WARN, "64-bit LBA not supported");
4811 aac_unknown_scmd(softs, acp);
4812 rval = TRAN_ACCEPT;
4813 break;
4815 case SCMD_READ: /* read_6 */
4816 case SCMD_WRITE: /* write_6 */
4817 acp->blkno = GETG0ADDR(cdbp);
4818 goto do_io;
4820 case SCMD_READ_G5: /* read_12 */
4821 case SCMD_WRITE_G5: /* write_12 */
4822 acp->blkno = GETG5ADDR(cdbp);
4823 goto do_io;
4825 case SCMD_READ_G1: /* read_10 */
4826 case SCMD_WRITE_G1: /* write_10 */
4827 acp->blkno = (uint32_t)GETG1ADDR(cdbp);
4828 do_io:
4829 if (acp->flags & AAC_CMD_DMA_VALID) {
4830 uint64_t cnt_size = dvp->size;
4833 * If LBA > array size AND rawio is used, the
4834 * adapter may hang, so check it before
4835 * sending.
4836 * NOTE: (blkno + blkcnt) may overflow
4838 if ((acp->blkno < cnt_size) &&
4839 ((acp->blkno + acp->bcount /
4840 AAC_BLK_SIZE) <= cnt_size)) {
4841 rval = aac_do_io(softs, acp);
4842 } else {
4844 * Request exceeds the capacity of disk,
4845 * set error block number to last LBA
4846 * + 1.
4848 aac_set_arq_data(pkt,
4849 KEY_ILLEGAL_REQUEST, 0x21,
4850 0x00, cnt_size);
4851 aac_soft_callback(softs, acp);
4852 rval = TRAN_ACCEPT;
4854 } else if (acp->bcount == 0) {
4855 /* For 0 length IO, just return ok */
4856 aac_soft_callback(softs, acp);
4857 rval = TRAN_ACCEPT;
4858 } else {
4859 rval = TRAN_BADPKT;
4861 break;
4863 case SCMD_MODE_SENSE: /* mode_sense_6 */
4864 case SCMD_MODE_SENSE_G1: { /* mode_sense_10 */
4865 int capacity;
4867 aac_free_dmamap(acp);
4868 if (dvp->size > 0xffffffffull)
4869 capacity = 0xfffffffful; /* 64-bit LBA */
4870 else
4871 capacity = dvp->size;
4872 aac_mode_sense(softs, pkt, cdbp, bp, capacity);
4873 aac_soft_callback(softs, acp);
4874 rval = TRAN_ACCEPT;
4875 break;
4878 case SCMD_START_STOP:
4879 if (softs->support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
4880 acp->aac_cmd_fib = aac_cmd_fib_startstop;
4881 acp->ac_comp = aac_startstop_complete;
4882 rval = aac_do_io(softs, acp);
4883 break;
4885 /* FALLTHRU */
4886 case SCMD_TEST_UNIT_READY:
4887 case SCMD_REQUEST_SENSE:
4888 case SCMD_FORMAT:
4889 aac_free_dmamap(acp);
4890 if (bp && bp->b_un.b_addr && bp->b_bcount) {
4891 if (acp->flags & AAC_CMD_BUF_READ) {
4892 if (bp->b_flags & (B_PHYS|B_PAGEIO))
4893 bp_mapin(bp);
4894 bzero(bp->b_un.b_addr, bp->b_bcount);
4896 pkt->pkt_state |= STATE_XFERRED_DATA;
4898 aac_soft_callback(softs, acp);
4899 rval = TRAN_ACCEPT;
4900 break;
4902 case SCMD_SYNCHRONIZE_CACHE:
4903 acp->flags |= AAC_CMD_NTAG;
4904 acp->aac_cmd_fib = aac_cmd_fib_sync;
4905 acp->ac_comp = aac_synccache_complete;
4906 rval = aac_do_io(softs, acp);
4907 break;
4909 case SCMD_DOORLOCK:
4910 aac_free_dmamap(acp);
4911 dvp->locked = (pkt->pkt_cdbp[4] & 0x01) ? 1 : 0;
4912 aac_soft_callback(softs, acp);
4913 rval = TRAN_ACCEPT;
4914 break;
4916 default: /* unknown command */
4917 aac_unknown_scmd(softs, acp);
4918 rval = TRAN_ACCEPT;
4919 break;
4922 return (rval);
4925 static int
4926 aac_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
4928 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4929 struct aac_cmd *acp = PKT2AC(pkt);
4930 struct aac_device *dvp = acp->dvp;
4931 int rval;
4933 DBCALLED(softs, 2);
4936 * Reinitialize some fields of ac and pkt; the packet may
4937 * have been resubmitted
4939 acp->flags &= AAC_CMD_CONSISTENT | AAC_CMD_DMA_PARTIAL | \
4940 AAC_CMD_BUF_READ | AAC_CMD_BUF_WRITE | AAC_CMD_DMA_VALID;
4941 acp->timeout = acp->pkt->pkt_time;
4942 if (pkt->pkt_flags & FLAG_NOINTR)
4943 acp->flags |= AAC_CMD_NO_INTR;
4944 #ifdef DEBUG
4945 acp->fib_flags = AACDB_FLAGS_FIB_SCMD;
4946 #endif
4947 pkt->pkt_reason = CMD_CMPLT;
4948 pkt->pkt_state = 0;
4949 pkt->pkt_statistics = 0;
4950 *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */
4952 if (acp->flags & AAC_CMD_DMA_VALID) {
4953 pkt->pkt_resid = acp->bcount;
4954 /* Consistent packets need to be sync'ed first */
4955 if ((acp->flags & AAC_CMD_CONSISTENT) &&
4956 (acp->flags & AAC_CMD_BUF_WRITE))
4957 if (aac_dma_sync_ac(acp) != AACOK) {
4958 ddi_fm_service_impact(softs->devinfo_p,
4959 DDI_SERVICE_UNAFFECTED);
4960 return (TRAN_BADPKT);
4962 } else {
4963 pkt->pkt_resid = 0;
4966 mutex_enter(&softs->io_lock);
4967 AACDB_PRINT_SCMD(softs, acp);
4968 if ((dvp->flags & (AAC_DFLAG_VALID | AAC_DFLAG_CONFIGURING)) &&
4969 !(softs->state & AAC_STATE_DEAD)) {
4970 if (dvp->type == AAC_DEV_LD) {
4971 if (ap->a_lun == 0)
4972 rval = aac_tran_start_ld(softs, acp);
4973 else
4974 goto error;
4975 } else {
4976 rval = aac_do_io(softs, acp);
4978 } else {
4979 error:
4980 #ifdef DEBUG
4981 if (!(softs->state & AAC_STATE_DEAD)) {
4982 AACDB_PRINT_TRAN(softs,
4983 "Cannot send cmd to target t%dL%d: %s",
4984 ap->a_target, ap->a_lun,
4985 "target invalid");
4986 } else {
4987 AACDB_PRINT(softs, CE_WARN,
4988 "Cannot send cmd to target t%dL%d: %s",
4989 ap->a_target, ap->a_lun,
4990 "adapter dead");
4992 #endif
4993 rval = TRAN_FATAL_ERROR;
4995 mutex_exit(&softs->io_lock);
4996 return (rval);
4999 static int
5000 aac_tran_getcap(struct scsi_address *ap, char *cap, int whom)
5002 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
5003 struct aac_device *dvp;
5004 int rval;
5006 DBCALLED(softs, 2);
5008 /* We don't allow inquiring about capabilities for other targets */
5009 if (cap == NULL || whom == 0) {
5010 AACDB_PRINT(softs, CE_WARN,
5011 "GetCap> %s not supported: whom=%d", cap, whom);
5012 return (-1);
5015 mutex_enter(&softs->io_lock);
5016 dvp = AAC_DEV(softs, ap->a_target);
5017 if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) {
5018 mutex_exit(&softs->io_lock);
5019 AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to getcap",
5020 ap->a_target, ap->a_lun);
5021 return (-1);
5024 switch (scsi_hba_lookup_capstr(cap)) {
5025 case SCSI_CAP_ARQ: /* auto request sense */
5026 rval = 1;
5027 break;
5028 case SCSI_CAP_UNTAGGED_QING:
5029 case SCSI_CAP_TAGGED_QING:
5030 rval = 1;
5031 break;
5032 case SCSI_CAP_DMA_MAX:
5033 rval = softs->dma_max;
5034 break;
5035 default:
5036 rval = -1;
5037 break;
5039 mutex_exit(&softs->io_lock);
5041 AACDB_PRINT_TRAN(softs, "GetCap> %s t%dL%d: rval=%d",
5042 cap, ap->a_target, ap->a_lun, rval);
5043 return (rval);
5046 /*ARGSUSED*/
5047 static int
5048 aac_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
5050 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
5051 struct aac_device *dvp;
5052 int rval;
5054 DBCALLED(softs, 2);
5056 /* We don't allow setting capabilities for other targets */
5057 if (cap == NULL || whom == 0) {
5058 AACDB_PRINT(softs, CE_WARN,
5059 "SetCap> %s not supported: whom=%d", cap, whom);
5060 return (-1);
5063 mutex_enter(&softs->io_lock);
5064 dvp = AAC_DEV(softs, ap->a_target);
5065 if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) {
5066 mutex_exit(&softs->io_lock);
5067 AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to setcap",
5068 ap->a_target, ap->a_lun);
5069 return (-1);
5072 switch (scsi_hba_lookup_capstr(cap)) {
5073 case SCSI_CAP_ARQ:
5074 /* Force auto request sense */
5075 rval = (value == 1) ? 1 : 0;
5076 break;
5077 case SCSI_CAP_UNTAGGED_QING:
5078 case SCSI_CAP_TAGGED_QING:
5079 rval = (value == 1) ? 1 : 0;
5080 break;
5081 default:
5082 rval = -1;
5083 break;
5085 mutex_exit(&softs->io_lock);
5087 AACDB_PRINT_TRAN(softs, "SetCap> %s t%dL%d val=%d: rval=%d",
5088 cap, ap->a_target, ap->a_lun, value, rval);
5089 return (rval);
5092 static void
5093 aac_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
5095 struct aac_cmd *acp = PKT2AC(pkt);
5097 DBCALLED(NULL, 2);
5099 if (acp->sgt) {
5100 kmem_free(acp->sgt, sizeof (struct aac_sge) * \
5101 acp->left_cookien);
5103 aac_free_dmamap(acp);
5104 ASSERT(acp->slotp == NULL);
5105 scsi_hba_pkt_free(ap, pkt);
5109 aac_cmd_dma_alloc(struct aac_softstate *softs, struct aac_cmd *acp,
5110 struct buf *bp, int flags, int (*cb)(), caddr_t arg)
5112 int kf = (cb == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP;
5113 uint_t oldcookiec;
5114 int bioerr;
5115 int rval;
5117 oldcookiec = acp->left_cookien;
5119 /* Move window to build s/g map */
5120 if (acp->total_nwin > 0) {
5121 if (++acp->cur_win < acp->total_nwin) {
5122 off_t off;
5123 size_t len;
5125 rval = ddi_dma_getwin(acp->buf_dma_handle, acp->cur_win,
5126 &off, &len, &acp->cookie, &acp->left_cookien);
5127 if (rval == DDI_SUCCESS)
5128 goto get_dma_cookies;
5129 AACDB_PRINT(softs, CE_WARN,
5130 "ddi_dma_getwin() fail %d", rval);
5131 return (AACERR);
5133 AACDB_PRINT(softs, CE_WARN, "Nothing to transfer");
5134 return (AACERR);
5137 /* We need to transfer data, so we alloc DMA resources for this pkt */
5138 if (bp && bp->b_bcount != 0 && !(acp->flags & AAC_CMD_DMA_VALID)) {
5139 uint_t dma_flags = 0;
5140 struct aac_sge *sge;
5143 * We will still use this point to fake some
5144 * information in tran_start
5146 acp->bp = bp;
5148 /* Set dma flags */
5149 if (BUF_IS_READ(bp)) {
5150 dma_flags |= DDI_DMA_READ;
5151 acp->flags |= AAC_CMD_BUF_READ;
5152 } else {
5153 dma_flags |= DDI_DMA_WRITE;
5154 acp->flags |= AAC_CMD_BUF_WRITE;
5156 if (flags & PKT_CONSISTENT)
5157 dma_flags |= DDI_DMA_CONSISTENT;
5158 if (flags & PKT_DMA_PARTIAL)
5159 dma_flags |= DDI_DMA_PARTIAL;
5161 /* Alloc buf dma handle */
5162 if (!acp->buf_dma_handle) {
5163 rval = ddi_dma_alloc_handle(softs->devinfo_p,
5164 &softs->buf_dma_attr, cb, arg,
5165 &acp->buf_dma_handle);
5166 if (rval != DDI_SUCCESS) {
5167 AACDB_PRINT(softs, CE_WARN,
5168 "Can't allocate DMA handle, errno=%d",
5169 rval);
5170 goto error_out;
5174 /* Bind buf */
5175 if (((uintptr_t)bp->b_un.b_addr & AAC_DMA_ALIGN_MASK) == 0) {
5176 rval = ddi_dma_buf_bind_handle(acp->buf_dma_handle,
5177 bp, dma_flags, cb, arg, &acp->cookie,
5178 &acp->left_cookien);
5179 } else {
5180 size_t bufsz;
5182 AACDB_PRINT_TRAN(softs,
5183 "non-aligned buffer: addr=0x%p, cnt=%lu",
5184 (void *)bp->b_un.b_addr, bp->b_bcount);
5185 if (bp->b_flags & (B_PAGEIO|B_PHYS))
5186 bp_mapin(bp);
5188 rval = ddi_dma_mem_alloc(acp->buf_dma_handle,
5189 AAC_ROUNDUP(bp->b_bcount, AAC_DMA_ALIGN),
5190 &softs->acc_attr, DDI_DMA_STREAMING,
5191 cb, arg, &acp->abp, &bufsz, &acp->abh);
5193 if (rval != DDI_SUCCESS) {
5194 AACDB_PRINT(softs, CE_NOTE,
5195 "Cannot alloc DMA to non-aligned buf");
5196 bioerr = 0;
5197 goto error_out;
5200 if (acp->flags & AAC_CMD_BUF_WRITE)
5201 ddi_rep_put8(acp->abh,
5202 (uint8_t *)bp->b_un.b_addr,
5203 (uint8_t *)acp->abp, bp->b_bcount,
5204 DDI_DEV_AUTOINCR);
5206 rval = ddi_dma_addr_bind_handle(acp->buf_dma_handle,
5207 NULL, acp->abp, bufsz, dma_flags, cb, arg,
5208 &acp->cookie, &acp->left_cookien);
5211 switch (rval) {
5212 case DDI_DMA_PARTIAL_MAP:
5213 if (ddi_dma_numwin(acp->buf_dma_handle,
5214 &acp->total_nwin) == DDI_FAILURE) {
5215 AACDB_PRINT(softs, CE_WARN,
5216 "Cannot get number of DMA windows");
5217 bioerr = 0;
5218 goto error_out;
5220 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)",
5221 acp->left_cookien);
5222 acp->cur_win = 0;
5223 break;
5225 case DDI_DMA_MAPPED:
5226 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)",
5227 acp->left_cookien);
5228 acp->cur_win = 0;
5229 acp->total_nwin = 1;
5230 break;
5232 case DDI_DMA_NORESOURCES:
5233 bioerr = 0;
5234 AACDB_PRINT(softs, CE_WARN,
5235 "Cannot bind buf for DMA: DDI_DMA_NORESOURCES");
5236 goto error_out;
5237 case DDI_DMA_BADATTR:
5238 case DDI_DMA_NOMAPPING:
5239 bioerr = EFAULT;
5240 AACDB_PRINT(softs, CE_WARN,
5241 "Cannot bind buf for DMA: DDI_DMA_NOMAPPING");
5242 goto error_out;
5243 case DDI_DMA_TOOBIG:
5244 bioerr = EINVAL;
5245 AACDB_PRINT(softs, CE_WARN,
5246 "Cannot bind buf for DMA: DDI_DMA_TOOBIG(%d)",
5247 bp->b_bcount);
5248 goto error_out;
5249 default:
5250 bioerr = EINVAL;
5251 AACDB_PRINT(softs, CE_WARN,
5252 "Cannot bind buf for DMA: %d", rval);
5253 goto error_out;
5255 acp->flags |= AAC_CMD_DMA_VALID;
5257 get_dma_cookies:
5258 ASSERT(acp->left_cookien > 0);
5259 if (acp->left_cookien > softs->aac_sg_tablesize) {
5260 AACDB_PRINT(softs, CE_NOTE, "large cookiec received %d",
5261 acp->left_cookien);
5262 bioerr = EINVAL;
5263 goto error_out;
5265 if (oldcookiec != acp->left_cookien && acp->sgt != NULL) {
5266 kmem_free(acp->sgt, sizeof (struct aac_sge) * \
5267 oldcookiec);
5268 acp->sgt = NULL;
5270 if (acp->sgt == NULL) {
5271 acp->sgt = kmem_alloc(sizeof (struct aac_sge) * \
5272 acp->left_cookien, kf);
5273 if (acp->sgt == NULL) {
5274 AACDB_PRINT(softs, CE_WARN,
5275 "sgt kmem_alloc fail");
5276 bioerr = ENOMEM;
5277 goto error_out;
5281 sge = &acp->sgt[0];
5282 sge->bcount = acp->cookie.dmac_size;
5283 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress);
5284 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress);
5285 acp->bcount = acp->cookie.dmac_size;
5286 for (sge++; sge < &acp->sgt[acp->left_cookien]; sge++) {
5287 ddi_dma_nextcookie(acp->buf_dma_handle, &acp->cookie);
5288 sge->bcount = acp->cookie.dmac_size;
5289 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress);
5290 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress);
5291 acp->bcount += acp->cookie.dmac_size;
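	/*
	 * Illustrative example (assuming AAC_LS32/AAC_MS32 take the low and
	 * high 32 bits): a cookie at DMA address 0x123456000 of size 0x1000
	 * becomes an s/g entry with addr.ad64.lo = 0x23456000,
	 * addr.ad64.hi = 0x1 and bcount = 0x1000; acp->bcount accumulates
	 * the total transfer length across all cookies.
	 */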
5295 * Note: The old DMA engine does not correctly handle the
5296 * dma_attr_maxxfer attribute, so we have to enforce
5297 * it ourselves.
5299 if (acp->bcount > softs->buf_dma_attr.dma_attr_maxxfer) {
5300 AACDB_PRINT(softs, CE_NOTE,
5301 "large xfer size received %d\n", acp->bcount);
5302 bioerr = EINVAL;
5303 goto error_out;
5306 acp->total_xfer += acp->bcount;
5308 if (acp->pkt) {
5309 /* Return remaining byte count */
5310 if (acp->total_xfer <= bp->b_bcount) {
5311 acp->pkt->pkt_resid = bp->b_bcount - \
5312 acp->total_xfer;
5313 } else {
5315 * Allocated DMA size is greater than the buf
5316 * size of bp. This is caused by devices like
5317 * tape. We have extra bytes allocated, but
5318 * the packet residual has to stay correct.
5320 acp->pkt->pkt_resid = 0;
5322 AACDB_PRINT_TRAN(softs,
5323 "bp=0x%p, xfered=%d/%d, resid=%d",
5324 (void *)bp->b_un.b_addr, (int)acp->total_xfer,
5325 (int)bp->b_bcount, (int)acp->pkt->pkt_resid);
5328 return (AACOK);
5330 error_out:
5331 bioerror(bp, bioerr);
5332 return (AACERR);
5335 static struct scsi_pkt *
5336 aac_tran_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
5337 struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
5338 int (*callback)(), caddr_t arg)
5340 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
5341 struct aac_cmd *acp, *new_acp;
5343 DBCALLED(softs, 2);
5345 /* Allocate pkt */
5346 if (pkt == NULL) {
5347 int slen;
5349 /* Force auto request sense */
5350 slen = (statuslen > softs->slen) ? statuslen : softs->slen;
5351 pkt = scsi_hba_pkt_alloc(softs->devinfo_p, ap, cmdlen,
5352 slen, tgtlen, sizeof (struct aac_cmd), callback, arg);
5353 if (pkt == NULL) {
5354 AACDB_PRINT(softs, CE_WARN, "Alloc scsi pkt failed");
5355 return (NULL);
5357 acp = new_acp = PKT2AC(pkt);
5358 acp->pkt = pkt;
5359 acp->cmdlen = cmdlen;
5361 if (ap->a_target < AAC_MAX_LD) {
5362 acp->dvp = &softs->containers[ap->a_target].dev;
5363 acp->aac_cmd_fib = softs->aac_cmd_fib;
5364 acp->ac_comp = aac_ld_complete;
5365 } else {
5366 _NOTE(ASSUMING_PROTECTED(softs->nondasds))
5368 acp->dvp = &softs->nondasds[AAC_PD(ap->a_target)].dev;
5369 acp->aac_cmd_fib = softs->aac_cmd_fib_scsi;
5370 acp->ac_comp = aac_pd_complete;
5372 } else {
5373 acp = PKT2AC(pkt);
5374 new_acp = NULL;
5377 if (aac_cmd_dma_alloc(softs, acp, bp, flags, callback, arg) == AACOK)
5378 return (pkt);
5380 if (new_acp)
5381 aac_tran_destroy_pkt(ap, pkt);
5382 return (NULL);
5386 * tran_sync_pkt(9E) - explicit DMA synchronization
5388 /*ARGSUSED*/
5389 static void
5390 aac_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
5392 struct aac_cmd *acp = PKT2AC(pkt);
5394 DBCALLED(NULL, 2);
5396 if (aac_dma_sync_ac(acp) != AACOK)
5397 ddi_fm_service_impact(
5398 (AAC_TRAN2SOFTS(ap->a_hba_tran))->devinfo_p,
5399 DDI_SERVICE_UNAFFECTED);
5403 * tran_dmafree(9E) - deallocate DMA resources allocated for command
5405 /*ARGSUSED*/
5406 static void
5407 aac_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
5409 struct aac_cmd *acp = PKT2AC(pkt);
5411 DBCALLED(NULL, 2);
5413 aac_free_dmamap(acp);
5416 static int
5417 aac_do_quiesce(struct aac_softstate *softs)
5419 aac_hold_bus(softs, AAC_IOCMD_ASYNC);
5420 if (softs->bus_ncmds[AAC_CMDQ_ASYNC]) {
5421 aac_start_drain(softs);
5422 do {
5423 if (cv_wait_sig(&softs->drain_cv,
5424 &softs->io_lock) == 0) {
5425 /* Quiesce has been interrupted */
5426 aac_stop_drain(softs);
5427 aac_unhold_bus(softs, AAC_IOCMD_ASYNC);
5428 aac_start_waiting_io(softs);
5429 return (AACERR);
5431 } while (softs->bus_ncmds[AAC_CMDQ_ASYNC]);
5432 aac_stop_drain(softs);
5435 softs->state |= AAC_STATE_QUIESCED;
5436 return (AACOK);
5439 static int
5440 aac_tran_quiesce(dev_info_t *dip)
5442 struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
5443 int rval;
5445 DBCALLED(softs, 1);
5447 mutex_enter(&softs->io_lock);
5448 if (aac_do_quiesce(softs) == AACOK)
5449 rval = 0;
5450 else
5451 rval = 1;
5452 mutex_exit(&softs->io_lock);
5453 return (rval);
5456 static int
5457 aac_do_unquiesce(struct aac_softstate *softs)
5459 softs->state &= ~AAC_STATE_QUIESCED;
5460 aac_unhold_bus(softs, AAC_IOCMD_ASYNC);
5462 aac_start_waiting_io(softs);
5463 return (AACOK);
5466 static int
5467 aac_tran_unquiesce(dev_info_t *dip)
5469 struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
5470 int rval;
5472 DBCALLED(softs, 1);
5474 mutex_enter(&softs->io_lock);
5475 if (aac_do_unquiesce(softs) == AACOK)
5476 rval = 0;
5477 else
5478 rval = 1;
5479 mutex_exit(&softs->io_lock);
5480 return (rval);
5483 static int
5484 aac_hba_setup(struct aac_softstate *softs)
5486 scsi_hba_tran_t *hba_tran;
5487 int rval;
5489 hba_tran = scsi_hba_tran_alloc(softs->devinfo_p, SCSI_HBA_CANSLEEP);
5490 if (hba_tran == NULL)
5491 return (AACERR);
5492 hba_tran->tran_hba_private = softs;
5493 hba_tran->tran_tgt_init = aac_tran_tgt_init;
5494 hba_tran->tran_tgt_free = aac_tran_tgt_free;
5495 hba_tran->tran_tgt_probe = scsi_hba_probe;
5496 hba_tran->tran_start = aac_tran_start;
5497 hba_tran->tran_getcap = aac_tran_getcap;
5498 hba_tran->tran_setcap = aac_tran_setcap;
5499 hba_tran->tran_init_pkt = aac_tran_init_pkt;
5500 hba_tran->tran_destroy_pkt = aac_tran_destroy_pkt;
5501 hba_tran->tran_reset = aac_tran_reset;
5502 hba_tran->tran_abort = aac_tran_abort;
5503 hba_tran->tran_sync_pkt = aac_tran_sync_pkt;
5504 hba_tran->tran_dmafree = aac_tran_dmafree;
5505 hba_tran->tran_quiesce = aac_tran_quiesce;
5506 hba_tran->tran_unquiesce = aac_tran_unquiesce;
5507 hba_tran->tran_bus_config = aac_tran_bus_config;
5508 rval = scsi_hba_attach_setup(softs->devinfo_p, &softs->buf_dma_attr,
5509 hba_tran, 0);
5510 if (rval != DDI_SUCCESS) {
5511 scsi_hba_tran_free(hba_tran);
5512 AACDB_PRINT(softs, CE_WARN, "aac_hba_setup failed");
5513 return (AACERR);
5516 softs->hba_tran = hba_tran;
5517 return (AACOK);
5521 * FIB setup operations
5525 * Init FIB header
5527 static void
5528 aac_cmd_fib_header(struct aac_softstate *softs, struct aac_cmd *acp,
5529 uint16_t cmd)
5531 struct aac_slot *slotp = acp->slotp;
5532 ddi_acc_handle_t acc = slotp->fib_acc_handle;
5533 struct aac_fib *fibp = slotp->fibp;
5534 uint32_t xfer_state;
5536 xfer_state =
5537 AAC_FIBSTATE_HOSTOWNED |
5538 AAC_FIBSTATE_INITIALISED |
5539 AAC_FIBSTATE_EMPTY |
5540 AAC_FIBSTATE_FAST_RESPONSE | /* enable fast io */
5541 AAC_FIBSTATE_FROMHOST |
5542 AAC_FIBSTATE_REXPECTED |
5543 AAC_FIBSTATE_NORM;
5545 if (!(acp->flags & AAC_CMD_SYNC))
5546 xfer_state |= AAC_FIBSTATE_ASYNC;
5548 ddi_put32(acc, &fibp->Header.XferState, xfer_state);
5549 ddi_put16(acc, &fibp->Header.Command, cmd);
5550 ddi_put8(acc, &fibp->Header.StructType, AAC_FIBTYPE_TFIB);
5551 ddi_put8(acc, &fibp->Header.Flags, 0); /* don't care */
5552 ddi_put16(acc, &fibp->Header.Size, acp->fib_size);
5553 ddi_put16(acc, &fibp->Header.SenderSize, softs->aac_max_fib_size);
5554 ddi_put32(acc, &fibp->Header.SenderFibAddress, (slotp->index << 2));
5555 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr);
5556 ddi_put32(acc, &fibp->Header.SenderData, 0); /* don't care */
5560 * Init FIB for raw IO command
5562 static void
5563 aac_cmd_fib_rawio(struct aac_softstate *softs, struct aac_cmd *acp)
5565 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5566 struct aac_raw_io *io = (struct aac_raw_io *)&acp->slotp->fibp->data[0];
5567 struct aac_sg_entryraw *sgp;
5568 struct aac_sge *sge;
5570 /* Calculate FIB size */
5571 acp->fib_size = sizeof (struct aac_fib_header) + \
5572 sizeof (struct aac_raw_io) + (acp->left_cookien - 1) * \
5573 sizeof (struct aac_sg_entryraw);
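	/*
	 * Illustrative note: struct aac_raw_io already embeds the first s/g
	 * entry, so a command with e.g. 4 cookies only adds 3 extra
	 * aac_sg_entryraw entries to the size computed above.
	 */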
5575 aac_cmd_fib_header(softs, acp, RawIo);
5577 ddi_put16(acc, &io->Flags, (acp->flags & AAC_CMD_BUF_READ) ? 1 : 0);
5578 ddi_put16(acc, &io->BpTotal, 0);
5579 ddi_put16(acc, &io->BpComplete, 0);
5581 ddi_put32(acc, AAC_LO32(&io->BlockNumber), AAC_LS32(acp->blkno));
5582 ddi_put32(acc, AAC_HI32(&io->BlockNumber), AAC_MS32(acp->blkno));
5583 ddi_put16(acc, &io->ContainerId,
5584 ((struct aac_container *)acp->dvp)->cid);
5586 /* Fill SG table */
5587 ddi_put32(acc, &io->SgMapRaw.SgCount, acp->left_cookien);
5588 ddi_put32(acc, &io->ByteCount, acp->bcount);
5590 for (sge = &acp->sgt[0], sgp = &io->SgMapRaw.SgEntryRaw[0];
5591 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5592 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5593 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5594 ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5595 sgp->Next = 0;
5596 sgp->Prev = 0;
5597 sgp->Flags = 0;
5601 /* Init FIB for 64-bit block IO command */
5602 static void
5603 aac_cmd_fib_brw64(struct aac_softstate *softs, struct aac_cmd *acp)
5605 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5606 struct aac_blockread64 *br = (struct aac_blockread64 *) \
5607 &acp->slotp->fibp->data[0];
5608 struct aac_sg_entry64 *sgp;
5609 struct aac_sge *sge;
5611 acp->fib_size = sizeof (struct aac_fib_header) + \
5612 sizeof (struct aac_blockread64) + (acp->left_cookien - 1) * \
5613 sizeof (struct aac_sg_entry64);
5615 aac_cmd_fib_header(softs, acp, ContainerCommand64);
5618 * The definitions for aac_blockread64 and aac_blockwrite64
5619 * are the same.
5621 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno);
5622 ddi_put16(acc, &br->ContainerId,
5623 ((struct aac_container *)acp->dvp)->cid);
5624 ddi_put32(acc, &br->Command, (acp->flags & AAC_CMD_BUF_READ) ?
5625 VM_CtHostRead64 : VM_CtHostWrite64);
5626 ddi_put16(acc, &br->Pad, 0);
5627 ddi_put16(acc, &br->Flags, 0);
5629 /* Fill SG table */
5630 ddi_put32(acc, &br->SgMap64.SgCount, acp->left_cookien);
5631 ddi_put16(acc, &br->SectorCount, acp->bcount / AAC_BLK_SIZE);
5633 for (sge = &acp->sgt[0], sgp = &br->SgMap64.SgEntry64[0];
5634 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5635 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5636 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5637 ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5641 /* Init FIB for block IO command */
5642 static void
5643 aac_cmd_fib_brw(struct aac_softstate *softs, struct aac_cmd *acp)
5645 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5646 struct aac_blockread *br = (struct aac_blockread *) \
5647 &acp->slotp->fibp->data[0];
5648 struct aac_sg_entry *sgp;
5649 struct aac_sge *sge = &acp->sgt[0];
5651 if (acp->flags & AAC_CMD_BUF_READ) {
5652 acp->fib_size = sizeof (struct aac_fib_header) + \
5653 sizeof (struct aac_blockread) + (acp->left_cookien - 1) * \
5654 sizeof (struct aac_sg_entry);
5656 ddi_put32(acc, &br->Command, VM_CtBlockRead);
5657 ddi_put32(acc, &br->SgMap.SgCount, acp->left_cookien);
5658 sgp = &br->SgMap.SgEntry[0];
5659 } else {
5660 struct aac_blockwrite *bw = (struct aac_blockwrite *)br;
5662 acp->fib_size = sizeof (struct aac_fib_header) + \
5663 sizeof (struct aac_blockwrite) + (acp->left_cookien - 1) * \
5664 sizeof (struct aac_sg_entry);
5666 ddi_put32(acc, &bw->Command, VM_CtBlockWrite);
5667 ddi_put32(acc, &bw->Stable, CUNSTABLE);
5668 ddi_put32(acc, &bw->SgMap.SgCount, acp->left_cookien);
5669 sgp = &bw->SgMap.SgEntry[0];
5671 aac_cmd_fib_header(softs, acp, ContainerCommand);
5674	 * aac_blockread and aac_blockwrite share the same leading fields,
5675	 * so br is used for bw here as well
5677 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno);
5678 ddi_put32(acc, &br->ContainerId,
5679 ((struct aac_container *)acp->dvp)->cid);
5680 ddi_put32(acc, &br->ByteCount, acp->bcount);
5682 /* Fill SG table */
5683 for (sge = &acp->sgt[0];
5684 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5685 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32);
5686 ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5690 /*ARGSUSED*/
5691 void
5692 aac_cmd_fib_copy(struct aac_softstate *softs, struct aac_cmd *acp)
5694 struct aac_slot *slotp = acp->slotp;
5695 struct aac_fib *fibp = slotp->fibp;
5696 ddi_acc_handle_t acc = slotp->fib_acc_handle;
5698 ddi_rep_put8(acc, (uint8_t *)acp->fibp, (uint8_t *)fibp,
5699 acp->fib_size, /* only copy data of needed length */
5700 DDI_DEV_AUTOINCR);
5701 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr);
5702 ddi_put32(acc, &fibp->Header.SenderFibAddress, slotp->index << 2);
5705 static void
5706 aac_cmd_fib_sync(struct aac_softstate *softs, struct aac_cmd *acp)
5708 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5709 struct aac_synchronize_command *sync =
5710 (struct aac_synchronize_command *)&acp->slotp->fibp->data[0];
5712 acp->fib_size = AAC_FIB_SIZEOF(struct aac_synchronize_command);
5714 aac_cmd_fib_header(softs, acp, ContainerCommand);
5715 ddi_put32(acc, &sync->Command, VM_ContainerConfig);
5716 ddi_put32(acc, &sync->Type, (uint32_t)CT_FLUSH_CACHE);
5717 ddi_put32(acc, &sync->Cid, ((struct aac_container *)acp->dvp)->cid);
5718 ddi_put32(acc, &sync->Count,
5719 sizeof (((struct aac_synchronize_reply *)0)->Data));
5723 * Start/Stop unit (Power Management)
5725 static void
5726 aac_cmd_fib_startstop(struct aac_softstate *softs, struct aac_cmd *acp)
5728 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5729 struct aac_Container *cmd =
5730 (struct aac_Container *)&acp->slotp->fibp->data[0];
5731 union scsi_cdb *cdbp = (void *)acp->pkt->pkt_cdbp;
5733 acp->fib_size = AAC_FIB_SIZEOF(struct aac_Container);
5735 aac_cmd_fib_header(softs, acp, ContainerCommand);
5736 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE);
5737 ddi_put32(acc, &cmd->Command, VM_ContainerConfig);
5738 ddi_put32(acc, &cmd->CTCommand.command, CT_PM_DRIVER_SUPPORT);
5739 ddi_put32(acc, &cmd->CTCommand.param[0], cdbp->cdb_opaque[4] & 1 ? \
5740 AAC_PM_DRIVERSUP_START_UNIT : AAC_PM_DRIVERSUP_STOP_UNIT);
5741 ddi_put32(acc, &cmd->CTCommand.param[1],
5742 ((struct aac_container *)acp->dvp)->cid);
5743 ddi_put32(acc, &cmd->CTCommand.param[2], cdbp->cdb_opaque[1] & 1);
5747 * Init FIB for pass-through SCMD
5749 static void
5750 aac_cmd_fib_srb(struct aac_cmd *acp)
5752 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5753 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0];
5754 uint8_t *cdb;
5756 ddi_put32(acc, &srb->function, SRBF_ExecuteScsi);
5757 ddi_put32(acc, &srb->retry_limit, 0);
5758 ddi_put32(acc, &srb->cdb_size, acp->cmdlen);
5759 ddi_put32(acc, &srb->timeout, 0); /* use driver timeout */
5760 if (acp->fibp == NULL) {
5761 if (acp->flags & AAC_CMD_BUF_READ)
5762 ddi_put32(acc, &srb->flags, SRB_DataIn);
5763 else if (acp->flags & AAC_CMD_BUF_WRITE)
5764 ddi_put32(acc, &srb->flags, SRB_DataOut);
5765 ddi_put32(acc, &srb->channel,
5766 ((struct aac_nondasd *)acp->dvp)->bus);
5767 ddi_put32(acc, &srb->id, ((struct aac_nondasd *)acp->dvp)->tid);
5768 ddi_put32(acc, &srb->lun, 0);
5769 cdb = acp->pkt->pkt_cdbp;
5770 } else {
5771 struct aac_srb *srb0 = (struct aac_srb *)&acp->fibp->data[0];
5773 ddi_put32(acc, &srb->flags, srb0->flags);
5774 ddi_put32(acc, &srb->channel, srb0->channel);
5775 ddi_put32(acc, &srb->id, srb0->id);
5776 ddi_put32(acc, &srb->lun, srb0->lun);
5777 cdb = srb0->cdb;
5779 ddi_rep_put8(acc, cdb, srb->cdb, acp->cmdlen, DDI_DEV_AUTOINCR);
5782 static void
5783 aac_cmd_fib_scsi32(struct aac_softstate *softs, struct aac_cmd *acp)
5785 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5786 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0];
5787 struct aac_sg_entry *sgp;
5788 struct aac_sge *sge;
5790 acp->fib_size = sizeof (struct aac_fib_header) + \
5791 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \
5792 acp->left_cookien * sizeof (struct aac_sg_entry);
5794 /* Fill FIB and SRB headers, and copy cdb */
5795 aac_cmd_fib_header(softs, acp, ScsiPortCommand);
5796 aac_cmd_fib_srb(acp);
5798 /* Fill SG table */
5799 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien);
5800 ddi_put32(acc, &srb->count, acp->bcount);
5802 for (sge = &acp->sgt[0], sgp = &srb->sg.SgEntry[0];
5803 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5804 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32);
5805 ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5809 static void
5810 aac_cmd_fib_scsi64(struct aac_softstate *softs, struct aac_cmd *acp)
5812 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5813 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0];
5814 struct aac_sg_entry64 *sgp;
5815 struct aac_sge *sge;
5817 acp->fib_size = sizeof (struct aac_fib_header) + \
5818 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \
5819 acp->left_cookien * sizeof (struct aac_sg_entry64);
5821 /* Fill FIB and SRB headers, and copy cdb */
5822 aac_cmd_fib_header(softs, acp, ScsiPortCommandU64);
5823 aac_cmd_fib_srb(acp);
5825 /* Fill SG table */
5826 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien);
5827 ddi_put32(acc, &srb->count, acp->bcount);
5829 for (sge = &acp->sgt[0],
5830 sgp = &((struct aac_sg_table64 *)&srb->sg)->SgEntry64[0];
5831 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5832 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5833 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5834 ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
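/*
 * Bind a free IO slot to the command, build the FIB in the slot's
 * DMA buffer and sync it for the device.
 */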
5838 static int
5839 aac_cmd_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp)
5841 struct aac_slot *slotp;
5843 if (slotp = aac_get_slot(softs)) {
5844 acp->slotp = slotp;
5845 slotp->acp = acp;
5846 acp->aac_cmd_fib(softs, acp);
5847 (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0,
5848 DDI_DMA_SYNC_FORDEV);
5849 return (AACOK);
5851 return (AACERR);
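/*
 * Bind the command to a slot if the bus and per-device throttles allow;
 * a non-tagged command is only issued when the device has no outstanding
 * commands, otherwise the async queue is set to drain first.
 */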
5854 static int
5855 aac_bind_io(struct aac_softstate *softs, struct aac_cmd *acp)
5857 struct aac_device *dvp = acp->dvp;
5858 int q = AAC_CMDQ(acp);
5860 if (softs->bus_ncmds[q] < softs->bus_throttle[q]) {
5861 if (dvp) {
5862 if (dvp->ncmds[q] < dvp->throttle[q]) {
5863 if (!(acp->flags & AAC_CMD_NTAG) ||
5864 dvp->ncmds[q] == 0) {
5865 return (aac_cmd_slot_bind(softs, acp));
5867 ASSERT(q == AAC_CMDQ_ASYNC);
5868 aac_set_throttle(softs, dvp, AAC_CMDQ_ASYNC,
5869 AAC_THROTTLE_DRAIN);
5871 } else {
5872 return (aac_cmd_slot_bind(softs, acp));
5875 return (AACERR);
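/*
 * Acquire a slot for the single outstanding sync FIB, waiting until any
 * previous sync FIB has released its slot.
 */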
5878 static int
5879 aac_sync_fib_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp)
5881 struct aac_slot *slotp;
5883 while (softs->sync_ac.slotp)
5884 cv_wait(&softs->sync_fib_cv, &softs->io_lock);
5886 if (slotp = aac_get_slot(softs)) {
5887 ASSERT(acp->slotp == NULL);
5889 acp->slotp = slotp;
5890 slotp->acp = acp;
5891 return (AACOK);
5893 return (AACERR);
5896 static void
5897 aac_sync_fib_slot_release(struct aac_softstate *softs, struct aac_cmd *acp)
5899 ASSERT(acp->slotp);
5901 aac_release_slot(softs, acp->slotp);
5902 acp->slotp->acp = NULL;
5903 acp->slotp = NULL;
5905 cv_signal(&softs->sync_fib_cv);
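/*
 * Deliver a slot-bound command to the adapter: update packet state and
 * outstanding command counts, put the command on the busy queue and send
 * the FIB via the new comm interface or the legacy command queue.
 */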
5908 static void
5909 aac_start_io(struct aac_softstate *softs, struct aac_cmd *acp)
5911 struct aac_slot *slotp = acp->slotp;
5912 int q = AAC_CMDQ(acp);
5913 int rval;
5915 /* Set ac and pkt */
5916 if (acp->pkt) { /* ac from ioctl has no pkt */
5917 acp->pkt->pkt_state |=
5918 STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD;
5920 if (acp->timeout) /* 0 indicates no timeout */
5921 acp->timeout += aac_timebase + aac_tick;
5923 if (acp->dvp)
5924 acp->dvp->ncmds[q]++;
5925 softs->bus_ncmds[q]++;
5926 aac_cmd_enqueue(&softs->q_busy, acp);
5928 AACDB_PRINT_FIB(softs, slotp);
5930 if (softs->flags & AAC_FLAGS_NEW_COMM) {
5931 rval = aac_send_command(softs, slotp);
5932 } else {
5934	 * If the FIB cannot be enqueued, the adapter is in an abnormal
5935	 * state and there will be no interrupt to us.
5937 rval = aac_fib_enqueue(softs, AAC_ADAP_NORM_CMD_Q,
5938 slotp->fib_phyaddr, acp->fib_size);
5941 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS)
5942 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
5945	 * NOTE: We send commands only when slots are available, so we
5946	 * should never reach here.
5948 if (rval != AACOK) {
5949 AACDB_PRINT(softs, CE_NOTE, "SCMD send failed");
5950 if (acp->pkt) {
5951 acp->pkt->pkt_state &= ~STATE_SENT_CMD;
5952 aac_set_pkt_reason(softs, acp, CMD_INCOMPLETE, 0);
5954 aac_end_io(softs, acp);
5955 if (!(acp->flags & (AAC_CMD_NO_INTR | AAC_CMD_NO_CB)))
5956 ddi_trigger_softintr(softs->softint_id);
5960 static void
5961 aac_start_waitq(struct aac_softstate *softs, struct aac_cmd_queue *q)
5963 struct aac_cmd *acp, *next_acp;
5965	/* Serve as many waiting IOs as possible */
5966 for (acp = q->q_head; acp; acp = next_acp) {
5967 next_acp = acp->next;
5968 if (aac_bind_io(softs, acp) == AACOK) {
5969 aac_cmd_delete(q, acp);
5970 aac_start_io(softs, acp);
5972 if (softs->free_io_slot_head == NULL)
5973 break;
5977 static void
5978 aac_start_waiting_io(struct aac_softstate *softs)
5981	 * Sync FIB IO is served before async FIB IO so that IO requests
5982	 * sent by interactive userland commands are responded to as soon as possible.
5984 if (softs->q_wait[AAC_CMDQ_SYNC].q_head)
5985 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_SYNC]);
5986 if (softs->q_wait[AAC_CMDQ_ASYNC].q_head)
5987 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_ASYNC]);
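/*
 * Drain the completion queue: sync consistent read buffers, check the
 * FMA access/DMA handles and call the SCSA completion routine for each
 * finished packet.
 */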
5990 static void
5991 aac_drain_comp_q(struct aac_softstate *softs)
5993 struct aac_cmd *acp;
5994 struct scsi_pkt *pkt;
5996 /*CONSTCOND*/
5997 while (1) {
5998 mutex_enter(&softs->q_comp_mutex);
5999 acp = aac_cmd_dequeue(&softs->q_comp);
6000 mutex_exit(&softs->q_comp_mutex);
6001 if (acp != NULL) {
6002 ASSERT(acp->pkt != NULL);
6003 pkt = acp->pkt;
6005 if (pkt->pkt_reason == CMD_CMPLT) {
6007 * Consistent packets need to be sync'ed first
6009 if ((acp->flags & AAC_CMD_CONSISTENT) &&
6010 (acp->flags & AAC_CMD_BUF_READ)) {
6011 if (aac_dma_sync_ac(acp) != AACOK) {
6012 ddi_fm_service_impact(
6013 softs->devinfo_p,
6014 DDI_SERVICE_UNAFFECTED);
6015 pkt->pkt_reason = CMD_TRAN_ERR;
6016 pkt->pkt_statistics = 0;
6019 if ((aac_check_acc_handle(softs-> \
6020 comm_space_acc_handle) != DDI_SUCCESS) ||
6021 (aac_check_acc_handle(softs-> \
6022 pci_mem_handle) != DDI_SUCCESS)) {
6023 ddi_fm_service_impact(softs->devinfo_p,
6024 DDI_SERVICE_UNAFFECTED);
6025 ddi_fm_acc_err_clear(softs-> \
6026 pci_mem_handle, DDI_FME_VER0);
6027 pkt->pkt_reason = CMD_TRAN_ERR;
6028 pkt->pkt_statistics = 0;
6030 if (aac_check_dma_handle(softs-> \
6031 comm_space_dma_handle) != DDI_SUCCESS) {
6032 ddi_fm_service_impact(softs->devinfo_p,
6033 DDI_SERVICE_UNAFFECTED);
6034 pkt->pkt_reason = CMD_TRAN_ERR;
6035 pkt->pkt_statistics = 0;
6038 scsi_hba_pkt_comp(pkt);
6039 } else {
6040 break;
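/* Allocate and DMA-bind the hardware FIB buffer for one IO slot */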
6045 static int
6046 aac_alloc_fib(struct aac_softstate *softs, struct aac_slot *slotp)
6048 size_t rlen;
6049 ddi_dma_cookie_t cookie;
6050 uint_t cookien;
6052 /* Allocate FIB dma resource */
6053 if (ddi_dma_alloc_handle(
6054 softs->devinfo_p,
6055 &softs->addr_dma_attr,
6056 DDI_DMA_SLEEP,
6057 NULL,
6058 &slotp->fib_dma_handle) != DDI_SUCCESS) {
6059 AACDB_PRINT(softs, CE_WARN,
6060 "Cannot alloc dma handle for slot fib area");
6061 goto error;
6063 if (ddi_dma_mem_alloc(
6064 slotp->fib_dma_handle,
6065 softs->aac_max_fib_size,
6066 &softs->acc_attr,
6067 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
6068 DDI_DMA_SLEEP,
6069 NULL,
6070 (caddr_t *)&slotp->fibp,
6071 &rlen,
6072 &slotp->fib_acc_handle) != DDI_SUCCESS) {
6073 AACDB_PRINT(softs, CE_WARN,
6074 "Cannot alloc mem for slot fib area");
6075 goto error;
6077 if (ddi_dma_addr_bind_handle(
6078 slotp->fib_dma_handle,
6079 NULL,
6080 (caddr_t)slotp->fibp,
6081 softs->aac_max_fib_size,
6082 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
6083 DDI_DMA_SLEEP,
6084 NULL,
6085 &cookie,
6086 &cookien) != DDI_DMA_MAPPED) {
6087 AACDB_PRINT(softs, CE_WARN,
6088 "dma bind failed for slot fib area");
6089 goto error;
6092 /* Check dma handles allocated in fib attach */
6093 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) {
6094 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
6095 goto error;
6098 /* Check acc handles allocated in fib attach */
6099 if (aac_check_acc_handle(slotp->fib_acc_handle) != DDI_SUCCESS) {
6100 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
6101 goto error;
6104 slotp->fib_phyaddr = cookie.dmac_laddress;
6105 return (AACOK);
6107 error:
6108 if (slotp->fib_acc_handle) {
6109 ddi_dma_mem_free(&slotp->fib_acc_handle);
6110 slotp->fib_acc_handle = NULL;
6112 if (slotp->fib_dma_handle) {
6113 ddi_dma_free_handle(&slotp->fib_dma_handle);
6114 slotp->fib_dma_handle = NULL;
6116 return (AACERR);
6119 static void
6120 aac_free_fib(struct aac_slot *slotp)
6122 (void) ddi_dma_unbind_handle(slotp->fib_dma_handle);
6123 ddi_dma_mem_free(&slotp->fib_acc_handle);
6124 slotp->fib_acc_handle = NULL;
6125 ddi_dma_free_handle(&slotp->fib_dma_handle);
6126 slotp->fib_dma_handle = NULL;
6127 slotp->fib_phyaddr = 0;
6130 static void
6131 aac_alloc_fibs(struct aac_softstate *softs)
6133 int i;
6134 struct aac_slot *slotp;
6136 for (i = 0; i < softs->total_slots &&
6137 softs->total_fibs < softs->total_slots; i++) {
6138 slotp = &(softs->io_slot[i]);
6139 if (slotp->fib_phyaddr)
6140 continue;
6141 if (aac_alloc_fib(softs, slotp) != AACOK)
6142 break;
6144 /* Insert the slot to the free slot list */
6145 aac_release_slot(softs, slotp);
6146 softs->total_fibs++;
6150 static void
6151 aac_destroy_fibs(struct aac_softstate *softs)
6153 struct aac_slot *slotp;
6155 while ((slotp = softs->free_io_slot_head) != NULL) {
6156 ASSERT(slotp->fib_phyaddr);
6157 softs->free_io_slot_head = slotp->next;
6158 aac_free_fib(slotp);
6159 ASSERT(slotp->index == (slotp - softs->io_slot));
6160 softs->total_fibs--;
6162 ASSERT(softs->total_fibs == 0);
6165 static int
6166 aac_create_slots(struct aac_softstate *softs)
6168 int i;
6170 softs->total_slots = softs->aac_max_fibs;
6171 softs->io_slot = kmem_zalloc(sizeof (struct aac_slot) * \
6172 softs->total_slots, KM_SLEEP);
6173 if (softs->io_slot == NULL) {
6174 AACDB_PRINT(softs, CE_WARN, "Cannot allocate slot");
6175 return (AACERR);
6177 for (i = 0; i < softs->total_slots; i++)
6178 softs->io_slot[i].index = i;
6179 softs->free_io_slot_head = NULL;
6180 softs->total_fibs = 0;
6181 return (AACOK);
6184 static void
6185 aac_destroy_slots(struct aac_softstate *softs)
6187 ASSERT(softs->free_io_slot_head == NULL);
6189 kmem_free(softs->io_slot, sizeof (struct aac_slot) * \
6190 softs->total_slots);
6191 softs->io_slot = NULL;
6192 softs->total_slots = 0;
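/* Take a slot off the free slot list, or return NULL if none is left */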
6195 struct aac_slot *
6196 aac_get_slot(struct aac_softstate *softs)
6198 struct aac_slot *slotp;
6200 if ((slotp = softs->free_io_slot_head) != NULL) {
6201 softs->free_io_slot_head = slotp->next;
6202 slotp->next = NULL;
6204 return (slotp);
6207 static void
6208 aac_release_slot(struct aac_softstate *softs, struct aac_slot *slotp)
6210 ASSERT((slotp->index >= 0) && (slotp->index < softs->total_slots));
6211 ASSERT(slotp == &softs->io_slot[slotp->index]);
6213 slotp->acp = NULL;
6214 slotp->next = softs->free_io_slot_head;
6215 softs->free_io_slot_head = slotp;
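/*
 * Dispatch an IO command: start it immediately if a slot is available,
 * otherwise put it on the wait queue. Commands marked for polled or
 * synchronous completion are waited on here before returning.
 */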
6219 aac_do_io(struct aac_softstate *softs, struct aac_cmd *acp)
6221 if (aac_bind_io(softs, acp) == AACOK)
6222 aac_start_io(softs, acp);
6223 else
6224 aac_cmd_enqueue(&softs->q_wait[AAC_CMDQ(acp)], acp);
6226 if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR)))
6227 return (TRAN_ACCEPT);
6229	 * Because the sync FIB is always 512 bytes and reserved for critical
6230	 * functions, an async FIB is used for polled IO.
6232 if (acp->flags & AAC_CMD_NO_INTR) {
6233 if (aac_do_poll_io(softs, acp) == AACOK)
6234 return (TRAN_ACCEPT);
6235 } else {
6236 if (aac_do_sync_io(softs, acp) == AACOK)
6237 return (TRAN_ACCEPT);
6239 return (TRAN_BADPKT);
6242 static int
6243 aac_do_poll_io(struct aac_softstate *softs, struct aac_cmd *acp)
6245 int (*intr_handler)(struct aac_softstate *);
6248	 * Interrupts are disabled, so we have to poll the adapter ourselves.
6250 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ?
6251 aac_process_intr_new : aac_process_intr_old;
6252 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) {
6253 int i = AAC_POLL_TIME * 1000;
6255 AAC_BUSYWAIT((intr_handler(softs) != AAC_DB_RESPONSE_READY), i);
6256 if (i == 0)
6257 aac_cmd_timeout(softs, acp);
6260 ddi_trigger_softintr(softs->softint_id);
6262 if ((acp->flags & AAC_CMD_CMPLT) && !(acp->flags & AAC_CMD_ERR))
6263 return (AACOK);
6264 return (AACERR);
6267 static int
6268 aac_do_sync_io(struct aac_softstate *softs, struct aac_cmd *acp)
6270 ASSERT(softs && acp);
6272 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT)))
6273 cv_wait(&softs->event, &softs->io_lock);
6275 if (acp->flags & AAC_CMD_CMPLT)
6276 return (AACOK);
6277 return (AACERR);
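/*
 * Sync the command's data buffer DMA; when an aligned bounce buffer is
 * in use, copy the data between it and the original buf as well.
 */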
6280 static int
6281 aac_dma_sync_ac(struct aac_cmd *acp)
6283 if (acp->buf_dma_handle) {
6284 if (acp->flags & AAC_CMD_BUF_WRITE) {
6285 if (acp->abp != NULL)
6286 ddi_rep_put8(acp->abh,
6287 (uint8_t *)acp->bp->b_un.b_addr,
6288 (uint8_t *)acp->abp, acp->bp->b_bcount,
6289 DDI_DEV_AUTOINCR);
6290 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0,
6291 DDI_DMA_SYNC_FORDEV);
6292 } else {
6293 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0,
6294 DDI_DMA_SYNC_FORCPU);
6295 if (aac_check_dma_handle(acp->buf_dma_handle) !=
6296 DDI_SUCCESS)
6297 return (AACERR);
6298 if (acp->abp != NULL)
6299 ddi_rep_get8(acp->abh,
6300 (uint8_t *)acp->bp->b_un.b_addr,
6301 (uint8_t *)acp->abp, acp->bp->b_bcount,
6302 DDI_DEV_AUTOINCR);
6305 return (AACOK);
6309 * Copy AIF from adapter to the empty AIF slot and inform AIF threads
6311 static void
6312 aac_save_aif(struct aac_softstate *softs, ddi_acc_handle_t acc,
6313 struct aac_fib *fibp0, int fib_size0)
6315 struct aac_fib *fibp; /* FIB in AIF queue */
6316 int fib_size;
6317 uint16_t fib_command;
6318 int current, next;
6320 /* Ignore non AIF messages */
6321 fib_command = ddi_get16(acc, &fibp0->Header.Command);
6322 if (fib_command != AifRequest) {
6323 cmn_err(CE_WARN, "!Unknown command from controller");
6324 return;
6327 mutex_enter(&softs->aifq_mutex);
6329 /* Save AIF */
6330 fibp = &softs->aifq[softs->aifq_idx].d;
6331 fib_size = (fib_size0 > AAC_FIB_SIZE) ? AAC_FIB_SIZE : fib_size0;
6332 ddi_rep_get8(acc, (uint8_t *)fibp, (uint8_t *)fibp0, fib_size,
6333 DDI_DEV_AUTOINCR);
6335 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
6336 ddi_fm_service_impact(softs->devinfo_p,
6337 DDI_SERVICE_UNAFFECTED);
6338 mutex_exit(&softs->aifq_mutex);
6339 return;
6342 AACDB_PRINT_AIF(softs, (struct aac_aif_command *)&fibp->data[0]);
6344 /* Modify AIF contexts */
6345 current = softs->aifq_idx;
6346 next = (current + 1) % AAC_AIFQ_LENGTH;
6347 if (next == 0) {
6348 struct aac_fib_context *ctx_p;
6350 softs->aifq_wrap = 1;
6351 for (ctx_p = softs->fibctx_p; ctx_p; ctx_p = ctx_p->next) {
6352 if (next == ctx_p->ctx_idx) {
6353 ctx_p->ctx_flags |= AAC_CTXFLAG_FILLED;
6354 } else if (current == ctx_p->ctx_idx &&
6355 (ctx_p->ctx_flags & AAC_CTXFLAG_FILLED)) {
6356 ctx_p->ctx_idx = next;
6357 ctx_p->ctx_overrun++;
6361 softs->aifq_idx = next;
6363 /* Wakeup AIF threads */
6364 cv_broadcast(&softs->aifq_cv);
6365 mutex_exit(&softs->aifq_mutex);
6367 /* Wakeup event thread to handle aif */
6368 aac_event_disp(softs, AAC_EVENT_AIF);
6371 static int
6372 aac_return_aif_common(struct aac_softstate *softs, struct aac_fib_context *ctx,
6373 struct aac_fib **fibpp)
6375 int current;
6377 current = ctx->ctx_idx;
6378 if (current == softs->aifq_idx &&
6379 !(ctx->ctx_flags & AAC_CTXFLAG_FILLED))
6380 return (EAGAIN); /* Empty */
6382 *fibpp = &softs->aifq[current].d;
6384 ctx->ctx_flags &= ~AAC_CTXFLAG_FILLED;
6385 ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
6386 return (0);
6390 aac_return_aif(struct aac_softstate *softs, struct aac_fib_context *ctx,
6391 struct aac_fib **fibpp)
6393 int rval;
6395 mutex_enter(&softs->aifq_mutex);
6396 rval = aac_return_aif_common(softs, ctx, fibpp);
6397 mutex_exit(&softs->aifq_mutex);
6398 return (rval);
6402 aac_return_aif_wait(struct aac_softstate *softs, struct aac_fib_context *ctx,
6403 struct aac_fib **fibpp)
6405 int rval;
6407 mutex_enter(&softs->aifq_mutex);
6408 rval = aac_return_aif_common(softs, ctx, fibpp);
6409 if (rval == EAGAIN) {
6410 AACDB_PRINT(softs, CE_NOTE, "Waiting for AIF");
6411 rval = cv_wait_sig(&softs->aifq_cv, &softs->aifq_mutex);
6413 mutex_exit(&softs->aifq_mutex);
6414 return ((rval > 0) ? 0 : EINTR);
6418 * The following function comes from Adaptec:
6420 * When the driver sees a particular event indicating that containers have
6421 * changed, it rescans the containers. However, a change may not be complete
6422 * until some other event is received. For example, creating or deleting an
6423 * array can incur as many as six AifEnConfigChange events, which would
6424 * generate six container rescans. To reduce rescans, the driver sets a flag
6425 * to wait for another particular event and rescans only when it arrives.
6427 static int
6428 aac_handle_aif(struct aac_softstate *softs, struct aac_aif_command *aif)
6430 ddi_acc_handle_t acc = softs->comm_space_acc_handle;
6431 int en_type;
6432 int devcfg_needed;
6433 int cid;
6434 uint32_t bus_id, tgt_id;
6435 enum aac_cfg_event event = AAC_CFG_NULL_EXIST;
6437 devcfg_needed = 0;
6438 en_type = LE_32((uint32_t)aif->data.EN.type);
6440 switch (LE_32((uint32_t)aif->command)) {
6441 case AifCmdDriverNotify: {
6442 cid = LE_32(aif->data.EN.data.ECC.container[0]);
6444 switch (en_type) {
6445 case AifDenMorphComplete:
6446 case AifDenVolumeExtendComplete:
6447 if (AAC_DEV_IS_VALID(&softs->containers[cid].dev))
6448 softs->devcfg_wait_on = AifEnConfigChange;
6449 break;
6451 if (softs->devcfg_wait_on == en_type)
6452 devcfg_needed = 1;
6453 break;
6456 case AifCmdEventNotify:
6457 cid = LE_32(aif->data.EN.data.ECC.container[0]);
6458 switch (en_type) {
6459 case AifEnAddContainer:
6460 case AifEnDeleteContainer:
6461 softs->devcfg_wait_on = AifEnConfigChange;
6462 break;
6463 case AifEnContainerChange:
6464 if (!softs->devcfg_wait_on)
6465 softs->devcfg_wait_on = AifEnConfigChange;
6466 break;
6467 case AifEnContainerEvent:
6468 if (ddi_get32(acc, &aif-> \
6469 data.EN.data.ECE.eventType) == CT_PUP_MISSING_DRIVE)
6470 devcfg_needed = 1;
6471 break;
6472 case AifEnAddJBOD:
6473 if (!(softs->flags & AAC_FLAGS_JBOD))
6474 return (AACERR);
6475 event = AAC_CFG_ADD;
6476 bus_id = (cid >> 24) & 0xf;
6477 tgt_id = cid & 0xffff;
6478 break;
6479 case AifEnDeleteJBOD:
6480 if (!(softs->flags & AAC_FLAGS_JBOD))
6481 return (AACERR);
6482 event = AAC_CFG_DELETE;
6483 bus_id = (cid >> 24) & 0xf;
6484 tgt_id = cid & 0xffff;
6485 break;
6487 if (softs->devcfg_wait_on == en_type)
6488 devcfg_needed = 1;
6489 break;
6491 case AifCmdJobProgress:
6492 if (LE_32((uint32_t)aif->data.PR[0].jd.type) == AifJobCtrZero) {
6493 int pr_status;
6494 uint32_t pr_ftick, pr_ctick;
6496 pr_status = LE_32((uint32_t)aif->data.PR[0].status);
6497 pr_ctick = LE_32(aif->data.PR[0].currentTick);
6498 pr_ftick = LE_32(aif->data.PR[0].finalTick);
6500 if ((pr_ctick == pr_ftick) ||
6501 (pr_status == AifJobStsSuccess))
6502 softs->devcfg_wait_on = AifEnContainerChange;
6503 else if ((pr_ctick == 0) &&
6504 (pr_status == AifJobStsRunning))
6505 softs->devcfg_wait_on = AifEnContainerChange;
6507 break;
6510 if (devcfg_needed) {
6511 softs->devcfg_wait_on = 0;
6512 (void) aac_probe_containers(softs);
6515 if (event != AAC_CFG_NULL_EXIST) {
6516 ASSERT(en_type == AifEnAddJBOD || en_type == AifEnDeleteJBOD);
6517 (void) aac_probe_jbod(softs,
6518 AAC_P2VTGT(softs, bus_id, tgt_id), event);
6520 return (AACOK);
6525 * Check and handle AIF events
6527 static void
6528 aac_aif_event(struct aac_softstate *softs)
6530 struct aac_fib *fibp;
6532 /*CONSTCOND*/
6533 while (1) {
6534 if (aac_return_aif(softs, &softs->aifctx, &fibp) != 0)
6535 break; /* No more AIFs to handle, end loop */
6537		/* AIF overrun; array create/delete events may have been missed. */
6538 if (softs->aifctx.ctx_overrun) {
6539 softs->aifctx.ctx_overrun = 0;
6542 /* AIF received, handle it */
6543 struct aac_aif_command *aifp =
6544 (struct aac_aif_command *)&fibp->data[0];
6545 uint32_t aif_command = LE_32((uint32_t)aifp->command);
6547 if (aif_command == AifCmdDriverNotify ||
6548 aif_command == AifCmdEventNotify ||
6549 aif_command == AifCmdJobProgress)
6550 (void) aac_handle_aif(softs, aifp);
6555 * Timeout recovery
6557 /*ARGSUSED*/
6558 static void
6559 aac_cmd_timeout(struct aac_softstate *softs, struct aac_cmd *acp)
6561 #ifdef DEBUG
6562 acp->fib_flags |= AACDB_FLAGS_FIB_TIMEOUT;
6563 AACDB_PRINT(softs, CE_WARN, "acp %p timed out", acp);
6564 AACDB_PRINT_FIB(softs, acp->slotp);
6565 #endif
6568	 * Besides firmware in an unhealthy state, an overloaded
6569	 * adapter may also incur pkt timeouts.
6570	 * An adapter with a slower IOP may take longer than 60
6571	 * seconds to process commands, for example while it is
6572	 * doing a build on a RAID-5 volume and servicing IO at
6573	 * the same time, so longer completion times should be
6574	 * tolerated.
6576 switch (aac_do_reset(softs)) {
6577 case AAC_IOP_RESET_SUCCEED:
6578 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING, NULL, CMD_RESET);
6579 aac_start_waiting_io(softs);
6580 break;
6581 case AAC_IOP_RESET_FAILED:
6582 /* Abort all waiting cmds when adapter is dead */
6583 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_TIMEOUT);
6584 break;
6585 case AAC_IOP_RESET_ABNORMAL:
6586 aac_start_waiting_io(softs);
6591 * The following function comes from Adaptec:
6593 * Time sync. command added to synchronize time with firmware every 30
6594 * minutes (required for correct AIF timestamps etc.)
6596 static void
6597 aac_sync_tick(struct aac_softstate *softs)
6599 ddi_acc_handle_t acc;
6600 int rval;
6602 mutex_enter(&softs->time_mutex);
6603 ASSERT(softs->time_sync <= softs->timebase);
6604 softs->time_sync = 0;
6605 mutex_exit(&softs->time_mutex);
6607 /* Time sync. with firmware every AAC_SYNC_TICK */
6608 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
6609 acc = softs->sync_ac.slotp->fib_acc_handle;
6611 ddi_put32(acc, (void *)&softs->sync_ac.slotp->fibp->data[0],
6612 ddi_get_time());
6613 rval = aac_sync_fib(softs, SendHostTime, AAC_FIB_SIZEOF(uint32_t));
6614 aac_sync_fib_slot_release(softs, &softs->sync_ac);
6616 mutex_enter(&softs->time_mutex);
6617 softs->time_sync = softs->timebase;
6618 if (rval != AACOK)
6619 /* retry shortly */
6620 softs->time_sync += aac_tick << 1;
6621 else
6622 softs->time_sync += AAC_SYNC_TICK;
6623 mutex_exit(&softs->time_mutex);
6627 * Timeout checking and handling
6629 static void
6630 aac_daemon(struct aac_softstate *softs)
6632 int time_out; /* set if timeout happened */
6633 int time_adjust;
6634 uint32_t softs_timebase;
6636 mutex_enter(&softs->time_mutex);
6637 ASSERT(softs->time_out <= softs->timebase);
6638 softs->time_out = 0;
6639 softs_timebase = softs->timebase;
6640 mutex_exit(&softs->time_mutex);
6642 /* Check slots for timeout pkts */
6643 time_adjust = 0;
6644 do {
6645 struct aac_cmd *acp;
6647 time_out = 0;
6648 for (acp = softs->q_busy.q_head; acp; acp = acp->next) {
6649 if (acp->timeout == 0)
6650 continue;
6653 * If timeout happened, update outstanding cmds
6654 * to be checked later again.
6656 if (time_adjust) {
6657 acp->timeout += time_adjust;
6658 continue;
6661 if (acp->timeout <= softs_timebase) {
6662 aac_cmd_timeout(softs, acp);
6663 time_out = 1;
6664 time_adjust = aac_tick * drv_usectohz(1000000);
6665 break; /* timeout happened */
6666 } else {
6667 break; /* no timeout */
6670 } while (time_out);
6672 mutex_enter(&softs->time_mutex);
6673 softs->time_out = softs->timebase + aac_tick;
6674 mutex_exit(&softs->time_mutex);
6678 * The event thread handles various tasks serially for the other parts of
6679 * the driver, so that they can run fast.
6681 static void
6682 aac_event_thread(struct aac_softstate *softs)
6684 int run = 1;
6686 DBCALLED(softs, 1);
6688 mutex_enter(&softs->ev_lock);
6689 while (run) {
6690 int events;
6692 if ((events = softs->events) == 0) {
6693 cv_wait(&softs->event_disp_cv, &softs->ev_lock);
6694 events = softs->events;
6696 softs->events = 0;
6697 mutex_exit(&softs->ev_lock);
6699 mutex_enter(&softs->io_lock);
6700 if ((softs->state & AAC_STATE_RUN) &&
6701 (softs->state & AAC_STATE_DEAD) == 0) {
6702 if (events & AAC_EVENT_TIMEOUT)
6703 aac_daemon(softs);
6704 if (events & AAC_EVENT_SYNCTICK)
6705 aac_sync_tick(softs);
6706 if (events & AAC_EVENT_AIF)
6707 aac_aif_event(softs);
6708 } else {
6709 run = 0;
6711 mutex_exit(&softs->io_lock);
6713 mutex_enter(&softs->ev_lock);
6716 cv_signal(&softs->event_wait_cv);
6717 mutex_exit(&softs->ev_lock);
6721 * Internal timer. It is only responsible for time counting and reporting
6722 * time related events. Event handling is done by aac_event_thread(), so
6723 * that the timer itself can be as precise as possible.
6725 static void
6726 aac_timer(void *arg)
6728 struct aac_softstate *softs = arg;
6729 int events = 0;
6731 mutex_enter(&softs->time_mutex);
6733 /* If timer is being stopped, exit */
6734 if (softs->timeout_id) {
6735 softs->timeout_id = timeout(aac_timer, (void *)softs,
6736 (aac_tick * drv_usectohz(1000000)));
6737 } else {
6738 mutex_exit(&softs->time_mutex);
6739 return;
6742 /* Time counting */
6743 softs->timebase += aac_tick;
6745 /* Check time related events */
6746 if (softs->time_out && softs->time_out <= softs->timebase)
6747 events |= AAC_EVENT_TIMEOUT;
6748 if (softs->time_sync && softs->time_sync <= softs->timebase)
6749 events |= AAC_EVENT_SYNCTICK;
6751 mutex_exit(&softs->time_mutex);
6753 if (events)
6754 aac_event_disp(softs, events);
6758 * Dispatch events to daemon thread for handling
6760 static void
6761 aac_event_disp(struct aac_softstate *softs, int events)
6763 mutex_enter(&softs->ev_lock);
6764 softs->events |= events;
6765 cv_broadcast(&softs->event_disp_cv);
6766 mutex_exit(&softs->ev_lock);
6770 * Architecture dependent functions
6772 static int
6773 aac_rx_get_fwstatus(struct aac_softstate *softs)
6775 return (PCI_MEM_GET32(softs, AAC_OMR0));
6778 static int
6779 aac_rx_get_mailbox(struct aac_softstate *softs, int mb)
6781 return (PCI_MEM_GET32(softs, AAC_RX_MAILBOX + mb * 4));
6784 static void
6785 aac_rx_set_mailbox(struct aac_softstate *softs, uint32_t cmd,
6786 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3)
6788 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX, cmd);
6789 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 4, arg0);
6790 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 8, arg1);
6791 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 12, arg2);
6792 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 16, arg3);
6795 static int
6796 aac_rkt_get_fwstatus(struct aac_softstate *softs)
6798 return (PCI_MEM_GET32(softs, AAC_OMR0));
6801 static int
6802 aac_rkt_get_mailbox(struct aac_softstate *softs, int mb)
6804 return (PCI_MEM_GET32(softs, AAC_RKT_MAILBOX + mb *4));
6807 static void
6808 aac_rkt_set_mailbox(struct aac_softstate *softs, uint32_t cmd,
6809 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3)
6811 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX, cmd);
6812 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 4, arg0);
6813 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 8, arg1);
6814 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 12, arg2);
6815 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 16, arg3);
6819 * cb_ops functions
6821 static int
6822 aac_open(dev_t *devp, int flag, int otyp, cred_t *cred)
6824 struct aac_softstate *softs;
6825 int minor0, minor;
6826 int instance;
6828 DBCALLED(NULL, 2);
6830 if (otyp != OTYP_BLK && otyp != OTYP_CHR)
6831 return (EINVAL);
6833 minor0 = getminor(*devp);
6834 minor = AAC_SCSA_MINOR(minor0);
6836 if (AAC_IS_SCSA_NODE(minor))
6837 return (scsi_hba_open(devp, flag, otyp, cred));
6839 instance = MINOR2INST(minor0);
6840 if (instance >= AAC_MAX_ADAPTERS)
6841 return (ENXIO);
6843 softs = ddi_get_soft_state(aac_softstatep, instance);
6844 if (softs == NULL)
6845 return (ENXIO);
6847 return (0);
6850 /*ARGSUSED*/
6851 static int
6852 aac_close(dev_t dev, int flag, int otyp, cred_t *cred)
6854 int minor0, minor;
6855 int instance;
6857 DBCALLED(NULL, 2);
6859 if (otyp != OTYP_BLK && otyp != OTYP_CHR)
6860 return (EINVAL);
6862 minor0 = getminor(dev);
6863 minor = AAC_SCSA_MINOR(minor0);
6865 if (AAC_IS_SCSA_NODE(minor))
6866 return (scsi_hba_close(dev, flag, otyp, cred));
6868 instance = MINOR2INST(minor0);
6869 if (instance >= AAC_MAX_ADAPTERS)
6870 return (ENXIO);
6872 return (0);
6875 static int
6876 aac_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p,
6877 int *rval_p)
6879 struct aac_softstate *softs;
6880 int minor0, minor;
6881 int instance;
6883 DBCALLED(NULL, 2);
6885 if (drv_priv(cred_p) != 0)
6886 return (EPERM);
6888 minor0 = getminor(dev);
6889 minor = AAC_SCSA_MINOR(minor0);
6891 if (AAC_IS_SCSA_NODE(minor))
6892 return (scsi_hba_ioctl(dev, cmd, arg, flag, cred_p, rval_p));
6894 instance = MINOR2INST(minor0);
6895 if (instance < AAC_MAX_ADAPTERS) {
6896 softs = ddi_get_soft_state(aac_softstatep, instance);
6897 return (aac_do_ioctl(softs, dev, cmd, arg, flag));
6899 return (ENXIO);
6903 * The IO fault service error handling callback function
6905 /*ARGSUSED*/
6906 static int
6907 aac_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
6910 * as the driver can always deal with an error in any dma or
6911 * access handle, we can just return the fme_status value.
6913 pci_ereport_post(dip, err, NULL);
6914 return (err->fme_status);
6918 * aac_fm_init - initialize fma capabilities and register with IO
6919 * fault services.
6921 static void
6922 aac_fm_init(struct aac_softstate *softs)
6925 * Need to change iblock to priority for new MSI intr
6927 ddi_iblock_cookie_t fm_ibc;
6929 softs->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, softs->devinfo_p,
6930 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
6931 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
6932 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
6934 /* Only register with IO Fault Services if we have some capability */
6935 if (softs->fm_capabilities) {
6936 /* Adjust access and dma attributes for FMA */
6937 softs->reg_attr.devacc_attr_access = DDI_FLAGERR_ACC;
6938 softs->addr_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
6939 softs->buf_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
6942 * Register capabilities with IO Fault Services.
6943 * fm_capabilities will be updated to indicate
6944 * capabilities actually supported (not requested.)
6946 ddi_fm_init(softs->devinfo_p, &softs->fm_capabilities, &fm_ibc);
6949 * Initialize pci ereport capabilities if ereport
6950 * capable (should always be.)
6952 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) ||
6953 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6954 pci_ereport_setup(softs->devinfo_p);
6958 * Register error callback if error callback capable.
6960 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6961 ddi_fm_handler_register(softs->devinfo_p,
6962 aac_fm_error_cb, (void *) softs);
6968 * aac_fm_fini - Releases fma capabilities and un-registers with IO
6969 * fault services.
6971 static void
6972 aac_fm_fini(struct aac_softstate *softs)
6974 /* Only unregister FMA capabilities if registered */
6975 if (softs->fm_capabilities) {
6977 * Un-register error callback if error callback capable.
6979 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6980 ddi_fm_handler_unregister(softs->devinfo_p);
6984 * Release any resources allocated by pci_ereport_setup()
6986 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) ||
6987 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6988 pci_ereport_teardown(softs->devinfo_p);
6991 /* Unregister from IO Fault Services */
6992 ddi_fm_fini(softs->devinfo_p);
6994 /* Adjust access and dma attributes for FMA */
6995 softs->reg_attr.devacc_attr_access = DDI_DEFAULT_ACC;
6996 softs->addr_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
6997 softs->buf_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
7002 aac_check_acc_handle(ddi_acc_handle_t handle)
7004 ddi_fm_error_t de;
7006 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
7007 return (de.fme_status);
7011 aac_check_dma_handle(ddi_dma_handle_t handle)
7013 ddi_fm_error_t de;
7015 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
7016 return (de.fme_status);
7019 void
7020 aac_fm_ereport(struct aac_softstate *softs, char *detail)
7022 uint64_t ena;
7023 char buf[FM_MAX_CLASS];
7025 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
7026 ena = fm_ena_generate(0, FM_ENA_FMT1);
7027 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities)) {
7028 ddi_fm_ereport_post(softs->devinfo_p, buf, ena, DDI_NOSLEEP,
7029 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
7034 * Autoconfiguration support
7036 static int
7037 aac_parse_devname(char *devnm, int *tgt, int *lun)
7039 char devbuf[SCSI_MAXNAMELEN];
7040 char *addr;
7041 char *p, *tp, *lp;
7042 long num;
7044 /* Parse dev name and address */
7045 (void) strcpy(devbuf, devnm);
7046 addr = "";
7047 for (p = devbuf; *p != '\0'; p++) {
7048 if (*p == '@') {
7049 addr = p + 1;
7050 *p = '\0';
7051 } else if (*p == ':') {
7052 *p = '\0';
7053 break;
7057	/* Parse target and lun */
7058 for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
7059 if (*p == ',') {
7060 lp = p + 1;
7061 *p = '\0';
7062 break;
7065 if (tgt && tp) {
7066 if (ddi_strtol(tp, NULL, 0x10, &num))
7067 return (AACERR);
7068 *tgt = (int)num;
7070 if (lun && lp) {
7071 if (ddi_strtol(lp, NULL, 0x10, &num))
7072 return (AACERR);
7073 *lun = (int)num;
7075 return (AACOK);
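/* Look up the child dev_info node for the given target/lun, if any */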
7078 static dev_info_t *
7079 aac_find_child(struct aac_softstate *softs, uint16_t tgt, uint8_t lun)
7081 dev_info_t *child = NULL;
7082 char addr[SCSI_MAXNAMELEN];
7083 char tmp[MAXNAMELEN];
7085 if (tgt < AAC_MAX_LD) {
7086 if (lun == 0) {
7087 struct aac_device *dvp = &softs->containers[tgt].dev;
7089 child = dvp->dip;
7091 } else {
7092 (void) sprintf(addr, "%x,%x", tgt, lun);
7093 for (child = ddi_get_child(softs->devinfo_p);
7094 child; child = ddi_get_next_sibling(child)) {
7095			/* We don't care about non-persistent nodes */
7096 if (ndi_dev_is_persistent_node(child) == 0)
7097 continue;
7099 if (aac_name_node(child, tmp, MAXNAMELEN) !=
7100 DDI_SUCCESS)
7101 continue;
7102 if (strcmp(addr, tmp) == 0)
7103 break;
7106 return (child);
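/*
 * Create a child device node for the probed scsi_device, set its target,
 * lun and compatible properties, and online it.
 */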
7109 static int
7110 aac_config_child(struct aac_softstate *softs, struct scsi_device *sd,
7111 dev_info_t **dipp)
7113 char *nodename = NULL;
7114 char **compatible = NULL;
7115 int ncompatible = 0;
7116 char *childname;
7117 dev_info_t *ldip = NULL;
7118 int tgt = sd->sd_address.a_target;
7119 int lun = sd->sd_address.a_lun;
7120 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
7121 int rval;
7123 DBCALLED(softs, 2);
7125 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
7126 NULL, &nodename, &compatible, &ncompatible);
7127 if (nodename == NULL) {
7128 AACDB_PRINT(softs, CE_WARN,
7129 "found no comptible driver for t%dL%d", tgt, lun);
7130 rval = NDI_FAILURE;
7131 goto finish;
7133 childname = (softs->legacy && dtype == DTYPE_DIRECT) ? "sd" : nodename;
7135 /* Create dev node */
7136 rval = ndi_devi_alloc(softs->devinfo_p, childname, DEVI_SID_NODEID,
7137 &ldip);
7138 if (rval == NDI_SUCCESS) {
7139 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt)
7140 != DDI_PROP_SUCCESS) {
7141 AACDB_PRINT(softs, CE_WARN, "unable to create "
7142 "property for t%dL%d (target)", tgt, lun);
7143 rval = NDI_FAILURE;
7144 goto finish;
7146 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun)
7147 != DDI_PROP_SUCCESS) {
7148 AACDB_PRINT(softs, CE_WARN, "unable to create "
7149 "property for t%dL%d (lun)", tgt, lun);
7150 rval = NDI_FAILURE;
7151 goto finish;
7153 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
7154 "compatible", compatible, ncompatible)
7155 != DDI_PROP_SUCCESS) {
7156 AACDB_PRINT(softs, CE_WARN, "unable to create "
7157 "property for t%dL%d (compatible)", tgt, lun);
7158 rval = NDI_FAILURE;
7159 goto finish;
7162 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
7163 if (rval != NDI_SUCCESS) {
7164 AACDB_PRINT(softs, CE_WARN, "unable to online t%dL%d",
7165 tgt, lun);
7166 ndi_prop_remove_all(ldip);
7167 (void) ndi_devi_free(ldip);
7170 finish:
7171 if (dipp)
7172 *dipp = ldip;
7174 scsi_hba_nodename_compatible_free(nodename, compatible);
7175 return (rval);
7178 /*ARGSUSED*/
7179 static int
7180 aac_probe_lun(struct aac_softstate *softs, struct scsi_device *sd)
7182 int tgt = sd->sd_address.a_target;
7183 int lun = sd->sd_address.a_lun;
7185 DBCALLED(softs, 2);
7187 if (tgt < AAC_MAX_LD) {
7188 enum aac_cfg_event event;
7190 if (lun == 0) {
7191 mutex_enter(&softs->io_lock);
7192 event = aac_probe_container(softs, tgt);
7193 mutex_exit(&softs->io_lock);
7194 if ((event != AAC_CFG_NULL_NOEXIST) &&
7195 (event != AAC_CFG_DELETE)) {
7196 if (scsi_hba_probe(sd, NULL) ==
7197 SCSIPROBE_EXISTS)
7198 return (NDI_SUCCESS);
7201 return (NDI_FAILURE);
7202 } else {
7203 int dtype;
7204 int qual; /* device qualifier */
7206 if (scsi_hba_probe(sd, NULL) != SCSIPROBE_EXISTS)
7207 return (NDI_FAILURE);
7209 dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
7210 qual = dtype >> 5;
7212 AACDB_PRINT(softs, CE_NOTE,
7213 "Phys. device found: tgt %d dtype %d: %s",
7214 tgt, dtype, sd->sd_inq->inq_vid);
7216	/* Only non-DASD and JBOD mode DASD are allowed to be exposed */
7217 if (dtype == DTYPE_RODIRECT /* CDROM */ ||
7218 dtype == DTYPE_SEQUENTIAL /* TAPE */ ||
7219 dtype == DTYPE_ESI /* SES */) {
7220 if (!(softs->flags & AAC_FLAGS_NONDASD))
7221 return (NDI_FAILURE);
7222 AACDB_PRINT(softs, CE_NOTE, "non-DASD %d found", tgt);
7224 } else if (dtype == DTYPE_DIRECT) {
7225 if (!(softs->flags & AAC_FLAGS_JBOD) || qual != 0)
7226 return (NDI_FAILURE);
7227 AACDB_PRINT(softs, CE_NOTE, "JBOD DASD %d found", tgt);
7230 mutex_enter(&softs->io_lock);
7231 softs->nondasds[AAC_PD(tgt)].dev.flags |= AAC_DFLAG_VALID;
7232 mutex_exit(&softs->io_lock);
7233 return (NDI_SUCCESS);
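/*
 * Configure a single target/lun: reuse an existing child node if present,
 * otherwise probe the device and create a new node for it.
 */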
7237 static int
7238 aac_config_lun(struct aac_softstate *softs, uint16_t tgt, uint8_t lun,
7239 dev_info_t **ldip)
7241 struct scsi_device sd;
7242 dev_info_t *child;
7243 int rval;
7245 DBCALLED(softs, 2);
7247 if ((child = aac_find_child(softs, tgt, lun)) != NULL) {
7248 if (ldip)
7249 *ldip = child;
7250 return (NDI_SUCCESS);
7253 bzero(&sd, sizeof (struct scsi_device));
7254 sd.sd_address.a_hba_tran = softs->hba_tran;
7255 sd.sd_address.a_target = (uint16_t)tgt;
7256 sd.sd_address.a_lun = (uint8_t)lun;
7257 if ((rval = aac_probe_lun(softs, &sd)) == NDI_SUCCESS)
7258 rval = aac_config_child(softs, &sd, ldip);
7259	/* scsi_unprobe is a no-op now, so free the inquiry buffer manually */
7260 if (sd.sd_inq) {
7261 kmem_free(sd.sd_inq, SUN_INQSIZE);
7262 sd.sd_inq = NULL;
7264 return (rval);
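/*
 * Enumerate the LUNs of a physical target with SCMD_REPORT_LUNS, retrying
 * once with a larger buffer when the reported list does not fit; if the
 * command fails, fall back to configuring LUN 0 only. Returns the number
 * of LUNs configured.
 */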
7267 static int
7268 aac_config_tgt(struct aac_softstate *softs, int tgt)
7270 struct scsi_address ap;
7271 struct buf *bp = NULL;
7272 int buf_len = AAC_SCSI_RPTLUNS_HEAD_SIZE + AAC_SCSI_RPTLUNS_ADDR_SIZE;
7273 int list_len = 0;
7274 int lun_total = 0;
7275 dev_info_t *ldip;
7276 int i;
7278 ap.a_hba_tran = softs->hba_tran;
7279 ap.a_target = (uint16_t)tgt;
7280 ap.a_lun = 0;
7282 for (i = 0; i < 2; i++) {
7283 struct scsi_pkt *pkt;
7284 uchar_t *cdb;
7285 uchar_t *p;
7286 uint32_t data;
7288 if (bp == NULL) {
7289 if ((bp = scsi_alloc_consistent_buf(&ap, NULL,
7290 buf_len, B_READ, NULL_FUNC, NULL)) == NULL)
7291 return (AACERR);
7293 if ((pkt = scsi_init_pkt(&ap, NULL, bp, CDB_GROUP5,
7294 sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT,
7295 NULL, NULL)) == NULL) {
7296 scsi_free_consistent_buf(bp);
7297 return (AACERR);
7299 cdb = pkt->pkt_cdbp;
7300 bzero(cdb, CDB_GROUP5);
7301 cdb[0] = SCMD_REPORT_LUNS;
7303		/* Convert buffer len from local to big-endian in the CDB */
7304 data = buf_len;
7305 for (p = &cdb[9]; p > &cdb[5]; p--) {
7306 *p = data & 0xff;
7307 data >>= 8;
7310 if (scsi_poll(pkt) < 0 ||
7311 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk) {
7312 scsi_destroy_pkt(pkt);
7313 break;
7316		/* Convert list_len from big-endian to local */
7317 for (p = (uchar_t *)bp->b_un.b_addr;
7318 p < (uchar_t *)bp->b_un.b_addr + 4; p++) {
7319 data <<= 8;
7320 data |= *p;
7322 list_len = data;
7323 if (buf_len < list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE) {
7324 scsi_free_consistent_buf(bp);
7325 bp = NULL;
7326 buf_len = list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE;
7328 scsi_destroy_pkt(pkt);
7330 if (i >= 2) {
7331 uint8_t *buf = (uint8_t *)(bp->b_un.b_addr +
7332 AAC_SCSI_RPTLUNS_HEAD_SIZE);
7334 for (i = 0; i < (list_len / AAC_SCSI_RPTLUNS_ADDR_SIZE); i++) {
7335 uint16_t lun;
7337 /* Determine report luns addressing type */
7338 switch (buf[0] & AAC_SCSI_RPTLUNS_ADDR_MASK) {
7340 * Vendors in the field have been found to be
7341 * concatenating bus/target/lun to equal the
7342 * complete lun value instead of switching to
7343 * flat space addressing
7345 case AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL:
7346 case AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT:
7347 case AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE:
7348 lun = ((buf[0] & 0x3f) << 8) | buf[1];
7349 if (lun > UINT8_MAX) {
7350 AACDB_PRINT(softs, CE_WARN,
7351 "abnormal lun number: %d", lun);
7352 break;
7354 if (aac_config_lun(softs, tgt, lun, &ldip) ==
7355 NDI_SUCCESS)
7356 lun_total++;
7357 break;
7360 buf += AAC_SCSI_RPTLUNS_ADDR_SIZE;
7362 } else {
7363		/* The target may not support SCMD_REPORT_LUNS. */
7364 if (aac_config_lun(softs, tgt, 0, &ldip) == NDI_SUCCESS)
7365 lun_total++;
7367 scsi_free_consistent_buf(bp);
7368 return (lun_total);
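/* Mark a target as being configured (en != 0) or not (en == 0) */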
7371 static void
7372 aac_devcfg(struct aac_softstate *softs, int tgt, int en)
7374 struct aac_device *dvp;
7376 mutex_enter(&softs->io_lock);
7377 dvp = AAC_DEV(softs, tgt);
7378 if (en)
7379 dvp->flags |= AAC_DFLAG_CONFIGURING;
7380 else
7381 dvp->flags &= ~AAC_DFLAG_CONFIGURING;
7382 mutex_exit(&softs->io_lock);
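/*
 * Bus config entry point: configure a single named device, or enumerate
 * all logical containers and the physical devices behind the adapter.
 */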
7385 static int
7386 aac_tran_bus_config(dev_info_t *parent, uint_t flags, ddi_bus_config_op_t op,
7387 void *arg, dev_info_t **childp)
7389 struct aac_softstate *softs;
7390 int circ = 0;
7391 int rval;
7393 if ((softs = ddi_get_soft_state(aac_softstatep,
7394 ddi_get_instance(parent))) == NULL)
7395 return (NDI_FAILURE);
7397 /* Commands for bus config should be blocked as the bus is quiesced */
7398 mutex_enter(&softs->io_lock);
7399 if (softs->state & AAC_STATE_QUIESCED) {
7400 AACDB_PRINT(softs, CE_NOTE,
7401 "bus_config abroted because bus is quiesced");
7402 mutex_exit(&softs->io_lock);
7403 return (NDI_FAILURE);
7405 mutex_exit(&softs->io_lock);
7407 DBCALLED(softs, 1);
7409 /* Hold the nexus across the bus_config */
7410 ndi_devi_enter(parent, &circ);
7411 switch (op) {
7412 case BUS_CONFIG_ONE: {
7413 int tgt, lun;
7415 if (aac_parse_devname(arg, &tgt, &lun) != AACOK) {
7416 rval = NDI_FAILURE;
7417 break;
7419 if (tgt >= AAC_MAX_LD) {
7420 if (tgt >= AAC_MAX_DEV(softs)) {
7421 rval = NDI_FAILURE;
7422 break;
7426 AAC_DEVCFG_BEGIN(softs, tgt);
7427 rval = aac_config_lun(softs, tgt, lun, childp);
7428 AAC_DEVCFG_END(softs, tgt);
7429 break;
7432 case BUS_CONFIG_DRIVER:
7433 case BUS_CONFIG_ALL: {
7434 uint32_t bus, tgt;
7435 int index, total;
7437 for (tgt = 0; tgt < AAC_MAX_LD; tgt++) {
7438 AAC_DEVCFG_BEGIN(softs, tgt);
7439 (void) aac_config_lun(softs, tgt, 0, NULL);
7440 AAC_DEVCFG_END(softs, tgt);
7443 /* Config the non-DASD devices connected to the card */
7444 total = 0;
7445 index = AAC_MAX_LD;
7446 for (bus = 0; bus < softs->bus_max; bus++) {
7447 AACDB_PRINT(softs, CE_NOTE, "bus %d:", bus);
7448 for (tgt = 0; tgt < softs->tgt_max; tgt++, index++) {
7449 AAC_DEVCFG_BEGIN(softs, index);
7450 if (aac_config_tgt(softs, index))
7451 total++;
7452 AAC_DEVCFG_END(softs, index);
7455 AACDB_PRINT(softs, CE_CONT,
7456 "?Total %d phys. device(s) found", total);
7457 rval = NDI_SUCCESS;
7458 break;
7462 if (rval == NDI_SUCCESS)
7463 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
7464 ndi_devi_exit(parent, circ);
7465 return (rval);
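/*
 * Handle a dynamic reconfiguration event: online the device node for a
 * newly valid target, or reset the adapter and offline the node for a
 * removed one.
 */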
7468 /*ARGSUSED*/
7469 static int
7470 aac_handle_dr(struct aac_softstate *softs, int tgt, int lun, int event)
7472 struct aac_device *dvp;
7473 dev_info_t *dip;
7474 int valid;
7475 int circ1 = 0;
7477 DBCALLED(softs, 1);
7479 /* Hold the nexus across the bus_config */
7480 dvp = AAC_DEV(softs, tgt);
7481 valid = AAC_DEV_IS_VALID(dvp);
7482 dip = dvp->dip;
7483 if (!(softs->state & AAC_STATE_RUN))
7484 return (AACERR);
7485 mutex_exit(&softs->io_lock);
7487 switch (event) {
7488 case AAC_CFG_ADD:
7489 case AAC_CFG_DELETE:
7490 /* Device onlined */
7491 if (dip == NULL && valid) {
7492 ndi_devi_enter(softs->devinfo_p, &circ1);
7493 (void) aac_config_lun(softs, tgt, 0, NULL);
7494 AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d onlined",
7495 softs->instance, tgt, lun);
7496 ndi_devi_exit(softs->devinfo_p, circ1);
7498 /* Device offlined */
7499 if (dip && !valid) {
7500 mutex_enter(&softs->io_lock);
7501 (void) aac_do_reset(softs);
7502 mutex_exit(&softs->io_lock);
7504 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
7505 AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d offlined",
7506 softs->instance, tgt, lun);
7508 break;
7511 mutex_enter(&softs->io_lock);
7512 return (AACOK);
7515 #ifdef DEBUG
7517 /* -------------------------debug aid functions-------------------------- */
7519 #define AAC_FIB_CMD_KEY_STRINGS \
7520 TestCommandResponse, "TestCommandResponse", \
7521 TestAdapterCommand, "TestAdapterCommand", \
7522 LastTestCommand, "LastTestCommand", \
7523 ReinitHostNormCommandQueue, "ReinitHostNormCommandQueue", \
7524 ReinitHostHighCommandQueue, "ReinitHostHighCommandQueue", \
7525 ReinitHostHighRespQueue, "ReinitHostHighRespQueue", \
7526 ReinitHostNormRespQueue, "ReinitHostNormRespQueue", \
7527 ReinitAdapNormCommandQueue, "ReinitAdapNormCommandQueue", \
7528 ReinitAdapHighCommandQueue, "ReinitAdapHighCommandQueue", \
7529 ReinitAdapHighRespQueue, "ReinitAdapHighRespQueue", \
7530 ReinitAdapNormRespQueue, "ReinitAdapNormRespQueue", \
7531 InterfaceShutdown, "InterfaceShutdown", \
7532 DmaCommandFib, "DmaCommandFib", \
7533 StartProfile, "StartProfile", \
7534 TermProfile, "TermProfile", \
7535 SpeedTest, "SpeedTest", \
7536 TakeABreakPt, "TakeABreakPt", \
7537 RequestPerfData, "RequestPerfData", \
7538 SetInterruptDefTimer, "SetInterruptDefTimer", \
7539 SetInterruptDefCount, "SetInterruptDefCount", \
7540 GetInterruptDefStatus, "GetInterruptDefStatus", \
7541 LastCommCommand, "LastCommCommand", \
7542 NuFileSystem, "NuFileSystem", \
7543 UFS, "UFS", \
7544 HostFileSystem, "HostFileSystem", \
7545 LastFileSystemCommand, "LastFileSystemCommand", \
7546 ContainerCommand, "ContainerCommand", \
7547 ContainerCommand64, "ContainerCommand64", \
7548 ClusterCommand, "ClusterCommand", \
7549 ScsiPortCommand, "ScsiPortCommand", \
7550 ScsiPortCommandU64, "ScsiPortCommandU64", \
7551 AifRequest, "AifRequest", \
7552 CheckRevision, "CheckRevision", \
7553 FsaHostShutdown, "FsaHostShutdown", \
7554 RequestAdapterInfo, "RequestAdapterInfo", \
7555 IsAdapterPaused, "IsAdapterPaused", \
7556 SendHostTime, "SendHostTime", \
7557 LastMiscCommand, "LastMiscCommand"
7559 #define AAC_CTVM_SUBCMD_KEY_STRINGS \
7560 VM_Null, "VM_Null", \
7561 VM_NameServe, "VM_NameServe", \
7562 VM_ContainerConfig, "VM_ContainerConfig", \
7563 VM_Ioctl, "VM_Ioctl", \
7564 VM_FilesystemIoctl, "VM_FilesystemIoctl", \
7565 VM_CloseAll, "VM_CloseAll", \
7566 VM_CtBlockRead, "VM_CtBlockRead", \
7567 VM_CtBlockWrite, "VM_CtBlockWrite", \
7568 VM_SliceBlockRead, "VM_SliceBlockRead", \
7569 VM_SliceBlockWrite, "VM_SliceBlockWrite", \
7570 VM_DriveBlockRead, "VM_DriveBlockRead", \
7571 VM_DriveBlockWrite, "VM_DriveBlockWrite", \
7572 VM_EnclosureMgt, "VM_EnclosureMgt", \
7573 VM_Unused, "VM_Unused", \
7574 VM_CtBlockVerify, "VM_CtBlockVerify", \
7575 VM_CtPerf, "VM_CtPerf", \
7576 VM_CtBlockRead64, "VM_CtBlockRead64", \
7577 VM_CtBlockWrite64, "VM_CtBlockWrite64", \
7578 VM_CtBlockVerify64, "VM_CtBlockVerify64", \
7579 VM_CtHostRead64, "VM_CtHostRead64", \
7580 VM_CtHostWrite64, "VM_CtHostWrite64", \
7581 VM_NameServe64, "VM_NameServe64"
7583 #define AAC_CT_SUBCMD_KEY_STRINGS \
7584 CT_Null, "CT_Null", \
7585 CT_GET_SLICE_COUNT, "CT_GET_SLICE_COUNT", \
7586 CT_GET_PARTITION_COUNT, "CT_GET_PARTITION_COUNT", \
7587 CT_GET_PARTITION_INFO, "CT_GET_PARTITION_INFO", \
7588 CT_GET_CONTAINER_COUNT, "CT_GET_CONTAINER_COUNT", \
7589 CT_GET_CONTAINER_INFO_OLD, "CT_GET_CONTAINER_INFO_OLD", \
7590 CT_WRITE_MBR, "CT_WRITE_MBR", \
7591 CT_WRITE_PARTITION, "CT_WRITE_PARTITION", \
7592 CT_UPDATE_PARTITION, "CT_UPDATE_PARTITION", \
7593 CT_UNLOAD_CONTAINER, "CT_UNLOAD_CONTAINER", \
7594 CT_CONFIG_SINGLE_PRIMARY, "CT_CONFIG_SINGLE_PRIMARY", \
7595 CT_READ_CONFIG_AGE, "CT_READ_CONFIG_AGE", \
7596 CT_WRITE_CONFIG_AGE, "CT_WRITE_CONFIG_AGE", \
7597 CT_READ_SERIAL_NUMBER, "CT_READ_SERIAL_NUMBER", \
7598 CT_ZERO_PAR_ENTRY, "CT_ZERO_PAR_ENTRY", \
7599 CT_READ_MBR, "CT_READ_MBR", \
7600 CT_READ_PARTITION, "CT_READ_PARTITION", \
7601 CT_DESTROY_CONTAINER, "CT_DESTROY_CONTAINER", \
7602 CT_DESTROY2_CONTAINER, "CT_DESTROY2_CONTAINER", \
7603 CT_SLICE_SIZE, "CT_SLICE_SIZE", \
7604 CT_CHECK_CONFLICTS, "CT_CHECK_CONFLICTS", \
7605 CT_MOVE_CONTAINER, "CT_MOVE_CONTAINER", \
7606 CT_READ_LAST_DRIVE, "CT_READ_LAST_DRIVE", \
7607 CT_WRITE_LAST_DRIVE, "CT_WRITE_LAST_DRIVE", \
7608 CT_UNMIRROR, "CT_UNMIRROR", \
7609 CT_MIRROR_DELAY, "CT_MIRROR_DELAY", \
7610 CT_GEN_MIRROR, "CT_GEN_MIRROR", \
7611 CT_GEN_MIRROR2, "CT_GEN_MIRROR2", \
7612 CT_TEST_CONTAINER, "CT_TEST_CONTAINER", \
7613 CT_MOVE2, "CT_MOVE2", \
7614 CT_SPLIT, "CT_SPLIT", \
7615 CT_SPLIT2, "CT_SPLIT2", \
7616 CT_SPLIT_BROKEN, "CT_SPLIT_BROKEN", \
7617 CT_SPLIT_BROKEN2, "CT_SPLIT_BROKEN2", \
7618 CT_RECONFIG, "CT_RECONFIG", \
7619 CT_BREAK2, "CT_BREAK2", \
7620 CT_BREAK, "CT_BREAK", \
7621 CT_MERGE2, "CT_MERGE2", \
7622 CT_MERGE, "CT_MERGE", \
7623 CT_FORCE_ERROR, "CT_FORCE_ERROR", \
7624 CT_CLEAR_ERROR, "CT_CLEAR_ERROR", \
7625 CT_ASSIGN_FAILOVER, "CT_ASSIGN_FAILOVER", \
7626 CT_CLEAR_FAILOVER, "CT_CLEAR_FAILOVER", \
7627 CT_GET_FAILOVER_DATA, "CT_GET_FAILOVER_DATA", \
7628 CT_VOLUME_ADD, "CT_VOLUME_ADD", \
7629 CT_VOLUME_ADD2, "CT_VOLUME_ADD2", \
7630 CT_MIRROR_STATUS, "CT_MIRROR_STATUS", \
7631 CT_COPY_STATUS, "CT_COPY_STATUS", \
7632 CT_COPY, "CT_COPY", \
7633 CT_UNLOCK_CONTAINER, "CT_UNLOCK_CONTAINER", \
7634 CT_LOCK_CONTAINER, "CT_LOCK_CONTAINER", \
7635 CT_MAKE_READ_ONLY, "CT_MAKE_READ_ONLY", \
7636 CT_MAKE_READ_WRITE, "CT_MAKE_READ_WRITE", \
7637 CT_CLEAN_DEAD, "CT_CLEAN_DEAD", \
7638 CT_ABORT_MIRROR_COMMAND, "CT_ABORT_MIRROR_COMMAND", \
7639 CT_SET, "CT_SET", \
7640 CT_GET, "CT_GET", \
7641 CT_GET_NVLOG_ENTRY, "CT_GET_NVLOG_ENTRY", \
7642 CT_GET_DELAY, "CT_GET_DELAY", \
7643 CT_ZERO_CONTAINER_SPACE, "CT_ZERO_CONTAINER_SPACE", \
7644 CT_GET_ZERO_STATUS, "CT_GET_ZERO_STATUS", \
7645 CT_SCRUB, "CT_SCRUB", \
7646 CT_GET_SCRUB_STATUS, "CT_GET_SCRUB_STATUS", \
7647 CT_GET_SLICE_INFO, "CT_GET_SLICE_INFO", \
7648 CT_GET_SCSI_METHOD, "CT_GET_SCSI_METHOD", \
7649 CT_PAUSE_IO, "CT_PAUSE_IO", \
7650 CT_RELEASE_IO, "CT_RELEASE_IO", \
7651 CT_SCRUB2, "CT_SCRUB2", \
7652 CT_MCHECK, "CT_MCHECK", \
7653 CT_CORRUPT, "CT_CORRUPT", \
7654 CT_GET_TASK_COUNT, "CT_GET_TASK_COUNT", \
7655 CT_PROMOTE, "CT_PROMOTE", \
7656 CT_SET_DEAD, "CT_SET_DEAD", \
7657 CT_CONTAINER_OPTIONS, "CT_CONTAINER_OPTIONS", \
7658 CT_GET_NV_PARAM, "CT_GET_NV_PARAM", \
7659 CT_GET_PARAM, "CT_GET_PARAM", \
7660 CT_NV_PARAM_SIZE, "CT_NV_PARAM_SIZE", \
7661 CT_COMMON_PARAM_SIZE, "CT_COMMON_PARAM_SIZE", \
7662 CT_PLATFORM_PARAM_SIZE, "CT_PLATFORM_PARAM_SIZE", \
7663 CT_SET_NV_PARAM, "CT_SET_NV_PARAM", \
7664 CT_ABORT_SCRUB, "CT_ABORT_SCRUB", \
7665 CT_GET_SCRUB_ERROR, "CT_GET_SCRUB_ERROR", \
7666 CT_LABEL_CONTAINER, "CT_LABEL_CONTAINER", \
7667 CT_CONTINUE_DATA, "CT_CONTINUE_DATA", \
7668 CT_STOP_DATA, "CT_STOP_DATA", \
7669 CT_GET_PARTITION_TABLE, "CT_GET_PARTITION_TABLE", \
7670 CT_GET_DISK_PARTITIONS, "CT_GET_DISK_PARTITIONS", \
7671 CT_GET_MISC_STATUS, "CT_GET_MISC_STATUS", \
7672 CT_GET_CONTAINER_PERF_INFO, "CT_GET_CONTAINER_PERF_INFO", \
7673 CT_GET_TIME, "CT_GET_TIME", \
7674 CT_READ_DATA, "CT_READ_DATA", \
7675 CT_CTR, "CT_CTR", \
7676 CT_CTL, "CT_CTL", \
7677 CT_DRAINIO, "CT_DRAINIO", \
7678 CT_RELEASEIO, "CT_RELEASEIO", \
7679 CT_GET_NVRAM, "CT_GET_NVRAM", \
7680 CT_GET_MEMORY, "CT_GET_MEMORY", \
7681 CT_PRINT_CT_LOG, "CT_PRINT_CT_LOG", \
7682 CT_ADD_LEVEL, "CT_ADD_LEVEL", \
7683 CT_NV_ZERO, "CT_NV_ZERO", \
7684 CT_READ_SIGNATURE, "CT_READ_SIGNATURE", \
7685 CT_THROTTLE_ON, "CT_THROTTLE_ON", \
7686 CT_THROTTLE_OFF, "CT_THROTTLE_OFF", \
7687 CT_GET_THROTTLE_STATS, "CT_GET_THROTTLE_STATS", \
7688 CT_MAKE_SNAPSHOT, "CT_MAKE_SNAPSHOT", \
7689 CT_REMOVE_SNAPSHOT, "CT_REMOVE_SNAPSHOT", \
7690 CT_WRITE_USER_FLAGS, "CT_WRITE_USER_FLAGS", \
7691 CT_READ_USER_FLAGS, "CT_READ_USER_FLAGS", \
7692 CT_MONITOR, "CT_MONITOR", \
7693 CT_GEN_MORPH, "CT_GEN_MORPH", \
7694 CT_GET_SNAPSHOT_INFO, "CT_GET_SNAPSHOT_INFO", \
7695 CT_CACHE_SET, "CT_CACHE_SET", \
7696 CT_CACHE_STAT, "CT_CACHE_STAT", \
7697 CT_TRACE_START, "CT_TRACE_START", \
7698 CT_TRACE_STOP, "CT_TRACE_STOP", \
7699 CT_TRACE_ENABLE, "CT_TRACE_ENABLE", \
7700 CT_TRACE_DISABLE, "CT_TRACE_DISABLE", \
7701 CT_FORCE_CORE_DUMP, "CT_FORCE_CORE_DUMP", \
7702 CT_SET_SERIAL_NUMBER, "CT_SET_SERIAL_NUMBER", \
7703 CT_RESET_SERIAL_NUMBER, "CT_RESET_SERIAL_NUMBER", \
7704 CT_ENABLE_RAID5, "CT_ENABLE_RAID5", \
7705 CT_CLEAR_VALID_DUMP_FLAG, "CT_CLEAR_VALID_DUMP_FLAG", \
7706 CT_GET_MEM_STATS, "CT_GET_MEM_STATS", \
7707 CT_GET_CORE_SIZE, "CT_GET_CORE_SIZE", \
7708 CT_CREATE_CONTAINER_OLD, "CT_CREATE_CONTAINER_OLD", \
7709 CT_STOP_DUMPS, "CT_STOP_DUMPS", \
7710 CT_PANIC_ON_TAKE_A_BREAK, "CT_PANIC_ON_TAKE_A_BREAK", \
7711 CT_GET_CACHE_STATS, "CT_GET_CACHE_STATS", \
7712 CT_MOVE_PARTITION, "CT_MOVE_PARTITION", \
7713 CT_FLUSH_CACHE, "CT_FLUSH_CACHE", \
7714 CT_READ_NAME, "CT_READ_NAME", \
7715 CT_WRITE_NAME, "CT_WRITE_NAME", \
7716 CT_TOSS_CACHE, "CT_TOSS_CACHE", \
7717 CT_LOCK_DRAINIO, "CT_LOCK_DRAINIO", \
7718 CT_CONTAINER_OFFLINE, "CT_CONTAINER_OFFLINE", \
7719 CT_SET_CACHE_SIZE, "CT_SET_CACHE_SIZE", \
7720 CT_CLEAN_SHUTDOWN_STATUS, "CT_CLEAN_SHUTDOWN_STATUS", \
7721 CT_CLEAR_DISKLOG_ON_DISK, "CT_CLEAR_DISKLOG_ON_DISK", \
7722 CT_CLEAR_ALL_DISKLOG, "CT_CLEAR_ALL_DISKLOG", \
7723 CT_CACHE_FAVOR, "CT_CACHE_FAVOR", \
7724 CT_READ_PASSTHRU_MBR, "CT_READ_PASSTHRU_MBR", \
7725 CT_SCRUB_NOFIX, "CT_SCRUB_NOFIX", \
7726 CT_SCRUB2_NOFIX, "CT_SCRUB2_NOFIX", \
7727 CT_FLUSH, "CT_FLUSH", \
7728 CT_REBUILD, "CT_REBUILD", \
7729 CT_FLUSH_CONTAINER, "CT_FLUSH_CONTAINER", \
7730 CT_RESTART, "CT_RESTART", \
7731 CT_GET_CONFIG_STATUS, "CT_GET_CONFIG_STATUS", \
7732 CT_TRACE_FLAG, "CT_TRACE_FLAG", \
7733 CT_RESTART_MORPH, "CT_RESTART_MORPH", \
7734 CT_GET_TRACE_INFO, "CT_GET_TRACE_INFO", \
7735 CT_GET_TRACE_ITEM, "CT_GET_TRACE_ITEM", \
7736 CT_COMMIT_CONFIG, "CT_COMMIT_CONFIG", \
7737 CT_CONTAINER_EXISTS, "CT_CONTAINER_EXISTS", \
7738 CT_GET_SLICE_FROM_DEVT, "CT_GET_SLICE_FROM_DEVT", \
7739 CT_OPEN_READ_WRITE, "CT_OPEN_READ_WRITE", \
7740 CT_WRITE_MEMORY_BLOCK, "CT_WRITE_MEMORY_BLOCK", \
7741 CT_GET_CACHE_PARAMS, "CT_GET_CACHE_PARAMS", \
7742 CT_CRAZY_CACHE, "CT_CRAZY_CACHE", \
7743 CT_GET_PROFILE_STRUCT, "CT_GET_PROFILE_STRUCT", \
7744 CT_SET_IO_TRACE_FLAG, "CT_SET_IO_TRACE_FLAG", \
7745 CT_GET_IO_TRACE_STRUCT, "CT_GET_IO_TRACE_STRUCT", \
7746 CT_CID_TO_64BITS_UID, "CT_CID_TO_64BITS_UID", \
7747 CT_64BITS_UID_TO_CID, "CT_64BITS_UID_TO_CID", \
7748 CT_PAR_TO_64BITS_UID, "CT_PAR_TO_64BITS_UID", \
7749 CT_CID_TO_32BITS_UID, "CT_CID_TO_32BITS_UID", \
7750 CT_32BITS_UID_TO_CID, "CT_32BITS_UID_TO_CID", \
7751 CT_PAR_TO_32BITS_UID, "CT_PAR_TO_32BITS_UID", \
7752 CT_SET_FAILOVER_OPTION, "CT_SET_FAILOVER_OPTION", \
7753 CT_GET_FAILOVER_OPTION, "CT_GET_FAILOVER_OPTION", \
7754 CT_STRIPE_ADD2, "CT_STRIPE_ADD2", \
7755 CT_CREATE_VOLUME_SET, "CT_CREATE_VOLUME_SET", \
7756 CT_CREATE_STRIPE_SET, "CT_CREATE_STRIPE_SET", \
7757 CT_VERIFY_CONTAINER, "CT_VERIFY_CONTAINER", \
7758 CT_IS_CONTAINER_DEAD, "CT_IS_CONTAINER_DEAD", \
7759 CT_GET_CONTAINER_OPTION, "CT_GET_CONTAINER_OPTION", \
7760 CT_GET_SNAPSHOT_UNUSED_STRUCT, "CT_GET_SNAPSHOT_UNUSED_STRUCT", \
7761 CT_CLEAR_SNAPSHOT_UNUSED_STRUCT, "CT_CLEAR_SNAPSHOT_UNUSED_STRUCT", \
7762 CT_GET_CONTAINER_INFO, "CT_GET_CONTAINER_INFO", \
7763 CT_CREATE_CONTAINER, "CT_CREATE_CONTAINER", \
7764 CT_CHANGE_CREATIONINFO, "CT_CHANGE_CREATIONINFO", \
7765 CT_CHECK_CONFLICT_UID, "CT_CHECK_CONFLICT_UID", \
7766 CT_CONTAINER_UID_CHECK, "CT_CONTAINER_UID_CHECK", \
7767 CT_IS_CONTAINER_MEATADATA_STANDARD, \
7768 "CT_IS_CONTAINER_MEATADATA_STANDARD", \
7769 CT_IS_SLICE_METADATA_STANDARD, "CT_IS_SLICE_METADATA_STANDARD", \
7770 CT_GET_IMPORT_COUNT, "CT_GET_IMPORT_COUNT", \
7771 CT_CANCEL_ALL_IMPORTS, "CT_CANCEL_ALL_IMPORTS", \
7772 CT_GET_IMPORT_INFO, "CT_GET_IMPORT_INFO", \
7773 CT_IMPORT_ARRAY, "CT_IMPORT_ARRAY", \
7774 CT_GET_LOG_SIZE, "CT_GET_LOG_SIZE", \
7775 CT_ALARM_GET_STATE, "CT_ALARM_GET_STATE", \
7776 CT_ALARM_SET_STATE, "CT_ALARM_SET_STATE", \
7777 CT_ALARM_ON_OFF, "CT_ALARM_ON_OFF", \
7778 CT_GET_EE_OEM_ID, "CT_GET_EE_OEM_ID", \
7779 CT_GET_PPI_HEADERS, "CT_GET_PPI_HEADERS", \
7780 CT_GET_PPI_DATA, "CT_GET_PPI_DATA", \
7781 CT_GET_PPI_ENTRIES, "CT_GET_PPI_ENTRIES", \
7782 CT_DELETE_PPI_BUNDLE, "CT_DELETE_PPI_BUNDLE", \
7783 CT_GET_PARTITION_TABLE_2, "CT_GET_PARTITION_TABLE_2", \
7784 CT_GET_PARTITION_INFO_2, "CT_GET_PARTITION_INFO_2", \
7785 CT_GET_DISK_PARTITIONS_2, "CT_GET_DISK_PARTITIONS_2", \
7786 CT_QUIESCE_ADAPTER, "CT_QUIESCE_ADAPTER", \
7787 CT_CLEAR_PPI_TABLE, "CT_CLEAR_PPI_TABLE"
7789 #define AAC_CL_SUBCMD_KEY_STRINGS \
7790 CL_NULL, "CL_NULL", \
7791 DS_INIT, "DS_INIT", \
7792 DS_RESCAN, "DS_RESCAN", \
7793 DS_CREATE, "DS_CREATE", \
7794 DS_DELETE, "DS_DELETE", \
7795 DS_ADD_DISK, "DS_ADD_DISK", \
7796 DS_REMOVE_DISK, "DS_REMOVE_DISK", \
7797 DS_MOVE_DISK, "DS_MOVE_DISK", \
7798 DS_TAKE_OWNERSHIP, "DS_TAKE_OWNERSHIP", \
7799 DS_RELEASE_OWNERSHIP, "DS_RELEASE_OWNERSHIP", \
7800 DS_FORCE_OWNERSHIP, "DS_FORCE_OWNERSHIP", \
7801 DS_GET_DISK_SET_PARAM, "DS_GET_DISK_SET_PARAM", \
7802 DS_GET_DRIVE_PARAM, "DS_GET_DRIVE_PARAM", \
7803 DS_GET_SLICE_PARAM, "DS_GET_SLICE_PARAM", \
7804 DS_GET_DISK_SETS, "DS_GET_DISK_SETS", \
7805 DS_GET_DRIVES, "DS_GET_DRIVES", \
7806 DS_SET_DISK_SET_PARAM, "DS_SET_DISK_SET_PARAM", \
7807 DS_ONLINE, "DS_ONLINE", \
7808 DS_OFFLINE, "DS_OFFLINE", \
7809 DS_ONLINE_CONTAINERS, "DS_ONLINE_CONTAINERS", \
7810 DS_FSAPRINT, "DS_FSAPRINT", \
7811 CL_CFG_SET_HOST_IDS, "CL_CFG_SET_HOST_IDS", \
7812 CL_CFG_SET_PARTNER_HOST_IDS, "CL_CFG_SET_PARTNER_HOST_IDS", \
7813 CL_CFG_GET_CLUSTER_CONFIG, "CL_CFG_GET_CLUSTER_CONFIG", \
7814 CC_CLI_CLEAR_MESSAGE_BUFFER, "CC_CLI_CLEAR_MESSAGE_BUFFER", \
7815 CC_SRV_CLEAR_MESSAGE_BUFFER, "CC_SRV_CLEAR_MESSAGE_BUFFER", \
7816 CC_CLI_SHOW_MESSAGE_BUFFER, "CC_CLI_SHOW_MESSAGE_BUFFER", \
7817 CC_SRV_SHOW_MESSAGE_BUFFER, "CC_SRV_SHOW_MESSAGE_BUFFER", \
7818 CC_CLI_SEND_MESSAGE, "CC_CLI_SEND_MESSAGE", \
7819 CC_SRV_SEND_MESSAGE, "CC_SRV_SEND_MESSAGE", \
7820 CC_CLI_GET_MESSAGE, "CC_CLI_GET_MESSAGE", \
7821 CC_SRV_GET_MESSAGE, "CC_SRV_GET_MESSAGE", \
7822 CC_SEND_TEST_MESSAGE, "CC_SEND_TEST_MESSAGE", \
7823 CC_GET_BUSINFO, "CC_GET_BUSINFO", \
7824 CC_GET_PORTINFO, "CC_GET_PORTINFO", \
7825 CC_GET_NAMEINFO, "CC_GET_NAMEINFO", \
7826 CC_GET_CONFIGINFO, "CC_GET_CONFIGINFO", \
7827 CQ_QUORUM_OP, "CQ_QUORUM_OP"
7829 #define AAC_AIF_SUBCMD_KEY_STRINGS \
7830 AifCmdEventNotify, "AifCmdEventNotify", \
7831 AifCmdJobProgress, "AifCmdJobProgress", \
7832 AifCmdAPIReport, "AifCmdAPIReport", \
7833 AifCmdDriverNotify, "AifCmdDriverNotify", \
7834 AifReqJobList, "AifReqJobList", \
7835 AifReqJobsForCtr, "AifReqJobsForCtr", \
7836 AifReqJobsForScsi, "AifReqJobsForScsi", \
7837 AifReqJobReport, "AifReqJobReport", \
7838 AifReqTerminateJob, "AifReqTerminateJob", \
7839 AifReqSuspendJob, "AifReqSuspendJob", \
7840 AifReqResumeJob, "AifReqResumeJob", \
7841 AifReqSendAPIReport, "AifReqSendAPIReport", \
7842 AifReqAPIJobStart, "AifReqAPIJobStart", \
7843 AifReqAPIJobUpdate, "AifReqAPIJobUpdate", \
7844 AifReqAPIJobFinish, "AifReqAPIJobFinish"
7846 #define AAC_IOCTL_SUBCMD_KEY_STRINGS \
7847 Reserved_IOCTL, "Reserved_IOCTL", \
7848 GetDeviceHandle, "GetDeviceHandle", \
7849 BusTargetLun_to_DeviceHandle, "BusTargetLun_to_DeviceHandle", \
7850 DeviceHandle_to_BusTargetLun, "DeviceHandle_to_BusTargetLun", \
7851 RescanBus, "RescanBus", \
7852 GetDeviceProbeInfo, "GetDeviceProbeInfo", \
7853 GetDeviceCapacity, "GetDeviceCapacity", \
7854 GetContainerProbeInfo, "GetContainerProbeInfo", \
7855 GetRequestedMemorySize, "GetRequestedMemorySize", \
7856 GetBusInfo, "GetBusInfo", \
7857 GetVendorSpecific, "GetVendorSpecific", \
7858 EnhancedGetDeviceProbeInfo, "EnhancedGetDeviceProbeInfo", \
7859 EnhancedGetBusInfo, "EnhancedGetBusInfo", \
7860 SetupExtendedCounters, "SetupExtendedCounters", \
7861 GetPerformanceCounters, "GetPerformanceCounters", \
7862 ResetPerformanceCounters, "ResetPerformanceCounters", \
7863 ReadModePage, "ReadModePage", \
7864 WriteModePage, "WriteModePage", \
7865 ReadDriveParameter, "ReadDriveParameter", \
7866 WriteDriveParameter, "WriteDriveParameter", \
7867 ResetAdapter, "ResetAdapter", \
7868 ResetBus, "ResetBus", \
7869 ResetBusDevice, "ResetBusDevice", \
7870 ExecuteSrb, "ExecuteSrb", \
7871 Create_IO_Task, "Create_IO_Task", \
7872 Delete_IO_Task, "Delete_IO_Task", \
7873 Get_IO_Task_Info, "Get_IO_Task_Info", \
7874 Check_Task_Progress, "Check_Task_Progress", \
7875 InjectError, "InjectError", \
7876 GetDeviceDefectCounts, "GetDeviceDefectCounts", \
7877 GetDeviceDefectInfo, "GetDeviceDefectInfo", \
7878 GetDeviceStatus, "GetDeviceStatus", \
7879 ClearDeviceStatus, "ClearDeviceStatus", \
7880 DiskSpinControl, "DiskSpinControl", \
7881 DiskSmartControl, "DiskSmartControl", \
7882 WriteSame, "WriteSame", \
7883 ReadWriteLong, "ReadWriteLong", \
7884 FormatUnit, "FormatUnit", \
7885 TargetDeviceControl, "TargetDeviceControl", \
7886 TargetChannelControl, "TargetChannelControl", \
7887 FlashNewCode, "FlashNewCode", \
7888 DiskCheck, "DiskCheck", \
7889 RequestSense, "RequestSense", \
7890 DiskPERControl, "DiskPERControl", \
7891 Read10, "Read10", \
7892 Write10, "Write10"
7894 #define AAC_AIFEN_KEY_STRINGS \
7895 AifEnGeneric, "Generic", \
7896 AifEnTaskComplete, "TaskComplete", \
7897 AifEnConfigChange, "Config change", \
7898 AifEnContainerChange, "Container change", \
7899 AifEnDeviceFailure, "device failed", \
7900 AifEnMirrorFailover, "Mirror failover", \
7901 AifEnContainerEvent, "container event", \
7902 AifEnFileSystemChange, "File system changed", \
7903 AifEnConfigPause, "Container pause event", \
7904 AifEnConfigResume, "Container resume event", \
7905 AifEnFailoverChange, "Failover space assignment changed", \
7906 AifEnRAID5RebuildDone, "RAID5 rebuild finished", \
7907 AifEnEnclosureManagement, "Enclosure management event", \
7908 AifEnBatteryEvent, "battery event", \
7909 AifEnAddContainer, "Add container", \
7910 AifEnDeleteContainer, "Delete container", \
7911 AifEnSMARTEvent, "SMART Event", \
7912 AifEnBatteryNeedsRecond, "battery needs reconditioning", \
7913 AifEnClusterEvent, "cluster event", \
7914 AifEnDiskSetEvent, "disk set event occurred", \
7915 AifDenMorphComplete, "morph operation completed", \
7916 AifDenVolumeExtendComplete, "VolumeExtendComplete"
7918 struct aac_key_strings {
7919 int key;
7920 char *message;
7921 };
7923 extern struct scsi_key_strings scsi_cmds[];
7925 static struct aac_key_strings aac_fib_cmds[] = {
7926 AAC_FIB_CMD_KEY_STRINGS,
7927 -1, NULL
7928 };
7930 static struct aac_key_strings aac_ctvm_subcmds[] = {
7931 AAC_CTVM_SUBCMD_KEY_STRINGS,
7932 -1, NULL
7933 };
7935 static struct aac_key_strings aac_ct_subcmds[] = {
7936 AAC_CT_SUBCMD_KEY_STRINGS,
7937 -1, NULL
7938 };
7940 static struct aac_key_strings aac_cl_subcmds[] = {
7941 AAC_CL_SUBCMD_KEY_STRINGS,
7942 -1, NULL
7943 };
7945 static struct aac_key_strings aac_aif_subcmds[] = {
7946 AAC_AIF_SUBCMD_KEY_STRINGS,
7947 -1, NULL
7948 };
7950 static struct aac_key_strings aac_ioctl_subcmds[] = {
7951 AAC_IOCTL_SUBCMD_KEY_STRINGS,
7952 -1, NULL
7953 };
7955 static struct aac_key_strings aac_aifens[] = {
7956 AAC_AIFEN_KEY_STRINGS,
7957 -1, NULL
7958 };
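/*
 * Illustrative sketch, kept out of the build: every table above follows the
 * same pattern -- flat (key, name) pairs terminated by the {-1, NULL}
 * sentinel that aac_cmd_name() stops on.  A hypothetical additional table
 * would look like this; the entries shown are made up.
 */
#if 0
static struct aac_key_strings aac_example_cmds[] = {
	0x100, "EXAMPLE_FIRST",
	0x101, "EXAMPLE_SECOND",
	-1, NULL
};
#endif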
7960 /*
7961 * The following function comes from Adaptec:
7962 *
7963 * Get the firmware print buffer parameters from the firmware,
7964 * if the command was successful, map in the address.
7965 */
7966 static int
7967 aac_get_fw_debug_buffer(struct aac_softstate *softs)
7968 {
7969 if (aac_sync_mbcommand(softs, AAC_MONKER_GETDRVPROP,
7970 0, 0, 0, 0, NULL) == AACOK) {
7971 uint32_t mondrv_buf_paddrl = AAC_MAILBOX_GET(softs, 1);
7972 uint32_t mondrv_buf_paddrh = AAC_MAILBOX_GET(softs, 2);
7973 uint32_t mondrv_buf_size = AAC_MAILBOX_GET(softs, 3);
7974 uint32_t mondrv_hdr_size = AAC_MAILBOX_GET(softs, 4);
7976 if (mondrv_buf_size) {
7977 uint32_t offset = mondrv_buf_paddrl - \
7978 softs->pci_mem_base_paddr;
7980 /*
7981 * See if the address is already mapped in, and
7982 * if so set it up from the base address
7983 */
7984 if ((mondrv_buf_paddrh == 0) &&
7985 (offset + mondrv_buf_size < softs->map_size)) {
7986 mutex_enter(&aac_prt_mutex);
7987 softs->debug_buf_offset = offset;
7988 softs->debug_header_size = mondrv_hdr_size;
7989 softs->debug_buf_size = mondrv_buf_size;
7990 softs->debug_fw_flags = 0;
7991 softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT;
7992 mutex_exit(&aac_prt_mutex);
7994 return (AACOK);
7995 }
7996 }
7997 }
7998 return (AACERR);
7999 }
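/*
 * A worked illustration of the containment check above, with made-up
 * numbers: if the firmware reports a print buffer at physical address
 * 0xf4401000 (paddrh == 0) and the PCI BAR is mapped at 0xf4400000 for
 * 0x10000 bytes, the buffer starts at offset 0x1000 into the existing
 * mapping; it is only used when offset + mondrv_buf_size still falls
 * inside map_size, otherwise the firmware print path stays disabled.
 */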
8001 int
8002 aac_dbflag_on(struct aac_softstate *softs, int flag)
8003 {
8004 int debug_flags = softs ? softs->debug_flags : aac_debug_flags;
8006 return ((debug_flags & (AACDB_FLAGS_FW_PRINT | \
8007 AACDB_FLAGS_KERNEL_PRINT)) && (debug_flags & flag));
8008 }
8010 static void
8011 aac_cmn_err(struct aac_softstate *softs, uint_t lev, char sl, int noheader)
8012 {
8013 if (noheader) {
8014 if (sl) {
8015 aac_fmt[0] = sl;
8016 cmn_err(lev, aac_fmt, aac_prt_buf);
8017 } else {
8018 cmn_err(lev, &aac_fmt[1], aac_prt_buf);
8019 }
8020 } else {
8021 if (sl) {
8022 aac_fmt_header[0] = sl;
8023 cmn_err(lev, aac_fmt_header,
8024 softs->vendor_name, softs->instance,
8025 aac_prt_buf);
8026 } else {
8027 cmn_err(lev, &aac_fmt_header[1],
8028 softs->vendor_name, softs->instance,
8029 aac_prt_buf);
8030 }
8031 }
8032 }
8034 /*
8035 * The following function comes from Adaptec:
8036 *
8037 * Format and print out the data passed in to UART or console
8038 * as specified by debug flags.
8039 */
8040 void
8041 aac_printf(struct aac_softstate *softs, uint_t lev, const char *fmt, ...)
8042 {
8043 va_list args;
8044 char sl; /* system log character */
8046 mutex_enter(&aac_prt_mutex);
8047 /* Set up parameters and call sprintf function to format the data */
8048 if (strchr("^!?", fmt[0]) == NULL) {
8049 sl = 0;
8050 } else {
8051 sl = fmt[0];
8052 fmt++;
8053 }
8054 va_start(args, fmt);
8055 (void) vsprintf(aac_prt_buf, fmt, args);
8056 va_end(args);
8058 /* Make sure the softs structure has been passed in for this section */
8059 if (softs) {
8060 if ((softs->debug_flags & AACDB_FLAGS_FW_PRINT) &&
8061 /* If we are set up for a Firmware print */
8062 (softs->debug_buf_size)) {
8063 uint32_t count, i;
8065 /* Make sure the string size is within boundaries */
8066 count = strlen(aac_prt_buf);
8067 if (count > softs->debug_buf_size)
8068 count = (uint16_t)softs->debug_buf_size;
8070 /*
8071 * Wait for no more than AAC_PRINT_TIMEOUT for the
8072 * previous message length to clear (the handshake).
8073 */
8074 for (i = 0; i < AAC_PRINT_TIMEOUT; i++) {
8075 if (!PCI_MEM_GET32(softs,
8076 softs->debug_buf_offset + \
8077 AAC_FW_DBG_STRLEN_OFFSET))
8078 break;
8080 drv_usecwait(1000);
8081 }
8083 /*
8084 * If the length is clear, copy over the message, the
8085 * flags, and the length. Make sure the length is the
8086 * last because that is the signal for the Firmware to
8087 * pick it up.
8088 */
8089 if (!PCI_MEM_GET32(softs, softs->debug_buf_offset + \
8090 AAC_FW_DBG_STRLEN_OFFSET)) {
8091 PCI_MEM_REP_PUT8(softs,
8092 softs->debug_buf_offset + \
8093 softs->debug_header_size,
8094 aac_prt_buf, count);
8095 PCI_MEM_PUT32(softs,
8096 softs->debug_buf_offset + \
8097 AAC_FW_DBG_FLAGS_OFFSET,
8098 softs->debug_fw_flags);
8099 PCI_MEM_PUT32(softs,
8100 softs->debug_buf_offset + \
8101 AAC_FW_DBG_STRLEN_OFFSET, count);
8102 } else {
8103 cmn_err(CE_WARN, "UART output fail");
8104 softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT;
8105 }
8106 }
8108 /*
8109 * If the Kernel Debug Print flag is set, send it off
8110 * to the Kernel Debugger
8111 */
8112 if (softs->debug_flags & AACDB_FLAGS_KERNEL_PRINT)
8113 aac_cmn_err(softs, lev, sl,
8114 (softs->debug_flags & AACDB_FLAGS_NO_HEADERS));
8115 } else {
8116 /* Driver not initialized yet, no firmware or header output */
8117 if (aac_debug_flags & AACDB_FLAGS_KERNEL_PRINT)
8118 aac_cmn_err(softs, lev, sl, 1);
8119 }
8120 mutex_exit(&aac_prt_mutex);
8121 }
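/*
 * Illustrative sketch, kept out of the build: aac_printf() follows the
 * cmn_err(9F) convention for the first format character -- '^', '!' and '?'
 * are stripped here and re-applied when the message reaches cmn_err(), so a
 * caller can steer a message to the console only or to the system log only.
 * The messages below are made up.
 */
#if 0
	aac_printf(softs, CE_NOTE, "container %d now online", 0);
	aac_printf(softs, CE_WARN, "!dropping late AIF");	/* system log only */
	aac_printf(softs, CE_WARN, "^adapter reset requested");	/* console only */
#endif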
8123 /*
8124 * Translate command number to description string
8125 */
8126 static char *
8127 aac_cmd_name(int cmd, struct aac_key_strings *cmdlist)
8128 {
8129 int i;
8131 for (i = 0; cmdlist[i].key != -1; i++) {
8132 if (cmd == cmdlist[i].key)
8133 return (cmdlist[i].message);
8134 }
8135 return (NULL);
8136 }
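/*
 * Illustrative sketch, kept out of the build: the lookup returns NULL when a
 * key is not in the table, so callers fall back to printing the raw value,
 * as the debug routines below do.  ContainerCommand and aac_fib_cmds are
 * real names from this file; the message text is made up.
 */
#if 0
	char *name = aac_cmd_name(ContainerCommand, aac_fib_cmds);

	if (name != NULL)
		aac_printf(softs, CE_NOTE, "sending %s", name);
	else
		aac_printf(softs, CE_NOTE, "sending Unknown(0x%x)",
		    ContainerCommand);
#endif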
8138 static void
8139 aac_print_scmd(struct aac_softstate *softs, struct aac_cmd *acp)
8140 {
8141 struct scsi_pkt *pkt = acp->pkt;
8142 struct scsi_address *ap = &pkt->pkt_address;
8143 int is_pd = 0;
8144 int ctl = ddi_get_instance(softs->devinfo_p);
8145 int tgt = ap->a_target;
8146 int lun = ap->a_lun;
8147 union scsi_cdb *cdbp = (void *)pkt->pkt_cdbp;
8148 uchar_t cmd = cdbp->scc_cmd;
8149 char *desc;
8151 if (tgt >= AAC_MAX_LD) {
8152 is_pd = 1;
8153 ctl = ((struct aac_nondasd *)acp->dvp)->bus;
8154 tgt = ((struct aac_nondasd *)acp->dvp)->tid;
8155 lun = 0;
8156 }
8158 if ((desc = aac_cmd_name(cmd,
8159 (struct aac_key_strings *)scsi_cmds)) == NULL) {
8160 aac_printf(softs, CE_NOTE,
8161 "SCMD> Unknown(0x%2x) --> c%dt%dL%d %s",
8162 cmd, ctl, tgt, lun, is_pd ? "(pd)" : "");
8163 return;
8164 }
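/*
 * The cases below pull the LBA and transfer length out of the CDB with the
 * group-specific macros: group 0 is the 6-byte CDB, group 1 the 10-byte,
 * group 4 the 16-byte form (64-bit LBA, printed as two halves) and group 5
 * the 12-byte form.
 */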
8166 switch (cmd) {
8167 case SCMD_READ:
8168 case SCMD_WRITE:
8169 aac_printf(softs, CE_NOTE,
8170 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
8171 desc, GETG0ADDR(cdbp), GETG0COUNT(cdbp),
8172 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
8173 ctl, tgt, lun, is_pd ? "(pd)" : "");
8174 break;
8175 case SCMD_READ_G1:
8176 case SCMD_WRITE_G1:
8177 aac_printf(softs, CE_NOTE,
8178 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
8179 desc, GETG1ADDR(cdbp), GETG1COUNT(cdbp),
8180 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
8181 ctl, tgt, lun, is_pd ? "(pd)" : "");
8182 break;
8183 case SCMD_READ_G4:
8184 case SCMD_WRITE_G4:
8185 aac_printf(softs, CE_NOTE,
8186 "SCMD> %s 0x%x.%08x[%d] %s --> c%dt%dL%d %s",
8187 desc, GETG4ADDR(cdbp), GETG4ADDRTL(cdbp),
8188 GETG4COUNT(cdbp),
8189 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
8190 ctl, tgt, lun, is_pd ? "(pd)" : "");
8191 break;
8192 case SCMD_READ_G5:
8193 case SCMD_WRITE_G5:
8194 aac_printf(softs, CE_NOTE,
8195 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
8196 desc, GETG5ADDR(cdbp), GETG5COUNT(cdbp),
8197 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
8198 ctl, tgt, lun, is_pd ? "(pd)" : "");
8199 break;
8200 default:
8201 aac_printf(softs, CE_NOTE, "SCMD> %s --> c%dt%dL%d %s",
8202 desc, ctl, tgt, lun, is_pd ? "(pd)" : "");
8203 }
8204 }
8206 void
8207 aac_print_fib(struct aac_softstate *softs, struct aac_slot *slotp)
8208 {
8209 struct aac_cmd *acp = slotp->acp;
8210 struct aac_fib *fibp = slotp->fibp;
8211 ddi_acc_handle_t acc = slotp->fib_acc_handle;
8212 uint16_t fib_size;
8213 uint32_t fib_cmd, sub_cmd;
8214 char *cmdstr, *subcmdstr;
8215 char *caller;
8216 int i;
8218 if (acp) {
8219 if (!(softs->debug_fib_flags & acp->fib_flags))
8220 return;
8221 if (acp->fib_flags & AACDB_FLAGS_FIB_SCMD)
8222 caller = "SCMD";
8223 else if (acp->fib_flags & AACDB_FLAGS_FIB_IOCTL)
8224 caller = "IOCTL";
8225 else if (acp->fib_flags & AACDB_FLAGS_FIB_SRB)
8226 caller = "SRB";
8227 else
8228 return;
8229 } else {
8230 if (!(softs->debug_fib_flags & AACDB_FLAGS_FIB_SYNC))
8231 return;
8232 caller = "SYNC";
8233 }
8235 fib_cmd = ddi_get16(acc, &fibp->Header.Command);
8236 cmdstr = aac_cmd_name(fib_cmd, aac_fib_cmds);
8237 sub_cmd = (uint32_t)-1;
8238 subcmdstr = NULL;
8240 /* Print FIB header */
8241 if (softs->debug_fib_flags & AACDB_FLAGS_FIB_HEADER) {
8242 aac_printf(softs, CE_NOTE, "FIB> from %s", caller);
8243 aac_printf(softs, CE_NOTE, " XferState %d",
8244 ddi_get32(acc, &fibp->Header.XferState));
8245 aac_printf(softs, CE_NOTE, " Command %d",
8246 ddi_get16(acc, &fibp->Header.Command));
8247 aac_printf(softs, CE_NOTE, " StructType %d",
8248 ddi_get8(acc, &fibp->Header.StructType));
8249 aac_printf(softs, CE_NOTE, " Flags 0x%x",
8250 ddi_get8(acc, &fibp->Header.Flags));
8251 aac_printf(softs, CE_NOTE, " Size %d",
8252 ddi_get16(acc, &fibp->Header.Size));
8253 aac_printf(softs, CE_NOTE, " SenderSize %d",
8254 ddi_get16(acc, &fibp->Header.SenderSize));
8255 aac_printf(softs, CE_NOTE, " SenderAddr 0x%x",
8256 ddi_get32(acc, &fibp->Header.SenderFibAddress));
8257 aac_printf(softs, CE_NOTE, " RcvrAddr 0x%x",
8258 ddi_get32(acc, &fibp->Header.ReceiverFibAddress));
8259 aac_printf(softs, CE_NOTE, " SenderData 0x%x",
8260 ddi_get32(acc, &fibp->Header.SenderData));
8261 }
8263 /* Print FIB data */
8264 switch (fib_cmd) {
8265 case ContainerCommand:
8266 sub_cmd = ddi_get32(acc,
8267 (void *)&(((uint32_t *)(void *)&fibp->data[0])[0]));
8268 subcmdstr = aac_cmd_name(sub_cmd, aac_ctvm_subcmds);
8269 if (subcmdstr == NULL)
8270 break;
8272 switch (sub_cmd) {
8273 case VM_ContainerConfig: {
8274 struct aac_Container *pContainer =
8275 (struct aac_Container *)fibp->data;
8277 fib_cmd = sub_cmd;
8278 cmdstr = subcmdstr;
8279 sub_cmd = (uint32_t)-1;
8280 subcmdstr = NULL;
8282 sub_cmd = ddi_get32(acc,
8283 &pContainer->CTCommand.command);
8284 subcmdstr = aac_cmd_name(sub_cmd, aac_ct_subcmds);
8285 if (subcmdstr == NULL)
8286 break;
8287 aac_printf(softs, CE_NOTE, "FIB> %s (0x%x, 0x%x, 0x%x)",
8288 subcmdstr,
8289 ddi_get32(acc, &pContainer->CTCommand.param[0]),
8290 ddi_get32(acc, &pContainer->CTCommand.param[1]),
8291 ddi_get32(acc, &pContainer->CTCommand.param[2]));
8292 return;
8293 }
8295 case VM_Ioctl:
8296 fib_cmd = sub_cmd;
8297 cmdstr = subcmdstr;
8298 sub_cmd = (uint32_t)-1;
8299 subcmdstr = NULL;
8301 sub_cmd = ddi_get32(acc,
8302 (void *)&(((uint32_t *)(void *)&fibp->data[0])[4]));
8303 subcmdstr = aac_cmd_name(sub_cmd, aac_ioctl_subcmds);
8304 break;
8306 case VM_CtBlockRead:
8307 case VM_CtBlockWrite: {
8308 struct aac_blockread *br =
8309 (struct aac_blockread *)fibp->data;
8310 struct aac_sg_table *sg = &br->SgMap;
8311 uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
8313 aac_printf(softs, CE_NOTE,
8314 "FIB> %s Container %d 0x%x/%d", subcmdstr,
8315 ddi_get32(acc, &br->ContainerId),
8316 ddi_get32(acc, &br->BlockNumber),
8317 ddi_get32(acc, &br->ByteCount));
8318 for (i = 0; i < sgcount; i++)
8319 aac_printf(softs, CE_NOTE,
8320 " %d: 0x%08x/%d", i,
8321 ddi_get32(acc, &sg->SgEntry[i].SgAddress),
8322 ddi_get32(acc, &sg->SgEntry[i]. \
8323 SgByteCount));
8324 return;
8325 }
8326 }
8327 break;
8329 case ContainerCommand64: {
8330 struct aac_blockread64 *br =
8331 (struct aac_blockread64 *)fibp->data;
8332 struct aac_sg_table64 *sg = &br->SgMap64;
8333 uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
8334 uint64_t sgaddr;
8336 sub_cmd = br->Command;
8337 subcmdstr = NULL;
8338 if (sub_cmd == VM_CtHostRead64)
8339 subcmdstr = "VM_CtHostRead64";
8340 else if (sub_cmd == VM_CtHostWrite64)
8341 subcmdstr = "VM_CtHostWrite64";
8342 else
8343 break;
8345 aac_printf(softs, CE_NOTE,
8346 "FIB> %s Container %d 0x%x/%d", subcmdstr,
8347 ddi_get16(acc, &br->ContainerId),
8348 ddi_get32(acc, &br->BlockNumber),
8349 ddi_get16(acc, &br->SectorCount));
8350 for (i = 0; i < sgcount; i++) {
8351 sgaddr = ddi_get64(acc,
8352 &sg->SgEntry64[i].SgAddress);
8353 aac_printf(softs, CE_NOTE,
8354 " %d: 0x%08x.%08x/%d", i,
8355 AAC_MS32(sgaddr), AAC_LS32(sgaddr),
8356 ddi_get32(acc, &sg->SgEntry64[i]. \
8357 SgByteCount));
8358 }
8359 return;
8360 }
8362 case RawIo: {
8363 struct aac_raw_io *io = (struct aac_raw_io *)fibp->data;
8364 struct aac_sg_tableraw *sg = &io->SgMapRaw;
8365 uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
8366 uint64_t sgaddr;
8368 aac_printf(softs, CE_NOTE,
8369 "FIB> RawIo Container %d 0x%llx/%d 0x%x",
8370 ddi_get16(acc, &io->ContainerId),
8371 ddi_get64(acc, &io->BlockNumber),
8372 ddi_get32(acc, &io->ByteCount),
8373 ddi_get16(acc, &io->Flags));
8374 for (i = 0; i < sgcount; i++) {
8375 sgaddr = ddi_get64(acc, &sg->SgEntryRaw[i].SgAddress);
8376 aac_printf(softs, CE_NOTE, " %d: 0x%08x.%08x/%d", i,
8377 AAC_MS32(sgaddr), AAC_LS32(sgaddr),
8378 ddi_get32(acc, &sg->SgEntryRaw[i].SgByteCount));
8379 }
8380 return;
8381 }
8383 case ClusterCommand:
8384 sub_cmd = ddi_get32(acc,
8385 (void *)&(((uint32_t *)(void *)fibp->data)[0]));
8386 subcmdstr = aac_cmd_name(sub_cmd, aac_cl_subcmds);
8387 break;
8389 case AifRequest:
8390 sub_cmd = ddi_get32(acc,
8391 (void *)&(((uint32_t *)(void *)fibp->data)[0]));
8392 subcmdstr = aac_cmd_name(sub_cmd, aac_aif_subcmds);
8393 break;
8395 default:
8396 break;
8397 }
8399 fib_size = ddi_get16(acc, &(fibp->Header.Size));
8400 if (subcmdstr)
8401 aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d",
8402 subcmdstr, fib_size);
8403 else if (cmdstr && sub_cmd == (uint32_t)-1)
8404 aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d",
8405 cmdstr, fib_size);
8406 else if (cmdstr)
8407 aac_printf(softs, CE_NOTE, "FIB> %s: Unknown(0x%x), sz=%d",
8408 cmdstr, sub_cmd, fib_size);
8409 else
8410 aac_printf(softs, CE_NOTE, "FIB> Unknown(0x%x), sz=%d",
8411 fib_cmd, fib_size);
8412 }
8414 static void
8415 aac_print_aif(struct aac_softstate *softs, struct aac_aif_command *aif)
8416 {
8417 int aif_command;
8418 uint32_t aif_seqnumber;
8419 int aif_en_type;
8420 char *str;
8422 aif_command = LE_32(aif->command);
8423 aif_seqnumber = LE_32(aif->seqNumber);
8424 aif_en_type = LE_32(aif->data.EN.type);
8426 switch (aif_command) {
8427 case AifCmdEventNotify:
8428 str = aac_cmd_name(aif_en_type, aac_aifens);
8429 if (str)
8430 aac_printf(softs, CE_NOTE, "AIF! %s", str);
8431 else
8432 aac_printf(softs, CE_NOTE, "AIF! Unknown(0x%x)",
8433 aif_en_type);
8434 break;
8436 case AifCmdJobProgress:
8437 switch (LE_32(aif->data.PR[0].status)) {
8438 case AifJobStsSuccess:
8439 str = "success"; break;
8440 case AifJobStsFinished:
8441 str = "finished"; break;
8442 case AifJobStsAborted:
8443 str = "aborted"; break;
8444 case AifJobStsFailed:
8445 str = "failed"; break;
8446 case AifJobStsSuspended:
8447 str = "suspended"; break;
8448 case AifJobStsRunning:
8449 str = "running"; break;
8450 default:
8451 str = "unknown"; break;
8453 aac_printf(softs, CE_NOTE,
8454 "AIF! JobProgress (%d) - %s (%d, %d)",
8455 aif_seqnumber, str,
8456 LE_32(aif->data.PR[0].currentTick),
8457 LE_32(aif->data.PR[0].finalTick));
8458 break;
8460 case AifCmdAPIReport:
8461 aac_printf(softs, CE_NOTE, "AIF! APIReport (%d)",
8462 aif_seqnumber);
8463 break;
8465 case AifCmdDriverNotify:
8466 aac_printf(softs, CE_NOTE, "AIF! DriverNotify (%d)",
8467 aif_seqnumber);
8468 break;
8470 default:
8471 aac_printf(softs, CE_NOTE, "AIF! AIF %d (%d)",
8472 aif_command, aif_seqnumber);
8473 break;
8474 }
8475 }
8477 #endif /* DEBUG */