Sync CAM with FreeBSD using lockmgr locks instead of mutexes.
[dragonfly.git] / sys / dev / raid / dpt / dpt_scsi.c
blob: 2436c8fa63eab386c893d1af8fcba8fcdd5a1e56
1 /*
2 * Copyright (c) 1997 by Simon Shapiro
3 * All Rights Reserved
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
31 * dpt_scsi.c: SCSI dependent code for the DPT driver
33 * credits: Assisted by Mike Neuffer in the early low level DPT code
34 * Thanx to Mark Salyzyn of DPT for his assistance.
35 * Special thanx to Justin Gibbs for invaluable help in
36 * making this driver look and work like a FreeBSD component.
37 * Last but not least, many thanx to UCB and the FreeBSD
38 * team for creating and maintaining such a wonderful O/S.
40 * TODO: * Add ISA probe code.
41 * * Add driver-level RAID-0. This will allow interoperability with
42 * NiceTry, M$-Doze, Win-Dog, Slowlaris, etc., in recognizing RAID
43 * arrays that span controllers (Wow!).
46 #ident "$FreeBSD: src/sys/dev/dpt/dpt_scsi.c,v 1.28.2.3 2003/01/31 02:47:10 grog Exp $"
47 #ident "$DragonFly: src/sys/dev/raid/dpt/dpt_scsi.c,v 1.18 2008/05/18 20:30:23 pavalos Exp $"
49 #define _DPT_C_
51 #include "opt_dpt.h"
52 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <sys/eventhandler.h>
55 #include <sys/malloc.h>
56 #include <sys/kernel.h>
57 #include <sys/bus.h>
58 #include <sys/thread2.h>
60 #include <machine/clock.h>
62 #include <bus/cam/cam.h>
63 #include <bus/cam/cam_ccb.h>
64 #include <bus/cam/cam_sim.h>
65 #include <bus/cam/cam_xpt_sim.h>
66 #include <bus/cam/cam_debug.h>
67 #include <bus/cam/scsi/scsi_all.h>
68 #include <bus/cam/scsi/scsi_message.h>
70 #include <vm/vm.h>
71 #include <vm/pmap.h>
73 #include "dpt.h"
75 /* dpt_isa.c, dpt_eisa.c, and dpt_pci.c need this in a central place */
76 int dpt_controllers_present;
78 u_long dpt_unit; /* Next unit number to use */
80 /* The linked list of softc structures */
81 struct dpt_softc_list dpt_softcs = TAILQ_HEAD_INITIALIZER(dpt_softcs);
83 #define microtime_now dpt_time_now()
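/*
 * microtime_now is used below to timestamp the last controller contact
 * (DPT_RESET_HBA) and command start times (DPT_MEASURE_PERFORMANCE).
 */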
85 #define dpt_inl(dpt, port) \
86 bus_space_read_4((dpt)->tag, (dpt)->bsh, port)
87 #define dpt_inb(dpt, port) \
88 bus_space_read_1((dpt)->tag, (dpt)->bsh, port)
89 #define dpt_outl(dpt, port, value) \
90 bus_space_write_4((dpt)->tag, (dpt)->bsh, port, value)
91 #define dpt_outb(dpt, port, value) \
92 bus_space_write_1((dpt)->tag, (dpt)->bsh, port, value)
95 * These will have to be set up by parameters passed at boot/load time. For
96 * performance reasons, we make them constants for the time being.
98 #define dpt_min_segs DPT_MAX_SEGS
99 #define dpt_max_segs DPT_MAX_SEGS
101 /* Definitions for our use of the SIM private CCB area */
102 #define ccb_dccb_ptr spriv_ptr0
103 #define ccb_dpt_ptr spriv_ptr1
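/*
 * Note: ccb_dccb_ptr and ccb_dpt_ptr are the SIM-private CCB fields used
 * to find the driver CCB and softc again from a CAM ccb_hdr; they are
 * filled in when a CCB is set up in dpt_action().
 */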
105 /* ================= Private Inline Function declarations ===================*/
106 static __inline int dpt_just_reset(dpt_softc_t * dpt);
107 static __inline int dpt_raid_busy(dpt_softc_t * dpt);
108 static __inline int dpt_pio_wait (u_int32_t, u_int, u_int, u_int);
109 static __inline int dpt_wait(dpt_softc_t *dpt, u_int bits,
110 u_int state);
111 static __inline struct dpt_ccb* dptgetccb(struct dpt_softc *dpt);
112 static __inline void dptfreeccb(struct dpt_softc *dpt,
113 struct dpt_ccb *dccb);
114 static __inline u_int32_t dptccbvtop(struct dpt_softc *dpt,
115 struct dpt_ccb *dccb);
117 static __inline int dpt_send_immediate(dpt_softc_t *dpt,
118 eata_ccb_t *cmd_block,
119 u_int32_t cmd_busaddr,
120 u_int retries,
121 u_int ifc, u_int code,
122 u_int code2);
124 /* ==================== Private Function declarations =======================*/
125 static void dptmapmem(void *arg, bus_dma_segment_t *segs,
126 int nseg, int error);
128 static struct sg_map_node*
129 dptallocsgmap(struct dpt_softc *dpt);
131 static int dptallocccbs(dpt_softc_t *dpt);
133 static int dpt_get_conf(dpt_softc_t *dpt, dpt_ccb_t *dccb,
134 u_int32_t dccb_busaddr, u_int size,
135 u_int page, u_int target, int extent);
136 static void dpt_detect_cache(dpt_softc_t *dpt, dpt_ccb_t *dccb,
137 u_int32_t dccb_busaddr,
138 u_int8_t *buff);
140 static void dpt_poll(struct cam_sim *sim);
142 static void dptexecuteccb(void *arg, bus_dma_segment_t *dm_segs,
143 int nseg, int error);
145 static void dpt_action(struct cam_sim *sim, union ccb *ccb);
147 static int dpt_send_eata_command(dpt_softc_t *dpt, eata_ccb_t *cmd,
148 u_int32_t cmd_busaddr,
149 u_int command, u_int retries,
150 u_int ifc, u_int code,
151 u_int code2);
152 static void dptprocesserror(dpt_softc_t *dpt, dpt_ccb_t *dccb,
153 union ccb *ccb, u_int hba_stat,
154 u_int scsi_stat, u_int32_t resid);
156 static void dpttimeout(void *arg);
157 static void dptshutdown(void *arg, int howto);
159 /* ================= Private Inline Function definitions ====================*/
160 static __inline int
161 dpt_just_reset(dpt_softc_t * dpt)
163 if ((dpt_inb(dpt, 2) == 'D')
164 && (dpt_inb(dpt, 3) == 'P')
165 && (dpt_inb(dpt, 4) == 'T')
166 && (dpt_inb(dpt, 5) == 'H'))
167 return (1);
168 else
169 return (0);
172 static __inline int
173 dpt_raid_busy(dpt_softc_t * dpt)
175 if ((dpt_inb(dpt, 0) == 'D')
176 && (dpt_inb(dpt, 1) == 'P')
177 && (dpt_inb(dpt, 2) == 'T'))
178 return (1);
179 else
180 return (0);
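/*
 * Note: the two helpers above simply look for the ASCII signatures the
 * board presents in its registers: "DPTH" at offsets 2-5 right after a
 * reset, and "DPT" at offsets 0-2 while the RAID is still spinning up.
 */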
183 static __inline int
184 dpt_pio_wait (u_int32_t base, u_int reg, u_int bits, u_int state)
186 int i;
187 u_int c;
189 for (i = 0; i < 20000; i++) { /* wait up to ~1 second for not busy */
190 c = inb(base + reg) & bits;
191 if (!(c == state))
192 return (0);
193 else
194 DELAY(50);
196 return (-1);
199 static __inline int
200 dpt_wait(dpt_softc_t *dpt, u_int bits, u_int state)
202 int i;
203 u_int c;
205 for (i = 0; i < 20000; i++) { /* wait up to ~1 second for not busy */
206 c = dpt_inb(dpt, HA_RSTATUS) & bits;
207 if (c == state)
208 return (0);
209 else
210 DELAY(50);
212 return (-1);
215 static __inline struct dpt_ccb*
216 dptgetccb(struct dpt_softc *dpt)
218 struct dpt_ccb* dccb;
220 crit_enter();
221 if ((dccb = SLIST_FIRST(&dpt->free_dccb_list)) != NULL) {
222 SLIST_REMOVE_HEAD(&dpt->free_dccb_list, links);
223 dpt->free_dccbs--;
224 } else if (dpt->total_dccbs < dpt->max_dccbs) {
225 dptallocccbs(dpt);
226 dccb = SLIST_FIRST(&dpt->free_dccb_list);
227 if (dccb == NULL)
228 kprintf("dpt%d: Can't malloc DCCB\n", dpt->unit);
229 else {
230 SLIST_REMOVE_HEAD(&dpt->free_dccb_list, links);
231 dpt->free_dccbs--;
234 crit_exit();
236 return (dccb);
239 static __inline void
240 dptfreeccb(struct dpt_softc *dpt, struct dpt_ccb *dccb)
242 crit_enter();
243 if ((dccb->state & DCCB_ACTIVE) != 0)
244 LIST_REMOVE(&dccb->ccb->ccb_h, sim_links.le);
245 if ((dccb->state & DCCB_RELEASE_SIMQ) != 0)
246 dccb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
247 else if (dpt->resource_shortage != 0
248 && (dccb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
249 dccb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
250 dpt->resource_shortage = FALSE;
252 dccb->state = DCCB_FREE;
253 SLIST_INSERT_HEAD(&dpt->free_dccb_list, dccb, links);
254 ++dpt->free_dccbs;
255 crit_exit();
258 static __inline u_int32_t
259 dptccbvtop(struct dpt_softc *dpt, struct dpt_ccb *dccb)
261 return (dpt->dpt_ccb_busbase
262 + (u_int32_t)((caddr_t)dccb - (caddr_t)dpt->dpt_dccbs));
265 static __inline struct dpt_ccb *
266 dptccbptov(struct dpt_softc *dpt, u_int32_t busaddr)
268 return (dpt->dpt_dccbs
269 + ((struct dpt_ccb *)busaddr
270 - (struct dpt_ccb *)dpt->dpt_ccb_busbase));
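/*
 * Note: dptccbvtop()/dptccbptov() rely on all DCCBs living in one
 * contiguous DMA allocation, so a CCB's bus address and its kernel
 * virtual address always differ by the same constant offset.
 */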
274 * Send a command for immediate execution by the DPT
275 * See above function for IMPORTANT notes.
277 static __inline int
278 dpt_send_immediate(dpt_softc_t *dpt, eata_ccb_t *cmd_block,
279 u_int32_t cmd_busaddr, u_int retries,
280 u_int ifc, u_int code, u_int code2)
282 return (dpt_send_eata_command(dpt, cmd_block, cmd_busaddr,
283 EATA_CMD_IMMEDIATE, retries, ifc,
284 code, code2));
288 /* ===================== Private Function definitions =======================*/
289 static void
290 dptmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
292 bus_addr_t *busaddrp;
294 busaddrp = (bus_addr_t *)arg;
295 *busaddrp = segs->ds_addr;
298 static struct sg_map_node *
299 dptallocsgmap(struct dpt_softc *dpt)
301 struct sg_map_node *sg_map;
303 sg_map = kmalloc(sizeof(*sg_map), M_DEVBUF, M_INTWAIT);
305 /* Allocate S/G space for the next batch of CCBS */
306 if (bus_dmamem_alloc(dpt->sg_dmat, (void **)&sg_map->sg_vaddr,
307 BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
308 kfree(sg_map, M_DEVBUF);
309 return (NULL);
312 (void)bus_dmamap_load(dpt->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
313 PAGE_SIZE, dptmapmem, &sg_map->sg_physaddr,
314 /*flags*/0);
316 SLIST_INSERT_HEAD(&dpt->sg_maps, sg_map, links);
318 return (sg_map);
322 * Allocate another chunk of CCB's. Return count of entries added.
323 * Assumed to be called at splcam().
325 static int
326 dptallocccbs(dpt_softc_t *dpt)
328 struct dpt_ccb *next_ccb;
329 struct sg_map_node *sg_map;
330 bus_addr_t physaddr;
331 dpt_sg_t *segs;
332 int newcount;
333 int i;
335 next_ccb = &dpt->dpt_dccbs[dpt->total_dccbs];
337 if (next_ccb == dpt->dpt_dccbs) {
339 * First time through. Re-use the S/G
340 * space we allocated for initialization
341 * CCBS.
343 sg_map = SLIST_FIRST(&dpt->sg_maps);
344 } else {
345 sg_map = dptallocsgmap(dpt);
348 if (sg_map == NULL)
349 return (0);
351 segs = sg_map->sg_vaddr;
352 physaddr = sg_map->sg_physaddr;
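/*
 * Each CCB gets dpt->sgsize S/G descriptors out of this page-sized
 * chunk, so one page yields PAGE_SIZE / (sgsize * sizeof(dpt_sg_t))
 * new CCBs.
 */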
354 newcount = (PAGE_SIZE / (dpt->sgsize * sizeof(dpt_sg_t)));
355 for (i = 0; dpt->total_dccbs < dpt->max_dccbs && i < newcount; i++) {
356 int error;
358 error = bus_dmamap_create(dpt->buffer_dmat, /*flags*/0,
359 &next_ccb->dmamap);
360 if (error != 0)
361 break;
362 next_ccb->sg_list = segs;
363 next_ccb->sg_busaddr = htonl(physaddr);
364 next_ccb->eata_ccb.cp_dataDMA = htonl(physaddr);
365 next_ccb->eata_ccb.cp_statDMA = htonl(dpt->sp_physaddr);
366 next_ccb->eata_ccb.cp_reqDMA =
367 htonl(dptccbvtop(dpt, next_ccb)
368 + offsetof(struct dpt_ccb, sense_data));
369 next_ccb->eata_ccb.cp_busaddr = dpt->dpt_ccb_busend;
370 next_ccb->state = DCCB_FREE;
371 next_ccb->tag = dpt->total_dccbs;
372 SLIST_INSERT_HEAD(&dpt->free_dccb_list, next_ccb, links);
373 segs += dpt->sgsize;
374 physaddr += (dpt->sgsize * sizeof(dpt_sg_t));
375 dpt->dpt_ccb_busend += sizeof(*next_ccb);
376 next_ccb++;
377 dpt->total_dccbs++;
379 return (i);
382 dpt_conf_t *
383 dpt_pio_get_conf (u_int32_t base)
385 static dpt_conf_t * conf;
386 u_int16_t * p;
387 int i;
390 * Allocate a dpt_conf_t
392 if (conf == NULL)
393 conf = kmalloc(sizeof(dpt_conf_t), M_DEVBUF, M_INTWAIT);
396 * If we have one, clean it up.
398 bzero(conf, sizeof(dpt_conf_t));
401 * Reset the controller.
403 outb((base + HA_WCOMMAND), EATA_CMD_RESET);
406 * Wait for the controller to become ready.
407 * For some reason there can be -no- delays after calling reset
408 * before we wait on ready status.
410 if (dpt_pio_wait(base, HA_RSTATUS, HA_SBUSY, 0)) {
411 kprintf("dpt: timeout waiting for controller to become ready\n");
412 return (NULL);
415 if (dpt_pio_wait(base, HA_RAUXSTAT, HA_ABUSY, 0)) {
416 kprintf("dpt: timetout waiting for adapter ready.\n");
417 return (NULL);
421 * Send the PIO_READ_CONFIG command.
423 outb((base + HA_WCOMMAND), EATA_CMD_PIO_READ_CONFIG);
426 * Read the data into the struct.
428 p = (u_int16_t *)conf;
429 for (i = 0; i < (sizeof(dpt_conf_t) / 2); i++) {
431 if (dpt_pio_wait(base, HA_RSTATUS, HA_SDRQ, 0)) {
432 kprintf("dpt: timeout in data read.\n");
433 return (NULL);
436 (*p) = inw(base + HA_RDATA);
437 p++;
440 if (inb(base + HA_RSTATUS) & HA_SERROR) {
441 kprintf("dpt: error reading configuration data.\n");
442 return (NULL);
445 #define BE_EATA_SIGNATURE 0x45415441
446 #define LE_EATA_SIGNATURE 0x41544145
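/*
 * 0x45415441 is "EATA" in ASCII; the second constant is the same
 * signature with its bytes reversed, so either byte order is accepted.
 */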
449 * Test to see if we have a valid card.
451 if ((conf->signature == BE_EATA_SIGNATURE) ||
452 (conf->signature == LE_EATA_SIGNATURE)) {
454 while (inb(base + HA_RSTATUS) & HA_SDRQ) {
455 inw(base + HA_RDATA);
458 return (conf);
460 return (NULL);
464 * Read a configuration page into the supplied dpt_conf_t buffer.
466 static int
467 dpt_get_conf(dpt_softc_t *dpt, dpt_ccb_t *dccb, u_int32_t dccb_busaddr,
468 u_int size, u_int page, u_int target, int extent)
470 eata_ccb_t *cp;
472 u_int8_t status;
474 int ndx;
475 int result;
477 cp = &dccb->eata_ccb;
478 bzero((void *)(uintptr_t)(volatile void *)dpt->sp, sizeof(*dpt->sp));
480 cp->Interpret = 1;
481 cp->DataIn = 1;
482 cp->Auto_Req_Sen = 1;
483 cp->reqlen = sizeof(struct scsi_sense_data);
485 cp->cp_id = target;
486 cp->cp_LUN = 0; /* In the EATA packet */
487 cp->cp_lun = 0; /* In the SCSI command */
489 cp->cp_scsi_cmd = INQUIRY;
490 cp->cp_len = size;
492 cp->cp_extent = extent;
494 cp->cp_page = page;
495 cp->cp_channel = 0; /* DNC, Interpret mode is set */
496 cp->cp_identify = 1;
497 cp->cp_datalen = htonl(size);
499 crit_enter();
502 * This could be a simple for loop, but we suspected the compiler
503 * would optimize it a bit too much. Wait for the controller to
504 * become ready.
506 while (((status = dpt_inb(dpt, HA_RSTATUS)) != (HA_SREADY | HA_SSC)
507 && (status != (HA_SREADY | HA_SSC | HA_SERROR))
508 && (status != (HA_SDRDY | HA_SERROR | HA_SDRQ)))
509 || (dpt_wait(dpt, HA_SBUSY, 0))) {
512 * RAID Drives still Spinning up? (This should only occur if
513 * the DPT controller is in a NON PC (PCI?) platform).
515 if (dpt_raid_busy(dpt)) {
516 kprintf("dpt%d WARNING: Get_conf() RSUS failed.\n",
517 dpt->unit);
518 crit_exit();
519 return (0);
523 DptStat_Reset_BUSY(dpt->sp);
526 * XXXX We might want to do something more clever than aborting at
527 * this point, like resetting (rebooting) the controller and trying
528 * again.
530 if ((result = dpt_send_eata_command(dpt, cp, dccb_busaddr,
531 EATA_CMD_DMA_SEND_CP,
532 10000, 0, 0, 0)) != 0) {
533 kprintf("dpt%d WARNING: Get_conf() failed (%d) to send "
534 "EATA_CMD_DMA_READ_CONFIG\n",
535 dpt->unit, result);
536 crit_exit();
537 return (0);
539 /* Wait for two seconds for a response. This can be slow */
540 for (ndx = 0;
541 (ndx < 20000)
542 && !((status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ);
543 ndx++) {
544 DELAY(50);
547 /* Grab the status and clear interrupts */
548 status = dpt_inb(dpt, HA_RSTATUS);
550 crit_exit();
553 * Check the status carefully. Return only if the
554 * command was successful.
556 if (((status & HA_SERROR) == 0)
557 && (dpt->sp->hba_stat == 0)
558 && (dpt->sp->scsi_stat == 0)
559 && (dpt->sp->residue_len == 0))
560 return (0);
562 if (dpt->sp->scsi_stat == SCSI_STATUS_CHECK_COND)
563 return (0);
565 return (1);
568 /* Detect Cache parameters and size */
569 static void
570 dpt_detect_cache(dpt_softc_t *dpt, dpt_ccb_t *dccb, u_int32_t dccb_busaddr,
571 u_int8_t *buff)
573 eata_ccb_t *cp;
574 u_int8_t *param;
575 int bytes;
576 int result;
577 int ndx;
578 u_int8_t status;
581 * Default setting, for best performance..
582 * This is what virtually all cards default to..
584 dpt->cache_type = DPT_CACHE_WRITEBACK;
585 dpt->cache_size = 0;
587 cp = &dccb->eata_ccb;
588 bzero((void *)(uintptr_t)(volatile void *)dpt->sp, sizeof(*dpt->sp));
589 bzero(buff, 512);
591 /* Setup the command structure */
592 cp->Interpret = 1;
593 cp->DataIn = 1;
594 cp->Auto_Req_Sen = 1;
595 cp->reqlen = sizeof(struct scsi_sense_data);
597 cp->cp_id = 0; /* who cares? The HBA will interpret.. */
598 cp->cp_LUN = 0; /* In the EATA packet */
599 cp->cp_lun = 0; /* In the SCSI command */
600 cp->cp_channel = 0;
602 cp->cp_scsi_cmd = EATA_CMD_DMA_SEND_CP;
603 cp->cp_len = 56;
605 cp->cp_extent = 0;
606 cp->cp_page = 0;
607 cp->cp_identify = 1;
608 cp->cp_dispri = 1;
611 * Build the EATA Command Packet structure
612 * for a Log Sense Command.
614 cp->cp_cdb[0] = 0x4d;
615 cp->cp_cdb[1] = 0x0;
616 cp->cp_cdb[2] = 0x40 | 0x33;
617 cp->cp_cdb[7] = 1;
619 cp->cp_datalen = htonl(512);
621 crit_enter();
622 result = dpt_send_eata_command(dpt, cp, dccb_busaddr,
623 EATA_CMD_DMA_SEND_CP,
624 10000, 0, 0, 0);
625 if (result != 0) {
626 kprintf("dpt%d WARNING: detect_cache() failed (%d) to send "
627 "EATA_CMD_DMA_SEND_CP\n", dpt->unit, result);
628 crit_exit();
629 return;
631 /* Wait for two seconds for a response. This can be slow... */
632 for (ndx = 0;
633 (ndx < 20000) &&
634 !((status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ);
635 ndx++) {
636 DELAY(50);
639 /* Grab the status and clear interrupts */
640 status = dpt_inb(dpt, HA_RSTATUS);
641 crit_exit();
644 * Sanity check
646 if (buff[0] != 0x33) {
647 return;
649 bytes = DPT_HCP_LENGTH(buff);
650 param = DPT_HCP_FIRST(buff);
652 if (DPT_HCP_CODE(param) != 1) {
654 * DPT Log Page layout error
656 kprintf("dpt%d: NOTICE: Log Page (1) layout error\n",
657 dpt->unit);
658 return;
660 if (!(param[4] & 0x4)) {
661 dpt->cache_type = DPT_NO_CACHE;
662 return;
664 while (DPT_HCP_CODE(param) != 6) {
665 param = DPT_HCP_NEXT(param);
666 if ((param < buff)
667 || (param >= &buff[bytes])) {
668 return;
672 if (param[4] & 0x2) {
674 * Cache disabled
676 dpt->cache_type = DPT_NO_CACHE;
677 return;
680 if (param[4] & 0x4) {
681 dpt->cache_type = DPT_CACHE_WRITETHROUGH;
684 /* XXX This isn't correct. This log parameter only has two bytes.... */
685 #if 0
686 dpt->cache_size = param[5]
687 | (param[6] << 8)
688 | (param[7] << 16)
689 | (param[8] << 24);
690 #endif
693 static void
694 dpt_poll(struct cam_sim *sim)
696 dpt_intr(cam_sim_softc(sim));
699 static void
700 dptexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
702 struct dpt_ccb *dccb;
703 union ccb *ccb;
704 struct dpt_softc *dpt;
706 dccb = (struct dpt_ccb *)arg;
707 ccb = dccb->ccb;
708 dpt = (struct dpt_softc *)ccb->ccb_h.ccb_dpt_ptr;
710 if (error != 0) {
711 if (error != EFBIG)
712 kprintf("dpt%d: Unexpected error 0x%x returned from "
713 "bus_dmamap_load\n", dpt->unit, error);
714 if (ccb->ccb_h.status == CAM_REQ_INPROG) {
715 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
716 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
718 dptfreeccb(dpt, dccb);
719 xpt_done(ccb);
720 return;
723 if (nseg != 0) {
724 dpt_sg_t *sg;
725 bus_dma_segment_t *end_seg;
726 bus_dmasync_op_t op;
728 end_seg = dm_segs + nseg;
730 /* Copy the segments into our SG list */
731 sg = dccb->sg_list;
732 while (dm_segs < end_seg) {
733 sg->seg_len = htonl(dm_segs->ds_len);
734 sg->seg_addr = htonl(dm_segs->ds_addr);
735 sg++;
736 dm_segs++;
739 if (nseg > 1) {
740 dccb->eata_ccb.scatter = 1;
741 dccb->eata_ccb.cp_dataDMA = dccb->sg_busaddr;
742 dccb->eata_ccb.cp_datalen =
743 htonl(nseg * sizeof(dpt_sg_t));
744 } else {
745 dccb->eata_ccb.cp_dataDMA = dccb->sg_list[0].seg_addr;
746 dccb->eata_ccb.cp_datalen = dccb->sg_list[0].seg_len;
749 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
750 op = BUS_DMASYNC_PREREAD;
751 else
752 op = BUS_DMASYNC_PREWRITE;
754 bus_dmamap_sync(dpt->buffer_dmat, dccb->dmamap, op);
756 } else {
757 dccb->eata_ccb.cp_dataDMA = 0;
758 dccb->eata_ccb.cp_datalen = 0;
761 crit_enter();
764 * Last time we need to check if this CCB needs to
765 * be aborted.
767 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
768 if (nseg != 0)
769 bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap);
770 dptfreeccb(dpt, dccb);
771 xpt_done(ccb);
772 crit_exit();
773 return;
776 dccb->state |= DCCB_ACTIVE;
777 ccb->ccb_h.status |= CAM_SIM_QUEUED;
778 LIST_INSERT_HEAD(&dpt->pending_ccb_list, &ccb->ccb_h, sim_links.le);
779 callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
780 dpttimeout, dccb);
781 if (dpt_send_eata_command(dpt, &dccb->eata_ccb,
782 dccb->eata_ccb.cp_busaddr,
783 EATA_CMD_DMA_SEND_CP, 0, 0, 0, 0) != 0) {
784 ccb->ccb_h.status = CAM_NO_HBA; /* HBA dead or just busy?? */
785 if (nseg != 0)
786 bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap);
787 dptfreeccb(dpt, dccb);
788 xpt_done(ccb);
791 crit_exit();
794 static void
795 dpt_action(struct cam_sim *sim, union ccb *ccb)
797 struct dpt_softc *dpt;
799 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("dpt_action\n"));
801 dpt = (struct dpt_softc *)cam_sim_softc(sim);
803 if ((dpt->state & DPT_HA_SHUTDOWN_ACTIVE) != 0) {
804 xpt_print_path(ccb->ccb_h.path);
805 kprintf("controller is shutdown. Aborting CCB.\n");
806 ccb->ccb_h.status = CAM_NO_HBA;
807 xpt_done(ccb);
808 return;
811 switch (ccb->ccb_h.func_code) {
812 /* Common cases first */
813 case XPT_SCSI_IO: /* Execute the requested I/O operation */
815 struct ccb_scsiio *csio;
816 struct ccb_hdr *ccbh;
817 struct dpt_ccb *dccb;
818 struct eata_ccb *eccb;
820 csio = &ccb->csio;
821 ccbh = &ccb->ccb_h;
822 /* Max CDB length is 12 bytes */
823 if (csio->cdb_len > 12) {
824 ccb->ccb_h.status = CAM_REQ_INVALID;
825 xpt_done(ccb);
826 return;
828 if ((dccb = dptgetccb(dpt)) == NULL) {
829 crit_enter();
830 dpt->resource_shortage = 1;
831 crit_exit();
832 xpt_freeze_simq(sim, /*count*/1);
833 ccb->ccb_h.status = CAM_REQUEUE_REQ;
834 xpt_done(ccb);
835 return;
837 eccb = &dccb->eata_ccb;
839 /* Link dccb and ccb so we can find one from the other */
840 dccb->ccb = ccb;
841 ccb->ccb_h.ccb_dccb_ptr = dccb;
842 ccb->ccb_h.ccb_dpt_ptr = dpt;
845 * Explicitly set all flags so that the compiler can
846 * be smart about setting them.
848 eccb->SCSI_Reset = 0;
849 eccb->HBA_Init = 0;
850 eccb->Auto_Req_Sen = (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE)
851 ? 0 : 1;
852 eccb->scatter = 0;
853 eccb->Quick = 0;
854 eccb->Interpret =
855 ccb->ccb_h.target_id == dpt->hostid[cam_sim_bus(sim)]
856 ? 1 : 0;
857 eccb->DataOut = (ccb->ccb_h.flags & CAM_DIR_OUT) ? 1 : 0;
858 eccb->DataIn = (ccb->ccb_h.flags & CAM_DIR_IN) ? 1 : 0;
859 eccb->reqlen = csio->sense_len;
860 eccb->cp_id = ccb->ccb_h.target_id;
861 eccb->cp_channel = cam_sim_bus(sim);
862 eccb->cp_LUN = ccb->ccb_h.target_lun;
863 eccb->cp_luntar = 0;
864 eccb->cp_dispri = (ccb->ccb_h.flags & CAM_DIS_DISCONNECT)
865 ? 0 : 1;
866 eccb->cp_identify = 1;
868 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0
869 && csio->tag_action != CAM_TAG_ACTION_NONE) {
870 eccb->cp_msg[0] = csio->tag_action;
871 eccb->cp_msg[1] = dccb->tag;
872 } else {
873 eccb->cp_msg[0] = 0;
874 eccb->cp_msg[1] = 0;
876 eccb->cp_msg[2] = 0;
878 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
879 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
880 bcopy(csio->cdb_io.cdb_ptr,
881 eccb->cp_cdb, csio->cdb_len);
882 } else {
883 /* I guess I could map it in... */
884 ccb->ccb_h.status = CAM_REQ_INVALID;
885 dptfreeccb(dpt, dccb);
886 xpt_done(ccb);
887 return;
889 } else {
890 bcopy(csio->cdb_io.cdb_bytes,
891 eccb->cp_cdb, csio->cdb_len);
894 * If we have any data to send with this command,
895 * map it into bus space.
897 /* Only use S/G if there is a transfer */
898 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
899 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
901 * We've been given a pointer
902 * to a single buffer.
904 if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
905 int error;
907 crit_enter();
908 error =
909 bus_dmamap_load(dpt->buffer_dmat,
910 dccb->dmamap,
911 csio->data_ptr,
912 csio->dxfer_len,
913 dptexecuteccb,
914 dccb, /*flags*/0);
915 if (error == EINPROGRESS) {
917 * So as to maintain ordering,
918 * freeze the controller queue
919 * until our mapping is
920 * returned.
922 xpt_freeze_simq(sim, 1);
923 dccb->state |= CAM_RELEASE_SIMQ;
925 crit_exit();
926 } else {
927 struct bus_dma_segment seg;
929 /* Pointer to physical buffer */
930 seg.ds_addr =
931 (bus_addr_t)csio->data_ptr;
932 seg.ds_len = csio->dxfer_len;
933 dptexecuteccb(dccb, &seg, 1, 0);
935 } else {
936 struct bus_dma_segment *segs;
938 if ((ccbh->flags & CAM_DATA_PHYS) != 0)
939 panic("dpt_action - Physical "
940 "segment pointers "
941 "unsupported");
943 if ((ccbh->flags&CAM_SG_LIST_PHYS)==0)
944 panic("dpt_action - Virtual "
945 "segment addresses "
946 "unsupported");
948 /* Just use the segments provided */
949 segs = (struct bus_dma_segment *)csio->data_ptr;
950 dptexecuteccb(dccb, segs, csio->sglist_cnt, 0);
952 } else {
954 * XXX JGibbs.
955 * Does it want them both on or both off?
956 * CAM_DIR_NONE is both on, so this code can
957 * be removed if this is also what the DPT
958 * expects.
960 eccb->DataOut = 0;
961 eccb->DataIn = 0;
962 dptexecuteccb(dccb, NULL, 0, 0);
964 break;
966 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
967 case XPT_ABORT: /* Abort the specified CCB */
968 /* XXX Implement */
969 ccb->ccb_h.status = CAM_REQ_INVALID;
970 xpt_done(ccb);
971 break;
972 case XPT_SET_TRAN_SETTINGS:
974 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
975 xpt_done(ccb);
976 break;
978 case XPT_GET_TRAN_SETTINGS:
979 /* Get default/user set transfer settings for the target */
981 struct ccb_trans_settings *cts = &ccb->cts;
982 struct ccb_trans_settings_scsi *scsi =
983 &cts->proto_specific.scsi;
984 struct ccb_trans_settings_spi *spi =
985 &cts->xport_specific.spi;
987 cts->protocol = PROTO_SCSI;
988 cts->protocol_version = SCSI_REV_2;
989 cts->transport = XPORT_SPI;
990 cts->transport_version = 2;
992 if (cts->type == CTS_TYPE_USER_SETTINGS) {
993 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
994 spi->bus_width = (dpt->max_id > 7)
995 ? MSG_EXT_WDTR_BUS_8_BIT
996 : MSG_EXT_WDTR_BUS_16_BIT;
997 spi->sync_period = 25; /* 10MHz */
998 if (spi->sync_period != 0)
999 spi->sync_offset = 15;
1000 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1002 spi->valid = CTS_SPI_VALID_SYNC_RATE
1003 | CTS_SPI_VALID_SYNC_OFFSET
1005 | CTS_SPI_VALID_BUS_WIDTH
1006 | CTS_SPI_VALID_DISC;
1007 scsi->valid = CTS_SCSI_VALID_TQ;
1008 ccb->ccb_h.status = CAM_REQ_CMP;
1009 } else {
1010 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1012 xpt_done(ccb);
1013 break;
1015 case XPT_CALC_GEOMETRY:
1017 struct ccb_calc_geometry *ccg;
1018 u_int32_t size_mb;
1019 u_int32_t secs_per_cylinder;
1020 int extended;
1023 * XXX Use Adaptec translation until I find out how to
1024 * get this information from the card.
1026 ccg = &ccb->ccg;
1027 size_mb = ccg->volume_size
1028 / ((1024L * 1024L) / ccg->block_size);
1029 extended = 1;
1031 if (size_mb > 1024 && extended) {
1032 ccg->heads = 255;
1033 ccg->secs_per_track = 63;
1034 } else {
1035 ccg->heads = 64;
1036 ccg->secs_per_track = 32;
1038 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
1039 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
1040 ccb->ccb_h.status = CAM_REQ_CMP;
1041 xpt_done(ccb);
1042 break;
1044 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
1046 /* XXX Implement */
1047 ccb->ccb_h.status = CAM_REQ_CMP;
1048 xpt_done(ccb);
1049 break;
1051 case XPT_TERM_IO: /* Terminate the I/O process */
1052 /* XXX Implement */
1053 ccb->ccb_h.status = CAM_REQ_INVALID;
1054 xpt_done(ccb);
1055 break;
1056 case XPT_PATH_INQ: /* Path routing inquiry */
1058 struct ccb_pathinq *cpi = &ccb->cpi;
1060 cpi->version_num = 1;
1061 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
1062 if (dpt->max_id > 7)
1063 cpi->hba_inquiry |= PI_WIDE_16;
1064 cpi->target_sprt = 0;
1065 cpi->hba_misc = 0;
1066 cpi->hba_eng_cnt = 0;
1067 cpi->max_target = dpt->max_id;
1068 cpi->max_lun = dpt->max_lun;
1069 cpi->initiator_id = dpt->hostid[cam_sim_bus(sim)];
1070 cpi->bus_id = cam_sim_bus(sim);
1071 cpi->base_transfer_speed = 3300;
1072 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1073 strncpy(cpi->hba_vid, "DPT", HBA_IDLEN);
1074 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1075 cpi->unit_number = cam_sim_unit(sim);
1076 cpi->transport = XPORT_SPI;
1077 cpi->transport_version = 2;
1078 cpi->protocol = PROTO_SCSI;
1079 cpi->protocol_version = SCSI_REV_2;
1080 cpi->ccb_h.status = CAM_REQ_CMP;
1081 xpt_done(ccb);
1082 break;
1084 default:
1085 ccb->ccb_h.status = CAM_REQ_INVALID;
1086 xpt_done(ccb);
1087 break;
1092 * This routine will try to send an EATA command to the DPT HBA.
1093 * It will, by default, try 20,000 times, waiting 50us between tries.
1094 * It returns 0 on success and 1 on failure.
1095 * It is assumed to be called at splcam().
1097 static int
1098 dpt_send_eata_command(dpt_softc_t *dpt, eata_ccb_t *cmd_block,
1099 u_int32_t cmd_busaddr, u_int command, u_int retries,
1100 u_int ifc, u_int code, u_int code2)
1102 u_int loop;
1104 if (!retries)
1105 retries = 20000;
1108 * I hate this polling nonsense. Wish there was a way to tell the DPT
1109 * to go get commands at its own pace, or to interrupt when ready.
1110 * In the meantime we will measure how many iterations it really
1111 * takes.
1113 for (loop = 0; loop < retries; loop++) {
1114 if ((dpt_inb(dpt, HA_RAUXSTAT) & HA_ABUSY) == 0)
1115 break;
1116 else
1117 DELAY(50);
1120 if (loop < retries) {
1121 #ifdef DPT_MEASURE_PERFORMANCE
1122 if (loop > dpt->performance.max_eata_tries)
1123 dpt->performance.max_eata_tries = loop;
1125 if (loop < dpt->performance.min_eata_tries)
1126 dpt->performance.min_eata_tries = loop;
1127 #endif
1128 } else {
1129 #ifdef DPT_MEASURE_PERFORMANCE
1130 ++dpt->performance.command_too_busy;
1131 #endif
1132 return (1);
1135 /* The controller is alive, advance the wedge timer */
1136 #ifdef DPT_RESET_HBA
1137 dpt->last_contact = microtime_now;
1138 #endif
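/*
 * Hand the CP bus address to the controller; on big-endian hosts the
 * address is byte-swapped first so the controller sees it in the byte
 * order it expects.
 */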
1140 if (cmd_block == NULL)
1141 cmd_busaddr = 0;
1142 #if (BYTE_ORDER == BIG_ENDIAN)
1143 else {
1144 cmd_busaddr = ((cmd_busaddr >> 24) & 0x000000FF)
1145 | ((cmd_busaddr >> 8) & 0x0000FF00)
1146 | ((cmd_busaddr << 8) & 0x00FF0000)
1147 | ((cmd_busaddr << 24) & 0xFF000000);
1149 #endif
1150 /* And now the address */
1151 dpt_outl(dpt, HA_WDMAADDR, cmd_busaddr);
1153 if (command == EATA_CMD_IMMEDIATE) {
1154 if (cmd_block == NULL) {
1155 dpt_outb(dpt, HA_WCODE2, code2);
1156 dpt_outb(dpt, HA_WCODE, code);
1158 dpt_outb(dpt, HA_WIFC, ifc);
1160 dpt_outb(dpt, HA_WCOMMAND, command);
1162 return (0);
1166 /* ==================== Exported Function definitions =======================*/
1167 dpt_softc_t *
1168 dpt_alloc(device_t dev, bus_space_tag_t tag, bus_space_handle_t bsh)
1170 dpt_softc_t *dpt = device_get_softc(dev);
1171 int i;
1173 bzero(dpt, sizeof(dpt_softc_t));
1174 dpt->tag = tag;
1175 dpt->bsh = bsh;
1176 dpt->unit = device_get_unit(dev);
1177 SLIST_INIT(&dpt->free_dccb_list);
1178 LIST_INIT(&dpt->pending_ccb_list);
1179 TAILQ_INSERT_TAIL(&dpt_softcs, dpt, links);
1180 for (i = 0; i < MAX_CHANNELS; i++)
1181 dpt->resetlevel[i] = DPT_HA_OK;
1183 #ifdef DPT_MEASURE_PERFORMANCE
1184 dpt_reset_performance(dpt);
1185 #endif /* DPT_MEASURE_PERFORMANCE */
1186 return (dpt);
1189 void
1190 dpt_free(struct dpt_softc *dpt)
1192 switch (dpt->init_level) {
1193 default:
1194 case 5:
1195 bus_dmamap_unload(dpt->dccb_dmat, dpt->dccb_dmamap);
1196 case 4:
1197 bus_dmamem_free(dpt->dccb_dmat, dpt->dpt_dccbs,
1198 dpt->dccb_dmamap);
1199 bus_dmamap_destroy(dpt->dccb_dmat, dpt->dccb_dmamap);
1200 case 3:
1201 bus_dma_tag_destroy(dpt->dccb_dmat);
1202 case 2:
1203 bus_dma_tag_destroy(dpt->buffer_dmat);
1204 case 1:
1206 struct sg_map_node *sg_map;
1208 while ((sg_map = SLIST_FIRST(&dpt->sg_maps)) != NULL) {
1209 SLIST_REMOVE_HEAD(&dpt->sg_maps, links);
1210 bus_dmamap_unload(dpt->sg_dmat,
1211 sg_map->sg_dmamap);
1212 bus_dmamem_free(dpt->sg_dmat, sg_map->sg_vaddr,
1213 sg_map->sg_dmamap);
1214 kfree(sg_map, M_DEVBUF);
1216 bus_dma_tag_destroy(dpt->sg_dmat);
1218 case 0:
1219 break;
1221 TAILQ_REMOVE(&dpt_softcs, dpt, links);
1224 static u_int8_t string_sizes[] =
1226 sizeof(((dpt_inq_t*)NULL)->vendor),
1227 sizeof(((dpt_inq_t*)NULL)->modelNum),
1228 sizeof(((dpt_inq_t*)NULL)->firmware),
1229 sizeof(((dpt_inq_t*)NULL)->protocol),
1233 dpt_init(struct dpt_softc *dpt)
1235 dpt_conf_t conf;
1236 struct sg_map_node *sg_map;
1237 dpt_ccb_t *dccb;
1238 u_int8_t *strp;
1239 int index;
1240 int i;
1241 int retval;
1243 dpt->init_level = 0;
1244 SLIST_INIT(&dpt->sg_maps);
1246 #ifdef DPT_RESET_BOARD
1247 kprintf("dpt%d: resetting HBA\n", dpt->unit);
1248 dpt_outb(dpt, HA_WCOMMAND, EATA_CMD_RESET);
1249 DELAY(750000);
1250 /* XXX Shouldn't we poll a status register or something??? */
1251 #endif
1252 /* DMA tag for our S/G structures. We allocate in page sized chunks */
1253 if (bus_dma_tag_create(dpt->parent_dmat, /*alignment*/1, /*boundary*/0,
1254 /*lowaddr*/BUS_SPACE_MAXADDR,
1255 /*highaddr*/BUS_SPACE_MAXADDR,
1256 /*filter*/NULL, /*filterarg*/NULL,
1257 PAGE_SIZE, /*nsegments*/1,
1258 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
1259 /*flags*/0, &dpt->sg_dmat) != 0) {
1260 goto error_exit;
1263 dpt->init_level++;
1266 * We allocate our DPT ccbs as a contiguous array of bus dma'able
1267 * memory. To get the allocation size, we need to know how many
1268 * ccbs the card supports. This requires a ccb. We solve this
1269 * chicken and egg problem by allocating some re-usable S/G space
1270 * up front, and treating it as our status packet, CCB, and target
1271 * memory space for these commands.
1273 sg_map = dptallocsgmap(dpt);
1274 if (sg_map == NULL)
1275 goto error_exit;
1277 dpt->sp = (volatile dpt_sp_t *)sg_map->sg_vaddr;
1278 dccb = (struct dpt_ccb *)(uintptr_t)(volatile void *)&dpt->sp[1];
1279 bzero(dccb, sizeof(*dccb));
1280 dpt->sp_physaddr = sg_map->sg_physaddr;
1281 dccb->eata_ccb.cp_dataDMA =
1282 htonl(sg_map->sg_physaddr + sizeof(dpt_sp_t) + sizeof(*dccb));
1283 dccb->eata_ccb.cp_busaddr = ~0;
1284 dccb->eata_ccb.cp_statDMA = htonl(dpt->sp_physaddr);
1285 dccb->eata_ccb.cp_reqDMA = htonl(dpt->sp_physaddr + sizeof(*dccb)
1286 + offsetof(struct dpt_ccb, sense_data));
1288 /* Okay. Fetch our config */
1289 bzero(&dccb[1], sizeof(conf)); /* data area */
1290 retval = dpt_get_conf(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
1291 sizeof(conf), 0xc1, 7, 1);
1293 if (retval != 0) {
1294 kprintf("dpt%d: Failed to get board configuration\n", dpt->unit);
1295 return (retval);
1297 bcopy(&dccb[1], &conf, sizeof(conf));
1299 bzero(&dccb[1], sizeof(dpt->board_data));
1300 retval = dpt_get_conf(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
1301 sizeof(dpt->board_data), 0, conf.scsi_id0, 0);
1302 if (retval != 0) {
1303 kprintf("dpt%d: Failed to get inquiry information\n", dpt->unit);
1304 return (retval);
1306 bcopy(&dccb[1], &dpt->board_data, sizeof(dpt->board_data));
1308 dpt_detect_cache(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
1309 (u_int8_t *)&dccb[1]);
1311 switch (ntohl(conf.splen)) {
1312 case DPT_EATA_REVA:
1313 dpt->EATA_revision = 'a';
1314 break;
1315 case DPT_EATA_REVB:
1316 dpt->EATA_revision = 'b';
1317 break;
1318 case DPT_EATA_REVC:
1319 dpt->EATA_revision = 'c';
1320 break;
1321 case DPT_EATA_REVZ:
1322 dpt->EATA_revision = 'z';
1323 break;
1324 default:
1325 dpt->EATA_revision = '?';
1328 dpt->max_id = conf.MAX_ID;
1329 dpt->max_lun = conf.MAX_LUN;
1330 dpt->irq = conf.IRQ;
1331 dpt->dma_channel = (8 - conf.DMA_channel) & 7;
1332 dpt->channels = conf.MAX_CHAN + 1;
1333 dpt->state |= DPT_HA_OK;
1334 if (conf.SECOND)
1335 dpt->primary = FALSE;
1336 else
1337 dpt->primary = TRUE;
1339 dpt->more_support = conf.MORE_support;
1341 if (strncmp(dpt->board_data.firmware, "07G0", 4) >= 0)
1342 dpt->immediate_support = 1;
1343 else
1344 dpt->immediate_support = 0;
1346 dpt->broken_INQUIRY = FALSE;
1348 dpt->cplen = ntohl(conf.cplen);
1349 dpt->cppadlen = ntohs(conf.cppadlen);
1350 dpt->max_dccbs = ntohs(conf.queuesiz);
1352 if (dpt->max_dccbs > 256) {
1353 kprintf("dpt%d: Max CCBs reduced from %d to "
1354 "256 due to tag algorithm\n", dpt->unit, dpt->max_dccbs);
1355 dpt->max_dccbs = 256;
1358 dpt->hostid[0] = conf.scsi_id0;
1359 dpt->hostid[1] = conf.scsi_id1;
1360 dpt->hostid[2] = conf.scsi_id2;
1362 if (conf.SG_64K)
1363 dpt->sgsize = 8192;
1364 else
1365 dpt->sgsize = ntohs(conf.SGsiz);
1367 /* We can only get 64k buffers, so don't bother to waste space. */
1368 if (dpt->sgsize < 17 || dpt->sgsize > 32)
1369 dpt->sgsize = 32;
1371 if (dpt->sgsize > dpt_max_segs)
1372 dpt->sgsize = dpt_max_segs;
1374 /* DMA tag for mapping buffers into device visible space. */
1375 if (bus_dma_tag_create(dpt->parent_dmat, /*alignment*/1, /*boundary*/0,
1376 /*lowaddr*/BUS_SPACE_MAXADDR,
1377 /*highaddr*/BUS_SPACE_MAXADDR,
1378 /*filter*/NULL, /*filterarg*/NULL,
1379 /*maxsize*/MAXBSIZE, /*nsegments*/dpt->sgsize,
1380 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
1381 /*flags*/BUS_DMA_ALLOCNOW,
1382 &dpt->buffer_dmat) != 0) {
1383 kprintf("dpt: bus_dma_tag_create(...,dpt->buffer_dmat) failed\n");
1384 goto error_exit;
1387 dpt->init_level++;
1389 /* DMA tag for our ccb structures and interrupt status packet */
1390 if (bus_dma_tag_create(dpt->parent_dmat, /*alignment*/1, /*boundary*/0,
1391 /*lowaddr*/BUS_SPACE_MAXADDR,
1392 /*highaddr*/BUS_SPACE_MAXADDR,
1393 /*filter*/NULL, /*filterarg*/NULL,
1394 (dpt->max_dccbs * sizeof(struct dpt_ccb))
1395 + sizeof(dpt_sp_t),
1396 /*nsegments*/1,
1397 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
1398 /*flags*/0, &dpt->dccb_dmat) != 0) {
1399 kprintf("dpt: bus_dma_tag_create(...,dpt->dccb_dmat) failed\n");
1400 goto error_exit;
1403 dpt->init_level++;
1405 /* Allocation for our ccbs and interrupt status packet */
1406 if (bus_dmamem_alloc(dpt->dccb_dmat, (void **)&dpt->dpt_dccbs,
1407 BUS_DMA_NOWAIT, &dpt->dccb_dmamap) != 0) {
1408 kprintf("dpt: bus_dmamem_alloc(dpt->dccb_dmat,...) failed\n");
1409 goto error_exit;
1412 dpt->init_level++;
1414 /* And permanently map them */
1415 bus_dmamap_load(dpt->dccb_dmat, dpt->dccb_dmamap,
1416 dpt->dpt_dccbs,
1417 (dpt->max_dccbs * sizeof(struct dpt_ccb))
1418 + sizeof(dpt_sp_t),
1419 dptmapmem, &dpt->dpt_ccb_busbase, /*flags*/0);
1421 /* Clear them out. */
1422 bzero(dpt->dpt_dccbs,
1423 (dpt->max_dccbs * sizeof(struct dpt_ccb)) + sizeof(dpt_sp_t));
1425 dpt->dpt_ccb_busend = dpt->dpt_ccb_busbase;
1427 dpt->sp = (dpt_sp_t*)&dpt->dpt_dccbs[dpt->max_dccbs];
1428 dpt->sp_physaddr = dpt->dpt_ccb_busbase
1429 + (dpt->max_dccbs * sizeof(dpt_ccb_t));
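/*
 * The interrupt status packet shares the DCCB DMA allocation and sits
 * immediately after the CCB array, so its bus address is the CCB bus
 * base plus the size of that array.
 */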
1430 dpt->init_level++;
1432 /* Allocate our first batch of ccbs */
1433 if (dptallocccbs(dpt) == 0) {
1434 kprintf("dpt: dptallocccbs(dpt) == 0\n");
1435 return (2);
1438 /* Prepare for Target Mode */
1439 dpt->target_mode_enabled = 1;
1441 /* Nuke excess spaces from inquiry information */
1442 strp = dpt->board_data.vendor;
1443 for (i = 0; i < sizeof(string_sizes); i++) {
1444 index = string_sizes[i] - 1;
1445 while (index && (strp[index] == ' '))
1446 strp[index--] = '\0';
1447 strp += string_sizes[i];
1450 kprintf("dpt%d: %.8s %.16s FW Rev. %.4s, ",
1451 dpt->unit, dpt->board_data.vendor,
1452 dpt->board_data.modelNum, dpt->board_data.firmware);
1454 kprintf("%d channel%s, ", dpt->channels, dpt->channels > 1 ? "s" : "");
1456 if (dpt->cache_type != DPT_NO_CACHE
1457 && dpt->cache_size != 0) {
1458 kprintf("%s Cache, ",
1459 dpt->cache_type == DPT_CACHE_WRITETHROUGH
1460 ? "Write-Through" : "Write-Back");
1463 kprintf("%d CCBs\n", dpt->max_dccbs);
1464 return (0);
1466 error_exit:
1467 return (1);
1471 dpt_attach(dpt_softc_t *dpt)
1473 struct cam_devq *devq;
1474 int i;
1477 * Create the device queue for our SIM.
1479 devq = cam_simq_alloc(dpt->max_dccbs);
1480 if (devq == NULL)
1481 return (0);
1483 for (i = 0; i < dpt->channels; i++) {
1485 * Construct our SIM entry
1487 dpt->sims[i] = cam_sim_alloc(dpt_action, dpt_poll, "dpt",
1488 dpt, dpt->unit, &sim_mplock,
1489 /*untagged*/2,
1490 /*tagged*/dpt->max_dccbs, devq);
1491 if (xpt_bus_register(dpt->sims[i], i) != CAM_SUCCESS) {
1492 cam_sim_free(dpt->sims[i]);
1493 break;
1496 if (xpt_create_path(&dpt->paths[i], /*periph*/NULL,
1497 cam_sim_path(dpt->sims[i]),
1498 CAM_TARGET_WILDCARD,
1499 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1500 xpt_bus_deregister(cam_sim_path(dpt->sims[i]));
1501 cam_sim_free(dpt->sims[i]);
1502 break;
1506 cam_simq_release(devq);
1507 if (i > 0)
1508 EVENTHANDLER_REGISTER(shutdown_post_sync, dptshutdown,
1509 dpt, SHUTDOWN_PRI_DRIVER);
1510 return (i);
1515 * This is the interrupt handler for the DPT driver.
1517 void
1518 dpt_intr(void *arg)
1520 dpt_softc_t *dpt;
1521 dpt_ccb_t *dccb;
1522 union ccb *ccb;
1523 u_int status;
1524 u_int aux_status;
1525 u_int hba_stat;
1526 u_int scsi_stat;
1527 u_int32_t residue_len; /* Number of bytes not transferred */
1529 dpt = (dpt_softc_t *)arg;
1531 /* First order of business is to check if this interrupt is for us */
1532 while (((aux_status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ) != 0) {
1535 * What we want to do now, is to capture the status, all of it,
1536 * move it where it belongs, wake up whoever sleeps waiting to
1537 * process this result, and get out of here.
1539 if (dpt->sp->ccb_busaddr < dpt->dpt_ccb_busbase
1540 || dpt->sp->ccb_busaddr >= dpt->dpt_ccb_busend) {
1541 kprintf("Encountered bogus status packet\n");
1542 status = dpt_inb(dpt, HA_RSTATUS);
1543 return;
1546 dccb = dptccbptov(dpt, dpt->sp->ccb_busaddr);
1548 dpt->sp->ccb_busaddr = ~0;
1550 /* Ignore status packets with EOC not set */
1551 if (dpt->sp->EOC == 0) {
1552 kprintf("dpt%d ERROR: Request %d received with "
1553 "clear EOC.\n Marking as LOST.\n",
1554 dpt->unit, dccb->transaction_id);
1556 #ifdef DPT_HANDLE_TIMEOUTS
1557 dccb->state |= DPT_CCB_STATE_MARKED_LOST;
1558 #endif
1559 /* This CLEARS the interrupt! */
1560 status = dpt_inb(dpt, HA_RSTATUS);
1561 continue;
1563 dpt->sp->EOC = 0;
1566 * Double buffer the status information so the hardware can
1567 * work on updating the status packet while we decipher the
1568 * one we were just interrupted for.
1569 * According to Mark Salyzyn, we only need few pieces of it.
1571 hba_stat = dpt->sp->hba_stat;
1572 scsi_stat = dpt->sp->scsi_stat;
1573 residue_len = dpt->sp->residue_len;
1575 /* Clear interrupts, check for error */
1576 if ((status = dpt_inb(dpt, HA_RSTATUS)) & HA_SERROR) {
1578 * Error Condition. Check for magic cookie. Exit
1579 * this test on earliest sign of non-reset condition
1582 /* Check that this is not a board reset interrupt */
1583 if (dpt_just_reset(dpt)) {
1584 kprintf("dpt%d: HBA rebooted.\n"
1585 " All transactions should be "
1586 "resubmitted\n",
1587 dpt->unit);
1589 kprintf("dpt%d: >>---->> This is incomplete, "
1590 "fix me.... <<----<<", dpt->unit);
1591 panic("DPT Rebooted");
1595 /* Process CCB */
1596 ccb = dccb->ccb;
1597 callout_stop(&ccb->ccb_h.timeout_ch);
1598 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1599 bus_dmasync_op_t op;
1601 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1602 op = BUS_DMASYNC_POSTREAD;
1603 else
1604 op = BUS_DMASYNC_POSTWRITE;
1605 bus_dmamap_sync(dpt->buffer_dmat, dccb->dmamap, op);
1606 bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap);
1609 /* Common Case inline... */
1610 if (hba_stat == HA_NO_ERROR) {
1611 ccb->csio.scsi_status = scsi_stat;
1612 ccb->ccb_h.status = 0;
1613 switch (scsi_stat) {
1614 case SCSI_STATUS_OK:
1615 ccb->ccb_h.status |= CAM_REQ_CMP;
1616 break;
1617 case SCSI_STATUS_CHECK_COND:
1618 case SCSI_STATUS_CMD_TERMINATED:
1619 bcopy(&dccb->sense_data, &ccb->csio.sense_data,
1620 ccb->csio.sense_len);
1621 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1622 /* FALLTHROUGH */
1623 default:
1624 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1625 /* XXX Freeze DevQ */
1626 break;
1628 ccb->csio.resid = residue_len;
1629 dptfreeccb(dpt, dccb);
1630 xpt_done(ccb);
1631 } else {
1632 dptprocesserror(dpt, dccb, ccb, hba_stat, scsi_stat,
1633 residue_len);
1638 static void
1639 dptprocesserror(dpt_softc_t *dpt, dpt_ccb_t *dccb, union ccb *ccb,
1640 u_int hba_stat, u_int scsi_stat, u_int32_t resid)
1642 ccb->csio.resid = resid;
1643 switch (hba_stat) {
1644 case HA_ERR_SEL_TO:
1645 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
1646 break;
1647 case HA_ERR_CMD_TO:
1648 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1649 break;
1650 case HA_SCSIBUS_RESET:
1651 case HA_HBA_POWER_UP: /* Similar effect to a bus reset??? */
1652 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1653 break;
1654 case HA_CP_ABORTED:
1655 case HA_CP_RESET: /* XXX ??? */
1656 case HA_CP_ABORT_NA: /* XXX ??? */
1657 case HA_CP_RESET_NA: /* XXX ??? */
1658 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
1659 ccb->ccb_h.status = CAM_REQ_ABORTED;
1660 break;
1661 case HA_PCI_PARITY:
1662 case HA_PCI_MABORT:
1663 case HA_PCI_TABORT:
1664 case HA_PCI_STABORT:
1665 case HA_BUS_PARITY:
1666 case HA_PARITY_ERR:
1667 case HA_ECC_ERR:
1668 ccb->ccb_h.status = CAM_UNCOR_PARITY;
1669 break;
1670 case HA_UNX_MSGRJCT:
1671 ccb->ccb_h.status = CAM_MSG_REJECT_REC;
1672 break;
1673 case HA_UNX_BUSPHASE:
1674 ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
1675 break;
1676 case HA_UNX_BUS_FREE:
1677 ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
1678 break;
1679 case HA_SCSI_HUNG:
1680 case HA_RESET_STUCK:
1682 * Dead??? Can the controller get unstuck
1683 * from these conditions
1685 ccb->ccb_h.status = CAM_NO_HBA;
1686 break;
1687 case HA_RSENSE_FAIL:
1688 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
1689 break;
1690 default:
1691 kprintf("dpt%d: Undocumented Error %x\n", dpt->unit, hba_stat);
1692 kprintf("Please mail this message to shimon@simon-shapiro.org\n");
1693 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1694 break;
1696 dptfreeccb(dpt, dccb);
1697 xpt_done(ccb);
1700 static void
1701 dpttimeout(void *arg)
1703 struct dpt_ccb *dccb;
1704 union ccb *ccb;
1705 struct dpt_softc *dpt;
1707 dccb = (struct dpt_ccb *)arg;
1708 ccb = dccb->ccb;
1709 dpt = (struct dpt_softc *)ccb->ccb_h.ccb_dpt_ptr;
1710 xpt_print_path(ccb->ccb_h.path);
1711 kprintf("CCB %p - timed out\n", (void *)dccb);
1713 crit_enter();
1716 * Try to clear any pending jobs. FreeBSD will lose interrupts,
1717 * leaving the controller suspended and commands timed out.
1718 * By calling the interrupt handler, any command thus stuck will be
1719 * completed.
1721 dpt_intr(dpt);
1723 if ((dccb->state & DCCB_ACTIVE) == 0) {
1724 xpt_print_path(ccb->ccb_h.path);
1725 kprintf("CCB %p - timed out CCB already completed\n",
1726 (void *)dccb);
1727 crit_exit();
1728 return;
1731 /* Abort this particular command. Leave all others running */
1732 dpt_send_immediate(dpt, &dccb->eata_ccb, dccb->eata_ccb.cp_busaddr,
1733 /*retries*/20000, EATA_SPECIFIC_ABORT, 0, 0);
1734 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1735 crit_exit();
1739 * Shutdown the controller and ensure that the cache is completely flushed.
1740 * Called from the shutdown_post_sync event after all disk access has completed.
1742 static void
1743 dptshutdown(void *arg, int howto)
1745 dpt_softc_t *dpt;
1747 dpt = (dpt_softc_t *)arg;
1749 kprintf("dpt%d: Shutting down (mode %x) HBA. Please wait...\n",
1750 dpt->unit, howto);
1753 * What we do for a shutdown, is give the DPT early power loss warning
1755 dpt_send_immediate(dpt, NULL, 0, EATA_POWER_OFF_WARN, 0, 0, 0);
1756 DELAY(1000 * 1000 * 5);
1757 kprintf("dpt%d: Controller was warned of shutdown and is now "
1758 "disabled\n", dpt->unit);
1761 /*============================================================================*/
1763 #if 0
1764 #ifdef DPT_RESET_HBA
1767 ** Function name : dpt_reset_hba
1769 ** Description : Reset the HBA and properly discard all pending work
1770 ** Input : Softc
1771 ** Output : Nothing
1773 static void
1774 dpt_reset_hba(dpt_softc_t *dpt)
1776 eata_ccb_t *ccb;
1777 dpt_ccb_t dccb, *dccbp;
1778 int result;
1779 struct scsi_xfer *xs;
1781 /* Prepare a control block. The SCSI command part is immaterial */
1782 dccb.xs = NULL;
1783 dccb.flags = 0;
1784 dccb.state = DPT_CCB_STATE_NEW;
1785 dccb.std_callback = NULL;
1786 dccb.wrbuff_callback = NULL;
1788 ccb = &dccb.eata_ccb;
1789 ccb->CP_OpCode = EATA_CMD_RESET;
1790 ccb->SCSI_Reset = 0;
1791 ccb->HBA_Init = 1;
1792 ccb->Auto_Req_Sen = 1;
1793 ccb->cp_id = 0; /* Should be ignored */
1794 ccb->DataIn = 1;
1795 ccb->DataOut = 0;
1796 ccb->Interpret = 1;
1797 ccb->reqlen = htonl(sizeof(struct scsi_sense_data));
1798 ccb->cp_statDMA = htonl(vtophys(&ccb->cp_statDMA));
1799 ccb->cp_reqDMA = htonl(vtophys(&ccb->cp_reqDMA));
1800 ccb->cp_viraddr = (u_int32_t) & ccb;
1802 ccb->cp_msg[0] = HA_IDENTIFY_MSG | HA_DISCO_RECO;
1803 ccb->cp_scsi_cmd = 0; /* Should be ignored */
1805 /* Lock up the submitted queue. We are very persistent here */
1806 crit_enter();
1807 while (dpt->queue_status & DPT_SUBMITTED_QUEUE_ACTIVE) {
1808 DELAY(100);
1811 dpt->queue_status |= DPT_SUBMITTED_QUEUE_ACTIVE;
1812 crit_exit();
1814 /* Send the RESET message */
1815 if ((result = dpt_send_eata_command(dpt, &dccb.eata_ccb,
1816 EATA_CMD_RESET, 0, 0, 0, 0)) != 0) {
1817 kprintf("dpt%d: Failed to send the RESET message.\n"
1818 " Trying cold boot (ouch!)\n", dpt->unit);
1821 if ((result = dpt_send_eata_command(dpt, &dccb.eata_ccb,
1822 EATA_COLD_BOOT, 0, 0,
1823 0, 0)) != 0) {
1824 panic("dpt%d: Faild to cold boot the HBA\n",
1825 dpt->unit);
1827 #ifdef DPT_MEASURE_PERFORMANCE
1828 dpt->performance.cold_boots++;
1829 #endif /* DPT_MEASURE_PERFORMANCE */
1832 #ifdef DPT_MEASURE_PERFORMANCE
1833 dpt->performance.warm_starts++;
1834 #endif /* DPT_MEASURE_PERFORMANCE */
1836 kprintf("dpt%d: Aborting pending requests. O/S should re-submit\n",
1837 dpt->unit);
1839 while ((dccbp = TAILQ_FIRST(&dpt->completed_ccbs)) != NULL) {
1840 struct scsi_xfer *xs = dccbp->xs;
1842 /* Not all transactions have xs structs */
1843 if (xs != NULL) {
1844 /* Tell the kernel proper this did not complete well */
1845 xs->error |= XS_SELTIMEOUT;
1846 xs->flags |= SCSI_ITSDONE;
1847 scsi_done(xs);
1850 dpt_Qremove_submitted(dpt, dccbp);
1852 /* Remember, Callbacks are NOT in the standard queue */
1853 if (dccbp->std_callback != NULL) {
1854 (dccbp->std_callback)(dpt, dccbp->eata_ccb.cp_channel,
1855 dccbp);
1856 } else {
1857 crit_enter();
1858 dpt_Qpush_free(dpt, dccbp);
1859 crit_exit();
1863 kprintf("dpt%d: reset done aborting all pending commands\n", dpt->unit);
1864 dpt->queue_status &= ~DPT_SUBMITTED_QUEUE_ACTIVE;
1867 #endif /* DPT_RESET_HBA */
1870 * Build a Command Block for target mode READ/WRITE BUFFER,
1871 * with the ``sync'' bit ON.
1873 * Although the length and offset are 24 bit fields in the command, they cannot
1874 * exceed 8192 bytes, so we take them as short integers and check their range.
1875 * If they are senseless, we round them to zero offset, maximum length and
1876 * complain.
1879 static void
1880 dpt_target_ccb(dpt_softc_t * dpt, int bus, u_int8_t target, u_int8_t lun,
1881 dpt_ccb_t * ccb, int mode, u_int8_t command,
1882 u_int16_t length, u_int16_t offset)
1884 eata_ccb_t *cp;
1886 if ((length + offset) > DPT_MAX_TARGET_MODE_BUFFER_SIZE) {
1887 kprintf("dpt%d: Length of %d, and offset of %d are wrong\n",
1888 dpt->unit, length, offset);
1889 length = DPT_MAX_TARGET_MODE_BUFFER_SIZE;
1890 offset = 0;
1892 ccb->xs = NULL;
1893 ccb->flags = 0;
1894 ccb->state = DPT_CCB_STATE_NEW;
1895 ccb->std_callback = (ccb_callback) dpt_target_done;
1896 ccb->wrbuff_callback = NULL;
1898 cp = &ccb->eata_ccb;
1899 cp->CP_OpCode = EATA_CMD_DMA_SEND_CP;
1900 cp->SCSI_Reset = 0;
1901 cp->HBA_Init = 0;
1902 cp->Auto_Req_Sen = 1;
1903 cp->cp_id = target;
1904 cp->DataIn = 1;
1905 cp->DataOut = 0;
1906 cp->Interpret = 0;
1907 cp->reqlen = htonl(sizeof(struct scsi_sense_data));
1908 cp->cp_statDMA = htonl(vtophys(&cp->cp_statDMA));
1909 cp->cp_reqDMA = htonl(vtophys(&cp->cp_reqDMA));
1910 cp->cp_viraddr = (u_int32_t) & ccb;
1912 cp->cp_msg[0] = HA_IDENTIFY_MSG | HA_DISCO_RECO;
1914 cp->cp_scsi_cmd = command;
1915 cp->cp_cdb[1] = (u_int8_t) (mode & SCSI_TM_MODE_MASK);
1916 cp->cp_lun = lun; /* Order is important here! */
1917 cp->cp_cdb[2] = 0x00; /* Buffer Id, only 1 :-( */
1918 cp->cp_cdb[3] = (length >> 16) & 0xFF; /* Buffer offset MSB */
1919 cp->cp_cdb[4] = (length >> 8) & 0xFF;
1920 cp->cp_cdb[5] = length & 0xFF;
1921 cp->cp_cdb[6] = (length >> 16) & 0xFF; /* Length MSB */
1922 cp->cp_cdb[7] = (length >> 8) & 0xFF;
1923 cp->cp_cdb[8] = length & 0xFF; /* Length LSB */
1924 cp->cp_cdb[9] = 0; /* No sync, no match bits */
1927 * This could be optimized to live in dpt_register_buffer.
1928 * We keep it here, just in case the kernel decides to reallocate pages
1930 if (dpt_scatter_gather(dpt, ccb, DPT_RW_BUFFER_SIZE,
1931 dpt->rw_buffer[bus][target][lun])) {
1932 kprintf("dpt%d: Failed to setup Scatter/Gather for "
1933 "Target-Mode buffer\n", dpt->unit);
1937 /* Setup a target mode READ command */
1939 static void
1940 dpt_set_target(int redo, dpt_softc_t * dpt,
1941 u_int8_t bus, u_int8_t target, u_int8_t lun, int mode,
1942 u_int16_t length, u_int16_t offset, dpt_ccb_t * ccb)
1944 if (dpt->target_mode_enabled) {
1945 crit_enter();
1947 if (!redo)
1948 dpt_target_ccb(dpt, bus, target, lun, ccb, mode,
1949 SCSI_TM_READ_BUFFER, length, offset);
1951 ccb->transaction_id = ++dpt->commands_processed;
1953 #ifdef DPT_MEASURE_PERFORMANCE
1954 dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd]++;
1955 ccb->command_started = microtime_now;
1956 #endif
1957 dpt_Qadd_waiting(dpt, ccb);
1958 dpt_sched_queue(dpt);
1960 crit_exit();
1961 } else {
1962 kprintf("dpt%d: Target Mode Request, but Target Mode is OFF\n",
1963 dpt->unit);
1968 * Schedule a buffer to be sent to another target.
1969 * The work will be scheduled and the callback provided will be called when
1970 * the work is actually done.
1972 * Please NOTE: ``Anyone'' can send a buffer, but only registered clients
1973 * get notified of receipt of buffers.
1977 dpt_send_buffer(int unit, u_int8_t channel, u_int8_t target, u_int8_t lun,
1978 u_int8_t mode, u_int16_t length, u_int16_t offset, void *data,
1979 buff_wr_done callback)
1981 dpt_softc_t *dpt;
1982 dpt_ccb_t *ccb = NULL;
1984 /* This is an external call. Be a bit paranoid */
1985 for (dpt = TAILQ_FIRST(&dpt_softc_list);
1986 dpt != NULL;
1987 dpt = TAILQ_NEXT(dpt, links)) {
1988 if (dpt->unit == unit)
1989 goto valid_unit;
1992 return (INVALID_UNIT);
1994 valid_unit:
1996 if (dpt->target_mode_enabled) {
1997 if ((channel >= dpt->channels) || (target > dpt->max_id) ||
1998 (lun > dpt->max_lun)) {
1999 return (INVALID_SENDER);
2001 if ((dpt->rw_buffer[channel][target][lun] == NULL) ||
2002 (dpt->buffer_receiver[channel][target][lun] == NULL))
2003 return (NOT_REGISTERED);
2005 crit_enter();
2006 /* Process the free list */
2007 if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
2008 kprintf("dpt%d ERROR: Cannot allocate any more free CCB's.\n"
2009 " Please try later\n",
2010 dpt->unit);
2011 crit_exit();
2012 return (NO_RESOURCES);
2014 /* Now grab the newest CCB */
2015 if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
2016 crit_exit();
2017 panic("dpt%d: Got a NULL CCB from pop_free()\n", dpt->unit);
2019 crit_exit();
2021 bcopy(dpt->rw_buffer[channel][target][lun] + offset, data, length);
2022 dpt_target_ccb(dpt, channel, target, lun, ccb, mode,
2023 SCSI_TM_WRITE_BUFFER,
2024 length, offset);
2025 ccb->std_callback = (ccb_callback) callback; /* Potential trouble */
2027 crit_enter();
2028 ccb->transaction_id = ++dpt->commands_processed;
2030 #ifdef DPT_MEASURE_PERFORMANCE
2031 dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd]++;
2032 ccb->command_started = microtime_now;
2033 #endif
2034 dpt_Qadd_waiting(dpt, ccb);
2035 dpt_sched_queue(dpt);
2037 crit_exit();
2038 return (0);
2040 return (DRIVER_DOWN);
2043 static void
2044 dpt_target_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb)
2046 eata_ccb_t *cp;
2048 cp = &ccb->eata_ccb;
2051 * Remove the CCB from the waiting queue.
2052 * We do NOT put it back on the free, etc., queues as it is a special
2053 * ccb, owned by the dpt_softc of this unit.
2055 crit_enter();
2056 dpt_Qremove_completed(dpt, ccb);
2057 crit_exit();
2059 #define br_channel (ccb->eata_ccb.cp_channel)
2060 #define br_target (ccb->eata_ccb.cp_id)
2061 #define br_lun (ccb->eata_ccb.cp_LUN)
2062 #define br_index [br_channel][br_target][br_lun]
2063 #define read_buffer_callback (dpt->buffer_receiver br_index )
2064 #define read_buffer (dpt->rw_buffer[br_channel][br_target][br_lun])
2065 #define cb(offset) (ccb->eata_ccb.cp_cdb[offset])
2066 #define br_offset ((cb(3) << 16) | (cb(4) << 8) | cb(5))
2067 #define br_length ((cb(6) << 16) | (cb(7) << 8) | cb(8))
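/*
 * The macros above decode the completed READ/WRITE BUFFER command:
 * br_offset is the 24-bit buffer offset from CDB bytes 3-5 and
 * br_length is the 24-bit transfer length from CDB bytes 6-8.
 */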
2069 /* Different reasons for being here, you know... */
2070 switch (ccb->eata_ccb.cp_scsi_cmd) {
2071 case SCSI_TM_READ_BUFFER:
2072 if (read_buffer_callback != NULL) {
2073 /* This is a buffer generated by a kernel process */
2074 read_buffer_callback(dpt->unit, br_channel,
2075 br_target, br_lun,
2076 read_buffer,
2077 br_offset, br_length);
2078 } else {
2079 /*
2080 * This is a buffer that a sleeping user command is
2081 * waiting for.
2082 */
2083 wakeup(ccb);
2084 }
2086 /* We ALWAYS re-issue the same command; args are don't-care */
2087 dpt_set_target(1, 0, 0, 0, 0, 0, 0, 0, 0);
2088 break;
2090 case SCSI_TM_WRITE_BUFFER:
2091 (ccb->wrbuff_callback) (dpt->unit, br_channel, br_target,
2092 br_offset, br_length,
2093 br_lun, ccb->status_packet.hba_stat);
2094 break;
2095 default:
2096 kprintf("dpt%d: %s is an unsupported command for target mode\n",
2097 dpt->unit, scsi_cmd_name(ccb->eata_ccb.cp_scsi_cmd));
2098 }
2099 crit_enter();
2100 dpt->target_ccb[br_channel][br_target][br_lun] = NULL;
2101 dpt_Qpush_free(dpt, ccb);
2102 crit_exit();
2103 }
2105 /*
2107 * Use this function to register a client for a buffer read target operation.
2108 * The function you register will be called every time a buffer is received
2109 * by the target mode code.
2110 */
2111 dpt_rb_t
2112 dpt_register_buffer(int unit, u_int8_t channel, u_int8_t target, u_int8_t lun,
2113 u_int8_t mode, u_int16_t length, u_int16_t offset,
2114 dpt_rec_buff callback, dpt_rb_op_t op)
2115 {
2116 dpt_softc_t *dpt;
2117 dpt_ccb_t *ccb = NULL;
2119 for (dpt = TAILQ_FIRST(&dpt_softc_list);
2120 dpt != NULL;
2121 dpt = TAILQ_NEXT(dpt, links)) {
2122 if (dpt->unit == unit)
2123 goto valid_unit;
2124 }
2126 return (INVALID_UNIT);
2128 valid_unit:
2130 if (dpt->state & DPT_HA_SHUTDOWN_ACTIVE)
2131 return (DRIVER_DOWN);
2133 if ((channel > (dpt->channels - 1)) || (target > (dpt->max_id - 1)) ||
2134 (lun > (dpt->max_lun - 1)))
2135 return (INVALID_SENDER);
2137 if (dpt->buffer_receiver[channel][target][lun] == NULL) {
2138 if (op == REGISTER_BUFFER) {
2139 /* Assign the requested callback */
2140 dpt->buffer_receiver[channel][target][lun] = callback;
2141 /* Get a CCB */
2142 crit_enter();
2144 /* Process the free list */
2145 if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
2146 kprintf("dpt%d ERROR: Cannot allocate any more free CCB's.\n"
2147 " Please try later\n",
2148 dpt->unit);
2149 crit_exit();
2150 return (NO_RESOURCES);
2151 }
2152 /* Now grab the newest CCB */
2153 if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
2154 crit_exit();
2155 panic("dpt%d: Got a NULL CCB from pop_free()\n",
2156 dpt->unit);
2157 }
2158 crit_exit();
2160 /* Clean up the leftover of the previous tenant */
2161 ccb->status = DPT_CCB_STATE_NEW;
2162 dpt->target_ccb[channel][target][lun] = ccb;
2164 dpt->rw_buffer[channel][target][lun] =
2165 kmalloc(DPT_RW_BUFFER_SIZE, M_DEVBUF, M_INTWAIT);
2166 dpt_set_target(0, dpt, channel, target, lun, mode,
2167 length, offset, ccb);
2168 return (SUCCESSFULLY_REGISTERED);
2169 } else
2170 return (NOT_REGISTERED);
2171 } else {
2172 if (op == REGISTER_BUFFER) {
2173 if (dpt->buffer_receiver[channel][target][lun] == callback)
2174 return (ALREADY_REGISTERED);
2175 else
2176 return (REGISTERED_TO_ANOTHER);
2177 } else {
2178 if (dpt->buffer_receiver[channel][target][lun] == callback) {
2179 dpt->buffer_receiver[channel][target][lun] = NULL;
2180 crit_enter();
2181 dpt_Qpush_free(dpt, ccb);
2182 crit_exit();
2183 kfree(dpt->rw_buffer[channel][target][lun], M_DEVBUF);
2184 return (SUCCESSFULLY_REGISTERED);
2185 } else
2186 return (INVALID_CALLBACK);
2187 }
2188 }
2189 }
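/*
 * Illustrative sketch only, not part of the driver: registering a receive
 * callback for unit 0, channel 0, target 2, LUN 0.  example_rx is a
 * hypothetical function of the dpt_rec_buff type from dpt.h; the other
 * identifiers are taken from the code above.
 */
#if 0
extern dpt_rec_buff example_rx;		/* hypothetical receive callback */

static int
example_register(void)
{
	if (dpt_register_buffer(0 /* unit */, 0 /* channel */, 2 /* target */,
				0 /* lun */, 0 /* mode */, 0 /* length */,
				0 /* offset */, example_rx,
				REGISTER_BUFFER) != SUCCESSFULLY_REGISTERED)
		return (ENXIO);
	return (0);
}
#endif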
2192 /* Return the state of the blinking DPT LED's */
2193 u_int8_t
2194 dpt_blinking_led(dpt_softc_t * dpt)
2195 {
2196 int ndx;
2197 u_int32_t state;
2198 u_int32_t previous;
2199 u_int8_t result;
2201 crit_enter();
2203 result = 0;
2205 for (ndx = 0, state = 0, previous = 0;
2206 (ndx < 10) && (state != previous);
2207 ndx++) {
2208 previous = state;
2209 state = dpt_inl(dpt, 1);
2210 }
2212 if ((state == previous) && (state == DPT_BLINK_INDICATOR))
2213 result = dpt_inb(dpt, 5);
2215 crit_exit();
2216 return (result);
2217 }
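/*
 * Illustrative sketch only: a caller holding a softc pointer (dpt is
 * assumed to be in scope here) can check whether the HBA is presenting
 * its blink-code pattern; dpt_blinking_led() returns 0 when the
 * DPT_BLINK_INDICATOR pattern is not seen.
 */
#if 0
	u_int8_t blink;

	blink = dpt_blinking_led(dpt);
	if (blink != 0)
		kprintf("dpt%d: HBA reports blink code 0x%02x\n",
			dpt->unit, blink);
#endif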
2219 /*
2220 * Execute a command which did not come from the kernel's SCSI layer.
2221 * The only way to map user commands to bus and target is to comply with the
2222 * standard DPT wire-down scheme:
2223 */
2224 int
2225 dpt_user_cmd(dpt_softc_t * dpt, eata_pt_t * user_cmd,
2226 caddr_t cmdarg, int minor_no)
2227 {
2228 dpt_ccb_t *ccb;
2229 void *data;
2230 int channel, target, lun;
2231 int huh;
2232 int result;
2233 int submitted;
2235 data = NULL;
2236 channel = minor2hba(minor_no);
2237 target = minor2target(minor_no);
2238 lun = minor2lun(minor_no);
2240 if ((channel > (dpt->channels - 1))
2241 || (target > dpt->max_id)
2242 || (lun > dpt->max_lun))
2243 return (ENXIO);
2245 if (target == dpt->sc_scsi_link[channel].adapter_targ) {
2246 /* This one is for the controller itself */
2247 if ((user_cmd->eataID[0] != 'E')
2248 || (user_cmd->eataID[1] != 'A')
2249 || (user_cmd->eataID[2] != 'T')
2250 || (user_cmd->eataID[3] != 'A')) {
2251 return (ENXIO);
2252 }
2253 }
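/*
 * Illustrative only: a user-land caller must store the four magic bytes
 * in the pass-through header for the check above to succeed (ep is a
 * hypothetical eata_pt_t object in user space):
 *
 *	ep.eataID[0] = 'E';
 *	ep.eataID[1] = 'A';
 *	ep.eataID[2] = 'T';
 *	ep.eataID[3] = 'A';
 */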
2254 /* Get a DPT CCB, so we can prepare a command */
2255 crit_enter();
2257 /* Process the free list */
2258 if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
2259 kprintf("dpt%d ERROR: Cannot allocate any more free CCB's.\n"
2260 " Please try later\n",
2261 dpt->unit);
2262 crit_exit();
2263 return (EFAULT);
2264 }
2265 /* Now grab the newest CCB */
2266 if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
2267 crit_exit();
2268 panic("dpt%d: Got a NULL CCB from pop_free()\n", dpt->unit);
2269 } else {
2270 crit_exit();
2271 /* Clean up the leftover of the previous tenant */
2272 ccb->status = DPT_CCB_STATE_NEW;
2273 }
2275 bcopy((caddr_t) & user_cmd->command_packet, (caddr_t) & ccb->eata_ccb,
2276 sizeof(eata_ccb_t));
2278 /* We do not want to do user specified scatter/gather. Why?? */
2279 if (ccb->eata_ccb.scatter == 1)
2280 return (EINVAL);
2282 ccb->eata_ccb.Auto_Req_Sen = 1;
2283 ccb->eata_ccb.reqlen = htonl(sizeof(struct scsi_sense_data));
2284 ccb->eata_ccb.cp_datalen = htonl(sizeof(ccb->eata_ccb.cp_datalen));
2285 ccb->eata_ccb.cp_dataDMA = htonl(vtophys(ccb->eata_ccb.cp_dataDMA));
2286 ccb->eata_ccb.cp_statDMA = htonl(vtophys(&ccb->eata_ccb.cp_statDMA));
2287 ccb->eata_ccb.cp_reqDMA = htonl(vtophys(&ccb->eata_ccb.cp_reqDMA));
2288 ccb->eata_ccb.cp_viraddr = (u_int32_t) & ccb;
2290 if (ccb->eata_ccb.DataIn || ccb->eata_ccb.DataOut) {
2291 /* Data I/O is involved in this command. Allocate a buffer. */
2292 if (ccb->eata_ccb.cp_datalen > PAGE_SIZE) {
2293 data = contigmalloc(ccb->eata_ccb.cp_datalen,
2294 M_TEMP, M_WAITOK, 0, ~0,
2295 ccb->eata_ccb.cp_datalen,
2296 0x10000);
2297 } else {
2298 data = kmalloc(ccb->eata_ccb.cp_datalen, M_TEMP,
2299 M_WAITOK);
2300 }
2302 if (data == NULL) {
2303 kprintf("dpt%d: Cannot allocate %d bytes "
2304 "for EATA command\n", dpt->unit,
2305 ccb->eata_ccb.cp_datalen);
2306 return (EFAULT);
2307 }
2308 #define usr_cmd_DMA (caddr_t)user_cmd->command_packet.cp_dataDMA
2309 if (ccb->eata_ccb.DataIn == 1) {
2310 if (copyin(usr_cmd_DMA,
2311 data, ccb->eata_ccb.cp_datalen) != 0)
2312 return (EFAULT);
2313 }
2314 } else {
2315 /* No data I/O involved here. Make sure the DPT knows that */
2316 ccb->eata_ccb.cp_datalen = 0;
2317 data = NULL;
2318 }
2320 if (ccb->eata_ccb.FWNEST == 1)
2321 ccb->eata_ccb.FWNEST = 0;
2323 if (ccb->eata_ccb.cp_datalen != 0) {
2324 if (dpt_scatter_gather(dpt, ccb, ccb->eata_ccb.cp_datalen,
2325 data) != 0) {
2326 if (data != NULL)
2327 kfree(data, M_TEMP);
2328 return (EFAULT);
2329 }
2330 }
2331 /*
2332 * We are required to quiet a SCSI bus.
2333 * Since we do not queue commands on a per-bus basis,
2334 * we wait for ALL commands on the controller to complete.
2335 * In the meantime, sched_queue() will not schedule new commands.
2336 */
2337 if ((ccb->eata_ccb.cp_cdb[0] == MULTIFUNCTION_CMD)
2338 && (ccb->eata_ccb.cp_cdb[2] == BUS_QUIET)) {
2339 /* We wait for ALL traffic for this HBA to subside */
2340 crit_enter();
2341 dpt->state |= DPT_HA_QUIET;
2342 crit_exit();
2344 while ((submitted = dpt->submitted_ccbs_count) != 0) {
2345 huh = tsleep((void *) dpt, PCATCH, "dptqt", 100 * hz);
2346 switch (huh) {
2347 case 0:
2348 /* Wakeup call received */
2349 break;
2350 case EWOULDBLOCK:
2351 /* Timer Expired */
2352 break;
2353 default:
2354 /* anything else */
2355 break;
2356 }
2357 }
2358 }
2359 /* Resume normal operation */
2360 if ((ccb->eata_ccb.cp_cdb[0] == MULTIFUNCTION_CMD)
2361 && (ccb->eata_ccb.cp_cdb[2] == BUS_UNQUIET)) {
2362 crit_enter();
2363 dpt->state &= ~DPT_HA_QUIET;
2364 crit_exit();
2365 }
2366 /*
2367 * Schedule the command and submit it.
2368 * We bypass dpt_sched_queue, as it will block on DPT_HA_QUIET
2369 */
2370 ccb->xs = NULL;
2371 ccb->flags = 0;
2372 ccb->eata_ccb.Auto_Req_Sen = 1; /* We always want this feature */
2374 ccb->transaction_id = ++dpt->commands_processed;
2375 ccb->std_callback = (ccb_callback) dpt_user_cmd_done;
2376 ccb->result = (u_int32_t) & cmdarg;
2377 ccb->data = data;
2379 #ifdef DPT_MEASURE_PERFORMANCE
2380 ++dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd];
2381 ccb->command_started = microtime_now;
2382 #endif
2383 crit_enter();
2384 dpt_Qadd_waiting(dpt, ccb);
2385 crit_exit();
2387 dpt_sched_queue(dpt);
2389 /* Wait for the command to complete */
2390 (void) tsleep((void *) ccb, PCATCH, "dptucw", 100 * hz);
2392 /* Free allocated memory */
2393 if (data != NULL)
2394 kfree(data, M_TEMP);
2396 return (0);
2397 }
2399 static void
2400 dpt_user_cmd_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb)
2401 {
2402 u_int32_t result;
2403 caddr_t cmd_arg;
2405 crit_enter();
2407 /*
2408 * If Auto Request Sense is on, copyout the sense struct
2409 */
2410 #define usr_pckt_DMA (caddr_t)(intptr_t)ntohl(ccb->eata_ccb.cp_reqDMA)
2411 #define usr_pckt_len ntohl(ccb->eata_ccb.cp_datalen)
2412 if (ccb->eata_ccb.Auto_Req_Sen == 1) {
2413 if (copyout((caddr_t) & ccb->sense_data, usr_pckt_DMA,
2414 sizeof(struct scsi_sense_data))) {
2415 ccb->result = EFAULT;
2416 dpt_Qpush_free(dpt, ccb);
2417 crit_exit();
2418 wakeup(ccb);
2419 return;
2420 }
2421 }
2422 /* If DataIn is on, copyout the data */
2423 if ((ccb->eata_ccb.DataIn == 1)
2424 && (ccb->status_packet.hba_stat == HA_NO_ERROR)) {
2425 if (copyout(ccb->data, usr_pckt_DMA, usr_pckt_len)) {
2426 dpt_Qpush_free(dpt, ccb);
2427 ccb->result = EFAULT;
2428 }
2429 crit_exit();
2430 wakeup(ccb);
2431 return;
2432 }
2434 /* Copyout the status */
2435 result = ccb->status_packet.hba_stat;
2436 cmd_arg = (caddr_t) ccb->result;
2438 if (copyout((caddr_t) & result, cmd_arg, sizeof(result))) {
2439 dpt_Qpush_free(dpt, ccb);
2440 ccb->result = EFAULT;
2441 crit_exit();
2442 wakeup(ccb);
2443 return;
2444 }
2445 /* Put the CCB back in the freelist */
2446 ccb->state |= DPT_CCB_STATE_COMPLETED;
2447 dpt_Qpush_free(dpt, ccb);
2449 /* Free allocated memory */
2450 crit_exit();
2451 return;
2452 }
2454 #ifdef DPT_HANDLE_TIMEOUTS
2455 /*
2456 * This function walks down the SUBMITTED queue.
2457 * Every request that is too old gets aborted and marked.
2458 * Since the DPT will complete (interrupt) immediately (what does that mean?),
2459 * we just walk the list, aborting old commands and marking them as such.
2460 * The dpt_complete function will get rid of those that were interrupted in the
2461 * normal manner.
2462 *
2463 * This function needs to run at splcam(), as it interacts with the submitted
2464 * queue, as well as the completed and free queues, just like dpt_intr() does.
2465 * To run it at any SPL other than that of dpt_intr() will mean that dpt_intr()
2466 * will be able to pre-empt it, grab a transaction in progress (towards
2467 * destruction) and operate on it. The state of that transaction will not be
2468 * very clear.
2469 * The only other option is to lock it only as long as necessary but have
2470 * dpt_intr() spin-wait on it. In a UP environment this makes no sense and in
2471 * an SMP environment the advantage is dubious for a function that runs once
2472 * every ten seconds for a few microseconds and, on systems with healthy
2473 * hardware, does not do anything anyway.
2474 */
2476 static void
2477 dpt_handle_timeouts(dpt_softc_t * dpt)
2478 {
2479 dpt_ccb_t *ccb;
2481 crit_enter();
2483 if (dpt->state & DPT_HA_TIMEOUTS_ACTIVE) {
2484 kprintf("dpt%d WARNING: Timeout Handling Collision\n",
2485 dpt->unit);
2486 crit_exit();
2487 return;
2488 }
2489 dpt->state |= DPT_HA_TIMEOUTS_ACTIVE;
2491 /* Loop through the entire submitted queue, looking for lost souls */
2492 for (ccb = TAILQ_FIRST(&dpt->submitted_ccbs);
2493 ccb != NULL;
2494 ccb = TAILQ_NEXT(ccb, links)) {
2495 struct scsi_xfer *xs;
2496 u_int32_t age, max_age;
2498 xs = ccb->xs;
2499 age = dpt_time_delta(ccb->command_started, microtime_now);
2501 #define TenSec 10000000
2503 if (xs == NULL) { /* Local, non-kernel call */
2504 max_age = TenSec;
2505 } else {
2506 max_age = (((xs->timeout * (dpt->submitted_ccbs_count
2507 + DPT_TIMEOUT_FACTOR))
2508 > TenSec)
2509 ? (xs->timeout * (dpt->submitted_ccbs_count
2510 + DPT_TIMEOUT_FACTOR))
2511 : TenSec);
2512 }
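/*
 * Illustrative arithmetic only (values are made up and assume xs->timeout
 * is expressed in the same microsecond units as TenSec): with
 * xs->timeout == 2000000, three submitted CCBs and DPT_TIMEOUT_FACTOR == 4,
 * the scaled timeout is 2000000 * (3 + 4) = 14000000 usec, which exceeds
 * the TenSec floor, so max_age becomes 14 seconds.
 */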
2514 /*
2515 * If a transaction is marked lost and is TWICE as old as we
2516 * care to wait, then, and only then, do we destroy it!
2517 */
2518 if (ccb->state & DPT_CCB_STATE_MARKED_LOST) {
2519 /* Remember who is next */
2520 if (age > (max_age * 2)) {
2521 dpt_Qremove_submitted(dpt, ccb);
2522 ccb->state &= ~DPT_CCB_STATE_MARKED_LOST;
2523 ccb->state |= DPT_CCB_STATE_ABORTED;
2524 #define cmd_name scsi_cmd_name(ccb->eata_ccb.cp_scsi_cmd)
2525 if (ccb->retries++ > DPT_RETRIES) {
2526 kprintf("dpt%d ERROR: Destroying stale "
2527 "%d (%s)\n"
2528 " on "
2529 "c%db%dt%du%d (%d/%d)\n",
2530 dpt->unit, ccb->transaction_id,
2531 cmd_name,
2532 dpt->unit,
2533 ccb->eata_ccb.cp_channel,
2534 ccb->eata_ccb.cp_id,
2535 ccb->eata_ccb.cp_LUN, age,
2536 ccb->retries);
2537 #define send_ccb &ccb->eata_ccb
2538 #define ESA EATA_SPECIFIC_ABORT
2539 (void) dpt_send_immediate(dpt,
2540 send_ccb,
2541 ESA,
2542 0, 0);
2543 dpt_Qpush_free(dpt, ccb);
2545 /* The SCSI layer should re-try */
2546 xs->error |= XS_TIMEOUT;
2547 xs->flags |= SCSI_ITSDONE;
2548 scsi_done(xs);
2549 } else {
2550 kprintf("dpt%d ERROR: Stale %d (%s) on "
2551 "c%db%dt%du%d (%d)\n"
2552 " gets another "
2553 "chance(%d/%d)\n",
2554 dpt->unit, ccb->transaction_id,
2555 cmd_name,
2556 dpt->unit,
2557 ccb->eata_ccb.cp_channel,
2558 ccb->eata_ccb.cp_id,
2559 ccb->eata_ccb.cp_LUN,
2560 age, ccb->retries, DPT_RETRIES);
2562 dpt_Qpush_waiting(dpt, ccb);
2563 dpt_sched_queue(dpt);
2564 }
2565 }
2566 } else {
2567 /*
2568 * This is a transaction that is not to be destroyed
2569 * (yet), but it is too old for our liking. We wait as
2570 * long as the upper layer asked for. Not really: we
2571 * multiply that by the number of commands in the
2572 * submitted queue + 1.
2573 */
2574 if (!(ccb->state & DPT_CCB_STATE_MARKED_LOST) &&
2575 (age != ~0) && (age > max_age)) {
2576 kprintf("dpt%d ERROR: Marking %d (%s) on "
2577 "c%db%dt%du%d \n"
2578 " as late after %dusec\n",
2579 dpt->unit, ccb->transaction_id,
2580 cmd_name,
2581 dpt->unit, ccb->eata_ccb.cp_channel,
2582 ccb->eata_ccb.cp_id,
2583 ccb->eata_ccb.cp_LUN, age);
2584 ccb->state |= DPT_CCB_STATE_MARKED_LOST;
2585 }
2586 }
2587 }
2589 dpt->state &= ~DPT_HA_TIMEOUTS_ACTIVE;
2590 crit_exit();
2591 }
2593 #endif /* DPT_HANDLE_TIMEOUTS */
2595 #endif