/*
 * Common functions for CAM "type" (peripheral) drivers.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/cam/cam_periph.c,v 1.70 2008/02/12 11:07:33 raj Exp $
 * $DragonFly: src/sys/bus/cam/cam_periph.c,v 1.41 2008/07/18 00:07:21 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/devicestat.h>

#include <vm/vm_extern.h>

#include <sys/thread2.h>

#include "cam_xpt_periph.h"
#include "cam_periph.h"
#include "cam_debug.h"

#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>
#include <bus/cam/scsi/scsi_pass.h>
static u_int		camperiphnextunit(struct periph_driver *p_drv,
					  u_int newunit, int wired,
					  path_id_t pathid, target_id_t target,
					  lun_id_t lun);
static u_int		camperiphunit(struct periph_driver *p_drv,
				      struct cam_sim *sim, path_id_t pathid,
				      target_id_t target, lun_id_t lun);
static void		camperiphdone(struct cam_periph *periph,
				      union ccb *done_ccb);
static void		camperiphfree(struct cam_periph *periph);
static int		camperiphscsistatuserror(union ccb *ccb,
						 cam_flags camflags,
						 u_int32_t sense_flags,
						 union ccb *save_ccb,
						 int *openings,
						 u_int32_t *relsim_flags,
						 u_int32_t *timeout);
static int		camperiphscsisenseerror(union ccb *ccb,
						cam_flags camflags,
						u_int32_t sense_flags,
						union ccb *save_ccb,
						int *openings,
						u_int32_t *relsim_flags,
						u_int32_t *timeout);
static void		cam_periph_unmapbufs(struct cam_periph_map_info *mapinfo,
					     u_int8_t ***data_ptrs, int numbufs);
static int nperiph_drivers;
struct periph_driver **periph_drivers;

MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");
static int periph_selto_delay = 1000;
TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
static int periph_noresrc_delay = 500;
TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
static int periph_busy_delay = 500;
TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
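/*
 * The three recovery delays above are exposed via TUNABLE_INT(), so they
 * can presumably be overridden from the loader environment.  A hypothetical
 * /boot/loader.conf entry might look like:
 *
 *	kern.cam.periph_selto_delay="1000"
 */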
void
periphdriver_register(void *data)
{
	struct periph_driver **newdrivers, **old;
	int ndrivers;

	ndrivers = nperiph_drivers + 2;
	newdrivers = kmalloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
			     M_WAITOK);
	if (periph_drivers)
		bcopy(periph_drivers, newdrivers,
		      sizeof(*newdrivers) * nperiph_drivers);
	newdrivers[nperiph_drivers] = (struct periph_driver *)data;
	newdrivers[nperiph_drivers + 1] = NULL;
	old = periph_drivers;
	periph_drivers = newdrivers;
	if (old)
		kfree(old, M_CAMPERIPH);
	nperiph_drivers++;
}
cam_status
cam_periph_alloc(periph_ctor_t *periph_ctor,
		 periph_oninv_t *periph_oninvalidate,
		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
		 char *name, cam_periph_type type, struct cam_path *path,
		 ac_callback_t *ac_callback, ac_code code, void *arg)
{
	struct periph_driver **p_drv;
	struct cam_sim *sim;
	struct cam_periph *periph;
	struct cam_periph *cur_periph;
	path_id_t path_id;
	target_id_t target_id;
	lun_id_t lun_id;
	cam_status status;
	u_int init_level;
	/*
	 * Handle Hot-Plug scenarios.  If there is already a peripheral
	 * of our type assigned to this path, we are likely waiting for
	 * final close on an old, invalidated, peripheral.  If this is
	 * the case, queue up a deferred call to the peripheral's async
	 * handler.  If it looks like a mistaken re-allocation, complain.
	 */
	if ((periph = cam_periph_find(path, name)) != NULL) {
		if ((periph->flags & CAM_PERIPH_INVALID) != 0
		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
			periph->deferred_callback = ac_callback;
			periph->deferred_ac = code;
			return (CAM_REQ_INPROG);
		} else {
			kprintf("cam_periph_alloc: attempt to re-allocate "
				"valid device %s%d rejected\n",
				periph->periph_name, periph->unit_number);
		}
		return (CAM_REQ_INVALID);
	}

	periph = kmalloc(sizeof(*periph), M_CAMPERIPH, M_INTWAIT | M_ZERO);
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, name) == 0)
			break;
	}

	sim = xpt_path_sim(path);
	path_id = xpt_path_path_id(path);
	target_id = xpt_path_target_id(path);
	lun_id = xpt_path_lun_id(path);
	cam_init_pinfo(&periph->pinfo);
	periph->periph_start = periph_start;
	periph->periph_dtor = periph_dtor;
	periph->periph_oninval = periph_oninvalidate;
	periph->periph_name = name;
	periph->unit_number = camperiphunit(*p_drv, sim, path_id,
					    target_id, lun_id);
	periph->immediate_priority = CAM_PRIORITY_NONE;
	periph->refcount = 0;
	SLIST_INIT(&periph->ccb_list);
	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
189 if (status
!= CAM_REQ_CMP
)
195 status
= xpt_add_periph(periph
);
197 if (status
!= CAM_REQ_CMP
)
200 cur_periph
= TAILQ_FIRST(&(*p_drv
)->units
);
201 while (cur_periph
!= NULL
202 && cur_periph
->unit_number
< periph
->unit_number
)
203 cur_periph
= TAILQ_NEXT(cur_periph
, unit_links
);
205 if (cur_periph
!= NULL
)
206 TAILQ_INSERT_BEFORE(cur_periph
, periph
, unit_links
);
208 TAILQ_INSERT_TAIL(&(*p_drv
)->units
, periph
, unit_links
);
209 (*p_drv
)->generation
++;
214 status
= periph_ctor(periph
, arg
);
216 if (status
== CAM_REQ_CMP
)
220 switch (init_level
) {
222 /* Initialized successfully */
225 TAILQ_REMOVE(&(*p_drv
)->units
, periph
, unit_links
);
226 xpt_remove_periph(periph
);
229 xpt_free_path(periph
->path
);
232 kfree(periph
, M_CAMPERIPH
);
235 /* No cleanup to perform. */
238 panic("cam_periph_alloc: Unknown init level");
/*
 * Find a peripheral structure with the specified path, target, lun,
 * and (optionally) type.  If the name is NULL, this function will return
 * the first peripheral driver that matches the specified path.
 */
struct cam_periph *
cam_periph_find(struct cam_path *path, char *name)
{
	struct periph_driver **p_drv;
	struct cam_periph *periph;

	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
			continue;

		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) == 0) {
cam_status
cam_periph_acquire(struct cam_periph *periph)
{
	if (periph == NULL)
		return (CAM_REQ_CMP_ERR);

	periph->refcount++;
	return (CAM_REQ_CMP);
}
void
cam_periph_release(struct cam_periph *periph)
{
	if ((--periph->refcount == 0)
	 && (periph->flags & CAM_PERIPH_INVALID)) {
		camperiphfree(periph);
	}
}
int
cam_periph_hold(struct cam_periph *periph, int flags)
{
	int error;

	sim_lock_assert_owned(periph->sim->lock);

	/*
	 * Increment the reference count on the peripheral
	 * while we wait for our lock attempt to succeed
	 * to ensure the peripheral doesn't disappear out
	 * from under us while we sleep.
	 */
	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return (ENXIO);

	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
		periph->flags |= CAM_PERIPH_LOCK_WANTED;
		if ((error = sim_lock_sleep(periph, flags, "caplck", 0,
					    periph->sim->lock)) != 0) {
			cam_periph_release(periph);
			return (error);
		}
	}

	periph->flags |= CAM_PERIPH_LOCKED;
	return (0);
}
334 cam_periph_unhold(struct cam_periph
*periph
, int unlock
)
338 sim_lock_assert_owned(periph
->sim
->lock
);
339 periph
->flags
&= ~CAM_PERIPH_LOCKED
;
340 if ((periph
->flags
& CAM_PERIPH_LOCK_WANTED
) != 0) {
341 periph
->flags
&= ~CAM_PERIPH_LOCK_WANTED
;
346 cam_periph_release(periph
);
347 /* periph may be garbage now */
350 cam_periph_release(periph
);
/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired" the
 * first time through the loop since after that we'll be looking at unit
 * numbers that did not match a wiring entry.
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct cam_periph *periph;
	char *periph_name, *strval;
	int i, val, dunit;
	const char *dname;

	periph_name = p_drv->driver_name;
374 for (periph
= TAILQ_FIRST(&p_drv
->units
);
375 periph
!= NULL
&& periph
->unit_number
!= newunit
;
376 periph
= TAILQ_NEXT(periph
, unit_links
))
379 if (periph
!= NULL
&& periph
->unit_number
== newunit
) {
381 xpt_print(periph
->path
, "Duplicate Wired "
383 xpt_print(periph
->path
, "Second device (%s "
384 "device at scbus%d target %d lun %d) will "
385 "not be wired\n", periph_name
, pathid
,
	/*
	 * Don't match entries like "da 4" as a wired down
	 * device, but do match entries like "da 4 target 5"
	 * or even "da 4 scbus 1".
	 */
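	/*
	 * For example (hypothetical, assuming the usual kenv/hints wiring
	 * syntax), an entry such as:
	 *
	 *	hint.da.4.at="scbus1"
	 *	hint.da.4.target="5"
	 *
	 * would be treated as a wired unit, while a bare "da 4" style hint
	 * with no "target" and no specific scbus would not.
	 */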
	while ((i = resource_locate(i, periph_name)) != -1) {
		dname = resource_query_name(i);
		dunit = resource_query_unit(i);
		/* if no "target" and no specific scbus, skip */
		if (resource_int_value(dname, dunit, "target", &val) &&
		    (resource_string_value(dname, dunit, "at", &strval) ||
		     strcmp(strval, "scbus") == 0))
			continue;
		if (newunit == dunit)
static u_int
camperiphunit(struct periph_driver *p_drv,
	      struct cam_sim *sim, path_id_t pathid,
	      target_id_t target, lun_id_t lun)
{
	u_int unit;
	int hit, i, val, dunit;
	const char *dname;
	char pathbuf[32], *strval, *periph_name;

	periph_name = p_drv->driver_name;
	ksnprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
432 for (hit
= 0; (i
= resource_locate(i
, periph_name
)) != -1; hit
= 0) {
433 dname
= resource_query_name(i
);
434 dunit
= resource_query_unit(i
);
435 if (resource_string_value(dname
, dunit
, "at", &strval
) == 0) {
436 if (strcmp(strval
, pathbuf
) != 0)
440 if (resource_int_value(dname
, dunit
, "target", &val
) == 0) {
445 if (resource_int_value(dname
, dunit
, "lun", &val
) == 0) {
	/*
	 * If no wired units are in the kernel config do an auto unit
	 * start selection.  We want usb mass storage out of the way
	 * so it doesn't steal low numbered da%d slots from ahci, sili,
	 * or other scsi attachments.
	 */
	if (hit == 0 && sim) {
		if (strncmp(sim->sim_name, "umass", 4) == 0 && unit < 8)
			unit = 8;
	}
	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, /*wired*/hit, pathid,
				 target, lun);
void
cam_periph_invalidate(struct cam_periph *periph)
{
	/*
	 * We only call this routine the first time a peripheral is
	 * invalidated.
	 */
	if (((periph->flags & CAM_PERIPH_INVALID) == 0)
	 && (periph->periph_oninval != NULL))
		periph->periph_oninval(periph);

	periph->flags |= CAM_PERIPH_INVALID;
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;

	if (periph->refcount == 0)
		camperiphfree(periph);
	else if (periph->refcount < 0)
		kprintf("cam_invalidate_periph: refcount < 0!!\n");
}
static void
camperiphfree(struct cam_periph *periph)
{
	struct periph_driver **p_drv;

	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}

	if (*p_drv == NULL) {
		kprintf("camperiphfree: attempt to free non-existent periph\n");
		return;
	}

	TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
	(*p_drv)->generation++;

	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);
	xpt_remove_periph(periph);
524 if (periph
->flags
& CAM_PERIPH_NEW_DEV_FOUND
) {
528 switch (periph
->deferred_ac
) {
529 case AC_FOUND_DEVICE
:
530 ccb
.ccb_h
.func_code
= XPT_GDEV_TYPE
;
531 xpt_setup_ccb(&ccb
.ccb_h
, periph
->path
, /*priority*/ 1);
535 case AC_PATH_REGISTERED
:
536 ccb
.ccb_h
.func_code
= XPT_PATH_INQ
;
537 xpt_setup_ccb(&ccb
.ccb_h
, periph
->path
, /*priority*/ 1);
545 periph
->deferred_callback(NULL
, periph
->deferred_ac
,
548 xpt_free_path(periph
->path
);
549 kfree(periph
, M_CAMPERIPH
);
/*
 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This won't work on physical pointers, for now it's
 * up to the caller to check for that.  (XXX KDM -- should we do that here
 * instead?)  This also only works for up to MAXPHYS memory.  Since we use
 * buffers to map stuff in and out, we're limited to the buffer size.
 */
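/*
 * A typical caller (illustrative sketch only; the exact error handling and
 * the names errfunc/cam_flags/sense_flags/ds vary by driver) pairs this
 * with cam_periph_unmapmem() around cam_periph_runccb():
 *
 *	if ((error = cam_periph_mapmem(ccb, &mapinfo)) != 0)
 *		return (error);
 *	error = cam_periph_runccb(ccb, errfunc, cam_flags, sense_flags, ds);
 *	cam_periph_unmapmem(ccb, &mapinfo);
 */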
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	buf_cmd_t cmd[CAM_PERIPH_MAXMAPS];
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
571 switch(ccb
->ccb_h
.func_code
) {
573 if (ccb
->cdm
.match_buf_len
== 0) {
574 kprintf("cam_periph_mapmem: invalid match buffer "
578 if (ccb
->cdm
.pattern_buf_len
> 0) {
579 data_ptrs
[0] = (u_int8_t
**)&ccb
->cdm
.patterns
;
580 lengths
[0] = ccb
->cdm
.pattern_buf_len
;
581 mapinfo
->dirs
[0] = CAM_DIR_OUT
;
582 data_ptrs
[1] = (u_int8_t
**)&ccb
->cdm
.matches
;
583 lengths
[1] = ccb
->cdm
.match_buf_len
;
584 mapinfo
->dirs
[1] = CAM_DIR_IN
;
587 data_ptrs
[0] = (u_int8_t
**)&ccb
->cdm
.matches
;
588 lengths
[0] = ccb
->cdm
.match_buf_len
;
589 mapinfo
->dirs
[0] = CAM_DIR_IN
;
594 case XPT_CONT_TARGET_IO
:
595 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_NONE
)
598 data_ptrs
[0] = &ccb
->csio
.data_ptr
;
599 lengths
[0] = ccb
->csio
.dxfer_len
;
600 mapinfo
->dirs
[0] = ccb
->ccb_h
.flags
& CAM_DIR_MASK
;
605 break; /* NOTREACHED */
	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
612 for (i
= 0; i
< numbufs
; i
++) {
		/*
		 * It's kinda bogus, we need a R+W command.  For now the
		 * buffer needs some sort of command.  Use BUF_CMD_WRITE
		 * to indicate a write and BUF_CMD_READ to indicate R+W.
		 */
618 cmd
[i
] = BUF_CMD_WRITE
;
		/*
		 * The userland data pointer passed in may not be page
		 * aligned.  vmapbuf() truncates the address to a page
		 * boundary, so if the address isn't page aligned, we'll
		 * need enough space for the given transfer length, plus
		 * whatever extra space is necessary to make it to the page
		 * boundary.
		 */
629 (((vm_offset_t
)(*data_ptrs
[i
])) & PAGE_MASK
)) > DFLTPHYS
){
630 kprintf("cam_periph_mapmem: attempt to map %lu bytes, "
631 "which is greater than DFLTPHYS(%d)\n",
633 (((vm_offset_t
)(*data_ptrs
[i
])) & PAGE_MASK
)),
638 if (mapinfo
->dirs
[i
] & CAM_DIR_OUT
) {
639 if (!useracc(*data_ptrs
[i
], lengths
[i
],
641 kprintf("cam_periph_mapmem: error, "
642 "address %p, length %lu isn't "
643 "user accessible for READ\n",
644 (void *)*data_ptrs
[i
],
650 if (mapinfo
->dirs
[i
] & CAM_DIR_IN
) {
651 cmd
[i
] = BUF_CMD_READ
;
652 if (!useracc(*data_ptrs
[i
], lengths
[i
],
654 kprintf("cam_periph_mapmem: error, "
655 "address %p, length %lu isn't "
656 "user accessible for WRITE\n",
657 (void *)*data_ptrs
[i
],
666 for (i
= 0; i
< numbufs
; i
++) {
672 /* save the original user pointer */
673 mapinfo
->saved_ptrs
[i
] = *data_ptrs
[i
];
		/*
		 * Require 16-byte alignment and bounce if we don't get it.
		 * (NATA does not realign buffers for DMA).
		 */
682 if ((intptr_t)*data_ptrs
[i
] & 15)
683 mapinfo
->bounce
[i
] = 1;
685 mapinfo
->bounce
[i
] = 0;
688 * Map the user buffer into kernel memory. If the user
689 * buffer is not aligned we have to allocate a bounce buffer
692 if (mapinfo
->bounce
[i
]) {
693 bp
->b_data
= bp
->b_kvabase
;
694 bp
->b_bcount
= lengths
[i
];
695 vm_hold_load_pages(bp
, (vm_offset_t
)bp
->b_data
,
696 (vm_offset_t
)bp
->b_data
+ bp
->b_bcount
);
697 if (mapinfo
->dirs
[i
] & CAM_DIR_OUT
) {
698 error
= copyin(*data_ptrs
[i
], bp
->b_data
, bp
->b_bcount
);
700 vm_hold_free_pages(bp
, (vm_offset_t
)bp
->b_data
, (vm_offset_t
)bp
->b_data
+ bp
->b_bcount
);
705 } else if (vmapbuf(bp
, *data_ptrs
[i
], lengths
[i
]) < 0) {
706 kprintf("cam_periph_mapmem: error, "
707 "address %p, length %lu isn't "
708 "user accessible any more\n",
709 (void *)*data_ptrs
[i
],
717 cam_periph_unmapbufs(mapinfo
, data_ptrs
, i
);
718 mapinfo
->num_bufs_used
-= i
;
722 /* set our pointer to the new mapped area */
723 *data_ptrs
[i
] = bp
->b_data
;
726 mapinfo
->num_bufs_used
++;
/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
742 if (mapinfo
->num_bufs_used
<= 0) {
743 /* allow ourselves to be swapped once again */
747 switch (ccb
->ccb_h
.func_code
) {
749 numbufs
= min(mapinfo
->num_bufs_used
, 2);
752 data_ptrs
[0] = (u_int8_t
**)&ccb
->cdm
.matches
;
754 data_ptrs
[0] = (u_int8_t
**)&ccb
->cdm
.patterns
;
755 data_ptrs
[1] = (u_int8_t
**)&ccb
->cdm
.matches
;
759 case XPT_CONT_TARGET_IO
:
760 data_ptrs
[0] = &ccb
->csio
.data_ptr
;
761 numbufs
= min(mapinfo
->num_bufs_used
, 1);
764 /* allow ourselves to be swapped once again */
766 break; /* NOTREACHED */
768 cam_periph_unmapbufs(mapinfo
, data_ptrs
, numbufs
);
static void
cam_periph_unmapbufs(struct cam_periph_map_info *mapinfo,
		     u_int8_t ***data_ptrs, int numbufs)
{
778 for (i
= 0; i
< numbufs
; i
++) {
781 /* Set the user's pointer back to the original value */
782 *data_ptrs
[i
] = mapinfo
->saved_ptrs
[i
];
784 /* unmap the buffer */
785 if (mapinfo
->bounce
[i
]) {
786 if (mapinfo
->dirs
[i
] & CAM_DIR_IN
) {
787 /* XXX return error */
788 copyout(bp
->b_data
, *data_ptrs
[i
],
791 vm_hold_free_pages(bp
, (vm_offset_t
)bp
->b_data
,
792 (vm_offset_t
)bp
->b_data
+ bp
->b_bcount
);
797 mapinfo
->bp
[i
] = NULL
;
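/*
 * Obtain a CCB for this peripheral at the requested priority, sleeping on
 * the peripheral's CCB list (with the SIM lock held) until xpt_schedule()
 * makes one available.
 */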
802 cam_periph_getccb(struct cam_periph
*periph
, u_int32_t priority
)
804 struct ccb_hdr
*ccb_h
;
806 sim_lock_assert_owned(periph
->sim
->lock
);
807 CAM_DEBUG(periph
->path
, CAM_DEBUG_TRACE
, ("entering cdgetccb\n"));
809 while (SLIST_FIRST(&periph
->ccb_list
) == NULL
) {
810 if (periph
->immediate_priority
> priority
)
811 periph
->immediate_priority
= priority
;
812 xpt_schedule(periph
, priority
);
813 if ((SLIST_FIRST(&periph
->ccb_list
) != NULL
)
814 && (SLIST_FIRST(&periph
->ccb_list
)->pinfo
.priority
== priority
))
816 sim_lock_sleep(&periph
->ccb_list
, 0, "cgticb", 0,
820 ccb_h
= SLIST_FIRST(&periph
->ccb_list
);
821 SLIST_REMOVE_HEAD(&periph
->ccb_list
, periph_links
.sle
);
822 return ((union ccb
*)ccb_h
);
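/*
 * Sleep until the given CCB has been dequeued and is no longer in progress.
 */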
826 cam_periph_ccbwait(union ccb
*ccb
)
830 sim
= xpt_path_sim(ccb
->ccb_h
.path
);
831 while ((ccb
->ccb_h
.pinfo
.index
!= CAM_UNQUEUED_INDEX
)
832 || ((ccb
->ccb_h
.status
& CAM_STATUS_MASK
) == CAM_REQ_INPROG
)) {
833 sim_lock_sleep(&ccb
->ccb_h
.cbfcnp
, 0, "cbwait", 0, sim
->lock
);
838 cam_periph_ioctl(struct cam_periph
*periph
, int cmd
, caddr_t addr
,
839 int (*error_routine
)(union ccb
*ccb
,
841 u_int32_t sense_flags
))
851 ccb
= cam_periph_getccb(periph
, /* priority */ 1);
852 xpt_setup_ccb(&ccb
->ccb_h
,
855 ccb
->ccb_h
.func_code
= XPT_GDEVLIST
;
	/*
	 * Basically, the point of this is that we go through
	 * getting the list of devices, until we find a passthrough
	 * device.  In the current version of the CAM code, the
	 * only way to determine what type of device we're dealing
	 * with is by its name.
	 */
866 ccb
->cgdl
.status
= CAM_GDEVLIST_MORE_DEVS
;
867 while (ccb
->cgdl
.status
== CAM_GDEVLIST_MORE_DEVS
) {
869 /* we want the next device in the list */
871 if (strncmp(ccb
->cgdl
.periph_name
,
877 if ((ccb
->cgdl
.status
== CAM_GDEVLIST_LAST_DEVICE
) &&
879 ccb
->cgdl
.periph_name
[0] = '\0';
880 ccb
->cgdl
.unit_number
= 0;
885 /* copy the result back out */
886 bcopy(ccb
, addr
, sizeof(union ccb
));
888 /* and release the ccb */
889 xpt_release_ccb(ccb
);
900 cam_periph_runccb(union ccb
*ccb
,
901 int (*error_routine
)(union ccb
*ccb
,
903 u_int32_t sense_flags
),
904 cam_flags camflags
, u_int32_t sense_flags
,
911 sim
= xpt_path_sim(ccb
->ccb_h
.path
);
912 sim_lock_assert_owned(sim
->lock
);
	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
918 if ((ds
!= NULL
) && (ccb
->ccb_h
.func_code
== XPT_SCSI_IO
))
919 devstat_start_transaction(ds
);
924 cam_periph_ccbwait(ccb
);
925 if ((ccb
->ccb_h
.status
& CAM_STATUS_MASK
) == CAM_REQ_CMP
)
927 else if (error_routine
!= NULL
)
928 error
= (*error_routine
)(ccb
, camflags
, sense_flags
);
932 } while (error
== ERESTART
);
934 if ((ccb
->ccb_h
.status
& CAM_DEV_QFRZN
) != 0)
935 cam_release_devq(ccb
->ccb_h
.path
,
939 /* getcount_only */ FALSE
);
941 if ((ds
!= NULL
) && (ccb
->ccb_h
.func_code
== XPT_SCSI_IO
))
942 devstat_end_transaction(ds
,
944 ccb
->csio
.tag_action
& 0xf,
945 ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) ==
946 CAM_DIR_NONE
) ? DEVSTAT_NO_DATA
:
947 (ccb
->ccb_h
.flags
& CAM_DIR_OUT
) ?
955 cam_freeze_devq(struct cam_path
*path
)
957 struct ccb_hdr ccb_h
;
959 xpt_setup_ccb(&ccb_h
, path
, /*priority*/1);
960 ccb_h
.func_code
= XPT_NOOP
;
961 ccb_h
.flags
= CAM_DEV_QFREEZE
;
962 xpt_action((union ccb
*)&ccb_h
);
966 cam_release_devq(struct cam_path
*path
, u_int32_t relsim_flags
,
967 u_int32_t openings
, u_int32_t timeout
,
970 struct ccb_relsim crs
;
972 xpt_setup_ccb(&crs
.ccb_h
, path
,
974 crs
.ccb_h
.func_code
= XPT_REL_SIMQ
;
975 crs
.ccb_h
.flags
= getcount_only
? CAM_DEV_QFREEZE
: 0;
976 crs
.release_flags
= relsim_flags
;
977 crs
.openings
= openings
;
978 crs
.release_timeout
= timeout
;
979 xpt_action((union ccb
*)&crs
);
980 return (crs
.qfrozen_cnt
);
983 #define saved_ccb_ptr ppriv_ptr0
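/*
 * Completion handler for the recovery CCBs issued by
 * camperiphscsisenseerror().  Once the recovery action finishes, the
 * original CCB is restored from saved_ccb_ptr and either resubmitted or
 * handed back to its owner for final error processing.
 */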
985 camperiphdone(struct cam_periph
*periph
, union ccb
*done_ccb
)
987 union ccb
*saved_ccb
;
991 struct scsi_start_stop_unit
*scsi_cmd
;
992 u_int32_t relsim_flags
, timeout
;
993 u_int32_t qfrozen_cnt
;
996 xpt_done_ccb
= FALSE
;
997 status
= done_ccb
->ccb_h
.status
;
998 frozen
= (status
& CAM_DEV_QFRZN
) != 0;
999 sense
= (status
& CAM_AUTOSNS_VALID
) != 0;
1000 status
&= CAM_STATUS_MASK
;
1004 saved_ccb
= (union ccb
*)done_ccb
->ccb_h
.saved_ccb_ptr
;
1007 * Unfreeze the queue once if it is already frozen..
1010 qfrozen_cnt
= cam_release_devq(done_ccb
->ccb_h
.path
,
1014 /*getcount_only*/0);
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 *
		 * If we manually retrieved sense into a CCB and got
		 * something other than "NO SENSE" send the updated CCB
		 * back to the client via xpt_done() to be processed via
		 * the error recovery code again.
		 */
1032 if (done_ccb
->ccb_h
.func_code
== XPT_SCSI_IO
) {
1033 scsi_cmd
= (struct scsi_start_stop_unit
*)
1034 &done_ccb
->csio
.cdb_io
.cdb_bytes
;
1036 if (scsi_cmd
->opcode
== START_STOP_UNIT
)
1037 xpt_async(AC_INQ_CHANGED
,
1038 done_ccb
->ccb_h
.path
, NULL
);
1039 if (scsi_cmd
->opcode
== REQUEST_SENSE
) {
1042 sense_key
= saved_ccb
->csio
.sense_data
.flags
;
1043 sense_key
&= SSD_KEY
;
1044 if (sense_key
!= SSD_KEY_NO_SENSE
) {
1045 saved_ccb
->ccb_h
.status
|=
1048 xpt_print(saved_ccb
->ccb_h
.path
,
1049 "Recovered Sense\n");
1050 scsi_sense_print(&saved_ccb
->csio
);
1051 cam_error_print(saved_ccb
, CAM_ESF_ALL
,
1054 xpt_done_ccb
= TRUE
;
1058 bcopy(done_ccb
->ccb_h
.saved_ccb_ptr
, done_ccb
,
1061 periph
->flags
&= ~CAM_PERIPH_RECOVERY_INPROG
;
1063 if (xpt_done_ccb
== FALSE
)
1064 xpt_action(done_ccb
);
1068 case CAM_SCSI_STATUS_ERROR
:
1069 scsi_cmd
= (struct scsi_start_stop_unit
*)
1070 &done_ccb
->csio
.cdb_io
.cdb_bytes
;
1072 struct ccb_getdev cgd
;
1073 struct scsi_sense_data
*sense
;
1074 int error_code
, sense_key
, asc
, ascq
;
1075 scsi_sense_action err_action
;
1077 sense
= &done_ccb
->csio
.sense_data
;
1078 scsi_extract_sense(sense
, &error_code
,
1079 &sense_key
, &asc
, &ascq
);
1082 * Grab the inquiry data for this device.
1084 xpt_setup_ccb(&cgd
.ccb_h
, done_ccb
->ccb_h
.path
,
1086 cgd
.ccb_h
.func_code
= XPT_GDEV_TYPE
;
1087 xpt_action((union ccb
*)&cgd
);
1088 err_action
= scsi_error_action(&done_ccb
->csio
,
			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 *
			 * Should we check to see what the specific
			 * scsi status is??  Or does it not matter
			 * since we already know that there was an
			 * error, and we know what the specific
			 * error code was, and we know what the
			 * error action is?
			 */
1109 if ((scsi_cmd
->opcode
== START_STOP_UNIT
) &&
1110 ((scsi_cmd
->how
& SSS_LOEJ
) != 0) &&
1111 (asc
== 0x24) && (ascq
== 0x00) &&
1112 (done_ccb
->ccb_h
.retry_count
> 0)) {
1114 scsi_cmd
->how
&= ~SSS_LOEJ
;
1116 xpt_action(done_ccb
);
1118 } else if ((done_ccb
->ccb_h
.retry_count
> 1)
1119 && ((err_action
& SS_MASK
) != SS_FAIL
)) {
			/*
			 * In this case, the error recovery
			 * command failed, but we've got
			 * some retries left on it.  Give
			 * it another try unless this is an
			 * unretryable error.
			 */
1129 /* set the timeout to .5 sec */
1131 RELSIM_RELEASE_AFTER_TIMEOUT
;
1134 xpt_action(done_ccb
);
			/*
			 * Perform the final retry with the original
			 * CCB so that final error processing is
			 * performed by the owner of the CCB.
			 */
1144 bcopy(done_ccb
->ccb_h
.saved_ccb_ptr
,
1145 done_ccb
, sizeof(union ccb
));
1147 periph
->flags
&= ~CAM_PERIPH_RECOVERY_INPROG
;
1149 xpt_action(done_ccb
);
			/*
			 * Eh??  The command failed, but we don't
			 * have any sense.  What's up with that?
			 * Fire the CCB again to return it to the
			 * caller.
			 */
1158 bcopy(done_ccb
->ccb_h
.saved_ccb_ptr
,
1159 done_ccb
, sizeof(union ccb
));
1161 periph
->flags
&= ~CAM_PERIPH_RECOVERY_INPROG
;
1163 xpt_action(done_ccb
);
1168 bcopy(done_ccb
->ccb_h
.saved_ccb_ptr
, done_ccb
,
1171 periph
->flags
&= ~CAM_PERIPH_RECOVERY_INPROG
;
1173 xpt_action(done_ccb
);
	/* decrement the retry count */
	/*
	 * XXX This isn't appropriate in all cases.  Restructure,
	 * so that the retry count is only decremented on an
	 * actual retry.  Remember that the original ccb had its
	 * retry count dropped before entering recovery, so
	 * doing it again is a bug.
	 */
1186 if (done_ccb
->ccb_h
.retry_count
> 0)
1187 done_ccb
->ccb_h
.retry_count
--;
1189 qfrozen_cnt
= cam_release_devq(done_ccb
->ccb_h
.path
,
1190 /*relsim_flags*/relsim_flags
,
1193 /*getcount_only*/0);
1194 if (xpt_done_ccb
== TRUE
)
1195 (*done_ccb
->ccb_h
.cbfcnp
)(periph
, done_ccb
);
/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
 */
void
cam_periph_async(struct cam_periph *periph, u_int32_t code,
		 struct cam_path *path, void *arg)
{
1208 case AC_LOST_DEVICE
:
1209 cam_periph_invalidate(periph
);
1214 cam_periph_bus_settle(periph
, scsi_delay
);
1223 cam_periph_bus_settle(struct cam_periph
*periph
, u_int bus_settle
)
1225 struct ccb_getdevstats cgds
;
1227 xpt_setup_ccb(&cgds
.ccb_h
, periph
->path
, /*priority*/1);
1228 cgds
.ccb_h
.func_code
= XPT_GDEV_STATS
;
1229 xpt_action((union ccb
*)&cgds
);
1230 cam_periph_freeze_after_event(periph
, &cgds
.last_reset
, bus_settle
);
1234 cam_periph_freeze_after_event(struct cam_periph
*periph
,
1235 struct timeval
* event_time
, u_int duration_ms
)
1237 struct timeval delta
;
1238 struct timeval duration_tv
;
1240 microuptime(&delta
);
1241 timevalsub(&delta
, event_time
);
1242 duration_tv
.tv_sec
= duration_ms
/ 1000;
1243 duration_tv
.tv_usec
= (duration_ms
% 1000) * 1000;
1244 if (timevalcmp(&delta
, &duration_tv
, <)) {
1245 timevalsub(&duration_tv
, &delta
);
1247 duration_ms
= duration_tv
.tv_sec
* 1000;
1248 duration_ms
+= duration_tv
.tv_usec
/ 1000;
1249 cam_freeze_devq(periph
->path
);
1250 cam_release_devq(periph
->path
,
1251 RELSIM_RELEASE_AFTER_TIMEOUT
,
1253 /*timeout*/duration_ms
,
1254 /*getcount_only*/0);
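/*
 * Map a SCSI status byte to an errno and a recovery action.  CHECK
 * CONDITION and COMMAND TERMINATED are handed to camperiphscsisenseerror();
 * QUEUE FULL and BUSY throttle or delay the device queue via *openings and
 * *relsim_flags.
 */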
1260 camperiphscsistatuserror(union ccb
*ccb
, cam_flags camflags
,
1261 u_int32_t sense_flags
, union ccb
*save_ccb
,
1262 int *openings
, u_int32_t
*relsim_flags
,
1267 switch (ccb
->csio
.scsi_status
) {
1268 case SCSI_STATUS_OK
:
1269 case SCSI_STATUS_COND_MET
:
1270 case SCSI_STATUS_INTERMED
:
1271 case SCSI_STATUS_INTERMED_COND_MET
:
1274 case SCSI_STATUS_CMD_TERMINATED
:
1275 case SCSI_STATUS_CHECK_COND
:
1276 error
= camperiphscsisenseerror(ccb
,
1284 case SCSI_STATUS_QUEUE_FULL
:
1287 struct ccb_getdevstats cgds
;
1290 * First off, find out what the current
1291 * transaction counts are.
1293 xpt_setup_ccb(&cgds
.ccb_h
,
1296 cgds
.ccb_h
.func_code
= XPT_GDEV_STATS
;
1297 xpt_action((union ccb
*)&cgds
);
1300 * If we were the only transaction active, treat
1301 * the QUEUE FULL as if it were a BUSY condition.
1303 if (cgds
.dev_active
!= 0) {
			/*
			 * Reduce the number of openings to
			 * be 1 less than the amount it took
			 * to get a queue full bounded by the
			 * minimum allowed tag count for this
			 * device.
			 */
1313 total_openings
= cgds
.dev_active
+ cgds
.dev_openings
;
1314 *openings
= cgds
.dev_active
;
1315 if (*openings
< cgds
.mintags
)
1316 *openings
= cgds
.mintags
;
1317 if (*openings
< total_openings
)
1318 *relsim_flags
= RELSIM_ADJUST_OPENINGS
;
			/*
			 * Some devices report queue full for
			 * temporary resource shortages.  For
			 * this reason, we allow a minimum
			 * tag count to be entered via a
			 * quirk entry to prevent the queue
			 * count on these devices from falling
			 * to a pessimistically low value.  We
			 * still wait for the next successful
			 * completion, however, before queueing
			 * more transactions to the device.
			 */
1332 *relsim_flags
= RELSIM_RELEASE_AFTER_CMDCMPLT
;
1337 xpt_print(ccb
->ccb_h
.path
, "Queue Full\n");
1343 case SCSI_STATUS_BUSY
:
1345 * Restart the queue after either another
1346 * command completes or a 1 second timeout.
1349 xpt_print(ccb
->ccb_h
.path
, "Device Busy\n");
1351 if (ccb
->ccb_h
.retry_count
> 0) {
1352 ccb
->ccb_h
.retry_count
--;
1354 *relsim_flags
= RELSIM_RELEASE_AFTER_TIMEOUT
1355 | RELSIM_RELEASE_AFTER_CMDCMPLT
;
1361 case SCSI_STATUS_RESERV_CONFLICT
:
1362 xpt_print(ccb
->ccb_h
.path
, "Reservation Conflict\n");
1366 xpt_print(ccb
->ccb_h
.path
, "SCSI Status 0x%x\n",
1367 ccb
->csio
.scsi_status
);
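/*
 * Decide how to recover from a CHECK CONDITION: consult the sense data via
 * scsi_error_action() and, where needed, build a recovery CCB (START UNIT,
 * TEST UNIT READY or REQUEST SENSE) whose completion is handled by
 * camperiphdone().
 */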
1375 camperiphscsisenseerror(union ccb
*ccb
, cam_flags camflags
,
1376 u_int32_t sense_flags
, union ccb
*save_ccb
,
1377 int *openings
, u_int32_t
*relsim_flags
,
1380 struct cam_periph
*periph
;
1383 periph
= xpt_path_periph(ccb
->ccb_h
.path
);
1384 if (periph
->flags
& CAM_PERIPH_RECOVERY_INPROG
) {
		/*
		 * If error recovery is already in progress, don't attempt
		 * to process this error, but requeue it unconditionally
		 * and attempt to process it once error recovery has
		 * completed.  This failed command is probably related to
		 * the error that caused the currently active error recovery
		 * action so our current recovery efforts should also
		 * address this command.  Be aware that the error recovery
		 * code assumes that only one recovery action is in progress
		 * on a particular peripheral instance at any given time
		 * (e.g. only one saved CCB for error recovery) so it is
		 * imperative that we don't violate this assumption.
		 */
1401 scsi_sense_action err_action
;
1402 struct ccb_getdev cgd
;
1403 const char *action_string
;
1404 union ccb
* print_ccb
;
1406 /* A description of the error recovery action performed */
1407 action_string
= NULL
;
	/*
	 * The location of the original ccb
	 * for sense printing purposes.
	 */
1416 * Grab the inquiry data for this device.
1418 xpt_setup_ccb(&cgd
.ccb_h
, ccb
->ccb_h
.path
, /*priority*/ 1);
1419 cgd
.ccb_h
.func_code
= XPT_GDEV_TYPE
;
1420 xpt_action((union ccb
*)&cgd
);
1422 if ((ccb
->ccb_h
.status
& CAM_AUTOSNS_VALID
) != 0)
1423 err_action
= scsi_error_action(&ccb
->csio
,
1426 else if ((ccb
->ccb_h
.flags
& CAM_DIS_AUTOSENSE
) == 0)
1427 err_action
= SS_REQSENSE
;
1429 err_action
= SS_RETRY
|SSQ_DECREMENT_COUNT
|EIO
;
1431 error
= err_action
& SS_ERRMASK
;
1434 * If the recovery action will consume a retry,
1435 * make sure we actually have retries available.
1437 if ((err_action
& SSQ_DECREMENT_COUNT
) != 0) {
1438 if (ccb
->ccb_h
.retry_count
> 0)
1439 ccb
->ccb_h
.retry_count
--;
1441 action_string
= "Retries Exhausted";
1442 goto sense_error_done
;
1446 if ((err_action
& SS_MASK
) >= SS_START
) {
1448 * Do common portions of commands that
1449 * use recovery CCBs.
1451 if (save_ccb
== NULL
) {
1452 action_string
= "No recovery CCB supplied";
1453 goto sense_error_done
;
1455 bcopy(ccb
, save_ccb
, sizeof(*save_ccb
));
1456 print_ccb
= save_ccb
;
1457 periph
->flags
|= CAM_PERIPH_RECOVERY_INPROG
;
1460 switch (err_action
& SS_MASK
) {
1462 action_string
= "No Recovery Action Needed";
1466 action_string
= "Retrying Command (per Sense Data)";
1470 action_string
= "Unretryable error";
1477 * Send a start unit command to the device, and
1478 * then retry the command.
1480 action_string
= "Attempting to Start Unit";
1483 * Check for removable media and set
1484 * load/eject flag appropriately.
1486 if (SID_IS_REMOVABLE(&cgd
.inq_data
))
1491 scsi_start_stop(&ccb
->csio
,
1505 * Send a Test Unit Ready to the device.
1506 * If the 'many' flag is set, we send 120
1507 * test unit ready commands, one every half
1508 * second. Otherwise, we just send one TUR.
1509 * We only want to do this if the retry
1510 * count has not been exhausted.
1514 if ((err_action
& SSQ_MANY
) != 0) {
1515 action_string
= "Polling device for readiness";
1518 action_string
= "Testing device for readiness";
1521 scsi_test_unit_ready(&ccb
->csio
,
1529 * Accomplish our 500ms delay by deferring
1530 * the release of our device queue appropriately.
1532 *relsim_flags
= RELSIM_RELEASE_AFTER_TIMEOUT
;
1539 * Send a Request Sense to the device. We
1540 * assume that we are in a contingent allegiance
1541 * condition so we do not tag this request.
1543 scsi_request_sense(&ccb
->csio
, /*retries*/1,
1545 &save_ccb
->csio
.sense_data
,
1546 sizeof(save_ccb
->csio
.sense_data
),
1547 CAM_TAG_ACTION_NONE
,
1548 /*sense_len*/SSD_FULL_SIZE
,
1553 panic("Unhandled error action %x", err_action
);
1556 if ((err_action
& SS_MASK
) >= SS_START
) {
1558 * Drop the priority to 0 so that the recovery
1559 * CCB is the first to execute. Freeze the queue
1560 * after this command is sent so that we can
1561 * restore the old csio and have it queued in
1562 * the proper order before we release normal
1563 * transactions to the device.
1565 ccb
->ccb_h
.pinfo
.priority
= 0;
1566 ccb
->ccb_h
.flags
|= CAM_DEV_QFREEZE
;
1567 ccb
->ccb_h
.saved_ccb_ptr
= save_ccb
;
1572 if ((err_action
& SSQ_PRINT_SENSE
) != 0
1573 && (ccb
->ccb_h
.status
& CAM_AUTOSNS_VALID
) != 0) {
1574 cam_error_print(print_ccb
, CAM_ESF_ALL
, CAM_EPF_ALL
);
1575 xpt_print_path(ccb
->ccb_h
.path
);
1577 scsi_sense_print(&print_ccb
->csio
);
1578 kprintf("%s\n", action_string
);
/*
 * Generic error handler.  Peripheral drivers usually filter
 * out the errors that they handle in a unique manner, then
 * call this function.
 */
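/*
 * A peripheral driver would typically call this from the error callback it
 * passes to cam_periph_runccb(); hypothetical sketch only (the "xx" driver
 * name, softc layout and flag values are illustrative, not from this file):
 *
 *	static int
 *	xxerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
 *	{
 *		struct xx_softc *softc;
 *
 *		softc = xpt_path_periph(ccb->ccb_h.path)->softc;
 *		return (cam_periph_error(ccb, cam_flags, sense_flags,
 *					 &softc->saved_ccb));
 *	}
 */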
1590 cam_periph_error(union ccb
*ccb
, cam_flags camflags
,
1591 u_int32_t sense_flags
, union ccb
*save_ccb
)
1593 const char *action_string
;
1596 int error
, printed
= 0;
1598 u_int32_t relsim_flags
;
1599 u_int32_t timeout
= 0;
1601 action_string
= NULL
;
1602 status
= ccb
->ccb_h
.status
;
1603 frozen
= (status
& CAM_DEV_QFRZN
) != 0;
1604 status
&= CAM_STATUS_MASK
;
1605 openings
= relsim_flags
= 0;
1611 case CAM_SCSI_STATUS_ERROR
:
1612 error
= camperiphscsistatuserror(ccb
,
1620 case CAM_AUTOSENSE_FAIL
:
1621 xpt_print(ccb
->ccb_h
.path
, "AutoSense Failed\n");
1622 error
= EIO
; /* we have to kill the command */
1624 case CAM_REQ_CMP_ERR
:
1625 if (bootverbose
&& printed
== 0) {
1626 xpt_print(ccb
->ccb_h
.path
,
1627 "Request completed with CAM_REQ_CMP_ERR\n");
1631 case CAM_CMD_TIMEOUT
:
1632 if (bootverbose
&& printed
== 0) {
1633 xpt_print(ccb
->ccb_h
.path
, "Command timed out\n");
1637 case CAM_UNEXP_BUSFREE
:
1638 if (bootverbose
&& printed
== 0) {
1639 xpt_print(ccb
->ccb_h
.path
, "Unexpected Bus Free\n");
1643 case CAM_UNCOR_PARITY
:
1644 if (bootverbose
&& printed
== 0) {
1645 xpt_print(ccb
->ccb_h
.path
,
1646 "Uncorrected Parity Error\n");
1650 case CAM_DATA_RUN_ERR
:
1651 if (bootverbose
&& printed
== 0) {
1652 xpt_print(ccb
->ccb_h
.path
, "Data Overrun\n");
1655 error
= EIO
; /* we have to kill the command */
1656 /* decrement the number of retries */
1657 if (ccb
->ccb_h
.retry_count
> 0) {
1658 ccb
->ccb_h
.retry_count
--;
1661 action_string
= "Retries Exhausted";
1667 case CAM_MSG_REJECT_REC
:
1668 /* XXX Don't know that these are correct */
1671 case CAM_SEL_TIMEOUT
:
1673 struct cam_path
*newpath
;
1675 if ((camflags
& CAM_RETRY_SELTO
) != 0) {
1676 if (ccb
->ccb_h
.retry_count
> 0) {
1678 ccb
->ccb_h
.retry_count
--;
1680 if (bootverbose
&& printed
== 0) {
1681 xpt_print(ccb
->ccb_h
.path
,
1682 "Selection Timeout\n");
1687 * Wait a bit to give the device
1688 * time to recover before we try again.
1690 relsim_flags
= RELSIM_RELEASE_AFTER_TIMEOUT
;
1691 timeout
= periph_selto_delay
;
1696 /* Should we do more if we can't create the path?? */
1697 if (xpt_create_path(&newpath
, xpt_path_periph(ccb
->ccb_h
.path
),
1698 xpt_path_path_id(ccb
->ccb_h
.path
),
1699 xpt_path_target_id(ccb
->ccb_h
.path
),
1700 CAM_LUN_WILDCARD
) != CAM_REQ_CMP
)
		/*
		 * Let peripheral drivers know that this device has gone
		 * away.
		 */
1707 xpt_async(AC_LOST_DEVICE
, newpath
, NULL
);
1708 xpt_free_path(newpath
);
1711 case CAM_REQ_INVALID
:
1712 case CAM_PATH_INVALID
:
1713 case CAM_DEV_NOT_THERE
:
1715 case CAM_PROVIDE_FAIL
:
1716 case CAM_REQ_TOO_BIG
:
1717 case CAM_LUN_INVALID
:
1718 case CAM_TID_INVALID
:
1721 case CAM_SCSI_BUS_RESET
:
		/*
		 * Commands that repeatedly timeout and cause these
		 * kinds of error recovery actions, should return
		 * CAM_CMD_TIMEOUT, which allows us to safely assume
		 * that this command was an innocent bystander to
		 * these events and should be unconditionally
		 * retried.
		 */
1731 if (bootverbose
&& printed
== 0) {
1732 xpt_print_path(ccb
->ccb_h
.path
);
1733 if (status
== CAM_BDR_SENT
)
1734 kprintf("Bus Device Reset sent\n");
1736 kprintf("Bus Reset issued\n");
1740 case CAM_REQUEUE_REQ
:
1741 /* Unconditional requeue */
1743 if (bootverbose
&& printed
== 0) {
1744 xpt_print(ccb
->ccb_h
.path
, "Request Requeued\n");
1748 case CAM_RESRC_UNAVAIL
:
1749 /* Wait a bit for the resource shortage to abate. */
1750 timeout
= periph_noresrc_delay
;
1754 /* Wait a bit for the busy condition to abate. */
1755 timeout
= periph_busy_delay
;
1757 relsim_flags
= RELSIM_RELEASE_AFTER_TIMEOUT
;
1760 /* decrement the number of retries */
1761 if (ccb
->ccb_h
.retry_count
> 0) {
1762 ccb
->ccb_h
.retry_count
--;
1764 if (bootverbose
&& printed
== 0) {
1765 xpt_print(ccb
->ccb_h
.path
, "CAM Status 0x%x\n",
1771 action_string
= "Retries Exhausted";
1776 /* Attempt a retry */
1777 if (error
== ERESTART
|| error
== 0) {
1779 ccb
->ccb_h
.status
&= ~CAM_DEV_QFRZN
;
1781 if (error
== ERESTART
) {
1782 action_string
= "Retrying Command";
1787 cam_release_devq(ccb
->ccb_h
.path
,
1791 /*getcount_only*/0);
1795 * If we have an error and are booting verbosely, whine
1796 * *unless* this was a non-retryable selection timeout.
1798 if (error
!= 0 && bootverbose
&&
1799 !(status
== CAM_SEL_TIMEOUT
&& (camflags
& CAM_RETRY_SELTO
) == 0)) {
1802 if (action_string
== NULL
)
1803 action_string
= "Unretryable Error";
1804 if (error
!= ERESTART
) {
1805 xpt_print(ccb
->ccb_h
.path
, "error %d\n", error
);
1807 xpt_print(ccb
->ccb_h
.path
, "%s\n", action_string
);