2 * Common functions for CAM "type" (peripheral) drivers.
4 * Copyright (c) 1997, 1998 Justin T. Gibbs.
5 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * $FreeBSD: src/sys/cam/cam_periph.c,v 1.70 2008/02/12 11:07:33 raj Exp $
30 * $DragonFly: src/sys/bus/cam/cam_periph.c,v 1.40 2008/05/18 20:30:19 pavalos Exp $
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/types.h>
36 #include <sys/malloc.h>
37 #include <sys/kernel.h>
41 #include <sys/devicestat.h>
44 #include <vm/vm_extern.h>
46 #include <sys/thread2.h>
50 #include "cam_xpt_periph.h"
51 #include "cam_periph.h"
52 #include "cam_debug.h"
55 #include <bus/cam/scsi/scsi_all.h>
56 #include <bus/cam/scsi/scsi_message.h>
57 #include <bus/cam/scsi/scsi_pass.h>
59 static u_int
camperiphnextunit(struct periph_driver
*p_drv
,
60 u_int newunit
, int wired
,
61 path_id_t pathid
, target_id_t target
,
63 static u_int
camperiphunit(struct periph_driver
*p_drv
,
64 path_id_t pathid
, target_id_t target
,
66 static void camperiphdone(struct cam_periph
*periph
,
68 static void camperiphfree(struct cam_periph
*periph
);
69 static int camperiphscsistatuserror(union ccb
*ccb
,
71 u_int32_t sense_flags
,
74 u_int32_t
*relsim_flags
,
76 static int camperiphscsisenseerror(union ccb
*ccb
,
78 u_int32_t sense_flags
,
81 u_int32_t
*relsim_flags
,
/*
 * Count of registered peripheral drivers; the periph_drivers table is
 * kept with a trailing NULL sentinel beyond this count.
 */
84 static int nperiph_drivers
;
/* NULL-terminated table of every registered peripheral driver. */
85 struct periph_driver
**periph_drivers
;
/* Allocation tag used for all CAM peripheral-layer memory. */
87 MALLOC_DEFINE(M_CAMPERIPH
, "CAM periph", "CAM peripheral buffers");
/*
 * Error-recovery requeue delays, overridable via loader tunables.
 * NOTE(review): values look like milliseconds (1000/500) — confirm
 * against the code that consumes them (not visible in this chunk).
 */
89 static int periph_selto_delay
= 1000;
90 TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay
);
91 static int periph_noresrc_delay
= 500;
92 TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay
);
93 static int periph_busy_delay
= 500;
94 TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay
);
98 periphdriver_register(void *data
)
100 struct periph_driver
**newdrivers
, **old
;
103 ndrivers
= nperiph_drivers
+ 2;
104 newdrivers
= kmalloc(sizeof(*newdrivers
) * ndrivers
, M_CAMPERIPH
,
107 bcopy(periph_drivers
, newdrivers
,
108 sizeof(*newdrivers
) * nperiph_drivers
);
109 newdrivers
[nperiph_drivers
] = (struct periph_driver
*)data
;
110 newdrivers
[nperiph_drivers
+ 1] = NULL
;
111 old
= periph_drivers
;
112 periph_drivers
= newdrivers
;
114 kfree(old
, M_CAMPERIPH
);
119 cam_periph_alloc(periph_ctor_t
*periph_ctor
,
120 periph_oninv_t
*periph_oninvalidate
,
121 periph_dtor_t
*periph_dtor
, periph_start_t
*periph_start
,
122 char *name
, cam_periph_type type
, struct cam_path
*path
,
123 ac_callback_t
*ac_callback
, ac_code code
, void *arg
)
125 struct periph_driver
**p_drv
;
127 struct cam_periph
*periph
;
128 struct cam_periph
*cur_periph
;
130 target_id_t target_id
;
137 * Handle Hot-Plug scenarios. If there is already a peripheral
138 * of our type assigned to this path, we are likely waiting for
139 * final close on an old, invalidated, peripheral. If this is
140 * the case, queue up a deferred call to the peripheral's async
141 * handler. If it looks like a mistaken re-allocation, complain.
143 if ((periph
= cam_periph_find(path
, name
)) != NULL
) {
145 if ((periph
->flags
& CAM_PERIPH_INVALID
) != 0
146 && (periph
->flags
& CAM_PERIPH_NEW_DEV_FOUND
) == 0) {
147 periph
->flags
|= CAM_PERIPH_NEW_DEV_FOUND
;
148 periph
->deferred_callback
= ac_callback
;
149 periph
->deferred_ac
= code
;
150 return (CAM_REQ_INPROG
);
152 kprintf("cam_periph_alloc: attempt to re-allocate "
153 "valid device %s%d rejected\n",
154 periph
->periph_name
, periph
->unit_number
);
156 return (CAM_REQ_INVALID
);
159 periph
= kmalloc(sizeof(*periph
), M_CAMPERIPH
, M_INTWAIT
| M_ZERO
);
164 for (p_drv
= periph_drivers
; *p_drv
!= NULL
; p_drv
++) {
165 if (strcmp((*p_drv
)->driver_name
, name
) == 0)
170 sim
= xpt_path_sim(path
);
171 path_id
= xpt_path_path_id(path
);
172 target_id
= xpt_path_target_id(path
);
173 lun_id
= xpt_path_lun_id(path
);
174 cam_init_pinfo(&periph
->pinfo
);
175 periph
->periph_start
= periph_start
;
176 periph
->periph_dtor
= periph_dtor
;
177 periph
->periph_oninval
= periph_oninvalidate
;
179 periph
->periph_name
= name
;
180 periph
->unit_number
= camperiphunit(*p_drv
, path_id
, target_id
, lun_id
);
181 periph
->immediate_priority
= CAM_PRIORITY_NONE
;
182 periph
->refcount
= 0;
184 SLIST_INIT(&periph
->ccb_list
);
185 status
= xpt_create_path(&path
, periph
, path_id
, target_id
, lun_id
);
186 if (status
!= CAM_REQ_CMP
)
192 status
= xpt_add_periph(periph
);
194 if (status
!= CAM_REQ_CMP
)
197 cur_periph
= TAILQ_FIRST(&(*p_drv
)->units
);
198 while (cur_periph
!= NULL
199 && cur_periph
->unit_number
< periph
->unit_number
)
200 cur_periph
= TAILQ_NEXT(cur_periph
, unit_links
);
202 if (cur_periph
!= NULL
)
203 TAILQ_INSERT_BEFORE(cur_periph
, periph
, unit_links
);
205 TAILQ_INSERT_TAIL(&(*p_drv
)->units
, periph
, unit_links
);
206 (*p_drv
)->generation
++;
211 status
= periph_ctor(periph
, arg
);
213 if (status
== CAM_REQ_CMP
)
217 switch (init_level
) {
219 /* Initialized successfully */
222 TAILQ_REMOVE(&(*p_drv
)->units
, periph
, unit_links
);
223 xpt_remove_periph(periph
);
226 xpt_free_path(periph
->path
);
229 kfree(periph
, M_CAMPERIPH
);
232 /* No cleanup to perform. */
235 panic("cam_periph_alloc: Unknown init level");
241 * Find a peripheral structure with the specified path, target, lun,
242 * and (optionally) type. If the name is NULL, this function will return
243 * the first peripheral driver that matches the specified path.
246 cam_periph_find(struct cam_path
*path
, char *name
)
248 struct periph_driver
**p_drv
;
249 struct cam_periph
*periph
;
252 for (p_drv
= periph_drivers
; *p_drv
!= NULL
; p_drv
++) {
253 if (name
!= NULL
&& (strcmp((*p_drv
)->driver_name
, name
) != 0))
256 TAILQ_FOREACH(periph
, &(*p_drv
)->units
, unit_links
) {
257 if (xpt_path_comp(periph
->path
, path
) == 0) {
272 cam_periph_acquire(struct cam_periph
*periph
)
275 return(CAM_REQ_CMP_ERR
);
285 cam_periph_release(struct cam_periph
*periph
)
292 if ((--periph
->refcount
== 0)
293 && (periph
->flags
& CAM_PERIPH_INVALID
)) {
294 camperiphfree(periph
);
301 cam_periph_hold(struct cam_periph
*periph
, int flags
)
305 sim_lock_assert_owned(periph
->sim
->lock
);
308 * Increment the reference count on the peripheral
309 * while we wait for our lock attempt to succeed
310 * to ensure the peripheral doesn't disappear out
311 * from user us while we sleep.
314 if (cam_periph_acquire(periph
) != CAM_REQ_CMP
)
317 while ((periph
->flags
& CAM_PERIPH_LOCKED
) != 0) {
318 periph
->flags
|= CAM_PERIPH_LOCK_WANTED
;
319 if ((error
= sim_lock_sleep(periph
, flags
, "caplck", 0,
320 periph
->sim
->lock
)) != 0) {
321 cam_periph_release(periph
);
326 periph
->flags
|= CAM_PERIPH_LOCKED
;
331 cam_periph_unhold(struct cam_periph
*periph
)
334 sim_lock_assert_owned(periph
->sim
->lock
);
336 periph
->flags
&= ~CAM_PERIPH_LOCKED
;
337 if ((periph
->flags
& CAM_PERIPH_LOCK_WANTED
) != 0) {
338 periph
->flags
&= ~CAM_PERIPH_LOCK_WANTED
;
342 cam_periph_release(periph
);
346 * Look for the next unit number that is not currently in use for this
347 * peripheral type starting at "newunit". Also exclude unit numbers that
348 * are reserved by for future "hardwiring" unless we already know that this
349 * is a potential wired device. Only assume that the device is "wired" the
350 * first time through the loop since after that we'll be looking at unit
351 * numbers that did not match a wiring entry.
354 camperiphnextunit(struct periph_driver
*p_drv
, u_int newunit
, int wired
,
355 path_id_t pathid
, target_id_t target
, lun_id_t lun
)
357 struct cam_periph
*periph
;
358 char *periph_name
, *strval
;
362 periph_name
= p_drv
->driver_name
;
365 for (periph
= TAILQ_FIRST(&p_drv
->units
);
366 periph
!= NULL
&& periph
->unit_number
!= newunit
;
367 periph
= TAILQ_NEXT(periph
, unit_links
))
370 if (periph
!= NULL
&& periph
->unit_number
== newunit
) {
372 xpt_print(periph
->path
, "Duplicate Wired "
374 xpt_print(periph
->path
, "Second device (%s "
375 "device at scbus%d target %d lun %d) will "
376 "not be wired\n", periph_name
, pathid
,
386 * Don't match entries like "da 4" as a wired down
387 * device, but do match entries like "da 4 target 5"
388 * or even "da 4 scbus 1".
391 while ((i
= resource_locate(i
, periph_name
)) != -1) {
392 dname
= resource_query_name(i
);
393 dunit
= resource_query_unit(i
);
394 /* if no "target" and no specific scbus, skip */
395 if (resource_int_value(dname
, dunit
, "target", &val
) &&
396 (resource_string_value(dname
, dunit
, "at",&strval
)||
397 strcmp(strval
, "scbus") == 0))
399 if (newunit
== dunit
)
409 camperiphunit(struct periph_driver
*p_drv
, path_id_t pathid
,
410 target_id_t target
, lun_id_t lun
)
413 int hit
, i
, val
, dunit
;
415 char pathbuf
[32], *strval
, *periph_name
;
419 periph_name
= p_drv
->driver_name
;
420 ksnprintf(pathbuf
, sizeof(pathbuf
), "scbus%d", pathid
);
422 for (hit
= 0; (i
= resource_locate(i
, periph_name
)) != -1; hit
= 0) {
423 dname
= resource_query_name(i
);
424 dunit
= resource_query_unit(i
);
425 if (resource_string_value(dname
, dunit
, "at", &strval
) == 0) {
426 if (strcmp(strval
, pathbuf
) != 0)
430 if (resource_int_value(dname
, dunit
, "target", &val
) == 0) {
435 if (resource_int_value(dname
, dunit
, "lun", &val
) == 0) {
447 * Either start from 0 looking for the next unit or from
448 * the unit number given in the resource config. This way,
449 * if we have wildcard matches, we don't return the same
452 unit
= camperiphnextunit(p_drv
, unit
, /*wired*/hit
, pathid
,
459 cam_periph_invalidate(struct cam_periph
*periph
)
462 * We only call this routine the first time a peripheral is
465 if (((periph
->flags
& CAM_PERIPH_INVALID
) == 0)
466 && (periph
->periph_oninval
!= NULL
))
467 periph
->periph_oninval(periph
);
469 periph
->flags
|= CAM_PERIPH_INVALID
;
470 periph
->flags
&= ~CAM_PERIPH_NEW_DEV_FOUND
;
473 if (periph
->refcount
== 0)
474 camperiphfree(periph
);
475 else if (periph
->refcount
< 0)
476 kprintf("cam_invalidate_periph: refcount < 0!!\n");
481 camperiphfree(struct cam_periph
*periph
)
483 struct periph_driver
**p_drv
;
485 for (p_drv
= periph_drivers
; *p_drv
!= NULL
; p_drv
++) {
486 if (strcmp((*p_drv
)->driver_name
, periph
->periph_name
) == 0)
490 if (*p_drv
== NULL
) {
491 kprintf("camperiphfree: attempt to free non-existent periph\n");
495 TAILQ_REMOVE(&(*p_drv
)->units
, periph
, unit_links
);
496 (*p_drv
)->generation
++;
499 if (periph
->periph_dtor
!= NULL
)
500 periph
->periph_dtor(periph
);
501 xpt_remove_periph(periph
);
503 if (periph
->flags
& CAM_PERIPH_NEW_DEV_FOUND
) {
507 switch (periph
->deferred_ac
) {
508 case AC_FOUND_DEVICE
:
509 ccb
.ccb_h
.func_code
= XPT_GDEV_TYPE
;
510 xpt_setup_ccb(&ccb
.ccb_h
, periph
->path
, /*priority*/ 1);
514 case AC_PATH_REGISTERED
:
515 ccb
.ccb_h
.func_code
= XPT_PATH_INQ
;
516 xpt_setup_ccb(&ccb
.ccb_h
, periph
->path
, /*priority*/ 1);
524 periph
->deferred_callback(NULL
, periph
->deferred_ac
,
527 xpt_free_path(periph
->path
);
528 kfree(periph
, M_CAMPERIPH
);
533 * Map user virtual pointers into kernel virtual address space, so we can
534 * access the memory. This won't work on physical pointers, for now it's
535 * up to the caller to check for that. (XXX KDM -- should we do that here
536 * instead?) This also only works for up to MAXPHYS memory. Since we use
537 * buffers to map stuff in and out, we're limited to the buffer size.
540 cam_periph_mapmem(union ccb
*ccb
, struct cam_periph_map_info
*mapinfo
)
543 buf_cmd_t cmd
[CAM_PERIPH_MAXMAPS
];
544 u_int8_t
**data_ptrs
[CAM_PERIPH_MAXMAPS
];
545 u_int32_t lengths
[CAM_PERIPH_MAXMAPS
];
546 u_int32_t dirs
[CAM_PERIPH_MAXMAPS
];
548 switch(ccb
->ccb_h
.func_code
) {
550 if (ccb
->cdm
.match_buf_len
== 0) {
551 kprintf("cam_periph_mapmem: invalid match buffer "
555 if (ccb
->cdm
.pattern_buf_len
> 0) {
556 data_ptrs
[0] = (u_int8_t
**)&ccb
->cdm
.patterns
;
557 lengths
[0] = ccb
->cdm
.pattern_buf_len
;
558 dirs
[0] = CAM_DIR_OUT
;
559 data_ptrs
[1] = (u_int8_t
**)&ccb
->cdm
.matches
;
560 lengths
[1] = ccb
->cdm
.match_buf_len
;
561 dirs
[1] = CAM_DIR_IN
;
564 data_ptrs
[0] = (u_int8_t
**)&ccb
->cdm
.matches
;
565 lengths
[0] = ccb
->cdm
.match_buf_len
;
566 dirs
[0] = CAM_DIR_IN
;
571 case XPT_CONT_TARGET_IO
:
572 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_NONE
)
575 data_ptrs
[0] = &ccb
->csio
.data_ptr
;
576 lengths
[0] = ccb
->csio
.dxfer_len
;
577 dirs
[0] = ccb
->ccb_h
.flags
& CAM_DIR_MASK
;
582 break; /* NOTREACHED */
586 * Check the transfer length and permissions first, so we don't
587 * have to unmap any previously mapped buffers.
589 for (i
= 0; i
< numbufs
; i
++) {
591 * Its kinda bogus, we need a R+W command. For now the
592 * buffer needs some sort of command. Use BUF_CMD_WRITE
593 * to indicate a write and BUF_CMD_READ to indicate R+W.
595 cmd
[i
] = BUF_CMD_WRITE
;
598 * The userland data pointer passed in may not be page
599 * aligned. vmapbuf() truncates the address to a page
600 * boundary, so if the address isn't page aligned, we'll
601 * need enough space for the given transfer length, plus
602 * whatever extra space is necessary to make it to the page
606 (((vm_offset_t
)(*data_ptrs
[i
])) & PAGE_MASK
)) > DFLTPHYS
){
607 kprintf("cam_periph_mapmem: attempt to map %lu bytes, "
608 "which is greater than DFLTPHYS(%d)\n",
610 (((vm_offset_t
)(*data_ptrs
[i
])) & PAGE_MASK
)),
615 if (dirs
[i
] & CAM_DIR_OUT
) {
616 if (!useracc(*data_ptrs
[i
], lengths
[i
],
618 kprintf("cam_periph_mapmem: error, "
619 "address %p, length %lu isn't "
620 "user accessible for READ\n",
621 (void *)*data_ptrs
[i
],
627 if (dirs
[i
] & CAM_DIR_IN
) {
628 cmd
[i
] = BUF_CMD_READ
;
629 if (!useracc(*data_ptrs
[i
], lengths
[i
],
631 kprintf("cam_periph_mapmem: error, "
632 "address %p, length %lu isn't "
633 "user accessible for WRITE\n",
634 (void *)*data_ptrs
[i
],
643 for (i
= 0; i
< numbufs
; i
++) {
647 mapinfo
->bp
[i
] = getpbuf(NULL
);
649 /* save the original user pointer */
650 mapinfo
->saved_ptrs
[i
] = *data_ptrs
[i
];
653 mapinfo
->bp
[i
]->b_cmd
= cmd
[i
];
655 /* map the user buffer into kernel memory */
656 if (vmapbuf(mapinfo
->bp
[i
], *data_ptrs
[i
], lengths
[i
]) < 0) {
657 kprintf("cam_periph_mapmem: error, "
658 "address %p, length %lu isn't "
659 "user accessible any more\n",
660 (void *)*data_ptrs
[i
],
662 for (j
= 0; j
< i
; ++j
) {
663 *data_ptrs
[j
] = mapinfo
->saved_ptrs
[j
];
664 vunmapbuf(mapinfo
->bp
[j
]);
665 relpbuf(mapinfo
->bp
[j
], NULL
);
667 mapinfo
->num_bufs_used
-= i
;
671 /* set our pointer to the new mapped area */
672 *data_ptrs
[i
] = mapinfo
->bp
[i
]->b_data
;
674 mapinfo
->num_bufs_used
++;
681 * Unmap memory segments mapped into kernel virtual address space by
682 * cam_periph_mapmem().
685 cam_periph_unmapmem(union ccb
*ccb
, struct cam_periph_map_info
*mapinfo
)
688 u_int8_t
**data_ptrs
[CAM_PERIPH_MAXMAPS
];
690 if (mapinfo
->num_bufs_used
<= 0) {
691 /* allow ourselves to be swapped once again */
695 switch (ccb
->ccb_h
.func_code
) {
697 numbufs
= min(mapinfo
->num_bufs_used
, 2);
700 data_ptrs
[0] = (u_int8_t
**)&ccb
->cdm
.matches
;
702 data_ptrs
[0] = (u_int8_t
**)&ccb
->cdm
.patterns
;
703 data_ptrs
[1] = (u_int8_t
**)&ccb
->cdm
.matches
;
707 case XPT_CONT_TARGET_IO
:
708 data_ptrs
[0] = &ccb
->csio
.data_ptr
;
709 numbufs
= min(mapinfo
->num_bufs_used
, 1);
712 /* allow ourselves to be swapped once again */
714 break; /* NOTREACHED */
717 for (i
= 0; i
< numbufs
; i
++) {
718 /* Set the user's pointer back to the original value */
719 *data_ptrs
[i
] = mapinfo
->saved_ptrs
[i
];
721 /* unmap the buffer */
722 vunmapbuf(mapinfo
->bp
[i
]);
724 /* release the buffer */
725 relpbuf(mapinfo
->bp
[i
], NULL
);
728 /* allow ourselves to be swapped once again */
732 cam_periph_getccb(struct cam_periph
*periph
, u_int32_t priority
)
734 struct ccb_hdr
*ccb_h
;
736 sim_lock_assert_owned(periph
->sim
->lock
);
737 CAM_DEBUG(periph
->path
, CAM_DEBUG_TRACE
, ("entering cdgetccb\n"));
739 while (SLIST_FIRST(&periph
->ccb_list
) == NULL
) {
740 if (periph
->immediate_priority
> priority
)
741 periph
->immediate_priority
= priority
;
742 xpt_schedule(periph
, priority
);
743 if ((SLIST_FIRST(&periph
->ccb_list
) != NULL
)
744 && (SLIST_FIRST(&periph
->ccb_list
)->pinfo
.priority
== priority
))
746 sim_lock_sleep(&periph
->ccb_list
, 0, "cgticb", 0,
750 ccb_h
= SLIST_FIRST(&periph
->ccb_list
);
751 SLIST_REMOVE_HEAD(&periph
->ccb_list
, periph_links
.sle
);
752 return ((union ccb
*)ccb_h
);
756 cam_periph_ccbwait(union ccb
*ccb
)
760 sim
= xpt_path_sim(ccb
->ccb_h
.path
);
761 if ((ccb
->ccb_h
.pinfo
.index
!= CAM_UNQUEUED_INDEX
)
762 || ((ccb
->ccb_h
.status
& CAM_STATUS_MASK
) == CAM_REQ_INPROG
))
763 sim_lock_sleep(&ccb
->ccb_h
.cbfcnp
, 0, "cbwait", 0, sim
->lock
);
767 cam_periph_ioctl(struct cam_periph
*periph
, int cmd
, caddr_t addr
,
768 int (*error_routine
)(union ccb
*ccb
,
770 u_int32_t sense_flags
))
780 ccb
= cam_periph_getccb(periph
, /* priority */ 1);
781 xpt_setup_ccb(&ccb
->ccb_h
,
784 ccb
->ccb_h
.func_code
= XPT_GDEVLIST
;
787 * Basically, the point of this is that we go through
788 * getting the list of devices, until we find a passthrough
789 * device. In the current version of the CAM code, the
790 * only way to determine what type of device we're dealing
791 * with is by its name.
795 ccb
->cgdl
.status
= CAM_GDEVLIST_MORE_DEVS
;
796 while (ccb
->cgdl
.status
== CAM_GDEVLIST_MORE_DEVS
) {
798 /* we want the next device in the list */
800 if (strncmp(ccb
->cgdl
.periph_name
,
806 if ((ccb
->cgdl
.status
== CAM_GDEVLIST_LAST_DEVICE
) &&
808 ccb
->cgdl
.periph_name
[0] = '\0';
809 ccb
->cgdl
.unit_number
= 0;
814 /* copy the result back out */
815 bcopy(ccb
, addr
, sizeof(union ccb
));
817 /* and release the ccb */
818 xpt_release_ccb(ccb
);
829 cam_periph_runccb(union ccb
*ccb
,
830 int (*error_routine
)(union ccb
*ccb
,
832 u_int32_t sense_flags
),
833 cam_flags camflags
, u_int32_t sense_flags
,
840 sim
= xpt_path_sim(ccb
->ccb_h
.path
);
841 sim_lock_assert_owned(sim
->lock
);
844 * If the user has supplied a stats structure, and if we understand
845 * this particular type of ccb, record the transaction start.
847 if ((ds
!= NULL
) && (ccb
->ccb_h
.func_code
== XPT_SCSI_IO
))
848 devstat_start_transaction(ds
);
853 cam_periph_ccbwait(ccb
);
854 if ((ccb
->ccb_h
.status
& CAM_STATUS_MASK
) == CAM_REQ_CMP
)
856 else if (error_routine
!= NULL
)
857 error
= (*error_routine
)(ccb
, camflags
, sense_flags
);
861 } while (error
== ERESTART
);
863 if ((ccb
->ccb_h
.status
& CAM_DEV_QFRZN
) != 0)
864 cam_release_devq(ccb
->ccb_h
.path
,
868 /* getcount_only */ FALSE
);
870 if ((ds
!= NULL
) && (ccb
->ccb_h
.func_code
== XPT_SCSI_IO
))
871 devstat_end_transaction(ds
,
873 ccb
->csio
.tag_action
& 0xf,
874 ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) ==
875 CAM_DIR_NONE
) ? DEVSTAT_NO_DATA
:
876 (ccb
->ccb_h
.flags
& CAM_DIR_OUT
) ?
884 cam_freeze_devq(struct cam_path
*path
)
886 struct ccb_hdr ccb_h
;
888 xpt_setup_ccb(&ccb_h
, path
, /*priority*/1);
889 ccb_h
.func_code
= XPT_NOOP
;
890 ccb_h
.flags
= CAM_DEV_QFREEZE
;
891 xpt_action((union ccb
*)&ccb_h
);
895 cam_release_devq(struct cam_path
*path
, u_int32_t relsim_flags
,
896 u_int32_t openings
, u_int32_t timeout
,
899 struct ccb_relsim crs
;
901 xpt_setup_ccb(&crs
.ccb_h
, path
,
903 crs
.ccb_h
.func_code
= XPT_REL_SIMQ
;
904 crs
.ccb_h
.flags
= getcount_only
? CAM_DEV_QFREEZE
: 0;
905 crs
.release_flags
= relsim_flags
;
906 crs
.openings
= openings
;
907 crs
.release_timeout
= timeout
;
908 xpt_action((union ccb
*)&crs
);
909 return (crs
.qfrozen_cnt
);
/*
 * Error-recovery code stashes a pointer to the original (saved) CCB in
 * the ccb_h ppriv_ptr0 peripheral-private field under this alias.
 */
912 #define saved_ccb_ptr ppriv_ptr0
914 camperiphdone(struct cam_periph
*periph
, union ccb
*done_ccb
)
916 union ccb
*saved_ccb
;
920 struct scsi_start_stop_unit
*scsi_cmd
;
921 u_int32_t relsim_flags
, timeout
;
922 u_int32_t qfrozen_cnt
;
925 xpt_done_ccb
= FALSE
;
926 status
= done_ccb
->ccb_h
.status
;
927 frozen
= (status
& CAM_DEV_QFRZN
) != 0;
928 sense
= (status
& CAM_AUTOSNS_VALID
) != 0;
929 status
&= CAM_STATUS_MASK
;
933 saved_ccb
= (union ccb
*)done_ccb
->ccb_h
.saved_ccb_ptr
;
936 * Unfreeze the queue once if it is already frozen..
939 qfrozen_cnt
= cam_release_devq(done_ccb
->ccb_h
.path
,
950 * If we have successfully taken a device from the not
951 * ready to ready state, re-scan the device and re-get
952 * the inquiry information. Many devices (mostly disks)
953 * don't properly report their inquiry information unless
956 * If we manually retrieved sense into a CCB and got
957 * something other than "NO SENSE" send the updated CCB
958 * back to the client via xpt_done() to be processed via
959 * the error recovery code again.
961 if (done_ccb
->ccb_h
.func_code
== XPT_SCSI_IO
) {
962 scsi_cmd
= (struct scsi_start_stop_unit
*)
963 &done_ccb
->csio
.cdb_io
.cdb_bytes
;
965 if (scsi_cmd
->opcode
== START_STOP_UNIT
)
966 xpt_async(AC_INQ_CHANGED
,
967 done_ccb
->ccb_h
.path
, NULL
);
968 if (scsi_cmd
->opcode
== REQUEST_SENSE
) {
971 sense_key
= saved_ccb
->csio
.sense_data
.flags
;
972 sense_key
&= SSD_KEY
;
973 if (sense_key
!= SSD_KEY_NO_SENSE
) {
974 saved_ccb
->ccb_h
.status
|=
977 xpt_print(saved_ccb
->ccb_h
.path
,
978 "Recovered Sense\n");
979 scsi_sense_print(&saved_ccb
->csio
);
980 cam_error_print(saved_ccb
, CAM_ESF_ALL
,
987 bcopy(done_ccb
->ccb_h
.saved_ccb_ptr
, done_ccb
,
990 periph
->flags
&= ~CAM_PERIPH_RECOVERY_INPROG
;
992 if (xpt_done_ccb
== FALSE
)
993 xpt_action(done_ccb
);
997 case CAM_SCSI_STATUS_ERROR
:
998 scsi_cmd
= (struct scsi_start_stop_unit
*)
999 &done_ccb
->csio
.cdb_io
.cdb_bytes
;
1001 struct ccb_getdev cgd
;
1002 struct scsi_sense_data
*sense
;
1003 int error_code
, sense_key
, asc
, ascq
;
1004 scsi_sense_action err_action
;
1006 sense
= &done_ccb
->csio
.sense_data
;
1007 scsi_extract_sense(sense
, &error_code
,
1008 &sense_key
, &asc
, &ascq
);
1011 * Grab the inquiry data for this device.
1013 xpt_setup_ccb(&cgd
.ccb_h
, done_ccb
->ccb_h
.path
,
1015 cgd
.ccb_h
.func_code
= XPT_GDEV_TYPE
;
1016 xpt_action((union ccb
*)&cgd
);
1017 err_action
= scsi_error_action(&done_ccb
->csio
,
1021 * If the error is "invalid field in CDB",
1022 * and the load/eject flag is set, turn the
1023 * flag off and try again. This is just in
1024 * case the drive in question barfs on the
1025 * load eject flag. The CAM code should set
1026 * the load/eject flag by default for
1031 * Should we check to see what the specific
1032 * scsi status is?? Or does it not matter
1033 * since we already know that there was an
1034 * error, and we know what the specific
1035 * error code was, and we know what the
1038 if ((scsi_cmd
->opcode
== START_STOP_UNIT
) &&
1039 ((scsi_cmd
->how
& SSS_LOEJ
) != 0) &&
1040 (asc
== 0x24) && (ascq
== 0x00) &&
1041 (done_ccb
->ccb_h
.retry_count
> 0)) {
1043 scsi_cmd
->how
&= ~SSS_LOEJ
;
1045 xpt_action(done_ccb
);
1047 } else if ((done_ccb
->ccb_h
.retry_count
> 1)
1048 && ((err_action
& SS_MASK
) != SS_FAIL
)) {
1051 * In this case, the error recovery
1052 * command failed, but we've got
1053 * some retries left on it. Give
1054 * it another try unless this is an
1055 * unretryable error.
1058 /* set the timeout to .5 sec */
1060 RELSIM_RELEASE_AFTER_TIMEOUT
;
1063 xpt_action(done_ccb
);
1069 * Perform the final retry with the original
1070 * CCB so that final error processing is
1071 * performed by the owner of the CCB.
1073 bcopy(done_ccb
->ccb_h
.saved_ccb_ptr
,
1074 done_ccb
, sizeof(union ccb
));
1076 periph
->flags
&= ~CAM_PERIPH_RECOVERY_INPROG
;
1078 xpt_action(done_ccb
);
1082 * Eh?? The command failed, but we don't
1083 * have any sense. What's up with that?
1084 * Fire the CCB again to return it to the
1087 bcopy(done_ccb
->ccb_h
.saved_ccb_ptr
,
1088 done_ccb
, sizeof(union ccb
));
1090 periph
->flags
&= ~CAM_PERIPH_RECOVERY_INPROG
;
1092 xpt_action(done_ccb
);
1097 bcopy(done_ccb
->ccb_h
.saved_ccb_ptr
, done_ccb
,
1100 periph
->flags
&= ~CAM_PERIPH_RECOVERY_INPROG
;
1102 xpt_action(done_ccb
);
1107 /* decrement the retry count */
1109 * XXX This isn't appropriate in all cases. Restructure,
1110 * so that the retry count is only decremented on an
1111 * actual retry. Remeber that the orignal ccb had its
1112 * retry count dropped before entering recovery, so
1113 * doing it again is a bug.
1115 if (done_ccb
->ccb_h
.retry_count
> 0)
1116 done_ccb
->ccb_h
.retry_count
--;
1118 qfrozen_cnt
= cam_release_devq(done_ccb
->ccb_h
.path
,
1119 /*relsim_flags*/relsim_flags
,
1122 /*getcount_only*/0);
1123 if (xpt_done_ccb
== TRUE
)
1124 (*done_ccb
->ccb_h
.cbfcnp
)(periph
, done_ccb
);
1128 * Generic Async Event handler. Peripheral drivers usually
1129 * filter out the events that require personal attention,
1130 * and leave the rest to this function.
1133 cam_periph_async(struct cam_periph
*periph
, u_int32_t code
,
1134 struct cam_path
*path
, void *arg
)
1137 case AC_LOST_DEVICE
:
1138 cam_periph_invalidate(periph
);
1143 cam_periph_bus_settle(periph
, scsi_delay
);
1152 cam_periph_bus_settle(struct cam_periph
*periph
, u_int bus_settle
)
1154 struct ccb_getdevstats cgds
;
1156 xpt_setup_ccb(&cgds
.ccb_h
, periph
->path
, /*priority*/1);
1157 cgds
.ccb_h
.func_code
= XPT_GDEV_STATS
;
1158 xpt_action((union ccb
*)&cgds
);
1159 cam_periph_freeze_after_event(periph
, &cgds
.last_reset
, bus_settle
);
1163 cam_periph_freeze_after_event(struct cam_periph
*periph
,
1164 struct timeval
* event_time
, u_int duration_ms
)
1166 struct timeval delta
;
1167 struct timeval duration_tv
;
1169 microuptime(&delta
);
1170 timevalsub(&delta
, event_time
);
1171 duration_tv
.tv_sec
= duration_ms
/ 1000;
1172 duration_tv
.tv_usec
= (duration_ms
% 1000) * 1000;
1173 if (timevalcmp(&delta
, &duration_tv
, <)) {
1174 timevalsub(&duration_tv
, &delta
);
1176 duration_ms
= duration_tv
.tv_sec
* 1000;
1177 duration_ms
+= duration_tv
.tv_usec
/ 1000;
1178 cam_freeze_devq(periph
->path
);
1179 cam_release_devq(periph
->path
,
1180 RELSIM_RELEASE_AFTER_TIMEOUT
,
1182 /*timeout*/duration_ms
,
1183 /*getcount_only*/0);
1189 camperiphscsistatuserror(union ccb
*ccb
, cam_flags camflags
,
1190 u_int32_t sense_flags
, union ccb
*save_ccb
,
1191 int *openings
, u_int32_t
*relsim_flags
,
1196 switch (ccb
->csio
.scsi_status
) {
1197 case SCSI_STATUS_OK
:
1198 case SCSI_STATUS_COND_MET
:
1199 case SCSI_STATUS_INTERMED
:
1200 case SCSI_STATUS_INTERMED_COND_MET
:
1203 case SCSI_STATUS_CMD_TERMINATED
:
1204 case SCSI_STATUS_CHECK_COND
:
1205 error
= camperiphscsisenseerror(ccb
,
1213 case SCSI_STATUS_QUEUE_FULL
:
1216 struct ccb_getdevstats cgds
;
1219 * First off, find out what the current
1220 * transaction counts are.
1222 xpt_setup_ccb(&cgds
.ccb_h
,
1225 cgds
.ccb_h
.func_code
= XPT_GDEV_STATS
;
1226 xpt_action((union ccb
*)&cgds
);
1229 * If we were the only transaction active, treat
1230 * the QUEUE FULL as if it were a BUSY condition.
1232 if (cgds
.dev_active
!= 0) {
1236 * Reduce the number of openings to
1237 * be 1 less than the amount it took
1238 * to get a queue full bounded by the
1239 * minimum allowed tag count for this
1242 total_openings
= cgds
.dev_active
+ cgds
.dev_openings
;
1243 *openings
= cgds
.dev_active
;
1244 if (*openings
< cgds
.mintags
)
1245 *openings
= cgds
.mintags
;
1246 if (*openings
< total_openings
)
1247 *relsim_flags
= RELSIM_ADJUST_OPENINGS
;
1250 * Some devices report queue full for
1251 * temporary resource shortages. For
1252 * this reason, we allow a minimum
1253 * tag count to be entered via a
1254 * quirk entry to prevent the queue
1255 * count on these devices from falling
1256 * to a pessimisticly low value. We
1257 * still wait for the next successful
1258 * completion, however, before queueing
1259 * more transactions to the device.
1261 *relsim_flags
= RELSIM_RELEASE_AFTER_CMDCMPLT
;
1266 xpt_print(ccb
->ccb_h
.path
, "Queue Full\n");
1272 case SCSI_STATUS_BUSY
:
1274 * Restart the queue after either another
1275 * command completes or a 1 second timeout.
1278 xpt_print(ccb
->ccb_h
.path
, "Device Busy\n");
1280 if (ccb
->ccb_h
.retry_count
> 0) {
1281 ccb
->ccb_h
.retry_count
--;
1283 *relsim_flags
= RELSIM_RELEASE_AFTER_TIMEOUT
1284 | RELSIM_RELEASE_AFTER_CMDCMPLT
;
1290 case SCSI_STATUS_RESERV_CONFLICT
:
1291 xpt_print(ccb
->ccb_h
.path
, "Reservation Conflict\n");
1295 xpt_print(ccb
->ccb_h
.path
, "SCSI Status 0x%x\n",
1296 ccb
->csio
.scsi_status
);
1304 camperiphscsisenseerror(union ccb
*ccb
, cam_flags camflags
,
1305 u_int32_t sense_flags
, union ccb
*save_ccb
,
1306 int *openings
, u_int32_t
*relsim_flags
,
1309 struct cam_periph
*periph
;
1312 periph
= xpt_path_periph(ccb
->ccb_h
.path
);
1313 if (periph
->flags
& CAM_PERIPH_RECOVERY_INPROG
) {
1316 * If error recovery is already in progress, don't attempt
1317 * to process this error, but requeue it unconditionally
1318 * and attempt to process it once error recovery has
1319 * completed. This failed command is probably related to
1320 * the error that caused the currently active error recovery
1321 * action so our current recovery efforts should also
1322 * address this command. Be aware that the error recovery
1323 * code assumes that only one recovery action is in progress
1324 * on a particular peripheral instance at any given time
1325 * (e.g. only one saved CCB for error recovery) so it is
1326 * imperitive that we don't violate this assumption.
1330 scsi_sense_action err_action
;
1331 struct ccb_getdev cgd
;
1332 const char *action_string
;
1333 union ccb
* print_ccb
;
1335 /* A description of the error recovery action performed */
1336 action_string
= NULL
;
1339 * The location of the orignal ccb
1340 * for sense printing purposes.
1345 * Grab the inquiry data for this device.
1347 xpt_setup_ccb(&cgd
.ccb_h
, ccb
->ccb_h
.path
, /*priority*/ 1);
1348 cgd
.ccb_h
.func_code
= XPT_GDEV_TYPE
;
1349 xpt_action((union ccb
*)&cgd
);
1351 if ((ccb
->ccb_h
.status
& CAM_AUTOSNS_VALID
) != 0)
1352 err_action
= scsi_error_action(&ccb
->csio
,
1355 else if ((ccb
->ccb_h
.flags
& CAM_DIS_AUTOSENSE
) == 0)
1356 err_action
= SS_REQSENSE
;
1358 err_action
= SS_RETRY
|SSQ_DECREMENT_COUNT
|EIO
;
1360 error
= err_action
& SS_ERRMASK
;
1363 * If the recovery action will consume a retry,
1364 * make sure we actually have retries available.
1366 if ((err_action
& SSQ_DECREMENT_COUNT
) != 0) {
1367 if (ccb
->ccb_h
.retry_count
> 0)
1368 ccb
->ccb_h
.retry_count
--;
1370 action_string
= "Retries Exhausted";
1371 goto sense_error_done
;
1375 if ((err_action
& SS_MASK
) >= SS_START
) {
1377 * Do common portions of commands that
1378 * use recovery CCBs.
1380 if (save_ccb
== NULL
) {
1381 action_string
= "No recovery CCB supplied";
1382 goto sense_error_done
;
1384 bcopy(ccb
, save_ccb
, sizeof(*save_ccb
));
1385 print_ccb
= save_ccb
;
1386 periph
->flags
|= CAM_PERIPH_RECOVERY_INPROG
;
1389 switch (err_action
& SS_MASK
) {
1391 action_string
= "No Recovery Action Needed";
1395 action_string
= "Retrying Command (per Sense Data)";
1399 action_string
= "Unretryable error";
1406 * Send a start unit command to the device, and
1407 * then retry the command.
1409 action_string
= "Attempting to Start Unit";
1412 * Check for removable media and set
1413 * load/eject flag appropriately.
1415 if (SID_IS_REMOVABLE(&cgd
.inq_data
))
1420 scsi_start_stop(&ccb
->csio
,
1434 * Send a Test Unit Ready to the device.
1435 * If the 'many' flag is set, we send 120
1436 * test unit ready commands, one every half
1437 * second. Otherwise, we just send one TUR.
1438 * We only want to do this if the retry
1439 * count has not been exhausted.
1443 if ((err_action
& SSQ_MANY
) != 0) {
1444 action_string
= "Polling device for readiness";
1447 action_string
= "Testing device for readiness";
1450 scsi_test_unit_ready(&ccb
->csio
,
1458 * Accomplish our 500ms delay by deferring
1459 * the release of our device queue appropriately.
1461 *relsim_flags
= RELSIM_RELEASE_AFTER_TIMEOUT
;
1468 * Send a Request Sense to the device. We
1469 * assume that we are in a contingent allegiance
1470 * condition so we do not tag this request.
1472 scsi_request_sense(&ccb
->csio
, /*retries*/1,
1474 &save_ccb
->csio
.sense_data
,
1475 sizeof(save_ccb
->csio
.sense_data
),
1476 CAM_TAG_ACTION_NONE
,
1477 /*sense_len*/SSD_FULL_SIZE
,
1482 panic("Unhandled error action %x", err_action
);
1485 if ((err_action
& SS_MASK
) >= SS_START
) {
1487 * Drop the priority to 0 so that the recovery
1488 * CCB is the first to execute. Freeze the queue
1489 * after this command is sent so that we can
1490 * restore the old csio and have it queued in
1491 * the proper order before we release normal
1492 * transactions to the device.
1494 ccb
->ccb_h
.pinfo
.priority
= 0;
1495 ccb
->ccb_h
.flags
|= CAM_DEV_QFREEZE
;
1496 ccb
->ccb_h
.saved_ccb_ptr
= save_ccb
;
1501 if ((err_action
& SSQ_PRINT_SENSE
) != 0
1502 && (ccb
->ccb_h
.status
& CAM_AUTOSNS_VALID
) != 0) {
1503 cam_error_print(print_ccb
, CAM_ESF_ALL
, CAM_EPF_ALL
);
1504 xpt_print_path(ccb
->ccb_h
.path
);
1506 scsi_sense_print(&print_ccb
->csio
);
1507 kprintf("%s\n", action_string
);
1514 * Generic error handler. Peripheral drivers usually filter
1515 * out the errors that they handle in a unique manner, then
1516 * call this function.
1519 cam_periph_error(union ccb
*ccb
, cam_flags camflags
,
1520 u_int32_t sense_flags
, union ccb
*save_ccb
)
1522 const char *action_string
;
1525 int error
, printed
= 0;
1527 u_int32_t relsim_flags
;
1528 u_int32_t timeout
= 0;
1530 action_string
= NULL
;
1531 status
= ccb
->ccb_h
.status
;
1532 frozen
= (status
& CAM_DEV_QFRZN
) != 0;
1533 status
&= CAM_STATUS_MASK
;
1534 openings
= relsim_flags
= 0;
1540 case CAM_SCSI_STATUS_ERROR
:
1541 error
= camperiphscsistatuserror(ccb
,
1549 case CAM_AUTOSENSE_FAIL
:
1550 xpt_print(ccb
->ccb_h
.path
, "AutoSense Failed\n");
1551 error
= EIO
; /* we have to kill the command */
1553 case CAM_REQ_CMP_ERR
:
1554 if (bootverbose
&& printed
== 0) {
1555 xpt_print(ccb
->ccb_h
.path
,
1556 "Request completed with CAM_REQ_CMP_ERR\n");
1560 case CAM_CMD_TIMEOUT
:
1561 if (bootverbose
&& printed
== 0) {
1562 xpt_print(ccb
->ccb_h
.path
, "Command timed out\n");
1566 case CAM_UNEXP_BUSFREE
:
1567 if (bootverbose
&& printed
== 0) {
1568 xpt_print(ccb
->ccb_h
.path
, "Unexpected Bus Free\n");
1572 case CAM_UNCOR_PARITY
:
1573 if (bootverbose
&& printed
== 0) {
1574 xpt_print(ccb
->ccb_h
.path
,
1575 "Uncorrected Parity Error\n");
1579 case CAM_DATA_RUN_ERR
:
1580 if (bootverbose
&& printed
== 0) {
1581 xpt_print(ccb
->ccb_h
.path
, "Data Overrun\n");
1584 error
= EIO
; /* we have to kill the command */
1585 /* decrement the number of retries */
1586 if (ccb
->ccb_h
.retry_count
> 0) {
1587 ccb
->ccb_h
.retry_count
--;
1590 action_string
= "Retries Exhausted";
1596 case CAM_MSG_REJECT_REC
:
1597 /* XXX Don't know that these are correct */
1600 case CAM_SEL_TIMEOUT
:
1602 struct cam_path
*newpath
;
1604 if ((camflags
& CAM_RETRY_SELTO
) != 0) {
1605 if (ccb
->ccb_h
.retry_count
> 0) {
1607 ccb
->ccb_h
.retry_count
--;
1609 if (bootverbose
&& printed
== 0) {
1610 xpt_print(ccb
->ccb_h
.path
,
1611 "Selection Timeout\n");
1616 * Wait a bit to give the device
1617 * time to recover before we try again.
1619 relsim_flags
= RELSIM_RELEASE_AFTER_TIMEOUT
;
1620 timeout
= periph_selto_delay
;
1625 /* Should we do more if we can't create the path?? */
1626 if (xpt_create_path(&newpath
, xpt_path_periph(ccb
->ccb_h
.path
),
1627 xpt_path_path_id(ccb
->ccb_h
.path
),
1628 xpt_path_target_id(ccb
->ccb_h
.path
),
1629 CAM_LUN_WILDCARD
) != CAM_REQ_CMP
)
1633 * Let peripheral drivers know that this device has gone
1636 xpt_async(AC_LOST_DEVICE
, newpath
, NULL
);
1637 xpt_free_path(newpath
);
1640 case CAM_REQ_INVALID
:
1641 case CAM_PATH_INVALID
:
1642 case CAM_DEV_NOT_THERE
:
1644 case CAM_PROVIDE_FAIL
:
1645 case CAM_REQ_TOO_BIG
:
1646 case CAM_LUN_INVALID
:
1647 case CAM_TID_INVALID
:
1650 case CAM_SCSI_BUS_RESET
:
1653 * Commands that repeatedly timeout and cause these
1654 * kinds of error recovery actions, should return
1655 * CAM_CMD_TIMEOUT, which allows us to safely assume
1656 * that this command was an innocent bystander to
1657 * these events and should be unconditionally
1660 if (bootverbose
&& printed
== 0) {
1661 xpt_print_path(ccb
->ccb_h
.path
);
1662 if (status
== CAM_BDR_SENT
)
1663 kprintf("Bus Device Reset sent\n");
1665 kprintf("Bus Reset issued\n");
1669 case CAM_REQUEUE_REQ
:
1670 /* Unconditional requeue */
1672 if (bootverbose
&& printed
== 0) {
1673 xpt_print(ccb
->ccb_h
.path
, "Request Requeued\n");
1677 case CAM_RESRC_UNAVAIL
:
1678 /* Wait a bit for the resource shortage to abate. */
1679 timeout
= periph_noresrc_delay
;
1683 /* Wait a bit for the busy condition to abate. */
1684 timeout
= periph_busy_delay
;
1686 relsim_flags
= RELSIM_RELEASE_AFTER_TIMEOUT
;
1689 /* decrement the number of retries */
1690 if (ccb
->ccb_h
.retry_count
> 0) {
1691 ccb
->ccb_h
.retry_count
--;
1693 if (bootverbose
&& printed
== 0) {
1694 xpt_print(ccb
->ccb_h
.path
, "CAM Status 0x%x\n",
1700 action_string
= "Retries Exhausted";
1705 /* Attempt a retry */
1706 if (error
== ERESTART
|| error
== 0) {
1708 ccb
->ccb_h
.status
&= ~CAM_DEV_QFRZN
;
1710 if (error
== ERESTART
) {
1711 action_string
= "Retrying Command";
1716 cam_release_devq(ccb
->ccb_h
.path
,
1720 /*getcount_only*/0);
1724 * If we have an error and are booting verbosely, whine
1725 * *unless* this was a non-retryable selection timeout.
1727 if (error
!= 0 && bootverbose
&&
1728 !(status
== CAM_SEL_TIMEOUT
&& (camflags
& CAM_RETRY_SELTO
) == 0)) {
1731 if (action_string
== NULL
)
1732 action_string
= "Unretryable Error";
1733 if (error
!= ERESTART
) {
1734 xpt_print(ccb
->ccb_h
.path
, "error %d\n", error
);
1736 xpt_print(ccb
->ccb_h
.path
, "%s\n", action_string
);