Sync CAM with FreeBSD using lockmgr locks instead of mutexes.
[dragonfly.git] / sys/bus/cam/cam_periph.c
blob 57804fdbc1a1aa8e5839c3d13d8e45c4b7bec8eb
/*
 * Common functions for CAM "type" (peripheral) drivers.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/cam/cam_periph.c,v 1.70 2008/02/12 11:07:33 raj Exp $
 * $DragonFly: src/sys/bus/cam/cam_periph.c,v 1.40 2008/05/18 20:30:19 pavalos Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/devicestat.h>
#include <sys/bus.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>

#include "cam.h"
#include "cam_ccb.h"
#include "cam_xpt_periph.h"
#include "cam_periph.h"
#include "cam_debug.h"
#include "cam_sim.h"

#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>
#include <bus/cam/scsi/scsi_pass.h>
static	u_int		camperiphnextunit(struct periph_driver *p_drv,
					  u_int newunit, int wired,
					  path_id_t pathid, target_id_t target,
					  lun_id_t lun);
static	u_int		camperiphunit(struct periph_driver *p_drv,
				      path_id_t pathid, target_id_t target,
				      lun_id_t lun);
static	void		camperiphdone(struct cam_periph *periph,
				      union ccb *done_ccb);
static	void		camperiphfree(struct cam_periph *periph);
static	int		camperiphscsistatuserror(union ccb *ccb,
						 cam_flags camflags,
						 u_int32_t sense_flags,
						 union ccb *save_ccb,
						 int *openings,
						 u_int32_t *relsim_flags,
						 u_int32_t *timeout);
static	int		camperiphscsisenseerror(union ccb *ccb,
						cam_flags camflags,
						u_int32_t sense_flags,
						union ccb *save_ccb,
						int *openings,
						u_int32_t *relsim_flags,
						u_int32_t *timeout);
static int nperiph_drivers;
struct periph_driver **periph_drivers;

MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");

static int periph_selto_delay = 1000;
TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
static int periph_noresrc_delay = 500;
TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
static int periph_busy_delay = 500;
TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
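
/*
 * Example (not part of this file): the three delays above are boot-time
 * tunables, so they can be adjusted from /boot/loader.conf without
 * recompiling.  Values below are illustrative, in milliseconds:
 *
 *	kern.cam.periph_selto_delay="2000"	# wait after selection timeout
 *	kern.cam.periph_noresrc_delay="500"	# wait on resource shortage
 *	kern.cam.periph_busy_delay="500"	# wait on SCSI BUSY status
 */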
void
periphdriver_register(void *data)
{
	struct periph_driver **newdrivers, **old;
	int ndrivers;

	ndrivers = nperiph_drivers + 2;
	newdrivers = kmalloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
			     M_WAITOK);
	if (periph_drivers)
		bcopy(periph_drivers, newdrivers,
		      sizeof(*newdrivers) * nperiph_drivers);
	newdrivers[nperiph_drivers] = (struct periph_driver *)data;
	newdrivers[nperiph_drivers + 1] = NULL;
	old = periph_drivers;
	periph_drivers = newdrivers;
	if (old)
		kfree(old, M_CAMPERIPH);
	nperiph_drivers++;
}
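
/*
 * Example (hypothetical "xx" driver, not part of this file): a peripheral
 * driver typically declares a struct periph_driver and registers it at
 * boot via the PERIPHDRIVER_DECLARE() macro from cam_periph.h, which
 * arranges for periphdriver_register() to be called with the structure.
 * A sketch:
 */
#if 0
static struct periph_driver xxdriver = {
	xxinit,			/* driver-wide init routine */
	"xx",			/* driver_name, e.g. "da" or "cd" */
	TAILQ_HEAD_INITIALIZER(xxdriver.units),
	0			/* generation */
};

PERIPHDRIVER_DECLARE(xx, xxdriver);
#endif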
cam_status
cam_periph_alloc(periph_ctor_t *periph_ctor,
		 periph_oninv_t *periph_oninvalidate,
		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
		 char *name, cam_periph_type type, struct cam_path *path,
		 ac_callback_t *ac_callback, ac_code code, void *arg)
{
	struct periph_driver **p_drv;
	struct cam_sim *sim;
	struct cam_periph *periph;
	struct cam_periph *cur_periph;
	path_id_t path_id;
	target_id_t target_id;
	lun_id_t lun_id;
	cam_status status;
	u_int init_level;

	init_level = 0;
	/*
	 * Handle Hot-Plug scenarios.  If there is already a peripheral
	 * of our type assigned to this path, we are likely waiting for
	 * final close on an old, invalidated, peripheral.  If this is
	 * the case, queue up a deferred call to the peripheral's async
	 * handler.  If it looks like a mistaken re-allocation, complain.
	 */
	if ((periph = cam_periph_find(path, name)) != NULL) {
		if ((periph->flags & CAM_PERIPH_INVALID) != 0
		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
			periph->deferred_callback = ac_callback;
			periph->deferred_ac = code;
			return (CAM_REQ_INPROG);
		} else {
			kprintf("cam_periph_alloc: attempt to re-allocate "
				"valid device %s%d rejected\n",
				periph->periph_name, periph->unit_number);
		}
		return (CAM_REQ_INVALID);
	}

	periph = kmalloc(sizeof(*periph), M_CAMPERIPH, M_INTWAIT | M_ZERO);

	init_level++;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, name) == 0)
			break;
	}
	xpt_unlock_buses();

	sim = xpt_path_sim(path);
	path_id = xpt_path_path_id(path);
	target_id = xpt_path_target_id(path);
	lun_id = xpt_path_lun_id(path);
	cam_init_pinfo(&periph->pinfo);
	periph->periph_start = periph_start;
	periph->periph_dtor = periph_dtor;
	periph->periph_oninval = periph_oninvalidate;
	periph->type = type;
	periph->periph_name = name;
	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
	periph->immediate_priority = CAM_PRIORITY_NONE;
	periph->refcount = 0;
	periph->sim = sim;
	SLIST_INIT(&periph->ccb_list);
	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP)
		goto failure;

	periph->path = path;
	init_level++;

	status = xpt_add_periph(periph);

	if (status != CAM_REQ_CMP)
		goto failure;

	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
	while (cur_periph != NULL
	    && cur_periph->unit_number < periph->unit_number)
		cur_periph = TAILQ_NEXT(cur_periph, unit_links);

	if (cur_periph != NULL)
		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
	else {
		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
		(*p_drv)->generation++;
	}

	init_level++;

	status = periph_ctor(periph, arg);

	if (status == CAM_REQ_CMP)
		init_level++;

failure:
	switch (init_level) {
	case 4:
		/* Initialized successfully */
		break;
	case 3:
		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
		xpt_remove_periph(periph);
		/* FALLTHROUGH */
	case 2:
		xpt_free_path(periph->path);
		/* FALLTHROUGH */
	case 1:
		kfree(periph, M_CAMPERIPH);
		/* FALLTHROUGH */
	case 0:
		/* No cleanup to perform. */
		break;
	default:
		panic("cam_periph_alloc: Unknown init level");
	}
	return(status);
}
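
/*
 * Example (hypothetical "xx" driver, not part of this file): drivers
 * usually call cam_periph_alloc() from their AC_FOUND_DEVICE async
 * handler when the transport layer reports a new device.  A sketch,
 * modeled on the pattern used by the disk-type drivers:
 */
#if 0
static void
xxasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct ccb_getdev *cgd = (struct ccb_getdev *)arg;
	cam_status status;

	if (code == AC_FOUND_DEVICE) {
		/* xxregister/xxoninvalidate/xxcleanup/xxstart are the
		 * driver's ctor/oninvalidate/dtor/start routines. */
		status = cam_periph_alloc(xxregister, xxoninvalidate,
					  xxcleanup, xxstart, "xx",
					  CAM_PERIPH_BIO, cgd->ccb_h.path,
					  xxasync, AC_FOUND_DEVICE, cgd);
		if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG)
			kprintf("xxasync: unable to attach new device\n");
	}
}
#endif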
/*
 * Find a peripheral structure with the specified path, target, lun,
 * and (optionally) type.  If the name is NULL, this function will return
 * the first peripheral driver that matches the specified path.
 */
struct cam_periph *
cam_periph_find(struct cam_path *path, char *name)
{
	struct periph_driver **p_drv;
	struct cam_periph *periph;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
			continue;

		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) == 0) {
				xpt_unlock_buses();
				return(periph);
			}
		}
		if (name != NULL) {
			xpt_unlock_buses();
			return(NULL);
		}
	}
	xpt_unlock_buses();
	return(NULL);
}
cam_status
cam_periph_acquire(struct cam_periph *periph)
{
	if (periph == NULL)
		return(CAM_REQ_CMP_ERR);

	xpt_lock_buses();
	periph->refcount++;
	xpt_unlock_buses();

	return(CAM_REQ_CMP);
}

void
cam_periph_release(struct cam_periph *periph)
{
	if (periph == NULL)
		return;

	xpt_lock_buses();
	if ((--periph->refcount == 0)
	 && (periph->flags & CAM_PERIPH_INVALID)) {
		camperiphfree(periph);
	}
	xpt_unlock_buses();
}
int
cam_periph_hold(struct cam_periph *periph, int flags)
{
	int error;

	sim_lock_assert_owned(periph->sim->lock);

	/*
	 * Increment the reference count on the peripheral
	 * while we wait for our lock attempt to succeed
	 * to ensure the peripheral doesn't disappear out
	 * from under us while we sleep.
	 */
	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return (ENXIO);

	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
		periph->flags |= CAM_PERIPH_LOCK_WANTED;
		if ((error = sim_lock_sleep(periph, flags, "caplck", 0,
					    periph->sim->lock)) != 0) {
			cam_periph_release(periph);
			return (error);
		}
	}

	periph->flags |= CAM_PERIPH_LOCKED;
	return (0);
}
void
cam_periph_unhold(struct cam_periph *periph)
{
	sim_lock_assert_owned(periph->sim->lock);

	periph->flags &= ~CAM_PERIPH_LOCKED;
	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
		wakeup(periph);
	}

	cam_periph_release(periph);
}
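
/*
 * Example (sketch only, not part of this file): open/close paths in a
 * hypothetical "xx" driver bracket their configuration work with
 * cam_periph_hold()/cam_periph_unhold(), so only one thread at a time
 * reconfigures the device; the embedded acquire/release pair keeps the
 * periph from being freed while the holder sleeps:
 */
#if 0
static int
xxopen(struct dev_open_args *ap)
{
	struct cam_periph *periph;
	int error;

	periph = xxlookup_periph(ap);	/* hypothetical softc lookup */
	if ((error = cam_periph_hold(periph, PCATCH)) != 0)
		return (error);
	/* ... probe media size, issue inquiry commands, etc. ... */
	cam_periph_unhold(periph);
	return (0);
}
#endif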
/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired"
 * the first time through the loop since after that we'll be looking at
 * unit numbers that did not match a wiring entry.
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct cam_periph *periph;
	char *periph_name, *strval;
	int i, val, dunit;
	const char *dname;

	periph_name = p_drv->driver_name;
	for (;;newunit++) {

		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			if (wired != 0) {
				xpt_print(periph->path, "Duplicate Wired "
				    "Device entry!\n");
				xpt_print(periph->path, "Second device (%s "
				    "device at scbus%d target %d lun %d) will "
				    "not be wired\n", periph_name, pathid,
				    target, lun);
				wired = 0;
			}
			continue;
		}
		if (wired)
			break;

		/*
		 * Don't match entries like "da 4" as a wired down
		 * device, but do match entries like "da 4 target 5"
		 * or even "da 4 scbus 1".
		 */
		i = -1;
		while ((i = resource_locate(i, periph_name)) != -1) {
			dname = resource_query_name(i);
			dunit = resource_query_unit(i);
			/* if no "target" and no specific scbus, skip */
			if (resource_int_value(dname, dunit, "target", &val) &&
			    (resource_string_value(dname, dunit, "at",&strval)||
			     strcmp(strval, "scbus") == 0))
				continue;
			if (newunit == dunit)
				break;
		}
		if (i == -1)
			break;
	}
	return (newunit);
}
static u_int
camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
	      target_id_t target, lun_id_t lun)
{
	u_int unit;
	int hit, i, val, dunit;
	const char *dname;
	char pathbuf[32], *strval, *periph_name;

	unit = 0;

	periph_name = p_drv->driver_name;
	ksnprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
	i = -1;
	for (hit = 0; (i = resource_locate(i, periph_name)) != -1; hit = 0) {
		dname = resource_query_name(i);
		dunit = resource_query_unit(i);
		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
			if (strcmp(strval, pathbuf) != 0)
				continue;
			hit++;
		}
		if (resource_int_value(dname, dunit, "target", &val) == 0) {
			if (val != target)
				continue;
			hit++;
		}
		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
			if (val != lun)
				continue;
			hit++;
		}
		if (hit != 0) {
			unit = dunit;
			break;
		}
	}

	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, /*wired*/hit, pathid,
				 target, lun);

	return (unit);
}
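
/*
 * Example (illustration only, not part of this file): the wiring
 * entries consulted above come from kernel resource hints.  With
 * entries such as the following in /boot/device.hints, unit "da4" is
 * reserved for the device at scbus1/target5/lun0, and camperiphunit()
 * returns 4 for that device:
 *
 *	hint.da.4.at="scbus1"
 *	hint.da.4.target="5"
 *	hint.da.4.lun="0"
 */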
void
cam_periph_invalidate(struct cam_periph *periph)
{
	/*
	 * We only call this routine the first time a peripheral is
	 * invalidated.
	 */
	if (((periph->flags & CAM_PERIPH_INVALID) == 0)
	 && (periph->periph_oninval != NULL))
		periph->periph_oninval(periph);

	periph->flags |= CAM_PERIPH_INVALID;
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;

	xpt_lock_buses();
	if (periph->refcount == 0)
		camperiphfree(periph);
	else if (periph->refcount < 0)
		kprintf("cam_invalidate_periph: refcount < 0!!\n");
	xpt_unlock_buses();
}
static void
camperiphfree(struct cam_periph *periph)
{
	struct periph_driver **p_drv;

	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}

	if (*p_drv == NULL) {
		kprintf("camperiphfree: attempt to free non-existent periph\n");
		return;
	}

	TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
	(*p_drv)->generation++;
	xpt_unlock_buses();

	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);
	xpt_remove_periph(periph);

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		union ccb ccb;
		void *arg;

		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE:
			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		case AC_PATH_REGISTERED:
			ccb.ccb_h.func_code = XPT_PATH_INQ;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		default:
			arg = NULL;
			break;
		}
		periph->deferred_callback(NULL, periph->deferred_ac,
					  periph->path, arg);
	}
	xpt_free_path(periph->path);
	kfree(periph, M_CAMPERIPH);
	xpt_lock_buses();
}
/*
 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This won't work on physical pointers, for now it's
 * up to the caller to check for that.  (XXX KDM -- should we do that here
 * instead?)  This also only works for up to MAXPHYS memory.  Since we use
 * buffers to map stuff in and out, we're limited to the buffer size.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i, j;
	buf_cmd_t cmd[CAM_PERIPH_MAXMAPS];
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	u_int32_t dirs[CAM_PERIPH_MAXMAPS];

	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			kprintf("cam_periph_mapmem: invalid match buffer "
				"length 0\n");
			return(EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);

		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	default:
		return(EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {
		/*
		 * It's kinda bogus, we need a R+W command.  For now the
		 * buffer needs some sort of command.  Use BUF_CMD_WRITE
		 * to indicate a write and BUF_CMD_READ to indicate R+W.
		 */
		cmd[i] = BUF_CMD_WRITE;

		/*
		 * The userland data pointer passed in may not be page
		 * aligned.  vmapbuf() truncates the address to a page
		 * boundary, so if the address isn't page aligned, we'll
		 * need enough space for the given transfer length, plus
		 * whatever extra space is necessary to make it to the page
		 * boundary.
		 */
		if ((lengths[i] +
		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > DFLTPHYS){
			kprintf("cam_periph_mapmem: attempt to map %lu bytes, "
				"which is greater than DFLTPHYS(%d)\n",
				(long)(lengths[i] +
				(((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
				DFLTPHYS);
			return(E2BIG);
		}

		if (dirs[i] & CAM_DIR_OUT) {
			if (!useracc(*data_ptrs[i], lengths[i],
				     VM_PROT_READ)) {
				kprintf("cam_periph_mapmem: error, "
					"address %p, length %lu isn't "
					"user accessible for READ\n",
					(void *)*data_ptrs[i],
					(u_long)lengths[i]);
				return(EACCES);
			}
		}

		if (dirs[i] & CAM_DIR_IN) {
			cmd[i] = BUF_CMD_READ;
			if (!useracc(*data_ptrs[i], lengths[i],
				     VM_PROT_WRITE)) {
				kprintf("cam_periph_mapmem: error, "
					"address %p, length %lu isn't "
					"user accessible for WRITE\n",
					(void *)*data_ptrs[i],
					(u_long)lengths[i]);

				return(EACCES);
			}
		}
	}

	for (i = 0; i < numbufs; i++) {
		/*
		 * Get the buffer.
		 */
		mapinfo->bp[i] = getpbuf(NULL);

		/* save the original user pointer */
		mapinfo->saved_ptrs[i] = *data_ptrs[i];

		/* set the flags */
		mapinfo->bp[i]->b_cmd = cmd[i];

		/* map the user buffer into kernel memory */
		if (vmapbuf(mapinfo->bp[i], *data_ptrs[i], lengths[i]) < 0) {
			kprintf("cam_periph_mapmem: error, "
				"address %p, length %lu isn't "
				"user accessible any more\n",
				(void *)*data_ptrs[i],
				(u_long)lengths[i]);
			for (j = 0; j < i; ++j) {
				*data_ptrs[j] = mapinfo->saved_ptrs[j];
				vunmapbuf(mapinfo->bp[j]);
				relpbuf(mapinfo->bp[j], NULL);
			}
			mapinfo->num_bufs_used -= i;
			return(EACCES);
		}

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = mapinfo->bp[i]->b_data;

		mapinfo->num_bufs_used++;
	}

	return(0);
}
/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* allow ourselves to be swapped once again */
		return;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		numbufs = min(mapinfo->num_bufs_used, 2);

		if (numbufs == 1) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	default:
		/* allow ourselves to be swapped once again */
		return;
		break; /* NOTREACHED */
	}

	for (i = 0; i < numbufs; i++) {
		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->saved_ptrs[i];

		/* unmap the buffer */
		vunmapbuf(mapinfo->bp[i]);

		/* release the buffer */
		relpbuf(mapinfo->bp[i], NULL);
	}

	/* allow ourselves to be swapped once again */
}
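
/*
 * Example (sketch only, not part of this file): a pass(4)-style ioctl
 * path pairs these helpers around CCB submission.  The CCB's data
 * pointer is rewritten to the kernel mapping by cam_periph_mapmem()
 * and must be restored via cam_periph_unmapmem() before the CCB is
 * handed back to userland.  xxsendccb() and xxerror() are hypothetical:
 */
#if 0
static int
xxsendccb(struct cam_periph *periph, union ccb *ccb)
{
	struct cam_periph_map_info mapinfo;
	int error;

	bzero(&mapinfo, sizeof(mapinfo));
	if ((error = cam_periph_mapmem(ccb, &mapinfo)) != 0)
		return (error);
	error = cam_periph_runccb(ccb, xxerror, CAM_RETRY_SELTO,
				  SF_RETRY_UA, NULL);
	cam_periph_unmapmem(ccb, &mapinfo);
	return (error);
}
#endif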
union ccb *
cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
{
	struct ccb_hdr *ccb_h;

	sim_lock_assert_owned(periph->sim->lock);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n"));

	while (SLIST_FIRST(&periph->ccb_list) == NULL) {
		if (periph->immediate_priority > priority)
			periph->immediate_priority = priority;
		xpt_schedule(periph, priority);
		if ((SLIST_FIRST(&periph->ccb_list) != NULL)
		 && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
			break;
		sim_lock_sleep(&periph->ccb_list, 0, "cgticb", 0,
			       periph->sim->lock);
	}

	ccb_h = SLIST_FIRST(&periph->ccb_list);
	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
	return ((union ccb *)ccb_h);
}
void
cam_periph_ccbwait(union ccb *ccb)
{
	struct cam_sim *sim;

	sim = xpt_path_sim(ccb->ccb_h.path);
	if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
	 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
		sim_lock_sleep(&ccb->ccb_h.cbfcnp, 0, "cbwait", 0, sim->lock);
}
int
cam_periph_ioctl(struct cam_periph *periph, int cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb,
				      cam_flags camflags,
				      u_int32_t sense_flags))
{
	union ccb *ccb;
	int error;
	int found;

	error = found = 0;

	switch(cmd){
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, /* priority */ 1);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices, until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {

				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name,
				    "pass", 4) == 0){
					found = 1;
					break;
				}
			}
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return(error);
}
int
cam_periph_runccb(union ccb *ccb,
		  int (*error_routine)(union ccb *ccb,
				       cam_flags camflags,
				       u_int32_t sense_flags),
		  cam_flags camflags, u_int32_t sense_flags,
		  struct devstat *ds)
{
	struct cam_sim *sim;
	int error;

	error = 0;
	sim = xpt_path_sim(ccb->ccb_h.path);
	sim_lock_assert_owned(sim->lock);

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_start_transaction(ds);

	xpt_action(ccb);

	do {
		cam_periph_ccbwait(ccb);
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
			error = 0;
		else if (error_routine != NULL)
			error = (*error_routine)(ccb, camflags, sense_flags);
		else
			error = 0;

	} while (error == ERESTART);

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(ccb->ccb_h.path,
				 /* relsim_flags */0,
				 /* openings */0,
				 /* timeout */0,
				 /* getcount_only */ FALSE);

	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_end_transaction(ds,
					ccb->csio.dxfer_len,
					ccb->csio.tag_action & 0xf,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE :
					DEVSTAT_READ);

	return(error);
}
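
/*
 * Example (sketch only, not part of this file): a typical synchronous
 * command from a hypothetical "xx" driver allocates a CCB, fills in a
 * SCSI command, and lets cam_periph_runccb() drive it to completion
 * with error recovery.  xxdone() is the driver's completion routine
 * (it must wakeup(&ccb->ccb_h.cbfcnp) for CCBs awaited this way), and
 * xxerror() is a wrapper around cam_periph_error() like the sketch
 * near the end of this file:
 */
#if 0
static int
xxtestready(struct cam_periph *periph)
{
	union ccb *ccb;
	int error;

	ccb = cam_periph_getccb(periph, /*priority*/1);
	scsi_test_unit_ready(&ccb->csio,
			     /*retries*/4,
			     /*cbfcnp*/xxdone,
			     MSG_SIMPLE_Q_TAG,
			     SSD_FULL_SIZE,
			     /*timeout*/5000);
	error = cam_periph_runccb(ccb, xxerror, CAM_RETRY_SELTO,
				  SF_RETRY_UA, NULL);
	xpt_release_ccb(ccb);
	return (error);
}
#endif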
void
cam_freeze_devq(struct cam_path *path)
{
	struct ccb_hdr ccb_h;

	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
	ccb_h.func_code = XPT_NOOP;
	ccb_h.flags = CAM_DEV_QFREEZE;
	xpt_action((union ccb *)&ccb_h);
}
u_int32_t
cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
		 u_int32_t openings, u_int32_t timeout,
		 int getcount_only)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path,
		      /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = timeout;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);
}
#define saved_ccb_ptr ppriv_ptr0
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb *saved_ccb;
	cam_status status;
	int frozen;
	int sense;
	struct scsi_start_stop_unit *scsi_cmd;
	u_int32_t relsim_flags, timeout;
	u_int32_t qfrozen_cnt;
	int xpt_done_ccb;

	xpt_done_ccb = FALSE;
	status = done_ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	sense = (status & CAM_AUTOSNS_VALID) != 0;
	status &= CAM_STATUS_MASK;

	timeout = 0;
	relsim_flags = 0;
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;

	/*
	 * Unfreeze the queue once if it is already frozen..
	 */
	if (frozen != 0) {
		qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
					       /*relsim_flags*/0,
					       /*openings*/0,
					       /*timeout*/0,
					       /*getcount_only*/0);
	}

	switch (status) {
	case CAM_REQ_CMP:
	{
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 *
		 * If we manually retrieved sense into a CCB and got
		 * something other than "NO SENSE" send the updated CCB
		 * back to the client via xpt_done() to be processed via
		 * the error recovery code again.
		 */
		if (done_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_cmd = (struct scsi_start_stop_unit *)
					&done_ccb->csio.cdb_io.cdb_bytes;

			if (scsi_cmd->opcode == START_STOP_UNIT)
				xpt_async(AC_INQ_CHANGED,
					  done_ccb->ccb_h.path, NULL);
			if (scsi_cmd->opcode == REQUEST_SENSE) {
				u_int sense_key;

				sense_key = saved_ccb->csio.sense_data.flags;
				sense_key &= SSD_KEY;
				if (sense_key != SSD_KEY_NO_SENSE) {
					saved_ccb->ccb_h.status |=
					    CAM_AUTOSNS_VALID;
#if 0
					xpt_print(saved_ccb->ccb_h.path,
					    "Recovered Sense\n");
					scsi_sense_print(&saved_ccb->csio);
					cam_error_print(saved_ccb, CAM_ESF_ALL,
							CAM_EPF_ALL);
#endif
					xpt_done_ccb = TRUE;
				}
			}
		}
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		if (xpt_done_ccb == FALSE)
			xpt_action(done_ccb);

		break;
	}
	case CAM_SCSI_STATUS_ERROR:
		scsi_cmd = (struct scsi_start_stop_unit *)
				&done_ccb->csio.cdb_io.cdb_bytes;
		if (sense != 0) {
			struct ccb_getdev cgd;
			struct scsi_sense_data *sense;
			int error_code, sense_key, asc, ascq;
			scsi_sense_action err_action;

			sense = &done_ccb->csio.sense_data;
			scsi_extract_sense(sense, &error_code,
					   &sense_key, &asc, &ascq);

			/*
			 * Grab the inquiry data for this device.
			 */
			xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
				      /*priority*/ 1);
			cgd.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action((union ccb *)&cgd);
			err_action = scsi_error_action(&done_ccb->csio,
						       &cgd.inq_data, 0);

			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */

			/* XXX KDM
			 * Should we check to see what the specific
			 * scsi status is??  Or does it not matter
			 * since we already know that there was an
			 * error, and we know what the specific
			 * error code was, and we know what the
			 * opcode is..
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			    (asc == 0x24) && (ascq == 0x00) &&
			    (done_ccb->ccb_h.retry_count > 0)) {

				scsi_cmd->how &= ~SSS_LOEJ;

				xpt_action(done_ccb);

			} else if ((done_ccb->ccb_h.retry_count > 1)
				&& ((err_action & SS_MASK) != SS_FAIL)) {

				/*
				 * In this case, the error recovery
				 * command failed, but we've got
				 * some retries left on it.  Give
				 * it another try unless this is an
				 * unretryable error.
				 */

				/* set the timeout to .5 sec */
				relsim_flags =
					RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = 500;

				xpt_action(done_ccb);

				break;

			} else {
				/*
				 * Perform the final retry with the original
				 * CCB so that final error processing is
				 * performed by the owner of the CCB.
				 */
				bcopy(done_ccb->ccb_h.saved_ccb_ptr,
				      done_ccb, sizeof(union ccb));

				periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

				xpt_action(done_ccb);
			}
		} else {
			/*
			 * Eh??  The command failed, but we don't
			 * have any sense.  What's up with that?
			 * Fire the CCB again to return it to the
			 * caller.
			 */
			bcopy(done_ccb->ccb_h.saved_ccb_ptr,
			      done_ccb, sizeof(union ccb));

			periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

			xpt_action(done_ccb);

		}
		break;
	default:
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		xpt_action(done_ccb);

		break;
	}

	/* decrement the retry count */
	/*
	 * XXX This isn't appropriate in all cases.  Restructure,
	 *     so that the retry count is only decremented on an
	 *     actual retry.  Remember that the original ccb had its
	 *     retry count dropped before entering recovery, so
	 *     doing it again is a bug.
	 */
	if (done_ccb->ccb_h.retry_count > 0)
		done_ccb->ccb_h.retry_count--;

	qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
				       /*relsim_flags*/relsim_flags,
				       /*openings*/0,
				       /*timeout*/timeout,
				       /*getcount_only*/0);
	if (xpt_done_ccb == TRUE)
		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
}
/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
 */
void
cam_periph_async(struct cam_periph *periph, u_int32_t code,
		 struct cam_path *path, void *arg)
{
	switch (code) {
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		break;
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		cam_periph_bus_settle(periph, scsi_delay);
		break;
	}
	default:
		break;
	}
}
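
/*
 * Example (illustration only, not part of this file): a driver's
 * register routine typically subscribes to the async events it cares
 * about and delegates the generic ones to cam_periph_async(), along
 * the lines of:
 *
 *	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE,
 *			   xxasync, periph, periph->path);
 *
 * where xxasync() handles driver-specific events and calls
 * cam_periph_async() for the rest.
 */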
void
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
{
	struct ccb_getdevstats cgds;

	xpt_setup_ccb(&cgds.ccb_h, periph->path, /*priority*/1);
	cgds.ccb_h.func_code = XPT_GDEV_STATS;
	xpt_action((union ccb *)&cgds);
	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
}
void
cam_periph_freeze_after_event(struct cam_periph *periph,
			      struct timeval* event_time, u_int duration_ms)
{
	struct timeval delta;
	struct timeval duration_tv;

	microuptime(&delta);
	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);

		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path);
		cam_release_devq(periph->path,
				 RELSIM_RELEASE_AFTER_TIMEOUT,
				 /*reduction*/0,
				 /*timeout*/duration_ms,
				 /*getcount_only*/0);
	}
}
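
/*
 * Example (illustration only): if a bus reset happened 300ms ago and
 * bus_settle is 2000ms, delta (300ms) is less than the requested
 * duration, so the device queue is frozen and a release is scheduled
 * for the remaining 2000 - 300 = 1700ms.  If the event is already
 * older than the requested duration, nothing is frozen at all.
 */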
static int
camperiphscsistatuserror(union ccb *ccb, cam_flags camflags,
			 u_int32_t sense_flags, union ccb *save_ccb,
			 int *openings, u_int32_t *relsim_flags,
			 u_int32_t *timeout)
{
	int error;

	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
	case SCSI_STATUS_COND_MET:
	case SCSI_STATUS_INTERMED:
	case SCSI_STATUS_INTERMED_COND_MET:
		error = 0;
		break;
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
		error = camperiphscsisenseerror(ccb,
						camflags,
						sense_flags,
						save_ccb,
						openings,
						relsim_flags,
						timeout);
		break;
	case SCSI_STATUS_QUEUE_FULL:
	{
		/* no decrement */
		struct ccb_getdevstats cgds;

		/*
		 * First off, find out what the current
		 * transaction counts are.
		 */
		xpt_setup_ccb(&cgds.ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		cgds.ccb_h.func_code = XPT_GDEV_STATS;
		xpt_action((union ccb *)&cgds);

		/*
		 * If we were the only transaction active, treat
		 * the QUEUE FULL as if it were a BUSY condition.
		 */
		if (cgds.dev_active != 0) {
			int total_openings;

			/*
			 * Reduce the number of openings to
			 * be 1 less than the amount it took
			 * to get a queue full bounded by the
			 * minimum allowed tag count for this
			 * device.
			 */
			total_openings = cgds.dev_active + cgds.dev_openings;
			*openings = cgds.dev_active;
			if (*openings < cgds.mintags)
				*openings = cgds.mintags;
			if (*openings < total_openings)
				*relsim_flags = RELSIM_ADJUST_OPENINGS;
			else {
				/*
				 * Some devices report queue full for
				 * temporary resource shortages.  For
				 * this reason, we allow a minimum
				 * tag count to be entered via a
				 * quirk entry to prevent the queue
				 * count on these devices from falling
				 * to a pessimistically low value.  We
				 * still wait for the next successful
				 * completion, however, before queueing
				 * more transactions to the device.
				 */
				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
			}
			*timeout = 0;
			error = ERESTART;
			if (bootverbose) {
				xpt_print(ccb->ccb_h.path, "Queue Full\n");
			}
			break;
		}
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
		/*
		 * Restart the queue after either another
		 * command completes or a 1 second timeout.
		 */
		if (bootverbose) {
			xpt_print(ccb->ccb_h.path, "Device Busy\n");
		}
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
			*timeout = 1000;
		} else {
			error = EIO;
		}
		break;
	case SCSI_STATUS_RESERV_CONFLICT:
		xpt_print(ccb->ccb_h.path, "Reservation Conflict\n");
		error = EIO;
		break;
	default:
		xpt_print(ccb->ccb_h.path, "SCSI Status 0x%x\n",
		    ccb->csio.scsi_status);
		error = EIO;
		break;
	}
	return (error);
}
static int
camperiphscsisenseerror(union ccb *ccb, cam_flags camflags,
			u_int32_t sense_flags, union ccb *save_ccb,
			int *openings, u_int32_t *relsim_flags,
			u_int32_t *timeout)
{
	struct cam_periph *periph;
	int error;

	periph = xpt_path_periph(ccb->ccb_h.path);
	if (periph->flags & CAM_PERIPH_RECOVERY_INPROG) {

		/*
		 * If error recovery is already in progress, don't attempt
		 * to process this error, but requeue it unconditionally
		 * and attempt to process it once error recovery has
		 * completed.  This failed command is probably related to
		 * the error that caused the currently active error recovery
		 * action so our current recovery efforts should also
		 * address this command.  Be aware that the error recovery
		 * code assumes that only one recovery action is in progress
		 * on a particular peripheral instance at any given time
		 * (e.g. only one saved CCB for error recovery) so it is
		 * imperative that we don't violate this assumption.
		 */
		error = ERESTART;
	} else {
		scsi_sense_action err_action;
		struct ccb_getdev cgd;
		const char *action_string;
		union ccb* print_ccb;

		/* A description of the error recovery action performed */
		action_string = NULL;

		/*
		 * The location of the original ccb
		 * for sense printing purposes.
		 */
		print_ccb = ccb;

		/*
		 * Grab the inquiry data for this device.
		 */
		xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, /*priority*/ 1);
		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)&cgd);

		if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
			err_action = scsi_error_action(&ccb->csio,
						       &cgd.inq_data,
						       sense_flags);
		else if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
			err_action = SS_REQSENSE;
		else
			err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;

		error = err_action & SS_ERRMASK;

		/*
		 * If the recovery action will consume a retry,
		 * make sure we actually have retries available.
		 */
		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
			if (ccb->ccb_h.retry_count > 0)
				ccb->ccb_h.retry_count--;
			else {
				action_string = "Retries Exhausted";
				goto sense_error_done;
			}
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Do common portions of commands that
			 * use recovery CCBs.
			 */
			if (save_ccb == NULL) {
				action_string = "No recovery CCB supplied";
				goto sense_error_done;
			}
			bcopy(ccb, save_ccb, sizeof(*save_ccb));
			print_ccb = save_ccb;
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
		}

		switch (err_action & SS_MASK) {
		case SS_NOP:
			action_string = "No Recovery Action Needed";
			error = 0;
			break;
		case SS_RETRY:
			action_string = "Retrying Command (per Sense Data)";
			error = ERESTART;
			break;
		case SS_FAIL:
			action_string = "Unretryable error";
			break;
		case SS_START:
		{
			int le;

			/*
			 * Send a start unit command to the device, and
			 * then retry the command.
			 */
			action_string = "Attempting to Start Unit";

			/*
			 * Check for removable media and set
			 * load/eject flag appropriately.
			 */
			if (SID_IS_REMOVABLE(&cgd.inq_data))
				le = TRUE;
			else
				le = FALSE;

			scsi_start_stop(&ccb->csio,
					/*retries*/1,
					camperiphdone,
					MSG_SIMPLE_Q_TAG,
					/*start*/TRUE,
					/*load/eject*/le,
					/*immediate*/FALSE,
					SSD_FULL_SIZE,
					/*timeout*/50000);
			break;
		}
		case SS_TUR:
		{
			/*
			 * Send a Test Unit Ready to the device.
			 * If the 'many' flag is set, we send 120
			 * test unit ready commands, one every half
			 * second.  Otherwise, we just send one TUR.
			 * We only want to do this if the retry
			 * count has not been exhausted.
			 */
			int retries;

			if ((err_action & SSQ_MANY) != 0) {
				action_string = "Polling device for readiness";
				retries = 120;
			} else {
				action_string = "Testing device for readiness";
				retries = 1;
			}
			scsi_test_unit_ready(&ccb->csio,
					     retries,
					     camperiphdone,
					     MSG_SIMPLE_Q_TAG,
					     SSD_FULL_SIZE,
					     /*timeout*/5000);

			/*
			 * Accomplish our 500ms delay by deferring
			 * the release of our device queue appropriately.
			 */
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
			*timeout = 500;
			break;
		}
		case SS_REQSENSE:
		{
			/*
			 * Send a Request Sense to the device.  We
			 * assume that we are in a contingent allegiance
			 * condition so we do not tag this request.
			 */
			scsi_request_sense(&ccb->csio, /*retries*/1,
					   camperiphdone,
					   &save_ccb->csio.sense_data,
					   sizeof(save_ccb->csio.sense_data),
					   CAM_TAG_ACTION_NONE,
					   /*sense_len*/SSD_FULL_SIZE,
					   /*timeout*/5000);
			break;
		}
		default:
			panic("Unhandled error action %x", err_action);
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Drop the priority to 0 so that the recovery
			 * CCB is the first to execute.  Freeze the queue
			 * after this command is sent so that we can
			 * restore the old csio and have it queued in
			 * the proper order before we release normal
			 * transactions to the device.
			 */
			ccb->ccb_h.pinfo.priority = 0;
			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			ccb->ccb_h.saved_ccb_ptr = save_ccb;
			error = ERESTART;
		}

sense_error_done:
		if ((err_action & SSQ_PRINT_SENSE) != 0
		 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0) {
			cam_error_print(print_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
			xpt_print_path(ccb->ccb_h.path);
			if (bootverbose)
				scsi_sense_print(&print_ccb->csio);
			kprintf("%s\n", action_string);
		}
	}
	return (error);
}
/*
 * Generic error handler.  Peripheral drivers usually filter
 * out the errors that they handle in a unique manner, then
 * call this function.
 */
int
cam_periph_error(union ccb *ccb, cam_flags camflags,
		 u_int32_t sense_flags, union ccb *save_ccb)
{
	const char *action_string;
	cam_status status;
	int frozen;
	int error, printed = 0;
	int openings;
	u_int32_t relsim_flags;
	u_int32_t timeout = 0;

	action_string = NULL;
	status = ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	status &= CAM_STATUS_MASK;
	openings = relsim_flags = 0;

	switch (status) {
	case CAM_REQ_CMP:
		error = 0;
		break;
	case CAM_SCSI_STATUS_ERROR:
		error = camperiphscsistatuserror(ccb,
						 camflags,
						 sense_flags,
						 save_ccb,
						 &openings,
						 &relsim_flags,
						 &timeout);
		break;
	case CAM_AUTOSENSE_FAIL:
		xpt_print(ccb->ccb_h.path, "AutoSense Failed\n");
		error = EIO;	/* we have to kill the command */
		break;
	case CAM_REQ_CMP_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
			    "Request completed with CAM_REQ_CMP_ERR\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_CMD_TIMEOUT:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Command timed out\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNEXP_BUSFREE:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Unexpected Bus Free\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNCOR_PARITY:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
			    "Uncorrected Parity Error\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_DATA_RUN_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Data Overrun\n");
			printed++;
		}
		error = EIO;	/* we have to kill the command */
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
		} else {
			action_string = "Retries Exhausted";
			error = EIO;
		}
		break;
	case CAM_UA_ABORT:
	case CAM_UA_TERMIO:
	case CAM_MSG_REJECT_REC:
		/* XXX Don't know that these are correct */
		error = EIO;
		break;
	case CAM_SEL_TIMEOUT:
	{
		struct cam_path *newpath;

		if ((camflags & CAM_RETRY_SELTO) != 0) {
			if (ccb->ccb_h.retry_count > 0) {

				ccb->ccb_h.retry_count--;
				error = ERESTART;
				if (bootverbose && printed == 0) {
					xpt_print(ccb->ccb_h.path,
					    "Selection Timeout\n");
					printed++;
				}

				/*
				 * Wait a bit to give the device
				 * time to recover before we try again.
				 */
				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = periph_selto_delay;
				break;
			}
		}
		error = ENXIO;
		/* Should we do more if we can't create the path?? */
		if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path),
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    CAM_LUN_WILDCARD) != CAM_REQ_CMP)
			break;

		/*
		 * Let peripheral drivers know that this device has gone
		 * away.
		 */
		xpt_async(AC_LOST_DEVICE, newpath, NULL);
		xpt_free_path(newpath);
		break;
	}
	case CAM_REQ_INVALID:
	case CAM_PATH_INVALID:
	case CAM_DEV_NOT_THERE:
	case CAM_NO_HBA:
	case CAM_PROVIDE_FAIL:
	case CAM_REQ_TOO_BIG:
	case CAM_LUN_INVALID:
	case CAM_TID_INVALID:
		error = EINVAL;
		break;
	case CAM_SCSI_BUS_RESET:
	case CAM_BDR_SENT:
		/*
		 * Commands that repeatedly timeout and cause these
		 * kinds of error recovery actions, should return
		 * CAM_CMD_TIMEOUT, which allows us to safely assume
		 * that this command was an innocent bystander to
		 * these events and should be unconditionally
		 * retried.
		 */
		if (bootverbose && printed == 0) {
			xpt_print_path(ccb->ccb_h.path);
			if (status == CAM_BDR_SENT)
				kprintf("Bus Device Reset sent\n");
			else
				kprintf("Bus Reset issued\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_REQUEUE_REQ:
		/* Unconditional requeue */
		error = ERESTART;
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Request Requeued\n");
			printed++;
		}
		break;
	case CAM_RESRC_UNAVAIL:
		/* Wait a bit for the resource shortage to abate. */
		timeout = periph_noresrc_delay;
		/* FALLTHROUGH */
	case CAM_BUSY:
		if (timeout == 0) {
			/* Wait a bit for the busy condition to abate. */
			timeout = periph_busy_delay;
		}
		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
		/* FALLTHROUGH */
	default:
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			if (bootverbose && printed == 0) {
				xpt_print(ccb->ccb_h.path, "CAM Status 0x%x\n",
				    status);
				printed++;
			}
		} else {
			error = EIO;
			action_string = "Retries Exhausted";
		}
		break;
	}

	/* Attempt a retry */
	if (error == ERESTART || error == 0) {
		if (frozen != 0)
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;

		if (error == ERESTART) {
			action_string = "Retrying Command";
			xpt_action(ccb);
		}

		if (frozen != 0)
			cam_release_devq(ccb->ccb_h.path,
					 relsim_flags,
					 openings,
					 timeout,
					 /*getcount_only*/0);
	}

	/*
	 * If we have an error and are booting verbosely, whine
	 * *unless* this was a non-retryable selection timeout.
	 */
	if (error != 0 && bootverbose &&
	    !(status == CAM_SEL_TIMEOUT && (camflags & CAM_RETRY_SELTO) == 0)) {

		if (action_string == NULL)
			action_string = "Unretryable Error";
		if (error != ERESTART) {
			xpt_print(ccb->ccb_h.path, "error %d\n", error);
		}
		xpt_print(ccb->ccb_h.path, "%s\n", action_string);
	}

	return (error);
}
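
/*
 * Example (hypothetical "xx" driver, not part of this file): peripheral
 * drivers normally wrap cam_periph_error() in a small routine that
 * handles driver-specific cases first and supplies a saved CCB for
 * recovery commands; the wrapper also matches the error_routine
 * signature expected by cam_periph_runccb().  A sketch:
 */
#if 0
static int
xxerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	struct xxsoftc *softc;		/* hypothetical per-device softc */
	struct cam_periph *periph;

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct xxsoftc *)periph->softc;

	/* driver-specific error filtering would go here */

	return (cam_periph_error(ccb, cam_flags, sense_flags,
				 &softc->saved_ccb));
}
#endif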