/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/cam/cam_xpt.c,v 1.80.2.18 2002/12/09 17:31:55 gibbs Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/devicestat.h>
#include <sys/interrupt.h>
#include <sys/sbuf.h>
#include <sys/taskqueue.h>
#include <sys/bus.h>
#include <sys/thread.h>
#include <sys/lock.h>
#include <sys/spinlock.h>

#include <sys/spinlock2.h>

#include <machine/clock.h>
#include <machine/stdarg.h>

#include "cam.h"
#include "cam_ccb.h"
#include "cam_periph.h"
#include "cam_sim.h"
#include "cam_xpt.h"
#include "cam_xpt_sim.h"
#include "cam_xpt_periph.h"
#include "cam_debug.h"

#include "scsi/scsi_all.h"
#include "scsi/scsi_message.h"
#include "scsi/scsi_pass.h"
#include <sys/kthread.h>
#include "opt_cam.h"
/* Data structures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");

/* Object for deferring XPT actions to a taskqueue */
struct xpt_task {
	struct task	task;
	void		*data1;
	uintptr_t	data2;
};
/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
	SLIST_ENTRY(async_node)	links;
	u_int32_t	event_enable;	/* Async Event enables */
	void		(*callback)(void *arg, u_int32_t code,
				    struct cam_path *path, void *args);
	void		*callback_arg;
};

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER	4
#endif
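
/*
 * Added commentary: CCBs whose header carries the CAM_HIGH_POWER flag are
 * throttled against this limit via xsoftc.num_highpower and the
 * highpowerq declared below; xpt_init() seeds num_highpower with
 * CAM_MAX_HIGHPOWER, and excess high-power requests wait on the queue
 * until an earlier one completes.
 */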
/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
	cam_pinfo pinfo;
	struct	  cam_ed *device;
};
/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
	TAILQ_ENTRY(cam_ed) links;
	struct	cam_ed_qinfo alloc_ccb_entry;
	struct	cam_ed_qinfo send_ccb_entry;
	struct	cam_et	 *target;
	struct	cam_sim  *sim;
	lun_id_t	 lun_id;
	struct	camq drvq;		/*
					 * Queue of type drivers wanting to do
					 * work on this device.
					 */
	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
	struct	async_list asyncs;	/* Async callback info for this B/T/L */
	struct	periph_list periphs;	/* All attached devices */
	u_int	generation;		/* Generation number */
	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
					/* Storage for the inquiry data */
	cam_proto	 protocol;
	u_int		 protocol_version;
	cam_xport	 transport;
	u_int		 transport_version;
	struct		 scsi_inquiry_data inq_data;
	u_int8_t	 inq_flags;	/*
					 * Current settings for inquiry flags.
					 * This allows us to override settings
					 * like disconnection and tagged
					 * queuing for a device.
					 */
	u_int8_t	 queue_flags;	/* Queue flags from the control page */
	u_int8_t	 serial_num_len;
	u_int8_t	*serial_num;
	u_int32_t	 qfrozen_cnt;
	u_int32_t	 flags;
#define CAM_DEV_UNCONFIGURED		0x01
#define CAM_DEV_REL_TIMEOUT_PENDING	0x02
#define CAM_DEV_REL_ON_COMPLETE		0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
#define CAM_DEV_TAG_AFTER_COUNT		0x20
#define CAM_DEV_INQUIRY_DATA_VALID	0x40
#define CAM_DEV_IN_DV			0x80
#define CAM_DEV_DV_HIT_BOTTOM		0x100
	u_int32_t	 tag_delay_count;
#define CAM_TAG_DELAY_COUNT		5
	u_int32_t	 tag_saved_openings;
	u_int32_t	 refcount;
	struct callout	 callout;
};
/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
	TAILQ_HEAD(, cam_ed) ed_entries;
	TAILQ_ENTRY(cam_et) links;
	struct	cam_eb	*bus;
	target_id_t	target_id;
	u_int32_t	refcount;
	u_int		generation;
	struct		timeval last_reset;	/* uptime of last reset */
};
/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
	TAILQ_HEAD(, cam_et) et_entries;
	TAILQ_ENTRY(cam_eb)  links;
	path_id_t	     path_id;
	struct cam_sim	     *sim;
	struct timeval	     last_reset;	/* uptime of last reset */
	u_int32_t	     flags;
#define CAM_EB_RUNQ_SCHEDULED	0x01
	u_int32_t	     refcount;
	u_int		     generation;
	int		     counted_to_config;	/* busses_to_config */
};
struct cam_path {
	struct cam_periph *periph;
	struct cam_eb	  *bus;
	struct cam_et	  *target;
	struct cam_ed	  *device;
};
struct xpt_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define CAM_QUIRK_NOLUNS	0x01
#define CAM_QUIRK_NOSERIAL	0x02
#define CAM_QUIRK_HILUNS	0x04
#define CAM_QUIRK_NOHILUNS	0x08
	u_int mintags;
	u_int maxtags;
};
static int cam_srch_hi = 0;
TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
    sysctl_cam_search_luns, "I",
    "allow search above LUN 7 for SCSI3 and greater devices");
#define CAM_SCSI2_MAXLUN	8
/*
 * If we're not quirked to search <= the first 8 luns
 * and we are either quirked to search above lun 8,
 * or we're > SCSI-2 and we've enabled hilun searching,
 * or we're > SCSI-2 and the last lun was a success,
 * we can look for luns above lun 8.
 */
#define CAN_SRCH_HI_SPARSE(dv)					\
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)		\
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)			\
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))

#define CAN_SRCH_HI_DENSE(dv)					\
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)		\
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)			\
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))
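
/*
 * Added commentary: a minimal sketch (not code from this file) of how the
 * LUN-scanning logic consults these macros.  The DENSE variant applies
 * while every LUN probed so far has responded; the SPARSE variant, which
 * additionally requires the kern.cam.cam_srch_hi tunable (or the HILUNS
 * quirk), applies when the previous LUN did not respond:
 *
 *	if (lun_responded ? CAN_SRCH_HI_DENSE(device)
 *			  : CAN_SRCH_HI_SPARSE(device))
 *		keep probing past LUN (CAM_SCSI2_MAXLUN - 1);
 *	else
 *		stop at the SCSI-2 limit of CAM_SCSI2_MAXLUN luns;
 */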
typedef enum {
	XPT_FLAG_OPEN = 0x01
} xpt_flags;

struct xpt_softc {
	xpt_flags		flags;
	u_int32_t		xpt_generation;

	/* number of high powered commands that can go through right now */
	STAILQ_HEAD(highpowerlist, ccb_hdr)	highpowerq;
	int			num_highpower;

	/* queue for handling async rescan requests. */
	TAILQ_HEAD(, ccb_hdr)	ccb_scanq;
	int			ccb_scanq_running;

	/* Registered busses */
	TAILQ_HEAD(,cam_eb)	xpt_busses;
	u_int			bus_generation;

	struct intr_config_hook	*xpt_config_hook;

	struct lock		xpt_topo_lock;
	struct lock		xpt_lock;
};
static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";
static struct xpt_quirk_entry xpt_quirk_table[] =
{
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Unfortunately, the Quantum Atlas III has the same
		 * problem as the Atlas II drives above.
		 * Reported by: "Johan Granlund" <johan@granlund.nu>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM39100TD-SW N1B0
		 *
		 * It's possible that Quantum will fix the problem in later
		 * firmware revisions.  If that happens, the quirk entry
		 * will need to be made specific to the firmware revisions
		 * with the problem.
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * 18 Gig Atlas III, same problem as the 9G version.
		 * Reported by: Andre Albsmeier
		 *		<andre.albsmeier@mchp.siemens.de>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM318000TD-S N491
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * Broken tagged queuing drive
		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
		 *         and: Martin Renters <martin@tdc.on.ca>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
		/*
		 * The Seagate Medalist Pro drives have very poor write
		 * performance with anything more than 2 tags.
		 *
		 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
		 * Drive:  <SEAGATE ST36530N 1444>
		 *
		 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
		 * Drive:  <SEAGATE ST34520W 1281>
		 *
		 * No one has actually reported that the 9G version
		 * (ST39140*) of the Medalist Pro has the same problem, but
		 * we're assuming that it does because the 4G and 6.5G
		 * versions of the drive are broken.
		 */
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  Write performance
		 * steadily drops off with more and more concurrent
		 * transactions.  Best sequential write performance with
		 * tagged queueing turned off and write caching turned on.
		 *
		 * PR:  kern/10398
		 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
		 * Drive:  DCAS-34330 w/ "S65A" firmware.
		 *
		 * The drive with the problem had the "S65A" firmware
		 * revision, and has also been reported (by Stephen J.
		 * Roznowski <sjr@home.net>) for a drive with the "S61A"
		 * firmware revision.
		 *
		 * Although no one has reported problems with the 2 gig
		 * version of the DCAS drive, the assumption is that it
		 * has the same problems as the 4 gig version.  Therefore
		 * this quirk entry disables tagged queueing for all
		 * DCAS drives.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* This does not support other than LUN 0 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/*
		 * Broken tagged queuing drive.
		 * Submitted by:
		 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
		 * in PR kern/9535
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Doesn't handle queue full condition correctly,
		 * so we need to limit maxtags to what the device
		 * can handle instead of determining this automatically.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
		/*quirks*/0, /*mintags*/2, /*maxtags*/32
	},
	{
		/* Really only one LUN */
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* I can't believe we need a quirk for DPT volumes. */
		{ T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
		CAM_QUIRK_NOLUNS,
		/*mintags*/0, /*maxtags*/255
	},
	{
		/*
		 * Many Sony CDROM drives don't like multi-LUN probing.
		 */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This drive doesn't like multiple LUN probing.
		 * Submitted by:  Parag Patel <parag@cgt.com>
		 */
		{ T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		{ T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * The 8200 doesn't like multi-lun probing, and probably
		 * doesn't like serial number requests either.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"EXB-8200*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Let's try the same as above, but for a drive that says
		 * it's an IPL-6860 but is actually an EXB 8200.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"IPL-6860*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These Hitachi drives don't like multi-lun probing.
		 * The PR submitter has a DK319H, but says that the Linux
		 * kernel has a similar work-around for the DK312 and DK314,
		 * so all DK31* drives are quirked here.
		 * PR:            misc/18793
		 * Submitted by:  Paul Haddad <paul@pth.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/*
		 * The Hitachi CJ series with J8A8 firmware apparently has
		 * problems with tagged commands.
		 * PR: 23536
		 * Reported by: amagai@nue.org
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These are the large storage arrays.
		 * Submitted by:  William Carrel <william.carrel@infospace.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
		CAM_QUIRK_HILUNS, 2, 1024
	},
	{
		/*
		 * This old revision of the TDC3600 is also SCSI-1, and
		 * hangs upon serial number probing.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
			" TDC 3600", "U07:"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
			"CP150", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
			"96X2*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* TeraSolutions special settings for TRC-22 RAID */
		{ T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
		/*quirks*/0, /*mintags*/55, /*maxtags*/255
	},
	{
		/* Veritas Storage Appliance */
		{ T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
		CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
	},
	{
		/*
		 * Would respond to all LUNs.  Device type and removable
		 * flag are jumper-selectable.
		 */
		{ T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
		  "Tahiti 1", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* EasyRAID E5A aka. areca ARC-6010 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
		CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Default tagged queuing parameters for all devices */
		{
			T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
			/*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0, /*mintags*/2, /*maxtags*/255
	},
};

static const int xpt_quirk_table_size = NELEM(xpt_quirk_table);
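
/*
 * Added commentary: xpt_find_quirk() (declared below) matches a device's
 * inquiry data against this table using cam_quirkmatch(); because the
 * final entry wildcards the vendor, product, and revision fields, every
 * device resolves to at least the default tagged-queuing parameters.
 */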
typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;
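
/*
 * Added commentary: dev_match_ret packs two independent results into one
 * value: the low nibble (DM_RET_FLAG_MASK) carries flags such as
 * DM_RET_COPY, while the high nibble (DM_RET_ACTION_MASK) carries the
 * traversal action.  A sketch of how results from the match functions
 * below are decoded by their callers:
 *
 *	dev_match_ret ret = xptbusmatch(patterns, num_patterns, bus);
 *	if ((ret & DM_RET_ACTION_MASK) == DM_RET_ERROR)
 *		... fail the match CCB ...
 *	if (ret & DM_RET_COPY)
 *		... copy this node into the user's match buffer ...
 */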
typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
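
/*
 * Added commentary: these are the callback signatures consumed by the
 * xpt*traverse() routines declared below.  By this file's convention a
 * callback returns non-zero to continue the walk over the EDT and zero
 * to stop it early.
 */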
/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
static cam_simq_t cam_simq;
static struct spinlock cam_simq_spin;

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
	probe_periph_init, "probe",
	TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);
PERIPHDRIVER_DECLARE(probe, probe_driver);

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct dev_ops xpt_ops = {
	{ "xpt", 0, D_MPSAFE },
	.d_open = xptopen,
	.d_close = xptclose,
	.d_ioctl = xptioctl
};

static void dead_sim_action(struct cam_sim *sim, union ccb *ccb);
static void dead_sim_poll(struct cam_sim *sim);

/* Dummy SIM that is used when the real one has gone. */
static struct cam_sim cam_dead_sim;
static struct lock    cam_dead_lock;
/* Storage for debugging data structures */
#ifdef CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static int	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);
static cam_status	xpt_compile_path(struct cam_path *new_path,
					 struct cam_periph *perph,
					 path_id_t path_id,
					 target_id_t target_id,
					 lun_id_t lun_id);

static void		xpt_release_path(struct cam_path *path);

static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static void		xpt_dev_async(u_int32_t async_code,
				      struct cam_eb *bus,
				      struct cam_et *target,
				      struct cam_ed *device,
				      void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				  u_int32_t new_priority);
static void	 xpt_run_dev_allocq(struct cam_eb *bus);
static void	 xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static void	 xpt_release_bus(struct cam_eb *bus);
static void	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
					 int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
				  lun_id_t lun_id);
static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
				    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void	 xpt_scan_lun(struct cam_periph *periph,
			      struct cam_path *path, cam_flags flags,
			      union ccb *ccb);
static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t	xptconfigbuscountfunc;
static xpt_busfunc_t	xptconfigfunc;
static void	 xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void	 xpt_uncount_bus (struct cam_eb *bus);
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static inthand2_t swi_cambio;
static void	 camisr(void *);
static void	 camisr_runqueue(struct cam_sim *);
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
					    void *arg);
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static cam_status	proberegister(struct cam_periph *periph,
				      void *arg);
static void	 probeschedule(struct cam_periph *probe_periph);
static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
static void	 proberequestdefaultnegotiation(struct cam_periph *periph);
static int	 proberequestbackoff(struct cam_periph *periph,
				     struct cam_ed *device);
static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
static void	 probecleanup(struct cam_periph *periph);
static void	 xpt_find_quirk(struct cam_ed *device);
static void	 xpt_devise_transport(struct cam_path *path);
static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
					   struct cam_ed *device,
					   int async_update);
static void	 xpt_toggle_tags(struct cam_path *path);
static void	 xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
					    struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
					   struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (bus->sim->devq && dev->ccbq.devq_openings > 0) {
		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
			cam_ccbq_resize(&dev->ccbq,
					dev->ccbq.dev_openings
					+ dev->ccbq.dev_active);
			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
		}
		/*
		 * The priority of a device waiting for CCB resources
		 * is that of the highest priority peripheral driver
		 * enqueued.
		 */
		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
					  &dev->alloc_ccb_entry.pinfo,
					  CAMQ_GET_HEAD(&dev->drvq)->priority);
	} else {
		retval = 0;
	}
	return (retval);
}
static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (bus->sim->devq && dev->ccbq.dev_openings > 0) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&bus->sim->devq->send_queue,
				     &dev->send_ccb_entry.pinfo,
				     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
	} else {
		retval = 0;
	}
	return (retval);
}
static __inline int
periph_is_queued(struct cam_periph *periph)
{
	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
	/*
	 * Have work to do.
	 * Have space to do more work.
	 * Allowed to do work.
	 */
	return ((devq->alloc_queue.qfrozen_cnt == 0)
	     && (devq->alloc_queue.entries > 0)
	     && (devq->alloc_openings > 0));
}
static void
xpt_periph_init(void)
{
	make_dev(&xpt_ops, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init(void)
{
}

static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}
static int
xptopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;

	/*
	 * Only allow read-write access.
	 */
	if (((ap->a_oflags & FWRITE) == 0) || ((ap->a_oflags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((ap->a_oflags & O_NONBLOCK) != 0) {
		kprintf("%s: can't do nonblocking access\n", devtoname(dev));
		return(ENODEV);
	}

	/* Mark ourselves open */
	lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
	xsoftc.flags |= XPT_FLAG_OPEN;
	lockmgr(&xsoftc.xpt_lock, LK_RELEASE);

	return(0);
}

static int
xptclose(struct dev_close_args *ap)
{
	/* Mark ourselves closed */
	lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
	xsoftc.flags &= ~XPT_FLAG_OPEN;
	lockmgr(&xsoftc.xpt_lock, LK_RELEASE);

	return(0);
}
/*
 * Don't automatically grab the xpt softc lock here even though this is going
 * through the xpt device.  The xpt device is really just a back door for
 * accessing other devices and SIMs, so the right thing to do is to grab
 * the appropriate SIM lock once the bus/SIM is located.
 */
static int
xptioctl(struct dev_ioctl_args *ap)
{
	int error;

	error = 0;

	switch(ap->a_cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;
		struct cam_eb *bus;

		inccb = (union ccb *)ap->a_data;

		bus = xpt_find_bus(inccb->ccb_h.path_id);
		if (bus == NULL) {
			error = EINVAL;
			break;
		}

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
				error = EINVAL;
				break;
			}
			/* FALLTHROUGH */
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:
			ccb = xpt_alloc_ccb();

			CAM_SIM_LOCK(bus->sim);

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				CAM_SIM_UNLOCK(bus->sim);
				xpt_free_ccb(&ccb->ccb_h);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(&ccb->ccb_h);
			CAM_SIM_UNLOCK(bus->sim);
			break;

		case XPT_DEBUG: {
			union ccb *ccb;

			ccb = xpt_alloc_ccb();

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */
			CAM_SIM_LOCK(bus->sim);

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				CAM_SIM_UNLOCK(bus->sim);
				xpt_free_ccb(&ccb->ccb_h);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			xpt_action(ccb);
			CAM_SIM_UNLOCK(bus->sim);
			bcopy(ccb, inccb, sizeof(union ccb));
			inccb->ccb_h.timeout_ch = NULL;	/* SAFETY */
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(&ccb->ccb_h);
			break;
		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		xpt_release_bus(bus);
		break;
	}
	/*
	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
	 * with the peripheral driver name and unit name filled in.  The other
	 * fields don't really matter as input.  The passthrough driver name
	 * ("pass"), and unit number are passed back in the ccb.  The current
	 * device generation number, and the index into the device peripheral
	 * driver list, and the status are also passed back.  Note that
	 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
	 * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
	 * (or rather should be) impossible for the device peripheral driver
	 * list to change since we look at the whole thing in one pass, and
	 * we do it with lock protection.
	 */
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char   *name;
		u_int unit;
		u_int cur_generation;
		int base_periph_found;
		int splbreaknum;

		ccb = (union ccb *)ap->a_data;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		/*
		 * Every 100 devices, we want to drop our lock protection to
		 * give the software interrupt handler a chance to run.
		 * Most systems won't run into this check, but this should
		 * avoid starvation in the software interrupt handler in
		 * large systems.
		 */
		splbreaknum = 100;

		ccb = (union ccb *)ap->a_data;

		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
ptstartover:
		cur_generation = xsoftc.xpt_generation;

		/* first find our driver in the list of drivers */
		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;
		}

		if (*p_drv == NULL) {
			lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (periph->unit_number == unit) {
				break;
			} else if (--splbreaknum == 0) {
				lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
				lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
				splbreaknum = 100;
				if (cur_generation != xsoftc.xpt_generation)
					goto ptstartover;
			}
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = SLIST_FIRST(&device->periphs);
			     periph != NULL;
			     periph = SLIST_NEXT(periph, periph_links), i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (SLIST_NEXT(periph, periph_links))
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				kprintf("xptioctl: pass driver is not in the "
				       "kernel\n");
				kprintf("xptioctl: put \"device pass\" in "
				       "your kernel config file\n");
			}
		}
		lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
		break;
	}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}
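
/*
 * Added commentary: a minimal userland sketch of the CAMGETPASSTHRU
 * handshake above; this is essentially how libcam resolves a peripheral
 * such as "da0" to its "pass" device (error handling omitted):
 *
 *	union ccb ccb;
 *	int fd = open("/dev/xpt0", O_RDWR);
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_GDEVLIST;
 *	strncpy(ccb.cgdl.periph_name, "da", DEV_IDLEN);
 *	ccb.cgdl.unit_number = 0;
 *	if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0 &&
 *	    ccb.ccb_h.status == CAM_REQ_CMP) {
 *		// ccb.cgdl.periph_name/unit_number now name the pass device
 *	}
 */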
static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		if ((error = xpt_init(NULL)) != 0)
			return (error);
		break;
	case MOD_UNLOAD:
		return EBUSY;
	default:
		return EOPNOTSUPP;
	}

	return 0;
}
/*
 * Thread to handle asynchronous main-context requests.
 *
 * This function is typically used by drivers to perform complex actions
 * such as bus scans and engineering requests in a main context instead
 * of an interrupt context.
 */
static void
xpt_scanner_thread(void *dummy)
{
	union ccb	*ccb;
	struct cam_sim	*sim;

	for (;;) {
		xpt_lock_buses();
		xsoftc.ccb_scanq_running = 1;
		while ((ccb = (void *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
			TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h,
				     sim_links.tqe);
			xpt_unlock_buses();

			sim = ccb->ccb_h.path->bus->sim;
			CAM_SIM_LOCK(sim);
			xpt_action(ccb);
			CAM_SIM_UNLOCK(sim);

			xpt_lock_buses();
		}
		xsoftc.ccb_scanq_running = 0;
		tsleep_interlock(&xsoftc.ccb_scanq, 0);
		xpt_unlock_buses();
		tsleep(&xsoftc.ccb_scanq, PINTERLOCKED, "ccb_scanq", 0);
	}
}
/*
 * Issue an asynchronous action
 */
void
xpt_action_async(union ccb *ccb)
{
	xpt_lock_buses();
	TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
	if (xsoftc.ccb_scanq_running == 0) {
		xsoftc.ccb_scanq_running = 1;
		wakeup(&xsoftc.ccb_scanq);
	}
	xpt_unlock_buses();
}
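
/*
 * Added commentary: a hedged usage sketch.  A caller wanting a deferred
 * rescan would shape a CCB for the scanner thread roughly like so
 * (allocation and path setup elided; my_scan_done is a hypothetical
 * completion callback):
 *
 *	ccb->ccb_h.func_code = XPT_SCAN_BUS;
 *	ccb->ccb_h.cbfcnp = my_scan_done;
 *	xpt_action_async(ccb);
 *
 * The scanner thread then dispatches the CCB under the owning SIM's
 * lock, as shown in xpt_scanner_thread() above.
 */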
/* Functions accessed by the peripheral drivers */
static int
xpt_init(void *dummy)
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;

	TAILQ_INIT(&xsoftc.xpt_busses);
	TAILQ_INIT(&cam_simq);
	TAILQ_INIT(&xsoftc.ccb_scanq);
	STAILQ_INIT(&xsoftc.highpowerq);
	xsoftc.num_highpower = CAM_MAX_HIGHPOWER;

	spin_init(&cam_simq_spin, "cam_simq_spin");
	lockinit(&xsoftc.xpt_lock, "XPT lock", 0, LK_CANRECURSE);
	lockinit(&xsoftc.xpt_topo_lock, "XPT topology lock", 0, LK_CANRECURSE);

	SLIST_INIT(&cam_dead_sim.ccb_freeq);
	TAILQ_INIT(&cam_dead_sim.sim_doneq);
	spin_init(&cam_dead_sim.sim_spin, "cam_dead_sim");
	cam_dead_sim.sim_action = dead_sim_action;
	cam_dead_sim.sim_poll = dead_sim_poll;
	cam_dead_sim.sim_name = "dead_sim";
	cam_dead_sim.lock = &cam_dead_lock;
	lockinit(&cam_dead_lock, "XPT dead_sim lock", 0, LK_CANRECURSE);
	cam_dead_sim.flags |= CAM_SIM_DEREGISTERED;

	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*lock*/&xsoftc.xpt_lock,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	cam_simq_release(devq);
	if (xpt_sim == NULL)
		return (ENOMEM);

	xpt_sim->max_ccbs = 16;

	lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
	if ((status = xpt_bus_register(xpt_sim, /*bus #*/0)) != CAM_SUCCESS) {
		lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
		kprintf("xpt_init: xpt_bus_register failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
		kprintf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}

	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, xpt_sim);
	xpt_free_path(path);

	lockmgr(&xsoftc.xpt_lock, LK_RELEASE);

	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xsoftc.xpt_config_hook = kmalloc(sizeof(struct intr_config_hook),
					 M_CAMXPT, M_INTWAIT | M_ZERO);
	xsoftc.xpt_config_hook->ich_func = xpt_config;
	xsoftc.xpt_config_hook->ich_desc = "xpt";
	xsoftc.xpt_config_hook->ich_order = 1000;
	if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
		kfree(xsoftc.xpt_config_hook, M_CAMXPT);
		kprintf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	/* fire up rescan thread */
	if (kthread_create(xpt_scanner_thread, NULL, NULL, "xpt_thrd")) {
		kprintf("xpt_init: failed to create rescan thread\n");
	}
	/* Install our software interrupt handlers */
	register_swi_mp(SWI_CAMBIO, swi_cambio, NULL, "swi_cambio", NULL, -1);

	return (0);
}
static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	struct cam_sim *xpt_sim;

	if (periph == NULL) {
		kprintf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	xpt_sim = (struct cam_sim *)arg;
	xpt_sim->softc = periph;
	xpt_periph = periph;
	periph->softc = NULL;

	return(CAM_REQ_CMP);
}
int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;
	struct periph_list *periph_head;

	sim_lock_assert_owned(periph->sim->lock);

	device = periph->path->device;

	periph_head = &device->periphs;

	status = CAM_REQ_CMP;

	if (device != NULL) {
		/*
		 * Make room for this peripheral
		 * so it will fit in the queue
		 * when it's scheduled to run
		 */
		status = camq_resize(&device->drvq,
				     device->drvq.array_size + 1);

		device->generation++;

		SLIST_INSERT_HEAD(periph_head, periph, periph_links);
	}

	lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
	xsoftc.xpt_generation++;
	lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);

	return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	sim_lock_assert_owned(periph->sim->lock);

	device = periph->path->device;

	if (device != NULL) {
		struct periph_list *periph_head;

		periph_head = &device->periphs;

		/* Release the slot for this peripheral */
		camq_resize(&device->drvq, device->drvq.array_size - 1);

		device->generation++;

		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
	}

	lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
	xsoftc.xpt_generation++;
	lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
}
void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	struct	ccb_pathinq *cpi;
	struct	ccb_trans_settings *cts;
	struct	cam_path *path;
	u_int	speed;
	u_int	freq;
	u_int	mb;

	sim_lock_assert_owned(periph->sim->lock);

	path = periph->path;

	/* Report basic attachment and inquiry data */
	kprintf("%s%d at %s%d bus %d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->target->target_id,
	       path->device->lun_id);
	kprintf("%s%d: ", periph->periph_name, periph->unit_number);
	scsi_print_inquiry(&path->device->inq_data);

	/* Report serial number */
	if (path->device->serial_num_len > 0) {
		/* Don't wrap the screen - print only the first 60 chars */
		kprintf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}

	/* Acquire and report transfer speed */
	cts = &xpt_alloc_ccb()->cts;
	xpt_setup_ccb(&cts->ccb_h, path, /*priority*/1);
	cts->ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts->type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb*)cts);
	if ((cts->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		xpt_free_ccb(&cts->ccb_h);
		return;
	}

	/* Ask the SIM for its base transfer speed */
	cpi = &xpt_alloc_ccb()->cpi;
	xpt_setup_ccb(&cpi->ccb_h, path, /*priority*/1);
	cpi->ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)cpi);

	speed = cpi->base_transfer_speed;
	freq = 0;
	if (cts->ccb_h.status == CAM_REQ_CMP && cts->transport == XPORT_SPI) {
		struct	ccb_trans_settings_spi *spi;

		spi = &cts->xport_specific.spi;
		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
		  && spi->sync_offset != 0) {
			freq = scsi_calc_syncsrate(spi->sync_period);
			speed = freq;
		}

		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
			speed *= (0x01 << spi->bus_width);
	}
	if (cts->ccb_h.status == CAM_REQ_CMP && cts->transport == XPORT_FC) {
		struct	ccb_trans_settings_fc *fc = &cts->xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_SPEED) {
			speed = fc->bitrate;
		}
	}

	if (cts->ccb_h.status == CAM_REQ_CMP && cts->transport == XPORT_SAS) {
		struct	ccb_trans_settings_sas *sas = &cts->xport_specific.sas;
		if (sas->valid & CTS_SAS_VALID_SPEED) {
			speed = sas->bitrate;
		}
	}

	mb = speed / 1000;
	if (mb > 0)
		kprintf("%s%d: %d.%03dMB/s transfers",
		       periph->periph_name, periph->unit_number,
		       mb, speed % 1000);
	else
		kprintf("%s%d: %dKB/s transfers", periph->periph_name,
		       periph->unit_number, speed);

	/* Report additional information about SPI connections */
	if (cts->ccb_h.status == CAM_REQ_CMP && cts->transport == XPORT_SPI) {
		struct	ccb_trans_settings_spi *spi;

		spi = &cts->xport_specific.spi;
		if (freq != 0) {
			kprintf(" (%d.%03dMHz%s, offset %d", freq / 1000,
			       freq % 1000,
			       (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
			     ? " DT" : "",
			       spi->sync_offset);
		}
		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
		 && spi->bus_width > 0) {
			if (freq != 0) {
				kprintf(", ");
			} else {
				kprintf(" (");
			}
			kprintf("%dbit)", 8 * (0x01 << spi->bus_width));
		} else if (freq != 0) {
			kprintf(")");
		}
	}
	if (cts->ccb_h.status == CAM_REQ_CMP && cts->transport == XPORT_FC) {
		struct ccb_trans_settings_fc *fc;

		fc = &cts->xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_WWNN)
			kprintf(" WWNN 0x%llx", (long long) fc->wwnn);
		if (fc->valid & CTS_FC_VALID_WWPN)
			kprintf(" WWPN 0x%llx", (long long) fc->wwpn);
		if (fc->valid & CTS_FC_VALID_PORT)
			kprintf(" PortID 0x%x", fc->port);
	}

	if (path->device->inq_flags & SID_CmdQue ||
	    path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		kprintf("\n%s%d: Command Queueing Enabled",
		       periph->periph_name, periph->unit_number);
	}
	kprintf("\n");

	xpt_free_ccb(&cpi->ccb_h);
	xpt_free_ccb(&cts->ccb_h);

	/*
	 * We only want to print the caller's announce string if they've
	 * passed one in..
	 */
	if (announce_string != NULL)
		kprintf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
}
static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}
static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_ed *device)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (device == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this device matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct device_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a device node, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_DEVICE) {
			if ((patterns[i].type == DEV_MATCH_PERIPH)
			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.device_pattern;

		/*
		 * If they want to match any device node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == DEV_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == DEV_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != device->target->bus->path_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
		 && (cur_pattern->target_id != device->target->target_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
		 && (cur_pattern->target_lun != device->lun_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
		 && (cam_quirkmatch((caddr_t)&device->inq_data,
				    (caddr_t)&cur_pattern->inq_pat,
				    1, sizeof(cur_pattern->inq_pat),
				    scsi_static_inquiry_match) == NULL))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this device.  So tell the caller to copy
		 * the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a peripheral matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a peripheral
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen any peripheral matching patterns.  So tell the
	 * caller to stop descending the tree -- the user doesn't want to
	 * match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}
1992 * Match a single peripheral against any number of match patterns.
1994 static dev_match_ret
1995 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1996 struct cam_periph *periph)
1998 dev_match_ret retval;
1999 int i;
2002 * If we aren't given something to match against, that's an error.
2004 if (periph == NULL)
2005 return(DM_RET_ERROR);
2008 * If there are no match entries, then this peripheral matches no
2009 * matter what.
2011 if ((patterns == NULL) || (num_patterns == 0))
2012 return(DM_RET_STOP | DM_RET_COPY);
2015 * There aren't any nodes below a peripheral node, so there's no
2016 * reason to descend the tree any further.
2018 retval = DM_RET_STOP;
2020 for (i = 0; i < num_patterns; i++) {
2021 struct periph_match_pattern *cur_pattern;
2024 * If the pattern in question isn't for a peripheral, we
2025 * aren't interested.
2027 if (patterns[i].type != DEV_MATCH_PERIPH)
2028 continue;
2030 cur_pattern = &patterns[i].pattern.periph_pattern;
2033 * If they want to match on anything, then we will do so.
2035 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
2036 /* set the copy flag */
2037 retval |= DM_RET_COPY;
2040 * We've already set the return action to stop,
2041 * since there are no nodes below peripherals in
2042 * the tree.
2044 return(retval);
2050 * A pattern that matches nothing can never succeed, so skip it.
2050 if (cur_pattern->flags == PERIPH_MATCH_NONE)
2051 continue;
2053 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
2054 && (cur_pattern->path_id != periph->path->bus->path_id))
2055 continue;
2058 * For the target and lun IDs, we have to make sure the
2059 * target and lun pointers aren't NULL. The xpt peripheral
2060 * has a wildcard target and device.
2062 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
2063 && ((periph->path->target == NULL)
2064 ||(cur_pattern->target_id != periph->path->target->target_id)))
2065 continue;
2067 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
2068 && ((periph->path->device == NULL)
2069 || (cur_pattern->target_lun != periph->path->device->lun_id)))
2070 continue;
2072 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
2073 && (cur_pattern->unit_number != periph->unit_number))
2074 continue;
2076 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
2077 && (strncmp(cur_pattern->periph_name, periph->periph_name,
2078 DEV_IDLEN) != 0))
2079 continue;
2082 * If we get to this point, the user definitely wants
2083 * information on this peripheral. So tell the caller to
2084 * copy the data out.
2086 retval |= DM_RET_COPY;
2089 * The return action has already been set to stop, since
2090 * peripherals don't have any nodes below them in the EDT.
2092 return(retval);
2096 * If we get to this point, the peripheral that was passed in
2097 * doesn't match any of the patterns.
2099 return(retval);
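/*
 * Illustrative sketch (not part of this file): building a pattern that the
 * matcher above would accept for every "da" peripheral, regardless of bus,
 * target, lun, or unit.  The structure and PERIPH_MATCH_NAME flag are the
 * ones consumed by xptperiphmatch(); the surrounding code is assumed.
 *
 *	struct dev_match_pattern pattern;
 *
 *	bzero(&pattern, sizeof(pattern));
 *	pattern.type = DEV_MATCH_PERIPH;
 *	pattern.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
 *	strncpy(pattern.pattern.periph_pattern.periph_name, "da", DEV_IDLEN);
 */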
2102 static int
2103 xptedtbusfunc(struct cam_eb *bus, void *arg)
2105 struct ccb_dev_match *cdm;
2106 dev_match_ret retval;
2108 cdm = (struct ccb_dev_match *)arg;
2111 * If our position is for something deeper in the tree, that means
2112 * that we've already seen this node. So, we keep going down.
2114 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2115 && (cdm->pos.cookie.bus == bus)
2116 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2117 && (cdm->pos.cookie.target != NULL))
2118 retval = DM_RET_DESCEND;
2119 else
2120 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
2123 * If we got an error, bail out of the search.
2125 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2126 cdm->status = CAM_DEV_MATCH_ERROR;
2127 return(0);
2131 * If the copy flag is set, copy this bus out.
2133 if (retval & DM_RET_COPY) {
2134 int spaceleft, j;
2136 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2137 sizeof(struct dev_match_result));
2140 * If we don't have enough space to put in another
2141 * match result, save our position and tell the
2142 * user there are more devices to check.
2144 if (spaceleft < sizeof(struct dev_match_result)) {
2145 bzero(&cdm->pos, sizeof(cdm->pos));
2146 cdm->pos.position_type =
2147 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
2149 cdm->pos.cookie.bus = bus;
2150 cdm->pos.generations[CAM_BUS_GENERATION]=
2151 xsoftc.bus_generation;
2152 cdm->status = CAM_DEV_MATCH_MORE;
2153 return(0);
2155 j = cdm->num_matches;
2156 cdm->num_matches++;
2157 cdm->matches[j].type = DEV_MATCH_BUS;
2158 cdm->matches[j].result.bus_result.path_id = bus->path_id;
2159 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
2160 cdm->matches[j].result.bus_result.unit_number =
2161 bus->sim->unit_number;
2162 strncpy(cdm->matches[j].result.bus_result.dev_name,
2163 bus->sim->sim_name, DEV_IDLEN);
2167 * If the user is only interested in busses, there's no
2168 * reason to descend to the next level in the tree.
2170 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2171 return(1);
2174 * If there is a target generation recorded, check it to
2175 * make sure the target list hasn't changed.
2177 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2178 && (bus == cdm->pos.cookie.bus)
2179 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2180 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
2181 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
2182 bus->generation)) {
2183 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2184 return(0);
2187 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2188 && (cdm->pos.cookie.bus == bus)
2189 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2190 && (cdm->pos.cookie.target != NULL))
2191 return(xpttargettraverse(bus,
2192 (struct cam_et *)cdm->pos.cookie.target,
2193 xptedttargetfunc, arg));
2194 else
2195 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
2198 static int
2199 xptedttargetfunc(struct cam_et *target, void *arg)
2201 struct ccb_dev_match *cdm;
2203 cdm = (struct ccb_dev_match *)arg;
2206 * If there is a device list generation recorded, check it to
2207 * make sure the device list hasn't changed.
2209 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2210 && (cdm->pos.cookie.bus == target->bus)
2211 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2212 && (cdm->pos.cookie.target == target)
2213 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2214 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2215 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2216 target->generation)) {
2217 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2218 return(0);
2221 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2222 && (cdm->pos.cookie.bus == target->bus)
2223 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2224 && (cdm->pos.cookie.target == target)
2225 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2226 && (cdm->pos.cookie.device != NULL))
2227 return(xptdevicetraverse(target,
2228 (struct cam_ed *)cdm->pos.cookie.device,
2229 xptedtdevicefunc, arg));
2230 else
2231 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2234 static int
2235 xptedtdevicefunc(struct cam_ed *device, void *arg)
2238 struct ccb_dev_match *cdm;
2239 dev_match_ret retval;
2241 cdm = (struct ccb_dev_match *)arg;
2244 * If our position is for something deeper in the tree, that means
2245 * that we've already seen this node. So, we keep going down.
2247 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2248 && (cdm->pos.cookie.device == device)
2249 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2250 && (cdm->pos.cookie.periph != NULL))
2251 retval = DM_RET_DESCEND;
2252 else
2253 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2254 device);
2256 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2257 cdm->status = CAM_DEV_MATCH_ERROR;
2258 return(0);
2262 * If the copy flag is set, copy this device out.
2264 if (retval & DM_RET_COPY) {
2265 int spaceleft, j;
2267 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2268 sizeof(struct dev_match_result));
2271 * If we don't have enough space to put in another
2272 * match result, save our position and tell the
2273 * user there are more devices to check.
2275 if (spaceleft < sizeof(struct dev_match_result)) {
2276 bzero(&cdm->pos, sizeof(cdm->pos));
2277 cdm->pos.position_type =
2278 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2279 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2281 cdm->pos.cookie.bus = device->target->bus;
2282 cdm->pos.generations[CAM_BUS_GENERATION]=
2283 xsoftc.bus_generation;
2284 cdm->pos.cookie.target = device->target;
2285 cdm->pos.generations[CAM_TARGET_GENERATION] =
2286 device->target->bus->generation;
2287 cdm->pos.cookie.device = device;
2288 cdm->pos.generations[CAM_DEV_GENERATION] =
2289 device->target->generation;
2290 cdm->status = CAM_DEV_MATCH_MORE;
2291 return(0);
2293 j = cdm->num_matches;
2294 cdm->num_matches++;
2295 cdm->matches[j].type = DEV_MATCH_DEVICE;
2296 cdm->matches[j].result.device_result.path_id =
2297 device->target->bus->path_id;
2298 cdm->matches[j].result.device_result.target_id =
2299 device->target->target_id;
2300 cdm->matches[j].result.device_result.target_lun =
2301 device->lun_id;
2302 bcopy(&device->inq_data,
2303 &cdm->matches[j].result.device_result.inq_data,
2304 sizeof(struct scsi_inquiry_data));
2306 /* Let the user know whether this device is unconfigured */
2307 if (device->flags & CAM_DEV_UNCONFIGURED)
2308 cdm->matches[j].result.device_result.flags =
2309 DEV_RESULT_UNCONFIGURED;
2310 else
2311 cdm->matches[j].result.device_result.flags =
2312 DEV_RESULT_NOFLAG;
2316 * If the user isn't interested in peripherals, don't descend
2317 * the tree any further.
2319 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2320 return(1);
2323 * If there is a peripheral list generation recorded, make sure
2324 * it hasn't changed.
2326 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2327 && (device->target->bus == cdm->pos.cookie.bus)
2328 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2329 && (device->target == cdm->pos.cookie.target)
2330 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2331 && (device == cdm->pos.cookie.device)
2332 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2333 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2334 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2335 device->generation)){
2336 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2337 return(0);
2340 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2341 && (cdm->pos.cookie.bus == device->target->bus)
2342 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2343 && (cdm->pos.cookie.target == device->target)
2344 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2345 && (cdm->pos.cookie.device == device)
2346 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2347 && (cdm->pos.cookie.periph != NULL))
2348 return(xptperiphtraverse(device,
2349 (struct cam_periph *)cdm->pos.cookie.periph,
2350 xptedtperiphfunc, arg));
2351 else
2352 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2355 static int
2356 xptedtperiphfunc(struct cam_periph *periph, void *arg)
2358 struct ccb_dev_match *cdm;
2359 dev_match_ret retval;
2361 cdm = (struct ccb_dev_match *)arg;
2363 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2365 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2366 cdm->status = CAM_DEV_MATCH_ERROR;
2367 return(0);
2371 * If the copy flag is set, copy this peripheral out.
2373 if (retval & DM_RET_COPY) {
2374 int spaceleft, j;
2376 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2377 sizeof(struct dev_match_result));
2380 * If we don't have enough space to put in another
2381 * match result, save our position and tell the
2382 * user there are more devices to check.
2384 if (spaceleft < sizeof(struct dev_match_result)) {
2385 bzero(&cdm->pos, sizeof(cdm->pos));
2386 cdm->pos.position_type =
2387 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2388 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2389 CAM_DEV_POS_PERIPH;
2391 cdm->pos.cookie.bus = periph->path->bus;
2392 cdm->pos.generations[CAM_BUS_GENERATION]=
2393 xsoftc.bus_generation;
2394 cdm->pos.cookie.target = periph->path->target;
2395 cdm->pos.generations[CAM_TARGET_GENERATION] =
2396 periph->path->bus->generation;
2397 cdm->pos.cookie.device = periph->path->device;
2398 cdm->pos.generations[CAM_DEV_GENERATION] =
2399 periph->path->target->generation;
2400 cdm->pos.cookie.periph = periph;
2401 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2402 periph->path->device->generation;
2403 cdm->status = CAM_DEV_MATCH_MORE;
2404 return(0);
2407 j = cdm->num_matches;
2408 cdm->num_matches++;
2409 cdm->matches[j].type = DEV_MATCH_PERIPH;
2410 cdm->matches[j].result.periph_result.path_id =
2411 periph->path->bus->path_id;
2412 cdm->matches[j].result.periph_result.target_id =
2413 periph->path->target->target_id;
2414 cdm->matches[j].result.periph_result.target_lun =
2415 periph->path->device->lun_id;
2416 cdm->matches[j].result.periph_result.unit_number =
2417 periph->unit_number;
2418 strncpy(cdm->matches[j].result.periph_result.periph_name,
2419 periph->periph_name, DEV_IDLEN);
2422 return(1);
2425 static int
2426 xptedtmatch(struct ccb_dev_match *cdm)
2428 int ret;
2430 cdm->num_matches = 0;
2433 * Check the bus list generation. If it has changed, the user
2434 * needs to reset everything and start over.
2436 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2437 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2438 && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) {
2439 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2440 return(0);
2443 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2444 && (cdm->pos.cookie.bus != NULL))
2445 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2446 xptedtbusfunc, cdm);
2447 else
2448 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2451 * If we get back 0, that means that we had to stop before fully
2452 * traversing the EDT. It also means that one of the subroutines
2453 * has set the status field to the proper value. If we get back 1,
2454 * we've fully traversed the EDT and copied out any matching entries.
2456 if (ret == 1)
2457 cdm->status = CAM_DEV_MATCH_LAST;
2459 return(ret);
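/*
 * Illustrative sketch (not part of this file): a userland consumer of
 * XPT_DEV_MATCH, in the style of camcontrol(8), resumes a partial match by
 * resubmitting the same CCB while the status is CAM_DEV_MATCH_MORE; the
 * saved cdm.pos lets xptedtmatch() pick up where it stopped.  The ioctl
 * path, buffer sizes, and process() consumer here are assumptions, not
 * taken from this file:
 *
 *	ccb.ccb_h.func_code = XPT_DEV_MATCH;
 *	ccb.cdm.match_buf_len = sizeof(matches);
 *	ccb.cdm.matches = matches;
 *	ccb.cdm.num_patterns = 0;
 *	ccb.cdm.pattern_buf_len = 0;
 *	do {
 *		if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
 *			err(1, "CAMIOCOMMAND");
 *		for (i = 0; i < ccb.cdm.num_matches; i++)
 *			process(&matches[i]);
 *	} while (ccb.cdm.status == CAM_DEV_MATCH_MORE);
 */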
2462 static int
2463 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2465 struct ccb_dev_match *cdm;
2467 cdm = (struct ccb_dev_match *)arg;
2469 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2470 && (cdm->pos.cookie.pdrv == pdrv)
2471 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2472 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2473 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2474 (*pdrv)->generation)) {
2475 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2476 return(0);
2479 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2480 && (cdm->pos.cookie.pdrv == pdrv)
2481 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2482 && (cdm->pos.cookie.periph != NULL))
2483 return(xptpdperiphtraverse(pdrv,
2484 (struct cam_periph *)cdm->pos.cookie.periph,
2485 xptplistperiphfunc, arg));
2486 else
2487 return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
2490 static int
2491 xptplistperiphfunc(struct cam_periph *periph, void *arg)
2493 struct ccb_dev_match *cdm;
2494 dev_match_ret retval;
2496 cdm = (struct ccb_dev_match *)arg;
2498 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2500 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2501 cdm->status = CAM_DEV_MATCH_ERROR;
2502 return(0);
2506 * If the copy flag is set, copy this peripheral out.
2508 if (retval & DM_RET_COPY) {
2509 int spaceleft, j;
2511 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2512 sizeof(struct dev_match_result));
2515 * If we don't have enough space to put in another
2516 * match result, save our position and tell the
2517 * user there are more devices to check.
2519 if (spaceleft < sizeof(struct dev_match_result)) {
2520 struct periph_driver **pdrv;
2522 pdrv = NULL;
2523 bzero(&cdm->pos, sizeof(cdm->pos));
2524 cdm->pos.position_type =
2525 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2526 CAM_DEV_POS_PERIPH;
2529 * This may look a bit nonsensical, but it is
2530 * actually quite logical. There are very few
2531 * peripheral drivers, and bloating every peripheral
2532 * structure with a pointer back to its parent
2533 * peripheral driver linker set entry would cost
2534 * more in the long run than doing this quick lookup.
2536 for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2537 if (strcmp((*pdrv)->driver_name,
2538 periph->periph_name) == 0)
2539 break;
2542 if (*pdrv == NULL) {
2543 cdm->status = CAM_DEV_MATCH_ERROR;
2544 return(0);
2547 cdm->pos.cookie.pdrv = pdrv;
2549 * The periph generation slot does double duty, as
2550 * does the periph pointer slot. They are used for
2551 * both EDT and pdrv lookups and positioning.
2553 cdm->pos.cookie.periph = periph;
2554 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2555 (*pdrv)->generation;
2556 cdm->status = CAM_DEV_MATCH_MORE;
2557 return(0);
2560 j = cdm->num_matches;
2561 cdm->num_matches++;
2562 cdm->matches[j].type = DEV_MATCH_PERIPH;
2563 cdm->matches[j].result.periph_result.path_id =
2564 periph->path->bus->path_id;
2567 * The transport layer peripheral doesn't have a target or
2568 * lun.
2570 if (periph->path->target)
2571 cdm->matches[j].result.periph_result.target_id =
2572 periph->path->target->target_id;
2573 else
2574 cdm->matches[j].result.periph_result.target_id = -1;
2576 if (periph->path->device)
2577 cdm->matches[j].result.periph_result.target_lun =
2578 periph->path->device->lun_id;
2579 else
2580 cdm->matches[j].result.periph_result.target_lun = -1;
2582 cdm->matches[j].result.periph_result.unit_number =
2583 periph->unit_number;
2584 strncpy(cdm->matches[j].result.periph_result.periph_name,
2585 periph->periph_name, DEV_IDLEN);
2588 return(1);
2591 static int
2592 xptperiphlistmatch(struct ccb_dev_match *cdm)
2594 int ret;
2596 cdm->num_matches = 0;
2599 * At this point in the EDT traversal function, we check the bus
2600 * list generation to make sure that no busses have been added or
2601 * removed since the user last sent a XPT_DEV_MATCH ccb through.
2602 * For the peripheral driver list traversal function, however, we
2603 * don't have to worry about new peripheral driver types coming or
2604 * going; they're in a linker set, and therefore can't change
2605 * without a recompile.
2608 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2609 && (cdm->pos.cookie.pdrv != NULL))
2610 ret = xptpdrvtraverse(
2611 (struct periph_driver **)cdm->pos.cookie.pdrv,
2612 xptplistpdrvfunc, cdm);
2613 else
2614 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2617 * If we get back 0, that means that we had to stop before fully
2618 * traversing the peripheral driver tree. It also means that one of
2619 * the subroutines has set the status field to the proper value. If
2620 * we get back 1, we've fully traversed the peripheral driver list and
2621 * copied out any matching entries.
2623 if (ret == 1)
2624 cdm->status = CAM_DEV_MATCH_LAST;
2626 return(ret);
2629 static int
2630 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2632 struct cam_eb *bus, *next_bus;
2633 int retval;
2635 retval = 1;
2637 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
2638 for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
2639 bus != NULL;
2640 bus = next_bus) {
2641 next_bus = TAILQ_NEXT(bus, links);
2643 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
2644 CAM_SIM_LOCK(bus->sim);
2645 retval = tr_func(bus, arg);
2646 CAM_SIM_UNLOCK(bus->sim);
2647 if (retval == 0)
2648 return(retval);
2649 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
2651 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
2653 return(retval);
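/*
 * Illustrative sketch (not part of this file): traversal callbacks return
 * 0 to stop the walk and 1 to continue, so an early-exit search over the
 * bus list could look like this (xptfindbusfunc is a hypothetical name):
 *
 *	static int
 *	xptfindbusfunc(struct cam_eb *bus, void *arg)
 *	{
 *		if (bus->path_id == *(path_id_t *)arg)
 *			return (0);
 *		return (1);
 *	}
 *
 *	found = (xptbustraverse(NULL, xptfindbusfunc, &path_id) == 0);
 */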
2656 static int
2657 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2658 xpt_targetfunc_t *tr_func, void *arg)
2660 struct cam_et *target, *next_target;
2661 int retval;
2663 retval = 1;
2664 for (target = (start_target ? start_target :
2665 TAILQ_FIRST(&bus->et_entries));
2666 target != NULL; target = next_target) {
2668 next_target = TAILQ_NEXT(target, links);
2670 retval = tr_func(target, arg);
2672 if (retval == 0)
2673 return(retval);
2676 return(retval);
2679 static int
2680 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2681 xpt_devicefunc_t *tr_func, void *arg)
2683 struct cam_ed *device, *next_device;
2684 int retval;
2686 retval = 1;
2687 for (device = (start_device ? start_device :
2688 TAILQ_FIRST(&target->ed_entries));
2689 device != NULL;
2690 device = next_device) {
2692 next_device = TAILQ_NEXT(device, links);
2694 retval = tr_func(device, arg);
2696 if (retval == 0)
2697 return(retval);
2700 return(retval);
2703 static int
2704 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2705 xpt_periphfunc_t *tr_func, void *arg)
2707 struct cam_periph *periph, *next_periph;
2708 int retval;
2710 retval = 1;
2712 for (periph = (start_periph ? start_periph :
2713 SLIST_FIRST(&device->periphs));
2714 periph != NULL;
2715 periph = next_periph) {
2717 next_periph = SLIST_NEXT(periph, periph_links);
2719 retval = tr_func(periph, arg);
2720 if (retval == 0)
2721 return(retval);
2724 return(retval);
2727 static int
2728 xptpdrvtraverse(struct periph_driver **start_pdrv,
2729 xpt_pdrvfunc_t *tr_func, void *arg)
2731 struct periph_driver **pdrv;
2732 int retval;
2734 retval = 1;
2737 * We don't traverse the peripheral driver list like we do the
2738 * other lists, because it is a linker set, and therefore cannot be
2739 * changed during runtime. If the peripheral driver list is ever
2740 * re-done to be something other than a linker set (i.e. it can
2741 * change while the system is running), the list traversal should
2742 * be modified to work like the other traversal functions.
2744 for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2745 *pdrv != NULL; pdrv++) {
2746 retval = tr_func(pdrv, arg);
2748 if (retval == 0)
2749 return(retval);
2752 return(retval);
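/*
 * Illustrative sketch (not part of this file): because periph_drivers is a
 * NULL-terminated linker set that cannot change at runtime, a direct walk
 * is always safe:
 *
 *	struct periph_driver **pdrv;
 *
 *	for (pdrv = periph_drivers; *pdrv != NULL; pdrv++)
 *		kprintf("periph driver: %s\n", (*pdrv)->driver_name);
 */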
2755 static int
2756 xptpdperiphtraverse(struct periph_driver **pdrv,
2757 struct cam_periph *start_periph,
2758 xpt_periphfunc_t *tr_func, void *arg)
2760 struct cam_periph *periph, *next_periph;
2761 int retval;
2763 retval = 1;
2765 for (periph = (start_periph ? start_periph :
2766 TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2767 periph = next_periph) {
2769 next_periph = TAILQ_NEXT(periph, unit_links);
2771 retval = tr_func(periph, arg);
2772 if (retval == 0)
2773 return(retval);
2775 return(retval);
2778 static int
2779 xptdefbusfunc(struct cam_eb *bus, void *arg)
2781 struct xpt_traverse_config *tr_config;
2783 tr_config = (struct xpt_traverse_config *)arg;
2785 if (tr_config->depth == XPT_DEPTH_BUS) {
2786 xpt_busfunc_t *tr_func;
2788 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2790 return(tr_func(bus, tr_config->tr_arg));
2791 } else
2792 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2795 static int
2796 xptdeftargetfunc(struct cam_et *target, void *arg)
2798 struct xpt_traverse_config *tr_config;
2800 tr_config = (struct xpt_traverse_config *)arg;
2802 if (tr_config->depth == XPT_DEPTH_TARGET) {
2803 xpt_targetfunc_t *tr_func;
2805 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2807 return(tr_func(target, tr_config->tr_arg));
2808 } else
2809 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2812 static int
2813 xptdefdevicefunc(struct cam_ed *device, void *arg)
2815 struct xpt_traverse_config *tr_config;
2817 tr_config = (struct xpt_traverse_config *)arg;
2819 if (tr_config->depth == XPT_DEPTH_DEVICE) {
2820 xpt_devicefunc_t *tr_func;
2822 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2824 return(tr_func(device, tr_config->tr_arg));
2825 } else
2826 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2829 static int
2830 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2832 struct xpt_traverse_config *tr_config;
2833 xpt_periphfunc_t *tr_func;
2835 tr_config = (struct xpt_traverse_config *)arg;
2837 tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2840 * Unlike the other default functions, we don't check for depth
2841 * here. The peripheral driver level is the last level in the EDT,
2842 * so if we're here, we should execute the function in question.
2844 return(tr_func(periph, tr_config->tr_arg));
2848 * Execute the given function for every bus in the EDT.
2850 static int
2851 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2853 struct xpt_traverse_config tr_config;
2855 tr_config.depth = XPT_DEPTH_BUS;
2856 tr_config.tr_func = tr_func;
2857 tr_config.tr_arg = arg;
2859 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2863 * Execute the given function for every device in the EDT.
2865 static int
2866 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2868 struct xpt_traverse_config tr_config;
2870 tr_config.depth = XPT_DEPTH_DEVICE;
2871 tr_config.tr_func = tr_func;
2872 tr_config.tr_arg = arg;
2874 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
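/*
 * Illustrative sketch (not part of this file): a device-level callback for
 * xpt_for_all_devices(), counting configured devices.  The callback name
 * and counter are hypothetical:
 *
 *	static int
 *	xptcountdevfunc(struct cam_ed *device, void *arg)
 *	{
 *		if ((device->flags & CAM_DEV_UNCONFIGURED) == 0)
 *			(*(u_int *)arg)++;
 *		return (1);
 *	}
 *
 *	u_int ndevs = 0;
 *	xpt_for_all_devices(xptcountdevfunc, &ndevs);
 */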
2877 static int
2878 xptsetasyncfunc(struct cam_ed *device, void *arg)
2880 struct cam_path path;
2881 struct ccb_getdev *cgd;
2882 struct async_node *cur_entry;
2884 cur_entry = (struct async_node *)arg;
2887 * Don't report unconfigured devices (Wildcard devs,
2888 * devices only for target mode, device instances
2889 * that have been invalidated but are waiting for
2890 * their last reference count to be released).
2892 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2893 return (1);
2895 xpt_compile_path(&path,
2896 NULL,
2897 device->target->bus->path_id,
2898 device->target->target_id,
2899 device->lun_id);
2901 cgd = &xpt_alloc_ccb()->cgd;
2902 xpt_setup_ccb(&cgd->ccb_h, &path, /*priority*/1);
2903 cgd->ccb_h.func_code = XPT_GDEV_TYPE;
2904 xpt_action((union ccb *)cgd);
2905 cur_entry->callback(cur_entry->callback_arg,
2906 AC_FOUND_DEVICE,
2907 &path, cgd);
2908 xpt_release_path(&path);
2909 xpt_free_ccb(&cgd->ccb_h);
2911 return(1);
2914 static int
2915 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2917 struct cam_path path;
2918 struct ccb_pathinq *cpi;
2919 struct async_node *cur_entry;
2921 cur_entry = (struct async_node *)arg;
2923 xpt_compile_path(&path, /*periph*/NULL,
2924 bus->sim->path_id,
2925 CAM_TARGET_WILDCARD,
2926 CAM_LUN_WILDCARD);
2927 cpi = &xpt_alloc_ccb()->cpi;
2928 xpt_setup_ccb(&cpi->ccb_h, &path, /*priority*/1);
2929 cpi->ccb_h.func_code = XPT_PATH_INQ;
2930 xpt_action((union ccb *)cpi);
2931 cur_entry->callback(cur_entry->callback_arg,
2932 AC_PATH_REGISTERED,
2933 &path, cpi);
2934 xpt_release_path(&path);
2935 xpt_free_ccb(&cpi->ccb_h);
2937 return(1);
2940 static void
2941 xpt_action_sasync_cb(void *context, int pending)
2943 struct async_node *cur_entry;
2944 struct xpt_task *task;
2945 uint32_t added;
2947 task = (struct xpt_task *)context;
2948 cur_entry = (struct async_node *)task->data1;
2949 added = task->data2;
2951 if ((added & AC_FOUND_DEVICE) != 0) {
2953 * Get this peripheral up to date with all
2954 * the currently existing devices.
2956 xpt_for_all_devices(xptsetasyncfunc, cur_entry);
2958 if ((added & AC_PATH_REGISTERED) != 0) {
2960 * Get this peripheral up to date with all
2961 * the currently existing busses.
2963 xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
2965 kfree(task, M_CAMXPT);
2968 void
2969 xpt_action(union ccb *start_ccb)
2971 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2973 start_ccb->ccb_h.status = CAM_REQ_INPROG;
2975 switch (start_ccb->ccb_h.func_code) {
2976 case XPT_SCSI_IO:
2977 case XPT_TRIM:
2979 struct cam_ed *device;
2980 #ifdef CAMDEBUG
2981 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2982 struct cam_path *path;
2984 path = start_ccb->ccb_h.path;
2985 #endif
2988 * For the sake of compatibility with SCSI-1
2989 * devices that may not understand the identify
2990 * message, we include lun information in the
2991 * second byte of all commands. SCSI-1 specifies
2992 * that luns are a 3 bit value and reserves only 3
2993 * bits for lun information in the CDB. Later
2994 * revisions of the SCSI spec allow for more than 8
2995 * luns, but have deprecated lun information in the
2996 * CDB. So, if the lun won't fit, we must omit it.
2998 * Also be aware that during initial probing for devices,
2999 * the inquiry information is unknown but initialized to 0.
3000 * This means that this code will be exercised while probing
3001 * devices with an ANSI revision greater than 2.
3003 device = start_ccb->ccb_h.path->device;
3004 if (device->protocol_version <= SCSI_REV_2
3005 && start_ccb->ccb_h.target_lun < 8
3006 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
3008 start_ccb->csio.cdb_io.cdb_bytes[1] |=
3009 start_ccb->ccb_h.target_lun << 5;
3011 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
3012 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
3013 scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
3014 &path->device->inq_data),
3015 scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
3016 cdb_str, sizeof(cdb_str))));
3017 /* FALLTHROUGH */
3019 case XPT_TARGET_IO:
3020 case XPT_CONT_TARGET_IO:
3021 start_ccb->csio.sense_resid = 0;
3022 start_ccb->csio.resid = 0;
3023 /* FALLTHROUGH */
3024 case XPT_RESET_DEV:
3025 case XPT_ENG_EXEC:
3027 struct cam_path *path;
3028 struct cam_sim *sim;
3029 int runq;
3031 path = start_ccb->ccb_h.path;
3033 sim = path->bus->sim;
3034 if (sim == &cam_dead_sim) {
3035 /* The SIM has gone; just execute the CCB directly. */
3036 cam_ccbq_send_ccb(&path->device->ccbq, start_ccb);
3037 (*(sim->sim_action))(sim, start_ccb);
3038 break;
3041 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
3042 if (path->device->qfrozen_cnt == 0)
3043 runq = xpt_schedule_dev_sendq(path->bus, path->device);
3044 else
3045 runq = 0;
3046 if (runq != 0)
3047 xpt_run_dev_sendq(path->bus);
3048 break;
3050 case XPT_SET_TRAN_SETTINGS:
3052 xpt_set_transfer_settings(&start_ccb->cts,
3053 start_ccb->ccb_h.path->device,
3054 /*async_update*/FALSE);
3055 break;
3057 case XPT_CALC_GEOMETRY:
3059 struct cam_sim *sim;
3061 /* Filter out garbage */
3062 if (start_ccb->ccg.block_size == 0
3063 || start_ccb->ccg.volume_size == 0) {
3064 start_ccb->ccg.cylinders = 0;
3065 start_ccb->ccg.heads = 0;
3066 start_ccb->ccg.secs_per_track = 0;
3067 start_ccb->ccb_h.status = CAM_REQ_CMP;
3068 break;
3070 sim = start_ccb->ccb_h.path->bus->sim;
3071 (*(sim->sim_action))(sim, start_ccb);
3072 break;
3074 case XPT_ABORT:
3076 union ccb* abort_ccb;
3078 abort_ccb = start_ccb->cab.abort_ccb;
3079 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
3081 if (abort_ccb->ccb_h.pinfo.index >= 0) {
3082 struct cam_ccbq *ccbq;
3084 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
3085 cam_ccbq_remove_ccb(ccbq, abort_ccb);
3086 abort_ccb->ccb_h.status =
3087 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3088 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3089 xpt_done(abort_ccb);
3090 start_ccb->ccb_h.status = CAM_REQ_CMP;
3091 break;
3093 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
3094 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
3096 * We've caught this ccb en route to
3097 * the SIM. Flag it for abort and the
3098 * SIM will do so just before starting
3099 * real work on the CCB.
3101 abort_ccb->ccb_h.status =
3102 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3103 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3104 start_ccb->ccb_h.status = CAM_REQ_CMP;
3105 break;
3108 if (XPT_FC_IS_QUEUED(abort_ccb)
3109 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
3111 * It's already completed but waiting
3112 * for our SWI to get to it.
3114 start_ccb->ccb_h.status = CAM_UA_ABORT;
3115 break;
3118 * If we weren't able to take care of the abort request
3119 * in the XPT, pass the request down to the SIM for processing.
3121 /* FALLTHROUGH */
3123 case XPT_ACCEPT_TARGET_IO:
3124 case XPT_EN_LUN:
3125 case XPT_IMMED_NOTIFY:
3126 case XPT_NOTIFY_ACK:
3127 case XPT_GET_TRAN_SETTINGS:
3128 case XPT_RESET_BUS:
3130 struct cam_sim *sim;
3132 sim = start_ccb->ccb_h.path->bus->sim;
3133 (*(sim->sim_action))(sim, start_ccb);
3134 break;
3136 case XPT_PATH_INQ:
3138 struct cam_sim *sim;
3140 sim = start_ccb->ccb_h.path->bus->sim;
3141 (*(sim->sim_action))(sim, start_ccb);
3142 break;
3144 case XPT_PATH_STATS:
3145 start_ccb->cpis.last_reset =
3146 start_ccb->ccb_h.path->bus->last_reset;
3147 start_ccb->ccb_h.status = CAM_REQ_CMP;
3148 break;
3149 case XPT_GDEV_TYPE:
3151 struct cam_ed *dev;
3153 dev = start_ccb->ccb_h.path->device;
3154 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3155 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3156 } else {
3157 struct ccb_getdev *cgd;
3159 cgd = &start_ccb->cgd;
3160 cgd->inq_data = dev->inq_data;
3161 cgd->ccb_h.status = CAM_REQ_CMP;
3162 cgd->serial_num_len = dev->serial_num_len;
3163 if ((dev->serial_num_len > 0)
3164 && (dev->serial_num != NULL))
3165 bcopy(dev->serial_num, cgd->serial_num,
3166 dev->serial_num_len);
3168 break;
3170 case XPT_GDEV_STATS:
3172 struct cam_ed *dev;
3174 dev = start_ccb->ccb_h.path->device;
3175 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3176 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3177 } else {
3178 struct ccb_getdevstats *cgds;
3179 struct cam_eb *bus;
3180 struct cam_et *tar;
3182 cgds = &start_ccb->cgds;
3183 bus = cgds->ccb_h.path->bus;
3184 tar = cgds->ccb_h.path->target;
3185 cgds->dev_openings = dev->ccbq.dev_openings;
3186 cgds->dev_active = dev->ccbq.dev_active;
3187 cgds->devq_openings = dev->ccbq.devq_openings;
3188 cgds->devq_queued = dev->ccbq.queue.entries;
3189 cgds->held = dev->ccbq.held;
3190 cgds->last_reset = tar->last_reset;
3191 cgds->maxtags = dev->quirk->maxtags;
3192 cgds->mintags = dev->quirk->mintags;
3193 if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3194 cgds->last_reset = bus->last_reset;
3195 cgds->ccb_h.status = CAM_REQ_CMP;
3197 break;
3199 case XPT_GDEVLIST:
3201 struct cam_periph *nperiph;
3202 struct periph_list *periph_head;
3203 struct ccb_getdevlist *cgdl;
3204 u_int i;
3205 struct cam_ed *device;
3206 int found;
3209 found = 0;
3212 * Don't want anyone mucking with our data.
3214 device = start_ccb->ccb_h.path->device;
3215 periph_head = &device->periphs;
3216 cgdl = &start_ccb->cgdl;
3219 * Check and see if the list has changed since the user
3220 * last requested a list member. If so, tell them that the
3221 * list has changed, and therefore they need to start over
3222 * from the beginning.
3224 if ((cgdl->index != 0) &&
3225 (cgdl->generation != device->generation)) {
3226 cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3227 break;
3231 * Traverse the list of peripherals and attempt to find
3232 * the requested peripheral.
3234 for (nperiph = SLIST_FIRST(periph_head), i = 0;
3235 (nperiph != NULL) && (i <= cgdl->index);
3236 nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
3237 if (i == cgdl->index) {
3238 strncpy(cgdl->periph_name,
3239 nperiph->periph_name,
3240 DEV_IDLEN);
3241 cgdl->unit_number = nperiph->unit_number;
3242 found = 1;
3245 if (found == 0) {
3246 cgdl->status = CAM_GDEVLIST_ERROR;
3247 break;
3250 if (nperiph == NULL)
3251 cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3252 else
3253 cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3255 cgdl->index++;
3256 cgdl->generation = device->generation;
3258 cgdl->ccb_h.status = CAM_REQ_CMP;
3259 break;
3261 case XPT_DEV_MATCH:
3263 dev_pos_type position_type;
3264 struct ccb_dev_match *cdm;
3265 int ret;
3267 cdm = &start_ccb->cdm;
3270 * There are two ways of getting at information in the EDT.
3271 * The first way is via the primary EDT tree. It starts
3272 * with a list of busses, then a list of targets on a bus,
3273 * then devices/luns on a target, and then peripherals on a
3274 * device/lun. The "other" way is by the peripheral driver
3275 * lists. The peripheral driver lists are organized by
3276 * peripheral driver (obviously), so it makes sense to
3277 * use the peripheral driver list if the user is looking
3278 * for something like "da1", or all "da" devices. If the
3279 * user is looking for something on a particular bus/target
3280 * or lun, it's generally better to go through the EDT tree.
3283 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3284 position_type = cdm->pos.position_type;
3285 else {
3286 u_int i;
3288 position_type = CAM_DEV_POS_NONE;
3290 for (i = 0; i < cdm->num_patterns; i++) {
3291 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3292 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3293 position_type = CAM_DEV_POS_EDT;
3294 break;
3298 if (cdm->num_patterns == 0)
3299 position_type = CAM_DEV_POS_EDT;
3300 else if (position_type == CAM_DEV_POS_NONE)
3301 position_type = CAM_DEV_POS_PDRV;
3304 switch(position_type & CAM_DEV_POS_TYPEMASK) {
3305 case CAM_DEV_POS_EDT:
3306 ret = xptedtmatch(cdm);
3307 break;
3308 case CAM_DEV_POS_PDRV:
3309 ret = xptperiphlistmatch(cdm);
3310 break;
3311 default:
3312 cdm->status = CAM_DEV_MATCH_ERROR;
3313 break;
3316 if (cdm->status == CAM_DEV_MATCH_ERROR)
3317 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3318 else
3319 start_ccb->ccb_h.status = CAM_REQ_CMP;
3321 break;
3323 case XPT_SASYNC_CB:
3325 struct ccb_setasync *csa;
3326 struct async_node *cur_entry;
3327 struct async_list *async_head;
3328 u_int32_t added;
3330 csa = &start_ccb->csa;
3331 added = csa->event_enable;
3332 async_head = &csa->ccb_h.path->device->asyncs;
3335 * If there is already an entry for us, simply
3336 * update it.
3338 cur_entry = SLIST_FIRST(async_head);
3339 while (cur_entry != NULL) {
3340 if ((cur_entry->callback_arg == csa->callback_arg)
3341 && (cur_entry->callback == csa->callback))
3342 break;
3343 cur_entry = SLIST_NEXT(cur_entry, links);
3346 if (cur_entry != NULL) {
3348 * If the request has no flags set,
3349 * remove the entry.
3351 added &= ~cur_entry->event_enable;
3352 if (csa->event_enable == 0) {
3353 SLIST_REMOVE(async_head, cur_entry,
3354 async_node, links);
3355 atomic_add_int(
3356 &csa->ccb_h.path->device->refcount, -1);
3357 kfree(cur_entry, M_CAMXPT);
3358 } else {
3359 cur_entry->event_enable = csa->event_enable;
3361 } else {
3362 cur_entry = kmalloc(sizeof(*cur_entry), M_CAMXPT,
3363 M_INTWAIT);
3364 cur_entry->event_enable = csa->event_enable;
3365 cur_entry->callback_arg = csa->callback_arg;
3366 cur_entry->callback = csa->callback;
3367 SLIST_INSERT_HEAD(async_head, cur_entry, links);
3368 atomic_add_int(&csa->ccb_h.path->device->refcount, 1);
3372 * Need to decouple this operation via a taskqueue so that
3373 * the locking doesn't become a mess.
3375 if ((added & (AC_FOUND_DEVICE | AC_PATH_REGISTERED)) != 0) {
3376 struct xpt_task *task;
3378 task = kmalloc(sizeof(struct xpt_task), M_CAMXPT,
3379 M_INTWAIT);
3381 TASK_INIT(&task->task, 0, xpt_action_sasync_cb, task);
3382 task->data1 = cur_entry;
3383 task->data2 = added;
3384 taskqueue_enqueue(taskqueue_thread[mycpuid],
3385 &task->task);
3388 start_ccb->ccb_h.status = CAM_REQ_CMP;
3389 break;
3391 case XPT_REL_SIMQ:
3393 struct ccb_relsim *crs;
3394 struct cam_ed *dev;
3396 crs = &start_ccb->crs;
3397 dev = crs->ccb_h.path->device;
3398 if (dev == NULL) {
3400 crs->ccb_h.status = CAM_DEV_NOT_THERE;
3401 break;
3404 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3406 if (INQ_DATA_TQ_ENABLED(&dev->inq_data)) {
3407 /* Don't ever go below one opening */
3408 if (crs->openings > 0) {
3409 xpt_dev_ccbq_resize(crs->ccb_h.path,
3410 crs->openings);
3412 if (bootverbose) {
3413 xpt_print(crs->ccb_h.path,
3414 "tagged openings now %d\n",
3415 crs->openings);
3421 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3423 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3426 * Just extend the old timeout and decrement
3427 * the freeze count so that a single timeout
3428 * is sufficient for releasing the queue.
3430 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3431 callout_stop(&dev->callout);
3432 } else {
3434 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3437 callout_reset(&dev->callout,
3438 (crs->release_timeout * hz) / 1000,
3439 xpt_release_devq_timeout, dev);
3441 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3445 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3447 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3449 * Decrement the freeze count so that a single
3450 * completion is still sufficient to unfreeze
3451 * the queue.
3453 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3454 } else {
3456 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3457 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3461 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3463 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3464 || (dev->ccbq.dev_active == 0)) {
3466 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3467 } else {
3469 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3470 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3474 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3476 xpt_release_devq(crs->ccb_h.path, /*count*/1,
3477 /*run_queue*/TRUE);
3479 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3480 start_ccb->ccb_h.status = CAM_REQ_CMP;
3481 break;
3483 case XPT_SCAN_BUS:
3484 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3485 break;
3486 case XPT_SCAN_LUN:
3487 xpt_scan_lun(start_ccb->ccb_h.path->periph,
3488 start_ccb->ccb_h.path, start_ccb->crcn.flags,
3489 start_ccb);
3490 break;
3491 case XPT_DEBUG: {
3492 #ifdef CAMDEBUG
3493 #ifdef CAM_DEBUG_DELAY
3494 cam_debug_delay = CAM_DEBUG_DELAY;
3495 #endif
3496 cam_dflags = start_ccb->cdbg.flags;
3497 if (cam_dpath != NULL) {
3498 xpt_free_path(cam_dpath);
3499 cam_dpath = NULL;
3502 if (cam_dflags != CAM_DEBUG_NONE) {
3503 if (xpt_create_path(&cam_dpath, xpt_periph,
3504 start_ccb->ccb_h.path_id,
3505 start_ccb->ccb_h.target_id,
3506 start_ccb->ccb_h.target_lun) !=
3507 CAM_REQ_CMP) {
3508 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3509 cam_dflags = CAM_DEBUG_NONE;
3510 } else {
3511 start_ccb->ccb_h.status = CAM_REQ_CMP;
3512 xpt_print(cam_dpath, "debugging flags now %x\n",
3513 cam_dflags);
3515 } else {
3516 cam_dpath = NULL;
3517 start_ccb->ccb_h.status = CAM_REQ_CMP;
3519 #else /* !CAMDEBUG */
3520 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3521 #endif /* CAMDEBUG */
3522 break;
3524 case XPT_NOOP:
3525 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3526 xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3527 start_ccb->ccb_h.status = CAM_REQ_CMP;
3528 break;
3529 default:
3530 case XPT_SDEV_TYPE:
3531 case XPT_TERM_IO:
3532 case XPT_ENG_INQ:
3533 /* XXX Implement */
3534 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3535 break;
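/*
 * Illustrative sketch (not part of this file): registering for async events
 * through the XPT_SASYNC_CB case above.  mycallback and mysoftc are
 * placeholder names; the CCB fields are the ones consumed by that case:
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_PATH_REGISTERED;
 *	csa.callback = mycallback;
 *	csa.callback_arg = mysoftc;
 *	xpt_action((union ccb *)&csa);
 */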
3539 void
3540 xpt_polled_action(union ccb *start_ccb)
3542 u_int32_t timeout;
3543 struct cam_sim *sim;
3544 struct cam_devq *devq;
3545 struct cam_ed *dev;
3547 timeout = start_ccb->ccb_h.timeout;
3548 sim = start_ccb->ccb_h.path->bus->sim;
3549 devq = sim->devq;
3550 dev = start_ccb->ccb_h.path->device;
3552 sim_lock_assert_owned(sim->lock);
3555 * Steal an opening so that no other queued requests
3556 * can get it before us while we simulate interrupts.
3558 dev->ccbq.devq_openings--;
3559 dev->ccbq.dev_openings--;
3561 while(((devq && devq->send_openings <= 0) || dev->ccbq.dev_openings < 0)
3562 && (--timeout > 0)) {
3563 DELAY(1000);
3564 (*(sim->sim_poll))(sim);
3565 camisr_runqueue(sim);
3568 dev->ccbq.devq_openings++;
3569 dev->ccbq.dev_openings++;
3571 if (timeout != 0) {
3572 xpt_action(start_ccb);
3573 while(--timeout > 0) {
3574 (*(sim->sim_poll))(sim);
3575 camisr_runqueue(sim);
3576 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3577 != CAM_REQ_INPROG)
3578 break;
3579 DELAY(1000);
3581 if (timeout == 0) {
3583 * XXX Is it worth adding a sim_timeout entry
3584 * point so we can attempt recovery? If
3585 * this is only used for dumps, I don't think
3586 * it is.
3588 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3590 } else {
3591 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
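/*
 * Illustrative sketch (not part of this file): xpt_polled_action() is meant
 * for contexts such as crash dumps where interrupt delivery cannot be
 * relied on.  A caller fills in a CCB as usual, then polls it to
 * completion; the timeout value here is an assumption:
 *
 *	xpt_setup_ccb(&csio->ccb_h, path, 1);
 *	csio->ccb_h.func_code = XPT_SCSI_IO;
 *	csio->ccb_h.timeout = 60 * 1000;
 *	(fill in the CDB and data buffer as for a normal XPT_SCSI_IO)
 *	xpt_polled_action((union ccb *)csio);
 *	if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
 *		(handle the error, releasing the devq if it was frozen)
 */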
3596 * Schedule a peripheral driver to receive a ccb when its
3597 * target device has space for more transactions.
3599 void
3600 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3602 struct cam_ed *device;
3603 union ccb *work_ccb;
3604 int runq;
3606 sim_lock_assert_owned(perph->sim->lock);
3608 CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3609 device = perph->path->device;
3610 if (periph_is_queued(perph)) {
3611 /* Simply reorder based on new priority */
3612 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3613 (" change priority to %d\n", new_priority));
3614 if (new_priority < perph->pinfo.priority) {
3615 camq_change_priority(&device->drvq,
3616 perph->pinfo.index,
3617 new_priority);
3619 runq = 0;
3620 } else if (perph->path->bus->sim == &cam_dead_sim) {
3621 /* The SIM is gone so just call periph_start directly. */
3622 work_ccb = xpt_get_ccb(perph->path->device);
3623 if (work_ccb == NULL)
3624 return; /* XXX */
3625 xpt_setup_ccb(&work_ccb->ccb_h, perph->path, new_priority);
3626 perph->pinfo.priority = new_priority;
3627 perph->periph_start(perph, work_ccb);
3628 return;
3629 } else {
3630 /* New entry on the queue */
3631 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3632 (" added periph to queue\n"));
3633 perph->pinfo.priority = new_priority;
3634 perph->pinfo.generation = ++device->drvq.generation;
3635 camq_insert(&device->drvq, &perph->pinfo);
3636 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3638 if (runq != 0) {
3639 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3640 (" calling xpt_run_devq\n"));
3641 xpt_run_dev_allocq(perph->path->bus);
3647 * Schedule a device to run on a given queue.
3648 * If the device was inserted as a new entry on the queue,
3649 * return 1 meaning the device queue should be run. If we
3650 * were already queued, implying someone else has already
3651 * started the queue, return 0 so the caller doesn't attempt
3652 * to run the queue.
3654 static int
3655 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3656 u_int32_t new_priority)
3658 int retval;
3659 u_int32_t old_priority;
3661 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3663 old_priority = pinfo->priority;
3666 * Are we already queued?
3668 if (pinfo->index != CAM_UNQUEUED_INDEX) {
3669 /* Simply reorder based on new priority */
3670 if (new_priority < old_priority) {
3671 camq_change_priority(queue, pinfo->index,
3672 new_priority);
3673 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3674 ("changed priority to %d\n",
3675 new_priority));
3677 retval = 0;
3678 } else {
3679 /* New entry on the queue */
3680 if (new_priority < old_priority)
3681 pinfo->priority = new_priority;
3683 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3684 ("Inserting onto queue\n"));
3685 pinfo->generation = ++queue->generation;
3686 camq_insert(queue, pinfo);
3687 retval = 1;
3689 return (retval);
3692 static void
3693 xpt_run_dev_allocq(struct cam_eb *bus)
3695 struct cam_devq *devq;
3697 if ((devq = bus->sim->devq) == NULL) {
3698 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq: NULL devq\n"));
3699 return;
3701 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3703 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3704 (" qfrozen_cnt == 0x%x, entries == %d, "
3705 "openings == %d, active == %d\n",
3706 devq->alloc_queue.qfrozen_cnt,
3707 devq->alloc_queue.entries,
3708 devq->alloc_openings,
3709 devq->alloc_active));
3711 devq->alloc_queue.qfrozen_cnt++;
3712 while ((devq->alloc_queue.entries > 0)
3713 && (devq->alloc_openings > 0)
3714 && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3715 struct cam_ed_qinfo *qinfo;
3716 struct cam_ed *device;
3717 union ccb *work_ccb;
3718 struct cam_periph *drv;
3719 struct camq *drvq;
3721 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3722 CAMQ_HEAD);
3723 device = qinfo->device;
3725 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3726 ("running device %p\n", device));
3728 drvq = &device->drvq;
3730 #ifdef CAMDEBUG
3731 if (drvq->entries <= 0) {
3732 panic("xpt_run_dev_allocq: "
3733 "Device on queue without any work to do");
3735 #endif
3736 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3737 devq->alloc_openings--;
3738 devq->alloc_active++;
3739 drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3740 xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3741 drv->pinfo.priority);
3742 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3743 ("calling periph start\n"));
3744 drv->periph_start(drv, work_ccb);
3745 } else {
3747 * Malloc failure in alloc_ccb
3750 * XXX add us to a list to be run from free_ccb
3751 * if we don't have any ccbs active on this
3752 * device queue, otherwise we may never get run
3753 * again.
3755 break;
3758 if (drvq->entries > 0) {
3759 /* We have more work. Attempt to reschedule */
3760 xpt_schedule_dev_allocq(bus, device);
3763 devq->alloc_queue.qfrozen_cnt--;
3766 static void
3767 xpt_run_dev_sendq(struct cam_eb *bus)
3769 struct cam_devq *devq;
3771 if ((devq = bus->sim->devq) == NULL) {
3772 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq: NULL devq\n"));
3773 return;
3775 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3777 devq->send_queue.qfrozen_cnt++;
3778 while ((devq->send_queue.entries > 0)
3779 && (devq->send_openings > 0)) {
3780 struct cam_ed_qinfo *qinfo;
3781 struct cam_ed *device;
3782 union ccb *work_ccb;
3783 struct cam_sim *sim;
3785 if (devq->send_queue.qfrozen_cnt > 1) {
3786 break;
3789 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3790 CAMQ_HEAD);
3791 device = qinfo->device;
3794 * If the device has been "frozen", don't attempt
3795 * to run it.
3797 if (device->qfrozen_cnt > 0) {
3798 continue;
3801 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3802 ("running device %p\n", device));
3804 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3805 if (work_ccb == NULL) {
3806 kprintf("device on run queue with no ccbs???\n");
3807 continue;
3810 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3812 lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
3813 if (xsoftc.num_highpower <= 0) {
3815 * We got a high power command, but we
3816 * don't have any available slots. Freeze
3817 * the device queue until we have a slot
3818 * available.
3820 device->qfrozen_cnt++;
3821 STAILQ_INSERT_TAIL(&xsoftc.highpowerq,
3822 &work_ccb->ccb_h,
3823 xpt_links.stqe);
3825 lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
3826 continue;
3827 } else {
3829 * Consume a high power slot while
3830 * this ccb runs.
3832 xsoftc.num_highpower--;
3834 lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
3836 devq->active_dev = device;
3837 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3839 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3841 devq->send_openings--;
3842 devq->send_active++;
3844 if (device->ccbq.queue.entries > 0)
3845 xpt_schedule_dev_sendq(bus, device);
3847 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3849 * The client wants to freeze the queue
3850 * after this CCB is sent.
3852 device->qfrozen_cnt++;
3855 /* In Target mode, the peripheral driver knows best... */
3856 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3857 if ((device->inq_flags & SID_CmdQue) != 0
3858 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3859 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3860 else
3862 * Clear this in case of a retried CCB that
3863 * failed due to a rejected tag.
3865 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3869 * Device queues can be shared among multiple sim instances
3870 * that reside on different busses. Use the SIM in the queue
3871 * CCB's path, rather than the one in the bus that was passed
3872 * into this function.
3874 sim = work_ccb->ccb_h.path->bus->sim;
3875 (*(sim->sim_action))(sim, work_ccb);
3877 devq->active_dev = NULL;
3879 devq->send_queue.qfrozen_cnt--;
3883 * This function merges the request-specific fields from the slave ccb into
3884 * the master ccb, keeping the important header fields in the master constant.
3886 void
3887 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3890 * Pull fields that are valid for peripheral drivers to set
3891 * into the master CCB along with the CCB "payload".
3893 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3894 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3895 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3896 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
3897 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3898 sizeof(union ccb) - sizeof(struct ccb_hdr));
3901 void
3902 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3904 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3905 callout_init(ccb_h->timeout_ch);
3906 ccb_h->pinfo.priority = priority;
3907 ccb_h->path = path;
3908 ccb_h->path_id = path->bus->path_id;
3909 if (path->target)
3910 ccb_h->target_id = path->target->target_id;
3911 else
3912 ccb_h->target_id = CAM_TARGET_WILDCARD;
3913 if (path->device) {
3914 ccb_h->target_lun = path->device->lun_id;
3915 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3916 } else {
3917 ccb_h->target_lun = CAM_LUN_WILDCARD;
3919 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3920 ccb_h->flags = 0;
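/*
 * Illustrative sketch (not part of this file): the usual pattern pairs
 * xpt_setup_ccb() with a function code and xpt_action(), as in
 * xptsetasyncfunc() earlier in this file.  A stack CCB is shown here for
 * brevity; this file itself uses xpt_alloc_ccb():
 *
 *	struct ccb_getdev cgd;
 *
 *	xpt_setup_ccb(&cgd.ccb_h, path, 1);
 *	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
 *	xpt_action((union ccb *)&cgd);
 *	if (cgd.ccb_h.status == CAM_REQ_CMP)
 *		(cgd.inq_data is now valid for this device)
 */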
3923 /* Path manipulation functions */
3924 cam_status
3925 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3926 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3928 struct cam_path *path;
3929 cam_status status;
3931 path = kmalloc(sizeof(*path), M_CAMXPT, M_INTWAIT);
3932 status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3933 if (status != CAM_REQ_CMP) {
3934 kfree(path, M_CAMXPT);
3935 path = NULL;
3937 *new_path_ptr = path;
3938 return (status);
3941 cam_status
3942 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
3943 struct cam_periph *periph, path_id_t path_id,
3944 target_id_t target_id, lun_id_t lun_id)
3946 struct cam_path *path;
3947 struct cam_eb *bus = NULL;
3948 cam_status status;
3949 int need_unlock = 0;
3951 path = (struct cam_path *)kmalloc(sizeof(*path), M_CAMXPT, M_WAITOK);
3953 if (path_id != CAM_BUS_WILDCARD) {
3954 bus = xpt_find_bus(path_id);
3955 if (bus != NULL) {
3956 need_unlock = 1;
3957 CAM_SIM_LOCK(bus->sim);
3960 status = xpt_compile_path(path, periph, path_id, target_id, lun_id);
3961 if (need_unlock)
3962 CAM_SIM_UNLOCK(bus->sim);
3963 if (status != CAM_REQ_CMP) {
3964 kfree(path, M_CAMXPT);
3965 path = NULL;
3967 *new_path_ptr = path;
3968 return (status);
3971 static cam_status
3972 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3973 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3975 struct cam_eb *bus;
3976 struct cam_et *target;
3977 struct cam_ed *device;
3978 cam_status status;
3980 status = CAM_REQ_CMP; /* Completed without error */
3981 target = NULL; /* Wildcarded */
3982 device = NULL; /* Wildcarded */
3985 * We will potentially modify the EDT, so block interrupts
3986 * that may attempt to create cam paths.
3988 bus = xpt_find_bus(path_id);
3989 if (bus == NULL) {
3990 status = CAM_PATH_INVALID;
3991 } else {
3992 target = xpt_find_target(bus, target_id);
3993 if (target == NULL) {
3994 /* Create one */
3995 struct cam_et *new_target;
3997 new_target = xpt_alloc_target(bus, target_id);
3998 if (new_target == NULL) {
3999 status = CAM_RESRC_UNAVAIL;
4000 } else {
4001 target = new_target;
4004 if (target != NULL) {
4005 device = xpt_find_device(target, lun_id);
4006 if (device == NULL) {
4007 /* Create one */
4008 struct cam_ed *new_device;
4010 new_device = xpt_alloc_device(bus,
4011 target,
4012 lun_id);
4013 if (new_device == NULL) {
4014 status = CAM_RESRC_UNAVAIL;
4015 } else {
4016 device = new_device;
4023 * Only touch the user's data if we are successful.
4025 if (status == CAM_REQ_CMP) {
4026 new_path->periph = perph;
4027 new_path->bus = bus;
4028 new_path->target = target;
4029 new_path->device = device;
4030 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
4031 } else {
4032 if (device != NULL)
4033 xpt_release_device(bus, target, device);
4034 if (target != NULL)
4035 xpt_release_target(bus, target);
4036 if (bus != NULL)
4037 xpt_release_bus(bus);
4039 return (status);
4042 static void
4043 xpt_release_path(struct cam_path *path)
4045 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
4046 if (path->device != NULL) {
4047 xpt_release_device(path->bus, path->target, path->device);
4048 path->device = NULL;
4050 if (path->target != NULL) {
4051 xpt_release_target(path->bus, path->target);
4052 path->target = NULL;
4054 if (path->bus != NULL) {
4055 xpt_release_bus(path->bus);
4056 path->bus = NULL;
4060 void
4061 xpt_free_path(struct cam_path *path)
4063 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
4064 xpt_release_path(path);
4065 kfree(path, M_CAMXPT);
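/*
 * Illustrative sketch (not part of this file): xpt_create_path() and
 * xpt_free_path() bracket the lifetime of a path, taking and releasing
 * the bus/target/device references compiled above.  The error value
 * returned is an example choice:
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, path_id, target_id,
 *	    lun_id) != CAM_REQ_CMP)
 *		return (ENOMEM);
 *	(use the path)
 *	xpt_free_path(path);
 */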
4070 * Return -1 for failure, 0 for exact match, 1 for match with wildcards
4071 * in path1, 2 for match with wildcards in path2.
4074 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
4076 int retval = 0;
4078 if (path1->bus != path2->bus) {
4079 if (path1->bus->path_id == CAM_BUS_WILDCARD)
4080 retval = 1;
4081 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
4082 retval = 2;
4083 else
4084 return (-1);
4086 if (path1->target != path2->target) {
4087 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
4088 if (retval == 0)
4089 retval = 1;
4090 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
4091 retval = 2;
4092 else
4093 return (-1);
4095 if (path1->device != path2->device) {
4096 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
4097 if (retval == 0)
4098 retval = 1;
4099 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
4100 retval = 2;
4101 else
4102 return (-1);
4104 return (retval);
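/*
 * Illustrative example (not from the original source): comparing a
 * wildcard path for (bus0:*:*) against a fully specified (bus0:3:0)
 * returns 1, since the wildcards are on the path1 side; swapping the
 * arguments returns 2, and paths on different non-wildcard busses
 * return -1.
 */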
4107 void
4108 xpt_print_path(struct cam_path *path)
4111 if (path == NULL)
4112 kprintf("(nopath): ");
4113 else {
4114 if (path->periph != NULL)
4115 kprintf("(%s%d:", path->periph->periph_name,
4116 path->periph->unit_number);
4117 else
4118 kprintf("(noperiph:");
4120 if (path->bus != NULL)
4121 kprintf("%s%d:%d:", path->bus->sim->sim_name,
4122 path->bus->sim->unit_number,
4123 path->bus->sim->bus_id);
4124 else
4125 kprintf("nobus:");
4127 if (path->target != NULL)
4128 kprintf("%d:", path->target->target_id);
4129 else
4130 kprintf("X:");
4132 if (path->device != NULL)
4133 kprintf("%d): ", path->device->lun_id);
4134 else
4135 kprintf("X): ");
4139 void
4140 xpt_print(struct cam_path *path, const char *fmt, ...)
4142 __va_list ap;
4143 xpt_print_path(path);
4144 __va_start(ap, fmt);
4145 kvprintf(fmt, ap);
4146 __va_end(ap);
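/*
 * Format the path into a caller-supplied buffer via an sbuf, using the
 * same layout as xpt_print_path(), and return the formatted length.
 */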
4150 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
4152 struct sbuf sb;
4154 sim_lock_assert_owned(path->bus->sim->lock);
4156 sbuf_new(&sb, str, str_len, 0);
4158 if (path == NULL)
4159 sbuf_printf(&sb, "(nopath): ");
4160 else {
4161 if (path->periph != NULL)
4162 sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
4163 path->periph->unit_number);
4164 else
4165 sbuf_printf(&sb, "(noperiph:");
4167 if (path->bus != NULL)
4168 sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
4169 path->bus->sim->unit_number,
4170 path->bus->sim->bus_id);
4171 else
4172 sbuf_printf(&sb, "nobus:");
4174 if (path->target != NULL)
4175 sbuf_printf(&sb, "%d:", path->target->target_id);
4176 else
4177 sbuf_printf(&sb, "X:");
4179 if (path->device != NULL)
4180 sbuf_printf(&sb, "%d): ", path->device->lun_id);
4181 else
4182 sbuf_printf(&sb, "X): ");
4184 sbuf_finish(&sb);
4186 return(sbuf_len(&sb));
4189 path_id_t
4190 xpt_path_path_id(struct cam_path *path)
4192 sim_lock_assert_owned(path->bus->sim->lock);
4194 return(path->bus->path_id);
4197 target_id_t
4198 xpt_path_target_id(struct cam_path *path)
4200 sim_lock_assert_owned(path->bus->sim->lock);
4202 if (path->target != NULL)
4203 return (path->target->target_id);
4204 else
4205 return (CAM_TARGET_WILDCARD);
4208 lun_id_t
4209 xpt_path_lun_id(struct cam_path *path)
4211 sim_lock_assert_owned(path->bus->sim->lock);
4213 if (path->device != NULL)
4214 return (path->device->lun_id);
4215 else
4216 return (CAM_LUN_WILDCARD);
4219 struct cam_sim *
4220 xpt_path_sim(struct cam_path *path)
4222 return (path->bus->sim);
4225 struct cam_periph*
4226 xpt_path_periph(struct cam_path *path)
4228 sim_lock_assert_owned(path->bus->sim->lock);
4230 return (path->periph);
4233 char *
4234 xpt_path_serialno(struct cam_path *path)
4236 return (path->device->serial_num);
4240 * Release a CAM control block for the caller. Remit the cost of the structure
4241 * to the device referenced by the path. If this device had no 'credits'
4242 * and peripheral drivers have registered async callbacks for this
4243 * notification, call them now.
4245 void
4246 xpt_release_ccb(union ccb *free_ccb)
4248 struct cam_path *path;
4249 struct cam_ed *device;
4250 struct cam_eb *bus;
4251 struct cam_sim *sim;
4253 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
4254 path = free_ccb->ccb_h.path;
4255 device = path->device;
4256 bus = path->bus;
4257 sim = bus->sim;
4259 sim_lock_assert_owned(sim->lock);
4261 cam_ccbq_release_opening(&device->ccbq);
4262 if (sim->ccb_count > sim->max_ccbs) {
4263 xpt_free_ccb(&free_ccb->ccb_h);
4264 sim->ccb_count--;
4265 } else if (sim == &cam_dead_sim) {
4266 xpt_free_ccb(&free_ccb->ccb_h);
4267 } else {
4268 SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h,
4269 xpt_links.sle);
4271 if (sim->devq == NULL) {
4272 return;
4274 sim->devq->alloc_openings++;
4275 sim->devq->alloc_active--;
4276 /* XXX Turn this into an inline function - xpt_run_device?? */
4277 if ((device_is_alloc_queued(device) == 0)
4278 && (device->drvq.entries > 0)) {
4279 xpt_schedule_dev_allocq(bus, device);
4281 if (dev_allocq_is_runnable(sim->devq))
4282 xpt_run_dev_allocq(bus);
4285 /* Functions accessed by SIM drivers */
4288 * A sim structure, listing the SIM entry points and instance
4289 * identification info, is passed to xpt_bus_register to hook the SIM
4290 * into the CAM framework. xpt_bus_register creates a cam_eb entry
4291 * for this new bus and places it in the array of busses and assigns
4292 * it a path_id. The path_id may be influenced by "hard wiring"
4293 * information specified by the user. Once interrupt services are
4294 * available, the bus will be probed.
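/*
 * Usage sketch (illustrative): a SIM driver hands its cam_sim to
 * xpt_bus_register() once it can field actions, and later tears the
 * bus down with xpt_bus_deregister(cam_sim_path(sim)) before freeing
 * the sim.
 */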
4296 int32_t
4297 xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
4299 struct cam_eb *new_bus;
4300 struct cam_eb *old_bus;
4301 struct ccb_pathinq *cpi;
4303 sim_lock_assert_owned(sim->lock);
4305 sim->bus_id = bus;
4306 new_bus = kmalloc(sizeof(*new_bus), M_CAMXPT, M_INTWAIT);
4309 * Must hold topo lock across xptpathid() through installation of
4310 * new_bus to avoid duplication due to SMP races.
4312 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
4313 if (strcmp(sim->sim_name, "xpt") != 0) {
4314 sim->path_id =
4315 xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4318 TAILQ_INIT(&new_bus->et_entries);
4319 new_bus->path_id = sim->path_id;
4320 new_bus->sim = sim;
4321 atomic_add_int(&sim->refcount, 1);
4322 timevalclear(&new_bus->last_reset);
4323 new_bus->flags = 0;
4324 new_bus->refcount = 1; /* Held until a bus_deregister event */
4325 new_bus->generation = 0;
4326 new_bus->counted_to_config = 0;
4327 old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4328 while (old_bus != NULL && old_bus->path_id < new_bus->path_id)
4329 old_bus = TAILQ_NEXT(old_bus, links);
4330 if (old_bus != NULL)
4331 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4332 else
4333 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
4334 xsoftc.bus_generation++;
4335 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
4337 /* Notify interested parties */
4338 if (sim->path_id != CAM_XPT_PATH_ID) {
4339 struct cam_path path;
4341 cpi = &xpt_alloc_ccb()->cpi;
4342 xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
4343 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4344 xpt_setup_ccb(&cpi->ccb_h, &path, /*priority*/1);
4345 cpi->ccb_h.func_code = XPT_PATH_INQ;
4346 xpt_action((union ccb *)cpi);
4347 xpt_async(AC_PATH_REGISTERED, &path, cpi);
4348 xpt_release_path(&path);
4349 xpt_free_ccb(&cpi->ccb_h);
4351 return (CAM_SUCCESS);
4355 * Deregister a bus. We must clean out all transactions pending on the bus.
4356 * This routine is typically called prior to cam_sim_free() (e.g. see
4357 * dev/usbmisc/umass/umass.c)
4359 int32_t
4360 xpt_bus_deregister(path_id_t pathid)
4362 struct cam_path bus_path;
4363 struct cam_et *target;
4364 struct cam_ed *device;
4365 struct cam_ed_qinfo *qinfo;
4366 struct cam_devq *devq;
4367 struct cam_periph *periph;
4368 struct cam_sim *ccbsim;
4369 union ccb *work_ccb;
4370 cam_status status;
4371 int retries = 0;
4373 status = xpt_compile_path(&bus_path, NULL, pathid,
4374 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4375 if (status != CAM_REQ_CMP)
4376 return (status);
4379 * This should clear out all pending requests and timeouts, but
4380 * the ccbs may be queued to a software interrupt.
4382 * XXX AC_LOST_DEVICE does not precisely abort the pending requests,
4383 * and it really ought to.
4385 xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4386 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4389 * Mark the SIM as having been deregistered. This prevents
4390 * certain operations from re-queueing to it, stops new devices
4391 * from being added, etc.
4393 devq = bus_path.bus->sim->devq;
4394 ccbsim = bus_path.bus->sim;
4395 ccbsim->flags |= CAM_SIM_DEREGISTERED;
4397 again:
4399 * Execute any pending operations now.
4401 while ((qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
4402 CAMQ_HEAD)) != NULL ||
4403 (qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
4404 CAMQ_HEAD)) != NULL) {
4405 do {
4406 device = qinfo->device;
4407 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
4408 if (work_ccb != NULL) {
4409 devq->active_dev = device;
4410 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
4411 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
4412 (*(ccbsim->sim_action))(ccbsim, work_ccb);
4415 periph = (struct cam_periph *)camq_remove(&device->drvq,
4416 CAMQ_HEAD);
4417 if (periph != NULL)
4418 xpt_schedule(periph, periph->pinfo.priority);
4419 } while (work_ccb != NULL || periph != NULL);
4423 * Make sure all completed CCBs are processed.
4425 while (!TAILQ_EMPTY(&ccbsim->sim_doneq)) {
4426 camisr_runqueue(ccbsim);
4430 * Check for requeues, reissue asyncs if necessary
4432 if (CAMQ_GET_HEAD(&devq->send_queue))
4433 kprintf("camq: devq send_queue still in use (%d entries)\n",
4434 devq->send_queue.entries);
4435 if (CAMQ_GET_HEAD(&devq->alloc_queue))
4436 kprintf("camq: devq alloc_queue still in use (%d entries)\n",
4437 devq->alloc_queue.entries);
4438 if (CAMQ_GET_HEAD(&devq->send_queue) ||
4439 CAMQ_GET_HEAD(&devq->alloc_queue)) {
4440 if (++retries < 5) {
4441 xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4442 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4443 goto again;
4448 * Retarget the bus and all cached sim pointers to dead_sim.
4450 * Various CAM subsystems may be holding on to targets, devices,
4451 * and/or peripherals and may attempt to use the sim pointer cached
4452 * in some of these structures during close.
4454 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
4455 bus_path.bus->sim = &cam_dead_sim;
4456 TAILQ_FOREACH(target, &bus_path.bus->et_entries, links) {
4457 TAILQ_FOREACH(device, &target->ed_entries, links) {
4458 device->sim = &cam_dead_sim;
4459 SLIST_FOREACH(periph, &device->periphs, periph_links) {
4460 periph->sim = &cam_dead_sim;
4464 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
4467 * Repeat the asyncs for the benefit of any new devices, such as
4468 * might be created from completed probes. Any new device
4469 * ops will run on dead_sim.
4471 * XXX There are probably races :-(
4473 CAM_SIM_LOCK(&cam_dead_sim);
4474 xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4475 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4476 CAM_SIM_UNLOCK(&cam_dead_sim);
4478 /* Release the reference count held while registered. */
4479 xpt_release_bus(bus_path.bus);
4480 xpt_release_path(&bus_path);
4482 /* Release the ref we got when the bus was registered */
4483 cam_sim_release(ccbsim, 0);
4485 return (CAM_REQ_CMP);
4489 * Must be called with xpt_topo_lock held.
4491 static path_id_t
4492 xptnextfreepathid(void)
4494 struct cam_eb *bus;
4495 path_id_t pathid;
4496 const char *strval;
4498 pathid = 0;
4499 bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4500 retry:
4501 /* Find an unoccupied pathid */
4502 while (bus != NULL && bus->path_id <= pathid) {
4503 if (bus->path_id == pathid)
4504 pathid++;
4505 bus = TAILQ_NEXT(bus, links);
4509 * Ensure that this pathid is not reserved for
4510 * a bus that may be registered in the future.
4512 if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4513 ++pathid;
4514 /* Start the search over */
4515 goto retry;
4517 return (pathid);
4521 * Must be called with xpt_topo_lock held.
4523 static path_id_t
4524 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4526 path_id_t pathid;
4527 int i, dunit, val;
4528 char buf[32];
4530 pathid = CAM_XPT_PATH_ID;
4531 ksnprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4532 i = -1;
4533 while ((i = resource_query_string(i, "at", buf)) != -1) {
4534 if (strcmp(resource_query_name(i), "scbus")) {
4535 /* Avoid a bit of foot shooting. */
4536 continue;
4538 dunit = resource_query_unit(i);
4539 if (dunit < 0) /* unwired?! */
4540 continue;
4541 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4542 if (sim_bus == val) {
4543 pathid = dunit;
4544 break;
4546 } else if (sim_bus == 0) {
4547 /* Unspecified matches bus 0 */
4548 pathid = dunit;
4549 break;
4550 } else {
4551 kprintf("Ambiguous scbus configuration for %s%d "
4552 "bus %d, cannot wire down. The kernel "
4553 "config entry for scbus%d should "
4554 "specify a controller bus.\n"
4555 "Scbus will be assigned dynamically.\n",
4556 sim_name, sim_unit, sim_bus, dunit);
4557 break;
4561 if (pathid == CAM_XPT_PATH_ID)
4562 pathid = xptnextfreepathid();
4563 return (pathid);
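/*
 * Example of the wiring these lookups parse (hypothetical config): a
 * kernel config hint such as "scbus0 at ahc0 bus 0" reserves path id 0
 * for ahc0's first bus, so xptpathid() returns 0 for that SIM and
 * xptnextfreepathid() skips it.
 */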
4566 void
4567 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4569 struct cam_eb *bus;
4570 struct cam_et *target, *next_target;
4571 struct cam_ed *device, *next_device;
4573 sim_lock_assert_owned(path->bus->sim->lock);
4575 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
4578 * Most async events come from a CAM interrupt context. In
4579 * a few cases, the error recovery code at the peripheral layer,
4580 * which may run from our SWI or a process context, may signal
4581 * deferred events with a call to xpt_async.
4584 bus = path->bus;
4586 if (async_code == AC_BUS_RESET) {
4587 /* Update our notion of when the last reset occurred */
4588 microuptime(&bus->last_reset);
4591 for (target = TAILQ_FIRST(&bus->et_entries);
4592 target != NULL;
4593 target = next_target) {
4595 next_target = TAILQ_NEXT(target, links);
4597 if (path->target != target
4598 && path->target->target_id != CAM_TARGET_WILDCARD
4599 && target->target_id != CAM_TARGET_WILDCARD)
4600 continue;
4602 if (async_code == AC_SENT_BDR) {
4603 /* Update our notion of when the last reset occurred */
4604 microuptime(&path->target->last_reset);
4607 for (device = TAILQ_FIRST(&target->ed_entries);
4608 device != NULL;
4609 device = next_device) {
4611 next_device = TAILQ_NEXT(device, links);
4613 if (path->device != device
4614 && path->device->lun_id != CAM_LUN_WILDCARD
4615 && device->lun_id != CAM_LUN_WILDCARD)
4616 continue;
4618 xpt_dev_async(async_code, bus, target,
4619 device, async_arg);
4621 xpt_async_bcast(&device->asyncs, async_code,
4622 path, async_arg);
4627 * If this wasn't a fully wildcarded async, tell all
4628 * clients that want all async events.
4630 if (bus != xpt_periph->path->bus)
4631 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4632 path, async_arg);
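/*
 * Walk an async callback list, invoking each callback whose event mask
 * includes async_code.
 */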
4635 static void
4636 xpt_async_bcast(struct async_list *async_head,
4637 u_int32_t async_code,
4638 struct cam_path *path, void *async_arg)
4640 struct async_node *cur_entry;
4642 cur_entry = SLIST_FIRST(async_head);
4643 while (cur_entry != NULL) {
4644 struct async_node *next_entry;
4646 * Grab the next list entry before we call the current
4647 * entry's callback. This is because the callback function
4648 * can delete its async callback entry.
4650 next_entry = SLIST_NEXT(cur_entry, links);
4651 if ((cur_entry->event_enable & async_code) != 0)
4652 cur_entry->callback(cur_entry->callback_arg,
4653 async_code, path,
4654 async_arg);
4655 cur_entry = next_entry;
4660 * Handle any per-device event notifications that require action by the XPT.
4662 static void
4663 xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
4664 struct cam_ed *device, void *async_arg)
4666 cam_status status;
4667 struct cam_path newpath;
4670 * We only need to handle events for real devices.
4672 if (target->target_id == CAM_TARGET_WILDCARD
4673 || device->lun_id == CAM_LUN_WILDCARD)
4674 return;
4677 * We need our own path with wildcards expanded to
4678 * handle certain types of events.
4680 if ((async_code == AC_SENT_BDR)
4681 || (async_code == AC_BUS_RESET)
4682 || (async_code == AC_INQ_CHANGED))
4683 status = xpt_compile_path(&newpath, NULL,
4684 bus->path_id,
4685 target->target_id,
4686 device->lun_id);
4687 else
4688 status = CAM_REQ_CMP_ERR;
4690 if (status == CAM_REQ_CMP) {
4693 * Allow transfer negotiation to occur in a
4694 * tag free environment.
4696 if (async_code == AC_SENT_BDR
4697 || async_code == AC_BUS_RESET)
4698 xpt_toggle_tags(&newpath);
4700 if (async_code == AC_INQ_CHANGED) {
4702 * We've sent a start unit command, or
4703 * something similar to a device that
4704 * may have caused its inquiry data to
4705 * change. So we re-scan the device to
4706 * refresh the inquiry data for it.
4708 xpt_scan_lun(newpath.periph, &newpath,
4709 CAM_EXPECT_INQ_CHANGE, NULL);
4711 xpt_release_path(&newpath);
4712 } else if (async_code == AC_LOST_DEVICE) {
4714 * When we lose a device the device may be about to detach
4715 * the sim, we have to clear out all pending timeouts and
4716 * requests before that happens.
4718 * This typically happens most often with USB/UMASS devices.
4720 * XXX it would be nice if we could abort the requests
4721 * pertaining to the device.
4723 xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
4724 if ((device->flags & CAM_DEV_UNCONFIGURED) == 0) {
4725 device->flags |= CAM_DEV_UNCONFIGURED;
4726 xpt_release_device(bus, target, device);
4728 } else if (async_code == AC_TRANSFER_NEG) {
4729 struct ccb_trans_settings *settings;
4731 settings = (struct ccb_trans_settings *)async_arg;
4732 xpt_set_transfer_settings(settings, device,
4733 /*async_update*/TRUE);
4737 u_int32_t
4738 xpt_freeze_devq(struct cam_path *path, u_int count)
4740 struct ccb_hdr *ccbh;
4742 sim_lock_assert_owned(path->bus->sim->lock);
4744 path->device->qfrozen_cnt += count;
4747 * Mark the last CCB in the queue as needing
4748 * to be requeued if the driver hasn't
4749 * changed its state yet. This fixes a race
4750 * where a ccb is just about to be queued to
4751 * a controller driver when its interrupt routine
4752 * freezes the queue. To completely close the
4753 * hole, controller drivers must check to see
4754 * if a ccb's status is still CAM_REQ_INPROG
4755 * just before they queue
4756 * the CCB. See ahc_action/ahc_freeze_devq for
4757 * an example.
4759 ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
4760 if (ccbh && ccbh->status == CAM_REQ_INPROG)
4761 ccbh->status = CAM_REQUEUE_REQ;
4762 return (path->device->qfrozen_cnt);
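/*
 * Freeze the SIM's send queue.  As in xpt_freeze_devq(), the in-progress
 * CCB of the active device, if any, is flagged for requeue to close the
 * race with a driver that is just about to start it.
 */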
4765 u_int32_t
4766 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4768 sim_lock_assert_owned(sim->lock);
4770 if (sim->devq == NULL)
4771 return(count);
4772 sim->devq->send_queue.qfrozen_cnt += count;
4773 if (sim->devq->active_dev != NULL) {
4774 struct ccb_hdr *ccbh;
4776 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
4777 ccb_hdr_tailq);
4778 if (ccbh && ccbh->status == CAM_REQ_INPROG)
4779 ccbh->status = CAM_REQUEUE_REQ;
4781 return (sim->devq->send_queue.qfrozen_cnt);
4785 * Release the device queue after a timeout has expired, typically used to
4786 * introduce a delay before retrying after an I/O error or other problem.
4788 static void
4789 xpt_release_devq_timeout(void *arg)
4791 struct cam_ed *device;
4793 device = (struct cam_ed *)arg;
4794 CAM_SIM_LOCK(device->sim);
4795 xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
4796 CAM_SIM_UNLOCK(device->sim);
4799 void
4800 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4802 sim_lock_assert_owned(path->bus->sim->lock);
4804 xpt_release_devq_device(path->device, count, run_queue);
4807 static void
4808 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4810 int rundevq;
4812 rundevq = 0;
4814 if (dev->qfrozen_cnt > 0) {
4816 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
4817 dev->qfrozen_cnt -= count;
4818 if (dev->qfrozen_cnt == 0) {
4821 * No longer need to wait for a successful
4822 * command completion.
4824 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4827 * Remove any timeouts that might be scheduled
4828 * to release this queue.
4830 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4831 callout_stop(&dev->callout);
4832 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4836 * Now that we are unfrozen schedule the
4837 * device so any pending transactions are
4838 * run.
4840 if ((dev->ccbq.queue.entries > 0)
4841 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4842 && (run_queue != 0)) {
4843 rundevq = 1;
4847 if (rundevq != 0)
4848 xpt_run_dev_sendq(dev->target->bus);
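/*
 * Release one freeze count on the SIM's send queue, cancelling any
 * timed release that is pending and, when requested, kicking the send
 * queue once the count reaches zero.
 */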
4851 void
4852 xpt_release_simq(struct cam_sim *sim, int run_queue)
4854 struct camq *sendq;
4856 sim_lock_assert_owned(sim->lock);
4858 if (sim->devq == NULL)
4859 return;
4861 sendq = &(sim->devq->send_queue);
4862 if (sendq->qfrozen_cnt > 0) {
4863 sendq->qfrozen_cnt--;
4864 if (sendq->qfrozen_cnt == 0) {
4865 struct cam_eb *bus;
4868 * If there is a timeout scheduled to release this
4869 * sim queue, remove it. The queue frozen count is
4870 * already at 0.
4872 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4873 callout_stop(&sim->callout);
4874 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4876 bus = xpt_find_bus(sim->path_id);
4878 if (run_queue) {
4880 * Now that we are unfrozen run the send queue.
4882 xpt_run_dev_sendq(bus);
4884 xpt_release_bus(bus);
4889 void
4890 xpt_done(union ccb *done_ccb)
4892 struct cam_sim *sim;
4894 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4895 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4897 * Queue up the request for handling by our SWI handler;
4898 * this covers any of the "non-immediate" types of ccbs.
4900 sim = done_ccb->ccb_h.path->bus->sim;
4901 switch (done_ccb->ccb_h.path->periph->type) {
4902 case CAM_PERIPH_BIO:
4903 spin_lock(&sim->sim_spin);
4904 TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h,
4905 sim_links.tqe);
4906 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4907 spin_unlock(&sim->sim_spin);
4908 if ((sim->flags & CAM_SIM_ON_DONEQ) == 0) {
4909 spin_lock(&cam_simq_spin);
4910 if ((sim->flags & CAM_SIM_ON_DONEQ) == 0) {
4911 TAILQ_INSERT_TAIL(&cam_simq, sim,
4912 links);
4913 sim->flags |= CAM_SIM_ON_DONEQ;
4915 spin_unlock(&cam_simq_spin);
4917 if ((done_ccb->ccb_h.flags & CAM_POLLED) == 0)
4918 setsoftcambio();
4919 break;
4920 default:
4921 panic("unknown periph type %d",
4922 done_ccb->ccb_h.path->periph->type);
4927 union ccb *
4928 xpt_alloc_ccb(void)
4930 union ccb *new_ccb;
4932 new_ccb = kmalloc(sizeof(*new_ccb), M_CAMXPT, M_INTWAIT | M_ZERO);
4933 new_ccb->ccb_h.timeout_ch = kmalloc(sizeof(struct callout), M_CAMXPT,
4934 M_INTWAIT | M_ZERO);
4936 return (new_ccb);
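/*
 * Free a CCB allocated by xpt_alloc_ccb(), including the callout
 * storage hung off timeout_ch.
 */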
4939 void
4940 xpt_free_ccb(struct ccb_hdr *free_ccb)
4942 KKASSERT(free_ccb->timeout_ch != NULL);
4943 kfree(free_ccb->timeout_ch, M_CAMXPT);
4944 free_ccb->timeout_ch = NULL;
4945 kfree(free_ccb, M_CAMXPT);
4948 /* Private XPT functions */
4951 * Get a CAM control block for the caller. Charge the structure to the device
4952 * referenced by the path. If this device has no 'credits' then the
4953 * device already has the maximum number of outstanding operations under way
4954 * and we return NULL. If we don't have sufficient resources to allocate more
4955 * ccbs, we also return NULL.
4957 static union ccb *
4958 xpt_get_ccb(struct cam_ed *device)
4960 union ccb *new_ccb;
4961 struct cam_sim *sim;
4963 sim = device->sim;
4964 if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) {
4965 new_ccb = xpt_alloc_ccb();
4966 if ((sim->flags & CAM_SIM_MPSAFE) == 0)
4967 callout_init(new_ccb->ccb_h.timeout_ch);
4968 SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h,
4969 xpt_links.sle);
4970 sim->ccb_count++;
4972 cam_ccbq_take_opening(&device->ccbq);
4973 SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
4974 return (new_ccb);
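/*
 * Drop a reference on a bus.  The atomic_cmpset loop lets the common
 * case avoid the topology lock; only the transition to a zero count
 * takes xpt_topo_lock so the bus can be unlinked and freed safely.
 */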
4977 static void
4978 xpt_release_bus(struct cam_eb *bus)
4980 for (;;) {
4981 int count = bus->refcount;
4983 cpu_ccfence();
4984 if (count == 1) {
4985 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
4986 if (atomic_cmpset_int(&bus->refcount, 1, 0)) {
4987 if (TAILQ_EMPTY(&bus->et_entries)) {
4988 TAILQ_REMOVE(&xsoftc.xpt_busses,
4989 bus, links);
4990 xsoftc.bus_generation++;
4991 kfree(bus, M_CAMXPT);
4993 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
4994 return;
4996 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
4997 } else {
4998 if (atomic_cmpset_int(&bus->refcount, count, count-1)) {
4999 return;
5005 static struct cam_et *
5006 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
5008 struct cam_et *target;
5009 struct cam_et *cur_target;
5011 target = kmalloc(sizeof(*target), M_CAMXPT, M_INTWAIT);
5013 TAILQ_INIT(&target->ed_entries);
5014 target->bus = bus;
5015 target->target_id = target_id;
5016 target->refcount = 1;
5017 target->generation = 0;
5018 timevalclear(&target->last_reset);
5021 * Hold a reference to our parent bus so it
5022 * will not go away before we do.
5024 atomic_add_int(&bus->refcount, 1);
5026 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
5027 /* Insertion sort into our bus's target list */
5028 cur_target = TAILQ_FIRST(&bus->et_entries);
5029 while (cur_target != NULL && cur_target->target_id < target_id)
5030 cur_target = TAILQ_NEXT(cur_target, links);
5032 if (cur_target != NULL) {
5033 TAILQ_INSERT_BEFORE(cur_target, target, links);
5034 } else {
5035 TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
5037 bus->generation++;
5038 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
5040 return (target);
5043 static void
5044 xpt_release_target(struct cam_eb *bus, struct cam_et *target)
5046 for (;;) {
5047 int count = target->refcount;
5049 cpu_ccfence();
5050 if (count == 1) {
5051 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
5052 if (atomic_cmpset_int(&target->refcount, 1, 0)) {
5053 KKASSERT(TAILQ_EMPTY(&target->ed_entries));
5054 TAILQ_REMOVE(&bus->et_entries, target, links);
5055 bus->generation++;
5056 kfree(target, M_CAMXPT);
5057 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
5058 xpt_release_bus(bus);
5059 return;
5061 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
5062 } else {
5063 if (atomic_cmpset_int(&target->refcount,
5064 count, count - 1)) {
5065 return;
5071 static struct cam_ed *
5072 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
5074 struct cam_path path;
5075 struct cam_ed *device;
5076 struct cam_devq *devq;
5077 cam_status status;
5080 * Disallow new devices while trying to deregister a sim
5082 if (bus->sim->flags & CAM_SIM_DEREGISTERED)
5083 return (NULL);
5086 * Make space for us in the device queue on our bus
5088 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
5089 devq = bus->sim->devq;
5090 if (devq == NULL) {
5091 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
5092 return(NULL);
5094 status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
5095 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
5097 if (status != CAM_REQ_CMP) {
5098 device = NULL;
5099 } else {
5100 device = kmalloc(sizeof(*device), M_CAMXPT, M_INTWAIT);
5103 if (device != NULL) {
5104 struct cam_ed *cur_device;
5106 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
5107 device->alloc_ccb_entry.device = device;
5108 cam_init_pinfo(&device->send_ccb_entry.pinfo);
5109 device->send_ccb_entry.device = device;
5110 device->target = target;
5111 device->lun_id = lun_id;
5112 device->sim = bus->sim;
5113 /* Initialize our queues */
5114 if (camq_init(&device->drvq, 0) != 0) {
5115 kfree(device, M_CAMXPT);
5116 return (NULL);
5118 if (cam_ccbq_init(&device->ccbq,
5119 bus->sim->max_dev_openings) != 0) {
5120 camq_fini(&device->drvq);
5121 kfree(device, M_CAMXPT);
5122 return (NULL);
5124 SLIST_INIT(&device->asyncs);
5125 SLIST_INIT(&device->periphs);
5126 device->generation = 0;
5127 device->owner = NULL;
5129 * Take the default quirk entry until we have inquiry
5130 * data and can determine a better quirk to use.
5132 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
5133 bzero(&device->inq_data, sizeof(device->inq_data));
5134 device->inq_flags = 0;
5135 device->queue_flags = 0;
5136 device->serial_num = NULL;
5137 device->serial_num_len = 0;
5138 device->qfrozen_cnt = 0;
5139 device->flags = CAM_DEV_UNCONFIGURED;
5140 device->tag_delay_count = 0;
5141 device->tag_saved_openings = 0;
5142 device->refcount = 1;
5143 callout_init(&device->callout);
5146 * Hold a reference to our parent target so it
5147 * will not go away before we do.
5149 atomic_add_int(&target->refcount, 1);
5152 * XXX should be limited by number of CCBs this bus can
5153 * do.
5155 bus->sim->max_ccbs += device->ccbq.devq_openings;
5156 /* Insertion sort into our target's device list */
5157 cur_device = TAILQ_FIRST(&target->ed_entries);
5158 while (cur_device != NULL && cur_device->lun_id < lun_id)
5159 cur_device = TAILQ_NEXT(cur_device, links);
5160 if (cur_device != NULL) {
5161 TAILQ_INSERT_BEFORE(cur_device, device, links);
5162 } else {
5163 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
5165 target->generation++;
5166 if (lun_id != CAM_LUN_WILDCARD) {
5167 xpt_compile_path(&path,
5168 NULL,
5169 bus->path_id,
5170 target->target_id,
5171 lun_id);
5172 xpt_devise_transport(&path);
5173 xpt_release_path(&path);
5176 return (device);
5179 static void
5180 xpt_reference_device(struct cam_ed *device)
5182 atomic_add_int(&device->refcount, 1);
5185 static void
5186 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
5187 struct cam_ed *device)
5189 struct cam_devq *devq;
5191 for (;;) {
5192 int count = device->refcount;
5194 if (count == 1) {
5195 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
5196 if (atomic_cmpset_int(&device->refcount, 1, 0)) {
5197 KKASSERT(device->flags & CAM_DEV_UNCONFIGURED);
5198 if (device->alloc_ccb_entry.pinfo.index !=
5199 CAM_UNQUEUED_INDEX ||
5200 device->send_ccb_entry.pinfo.index !=
5201 CAM_UNQUEUED_INDEX) {
5202 panic("Removing device while "
5203 "still queued for ccbs");
5205 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
5206 device->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
5207 callout_stop(&device->callout);
5209 TAILQ_REMOVE(&target->ed_entries, device, links);
5210 target->generation++;
5211 bus->sim->max_ccbs -= device->ccbq.devq_openings;
5212 if ((devq = bus->sim->devq) != NULL) {
5213 /* Release our slot in the devq */
5214 cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
5216 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
5218 camq_fini(&device->drvq);
5219 camq_fini(&device->ccbq.queue);
5220 xpt_release_target(bus, target);
5221 kfree(device, M_CAMXPT);
5222 return;
5224 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
5225 } else {
5226 if (atomic_cmpset_int(&device->refcount,
5227 count, count - 1)) {
5228 return;
5234 static u_int32_t
5235 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
5237 int diff;
5238 int result;
5239 struct cam_ed *dev;
5241 dev = path->device;
5243 diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
5244 result = cam_ccbq_resize(&dev->ccbq, newopenings);
5245 if (result == CAM_REQ_CMP && (diff < 0)) {
5246 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
5248 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5249 || (dev->inq_flags & SID_CmdQue) != 0)
5250 dev->tag_saved_openings = newopenings;
5251 /* Adjust the global limit */
5252 dev->sim->max_ccbs += diff;
5253 return (result);
5256 static struct cam_eb *
5257 xpt_find_bus(path_id_t path_id)
5259 struct cam_eb *bus;
5261 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
5262 TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
5263 if (bus->path_id == path_id) {
5264 atomic_add_int(&bus->refcount, 1);
5265 break;
5268 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
5270 return (bus);
5273 static struct cam_et *
5274 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
5276 struct cam_et *target;
5278 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
5279 TAILQ_FOREACH(target, &bus->et_entries, links) {
5280 if (target->target_id == target_id) {
5281 atomic_add_int(&target->refcount, 1);
5282 break;
5285 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
5287 return (target);
5290 static struct cam_ed *
5291 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
5293 struct cam_ed *device;
5295 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
5296 TAILQ_FOREACH(device, &target->ed_entries, links) {
5297 if (device->lun_id == lun_id) {
5298 atomic_add_int(&device->refcount, 1);
5299 break;
5302 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
5304 return (device);
5307 typedef struct {
5308 union ccb *request_ccb;
5309 struct ccb_pathinq *cpi;
5310 int counter;
5311 } xpt_scan_bus_info;
5314 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
5315 * As the scan progresses, xpt_scan_bus is used as the
5316 * callback on completion function.
5318 static void
5319 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
5321 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5322 ("xpt_scan_bus\n"));
5323 switch (request_ccb->ccb_h.func_code) {
5324 case XPT_SCAN_BUS:
5326 xpt_scan_bus_info *scan_info;
5327 union ccb *work_ccb;
5328 struct cam_path *path;
5329 u_int i;
5330 u_int max_target;
5331 u_int initiator_id;
5333 /* Find out the characteristics of the bus */
5334 work_ccb = xpt_alloc_ccb();
5335 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
5336 request_ccb->ccb_h.pinfo.priority);
5337 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
5338 xpt_action(work_ccb);
5339 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
5340 request_ccb->ccb_h.status = work_ccb->ccb_h.status;
5341 xpt_free_ccb(&work_ccb->ccb_h);
5342 xpt_done(request_ccb);
5343 return;
5346 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5348 * Can't scan the bus on an adapter that
5349 * cannot perform the initiator role.
5351 request_ccb->ccb_h.status = CAM_REQ_CMP;
5352 xpt_free_ccb(&work_ccb->ccb_h);
5353 xpt_done(request_ccb);
5354 return;
5357 /* Save some state for use while we probe for devices */
5358 scan_info = (xpt_scan_bus_info *)
5359 kmalloc(sizeof(xpt_scan_bus_info), M_CAMXPT, M_INTWAIT);
5360 scan_info->request_ccb = request_ccb;
5361 scan_info->cpi = &work_ccb->cpi;
5363 /* Cache on our stack so we can work asynchronously */
5364 max_target = scan_info->cpi->max_target;
5365 initiator_id = scan_info->cpi->initiator_id;
5369 * We can scan all targets in parallel, or do it sequentially.
5371 if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
5372 max_target = 0;
5373 scan_info->counter = 0;
5374 } else {
5375 scan_info->counter = scan_info->cpi->max_target + 1;
5376 if (scan_info->cpi->initiator_id < scan_info->counter) {
5377 scan_info->counter--;
5381 for (i = 0; i <= max_target; i++) {
5382 cam_status status;
5383 if (i == initiator_id)
5384 continue;
5386 status = xpt_create_path(&path, xpt_periph,
5387 request_ccb->ccb_h.path_id,
5388 i, 0);
5389 if (status != CAM_REQ_CMP) {
5390 kprintf("xpt_scan_bus: xpt_create_path failed"
5391 " with status %#x, bus scan halted\n",
5392 status);
5393 kfree(scan_info, M_CAMXPT);
5394 request_ccb->ccb_h.status = status;
5395 xpt_free_ccb(&work_ccb->ccb_h);
5396 xpt_done(request_ccb);
5397 break;
5399 work_ccb = xpt_alloc_ccb();
5400 xpt_setup_ccb(&work_ccb->ccb_h, path,
5401 request_ccb->ccb_h.pinfo.priority);
5402 work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5403 work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5404 work_ccb->ccb_h.ppriv_ptr0 = scan_info;
5405 work_ccb->crcn.flags = request_ccb->crcn.flags;
5406 xpt_action(work_ccb);
5408 break;
5410 case XPT_SCAN_LUN:
5412 cam_status status;
5413 struct cam_path *path;
5414 xpt_scan_bus_info *scan_info;
5415 path_id_t path_id;
5416 target_id_t target_id;
5417 lun_id_t lun_id;
5419 /* Reuse the same CCB to query if a device was really found */
5420 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
5421 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
5422 request_ccb->ccb_h.pinfo.priority);
5423 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5425 path_id = request_ccb->ccb_h.path_id;
5426 target_id = request_ccb->ccb_h.target_id;
5427 lun_id = request_ccb->ccb_h.target_lun;
5428 xpt_action(request_ccb);
5430 if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
5431 struct cam_ed *device;
5432 struct cam_et *target;
5433 int phl;
5436 * If we already probed lun 0 successfully, or
5437 * we have additional configured luns on this
5438 * target that might have "gone away", go onto
5439 * the next lun.
5441 target = request_ccb->ccb_h.path->target;
5443 * We may touch devices that we don't
5444 * hold references to, so ensure they
5445 * don't disappear out from under us.
5446 * The target above is referenced by the
5447 * path in the request ccb.
5449 phl = 0;
5450 device = TAILQ_FIRST(&target->ed_entries);
5451 if (device != NULL) {
5452 phl = CAN_SRCH_HI_SPARSE(device);
5453 if (device->lun_id == 0)
5454 device = TAILQ_NEXT(device, links);
5456 if ((lun_id != 0) || (device != NULL)) {
5457 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
5458 lun_id++;
5460 } else {
5461 struct cam_ed *device;
5463 device = request_ccb->ccb_h.path->device;
5465 if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
5466 /* Try the next lun */
5467 if (lun_id < (CAM_SCSI2_MAXLUN-1)
5468 || CAN_SRCH_HI_DENSE(device))
5469 lun_id++;
5474 * Free the current request path - we're done with it.
5476 xpt_free_path(request_ccb->ccb_h.path);
5479 * Check to see whether we should scan any further luns.
5481 if (lun_id == request_ccb->ccb_h.target_lun
5482 || lun_id > scan_info->cpi->max_lun) {
5483 int done;
5485 hop_again:
5486 done = 0;
5487 if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
5488 scan_info->counter++;
5489 if (scan_info->counter ==
5490 scan_info->cpi->initiator_id) {
5491 scan_info->counter++;
5493 if (scan_info->counter >=
5494 scan_info->cpi->max_target+1) {
5495 done = 1;
5497 } else {
5498 scan_info->counter--;
5499 if (scan_info->counter == 0) {
5500 done = 1;
5503 if (done) {
5504 xpt_free_ccb(&request_ccb->ccb_h);
5505 xpt_free_ccb(&scan_info->cpi->ccb_h);
5506 request_ccb = scan_info->request_ccb;
5507 kfree(scan_info, M_CAMXPT);
5508 request_ccb->ccb_h.status = CAM_REQ_CMP;
5509 xpt_done(request_ccb);
5510 break;
5513 if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) {
5514 break;
5516 status = xpt_create_path(&path, xpt_periph,
5517 scan_info->request_ccb->ccb_h.path_id,
5518 scan_info->counter, 0);
5519 if (status != CAM_REQ_CMP) {
5520 kprintf("xpt_scan_bus: xpt_create_path failed"
5521 " with status %#x, bus scan halted\n",
5522 status);
5523 xpt_free_ccb(&request_ccb->ccb_h);
5524 xpt_free_ccb(&scan_info->cpi->ccb_h);
5525 request_ccb = scan_info->request_ccb;
5526 kfree(scan_info, M_CAMXPT);
5527 request_ccb->ccb_h.status = status;
5528 xpt_done(request_ccb);
5529 break;
5531 xpt_setup_ccb(&request_ccb->ccb_h, path,
5532 request_ccb->ccb_h.pinfo.priority);
5533 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5534 request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5535 request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5536 request_ccb->crcn.flags =
5537 scan_info->request_ccb->crcn.flags;
5538 } else {
5539 status = xpt_create_path(&path, xpt_periph,
5540 path_id, target_id, lun_id);
5541 if (status != CAM_REQ_CMP) {
5542 kprintf("xpt_scan_bus: xpt_create_path failed "
5543 "with status %#x, halting LUN scan\n",
5544 status);
5545 goto hop_again;
5547 xpt_setup_ccb(&request_ccb->ccb_h, path,
5548 request_ccb->ccb_h.pinfo.priority);
5549 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5550 request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5551 request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5552 request_ccb->crcn.flags =
5553 scan_info->request_ccb->crcn.flags;
5555 xpt_action(request_ccb);
5556 break;
5558 default:
5559 break;
5563 typedef enum {
5564 PROBE_TUR,
5565 PROBE_INQUIRY, /* this counts as DV0 for Basic Domain Validation */
5566 PROBE_FULL_INQUIRY,
5567 PROBE_MODE_SENSE,
5568 PROBE_SERIAL_NUM_0,
5569 PROBE_SERIAL_NUM_1,
5570 PROBE_TUR_FOR_NEGOTIATION,
5571 PROBE_INQUIRY_BASIC_DV1,
5572 PROBE_INQUIRY_BASIC_DV2,
5573 PROBE_DV_EXIT,
5574 PROBE_INVALID
5575 } probe_action;
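/*
 * Rough sketch of the probe state machine driven by probestart() and
 * probedone() below: TUR -> INQUIRY [-> FULL_INQUIRY] -> MODE_SENSE ->
 * SERIAL_NUM_0 -> SERIAL_NUM_1, with TUR_FOR_NEGOTIATION and the
 * INQUIRY_BASIC_DV states used afterwards for SPI domain validation.
 */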
5577 static char *probe_action_text[] = {
5578 "PROBE_TUR",
5579 "PROBE_INQUIRY",
5580 "PROBE_FULL_INQUIRY",
5581 "PROBE_MODE_SENSE",
5582 "PROBE_SERIAL_NUM_0",
5583 "PROBE_SERIAL_NUM_1",
5584 "PROBE_TUR_FOR_NEGOTIATION",
5585 "PROBE_INQUIRY_BASIC_DV1",
5586 "PROBE_INQUIRY_BASIC_DV2",
5587 "PROBE_DV_EXIT",
5588 "PROBE_INVALID"
5591 #define PROBE_SET_ACTION(softc, newaction) \
5592 do { \
5593 char **text; \
5594 text = probe_action_text; \
5595 CAM_DEBUG((softc)->periph->path, CAM_DEBUG_INFO, \
5596 ("Probe %s to %s\n", text[(softc)->action], \
5597 text[(newaction)])); \
5598 (softc)->action = (newaction); \
5599 } while(0)
5601 typedef enum {
5602 PROBE_INQUIRY_CKSUM = 0x01,
5603 PROBE_SERIAL_CKSUM = 0x02,
5604 PROBE_NO_ANNOUNCE = 0x04
5605 } probe_flags;
5607 typedef struct {
5608 TAILQ_HEAD(, ccb_hdr) request_ccbs;
5609 probe_action action;
5610 union ccb saved_ccb;
5611 probe_flags flags;
5612 MD5_CTX context;
5613 u_int8_t digest[16];
5614 struct cam_periph *periph;
5615 } probe_softc;
5617 static void
5618 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
5619 cam_flags flags, union ccb *request_ccb)
5621 struct ccb_pathinq *cpi;
5622 cam_status status;
5623 struct cam_path *new_path;
5624 struct cam_periph *old_periph;
5626 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5627 ("xpt_scan_lun\n"));
5629 cpi = &xpt_alloc_ccb()->cpi;
5630 xpt_setup_ccb(&cpi->ccb_h, path, /*priority*/1);
5631 cpi->ccb_h.func_code = XPT_PATH_INQ;
5632 xpt_action((union ccb *)cpi);
5634 if (cpi->ccb_h.status != CAM_REQ_CMP) {
5635 if (request_ccb != NULL) {
5636 request_ccb->ccb_h.status = cpi->ccb_h.status;
5637 xpt_done(request_ccb);
5639 xpt_free_ccb(&cpi->ccb_h);
5640 return;
5643 if ((cpi->hba_misc & PIM_NOINITIATOR) != 0) {
5645 * Can't scan the bus on an adapter that
5646 * cannot perform the initiator role.
5648 if (request_ccb != NULL) {
5649 request_ccb->ccb_h.status = CAM_REQ_CMP;
5650 xpt_done(request_ccb);
5652 xpt_free_ccb(&cpi->ccb_h);
5653 return;
5655 xpt_free_ccb(&cpi->ccb_h);
5657 if (request_ccb == NULL) {
5658 request_ccb = xpt_alloc_ccb();
5659 new_path = kmalloc(sizeof(*new_path), M_CAMXPT, M_INTWAIT);
5660 status = xpt_compile_path(new_path, xpt_periph,
5661 path->bus->path_id,
5662 path->target->target_id,
5663 path->device->lun_id);
5665 if (status != CAM_REQ_CMP) {
5666 xpt_print(path, "xpt_scan_lun: can't compile path, "
5667 "can't continue\n");
5668 xpt_free_ccb(&request_ccb->ccb_h);
5669 kfree(new_path, M_CAMXPT);
5670 return;
5672 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
5673 request_ccb->ccb_h.cbfcnp = xptscandone;
5674 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5675 request_ccb->crcn.flags = flags;
5678 if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
5679 probe_softc *softc;
5681 softc = (probe_softc *)old_periph->softc;
5682 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5683 periph_links.tqe);
5684 } else {
5685 status = cam_periph_alloc(proberegister, NULL, probecleanup,
5686 probestart, "probe",
5687 CAM_PERIPH_BIO,
5688 request_ccb->ccb_h.path, NULL, 0,
5689 request_ccb);
5691 if (status != CAM_REQ_CMP) {
5692 xpt_print(path, "xpt_scan_lun: cam_alloc_periph "
5693 "returned an error, can't continue probe\n");
5694 request_ccb->ccb_h.status = status;
5695 xpt_done(request_ccb);
5700 static void
5701 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
5703 xpt_release_path(done_ccb->ccb_h.path);
5704 kfree(done_ccb->ccb_h.path, M_CAMXPT);
5705 xpt_free_ccb(&done_ccb->ccb_h);
5708 static cam_status
5709 proberegister(struct cam_periph *periph, void *arg)
5711 union ccb *request_ccb; /* CCB representing the probe request */
5712 cam_status status;
5713 probe_softc *softc;
5715 request_ccb = (union ccb *)arg;
5716 if (periph == NULL) {
5717 kprintf("proberegister: periph was NULL!!\n");
5718 return(CAM_REQ_CMP_ERR);
5721 if (request_ccb == NULL) {
5722 kprintf("proberegister: no probe CCB, "
5723 "can't register device\n");
5724 return(CAM_REQ_CMP_ERR);
5727 softc = kmalloc(sizeof(*softc), M_CAMXPT, M_INTWAIT | M_ZERO);
5728 TAILQ_INIT(&softc->request_ccbs);
5729 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5730 periph_links.tqe);
5731 softc->flags = 0;
5732 periph->softc = softc;
5733 softc->periph = periph;
5734 softc->action = PROBE_INVALID;
5735 status = cam_periph_acquire(periph);
5736 if (status != CAM_REQ_CMP) {
5737 return (status);
5742 * Ensure we've waited at least a bus settle
5743 * delay before attempting to probe the device.
5744 * For HBAs that don't do bus resets, this won't make a difference.
5746 cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
5747 scsi_delay);
5748 probeschedule(periph);
5749 return(CAM_REQ_CMP);
5752 static void
5753 probeschedule(struct cam_periph *periph)
5755 struct ccb_pathinq *cpi;
5756 union ccb *ccb;
5757 probe_softc *softc;
5759 softc = (probe_softc *)periph->softc;
5760 ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5762 cpi = &xpt_alloc_ccb()->cpi;
5763 xpt_setup_ccb(&cpi->ccb_h, periph->path, /*priority*/1);
5764 cpi->ccb_h.func_code = XPT_PATH_INQ;
5765 xpt_action((union ccb *)cpi);
5768 * If a device has gone away and another device, or the same one,
5769 * is back in the same place, it should have a unit attention
5770 * condition pending. It will not report the unit attention in
5771 * response to an inquiry, which may leave invalid transfer
5772 * negotiations in effect. The TUR will reveal the unit attention
5773 * condition. Only send the TUR for lun 0, since some devices
5774 * will get confused by commands other than inquiry to non-existent
5775 * luns. If you think a device has gone away, start your scan from
5776 * lun 0. This will ensure that any bogus transfer settings are
5777 * invalidated.
5779 * If we haven't seen the device before and the controller supports
5780 * some kind of transfer negotiation, negotiate with the first
5781 * sent command if no bus reset was performed at startup. This
5782 * ensures that the device is not confused by transfer negotiation
5783 * settings left over by loader or BIOS action.
5785 if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5786 && (ccb->ccb_h.target_lun == 0)) {
5787 PROBE_SET_ACTION(softc, PROBE_TUR);
5788 } else if ((cpi->hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
5789 && (cpi->hba_misc & PIM_NOBUSRESET) != 0) {
5790 proberequestdefaultnegotiation(periph);
5791 PROBE_SET_ACTION(softc, PROBE_INQUIRY);
5792 } else {
5793 PROBE_SET_ACTION(softc, PROBE_INQUIRY);
5796 if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
5797 softc->flags |= PROBE_NO_ANNOUNCE;
5798 else
5799 softc->flags &= ~PROBE_NO_ANNOUNCE;
5801 xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
5802 xpt_free_ccb(&cpi->ccb_h);
5805 static void
5806 probestart(struct cam_periph *periph, union ccb *start_ccb)
5808 /* Probe the device that our peripheral driver points to */
5809 struct ccb_scsiio *csio;
5810 probe_softc *softc;
5812 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
5814 softc = (probe_softc *)periph->softc;
5815 csio = &start_ccb->csio;
5817 switch (softc->action) {
5818 case PROBE_TUR:
5819 case PROBE_TUR_FOR_NEGOTIATION:
5820 case PROBE_DV_EXIT:
5822 scsi_test_unit_ready(csio,
5823 /*retries*/4,
5824 probedone,
5825 MSG_SIMPLE_Q_TAG,
5826 SSD_FULL_SIZE,
5827 /*timeout*/60000);
5828 break;
5830 case PROBE_INQUIRY:
5831 case PROBE_FULL_INQUIRY:
5832 case PROBE_INQUIRY_BASIC_DV1:
5833 case PROBE_INQUIRY_BASIC_DV2:
5835 u_int inquiry_len;
5836 struct scsi_inquiry_data *inq_buf;
5838 inq_buf = &periph->path->device->inq_data;
5841 * If the device is currently configured, we calculate an
5842 * MD5 checksum of the inquiry data, and if the serial number
5843 * length is greater than 0, add the serial number data
5844 * into the checksum as well. Once the inquiry and the
5845 * serial number check finish, we attempt to figure out
5846 * whether we still have the same device.
5848 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5850 MD5Init(&softc->context);
5851 MD5Update(&softc->context, (unsigned char *)inq_buf,
5852 sizeof(struct scsi_inquiry_data));
5853 softc->flags |= PROBE_INQUIRY_CKSUM;
5854 if (periph->path->device->serial_num_len > 0) {
5855 MD5Update(&softc->context,
5856 periph->path->device->serial_num,
5857 periph->path->device->serial_num_len);
5858 softc->flags |= PROBE_SERIAL_CKSUM;
5860 MD5Final(softc->digest, &softc->context);
5863 if (softc->action == PROBE_INQUIRY)
5864 inquiry_len = SHORT_INQUIRY_LENGTH;
5865 else
5866 inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf);
5869 * Some parallel SCSI devices fail to send an
5870 * ignore wide residue message when dealing with
5871 * odd length inquiry requests. Round up to be
5872 * safe.
5874 inquiry_len = roundup2(inquiry_len, 2);
5876 if (softc->action == PROBE_INQUIRY_BASIC_DV1
5877 || softc->action == PROBE_INQUIRY_BASIC_DV2) {
5878 inq_buf = kmalloc(inquiry_len, M_CAMXPT, M_INTWAIT);
5880 scsi_inquiry(csio,
5881 /*retries*/4,
5882 probedone,
5883 MSG_SIMPLE_Q_TAG,
5884 (u_int8_t *)inq_buf,
5885 inquiry_len,
5886 /*evpd*/FALSE,
5887 /*page_code*/0,
5888 SSD_MIN_SIZE,
5889 /*timeout*/60 * 1000);
5890 break;
5892 case PROBE_MODE_SENSE:
5894 void *mode_buf;
5895 int mode_buf_len;
5897 mode_buf_len = sizeof(struct scsi_mode_header_6)
5898 + sizeof(struct scsi_mode_blk_desc)
5899 + sizeof(struct scsi_control_page);
5900 mode_buf = kmalloc(mode_buf_len, M_CAMXPT, M_INTWAIT);
5901 scsi_mode_sense(csio,
5902 /*retries*/4,
5903 probedone,
5904 MSG_SIMPLE_Q_TAG,
5905 /*dbd*/FALSE,
5906 SMS_PAGE_CTRL_CURRENT,
5907 SMS_CONTROL_MODE_PAGE,
5908 mode_buf,
5909 mode_buf_len,
5910 SSD_FULL_SIZE,
5911 /*timeout*/60000);
5912 break;
5914 case PROBE_SERIAL_NUM_0:
5916 struct scsi_vpd_supported_page_list *vpd_list = NULL;
5917 struct cam_ed *device;
5919 device = periph->path->device;
5920 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0) {
5921 vpd_list = kmalloc(sizeof(*vpd_list), M_CAMXPT,
5922 M_INTWAIT | M_ZERO);
5925 if (vpd_list != NULL) {
5926 scsi_inquiry(csio,
5927 /*retries*/4,
5928 probedone,
5929 MSG_SIMPLE_Q_TAG,
5930 (u_int8_t *)vpd_list,
5931 sizeof(*vpd_list),
5932 /*evpd*/TRUE,
5933 SVPD_SUPPORTED_PAGE_LIST,
5934 SSD_MIN_SIZE,
5935 /*timeout*/60 * 1000);
5936 break;
5939 * We'll have to do without, let our probedone
5940 * routine finish up for us.
5942 start_ccb->csio.data_ptr = NULL;
5943 probedone(periph, start_ccb);
5944 return;
5946 case PROBE_SERIAL_NUM_1:
5948 struct scsi_vpd_unit_serial_number *serial_buf;
5949 struct cam_ed* device;
5951 serial_buf = NULL;
5952 device = periph->path->device;
5953 device->serial_num = NULL;
5954 device->serial_num_len = 0;
5956 serial_buf = (struct scsi_vpd_unit_serial_number *)
5957 kmalloc(sizeof(*serial_buf), M_CAMXPT,
5958 M_INTWAIT | M_ZERO);
5959 scsi_inquiry(csio,
5960 /*retries*/4,
5961 probedone,
5962 MSG_SIMPLE_Q_TAG,
5963 (u_int8_t *)serial_buf,
5964 sizeof(*serial_buf),
5965 /*evpd*/TRUE,
5966 SVPD_UNIT_SERIAL_NUMBER,
5967 SSD_MIN_SIZE,
5968 /*timeout*/60 * 1000);
5969 break;
5971 case PROBE_INVALID:
5972 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_INFO,
5973 ("probestart: invalid action state\n"));
5974 default:
5975 break;
5977 xpt_action(start_ccb);
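/*
 * Fetch the user (default) transfer settings for the device and reapply
 * them as the current settings, so negotiation starts from a known
 * baseline.
 */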
5980 static void
5981 proberequestdefaultnegotiation(struct cam_periph *periph)
5983 struct ccb_trans_settings *cts;
5985 cts = &xpt_alloc_ccb()->cts;
5986 xpt_setup_ccb(&cts->ccb_h, periph->path, /*priority*/1);
5987 cts->ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5988 cts->type = CTS_TYPE_USER_SETTINGS;
5989 xpt_action((union ccb *)cts);
5990 if ((cts->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5991 xpt_free_ccb(&cts->ccb_h);
5992 return;
5994 cts->ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5995 cts->type = CTS_TYPE_CURRENT_SETTINGS;
5996 xpt_action((union ccb *)cts);
5997 xpt_free_ccb(&cts->ccb_h);
6001 * Backoff Negotiation Code - only pertinent for SPI devices.
6003 static int
6004 proberequestbackoff(struct cam_periph *periph, struct cam_ed *device)
6006 struct ccb_trans_settings *cts;
6007 struct ccb_trans_settings_spi *spi;
6008 int result;
6010 result = 0;
6011 cts = &xpt_alloc_ccb()->cts;
6012 xpt_setup_ccb(&cts->ccb_h, periph->path, /*priority*/1);
6013 cts->ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6014 cts->type = CTS_TYPE_CURRENT_SETTINGS;
6015 xpt_action((union ccb *)cts);
6016 if ((cts->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
6017 if (bootverbose) {
6018 xpt_print(periph->path,
6019 "failed to get current device settings\n");
6021 goto done;
6023 if (cts->transport != XPORT_SPI) {
6024 if (bootverbose) {
6025 xpt_print(periph->path, "not SPI transport\n");
6027 goto done;
6029 spi = &cts->xport_specific.spi;
6032 * We cannot renegotiate sync rate if we don't have one.
6034 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
6035 if (bootverbose) {
6036 xpt_print(periph->path, "no sync rate known\n");
6038 goto done;
6042 * We'll assert that we don't have to touch PPR options - the
6043 * SIM will see what we do with period and offset and adjust
6044 * the PPR options as appropriate.
6048 * A sync rate with unknown or zero offset is nonsensical.
6049 * A sync period of zero means Async.
6051 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0
6052 || spi->sync_offset == 0 || spi->sync_period == 0) {
6053 if (bootverbose) {
6054 xpt_print(periph->path, "no sync rate available\n");
6056 goto done;
6059 if (device->flags & CAM_DEV_DV_HIT_BOTTOM) {
6060 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6061 ("hit async: giving up on DV\n"));
6062 goto done;
6067 * Jump sync_period up by one, but stop at 5MHz and fall back to Async.
6068 * We don't try to remember 'last' settings to see if the SIM actually
6069 * gets into the speed we want to set. We check on the SIM telling
6070 * us that a requested speed is bad, but otherwise don't try to
6071 * check the speed due to the asynchronous and handshake nature
6072 * of speed setting.
6074 spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET;
6075 for (;;) {
6076 spi->sync_period++;
6077 if (spi->sync_period >= 0xf) {
6078 spi->sync_period = 0;
6079 spi->sync_offset = 0;
6080 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6081 ("setting to async for DV\n"));
6083 * Once we hit async, we don't want to try
6084 * any more settings.
6086 device->flags |= CAM_DEV_DV_HIT_BOTTOM;
6087 } else if (bootverbose) {
6088 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6089 ("DV: period 0x%x\n", spi->sync_period));
6090 kprintf("setting period to 0x%x\n", spi->sync_period);
6092 cts->ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
6093 cts->type = CTS_TYPE_CURRENT_SETTINGS;
6094 xpt_action((union ccb *)cts);
6095 if ((cts->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
6096 break;
6098 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6099 ("DV: failed to set period 0x%x\n", spi->sync_period));
6100 if (spi->sync_period == 0)
6101 goto done;
6103 result = 1;
6104 done:
6105 xpt_free_ccb(&cts->ccb_h);
6107 return result;
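/*
 * Completion handler for all probe CCBs; advances softc->action through
 * the probe state machine and reschedules the periph for the next step.
 */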
6110 static void
6111 probedone(struct cam_periph *periph, union ccb *done_ccb)
6113 probe_softc *softc;
6114 struct cam_path *path;
6115 u_int32_t priority;
6117 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
6119 softc = (probe_softc *)periph->softc;
6120 path = done_ccb->ccb_h.path;
6121 priority = done_ccb->ccb_h.pinfo.priority;
6123 switch (softc->action) {
6124 case PROBE_TUR:
6126 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
6128 if (cam_periph_error(done_ccb, 0,
6129 SF_NO_PRINT, NULL) == ERESTART)
6130 return;
6131 else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
6132 /* Don't wedge the queue */
6133 xpt_release_devq(done_ccb->ccb_h.path,
6134 /*count*/1,
6135 /*run_queue*/TRUE);
6137 PROBE_SET_ACTION(softc, PROBE_INQUIRY);
6138 xpt_release_ccb(done_ccb);
6139 xpt_schedule(periph, priority);
6140 return;
6142 case PROBE_INQUIRY:
6143 case PROBE_FULL_INQUIRY:
6145 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
6146 struct scsi_inquiry_data *inq_buf;
6147 u_int8_t periph_qual;
6149 path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
6150 inq_buf = &path->device->inq_data;
6152 periph_qual = SID_QUAL(inq_buf);
6154 switch(periph_qual) {
6155 case SID_QUAL_LU_CONNECTED:
6157 u_int8_t len;
6160 * We conservatively request only
6161 * SHORT_INQUIRY_LENGTH bytes of inquiry
6162 * information during our first try
6163 * at sending an INQUIRY. If the device
6164 * has more information to give,
6165 * perform a second request specifying
6166 * the amount of information the device
6167 * is willing to give.
6169 len = inq_buf->additional_length
6170 + offsetof(struct scsi_inquiry_data,
6171 additional_length) + 1;
6172 if (softc->action == PROBE_INQUIRY
6173 && len > SHORT_INQUIRY_LENGTH) {
6174 PROBE_SET_ACTION(softc,
6175 PROBE_FULL_INQUIRY);
6176 xpt_release_ccb(done_ccb);
6177 xpt_schedule(periph, priority);
6178 return;
6181 xpt_find_quirk(path->device);
6183 xpt_devise_transport(path);
6184 if (INQ_DATA_TQ_ENABLED(inq_buf))
6185 PROBE_SET_ACTION(softc, PROBE_MODE_SENSE);
6186 else
6187 PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM_0);
6189 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
6190 xpt_reference_device(path->device);
6192 xpt_release_ccb(done_ccb);
6193 xpt_schedule(periph, priority);
6194 return;
6196 default:
6197 break;
6199 } else if (cam_periph_error(done_ccb, 0,
6200 done_ccb->ccb_h.target_lun > 0
6201 ? SF_RETRY_UA|SF_QUIET_IR|SF_NO_PRINT
6202 : SF_RETRY_UA|SF_NO_PRINT,
6203 &softc->saved_ccb) == ERESTART) {
6204 return;
6205 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6206 /* Don't wedge the queue */
6207 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6208 /*run_queue*/TRUE);
6211 * If we get to this point, we got an error status back
6212 * from the inquiry and the error status doesn't require
6213 * automatically retrying the command. Therefore, the
6214 * inquiry failed. If we had inquiry information before
6215 * for this device, but this latest inquiry command failed,
6216 * the device has probably gone away. If this device isn't
6217 * already marked unconfigured, notify the peripheral
6218 * drivers that this device is no more.
6220 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
6221 /* Send the async notification. */
6222 xpt_async(AC_LOST_DEVICE, path, NULL);
6225 xpt_release_ccb(done_ccb);
6226 break;
	case PROBE_MODE_SENSE:
	{
		struct ccb_scsiio *csio;
		struct scsi_mode_header_6 *mode_hdr;

		csio = &done_ccb->csio;
		mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			struct scsi_control_page *page;
			u_int8_t *offset;

			offset = ((u_int8_t *)&mode_hdr[1])
			    + mode_hdr->blk_desc_len;
			page = (struct scsi_control_page *)offset;
			path->device->queue_flags = page->queue_flags;
		} else if (cam_periph_error(done_ccb, 0,
					    SF_RETRY_UA|SF_NO_PRINT,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path,
					 /*count*/1, /*run_queue*/TRUE);
		}
		xpt_release_ccb(done_ccb);
		kfree(mode_hdr, M_CAMXPT);
		PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM_0);
		xpt_schedule(periph, priority);
		return;
	}
	case PROBE_SERIAL_NUM_0:
	{
		struct ccb_scsiio *csio;
		struct scsi_vpd_supported_page_list *page_list;
		int length, serialnum_supported, i;

		serialnum_supported = 0;
		csio = &done_ccb->csio;
		page_list =
		    (struct scsi_vpd_supported_page_list *)csio->data_ptr;

		if (page_list == NULL) {
			/*
			 * Don't process the command as it was never sent
			 */
		} else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
		    && (page_list->length > 0)) {
			length = min(page_list->length,
				     SVPD_SUPPORTED_PAGES_SIZE);
			for (i = 0; i < length; i++) {
				if (page_list->list[i] ==
				    SVPD_UNIT_SERIAL_NUMBER) {
					serialnum_supported = 1;
					break;
				}
			}
		} else if (cam_periph_error(done_ccb, 0,
					    SF_RETRY_UA|SF_NO_PRINT,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}

		if (page_list != NULL)
			kfree(page_list, M_DEVBUF);

		if (serialnum_supported) {
			xpt_release_ccb(done_ccb);
			PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM_1);
			xpt_schedule(periph, priority);
			return;
		}

		csio->data_ptr = NULL;
		/* FALLTHROUGH */
	}

	case PROBE_SERIAL_NUM_1:
	{
		struct ccb_scsiio *csio;
		struct scsi_vpd_unit_serial_number *serial_buf;
		u_int32_t priority;
		int changed;
		int have_serialnum;

		changed = 1;
		have_serialnum = 0;
		csio = &done_ccb->csio;
		priority = done_ccb->ccb_h.pinfo.priority;
		serial_buf =
		    (struct scsi_vpd_unit_serial_number *)csio->data_ptr;

		/* Clean up from previous instance of this device */
		if (path->device->serial_num != NULL) {
			kfree(path->device->serial_num, M_CAMXPT);
			path->device->serial_num = NULL;
			path->device->serial_num_len = 0;
		}

		if (serial_buf == NULL) {
			/*
			 * Don't process the command as it was never sent
			 */
		} else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
		    && (serial_buf->length > 0)) {
			have_serialnum = 1;
			path->device->serial_num =
			    kmalloc((serial_buf->length + 1),
				    M_CAMXPT, M_INTWAIT);
			bcopy(serial_buf->serial_num,
			      path->device->serial_num,
			      serial_buf->length);
			path->device->serial_num_len = serial_buf->length;
			path->device->serial_num[serial_buf->length] = '\0';
		} else if (cam_periph_error(done_ccb, 0,
					    SF_RETRY_UA|SF_NO_PRINT,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}

		/*
		 * Let's see if we have seen this device before.
		 */
		if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
			MD5_CTX context;
			u_int8_t digest[16];

			MD5Init(&context);

			MD5Update(&context,
				  (unsigned char *)&path->device->inq_data,
				  sizeof(struct scsi_inquiry_data));

			if (have_serialnum)
				MD5Update(&context, serial_buf->serial_num,
					  serial_buf->length);

			MD5Final(digest, &context);
			if (bcmp(softc->digest, digest, 16) == 0)
				changed = 0;

			/*
			 * XXX Do we need to do a TUR in order to ensure
			 *     that the device really hasn't changed???
			 */
			if ((changed != 0)
			 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
				xpt_async(AC_LOST_DEVICE, path, NULL);
		}
		if (serial_buf != NULL)
			kfree(serial_buf, M_CAMXPT);

		if (changed != 0) {
			/*
			 * Now that we have all the necessary
			 * information to safely perform transfer
			 * negotiations... Controllers don't perform
			 * any negotiation or tagged queuing until
			 * after the first XPT_SET_TRAN_SETTINGS ccb is
			 * received.  So, on a new device, just retrieve
			 * the user settings, and set them as the current
			 * settings to set the device up.
			 */
			proberequestdefaultnegotiation(periph);
			xpt_release_ccb(done_ccb);

			/*
			 * Perform a TUR to allow the controller to
			 * perform any necessary transfer negotiation.
			 */
			PROBE_SET_ACTION(softc, PROBE_TUR_FOR_NEGOTIATION);
			xpt_schedule(periph, priority);
			return;
		}
		xpt_release_ccb(done_ccb);
		break;
	}
	case PROBE_TUR_FOR_NEGOTIATION:
	case PROBE_DV_EXIT:
		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}

		xpt_reference_device(path->device);

		/*
		 * Do Domain Validation for lun 0 on devices that claim
		 * to support Synchronous Transfer modes.
		 *
		 * The SID_Sync flag is obsolete or misused in some
		 * situations (some virtio block devices), ignore it.
		 */
		if (softc->action == PROBE_TUR_FOR_NEGOTIATION
		 && done_ccb->ccb_h.target_lun == 0
		 /* && (path->device->inq_data.flags & SID_Sync) != 0 */
		 && (path->device->flags & CAM_DEV_IN_DV) == 0) {
			CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
				  ("Begin Domain Validation\n"));
			path->device->flags |= CAM_DEV_IN_DV;
			xpt_release_ccb(done_ccb);
			PROBE_SET_ACTION(softc, PROBE_INQUIRY_BASIC_DV1);
			xpt_schedule(periph, priority);
			return;
		}
		if (softc->action == PROBE_DV_EXIT) {
			CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
				  ("Leave Domain Validation\n"));
		}
		path->device->flags &=
		    ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
		if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
			/* Inform the XPT that a new device has been found */
			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action(done_ccb);
			xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
				  done_ccb);
		}
		xpt_release_ccb(done_ccb);
		break;
	case PROBE_INQUIRY_BASIC_DV1:
	case PROBE_INQUIRY_BASIC_DV2:
	{
		struct scsi_inquiry_data *nbuf;
		struct ccb_scsiio *csio;

		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}
		csio = &done_ccb->csio;
		nbuf = (struct scsi_inquiry_data *)csio->data_ptr;
		if (bcmp(nbuf, &path->device->inq_data, SHORT_INQUIRY_LENGTH)) {
			xpt_print(path,
			    "inquiry data fails comparison at DV%d step\n",
			    softc->action == PROBE_INQUIRY_BASIC_DV1 ? 1 : 2);
			if (proberequestbackoff(periph, path->device)) {
				path->device->flags &= ~CAM_DEV_IN_DV;
				PROBE_SET_ACTION(softc, PROBE_TUR_FOR_NEGOTIATION);
			} else {
				/* give up */
				PROBE_SET_ACTION(softc, PROBE_DV_EXIT);
			}
			kfree(nbuf, M_CAMXPT);
			xpt_release_ccb(done_ccb);
			xpt_schedule(periph, priority);
			return;
		}
		kfree(nbuf, M_CAMXPT);
		if (softc->action == PROBE_INQUIRY_BASIC_DV1) {
			PROBE_SET_ACTION(softc, PROBE_INQUIRY_BASIC_DV2);
			xpt_release_ccb(done_ccb);
			xpt_schedule(periph, priority);
			return;
		}
		if (softc->action == PROBE_DV_EXIT) {
			CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
				  ("Leave Domain Validation Successfully\n"));
		}
		path->device->flags &=
		    ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
		if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
			/* Inform the XPT that a new device has been found */
			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action(done_ccb);
			xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
				  done_ccb);
		}
		xpt_release_ccb(done_ccb);
		break;
	}
	case PROBE_INVALID:
		CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_INFO,
			  ("probedone: invalid action state\n"));
	default:
		break;
	}
	done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
	TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
	done_ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(done_ccb);
	if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
		cam_periph_invalidate(periph);
		cam_periph_release(periph);
	} else {
		probeschedule(periph);
	}
}

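/*
 * For orientation, a successful first-time probe steps softc->action
 * through roughly this sequence (bracketed states are conditional):
 *
 *	PROBE_TUR -> PROBE_INQUIRY [-> PROBE_FULL_INQUIRY]
 *	  [-> PROBE_MODE_SENSE] -> PROBE_SERIAL_NUM_0 [-> PROBE_SERIAL_NUM_1]
 *	  -> PROBE_TUR_FOR_NEGOTIATION
 *	  [-> PROBE_INQUIRY_BASIC_DV1 -> PROBE_INQUIRY_BASIC_DV2]
 *
 * PROBE_DV_EXIT is entered only when proberequestbackoff() gives up
 * during domain validation.
 */
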
static void
probecleanup(struct cam_periph *periph)
{
	kfree(periph->softc, M_CAMXPT);
}

static void
xpt_find_quirk(struct cam_ed *device)
{
	caddr_t match;

	match = cam_quirkmatch((caddr_t)&device->inq_data,
			       (caddr_t)xpt_quirk_table,
			       NELEM(xpt_quirk_table),
			       sizeof(*xpt_quirk_table), scsi_inquiry_match);

	if (match == NULL)
		panic("xpt_find_quirk: device didn't match wildcard entry!!");

	device->quirk = (struct xpt_quirk_entry *)match;
}

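/*
 * Sketch: cam_quirkmatch() walks xpt_quirk_table comparing the device's
 * INQUIRY data against each entry's inquiry pattern via
 * scsi_inquiry_match(); the table ends with a catch-all wildcard entry
 * that matches any device, which is why a NULL return is a panic.  A
 * hypothetical (non-existent) table-entry fragment might look like:
 */
#if 0
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, "ACME", "TURBODISK*", "*" },
		/*quirks*/0, /*mintags*/2, /*maxtags*/32
	},
#endif
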
static int
sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS)
{
	int error, lbool;

	lbool = cam_srch_hi;
	error = sysctl_handle_int(oidp, &lbool, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (lbool == 0 || lbool == 1) {
		cam_srch_hi = lbool;
		return (0);
	} else {
		return (EINVAL);
	}
}

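/*
 * Usage sketch: because the handler rejects anything but 0 or 1, the
 * knob behaves as a boolean from userland, e.g. (assuming it is exported
 * under the kern.cam tree as cam_srch_hi):
 *
 *	sysctl kern.cam.cam_srch_hi=1
 */
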
static void
xpt_devise_transport(struct cam_path *path)
{
	struct ccb_pathinq *cpi;
	struct ccb_trans_settings *cts;
	struct scsi_inquiry_data *inq_buf;

	/* Get transport information from the SIM */
	cpi = &xpt_alloc_ccb()->cpi;
	cts = &xpt_alloc_ccb()->cts;

	xpt_setup_ccb(&cpi->ccb_h, path, /*priority*/1);
	cpi->ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)cpi);

	inq_buf = NULL;
	if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
		inq_buf = &path->device->inq_data;
	path->device->protocol = PROTO_SCSI;
	path->device->protocol_version =
	    inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi->protocol_version;
	path->device->transport = cpi->transport;
	path->device->transport_version = cpi->transport_version;

	/*
	 * Any device not using SPI3 features should
	 * be considered SPI2 or lower.
	 */
	if (inq_buf != NULL) {
		if (path->device->transport == XPORT_SPI
		 && (inq_buf->spi3data & SID_SPI_MASK) == 0
		 && path->device->transport_version > 2)
			path->device->transport_version = 2;
	} else {
		struct cam_ed *otherdev;

		for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
		     otherdev != NULL;
		     otherdev = TAILQ_NEXT(otherdev, links)) {
			if (otherdev != path->device)
				break;
		}

		if (otherdev != NULL) {
			/*
			 * Initially assume the same versioning as
			 * prior luns for this target.
			 */
			path->device->protocol_version =
			    otherdev->protocol_version;
			path->device->transport_version =
			    otherdev->transport_version;
		} else {
			/* Until we know better, opt for safety */
			path->device->protocol_version = 2;
			if (path->device->transport == XPORT_SPI)
				path->device->transport_version = 2;
			else
				path->device->transport_version = 0;
		}
	}

	/*
	 * XXX
	 * For a device compliant with SPC-2 we should be able
	 * to determine the transport version supported by
	 * scrutinizing the version descriptors in the
	 * inquiry buffer.
	 */

	/* Tell the controller what we think */
	xpt_setup_ccb(&cts->ccb_h, path, /*priority*/1);
	cts->ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
	cts->type = CTS_TYPE_CURRENT_SETTINGS;
	cts->transport = path->device->transport;
	cts->transport_version = path->device->transport_version;
	cts->protocol = path->device->protocol;
	cts->protocol_version = path->device->protocol_version;
	cts->proto_specific.valid = 0;
	cts->xport_specific.valid = 0;
	xpt_action((union ccb *)cts);

	xpt_free_ccb(&cts->ccb_h);
	xpt_free_ccb(&cpi->ccb_h);
}

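/*
 * A condensed sketch of the SPI version clamp applied above, assuming
 * only that SID_SPI_MASK covers the SPI3 feature bits in spi3data:
 */
#if 0
static u_int
clamp_spi_transport_version(u_int version, u_int8_t spi3data)
{
	/* No SPI3 feature bits advertised => treat as SPI2 at most. */
	if ((spi3data & SID_SPI_MASK) == 0 && version > 2)
		return (2);
	return (version);
}
#endif
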
static void
xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
			  int async_update)
{
	struct ccb_pathinq *cpi;
	struct ccb_trans_settings *cur_cts;
	struct ccb_trans_settings_scsi *scsi;
	struct ccb_trans_settings_scsi *cur_scsi;
	struct cam_sim *sim;
	struct scsi_inquiry_data *inq_data;

	if (device == NULL) {
		cts->ccb_h.status = CAM_PATH_INVALID;
		xpt_done((union ccb *)cts);
		return;
	}

	if (cts->protocol == PROTO_UNKNOWN
	 || cts->protocol == PROTO_UNSPECIFIED) {
		cts->protocol = device->protocol;
		cts->protocol_version = device->protocol_version;
	}

	if (cts->protocol_version == PROTO_VERSION_UNKNOWN
	 || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
		cts->protocol_version = device->protocol_version;

	if (cts->protocol != device->protocol) {
		xpt_print(cts->ccb_h.path, "Uninitialized Protocol %x:%x?\n",
			  cts->protocol, device->protocol);
		cts->protocol = device->protocol;
	}

	if (cts->protocol_version > device->protocol_version) {
		if (bootverbose) {
			xpt_print(cts->ccb_h.path, "Down-revving Protocol "
				  "Version from %d to %d?\n",
				  cts->protocol_version,
				  device->protocol_version);
		}
		cts->protocol_version = device->protocol_version;
	}

	if (cts->transport == XPORT_UNKNOWN
	 || cts->transport == XPORT_UNSPECIFIED) {
		cts->transport = device->transport;
		cts->transport_version = device->transport_version;
	}

	if (cts->transport_version == XPORT_VERSION_UNKNOWN
	 || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
		cts->transport_version = device->transport_version;

	if (cts->transport != device->transport) {
		xpt_print(cts->ccb_h.path, "Uninitialized Transport %x:%x?\n",
			  cts->transport, device->transport);
		cts->transport = device->transport;
	}

	if (cts->transport_version > device->transport_version) {
		if (bootverbose) {
			xpt_print(cts->ccb_h.path, "Down-revving Transport "
				  "Version from %d to %d?\n",
				  cts->transport_version,
				  device->transport_version);
		}
		cts->transport_version = device->transport_version;
	}

	sim = cts->ccb_h.path->bus->sim;

	/*
	 * Nothing more of interest to do unless
	 * this is a device connected via the
	 * SCSI protocol.
	 */
	if (cts->protocol != PROTO_SCSI) {
		if (async_update == FALSE)
			(*(sim->sim_action))(sim, (union ccb *)cts);
		return;
	}

	cpi = &xpt_alloc_ccb()->cpi;
	cur_cts = &xpt_alloc_ccb()->cts;

	inq_data = &device->inq_data;
	scsi = &cts->proto_specific.scsi;
	xpt_setup_ccb(&cpi->ccb_h, cts->ccb_h.path, /*priority*/1);
	cpi->ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)cpi);

	/* SCSI specific sanity checking */
	if ((cpi->hba_inquiry & PI_TAG_ABLE) == 0
	 || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
	 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
	 || (device->quirk->mintags == 0)) {
		/*
		 * Can't tag on hardware that doesn't support tags,
		 * doesn't have it enabled, or has broken tag support.
		 */
		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	}

	if (async_update == FALSE) {
		/*
		 * Perform sanity checking against what the
		 * controller and device can do.
		 */
		xpt_setup_ccb(&cur_cts->ccb_h, cts->ccb_h.path, /*priority*/1);
		cur_cts->ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
		cur_cts->type = cts->type;
		xpt_action((union ccb *)cur_cts);
		if ((cur_cts->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			goto done;
		}
		cur_scsi = &cur_cts->proto_specific.scsi;
		if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
			scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
		}
		if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	}

	/* SPI specific sanity checking */
	if (cts->transport == XPORT_SPI && async_update == FALSE) {
		u_int spi3caps;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings_spi *cur_spi;

		spi = &cts->xport_specific.spi;

		cur_spi = &cur_cts->xport_specific.spi;

		/* Fill in any gaps in what the user gave us */
		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
			spi->sync_period = cur_spi->sync_period;
		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
			spi->sync_period = 0;
		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
			spi->sync_offset = cur_spi->sync_offset;
		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
			spi->sync_offset = 0;
		if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
			spi->ppr_options = cur_spi->ppr_options;
		if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
			spi->ppr_options = 0;
		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
			spi->bus_width = cur_spi->bus_width;
		if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
			spi->bus_width = 0;
		if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
			spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
		}
		if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
		if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
		  && (inq_data->flags & SID_Sync) == 0
		  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
		 || ((cpi->hba_inquiry & PI_SDTR_ABLE) == 0)) {
			/* Force async */
			spi->sync_period = 0;
			spi->sync_offset = 0;
		}

		switch (spi->bus_width) {
		case MSG_EXT_WDTR_BUS_32_BIT:
			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
			  || (inq_data->flags & SID_WBus32) != 0
			  || cts->type == CTS_TYPE_USER_SETTINGS)
			 && (cpi->hba_inquiry & PI_WIDE_32) != 0)
				break;
			/* Fall Through to 16-bit */
		case MSG_EXT_WDTR_BUS_16_BIT:
			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
			  || (inq_data->flags & SID_WBus16) != 0
			  || cts->type == CTS_TYPE_USER_SETTINGS)
			 && (cpi->hba_inquiry & PI_WIDE_16) != 0) {
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
				break;
			}
			/* Fall Through to 8-bit */
		default: /* New bus width?? */
		case MSG_EXT_WDTR_BUS_8_BIT:
			/* All targets can do this */
			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
			break;
		}

		spi3caps = cpi->xport_specific.spi.ppr_options;
		if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
		 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
			spi3caps &= inq_data->spi3data;

		if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;

		if ((spi3caps & SID_SPI_IUS) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;

		if ((spi3caps & SID_SPI_QAS) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;

		/* No SPI Transfer settings are allowed unless we are wide */
		if (spi->bus_width == 0)
			spi->ppr_options = 0;

		if ((spi->valid & CTS_SPI_VALID_DISC)
		 && ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0)) {
			/*
			 * Can't tag queue without disconnection.
			 */
			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
			scsi->valid |= CTS_SCSI_VALID_TQ;
		}

		/*
		 * If we are currently performing tagged transactions to
		 * this device and want to change its negotiation parameters,
		 * go non-tagged for a bit to give the controller a chance to
		 * negotiate unhampered by tag messages.
		 */
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS
		 && (device->inq_flags & SID_CmdQue) != 0
		 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
		 && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
				   CTS_SPI_VALID_SYNC_OFFSET|
				   CTS_SPI_VALID_BUS_WIDTH)) != 0)
			xpt_toggle_tags(cts->ccb_h.path);
	}

	if (cts->type == CTS_TYPE_CURRENT_SETTINGS
	 && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
		int device_tagenb;

		/*
		 * If we are transitioning from tags to no-tags or
		 * vice-versa, we need to carefully freeze and restart
		 * the queue so that we don't overlap tagged and non-tagged
		 * commands.  We also temporarily stop tags if there is
		 * a change in transfer negotiation settings to allow
		 * "tag-less" negotiation.
		 */
		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
		 || (device->inq_flags & SID_CmdQue) != 0)
			device_tagenb = TRUE;
		else
			device_tagenb = FALSE;

		if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
		  && device_tagenb == FALSE)
		 || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
		  && device_tagenb == TRUE)) {

			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
				/*
				 * Delay change to use tags until after a
				 * few commands have gone to this device so
				 * the controller has time to perform transfer
				 * negotiations without tagged messages getting
				 * in the way.
				 */
				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
			} else {
				struct ccb_relsim crs;

				xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
				device->inq_flags &= ~SID_CmdQue;
				xpt_dev_ccbq_resize(cts->ccb_h.path,
						    sim->max_dev_openings);
				device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
				device->tag_delay_count = 0;

				xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
					      /*priority*/1);
				crs.ccb_h.func_code = XPT_REL_SIMQ;
				crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
				crs.openings
				    = crs.release_timeout
				    = crs.qfrozen_cnt
				    = 0;
				xpt_action((union ccb *)&crs);
			}
		}
	}
	if (async_update == FALSE)
		(*(sim->sim_action))(sim, (union ccb *)cts);
done:
	xpt_free_ccb(&cur_cts->ccb_h);
	xpt_free_ccb(&cpi->ccb_h);
}

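/*
 * Note on the "fill in any gaps" section above: each SPI field follows
 * the same three-way rule.  If the caller's valid bit is set, its value
 * stands; otherwise the field inherits the current setting; and if even
 * the current settings can't vouch for it, it is zeroed, which reads as
 * "unknown/async/narrow" for period, offset, ppr_options and bus_width.
 */
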
static void
xpt_toggle_tags(struct cam_path *path)
{
	struct cam_ed *dev;

	/*
	 * Give controllers a chance to renegotiate
	 * before starting tag operations.  We
	 * "toggle" tagged queuing off then on
	 * which causes the tag enable command delay
	 * counter to come into effect.
	 */
	dev = path->device;
	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
	 || ((dev->inq_flags & SID_CmdQue) != 0
	  && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
		struct ccb_trans_settings *cts;

		cts = &xpt_alloc_ccb()->cts;
		xpt_setup_ccb(&cts->ccb_h, path, 1);
		cts->protocol = PROTO_SCSI;
		cts->protocol_version = PROTO_VERSION_UNSPECIFIED;
		cts->transport = XPORT_UNSPECIFIED;
		cts->transport_version = XPORT_VERSION_UNSPECIFIED;
		cts->proto_specific.scsi.flags = 0;
		cts->proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
		xpt_set_transfer_settings(cts, path->device,
					  /*async_update*/TRUE);
		cts->proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
		xpt_set_transfer_settings(cts, path->device,
					  /*async_update*/TRUE);
		xpt_free_ccb(&cts->ccb_h);
	}
}

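/*
 * The off-then-on toggle above is deliberately asynchronous: the first
 * xpt_set_transfer_settings() call drops CTS_SCSI_FLAGS_TAG_ENB and the
 * second re-enables it, which (through the tag-transition logic in
 * xpt_set_transfer_settings()) arms the CAM_DEV_TAG_AFTER_COUNT /
 * tag_delay_count machinery so a few untagged commands run while the
 * controller renegotiates.
 */
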
static void
xpt_start_tags(struct cam_path *path)
{
	struct ccb_relsim *crs;
	struct cam_ed *device;
	struct cam_sim *sim;
	int newopenings;

	crs = &xpt_alloc_ccb()->crs;
	device = path->device;
	sim = path->bus->sim;
	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
	xpt_freeze_devq(path, /*count*/1);
	device->inq_flags |= SID_CmdQue;
	if (device->tag_saved_openings != 0)
		newopenings = device->tag_saved_openings;
	else
		newopenings = min(device->quirk->maxtags,
				  sim->max_tagged_dev_openings);
	xpt_dev_ccbq_resize(path, newopenings);
	xpt_setup_ccb(&crs->ccb_h, path, /*priority*/1);
	crs->ccb_h.func_code = XPT_REL_SIMQ;
	crs->release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
	crs->openings
	    = crs->release_timeout
	    = crs->qfrozen_cnt
	    = 0;
	xpt_action((union ccb *)crs);
	xpt_free_ccb(&crs->ccb_h);
}

static int busses_to_config;
static int busses_to_reset;

static int
xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
{
	sim_lock_assert_owned(bus->sim->lock);

	if (bus->counted_to_config == 0 && bus->path_id != CAM_XPT_PATH_ID) {
		struct cam_path path;
		struct ccb_pathinq *cpi;
		int can_negotiate;

		if (bootverbose) {
			kprintf("CAM: Configuring bus:");
			if (bus->sim) {
				kprintf(" %s%d\n",
					bus->sim->sim_name,
					bus->sim->unit_number);
			} else {
				kprintf(" (unknown)\n");
			}
		}

		cpi = &xpt_alloc_ccb()->cpi;

		atomic_add_int(&busses_to_config, 1);
		bus->counted_to_config = 1;
		xpt_compile_path(&path, NULL, bus->path_id,
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
		xpt_setup_ccb(&cpi->ccb_h, &path, /*priority*/1);
		cpi->ccb_h.func_code = XPT_PATH_INQ;
		xpt_action((union ccb *)cpi);
		can_negotiate = cpi->hba_inquiry;
		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
		if ((cpi->hba_misc & PIM_NOBUSRESET) == 0 && can_negotiate)
			busses_to_reset++;
		xpt_release_path(&path);
		xpt_free_ccb(&cpi->ccb_h);
	} else if (bus->counted_to_config == 0 &&
		   bus->path_id == CAM_XPT_PATH_ID) {
		/* this is our dummy periph/bus */
		atomic_add_int(&busses_to_config, 1);
		bus->counted_to_config = 1;
	}

	return(1);
}

static int
xptconfigfunc(struct cam_eb *bus, void *arg)
{
	struct cam_path *path;
	union ccb *work_ccb;

	sim_lock_assert_owned(bus->sim->lock);

	if (bus->path_id != CAM_XPT_PATH_ID) {
		cam_status status;
		int can_negotiate;

		work_ccb = xpt_alloc_ccb();
		if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
					      CAM_TARGET_WILDCARD,
					      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
			kprintf("xptconfigfunc: xpt_create_path failed with "
				"status %#x for bus %d\n", status, bus->path_id);
			kprintf("xptconfigfunc: halting bus configuration\n");
			xpt_free_ccb(&work_ccb->ccb_h);
			xpt_uncount_bus(bus);
			return(0);
		}
		xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
		xpt_action(work_ccb);
		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
			kprintf("xptconfigfunc: CPI failed on bus %d "
				"with status %d\n", bus->path_id,
				work_ccb->ccb_h.status);
			xpt_finishconfig(xpt_periph, work_ccb);
			return(1);
		}

		can_negotiate = work_ccb->cpi.hba_inquiry;
		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
		if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
		 && (can_negotiate != 0)) {
			xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
			work_ccb->ccb_h.cbfcnp = NULL;
			CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
				  ("Resetting Bus\n"));
			xpt_action(work_ccb);
			xpt_finishconfig(xpt_periph, work_ccb);
		} else {
			/* Act as though we performed a successful BUS RESET */
			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
			xpt_finishconfig(xpt_periph, work_ccb);
		}
	} else {
		xpt_uncount_bus(bus);
	}

	return(1);
}

/*
 * Now that interrupts are enabled, go find our devices.
 *
 * This hook function is called once by run_interrupt_driven_config_hooks().
 * XPT is expected to disestablish its hook when done.
 */
static void
xpt_config(void *arg)
{
#ifdef CAMDEBUG
	/* Setup debugging flags and path */
#ifdef CAM_DEBUG_FLAGS
	cam_dflags = CAM_DEBUG_FLAGS;
#else /* !CAM_DEBUG_FLAGS */
	cam_dflags = CAM_DEBUG_NONE;
#endif /* CAM_DEBUG_FLAGS */
#ifdef CAM_DEBUG_BUS
	if (cam_dflags != CAM_DEBUG_NONE) {
		/*
		 * Locking is specifically omitted here.  No SIMs have
		 * registered yet, so xpt_create_path will only be searching
		 * empty lists of targets and devices.
		 */
		if (xpt_create_path(&cam_dpath, xpt_periph,
				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
			kprintf("xpt_config: xpt_create_path() failed for debug"
				" target %d:%d:%d, debugging disabled\n",
				CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
			cam_dflags = CAM_DEBUG_NONE;
		}
	} else {
		cam_dpath = NULL;
	}
#else /* !CAM_DEBUG_BUS */
	cam_dpath = NULL;
#endif /* CAM_DEBUG_BUS */
#endif /* CAMDEBUG */

	/*
	 * Scan all installed busses.  This will also add a count
	 * for our dummy placeholder (xpt_periph).
	 */
	xpt_for_all_busses(xptconfigbuscountfunc, NULL);

	kprintf("CAM: Configuring %d busses\n", busses_to_config - 1);
	if (busses_to_reset > 0 && scsi_delay >= 2000) {
		kprintf("Waiting %d seconds for SCSI "
			"devices to settle\n",
			scsi_delay/1000);
	}
	xpt_for_all_busses(xptconfigfunc, NULL);
}

/*
 * If the given device only has one peripheral attached to it, and if that
 * peripheral is the passthrough driver, announce it.  This ensures that the
 * user sees some sort of announcement for every peripheral in their system.
 */
static int
xptpassannouncefunc(struct cam_ed *device, void *arg)
{
	struct cam_periph *periph;
	int i;

	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
	     periph = SLIST_NEXT(periph, periph_links), i++)
		;

	periph = SLIST_FIRST(&device->periphs);
	if ((i == 1)
	 && (strncmp(periph->periph_name, "pass", 4) == 0))
		xpt_announce_periph(periph, NULL);

	return(1);
}

static void
xpt_finishconfig_task(void *context, int pending)
{
	struct periph_driver **p_drv;
	int i;

	kprintf("CAM: finished configuring all busses\n");

	if (busses_to_config == 0) {
		/* Register all the peripheral drivers */
		/* XXX This will have to change when we have loadable modules */
		p_drv = periph_drivers;
		for (i = 0; p_drv[i] != NULL; i++) {
			(*p_drv[i]->init)();
		}

		/*
		 * Check for devices with no "standard" peripheral driver
		 * attached.  For any devices like that, announce the
		 * passthrough driver so the user will see something.
		 */
		xpt_for_all_devices(xptpassannouncefunc, NULL);

		/* Release our hook so that the boot can continue. */
		config_intrhook_disestablish(xsoftc.xpt_config_hook);
		kfree(xsoftc.xpt_config_hook, M_CAMXPT);
		xsoftc.xpt_config_hook = NULL;
	}
	kfree(context, M_CAMXPT);
}

static void
xpt_uncount_bus(struct cam_eb *bus)
{
	struct xpt_task *task;

	if (bus->counted_to_config) {
		bus->counted_to_config = 0;
		if (atomic_fetchadd_int(&busses_to_config, -1) == 1) {
			task = kmalloc(sizeof(struct xpt_task), M_CAMXPT,
				       M_INTWAIT | M_ZERO);
			TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
			taskqueue_enqueue(taskqueue_thread[mycpuid],
					  &task->task);
		}
	}
}

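/*
 * atomic_fetchadd_int() returns the value held before the decrement, so
 * comparing against 1 elects exactly one caller (the one retiring the
 * final counted bus) to queue xpt_finishconfig_task(); every other
 * caller simply drops the count.
 */
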
static void
xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
{
	struct cam_path *path;

	path = done_ccb->ccb_h.path;
	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_finishconfig\n"));

	switch(done_ccb->ccb_h.func_code) {
	case XPT_RESET_BUS:
		if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
			done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
			done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
			done_ccb->crcn.flags = 0;
			xpt_action(done_ccb);
			return;
		}
		/* FALLTHROUGH */
	case XPT_SCAN_BUS:
	default:
		if (bootverbose) {
			kprintf("CAM: Finished configuring bus:");
			if (path->bus->sim) {
				kprintf(" %s%d\n",
					path->bus->sim->sim_name,
					path->bus->sim->unit_number);
			} else {
				kprintf(" (unknown)\n");
			}
		}
		xpt_uncount_bus(path->bus);
		xpt_free_path(path);
		xpt_free_ccb(&done_ccb->ccb_h);
		break;
	}
}

cam_status
xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
		   struct cam_path *path)
{
	struct ccb_setasync *csa;
	cam_status status;
	int xptpath = 0;

	if (path == NULL) {
		lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
		status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
					 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
		if (status != CAM_REQ_CMP) {
			lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
			return (status);
		}
		xptpath = 1;
	}

	csa = &xpt_alloc_ccb()->csa;
	xpt_setup_ccb(&csa->ccb_h, path, /*priority*/5);
	csa->ccb_h.func_code = XPT_SASYNC_CB;
	csa->event_enable = event;
	csa->callback = cbfunc;
	csa->callback_arg = cbarg;
	xpt_action((union ccb *)csa);
	status = csa->ccb_h.status;
	if (xptpath) {
		xpt_free_path(path);
		lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
	}
	xpt_free_ccb(&csa->ccb_h);

	return (status);
}

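/*
 * Usage sketch (hypothetical caller, not part of this file): a
 * peripheral driver interested in device-departure events across the
 * whole topology could register with a NULL path, which targets the
 * XPT's own wildcard path as handled above:
 */
#if 0
static void
mydrv_async_cb(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
{
	if (code == AC_LOST_DEVICE) {
		/* tear down any per-device state tracked in cbarg */
	}
}

	/* during attach: */
	status = xpt_register_async(AC_LOST_DEVICE, mydrv_async_cb,
				    softc, /*path*/NULL);
#endif
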
static void
xptaction(struct cam_sim *sim, union ccb *work_ccb)
{
	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));

	switch (work_ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi;

		cpi = &work_ccb->cpi;
		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = 0;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 0;
		cpi->max_lun = 0;
		cpi->initiator_id = 0;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "", HBA_IDLEN);
		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
		cpi->unit_number = sim->unit_number;
		cpi->bus_id = sim->bus_id;
		cpi->base_transfer_speed = 0;
		cpi->protocol = PROTO_UNSPECIFIED;
		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
		cpi->transport = XPORT_UNSPECIFIED;
		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(work_ccb);
		break;
	}
	default:
		work_ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(work_ccb);
		break;
	}
}

/*
 * The xpt as a "controller" has no interrupt sources, so polling
 * is a no-op.
 */
static void
xptpoll(struct cam_sim *sim)
{
}

void
xpt_lock_buses(void)
{
	lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
}

void
xpt_unlock_buses(void)
{
	lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
}

/*
 * Should only be called by the machine interrupt dispatch routines,
 * so put these prototypes here instead of in the header.
 */
static void
swi_cambio(void *arg, void *frame)
{
	camisr(NULL);
}

static void
camisr(void *dummy)
{
	cam_simq_t queue;
	struct cam_sim *sim;

	spin_lock(&cam_simq_spin);
	TAILQ_INIT(&queue);
	TAILQ_CONCAT(&queue, &cam_simq, links);
	spin_unlock(&cam_simq_spin);

	while ((sim = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, sim, links);
		CAM_SIM_LOCK(sim);
		sim->flags &= ~CAM_SIM_ON_DONEQ;
		camisr_runqueue(sim);
		CAM_SIM_UNLOCK(sim);
	}
}

static void
camisr_runqueue(struct cam_sim *sim)
{
	struct ccb_hdr *ccb_h;
	int runq;

	spin_lock(&sim->sim_spin);
	while ((ccb_h = TAILQ_FIRST(&sim->sim_doneq)) != NULL) {
		TAILQ_REMOVE(&sim->sim_doneq, ccb_h, sim_links.tqe);
		spin_unlock(&sim->sim_spin);
		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;

		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
			  ("camisr\n"));

		runq = FALSE;

		if (ccb_h->flags & CAM_HIGH_POWER) {
			struct highpowerlist *hphead;
			union ccb *send_ccb;

			lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
			hphead = &xsoftc.highpowerq;

			send_ccb = (union ccb *)STAILQ_FIRST(hphead);

			/*
			 * Increment the count since this command is done.
			 */
			xsoftc.num_highpower++;

			/*
			 * Any high powered commands queued up?
			 */
			if (send_ccb != NULL) {
				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
				lockmgr(&xsoftc.xpt_lock, LK_RELEASE);

				xpt_release_devq(send_ccb->ccb_h.path,
						 /*count*/1, /*runqueue*/TRUE);
			} else
				lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
		}

		if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
			struct cam_ed *dev;

			dev = ccb_h->path->device;

			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);

			/*
			 * devq may be NULL if this is cam_dead_sim
			 */
			if (ccb_h->path->bus->sim->devq) {
				ccb_h->path->bus->sim->devq->send_active--;
				ccb_h->path->bus->sim->devq->send_openings++;
			}

			if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
			  && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
			 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
			  && (dev->ccbq.dev_active == 0))) {
				xpt_release_devq(ccb_h->path, /*count*/1,
						 /*run_queue*/TRUE);
			}

			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
			 && (--dev->tag_delay_count == 0))
				xpt_start_tags(ccb_h->path);

			if ((dev->ccbq.queue.entries > 0)
			 && (dev->qfrozen_cnt == 0)
			 && (device_is_send_queued(dev) == 0)) {
				runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
							      dev);
			}
		}

		if (ccb_h->status & CAM_RELEASE_SIMQ) {
			xpt_release_simq(ccb_h->path->bus->sim,
					 /*run_queue*/TRUE);
			ccb_h->status &= ~CAM_RELEASE_SIMQ;
			runq = FALSE;
		}

		if ((ccb_h->flags & CAM_DEV_QFRZDIS)
		 && (ccb_h->status & CAM_DEV_QFRZN)) {
			xpt_release_devq(ccb_h->path, /*count*/1,
					 /*run_queue*/TRUE);
			ccb_h->status &= ~CAM_DEV_QFRZN;
		} else if (runq) {
			xpt_run_dev_sendq(ccb_h->path->bus);
		}

		/* Call the peripheral driver's callback */
		(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
		spin_lock(&sim->sim_spin);
	}
	spin_unlock(&sim->sim_spin);
}

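/*
 * Note on the loop above: sim_spin is dropped before each CCB is
 * processed and re-acquired only to pick up the next entry, so the
 * peripheral completion callback runs unlocked with respect to the done
 * queue and may itself queue new CCBs without deadlocking here.
 */
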
/*
 * The dead_sim isn't completely hooked into CAM; we have to make sure
 * the doneq is cleared after calling xpt_done() so cam_periph_ccbwait()
 * doesn't block.
 */
static void
dead_sim_action(struct cam_sim *sim, union ccb *ccb)
{
	ccb->ccb_h.status = CAM_DEV_NOT_THERE;
	xpt_done(ccb);
	camisr_runqueue(sim);
}

static void
dead_sim_poll(struct cam_sim *sim)