Sync quirk tables with FreeBSD.
sys/bus/cam/cam_xpt.c

/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/cam/cam_xpt.c,v 1.80.2.18 2002/12/09 17:31:55 gibbs Exp $
 * $DragonFly: src/sys/bus/cam/cam_xpt.c,v 1.62 2007/12/02 16:38:11 pavalos Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/devicestat.h>
#include <sys/interrupt.h>
#include <sys/sbuf.h>
#include <sys/bus.h>
#include <sys/thread.h>
#include <sys/thread2.h>

#include <machine/clock.h>
#include "cam.h"
#include "cam_ccb.h"
#include "cam_periph.h"
#include "cam_sim.h"
#include "cam_xpt.h"
#include "cam_xpt_sim.h"
#include "cam_xpt_periph.h"
#include "cam_debug.h"

#include "scsi/scsi_all.h"
#include "scsi/scsi_message.h"
#include "scsi/scsi_pass.h"
#include "opt_cam.h"

/* Datastructures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
	SLIST_ENTRY(async_node)	links;
	u_int32_t	event_enable;	/* Async Event enables */
	void		(*callback)(void *arg, u_int32_t code,
				    struct cam_path *path, void *args);
	void		*callback_arg;
};

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;
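
/*
 * Illustrative sketch (not code from this file): a peripheral driver
 * subscribes to these async lists by filling out a ccb_setasync and
 * handing it to xpt_action().  "mydriver_async" is a hypothetical
 * handler with the callback signature declared above.
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, periph->path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = mydriver_async;
 *	csa.callback_arg = periph;
 *	xpt_action((union ccb *)&csa);
 */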
/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;
/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
	cam_pinfo pinfo;
	struct	  cam_ed *device;
};
/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
	TAILQ_ENTRY(cam_ed) links;
	struct	cam_ed_qinfo alloc_ccb_entry;
	struct	cam_ed_qinfo send_ccb_entry;
	struct	cam_et	 *target;
	lun_id_t	 lun_id;
	struct	camq drvq;		/*
					 * Queue of type drivers wanting to do
					 * work on this device.
					 */
	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
	struct	async_list asyncs;	/* Async callback info for this B/T/L */
	struct	periph_list periphs;	/* All attached devices */
	u_int	generation;		/* Generation number */
	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
					/* Storage for the inquiry data */
#ifdef CAM_NEW_TRAN_CODE
	cam_proto	 protocol;
	u_int		 protocol_version;
	cam_xport	 transport;
	u_int		 transport_version;
#endif /* CAM_NEW_TRAN_CODE */
	struct	scsi_inquiry_data inq_data;
	u_int8_t	 inq_flags;	/*
					 * Current settings for inquiry flags.
					 * This allows us to override settings
					 * like disconnection and tagged
					 * queuing for a device.
					 */
	u_int8_t	 queue_flags;	/* Queue flags from the control page */
	u_int8_t	 serial_num_len;
	u_int8_t	*serial_num;
	u_int32_t	 qfrozen_cnt;
	u_int32_t	 flags;
#define CAM_DEV_UNCONFIGURED		0x01
#define CAM_DEV_REL_TIMEOUT_PENDING	0x02
#define CAM_DEV_REL_ON_COMPLETE		0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
#define CAM_DEV_TAG_AFTER_COUNT		0x20
#define CAM_DEV_INQUIRY_DATA_VALID	0x40
	u_int32_t	 tag_delay_count;
#define	CAM_TAG_DELAY_COUNT		5
	u_int32_t	 tag_saved_openings;
	u_int32_t	 refcount;
	struct		 callout c_handle;
};
/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
	TAILQ_HEAD(, cam_ed) ed_entries;
	TAILQ_ENTRY(cam_et) links;
	struct	cam_eb	*bus;
	target_id_t	target_id;
	u_int32_t	refcount;
	u_int		generation;
	struct		timeval last_reset;	/* uptime of last reset */
};
/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
	TAILQ_HEAD(, cam_et) et_entries;
	TAILQ_ENTRY(cam_eb)  links;
	path_id_t	     path_id;
	struct cam_sim	     *sim;
	struct timeval	     last_reset;	/* uptime of last reset */
	u_int32_t	     flags;
#define	CAM_EB_RUNQ_SCHEDULED	0x01
	u_int32_t	     refcount;
	u_int		     generation;
};
struct cam_path {
	struct cam_periph *periph;
	struct cam_eb	  *bus;
	struct cam_et	  *target;
	struct cam_ed	  *device;
};
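
/*
 * Sketch of how the pieces above fit together (an assumed, typical
 * topology rather than a dump of any real system):
 *
 *	xpt_busses -> cam_eb -> cam_et (target) -> cam_ed (lun)
 *						     -> cam_periph list
 *
 * A cam_path is just a cursor into that tree: one bus, one target, one
 * device, plus the peripheral driver using them, so comparing two paths
 * reduces to comparing these pointers.
 */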
struct xpt_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define	CAM_QUIRK_NOLUNS	0x01
#define	CAM_QUIRK_NOSERIAL	0x02
#define	CAM_QUIRK_HILUNS	0x04
#define	CAM_QUIRK_NOHILUNS	0x08
	u_int mintags;
	u_int maxtags;
};
static int cam_srch_hi = 0;
TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
    sysctl_cam_search_luns, "I",
    "allow search above LUN 7 for SCSI3 and greater devices");
#define	CAM_SCSI2_MAXLUN	8
/*
 * If we're not quirked to search <= the first 8 luns
 * and we are either quirked to search above lun 8,
 * or we're > SCSI-2 and we've enabled hilun searching,
 * or we're > SCSI-2 and the last lun was a success,
 * we can look for luns above lun 8.
 */
#define	CAN_SRCH_HI_SPARSE(dv)				\
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)	\
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)		\
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))

#define	CAN_SRCH_HI_DENSE(dv)				\
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)	\
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)		\
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))
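
/*
 * Worked example (illustrative): an unquirked SCSI-3 disk has
 * quirks == 0 and SID_ANSI_REV() > SCSI_REV_2, so CAN_SRCH_HI_DENSE()
 * is true and a dense scan keeps probing past LUN 7 as long as LUNs
 * keep answering.  CAN_SRCH_HI_SPARSE() additionally requires the
 * kern.cam.cam_srch_hi tunable to be nonzero, and gates probing of
 * high LUNs when the previous LUN did not respond.  A device quirked
 * CAM_QUIRK_NOHILUNS fails both predicates regardless.
 */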
typedef enum {
	XPT_FLAG_OPEN = 0x01
} xpt_flags;

struct xpt_softc {
	xpt_flags	flags;
	u_int32_t	generation;
};
static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";
static struct xpt_quirk_entry xpt_quirk_table[] =
{
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Unfortunately, the Quantum Atlas III has the same
		 * problem as the Atlas II drives above.
		 * Reported by: "Johan Granlund" <johan@granlund.nu>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM39100TD-SW N1B0
		 *
		 * It's possible that Quantum will fix the problem in later
		 * firmware revisions.  If that happens, the quirk entry
		 * will need to be made specific to the firmware revisions
		 * with the problem.
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * 18 Gig Atlas III, same problem as the 9G version.
		 * Reported by: Andre Albsmeier
		 *		<andre.albsmeier@mchp.siemens.de>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM318000TD-S N491
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * Broken tagged queuing drive
		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
		 *         and: Martin Renters <martin@tdc.on.ca>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
		/*
		 * The Seagate Medalist Pro drives have very poor write
		 * performance with anything more than 2 tags.
		 *
		 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
		 * Drive:  <SEAGATE ST36530N 1444>
		 *
		 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
		 * Drive:  <SEAGATE ST34520W 1281>
		 *
		 * No one has actually reported that the 9G version
		 * (ST39140*) of the Medalist Pro has the same problem, but
		 * we're assuming that it does because the 4G and 6.5G
		 * versions of the drive are broken.
		 */
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  Write performance
		 * steadily drops off with more and more concurrent
		 * transactions.  Best sequential write performance with
		 * tagged queueing turned off and write caching turned on.
		 *
		 * PR:  kern/10398
		 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
		 * Drive:  DCAS-34330 w/ "S65A" firmware.
		 *
		 * The drive with the problem had the "S65A" firmware
		 * revision, and has also been reported (by Stephen J.
		 * Roznowski <sjr@home.net>) for a drive with the "S61A"
		 * firmware revision.
		 *
		 * Although no one has reported problems with the 2 gig
		 * version of the DCAS drive, the assumption is that it
		 * has the same problems as the 4 gig version.  Therefore
		 * this quirk entry disables tagged queueing for all
		 * DCAS drives.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* This does not support other than LUN 0 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/*
		 * Broken tagged queuing drive.
		 * Submitted by:
		 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
		 * in PR kern/9535
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Doesn't handle queue full condition correctly,
		 * so we need to limit maxtags to what the device
		 * can handle instead of determining this automatically.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
		/*quirks*/0, /*mintags*/2, /*maxtags*/32
	},
	{
		/* Really only one LUN */
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* I can't believe we need a quirk for DPT volumes. */
		{ T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
		/*mintags*/0, /*maxtags*/255
	},
	{
		/*
		 * Many Sony CDROM drives don't like multi-LUN probing.
		 */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This drive doesn't like multiple LUN probing.
		 * Submitted by:  Parag Patel <parag@cgt.com>
		 */
		{ T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		{ T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * The 8200 doesn't like multi-lun probing, and probably
		 * doesn't like serial number requests either.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"EXB-8200*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Let's try the same as above, but for a drive that says
		 * it's an IPL-6860 but is actually an EXB 8200.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"IPL-6860*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These Hitachi drives don't like multi-lun probing.
		 * The PR submitter has a DK319H, but says that the Linux
		 * kernel has a similar work-around for the DK312 and DK314,
		 * so all DK31* drives are quirked here.
		 * PR:            misc/18793
		 * Submitted by:  Paul Haddad <paul@pth.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/*
		 * The Hitachi CJ series with J8A8 firmware apparently has
		 * problems with tagged commands.
		 * PR: 23536
		 * Reported by: amagai@nue.org
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These are the large storage arrays.
		 * Submitted by:  William Carrel <william.carrel@infospace.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
		CAM_QUIRK_HILUNS, 2, 1024
	},
	{
		/*
		 * This old revision of the TDC3600 is also SCSI-1, and
		 * hangs upon serial number probing.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
			" TDC 3600", "U07:"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Maxtor Personal Storage 3000XT (Firewire)
		 * hangs upon serial number probing.
		 */
		{
			T_DIRECT, SIP_MEDIA_FIXED, "Maxtor",
			"1394 storage", "*"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
			"CP150", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
			"96X2*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* TeraSolutions special settings for TRC-22 RAID */
		{ T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
		/*quirks*/0, /*mintags*/55, /*maxtags*/255
	},
	{
		/* Veritas Storage Appliance */
		{ T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
		CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
	},
	{
		/*
		 * Would respond to all LUNs.  Device type and removable
		 * flag are jumper-selectable.
		 */
		{ T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
		  "Tahiti 1", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* EasyRAID E5A aka. areca ARC-6010 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
		CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Western Digital My Book 250GB (USB)
		 * hangs upon serial number probing.
		 * PR: 107495
		 */
		{
			T_DIRECT, SIP_MEDIA_FIXED, "WD",
			"2500JB External", "*"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Default tagged queuing parameters for all devices */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0, /*mintags*/2, /*maxtags*/255
	},
};

static const int xpt_quirk_table_size =
	sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
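
/*
 * Illustrative sketch of how this table is consulted (the real lookup
 * lives in xpt_find_quirk(), declared further down; this mirrors its
 * shape under the assumption that it uses cam_quirkmatch() like the
 * rest of CAM):
 *
 *	quirk = (struct xpt_quirk_entry *)
 *		cam_quirkmatch((caddr_t)&device->inq_data,
 *			       (caddr_t)xpt_quirk_table,
 *			       xpt_quirk_table_size,
 *			       sizeof(*xpt_quirk_table),
 *			       scsi_inquiry_match);
 *
 * The catch-all last entry guarantees a match, so every device ends up
 * with at least the default 2..255 tag bounds.
 */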
typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;
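
/*
 * Note on the encoding above: the low nibble carries per-node flags
 * (currently just DM_RET_COPY) and the high nibble carries exactly one
 * action, so a match routine can return e.g.
 * (DM_RET_DESCEND | DM_RET_COPY) to mean "copy this node out and keep
 * walking the EDT below it".  DM_RET_FLAG_MASK and DM_RET_ACTION_MASK
 * pick the two halves apart, as the xpt*match() routines below do.
 */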
typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;
struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef	int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
static SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;	/*
				 * Maximum size of ccb pool.  Modified as
				 * devices are added/removed or have their
				 * opening counts changed.
				 */
static u_int xpt_ccb_count;	/* Current count of allocated ccbs */

struct cam_periph *xpt_periph;
static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
	probe_periph_init, "probe",
	TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);
PERIPHDRIVER_DECLARE(probe, probe_driver);
#define XPT_CDEV_MAJOR 104

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct dev_ops xpt_ops = {
	{ "xpt", XPT_CDEV_MAJOR, 0 },
	.d_open = xptopen,
	.d_close = xptclose,
	.d_ioctl = xptioctl
};

static struct intr_config_hook *xpt_config_hook;
static void dead_sim_action(struct cam_sim *sim, union ccb *ccb);
static void dead_sim_poll(struct cam_sim *sim);

/* Dummy SIM that is used when the real one has gone. */
static struct cam_sim cam_dead_sim = {
	.sim_action =	dead_sim_action,
	.sim_poll =	dead_sim_poll,
	.sim_name =	"dead_sim",
};

#define SIM_DEAD(sim)	((sim) == &cam_dead_sim)

/* Registered busses */
static TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;
/* Storage for debugging datastructures */
#ifdef CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static void	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);
static cam_status	xpt_compile_path(struct cam_path *new_path,
					 struct cam_periph *perph,
					 path_id_t path_id,
					 target_id_t target_id,
					 lun_id_t lun_id);

static void		xpt_release_path(struct cam_path *path);

static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static void		xpt_dev_async(u_int32_t async_code,
				      struct cam_eb *bus,
				      struct cam_et *target,
				      struct cam_ed *device,
				      void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				  u_int32_t new_priority);
static void	 xpt_run_dev_allocq(struct cam_eb *bus);
static void	 xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static void	 xpt_release_bus(struct cam_eb *bus);
static void	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
					 int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
				  lun_id_t lun_id);
static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
				    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void	 xpt_scan_lun(struct cam_periph *periph,
			      struct cam_path *path, cam_flags flags,
			      union ccb *ccb);
static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t	xptconfigbuscountfunc;
static xpt_busfunc_t	xptconfigfunc;
static void	 xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static inthand2_t swi_cambio;
static void	 camisr(cam_isrq_t *queue);
#if 0
static void	 xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void	 xptasync(struct cam_periph *periph,
			  u_int32_t code, cam_path *path);
#endif
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
#ifdef notusedyet
static int		xpt_for_all_targets(xpt_targetfunc_t *tr_func,
					    void *arg);
#endif
static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
					    void *arg);
#ifdef notusedyet
static int		xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
					    void *arg);
#endif
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static cam_status	proberegister(struct cam_periph *periph,
				      void *arg);
static void	 probeschedule(struct cam_periph *probe_periph);
static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
static void	 proberequestdefaultnegotiation(struct cam_periph *periph);
static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
static void	 probecleanup(struct cam_periph *periph);
static void	 xpt_find_quirk(struct cam_ed *device);
#ifdef CAM_NEW_TRAN_CODE
static void	 xpt_devise_transport(struct cam_path *path);
#endif /* CAM_NEW_TRAN_CODE */
static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
					   struct cam_ed *device,
					   int async_update);
static void	 xpt_toggle_tags(struct cam_path *path);
static void	 xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
					    struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
					   struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (bus->sim->devq && dev->ccbq.devq_openings > 0) {
		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
			cam_ccbq_resize(&dev->ccbq,
					dev->ccbq.dev_openings
					+ dev->ccbq.dev_active);
			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
		}
		/*
		 * The priority of a device waiting for CCB resources
		 * is that of the highest priority peripheral driver
		 * enqueued.
		 */
		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
					  &dev->alloc_ccb_entry.pinfo,
					  CAMQ_GET_HEAD(&dev->drvq)->priority);
	} else {
		retval = 0;
	}

	return (retval);
}
static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (bus->sim->devq && dev->ccbq.dev_openings > 0) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&bus->sim->devq->send_queue,
				     &dev->send_ccb_entry.pinfo,
				     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
	} else {
		retval = 0;
	}
	return (retval);
}
static __inline int
periph_is_queued(struct cam_periph *periph)
{
	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
	/*
	 * Have work to do.
	 * Have space to do more work.
	 * Allowed to do work.
	 */
	return ((devq->alloc_queue.qfrozen_cnt == 0)
	     && (devq->alloc_queue.entries > 0)
	     && (devq->alloc_openings > 0));
}
static void
xpt_periph_init(void)
{
	dev_ops_add(&xpt_ops, 0, 0);
	make_dev(&xpt_ops, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init(void)
{
}

static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}
static int
xptopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * Only allow read-write access.
	 */
	if (((ap->a_oflags & FWRITE) == 0) || ((ap->a_oflags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((ap->a_oflags & O_NONBLOCK) != 0) {
		kprintf("xpt%d: can't do nonblocking access\n", unit);
		return(ENODEV);
	}

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		kprintf("xptopen: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves open */
	xsoftc.flags |= XPT_FLAG_OPEN;

	return(0);
}
static int
xptclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		kprintf("xptclose: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves closed */
	xsoftc.flags &= ~XPT_FLAG_OPEN;

	return(0);
}
static int
xptioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int unit, error;

	error = 0;
	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		kprintf("xptioctl: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	switch(ap->a_cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;

		inccb = (union ccb *)ap->a_data;

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
				error = EINVAL;
				break;
			}
			/* FALLTHROUGH */
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			ccb.ccb_h.cbfcnp = xptdone;
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;
		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		break;
	}
	/*
	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit name filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass"), and unit number are passed back in the ccb.
	 * The current device generation number, the index into the device
	 * peripheral driver list, and the status are also passed back.
	 * Note that since we do everything in one pass, unlike the
	 * XPT_GDEVLIST ccb, we never return a status of
	 * CAM_GDEVLIST_LIST_CHANGED.  It is (or rather should be) impossible
	 * for the device peripheral driver list to change since we look at
	 * the whole thing in one pass, and we do it within a critical
	 * section.
	 */
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char   *name;
		u_int unit;
		u_int cur_generation;
		int base_periph_found;
		int splbreaknum;

		ccb = (union ccb *)ap->a_data;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		/*
		 * Every 100 devices, we want to call splz() to check for
		 * and allow the software interrupt handler a chance to run.
		 *
		 * Most systems won't run into this check, but this should
		 * avoid starvation in the software interrupt handler in
		 * large systems.
		 */
		splbreaknum = 100;

		ccb = (union ccb *)ap->a_data;

		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		crit_enter();
ptstartover:
		cur_generation = xsoftc.generation;

		/* first find our driver in the list of drivers */
		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;
		}

		if (*p_drv == NULL) {
			crit_exit();
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {

			if (periph->unit_number == unit) {
				break;
			} else if (--splbreaknum == 0) {
				splz();
				splbreaknum = 100;
				if (cur_generation != xsoftc.generation)
					goto ptstartover;
			}
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = SLIST_FIRST(&device->periphs);
			     periph != NULL;
			     periph = SLIST_NEXT(periph, periph_links), i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (SLIST_NEXT(periph, periph_links))
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				kprintf("xptioctl: pass driver is not in the "
				       "kernel\n");
				kprintf("xptioctl: put \"device pass0\" in "
				       "your kernel config file\n");
			}
		}
		crit_exit();
		break;
	}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}
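
/*
 * Userland view of the ioctls above (an illustrative sketch, not part
 * of the kernel; error handling elided, and XPT_PATH_INQ is just one of
 * the CCB types CAMIOCOMMAND accepts):
 *
 *	int fd = open("/dev/xpt0", O_RDWR);
 *	union ccb ccb;
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_PATH_INQ;
 *	ccb.ccb_h.path_id = 0;
 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
 *	if (ioctl(fd, CAMIOCOMMAND, &ccb) == 0)
 *		printf("sim: %s\n", ccb.cpi.dev_name);
 *
 * This is essentially what camcontrol(8) and libcam do on our behalf.
 */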
static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
	if (what == MOD_LOAD) {
		xpt_init(NULL);
	} else if (what == MOD_UNLOAD) {
		return EBUSY;
	} else {
		return EOPNOTSUPP;
	}

	return 0;
}
/* Functions accessed by the peripheral drivers */
static void
xpt_init(void *dummy)
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;

	TAILQ_INIT(&xpt_busses);
	TAILQ_INIT(&cam_bioq);
	SLIST_INIT(&ccb_freeq);
	STAILQ_INIT(&highpowerq);

	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	cam_simq_release(devq);
	xpt_max_ccbs = 16;

	xpt_bus_register(xpt_sim, /*bus #*/0);

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		kprintf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return;
	}

	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, NULL);
	xpt_free_path(path);

	xpt_sim->softc = xpt_periph;

	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xpt_config_hook = kmalloc(sizeof(struct intr_config_hook),
				  M_TEMP, M_INTWAIT | M_ZERO);
	xpt_config_hook->ich_func = xpt_config;
	xpt_config_hook->ich_desc = "xpt";
	xpt_config_hook->ich_order = 1000;
	if (config_intrhook_establish(xpt_config_hook) != 0) {
		kfree(xpt_config_hook, M_TEMP);
		kprintf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	/* Install our software interrupt handlers */
	register_swi(SWI_CAMBIO, swi_cambio, NULL, "swi_cambio", NULL);
}
static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	if (periph == NULL) {
		kprintf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	periph->softc = NULL;

	xpt_periph = periph;

	return(CAM_REQ_CMP);
}
int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;
	struct periph_list *periph_head;

	device = periph->path->device;

	periph_head = &device->periphs;

	status = CAM_REQ_CMP;

	if (device != NULL) {
		/*
		 * Make room for this peripheral
		 * so it will fit in the queue
		 * when it's scheduled to run
		 */
		crit_enter();
		status = camq_resize(&device->drvq,
				     device->drvq.array_size + 1);

		device->generation++;

		SLIST_INSERT_HEAD(periph_head, periph, periph_links);
		crit_exit();
	}

	xsoftc.generation++;

	return (status);
}
void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	device = periph->path->device;

	if (device != NULL) {
		struct periph_list *periph_head;

		periph_head = &device->periphs;

		/* Release the slot for this peripheral */
		crit_enter();
		camq_resize(&device->drvq, device->drvq.array_size - 1);

		device->generation++;

		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
		crit_exit();
	}

	xsoftc.generation++;
}
#ifdef CAM_NEW_TRAN_CODE

void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	struct	ccb_pathinq cpi;
	struct	ccb_trans_settings cts;
	struct	cam_path *path;
	u_int	speed;
	u_int	freq;
	u_int	mb;

	path = periph->path;
	/*
	 * To ensure that this is printed in one piece,
	 * mask out CAM interrupts.
	 */
	crit_enter();
	printf("%s%d at %s%d bus %d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->target->target_id,
	       path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	scsi_print_inquiry(&path->device->inq_data);
	if (bootverbose && path->device->serial_num_len > 0) {
		/* Don't wrap the screen - print only the first 60 chars */
		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/* Don't leave the critical section held on the early out. */
		crit_exit();
		return;
	}

	/* Ask the SIM for its base transfer speed */
	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	speed = cpi.base_transfer_speed;
	freq = 0;
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
		struct	ccb_trans_settings_spi *spi;

		spi = &cts.xport_specific.spi;
		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
		  && spi->sync_offset != 0) {
			freq = scsi_calc_syncsrate(spi->sync_period);
			speed = freq;
		}

		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
			speed *= (0x01 << spi->bus_width);
	}
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
		struct	ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_SPEED) {
			speed = fc->bitrate;
		}
	}

	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SAS) {
		struct	ccb_trans_settings_sas *sas = &cts.xport_specific.sas;
		if (sas->valid & CTS_SAS_VALID_SPEED) {
			speed = sas->bitrate;
		}
	}

	mb = speed / 1000;
	if (mb > 0)
		printf("%s%d: %d.%03dMB/s transfers",
		       periph->periph_name, periph->unit_number,
		       mb, speed % 1000);
	else
		printf("%s%d: %dKB/s transfers", periph->periph_name,
		       periph->unit_number, speed);
	/* Report additional information about SPI connections */
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
		struct	ccb_trans_settings_spi *spi;

		spi = &cts.xport_specific.spi;
		if (freq != 0) {
			printf(" (%d.%03dMHz%s, offset %d", freq / 1000,
			       freq % 1000,
			       (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
			     ? " DT" : "",
			       spi->sync_offset);
		}
		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
		 && spi->bus_width > 0) {
			if (freq != 0) {
				printf(", ");
			} else {
				printf(" (");
			}
			printf("%dbit)", 8 * (0x01 << spi->bus_width));
		} else if (freq != 0) {
			printf(")");
		}
	}
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
		struct ccb_trans_settings_fc *fc;

		fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_WWNN)
			printf(" WWNN 0x%llx", (long long) fc->wwnn);
		if (fc->valid & CTS_FC_VALID_WWPN)
			printf(" WWPN 0x%llx", (long long) fc->wwpn);
		if (fc->valid & CTS_FC_VALID_PORT)
			printf(" PortID 0x%x", fc->port);
	}

	if (path->device->inq_flags & SID_CmdQue
	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		printf("\n%s%d: Tagged Queueing Enabled",
		       periph->periph_name, periph->unit_number);
	}
	printf("\n");

	/*
	 * We only want to print the caller's announce string if they've
	 * passed one in..
	 */
	if (announce_string != NULL)
		printf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
	crit_exit();
}
#else /* CAM_NEW_TRAN_CODE */
void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	u_int mb;
	struct cam_path *path;
	struct ccb_trans_settings cts;

	path = periph->path;
	/*
	 * To ensure that this is printed in one piece,
	 * mask out CAM interrupts.
	 */
	crit_enter();
	kprintf("%s%d at %s%d bus %d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->target->target_id,
	       path->device->lun_id);
	kprintf("%s%d: ", periph->periph_name, periph->unit_number);
	scsi_print_inquiry(&path->device->inq_data);
	if ((bootverbose)
	 && (path->device->serial_num_len > 0)) {
		/* Don't wrap the screen - print only the first 60 chars */
		kprintf("%s%d: Serial Number %.60s\n", periph->periph_name,
			periph->unit_number, path->device->serial_num);
	}
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.flags = CCB_TRANS_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if (cts.ccb_h.status == CAM_REQ_CMP) {
		u_int speed;
		u_int freq;

		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		  && cts.sync_offset != 0) {
			freq = scsi_calc_syncsrate(cts.sync_period);
			speed = freq;
		} else {
			struct ccb_pathinq cpi;

			/* Ask the SIM for its base transfer speed */
			xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
			cpi.ccb_h.func_code = XPT_PATH_INQ;
			xpt_action((union ccb *)&cpi);

			speed = cpi.base_transfer_speed;
			freq = 0;
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
			speed *= (0x01 << cts.bus_width);
		mb = speed / 1000;
		if (mb > 0)
			kprintf("%s%d: %d.%03dMB/s transfers",
			       periph->periph_name, periph->unit_number,
			       mb, speed % 1000);
		else
			kprintf("%s%d: %dKB/s transfers", periph->periph_name,
			       periph->unit_number, speed);
		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		 && cts.sync_offset != 0) {
			kprintf(" (%d.%03dMHz, offset %d", freq / 1000,
			       freq % 1000, cts.sync_offset);
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
		 && cts.bus_width > 0) {
			if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			 && cts.sync_offset != 0) {
				kprintf(", ");
			} else {
				kprintf(" (");
			}
			kprintf("%dbit)", 8 * (0x01 << cts.bus_width));
		} else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			&& cts.sync_offset != 0) {
			kprintf(")");
		}

		if (path->device->inq_flags & SID_CmdQue
		 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
			kprintf(", Tagged Queueing Enabled");
		}

		kprintf("\n");
	} else if (path->device->inq_flags & SID_CmdQue
		|| path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		kprintf("%s%d: Tagged Queueing Enabled\n",
		       periph->periph_name, periph->unit_number);
	}

	/*
	 * We only want to print the caller's announce string if they've
	 * passed one in..
	 */
	if (announce_string != NULL)
		kprintf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
	crit_exit();
}

#endif /* CAM_NEW_TRAN_CODE */
static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}
static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_ed *device)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (device == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this device matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct device_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a device node, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_DEVICE) {
			if ((patterns[i].type == DEV_MATCH_PERIPH)
			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.device_pattern;

		/*
		 * If they want to match any device node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == DEV_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == DEV_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != device->target->bus->path_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
		 && (cur_pattern->target_id != device->target->target_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
		 && (cur_pattern->target_lun != device->lun_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
		 && (cam_quirkmatch((caddr_t)&device->inq_data,
				    (caddr_t)&cur_pattern->inq_pat,
				    1, sizeof(cur_pattern->inq_pat),
				    scsi_static_inquiry_match) == NULL))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this device.  So tell the caller to copy
		 * the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a peripheral matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a peripheral
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen any peripheral matching patterns.  So tell the
	 * caller to stop descending the tree -- the user doesn't want to
	 * match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}
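/*
 * Illustrative sketch (editor's addition, hypothetical values): to
 * match every LUN on target 3 of bus 0, userland fills in one
 * pattern --
 *
 *	struct dev_match_pattern p;
 *
 *	p.type = DEV_MATCH_DEVICE;
 *	p.pattern.device_pattern.flags = DEV_MATCH_PATH | DEV_MATCH_TARGET;
 *	p.pattern.device_pattern.path_id = 0;
 *	p.pattern.device_pattern.target_id = 3;
 *
 * xptdevicematch(&p, 1, device) then returns DM_RET_COPY | DM_RET_STOP
 * for devices on that target and plain DM_RET_STOP for all others.
 */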
/*
 * Match a single peripheral against any number of match patterns.
 */
static dev_match_ret
xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_periph *periph)
{
	dev_match_ret retval;
	int i;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (periph == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this peripheral matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_STOP | DM_RET_COPY);

	/*
	 * There aren't any nodes below a peripheral node, so there's no
	 * reason to descend the tree any further.
	 */
	retval = DM_RET_STOP;

	for (i = 0; i < num_patterns; i++) {
		struct periph_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a peripheral, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_PERIPH)
			continue;

		cur_pattern = &patterns[i].pattern.periph_pattern;

		/*
		 * If they want to match on anything, then we will do so.
		 */
		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * We've already set the return action to stop,
			 * since there are no nodes below peripherals in
			 * the tree.
			 */
			return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == PERIPH_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != periph->path->bus->path_id))
			continue;

		/*
		 * For the target and lun id's, we have to make sure the
		 * target and lun pointers aren't NULL.  The xpt peripheral
		 * has a wildcard target and device.
		 */
		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
		 && ((periph->path->target == NULL)
		 || (cur_pattern->target_id != periph->path->target->target_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
		 && ((periph->path->device == NULL)
		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != periph->unit_number))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this peripheral.  So tell the caller to
		 * copy the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * The return action has already been set to stop, since
		 * peripherals don't have any nodes below them in the EDT.
		 */
		return(retval);
	}

	/*
	 * If we get to this point, the peripheral that was passed in
	 * doesn't match any of the patterns.
	 */
	return(retval);
}
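/*
 * Illustrative example (editor's note): periph_name holds the driver
 * name only ("da"), never the unit, so a PERIPH_MATCH_NAME pattern
 * with periph_name "da" matches every da(4) instance; combining it
 * with PERIPH_MATCH_UNIT and unit_number 1 narrows the match to da1.
 */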
static int
xptedtbusfunc(struct cam_eb *bus, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);

	/*
	 * If we got an error, bail out of the search.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this bus out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;

			cdm->pos.cookie.bus = bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				bus_generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_BUS;
		cdm->matches[j].result.bus_result.path_id = bus->path_id;
		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
		cdm->matches[j].result.bus_result.unit_number =
			bus->sim->unit_number;
		strncpy(cdm->matches[j].result.bus_result.dev_name,
			bus->sim->sim_name, DEV_IDLEN);
	}

	/*
	 * If the user is only interested in busses, there's no
	 * reason to descend to the next level in the tree.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a target generation recorded, check it to
	 * make sure the target list hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (bus == cdm->pos.cookie.bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
	     bus->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		return(xpttargettraverse(bus,
					 (struct cam_et *)cdm->pos.cookie.target,
					 xptedttargetfunc, arg));
	else
		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
}
static int
xptedttargetfunc(struct cam_et *target, void *arg)
{
	struct ccb_dev_match *cdm;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If there is a device list generation recorded, check it to
	 * make sure the device list hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
	     target->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device != NULL))
		return(xptdevicetraverse(target,
					 (struct cam_ed *)cdm->pos.cookie.device,
					 xptedtdevicefunc, arg));
	else
		return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
}
static int
xptedtdevicefunc(struct cam_ed *device, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
					device);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this device out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;

			cdm->pos.cookie.bus = device->target->bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				bus_generation;
			cdm->pos.cookie.target = device->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				device->target->bus->generation;
			cdm->pos.cookie.device = device;
			cdm->pos.generations[CAM_DEV_GENERATION] =
				device->target->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_DEVICE;
		cdm->matches[j].result.device_result.path_id =
			device->target->bus->path_id;
		cdm->matches[j].result.device_result.target_id =
			device->target->target_id;
		cdm->matches[j].result.device_result.target_lun =
			device->lun_id;
		bcopy(&device->inq_data,
		      &cdm->matches[j].result.device_result.inq_data,
		      sizeof(struct scsi_inquiry_data));

		/* Let the user know whether this device is unconfigured */
		if (device->flags & CAM_DEV_UNCONFIGURED)
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_UNCONFIGURED;
		else
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_NOFLAG;
	}

	/*
	 * If the user isn't interested in peripherals, don't descend
	 * the tree any further.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a peripheral list generation recorded, make sure
	 * it hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (device->target->bus == cdm->pos.cookie.bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (device->target == cdm->pos.cookie.target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (device == cdm->pos.cookie.device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
	     device->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == device->target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == device->target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		return(xptperiphtraverse(device,
				(struct cam_periph *)cdm->pos.cookie.periph,
				xptedtperiphfunc, arg));
	else
		return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
}
static int
xptedtperiphfunc(struct cam_periph *periph, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this peripheral out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
				CAM_DEV_POS_PERIPH;

			cdm->pos.cookie.bus = periph->path->bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				bus_generation;
			cdm->pos.cookie.target = periph->path->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				periph->path->bus->generation;
			cdm->pos.cookie.device = periph->path->device;
			cdm->pos.generations[CAM_DEV_GENERATION] =
				periph->path->target->generation;
			cdm->pos.cookie.periph = periph;
			cdm->pos.generations[CAM_PERIPH_GENERATION] =
				periph->path->device->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}

		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_PERIPH;
		cdm->matches[j].result.periph_result.path_id =
			periph->path->bus->path_id;
		cdm->matches[j].result.periph_result.target_id =
			periph->path->target->target_id;
		cdm->matches[j].result.periph_result.target_lun =
			periph->path->device->lun_id;
		cdm->matches[j].result.periph_result.unit_number =
			periph->unit_number;
		strncpy(cdm->matches[j].result.periph_result.periph_name,
			periph->periph_name, DEV_IDLEN);
	}

	return(1);
}
static int
xptedtmatch(struct ccb_dev_match *cdm)
{
	int ret;

	cdm->num_matches = 0;

	/*
	 * Check the bus list generation.  If it has changed, the user
	 * needs to reset everything and start over.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus != NULL))
		ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
				     xptedtbusfunc, cdm);
	else
		ret = xptbustraverse(NULL, xptedtbusfunc, cdm);

	/*
	 * If we get back 0, that means that we had to stop before fully
	 * traversing the EDT.  It also means that one of the subroutines
	 * has set the status field to the proper value.  If we get back 1,
	 * we've fully traversed the EDT and copied out any matching entries.
	 */
	if (ret == 1)
		cdm->status = CAM_DEV_MATCH_LAST;

	return(ret);
}
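/*
 * Userland-side sketch (editor's addition; assumes the usual route of
 * an XPT_DEV_MATCH CCB through the xpt(4) device's CAMIOCOMMAND ioctl,
 * with "fd" and "ccb" already set up): re-issue the CCB while the
 * match code reports CAM_DEV_MATCH_MORE, leaving ccb.cdm.pos untouched
 * between calls so the traversal resumes where the previous pass ran
 * out of buffer space --
 *
 *	do {
 *		if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
 *			err(1, "CAMIOCOMMAND");
 *		... consume ccb.cdm.matches[0 .. ccb.cdm.num_matches - 1] ...
 *	} while (ccb.ccb_h.status == CAM_REQ_CMP
 *	      && ccb.cdm.status == CAM_DEV_MATCH_MORE);
 */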
static int
xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
{
	struct ccb_dev_match *cdm;

	cdm = (struct ccb_dev_match *)arg;

	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
	 && (cdm->pos.cookie.pdrv == pdrv)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
	     (*pdrv)->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
	 && (cdm->pos.cookie.pdrv == pdrv)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		return(xptpdperiphtraverse(pdrv,
				(struct cam_periph *)cdm->pos.cookie.periph,
				xptplistperiphfunc, arg));
	else
		return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc,
					   arg));
}
static int
xptplistperiphfunc(struct cam_periph *periph, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this peripheral out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			struct periph_driver **pdrv;

			pdrv = NULL;
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
				CAM_DEV_POS_PERIPH;

			/*
			 * This may look a bit non-sensical, but it is
			 * actually quite logical.  There are very few
			 * peripheral drivers, and bloating every peripheral
			 * structure with a pointer back to its parent
			 * peripheral driver linker set entry would cost
			 * more in the long run than doing this quick lookup.
			 */
			for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
				if (strcmp((*pdrv)->driver_name,
				    periph->periph_name) == 0)
					break;
			}

			if (*pdrv == NULL) {
				cdm->status = CAM_DEV_MATCH_ERROR;
				return(0);
			}

			cdm->pos.cookie.pdrv = pdrv;
			/*
			 * The periph generation slot does double duty, as
			 * does the periph pointer slot.  They are used for
			 * both edt and pdrv lookups and positioning.
			 */
			cdm->pos.cookie.periph = periph;
			cdm->pos.generations[CAM_PERIPH_GENERATION] =
				(*pdrv)->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}

		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_PERIPH;
		cdm->matches[j].result.periph_result.path_id =
			periph->path->bus->path_id;

		/*
		 * The transport layer peripheral doesn't have a target or
		 * lun.
		 */
		if (periph->path->target)
			cdm->matches[j].result.periph_result.target_id =
				periph->path->target->target_id;
		else
			cdm->matches[j].result.periph_result.target_id = -1;

		if (periph->path->device)
			cdm->matches[j].result.periph_result.target_lun =
				periph->path->device->lun_id;
		else
			cdm->matches[j].result.periph_result.target_lun = -1;

		cdm->matches[j].result.periph_result.unit_number =
			periph->unit_number;
		strncpy(cdm->matches[j].result.periph_result.periph_name,
			periph->periph_name, DEV_IDLEN);
	}

	return(1);
}
static int
xptperiphlistmatch(struct ccb_dev_match *cdm)
{
	int ret;

	cdm->num_matches = 0;

	/*
	 * At this point in the edt traversal function, we check the bus
	 * list generation to make sure that no busses have been added or
	 * removed since the user last sent a XPT_DEV_MATCH ccb through.
	 * For the peripheral driver list traversal function, however, we
	 * don't have to worry about new peripheral driver types coming or
	 * going; they're in a linker set, and therefore can't change
	 * without a recompile.
	 */

	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
	 && (cdm->pos.cookie.pdrv != NULL))
		ret = xptpdrvtraverse(
				(struct periph_driver **)cdm->pos.cookie.pdrv,
				xptplistpdrvfunc, cdm);
	else
		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);

	/*
	 * If we get back 0, that means that we had to stop before fully
	 * traversing the peripheral driver list.  It also means that one of
	 * the subroutines has set the status field to the proper value.  If
	 * we get back 1, we've fully traversed the peripheral driver list
	 * and copied out any matching entries.
	 */
	if (ret == 1)
		cdm->status = CAM_DEV_MATCH_LAST;

	return(ret);
}
static int
xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
{
	struct cam_eb *bus, *next_bus;
	int retval;

	retval = 1;

	for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
	     bus != NULL;
	     bus = next_bus) {
		next_bus = TAILQ_NEXT(bus, links);

		retval = tr_func(bus, arg);
		if (retval == 0)
			return(retval);
	}

	return(retval);
}

static int
xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
		  xpt_targetfunc_t *tr_func, void *arg)
{
	struct cam_et *target, *next_target;
	int retval;

	retval = 1;
	for (target = (start_target ? start_target :
		       TAILQ_FIRST(&bus->et_entries));
	     target != NULL; target = next_target) {
		next_target = TAILQ_NEXT(target, links);

		retval = tr_func(target, arg);

		if (retval == 0)
			return(retval);
	}

	return(retval);
}

static int
xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
		  xpt_devicefunc_t *tr_func, void *arg)
{
	struct cam_ed *device, *next_device;
	int retval;

	retval = 1;
	for (device = (start_device ? start_device :
		       TAILQ_FIRST(&target->ed_entries));
	     device != NULL;
	     device = next_device) {
		next_device = TAILQ_NEXT(device, links);

		retval = tr_func(device, arg);

		if (retval == 0)
			return(retval);
	}

	return(retval);
}

static int
xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
		  xpt_periphfunc_t *tr_func, void *arg)
{
	struct cam_periph *periph, *next_periph;
	int retval;

	retval = 1;

	for (periph = (start_periph ? start_periph :
		       SLIST_FIRST(&device->periphs));
	     periph != NULL;
	     periph = next_periph) {
		next_periph = SLIST_NEXT(periph, periph_links);

		retval = tr_func(periph, arg);
		if (retval == 0)
			return(retval);
	}

	return(retval);
}

static int
xptpdrvtraverse(struct periph_driver **start_pdrv,
		xpt_pdrvfunc_t *tr_func, void *arg)
{
	struct periph_driver **pdrv;
	int retval;

	retval = 1;

	/*
	 * We don't traverse the peripheral driver list like we do the
	 * other lists, because it is a linker set, and therefore cannot be
	 * changed during runtime.  If the peripheral driver list is ever
	 * re-done to be something other than a linker set (i.e. it can
	 * change while the system is running), the list traversal should
	 * be modified to work like the other traversal functions.
	 */
	for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
	     *pdrv != NULL; pdrv++) {
		retval = tr_func(pdrv, arg);

		if (retval == 0)
			return(retval);
	}

	return(retval);
}

static int
xptpdperiphtraverse(struct periph_driver **pdrv,
		    struct cam_periph *start_periph,
		    xpt_periphfunc_t *tr_func, void *arg)
{
	struct cam_periph *periph, *next_periph;
	int retval;

	retval = 1;

	for (periph = (start_periph ? start_periph :
		       TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
	     periph = next_periph) {
		next_periph = TAILQ_NEXT(periph, unit_links);

		retval = tr_func(periph, arg);
		if (retval == 0)
			return(retval);
	}
	return(retval);
}
static int
xptdefbusfunc(struct cam_eb *bus, void *arg)
{
	struct xpt_traverse_config *tr_config;

	tr_config = (struct xpt_traverse_config *)arg;

	if (tr_config->depth == XPT_DEPTH_BUS) {
		xpt_busfunc_t *tr_func;

		tr_func = (xpt_busfunc_t *)tr_config->tr_func;

		return(tr_func(bus, tr_config->tr_arg));
	} else
		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
}

static int
xptdeftargetfunc(struct cam_et *target, void *arg)
{
	struct xpt_traverse_config *tr_config;

	tr_config = (struct xpt_traverse_config *)arg;

	if (tr_config->depth == XPT_DEPTH_TARGET) {
		xpt_targetfunc_t *tr_func;

		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;

		return(tr_func(target, tr_config->tr_arg));
	} else
		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
}

static int
xptdefdevicefunc(struct cam_ed *device, void *arg)
{
	struct xpt_traverse_config *tr_config;

	tr_config = (struct xpt_traverse_config *)arg;

	if (tr_config->depth == XPT_DEPTH_DEVICE) {
		xpt_devicefunc_t *tr_func;

		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;

		return(tr_func(device, tr_config->tr_arg));
	} else
		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
}

static int
xptdefperiphfunc(struct cam_periph *periph, void *arg)
{
	struct xpt_traverse_config *tr_config;
	xpt_periphfunc_t *tr_func;

	tr_config = (struct xpt_traverse_config *)arg;

	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;

	/*
	 * Unlike the other default functions, we don't check for depth
	 * here.  The peripheral driver level is the last level in the EDT,
	 * so if we're here, we should execute the function in question.
	 */
	return(tr_func(periph, tr_config->tr_arg));
}
/*
 * Execute the given function for every bus in the EDT.
 */
static int
xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
{
	struct xpt_traverse_config tr_config;

	tr_config.depth = XPT_DEPTH_BUS;
	tr_config.tr_func = tr_func;
	tr_config.tr_arg = arg;

	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
}

#ifdef notusedyet
/*
 * Execute the given function for every target in the EDT.
 */
static int
xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
{
	struct xpt_traverse_config tr_config;

	tr_config.depth = XPT_DEPTH_TARGET;
	tr_config.tr_func = tr_func;
	tr_config.tr_arg = arg;

	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
}
#endif /* notusedyet */

/*
 * Execute the given function for every device in the EDT.
 */
static int
xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
{
	struct xpt_traverse_config tr_config;

	tr_config.depth = XPT_DEPTH_DEVICE;
	tr_config.tr_func = tr_func;
	tr_config.tr_arg = arg;

	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
}

#ifdef notusedyet
/*
 * Execute the given function for every peripheral in the EDT.
 */
static int
xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
{
	struct xpt_traverse_config tr_config;

	tr_config.depth = XPT_DEPTH_PERIPH;
	tr_config.tr_func = tr_func;
	tr_config.tr_arg = arg;

	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
}
#endif /* notusedyet */
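/*
 * Callback contract for the traversal helpers above (editor's note):
 * tr_func returns non-zero to continue the walk and 0 to abort it, and
 * that value is propagated back to the original caller.  A minimal
 * sketch with a hypothetical counting callback:
 *
 *	static int
 *	xptcountdevfunc(struct cam_ed *device, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (1);		non-zero: keep traversing
 *	}
 *
 *	int count = 0;
 *	xpt_for_all_devices(xptcountdevfunc, &count);
 */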
static int
xptsetasyncfunc(struct cam_ed *device, void *arg)
{
	struct cam_path path;
	struct ccb_getdev cgd;
	struct async_node *cur_entry;

	cur_entry = (struct async_node *)arg;

	/*
	 * Don't report unconfigured devices (Wildcard devs,
	 * devices only for target mode, device instances
	 * that have been invalidated but are waiting for
	 * their last reference count to be released).
	 */
	if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
		return (1);

	xpt_compile_path(&path,
			 NULL,
			 device->target->bus->path_id,
			 device->target->target_id,
			 device->lun_id);
	xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
	xpt_action((union ccb *)&cgd);
	cur_entry->callback(cur_entry->callback_arg,
			    AC_FOUND_DEVICE,
			    &path, &cgd);
	xpt_release_path(&path);

	return(1);
}

static int
xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
{
	struct cam_path path;
	struct ccb_pathinq cpi;
	struct async_node *cur_entry;

	cur_entry = (struct async_node *)arg;

	xpt_compile_path(&path, /*periph*/NULL,
			 bus->sim->path_id,
			 CAM_TARGET_WILDCARD,
			 CAM_LUN_WILDCARD);
	xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	cur_entry->callback(cur_entry->callback_arg,
			    AC_PATH_REGISTERED,
			    &path, &cpi);
	xpt_release_path(&path);

	return(1);
}
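/*
 * Registration sketch (editor's illustration of the periph-side
 * pattern that lands in the XPT_SASYNC_CB handler below; "mycallback"
 * is a hypothetical handler, and priority 5 is just a typical value):
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = mycallback;
 *	csa.callback_arg = NULL;
 *	xpt_action((union ccb *)&csa);
 *
 * Because AC_FOUND_DEVICE is newly enabled, xptsetasyncfunc() above is
 * run for every configured device so the new client catches up on
 * devices that already exist.
 */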
void
xpt_action(union ccb *start_ccb)
{
	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));

	start_ccb->ccb_h.status = CAM_REQ_INPROG;

	crit_enter();

	switch (start_ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
	{
#ifdef CAM_NEW_TRAN_CODE
		struct cam_ed *device;
#endif /* CAM_NEW_TRAN_CODE */
#ifdef CAMDEBUG
		char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
		struct cam_path *path;

		path = start_ccb->ccb_h.path;
#endif

		/*
		 * For the sake of compatibility with SCSI-1
		 * devices that may not understand the identify
		 * message, we include lun information in the
		 * second byte of all commands.  SCSI-1 specifies
		 * that luns are a 3 bit value and reserves only 3
		 * bits for lun information in the CDB.  Later
		 * revisions of the SCSI spec allow for more than 8
		 * luns, but have deprecated lun information in the
		 * CDB.  So, if the lun won't fit, we must omit it.
		 *
		 * Also be aware that during initial probing for devices,
		 * the inquiry information is unknown but initialized to 0.
		 * This means that this code will be exercised while probing
		 * devices with an ANSI revision greater than 2.
		 */
#ifdef CAM_NEW_TRAN_CODE
		device = start_ccb->ccb_h.path->device;
		if (device->protocol_version <= SCSI_REV_2
#else /* CAM_NEW_TRAN_CODE */
		if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
#endif /* CAM_NEW_TRAN_CODE */
		 && start_ccb->ccb_h.target_lun < 8
		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {

			start_ccb->csio.cdb_io.cdb_bytes[1] |=
			    start_ccb->ccb_h.target_lun << 5;
		}
		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
		CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
			  scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
				       &path->device->inq_data),
			  scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
					  cdb_str, sizeof(cdb_str))));
		/* FALLTHROUGH */
	}
	case XPT_TARGET_IO:
	case XPT_CONT_TARGET_IO:
		start_ccb->csio.sense_resid = 0;
		start_ccb->csio.resid = 0;
		/* FALLTHROUGH */
	case XPT_RESET_DEV:
	case XPT_ENG_EXEC:
	{
		struct cam_path *path;
		struct cam_sim *sim;
		int runq;

		path = start_ccb->ccb_h.path;

		sim = path->bus->sim;
		if (SIM_DEAD(sim)) {
			/* The SIM has gone; just execute the CCB directly. */
			cam_ccbq_send_ccb(&path->device->ccbq, start_ccb);
			(*(sim->sim_action))(sim, start_ccb);
			break;
		}

		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
		if (path->device->qfrozen_cnt == 0)
			runq = xpt_schedule_dev_sendq(path->bus, path->device);
		else
			runq = 0;
		if (runq != 0)
			xpt_run_dev_sendq(path->bus);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:
	{
		xpt_set_transfer_settings(&start_ccb->cts,
					  start_ccb->ccb_h.path->device,
					  /*async_update*/FALSE);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct cam_sim *sim;

		/* Filter out garbage */
		if (start_ccb->ccg.block_size == 0
		 || start_ccb->ccg.volume_size == 0) {
			start_ccb->ccg.cylinders = 0;
			start_ccb->ccg.heads = 0;
			start_ccb->ccg.secs_per_track = 0;
			start_ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		sim = start_ccb->ccb_h.path->bus->sim;
		(*(sim->sim_action))(sim, start_ccb);
		break;
	}
	case XPT_ABORT:
	{
		union ccb* abort_ccb;

		abort_ccb = start_ccb->cab.abort_ccb;
		if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {

			if (abort_ccb->ccb_h.pinfo.index >= 0) {
				struct cam_ccbq *ccbq;

				ccbq = &abort_ccb->ccb_h.path->device->ccbq;
				cam_ccbq_remove_ccb(ccbq, abort_ccb);
				abort_ccb->ccb_h.status =
				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
				xpt_done(abort_ccb);
				start_ccb->ccb_h.status = CAM_REQ_CMP;
				break;
			}
			if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
			 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
				/*
				 * We've caught this ccb en route to
				 * the SIM.  Flag it for abort and the
				 * SIM will do so just before starting
				 * real work on the CCB.
				 */
				abort_ccb->ccb_h.status =
				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
				start_ccb->ccb_h.status = CAM_REQ_CMP;
				break;
			}
		}
		if (XPT_FC_IS_QUEUED(abort_ccb)
		 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
			/*
			 * It's already completed but waiting
			 * for our SWI to get to it.
			 */
			start_ccb->ccb_h.status = CAM_UA_ABORT;
			break;
		}
		/*
		 * If we weren't able to take care of the abort request
		 * in the XPT, pass the request down to the SIM for processing.
		 */
		/* FALLTHROUGH */
	}
	case XPT_ACCEPT_TARGET_IO:
	case XPT_EN_LUN:
	case XPT_IMMED_NOTIFY:
	case XPT_NOTIFY_ACK:
	case XPT_GET_TRAN_SETTINGS:
	case XPT_RESET_BUS:
	{
		struct cam_sim *sim;

		sim = start_ccb->ccb_h.path->bus->sim;
		(*(sim->sim_action))(sim, start_ccb);
		break;
	}
	case XPT_PATH_INQ:
	{
		struct cam_sim *sim;

		sim = start_ccb->ccb_h.path->bus->sim;
		(*(sim->sim_action))(sim, start_ccb);
		break;
	}
	case XPT_PATH_STATS:
		start_ccb->cpis.last_reset =
			start_ccb->ccb_h.path->bus->last_reset;
		start_ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_GDEV_TYPE:
	{
		struct cam_ed *dev;

		dev = start_ccb->ccb_h.path->device;
		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		} else {
			struct ccb_getdev *cgd;
			struct cam_eb *bus;
			struct cam_et *tar;

			cgd = &start_ccb->cgd;
			bus = cgd->ccb_h.path->bus;
			tar = cgd->ccb_h.path->target;
			cgd->inq_data = dev->inq_data;
			cgd->ccb_h.status = CAM_REQ_CMP;
			cgd->serial_num_len = dev->serial_num_len;
			if ((dev->serial_num_len > 0)
			 && (dev->serial_num != NULL))
				bcopy(dev->serial_num, cgd->serial_num,
				      dev->serial_num_len);
		}
		break;
	}
	case XPT_GDEV_STATS:
	{
		struct cam_ed *dev;

		dev = start_ccb->ccb_h.path->device;
		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		} else {
			struct ccb_getdevstats *cgds;
			struct cam_eb *bus;
			struct cam_et *tar;

			cgds = &start_ccb->cgds;
			bus = cgds->ccb_h.path->bus;
			tar = cgds->ccb_h.path->target;
			cgds->dev_openings = dev->ccbq.dev_openings;
			cgds->dev_active = dev->ccbq.dev_active;
			cgds->devq_openings = dev->ccbq.devq_openings;
			cgds->devq_queued = dev->ccbq.queue.entries;
			cgds->held = dev->ccbq.held;
			cgds->last_reset = tar->last_reset;
			cgds->maxtags = dev->quirk->maxtags;
			cgds->mintags = dev->quirk->mintags;
			if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
				cgds->last_reset = bus->last_reset;
			cgds->ccb_h.status = CAM_REQ_CMP;
		}
		break;
	}
	case XPT_GDEVLIST:
	{
		struct cam_periph *nperiph;
		struct periph_list *periph_head;
		struct ccb_getdevlist *cgdl;
		u_int i;
		struct cam_ed *device;
		int found;

		found = 0;

		/*
		 * Don't want anyone mucking with our data.
		 */
		device = start_ccb->ccb_h.path->device;
		periph_head = &device->periphs;
		cgdl = &start_ccb->cgdl;

		/*
		 * Check and see if the list has changed since the user
		 * last requested a list member.  If so, tell them that the
		 * list has changed, and therefore they need to start over
		 * from the beginning.
		 */
		if ((cgdl->index != 0) &&
		    (cgdl->generation != device->generation)) {
			cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
			break;
		}

		/*
		 * Traverse the list of peripherals and attempt to find
		 * the requested peripheral.
		 */
		for (nperiph = SLIST_FIRST(periph_head), i = 0;
		     (nperiph != NULL) && (i <= cgdl->index);
		     nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
			if (i == cgdl->index) {
				strncpy(cgdl->periph_name,
					nperiph->periph_name,
					DEV_IDLEN);
				cgdl->unit_number = nperiph->unit_number;
				found = 1;
			}
		}
		if (found == 0) {
			cgdl->status = CAM_GDEVLIST_ERROR;
			break;
		}

		if (nperiph == NULL)
			cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
		else
			cgdl->status = CAM_GDEVLIST_MORE_DEVS;

		cgdl->index++;
		cgdl->generation = device->generation;

		cgdl->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_DEV_MATCH:
	{
		dev_pos_type position_type;
		struct ccb_dev_match *cdm;
		int ret;

		cdm = &start_ccb->cdm;

		/*
		 * Prevent EDT changes while we traverse it.
		 */
		/*
		 * There are two ways of getting at information in the EDT.
		 * The first way is via the primary EDT tree.  It starts
		 * with a list of busses, then a list of targets on a bus,
		 * then devices/luns on a target, and then peripherals on a
		 * device/lun.  The "other" way is by the peripheral driver
		 * lists.  The peripheral driver lists are organized by
		 * peripheral driver.  (obviously)  So it makes sense to
		 * use the peripheral driver list if the user is looking
		 * for something like "da1", or all "da" devices.  If the
		 * user is looking for something on a particular bus/target
		 * or lun, it's generally better to go through the EDT tree.
		 */

		if (cdm->pos.position_type != CAM_DEV_POS_NONE)
			position_type = cdm->pos.position_type;
		else {
			u_int i;

			position_type = CAM_DEV_POS_NONE;

			for (i = 0; i < cdm->num_patterns; i++) {
				if ((cdm->patterns[i].type == DEV_MATCH_BUS)
				 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
					position_type = CAM_DEV_POS_EDT;
					break;
				}
			}

			if (cdm->num_patterns == 0)
				position_type = CAM_DEV_POS_EDT;
			else if (position_type == CAM_DEV_POS_NONE)
				position_type = CAM_DEV_POS_PDRV;
		}

		switch(position_type & CAM_DEV_POS_TYPEMASK) {
		case CAM_DEV_POS_EDT:
			ret = xptedtmatch(cdm);
			break;
		case CAM_DEV_POS_PDRV:
			ret = xptperiphlistmatch(cdm);
			break;
		default:
			cdm->status = CAM_DEV_MATCH_ERROR;
			break;
		}

		if (cdm->status == CAM_DEV_MATCH_ERROR)
			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		else
			start_ccb->ccb_h.status = CAM_REQ_CMP;

		break;
	}
	case XPT_SASYNC_CB:
	{
		struct ccb_setasync *csa;
		struct async_node *cur_entry;
		struct async_list *async_head;
		u_int32_t added;

		csa = &start_ccb->csa;
		added = csa->event_enable;
		async_head = &csa->ccb_h.path->device->asyncs;

		/*
		 * If there is already an entry for us, simply
		 * update it.
		 */
		cur_entry = SLIST_FIRST(async_head);
		while (cur_entry != NULL) {
			if ((cur_entry->callback_arg == csa->callback_arg)
			 && (cur_entry->callback == csa->callback))
				break;
			cur_entry = SLIST_NEXT(cur_entry, links);
		}

		if (cur_entry != NULL) {
			/*
			 * If the request has no flags set,
			 * remove the entry.
			 */
			added &= ~cur_entry->event_enable;
			if (csa->event_enable == 0) {
				SLIST_REMOVE(async_head, cur_entry,
					     async_node, links);
				csa->ccb_h.path->device->refcount--;
				kfree(cur_entry, M_CAMXPT);
			} else {
				cur_entry->event_enable = csa->event_enable;
			}
		} else {
			cur_entry = kmalloc(sizeof(*cur_entry),
					    M_CAMXPT, M_INTWAIT);
			cur_entry->event_enable = csa->event_enable;
			cur_entry->callback_arg = csa->callback_arg;
			cur_entry->callback = csa->callback;
			SLIST_INSERT_HEAD(async_head, cur_entry, links);
			csa->ccb_h.path->device->refcount++;
		}

		if ((added & AC_FOUND_DEVICE) != 0) {
			/*
			 * Get this peripheral up to date with all
			 * the currently existing devices.
			 */
			xpt_for_all_devices(xptsetasyncfunc, cur_entry);
		}
		if ((added & AC_PATH_REGISTERED) != 0) {
			/*
			 * Get this peripheral up to date with all
			 * the currently existing busses.
			 */
			xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
		}
		start_ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_REL_SIMQ:
	{
		struct ccb_relsim *crs;
		struct cam_ed *dev;

		crs = &start_ccb->crs;
		dev = crs->ccb_h.path->device;
		if (dev == NULL) {
			crs->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		}

		if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
			if (INQ_DATA_TQ_ENABLED(&dev->inq_data)) {
				/* Don't ever go below one opening */
				if (crs->openings > 0) {
					xpt_dev_ccbq_resize(crs->ccb_h.path,
							    crs->openings);

					if (bootverbose) {
						xpt_print_path(crs->ccb_h.path);
						kprintf("tagged openings "
							"now %d\n",
							crs->openings);
					}
				}
			}
		}

		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
				/*
				 * Just extend the old timeout and decrement
				 * the freeze count so that a single timeout
				 * is sufficient for releasing the queue.
				 */
				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
				callout_stop(&dev->c_handle);
			} else {
				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			}

			callout_reset(&dev->c_handle,
				      (crs->release_timeout * hz) / 1000,
				      xpt_release_devq_timeout, dev);

			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
		}

		if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
				/*
				 * Decrement the freeze count so that a single
				 * completion is still sufficient to unfreeze
				 * the queue.
				 */
				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
			} else {
				dev->flags |= CAM_DEV_REL_ON_COMPLETE;
				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			}
		}

		if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
			if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
			 || (dev->ccbq.dev_active == 0)) {
				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
			} else {
				dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			}
		}

		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
			xpt_release_devq(crs->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}
		start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
		start_ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_SCAN_BUS:
		xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
		break;
	case XPT_SCAN_LUN:
		xpt_scan_lun(start_ccb->ccb_h.path->periph,
			     start_ccb->ccb_h.path, start_ccb->crcn.flags,
			     start_ccb);
		break;
	case XPT_DEBUG: {
#ifdef CAMDEBUG
#ifdef CAM_DEBUG_DELAY
		cam_debug_delay = CAM_DEBUG_DELAY;
#endif
		cam_dflags = start_ccb->cdbg.flags;
		if (cam_dpath != NULL) {
			xpt_free_path(cam_dpath);
			cam_dpath = NULL;
		}

		if (cam_dflags != CAM_DEBUG_NONE) {
			if (xpt_create_path(&cam_dpath, xpt_periph,
					    start_ccb->ccb_h.path_id,
					    start_ccb->ccb_h.target_id,
					    start_ccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP) {
				start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
				cam_dflags = CAM_DEBUG_NONE;
			} else {
				start_ccb->ccb_h.status = CAM_REQ_CMP;
				xpt_print_path(cam_dpath);
				kprintf("debugging flags now %x\n", cam_dflags);
			}
		} else {
			cam_dpath = NULL;
			start_ccb->ccb_h.status = CAM_REQ_CMP;
		}
#else /* !CAMDEBUG */
		start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
#endif /* CAMDEBUG */
		break;
	}
	case XPT_NOOP:
		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
			xpt_freeze_devq(start_ccb->ccb_h.path, 1);
		start_ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	default:
	case XPT_SDEV_TYPE:
	case XPT_TERM_IO:
	case XPT_ENG_INQ:
		/* XXX Implement */
		start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		break;
	}
	crit_exit();
}
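/*
 * Calling-pattern sketch (editor's note): immediate function codes
 * such as XPT_PATH_INQ or XPT_GDEV_TYPE complete before xpt_action()
 * returns, so the CCB can live on the stack and no completion callback
 * is involved:
 *
 *	struct ccb_pathinq cpi;
 *
 *	xpt_setup_ccb(&cpi.ccb_h, path, 1);
 *	cpi.ccb_h.func_code = XPT_PATH_INQ;
 *	xpt_action((union ccb *)&cpi);
 *	... on return, cpi.ccb_h.status and the cpi payload are valid ...
 *
 * Queued codes such as XPT_SCSI_IO instead complete asynchronously
 * through xpt_done() and the peripheral's completion routine.
 */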
void
xpt_polled_action(union ccb *start_ccb)
{
	u_int32_t timeout;
	struct cam_sim *sim;
	struct cam_devq *devq;
	struct cam_ed *dev;

	timeout = start_ccb->ccb_h.timeout;
	sim = start_ccb->ccb_h.path->bus->sim;
	devq = sim->devq;
	dev = start_ccb->ccb_h.path->device;

	crit_enter();

	/*
	 * Steal an opening so that no other queued requests
	 * can get it before us while we simulate interrupts.
	 */
	dev->ccbq.devq_openings--;
	dev->ccbq.dev_openings--;

	while(((devq && devq->send_openings <= 0) || dev->ccbq.dev_openings < 0)
	   && (--timeout > 0)) {
		DELAY(1000);
		(*(sim->sim_poll))(sim);
		swi_cambio(NULL, NULL);
	}

	dev->ccbq.devq_openings++;
	dev->ccbq.dev_openings++;

	if (timeout != 0) {
		xpt_action(start_ccb);
		while(--timeout > 0) {
			(*(sim->sim_poll))(sim);
			swi_cambio(NULL, NULL);
			if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
			    != CAM_REQ_INPROG)
				break;
			DELAY(1000);
		}
		if (timeout == 0) {
			/*
			 * XXX Is it worth adding a sim_timeout entry
			 * point so we can attempt recovery?  If
			 * this is only used for dumps, I don't think
			 * it is.
			 */
			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
		}
	} else {
		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	}
	crit_exit();
}
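/*
 * Editor's note: xpt_polled_action() substitutes direct calls to the
 * SIM's poll vector for interrupts, so it is the path used when
 * interrupts are unavailable (e.g. while dumping core).  The timeout
 * is consumed in 1ms steps to match the DELAY(1000) calls above, so
 * ccb_h.timeout is effectively interpreted in milliseconds here.
 */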
/*
 * Schedule a peripheral driver to receive a ccb when its
 * target device has space for more transactions.
 */
void
xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
{
	struct cam_ed *device;
	union ccb *work_ccb;
	int runq;

	CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
	device = perph->path->device;
	crit_enter();
	if (periph_is_queued(perph)) {
		/* Simply reorder based on new priority */
		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
			  ("   change priority to %d\n", new_priority));
		if (new_priority < perph->pinfo.priority) {
			camq_change_priority(&device->drvq,
					     perph->pinfo.index,
					     new_priority);
		}
		runq = 0;
	} else if (SIM_DEAD(perph->path->bus->sim)) {
		/* The SIM is gone so just call periph_start directly. */
		work_ccb = xpt_get_ccb(perph->path->device);
		crit_exit();
		if (work_ccb == NULL)
			return; /* XXX */
		xpt_setup_ccb(&work_ccb->ccb_h, perph->path, new_priority);
		perph->pinfo.priority = new_priority;
		perph->periph_start(perph, work_ccb);
		return;
	} else {
		/* New entry on the queue */
		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
			  ("   added periph to queue\n"));
		perph->pinfo.priority = new_priority;
		perph->pinfo.generation = ++device->drvq.generation;
		camq_insert(&device->drvq, &perph->pinfo);
		runq = xpt_schedule_dev_allocq(perph->path->bus, device);
	}
	crit_exit();
	if (runq != 0) {
		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
			  ("   calling xpt_run_devq\n"));
		xpt_run_dev_allocq(perph->path->bus);
	}
}
/*
 * Schedule a device to run on a given queue.
 * If the device was inserted as a new entry on the queue,
 * return 1 meaning the device queue should be run.  If we
 * were already queued, implying someone else has already
 * started the queue, return 0 so the caller doesn't attempt
 * to run the queue.  Must be run in a critical section.
 */
static int
xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
		 u_int32_t new_priority)
{
	int retval;
	u_int32_t old_priority;

	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));

	old_priority = pinfo->priority;

	/*
	 * Are we already queued?
	 */
	if (pinfo->index != CAM_UNQUEUED_INDEX) {
		/* Simply reorder based on new priority */
		if (new_priority < old_priority) {
			camq_change_priority(queue, pinfo->index,
					     new_priority);
			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
					("changed priority to %d\n",
					 new_priority));
		}
		retval = 0;
	} else {
		/* New entry on the queue */
		if (new_priority < old_priority)
			pinfo->priority = new_priority;

		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
				("Inserting onto queue\n"));
		pinfo->generation = ++queue->generation;
		camq_insert(queue, pinfo);
		retval = 1;
	}
	return (retval);
}
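/*
 * Editor's note: smaller priority values are more urgent throughout
 * the XPT, which is why both xpt_schedule() and xpt_schedule_dev()
 * only call camq_change_priority() when new_priority < old priority;
 * a rescheduling request never demotes an already-queued entry.
 */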
static void
xpt_run_dev_allocq(struct cam_eb *bus)
{
	struct cam_devq *devq;

	if ((devq = bus->sim->devq) == NULL) {
		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
				("xpt_run_dev_allocq: NULL devq\n"));
		return;
	}
	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));

	CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
			("   qfrozen_cnt == 0x%x, entries == %d, "
			 "openings == %d, active == %d\n",
			 devq->alloc_queue.qfrozen_cnt,
			 devq->alloc_queue.entries,
			 devq->alloc_openings,
			 devq->alloc_active));

	crit_enter();
	devq->alloc_queue.qfrozen_cnt++;
	while ((devq->alloc_queue.entries > 0)
	    && (devq->alloc_openings > 0)
	    && (devq->alloc_queue.qfrozen_cnt <= 1)) {
		struct cam_ed_qinfo *qinfo;
		struct cam_ed *device;
		union ccb *work_ccb;
		struct cam_periph *drv;
		struct camq *drvq;

		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
							   CAMQ_HEAD);
		device = qinfo->device;

		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
				("running device %p\n", device));

		drvq = &device->drvq;

#ifdef CAMDEBUG
		if (drvq->entries <= 0) {
			panic("xpt_run_dev_allocq: "
			      "Device on queue without any work to do");
		}
#endif
		if ((work_ccb = xpt_get_ccb(device)) != NULL) {
			devq->alloc_openings--;
			devq->alloc_active++;
			drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
			crit_exit();
			xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
				      drv->pinfo.priority);
			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
					("calling periph start\n"));
			drv->periph_start(drv, work_ccb);
		} else {
			/*
			 * Malloc failure in alloc_ccb
			 */
			/*
			 * XXX add us to a list to be run from free_ccb
			 * if we don't have any ccbs active on this
			 * device queue otherwise we may never get run
			 * again.
			 */
			break;
		}

		/* Raise IPL for possible insertion and test at top of loop */
		crit_enter();

		if (drvq->entries > 0) {
			/* We have more work.  Attempt to reschedule */
			xpt_schedule_dev_allocq(bus, device);
		}
	}
	devq->alloc_queue.qfrozen_cnt--;
	crit_exit();
}
static void
xpt_run_dev_sendq(struct cam_eb *bus)
{
	struct cam_devq *devq;

	if ((devq = bus->sim->devq) == NULL) {
		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
				("xpt_run_dev_sendq: NULL devq\n"));
		return;
	}
	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));

	crit_enter();
	devq->send_queue.qfrozen_cnt++;
	while ((devq->send_queue.entries > 0)
	    && (devq->send_openings > 0)) {
		struct cam_ed_qinfo *qinfo;
		struct cam_ed *device;
		union ccb *work_ccb;
		struct cam_sim *sim;

		if (devq->send_queue.qfrozen_cnt > 1) {
			break;
		}

		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
							   CAMQ_HEAD);
		device = qinfo->device;

		/*
		 * If the device has been "frozen", don't attempt
		 * to run it.
		 */
		if (device->qfrozen_cnt > 0) {
			continue;
		}

		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
				("running device %p\n", device));

		work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
		if (work_ccb == NULL) {
			kprintf("device on run queue with no ccbs???\n");
			continue;
		}

		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
			if (num_highpower <= 0) {
				/*
				 * We got a high power command, but we
				 * don't have any available slots.  Freeze
				 * the device queue until we have a slot
				 * available.
				 */
				device->qfrozen_cnt++;
				STAILQ_INSERT_TAIL(&highpowerq,
						   &work_ccb->ccb_h,
						   xpt_links.stqe);

				continue;
			} else {
				/*
				 * Consume a high power slot while
				 * this ccb runs.
				 */
				num_highpower--;
			}
		}
		devq->active_dev = device;
		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);

		cam_ccbq_send_ccb(&device->ccbq, work_ccb);

		devq->send_openings--;
		devq->send_active++;

		if (device->ccbq.queue.entries > 0)
			xpt_schedule_dev_sendq(bus, device);

		if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
			/*
			 * The client wants to freeze the queue
			 * after this CCB is sent.
			 */
			device->qfrozen_cnt++;
		}

		/* In Target mode, the peripheral driver knows best... */
		if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			if ((device->inq_flags & SID_CmdQue) != 0
			 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
				work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
			else
				/*
				 * Clear this in case of a retried CCB that
				 * failed due to a rejected tag.
				 */
				work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
		}

		/*
		 * Device queues can be shared among multiple sim instances
		 * that reside on different busses.  Use the SIM in the queue
		 * CCB's path, rather than the one in the bus that was passed
		 * into this function.
		 */
		sim = work_ccb->ccb_h.path->bus->sim;
		(*(sim->sim_action))(sim, work_ccb);

		devq->active_dev = NULL;
		/* Raise IPL for possible insertion and test at top of loop */
	}
	devq->send_queue.qfrozen_cnt--;
	crit_exit();
}
/*
 * This function merges stuff from the slave ccb into the master ccb, while
 * keeping important fields in the master ccb constant.
 */
void
xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
{
	/*
	 * Pull fields that are valid for peripheral drivers to set
	 * into the master CCB along with the CCB "payload".
	 */
	master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
	master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
	master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
	master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
	bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
	      sizeof(union ccb) - sizeof(struct ccb_hdr));
}
void
xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
{
	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
	callout_init(&ccb_h->timeout_ch);
	ccb_h->pinfo.priority = priority;
	ccb_h->path = path;
	ccb_h->path_id = path->bus->path_id;
	if (path->target)
		ccb_h->target_id = path->target->target_id;
	else
		ccb_h->target_id = CAM_TARGET_WILDCARD;
	if (path->device) {
		ccb_h->target_lun = path->device->lun_id;
		ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
	} else {
		ccb_h->target_lun = CAM_TARGET_WILDCARD;
	}
	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
	ccb_h->flags = 0;
}
/* Path manipulation functions */
cam_status
xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
		path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
{
	struct cam_path *path;
	cam_status status;

	path = kmalloc(sizeof(*path), M_CAMXPT, M_INTWAIT);
	status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP) {
		kfree(path, M_CAMXPT);
		path = NULL;
	}
	*new_path_ptr = path;
	return (status);
}
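/*
 * Minimal usage sketch (editor's addition; assumes a context where the
 * M_INTWAIT allocation above is acceptable):
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, sim->path_id,
 *			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
 *	    != CAM_REQ_CMP)
 *		return;			no such bus
 *	xpt_async(AC_BUS_RESET, path, NULL);
 *	xpt_free_path(path);
 */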
static cam_status
xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
{
	struct cam_eb *bus;
	struct cam_et *target;
	struct cam_ed *device;
	cam_status status;

	status = CAM_REQ_CMP;	/* Completed without error */
	target = NULL;		/* Wildcarded */
	device = NULL;		/* Wildcarded */

	/*
	 * We will potentially modify the EDT, so block interrupts
	 * that may attempt to create cam paths.
	 */
	crit_enter();
	bus = xpt_find_bus(path_id);
	if (bus == NULL) {
		status = CAM_PATH_INVALID;
	} else {
		target = xpt_find_target(bus, target_id);
		if (target == NULL) {
			/* Create one */
			struct cam_et *new_target;

			new_target = xpt_alloc_target(bus, target_id);
			if (new_target == NULL) {
				status = CAM_RESRC_UNAVAIL;
			} else {
				target = new_target;
			}
		}
		if (target != NULL) {
			device = xpt_find_device(target, lun_id);
			if (device == NULL) {
				/* Create one */
				struct cam_ed *new_device;

				new_device = xpt_alloc_device(bus,
							      target,
							      lun_id);
				if (new_device == NULL) {
					status = CAM_RESRC_UNAVAIL;
				} else {
					device = new_device;
				}
			}
		}
	}
	crit_exit();

	/*
	 * Only touch the user's data if we are successful.
	 */
	if (status == CAM_REQ_CMP) {
		new_path->periph = perph;
		new_path->bus = bus;
		new_path->target = target;
		new_path->device = device;
		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
	} else {
		if (device != NULL)
			xpt_release_device(bus, target, device);
		if (target != NULL)
			xpt_release_target(bus, target);
		if (bus != NULL)
			xpt_release_bus(bus);
	}
	return (status);
}
static void
xpt_release_path(struct cam_path *path)
{
	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
	if (path->device != NULL) {
		xpt_release_device(path->bus, path->target, path->device);
		path->device = NULL;
	}
	if (path->target != NULL) {
		xpt_release_target(path->bus, path->target);
		path->target = NULL;
	}
	if (path->bus != NULL) {
		xpt_release_bus(path->bus);
		path->bus = NULL;
	}
}

void
xpt_free_path(struct cam_path *path)
{
	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
	xpt_release_path(path);
	kfree(path, M_CAMXPT);
}
4096 * Return -1 for failure, 0 for exact match, 1 for match with wildcards
4097 * in path1, 2 for match with wildcards in path2.
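*
* For example, comparing a path whose target and lun are wildcarded
* against a fully specified path on the same bus returns 1 (wildcards
* in path1); with the arguments swapped it returns 2.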
4100 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
4102 int retval = 0;
4104 if (path1->bus != path2->bus) {
4105 if (path1->bus->path_id == CAM_BUS_WILDCARD)
4106 retval = 1;
4107 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
4108 retval = 2;
4109 else
4110 return (-1);
4112 if (path1->target != path2->target) {
4113 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
4114 if (retval == 0)
4115 retval = 1;
4116 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
4117 retval = 2;
4118 else
4119 return (-1);
4121 if (path1->device != path2->device) {
4122 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
4123 if (retval == 0)
4124 retval = 1;
4125 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
4126 retval = 2;
4127 else
4128 return (-1);
4130 return (retval);
4133 void
4134 xpt_print_path(struct cam_path *path)
4136 if (path == NULL)
4137 kprintf("(nopath): ");
4138 else {
4139 if (path->periph != NULL)
4140 kprintf("(%s%d:", path->periph->periph_name,
4141 path->periph->unit_number);
4142 else
4143 kprintf("(noperiph:");
4145 if (path->bus != NULL)
4146 kprintf("%s%d:%d:", path->bus->sim->sim_name,
4147 path->bus->sim->unit_number,
4148 path->bus->sim->bus_id);
4149 else
4150 kprintf("nobus:");
4152 if (path->target != NULL)
4153 kprintf("%d:", path->target->target_id);
4154 else
4155 kprintf("X:");
4157 if (path->device != NULL)
4158 kprintf("%d): ", path->device->lun_id);
4159 else
4160 kprintf("X): ");
4165 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
4167 struct sbuf sb;
4169 sbuf_new(&sb, str, str_len, 0);
4171 if (path == NULL)
4172 sbuf_printf(&sb, "(nopath): ");
4173 else {
4174 if (path->periph != NULL)
4175 sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
4176 path->periph->unit_number);
4177 else
4178 sbuf_printf(&sb, "(noperiph:");
4180 if (path->bus != NULL)
4181 sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
4182 path->bus->sim->unit_number,
4183 path->bus->sim->bus_id);
4184 else
4185 sbuf_printf(&sb, "nobus:");
4187 if (path->target != NULL)
4188 sbuf_printf(&sb, "%d:", path->target->target_id);
4189 else
4190 sbuf_printf(&sb, "X:");
4192 if (path->device != NULL)
4193 sbuf_printf(&sb, "%d): ", path->device->lun_id);
4194 else
4195 sbuf_printf(&sb, "X): ");
4197 sbuf_finish(&sb);
4199 return(sbuf_len(&sb));
4202 path_id_t
4203 xpt_path_path_id(struct cam_path *path)
4205 return(path->bus->path_id);
4208 target_id_t
4209 xpt_path_target_id(struct cam_path *path)
4211 if (path->target != NULL)
4212 return (path->target->target_id);
4213 else
4214 return (CAM_TARGET_WILDCARD);
4217 lun_id_t
4218 xpt_path_lun_id(struct cam_path *path)
4220 if (path->device != NULL)
4221 return (path->device->lun_id);
4222 else
4223 return (CAM_LUN_WILDCARD);
4226 struct cam_sim *
4227 xpt_path_sim(struct cam_path *path)
4229 return (path->bus->sim);
4232 struct cam_periph*
4233 xpt_path_periph(struct cam_path *path)
4235 return (path->periph);
4239 * Release a CAM control block for the caller. Remit the cost of the structure
4240 * to the device referenced by the path. If this device had no 'credits'
4241 * and peripheral drivers have registered async callbacks for this
4242 * notification, call them now.
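*
* Note that released CCBs are normally parked on the global ccb_freeq
* for reuse by xpt_get_ccb(); the pool is only trimmed back (via
* xpt_free_ccb()) once xpt_ccb_count exceeds xpt_max_ccbs.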
4244 void
4245 xpt_release_ccb(union ccb *free_ccb)
4247 struct cam_path *path;
4248 struct cam_ed *device;
4249 struct cam_eb *bus;
4251 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
4252 path = free_ccb->ccb_h.path;
4253 device = path->device;
4254 bus = path->bus;
4255 crit_enter();
4256 cam_ccbq_release_opening(&device->ccbq);
4257 if (xpt_ccb_count > xpt_max_ccbs) {
4258 xpt_free_ccb(free_ccb);
4259 xpt_ccb_count--;
4260 } else {
4261 SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
4263 if (bus->sim->devq == NULL) {
4264 crit_exit();
4265 return;
4267 bus->sim->devq->alloc_openings++;
4268 bus->sim->devq->alloc_active--;
4269 /* XXX Turn this into an inline function - xpt_run_device?? */
4270 if ((device_is_alloc_queued(device) == 0)
4271 && (device->drvq.entries > 0)) {
4272 xpt_schedule_dev_allocq(bus, device);
4274 crit_exit();
4275 if (bus->sim->devq && dev_allocq_is_runnable(bus->sim->devq))
4276 xpt_run_dev_allocq(bus);
4279 /* Functions accessed by SIM drivers */
4282 * A sim structure, listing the SIM entry points and instance
4283 * identification info is passed to xpt_bus_register to hook the SIM
4284 * into the CAM framework. xpt_bus_register creates a cam_eb entry
4285 * for this new bus and places it in the array of busses and assigns
4286 * it a path_id. The path_id may be influenced by "hard wiring"
4287 * information specified by the user. Once interrupt services are
4288 * available, the bus will be probed.
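*
* A minimal, hypothetical attach sketch (foo_action/foo_poll and the
* opening counts are placeholders; see real controller drivers for
* working examples):
*
*	sim = cam_sim_alloc(foo_action, foo_poll, "foo", softc,
*			    unit, /*max_dev_transactions*/1,
*			    /*max_tagged*/0, devq);
*	if (sim != NULL)
*		(void)xpt_bus_register(sim, /*bus*/0);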
4290 int32_t
4291 xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
4293 struct cam_eb *new_bus;
4294 struct cam_eb *old_bus;
4295 struct ccb_pathinq cpi;
4297 sim->bus_id = bus;
4298 new_bus = kmalloc(sizeof(*new_bus), M_CAMXPT, M_INTWAIT);
4300 if (strcmp(sim->sim_name, "xpt") != 0) {
4301 sim->path_id =
4302 xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4305 TAILQ_INIT(&new_bus->et_entries);
4306 new_bus->path_id = sim->path_id;
4307 new_bus->sim = sim;
4308 ++sim->refcount;
4309 timevalclear(&new_bus->last_reset);
4310 new_bus->flags = 0;
4311 new_bus->refcount = 1; /* Held until a bus_deregister event */
4312 new_bus->generation = 0;
4313 crit_enter();
4314 old_bus = TAILQ_FIRST(&xpt_busses);
4315 while (old_bus != NULL
4316 && old_bus->path_id < new_bus->path_id)
4317 old_bus = TAILQ_NEXT(old_bus, links);
4318 if (old_bus != NULL)
4319 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4320 else
4321 TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
4322 bus_generation++;
4323 crit_exit();
4325 /* Notify interested parties */
4326 if (sim->path_id != CAM_XPT_PATH_ID) {
4327 struct cam_path path;
4329 xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
4330 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4331 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
4332 cpi.ccb_h.func_code = XPT_PATH_INQ;
4333 xpt_action((union ccb *)&cpi);
4334 xpt_async(AC_PATH_REGISTERED, &path, &cpi);
4335 xpt_release_path(&path);
4337 return (CAM_SUCCESS);
4341 * Deregister a bus. We must clean out all transactions pending on the bus.
4342 * This routine is typically called prior to cam_sim_free() (e.g. see
4343 * dev/usbmisc/umass/umass.c)
4345 int32_t
4346 xpt_bus_deregister(path_id_t pathid)
4348 struct cam_path bus_path;
4349 struct cam_ed *device;
4350 struct cam_ed_qinfo *qinfo;
4351 struct cam_devq *devq;
4352 struct cam_periph *periph;
4353 struct cam_sim *ccbsim;
4354 union ccb *work_ccb;
4355 cam_status status;
4357 status = xpt_compile_path(&bus_path, NULL, pathid,
4358 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4359 if (status != CAM_REQ_CMP)
4360 return (status);
4363 * This should clear out all pending requests and timeouts, but
4364 * the CCBs may be queued to a software interrupt.
4366 * XXX AC_LOST_DEVICE does not precisely abort the pending requests,
4367 * and it really ought to.
4369 xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4370 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4372 /* The SIM may be gone, so use a dummy SIM for any stray operations. */
4373 devq = bus_path.bus->sim->devq;
4374 bus_path.bus->sim = &cam_dead_sim;
4376 /* Execute any pending operations now. */
4377 while ((qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
4378 CAMQ_HEAD)) != NULL ||
4379 (qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
4380 CAMQ_HEAD)) != NULL) {
4381 do {
4382 device = qinfo->device;
4383 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
4384 if (work_ccb != NULL) {
4385 devq->active_dev = device;
4386 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
4387 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
4388 ccbsim = work_ccb->ccb_h.path->bus->sim;
4389 (*(ccbsim->sim_action))(ccbsim, work_ccb);
4392 periph = (struct cam_periph *)camq_remove(&device->drvq,
4393 CAMQ_HEAD);
4394 if (periph != NULL)
4395 xpt_schedule(periph, periph->pinfo.priority);
4396 } while (work_ccb != NULL || periph != NULL);
4399 /* Make sure all completed CCBs are processed. */
4400 while (!TAILQ_EMPTY(&cam_bioq)) {
4401 camisr(&cam_bioq);
4403 /* Repeat the async's for the benefit of any new devices. */
4404 xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4405 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4408 /* Release the reference count held while registered. */
4409 xpt_release_bus(bus_path.bus);
4410 xpt_release_path(&bus_path);
4412 /* Recheck for more completed CCBs. */
4413 while (!TAILQ_EMPTY(&cam_bioq))
4414 camisr(&cam_bioq);
4416 return (CAM_REQ_CMP);
4419 static path_id_t
4420 xptnextfreepathid(void)
4422 struct cam_eb *bus;
4423 path_id_t pathid;
4424 char *strval;
4426 pathid = 0;
4427 bus = TAILQ_FIRST(&xpt_busses);
4428 retry:
4429 /* Find an unoccupied pathid */
4430 while (bus != NULL
4431 && bus->path_id <= pathid) {
4432 if (bus->path_id == pathid)
4433 pathid++;
4434 bus = TAILQ_NEXT(bus, links);
4438 * Ensure that this pathid is not reserved for
4439 * a bus that may be registered in the future.
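*
* Such a reservation comes from a "wiring" hint, e.g.
* hint.scbus.0.at="ahc0", which resource_string_value() sees as an
* "at" resource for scbus unit 0.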
4441 if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4442 ++pathid;
4443 /* Start the search over */
4444 goto retry;
4446 return (pathid);
4449 static path_id_t
4450 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4452 path_id_t pathid;
4453 int i, dunit, val;
4454 char buf[32];
4456 pathid = CAM_XPT_PATH_ID;
4457 ksnprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4458 i = -1;
4459 while ((i = resource_query_string(i, "at", buf)) != -1) {
4460 if (strcmp(resource_query_name(i), "scbus")) {
4461 /* Avoid a bit of foot shooting. */
4462 continue;
4464 dunit = resource_query_unit(i);
4465 if (dunit < 0) /* unwired?! */
4466 continue;
4467 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4468 if (sim_bus == val) {
4469 pathid = dunit;
4470 break;
4472 } else if (sim_bus == 0) {
4473 /* Unspecified matches bus 0 */
4474 pathid = dunit;
4475 break;
4476 } else {
4477 kprintf("Ambiguous scbus configuration for %s%d "
4478 "bus %d, cannot wire down. The kernel "
4479 "config entry for scbus%d should "
4480 "specify a controller bus.\n"
4481 "Scbus will be assigned dynamically.\n",
4482 sim_name, sim_unit, sim_bus, dunit);
4483 break;
4487 if (pathid == CAM_XPT_PATH_ID)
4488 pathid = xptnextfreepathid();
4489 return (pathid);
4492 void
4493 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4495 struct cam_eb *bus;
4496 struct cam_et *target, *next_target;
4497 struct cam_ed *device, *next_device;
4499 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
4502 * Most async events come from a CAM interrupt context. In
4503 * a few cases, the error recovery code at the peripheral layer,
4504 * which may run from our SWI or a process context, may signal
4505 * deferred events with a call to xpt_async. Ensure async
4506 * notifications are serialized by blocking cam interrupts.
4508 crit_enter();
4510 bus = path->bus;
4512 if (async_code == AC_BUS_RESET) {
4513 /* Update our notion of when the last reset occurred */
4514 microuptime(&bus->last_reset);
4517 for (target = TAILQ_FIRST(&bus->et_entries);
4518 target != NULL;
4519 target = next_target) {
4521 next_target = TAILQ_NEXT(target, links);
4523 if (path->target != target
4524 && path->target->target_id != CAM_TARGET_WILDCARD
4525 && target->target_id != CAM_TARGET_WILDCARD)
4526 continue;
4528 if (async_code == AC_SENT_BDR) {
4529 /* Update our notion of when the last reset occurred */
4530 microuptime(&path->target->last_reset);
4533 for (device = TAILQ_FIRST(&target->ed_entries);
4534 device != NULL;
4535 device = next_device) {
4537 next_device = TAILQ_NEXT(device, links);
4539 if (path->device != device
4540 && path->device->lun_id != CAM_LUN_WILDCARD
4541 && device->lun_id != CAM_LUN_WILDCARD)
4542 continue;
4544 xpt_dev_async(async_code, bus, target,
4545 device, async_arg);
4547 xpt_async_bcast(&device->asyncs, async_code,
4548 path, async_arg);
4553 * If this wasn't a fully wildcarded async, tell all
4554 * clients that want all async events.
4556 if (bus != xpt_periph->path->bus)
4557 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4558 path, async_arg);
4559 crit_exit();
4562 static void
4563 xpt_async_bcast(struct async_list *async_head,
4564 u_int32_t async_code,
4565 struct cam_path *path, void *async_arg)
4567 struct async_node *cur_entry;
4569 cur_entry = SLIST_FIRST(async_head);
4570 while (cur_entry != NULL) {
4571 struct async_node *next_entry;
4573 * Grab the next list entry before we call the current
4574 * entry's callback. This is because the callback function
4575 * can delete its async callback entry.
4577 next_entry = SLIST_NEXT(cur_entry, links);
4578 if ((cur_entry->event_enable & async_code) != 0)
4579 cur_entry->callback(cur_entry->callback_arg,
4580 async_code, path,
4581 async_arg);
4582 cur_entry = next_entry;
4587 * Handle any per-device event notifications that require action by the XPT.
4589 static void
4590 xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
4591 struct cam_ed *device, void *async_arg)
4593 cam_status status;
4594 struct cam_path newpath;
4597 * We only need to handle events for real devices.
4599 if (target->target_id == CAM_TARGET_WILDCARD
4600 || device->lun_id == CAM_LUN_WILDCARD)
4601 return;
4604 * We need our own path with wildcards expanded to
4605 * handle certain types of events.
4607 if ((async_code == AC_SENT_BDR)
4608 || (async_code == AC_BUS_RESET)
4609 || (async_code == AC_INQ_CHANGED))
4610 status = xpt_compile_path(&newpath, NULL,
4611 bus->path_id,
4612 target->target_id,
4613 device->lun_id);
4614 else
4615 status = CAM_REQ_CMP_ERR;
4617 if (status == CAM_REQ_CMP) {
4620 * Allow transfer negotiation to occur in a
4621 * tag free environment.
4623 if (async_code == AC_SENT_BDR
4624 || async_code == AC_BUS_RESET)
4625 xpt_toggle_tags(&newpath);
4627 if (async_code == AC_INQ_CHANGED) {
4629 * We've sent a start unit command, or
4630 * something similar, to a device that
4631 * may have caused its inquiry data to
4632 * change. So we re-scan the device to
4633 * refresh the inquiry data for it.
4635 xpt_scan_lun(newpath.periph, &newpath,
4636 CAM_EXPECT_INQ_CHANGE, NULL);
4638 xpt_release_path(&newpath);
4639 } else if (async_code == AC_LOST_DEVICE) {
4641 * When we lose a device, the SIM may be about to detach;
4642 * we have to clear out all pending timeouts and
4643 * requests before that happens. XXX it would be nice if
4644 * we could abort the requests pertaining to the device.
4646 xpt_release_devq_timeout(device);
4647 if ((device->flags & CAM_DEV_UNCONFIGURED) == 0) {
4648 device->flags |= CAM_DEV_UNCONFIGURED;
4649 xpt_release_device(bus, target, device);
4651 } else if (async_code == AC_TRANSFER_NEG) {
4652 struct ccb_trans_settings *settings;
4654 settings = (struct ccb_trans_settings *)async_arg;
4655 xpt_set_transfer_settings(settings, device,
4656 /*async_update*/TRUE);
4660 u_int32_t
4661 xpt_freeze_devq(struct cam_path *path, u_int count)
4663 struct ccb_hdr *ccbh;
4665 crit_enter();
4666 path->device->qfrozen_cnt += count;
4669 * Mark the last CCB in the queue as needing
4670 * to be requeued if the driver hasn't
4671 * changed its state yet. This fixes a race
4672 * where a CCB is just about to be queued to
4673 * a controller driver when its interrupt routine
4674 * freezes the queue. To completely close the
4675 * hole, controller drivers must check to see
4676 * if a CCB's status is still CAM_REQ_INPROG
4677 * under critical section protection just before they queue
4678 * the CCB. See ahc_action/ahc_freeze_devq for
4679 * an example.
4681 ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
4682 if (ccbh && ccbh->status == CAM_REQ_INPROG)
4683 ccbh->status = CAM_REQUEUE_REQ;
4684 crit_exit();
4685 return (path->device->qfrozen_cnt);
4688 u_int32_t
4689 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4691 if (sim->devq == NULL)
4692 return(count);
4693 sim->devq->send_queue.qfrozen_cnt += count;
4694 if (sim->devq->active_dev != NULL) {
4695 struct ccb_hdr *ccbh;
4697 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
4698 ccb_hdr_tailq);
4699 if (ccbh && ccbh->status == CAM_REQ_INPROG)
4700 ccbh->status = CAM_REQUEUE_REQ;
4702 return (sim->devq->send_queue.qfrozen_cnt);
4706 * WARNING: most devices, especially USB/UMASS, may detach their sim early.
4707 * We ref-count the sim (and the bus only NULLs it out when the bus has been
4708 * freed, which is not the case here), but the device queue is also freed
4709 * (XXX) and we have to check for that here.
4711 * XXX fixme: could we simply not null-out the device queue via
4712 * cam_sim_free()?
4714 static void
4715 xpt_release_devq_timeout(void *arg)
4717 struct cam_ed *device;
4719 device = (struct cam_ed *)arg;
4721 xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
4724 void
4725 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4727 xpt_release_devq_device(path->device, count, run_queue);
4730 static void
4731 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4733 int rundevq;
4735 rundevq = 0;
4736 crit_enter();
4738 if (dev->qfrozen_cnt > 0) {
4740 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
4741 dev->qfrozen_cnt -= count;
4742 if (dev->qfrozen_cnt == 0) {
4745 * No longer need to wait for a successful
4746 * command completion.
4748 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4751 * Remove any timeouts that might be scheduled
4752 * to release this queue.
4754 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4755 callout_stop(&dev->c_handle);
4756 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4760 * Now that we are unfrozen, schedule the
4761 * device so any pending transactions are
4762 * run.
4764 if ((dev->ccbq.queue.entries > 0)
4765 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4766 && (run_queue != 0)) {
4767 rundevq = 1;
4771 if (rundevq != 0)
4772 xpt_run_dev_sendq(dev->target->bus);
4773 crit_exit();
4776 void
4777 xpt_release_simq(struct cam_sim *sim, int run_queue)
4779 struct camq *sendq;
4781 if (sim->devq == NULL)
4782 return;
4784 sendq = &(sim->devq->send_queue);
4785 crit_enter();
4787 if (sendq->qfrozen_cnt > 0) {
4788 sendq->qfrozen_cnt--;
4789 if (sendq->qfrozen_cnt == 0) {
4790 struct cam_eb *bus;
4793 * If there is a timeout scheduled to release this
4794 * sim queue, remove it. The queue frozen count is
4795 * already at 0.
4797 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4798 callout_stop(&sim->c_handle);
4799 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4801 bus = xpt_find_bus(sim->path_id);
4802 crit_exit();
4804 if (run_queue) {
4806 * Now that we are unfrozen, run the send queue.
4808 xpt_run_dev_sendq(bus);
4810 xpt_release_bus(bus);
4811 } else {
4812 crit_exit();
4814 } else {
4815 crit_exit();
4819 void
4820 xpt_done(union ccb *done_ccb)
4822 crit_enter();
4824 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4825 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4827 * Queue up the request for handling by our SWI handler;
4828 * this covers any of the "non-immediate" types of CCBs.
4830 switch (done_ccb->ccb_h.path->periph->type) {
4831 case CAM_PERIPH_BIO:
4832 TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
4833 sim_links.tqe);
4834 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4835 setsoftcambio();
4836 break;
4837 default:
4838 panic("unknown periph type %d",
4839 done_ccb->ccb_h.path->periph->type);
4842 crit_exit();
4845 union ccb *
4846 xpt_alloc_ccb(void)
4848 union ccb *new_ccb;
4850 new_ccb = kmalloc(sizeof(*new_ccb), M_CAMXPT, M_INTWAIT);
4851 return (new_ccb);
4854 void
4855 xpt_free_ccb(union ccb *free_ccb)
4857 kfree(free_ccb, M_CAMXPT);
4862 /* Private XPT functions */
4865 * Get a CAM control block for the caller. Charge the structure to the device
4866 * referenced by the path. If this device has no 'credits' then the
4867 * device already has the maximum number of outstanding operations under way
4868 * and we return NULL. If we don't have sufficient resources to allocate more
4869 * ccbs, we also return NULL.
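*
* This is the allocation-side counterpart of xpt_release_ccb(): it
* first tries to recycle a CCB from the global ccb_freeq and only
* falls back to kmalloc() when the free list is empty.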
4871 static union ccb *
4872 xpt_get_ccb(struct cam_ed *device)
4874 union ccb *new_ccb;
4876 crit_enter();
4877 if ((new_ccb = (union ccb *)SLIST_FIRST(&ccb_freeq)) == NULL) {
4878 new_ccb = kmalloc(sizeof(*new_ccb), M_CAMXPT, M_INTWAIT);
4879 SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
4880 xpt_links.sle);
4881 xpt_ccb_count++;
4883 cam_ccbq_take_opening(&device->ccbq);
4884 SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
4885 crit_exit();
4886 return (new_ccb);
4889 static void
4890 xpt_release_bus(struct cam_eb *bus)
4893 crit_enter();
4894 if (bus->refcount == 1) {
4895 KKASSERT(TAILQ_FIRST(&bus->et_entries) == NULL);
4896 TAILQ_REMOVE(&xpt_busses, bus, links);
4897 if (bus->sim) {
4898 cam_sim_release(bus->sim, 0);
4899 bus->sim = NULL;
4901 bus_generation++;
4902 KKASSERT(bus->refcount == 1);
4903 kfree(bus, M_CAMXPT);
4904 } else {
4905 --bus->refcount;
4907 crit_exit();
4910 static struct cam_et *
4911 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4913 struct cam_et *target;
4914 struct cam_et *cur_target;
4916 target = kmalloc(sizeof(*target), M_CAMXPT, M_INTWAIT);
4918 TAILQ_INIT(&target->ed_entries);
4919 target->bus = bus;
4920 target->target_id = target_id;
4921 target->refcount = 1;
4922 target->generation = 0;
4923 timevalclear(&target->last_reset);
4925 * Hold a reference to our parent bus so it
4926 * will not go away before we do.
4928 bus->refcount++;
4930 /* Insertion sort into our bus's target list */
4931 cur_target = TAILQ_FIRST(&bus->et_entries);
4932 while (cur_target != NULL && cur_target->target_id < target_id)
4933 cur_target = TAILQ_NEXT(cur_target, links);
4935 if (cur_target != NULL) {
4936 TAILQ_INSERT_BEFORE(cur_target, target, links);
4937 } else {
4938 TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4940 bus->generation++;
4941 return (target);
4944 static void
4945 xpt_release_target(struct cam_eb *bus, struct cam_et *target)
4947 crit_enter();
4948 if (target->refcount == 1) {
4949 KKASSERT(TAILQ_FIRST(&target->ed_entries) == NULL);
4950 TAILQ_REMOVE(&bus->et_entries, target, links);
4951 bus->generation++;
4952 xpt_release_bus(bus);
4953 KKASSERT(target->refcount == 1);
4954 kfree(target, M_CAMXPT);
4955 } else {
4956 --target->refcount;
4958 crit_exit();
4961 static struct cam_ed *
4962 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4964 #ifdef CAM_NEW_TRAN_CODE
4965 struct cam_path path;
4966 #endif /* CAM_NEW_TRAN_CODE */
4967 struct cam_ed *device;
4968 struct cam_devq *devq;
4969 cam_status status;
4971 if (SIM_DEAD(bus->sim))
4972 return (NULL);
4974 /* Make space for us in the device queue on our bus */
4975 if (bus->sim->devq == NULL)
4976 return(NULL);
4977 devq = bus->sim->devq;
4978 status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
4980 if (status != CAM_REQ_CMP) {
4981 device = NULL;
4982 } else {
4983 device = kmalloc(sizeof(*device), M_CAMXPT, M_INTWAIT);
4986 if (device != NULL) {
4987 struct cam_ed *cur_device;
4989 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
4990 device->alloc_ccb_entry.device = device;
4991 cam_init_pinfo(&device->send_ccb_entry.pinfo);
4992 device->send_ccb_entry.device = device;
4993 device->target = target;
4994 device->lun_id = lun_id;
4995 /* Initialize our queues */
4996 if (camq_init(&device->drvq, 0) != 0) {
4997 kfree(device, M_CAMXPT);
4998 return (NULL);
5000 if (cam_ccbq_init(&device->ccbq,
5001 bus->sim->max_dev_openings) != 0) {
5002 camq_fini(&device->drvq);
5003 kfree(device, M_CAMXPT);
5004 return (NULL);
5006 SLIST_INIT(&device->asyncs);
5007 SLIST_INIT(&device->periphs);
5008 device->generation = 0;
5009 device->owner = NULL;
5011 * Take the default quirk entry until we have inquiry
5012 * data and can determine a better quirk to use.
5014 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
5015 bzero(&device->inq_data, sizeof(device->inq_data));
5016 device->inq_flags = 0;
5017 device->queue_flags = 0;
5018 device->serial_num = NULL;
5019 device->serial_num_len = 0;
5020 device->qfrozen_cnt = 0;
5021 device->flags = CAM_DEV_UNCONFIGURED;
5022 device->tag_delay_count = 0;
5023 device->tag_saved_openings = 0;
5024 device->refcount = 1;
5025 callout_init(&device->c_handle);
5028 * Hold a reference to our parent target so it
5029 * will not go away before we do.
5031 target->refcount++;
5034 * XXX should be limited by number of CCBs this bus can
5035 * do.
5037 xpt_max_ccbs += device->ccbq.devq_openings;
5038 /* Insertion sort into our target's device list */
5039 cur_device = TAILQ_FIRST(&target->ed_entries);
5040 while (cur_device != NULL && cur_device->lun_id < lun_id)
5041 cur_device = TAILQ_NEXT(cur_device, links);
5042 if (cur_device != NULL) {
5043 TAILQ_INSERT_BEFORE(cur_device, device, links);
5044 } else {
5045 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
5047 target->generation++;
5048 #ifdef CAM_NEW_TRAN_CODE
5049 if (lun_id != CAM_LUN_WILDCARD) {
5050 xpt_compile_path(&path,
5051 NULL,
5052 bus->path_id,
5053 target->target_id,
5054 lun_id);
5055 xpt_devise_transport(&path);
5056 xpt_release_path(&path);
5058 #endif /* CAM_NEW_TRAN_CODE */
5060 return (device);
5063 static void
5064 xpt_reference_device(struct cam_ed *device)
5066 ++device->refcount;
5069 static void
5070 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
5071 struct cam_ed *device)
5073 struct cam_devq *devq;
5075 crit_enter();
5076 if (device->refcount == 1) {
5077 KKASSERT(device->flags & CAM_DEV_UNCONFIGURED);
5079 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
5080 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
5081 panic("Removing device while still queued for ccbs");
5083 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
5084 device->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
5085 callout_stop(&device->c_handle);
5088 TAILQ_REMOVE(&target->ed_entries, device, links);
5089 target->generation++;
5090 xpt_max_ccbs -= device->ccbq.devq_openings;
5091 if (!SIM_DEAD(bus->sim)) {
5092 /* Release our slot in the devq */
5093 devq = bus->sim->devq;
5094 cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
5096 camq_fini(&device->drvq);
5097 camq_fini(&device->ccbq.queue);
5098 xpt_release_target(bus, target);
5099 KKASSERT(device->refcount == 1);
5100 kfree(device, M_CAMXPT);
5101 } else {
5102 --device->refcount;
5104 crit_exit();
5107 static u_int32_t
5108 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
5110 int diff;
5111 int result;
5112 struct cam_ed *dev;
5114 dev = path->device;
5116 crit_enter();
5118 diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
5119 result = cam_ccbq_resize(&dev->ccbq, newopenings);
5120 if (result == CAM_REQ_CMP && (diff < 0)) {
5121 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
5123 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5124 || (dev->inq_flags & SID_CmdQue) != 0)
5125 dev->tag_saved_openings = newopenings;
5126 /* Adjust the global limit */
5127 xpt_max_ccbs += diff;
5128 crit_exit();
5129 return (result);
5132 static struct cam_eb *
5133 xpt_find_bus(path_id_t path_id)
5135 struct cam_eb *bus;
5137 TAILQ_FOREACH(bus, &xpt_busses, links) {
5138 if (bus->path_id == path_id) {
5139 bus->refcount++;
5140 break;
5143 return (bus);
5146 static struct cam_et *
5147 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
5149 struct cam_et *target;
5151 TAILQ_FOREACH(target, &bus->et_entries, links) {
5152 if (target->target_id == target_id) {
5153 target->refcount++;
5154 break;
5157 return (target);
5160 static struct cam_ed *
5161 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
5163 struct cam_ed *device;
5165 TAILQ_FOREACH(device, &target->ed_entries, links) {
5166 if (device->lun_id == lun_id) {
5167 device->refcount++;
5168 break;
5171 return (device);
5174 typedef struct {
5175 union ccb *request_ccb;
5176 struct ccb_pathinq *cpi;
5177 int counter;
5178 } xpt_scan_bus_info;
5181 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
5182 * As the scan progresses, xpt_scan_bus is used as the
5183 * callback on completion function.
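*
* Concretely, the XPT_SCAN_BUS case fires off one XPT_SCAN_LUN probe
* per target (or a single probe at a time for PIM_SEQSCAN
* controllers), and each completed probe re-enters this function via
* the XPT_SCAN_LUN case to pick the next lun or target to scan.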
5185 static void
5186 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
5188 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5189 ("xpt_scan_bus\n"));
5190 switch (request_ccb->ccb_h.func_code) {
5191 case XPT_SCAN_BUS:
5193 xpt_scan_bus_info *scan_info;
5194 union ccb *work_ccb;
5195 struct cam_path *path;
5196 u_int i;
5197 u_int max_target;
5198 u_int initiator_id;
5200 /* Find out the characteristics of the bus */
5201 work_ccb = xpt_alloc_ccb();
5202 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
5203 request_ccb->ccb_h.pinfo.priority);
5204 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
5205 xpt_action(work_ccb);
5206 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
5207 request_ccb->ccb_h.status = work_ccb->ccb_h.status;
5208 xpt_free_ccb(work_ccb);
5209 xpt_done(request_ccb);
5210 return;
5213 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5215 * Can't scan the bus on an adapter that
5216 * cannot perform the initiator role.
5218 request_ccb->ccb_h.status = CAM_REQ_CMP;
5219 xpt_free_ccb(work_ccb);
5220 xpt_done(request_ccb);
5221 return;
5224 /* Save some state for use while we probe for devices */
5225 scan_info = (xpt_scan_bus_info *)
5226 kmalloc(sizeof(xpt_scan_bus_info), M_TEMP, M_INTWAIT);
5227 scan_info->request_ccb = request_ccb;
5228 scan_info->cpi = &work_ccb->cpi;
5230 /* Cache on our stack so we can work asynchronously */
5231 max_target = scan_info->cpi->max_target;
5232 initiator_id = scan_info->cpi->initiator_id;
5236 * We can scan all targets in parallel, or do it sequentially.
5238 if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
5239 max_target = 0;
5240 scan_info->counter = 0;
5241 } else {
5242 scan_info->counter = scan_info->cpi->max_target + 1;
5243 if (scan_info->cpi->initiator_id < scan_info->counter) {
5244 scan_info->counter--;
5248 for (i = 0; i <= max_target; i++) {
5249 cam_status status;
5250 if (i == initiator_id)
5251 continue;
5253 status = xpt_create_path(&path, xpt_periph,
5254 request_ccb->ccb_h.path_id,
5255 i, 0);
5256 if (status != CAM_REQ_CMP) {
5257 kprintf("xpt_scan_bus: xpt_create_path failed"
5258 " with status %#x, bus scan halted\n",
5259 status);
5260 kfree(scan_info, M_TEMP);
5261 request_ccb->ccb_h.status = status;
5262 xpt_free_ccb(work_ccb);
5263 xpt_done(request_ccb);
5264 break;
5266 work_ccb = xpt_alloc_ccb();
5267 xpt_setup_ccb(&work_ccb->ccb_h, path,
5268 request_ccb->ccb_h.pinfo.priority);
5269 work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5270 work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5271 work_ccb->ccb_h.ppriv_ptr0 = scan_info;
5272 work_ccb->crcn.flags = request_ccb->crcn.flags;
5273 xpt_action(work_ccb);
5275 break;
5277 case XPT_SCAN_LUN:
5279 cam_status status;
5280 struct cam_path *path;
5281 xpt_scan_bus_info *scan_info;
5282 path_id_t path_id;
5283 target_id_t target_id;
5284 lun_id_t lun_id;
5286 /* Reuse the same CCB to query if a device was really found */
5287 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
5288 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
5289 request_ccb->ccb_h.pinfo.priority);
5290 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5292 path_id = request_ccb->ccb_h.path_id;
5293 target_id = request_ccb->ccb_h.target_id;
5294 lun_id = request_ccb->ccb_h.target_lun;
5295 xpt_action(request_ccb);
5297 if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
5298 struct cam_ed *device;
5299 struct cam_et *target;
5300 int phl;
5303 * If we already probed lun 0 successfully, or
5304 * we have additional configured luns on this
5305 * target that might have "gone away", go onto
5306 * the next lun.
5308 target = request_ccb->ccb_h.path->target;
5310 * We may touch devices that we don't
5311 * hold references to, so ensure they
5312 * don't disappear out from under us.
5313 * The target above is referenced by the
5314 * path in the request ccb.
5316 phl = 0;
5317 crit_enter();
5318 device = TAILQ_FIRST(&target->ed_entries);
5319 if (device != NULL) {
5320 phl = CAN_SRCH_HI_SPARSE(device);
5321 if (device->lun_id == 0)
5322 device = TAILQ_NEXT(device, links);
5324 crit_exit();
5325 if ((lun_id != 0) || (device != NULL)) {
5326 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
5327 lun_id++;
5329 } else {
5330 struct cam_ed *device;
5332 device = request_ccb->ccb_h.path->device;
5334 if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
5335 /* Try the next lun */
5336 if (lun_id < (CAM_SCSI2_MAXLUN-1)
5337 || CAN_SRCH_HI_DENSE(device))
5338 lun_id++;
5343 * Free the current request path; we're done with it.
5345 xpt_free_path(request_ccb->ccb_h.path);
5348 * Check to see if we should scan any further luns.
5350 if (lun_id == request_ccb->ccb_h.target_lun
5351 || lun_id > scan_info->cpi->max_lun) {
5352 int done;
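/*
* In parallel-scan mode, scan_info->counter counts outstanding
* target scans down toward zero; in PIM_SEQSCAN mode it instead
* walks upward through the target ids, skipping the initiator.
*/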
5354 hop_again:
5355 done = 0;
5356 if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
5357 scan_info->counter++;
5358 if (scan_info->counter ==
5359 scan_info->cpi->initiator_id) {
5360 scan_info->counter++;
5362 if (scan_info->counter >=
5363 scan_info->cpi->max_target+1) {
5364 done = 1;
5366 } else {
5367 scan_info->counter--;
5368 if (scan_info->counter == 0) {
5369 done = 1;
5372 if (done) {
5373 xpt_free_ccb(request_ccb);
5374 xpt_free_ccb((union ccb *)scan_info->cpi);
5375 request_ccb = scan_info->request_ccb;
5376 kfree(scan_info, M_TEMP);
5377 request_ccb->ccb_h.status = CAM_REQ_CMP;
5378 xpt_done(request_ccb);
5379 break;
5382 if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) {
5383 break;
5385 status = xpt_create_path(&path, xpt_periph,
5386 scan_info->request_ccb->ccb_h.path_id,
5387 scan_info->counter, 0);
5388 if (status != CAM_REQ_CMP) {
5389 kprintf("xpt_scan_bus: xpt_create_path failed"
5390 " with status %#x, bus scan halted\n",
5391 status);
5392 xpt_free_ccb(request_ccb);
5393 xpt_free_ccb((union ccb *)scan_info->cpi);
5394 request_ccb = scan_info->request_ccb;
5395 kfree(scan_info, M_TEMP);
5396 request_ccb->ccb_h.status = status;
5397 xpt_done(request_ccb);
5398 break;
5400 xpt_setup_ccb(&request_ccb->ccb_h, path,
5401 request_ccb->ccb_h.pinfo.priority);
5402 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5403 request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5404 request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5405 request_ccb->crcn.flags =
5406 scan_info->request_ccb->crcn.flags;
5407 } else {
5408 status = xpt_create_path(&path, xpt_periph,
5409 path_id, target_id, lun_id);
5410 if (status != CAM_REQ_CMP) {
5411 kprintf("xpt_scan_bus: xpt_create_path failed "
5412 "with status %#x, halting LUN scan\n",
5413 status);
5414 goto hop_again;
5416 xpt_setup_ccb(&request_ccb->ccb_h, path,
5417 request_ccb->ccb_h.pinfo.priority);
5418 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5419 request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5420 request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5421 request_ccb->crcn.flags =
5422 scan_info->request_ccb->crcn.flags;
5424 xpt_action(request_ccb);
5425 break;
5427 default:
5428 break;
5432 typedef enum {
5433 PROBE_TUR,
5434 PROBE_INQUIRY,
5435 PROBE_FULL_INQUIRY,
5436 PROBE_MODE_SENSE,
5437 PROBE_SERIAL_NUM,
5438 PROBE_TUR_FOR_NEGOTIATION
5439 } probe_action;
5441 typedef enum {
5442 PROBE_INQUIRY_CKSUM = 0x01,
5443 PROBE_SERIAL_CKSUM = 0x02,
5444 PROBE_NO_ANNOUNCE = 0x04
5445 } probe_flags;
5447 typedef struct {
5448 TAILQ_HEAD(, ccb_hdr) request_ccbs;
5449 probe_action action;
5450 union ccb saved_ccb;
5451 probe_flags flags;
5452 MD5_CTX context;
5453 u_int8_t digest[16];
5454 } probe_softc;
5456 static void
5457 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
5458 cam_flags flags, union ccb *request_ccb)
5460 struct ccb_pathinq cpi;
5461 cam_status status;
5462 struct cam_path *new_path;
5463 struct cam_periph *old_periph;
5465 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5466 ("xpt_scan_lun\n"));
5468 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
5469 cpi.ccb_h.func_code = XPT_PATH_INQ;
5470 xpt_action((union ccb *)&cpi);
5472 if (cpi.ccb_h.status != CAM_REQ_CMP) {
5473 if (request_ccb != NULL) {
5474 request_ccb->ccb_h.status = cpi.ccb_h.status;
5475 xpt_done(request_ccb);
5477 return;
5480 if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5482 * Can't scan the bus on an adapter that
5483 * cannot perform the initiator role.
5485 if (request_ccb != NULL) {
5486 request_ccb->ccb_h.status = CAM_REQ_CMP;
5487 xpt_done(request_ccb);
5489 return;
5492 if (request_ccb == NULL) {
5493 request_ccb = kmalloc(sizeof(union ccb), M_TEMP, M_INTWAIT);
5494 new_path = kmalloc(sizeof(*new_path), M_TEMP, M_INTWAIT);
5495 status = xpt_compile_path(new_path, xpt_periph,
5496 path->bus->path_id,
5497 path->target->target_id,
5498 path->device->lun_id);
5500 if (status != CAM_REQ_CMP) {
5501 xpt_print_path(path);
5502 kprintf("xpt_scan_lun: can't compile path, can't "
5503 "continue\n");
5504 kfree(request_ccb, M_TEMP);
5505 kfree(new_path, M_TEMP);
5506 return;
5508 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
5509 request_ccb->ccb_h.cbfcnp = xptscandone;
5510 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5511 request_ccb->crcn.flags = flags;
5514 crit_enter();
5515 if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
5516 probe_softc *softc;
5518 softc = (probe_softc *)old_periph->softc;
5519 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5520 periph_links.tqe);
5521 } else {
5522 status = cam_periph_alloc(proberegister, NULL, probecleanup,
5523 probestart, "probe",
5524 CAM_PERIPH_BIO,
5525 request_ccb->ccb_h.path, NULL, 0,
5526 request_ccb);
5528 if (status != CAM_REQ_CMP) {
5529 xpt_print_path(path);
5530 kprintf("xpt_scan_lun: cam_alloc_periph returned an "
5531 "error, can't continue probe\n");
5532 request_ccb->ccb_h.status = status;
5533 xpt_done(request_ccb);
5536 crit_exit();
5539 static void
5540 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
5542 xpt_release_path(done_ccb->ccb_h.path);
5543 kfree(done_ccb->ccb_h.path, M_TEMP);
5544 kfree(done_ccb, M_TEMP);
5547 static cam_status
5548 proberegister(struct cam_periph *periph, void *arg)
5550 union ccb *request_ccb; /* CCB representing the probe request */
5551 probe_softc *softc;
5553 request_ccb = (union ccb *)arg;
5554 if (periph == NULL) {
5555 kprintf("proberegister: periph was NULL!!\n");
5556 return(CAM_REQ_CMP_ERR);
5559 if (request_ccb == NULL) {
5560 kprintf("proberegister: no probe CCB, "
5561 "can't register device\n");
5562 return(CAM_REQ_CMP_ERR);
5565 softc = kmalloc(sizeof(*softc), M_TEMP, M_INTWAIT | M_ZERO);
5566 TAILQ_INIT(&softc->request_ccbs);
5567 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5568 periph_links.tqe);
5569 softc->flags = 0;
5570 periph->softc = softc;
5571 cam_periph_acquire(periph);
5573 * Ensure we've waited at least a bus settle
5574 * delay before attempting to probe the device.
5575 * For HBAs that don't do bus resets, this won't make a difference.
5577 cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
5578 scsi_delay);
5579 probeschedule(periph);
5580 return(CAM_REQ_CMP);
5583 static void
5584 probeschedule(struct cam_periph *periph)
5586 struct ccb_pathinq cpi;
5587 union ccb *ccb;
5588 probe_softc *softc;
5590 softc = (probe_softc *)periph->softc;
5591 ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5593 xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
5594 cpi.ccb_h.func_code = XPT_PATH_INQ;
5595 xpt_action((union ccb *)&cpi);
5598 * If a device has gone away and another device, or the same one,
5599 * is back in the same place, it should have a unit attention
5600 * condition pending. It will not report the unit attention in
5601 * response to an inquiry, which may leave invalid transfer
5602 * negotiations in effect. The TUR will reveal the unit attention
5603 * condition. Only send the TUR for lun 0, since some devices
5604 * will get confused by commands other than inquiry to non-existent
5605 * luns. If you think a device has gone away, start your scan from
5606 * lun 0. This will ensure that any bogus transfer settings are
5607 * invalidated.
5609 * If we haven't seen the device before and the controller supports
5610 * some kind of transfer negotiation, negotiate with the first
5611 * sent command if no bus reset was performed at startup. This
5612 * ensures that the device is not confused by transfer negotiation
5613 * settings left over by loader or BIOS action.
5615 if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5616 && (ccb->ccb_h.target_lun == 0)) {
5617 softc->action = PROBE_TUR;
5618 } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
5619 && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
5620 proberequestdefaultnegotiation(periph);
5621 softc->action = PROBE_INQUIRY;
5622 } else {
5623 softc->action = PROBE_INQUIRY;
5626 if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
5627 softc->flags |= PROBE_NO_ANNOUNCE;
5628 else
5629 softc->flags &= ~PROBE_NO_ANNOUNCE;
5631 xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
5634 static void
5635 probestart(struct cam_periph *periph, union ccb *start_ccb)
5637 /* Probe the device that our peripheral driver points to */
5638 struct ccb_scsiio *csio;
5639 probe_softc *softc;
5641 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
5643 softc = (probe_softc *)periph->softc;
5644 csio = &start_ccb->csio;
5646 switch (softc->action) {
5647 case PROBE_TUR:
5648 case PROBE_TUR_FOR_NEGOTIATION:
5650 scsi_test_unit_ready(csio,
5651 /*retries*/4,
5652 probedone,
5653 MSG_SIMPLE_Q_TAG,
5654 SSD_FULL_SIZE,
5655 /*timeout*/60000);
5656 break;
5658 case PROBE_INQUIRY:
5659 case PROBE_FULL_INQUIRY:
5661 u_int inquiry_len;
5662 struct scsi_inquiry_data *inq_buf;
5664 inq_buf = &periph->path->device->inq_data;
5666 * If the device is currently configured, we calculate an
5667 * MD5 checksum of the inquiry data, and if the serial number
5668 * length is greater than 0, add the serial number data
5669 * into the checksum as well. Once the inquiry and the
5670 * serial number check finish, we attempt to figure out
5671 * whether we still have the same device.
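*
* The comparison itself happens in probedone()'s PROBE_SERIAL_NUM
* case, which recomputes the digest and matches it against the
* softc->digest saved here.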
5673 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5675 MD5Init(&softc->context);
5676 MD5Update(&softc->context, (unsigned char *)inq_buf,
5677 sizeof(struct scsi_inquiry_data));
5678 softc->flags |= PROBE_INQUIRY_CKSUM;
5679 if (periph->path->device->serial_num_len > 0) {
5680 MD5Update(&softc->context,
5681 periph->path->device->serial_num,
5682 periph->path->device->serial_num_len);
5683 softc->flags |= PROBE_SERIAL_CKSUM;
5685 MD5Final(softc->digest, &softc->context);
5688 if (softc->action == PROBE_INQUIRY)
5689 inquiry_len = SHORT_INQUIRY_LENGTH;
5690 else
5691 inquiry_len = inq_buf->additional_length
5692 + offsetof(struct scsi_inquiry_data,
5693 additional_length) + 1;
5696 * Some parallel SCSI devices fail to send an
5697 * ignore wide residue message when dealing with
5698 * odd length inquiry requests. Round up to be
5699 * safe.
5701 inquiry_len = roundup2(inquiry_len, 2);
5703 scsi_inquiry(csio,
5704 /*retries*/4,
5705 probedone,
5706 MSG_SIMPLE_Q_TAG,
5707 (u_int8_t *)inq_buf,
5708 inquiry_len,
5709 /*evpd*/FALSE,
5710 /*page_code*/0,
5711 SSD_MIN_SIZE,
5712 /*timeout*/60 * 1000);
5713 break;
5715 case PROBE_MODE_SENSE:
5717 void *mode_buf;
5718 int mode_buf_len;
5720 mode_buf_len = sizeof(struct scsi_mode_header_6)
5721 + sizeof(struct scsi_mode_blk_desc)
5722 + sizeof(struct scsi_control_page);
5723 mode_buf = kmalloc(mode_buf_len, M_TEMP, M_INTWAIT);
5724 scsi_mode_sense(csio,
5725 /*retries*/4,
5726 probedone,
5727 MSG_SIMPLE_Q_TAG,
5728 /*dbd*/FALSE,
5729 SMS_PAGE_CTRL_CURRENT,
5730 SMS_CONTROL_MODE_PAGE,
5731 mode_buf,
5732 mode_buf_len,
5733 SSD_FULL_SIZE,
5734 /*timeout*/60000);
5735 break;
5737 case PROBE_SERIAL_NUM:
5739 struct scsi_vpd_unit_serial_number *serial_buf;
5740 struct cam_ed* device;
5742 serial_buf = NULL;
5743 device = periph->path->device;
5744 device->serial_num = NULL;
5745 device->serial_num_len = 0;
5747 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0) {
5748 serial_buf = kmalloc(sizeof(*serial_buf), M_TEMP,
5749 M_INTWAIT | M_ZERO);
5750 scsi_inquiry(csio,
5751 /*retries*/4,
5752 probedone,
5753 MSG_SIMPLE_Q_TAG,
5754 (u_int8_t *)serial_buf,
5755 sizeof(*serial_buf),
5756 /*evpd*/TRUE,
5757 SVPD_UNIT_SERIAL_NUMBER,
5758 SSD_MIN_SIZE,
5759 /*timeout*/60 * 1000);
5760 break;
5763 * We'll have to do without; let our probedone
5764 * routine finish up for us.
5766 start_ccb->csio.data_ptr = NULL;
5767 probedone(periph, start_ccb);
5768 return;
5771 xpt_action(start_ccb);
5774 static void
5775 proberequestdefaultnegotiation(struct cam_periph *periph)
5777 struct ccb_trans_settings cts;
5779 xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5780 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5781 #ifdef CAM_NEW_TRAN_CODE
5782 cts.type = CTS_TYPE_USER_SETTINGS;
5783 #else /* CAM_NEW_TRAN_CODE */
5784 cts.flags = CCB_TRANS_USER_SETTINGS;
5785 #endif /* CAM_NEW_TRAN_CODE */
5786 xpt_action((union ccb *)&cts);
5787 if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5788 return;
5790 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5791 #ifdef CAM_NEW_TRAN_CODE
5792 cts.type = CTS_TYPE_CURRENT_SETTINGS;
5793 #else /* CAM_NEW_TRAN_CODE */
5794 cts.flags &= ~CCB_TRANS_USER_SETTINGS;
5795 cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
5796 #endif /* CAM_NEW_TRAN_CODE */
5797 xpt_action((union ccb *)&cts);
5800 static void
5801 probedone(struct cam_periph *periph, union ccb *done_ccb)
5803 probe_softc *softc;
5804 struct cam_path *path;
5805 u_int32_t priority;
5807 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
5809 softc = (probe_softc *)periph->softc;
5810 path = done_ccb->ccb_h.path;
5811 priority = done_ccb->ccb_h.pinfo.priority;
5813 switch (softc->action) {
5814 case PROBE_TUR:
5816 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5818 if (cam_periph_error(done_ccb, 0,
5819 SF_NO_PRINT, NULL) == ERESTART)
5820 return;
5821 else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5822 /* Don't wedge the queue */
5823 xpt_release_devq(done_ccb->ccb_h.path,
5824 /*count*/1,
5825 /*run_queue*/TRUE);
5827 softc->action = PROBE_INQUIRY;
5828 xpt_release_ccb(done_ccb);
5829 xpt_schedule(periph, priority);
5830 return;
5832 case PROBE_INQUIRY:
5833 case PROBE_FULL_INQUIRY:
5835 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5836 struct scsi_inquiry_data *inq_buf;
5837 u_int8_t periph_qual;
5839 path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
5840 inq_buf = &path->device->inq_data;
5842 periph_qual = SID_QUAL(inq_buf);
5844 switch(periph_qual) {
5845 case SID_QUAL_LU_CONNECTED:
5847 u_int8_t len;
5850 * We conservatively request only
5851 * SHORT_INQUIRY_LEN bytes of inquiry
5852 * information during our first try
5853 * at sending an INQUIRY. If the device
5854 * has more information to give,
5855 * perform a second request specifying
5856 * the amount of information the device
5857 * is willing to give.
5859 len = inq_buf->additional_length
5860 + offsetof(struct scsi_inquiry_data,
5861 additional_length) + 1;
5862 if (softc->action == PROBE_INQUIRY
5863 && len > SHORT_INQUIRY_LENGTH) {
5864 softc->action = PROBE_FULL_INQUIRY;
5865 xpt_release_ccb(done_ccb);
5866 xpt_schedule(periph, priority);
5867 return;
5870 xpt_find_quirk(path->device);
5872 #ifdef CAM_NEW_TRAN_CODE
5873 xpt_devise_transport(path);
5874 #endif /* CAM_NEW_TRAN_CODE */
5875 if (INQ_DATA_TQ_ENABLED(inq_buf))
5876 softc->action = PROBE_MODE_SENSE;
5877 else
5878 softc->action = PROBE_SERIAL_NUM;
5880 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
5881 xpt_reference_device(path->device);
5883 xpt_release_ccb(done_ccb);
5884 xpt_schedule(periph, priority);
5885 return;
5887 default:
5888 break;
5890 } else if (cam_periph_error(done_ccb, 0,
5891 done_ccb->ccb_h.target_lun > 0
5892 ? SF_RETRY_UA|SF_QUIET_IR
5893 : SF_RETRY_UA,
5894 &softc->saved_ccb) == ERESTART) {
5895 return;
5896 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5897 /* Don't wedge the queue */
5898 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5899 /*run_queue*/TRUE);
5902 * If we get to this point, we got an error status back
5903 * from the inquiry and the error status doesn't require
5904 * automatically retrying the command. Therefore, the
5905 * inquiry failed. If we had inquiry information before
5906 * for this device, but this latest inquiry command failed,
5907 * the device has probably gone away. If this device isn't
5908 * already marked unconfigured, notify the peripheral
5909 * drivers that this device is no more.
5911 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5912 /* Send the async notification. */
5913 xpt_async(AC_LOST_DEVICE, path, NULL);
5916 xpt_release_ccb(done_ccb);
5917 break;
5919 case PROBE_MODE_SENSE:
5921 struct ccb_scsiio *csio;
5922 struct scsi_mode_header_6 *mode_hdr;
5924 csio = &done_ccb->csio;
5925 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
5926 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5927 struct scsi_control_page *page;
5928 u_int8_t *offset;
5930 offset = ((u_int8_t *)&mode_hdr[1])
5931 + mode_hdr->blk_desc_len;
5932 page = (struct scsi_control_page *)offset;
5933 path->device->queue_flags = page->queue_flags;
5934 } else if (cam_periph_error(done_ccb, 0,
5935 SF_RETRY_UA|SF_NO_PRINT,
5936 &softc->saved_ccb) == ERESTART) {
5937 return;
5938 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5939 /* Don't wedge the queue */
5940 xpt_release_devq(done_ccb->ccb_h.path,
5941 /*count*/1, /*run_queue*/TRUE);
5943 xpt_release_ccb(done_ccb);
5944 kfree(mode_hdr, M_TEMP);
5945 softc->action = PROBE_SERIAL_NUM;
5946 xpt_schedule(periph, priority);
5947 return;
5949 case PROBE_SERIAL_NUM:
5951 struct ccb_scsiio *csio;
5952 struct scsi_vpd_unit_serial_number *serial_buf;
5953 u_int32_t priority;
5954 int changed;
5955 int have_serialnum;
5957 changed = 1;
5958 have_serialnum = 0;
5959 csio = &done_ccb->csio;
5960 priority = done_ccb->ccb_h.pinfo.priority;
5961 serial_buf =
5962 (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
5964 /* Clean up from previous instance of this device */
5965 if (path->device->serial_num != NULL) {
5966 kfree(path->device->serial_num, M_CAMXPT);
5967 path->device->serial_num = NULL;
5968 path->device->serial_num_len = 0;
5971 if (serial_buf == NULL) {
5973 * Don't process the command as it was never sent
5975 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
5976 && (serial_buf->length > 0)) {
5978 have_serialnum = 1;
5979 path->device->serial_num =
5980 kmalloc((serial_buf->length + 1),
5981 M_CAMXPT, M_INTWAIT);
5982 bcopy(serial_buf->serial_num,
5983 path->device->serial_num,
5984 serial_buf->length);
5985 path->device->serial_num_len = serial_buf->length;
5986 path->device->serial_num[serial_buf->length] = '\0';
5987 } else if (cam_periph_error(done_ccb, 0,
5988 SF_RETRY_UA|SF_NO_PRINT,
5989 &softc->saved_ccb) == ERESTART) {
5990 return;
5991 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5992 /* Don't wedge the queue */
5993 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5994 /*run_queue*/TRUE);
5998 * Let's see if we have seen this device before.
6000 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
6001 MD5_CTX context;
6002 u_int8_t digest[16];
6004 MD5Init(&context);
6006 MD5Update(&context,
6007 (unsigned char *)&path->device->inq_data,
6008 sizeof(struct scsi_inquiry_data));
6010 if (have_serialnum)
6011 MD5Update(&context, serial_buf->serial_num,
6012 serial_buf->length);
6014 MD5Final(digest, &context);
6015 if (bcmp(softc->digest, digest, 16) == 0)
6016 changed = 0;
6019 * XXX Do we need to do a TUR in order to ensure
6020 * that the device really hasn't changed???
6022 if ((changed != 0)
6023 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
6024 xpt_async(AC_LOST_DEVICE, path, NULL);
6026 if (serial_buf != NULL)
6027 kfree(serial_buf, M_TEMP);
6029 if (changed != 0) {
6031 * Now that we have all the necessary
6032 * information to safely perform transfer
6033 * negotiations... Controllers don't perform
6034 * any negotiation or tagged queuing until
6035 * after the first XPT_SET_TRAN_SETTINGS ccb is
6036 * received. So, on a new device, just retrieve
6037 * the user settings, and set them as the current
6038 * settings to set the device up.
6040 proberequestdefaultnegotiation(periph);
6041 xpt_release_ccb(done_ccb);
6044 * Perform a TUR to allow the controller to
6045 * perform any necessary transfer negotiation.
6047 softc->action = PROBE_TUR_FOR_NEGOTIATION;
6048 xpt_schedule(periph, priority);
6049 return;
6051 xpt_release_ccb(done_ccb);
6052 break;
6054 case PROBE_TUR_FOR_NEGOTIATION:
6055 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6056 /* Don't wedge the queue */
6057 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6058 /*run_queue*/TRUE);
6061 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
6062 xpt_reference_device(path->device);
6064 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
6065 /* Inform the XPT that a new device has been found */
6066 done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
6067 xpt_action(done_ccb);
6069 xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
6070 done_ccb);
6072 xpt_release_ccb(done_ccb);
6073 break;
6075 done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
6076 TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
6077 done_ccb->ccb_h.status = CAM_REQ_CMP;
6078 xpt_done(done_ccb);
6079 if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
6080 cam_periph_invalidate(periph);
6081 cam_periph_release(periph);
6082 } else {
6083 probeschedule(periph);
6087 static void
6088 probecleanup(struct cam_periph *periph)
6090 kfree(periph->softc, M_TEMP);
6093 static void
6094 xpt_find_quirk(struct cam_ed *device)
6096 caddr_t match;
6098 match = cam_quirkmatch((caddr_t)&device->inq_data,
6099 (caddr_t)xpt_quirk_table,
6100 sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
6101 sizeof(*xpt_quirk_table), scsi_inquiry_match);
6103 if (match == NULL)
6104 panic("xpt_find_quirk: device didn't match wildcard entry!!");
6106 device->quirk = (struct xpt_quirk_entry *)match;
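/*
* Sysctl handler backing the cam_srch_hi tunable: only 0 or 1 is
* accepted. When set, bus scans may probe luns above the traditional
* SCSI-2 limit (see the CAN_SRCH_HI_* checks in xpt_scan_bus()).
*/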
6109 static int
6110 sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS)
6112 int error, bool;
6114 bool = cam_srch_hi;
6115 error = sysctl_handle_int(oidp, &bool, sizeof(bool), req);
6116 if (error != 0 || req->newptr == NULL)
6117 return (error);
6118 if (bool == 0 || bool == 1) {
6119 cam_srch_hi = bool;
6120 return (0);
6121 } else {
6122 return (EINVAL);
6126 #ifdef CAM_NEW_TRAN_CODE
6128 static void
6129 xpt_devise_transport(struct cam_path *path)
6131 struct ccb_pathinq cpi;
6132 struct ccb_trans_settings cts;
6133 struct scsi_inquiry_data *inq_buf;
6135 /* Get transport information from the SIM */
6136 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
6137 cpi.ccb_h.func_code = XPT_PATH_INQ;
6138 xpt_action((union ccb *)&cpi);
6140 inq_buf = NULL;
6141 if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
6142 inq_buf = &path->device->inq_data;
6143 path->device->protocol = PROTO_SCSI;
6144 path->device->protocol_version =
6145 inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
6146 path->device->transport = cpi.transport;
6147 path->device->transport_version = cpi.transport_version;
6150 * Any device not using SPI3 features should
6151 * be considered SPI2 or lower.
6153 if (inq_buf != NULL) {
6154 if (path->device->transport == XPORT_SPI
6155 && (inq_buf->spi3data & SID_SPI_MASK) == 0
6156 && path->device->transport_version > 2)
6157 path->device->transport_version = 2;
6158 } else {
6159 struct cam_ed* otherdev;
6161 for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
6162 otherdev != NULL;
6163 otherdev = TAILQ_NEXT(otherdev, links)) {
6164 if (otherdev != path->device)
6165 break;
6168 if (otherdev != NULL) {
6170 * Initially assume the same versioning as
6171 * prior luns for this target.
6173 path->device->protocol_version =
6174 otherdev->protocol_version;
6175 path->device->transport_version =
6176 otherdev->transport_version;
6177 } else {
6178 /* Until we know better, opt for safety */
6179 path->device->protocol_version = 2;
6180 if (path->device->transport == XPORT_SPI)
6181 path->device->transport_version = 2;
6182 else
6183 path->device->transport_version = 0;
6188 * XXX
6189 * For a device compliant with SPC-2 we should be able
6190 * to determine the transport version supported by
6191 * scrutinizing the version descriptors in the
6192 * inquiry buffer.
6195 /* Tell the controller what we think */
6196 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
6197 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
6198 cts.type = CTS_TYPE_CURRENT_SETTINGS;
6199 cts.transport = path->device->transport;
6200 cts.transport_version = path->device->transport_version;
6201 cts.protocol = path->device->protocol;
6202 cts.protocol_version = path->device->protocol_version;
6203 cts.proto_specific.valid = 0;
6204 cts.xport_specific.valid = 0;
6205 xpt_action((union ccb *)&cts);
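/*
 * The SPI clamp above encodes "no SPI3 feature bits in INQUIRY means the
 * device is SPI2 or older".  A compact userland model of just that rule;
 * the constant is a stand-in for SID_SPI_MASK from scsi_all.h:
 */
#if 0
#include <stdio.h>

#define DEMO_SID_SPI_MASK 0x0f		/* stand-in for SID_SPI_MASK */

static int
demo_clamp_spi_version(int transport_version, unsigned char spi3data)
{
	if ((spi3data & DEMO_SID_SPI_MASK) == 0 && transport_version > 2)
		return (2);		/* no SPI3 features: cap at SPI2 */
	return (transport_version);
}

int
main(void)
{
	printf("%d\n", demo_clamp_spi_version(3, 0x00));	/* 2 */
	printf("%d\n", demo_clamp_spi_version(3, 0x04));	/* 3 */
	return (0);
}
#endif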
6208 static void
6209 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6210 int async_update)
6211 {
6212 struct ccb_pathinq cpi;
6213 struct ccb_trans_settings cur_cts;
6214 struct ccb_trans_settings_scsi *scsi;
6215 struct ccb_trans_settings_scsi *cur_scsi;
6216 struct cam_sim *sim;
6217 struct scsi_inquiry_data *inq_data;
6219 if (device == NULL) {
6220 cts->ccb_h.status = CAM_PATH_INVALID;
6221 xpt_done((union ccb *)cts);
6222 return;
6223 }
6225 if (cts->protocol == PROTO_UNKNOWN
6226 || cts->protocol == PROTO_UNSPECIFIED) {
6227 cts->protocol = device->protocol;
6228 cts->protocol_version = device->protocol_version;
6229 }
6231 if (cts->protocol_version == PROTO_VERSION_UNKNOWN
6232 || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
6233 cts->protocol_version = device->protocol_version;
6235 if (cts->protocol != device->protocol) {
6236 xpt_print_path(cts->ccb_h.path);
6237 kprintf("Uninitialized Protocol %x:%x?\n",
6238 cts->protocol, device->protocol);
6239 cts->protocol = device->protocol;
6240 }
6242 if (cts->protocol_version > device->protocol_version) {
6243 if (bootverbose) {
6244 xpt_print_path(cts->ccb_h.path);
6245 kprintf("Down-revving Protocol Version from %d to %d?\n",
6246 cts->protocol_version, device->protocol_version);
6247 }
6248 cts->protocol_version = device->protocol_version;
6249 }
6251 if (cts->transport == XPORT_UNKNOWN
6252 || cts->transport == XPORT_UNSPECIFIED) {
6253 cts->transport = device->transport;
6254 cts->transport_version = device->transport_version;
6255 }
6257 if (cts->transport_version == XPORT_VERSION_UNKNOWN
6258 || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
6259 cts->transport_version = device->transport_version;
6261 if (cts->transport != device->transport) {
6262 xpt_print_path(cts->ccb_h.path);
6263 kprintf("Uninitialized Transport %x:%x?\n",
6264 cts->transport, device->transport);
6265 cts->transport = device->transport;
6266 }
6268 if (cts->transport_version > device->transport_version) {
6269 if (bootverbose) {
6270 xpt_print_path(cts->ccb_h.path);
6271 kprintf("Down-revving Transport Version from %d to %d?\n",
6272 cts->transport_version,
6273 device->transport_version);
6274 }
6275 cts->transport_version = device->transport_version;
6276 }
6278 sim = cts->ccb_h.path->bus->sim;
6280 /*
6281 * Nothing more of interest to do unless
6282 * this is a device connected via the
6283 * SCSI protocol.
6284 */
6285 if (cts->protocol != PROTO_SCSI) {
6286 if (async_update == FALSE)
6287 (*(sim->sim_action))(sim, (union ccb *)cts);
6288 return;
6289 }
6291 inq_data = &device->inq_data;
6292 scsi = &cts->proto_specific.scsi;
6293 xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6294 cpi.ccb_h.func_code = XPT_PATH_INQ;
6295 xpt_action((union ccb *)&cpi);
6297 /* SCSI specific sanity checking */
6298 if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6299 || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
6300 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6301 || (device->quirk->mintags == 0)) {
6302 /*
6303 * Can't tag on hardware that doesn't support tags,
6304 * doesn't have it enabled, or has broken tag support.
6305 */
6306 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6307 }
6309 if (async_update == FALSE) {
6310 /*
6311 * Perform sanity checking against what the
6312 * controller and device can do.
6313 */
6314 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6315 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6316 cur_cts.type = cts->type;
6317 xpt_action((union ccb *)&cur_cts);
6318 if ((cur_cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
6319 return;
6320 }
6321 cur_scsi = &cur_cts.proto_specific.scsi;
6322 if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
6323 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6324 scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
6325 }
6326 if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
6327 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6328 }
6330 /* SPI specific sanity checking */
6331 if (cts->transport == XPORT_SPI && async_update == FALSE) {
6332 u_int spi3caps;
6333 struct ccb_trans_settings_spi *spi;
6334 struct ccb_trans_settings_spi *cur_spi;
6336 spi = &cts->xport_specific.spi;
6338 cur_spi = &cur_cts.xport_specific.spi;
6340 /* Fill in any gaps in what the user gave us */
6341 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6342 spi->sync_period = cur_spi->sync_period;
6343 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6344 spi->sync_period = 0;
6345 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6346 spi->sync_offset = cur_spi->sync_offset;
6347 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6348 spi->sync_offset = 0;
6349 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6350 spi->ppr_options = cur_spi->ppr_options;
6351 if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6352 spi->ppr_options = 0;
6353 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6354 spi->bus_width = cur_spi->bus_width;
6355 if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6356 spi->bus_width = 0;
6357 if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
6358 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6359 spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
6360 }
6361 if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
6362 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6363 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6364 && (inq_data->flags & SID_Sync) == 0
6365 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6366 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6367 || (spi->sync_offset == 0)
6368 || (spi->sync_period == 0)) {
6369 /* Force async */
6370 spi->sync_period = 0;
6371 spi->sync_offset = 0;
6372 }
6374 switch (spi->bus_width) {
6375 case MSG_EXT_WDTR_BUS_32_BIT:
6376 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6377 || (inq_data->flags & SID_WBus32) != 0
6378 || cts->type == CTS_TYPE_USER_SETTINGS)
6379 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6380 break;
6381 /* Fall Through to 16-bit */
6382 case MSG_EXT_WDTR_BUS_16_BIT:
6383 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6384 || (inq_data->flags & SID_WBus16) != 0
6385 || cts->type == CTS_TYPE_USER_SETTINGS)
6386 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6387 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6388 break;
6389 }
6390 /* Fall Through to 8-bit */
6391 default: /* New bus width?? */
6392 case MSG_EXT_WDTR_BUS_8_BIT:
6393 /* All targets can do this */
6394 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6395 break;
6396 }
6398 spi3caps = cpi.xport_specific.spi.ppr_options;
6399 if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6400 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6401 spi3caps &= inq_data->spi3data;
6403 if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
6404 spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
6406 if ((spi3caps & SID_SPI_IUS) == 0)
6407 spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
6409 if ((spi3caps & SID_SPI_QAS) == 0)
6410 spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
6412 /* No SPI Transfer settings are allowed unless we are wide */
6413 if (spi->bus_width == 0)
6414 spi->ppr_options = 0;
6416 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
6417 /*
6418 * Can't tag queue without disconnection.
6419 */
6420 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6421 scsi->valid |= CTS_SCSI_VALID_TQ;
6422 }
6424 /*
6425 * If we are currently performing tagged transactions to
6426 * this device and want to change its negotiation parameters,
6427 * go non-tagged for a bit to give the controller a chance to
6428 * negotiate unhampered by tag messages.
6429 */
6430 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6431 && (device->inq_flags & SID_CmdQue) != 0
6432 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6433 && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
6434 CTS_SPI_VALID_SYNC_OFFSET|
6435 CTS_SPI_VALID_BUS_WIDTH)) != 0)
6436 xpt_toggle_tags(cts->ccb_h.path);
6437 }
6439 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6440 && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
6441 int device_tagenb;
6443 /*
6444 * If we are transitioning from tags to no-tags or
6445 * vice-versa, we need to carefully freeze and restart
6446 * the queue so that we don't overlap tagged and non-tagged
6447 * commands. We also temporarily stop tags if there is
6448 * a change in transfer negotiation settings to allow
6449 * "tag-less" negotiation.
6450 */
6451 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6452 || (device->inq_flags & SID_CmdQue) != 0)
6453 device_tagenb = TRUE;
6454 else
6455 device_tagenb = FALSE;
6457 if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6458 && device_tagenb == FALSE)
6459 || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
6460 && device_tagenb == TRUE)) {
6462 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
6463 /*
6464 * Delay change to use tags until after a
6465 * few commands have gone to this device so
6466 * the controller has time to perform transfer
6467 * negotiations without tagged messages getting
6468 * in the way.
6469 */
6470 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6471 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6472 } else {
6473 struct ccb_relsim crs;
6475 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6476 device->inq_flags &= ~SID_CmdQue;
6477 xpt_dev_ccbq_resize(cts->ccb_h.path,
6478 sim->max_dev_openings);
6479 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6480 device->tag_delay_count = 0;
6482 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6483 /*priority*/1);
6484 crs.ccb_h.func_code = XPT_REL_SIMQ;
6485 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6486 crs.openings
6487 = crs.release_timeout
6488 = crs.qfrozen_cnt
6489 = 0;
6490 xpt_action((union ccb *)&crs);
6491 }
6492 }
6493 }
6494 if (async_update == FALSE)
6495 (*(sim->sim_action))(sim, (union ccb *)cts);
6496 }
6498 #else /* CAM_NEW_TRAN_CODE */
6500 static void
6501 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6502 int async_update)
6503 {
6504 struct cam_sim *sim;
6505 int qfrozen;
6507 sim = cts->ccb_h.path->bus->sim;
6508 if (async_update == FALSE) {
6509 struct scsi_inquiry_data *inq_data;
6510 struct ccb_pathinq cpi;
6511 struct ccb_trans_settings cur_cts;
6513 if (device == NULL) {
6514 cts->ccb_h.status = CAM_PATH_INVALID;
6515 xpt_done((union ccb *)cts);
6516 return;
6517 }
6519 /*
6520 * Perform sanity checking against what the
6521 * controller and device can do.
6522 */
6523 xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6524 cpi.ccb_h.func_code = XPT_PATH_INQ;
6525 xpt_action((union ccb *)&cpi);
6526 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6527 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6528 cur_cts.flags = CCB_TRANS_CURRENT_SETTINGS;
6529 xpt_action((union ccb *)&cur_cts);
6530 inq_data = &device->inq_data;
6532 /* Fill in any gaps in what the user gave us */
6533 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
6534 cts->sync_period = cur_cts.sync_period;
6535 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
6536 cts->sync_offset = cur_cts.sync_offset;
6537 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) == 0)
6538 cts->bus_width = cur_cts.bus_width;
6539 if ((cts->valid & CCB_TRANS_DISC_VALID) == 0) {
6540 cts->flags &= ~CCB_TRANS_DISC_ENB;
6541 cts->flags |= cur_cts.flags & CCB_TRANS_DISC_ENB;
6542 }
6543 if ((cts->valid & CCB_TRANS_TQ_VALID) == 0) {
6544 cts->flags &= ~CCB_TRANS_TAG_ENB;
6545 cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB;
6546 }
6548 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6549 && (inq_data->flags & SID_Sync) == 0)
6550 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6551 || (cts->sync_offset == 0)
6552 || (cts->sync_period == 0)) {
6553 /* Force async */
6554 cts->sync_period = 0;
6555 cts->sync_offset = 0;
6556 } else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0) {
6558 if ((inq_data->spi3data & SID_SPI_CLOCK_DT) == 0
6559 && cts->sync_period <= 0x9) {
6560 /*
6561 * Don't allow DT transmission rates if the
6562 * device does not support it.
6563 */
6564 cts->sync_period = 0xa;
6565 }
6566 if ((inq_data->spi3data & SID_SPI_IUS) == 0
6567 && cts->sync_period <= 0x8) {
6568 /*
6569 * Don't allow PACE transmission rates
6570 * if the device does not support packetized
6571 * transfers.
6572 */
6573 cts->sync_period = 0x9;
6574 }
6575 }
6577 switch (cts->bus_width) {
6578 case MSG_EXT_WDTR_BUS_32_BIT:
6579 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6580 || (inq_data->flags & SID_WBus32) != 0)
6581 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6582 break;
6583 /* Fall Through to 16-bit */
6584 case MSG_EXT_WDTR_BUS_16_BIT:
6585 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6586 || (inq_data->flags & SID_WBus16) != 0)
6587 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6588 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6589 break;
6590 }
6591 /* Fall Through to 8-bit */
6592 default: /* New bus width?? */
6593 case MSG_EXT_WDTR_BUS_8_BIT:
6594 /* All targets can do this */
6595 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6596 break;
6597 }
6599 if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
6600 /*
6601 * Can't tag queue without disconnection.
6602 */
6603 cts->flags &= ~CCB_TRANS_TAG_ENB;
6604 cts->valid |= CCB_TRANS_TQ_VALID;
6605 }
6607 if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6608 || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
6609 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6610 || (device->quirk->mintags == 0)) {
6611 /*
6612 * Can't tag on hardware that doesn't support tags,
6613 * doesn't have it enabled, or has broken tag support.
6614 */
6615 cts->flags &= ~CCB_TRANS_TAG_ENB;
6616 }
6617 }
6619 qfrozen = FALSE;
6620 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
6621 int device_tagenb;
6623 /*
6624 * If we are transitioning from tags to no-tags or
6625 * vice-versa, we need to carefully freeze and restart
6626 * the queue so that we don't overlap tagged and non-tagged
6627 * commands. We also temporarily stop tags if there is
6628 * a change in transfer negotiation settings to allow
6629 * "tag-less" negotiation.
6630 */
6631 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6632 || (device->inq_flags & SID_CmdQue) != 0)
6633 device_tagenb = TRUE;
6634 else
6635 device_tagenb = FALSE;
6637 if (((cts->flags & CCB_TRANS_TAG_ENB) != 0
6638 && device_tagenb == FALSE)
6639 || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
6640 && device_tagenb == TRUE)) {
6642 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
6643 /*
6644 * Delay change to use tags until after a
6645 * few commands have gone to this device so
6646 * the controller has time to perform transfer
6647 * negotiations without tagged messages getting
6648 * in the way.
6649 */
6650 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6651 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6652 } else {
6653 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6654 qfrozen = TRUE;
6655 device->inq_flags &= ~SID_CmdQue;
6656 xpt_dev_ccbq_resize(cts->ccb_h.path,
6657 sim->max_dev_openings);
6658 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6659 device->tag_delay_count = 0;
6660 }
6661 }
6662 }
6664 if (async_update == FALSE) {
6665 /*
6666 * If we are currently performing tagged transactions to
6667 * this device and want to change its negotiation parameters,
6668 * go non-tagged for a bit to give the controller a chance to
6669 * negotiate unhampered by tag messages.
6670 */
6671 if ((device->inq_flags & SID_CmdQue) != 0
6672 && (cts->flags & (CCB_TRANS_SYNC_RATE_VALID|
6673 CCB_TRANS_SYNC_OFFSET_VALID|
6674 CCB_TRANS_BUS_WIDTH_VALID)) != 0)
6675 xpt_toggle_tags(cts->ccb_h.path);
6677 (*(sim->sim_action))(sim, (union ccb *)cts);
6678 }
6680 if (qfrozen) {
6681 struct ccb_relsim crs;
6683 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6684 /*priority*/1);
6685 crs.ccb_h.func_code = XPT_REL_SIMQ;
6686 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6687 crs.openings
6688 = crs.release_timeout
6689 = crs.qfrozen_cnt
6690 = 0;
6691 xpt_action((union ccb *)&crs);
6692 }
6693 }
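/*
 * Both variants of xpt_set_transfer_settings() merge caller-supplied
 * settings with the current ones through valid-bit masks, so any field
 * the caller left unset silently inherits the device's current value.
 * The core pattern, reduced to one field (demo names only):
 */
#if 0
#define DEMO_VALID_SYNC_RATE 0x01

struct demo_settings {
	unsigned int valid;	/* which fields the caller filled in */
	int sync_period;
};

static void
demo_fill_gaps(struct demo_settings *want, const struct demo_settings *cur)
{
	if ((want->valid & DEMO_VALID_SYNC_RATE) == 0)
		want->sync_period = cur->sync_period;	/* inherit current */
}
#endif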
6696 #endif /* CAM_NEW_TRAN_CODE */
6698 static void
6699 xpt_toggle_tags(struct cam_path *path)
6700 {
6701 struct cam_ed *dev;
6703 /*
6704 * Give controllers a chance to renegotiate
6705 * before starting tag operations. We
6706 * "toggle" tagged queuing off then on
6707 * which causes the tag enable command delay
6708 * counter to come into effect.
6709 */
6710 dev = path->device;
6711 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6712 || ((dev->inq_flags & SID_CmdQue) != 0
6713 && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
6714 struct ccb_trans_settings cts;
6716 xpt_setup_ccb(&cts.ccb_h, path, 1);
6717 #ifdef CAM_NEW_TRAN_CODE
6718 cts.protocol = PROTO_SCSI;
6719 cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
6720 cts.transport = XPORT_UNSPECIFIED;
6721 cts.transport_version = XPORT_VERSION_UNSPECIFIED;
6722 cts.proto_specific.scsi.flags = 0;
6723 cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
6724 #else /* CAM_NEW_TRAN_CODE */
6725 cts.flags = 0;
6726 cts.valid = CCB_TRANS_TQ_VALID;
6727 #endif /* CAM_NEW_TRAN_CODE */
6728 xpt_set_transfer_settings(&cts, path->device,
6729 /*async_update*/TRUE);
6730 #ifdef CAM_NEW_TRAN_CODE
6731 cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
6732 #else /* CAM_NEW_TRAN_CODE */
6733 cts.flags = CCB_TRANS_TAG_ENB;
6734 #endif /* CAM_NEW_TRAN_CODE */
6735 xpt_set_transfer_settings(&cts, path->device,
6736 /*async_update*/TRUE);
6737 }
6738 }
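/*
 * The off/on toggle above works because the "on" transition arms a
 * countdown (CAM_DEV_TAG_AFTER_COUNT plus tag_delay_count): tags are only
 * re-enabled, via xpt_start_tags(), once that many commands have
 * completed untagged.  A minimal userland model of that counter; all
 * names here are hypothetical:
 */
#if 0
#include <stdio.h>

#define DEMO_TAG_DELAY_COUNT 5

struct demo_dev {
	int tags_pending;	/* models CAM_DEV_TAG_AFTER_COUNT */
	int tag_delay_count;
	int tags_enabled;
};

static void
demo_toggle_tags(struct demo_dev *dev)
{
	dev->tags_enabled = 0;			/* "off": negotiate untagged */
	dev->tags_pending = 1;			/* "on": arm the countdown */
	dev->tag_delay_count = DEMO_TAG_DELAY_COUNT;
}

static void
demo_command_done(struct demo_dev *dev)
{
	if (dev->tags_pending && --dev->tag_delay_count == 0) {
		dev->tags_pending = 0;
		dev->tags_enabled = 1;		/* xpt_start_tags() analogue */
	}
}

int
main(void)
{
	struct demo_dev dev = { 0, 0, 1 };
	int i;

	demo_toggle_tags(&dev);
	for (i = 0; i < DEMO_TAG_DELAY_COUNT; i++)
		demo_command_done(&dev);
	printf("tags_enabled = %d\n", dev.tags_enabled);	/* 1 */
	return (0);
}
#endif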
6740 static void
6741 xpt_start_tags(struct cam_path *path)
6742 {
6743 struct ccb_relsim crs;
6744 struct cam_ed *device;
6745 struct cam_sim *sim;
6746 int newopenings;
6748 device = path->device;
6749 sim = path->bus->sim;
6750 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6751 xpt_freeze_devq(path, /*count*/1);
6752 device->inq_flags |= SID_CmdQue;
6753 if (device->tag_saved_openings != 0)
6754 newopenings = device->tag_saved_openings;
6755 else
6756 newopenings = min(device->quirk->maxtags,
6757 sim->max_tagged_dev_openings);
6758 xpt_dev_ccbq_resize(path, newopenings);
6759 xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
6760 crs.ccb_h.func_code = XPT_REL_SIMQ;
6761 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6762 crs.openings
6763 = crs.release_timeout
6764 = crs.qfrozen_cnt
6765 = 0;
6766 xpt_action((union ccb *)&crs);
6767 }
6769 static int busses_to_config;
6770 static int busses_to_reset;
6772 static int
6773 xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
6774 {
6775 if (bus->path_id != CAM_XPT_PATH_ID) {
6776 struct cam_path path;
6777 struct ccb_pathinq cpi;
6778 int can_negotiate;
6780 busses_to_config++;
6781 xpt_compile_path(&path, NULL, bus->path_id,
6782 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
6783 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
6784 cpi.ccb_h.func_code = XPT_PATH_INQ;
6785 xpt_action((union ccb *)&cpi);
6786 can_negotiate = cpi.hba_inquiry;
6787 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6788 if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
6789 && can_negotiate)
6790 busses_to_reset++;
6791 xpt_release_path(&path);
6792 }
6794 return(1);
6795 }
6797 static int
6798 xptconfigfunc(struct cam_eb *bus, void *arg)
6799 {
6800 struct cam_path *path;
6801 union ccb *work_ccb;
6803 if (bus->path_id != CAM_XPT_PATH_ID) {
6804 cam_status status;
6805 int can_negotiate;
6807 work_ccb = xpt_alloc_ccb();
6808 if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
6809 CAM_TARGET_WILDCARD,
6810 CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
6811 kprintf("xptconfigfunc: xpt_create_path failed with "
6812 "status %#x for bus %d\n", status, bus->path_id);
6813 kprintf("xptconfigfunc: halting bus configuration\n");
6814 xpt_free_ccb(work_ccb);
6815 busses_to_config--;
6816 xpt_finishconfig(xpt_periph, NULL);
6817 return(0);
6818 }
6819 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6820 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
6821 xpt_action(work_ccb);
6822 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
6823 kprintf("xptconfigfunc: CPI failed on bus %d "
6824 "with status %d\n", bus->path_id,
6825 work_ccb->ccb_h.status);
6826 xpt_finishconfig(xpt_periph, work_ccb);
6827 return(1);
6828 }
6830 can_negotiate = work_ccb->cpi.hba_inquiry;
6831 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6832 if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
6833 && (can_negotiate != 0)) {
6834 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6835 work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6836 work_ccb->ccb_h.cbfcnp = NULL;
6837 CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
6838 ("Resetting Bus\n"));
6839 xpt_action(work_ccb);
6840 xpt_finishconfig(xpt_periph, work_ccb);
6841 } else {
6842 /* Act as though we performed a successful BUS RESET */
6843 work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6844 xpt_finishconfig(xpt_periph, work_ccb);
6845 }
6846 }
6848 return(1);
6849 }
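/*
 * Both bus-walk callbacks above gate the initial bus reset on the same
 * test: the SIM must permit bus resets (PIM_NOBUSRESET clear) and must be
 * able to negotiate wide or synchronous transfers.  The test as a
 * standalone boolean; the bit values are demo stand-ins, the real ones
 * come from cam_ccb.h:
 */
#if 0
#include <stdio.h>

#define DEMO_PI_SDTR_ABLE	0x10
#define DEMO_PI_WIDE_16		0x20
#define DEMO_PI_WIDE_32		0x40
#define DEMO_PIM_NOBUSRESET	0x01

static int
demo_should_reset(int hba_inquiry, int hba_misc)
{
	int can_negotiate = hba_inquiry &
	    (DEMO_PI_WIDE_32 | DEMO_PI_WIDE_16 | DEMO_PI_SDTR_ABLE);

	return ((hba_misc & DEMO_PIM_NOBUSRESET) == 0 && can_negotiate != 0);
}

int
main(void)
{
	printf("%d\n", demo_should_reset(DEMO_PI_WIDE_16, 0));	/* 1 */
	printf("%d\n", demo_should_reset(0, 0));		/* 0 */
	return (0);
}
#endif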
6851 static void
6852 xpt_config(void *arg)
6853 {
6854 /*
6855 * Now that interrupts are enabled, go find our devices
6856 */
6858 #ifdef CAMDEBUG
6859 /* Setup debugging flags and path */
6860 #ifdef CAM_DEBUG_FLAGS
6861 cam_dflags = CAM_DEBUG_FLAGS;
6862 #else /* !CAM_DEBUG_FLAGS */
6863 cam_dflags = CAM_DEBUG_NONE;
6864 #endif /* CAM_DEBUG_FLAGS */
6865 #ifdef CAM_DEBUG_BUS
6866 if (cam_dflags != CAM_DEBUG_NONE) {
6867 if (xpt_create_path(&cam_dpath, xpt_periph,
6868 CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
6869 CAM_DEBUG_LUN) != CAM_REQ_CMP) {
6870 kprintf("xpt_config: xpt_create_path() failed for debug"
6871 " target %d:%d:%d, debugging disabled\n",
6872 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
6873 cam_dflags = CAM_DEBUG_NONE;
6874 }
6875 } else
6876 cam_dpath = NULL;
6877 #else /* !CAM_DEBUG_BUS */
6878 cam_dpath = NULL;
6879 #endif /* CAM_DEBUG_BUS */
6880 #endif /* CAMDEBUG */
6882 /*
6883 * Scan all installed busses.
6884 */
6885 xpt_for_all_busses(xptconfigbuscountfunc, NULL);
6887 if (busses_to_config == 0) {
6888 /* Call manually because we don't have any busses */
6889 xpt_finishconfig(xpt_periph, NULL);
6890 } else {
6891 if (busses_to_reset > 0 && scsi_delay >= 2000) {
6892 kprintf("Waiting %d seconds for SCSI "
6893 "devices to settle\n", scsi_delay/1000);
6894 }
6895 xpt_for_all_busses(xptconfigfunc, NULL);
6896 }
6897 }
6899 /*
6900 * If the given device only has one peripheral attached to it, and if that
6901 * peripheral is the passthrough driver, announce it. This ensures that the
6902 * user sees some sort of announcement for every peripheral in their system.
6903 */
6904 static int
6905 xptpassannouncefunc(struct cam_ed *device, void *arg)
6906 {
6907 struct cam_periph *periph;
6908 int i;
6910 for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
6911 periph = SLIST_NEXT(periph, periph_links), i++);
6913 periph = SLIST_FIRST(&device->periphs);
6914 if ((i == 1)
6915 && (strncmp(periph->periph_name, "pass", 4) == 0))
6916 xpt_announce_periph(periph, NULL);
6918 return(1);
6919 }
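/*
 * The empty-bodied for loop above is the stock SLIST counting idiom; the
 * function then announces only when exactly one peripheral exists and it
 * is the passthrough driver.  A self-contained version of the counting
 * idiom using demo types:
 */
#if 0
#include <stdio.h>
#include <sys/queue.h>

struct demo_periph {
	const char *name;
	SLIST_ENTRY(demo_periph) links;
};

SLIST_HEAD(demo_periph_list, demo_periph);

static int
demo_count(struct demo_periph_list *head)
{
	struct demo_periph *p;
	int i;

	for (p = SLIST_FIRST(head), i = 0; p != NULL;
	     p = SLIST_NEXT(p, links), i++);
	return (i);
}

int
main(void)
{
	struct demo_periph_list head = SLIST_HEAD_INITIALIZER(head);
	struct demo_periph pass = { "pass", { NULL } };

	SLIST_INSERT_HEAD(&head, &pass, links);
	printf("%d periph(s)\n", demo_count(&head));	/* 1 */
	return (0);
}
#endif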
6921 static void
6922 xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
6923 {
6924 struct periph_driver **p_drv;
6925 int i;
6927 if (done_ccb != NULL) {
6928 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
6929 ("xpt_finishconfig\n"));
6930 switch(done_ccb->ccb_h.func_code) {
6931 case XPT_RESET_BUS:
6932 if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
6933 done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
6934 done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
6935 done_ccb->crcn.flags = 0;
6936 xpt_action(done_ccb);
6937 return;
6938 }
6939 /* FALLTHROUGH */
6940 case XPT_SCAN_BUS:
6941 default:
6942 xpt_free_path(done_ccb->ccb_h.path);
6943 busses_to_config--;
6944 break;
6945 }
6946 }
6948 if (busses_to_config == 0) {
6949 /* Register all the peripheral drivers */
6950 /* XXX This will have to change when we have loadable modules */
6951 p_drv = periph_drivers;
6952 for (i = 0; p_drv[i] != NULL; i++) {
6953 (*p_drv[i]->init)();
6954 }
6956 /*
6957 * Check for devices with no "standard" peripheral driver
6958 * attached. For any devices like that, announce the
6959 * passthrough driver so the user will see something.
6960 */
6961 xpt_for_all_devices(xptpassannouncefunc, NULL);
6963 /* Release our hook so that the boot can continue. */
6964 config_intrhook_disestablish(xpt_config_hook);
6965 kfree(xpt_config_hook, M_TEMP);
6966 xpt_config_hook = NULL;
6967 }
6968 if (done_ccb != NULL)
6969 xpt_free_ccb(done_ccb);
6970 }
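/*
 * Note that xpt_finishconfig() chains its work through a single CCB: a
 * completed XPT_RESET_BUS is rewritten in place into an XPT_SCAN_BUS with
 * xpt_finishconfig() as its own completion callback, so the function runs
 * once per bus for the reset and again for the scan before the bus is
 * counted as configured.
 */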
6972 static void
6973 xptaction(struct cam_sim *sim, union ccb *work_ccb)
6974 {
6975 CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
6977 switch (work_ccb->ccb_h.func_code) {
6978 /* Common cases first */
6979 case XPT_PATH_INQ: /* Path routing inquiry */
6980 {
6981 struct ccb_pathinq *cpi;
6983 cpi = &work_ccb->cpi;
6984 cpi->version_num = 1; /* XXX??? */
6985 cpi->hba_inquiry = 0;
6986 cpi->target_sprt = 0;
6987 cpi->hba_misc = 0;
6988 cpi->hba_eng_cnt = 0;
6989 cpi->max_target = 0;
6990 cpi->max_lun = 0;
6991 cpi->initiator_id = 0;
6992 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
6993 strncpy(cpi->hba_vid, "", HBA_IDLEN);
6994 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
6995 cpi->unit_number = sim->unit_number;
6996 cpi->bus_id = sim->bus_id;
6997 cpi->base_transfer_speed = 0;
6998 #ifdef CAM_NEW_TRAN_CODE
6999 cpi->protocol = PROTO_UNSPECIFIED;
7000 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
7001 cpi->transport = XPORT_UNSPECIFIED;
7002 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
7003 #endif /* CAM_NEW_TRAN_CODE */
7004 cpi->ccb_h.status = CAM_REQ_CMP;
7005 xpt_done(work_ccb);
7006 break;
7007 }
7008 default:
7009 work_ccb->ccb_h.status = CAM_REQ_INVALID;
7010 xpt_done(work_ccb);
7011 break;
7012 }
7013 }
7015 /*
7016 * The xpt as a "controller" has no interrupt sources, so polling
7017 * is a no-op.
7018 */
7019 static void
7020 xptpoll(struct cam_sim *sim)
7021 {
7022 }
7024 /*
7025 * Should only be called by the machine interrupt dispatch routines,
7026 * so put these prototypes here instead of in the header.
7027 */
7029 static void
7030 swi_cambio(void *arg, void *frame)
7031 {
7032 camisr(&cam_bioq);
7033 }
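/*
 * swi_cambio() is the software-interrupt entry point: completed CCBs
 * queued on cam_bioq all funnel through camisr() below, which is why the
 * completion bookkeeping there can run as one critical section.
 */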
7035 static void
7036 camisr(cam_isrq_t *queue)
7037 {
7038 struct ccb_hdr *ccb_h;
7040 crit_enter();
7041 while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
7042 int runq;
7044 TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
7045 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
7046 splz();
7048 CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
7049 ("camisr\n"));
7051 runq = FALSE;
7053 if (ccb_h->flags & CAM_HIGH_POWER) {
7054 struct highpowerlist *hphead;
7055 struct cam_ed *device;
7056 union ccb *send_ccb;
7058 hphead = &highpowerq;
7060 send_ccb = (union ccb *)STAILQ_FIRST(hphead);
7062 /*
7063 * Increment the count since this command is done.
7064 */
7065 num_highpower++;
7067 /*
7068 * Any high powered commands queued up?
7069 */
7070 if (send_ccb != NULL) {
7071 device = send_ccb->ccb_h.path->device;
7073 STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
7075 xpt_release_devq(send_ccb->ccb_h.path,
7076 /*count*/1, /*runqueue*/TRUE);
7077 }
7078 }
7079 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
7080 struct cam_ed *dev;
7082 dev = ccb_h->path->device;
7084 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
7086 if (!SIM_DEAD(ccb_h->path->bus->sim)) {
7087 ccb_h->path->bus->sim->devq->send_active--;
7088 ccb_h->path->bus->sim->devq->send_openings++;
7089 }
7091 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
7092 && (ccb_h->status & CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
7093 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
7094 && (dev->ccbq.dev_active == 0))) {
7096 xpt_release_devq(ccb_h->path, /*count*/1,
7097 /*run_queue*/TRUE);
7098 }
7100 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
7101 && (--dev->tag_delay_count == 0))
7102 xpt_start_tags(ccb_h->path);
7104 if ((dev->ccbq.queue.entries > 0)
7105 && (dev->qfrozen_cnt == 0)
7106 && (device_is_send_queued(dev) == 0)) {
7107 runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
7108 dev);
7109 }
7110 }
7112 if (ccb_h->status & CAM_RELEASE_SIMQ) {
7113 xpt_release_simq(ccb_h->path->bus->sim,
7114 /*run_queue*/TRUE);
7115 ccb_h->status &= ~CAM_RELEASE_SIMQ;
7116 runq = FALSE;
7117 }
7119 if ((ccb_h->flags & CAM_DEV_QFRZDIS)
7120 && (ccb_h->status & CAM_DEV_QFRZN)) {
7121 xpt_release_devq(ccb_h->path, /*count*/1,
7122 /*run_queue*/TRUE);
7123 ccb_h->status &= ~CAM_DEV_QFRZN;
7124 } else if (runq) {
7125 xpt_run_dev_sendq(ccb_h->path->bus);
7126 }
7128 /* Call the peripheral driver's callback */
7129 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
7130 }
7131 crit_exit();
7132 }
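/*
 * num_highpower is a plain admission counter: issuing a high-power
 * command decrements it (the CCB is queued on highpowerq when it reaches
 * zero), and the completion path above increments it and releases one
 * queued command.  A userland model of that budget; the names are
 * hypothetical:
 */
#if 0
#include <stdio.h>

#define DEMO_MAX_HIGHPOWER 4	/* mirrors CAM_MAX_HIGHPOWER */

static int demo_budget = DEMO_MAX_HIGHPOWER;
static int demo_waiters;

static int
demo_start_highpower(void)
{
	if (demo_budget <= 0) {
		demo_waiters++;		/* would be queued on highpowerq */
		return (0);
	}
	demo_budget--;
	return (1);
}

static void
demo_done_highpower(void)
{
	demo_budget++;			/* camisr(): num_highpower++ */
	if (demo_waiters > 0) {
		demo_waiters--;		/* release one queued command */
		demo_budget--;
	}
}

int
main(void)
{
	int i, started = 0;

	for (i = 0; i < 6; i++)
		started += demo_start_highpower();
	printf("started %d, queued %d\n", started, demo_waiters);	/* 4, 2 */
	demo_done_highpower();
	printf("queued now %d\n", demo_waiters);			/* 1 */
	return (0);
}
#endif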
7134 static void
7135 dead_sim_action(struct cam_sim *sim, union ccb *ccb)
7136 {
7138 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
7139 xpt_done(ccb);
7140 }
7142 static void
7143 dead_sim_poll(struct cam_sim *sim)