Fix numerous compiler warnings and format conversion specifiers.
1 /*
2 * Implementation of the Common Access Method Transport (XPT) layer.
4 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
5 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
6 * All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
29 * $FreeBSD: src/sys/cam/cam_xpt.c,v 1.80.2.18 2002/12/09 17:31:55 gibbs Exp $
30 * $DragonFly: src/sys/bus/cam/cam_xpt.c,v 1.68 2008/08/23 17:13:31 pavalos Exp $
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/types.h>
35 #include <sys/malloc.h>
36 #include <sys/kernel.h>
37 #include <sys/time.h>
38 #include <sys/conf.h>
39 #include <sys/device.h>
40 #include <sys/fcntl.h>
41 #include <sys/md5.h>
42 #include <sys/devicestat.h>
43 #include <sys/interrupt.h>
44 #include <sys/sbuf.h>
45 #include <sys/taskqueue.h>
46 #include <sys/bus.h>
47 #include <sys/thread.h>
48 #include <sys/lock.h>
49 #include <sys/spinlock.h>
50 #include <sys/thread2.h>
51 #include <sys/spinlock2.h>
53 #include <machine/clock.h>
54 #include <machine/stdarg.h>
56 #include "cam.h"
57 #include "cam_ccb.h"
58 #include "cam_periph.h"
59 #include "cam_sim.h"
60 #include "cam_xpt.h"
61 #include "cam_xpt_sim.h"
62 #include "cam_xpt_periph.h"
63 #include "cam_debug.h"
65 #include "scsi/scsi_all.h"
66 #include "scsi/scsi_message.h"
67 #include "scsi/scsi_pass.h"
68 #include <sys/kthread.h>
69 #include "opt_cam.h"
71 /* Datastructures internal to the xpt layer */
72 MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
74 /* Object for deferring XPT actions to a taskqueue */
75 struct xpt_task {
76 struct task task;
77 void *data1;
78 uintptr_t data2;
82 * Definition of an async handler callback block. These are used to add
83 * SIMs and peripherals to the async callback lists.
85 struct async_node {
86 SLIST_ENTRY(async_node) links;
87 u_int32_t event_enable; /* Async Event enables */
88 void (*callback)(void *arg, u_int32_t code,
89 struct cam_path *path, void *args);
90 void *callback_arg;
93 SLIST_HEAD(async_list, async_node);
94 SLIST_HEAD(periph_list, cam_periph);
97 * This is the maximum number of high powered commands (e.g. start unit)
98 * that can be outstanding at a particular time.
100 #ifndef CAM_MAX_HIGHPOWER
101 #define CAM_MAX_HIGHPOWER 4
102 #endif
105 * Structure for queueing a device in a run queue.
106 * There is one run queue for allocating new ccbs,
107 * and another for sending ccbs to the controller.
109 struct cam_ed_qinfo {
110 cam_pinfo pinfo;
111 struct cam_ed *device;
115 * The CAM EDT (Existing Device Table) contains the device information for
116 * all devices for all busses in the system. The table contains a
117 * cam_ed structure for each device on the bus.
119 struct cam_ed {
120 TAILQ_ENTRY(cam_ed) links;
121 struct cam_ed_qinfo alloc_ccb_entry;
122 struct cam_ed_qinfo send_ccb_entry;
123 struct cam_et *target;
124 struct cam_sim *sim;
125 lun_id_t lun_id;
126 struct camq drvq; /*
127 * Queue of type drivers wanting to do
128 * work on this device.
130 struct cam_ccbq ccbq; /* Queue of pending ccbs */
131 struct async_list asyncs; /* Async callback info for this B/T/L */
132 struct periph_list periphs; /* All attached devices */
133 u_int generation; /* Generation number */
134 struct cam_periph *owner; /* Peripheral driver's ownership tag */
135 struct xpt_quirk_entry *quirk; /* Oddities about this device */
136 /* Storage for the inquiry data */
137 cam_proto protocol;
138 u_int protocol_version;
139 cam_xport transport;
140 u_int transport_version;
141 struct scsi_inquiry_data inq_data;
142 u_int8_t inq_flags; /*
143 * Current settings for inquiry flags.
144 * This allows us to override settings
145 * like disconnection and tagged
146 * queuing for a device.
148 u_int8_t queue_flags; /* Queue flags from the control page */
149 u_int8_t serial_num_len;
150 u_int8_t *serial_num;
151 u_int32_t qfrozen_cnt;
152 u_int32_t flags;
153 #define CAM_DEV_UNCONFIGURED 0x01
154 #define CAM_DEV_REL_TIMEOUT_PENDING 0x02
155 #define CAM_DEV_REL_ON_COMPLETE 0x04
156 #define CAM_DEV_REL_ON_QUEUE_EMPTY 0x08
157 #define CAM_DEV_RESIZE_QUEUE_NEEDED 0x10
158 #define CAM_DEV_TAG_AFTER_COUNT 0x20
159 #define CAM_DEV_INQUIRY_DATA_VALID 0x40
160 #define CAM_DEV_IN_DV 0x80
161 #define CAM_DEV_DV_HIT_BOTTOM 0x100
162 u_int32_t tag_delay_count;
163 #define CAM_TAG_DELAY_COUNT 5
164 u_int32_t tag_saved_openings;
165 u_int32_t refcount;
166 struct callout callout;
170 * Each target is represented by an ET (Existing Target). These
171 * entries are created when a target is successfully probed with an
172 * identify, and removed when a device fails to respond after a number
173 * of retries, or a bus rescan finds the device missing.
175 struct cam_et {
176 TAILQ_HEAD(, cam_ed) ed_entries;
177 TAILQ_ENTRY(cam_et) links;
178 struct cam_eb *bus;
179 target_id_t target_id;
180 u_int32_t refcount;
181 u_int generation;
182 struct timeval last_reset; /* uptime of last reset */
186 * Each bus is represented by an EB (Existing Bus). These entries
187 * are created by calls to xpt_bus_register and deleted by calls to
188 * xpt_bus_deregister.
190 struct cam_eb {
191 TAILQ_HEAD(, cam_et) et_entries;
192 TAILQ_ENTRY(cam_eb) links;
193 path_id_t path_id;
194 struct cam_sim *sim;
195 struct timeval last_reset; /* uptime of last reset */
196 u_int32_t flags;
197 #define CAM_EB_RUNQ_SCHEDULED 0x01
198 u_int32_t refcount;
199 u_int generation;
202 struct cam_path {
203 struct cam_periph *periph;
204 struct cam_eb *bus;
205 struct cam_et *target;
206 struct cam_ed *device;
209 struct xpt_quirk_entry {
210 struct scsi_inquiry_pattern inq_pat;
211 u_int8_t quirks;
212 #define CAM_QUIRK_NOLUNS 0x01
213 #define CAM_QUIRK_NOSERIAL 0x02
214 #define CAM_QUIRK_HILUNS 0x04
215 #define CAM_QUIRK_NOHILUNS 0x08
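	/*
	 * Bounds on the number of tagged openings to use for a device
	 * matching this entry; table entries listing 0 for both
	 * effectively leave tagged queuing disabled for that device.
	 */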
216 u_int mintags;
217 u_int maxtags;
220 static int cam_srch_hi = 0;
221 TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
222 static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
223 SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
224 sysctl_cam_search_luns, "I",
225 "allow search above LUN 7 for SCSI3 and greater devices");
227 #define CAM_SCSI2_MAXLUN 8
229 * If we're not quirked to search <= the first 8 luns
230 * and we are either quirked to search above lun 8,
231 * or we're > SCSI-2 and we've enabled hilun searching,
232 * or we're > SCSI-2 and the last lun was a success,
233 * we can look for luns above lun 8.
235 #define CAN_SRCH_HI_SPARSE(dv) \
236 (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0) \
237 && ((dv->quirk->quirks & CAM_QUIRK_HILUNS) \
238 || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))
240 #define CAN_SRCH_HI_DENSE(dv) \
241 (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0) \
242 && ((dv->quirk->quirks & CAM_QUIRK_HILUNS) \
243 || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))
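/*
 * The two checks differ only in their final clause: the sparse form also
 * requires the kern.cam.cam_srch_hi tunable, while the dense form (the
 * "last lun was a success" case described above) needs only a SCSI-3 or
 * newer device.
 */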
245 typedef enum {
246 XPT_FLAG_OPEN = 0x01
247 } xpt_flags;
249 struct xpt_softc {
250 xpt_flags flags;
251 u_int32_t xpt_generation;
253 /* number of high powered commands that can go through right now */
254 STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;
255 int num_highpower;
257 /* queue for handling async rescan requests. */
258 TAILQ_HEAD(, ccb_hdr) ccb_scanq;
259 int ccb_scanq_running;
261 /* Registered busses */
262 TAILQ_HEAD(,cam_eb) xpt_busses;
263 u_int bus_generation;
265 struct intr_config_hook *xpt_config_hook;
267 struct lock xpt_topo_lock;
268 struct lock xpt_lock;
271 static const char quantum[] = "QUANTUM";
272 static const char sony[] = "SONY";
273 static const char west_digital[] = "WDIGTL";
274 static const char samsung[] = "SAMSUNG";
275 static const char seagate[] = "SEAGATE";
276 static const char microp[] = "MICROP";
278 static struct xpt_quirk_entry xpt_quirk_table[] =
281 /* Reports QUEUE FULL for temporary resource shortages */
282 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
283 /*quirks*/0, /*mintags*/24, /*maxtags*/32
286 /* Reports QUEUE FULL for temporary resource shortages */
287 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
288 /*quirks*/0, /*mintags*/24, /*maxtags*/32
291 /* Reports QUEUE FULL for temporary resource shortages */
292 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
293 /*quirks*/0, /*mintags*/24, /*maxtags*/32
296 /* Broken tagged queuing drive */
297 { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
298 /*quirks*/0, /*mintags*/0, /*maxtags*/0
301 /* Broken tagged queuing drive */
302 { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
303 /*quirks*/0, /*mintags*/0, /*maxtags*/0
306 /* Broken tagged queuing drive */
307 { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
308 /*quirks*/0, /*mintags*/0, /*maxtags*/0
312 * Unfortunately, the Quantum Atlas III has the same
313 * problem as the Atlas II drives above.
314 * Reported by: "Johan Granlund" <johan@granlund.nu>
316 * For future reference, the drive with the problem was:
317 * QUANTUM QM39100TD-SW N1B0
319 * It's possible that Quantum will fix the problem in later
320 * firmware revisions. If that happens, the quirk entry
321 * will need to be made specific to the firmware revisions
322 * with the problem.
325 /* Reports QUEUE FULL for temporary resource shortages */
326 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
327 /*quirks*/0, /*mintags*/24, /*maxtags*/32
331 * 18 Gig Atlas III, same problem as the 9G version.
332 * Reported by: Andre Albsmeier
333 * <andre.albsmeier@mchp.siemens.de>
335 * For future reference, the drive with the problem was:
336 * QUANTUM QM318000TD-S N491
338 /* Reports QUEUE FULL for temporary resource shortages */
339 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
340 /*quirks*/0, /*mintags*/24, /*maxtags*/32
344 * Broken tagged queuing drive
345 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
346 * and: Martin Renters <martin@tdc.on.ca>
348 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
349 /*quirks*/0, /*mintags*/0, /*maxtags*/0
352 * The Seagate Medalist Pro drives have very poor write
353 * performance with anything more than 2 tags.
355 * Reported by: Paul van der Zwan <paulz@trantor.xs4all.nl>
356 * Drive: <SEAGATE ST36530N 1444>
358 * Reported by: Jeremy Lea <reg@shale.csir.co.za>
359 * Drive: <SEAGATE ST34520W 1281>
361 * No one has actually reported that the 9G version
362 * (ST39140*) of the Medalist Pro has the same problem, but
363 * we're assuming that it does because the 4G and 6.5G
364 * versions of the drive are broken.
367 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
368 /*quirks*/0, /*mintags*/2, /*maxtags*/2
371 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
372 /*quirks*/0, /*mintags*/2, /*maxtags*/2
375 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
376 /*quirks*/0, /*mintags*/2, /*maxtags*/2
380 * Slow when tagged queueing is enabled. Write performance
381 * steadily drops off with more and more concurrent
382 * transactions. Best sequential write performance with
383 * tagged queueing turned off and write caching turned on.
385 * PR: kern/10398
386 * Submitted by: Hideaki Okada <hokada@isl.melco.co.jp>
387 * Drive: DCAS-34330 w/ "S65A" firmware.
389 * The drive with the problem had the "S65A" firmware
390 * revision, and has also been reported (by Stephen J.
391 * Roznowski <sjr@home.net>) for a drive with the "S61A"
392 * firmware revision.
394 * Although no one has reported problems with the 2 gig
395 * version of the DCAS drive, the assumption is that it
396 * has the same problems as the 4 gig version. Therefore
397 * this quirk entry disables tagged queueing for all
398 * DCAS drives.
400 { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
401 /*quirks*/0, /*mintags*/0, /*maxtags*/0
404 /* Broken tagged queuing drive */
405 { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
406 /*quirks*/0, /*mintags*/0, /*maxtags*/0
409 /* Broken tagged queuing drive */
410 { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
411 /*quirks*/0, /*mintags*/0, /*maxtags*/0
414 /* This device does not support LUNs other than LUN 0 */
415 { T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" },
416 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
420 * Broken tagged queuing drive.
421 * Submitted by:
422 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
423 * in PR kern/9535
425 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
426 /*quirks*/0, /*mintags*/0, /*maxtags*/0
430 * Slow when tagged queueing is enabled. (1.5MB/sec versus
431 * 8MB/sec.)
432 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
433 * Best performance with these drives is achieved with
434 * tagged queueing turned off, and write caching turned on.
436 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
437 /*quirks*/0, /*mintags*/0, /*maxtags*/0
441 * Slow when tagged queueing is enabled. (1.5MB/sec versus
442 * 8MB/sec.)
443 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
444 * Best performance with these drives is achieved with
445 * tagged queueing turned off, and write caching turned on.
447 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
448 /*quirks*/0, /*mintags*/0, /*maxtags*/0
452 * Doesn't handle queue full condition correctly,
453 * so we need to limit maxtags to what the device
454 * can handle instead of determining this automatically.
456 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
457 /*quirks*/0, /*mintags*/2, /*maxtags*/32
460 /* Really only one LUN */
461 { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
462 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
465 /* I can't believe we need a quirk for DPT volumes. */
466 { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
467 CAM_QUIRK_NOLUNS,
468 /*mintags*/0, /*maxtags*/255
472 * Many Sony CDROM drives don't like multi-LUN probing.
474 { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
475 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
479 * This drive doesn't like multiple LUN probing.
480 * Submitted by: Parag Patel <parag@cgt.com>
482 { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" },
483 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
486 { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
487 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
491 * The 8200 doesn't like multi-lun probing, and probably
492 * doesn't like serial number requests either.
495 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
496 "EXB-8200*", "*"
498 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
502 * Let's try the same as above, but for a drive that says
503 * it's an IPL-6860 but is actually an EXB 8200.
506 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
507 "IPL-6860*", "*"
509 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
513 * These Hitachi drives don't like multi-lun probing.
514 * The PR submitter has a DK319H, but says that the Linux
515 * kernel has a similar work-around for the DK312 and DK314,
516 * so all DK31* drives are quirked here.
517 * PR: misc/18793
518 * Submitted by: Paul Haddad <paul@pth.com>
520 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
521 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
525 * The Hitachi CJ series with J8A8 firmware apparently has
526 * problems with tagged commands.
527 * PR: 23536
528 * Reported by: amagai@nue.org
530 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
531 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
535 * These are the large storage arrays.
536 * Submitted by: William Carrel <william.carrel@infospace.com>
538 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
539 CAM_QUIRK_HILUNS, 2, 1024
543 * This old revision of the TDC3600 is also SCSI-1, and
544 * hangs upon serial number probing.
547 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
548 " TDC 3600", "U07:"
550 CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
554 * Would respond to all LUNs if asked for.
557 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
558 "CP150", "*"
560 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
564 * Would respond to all LUNs if asked for.
567 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
568 "96X2*", "*"
570 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
573 /* Submitted by: Matthew Dodd <winter@jurai.net> */
574 { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
575 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
578 /* Submitted by: Matthew Dodd <winter@jurai.net> */
579 { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
580 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
583 /* TeraSolutions special settings for TRC-22 RAID */
584 { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
585 /*quirks*/0, /*mintags*/55, /*maxtags*/255
588 /* Veritas Storage Appliance */
589 { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
590 CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
594 * Would respond to all LUNs. Device type and removable
595 * flag are jumper-selectable.
597 { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
598 "Tahiti 1", "*"
600 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
603 /* EasyRAID E5A aka. areca ARC-6010 */
604 { T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
605 CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
608 { T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" },
609 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
612 /* Default tagged queuing parameters for all devices */
614 T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
615 /*vendor*/"*", /*product*/"*", /*revision*/"*"
617 /*quirks*/0, /*mintags*/2, /*maxtags*/255
621 static const int xpt_quirk_table_size =
622 sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
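/*
 * Return type for the device-matching helpers below.  The low nibble
 * carries flag bits (DM_RET_COPY means "copy this node out to the user's
 * match buffer"), the high nibble carries the traversal action to take
 * (none, stop, descend, or error).
 */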
624 typedef enum {
625 DM_RET_COPY = 0x01,
626 DM_RET_FLAG_MASK = 0x0f,
627 DM_RET_NONE = 0x00,
628 DM_RET_STOP = 0x10,
629 DM_RET_DESCEND = 0x20,
630 DM_RET_ERROR = 0x30,
631 DM_RET_ACTION_MASK = 0xf0
632 } dev_match_ret;
634 typedef enum {
635 XPT_DEPTH_BUS,
636 XPT_DEPTH_TARGET,
637 XPT_DEPTH_DEVICE,
638 XPT_DEPTH_PERIPH
639 } xpt_traverse_depth;
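/*
 * Bundle describing one EDT traversal: "depth" selects how far down the
 * bus/target/device/periph hierarchy to walk, and tr_func is kept as a
 * void pointer so a single structure can carry any of the xpt_*func_t
 * callbacks declared below, which it is cast back to when invoked.
 */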
641 struct xpt_traverse_config {
642 xpt_traverse_depth depth;
643 void *tr_func;
644 void *tr_arg;
647 typedef int xpt_busfunc_t (struct cam_eb *bus, void *arg);
648 typedef int xpt_targetfunc_t (struct cam_et *target, void *arg);
649 typedef int xpt_devicefunc_t (struct cam_ed *device, void *arg);
650 typedef int xpt_periphfunc_t (struct cam_periph *periph, void *arg);
651 typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
653 /* Transport layer configuration information */
654 static struct xpt_softc xsoftc;
656 /* Queues for our software interrupt handler */
657 typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
658 typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
659 static cam_simq_t cam_simq;
660 static struct spinlock cam_simq_spin;
662 struct cam_periph *xpt_periph;
664 static periph_init_t xpt_periph_init;
666 static periph_init_t probe_periph_init;
668 static struct periph_driver xpt_driver =
670 xpt_periph_init, "xpt",
671 TAILQ_HEAD_INITIALIZER(xpt_driver.units)
674 static struct periph_driver probe_driver =
676 probe_periph_init, "probe",
677 TAILQ_HEAD_INITIALIZER(probe_driver.units)
680 PERIPHDRIVER_DECLARE(xpt, xpt_driver);
681 PERIPHDRIVER_DECLARE(probe, probe_driver);
683 #define XPT_CDEV_MAJOR 104
685 static d_open_t xptopen;
686 static d_close_t xptclose;
687 static d_ioctl_t xptioctl;
689 static struct dev_ops xpt_ops = {
690 { "xpt", XPT_CDEV_MAJOR, 0 },
691 .d_open = xptopen,
692 .d_close = xptclose,
693 .d_ioctl = xptioctl
696 static void dead_sim_action(struct cam_sim *sim, union ccb *ccb);
697 static void dead_sim_poll(struct cam_sim *sim);
699 /* Dummy SIM that is used when the real one has gone. */
700 static struct cam_sim cam_dead_sim;
701 static struct lock cam_dead_lock;
703 /* Storage for debugging datastructures */
704 #ifdef CAMDEBUG
705 struct cam_path *cam_dpath;
706 u_int32_t cam_dflags;
707 u_int32_t cam_debug_delay;
708 #endif
710 #if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
711 #error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
712 #endif
715 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
716 * enabled. Also, the user must have either none or all of CAM_DEBUG_BUS,
717 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
719 #if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
720 || defined(CAM_DEBUG_LUN)
721 #ifdef CAMDEBUG
722 #if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
723 || !defined(CAM_DEBUG_LUN)
724 #error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
725 and CAM_DEBUG_LUN"
726 #endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
727 #else /* !CAMDEBUG */
728 #error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
729 #endif /* CAMDEBUG */
730 #endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
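/*
 * For illustration only, a kernel-config fragment that satisfies the
 * rules above might look like the following; the bus/target/lun values
 * and the debug flag chosen here are placeholders, not recommendations:
 *
 *	options	CAMDEBUG
 *	options	CAM_DEBUG_BUS=0
 *	options	CAM_DEBUG_TARGET=1
 *	options	CAM_DEBUG_LUN=0
 *	options	CAM_DEBUG_FLAGS=CAM_DEBUG_INFO
 */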
732 /* Our boot-time initialization hook */
733 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
735 static moduledata_t cam_moduledata = {
736 "cam",
737 cam_module_event_handler,
738 NULL
741 static int xpt_init(void *);
743 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
744 MODULE_VERSION(cam, 1);
747 static cam_status xpt_compile_path(struct cam_path *new_path,
748 struct cam_periph *perph,
749 path_id_t path_id,
750 target_id_t target_id,
751 lun_id_t lun_id);
753 static void xpt_release_path(struct cam_path *path);
755 static void xpt_async_bcast(struct async_list *async_head,
756 u_int32_t async_code,
757 struct cam_path *path,
758 void *async_arg);
759 static void xpt_dev_async(u_int32_t async_code,
760 struct cam_eb *bus,
761 struct cam_et *target,
762 struct cam_ed *device,
763 void *async_arg);
764 static path_id_t xptnextfreepathid(void);
765 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
766 static union ccb *xpt_get_ccb(struct cam_ed *device);
767 static int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
768 u_int32_t new_priority);
769 static void xpt_run_dev_allocq(struct cam_eb *bus);
770 static void xpt_run_dev_sendq(struct cam_eb *bus);
771 static timeout_t xpt_release_devq_timeout;
772 static void xpt_release_bus(struct cam_eb *bus);
773 static void xpt_release_devq_device(struct cam_ed *dev, u_int count,
774 int run_queue);
775 static struct cam_et*
776 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
777 static void xpt_release_target(struct cam_eb *bus, struct cam_et *target);
778 static struct cam_ed*
779 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
780 lun_id_t lun_id);
781 static void xpt_release_device(struct cam_eb *bus, struct cam_et *target,
782 struct cam_ed *device);
783 static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
784 static struct cam_eb*
785 xpt_find_bus(path_id_t path_id);
786 static struct cam_et*
787 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
788 static struct cam_ed*
789 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
790 static void xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
791 static void xpt_scan_lun(struct cam_periph *periph,
792 struct cam_path *path, cam_flags flags,
793 union ccb *ccb);
794 static void xptscandone(struct cam_periph *periph, union ccb *done_ccb);
795 static xpt_busfunc_t xptconfigbuscountfunc;
796 static xpt_busfunc_t xptconfigfunc;
797 static void xpt_config(void *arg);
798 static xpt_devicefunc_t xptpassannouncefunc;
799 static void xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
800 static void xptaction(struct cam_sim *sim, union ccb *work_ccb);
801 static void xptpoll(struct cam_sim *sim);
802 static inthand2_t swi_cambio;
803 static void camisr(void *);
804 static void camisr_runqueue(struct cam_sim *);
805 static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns,
806 u_int num_patterns, struct cam_eb *bus);
807 static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns,
808 u_int num_patterns,
809 struct cam_ed *device);
810 static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns,
811 u_int num_patterns,
812 struct cam_periph *periph);
813 static xpt_busfunc_t xptedtbusfunc;
814 static xpt_targetfunc_t xptedttargetfunc;
815 static xpt_devicefunc_t xptedtdevicefunc;
816 static xpt_periphfunc_t xptedtperiphfunc;
817 static xpt_pdrvfunc_t xptplistpdrvfunc;
818 static xpt_periphfunc_t xptplistperiphfunc;
819 static int xptedtmatch(struct ccb_dev_match *cdm);
820 static int xptperiphlistmatch(struct ccb_dev_match *cdm);
821 static int xptbustraverse(struct cam_eb *start_bus,
822 xpt_busfunc_t *tr_func, void *arg);
823 static int xpttargettraverse(struct cam_eb *bus,
824 struct cam_et *start_target,
825 xpt_targetfunc_t *tr_func, void *arg);
826 static int xptdevicetraverse(struct cam_et *target,
827 struct cam_ed *start_device,
828 xpt_devicefunc_t *tr_func, void *arg);
829 static int xptperiphtraverse(struct cam_ed *device,
830 struct cam_periph *start_periph,
831 xpt_periphfunc_t *tr_func, void *arg);
832 static int xptpdrvtraverse(struct periph_driver **start_pdrv,
833 xpt_pdrvfunc_t *tr_func, void *arg);
834 static int xptpdperiphtraverse(struct periph_driver **pdrv,
835 struct cam_periph *start_periph,
836 xpt_periphfunc_t *tr_func,
837 void *arg);
838 static xpt_busfunc_t xptdefbusfunc;
839 static xpt_targetfunc_t xptdeftargetfunc;
840 static xpt_devicefunc_t xptdefdevicefunc;
841 static xpt_periphfunc_t xptdefperiphfunc;
842 static int xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
843 static int xpt_for_all_devices(xpt_devicefunc_t *tr_func,
844 void *arg);
845 static xpt_devicefunc_t xptsetasyncfunc;
846 static xpt_busfunc_t xptsetasyncbusfunc;
847 static cam_status xptregister(struct cam_periph *periph,
848 void *arg);
849 static cam_status proberegister(struct cam_periph *periph,
850 void *arg);
851 static void probeschedule(struct cam_periph *probe_periph);
852 static void probestart(struct cam_periph *periph, union ccb *start_ccb);
853 static void proberequestdefaultnegotiation(struct cam_periph *periph);
854 static int proberequestbackoff(struct cam_periph *periph,
855 struct cam_ed *device);
856 static void probedone(struct cam_periph *periph, union ccb *done_ccb);
857 static void probecleanup(struct cam_periph *periph);
858 static void xpt_find_quirk(struct cam_ed *device);
859 static void xpt_devise_transport(struct cam_path *path);
860 static void xpt_set_transfer_settings(struct ccb_trans_settings *cts,
861 struct cam_ed *device,
862 int async_update);
863 static void xpt_toggle_tags(struct cam_path *path);
864 static void xpt_start_tags(struct cam_path *path);
865 static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
866 struct cam_ed *dev);
867 static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
868 struct cam_ed *dev);
869 static __inline int periph_is_queued(struct cam_periph *periph);
870 static __inline int device_is_alloc_queued(struct cam_ed *device);
871 static __inline int device_is_send_queued(struct cam_ed *device);
872 static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
874 static __inline int
875 xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
877 int retval;
879 if (bus->sim->devq && dev->ccbq.devq_openings > 0) {
880 if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
881 cam_ccbq_resize(&dev->ccbq,
882 dev->ccbq.dev_openings
883 + dev->ccbq.dev_active);
884 dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
887 * The priority of a device waiting for CCB resources
888 * is that of the highest priority peripheral driver
889 * enqueued.
891 retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
892 &dev->alloc_ccb_entry.pinfo,
893 CAMQ_GET_HEAD(&dev->drvq)->priority);
894 } else {
895 retval = 0;
898 return (retval);
901 static __inline int
902 xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
904 int retval;
906 if (bus->sim->devq && dev->ccbq.dev_openings > 0) {
908 * The priority of a device waiting for controller
909 * resources is that of the highest priority CCB
910 * enqueued.
912 retval =
913 xpt_schedule_dev(&bus->sim->devq->send_queue,
914 &dev->send_ccb_entry.pinfo,
915 CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
916 } else {
917 retval = 0;
919 return (retval);
922 static __inline int
923 periph_is_queued(struct cam_periph *periph)
925 return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
928 static __inline int
929 device_is_alloc_queued(struct cam_ed *device)
931 return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
934 static __inline int
935 device_is_send_queued(struct cam_ed *device)
937 return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
940 static __inline int
941 dev_allocq_is_runnable(struct cam_devq *devq)
944 * Have work to do.
945 * Have space to do more work.
946 * Allowed to do work.
948 return ((devq->alloc_queue.qfrozen_cnt == 0)
949 && (devq->alloc_queue.entries > 0)
950 && (devq->alloc_openings > 0));
953 static void
954 xpt_periph_init(void)
956 make_dev(&xpt_ops, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
959 static void
960 probe_periph_init(void)
965 static void
966 xptdone(struct cam_periph *periph, union ccb *done_ccb)
968 /* Caller will release the CCB */
969 wakeup(&done_ccb->ccb_h.cbfcnp);
972 static int
973 xptopen(struct dev_open_args *ap)
975 cdev_t dev = ap->a_head.a_dev;
978 * Only allow read-write access.
980 if (((ap->a_oflags & FWRITE) == 0) || ((ap->a_oflags & FREAD) == 0))
981 return(EPERM);
984 * We don't allow nonblocking access.
986 if ((ap->a_oflags & O_NONBLOCK) != 0) {
987 kprintf("%s: can't do nonblocking access\n", devtoname(dev));
988 return(ENODEV);
991 /* Mark ourselves open */
992 lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
993 xsoftc.flags |= XPT_FLAG_OPEN;
994 lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
996 return(0);
999 static int
1000 xptclose(struct dev_close_args *ap)
1003 /* Mark ourselves closed */
1004 lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
1005 xsoftc.flags &= ~XPT_FLAG_OPEN;
1006 lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
1008 return(0);
1012 * Don't automatically grab the xpt softc lock here even though this is going
1013 * through the xpt device. The xpt device is really just a back door for
1014 * accessing other devices and SIMs, so the right thing to do is to grab
1015 * the appropriate SIM lock once the bus/SIM is located.
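/*
 * Illustrative userland sketch (not code in this file): the CAMIOCOMMAND
 * handler below services XPT_DEV_MATCH, which is how tools such as
 * camcontrol walk the device tree through /dev/xpt0.  Headers and error
 * handling are elided; "matches" is a caller-supplied result buffer.
 *
 *	union ccb ccb;
 *	struct dev_match_result matches[64];
 *	int fd = open("/dev/xpt0", O_RDWR);
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.path_id = CAM_XPT_PATH_ID;
 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
 *	ccb.ccb_h.func_code = XPT_DEV_MATCH;
 *	ccb.cdm.match_buf_len = sizeof(matches);
 *	ccb.cdm.matches = matches;
 *	ccb.cdm.num_matches = 0;
 *	ccb.cdm.num_patterns = 0;	(no patterns: match every node)
 *	ccb.cdm.pattern_buf_len = 0;
 *	ioctl(fd, CAMIOCOMMAND, &ccb);
 */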
1017 static int
1018 xptioctl(struct dev_ioctl_args *ap)
1020 int error;
1022 error = 0;
1024 switch(ap->a_cmd) {
1026 * For the transport layer CAMIOCOMMAND ioctl, we really only want
1027 * to accept CCB types that don't quite make sense to send through a
1028 * passthrough driver.
1030 case CAMIOCOMMAND: {
1031 union ccb *ccb;
1032 union ccb *inccb;
1033 struct cam_eb *bus;
1035 inccb = (union ccb *)ap->a_data;
1037 bus = xpt_find_bus(inccb->ccb_h.path_id);
1038 if (bus == NULL) {
1039 error = EINVAL;
1040 break;
1043 switch(inccb->ccb_h.func_code) {
1044 case XPT_SCAN_BUS:
1045 case XPT_RESET_BUS:
1046 if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
1047 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
1048 error = EINVAL;
1049 break;
1051 /* FALLTHROUGH */
1052 case XPT_PATH_INQ:
1053 case XPT_ENG_INQ:
1054 case XPT_SCAN_LUN:
1056 ccb = xpt_alloc_ccb();
1058 CAM_SIM_LOCK(bus->sim);
1061 * Create a path using the bus, target, and lun the
1062 * user passed in.
1064 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
1065 inccb->ccb_h.path_id,
1066 inccb->ccb_h.target_id,
1067 inccb->ccb_h.target_lun) !=
1068 CAM_REQ_CMP){
1069 error = EINVAL;
1070 CAM_SIM_UNLOCK(bus->sim);
1071 xpt_free_ccb(ccb);
1072 break;
1074 /* Ensure all of our fields are correct */
1075 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
1076 inccb->ccb_h.pinfo.priority);
1077 xpt_merge_ccb(ccb, inccb);
1078 ccb->ccb_h.cbfcnp = xptdone;
1079 cam_periph_runccb(ccb, NULL, 0, 0, NULL);
1080 bcopy(ccb, inccb, sizeof(union ccb));
1081 xpt_free_path(ccb->ccb_h.path);
1082 xpt_free_ccb(ccb);
1083 CAM_SIM_UNLOCK(bus->sim);
1084 break;
1086 case XPT_DEBUG: {
1087 union ccb ccb;
1090 * This is an immediate CCB, so it's okay to
1091 * allocate it on the stack.
1094 CAM_SIM_LOCK(bus->sim);
1097 * Create a path using the bus, target, and lun the
1098 * user passed in.
1100 if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
1101 inccb->ccb_h.path_id,
1102 inccb->ccb_h.target_id,
1103 inccb->ccb_h.target_lun) !=
1104 CAM_REQ_CMP){
1105 error = EINVAL;
1106 CAM_SIM_UNLOCK(bus->sim);
1107 break;
1109 /* Ensure all of our fields are correct */
1110 xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
1111 inccb->ccb_h.pinfo.priority);
1112 xpt_merge_ccb(&ccb, inccb);
1113 ccb.ccb_h.cbfcnp = xptdone;
1114 xpt_action(&ccb);
1115 CAM_SIM_UNLOCK(bus->sim);
1116 bcopy(&ccb, inccb, sizeof(union ccb));
1117 xpt_free_path(ccb.ccb_h.path);
1118 break;
1121 case XPT_DEV_MATCH: {
1122 struct cam_periph_map_info mapinfo;
1123 struct cam_path *old_path;
1126 * We can't deal with physical addresses for this
1127 * type of transaction.
1129 if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
1130 error = EINVAL;
1131 break;
1135 * Save this in case the caller had it set to
1136 * something in particular.
1138 old_path = inccb->ccb_h.path;
1141 * We really don't need a path for the matching
1142 * code. The path is needed because of the
1143 * debugging statements in xpt_action(). They
1144 * assume that the CCB has a valid path.
1146 inccb->ccb_h.path = xpt_periph->path;
1148 bzero(&mapinfo, sizeof(mapinfo));
1151 * Map the pattern and match buffers into kernel
1152 * virtual address space.
1154 error = cam_periph_mapmem(inccb, &mapinfo);
1156 if (error) {
1157 inccb->ccb_h.path = old_path;
1158 break;
1162 * This is an immediate CCB, we can send it on directly.
1164 xpt_action(inccb);
1167 * Map the buffers back into user space.
1169 cam_periph_unmapmem(inccb, &mapinfo);
1171 inccb->ccb_h.path = old_path;
1173 error = 0;
1174 break;
1176 default:
1177 error = ENOTSUP;
1178 break;
1180 xpt_release_bus(bus);
1181 break;
1184 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
1185 * with the peripheral driver name and unit name filled in. The other
1186 * fields don't really matter as input. The passthrough driver name
1187 * ("pass"), and unit number are passed back in the ccb. The current
1188 * device generation number, the index into the device peripheral
1189 * driver list, and the status are also passed back. Note that
1190 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
1191 * we never return a status of CAM_GDEVLIST_LIST_CHANGED. It is
1192 * (or rather should be) impossible for the device peripheral driver
1193 * list to change since we look at the whole thing in one pass, and
1194 * we do it with lock protection.
1197 case CAMGETPASSTHRU: {
1198 union ccb *ccb;
1199 struct cam_periph *periph;
1200 struct periph_driver **p_drv;
1201 char *name;
1202 u_int unit;
1203 u_int cur_generation;
1204 int base_periph_found;
1205 int splbreaknum;
1207 ccb = (union ccb *)ap->a_data;
1208 unit = ccb->cgdl.unit_number;
1209 name = ccb->cgdl.periph_name;
1211 * Every 100 devices, we want to drop our lock protection to
1212 * give the software interrupt handler a chance to run.
1213 * Most systems won't run into this check, but this should
1214 * avoid starvation in the software interrupt handler in
1215 * large systems.
1217 splbreaknum = 100;
1219 ccb = (union ccb *)ap->a_data;
1221 base_periph_found = 0;
1224 * Sanity check -- make sure we don't get a null peripheral
1225 * driver name.
1227 if (*ccb->cgdl.periph_name == '\0') {
1228 error = EINVAL;
1229 break;
1232 /* Keep the list from changing while we traverse it */
1233 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
1234 ptstartover:
1235 cur_generation = xsoftc.xpt_generation;
1237 /* first find our driver in the list of drivers */
1238 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
1239 if (strcmp((*p_drv)->driver_name, name) == 0)
1240 break;
1243 if (*p_drv == NULL) {
1244 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
1245 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1246 ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1247 *ccb->cgdl.periph_name = '\0';
1248 ccb->cgdl.unit_number = 0;
1249 error = ENOENT;
1250 break;
1254 * Run through every peripheral instance of this driver
1255 * and check to see whether it matches the unit passed
1256 * in by the user. If it does, get out of the loops and
1257 * find the passthrough driver associated with that
1258 * peripheral driver.
1260 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
1262 if (periph->unit_number == unit) {
1263 break;
1264 } else if (--splbreaknum == 0) {
1265 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
1266 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
1267 splbreaknum = 100;
1268 if (cur_generation != xsoftc.xpt_generation)
1269 goto ptstartover;
1273 * If we found the peripheral driver that the user passed
1274 * in, go through all of the peripheral drivers for that
1275 * particular device and look for a passthrough driver.
1277 if (periph != NULL) {
1278 struct cam_ed *device;
1279 int i;
1281 base_periph_found = 1;
1282 device = periph->path->device;
1283 for (i = 0, periph = SLIST_FIRST(&device->periphs);
1284 periph != NULL;
1285 periph = SLIST_NEXT(periph, periph_links), i++) {
1287 * Check to see whether we have a
1288 * passthrough device or not.
1290 if (strcmp(periph->periph_name, "pass") == 0) {
1292 * Fill in the getdevlist fields.
1294 strcpy(ccb->cgdl.periph_name,
1295 periph->periph_name);
1296 ccb->cgdl.unit_number =
1297 periph->unit_number;
1298 if (SLIST_NEXT(periph, periph_links))
1299 ccb->cgdl.status =
1300 CAM_GDEVLIST_MORE_DEVS;
1301 else
1302 ccb->cgdl.status =
1303 CAM_GDEVLIST_LAST_DEVICE;
1304 ccb->cgdl.generation =
1305 device->generation;
1306 ccb->cgdl.index = i;
1308 * Fill in some CCB header fields
1309 * that the user may want.
1311 ccb->ccb_h.path_id =
1312 periph->path->bus->path_id;
1313 ccb->ccb_h.target_id =
1314 periph->path->target->target_id;
1315 ccb->ccb_h.target_lun =
1316 periph->path->device->lun_id;
1317 ccb->ccb_h.status = CAM_REQ_CMP;
1318 break;
1324 * If the periph is null here, one of two things has
1325 * happened. The first possibility is that we couldn't
1326 * find the unit number of the particular peripheral driver
1327 * that the user is asking about. e.g. the user asks for
1328 * the passthrough driver for "da11". We find the list of
1329 * "da" peripherals all right, but there is no unit 11.
1330 * The other possibility is that we went through the list
1331 * of peripheral drivers attached to the device structure,
1332 * but didn't find one with the name "pass". Either way,
1333 * we return ENOENT, since we couldn't find something.
1335 if (periph == NULL) {
1336 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1337 ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1338 *ccb->cgdl.periph_name = '\0';
1339 ccb->cgdl.unit_number = 0;
1340 error = ENOENT;
1342 * It is unfortunate that this is even necessary,
1343 * but there are many, many clueless users out there.
1344 * If this is true, the user is looking for the
1345 * passthrough driver, but doesn't have one in his
1346 * kernel.
1348 if (base_periph_found == 1) {
1349 kprintf("xptioctl: pass driver is not in the "
1350 "kernel\n");
1351 kprintf("xptioctl: put \"device pass\" in "
1352 "your kernel config file\n");
1355 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
1356 break;
1358 default:
1359 error = ENOTTY;
1360 break;
1363 return(error);
1366 static int
1367 cam_module_event_handler(module_t mod, int what, void *arg)
1369 int error;
1371 switch (what) {
1372 case MOD_LOAD:
1373 if ((error = xpt_init(NULL)) != 0)
1374 return (error);
1375 break;
1376 case MOD_UNLOAD:
1377 return EBUSY;
1378 default:
1379 return EOPNOTSUPP;
1382 return 0;
1386 * Thread to handle asynchronous main-context requests.
1388 * This function is typically used by drivers to perform complex actions
1389 * such as bus scans and engineering requests in a main context instead
1390 * of an interrupt context.
1392 static void
1393 xpt_scanner_thread(void *dummy)
1395 union ccb *ccb;
1396 #if 0
1397 struct cam_sim *sim;
1398 #endif
1400 for (;;) {
1401 xpt_lock_buses();
1402 xsoftc.ccb_scanq_running = 1;
1403 while ((ccb = (void *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
1404 TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h,
1405 sim_links.tqe);
1406 xpt_unlock_buses();
1407 #if 0
1408 sim = ccb->ccb_h.path->bus->sim;
1409 CAM_SIM_LOCK(sim);
1410 #endif
1411 xpt_action(ccb);
1412 #if 0
1413 CAM_SIM_UNLOCK(sim);
1414 xpt_lock_buses();
1415 #endif
1417 xsoftc.ccb_scanq_running = 0;
1418 tsleep_interlock(&xsoftc.ccb_scanq, 0);
1419 xpt_unlock_buses();
1420 tsleep(&xsoftc.ccb_scanq, PINTERLOCKED, "ccb_scanq", 0);
1425 * Issue an asynchronous action
1427 void
1428 xpt_action_async(union ccb *ccb)
1430 xpt_lock_buses();
1431 TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
1432 if (xsoftc.ccb_scanq_running == 0) {
1433 xsoftc.ccb_scanq_running = 1;
1434 wakeup(&xsoftc.ccb_scanq);
1436 xpt_unlock_buses();
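/*
 * Illustrative sketch (not code in this file): a driver wanting a full
 * bus rescan run from the scanner thread above, rather than from its own
 * (possibly interrupt) context, could queue one roughly like this.  The
 * completion routine "mydriver_scan_done" is hypothetical and would be
 * expected to free the path and the CCB; "bus_path_id" is a placeholder.
 *
 *	union ccb *ccb = xpt_alloc_ccb();
 *
 *	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, bus_path_id,
 *			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
 *	    != CAM_REQ_CMP) {
 *		xpt_free_ccb(ccb);
 *		return;
 *	}
 *	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5);
 *	ccb->ccb_h.func_code = XPT_SCAN_BUS;
 *	ccb->ccb_h.cbfcnp = mydriver_scan_done;
 *	ccb->crcn.flags = CAM_FLAG_NONE;
 *	xpt_action_async(ccb);
 */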
1440 /* Functions accessed by the peripheral drivers */
1441 static int
1442 xpt_init(void *dummy)
1444 struct cam_sim *xpt_sim;
1445 struct cam_path *path;
1446 struct cam_devq *devq;
1447 cam_status status;
1449 TAILQ_INIT(&xsoftc.xpt_busses);
1450 TAILQ_INIT(&cam_simq);
1451 TAILQ_INIT(&xsoftc.ccb_scanq);
1452 STAILQ_INIT(&xsoftc.highpowerq);
1453 xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
1455 spin_init(&cam_simq_spin);
1456 lockinit(&xsoftc.xpt_lock, "XPT lock", 0, LK_CANRECURSE);
1457 lockinit(&xsoftc.xpt_topo_lock, "XPT topology lock", 0, LK_CANRECURSE);
1459 SLIST_INIT(&cam_dead_sim.ccb_freeq);
1460 TAILQ_INIT(&cam_dead_sim.sim_doneq);
1461 spin_init(&cam_dead_sim.sim_spin);
1462 cam_dead_sim.sim_action = dead_sim_action;
1463 cam_dead_sim.sim_poll = dead_sim_poll;
1464 cam_dead_sim.sim_name = "dead_sim";
1465 cam_dead_sim.lock = &cam_dead_lock;
1466 lockinit(&cam_dead_lock, "XPT dead_sim lock", 0, LK_CANRECURSE);
1467 cam_dead_sim.flags |= CAM_SIM_DEREGISTERED;
1470 * The xpt layer is, itself, the equivalent of a SIM.
1471 * Allow 16 ccbs in the ccb pool for it. This should
1472 * give decent parallelism when we probe busses and
1473 * perform other XPT functions.
1475 devq = cam_simq_alloc(16);
1476 xpt_sim = cam_sim_alloc(xptaction,
1477 xptpoll,
1478 "xpt",
1479 /*softc*/NULL,
1480 /*unit*/0,
1481 /*lock*/&xsoftc.xpt_lock,
1482 /*max_dev_transactions*/0,
1483 /*max_tagged_dev_transactions*/0,
1484 devq);
1485 cam_simq_release(devq);
1486 if (xpt_sim == NULL)
1487 return (ENOMEM);
1489 xpt_sim->max_ccbs = 16;
1491 lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
1492 if ((status = xpt_bus_register(xpt_sim, /*bus #*/0)) != CAM_SUCCESS) {
1493 kprintf("xpt_init: xpt_bus_register failed with status %#x,"
1494 " failing attach\n", status);
1495 return (EINVAL);
1499 * Looking at the XPT from the SIM layer, the XPT is
1500 * the equivalent of a peripheral driver. Allocate
1501 * a peripheral driver entry for us.
1503 if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
1504 CAM_TARGET_WILDCARD,
1505 CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
1506 kprintf("xpt_init: xpt_create_path failed with status %#x,"
1507 " failing attach\n", status);
1508 return (EINVAL);
1511 cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
1512 path, NULL, 0, xpt_sim);
1513 xpt_free_path(path);
1515 lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
1518 * Register a callback for when interrupts are enabled.
1520 xsoftc.xpt_config_hook = kmalloc(sizeof(struct intr_config_hook),
1521 M_CAMXPT, M_INTWAIT | M_ZERO);
1522 xsoftc.xpt_config_hook->ich_func = xpt_config;
1523 xsoftc.xpt_config_hook->ich_desc = "xpt";
1524 xsoftc.xpt_config_hook->ich_order = 1000;
1525 if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
1526 kfree (xsoftc.xpt_config_hook, M_CAMXPT);
1527 kprintf("xpt_init: config_intrhook_establish failed "
1528 "- failing attach\n");
1531 /* fire up rescan thread */
1532 if (kthread_create(xpt_scanner_thread, NULL, NULL, "xpt_thrd")) {
1533 kprintf("xpt_init: failed to create rescan thread\n");
1535 /* Install our software interrupt handlers */
1536 register_swi(SWI_CAMBIO, swi_cambio, NULL, "swi_cambio", NULL);
1538 return (0);
1541 static cam_status
1542 xptregister(struct cam_periph *periph, void *arg)
1544 struct cam_sim *xpt_sim;
1546 if (periph == NULL) {
1547 kprintf("xptregister: periph was NULL!!\n");
1548 return(CAM_REQ_CMP_ERR);
1551 xpt_sim = (struct cam_sim *)arg;
1552 xpt_sim->softc = periph;
1553 xpt_periph = periph;
1554 periph->softc = NULL;
1556 return(CAM_REQ_CMP);
1559 int32_t
1560 xpt_add_periph(struct cam_periph *periph)
1562 struct cam_ed *device;
1563 int32_t status;
1564 struct periph_list *periph_head;
1566 sim_lock_assert_owned(periph->sim->lock);
1568 device = periph->path->device;
1570 periph_head = &device->periphs;
1572 status = CAM_REQ_CMP;
1574 if (device != NULL) {
1576 * Make room for this peripheral
1577 * so it will fit in the queue
1578 * when it's scheduled to run
1580 status = camq_resize(&device->drvq,
1581 device->drvq.array_size + 1);
1583 device->generation++;
1585 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
1588 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
1589 xsoftc.xpt_generation++;
1590 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
1592 return (status);
1595 void
1596 xpt_remove_periph(struct cam_periph *periph)
1598 struct cam_ed *device;
1600 sim_lock_assert_owned(periph->sim->lock);
1602 device = periph->path->device;
1604 if (device != NULL) {
1605 struct periph_list *periph_head;
1607 periph_head = &device->periphs;
1609 /* Release the slot for this peripheral */
1610 camq_resize(&device->drvq, device->drvq.array_size - 1);
1612 device->generation++;
1614 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
1617 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
1618 xsoftc.xpt_generation++;
1619 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
1622 void
1623 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1625 struct ccb_pathinq cpi;
1626 struct ccb_trans_settings cts;
1627 struct cam_path *path;
1628 u_int speed;
1629 u_int freq;
1630 u_int mb;
1632 sim_lock_assert_owned(periph->sim->lock);
1634 path = periph->path;
1636 /* Report basic attachment and inquiry data */
1637 kprintf("%s%d at %s%d bus %d target %d lun %d\n",
1638 periph->periph_name, periph->unit_number,
1639 path->bus->sim->sim_name,
1640 path->bus->sim->unit_number,
1641 path->bus->sim->bus_id,
1642 path->target->target_id,
1643 path->device->lun_id);
1644 kprintf("%s%d: ", periph->periph_name, periph->unit_number);
1645 scsi_print_inquiry(&path->device->inq_data);
1647 /* Report serial number */
1648 if (path->device->serial_num_len > 0) {
1649 /* Don't wrap the screen - print only the first 60 chars */
1650 kprintf("%s%d: Serial Number %.60s\n", periph->periph_name,
1651 periph->unit_number, path->device->serial_num);
1654 /* Acquire and report transfer speed */
1655 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1656 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1657 cts.type = CTS_TYPE_CURRENT_SETTINGS;
1658 xpt_action((union ccb*)&cts);
1659 if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1660 return;
1663 /* Ask the SIM for its base transfer speed */
1664 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1665 cpi.ccb_h.func_code = XPT_PATH_INQ;
1666 xpt_action((union ccb *)&cpi);
1668 speed = cpi.base_transfer_speed;
1669 freq = 0;
1670 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1671 struct ccb_trans_settings_spi *spi;
1673 spi = &cts.xport_specific.spi;
1674 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
1675 && spi->sync_offset != 0) {
1676 freq = scsi_calc_syncsrate(spi->sync_period);
1677 speed = freq;
1680 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
1681 speed *= (0x01 << spi->bus_width);
1683 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1684 struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
1685 if (fc->valid & CTS_FC_VALID_SPEED) {
1686 speed = fc->bitrate;
1690 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SAS) {
1691 struct ccb_trans_settings_sas *sas = &cts.xport_specific.sas;
1692 if (sas->valid & CTS_SAS_VALID_SPEED) {
1693 speed = sas->bitrate;
1697 mb = speed / 1000;
1698 if (mb > 0)
1699 kprintf("%s%d: %d.%03dMB/s transfers",
1700 periph->periph_name, periph->unit_number,
1701 mb, speed % 1000);
1702 else
1703 kprintf("%s%d: %dKB/s transfers", periph->periph_name,
1704 periph->unit_number, speed);
1706 /* Report additional information about SPI connections */
1707 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1708 struct ccb_trans_settings_spi *spi;
1710 spi = &cts.xport_specific.spi;
1711 if (freq != 0) {
1712 kprintf(" (%d.%03dMHz%s, offset %d", freq / 1000,
1713 freq % 1000,
1714 (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
1715 ? " DT" : "",
1716 spi->sync_offset);
1718 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
1719 && spi->bus_width > 0) {
1720 if (freq != 0) {
1721 kprintf(", ");
1722 } else {
1723 kprintf(" (");
1725 kprintf("%dbit)", 8 * (0x01 << spi->bus_width));
1726 } else if (freq != 0) {
1727 kprintf(")");
1730 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1731 struct ccb_trans_settings_fc *fc;
1733 fc = &cts.xport_specific.fc;
1734 if (fc->valid & CTS_FC_VALID_WWNN)
1735 kprintf(" WWNN 0x%llx", (long long) fc->wwnn);
1736 if (fc->valid & CTS_FC_VALID_WWPN)
1737 kprintf(" WWPN 0x%llx", (long long) fc->wwpn);
1738 if (fc->valid & CTS_FC_VALID_PORT)
1739 kprintf(" PortID 0x%x", fc->port);
1742 if (path->device->inq_flags & SID_CmdQue
1743 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1744 kprintf("\n%s%d: Command Queueing Enabled",
1745 periph->periph_name, periph->unit_number);
1747 kprintf("\n");
1750 * We only want to print the caller's announce string if they've
1751 * passed one in.
1753 if (announce_string != NULL)
1754 kprintf("%s%d: %s\n", periph->periph_name,
1755 periph->unit_number, announce_string);
1758 static dev_match_ret
1759 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1760 struct cam_eb *bus)
1762 dev_match_ret retval;
1763 int i;
1765 retval = DM_RET_NONE;
1768 * If we aren't given something to match against, that's an error.
1770 if (bus == NULL)
1771 return(DM_RET_ERROR);
1774 * If there are no match entries, then this bus matches no
1775 * matter what.
1777 if ((patterns == NULL) || (num_patterns == 0))
1778 return(DM_RET_DESCEND | DM_RET_COPY);
1780 for (i = 0; i < num_patterns; i++) {
1781 struct bus_match_pattern *cur_pattern;
1784 * If the pattern in question isn't for a bus node, we
1785 * aren't interested. However, we do indicate to the
1786 * calling routine that we should continue descending the
1787 * tree, since the user wants to match against lower-level
1788 * EDT elements.
1790 if (patterns[i].type != DEV_MATCH_BUS) {
1791 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1792 retval |= DM_RET_DESCEND;
1793 continue;
1796 cur_pattern = &patterns[i].pattern.bus_pattern;
1799 * If they want to match any bus node, we give them any
1800 * bus node.
1802 if (cur_pattern->flags == BUS_MATCH_ANY) {
1803 /* set the copy flag */
1804 retval |= DM_RET_COPY;
1807 * If we've already decided on an action, go ahead
1808 * and return.
1810 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1811 return(retval);
1815 * Not sure why someone would do this...
1817 if (cur_pattern->flags == BUS_MATCH_NONE)
1818 continue;
1820 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1821 && (cur_pattern->path_id != bus->path_id))
1822 continue;
1824 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1825 && (cur_pattern->bus_id != bus->sim->bus_id))
1826 continue;
1828 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1829 && (cur_pattern->unit_number != bus->sim->unit_number))
1830 continue;
1832 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1833 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1834 DEV_IDLEN) != 0))
1835 continue;
1838 * If we get to this point, the user definitely wants
1839 * information on this bus. So tell the caller to copy the
1840 * data out.
1842 retval |= DM_RET_COPY;
1845 * If the return action has been set to descend, then we
1846 * know that we've already seen a non-bus matching
1847 * expression, therefore we need to further descend the tree.
1848 * This won't change by continuing around the loop, so we
1849 * go ahead and return. If we haven't seen a non-bus
1850 * matching expression, we keep going around the loop until
1851 * we exhaust the matching expressions. We'll set the stop
1852 * flag once we fall out of the loop.
1854 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1855 return(retval);
1859 * If the return action hasn't been set to descend yet, that means
1860 * we haven't seen anything other than bus matching patterns. So
1861 * tell the caller to stop descending the tree -- the user doesn't
1862 * want to match against lower level tree elements.
1864 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1865 retval |= DM_RET_STOP;
1867 return(retval);
1870 static dev_match_ret
1871 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1872 struct cam_ed *device)
1874 dev_match_ret retval;
1875 int i;
1877 retval = DM_RET_NONE;
1880 * If we aren't given something to match against, that's an error.
1882 if (device == NULL)
1883 return(DM_RET_ERROR);
1886 * If there are no match entries, then this device matches no
1887 * matter what.
1889 if ((patterns == NULL) || (num_patterns == 0))
1890 return(DM_RET_DESCEND | DM_RET_COPY);
1892 for (i = 0; i < num_patterns; i++) {
1893 struct device_match_pattern *cur_pattern;
1896 * If the pattern in question isn't for a device node, we
1897 * aren't interested.
1899 if (patterns[i].type != DEV_MATCH_DEVICE) {
1900 if ((patterns[i].type == DEV_MATCH_PERIPH)
1901 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1902 retval |= DM_RET_DESCEND;
1903 continue;
1906 cur_pattern = &patterns[i].pattern.device_pattern;
1909 * If they want to match any device node, we give them any
1910 * device node.
1912 if (cur_pattern->flags == DEV_MATCH_ANY) {
1913 /* set the copy flag */
1914 retval |= DM_RET_COPY;
1918 * If we've already decided on an action, go ahead
1919 * and return.
1921 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1922 return(retval);
1926 * Not sure why someone would do this...
1928 if (cur_pattern->flags == DEV_MATCH_NONE)
1929 continue;
1931 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1932 && (cur_pattern->path_id != device->target->bus->path_id))
1933 continue;
1935 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1936 && (cur_pattern->target_id != device->target->target_id))
1937 continue;
1939 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1940 && (cur_pattern->target_lun != device->lun_id))
1941 continue;
1943 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1944 && (cam_quirkmatch((caddr_t)&device->inq_data,
1945 (caddr_t)&cur_pattern->inq_pat,
1946 1, sizeof(cur_pattern->inq_pat),
1947 scsi_static_inquiry_match) == NULL))
1948 continue;
1951 * If we get to this point, the user definitely wants
1952 * information on this device. So tell the caller to copy
1953 * the data out.
1955 retval |= DM_RET_COPY;
1958 * If the return action has been set to descend, then we
1959 * know that we've already seen a peripheral matching
1960 * expression, therefore we need to further descend the tree.
1961 * This won't change by continuing around the loop, so we
1962 * go ahead and return. If we haven't seen a peripheral
1963 * matching expression, we keep going around the loop until
1964 * we exhaust the matching expressions. We'll set the stop
1965 * flag once we fall out of the loop.
1967 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1968 return(retval);
1972 * If the return action hasn't been set to descend yet, that means
1973 * we haven't seen any peripheral matching patterns. So tell the
1974 * caller to stop descending the tree -- the user doesn't want to
1975 * match against lower level tree elements.
1977 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1978 retval |= DM_RET_STOP;
1980 return(retval);
1984 * Match a single peripheral against any number of match patterns.
1986 static dev_match_ret
1987 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1988 struct cam_periph *periph)
1990 dev_match_ret retval;
1991 int i;
1994 * If we aren't given something to match against, that's an error.
1996 if (periph == NULL)
1997 return(DM_RET_ERROR);
2000 * If there are no match entries, then this peripheral matches no
2001 * matter what.
2003 if ((patterns == NULL) || (num_patterns == 0))
2004 return(DM_RET_STOP | DM_RET_COPY);
2007 * There aren't any nodes below a peripheral node, so there's no
2008 * reason to descend the tree any further.
2010 retval = DM_RET_STOP;
2012 for (i = 0; i < num_patterns; i++) {
2013 struct periph_match_pattern *cur_pattern;
2016 * If the pattern in question isn't for a peripheral, we
2017 * aren't interested.
2019 if (patterns[i].type != DEV_MATCH_PERIPH)
2020 continue;
2022 cur_pattern = &patterns[i].pattern.periph_pattern;
2025 * If they want to match on anything, then we will do so.
2027 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
2028 /* set the copy flag */
2029 retval |= DM_RET_COPY;
2032 * We've already set the return action to stop,
2033 * since there are no nodes below peripherals in
2034 * the tree.
2036 return(retval);
2040 * Not sure why someone would do this...
2042 if (cur_pattern->flags == PERIPH_MATCH_NONE)
2043 continue;
2045 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
2046 && (cur_pattern->path_id != periph->path->bus->path_id))
2047 continue;
2050 * For the target and lun ids, we have to make sure the
2051 * target and lun pointers aren't NULL. The xpt peripheral
2052 * has a wildcard target and device.
2054 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
2055 && ((periph->path->target == NULL)
2056 ||(cur_pattern->target_id != periph->path->target->target_id)))
2057 continue;
2059 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
2060 && ((periph->path->device == NULL)
2061 || (cur_pattern->target_lun != periph->path->device->lun_id)))
2062 continue;
2064 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
2065 && (cur_pattern->unit_number != periph->unit_number))
2066 continue;
2068 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
2069 && (strncmp(cur_pattern->periph_name, periph->periph_name,
2070 DEV_IDLEN) != 0))
2071 continue;
2074 * If we get to this point, the user definitely wants
2075 * information on this peripheral. So tell the caller to
2076 * copy the data out.
2078 retval |= DM_RET_COPY;
2081 * The return action has already been set to stop, since
2082 * peripherals don't have any nodes below them in the EDT.
2084 return(retval);
2088 * If we get to this point, the peripheral that was passed in
2089 * doesn't match any of the patterns.
2091 return(retval);
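/*
 * Per-bus callback for the EDT traversal done on behalf of XPT_DEV_MATCH.
 * Matches the bus against the user's patterns, copies out a result when
 * requested, and then walks the bus's targets unless told to stop.
 */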
2094 static int
2095 xptedtbusfunc(struct cam_eb *bus, void *arg)
2097 struct ccb_dev_match *cdm;
2098 dev_match_ret retval;
2100 cdm = (struct ccb_dev_match *)arg;
2103 * If our position is for something deeper in the tree, that means
2104 * that we've already seen this node. So, we keep going down.
2106 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2107 && (cdm->pos.cookie.bus == bus)
2108 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2109 && (cdm->pos.cookie.target != NULL))
2110 retval = DM_RET_DESCEND;
2111 else
2112 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
2115 * If we got an error, bail out of the search.
2117 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2118 cdm->status = CAM_DEV_MATCH_ERROR;
2119 return(0);
2123 * If the copy flag is set, copy this bus out.
2125 if (retval & DM_RET_COPY) {
2126 int spaceleft, j;
2128 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2129 sizeof(struct dev_match_result));
2132 * If we don't have enough space to put in another
2133 * match result, save our position and tell the
2134 * user there are more devices to check.
2136 if (spaceleft < sizeof(struct dev_match_result)) {
2137 bzero(&cdm->pos, sizeof(cdm->pos));
2138 cdm->pos.position_type =
2139 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
2141 cdm->pos.cookie.bus = bus;
2142 cdm->pos.generations[CAM_BUS_GENERATION]=
2143 xsoftc.bus_generation;
2144 cdm->status = CAM_DEV_MATCH_MORE;
2145 return(0);
2147 j = cdm->num_matches;
2148 cdm->num_matches++;
2149 cdm->matches[j].type = DEV_MATCH_BUS;
2150 cdm->matches[j].result.bus_result.path_id = bus->path_id;
2151 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
2152 cdm->matches[j].result.bus_result.unit_number =
2153 bus->sim->unit_number;
2154 strncpy(cdm->matches[j].result.bus_result.dev_name,
2155 bus->sim->sim_name, DEV_IDLEN);
2159 * If the user is only interested in busses, there's no
2160 * reason to descend to the next level in the tree.
2162 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2163 return(1);
2166 * If there is a target generation recorded, check it to
2167 * make sure the target list hasn't changed.
2169 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2170 && (bus == cdm->pos.cookie.bus)
2171 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2172 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
2173 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
2174 bus->generation)) {
2175 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2176 return(0);
2179 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2180 && (cdm->pos.cookie.bus == bus)
2181 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2182 && (cdm->pos.cookie.target != NULL))
2183 return(xpttargettraverse(bus,
2184 (struct cam_et *)cdm->pos.cookie.target,
2185 xptedttargetfunc, arg));
2186 else
2187 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
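/*
 * Per-target callback for the EDT traversal.  Verifies the saved device
 * list generation (if any) and then walks the target's devices.
 */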
2190 static int
2191 xptedttargetfunc(struct cam_et *target, void *arg)
2193 struct ccb_dev_match *cdm;
2195 cdm = (struct ccb_dev_match *)arg;
2198 * If there is a device list generation recorded, check it to
2199 * make sure the device list hasn't changed.
2201 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2202 && (cdm->pos.cookie.bus == target->bus)
2203 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2204 && (cdm->pos.cookie.target == target)
2205 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2206 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2207 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2208 target->generation)) {
2209 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2210 return(0);
2213 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2214 && (cdm->pos.cookie.bus == target->bus)
2215 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2216 && (cdm->pos.cookie.target == target)
2217 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2218 && (cdm->pos.cookie.device != NULL))
2219 return(xptdevicetraverse(target,
2220 (struct cam_ed *)cdm->pos.cookie.device,
2221 xptedtdevicefunc, arg));
2222 else
2223 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
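/*
 * Per-device callback for the EDT traversal.  Matches the device against
 * the user's patterns, copies out a result when requested, and descends
 * to the peripheral level unless told to stop.
 */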
2226 static int
2227 xptedtdevicefunc(struct cam_ed *device, void *arg)
2230 struct ccb_dev_match *cdm;
2231 dev_match_ret retval;
2233 cdm = (struct ccb_dev_match *)arg;
2236 * If our position is for something deeper in the tree, that means
2237 * that we've already seen this node. So, we keep going down.
2239 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2240 && (cdm->pos.cookie.device == device)
2241 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2242 && (cdm->pos.cookie.periph != NULL))
2243 retval = DM_RET_DESCEND;
2244 else
2245 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2246 device);
2248 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2249 cdm->status = CAM_DEV_MATCH_ERROR;
2250 return(0);
2254 * If the copy flag is set, copy this device out.
2256 if (retval & DM_RET_COPY) {
2257 int spaceleft, j;
2259 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2260 sizeof(struct dev_match_result));
2263 * If we don't have enough space to put in another
2264 * match result, save our position and tell the
2265 * user there are more devices to check.
2267 if (spaceleft < sizeof(struct dev_match_result)) {
2268 bzero(&cdm->pos, sizeof(cdm->pos));
2269 cdm->pos.position_type =
2270 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2271 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2273 cdm->pos.cookie.bus = device->target->bus;
2274 cdm->pos.generations[CAM_BUS_GENERATION]=
2275 xsoftc.bus_generation;
2276 cdm->pos.cookie.target = device->target;
2277 cdm->pos.generations[CAM_TARGET_GENERATION] =
2278 device->target->bus->generation;
2279 cdm->pos.cookie.device = device;
2280 cdm->pos.generations[CAM_DEV_GENERATION] =
2281 device->target->generation;
2282 cdm->status = CAM_DEV_MATCH_MORE;
2283 return(0);
2285 j = cdm->num_matches;
2286 cdm->num_matches++;
2287 cdm->matches[j].type = DEV_MATCH_DEVICE;
2288 cdm->matches[j].result.device_result.path_id =
2289 device->target->bus->path_id;
2290 cdm->matches[j].result.device_result.target_id =
2291 device->target->target_id;
2292 cdm->matches[j].result.device_result.target_lun =
2293 device->lun_id;
2294 bcopy(&device->inq_data,
2295 &cdm->matches[j].result.device_result.inq_data,
2296 sizeof(struct scsi_inquiry_data));
2298 /* Let the user know whether this device is unconfigured */
2299 if (device->flags & CAM_DEV_UNCONFIGURED)
2300 cdm->matches[j].result.device_result.flags =
2301 DEV_RESULT_UNCONFIGURED;
2302 else
2303 cdm->matches[j].result.device_result.flags =
2304 DEV_RESULT_NOFLAG;
2308 * If the user isn't interested in peripherals, don't descend
2309 * the tree any further.
2311 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2312 return(1);
2315 * If there is a peripheral list generation recorded, make sure
2316 * it hasn't changed.
2318 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2319 && (device->target->bus == cdm->pos.cookie.bus)
2320 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2321 && (device->target == cdm->pos.cookie.target)
2322 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2323 && (device == cdm->pos.cookie.device)
2324 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2325 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2326 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2327 device->generation)){
2328 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2329 return(0);
2332 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2333 && (cdm->pos.cookie.bus == device->target->bus)
2334 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2335 && (cdm->pos.cookie.target == device->target)
2336 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2337 && (cdm->pos.cookie.device == device)
2338 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2339 && (cdm->pos.cookie.periph != NULL))
2340 return(xptperiphtraverse(device,
2341 (struct cam_periph *)cdm->pos.cookie.periph,
2342 xptedtperiphfunc, arg));
2343 else
2344 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
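/*
 * Per-peripheral callback for the EDT traversal.  Matches the peripheral
 * against the user's patterns and copies out a result when requested.
 */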
2347 static int
2348 xptedtperiphfunc(struct cam_periph *periph, void *arg)
2350 struct ccb_dev_match *cdm;
2351 dev_match_ret retval;
2353 cdm = (struct ccb_dev_match *)arg;
2355 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2357 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2358 cdm->status = CAM_DEV_MATCH_ERROR;
2359 return(0);
2363 * If the copy flag is set, copy this peripheral out.
2365 if (retval & DM_RET_COPY) {
2366 int spaceleft, j;
2368 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2369 sizeof(struct dev_match_result));
2372 * If we don't have enough space to put in another
2373 * match result, save our position and tell the
2374 * user there are more devices to check.
2376 if (spaceleft < sizeof(struct dev_match_result)) {
2377 bzero(&cdm->pos, sizeof(cdm->pos));
2378 cdm->pos.position_type =
2379 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2380 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2381 CAM_DEV_POS_PERIPH;
2383 cdm->pos.cookie.bus = periph->path->bus;
2384 cdm->pos.generations[CAM_BUS_GENERATION]=
2385 xsoftc.bus_generation;
2386 cdm->pos.cookie.target = periph->path->target;
2387 cdm->pos.generations[CAM_TARGET_GENERATION] =
2388 periph->path->bus->generation;
2389 cdm->pos.cookie.device = periph->path->device;
2390 cdm->pos.generations[CAM_DEV_GENERATION] =
2391 periph->path->target->generation;
2392 cdm->pos.cookie.periph = periph;
2393 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2394 periph->path->device->generation;
2395 cdm->status = CAM_DEV_MATCH_MORE;
2396 return(0);
2399 j = cdm->num_matches;
2400 cdm->num_matches++;
2401 cdm->matches[j].type = DEV_MATCH_PERIPH;
2402 cdm->matches[j].result.periph_result.path_id =
2403 periph->path->bus->path_id;
2404 cdm->matches[j].result.periph_result.target_id =
2405 periph->path->target->target_id;
2406 cdm->matches[j].result.periph_result.target_lun =
2407 periph->path->device->lun_id;
2408 cdm->matches[j].result.periph_result.unit_number =
2409 periph->unit_number;
2410 strncpy(cdm->matches[j].result.periph_result.periph_name,
2411 periph->periph_name, DEV_IDLEN);
2414 return(1);
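/*
 * Perform an XPT_DEV_MATCH search by walking the EDT, resuming from any
 * previously saved position.
 */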
2417 static int
2418 xptedtmatch(struct ccb_dev_match *cdm)
2420 int ret;
2422 cdm->num_matches = 0;
2425 * Check the bus list generation. If it has changed, the user
2426 * needs to reset everything and start over.
2428 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2429 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2430 && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) {
2431 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2432 return(0);
2435 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2436 && (cdm->pos.cookie.bus != NULL))
2437 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2438 xptedtbusfunc, cdm);
2439 else
2440 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2443 * If we get back 0, that means that we had to stop before fully
2444 * traversing the EDT. It also means that one of the subroutines
2445 * has set the status field to the proper value. If we get back 1,
2446 * we've fully traversed the EDT and copied out any matching entries.
2448 if (ret == 1)
2449 cdm->status = CAM_DEV_MATCH_LAST;
2451 return(ret);
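/*
 * Per-driver callback for the peripheral driver list traversal.  Verifies
 * the saved peripheral generation (if any) and then walks the driver's
 * peripheral instances.
 */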
2454 static int
2455 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2457 struct ccb_dev_match *cdm;
2459 cdm = (struct ccb_dev_match *)arg;
2461 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2462 && (cdm->pos.cookie.pdrv == pdrv)
2463 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2464 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2465 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2466 (*pdrv)->generation)) {
2467 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2468 return(0);
2471 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2472 && (cdm->pos.cookie.pdrv == pdrv)
2473 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2474 && (cdm->pos.cookie.periph != NULL))
2475 return(xptpdperiphtraverse(pdrv,
2476 (struct cam_periph *)cdm->pos.cookie.periph,
2477 xptplistperiphfunc, arg));
2478 else
2479 return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
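/*
 * Per-peripheral callback for the peripheral driver list traversal.
 * Matches the peripheral against the user's patterns and copies out a
 * result when requested.
 */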
2482 static int
2483 xptplistperiphfunc(struct cam_periph *periph, void *arg)
2485 struct ccb_dev_match *cdm;
2486 dev_match_ret retval;
2488 cdm = (struct ccb_dev_match *)arg;
2490 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2492 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2493 cdm->status = CAM_DEV_MATCH_ERROR;
2494 return(0);
2498 * If the copy flag is set, copy this peripheral out.
2500 if (retval & DM_RET_COPY) {
2501 int spaceleft, j;
2503 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2504 sizeof(struct dev_match_result));
2507 * If we don't have enough space to put in another
2508 * match result, save our position and tell the
2509 * user there are more devices to check.
2511 if (spaceleft < sizeof(struct dev_match_result)) {
2512 struct periph_driver **pdrv;
2514 pdrv = NULL;
2515 bzero(&cdm->pos, sizeof(cdm->pos));
2516 cdm->pos.position_type =
2517 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2518 CAM_DEV_POS_PERIPH;
2521 * This may look a bit nonsensical, but it is
2522 * actually quite logical. There are very few
2523 * peripheral drivers, and bloating every peripheral
2524 * structure with a pointer back to its parent
2525 * peripheral driver linker set entry would cost
2526 * more in the long run than doing this quick lookup.
2528 for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2529 if (strcmp((*pdrv)->driver_name,
2530 periph->periph_name) == 0)
2531 break;
2534 if (*pdrv == NULL) {
2535 cdm->status = CAM_DEV_MATCH_ERROR;
2536 return(0);
2539 cdm->pos.cookie.pdrv = pdrv;
2541 * The periph generation slot does double duty, as
2542 * does the periph pointer slot. They are used for
2543 * both edt and pdrv lookups and positioning.
2545 cdm->pos.cookie.periph = periph;
2546 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2547 (*pdrv)->generation;
2548 cdm->status = CAM_DEV_MATCH_MORE;
2549 return(0);
2552 j = cdm->num_matches;
2553 cdm->num_matches++;
2554 cdm->matches[j].type = DEV_MATCH_PERIPH;
2555 cdm->matches[j].result.periph_result.path_id =
2556 periph->path->bus->path_id;
2559 * The transport layer peripheral doesn't have a target or
2560 * lun.
2562 if (periph->path->target)
2563 cdm->matches[j].result.periph_result.target_id =
2564 periph->path->target->target_id;
2565 else
2566 cdm->matches[j].result.periph_result.target_id = -1;
2568 if (periph->path->device)
2569 cdm->matches[j].result.periph_result.target_lun =
2570 periph->path->device->lun_id;
2571 else
2572 cdm->matches[j].result.periph_result.target_lun = -1;
2574 cdm->matches[j].result.periph_result.unit_number =
2575 periph->unit_number;
2576 strncpy(cdm->matches[j].result.periph_result.periph_name,
2577 periph->periph_name, DEV_IDLEN);
2580 return(1);
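/*
 * Perform an XPT_DEV_MATCH search by walking the peripheral driver lists,
 * resuming from any previously saved position.
 */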
2583 static int
2584 xptperiphlistmatch(struct ccb_dev_match *cdm)
2586 int ret;
2588 cdm->num_matches = 0;
2591 * At this point in the edt traversal function, we check the bus
2592 * list generation to make sure that no busses have been added or
2593 * removed since the user last sent an XPT_DEV_MATCH ccb through.
2594 * For the peripheral driver list traversal function, however, we
2595 * don't have to worry about new peripheral driver types coming or
2596 * going; they're in a linker set, and therefore can't change
2597 * without a recompile.
2600 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2601 && (cdm->pos.cookie.pdrv != NULL))
2602 ret = xptpdrvtraverse(
2603 (struct periph_driver **)cdm->pos.cookie.pdrv,
2604 xptplistpdrvfunc, cdm);
2605 else
2606 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2609 * If we get back 0, that means that we had to stop before fully
2610 * traversing the peripheral driver tree. It also means that one of
2611 * the subroutines has set the status field to the proper value. If
2612 * we get back 1, we've fully traversed the peripheral driver list and
2613 * copied out any matching entries.
2615 if (ret == 1)
2616 cdm->status = CAM_DEV_MATCH_LAST;
2618 return(ret);
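/*
 * The xpt*traverse() helpers below walk their respective lists, starting
 * at the given node (or at the head of the list when NULL is passed) and
 * invoking tr_func on each entry.  Traversal stops early if tr_func
 * returns 0.
 */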
2621 static int
2622 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2624 struct cam_eb *bus, *next_bus;
2625 int retval;
2627 retval = 1;
2629 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
2630 for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
2631 bus != NULL;
2632 bus = next_bus) {
2633 next_bus = TAILQ_NEXT(bus, links);
2635 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
2636 CAM_SIM_LOCK(bus->sim);
2637 retval = tr_func(bus, arg);
2638 CAM_SIM_UNLOCK(bus->sim);
2639 if (retval == 0)
2640 return(retval);
2641 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
2643 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
2645 return(retval);
2648 static int
2649 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2650 xpt_targetfunc_t *tr_func, void *arg)
2652 struct cam_et *target, *next_target;
2653 int retval;
2655 retval = 1;
2656 for (target = (start_target ? start_target :
2657 TAILQ_FIRST(&bus->et_entries));
2658 target != NULL; target = next_target) {
2660 next_target = TAILQ_NEXT(target, links);
2662 retval = tr_func(target, arg);
2664 if (retval == 0)
2665 return(retval);
2668 return(retval);
2671 static int
2672 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2673 xpt_devicefunc_t *tr_func, void *arg)
2675 struct cam_ed *device, *next_device;
2676 int retval;
2678 retval = 1;
2679 for (device = (start_device ? start_device :
2680 TAILQ_FIRST(&target->ed_entries));
2681 device != NULL;
2682 device = next_device) {
2684 next_device = TAILQ_NEXT(device, links);
2686 retval = tr_func(device, arg);
2688 if (retval == 0)
2689 return(retval);
2692 return(retval);
2695 static int
2696 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2697 xpt_periphfunc_t *tr_func, void *arg)
2699 struct cam_periph *periph, *next_periph;
2700 int retval;
2702 retval = 1;
2704 for (periph = (start_periph ? start_periph :
2705 SLIST_FIRST(&device->periphs));
2706 periph != NULL;
2707 periph = next_periph) {
2709 next_periph = SLIST_NEXT(periph, periph_links);
2711 retval = tr_func(periph, arg);
2712 if (retval == 0)
2713 return(retval);
2716 return(retval);
2719 static int
2720 xptpdrvtraverse(struct periph_driver **start_pdrv,
2721 xpt_pdrvfunc_t *tr_func, void *arg)
2723 struct periph_driver **pdrv;
2724 int retval;
2726 retval = 1;
2729 * We don't traverse the peripheral driver list like we do the
2730 * other lists, because it is a linker set, and therefore cannot be
2731 * changed during runtime. If the peripheral driver list is ever
2732 * re-done to be something other than a linker set (i.e. it can
2733 * change while the system is running), the list traversal should
2734 * be modified to work like the other traversal functions.
2736 for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2737 *pdrv != NULL; pdrv++) {
2738 retval = tr_func(pdrv, arg);
2740 if (retval == 0)
2741 return(retval);
2744 return(retval);
2747 static int
2748 xptpdperiphtraverse(struct periph_driver **pdrv,
2749 struct cam_periph *start_periph,
2750 xpt_periphfunc_t *tr_func, void *arg)
2752 struct cam_periph *periph, *next_periph;
2753 int retval;
2755 retval = 1;
2757 for (periph = (start_periph ? start_periph :
2758 TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2759 periph = next_periph) {
2761 next_periph = TAILQ_NEXT(periph, unit_links);
2763 retval = tr_func(periph, arg);
2764 if (retval == 0)
2765 return(retval);
2767 return(retval);
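/*
 * The xptdef*func() callbacks implement xpt_for_all_busses() and
 * xpt_for_all_devices(): at each level of the EDT they either invoke the
 * user's function (when the configured depth has been reached) or descend
 * another level.
 */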
2770 static int
2771 xptdefbusfunc(struct cam_eb *bus, void *arg)
2773 struct xpt_traverse_config *tr_config;
2775 tr_config = (struct xpt_traverse_config *)arg;
2777 if (tr_config->depth == XPT_DEPTH_BUS) {
2778 xpt_busfunc_t *tr_func;
2780 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2782 return(tr_func(bus, tr_config->tr_arg));
2783 } else
2784 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2787 static int
2788 xptdeftargetfunc(struct cam_et *target, void *arg)
2790 struct xpt_traverse_config *tr_config;
2792 tr_config = (struct xpt_traverse_config *)arg;
2794 if (tr_config->depth == XPT_DEPTH_TARGET) {
2795 xpt_targetfunc_t *tr_func;
2797 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2799 return(tr_func(target, tr_config->tr_arg));
2800 } else
2801 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2804 static int
2805 xptdefdevicefunc(struct cam_ed *device, void *arg)
2807 struct xpt_traverse_config *tr_config;
2809 tr_config = (struct xpt_traverse_config *)arg;
2811 if (tr_config->depth == XPT_DEPTH_DEVICE) {
2812 xpt_devicefunc_t *tr_func;
2814 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2816 return(tr_func(device, tr_config->tr_arg));
2817 } else
2818 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2821 static int
2822 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2824 struct xpt_traverse_config *tr_config;
2825 xpt_periphfunc_t *tr_func;
2827 tr_config = (struct xpt_traverse_config *)arg;
2829 tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2832 * Unlike the other default functions, we don't check for depth
2833 * here. The peripheral driver level is the last level in the EDT,
2834 * so if we're here, we should execute the function in question.
2836 return(tr_func(periph, tr_config->tr_arg));
2840 * Execute the given function for every bus in the EDT.
2842 static int
2843 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2845 struct xpt_traverse_config tr_config;
2847 tr_config.depth = XPT_DEPTH_BUS;
2848 tr_config.tr_func = tr_func;
2849 tr_config.tr_arg = arg;
2851 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2855 * Execute the given function for every device in the EDT.
2857 static int
2858 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2860 struct xpt_traverse_config tr_config;
2862 tr_config.depth = XPT_DEPTH_DEVICE;
2863 tr_config.tr_func = tr_func;
2864 tr_config.tr_arg = arg;
2866 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2869 static int
2870 xptsetasyncfunc(struct cam_ed *device, void *arg)
2872 struct cam_path path;
2873 struct ccb_getdev cgd;
2874 struct async_node *cur_entry;
2876 cur_entry = (struct async_node *)arg;
2879 * Don't report unconfigured devices (Wildcard devs,
2880 * devices only for target mode, device instances
2881 * that have been invalidated but are waiting for
2882 * their last reference count to be released).
2884 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2885 return (1);
2887 xpt_compile_path(&path,
2888 NULL,
2889 device->target->bus->path_id,
2890 device->target->target_id,
2891 device->lun_id);
2892 xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2893 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2894 xpt_action((union ccb *)&cgd);
2895 cur_entry->callback(cur_entry->callback_arg,
2896 AC_FOUND_DEVICE,
2897 &path, &cgd);
2898 xpt_release_path(&path);
2900 return(1);
2903 static int
2904 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2906 struct cam_path path;
2907 struct ccb_pathinq cpi;
2908 struct async_node *cur_entry;
2910 cur_entry = (struct async_node *)arg;
2912 xpt_compile_path(&path, /*periph*/NULL,
2913 bus->sim->path_id,
2914 CAM_TARGET_WILDCARD,
2915 CAM_LUN_WILDCARD);
2916 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2917 cpi.ccb_h.func_code = XPT_PATH_INQ;
2918 xpt_action((union ccb *)&cpi);
2919 cur_entry->callback(cur_entry->callback_arg,
2920 AC_PATH_REGISTERED,
2921 &path, &cpi);
2922 xpt_release_path(&path);
2924 return(1);
2927 static void
2928 xpt_action_sasync_cb(void *context, int pending)
2930 struct async_node *cur_entry;
2931 struct xpt_task *task;
2932 uint32_t added;
2934 task = (struct xpt_task *)context;
2935 cur_entry = (struct async_node *)task->data1;
2936 added = task->data2;
2938 if ((added & AC_FOUND_DEVICE) != 0) {
2940 * Get this peripheral up to date with all
2941 * the currently existing devices.
2943 xpt_for_all_devices(xptsetasyncfunc, cur_entry);
2945 if ((added & AC_PATH_REGISTERED) != 0) {
2947 * Get this peripheral up to date with all
2948 * the currently existing busses.
2950 xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
2953 kfree(task, M_CAMXPT);
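/*
 * xpt_action() is the central CCB dispatch routine.  Function codes that
 * the transport layer handles itself are processed here; the rest are
 * passed on to the SIM's action routine.
 */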
2956 void
2957 xpt_action(union ccb *start_ccb)
2959 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2961 start_ccb->ccb_h.status = CAM_REQ_INPROG;
2963 switch (start_ccb->ccb_h.func_code) {
2964 case XPT_SCSI_IO:
2966 struct cam_ed *device;
2967 #ifdef CAMDEBUG
2968 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2969 struct cam_path *path;
2971 path = start_ccb->ccb_h.path;
2972 #endif
2975 * For the sake of compatibility with SCSI-1
2976 * devices that may not understand the identify
2977 * message, we include lun information in the
2978 * second byte of all commands. SCSI-1 specifies
2979 * that luns are a 3 bit value and reserves only 3
2980 * bits for lun information in the CDB. Later
2981 * revisions of the SCSI spec allow for more than 8
2982 * luns, but have deprecated lun information in the
2983 * CDB. So, if the lun won't fit, we must omit.
2985 * Also be aware that during initial probing for devices,
2986 * the inquiry information is unknown but initialized to 0.
2987 * This means that this code will be exercised while probing
2988 * devices with an ANSI revision greater than 2.
2990 device = start_ccb->ccb_h.path->device;
2991 if (device->protocol_version <= SCSI_REV_2
2992 && start_ccb->ccb_h.target_lun < 8
2993 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2995 start_ccb->csio.cdb_io.cdb_bytes[1] |=
2996 start_ccb->ccb_h.target_lun << 5;
2998 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2999 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
3000 scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
3001 &path->device->inq_data),
3002 scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
3003 cdb_str, sizeof(cdb_str))));
3004 /* FALLTHROUGH */
3006 case XPT_TARGET_IO:
3007 case XPT_CONT_TARGET_IO:
3008 start_ccb->csio.sense_resid = 0;
3009 start_ccb->csio.resid = 0;
3010 /* FALLTHROUGH */
3011 case XPT_RESET_DEV:
3012 case XPT_ENG_EXEC:
3014 struct cam_path *path;
3015 struct cam_sim *sim;
3016 int runq;
3018 path = start_ccb->ccb_h.path;
3020 sim = path->bus->sim;
3021 if (sim == &cam_dead_sim) {
3022 /* The SIM has gone; just execute the CCB directly. */
3023 cam_ccbq_send_ccb(&path->device->ccbq, start_ccb);
3024 (*(sim->sim_action))(sim, start_ccb);
3025 break;
3028 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
3029 if (path->device->qfrozen_cnt == 0)
3030 runq = xpt_schedule_dev_sendq(path->bus, path->device);
3031 else
3032 runq = 0;
3033 if (runq != 0)
3034 xpt_run_dev_sendq(path->bus);
3035 break;
3037 case XPT_SET_TRAN_SETTINGS:
3039 xpt_set_transfer_settings(&start_ccb->cts,
3040 start_ccb->ccb_h.path->device,
3041 /*async_update*/FALSE);
3042 break;
3044 case XPT_CALC_GEOMETRY:
3046 struct cam_sim *sim;
3048 /* Filter out garbage */
3049 if (start_ccb->ccg.block_size == 0
3050 || start_ccb->ccg.volume_size == 0) {
3051 start_ccb->ccg.cylinders = 0;
3052 start_ccb->ccg.heads = 0;
3053 start_ccb->ccg.secs_per_track = 0;
3054 start_ccb->ccb_h.status = CAM_REQ_CMP;
3055 break;
3057 sim = start_ccb->ccb_h.path->bus->sim;
3058 (*(sim->sim_action))(sim, start_ccb);
3059 break;
3061 case XPT_ABORT:
3063 union ccb* abort_ccb;
3065 abort_ccb = start_ccb->cab.abort_ccb;
3066 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
3068 if (abort_ccb->ccb_h.pinfo.index >= 0) {
3069 struct cam_ccbq *ccbq;
3071 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
3072 cam_ccbq_remove_ccb(ccbq, abort_ccb);
3073 abort_ccb->ccb_h.status =
3074 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3075 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3076 xpt_done(abort_ccb);
3077 start_ccb->ccb_h.status = CAM_REQ_CMP;
3078 break;
3080 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
3081 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
3083 * We've caught this ccb en route to
3084 * the SIM. Flag it for abort and the
3085 * SIM will do so just before starting
3086 * real work on the CCB.
3088 abort_ccb->ccb_h.status =
3089 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3090 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3091 start_ccb->ccb_h.status = CAM_REQ_CMP;
3092 break;
3095 if (XPT_FC_IS_QUEUED(abort_ccb)
3096 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
3098 * It's already completed but waiting
3099 * for our SWI to get to it.
3101 start_ccb->ccb_h.status = CAM_UA_ABORT;
3102 break;
3105 * If we weren't able to take care of the abort request
3106 * in the XPT, pass the request down to the SIM for processing.
3108 /* FALLTHROUGH */
3110 case XPT_ACCEPT_TARGET_IO:
3111 case XPT_EN_LUN:
3112 case XPT_IMMED_NOTIFY:
3113 case XPT_NOTIFY_ACK:
3114 case XPT_GET_TRAN_SETTINGS:
3115 case XPT_RESET_BUS:
3117 struct cam_sim *sim;
3119 sim = start_ccb->ccb_h.path->bus->sim;
3120 (*(sim->sim_action))(sim, start_ccb);
3121 break;
3123 case XPT_PATH_INQ:
3125 struct cam_sim *sim;
3127 sim = start_ccb->ccb_h.path->bus->sim;
3128 (*(sim->sim_action))(sim, start_ccb);
3129 break;
3131 case XPT_PATH_STATS:
3132 start_ccb->cpis.last_reset =
3133 start_ccb->ccb_h.path->bus->last_reset;
3134 start_ccb->ccb_h.status = CAM_REQ_CMP;
3135 break;
3136 case XPT_GDEV_TYPE:
3138 struct cam_ed *dev;
3140 dev = start_ccb->ccb_h.path->device;
3141 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3142 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3143 } else {
3144 struct ccb_getdev *cgd;
3145 struct cam_eb *bus;
3146 struct cam_et *tar;
3148 cgd = &start_ccb->cgd;
3149 bus = cgd->ccb_h.path->bus;
3150 tar = cgd->ccb_h.path->target;
3151 cgd->inq_data = dev->inq_data;
3152 cgd->ccb_h.status = CAM_REQ_CMP;
3153 cgd->serial_num_len = dev->serial_num_len;
3154 if ((dev->serial_num_len > 0)
3155 && (dev->serial_num != NULL))
3156 bcopy(dev->serial_num, cgd->serial_num,
3157 dev->serial_num_len);
3159 break;
3161 case XPT_GDEV_STATS:
3163 struct cam_ed *dev;
3165 dev = start_ccb->ccb_h.path->device;
3166 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3167 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3168 } else {
3169 struct ccb_getdevstats *cgds;
3170 struct cam_eb *bus;
3171 struct cam_et *tar;
3173 cgds = &start_ccb->cgds;
3174 bus = cgds->ccb_h.path->bus;
3175 tar = cgds->ccb_h.path->target;
3176 cgds->dev_openings = dev->ccbq.dev_openings;
3177 cgds->dev_active = dev->ccbq.dev_active;
3178 cgds->devq_openings = dev->ccbq.devq_openings;
3179 cgds->devq_queued = dev->ccbq.queue.entries;
3180 cgds->held = dev->ccbq.held;
3181 cgds->last_reset = tar->last_reset;
3182 cgds->maxtags = dev->quirk->maxtags;
3183 cgds->mintags = dev->quirk->mintags;
3184 if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3185 cgds->last_reset = bus->last_reset;
3186 cgds->ccb_h.status = CAM_REQ_CMP;
3188 break;
3190 case XPT_GDEVLIST:
3192 struct cam_periph *nperiph;
3193 struct periph_list *periph_head;
3194 struct ccb_getdevlist *cgdl;
3195 u_int i;
3196 struct cam_ed *device;
3197 int found;
3200 found = 0;
3203 * Don't want anyone mucking with our data.
3205 device = start_ccb->ccb_h.path->device;
3206 periph_head = &device->periphs;
3207 cgdl = &start_ccb->cgdl;
3210 * Check and see if the list has changed since the user
3211 * last requested a list member. If so, tell them that the
3212 * list has changed, and therefore they need to start over
3213 * from the beginning.
3215 if ((cgdl->index != 0) &&
3216 (cgdl->generation != device->generation)) {
3217 cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3218 break;
3222 * Traverse the list of peripherals and attempt to find
3223 * the requested peripheral.
3225 for (nperiph = SLIST_FIRST(periph_head), i = 0;
3226 (nperiph != NULL) && (i <= cgdl->index);
3227 nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
3228 if (i == cgdl->index) {
3229 strncpy(cgdl->periph_name,
3230 nperiph->periph_name,
3231 DEV_IDLEN);
3232 cgdl->unit_number = nperiph->unit_number;
3233 found = 1;
3236 if (found == 0) {
3237 cgdl->status = CAM_GDEVLIST_ERROR;
3238 break;
3241 if (nperiph == NULL)
3242 cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3243 else
3244 cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3246 cgdl->index++;
3247 cgdl->generation = device->generation;
3249 cgdl->ccb_h.status = CAM_REQ_CMP;
3250 break;
3252 case XPT_DEV_MATCH:
3254 dev_pos_type position_type;
3255 struct ccb_dev_match *cdm;
3256 int ret;
3258 cdm = &start_ccb->cdm;
3261 * There are two ways of getting at information in the EDT.
3262 * The first way is via the primary EDT tree. It starts
3263 * with a list of busses, then a list of targets on a bus,
3264 * then devices/luns on a target, and then peripherals on a
3265 * device/lun. The "other" way is by the peripheral driver
3266 * lists. The peripheral driver lists are organized by
3267 * peripheral driver (obviously). So it makes sense to
3268 * use the peripheral driver list if the user is looking
3269 * for something like "da1", or all "da" devices. If the
3270 * user is looking for something on a particular bus/target
3271 * or lun, it's generally better to go through the EDT tree.
3274 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3275 position_type = cdm->pos.position_type;
3276 else {
3277 u_int i;
3279 position_type = CAM_DEV_POS_NONE;
3281 for (i = 0; i < cdm->num_patterns; i++) {
3282 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3283 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3284 position_type = CAM_DEV_POS_EDT;
3285 break;
3289 if (cdm->num_patterns == 0)
3290 position_type = CAM_DEV_POS_EDT;
3291 else if (position_type == CAM_DEV_POS_NONE)
3292 position_type = CAM_DEV_POS_PDRV;
3295 switch(position_type & CAM_DEV_POS_TYPEMASK) {
3296 case CAM_DEV_POS_EDT:
3297 ret = xptedtmatch(cdm);
3298 break;
3299 case CAM_DEV_POS_PDRV:
3300 ret = xptperiphlistmatch(cdm);
3301 break;
3302 default:
3303 cdm->status = CAM_DEV_MATCH_ERROR;
3304 break;
3307 if (cdm->status == CAM_DEV_MATCH_ERROR)
3308 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3309 else
3310 start_ccb->ccb_h.status = CAM_REQ_CMP;
3312 break;
3314 case XPT_SASYNC_CB:
3316 struct ccb_setasync *csa;
3317 struct async_node *cur_entry;
3318 struct async_list *async_head;
3319 u_int32_t added;
3321 csa = &start_ccb->csa;
3322 added = csa->event_enable;
3323 async_head = &csa->ccb_h.path->device->asyncs;
3326 * If there is already an entry for us, simply
3327 * update it.
3329 cur_entry = SLIST_FIRST(async_head);
3330 while (cur_entry != NULL) {
3331 if ((cur_entry->callback_arg == csa->callback_arg)
3332 && (cur_entry->callback == csa->callback))
3333 break;
3334 cur_entry = SLIST_NEXT(cur_entry, links);
3337 if (cur_entry != NULL) {
3339 * If the request has no flags set,
3340 * remove the entry.
3342 added &= ~cur_entry->event_enable;
3343 if (csa->event_enable == 0) {
3344 SLIST_REMOVE(async_head, cur_entry,
3345 async_node, links);
3346 csa->ccb_h.path->device->refcount--;
3347 kfree(cur_entry, M_CAMXPT);
3348 } else {
3349 cur_entry->event_enable = csa->event_enable;
3351 } else {
3352 cur_entry = kmalloc(sizeof(*cur_entry), M_CAMXPT,
3353 M_INTWAIT);
3354 cur_entry->event_enable = csa->event_enable;
3355 cur_entry->callback_arg = csa->callback_arg;
3356 cur_entry->callback = csa->callback;
3357 SLIST_INSERT_HEAD(async_head, cur_entry, links);
3358 csa->ccb_h.path->device->refcount++;
3362 * Need to decouple this operation via a taskqueue so that
3363 * the locking doesn't become a mess.
3365 if ((added & (AC_FOUND_DEVICE | AC_PATH_REGISTERED)) != 0) {
3366 struct xpt_task *task;
3368 task = kmalloc(sizeof(struct xpt_task), M_CAMXPT,
3369 M_INTWAIT);
3371 TASK_INIT(&task->task, 0, xpt_action_sasync_cb, task);
3372 task->data1 = cur_entry;
3373 task->data2 = added;
3374 taskqueue_enqueue(taskqueue_thread[mycpuid],
3375 &task->task);
3378 start_ccb->ccb_h.status = CAM_REQ_CMP;
3379 break;
3381 case XPT_REL_SIMQ:
3383 struct ccb_relsim *crs;
3384 struct cam_ed *dev;
3386 crs = &start_ccb->crs;
3387 dev = crs->ccb_h.path->device;
3388 if (dev == NULL) {
3390 crs->ccb_h.status = CAM_DEV_NOT_THERE;
3391 break;
3394 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3396 if (INQ_DATA_TQ_ENABLED(&dev->inq_data)) {
3397 /* Don't ever go below one opening */
3398 if (crs->openings > 0) {
3399 xpt_dev_ccbq_resize(crs->ccb_h.path,
3400 crs->openings);
3402 if (bootverbose) {
3403 xpt_print(crs->ccb_h.path,
3404 "tagged openings now %d\n",
3405 crs->openings);
3411 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3413 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3416 * Just extend the old timeout and decrement
3417 * the freeze count so that a single timeout
3418 * is sufficient for releasing the queue.
3420 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3421 callout_stop(&dev->callout);
3422 } else {
3424 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3427 callout_reset(&dev->callout,
3428 (crs->release_timeout * hz) / 1000,
3429 xpt_release_devq_timeout, dev);
3431 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3435 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3437 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3439 * Decrement the freeze count so that a single
3440 * completion is still sufficient to unfreeze
3441 * the queue.
3443 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3444 } else {
3446 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3447 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3451 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3453 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3454 || (dev->ccbq.dev_active == 0)) {
3456 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3457 } else {
3459 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3460 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3464 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3466 xpt_release_devq(crs->ccb_h.path, /*count*/1,
3467 /*run_queue*/TRUE);
3469 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3470 start_ccb->ccb_h.status = CAM_REQ_CMP;
3471 break;
3473 case XPT_SCAN_BUS:
3474 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3475 break;
3476 case XPT_SCAN_LUN:
3477 xpt_scan_lun(start_ccb->ccb_h.path->periph,
3478 start_ccb->ccb_h.path, start_ccb->crcn.flags,
3479 start_ccb);
3480 break;
3481 case XPT_DEBUG: {
3482 #ifdef CAMDEBUG
3483 #ifdef CAM_DEBUG_DELAY
3484 cam_debug_delay = CAM_DEBUG_DELAY;
3485 #endif
3486 cam_dflags = start_ccb->cdbg.flags;
3487 if (cam_dpath != NULL) {
3488 xpt_free_path(cam_dpath);
3489 cam_dpath = NULL;
3492 if (cam_dflags != CAM_DEBUG_NONE) {
3493 if (xpt_create_path(&cam_dpath, xpt_periph,
3494 start_ccb->ccb_h.path_id,
3495 start_ccb->ccb_h.target_id,
3496 start_ccb->ccb_h.target_lun) !=
3497 CAM_REQ_CMP) {
3498 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3499 cam_dflags = CAM_DEBUG_NONE;
3500 } else {
3501 start_ccb->ccb_h.status = CAM_REQ_CMP;
3502 xpt_print(cam_dpath, "debugging flags now %x\n",
3503 cam_dflags);
3505 } else {
3506 cam_dpath = NULL;
3507 start_ccb->ccb_h.status = CAM_REQ_CMP;
3509 #else /* !CAMDEBUG */
3510 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3511 #endif /* CAMDEBUG */
3512 break;
3514 case XPT_NOOP:
3515 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3516 xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3517 start_ccb->ccb_h.status = CAM_REQ_CMP;
3518 break;
3519 default:
3520 case XPT_SDEV_TYPE:
3521 case XPT_TERM_IO:
3522 case XPT_ENG_INQ:
3523 /* XXX Implement */
3524 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3525 break;
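/*
 * Execute a CCB synchronously by polling the SIM.  An opening is stolen
 * from the device so that queued requests cannot race us, and the SIM's
 * poll routine is called until the CCB completes or the timeout expires.
 * Typically used when interrupts are unavailable (e.g. during dumps).
 */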
3529 void
3530 xpt_polled_action(union ccb *start_ccb)
3532 u_int32_t timeout;
3533 struct cam_sim *sim;
3534 struct cam_devq *devq;
3535 struct cam_ed *dev;
3537 timeout = start_ccb->ccb_h.timeout;
3538 sim = start_ccb->ccb_h.path->bus->sim;
3539 devq = sim->devq;
3540 dev = start_ccb->ccb_h.path->device;
3542 sim_lock_assert_owned(sim->lock);
3545 * Steal an opening so that no other queued requests
3546 * can get it before us while we simulate interrupts.
3548 dev->ccbq.devq_openings--;
3549 dev->ccbq.dev_openings--;
3551 while(((devq && devq->send_openings <= 0) || dev->ccbq.dev_openings < 0)
3552 && (--timeout > 0)) {
3553 DELAY(1000);
3554 (*(sim->sim_poll))(sim);
3555 camisr_runqueue(sim);
3558 dev->ccbq.devq_openings++;
3559 dev->ccbq.dev_openings++;
3561 if (timeout != 0) {
3562 xpt_action(start_ccb);
3563 while(--timeout > 0) {
3564 (*(sim->sim_poll))(sim);
3565 camisr_runqueue(sim);
3566 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3567 != CAM_REQ_INPROG)
3568 break;
3569 DELAY(1000);
3571 if (timeout == 0) {
3573 * XXX Is it worth adding a sim_timeout entry
3574 * point so we can attempt recovery? If
3575 * this is only used for dumps, I don't think
3576 * it is.
3578 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3580 } else {
3581 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3586 * Schedule a peripheral driver to receive a ccb when its
3587 * target device has space for more transactions.
3589 void
3590 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3592 struct cam_ed *device;
3593 union ccb *work_ccb;
3594 int runq;
3596 sim_lock_assert_owned(perph->sim->lock);
3598 CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3599 device = perph->path->device;
3600 if (periph_is_queued(perph)) {
3601 /* Simply reorder based on new priority */
3602 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3603 (" change priority to %d\n", new_priority));
3604 if (new_priority < perph->pinfo.priority) {
3605 camq_change_priority(&device->drvq,
3606 perph->pinfo.index,
3607 new_priority);
3609 runq = 0;
3610 } else if (perph->path->bus->sim == &cam_dead_sim) {
3611 /* The SIM is gone so just call periph_start directly. */
3612 work_ccb = xpt_get_ccb(perph->path->device);
3613 if (work_ccb == NULL)
3614 return; /* XXX */
3615 xpt_setup_ccb(&work_ccb->ccb_h, perph->path, new_priority);
3616 perph->pinfo.priority = new_priority;
3617 perph->periph_start(perph, work_ccb);
3618 return;
3619 } else {
3620 /* New entry on the queue */
3621 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3622 (" added periph to queue\n"));
3623 perph->pinfo.priority = new_priority;
3624 perph->pinfo.generation = ++device->drvq.generation;
3625 camq_insert(&device->drvq, &perph->pinfo);
3626 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3628 if (runq != 0) {
3629 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3630 (" calling xpt_run_devq\n"));
3631 xpt_run_dev_allocq(perph->path->bus);
3637 * Schedule a device to run on a given queue.
3638 * If the device was inserted as a new entry on the queue,
3639 * return 1 meaning the device queue should be run. If we
3640 * were already queued, implying someone else has already
3641 * started the queue, return 0 so the caller doesn't attempt
3642 * to run the queue.
3644 static int
3645 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3646 u_int32_t new_priority)
3648 int retval;
3649 u_int32_t old_priority;
3651 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3653 old_priority = pinfo->priority;
3656 * Are we already queued?
3658 if (pinfo->index != CAM_UNQUEUED_INDEX) {
3659 /* Simply reorder based on new priority */
3660 if (new_priority < old_priority) {
3661 camq_change_priority(queue, pinfo->index,
3662 new_priority);
3663 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3664 ("changed priority to %d\n",
3665 new_priority));
3667 retval = 0;
3668 } else {
3669 /* New entry on the queue */
3670 if (new_priority < old_priority)
3671 pinfo->priority = new_priority;
3673 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3674 ("Inserting onto queue\n"));
3675 pinfo->generation = ++queue->generation;
3676 camq_insert(queue, pinfo);
3677 retval = 1;
3679 return (retval);
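/*
 * Drain the bus's allocation queue: while openings remain and the queue
 * is not frozen, hand a CCB to the highest priority peripheral driver
 * waiting on each queued device.
 */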
3682 static void
3683 xpt_run_dev_allocq(struct cam_eb *bus)
3685 struct cam_devq *devq;
3687 if ((devq = bus->sim->devq) == NULL) {
3688 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq: NULL devq\n"));
3689 return;
3691 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3693 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3694 (" qfrozen_cnt == 0x%x, entries == %d, "
3695 "openings == %d, active == %d\n",
3696 devq->alloc_queue.qfrozen_cnt,
3697 devq->alloc_queue.entries,
3698 devq->alloc_openings,
3699 devq->alloc_active));
3701 devq->alloc_queue.qfrozen_cnt++;
3702 while ((devq->alloc_queue.entries > 0)
3703 && (devq->alloc_openings > 0)
3704 && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3705 struct cam_ed_qinfo *qinfo;
3706 struct cam_ed *device;
3707 union ccb *work_ccb;
3708 struct cam_periph *drv;
3709 struct camq *drvq;
3711 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3712 CAMQ_HEAD);
3713 device = qinfo->device;
3715 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3716 ("running device %p\n", device));
3718 drvq = &device->drvq;
3720 #ifdef CAMDEBUG
3721 if (drvq->entries <= 0) {
3722 panic("xpt_run_dev_allocq: "
3723 "Device on queue without any work to do");
3725 #endif
3726 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3727 devq->alloc_openings--;
3728 devq->alloc_active++;
3729 drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3730 xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3731 drv->pinfo.priority);
3732 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3733 ("calling periph start\n"));
3734 drv->periph_start(drv, work_ccb);
3735 } else {
3737 * Malloc failure in alloc_ccb
3740 * XXX add us to a list to be run from free_ccb
3741 * if we don't have any ccbs active on this
3742 * device queue; otherwise we may never get run
3743 * again.
3745 break;
3748 if (drvq->entries > 0) {
3749 /* We have more work. Attempt to reschedule */
3750 xpt_schedule_dev_allocq(bus, device);
3753 devq->alloc_queue.qfrozen_cnt--;
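/*
 * Drain the bus's send queue: while send openings remain and the queue is
 * not frozen, dispatch queued CCBs to their SIMs, honoring per-device
 * freezes and the high power command limit.
 */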
3756 static void
3757 xpt_run_dev_sendq(struct cam_eb *bus)
3759 struct cam_devq *devq;
3761 if ((devq = bus->sim->devq) == NULL) {
3762 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq: NULL devq\n"));
3763 return;
3765 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3767 devq->send_queue.qfrozen_cnt++;
3768 while ((devq->send_queue.entries > 0)
3769 && (devq->send_openings > 0)) {
3770 struct cam_ed_qinfo *qinfo;
3771 struct cam_ed *device;
3772 union ccb *work_ccb;
3773 struct cam_sim *sim;
3775 if (devq->send_queue.qfrozen_cnt > 1) {
3776 break;
3779 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3780 CAMQ_HEAD);
3781 device = qinfo->device;
3784 * If the device has been "frozen", don't attempt
3785 * to run it.
3787 if (device->qfrozen_cnt > 0) {
3788 continue;
3791 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3792 ("running device %p\n", device));
3794 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3795 if (work_ccb == NULL) {
3796 kprintf("device on run queue with no ccbs???\n");
3797 continue;
3800 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3802 lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
3803 if (xsoftc.num_highpower <= 0) {
3805 * We got a high power command, but we
3806 * don't have any available slots. Freeze
3807 * the device queue until we have a slot
3808 * available.
3810 device->qfrozen_cnt++;
3811 STAILQ_INSERT_TAIL(&xsoftc.highpowerq,
3812 &work_ccb->ccb_h,
3813 xpt_links.stqe);
3815 lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
3816 continue;
3817 } else {
3819 * Consume a high power slot while
3820 * this ccb runs.
3822 xsoftc.num_highpower--;
3824 lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
3826 devq->active_dev = device;
3827 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3829 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3831 devq->send_openings--;
3832 devq->send_active++;
3834 if (device->ccbq.queue.entries > 0)
3835 xpt_schedule_dev_sendq(bus, device);
3837 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3839 * The client wants to freeze the queue
3840 * after this CCB is sent.
3842 device->qfrozen_cnt++;
3845 /* In Target mode, the peripheral driver knows best... */
3846 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3847 if ((device->inq_flags & SID_CmdQue) != 0
3848 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3849 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3850 else
3852 * Clear this in case of a retried CCB that
3853 * failed due to a rejected tag.
3855 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3859 * Device queues can be shared among multiple sim instances
3860 * that reside on different busses. Use the SIM in the queue
3861 * CCB's path, rather than the one in the bus that was passed
3862 * into this function.
3864 sim = work_ccb->ccb_h.path->bus->sim;
3865 (*(sim->sim_action))(sim, work_ccb);
3867 devq->active_dev = NULL;
3869 devq->send_queue.qfrozen_cnt--;
3873 * This function merges the relevant fields from the slave ccb into the
3874 * master ccb, while keeping important fields in the master ccb constant.
3876 void
3877 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3880 * Pull fields that are valid for peripheral drivers to set
3881 * into the master CCB along with the CCB "payload".
3883 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3884 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3885 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3886 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
3887 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3888 sizeof(union ccb) - sizeof(struct ccb_hdr));
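/*
 * Initialize a CCB header for the given path and priority, filling in the
 * path, target and lun ids (or wildcards when the path has no target or
 * device).
 */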
3891 void
3892 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3894 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3895 callout_init(&ccb_h->timeout_ch);
3896 ccb_h->pinfo.priority = priority;
3897 ccb_h->path = path;
3898 ccb_h->path_id = path->bus->path_id;
3899 if (path->target)
3900 ccb_h->target_id = path->target->target_id;
3901 else
3902 ccb_h->target_id = CAM_TARGET_WILDCARD;
3903 if (path->device) {
3904 ccb_h->target_lun = path->device->lun_id;
3905 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3906 } else {
3907 ccb_h->target_lun = CAM_TARGET_WILDCARD;
3909 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3910 ccb_h->flags = 0;
3913 /* Path manipulation functions */
3914 cam_status
3915 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3916 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3918 struct cam_path *path;
3919 cam_status status;
3921 path = kmalloc(sizeof(*path), M_CAMXPT, M_INTWAIT);
3922 status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3923 if (status != CAM_REQ_CMP) {
3924 kfree(path, M_CAMXPT);
3925 path = NULL;
3927 *new_path_ptr = path;
3928 return (status);
3931 cam_status
3932 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
3933 struct cam_periph *periph, path_id_t path_id,
3934 target_id_t target_id, lun_id_t lun_id)
3936 struct cam_path *path;
3937 struct cam_eb *bus = NULL;
3938 cam_status status;
3939 int need_unlock = 0;
3941 path = (struct cam_path *)kmalloc(sizeof(*path), M_CAMXPT, M_WAITOK);
3943 if (path_id != CAM_BUS_WILDCARD) {
3944 bus = xpt_find_bus(path_id);
3945 if (bus != NULL) {
3946 need_unlock = 1;
3947 CAM_SIM_LOCK(bus->sim);
3950 status = xpt_compile_path(path, periph, path_id, target_id, lun_id);
3951 if (need_unlock)
3952 CAM_SIM_UNLOCK(bus->sim);
3953 if (status != CAM_REQ_CMP) {
3954 kfree(path, M_CAMXPT);
3955 path = NULL;
3957 *new_path_ptr = path;
3958 return (status);
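/*
 * Fill in a caller-allocated path for the given bus/target/lun, creating
 * the target and device nodes if they don't already exist.  On failure,
 * any references taken along the way are released.
 */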
3961 static cam_status
3962 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3963 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3965 struct cam_eb *bus;
3966 struct cam_et *target;
3967 struct cam_ed *device;
3968 cam_status status;
3970 status = CAM_REQ_CMP; /* Completed without error */
3971 target = NULL; /* Wildcarded */
3972 device = NULL; /* Wildcarded */
3975 * We will potentially modify the EDT, so block interrupts
3976 * that may attempt to create cam paths.
3978 bus = xpt_find_bus(path_id);
3979 if (bus == NULL) {
3980 status = CAM_PATH_INVALID;
3981 } else {
3982 target = xpt_find_target(bus, target_id);
3983 if (target == NULL) {
3984 /* Create one */
3985 struct cam_et *new_target;
3987 new_target = xpt_alloc_target(bus, target_id);
3988 if (new_target == NULL) {
3989 status = CAM_RESRC_UNAVAIL;
3990 } else {
3991 target = new_target;
3994 if (target != NULL) {
3995 device = xpt_find_device(target, lun_id);
3996 if (device == NULL) {
3997 /* Create one */
3998 struct cam_ed *new_device;
4000 new_device = xpt_alloc_device(bus,
4001 target,
4002 lun_id);
4003 if (new_device == NULL) {
4004 status = CAM_RESRC_UNAVAIL;
4005 } else {
4006 device = new_device;
4013 * Only touch the user's data if we are successful.
4015 if (status == CAM_REQ_CMP) {
4016 new_path->periph = perph;
4017 new_path->bus = bus;
4018 new_path->target = target;
4019 new_path->device = device;
4020 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
4021 } else {
4022 if (device != NULL)
4023 xpt_release_device(bus, target, device);
4024 if (target != NULL)
4025 xpt_release_target(bus, target);
4026 if (bus != NULL)
4027 xpt_release_bus(bus);
4029 return (status);
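/*
 * Release the bus, target and device references held by a path and clear
 * the corresponding pointers.
 */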
4032 static void
4033 xpt_release_path(struct cam_path *path)
4035 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
4036 if (path->device != NULL) {
4037 xpt_release_device(path->bus, path->target, path->device);
4038 path->device = NULL;
4040 if (path->target != NULL) {
4041 xpt_release_target(path->bus, path->target);
4042 path->target = NULL;
4044 if (path->bus != NULL) {
4045 xpt_release_bus(path->bus);
4046 path->bus = NULL;
4050 void
4051 xpt_free_path(struct cam_path *path)
4053 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
4054 xpt_release_path(path);
4055 kfree(path, M_CAMXPT);
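/*
 * A minimal usage sketch (illustrative only, not part of the original
 * source): a caller typically pairs xpt_create_path() with xpt_free_path(),
 * using hypothetical path_id/target_id/lun_id values, e.g.
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, path_id, target_id, lun_id) ==
 *	    CAM_REQ_CMP) {
 *		(use the path with xpt_setup_ccb() and xpt_action())
 *		xpt_free_path(path);
 *	}
 */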
4060 * Return -1 for failure, 0 for exact match, 1 for match with wildcards
4061 * in path1, 2 for match with wildcards in path2.
4064 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
4066 int retval = 0;
4068 if (path1->bus != path2->bus) {
4069 if (path1->bus->path_id == CAM_BUS_WILDCARD)
4070 retval = 1;
4071 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
4072 retval = 2;
4073 else
4074 return (-1);
4076 if (path1->target != path2->target) {
4077 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
4078 if (retval == 0)
4079 retval = 1;
4080 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
4081 retval = 2;
4082 else
4083 return (-1);
4085 if (path1->device != path2->device) {
4086 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
4087 if (retval == 0)
4088 retval = 1;
4089 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
4090 retval = 2;
4091 else
4092 return (-1);
4094 return (retval);
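/*
 * Sketch of how the return value is typically consumed (an assumption
 * about callers in general, not a quote from one): any non-negative
 * result means the two paths refer to the same nexus, possibly via
 * wildcards:
 *
 *	if (xpt_path_comp(path1, path2) >= 0) {
 *		... deliver the event / match the peripheral ...
 *	}
 */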
4097 void
4098 xpt_print_path(struct cam_path *path)
4101 if (path == NULL)
4102 kprintf("(nopath): ");
4103 else {
4104 if (path->periph != NULL)
4105 kprintf("(%s%d:", path->periph->periph_name,
4106 path->periph->unit_number);
4107 else
4108 kprintf("(noperiph:");
4110 if (path->bus != NULL)
4111 kprintf("%s%d:%d:", path->bus->sim->sim_name,
4112 path->bus->sim->unit_number,
4113 path->bus->sim->bus_id);
4114 else
4115 kprintf("nobus:");
4117 if (path->target != NULL)
4118 kprintf("%d:", path->target->target_id);
4119 else
4120 kprintf("X:");
4122 if (path->device != NULL)
4123 kprintf("%d): ", path->device->lun_id);
4124 else
4125 kprintf("X): ");
4129 void
4130 xpt_print(struct cam_path *path, const char *fmt, ...)
4132 __va_list ap;
4133 xpt_print_path(path);
4134 __va_start(ap, fmt);
4135 kvprintf(fmt, ap);
4136 __va_end(ap);
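/*
 * Minimal usage sketch (the message text is made up): prefix a
 * diagnostic with the "(da0:ahc0:0:0:0): " style banner printed above:
 *
 *	xpt_print(path, "retrying command, %d retries left\n", retries);
 */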
4140 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
4142 struct sbuf sb;
4144 sim_lock_assert_owned(path->bus->sim->lock);
4146 sbuf_new(&sb, str, str_len, 0);
4148 if (path == NULL)
4149 sbuf_printf(&sb, "(nopath): ");
4150 else {
4151 if (path->periph != NULL)
4152 sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
4153 path->periph->unit_number);
4154 else
4155 sbuf_printf(&sb, "(noperiph:");
4157 if (path->bus != NULL)
4158 sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
4159 path->bus->sim->unit_number,
4160 path->bus->sim->bus_id);
4161 else
4162 sbuf_printf(&sb, "nobus:");
4164 if (path->target != NULL)
4165 sbuf_printf(&sb, "%d:", path->target->target_id);
4166 else
4167 sbuf_printf(&sb, "X:");
4169 if (path->device != NULL)
4170 sbuf_printf(&sb, "%d): ", path->device->lun_id);
4171 else
4172 sbuf_printf(&sb, "X): ");
4174 sbuf_finish(&sb);
4176 return(sbuf_len(&sb));
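/*
 * Usage sketch (buffer size is an arbitrary illustrative choice): format
 * the same banner into a caller-supplied buffer instead of printing it:
 *
 *	char pathstr[64];
 *
 *	xpt_path_string(path, pathstr, sizeof(pathstr));
 *	kprintf("%scommand timed out\n", pathstr);
 */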
4179 path_id_t
4180 xpt_path_path_id(struct cam_path *path)
4182 sim_lock_assert_owned(path->bus->sim->lock);
4184 return(path->bus->path_id);
4187 target_id_t
4188 xpt_path_target_id(struct cam_path *path)
4190 sim_lock_assert_owned(path->bus->sim->lock);
4192 if (path->target != NULL)
4193 return (path->target->target_id);
4194 else
4195 return (CAM_TARGET_WILDCARD);
4198 lun_id_t
4199 xpt_path_lun_id(struct cam_path *path)
4201 sim_lock_assert_owned(path->bus->sim->lock);
4203 if (path->device != NULL)
4204 return (path->device->lun_id);
4205 else
4206 return (CAM_LUN_WILDCARD);
4209 struct cam_sim *
4210 xpt_path_sim(struct cam_path *path)
4212 return (path->bus->sim);
4215 struct cam_periph*
4216 xpt_path_periph(struct cam_path *path)
4218 sim_lock_assert_owned(path->bus->sim->lock);
4220 return (path->periph);
4223 char *
4224 xpt_path_serialno(struct cam_path *path)
4226 return (path->device->serial_num);
4230 * Release a CAM control block for the caller. Remit the cost of the structure
4231 * to the device referenced by the path. If this device had no 'credits'
4232 * and peripheral drivers have registered async callbacks for this notification,
4233 * call them now.
4235 void
4236 xpt_release_ccb(union ccb *free_ccb)
4238 struct cam_path *path;
4239 struct cam_ed *device;
4240 struct cam_eb *bus;
4241 struct cam_sim *sim;
4243 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
4244 path = free_ccb->ccb_h.path;
4245 device = path->device;
4246 bus = path->bus;
4247 sim = bus->sim;
4249 sim_lock_assert_owned(sim->lock);
4251 cam_ccbq_release_opening(&device->ccbq);
4252 if (sim->ccb_count > sim->max_ccbs) {
4253 xpt_free_ccb(free_ccb);
4254 sim->ccb_count--;
4255 } else if (sim == &cam_dead_sim) {
4256 xpt_free_ccb(free_ccb);
4257 } else {
4258 SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h,
4259 xpt_links.sle);
4261 if (sim->devq == NULL) {
4262 return;
4264 sim->devq->alloc_openings++;
4265 sim->devq->alloc_active--;
4266 /* XXX Turn this into an inline function - xpt_run_device?? */
4267 if ((device_is_alloc_queued(device) == 0)
4268 && (device->drvq.entries > 0)) {
4269 xpt_schedule_dev_allocq(bus, device);
4271 if (dev_allocq_is_runnable(sim->devq))
4272 xpt_run_dev_allocq(bus);
4275 /* Functions accessed by SIM drivers */
4278 * A sim structure, listing the SIM entry points and instance
4279 * identification info, is passed to xpt_bus_register to hook the SIM
4280 * into the CAM framework. xpt_bus_register creates a cam_eb entry
4281 * for this new bus and places it in the array of busses and assigns
4282 * it a path_id. The path_id may be influenced by "hard wiring"
4283 * information specified by the user. Once interrupt services are
4284 * available, the bus will be probed.
4286 int32_t
4287 xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
4289 struct cam_eb *new_bus;
4290 struct cam_eb *old_bus;
4291 struct ccb_pathinq cpi;
4293 sim_lock_assert_owned(sim->lock);
4295 sim->bus_id = bus;
4296 new_bus = kmalloc(sizeof(*new_bus), M_CAMXPT, M_INTWAIT);
4298 if (strcmp(sim->sim_name, "xpt") != 0) {
4299 sim->path_id =
4300 xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4303 TAILQ_INIT(&new_bus->et_entries);
4304 new_bus->path_id = sim->path_id;
4305 new_bus->sim = sim;
4306 ++sim->refcount;
4307 timevalclear(&new_bus->last_reset);
4308 new_bus->flags = 0;
4309 new_bus->refcount = 1; /* Held until a bus_deregister event */
4310 new_bus->generation = 0;
4311 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
4312 old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4313 while (old_bus != NULL
4314 && old_bus->path_id < new_bus->path_id)
4315 old_bus = TAILQ_NEXT(old_bus, links);
4316 if (old_bus != NULL)
4317 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4318 else
4319 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
4320 xsoftc.bus_generation++;
4321 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
4323 /* Notify interested parties */
4324 if (sim->path_id != CAM_XPT_PATH_ID) {
4325 struct cam_path path;
4327 xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
4328 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4329 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
4330 cpi.ccb_h.func_code = XPT_PATH_INQ;
4331 xpt_action((union ccb *)&cpi);
4332 xpt_async(AC_PATH_REGISTERED, &path, &cpi);
4333 xpt_release_path(&path);
4335 return (CAM_SUCCESS);
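/*
 * Rough attach/detach flow as seen from a SIM driver (argument lists
 * elided and error handling omitted; treat this as an outline rather
 * than a verbatim example):
 *
 *	sim = cam_sim_alloc(...);
 *	if (xpt_bus_register(sim, bus_number) != CAM_SUCCESS)
 *		... bail out ...
 *	...
 *	xpt_bus_deregister(cam_sim_path(sim));
 *	cam_sim_free(sim, ...);
 */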
4339 * Deregister a bus. We must clean out all transactions pending on the bus.
4340 * This routine is typically called prior to cam_sim_free() (e.g. see
4341 * dev/usbmisc/umass/umass.c)
4343 int32_t
4344 xpt_bus_deregister(path_id_t pathid)
4346 struct cam_path bus_path;
4347 struct cam_et *target;
4348 struct cam_ed *device;
4349 struct cam_ed_qinfo *qinfo;
4350 struct cam_devq *devq;
4351 struct cam_periph *periph;
4352 struct cam_sim *ccbsim;
4353 union ccb *work_ccb;
4354 cam_status status;
4355 int retries = 0;
4357 status = xpt_compile_path(&bus_path, NULL, pathid,
4358 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4359 if (status != CAM_REQ_CMP)
4360 return (status);
4363 * This should clear out all pending requests and timeouts, but
4364 * the ccbs may be queued to a software interrupt.
4366 * XXX AC_LOST_DEVICE does not precisely abort the pending requests,
4367 * and it really ought to.
4369 xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4370 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4373 * Mark the SIM as having been deregistered. This prevents
4374 * certain operations from re-queueing to it, stops new devices
4375 * from being added, etc.
4377 devq = bus_path.bus->sim->devq;
4378 ccbsim = bus_path.bus->sim;
4379 ccbsim->flags |= CAM_SIM_DEREGISTERED;
4381 again:
4383 * Execute any pending operations now.
4385 while ((qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
4386 CAMQ_HEAD)) != NULL ||
4387 (qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
4388 CAMQ_HEAD)) != NULL) {
4389 do {
4390 device = qinfo->device;
4391 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
4392 if (work_ccb != NULL) {
4393 devq->active_dev = device;
4394 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
4395 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
4396 (*(ccbsim->sim_action))(ccbsim, work_ccb);
4399 periph = (struct cam_periph *)camq_remove(&device->drvq,
4400 CAMQ_HEAD);
4401 if (periph != NULL)
4402 xpt_schedule(periph, periph->pinfo.priority);
4403 } while (work_ccb != NULL || periph != NULL);
4407 * Make sure all completed CCBs are processed.
4409 while (!TAILQ_EMPTY(&ccbsim->sim_doneq)) {
4410 camisr_runqueue(ccbsim);
4414 * Check for requeues; reissue asyncs if necessary
4416 if (CAMQ_GET_HEAD(&devq->send_queue))
4417 kprintf("camq: devq send_queue still in use (%d entries)\n",
4418 devq->send_queue.entries);
4419 if (CAMQ_GET_HEAD(&devq->alloc_queue))
4420 kprintf("camq: devq alloc_queue still in use (%d entries)\n",
4421 devq->alloc_queue.entries);
4422 if (CAMQ_GET_HEAD(&devq->send_queue) ||
4423 CAMQ_GET_HEAD(&devq->alloc_queue)) {
4424 if (++retries < 5) {
4425 xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4426 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4427 goto again;
4432 * Retarget the bus and all cached sim pointers to dead_sim.
4434 * Various CAM subsystems may be holding on to targets, devices,
4435 * and/or peripherals and may attempt to use the sim pointer cached
4436 * in some of these structures during close.
4438 bus_path.bus->sim = &cam_dead_sim;
4439 TAILQ_FOREACH(target, &bus_path.bus->et_entries, links) {
4440 TAILQ_FOREACH(device, &target->ed_entries, links) {
4441 device->sim = &cam_dead_sim;
4442 SLIST_FOREACH(periph, &device->periphs, periph_links) {
4443 periph->sim = &cam_dead_sim;
4449 * Repeat the asyncs for the benefit of any new devices, such as
4450 * might be created from completed probes. Any new device
4451 * ops will run on dead_sim.
4453 * XXX There are probably races :-(
4455 CAM_SIM_LOCK(&cam_dead_sim);
4456 xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4457 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4458 CAM_SIM_UNLOCK(&cam_dead_sim);
4460 /* Release the reference count held while registered. */
4461 xpt_release_bus(bus_path.bus);
4462 xpt_release_path(&bus_path);
4464 /* Release the ref we got when the bus was registered */
4465 cam_sim_release(ccbsim, 0);
4467 return (CAM_REQ_CMP);
4470 static path_id_t
4471 xptnextfreepathid(void)
4473 struct cam_eb *bus;
4474 path_id_t pathid;
4475 char *strval;
4477 pathid = 0;
4478 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
4479 bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4480 retry:
4481 /* Find an unoccupied pathid */
4482 while (bus != NULL && bus->path_id <= pathid) {
4483 if (bus->path_id == pathid)
4484 pathid++;
4485 bus = TAILQ_NEXT(bus, links);
4487 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
4490 * Ensure that this pathid is not reserved for
4491 * a bus that may be registered in the future.
4493 if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4494 ++pathid;
4495 /* Start the search over */
4496 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
4497 goto retry;
4499 return (pathid);
4502 static path_id_t
4503 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4505 path_id_t pathid;
4506 int i, dunit, val;
4507 char buf[32];
4509 pathid = CAM_XPT_PATH_ID;
4510 ksnprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4511 i = -1;
4512 while ((i = resource_query_string(i, "at", buf)) != -1) {
4513 if (strcmp(resource_query_name(i), "scbus")) {
4514 /* Avoid a bit of foot shooting. */
4515 continue;
4517 dunit = resource_query_unit(i);
4518 if (dunit < 0) /* unwired?! */
4519 continue;
4520 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4521 if (sim_bus == val) {
4522 pathid = dunit;
4523 break;
4525 } else if (sim_bus == 0) {
4526 /* Unspecified matches bus 0 */
4527 pathid = dunit;
4528 break;
4529 } else {
4530 kprintf("Ambiguous scbus configuration for %s%d "
4531 "bus %d, cannot wire down. The kernel "
4532 "config entry for scbus%d should "
4533 "specify a controller bus.\n"
4534 "Scbus will be assigned dynamically.\n",
4535 sim_name, sim_unit, sim_bus, dunit);
4536 break;
4540 if (pathid == CAM_XPT_PATH_ID)
4541 pathid = xptnextfreepathid();
4542 return (pathid);
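/*
 * Wiring note (the config syntax shown is an assumption; the resource
 * names "scbus", "at" and "bus" are exactly what the code above
 * consults): a hard-wired entry along the lines of
 *
 *	device scbus0 at ahc0 bus 0
 *
 * pins path_id 0 to the first bus of ahc0 regardless of probe order;
 * anything not wired falls through to xptnextfreepathid().
 */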
4545 void
4546 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4548 struct cam_eb *bus;
4549 struct cam_et *target, *next_target;
4550 struct cam_ed *device, *next_device;
4552 sim_lock_assert_owned(path->bus->sim->lock);
4554 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
4557 * Most async events come from a CAM interrupt context. In
4558 * a few cases, the error recovery code at the peripheral layer,
4559 * which may run from our SWI or a process context, may signal
4560 * deferred events with a call to xpt_async.
4563 bus = path->bus;
4565 if (async_code == AC_BUS_RESET) {
4566 /* Update our notion of when the last reset occurred */
4567 microuptime(&bus->last_reset);
4570 for (target = TAILQ_FIRST(&bus->et_entries);
4571 target != NULL;
4572 target = next_target) {
4574 next_target = TAILQ_NEXT(target, links);
4576 if (path->target != target
4577 && path->target->target_id != CAM_TARGET_WILDCARD
4578 && target->target_id != CAM_TARGET_WILDCARD)
4579 continue;
4581 if (async_code == AC_SENT_BDR) {
4582 /* Update our notion of when the last reset occurred */
4583 microuptime(&path->target->last_reset);
4586 for (device = TAILQ_FIRST(&target->ed_entries);
4587 device != NULL;
4588 device = next_device) {
4590 next_device = TAILQ_NEXT(device, links);
4592 if (path->device != device
4593 && path->device->lun_id != CAM_LUN_WILDCARD
4594 && device->lun_id != CAM_LUN_WILDCARD)
4595 continue;
4597 xpt_dev_async(async_code, bus, target,
4598 device, async_arg);
4600 xpt_async_bcast(&device->asyncs, async_code,
4601 path, async_arg);
4606 * If this wasn't a fully wildcarded async, tell all
4607 * clients that want all async events.
4609 if (bus != xpt_periph->path->bus)
4610 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4611 path, async_arg);
4614 static void
4615 xpt_async_bcast(struct async_list *async_head,
4616 u_int32_t async_code,
4617 struct cam_path *path, void *async_arg)
4619 struct async_node *cur_entry;
4621 cur_entry = SLIST_FIRST(async_head);
4622 while (cur_entry != NULL) {
4623 struct async_node *next_entry;
4625 * Grab the next list entry before we call the current
4626 * entry's callback. This is because the callback function
4627 * can delete its async callback entry.
4629 next_entry = SLIST_NEXT(cur_entry, links);
4630 if ((cur_entry->event_enable & async_code) != 0)
4631 cur_entry->callback(cur_entry->callback_arg,
4632 async_code, path,
4633 async_arg);
4634 cur_entry = next_entry;
4639 * Handle any per-device event notifications that require action by the XPT.
4641 static void
4642 xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
4643 struct cam_ed *device, void *async_arg)
4645 cam_status status;
4646 struct cam_path newpath;
4649 * We only need to handle events for real devices.
4651 if (target->target_id == CAM_TARGET_WILDCARD
4652 || device->lun_id == CAM_LUN_WILDCARD)
4653 return;
4656 * We need our own path with wildcards expanded to
4657 * handle certain types of events.
4659 if ((async_code == AC_SENT_BDR)
4660 || (async_code == AC_BUS_RESET)
4661 || (async_code == AC_INQ_CHANGED))
4662 status = xpt_compile_path(&newpath, NULL,
4663 bus->path_id,
4664 target->target_id,
4665 device->lun_id);
4666 else
4667 status = CAM_REQ_CMP_ERR;
4669 if (status == CAM_REQ_CMP) {
4672 * Allow transfer negotiation to occur in a
4673 * tag free environment.
4675 if (async_code == AC_SENT_BDR
4676 || async_code == AC_BUS_RESET)
4677 xpt_toggle_tags(&newpath);
4679 if (async_code == AC_INQ_CHANGED) {
4681 * We've sent a start unit command, or
4682 * something similar to a device that
4683 * may have caused its inquiry data to
4684 * change. So we re-scan the device to
4685 * refresh the inquiry data for it.
4687 xpt_scan_lun(newpath.periph, &newpath,
4688 CAM_EXPECT_INQ_CHANGE, NULL);
4690 xpt_release_path(&newpath);
4691 } else if (async_code == AC_LOST_DEVICE) {
4693 * When we lose a device the device may be about to detach
4694 * the sim, we have to clear out all pending timeouts and
4695 * requests before that happens. XXX it would be nice if
4696 * we could abort the requests pertaining to the device.
4698 xpt_release_devq_timeout(device);
4699 if ((device->flags & CAM_DEV_UNCONFIGURED) == 0) {
4700 device->flags |= CAM_DEV_UNCONFIGURED;
4701 xpt_release_device(bus, target, device);
4703 } else if (async_code == AC_TRANSFER_NEG) {
4704 struct ccb_trans_settings *settings;
4706 settings = (struct ccb_trans_settings *)async_arg;
4707 xpt_set_transfer_settings(settings, device,
4708 /*async_update*/TRUE);
4712 u_int32_t
4713 xpt_freeze_devq(struct cam_path *path, u_int count)
4715 struct ccb_hdr *ccbh;
4717 sim_lock_assert_owned(path->bus->sim->lock);
4719 path->device->qfrozen_cnt += count;
4722 * Mark the last CCB in the queue as needing
4723 * to be requeued if the driver hasn't
4724 * changed its state yet. This fixes a race
4725 * where a ccb is just about to be queued to
4726 * a controller driver when its interrupt routine
4727 * freezes the queue. To completely close the
4728 * hole, controller drivers must check to see
4729 * if a ccb's status is still CAM_REQ_INPROG
4730 * just before they queue
4731 * the CCB. See ahc_action/ahc_freeze_devq for
4732 * an example.
4734 ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
4735 if (ccbh && ccbh->status == CAM_REQ_INPROG)
4736 ccbh->status = CAM_REQUEUE_REQ;
4737 return (path->device->qfrozen_cnt);
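/*
 * The freeze is counted: every xpt_freeze_devq(path, n) needs a matching
 * xpt_release_devq(path, n, run_queue) or the device queue never thaws.
 * A minimal sketch of the usual pairing (not from a specific driver):
 *
 *	xpt_freeze_devq(path, 1);
 *	... perform recovery ...
 *	xpt_release_devq(path, 1, TRUE);
 */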
4740 u_int32_t
4741 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4743 sim_lock_assert_owned(sim->lock);
4745 if (sim->devq == NULL)
4746 return(count);
4747 sim->devq->send_queue.qfrozen_cnt += count;
4748 if (sim->devq->active_dev != NULL) {
4749 struct ccb_hdr *ccbh;
4751 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
4752 ccb_hdr_tailq);
4753 if (ccbh && ccbh->status == CAM_REQ_INPROG)
4754 ccbh->status = CAM_REQUEUE_REQ;
4756 return (sim->devq->send_queue.qfrozen_cnt);
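/*
 * Same counting rule at the SIM level: xpt_freeze_simq(sim, n) is
 * balanced by n calls to xpt_release_simq(sim, run_queue), since that
 * routine drops the frozen count by one per call, e.g.
 *
 *	xpt_freeze_simq(sim, 1);
 *	... controller reset ...
 *	xpt_release_simq(sim, TRUE);
 */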
4760 * WARNING: most devices, especially USB/UMASS, may detach their sim early.
4761 * We ref-count the sim (and the bus only NULLs it out when the bus has been
4762 * freed, which is not the case here), but the device queue is also freed XXX
4763 * and we have to check that here.
4765 * XXX fixme: could we simply not null-out the device queue via
4766 * cam_sim_free()?
4768 static void
4769 xpt_release_devq_timeout(void *arg)
4771 struct cam_ed *device;
4773 device = (struct cam_ed *)arg;
4775 xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
4778 void
4779 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4781 sim_lock_assert_owned(path->bus->sim->lock);
4783 xpt_release_devq_device(path->device, count, run_queue);
4786 static void
4787 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4789 int rundevq;
4791 rundevq = 0;
4793 if (dev->qfrozen_cnt > 0) {
4795 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
4796 dev->qfrozen_cnt -= count;
4797 if (dev->qfrozen_cnt == 0) {
4800 * No longer need to wait for a successful
4801 * command completion.
4803 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4806 * Remove any timeouts that might be scheduled
4807 * to release this queue.
4809 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4810 callout_stop(&dev->callout);
4811 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4815 * Now that we are unfrozen, schedule the
4816 * device so any pending transactions are
4817 * run.
4819 if ((dev->ccbq.queue.entries > 0)
4820 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4821 && (run_queue != 0)) {
4822 rundevq = 1;
4826 if (rundevq != 0)
4827 xpt_run_dev_sendq(dev->target->bus);
4830 void
4831 xpt_release_simq(struct cam_sim *sim, int run_queue)
4833 struct camq *sendq;
4835 sim_lock_assert_owned(sim->lock);
4837 if (sim->devq == NULL)
4838 return;
4840 sendq = &(sim->devq->send_queue);
4841 if (sendq->qfrozen_cnt > 0) {
4842 sendq->qfrozen_cnt--;
4843 if (sendq->qfrozen_cnt == 0) {
4844 struct cam_eb *bus;
4847 * If there is a timeout scheduled to release this
4848 * sim queue, remove it. The queue frozen count is
4849 * already at 0.
4851 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4852 callout_stop(&sim->callout);
4853 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4855 bus = xpt_find_bus(sim->path_id);
4857 if (run_queue) {
4859 * Now that we are unfrozen, run the send queue.
4861 xpt_run_dev_sendq(bus);
4863 xpt_release_bus(bus);
4868 void
4869 xpt_done(union ccb *done_ccb)
4871 struct cam_sim *sim;
4873 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4874 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4876 * Queue up the request for handling by our SWI handler;
4877 * this covers any of the "non-immediate" types of ccbs.
4879 sim = done_ccb->ccb_h.path->bus->sim;
4880 switch (done_ccb->ccb_h.path->periph->type) {
4881 case CAM_PERIPH_BIO:
4882 spin_lock_wr(&sim->sim_spin);
4883 TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h,
4884 sim_links.tqe);
4885 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4886 spin_unlock_wr(&sim->sim_spin);
4887 if ((sim->flags & CAM_SIM_ON_DONEQ) == 0) {
4888 spin_lock_wr(&cam_simq_spin);
4889 if ((sim->flags & CAM_SIM_ON_DONEQ) == 0) {
4890 TAILQ_INSERT_TAIL(&cam_simq, sim,
4891 links);
4892 sim->flags |= CAM_SIM_ON_DONEQ;
4894 spin_unlock_wr(&cam_simq_spin);
4896 if ((done_ccb->ccb_h.flags & CAM_POLLED) == 0)
4897 setsoftcambio();
4898 break;
4899 default:
4900 panic("unknown periph type %d",
4901 done_ccb->ccb_h.path->periph->type);
4906 union ccb *
4907 xpt_alloc_ccb(void)
4909 union ccb *new_ccb;
4911 new_ccb = kmalloc(sizeof(*new_ccb), M_CAMXPT, M_INTWAIT | M_ZERO);
4912 return (new_ccb);
4915 void
4916 xpt_free_ccb(union ccb *free_ccb)
4918 kfree(free_ccb, M_CAMXPT);
4923 /* Private XPT functions */
4926 * Get a CAM control block for the caller. Charge the structure to the device
4927 * referenced by the path. If this device has no 'credits' then the
4928 * device already has the maximum number of outstanding operations under way
4929 * and we return NULL. If we don't have sufficient resources to allocate more
4930 * ccbs, we also return NULL.
4932 static union ccb *
4933 xpt_get_ccb(struct cam_ed *device)
4935 union ccb *new_ccb;
4936 struct cam_sim *sim;
4938 sim = device->sim;
4939 if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) {
4940 new_ccb = xpt_alloc_ccb();
4941 if ((sim->flags & CAM_SIM_MPSAFE) == 0)
4942 callout_init(&new_ccb->ccb_h.timeout_ch);
4943 SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h,
4944 xpt_links.sle);
4945 sim->ccb_count++;
4947 cam_ccbq_take_opening(&device->ccbq);
4948 SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
4949 return (new_ccb);
4952 static void
4953 xpt_release_bus(struct cam_eb *bus)
4956 if ((--bus->refcount == 0)
4957 && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
4958 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
4959 TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
4960 xsoftc.bus_generation++;
4961 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
4962 kfree(bus, M_CAMXPT);
4966 static struct cam_et *
4967 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4969 struct cam_et *target;
4970 struct cam_et *cur_target;
4972 target = kmalloc(sizeof(*target), M_CAMXPT, M_INTWAIT);
4974 TAILQ_INIT(&target->ed_entries);
4975 target->bus = bus;
4976 target->target_id = target_id;
4977 target->refcount = 1;
4978 target->generation = 0;
4979 timevalclear(&target->last_reset);
4981 * Hold a reference to our parent bus so it
4982 * will not go away before we do.
4984 bus->refcount++;
4986 /* Insertion sort into our bus's target list */
4987 cur_target = TAILQ_FIRST(&bus->et_entries);
4988 while (cur_target != NULL && cur_target->target_id < target_id)
4989 cur_target = TAILQ_NEXT(cur_target, links);
4991 if (cur_target != NULL) {
4992 TAILQ_INSERT_BEFORE(cur_target, target, links);
4993 } else {
4994 TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4996 bus->generation++;
4997 return (target);
5000 static void
5001 xpt_release_target(struct cam_eb *bus, struct cam_et *target)
5003 if (target->refcount == 1) {
5004 KKASSERT(TAILQ_FIRST(&target->ed_entries) == NULL);
5005 TAILQ_REMOVE(&bus->et_entries, target, links);
5006 bus->generation++;
5007 xpt_release_bus(bus);
5008 KKASSERT(target->refcount == 1);
5009 kfree(target, M_CAMXPT);
5010 } else {
5011 --target->refcount;
5015 static struct cam_ed *
5016 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
5018 struct cam_path path;
5019 struct cam_ed *device;
5020 struct cam_devq *devq;
5021 cam_status status;
5024 * Disallow new devices while trying to deregister a sim
5026 if (bus->sim->flags & CAM_SIM_DEREGISTERED)
5027 return (NULL);
5030 * Make space for us in the device queue on our bus
5032 devq = bus->sim->devq;
5033 if (devq == NULL)
5034 return(NULL);
5035 status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
5037 if (status != CAM_REQ_CMP) {
5038 device = NULL;
5039 } else {
5040 device = kmalloc(sizeof(*device), M_CAMXPT, M_INTWAIT);
5043 if (device != NULL) {
5044 struct cam_ed *cur_device;
5046 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
5047 device->alloc_ccb_entry.device = device;
5048 cam_init_pinfo(&device->send_ccb_entry.pinfo);
5049 device->send_ccb_entry.device = device;
5050 device->target = target;
5051 device->lun_id = lun_id;
5052 device->sim = bus->sim;
5053 /* Initialize our queues */
5054 if (camq_init(&device->drvq, 0) != 0) {
5055 kfree(device, M_CAMXPT);
5056 return (NULL);
5058 if (cam_ccbq_init(&device->ccbq,
5059 bus->sim->max_dev_openings) != 0) {
5060 camq_fini(&device->drvq);
5061 kfree(device, M_CAMXPT);
5062 return (NULL);
5064 SLIST_INIT(&device->asyncs);
5065 SLIST_INIT(&device->periphs);
5066 device->generation = 0;
5067 device->owner = NULL;
5069 * Take the default quirk entry until we have inquiry
5070 * data and can determine a better quirk to use.
5072 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
5073 bzero(&device->inq_data, sizeof(device->inq_data));
5074 device->inq_flags = 0;
5075 device->queue_flags = 0;
5076 device->serial_num = NULL;
5077 device->serial_num_len = 0;
5078 device->qfrozen_cnt = 0;
5079 device->flags = CAM_DEV_UNCONFIGURED;
5080 device->tag_delay_count = 0;
5081 device->tag_saved_openings = 0;
5082 device->refcount = 1;
5083 callout_init(&device->callout);
5086 * Hold a reference to our parent target so it
5087 * will not go away before we do.
5089 target->refcount++;
5092 * XXX should be limited by number of CCBs this bus can
5093 * do.
5095 bus->sim->max_ccbs += device->ccbq.devq_openings;
5096 /* Insertion sort into our target's device list */
5097 cur_device = TAILQ_FIRST(&target->ed_entries);
5098 while (cur_device != NULL && cur_device->lun_id < lun_id)
5099 cur_device = TAILQ_NEXT(cur_device, links);
5100 if (cur_device != NULL) {
5101 TAILQ_INSERT_BEFORE(cur_device, device, links);
5102 } else {
5103 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
5105 target->generation++;
5106 if (lun_id != CAM_LUN_WILDCARD) {
5107 xpt_compile_path(&path,
5108 NULL,
5109 bus->path_id,
5110 target->target_id,
5111 lun_id);
5112 xpt_devise_transport(&path);
5113 xpt_release_path(&path);
5116 return (device);
5119 static void
5120 xpt_reference_device(struct cam_ed *device)
5122 ++device->refcount;
5125 static void
5126 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
5127 struct cam_ed *device)
5129 struct cam_devq *devq;
5131 if (device->refcount == 1) {
5132 KKASSERT(device->flags & CAM_DEV_UNCONFIGURED);
5134 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
5135 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
5136 panic("Removing device while still queued for ccbs");
5138 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
5139 device->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
5140 callout_stop(&device->callout);
5143 TAILQ_REMOVE(&target->ed_entries, device,links);
5144 target->generation++;
5145 bus->sim->max_ccbs -= device->ccbq.devq_openings;
5146 if ((devq = bus->sim->devq) != NULL) {
5147 /* Release our slot in the devq */
5148 cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
5150 camq_fini(&device->drvq);
5151 camq_fini(&device->ccbq.queue);
5152 xpt_release_target(bus, target);
5153 KKASSERT(device->refcount == 1);
5154 kfree(device, M_CAMXPT);
5155 } else {
5156 --device->refcount;
5160 static u_int32_t
5161 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
5163 int diff;
5164 int result;
5165 struct cam_ed *dev;
5167 dev = path->device;
5169 diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
5170 result = cam_ccbq_resize(&dev->ccbq, newopenings);
5171 if (result == CAM_REQ_CMP && (diff < 0)) {
5172 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
5174 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5175 || (dev->inq_flags & SID_CmdQue) != 0)
5176 dev->tag_saved_openings = newopenings;
5177 /* Adjust the global limit */
5178 dev->sim->max_ccbs += diff;
5179 return (result);
5182 static struct cam_eb *
5183 xpt_find_bus(path_id_t path_id)
5185 struct cam_eb *bus;
5187 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
5188 TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
5189 if (bus->path_id == path_id) {
5190 bus->refcount++;
5191 break;
5194 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
5195 return (bus);
5198 static struct cam_et *
5199 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
5201 struct cam_et *target;
5203 TAILQ_FOREACH(target, &bus->et_entries, links) {
5204 if (target->target_id == target_id) {
5205 target->refcount++;
5206 break;
5209 return (target);
5212 static struct cam_ed *
5213 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
5215 struct cam_ed *device;
5217 TAILQ_FOREACH(device, &target->ed_entries, links) {
5218 if (device->lun_id == lun_id) {
5219 device->refcount++;
5220 break;
5223 return (device);
5226 typedef struct {
5227 union ccb *request_ccb;
5228 struct ccb_pathinq *cpi;
5229 int counter;
5230 } xpt_scan_bus_info;
5233 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
5234 * As the scan progresses, xpt_scan_bus is used as the
5235 * completion callback function.
5237 static void
5238 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
5240 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5241 ("xpt_scan_bus\n"));
5242 switch (request_ccb->ccb_h.func_code) {
5243 case XPT_SCAN_BUS:
5245 xpt_scan_bus_info *scan_info;
5246 union ccb *work_ccb;
5247 struct cam_path *path;
5248 u_int i;
5249 u_int max_target;
5250 u_int initiator_id;
5252 /* Find out the characteristics of the bus */
5253 work_ccb = xpt_alloc_ccb();
5254 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
5255 request_ccb->ccb_h.pinfo.priority);
5256 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
5257 xpt_action(work_ccb);
5258 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
5259 request_ccb->ccb_h.status = work_ccb->ccb_h.status;
5260 xpt_free_ccb(work_ccb);
5261 xpt_done(request_ccb);
5262 return;
5265 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5267 * Can't scan the bus on an adapter that
5268 * cannot perform the initiator role.
5270 request_ccb->ccb_h.status = CAM_REQ_CMP;
5271 xpt_free_ccb(work_ccb);
5272 xpt_done(request_ccb);
5273 return;
5276 /* Save some state for use while we probe for devices */
5277 scan_info = (xpt_scan_bus_info *)
5278 kmalloc(sizeof(xpt_scan_bus_info), M_CAMXPT, M_INTWAIT);
5279 scan_info->request_ccb = request_ccb;
5280 scan_info->cpi = &work_ccb->cpi;
5282 /* Cache on our stack so we can work asynchronously */
5283 max_target = scan_info->cpi->max_target;
5284 initiator_id = scan_info->cpi->initiator_id;
5288 * We can scan all targets in parallel, or do it sequentially.
5290 if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
5291 max_target = 0;
5292 scan_info->counter = 0;
5293 } else {
5294 scan_info->counter = scan_info->cpi->max_target + 1;
5295 if (scan_info->cpi->initiator_id < scan_info->counter) {
5296 scan_info->counter--;
5300 for (i = 0; i <= max_target; i++) {
5301 cam_status status;
5302 if (i == initiator_id)
5303 continue;
5305 status = xpt_create_path(&path, xpt_periph,
5306 request_ccb->ccb_h.path_id,
5307 i, 0);
5308 if (status != CAM_REQ_CMP) {
5309 kprintf("xpt_scan_bus: xpt_create_path failed"
5310 " with status %#x, bus scan halted\n",
5311 status);
5312 kfree(scan_info, M_CAMXPT);
5313 request_ccb->ccb_h.status = status;
5314 xpt_free_ccb(work_ccb);
5315 xpt_done(request_ccb);
5316 break;
5318 work_ccb = xpt_alloc_ccb();
5319 xpt_setup_ccb(&work_ccb->ccb_h, path,
5320 request_ccb->ccb_h.pinfo.priority);
5321 work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5322 work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5323 work_ccb->ccb_h.ppriv_ptr0 = scan_info;
5324 work_ccb->crcn.flags = request_ccb->crcn.flags;
5325 xpt_action(work_ccb);
5327 break;
5329 case XPT_SCAN_LUN:
5331 cam_status status;
5332 struct cam_path *path;
5333 xpt_scan_bus_info *scan_info;
5334 path_id_t path_id;
5335 target_id_t target_id;
5336 lun_id_t lun_id;
5338 /* Reuse the same CCB to query if a device was really found */
5339 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
5340 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
5341 request_ccb->ccb_h.pinfo.priority);
5342 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5344 path_id = request_ccb->ccb_h.path_id;
5345 target_id = request_ccb->ccb_h.target_id;
5346 lun_id = request_ccb->ccb_h.target_lun;
5347 xpt_action(request_ccb);
5349 if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
5350 struct cam_ed *device;
5351 struct cam_et *target;
5352 int phl;
5355 * If we already probed lun 0 successfully, or
5356 * we have additional configured luns on this
5357 * target that might have "gone away", go on to
5358 * the next lun.
5360 target = request_ccb->ccb_h.path->target;
5362 * We may touch devices that we don't
5363 * hold references to, so ensure they
5364 * don't disappear out from under us.
5365 * The target above is referenced by the
5366 * path in the request ccb.
5368 phl = 0;
5369 device = TAILQ_FIRST(&target->ed_entries);
5370 if (device != NULL) {
5371 phl = CAN_SRCH_HI_SPARSE(device);
5372 if (device->lun_id == 0)
5373 device = TAILQ_NEXT(device, links);
5375 if ((lun_id != 0) || (device != NULL)) {
5376 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
5377 lun_id++;
5379 } else {
5380 struct cam_ed *device;
5382 device = request_ccb->ccb_h.path->device;
5384 if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
5385 /* Try the next lun */
5386 if (lun_id < (CAM_SCSI2_MAXLUN-1)
5387 || CAN_SRCH_HI_DENSE(device))
5388 lun_id++;
5393 * Free the current request path; we're done with it.
5395 xpt_free_path(request_ccb->ccb_h.path);
5398 * Check to see if we should scan any further luns.
5400 if (lun_id == request_ccb->ccb_h.target_lun
5401 || lun_id > scan_info->cpi->max_lun) {
5402 int done;
5404 hop_again:
5405 done = 0;
5406 if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
5407 scan_info->counter++;
5408 if (scan_info->counter ==
5409 scan_info->cpi->initiator_id) {
5410 scan_info->counter++;
5412 if (scan_info->counter >=
5413 scan_info->cpi->max_target+1) {
5414 done = 1;
5416 } else {
5417 scan_info->counter--;
5418 if (scan_info->counter == 0) {
5419 done = 1;
5422 if (done) {
5423 xpt_free_ccb(request_ccb);
5424 xpt_free_ccb((union ccb *)scan_info->cpi);
5425 request_ccb = scan_info->request_ccb;
5426 kfree(scan_info, M_CAMXPT);
5427 request_ccb->ccb_h.status = CAM_REQ_CMP;
5428 xpt_done(request_ccb);
5429 break;
5432 if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) {
5433 break;
5435 status = xpt_create_path(&path, xpt_periph,
5436 scan_info->request_ccb->ccb_h.path_id,
5437 scan_info->counter, 0);
5438 if (status != CAM_REQ_CMP) {
5439 kprintf("xpt_scan_bus: xpt_create_path failed"
5440 " with status %#x, bus scan halted\n",
5441 status);
5442 xpt_free_ccb(request_ccb);
5443 xpt_free_ccb((union ccb *)scan_info->cpi);
5444 request_ccb = scan_info->request_ccb;
5445 kfree(scan_info, M_CAMXPT);
5446 request_ccb->ccb_h.status = status;
5447 xpt_done(request_ccb);
5448 break;
5450 xpt_setup_ccb(&request_ccb->ccb_h, path,
5451 request_ccb->ccb_h.pinfo.priority);
5452 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5453 request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5454 request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5455 request_ccb->crcn.flags =
5456 scan_info->request_ccb->crcn.flags;
5457 } else {
5458 status = xpt_create_path(&path, xpt_periph,
5459 path_id, target_id, lun_id);
5460 if (status != CAM_REQ_CMP) {
5461 kprintf("xpt_scan_bus: xpt_create_path failed "
5462 "with status %#x, halting LUN scan\n",
5463 status);
5464 goto hop_again;
5466 xpt_setup_ccb(&request_ccb->ccb_h, path,
5467 request_ccb->ccb_h.pinfo.priority);
5468 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5469 request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5470 request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5471 request_ccb->crcn.flags =
5472 scan_info->request_ccb->crcn.flags;
5474 xpt_action(request_ccb);
5475 break;
5477 default:
5478 break;
5482 typedef enum {
5483 PROBE_TUR,
5484 PROBE_INQUIRY, /* this counts as DV0 for Basic Domain Validation */
5485 PROBE_FULL_INQUIRY,
5486 PROBE_MODE_SENSE,
5487 PROBE_SERIAL_NUM_0,
5488 PROBE_SERIAL_NUM_1,
5489 PROBE_TUR_FOR_NEGOTIATION,
5490 PROBE_INQUIRY_BASIC_DV1,
5491 PROBE_INQUIRY_BASIC_DV2,
5492 PROBE_DV_EXIT
5493 } probe_action;
5495 typedef enum {
5496 PROBE_INQUIRY_CKSUM = 0x01,
5497 PROBE_SERIAL_CKSUM = 0x02,
5498 PROBE_NO_ANNOUNCE = 0x04
5499 } probe_flags;
5501 typedef struct {
5502 TAILQ_HEAD(, ccb_hdr) request_ccbs;
5503 probe_action action;
5504 union ccb saved_ccb;
5505 probe_flags flags;
5506 MD5_CTX context;
5507 u_int8_t digest[16];
5508 } probe_softc;
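/*
 * Typical progression of softc->action for a responding device, pieced
 * together from probestart()/probedone() below (several transitions are
 * skipped depending on quirks and inquiry data, so read this as a rough
 * map, not an exhaustive state diagram):
 *
 *	PROBE_TUR -> PROBE_INQUIRY [-> PROBE_FULL_INQUIRY]
 *	 -> PROBE_MODE_SENSE (only when tagged queuing is indicated)
 *	 -> PROBE_SERIAL_NUM_0 [-> PROBE_SERIAL_NUM_1]
 *	 -> PROBE_TUR_FOR_NEGOTIATION
 *	 -> PROBE_INQUIRY_BASIC_DV1/DV2 and PROBE_DV_EXIT for SPI
 *	    domain validation on lun 0.
 */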
5510 static void
5511 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
5512 cam_flags flags, union ccb *request_ccb)
5514 struct ccb_pathinq cpi;
5515 cam_status status;
5516 struct cam_path *new_path;
5517 struct cam_periph *old_periph;
5519 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5520 ("xpt_scan_lun\n"));
5522 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
5523 cpi.ccb_h.func_code = XPT_PATH_INQ;
5524 xpt_action((union ccb *)&cpi);
5526 if (cpi.ccb_h.status != CAM_REQ_CMP) {
5527 if (request_ccb != NULL) {
5528 request_ccb->ccb_h.status = cpi.ccb_h.status;
5529 xpt_done(request_ccb);
5531 return;
5534 if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5536 * Can't scan the bus on an adapter that
5537 * cannot perform the initiator role.
5539 if (request_ccb != NULL) {
5540 request_ccb->ccb_h.status = CAM_REQ_CMP;
5541 xpt_done(request_ccb);
5543 return;
5546 if (request_ccb == NULL) {
5547 request_ccb = kmalloc(sizeof(union ccb), M_CAMXPT, M_INTWAIT);
5548 new_path = kmalloc(sizeof(*new_path), M_CAMXPT, M_INTWAIT);
5549 status = xpt_compile_path(new_path, xpt_periph,
5550 path->bus->path_id,
5551 path->target->target_id,
5552 path->device->lun_id);
5554 if (status != CAM_REQ_CMP) {
5555 xpt_print(path, "xpt_scan_lun: can't compile path, "
5556 "can't continue\n");
5557 kfree(request_ccb, M_CAMXPT);
5558 kfree(new_path, M_CAMXPT);
5559 return;
5561 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
5562 request_ccb->ccb_h.cbfcnp = xptscandone;
5563 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5564 request_ccb->crcn.flags = flags;
5567 if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
5568 probe_softc *softc;
5570 softc = (probe_softc *)old_periph->softc;
5571 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5572 periph_links.tqe);
5573 } else {
5574 status = cam_periph_alloc(proberegister, NULL, probecleanup,
5575 probestart, "probe",
5576 CAM_PERIPH_BIO,
5577 request_ccb->ccb_h.path, NULL, 0,
5578 request_ccb);
5580 if (status != CAM_REQ_CMP) {
5581 xpt_print(path, "xpt_scan_lun: cam_periph_alloc "
5582 "returned an error, can't continue probe\n");
5583 request_ccb->ccb_h.status = status;
5584 xpt_done(request_ccb);
5589 static void
5590 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
5592 xpt_release_path(done_ccb->ccb_h.path);
5593 kfree(done_ccb->ccb_h.path, M_CAMXPT);
5594 kfree(done_ccb, M_CAMXPT);
5597 static cam_status
5598 proberegister(struct cam_periph *periph, void *arg)
5600 union ccb *request_ccb; /* CCB representing the probe request */
5601 cam_status status;
5602 probe_softc *softc;
5604 request_ccb = (union ccb *)arg;
5605 if (periph == NULL) {
5606 kprintf("proberegister: periph was NULL!!\n");
5607 return(CAM_REQ_CMP_ERR);
5610 if (request_ccb == NULL) {
5611 kprintf("proberegister: no probe CCB, "
5612 "can't register device\n");
5613 return(CAM_REQ_CMP_ERR);
5616 softc = kmalloc(sizeof(*softc), M_CAMXPT, M_INTWAIT | M_ZERO);
5617 TAILQ_INIT(&softc->request_ccbs);
5618 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5619 periph_links.tqe);
5620 softc->flags = 0;
5621 periph->softc = softc;
5622 status = cam_periph_acquire(periph);
5623 if (status != CAM_REQ_CMP) {
5624 return (status);
5629 * Ensure we've waited at least a bus settle
5630 * delay before attempting to probe the device.
5631 * For HBAs that don't do bus resets, this won't make a difference.
5633 cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
5634 scsi_delay);
5635 probeschedule(periph);
5636 return(CAM_REQ_CMP);
5639 static void
5640 probeschedule(struct cam_periph *periph)
5642 struct ccb_pathinq cpi;
5643 union ccb *ccb;
5644 probe_softc *softc;
5646 softc = (probe_softc *)periph->softc;
5647 ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5649 xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
5650 cpi.ccb_h.func_code = XPT_PATH_INQ;
5651 xpt_action((union ccb *)&cpi);
5654 * If a device has gone away and another device, or the same one,
5655 * is back in the same place, it should have a unit attention
5656 * condition pending. It will not report the unit attention in
5657 * response to an inquiry, which may leave invalid transfer
5658 * negotiations in effect. The TUR will reveal the unit attention
5659 * condition. Only send the TUR for lun 0, since some devices
5660 * will get confused by commands other than inquiry to non-existent
5661 * luns. If you think a device has gone away, start your scan from
5662 * lun 0. This will ensure that any bogus transfer settings are
5663 * invalidated.
5665 * If we haven't seen the device before and the controller supports
5666 * some kind of transfer negotiation, negotiate with the first
5667 * sent command if no bus reset was performed at startup. This
5668 * ensures that the device is not confused by transfer negotiation
5669 * settings left over by loader or BIOS action.
5671 if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5672 && (ccb->ccb_h.target_lun == 0)) {
5673 softc->action = PROBE_TUR;
5674 } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
5675 && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
5676 proberequestdefaultnegotiation(periph);
5677 softc->action = PROBE_INQUIRY;
5678 } else {
5679 softc->action = PROBE_INQUIRY;
5682 if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
5683 softc->flags |= PROBE_NO_ANNOUNCE;
5684 else
5685 softc->flags &= ~PROBE_NO_ANNOUNCE;
5687 xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
5690 static void
5691 probestart(struct cam_periph *periph, union ccb *start_ccb)
5693 /* Probe the device that our peripheral driver points to */
5694 struct ccb_scsiio *csio;
5695 probe_softc *softc;
5697 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
5699 softc = (probe_softc *)periph->softc;
5700 csio = &start_ccb->csio;
5702 switch (softc->action) {
5703 case PROBE_TUR:
5704 case PROBE_TUR_FOR_NEGOTIATION:
5705 case PROBE_DV_EXIT:
5707 scsi_test_unit_ready(csio,
5708 /*retries*/4,
5709 probedone,
5710 MSG_SIMPLE_Q_TAG,
5711 SSD_FULL_SIZE,
5712 /*timeout*/60000);
5713 break;
5715 case PROBE_INQUIRY:
5716 case PROBE_FULL_INQUIRY:
5717 case PROBE_INQUIRY_BASIC_DV1:
5718 case PROBE_INQUIRY_BASIC_DV2:
5720 u_int inquiry_len;
5721 struct scsi_inquiry_data *inq_buf;
5723 inq_buf = &periph->path->device->inq_data;
5726 * If the device is currently configured, we calculate an
5727 * MD5 checksum of the inquiry data, and if the serial number
5728 * length is greater than 0, add the serial number data
5729 * into the checksum as well. Once the inquiry and the
5730 * serial number check finish, we attempt to figure out
5731 * whether we still have the same device.
5733 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5735 MD5Init(&softc->context);
5736 MD5Update(&softc->context, (unsigned char *)inq_buf,
5737 sizeof(struct scsi_inquiry_data));
5738 softc->flags |= PROBE_INQUIRY_CKSUM;
5739 if (periph->path->device->serial_num_len > 0) {
5740 MD5Update(&softc->context,
5741 periph->path->device->serial_num,
5742 periph->path->device->serial_num_len);
5743 softc->flags |= PROBE_SERIAL_CKSUM;
5745 MD5Final(softc->digest, &softc->context);
5748 if (softc->action == PROBE_INQUIRY)
5749 inquiry_len = SHORT_INQUIRY_LENGTH;
5750 else
5751 inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf);
5754 * Some parallel SCSI devices fail to send an
5755 * ignore wide residue message when dealing with
5756 * odd length inquiry requests. Round up to be
5757 * safe.
5759 inquiry_len = roundup2(inquiry_len, 2);
5761 if (softc->action == PROBE_INQUIRY_BASIC_DV1
5762 || softc->action == PROBE_INQUIRY_BASIC_DV2) {
5763 inq_buf = kmalloc(inquiry_len, M_CAMXPT, M_INTWAIT);
5765 scsi_inquiry(csio,
5766 /*retries*/4,
5767 probedone,
5768 MSG_SIMPLE_Q_TAG,
5769 (u_int8_t *)inq_buf,
5770 inquiry_len,
5771 /*evpd*/FALSE,
5772 /*page_code*/0,
5773 SSD_MIN_SIZE,
5774 /*timeout*/60 * 1000);
5775 break;
5777 case PROBE_MODE_SENSE:
5779 void *mode_buf;
5780 int mode_buf_len;
5782 mode_buf_len = sizeof(struct scsi_mode_header_6)
5783 + sizeof(struct scsi_mode_blk_desc)
5784 + sizeof(struct scsi_control_page);
5785 mode_buf = kmalloc(mode_buf_len, M_CAMXPT, M_INTWAIT);
5786 scsi_mode_sense(csio,
5787 /*retries*/4,
5788 probedone,
5789 MSG_SIMPLE_Q_TAG,
5790 /*dbd*/FALSE,
5791 SMS_PAGE_CTRL_CURRENT,
5792 SMS_CONTROL_MODE_PAGE,
5793 mode_buf,
5794 mode_buf_len,
5795 SSD_FULL_SIZE,
5796 /*timeout*/60000);
5797 break;
5799 case PROBE_SERIAL_NUM_0:
5801 struct scsi_vpd_supported_page_list *vpd_list = NULL;
5802 struct cam_ed *device;
5804 device = periph->path->device;
5805 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0) {
5806 vpd_list = kmalloc(sizeof(*vpd_list), M_CAMXPT,
5807 M_INTWAIT | M_ZERO);
5810 if (vpd_list != NULL) {
5811 scsi_inquiry(csio,
5812 /*retries*/4,
5813 probedone,
5814 MSG_SIMPLE_Q_TAG,
5815 (u_int8_t *)vpd_list,
5816 sizeof(*vpd_list),
5817 /*evpd*/TRUE,
5818 SVPD_SUPPORTED_PAGE_LIST,
5819 SSD_MIN_SIZE,
5820 /*timeout*/60 * 1000);
5821 break;
5824 * We'll have to do without; let our probedone
5825 * routine finish up for us.
5827 start_ccb->csio.data_ptr = NULL;
5828 probedone(periph, start_ccb);
5829 return;
5831 case PROBE_SERIAL_NUM_1:
5833 struct scsi_vpd_unit_serial_number *serial_buf;
5834 struct cam_ed* device;
5836 serial_buf = NULL;
5837 device = periph->path->device;
5838 device->serial_num = NULL;
5839 device->serial_num_len = 0;
5841 serial_buf = (struct scsi_vpd_unit_serial_number *)
5842 kmalloc(sizeof(*serial_buf), M_CAMXPT,
5843 M_INTWAIT | M_ZERO);
5844 scsi_inquiry(csio,
5845 /*retries*/4,
5846 probedone,
5847 MSG_SIMPLE_Q_TAG,
5848 (u_int8_t *)serial_buf,
5849 sizeof(*serial_buf),
5850 /*evpd*/TRUE,
5851 SVPD_UNIT_SERIAL_NUMBER,
5852 SSD_MIN_SIZE,
5853 /*timeout*/60 * 1000);
5854 break;
5857 xpt_action(start_ccb);
5860 static void
5861 proberequestdefaultnegotiation(struct cam_periph *periph)
5863 struct ccb_trans_settings cts;
5865 xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5866 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5867 cts.type = CTS_TYPE_USER_SETTINGS;
5868 xpt_action((union ccb *)&cts);
5869 if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5870 return;
5872 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5873 cts.type = CTS_TYPE_CURRENT_SETTINGS;
5874 xpt_action((union ccb *)&cts);
5878 * Backoff Negotiation Code; only pertinent for SPI devices.
5880 static int
5881 proberequestbackoff(struct cam_periph *periph, struct cam_ed *device)
5883 struct ccb_trans_settings cts;
5884 struct ccb_trans_settings_spi *spi;
5886 memset(&cts, 0, sizeof (cts));
5887 xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5888 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5889 cts.type = CTS_TYPE_CURRENT_SETTINGS;
5890 xpt_action((union ccb *)&cts);
5891 if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5892 if (bootverbose) {
5893 xpt_print(periph->path,
5894 "failed to get current device settings\n");
5896 return (0);
5898 if (cts.transport != XPORT_SPI) {
5899 if (bootverbose) {
5900 xpt_print(periph->path, "not SPI transport\n");
5902 return (0);
5904 spi = &cts.xport_specific.spi;
5907 * We cannot renegotiate sync rate if we don't have one.
5909 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
5910 if (bootverbose) {
5911 xpt_print(periph->path, "no sync rate known\n");
5913 return (0);
5917 * We'll assert that we don't have to touch PPR options; the
5918 * SIM will see what we do with period and offset and adjust
5919 * the PPR options as appropriate.
5923 * A sync rate with unknown or zero offset is nonsensical.
5924 * A sync period of zero means Async.
5926 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0
5927 || spi->sync_offset == 0 || spi->sync_period == 0) {
5928 if (bootverbose) {
5929 xpt_print(periph->path, "no sync rate available\n");
5931 return (0);
5934 if (device->flags & CAM_DEV_DV_HIT_BOTTOM) {
5935 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
5936 ("hit async: giving up on DV\n"));
5937 return (0);
5942 * Jump sync_period up by one, but stop at 5MHz and fall back to Async.
5943 * We don't try to remember 'last' settings to see if the SIM actually
5944 * gets into the speed we want to set. We check on the SIM telling
5945 * us that a requested speed is bad, but otherwise don't try to
5946 * check the speed due to the asynchronous and handshake nature
5947 * of speed setting.
5949 spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET;
5950 for (;;) {
5951 spi->sync_period++;
5952 if (spi->sync_period >= 0xf) {
5953 spi->sync_period = 0;
5954 spi->sync_offset = 0;
5955 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
5956 ("setting to async for DV\n"));
5958 * Once we hit async, we don't want to try
5959 * any more settings.
5961 device->flags |= CAM_DEV_DV_HIT_BOTTOM;
5962 } else if (bootverbose) {
5963 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
5964 ("DV: period 0x%x\n", spi->sync_period));
5965 kprintf("setting period to 0x%x\n", spi->sync_period);
5967 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5968 cts.type = CTS_TYPE_CURRENT_SETTINGS;
5969 xpt_action((union ccb *)&cts);
5970 if ((cts.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5971 break;
5973 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
5974 ("DV: failed to set period 0x%x\n", spi->sync_period));
5975 if (spi->sync_period == 0) {
5976 return (0);
5979 return (1);
5982 static void
5983 probedone(struct cam_periph *periph, union ccb *done_ccb)
5985 probe_softc *softc;
5986 struct cam_path *path;
5987 u_int32_t priority;
5989 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
5991 softc = (probe_softc *)periph->softc;
5992 path = done_ccb->ccb_h.path;
5993 priority = done_ccb->ccb_h.pinfo.priority;
5995 switch (softc->action) {
5996 case PROBE_TUR:
5998 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
6000 if (cam_periph_error(done_ccb, 0,
6001 SF_NO_PRINT, NULL) == ERESTART)
6002 return;
6003 else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
6004 /* Don't wedge the queue */
6005 xpt_release_devq(done_ccb->ccb_h.path,
6006 /*count*/1,
6007 /*run_queue*/TRUE);
6009 softc->action = PROBE_INQUIRY;
6010 xpt_release_ccb(done_ccb);
6011 xpt_schedule(periph, priority);
6012 return;
6014 case PROBE_INQUIRY:
6015 case PROBE_FULL_INQUIRY:
6017 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
6018 struct scsi_inquiry_data *inq_buf;
6019 u_int8_t periph_qual;
6021 path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
6022 inq_buf = &path->device->inq_data;
6024 periph_qual = SID_QUAL(inq_buf);
6026 switch(periph_qual) {
6027 case SID_QUAL_LU_CONNECTED:
6029 u_int8_t len;
6032 * We conservatively request only
6033 * SHORT_INQUIRY_LENGTH bytes of inquiry
6034 * information during our first try
6035 * at sending an INQUIRY. If the device
6036 * has more information to give,
6037 * perform a second request specifying
6038 * the amount of information the device
6039 * is willing to give.
6041 len = inq_buf->additional_length
6042 + offsetof(struct scsi_inquiry_data,
6043 additional_length) + 1;
6044 if (softc->action == PROBE_INQUIRY
6045 && len > SHORT_INQUIRY_LENGTH) {
6046 softc->action = PROBE_FULL_INQUIRY;
6047 xpt_release_ccb(done_ccb);
6048 xpt_schedule(periph, priority);
6049 return;
6052 xpt_find_quirk(path->device);
6054 xpt_devise_transport(path);
6055 if (INQ_DATA_TQ_ENABLED(inq_buf))
6056 softc->action = PROBE_MODE_SENSE;
6057 else
6058 softc->action = PROBE_SERIAL_NUM_0;
6060 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
6061 xpt_reference_device(path->device);
6063 xpt_release_ccb(done_ccb);
6064 xpt_schedule(periph, priority);
6065 return;
6067 default:
6068 break;
6070 } else if (cam_periph_error(done_ccb, 0,
6071 done_ccb->ccb_h.target_lun > 0
6072 ? SF_RETRY_UA|SF_QUIET_IR
6073 : SF_RETRY_UA,
6074 &softc->saved_ccb) == ERESTART) {
6075 return;
6076 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6077 /* Don't wedge the queue */
6078 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6079 /*run_queue*/TRUE);
6082 * If we get to this point, we got an error status back
6083 * from the inquiry and the error status doesn't require
6084 * automatically retrying the command. Therefore, the
6085 * inquiry failed. If we had inquiry information before
6086 * for this device, but this latest inquiry command failed,
6087 * the device has probably gone away. If this device isn't
6088 * already marked unconfigured, notify the peripheral
6089 * drivers that this device is no more.
6091 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
6092 /* Send the async notification. */
6093 xpt_async(AC_LOST_DEVICE, path, NULL);
6096 xpt_release_ccb(done_ccb);
6097 break;
6099 case PROBE_MODE_SENSE:
6101 struct ccb_scsiio *csio;
6102 struct scsi_mode_header_6 *mode_hdr;
6104 csio = &done_ccb->csio;
6105 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
6106 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
6107 struct scsi_control_page *page;
6108 u_int8_t *offset;
6110 offset = ((u_int8_t *)&mode_hdr[1])
6111 + mode_hdr->blk_desc_len;
6112 page = (struct scsi_control_page *)offset;
6113 path->device->queue_flags = page->queue_flags;
6114 } else if (cam_periph_error(done_ccb, 0,
6115 SF_RETRY_UA|SF_NO_PRINT,
6116 &softc->saved_ccb) == ERESTART) {
6117 return;
6118 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6119 /* Don't wedge the queue */
6120 xpt_release_devq(done_ccb->ccb_h.path,
6121 /*count*/1, /*run_queue*/TRUE);
6123 xpt_release_ccb(done_ccb);
6124 kfree(mode_hdr, M_CAMXPT);
6125 softc->action = PROBE_SERIAL_NUM_0;
6126 xpt_schedule(periph, priority);
6127 return;
6129 case PROBE_SERIAL_NUM_0:
6131 struct ccb_scsiio *csio;
6132 struct scsi_vpd_supported_page_list *page_list;
6133 int length, serialnum_supported, i;
6135 serialnum_supported = 0;
6136 csio = &done_ccb->csio;
6137 page_list =
6138 (struct scsi_vpd_supported_page_list *)csio->data_ptr;
6140 if (page_list == NULL) {
6142 * Don't process the command as it was never sent
6144 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
6145 && (page_list->length > 0)) {
6146 length = min(page_list->length,
6147 SVPD_SUPPORTED_PAGES_SIZE);
6148 for (i = 0; i < length; i++) {
6149 if (page_list->list[i] ==
6150 SVPD_UNIT_SERIAL_NUMBER) {
6151 serialnum_supported = 1;
6152 break;
6155 } else if (cam_periph_error(done_ccb, 0,
6156 SF_RETRY_UA|SF_NO_PRINT,
6157 &softc->saved_ccb) == ERESTART) {
6158 return;
6159 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6160 /* Don't wedge the queue */
6161 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6162 /*run_queue*/TRUE);
6165 if (page_list != NULL)
6166 kfree(page_list, M_CAMXPT);
6168 if (serialnum_supported) {
6169 xpt_release_ccb(done_ccb);
6170 softc->action = PROBE_SERIAL_NUM_1;
6171 xpt_schedule(periph, priority);
6172 return;
6174 xpt_release_ccb(done_ccb);
6175 softc->action = PROBE_TUR_FOR_NEGOTIATION;
6176 xpt_schedule(periph, done_ccb->ccb_h.pinfo.priority);
6177 return;
6180 case PROBE_SERIAL_NUM_1:
6182 struct ccb_scsiio *csio;
6183 struct scsi_vpd_unit_serial_number *serial_buf;
6184 u_int32_t priority;
6185 int changed;
6186 int have_serialnum;
6188 changed = 1;
6189 have_serialnum = 0;
6190 csio = &done_ccb->csio;
6191 priority = done_ccb->ccb_h.pinfo.priority;
6192 serial_buf =
6193 (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
6195 /* Clean up from previous instance of this device */
6196 if (path->device->serial_num != NULL) {
6197 kfree(path->device->serial_num, M_CAMXPT);
6198 path->device->serial_num = NULL;
6199 path->device->serial_num_len = 0;
6202 if (serial_buf == NULL) {
6204 * Don't process the command as it was never sent
6206 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
6207 && (serial_buf->length > 0)) {
6209 have_serialnum = 1;
6210 path->device->serial_num =
6211 kmalloc((serial_buf->length + 1),
6212 M_CAMXPT, M_INTWAIT);
6213 bcopy(serial_buf->serial_num,
6214 path->device->serial_num,
6215 serial_buf->length);
6216 path->device->serial_num_len = serial_buf->length;
6217 path->device->serial_num[serial_buf->length] = '\0';
6218 } else if (cam_periph_error(done_ccb, 0,
6219 SF_RETRY_UA|SF_NO_PRINT,
6220 &softc->saved_ccb) == ERESTART) {
6221 return;
6222 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6223 /* Don't wedge the queue */
6224 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6225 /*run_queue*/TRUE);
6229 * Let's see if we have seen this device before.
6231 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
6232 MD5_CTX context;
6233 u_int8_t digest[16];
6235 MD5Init(&context);
6237 MD5Update(&context,
6238 (unsigned char *)&path->device->inq_data,
6239 sizeof(struct scsi_inquiry_data));
6241 if (have_serialnum)
6242 MD5Update(&context, serial_buf->serial_num,
6243 serial_buf->length);
6245 MD5Final(digest, &context);
6246 if (bcmp(softc->digest, digest, 16) == 0)
6247 changed = 0;
6250 * XXX Do we need to do a TUR in order to ensure
6251 * that the device really hasn't changed???
6253 if ((changed != 0)
6254 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
6255 xpt_async(AC_LOST_DEVICE, path, NULL);
6257 if (serial_buf != NULL)
6258 kfree(serial_buf, M_CAMXPT);
6260 if (changed != 0) {
6262 * Now that we have all the necessary
6263 * information to safely perform transfer
6264 * negotiations... Controllers don't perform
6265 * any negotiation or tagged queuing until
6266 * after the first XPT_SET_TRAN_SETTINGS ccb is
6267 * received. So, on a new device, just retrieve
6268 * the user settings, and set them as the current
6269 * settings to set the device up.
6271 proberequestdefaultnegotiation(periph);
6272 xpt_release_ccb(done_ccb);
6275 * Perform a TUR to allow the controller to
6276 * perform any necessary transfer negotiation.
6278 softc->action = PROBE_TUR_FOR_NEGOTIATION;
6279 xpt_schedule(periph, priority);
6280 return;
6282 xpt_release_ccb(done_ccb);
6283 break;
6285 case PROBE_TUR_FOR_NEGOTIATION:
6286 case PROBE_DV_EXIT:
6287 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6288 /* Don't wedge the queue */
6289 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6290 /*run_queue*/TRUE);
6293 xpt_reference_device(path->device);
6295 * Do Domain Validation for lun 0 on devices that claim
6296 * to support Synchronous Transfer modes.
6298 if (softc->action == PROBE_TUR_FOR_NEGOTIATION
6299 && done_ccb->ccb_h.target_lun == 0
6300 && (path->device->inq_data.flags & SID_Sync) != 0
6301 && (path->device->flags & CAM_DEV_IN_DV) == 0) {
6302 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6303 ("Begin Domain Validation\n"));
6304 path->device->flags |= CAM_DEV_IN_DV;
6305 xpt_release_ccb(done_ccb);
6306 softc->action = PROBE_INQUIRY_BASIC_DV1;
6307 xpt_schedule(periph, priority);
6308 return;
6310 if (softc->action == PROBE_DV_EXIT) {
6311 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6312 ("Leave Domain Validation\n"));
6314 path->device->flags &=
6315 ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
6316 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
6317 /* Inform the XPT that a new device has been found */
6318 done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
6319 xpt_action(done_ccb);
6320 xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
6321 done_ccb);
6323 xpt_release_ccb(done_ccb);
6324 break;
6325 case PROBE_INQUIRY_BASIC_DV1:
6326 case PROBE_INQUIRY_BASIC_DV2:
6328 struct scsi_inquiry_data *nbuf;
6329 struct ccb_scsiio *csio;
6331 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6332 /* Don't wedge the queue */
6333 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6334 /*run_queue*/TRUE);
6336 csio = &done_ccb->csio;
6337 nbuf = (struct scsi_inquiry_data *)csio->data_ptr;
6338 if (bcmp(nbuf, &path->device->inq_data, SHORT_INQUIRY_LENGTH)) {
6339 xpt_print(path,
6340 "inquiry data fails comparison at DV%d step\n",
6341 softc->action == PROBE_INQUIRY_BASIC_DV1 ? 1 : 2);
6342 if (proberequestbackoff(periph, path->device)) {
6343 path->device->flags &= ~CAM_DEV_IN_DV;
6344 softc->action = PROBE_TUR_FOR_NEGOTIATION;
6345 } else {
6346 /* give up */
6347 softc->action = PROBE_DV_EXIT;
6349 kfree(nbuf, M_CAMXPT);
6350 xpt_release_ccb(done_ccb);
6351 xpt_schedule(periph, priority);
6352 return;
6354 kfree(nbuf, M_CAMXPT);
6355 if (softc->action == PROBE_INQUIRY_BASIC_DV1) {
6356 softc->action = PROBE_INQUIRY_BASIC_DV2;
6357 xpt_release_ccb(done_ccb);
6358 xpt_schedule(periph, priority);
6359 return;
6361 if (softc->action == PROBE_DV_EXIT) {
6362 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6363 ("Leave Domain Validation Successfully\n"));
6365 path->device->flags &=
6366 ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
6367 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
6368 /* Inform the XPT that a new device has been found */
6369 done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
6370 xpt_action(done_ccb);
6371 xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
6372 done_ccb);
6374 xpt_release_ccb(done_ccb);
6375 break;
6378 done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
6379 TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
6380 done_ccb->ccb_h.status = CAM_REQ_CMP;
6381 xpt_done(done_ccb);
6382 if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
6383 cam_periph_invalidate(periph);
6384 cam_periph_release(periph);
6385 } else {
6386 probeschedule(periph);
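/*
 * A rough sketch of the probe state transitions driven by the
 * softc->action assignments above (paths that fall out of the switch
 * complete the probe and, where appropriate, announce the device):
 *
 *	inquiry done       -> PROBE_MODE_SENSE          (tagged-queueing devices)
 *	                   -> PROBE_SERIAL_NUM_0        (otherwise)
 *	PROBE_MODE_SENSE   -> PROBE_SERIAL_NUM_0
 *	PROBE_SERIAL_NUM_0 -> PROBE_SERIAL_NUM_1        (serial VPD page supported)
 *	                   -> PROBE_TUR_FOR_NEGOTIATION (otherwise)
 *	PROBE_SERIAL_NUM_1 -> PROBE_TUR_FOR_NEGOTIATION (device new or changed)
 *	PROBE_TUR_FOR_NEGOTIATION -> PROBE_INQUIRY_BASIC_DV1 (SPI sync devices, lun 0)
 *	PROBE_INQUIRY_BASIC_DV1   -> PROBE_INQUIRY_BASIC_DV2 (inquiry data stable)
 *	                          -> PROBE_TUR_FOR_NEGOTIATION or PROBE_DV_EXIT
 *	                             (on mismatch, after backing off the rate)
 */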
6390 static void
6391 probecleanup(struct cam_periph *periph)
6393 kfree(periph->softc, M_CAMXPT);
6396 static void
6397 xpt_find_quirk(struct cam_ed *device)
6399 caddr_t match;
6401 match = cam_quirkmatch((caddr_t)&device->inq_data,
6402 (caddr_t)xpt_quirk_table,
6403 sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
6404 sizeof(*xpt_quirk_table), scsi_inquiry_match);
6406 if (match == NULL)
6407 panic("xpt_find_quirk: device didn't match wildcard entry!!");
6409 device->quirk = (struct xpt_quirk_entry *)match;
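/*
 * cam_quirkmatch() runs scsi_inquiry_match() against each xpt_quirk_table
 * entry, and the table is relied upon to end with a catch-all wildcard
 * entry.  A NULL result is therefore treated as a table bug rather than
 * as "no quirks apply", hence the panic above.
 */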
6412 static int
6413 sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS)
6415 int error, bool;
6417 bool = cam_srch_hi;
6418 error = sysctl_handle_int(oidp, &bool, 0, req);
6419 if (error != 0 || req->newptr == NULL)
6420 return (error);
6421 if (bool == 0 || bool == 1) {
6422 cam_srch_hi = bool;
6423 return (0);
6424 } else {
6425 return (EINVAL);
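/*
 * Only the values 0 and 1 are accepted.  Assuming the handler is attached
 * with SYSCTL_PROC elsewhere in this file (the node name is not visible
 * here), toggling high-LUN scanning from userland would look roughly like
 *
 *	sysctl kern.cam.cam_srch_hi=1
 *
 * with the exact MIB name being an assumption.
 */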
6429 static void
6430 xpt_devise_transport(struct cam_path *path)
6432 struct ccb_pathinq cpi;
6433 struct ccb_trans_settings cts;
6434 struct scsi_inquiry_data *inq_buf;
6436 /* Get transport information from the SIM */
6437 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
6438 cpi.ccb_h.func_code = XPT_PATH_INQ;
6439 xpt_action((union ccb *)&cpi);
6441 inq_buf = NULL;
6442 if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
6443 inq_buf = &path->device->inq_data;
6444 path->device->protocol = PROTO_SCSI;
6445 path->device->protocol_version =
6446 inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
6447 path->device->transport = cpi.transport;
6448 path->device->transport_version = cpi.transport_version;
6451 * Any device not using SPI3 features should
6452 * be considered SPI2 or lower.
6454 if (inq_buf != NULL) {
6455 if (path->device->transport == XPORT_SPI
6456 && (inq_buf->spi3data & SID_SPI_MASK) == 0
6457 && path->device->transport_version > 2)
6458 path->device->transport_version = 2;
6459 } else {
6460 struct cam_ed* otherdev;
6462 for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
6463 otherdev != NULL;
6464 otherdev = TAILQ_NEXT(otherdev, links)) {
6465 if (otherdev != path->device)
6466 break;
6469 if (otherdev != NULL) {
6471 * Initially assume the same versioning as
6472 * prior luns for this target.
6474 path->device->protocol_version =
6475 otherdev->protocol_version;
6476 path->device->transport_version =
6477 otherdev->transport_version;
6478 } else {
6479 /* Until we know better, opt for safety */
6480 path->device->protocol_version = 2;
6481 if (path->device->transport == XPORT_SPI)
6482 path->device->transport_version = 2;
6483 else
6484 path->device->transport_version = 0;
6489 * XXX
6490 * For a device compliant with SPC-2 we should be able
6491 * to determine the transport version supported by
6492 * scrutinizing the version descriptors in the
6493 * inquiry buffer.
6496 /* Tell the controller what we think */
6497 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
6498 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
6499 cts.type = CTS_TYPE_CURRENT_SETTINGS;
6500 cts.transport = path->device->transport;
6501 cts.transport_version = path->device->transport_version;
6502 cts.protocol = path->device->protocol;
6503 cts.protocol_version = path->device->protocol_version;
6504 cts.proto_specific.valid = 0;
6505 cts.xport_specific.valid = 0;
6506 xpt_action((union ccb *)&cts);
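/*
 * In short: query the SIM with XPT_PATH_INQ, derive protocol and transport
 * versions from the inquiry data, from a sibling LUN on the same target,
 * or from conservative defaults, clamp SPI devices that report no SPI3
 * features to transport version 2, and push the result back to the SIM as
 * CTS_TYPE_CURRENT_SETTINGS via XPT_SET_TRAN_SETTINGS.
 */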
6509 static void
6510 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6511 int async_update)
6513 struct ccb_pathinq cpi;
6514 struct ccb_trans_settings cur_cts;
6515 struct ccb_trans_settings_scsi *scsi;
6516 struct ccb_trans_settings_scsi *cur_scsi;
6517 struct cam_sim *sim;
6518 struct scsi_inquiry_data *inq_data;
6520 if (device == NULL) {
6521 cts->ccb_h.status = CAM_PATH_INVALID;
6522 xpt_done((union ccb *)cts);
6523 return;
6526 if (cts->protocol == PROTO_UNKNOWN
6527 || cts->protocol == PROTO_UNSPECIFIED) {
6528 cts->protocol = device->protocol;
6529 cts->protocol_version = device->protocol_version;
6532 if (cts->protocol_version == PROTO_VERSION_UNKNOWN
6533 || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
6534 cts->protocol_version = device->protocol_version;
6536 if (cts->protocol != device->protocol) {
6537 xpt_print(cts->ccb_h.path, "Uninitialized Protocol %x:%x?\n",
6538 cts->protocol, device->protocol);
6539 cts->protocol = device->protocol;
6542 if (cts->protocol_version > device->protocol_version) {
6543 if (bootverbose) {
6544 xpt_print(cts->ccb_h.path, "Down-revving Protocol "
6545 "Version from %d to %d?\n", cts->protocol_version,
6546 device->protocol_version);
6548 cts->protocol_version = device->protocol_version;
6551 if (cts->transport == XPORT_UNKNOWN
6552 || cts->transport == XPORT_UNSPECIFIED) {
6553 cts->transport = device->transport;
6554 cts->transport_version = device->transport_version;
6557 if (cts->transport_version == XPORT_VERSION_UNKNOWN
6558 || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
6559 cts->transport_version = device->transport_version;
6561 if (cts->transport != device->transport) {
6562 xpt_print(cts->ccb_h.path, "Uninitialized Transport %x:%x?\n",
6563 cts->transport, device->transport);
6564 cts->transport = device->transport;
6567 if (cts->transport_version > device->transport_version) {
6568 if (bootverbose) {
6569 xpt_print(cts->ccb_h.path, "Down-revving Transport "
6570 "Version from %d to %d?\n", cts->transport_version,
6571 device->transport_version);
6573 cts->transport_version = device->transport_version;
6576 sim = cts->ccb_h.path->bus->sim;
6579 * Nothing more of interest to do unless
6580 * this is a device connected via the
6581 * SCSI protocol.
6583 if (cts->protocol != PROTO_SCSI) {
6584 if (async_update == FALSE)
6585 (*(sim->sim_action))(sim, (union ccb *)cts);
6586 return;
6589 inq_data = &device->inq_data;
6590 scsi = &cts->proto_specific.scsi;
6591 xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6592 cpi.ccb_h.func_code = XPT_PATH_INQ;
6593 xpt_action((union ccb *)&cpi);
6595 /* SCSI specific sanity checking */
6596 if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6597 || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
6598 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6599 || (device->quirk->mintags == 0)) {
6601 * Can't tag on hardware that doesn't support tags,
6602 * doesn't have it enabled, or has broken tag support.
6604 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6607 if (async_update == FALSE) {
6609 * Perform sanity checking against what the
6610 * controller and device can do.
6612 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6613 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6614 cur_cts.type = cts->type;
6615 xpt_action((union ccb *)&cur_cts);
6616 if ((cur_cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
6617 return;
6619 cur_scsi = &cur_cts.proto_specific.scsi;
6620 if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
6621 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6622 scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
6624 if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
6625 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6628 /* SPI specific sanity checking */
6629 if (cts->transport == XPORT_SPI && async_update == FALSE) {
6630 u_int spi3caps;
6631 struct ccb_trans_settings_spi *spi;
6632 struct ccb_trans_settings_spi *cur_spi;
6634 spi = &cts->xport_specific.spi;
6636 cur_spi = &cur_cts.xport_specific.spi;
6638 /* Fill in any gaps in what the user gave us */
6639 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6640 spi->sync_period = cur_spi->sync_period;
6641 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6642 spi->sync_period = 0;
6643 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6644 spi->sync_offset = cur_spi->sync_offset;
6645 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6646 spi->sync_offset = 0;
6647 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6648 spi->ppr_options = cur_spi->ppr_options;
6649 if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6650 spi->ppr_options = 0;
6651 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6652 spi->bus_width = cur_spi->bus_width;
6653 if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6654 spi->bus_width = 0;
6655 if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
6656 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6657 spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
6659 if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
6660 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6661 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6662 && (inq_data->flags & SID_Sync) == 0
6663 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6664 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6665 || (spi->sync_offset == 0)
6666 || (spi->sync_period == 0)) {
6667 /* Force async */
6668 spi->sync_period = 0;
6669 spi->sync_offset = 0;
6672 switch (spi->bus_width) {
6673 case MSG_EXT_WDTR_BUS_32_BIT:
6674 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6675 || (inq_data->flags & SID_WBus32) != 0
6676 || cts->type == CTS_TYPE_USER_SETTINGS)
6677 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6678 break;
6679 /* Fall Through to 16-bit */
6680 case MSG_EXT_WDTR_BUS_16_BIT:
6681 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6682 || (inq_data->flags & SID_WBus16) != 0
6683 || cts->type == CTS_TYPE_USER_SETTINGS)
6684 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6685 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6686 break;
6688 /* Fall Through to 8-bit */
6689 default: /* New bus width?? */
6690 case MSG_EXT_WDTR_BUS_8_BIT:
6691 /* All targets can do this */
6692 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6693 break;
6696 spi3caps = cpi.xport_specific.spi.ppr_options;
6697 if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6698 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6699 spi3caps &= inq_data->spi3data;
6701 if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
6702 spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
6704 if ((spi3caps & SID_SPI_IUS) == 0)
6705 spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
6707 if ((spi3caps & SID_SPI_QAS) == 0)
6708 spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
6710 /* No SPI Transfer settings are allowed unless we are wide */
6711 if (spi->bus_width == 0)
6712 spi->ppr_options = 0;
6714 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
6716 * Can't tag queue without disconnection.
6718 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6719 scsi->valid |= CTS_SCSI_VALID_TQ;
6723 * If we are currently performing tagged transactions to
6724 * this device and want to change its negotiation parameters,
6725 * go non-tagged for a bit to give the controller a chance to
6726 * negotiate unhampered by tag messages.
6728 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6729 && (device->inq_flags & SID_CmdQue) != 0
6730 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6731 && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
6732 CTS_SPI_VALID_SYNC_OFFSET|
6733 CTS_SPI_VALID_BUS_WIDTH)) != 0)
6734 xpt_toggle_tags(cts->ccb_h.path);
6737 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6738 && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
6739 int device_tagenb;
6742 * If we are transitioning from tags to no-tags or
6743 * vice-versa, we need to carefully freeze and restart
6744 * the queue so that we don't overlap tagged and non-tagged
6745 * commands. We also temporarily stop tags if there is
6746 * a change in transfer negotiation settings to allow
6747 * "tag-less" negotiation.
6749 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6750 || (device->inq_flags & SID_CmdQue) != 0)
6751 device_tagenb = TRUE;
6752 else
6753 device_tagenb = FALSE;
6755 if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6756 && device_tagenb == FALSE)
6757 || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
6758 && device_tagenb == TRUE)) {
6760 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
6762 * Delay change to use tags until after a
6763 * few commands have gone to this device so
6764 * the controller has time to perform transfer
6765 * negotiations without tagged messages getting
6766 * in the way.
6768 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6769 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6770 } else {
6771 struct ccb_relsim crs;
6773 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6774 device->inq_flags &= ~SID_CmdQue;
6775 xpt_dev_ccbq_resize(cts->ccb_h.path,
6776 sim->max_dev_openings);
6777 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6778 device->tag_delay_count = 0;
6780 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6781 /*priority*/1);
6782 crs.ccb_h.func_code = XPT_REL_SIMQ;
6783 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6784 crs.openings
6785 = crs.release_timeout
6786 = crs.qfrozen_cnt
6787 = 0;
6788 xpt_action((union ccb *)&crs);
6792 if (async_update == FALSE)
6793 (*(sim->sim_action))(sim, (union ccb *)cts);
6796 static void
6797 xpt_toggle_tags(struct cam_path *path)
6799 struct cam_ed *dev;
6802 * Give controllers a chance to renegotiate
6803 * before starting tag operations. We
6804 * "toggle" tagged queuing off then on
6805 * which causes the tag enable command delay
6806 * counter to come into effect.
6808 dev = path->device;
6809 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6810 || ((dev->inq_flags & SID_CmdQue) != 0
6811 && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
6812 struct ccb_trans_settings cts;
6814 xpt_setup_ccb(&cts.ccb_h, path, 1);
6815 cts.protocol = PROTO_SCSI;
6816 cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
6817 cts.transport = XPORT_UNSPECIFIED;
6818 cts.transport_version = XPORT_VERSION_UNSPECIFIED;
6819 cts.proto_specific.scsi.flags = 0;
6820 cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
6821 xpt_set_transfer_settings(&cts, path->device,
6822 /*async_update*/TRUE);
6823 cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
6824 xpt_set_transfer_settings(&cts, path->device,
6825 /*async_update*/TRUE);
6829 static void
6830 xpt_start_tags(struct cam_path *path)
6832 struct ccb_relsim crs;
6833 struct cam_ed *device;
6834 struct cam_sim *sim;
6835 int newopenings;
6837 device = path->device;
6838 sim = path->bus->sim;
6839 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6840 xpt_freeze_devq(path, /*count*/1);
6841 device->inq_flags |= SID_CmdQue;
6842 if (device->tag_saved_openings != 0)
6843 newopenings = device->tag_saved_openings;
6844 else
6845 newopenings = min(device->quirk->maxtags,
6846 sim->max_tagged_dev_openings);
6847 xpt_dev_ccbq_resize(path, newopenings);
6848 xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
6849 crs.ccb_h.func_code = XPT_REL_SIMQ;
6850 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6851 crs.openings
6852 = crs.release_timeout
6853 = crs.qfrozen_cnt
6854 = 0;
6855 xpt_action((union ccb *)&crs);
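/*
 * The sequence above is the standard "turn tags on" dance: freeze the
 * device queue, mark the device as command-queueing (SID_CmdQue), resize
 * the CCB queue to the saved or quirk-limited opening count, and then use
 * an XPT_REL_SIMQ CCB with RELSIM_RELEASE_AFTER_QEMPTY so the frozen queue
 * is released only once the outstanding untagged commands have drained.
 */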
6858 static int busses_to_config;
6859 static int busses_to_reset;
6861 static int
6862 xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
6865 sim_lock_assert_owned(bus->sim->lock);
6867 if (bus->path_id != CAM_XPT_PATH_ID) {
6868 struct cam_path path;
6869 struct ccb_pathinq cpi;
6870 int can_negotiate;
6872 busses_to_config++;
6873 xpt_compile_path(&path, NULL, bus->path_id,
6874 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
6875 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
6876 cpi.ccb_h.func_code = XPT_PATH_INQ;
6877 xpt_action((union ccb *)&cpi);
6878 can_negotiate = cpi.hba_inquiry;
6879 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6880 if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
6881 && can_negotiate)
6882 busses_to_reset++;
6883 xpt_release_path(&path);
6886 return(1);
6889 static int
6890 xptconfigfunc(struct cam_eb *bus, void *arg)
6892 struct cam_path *path;
6893 union ccb *work_ccb;
6895 sim_lock_assert_owned(bus->sim->lock);
6897 if (bus->path_id != CAM_XPT_PATH_ID) {
6898 cam_status status;
6899 int can_negotiate;
6901 work_ccb = xpt_alloc_ccb();
6902 if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
6903 CAM_TARGET_WILDCARD,
6904 CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
6905 kprintf("xptconfigfunc: xpt_create_path failed with "
6906 "status %#x for bus %d\n", status, bus->path_id);
6907 kprintf("xptconfigfunc: halting bus configuration\n");
6908 xpt_free_ccb(work_ccb);
6909 busses_to_config--;
6910 xpt_finishconfig(xpt_periph, NULL);
6911 return(0);
6913 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6914 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
6915 xpt_action(work_ccb);
6916 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
6917 kprintf("xptconfigfunc: CPI failed on bus %d "
6918 "with status %d\n", bus->path_id,
6919 work_ccb->ccb_h.status);
6920 xpt_finishconfig(xpt_periph, work_ccb);
6921 return(1);
6924 can_negotiate = work_ccb->cpi.hba_inquiry;
6925 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6926 if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
6927 && (can_negotiate != 0)) {
6928 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6929 work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6930 work_ccb->ccb_h.cbfcnp = NULL;
6931 CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
6932 ("Resetting Bus\n"));
6933 xpt_action(work_ccb);
6934 xpt_finishconfig(xpt_periph, work_ccb);
6935 } else {
6936 /* Act as though we performed a successful BUS RESET */
6937 work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6938 xpt_finishconfig(xpt_periph, work_ccb);
6942 return(1);
6945 static void
6946 xpt_config(void *arg)
6949 * Now that interrupts are enabled, go find our devices
6952 #ifdef CAMDEBUG
6953 /* Setup debugging flags and path */
6954 #ifdef CAM_DEBUG_FLAGS
6955 cam_dflags = CAM_DEBUG_FLAGS;
6956 #else /* !CAM_DEBUG_FLAGS */
6957 cam_dflags = CAM_DEBUG_NONE;
6958 #endif /* CAM_DEBUG_FLAGS */
6959 #ifdef CAM_DEBUG_BUS
6960 if (cam_dflags != CAM_DEBUG_NONE) {
6962 * Locking is specifically omitted here. No SIMs have
6963 * registered yet, so xpt_create_path will only be searching
6964 * empty lists of targets and devices.
6966 if (xpt_create_path(&cam_dpath, xpt_periph,
6967 CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
6968 CAM_DEBUG_LUN) != CAM_REQ_CMP) {
6969 kprintf("xpt_config: xpt_create_path() failed for debug"
6970 " target %d:%d:%d, debugging disabled\n",
6971 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
6972 cam_dflags = CAM_DEBUG_NONE;
6974 } else
6975 cam_dpath = NULL;
6976 #else /* !CAM_DEBUG_BUS */
6977 cam_dpath = NULL;
6978 #endif /* CAM_DEBUG_BUS */
6979 #endif /* CAMDEBUG */
6982 * Scan all installed busses.
6984 xpt_for_all_busses(xptconfigbuscountfunc, NULL);
6986 if (busses_to_config == 0) {
6987 /* Call manually because we don't have any busses */
6988 xpt_finishconfig(xpt_periph, NULL);
6989 } else {
6990 if (busses_to_reset > 0 && scsi_delay >= 2000) {
6991 kprintf("Waiting %d seconds for SCSI "
6992 "devices to settle\n", scsi_delay/1000);
6994 xpt_for_all_busses(xptconfigfunc, NULL);
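/*
 * Boot-time configuration thus proceeds in two passes over the bus list:
 * xptconfigbuscountfunc() counts the busses (and those eligible for a
 * reset), then xptconfigfunc() issues the optional XPT_RESET_BUS and hands
 * each bus to xpt_finishconfig().  The scsi_delay message above is only
 * printed when at least one bus will be reset and the settle delay is two
 * seconds or more.
 */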
6999 * If the given device only has one peripheral attached to it, and if that
7000 * peripheral is the passthrough driver, announce it. This ensures that the
7001 * user sees some sort of announcement for every peripheral in their system.
7003 static int
7004 xptpassannouncefunc(struct cam_ed *device, void *arg)
7006 struct cam_periph *periph;
7007 int i;
7009 for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
7010 periph = SLIST_NEXT(periph, periph_links), i++);
7012 periph = SLIST_FIRST(&device->periphs);
7013 if ((i == 1)
7014 && (strncmp(periph->periph_name, "pass", 4) == 0))
7015 xpt_announce_periph(periph, NULL);
7017 return(1);
7020 static void
7021 xpt_finishconfig_task(void *context, int pending)
7023 struct periph_driver **p_drv;
7024 int i;
7026 if (busses_to_config == 0) {
7027 /* Register all the peripheral drivers */
7028 /* XXX This will have to change when we have loadable modules */
7029 p_drv = periph_drivers;
7030 for (i = 0; p_drv[i] != NULL; i++) {
7031 (*p_drv[i]->init)();
7035 * Check for devices with no "standard" peripheral driver
7036 * attached. For any devices like that, announce the
7037 * passthrough driver so the user will see something.
7039 xpt_for_all_devices(xptpassannouncefunc, NULL);
7041 /* Release our hook so that the boot can continue. */
7042 config_intrhook_disestablish(xsoftc.xpt_config_hook);
7043 kfree(xsoftc.xpt_config_hook, M_CAMXPT);
7044 xsoftc.xpt_config_hook = NULL;
7047 kfree(context, M_CAMXPT);
7050 static void
7051 xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
7053 struct xpt_task *task;
7055 if (done_ccb != NULL) {
7056 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
7057 ("xpt_finishconfig\n"));
7058 switch(done_ccb->ccb_h.func_code) {
7059 case XPT_RESET_BUS:
7060 if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
7061 done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
7062 done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
7063 done_ccb->crcn.flags = 0;
7064 xpt_action(done_ccb);
7065 return;
7067 /* FALLTHROUGH */
7068 case XPT_SCAN_BUS:
7069 default:
7070 xpt_free_path(done_ccb->ccb_h.path);
7071 busses_to_config--;
7072 break;
7076 if (busses_to_config == 0) {
7077 task = kmalloc(sizeof(struct xpt_task), M_CAMXPT, M_INTWAIT);
7078 TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
7079 taskqueue_enqueue(taskqueue_thread[mycpuid], &task->task);
7082 if (done_ccb != NULL)
7083 xpt_free_ccb(done_ccb);
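/*
 * xpt_finishconfig() is re-entered as the completion callback for each
 * configuration CCB: a successful XPT_RESET_BUS is converted in place into
 * an XPT_SCAN_BUS request, and once every bus has reported back
 * (busses_to_config reaches zero) the peripheral-driver initialization is
 * deferred to xpt_finishconfig_task() via the current cpu's thread
 * taskqueue.
 */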
7086 cam_status
7087 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
7088 struct cam_path *path)
7090 struct ccb_setasync csa;
7091 cam_status status;
7092 int xptpath = 0;
7094 if (path == NULL) {
7095 lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
7096 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
7097 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
7098 if (status != CAM_REQ_CMP) {
7099 lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
7100 return (status);
7102 xptpath = 1;
7105 xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
7106 csa.ccb_h.func_code = XPT_SASYNC_CB;
7107 csa.event_enable = event;
7108 csa.callback = cbfunc;
7109 csa.callback_arg = cbarg;
7110 xpt_action((union ccb *)&csa);
7111 status = csa.ccb_h.status;
7112 if (xptpath) {
7113 xpt_free_path(path);
7114 lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
7116 return (status);
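/*
 * Usage sketch (hypothetical peripheral-driver names, not taken from this
 * file): a periph that wants to hear about device departures on its own
 * path would register roughly as
 *
 *	status = xpt_register_async(AC_LOST_DEVICE, fooasync, periph,
 *	                            periph->path);
 *
 * where fooasync matches the ac_callback_t prototype.  Passing a NULL path
 * instead registers against the XPT wildcard path created above.
 */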
7119 static void
7120 xptaction(struct cam_sim *sim, union ccb *work_ccb)
7122 CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
7124 switch (work_ccb->ccb_h.func_code) {
7125 /* Common cases first */
7126 case XPT_PATH_INQ: /* Path routing inquiry */
7128 struct ccb_pathinq *cpi;
7130 cpi = &work_ccb->cpi;
7131 cpi->version_num = 1; /* XXX??? */
7132 cpi->hba_inquiry = 0;
7133 cpi->target_sprt = 0;
7134 cpi->hba_misc = 0;
7135 cpi->hba_eng_cnt = 0;
7136 cpi->max_target = 0;
7137 cpi->max_lun = 0;
7138 cpi->initiator_id = 0;
7139 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
7140 strncpy(cpi->hba_vid, "", HBA_IDLEN);
7141 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
7142 cpi->unit_number = sim->unit_number;
7143 cpi->bus_id = sim->bus_id;
7144 cpi->base_transfer_speed = 0;
7145 cpi->protocol = PROTO_UNSPECIFIED;
7146 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
7147 cpi->transport = XPORT_UNSPECIFIED;
7148 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
7149 cpi->ccb_h.status = CAM_REQ_CMP;
7150 xpt_done(work_ccb);
7151 break;
7153 default:
7154 work_ccb->ccb_h.status = CAM_REQ_INVALID;
7155 xpt_done(work_ccb);
7156 break;
7161 * The xpt as a "controller" has no interrupt sources, so polling
7162 * is a no-op.
7164 static void
7165 xptpoll(struct cam_sim *sim)
7169 void
7170 xpt_lock_buses(void)
7172 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
7175 void
7176 xpt_unlock_buses(void)
7178 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
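/*
 * Usage sketch (an assumption based on the topology lock these helpers
 * wrap, not a call sequence taken from this file):
 *
 *	xpt_lock_buses();
 *	... walk the xsoftc bus list ...
 *	xpt_unlock_buses();
 */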
7183 * Should only be called by the machine interrupt dispatch routines,
7184 * so put these prototypes here instead of in the header.
7187 static void
7188 swi_cambio(void *arg, void *frame)
7190 camisr(NULL);
7193 static void
7194 camisr(void *dummy)
7196 cam_simq_t queue;
7197 struct cam_sim *sim;
7199 spin_lock_wr(&cam_simq_spin);
7200 TAILQ_INIT(&queue);
7201 TAILQ_CONCAT(&queue, &cam_simq, links);
7202 spin_unlock_wr(&cam_simq_spin);
7204 while ((sim = TAILQ_FIRST(&queue)) != NULL) {
7205 TAILQ_REMOVE(&queue, sim, links);
7206 CAM_SIM_LOCK(sim);
7207 sim->flags &= ~CAM_SIM_ON_DONEQ;
7208 camisr_runqueue(sim);
7209 CAM_SIM_UNLOCK(sim);
7213 static void
7214 camisr_runqueue(struct cam_sim *sim)
7216 struct ccb_hdr *ccb_h;
7217 int runq;
7219 spin_lock_wr(&sim->sim_spin);
7220 while ((ccb_h = TAILQ_FIRST(&sim->sim_doneq)) != NULL) {
7221 TAILQ_REMOVE(&sim->sim_doneq, ccb_h, sim_links.tqe);
7222 spin_unlock_wr(&sim->sim_spin);
7223 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
7225 CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
7226 ("camisr\n"));
7228 runq = FALSE;
7230 if (ccb_h->flags & CAM_HIGH_POWER) {
7231 struct highpowerlist *hphead;
7232 struct cam_ed *device;
7233 union ccb *send_ccb;
7235 lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
7236 hphead = &xsoftc.highpowerq;
7238 send_ccb = (union ccb *)STAILQ_FIRST(hphead);
7241 * Increment the count since this command is done.
7243 xsoftc.num_highpower++;
7246 * Any high powered commands queued up?
7248 if (send_ccb != NULL) {
7249 device = send_ccb->ccb_h.path->device;
7251 STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
7252 lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
7254 xpt_release_devq(send_ccb->ccb_h.path,
7255 /*count*/1, /*runqueue*/TRUE);
7256 } else
7257 lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
7260 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
7261 struct cam_ed *dev;
7263 dev = ccb_h->path->device;
7265 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
7268 * devq may be NULL if this is cam_dead_sim
7270 if (ccb_h->path->bus->sim->devq) {
7271 ccb_h->path->bus->sim->devq->send_active--;
7272 ccb_h->path->bus->sim->devq->send_openings++;
7275 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
7276 && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
7277 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
7278 && (dev->ccbq.dev_active == 0))) {
7280 xpt_release_devq(ccb_h->path, /*count*/1,
7281 /*run_queue*/TRUE);
7284 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
7285 && (--dev->tag_delay_count == 0))
7286 xpt_start_tags(ccb_h->path);
7288 if ((dev->ccbq.queue.entries > 0)
7289 && (dev->qfrozen_cnt == 0)
7290 && (device_is_send_queued(dev) == 0)) {
7291 runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
7292 dev);
7296 if (ccb_h->status & CAM_RELEASE_SIMQ) {
7297 xpt_release_simq(ccb_h->path->bus->sim,
7298 /*run_queue*/TRUE);
7299 ccb_h->status &= ~CAM_RELEASE_SIMQ;
7300 runq = FALSE;
7303 if ((ccb_h->flags & CAM_DEV_QFRZDIS)
7304 && (ccb_h->status & CAM_DEV_QFRZN)) {
7305 xpt_release_devq(ccb_h->path, /*count*/1,
7306 /*run_queue*/TRUE);
7307 ccb_h->status &= ~CAM_DEV_QFRZN;
7308 } else if (runq) {
7309 xpt_run_dev_sendq(ccb_h->path->bus);
7312 /* Call the peripheral driver's callback */
7313 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
7314 spin_lock_wr(&sim->sim_spin);
7316 spin_unlock_wr(&sim->sim_spin);
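/*
 * Note that camisr_runqueue() drops sim_spin before doing the completion
 * work and calling the peripheral callback for each CCB, and re-acquires
 * it before looking at the next done-queue entry; the callbacks can do
 * arbitrary work (including taking blocking locks or queueing new CCBs),
 * so they cannot safely be invoked while the spinlock is held.
 */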
7320 * The dead_sim isn't completely hooked into CAM, so we have to make sure
7321 * the doneq is cleared after calling xpt_done() so cam_periph_ccbwait()
7322 * doesn't block.
7324 static void
7325 dead_sim_action(struct cam_sim *sim, union ccb *ccb)
7328 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
7329 xpt_done(ccb);
7330 camisr_runqueue(sim);
7333 static void
7334 dead_sim_poll(struct cam_sim *sim)