/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/cam/cam_xpt.c,v 1.80.2.18 2002/12/09 17:31:55 gibbs Exp $
 * $DragonFly: src/sys/bus/cam/cam_xpt.c,v 1.64 2008/02/10 00:01:01 pavalos Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/devicestat.h>
#include <sys/interrupt.h>
#include <sys/thread.h>
#include <sys/thread2.h>

#include <machine/clock.h>

#include "cam_periph.h"
#include "cam_xpt_sim.h"
#include "cam_xpt_periph.h"
#include "cam_debug.h"

#include "scsi/scsi_all.h"
#include "scsi/scsi_message.h"
#include "scsi/scsi_pass.h"
/* Data structures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
	SLIST_ENTRY(async_node)	links;
	u_int32_t	event_enable;	/* Async Event enables */
	void		(*callback)(void *arg, u_int32_t code,
				    struct cam_path *path, void *args);
	void		*callback_arg;
};

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;
/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;
/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
	cam_pinfo pinfo;
	struct	  cam_ed *device;
};
/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
	TAILQ_ENTRY(cam_ed) links;
	struct	cam_ed_qinfo alloc_ccb_entry;
	struct	cam_ed_qinfo send_ccb_entry;
	struct	cam_et	 *target;
	lun_id_t	 lun_id;
	struct	camq drvq;		/*
					 * Queue of type drivers wanting to do
					 * work on this device.
					 */
	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
	struct	async_list asyncs;	/* Async callback info for this B/T/L */
	struct	periph_list periphs;	/* All attached devices */
	u_int	generation;		/* Generation number */
	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
	/* Storage for the inquiry data */
	u_int		 protocol_version;
	u_int		 transport_version;
	struct		 scsi_inquiry_data inq_data;
	u_int8_t	 inq_flags;	/*
					 * Current settings for inquiry flags.
					 * This allows us to override settings
					 * like disconnection and tagged
					 * queuing for a device.
					 */
	u_int8_t	 queue_flags;	/* Queue flags from the control page */
	u_int8_t	 serial_num_len;
	u_int8_t	*serial_num;
	u_int32_t	 qfrozen_cnt;
	u_int32_t	 flags;
#define	CAM_DEV_UNCONFIGURED		0x01
#define	CAM_DEV_REL_TIMEOUT_PENDING	0x02
#define	CAM_DEV_REL_ON_COMPLETE		0x04
#define	CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
#define	CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
#define	CAM_DEV_TAG_AFTER_COUNT		0x20
#define	CAM_DEV_INQUIRY_DATA_VALID	0x40
	u_int32_t	 tag_delay_count;
#define	CAM_TAG_DELAY_COUNT		5
	u_int32_t	 tag_saved_openings;
	struct		 callout c_handle;
};
/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
	TAILQ_HEAD(, cam_ed) ed_entries;
	TAILQ_ENTRY(cam_et) links;
	struct	cam_eb	*bus;
	target_id_t	target_id;
	u_int		generation;
	struct		timeval last_reset;	/* uptime of last reset */
};
/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
	TAILQ_HEAD(, cam_et) et_entries;
	TAILQ_ENTRY(cam_eb)  links;
	path_id_t	     path_id;
	struct cam_sim	     *sim;
	struct timeval	     last_reset;	/* uptime of last reset */
	u_int32_t	     flags;
#define	CAM_EB_RUNQ_SCHEDULED	0x01
	u_int		     generation;
};
struct cam_path {
	struct cam_periph *periph;
	struct cam_eb	  *bus;
	struct cam_et	  *target;
	struct cam_ed	  *device;
};
struct xpt_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define	CAM_QUIRK_NOLUNS	0x01
#define	CAM_QUIRK_NOSERIAL	0x02
#define	CAM_QUIRK_HILUNS	0x04
#define	CAM_QUIRK_NOHILUNS	0x08
	u_int mintags;
	u_int maxtags;
};
static int cam_srch_hi = 0;
TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
    sysctl_cam_search_luns, "I",
    "allow search above LUN 7 for SCSI3 and greater devices");
#define	CAM_SCSI2_MAXLUN	8
/*
 * If we're not quirked to search <= the first 8 luns
 * and we are either quirked to search above lun 8,
 * or we're > SCSI-2 and we've enabled hilun searching,
 * or we're > SCSI-2 and the last lun was a success,
 * we can look for luns above lun 8.
 */
#define	CAN_SRCH_HI_SPARSE(dv)					\
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)		\
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)			\
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))

#define	CAN_SRCH_HI_DENSE(dv)					\
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)		\
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)			\
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))
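
/*
 * Note (added commentary, not from the original source): a worked
 * example of the two macros above.  For a SCSI-3 disk with no quirk
 * bits set, CAM_QUIRK_NOHILUNS is clear and SID_ANSI_REV() is greater
 * than SCSI_REV_2, so CAN_SRCH_HI_DENSE() is true and probing may
 * continue past LUN 7 as long as the previous LUN responded.
 * CAN_SRCH_HI_SPARSE() additionally requires the kern.cam.cam_srch_hi
 * tunable to be non-zero, and is the test used when the previous LUN
 * did not respond (sparsely populated LUNs).
 */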
typedef enum {
	XPT_FLAG_OPEN		= 0x01
} xpt_flags;

struct xpt_softc {
	xpt_flags	flags;
	u_int32_t	generation;
};
static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";
static struct xpt_quirk_entry xpt_quirk_table[] =
{
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Unfortunately, the Quantum Atlas III has the same
		 * problem as the Atlas II drives above.
		 * Reported by: "Johan Granlund" <johan@granlund.nu>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM39100TD-SW N1B0
		 *
		 * It's possible that Quantum will fix the problem in later
		 * firmware revisions.  If that happens, the quirk entry
		 * will need to be made specific to the firmware revisions
		 * with the problem.
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * 18 Gig Atlas III, same problem as the 9G version.
		 * Reported by: Andre Albsmeier
		 *		<andre.albsmeier@mchp.siemens.de>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM318000TD-S N491
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * Broken tagged queuing drive
		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
		 *         and: Martin Renters <martin@tdc.on.ca>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	/*
	 * The Seagate Medalist Pro drives have very poor write
	 * performance with anything more than 2 tags.
	 *
	 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
	 * Drive:  <SEAGATE ST36530N 1444>
	 *
	 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
	 * Drive:  <SEAGATE ST34520W 1281>
	 *
	 * No one has actually reported that the 9G version
	 * (ST39140*) of the Medalist Pro has the same problem, but
	 * we're assuming that it does because the 4G and 6.5G
	 * versions of the drive are broken.
	 */
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  Write performance
		 * steadily drops off with more and more concurrent
		 * transactions.  Best sequential write performance with
		 * tagged queueing turned off and write caching turned on.
		 *
		 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
		 * Drive:  DCAS-34330 w/ "S65A" firmware.
		 *
		 * The drive with the problem had the "S65A" firmware
		 * revision, and has also been reported (by Stephen J.
		 * Roznowski <sjr@home.net>) for a drive with the "S61A"
		 * firmware revision.
		 *
		 * Although no one has reported problems with the 2 gig
		 * version of the DCAS drive, the assumption is that it
		 * has the same problems as the 4 gig version.  Therefore
		 * this quirk entry disables tagged queueing for all
		 * DCAS drives.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* This does not support other than LUN 0 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/*
		 * Broken tagged queuing drive.
		 * Submitted by:
		 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Doesn't handle queue full condition correctly,
		 * so we need to limit maxtags to what the device
		 * can handle instead of determining this automatically.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
		/*quirks*/0, /*mintags*/2, /*maxtags*/32
	},
	{
		/* Really only one LUN */
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* I can't believe we need a quirk for DPT volumes. */
		{ T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
		/*mintags*/0, /*maxtags*/255
	},
	{
		/*
		 * Many Sony CDROM drives don't like multi-LUN probing.
		 */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This drive doesn't like multiple LUN probing.
		 * Submitted by:  Parag Patel <parag@cgt.com>
		 */
		{ T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		{ T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * The 8200 doesn't like multi-lun probing, and probably
		 * doesn't like serial number requests either.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"EXB-8200*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Let's try the same as above, but for a drive that says
		 * it's an IPL-6860 but is actually an EXB 8200.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"IPL-6860*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These Hitachi drives don't like multi-lun probing.
		 * The PR submitter has a DK319H, but says that the Linux
		 * kernel has a similar work-around for the DK312 and DK314,
		 * so all DK31* drives are quirked here.
		 *
		 * Submitted by:  Paul Haddad <paul@pth.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/*
		 * The Hitachi CJ series with J8A8 firmware apparently has
		 * problems with tagged commands.
		 *
		 * Reported by: amagai@nue.org
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These are the large storage arrays.
		 * Submitted by:  William Carrel <william.carrel@infospace.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
		CAM_QUIRK_HILUNS, 2, 1024
	},
	{
		/*
		 * This old revision of the TDC3600 is also SCSI-1, and
		 * hangs upon serial number probing.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
			" TDC 3600", "U07:"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Maxtor Personal Storage 3000XT (Firewire)
		 * hangs upon serial number probing.
		 */
		{
			T_DIRECT, SIP_MEDIA_FIXED, "Maxtor",
			"1394 storage front panel", "*"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
			"CP150", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
			"96X2*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* TeraSolutions special settings for TRC-22 RAID */
		{ T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
		/*quirks*/0, /*mintags*/55, /*maxtags*/255
	},
	{
		/* Veritas Storage Appliance */
		{ T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
		CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
	},
	{
		/*
		 * Would respond to all LUNs.  Device type and removable
		 * flag are jumper-selectable.
		 */
		{
			T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
			"Tahiti 1", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* EasyRAID E5A aka. areca ARC-6010 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
		CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Western Digital My Book 250GB (USB)
		 * hangs upon serial number probing.
		 */
		{
			T_DIRECT, SIP_MEDIA_FIXED, "WD",
			"2500JB External", "*"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Default tagged queuing parameters for all devices */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0, /*mintags*/2, /*maxtags*/255
	},
};
static const int xpt_quirk_table_size =
	sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
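
/*
 * Note (added commentary, not from the original source):
 * xpt_find_quirk() scans this table with cam_quirkmatch(), which
 * returns the first entry whose inquiry pattern matches the device.
 * More specific entries therefore must precede the catch-all
 * "*"/"*"/"*" entry at the end, which supplies the default tag
 * counts (2..255) for unquirked devices.
 */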
typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};
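
/*
 * Note (added commentary, not from the original source): dev_match_ret
 * packs two fields into one value.  The low nibble (DM_RET_FLAG_MASK)
 * carries flags such as DM_RET_COPY ("copy this node into the match
 * buffer"); the high nibble (DM_RET_ACTION_MASK) carries exactly one
 * traversal action: NONE, STOP, DESCEND, or ERROR.  The matchers below
 * manipulate the two halves independently, e.g.:
 *
 *	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 *		retval |= DM_RET_DESCEND;
 */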
typedef int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
static SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;	/*
				 * Maximum size of ccb pool.  Modified as
				 * devices are added/removed or have their
				 * opening counts changed.
				 */
static u_int xpt_ccb_count;	/* Current count of allocated ccbs */

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;
static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
	probe_periph_init, "probe",
	TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);
PERIPHDRIVER_DECLARE(probe, probe_driver);
#define XPT_CDEV_MAJOR 104

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct dev_ops xpt_ops = {
	{ "xpt", XPT_CDEV_MAJOR, 0 },
	.d_open = xptopen,
	.d_close = xptclose,
	.d_ioctl = xptioctl
};

static struct intr_config_hook *xpt_config_hook;
static void dead_sim_action(struct cam_sim *sim, union ccb *ccb);
static void dead_sim_poll(struct cam_sim *sim);

/* Dummy SIM that is used when the real one has gone. */
static struct cam_sim cam_dead_sim = {
	.sim_action =	dead_sim_action,
	.sim_poll =	dead_sim_poll,
	.sim_name =	"dead_sim",
};

#define	SIM_DEAD(sim)	((sim) == &cam_dead_sim)
/* Registered busses */
static TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging data structures */
#ifdef CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif
#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static void	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);
static cam_status	xpt_compile_path(struct cam_path *new_path,
					 struct cam_periph *perph,
					 path_id_t path_id,
					 target_id_t target_id,
					 lun_id_t lun_id);

static void		xpt_release_path(struct cam_path *path);

static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static void		xpt_dev_async(u_int32_t async_code,
				      struct cam_eb *bus,
				      struct cam_et *target,
				      struct cam_ed *device,
				      void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				  u_int32_t new_priority);
static void	 xpt_run_dev_allocq(struct cam_eb *bus);
static void	 xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static void	 xpt_release_bus(struct cam_eb *bus);
static void	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
					 int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
				  lun_id_t lun_id);
static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
				    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void	 xpt_scan_lun(struct cam_periph *periph,
			      struct cam_path *path, cam_flags flags,
			      union ccb *ccb);
static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t	xptconfigbuscountfunc;
static xpt_busfunc_t	xptconfigfunc;
static void	 xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static inthand2_t swi_cambio;
static void	 camisr(cam_isrq_t *queue);
#if 0
static void	 xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void	 xptasync(struct cam_periph *periph,
			  u_int32_t code, cam_path *path);
#endif
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
static int		xpt_for_all_targets(xpt_targetfunc_t *tr_func,
					    void *arg);
static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
					    void *arg);
static int		xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static cam_status	proberegister(struct cam_periph *periph,
				      void *arg);
static void	 probeschedule(struct cam_periph *probe_periph);
static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
static void	 proberequestdefaultnegotiation(struct cam_periph *periph);
static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
static void	 probecleanup(struct cam_periph *periph);
static void	 xpt_find_quirk(struct cam_ed *device);
static void	 xpt_devise_transport(struct cam_path *path);
static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
					   struct cam_ed *device,
					   int async_update);
static void	 xpt_toggle_tags(struct cam_path *path);
static void	 xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
					    struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
					   struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (bus->sim->devq && dev->ccbq.devq_openings > 0) {
		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
			cam_ccbq_resize(&dev->ccbq,
					dev->ccbq.dev_openings
					+ dev->ccbq.dev_active);
			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
		}
		/*
		 * The priority of a device waiting for CCB resources
		 * is that of the highest priority peripheral driver
		 * enqueued.
		 */
		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
					  &dev->alloc_ccb_entry.pinfo,
					  CAMQ_GET_HEAD(&dev->drvq)->priority);
	} else {
		retval = 0;
	}

	return (retval);
}
static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (bus->sim->devq && dev->ccbq.dev_openings > 0) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&bus->sim->devq->send_queue,
				     &dev->send_ccb_entry.pinfo,
				     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
	} else {
		retval = 0;
	}

	return (retval);
}
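
/*
 * Note (added commentary, not from the original source): the two
 * schedulers above differ only in which devq queue they feed and
 * where the priority comes from.  The alloc queue hands out empty
 * CCBs, so a device inherits the priority of the most urgent
 * peripheral waiting in dev->drvq; the send queue dispatches filled
 * CCBs to the SIM, so the device inherits the priority of the most
 * urgent CCB already queued in dev->ccbq.
 */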
static __inline int
periph_is_queued(struct cam_periph *periph)
{
	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}
static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
	/*
	 * Have work to do.
	 * Have space to do more work.
	 * Allowed to do work.
	 */
	return ((devq->alloc_queue.qfrozen_cnt == 0)
	     && (devq->alloc_queue.entries > 0)
	     && (devq->alloc_openings > 0));
}
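
/*
 * Note (added commentary, not from the original source): the three
 * tests correspond to the three comment lines above --
 * alloc_queue.entries > 0 means a device is waiting (work to do),
 * alloc_openings > 0 means the CCB pool can still grow (space for
 * more work), and qfrozen_cnt == 0 means the queue is not frozen
 * (allowed to work).
 */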
static void
xpt_periph_init(void)
{
	dev_ops_add(&xpt_ops, 0, 0);
	make_dev(&xpt_ops, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init(void)
{
}

static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}
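
/*
 * Note (added commentary, not from the original source): sleeping on
 * the address of ccb_h.cbfcnp is the convention used by the
 * synchronous CCB submitters (e.g. cam_periph_runccb()), so the
 * wakeup() above releases whoever is waiting for this CCB to
 * complete.
 */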
static int
xptopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * Only allow read-write access.
	 */
	if (((ap->a_oflags & FWRITE) == 0) || ((ap->a_oflags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((ap->a_oflags & O_NONBLOCK) != 0) {
		kprintf("xpt%d: can't do nonblocking access\n", unit);
		return(ENODEV);
	}

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		kprintf("xptopen: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves open */
	xsoftc.flags |= XPT_FLAG_OPEN;

	return(0);
}
static int
xptclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		kprintf("xptclose: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves closed */
	xsoftc.flags &= ~XPT_FLAG_OPEN;

	return(0);
}
static int
xptioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int unit, error;

	error = 0;
	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		kprintf("xptioctl: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	switch(ap->a_cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;

		inccb = (union ccb *)ap->a_data;

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
				error = EINVAL;
				break;
			}
			/* FALLTHROUGH */
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			ccb.ccb_h.cbfcnp = xptdone;
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;
		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;
			error = 0;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		break;
	}
	/*
	 * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit name filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass"), and unit number are passed back in the
	 * ccb.  The current device generation number, and the index into
	 * the device peripheral driver list, and the status are also
	 * passed back.  Note that since we do everything in one pass,
	 * unlike the XPT_GDEVLIST ccb, we never return a status of
	 * CAM_GDEVLIST_LIST_CHANGED.  It is (or rather should be)
	 * impossible for the device peripheral driver list to change
	 * since we look at the whole thing in one pass, and we do it
	 * within a critical section.
	 */
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char   *name;
		u_int unit;
		u_int cur_generation;
		int base_periph_found;
		int splbreaknum;

		ccb = (union ccb *)ap->a_data;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		/*
		 * Every 100 devices, we want to call splz() to check for
		 * and allow the software interrupt handler a chance to run.
		 *
		 * Most systems won't run into this check, but this should
		 * avoid starvation in the software interrupt handler in
		 * large systems.
		 */
		splbreaknum = 100;

		ccb = (union ccb *)ap->a_data;

		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		crit_enter();
ptstartover:
		cur_generation = xsoftc.generation;

		/* first find our driver in the list of drivers */
		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;
		}

		if (*p_drv == NULL) {
			crit_exit();
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (periph->unit_number == unit) {
				break;
			} else if (--splbreaknum == 0) {
				splz();
				splbreaknum = 100;
				if (cur_generation != xsoftc.generation)
					goto ptstartover;
			}
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = SLIST_FIRST(&device->periphs);
			     periph != NULL;
			     periph = SLIST_NEXT(periph, periph_links), i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (SLIST_NEXT(periph, periph_links))
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				kprintf("xptioctl: pass driver is not in the "
				       "kernel\n");
				kprintf("xptioctl: put \"device pass0\" in "
				       "your kernel config file\n");
			}
		}
		crit_exit();
		break;
		}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}
static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
	if (what == MOD_LOAD) {
		xpt_init(NULL);
	} else if (what == MOD_UNLOAD) {
		return EBUSY;
	} else {
		return EOPNOTSUPP;
	}

	return 0;
}
/* Functions accessed by the peripheral drivers */
static void
xpt_init(void *dummy)
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;

	TAILQ_INIT(&xpt_busses);
	TAILQ_INIT(&cam_bioq);
	SLIST_INIT(&ccb_freeq);
	STAILQ_INIT(&highpowerq);

	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	cam_simq_release(devq);
	xpt_max_ccbs = 16;

	xpt_bus_register(xpt_sim, /*bus #*/0);

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		kprintf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return;
	}

	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, NULL);
	xpt_free_path(path);

	xpt_sim->softc = xpt_periph;

	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xpt_config_hook = kmalloc(sizeof(struct intr_config_hook),
				  M_TEMP, M_INTWAIT | M_ZERO);
	xpt_config_hook->ich_func = xpt_config;
	xpt_config_hook->ich_desc = "xpt";
	xpt_config_hook->ich_order = 1000;
	if (config_intrhook_establish(xpt_config_hook) != 0) {
		kfree (xpt_config_hook, M_TEMP);
		kprintf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	/* Install our software interrupt handlers */
	register_swi(SWI_CAMBIO, swi_cambio, NULL, "swi_cambio", NULL);
}
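
/*
 * Note (added commentary, not from the original source): xpt_config()
 * performs the initial bus probe, which requires working interrupt
 * delivery; that is why it is deferred through the intr_config_hook
 * above rather than being called from xpt_init() directly.
 */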
static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	if (periph == NULL) {
		kprintf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	periph->softc = NULL;

	xpt_periph = periph;

	return(CAM_REQ_CMP);
}
int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t status;
	struct periph_list *periph_head;

	device = periph->path->device;

	periph_head = &device->periphs;

	status = CAM_REQ_CMP;

	if (device != NULL) {
		/*
		 * Make room for this peripheral
		 * so it will fit in the queue
		 * when it's scheduled to run
		 */
		crit_enter();
		status = camq_resize(&device->drvq,
				     device->drvq.array_size + 1);

		device->generation++;

		SLIST_INSERT_HEAD(periph_head, periph, periph_links);
		crit_exit();
	}

	xsoftc.generation++;

	return (status);
}
void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	device = periph->path->device;

	if (device != NULL) {
		struct periph_list *periph_head;

		periph_head = &device->periphs;

		/* Release the slot for this peripheral */
		crit_enter();
		camq_resize(&device->drvq, device->drvq.array_size - 1);

		device->generation++;

		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
		crit_exit();
	}

	xsoftc.generation++;
}
void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	struct	ccb_pathinq cpi;
	struct	ccb_trans_settings cts;
	struct	cam_path *path;
	u_int	speed;
	u_int	freq;
	u_int	mb;

	path = periph->path;
	/*
	 * To ensure that this is printed in one piece,
	 * mask out CAM interrupts.
	 */
	crit_enter();
	kprintf("%s%d at %s%d bus %d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->target->target_id,
	       path->device->lun_id);
	kprintf("%s%d: ", periph->periph_name, periph->unit_number);
	scsi_print_inquiry(&path->device->inq_data);
	if (bootverbose && path->device->serial_num_len > 0) {
		/* Don't wrap the screen - print only the first 60 chars */
		kprintf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		crit_exit();
		return;
	}

	/* Ask the SIM for its base transfer speed */
	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	speed = cpi.base_transfer_speed;
	freq = 0;
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
		struct	ccb_trans_settings_spi *spi;

		spi = &cts.xport_specific.spi;
		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
		  && spi->sync_offset != 0) {
			freq = scsi_calc_syncsrate(spi->sync_period);
			speed = freq;
		}

		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
			speed *= (0x01 << spi->bus_width);
	}
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
		struct	ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_SPEED) {
			speed = fc->bitrate;
		}
	}

	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SAS) {
		struct	ccb_trans_settings_sas *sas = &cts.xport_specific.sas;
		if (sas->valid & CTS_SAS_VALID_SPEED) {
			speed = sas->bitrate;
		}
	}

	mb = speed / 1000;
	if (mb > 0)
		kprintf("%s%d: %d.%03dMB/s transfers",
		       periph->periph_name, periph->unit_number,
		       mb, speed % 1000);
	else
		kprintf("%s%d: %dKB/s transfers", periph->periph_name,
		       periph->unit_number, speed);
	/* Report additional information about SPI connections */
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
		struct	ccb_trans_settings_spi *spi;

		spi = &cts.xport_specific.spi;
		if (freq != 0) {
			kprintf(" (%d.%03dMHz%s, offset %d", freq / 1000,
			       freq % 1000,
			       (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
			     ? " DT" : "",
			       spi->sync_offset);
		}
		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
		 && spi->bus_width > 0) {
			if (freq != 0) {
				kprintf(", ");
			} else {
				kprintf(" (");
			}
			kprintf("%dbit)", 8 * (0x01 << spi->bus_width));
		} else if (freq != 0) {
			kprintf(")");
		}
	}
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
		struct	ccb_trans_settings_fc *fc;

		fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_WWNN)
			kprintf(" WWNN 0x%llx", (long long) fc->wwnn);
		if (fc->valid & CTS_FC_VALID_WWPN)
			kprintf(" WWPN 0x%llx", (long long) fc->wwpn);
		if (fc->valid & CTS_FC_VALID_PORT)
			kprintf(" PortID 0x%x", fc->port);
	}

	if (path->device->inq_flags & SID_CmdQue
	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		kprintf("\n%s%d: Tagged Queueing Enabled",
		       periph->periph_name, periph->unit_number);
	}
	kprintf("\n");

	/*
	 * We only want to print the caller's announce string if they've
	 * passed one in..
	 */
	if (announce_string != NULL)
		kprintf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
	crit_exit();
}
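
/*
 * Note (added commentary, not from the original source): the three
 * matchers below share one contract.  Each receives the full pattern
 * array; patterns aimed at other node types only influence whether
 * the traversal should DESCEND, while patterns for this node type may
 * add DM_RET_COPY.  A flags value of *_MATCH_ANY matches
 * unconditionally, and *_MATCH_NONE never matches.
 */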
static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them any
		 * bus node.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}
static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_ed *device)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (device == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this device matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct device_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a device node, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_DEVICE) {
			if ((patterns[i].type == DEV_MATCH_PERIPH)
			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.device_pattern;

		/*
		 * If they want to match any device node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == DEV_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == DEV_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != device->target->bus->path_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
		 && (cur_pattern->target_id != device->target->target_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
		 && (cur_pattern->target_lun != device->lun_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
		 && (cam_quirkmatch((caddr_t)&device->inq_data,
				    (caddr_t)&cur_pattern->inq_pat,
				    1, sizeof(cur_pattern->inq_pat),
				    scsi_static_inquiry_match) == NULL))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this device.  So tell the caller to copy
		 * the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a peripheral matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a peripheral
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen any peripheral matching patterns.  So tell the
	 * caller to stop descending the tree -- the user doesn't want to
	 * match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}
/*
 * Match a single peripheral against any number of match patterns.
 */
static dev_match_ret
xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_periph *periph)
{
	dev_match_ret retval;
	int i;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (periph == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this peripheral matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_STOP | DM_RET_COPY);

	/*
	 * There aren't any nodes below a peripheral node, so there's no
	 * reason to descend the tree any further.
	 */
	retval = DM_RET_STOP;

	for (i = 0; i < num_patterns; i++) {
		struct periph_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a peripheral, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_PERIPH)
			continue;

		cur_pattern = &patterns[i].pattern.periph_pattern;

		/*
		 * If they want to match on anything, then we will do so.
		 */
		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * We've already set the return action to stop,
			 * since there are no nodes below peripherals in
			 * the tree.
			 */
			return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == PERIPH_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != periph->path->bus->path_id))
			continue;

		/*
		 * For the target and lun id's, we have to make sure the
		 * target and lun pointers aren't NULL.  The xpt peripheral
		 * has a wildcard target and device.
		 */
		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
		 && ((periph->path->target == NULL)
		 ||(cur_pattern->target_id != periph->path->target->target_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
		 && ((periph->path->device == NULL)
		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != periph->unit_number))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this peripheral.  So tell the caller to
		 * copy the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * The return action has already been set to stop, since
		 * peripherals don't have any nodes below them in the EDT.
		 */
		return(retval);
	}

	/*
	 * If we get to this point, the peripheral that was passed in
	 * doesn't match any of the patterns.
	 */
	return(retval);
}
static int
xptedtbusfunc(struct cam_eb *bus, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);

	/*
	 * If we got an error, bail out of the search.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this bus out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;

			cdm->pos.cookie.bus = bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				bus_generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_BUS;
		cdm->matches[j].result.bus_result.path_id = bus->path_id;
		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
		cdm->matches[j].result.bus_result.unit_number =
			bus->sim->unit_number;
		strncpy(cdm->matches[j].result.bus_result.dev_name,
			bus->sim->sim_name, DEV_IDLEN);
	}

	/*
	 * If the user is only interested in busses, there's no
	 * reason to descend to the next level in the tree.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a target generation recorded, check it to
	 * make sure the target list hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (bus == cdm->pos.cookie.bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
	     bus->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		return(xpttargettraverse(bus,
					(struct cam_et *)cdm->pos.cookie.target,
					 xptedttargetfunc, arg));
	else
		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
}
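
/*
 * Note (added commentary, not from the original source): the cdm->pos
 * cookie is how a match transaction resumes after filling its result
 * buffer.  When a traversal function runs out of space it records the
 * current bus/target/device pointers plus the generation counts and
 * returns CAM_DEV_MATCH_MORE; on the next call the saved cookie is
 * compared against each node so the walk restarts where it stopped,
 * and a stale generation count is reported as
 * CAM_DEV_MATCH_LIST_CHANGED rather than returning inconsistent
 * entries.
 */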
static int
xptedttargetfunc(struct cam_et *target, void *arg)
{
	struct ccb_dev_match *cdm;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If there is a device list generation recorded, check it to
	 * make sure the device list hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
	     target->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device != NULL))
		return(xptdevicetraverse(target,
					(struct cam_ed *)cdm->pos.cookie.device,
					 xptedtdevicefunc, arg));
	else
		return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
}
static int
xptedtdevicefunc(struct cam_ed *device, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
					device);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this device out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;

			cdm->pos.cookie.bus = device->target->bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				bus_generation;
			cdm->pos.cookie.target = device->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				device->target->bus->generation;
			cdm->pos.cookie.device = device;
			cdm->pos.generations[CAM_DEV_GENERATION] =
				device->target->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_DEVICE;
		cdm->matches[j].result.device_result.path_id =
			device->target->bus->path_id;
		cdm->matches[j].result.device_result.target_id =
			device->target->target_id;
		cdm->matches[j].result.device_result.target_lun =
			device->lun_id;
		bcopy(&device->inq_data,
		      &cdm->matches[j].result.device_result.inq_data,
		      sizeof(struct scsi_inquiry_data));

		/* Let the user know whether this device is unconfigured */
		if (device->flags & CAM_DEV_UNCONFIGURED)
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_UNCONFIGURED;
		else
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_NOFLAG;
	}

	/*
	 * If the user isn't interested in peripherals, don't descend
	 * the tree any further.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a peripheral list generation recorded, make sure
	 * it hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (device->target->bus == cdm->pos.cookie.bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (device->target == cdm->pos.cookie.target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (device == cdm->pos.cookie.device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
	     device->generation)){
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == device->target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == device->target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH
)
2276 && (cdm
->pos
.cookie
.periph
!= NULL
))
2277 return(xptperiphtraverse(device
,
2278 (struct cam_periph
*)cdm
->pos
.cookie
.periph
,
2279 xptedtperiphfunc
, arg
));
2281 return(xptperiphtraverse(device
, NULL
, xptedtperiphfunc
, arg
));
2285 xptedtperiphfunc(struct cam_periph
*periph
, void *arg
)
2287 struct ccb_dev_match
*cdm
;
2288 dev_match_ret retval
;
2290 cdm
= (struct ccb_dev_match
*)arg
;
2292 retval
= xptperiphmatch(cdm
->patterns
, cdm
->num_patterns
, periph
);
2294 if ((retval
& DM_RET_ACTION_MASK
) == DM_RET_ERROR
) {
2295 cdm
->status
= CAM_DEV_MATCH_ERROR
;
2300 * If the copy flag is set, copy this peripheral out.
2302 if (retval
& DM_RET_COPY
) {
2305 spaceleft
= cdm
->match_buf_len
- (cdm
->num_matches
*
2306 sizeof(struct dev_match_result
));
2309 * If we don't have enough space to put in another
2310 * match result, save our position and tell the
2311 * user there are more devices to check.
2313 if (spaceleft
< sizeof(struct dev_match_result
)) {
2314 bzero(&cdm
->pos
, sizeof(cdm
->pos
));
2315 cdm
->pos
.position_type
=
2316 CAM_DEV_POS_EDT
| CAM_DEV_POS_BUS
|
2317 CAM_DEV_POS_TARGET
| CAM_DEV_POS_DEVICE
|
2320 cdm
->pos
.cookie
.bus
= periph
->path
->bus
;
2321 cdm
->pos
.generations
[CAM_BUS_GENERATION
]=
2323 cdm
->pos
.cookie
.target
= periph
->path
->target
;
2324 cdm
->pos
.generations
[CAM_TARGET_GENERATION
] =
2325 periph
->path
->bus
->generation
;
2326 cdm
->pos
.cookie
.device
= periph
->path
->device
;
2327 cdm
->pos
.generations
[CAM_DEV_GENERATION
] =
2328 periph
->path
->target
->generation
;
2329 cdm
->pos
.cookie
.periph
= periph
;
2330 cdm
->pos
.generations
[CAM_PERIPH_GENERATION
] =
2331 periph
->path
->device
->generation
;
2332 cdm
->status
= CAM_DEV_MATCH_MORE
;
2336 j
= cdm
->num_matches
;
2338 cdm
->matches
[j
].type
= DEV_MATCH_PERIPH
;
2339 cdm
->matches
[j
].result
.periph_result
.path_id
=
2340 periph
->path
->bus
->path_id
;
2341 cdm
->matches
[j
].result
.periph_result
.target_id
=
2342 periph
->path
->target
->target_id
;
2343 cdm
->matches
[j
].result
.periph_result
.target_lun
=
2344 periph
->path
->device
->lun_id
;
2345 cdm
->matches
[j
].result
.periph_result
.unit_number
=
2346 periph
->unit_number
;
2347 strncpy(cdm
->matches
[j
].result
.periph_result
.periph_name
,
2348 periph
->periph_name
, DEV_IDLEN
);
static int
xptedtmatch(struct ccb_dev_match *cdm)
{
	int ret;

	cdm->num_matches = 0;

	/*
	 * Check the bus list generation.  If it has changed, the user
	 * needs to reset everything and start over.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus != NULL))
		ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
				     xptedtbusfunc, cdm);
	else
		ret = xptbustraverse(NULL, xptedtbusfunc, cdm);

	/*
	 * If we get back 0, that means that we had to stop before fully
	 * traversing the EDT.  It also means that one of the subroutines
	 * has set the status field to the proper value.  If we get back 1,
	 * we've fully traversed the EDT and copied out any matching entries.
	 */
	if (ret == 1)
		cdm->status = CAM_DEV_MATCH_LAST;

	return(ret);
}
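/*
 * For illustration only: a minimal sketch of how a consumer of the
 * XPT_DEV_MATCH ccb is expected to drive the resumable protocol that
 * xptedtmatch() implements.  The helper names in the sketch
 * ("submit_dev_match_ccb", "consume") are hypothetical and stand in for
 * building the ccb and passing it through xpt_action() or the pass/xpt
 * ioctl path; the control flow follows from the status codes used above.
 *
 *	struct ccb_dev_match cdm;
 *
 *	bzero(&cdm.pos, sizeof(cdm.pos));	start from the beginning
 *	do {
 *		submit_dev_match_ccb(&cdm);
 *		consume(cdm.matches, cdm.num_matches);
 *	} while (cdm.status == CAM_DEV_MATCH_MORE);
 *
 * A status of CAM_DEV_MATCH_LIST_CHANGED means the EDT changed between
 * calls; the consumer must clear cdm.pos and start over.
 */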
static int
xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
{
	struct ccb_dev_match *cdm;

	cdm = (struct ccb_dev_match *)arg;

	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
	 && (cdm->pos.cookie.pdrv == pdrv)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
	     (*pdrv)->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
	 && (cdm->pos.cookie.pdrv == pdrv)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		return(xptpdperiphtraverse(pdrv,
				(struct cam_periph *)cdm->pos.cookie.periph,
				xptplistperiphfunc, arg));

	return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
}
2420 xptplistperiphfunc(struct cam_periph
*periph
, void *arg
)
2422 struct ccb_dev_match
*cdm
;
2423 dev_match_ret retval
;
2425 cdm
= (struct ccb_dev_match
*)arg
;
2427 retval
= xptperiphmatch(cdm
->patterns
, cdm
->num_patterns
, periph
);
2429 if ((retval
& DM_RET_ACTION_MASK
) == DM_RET_ERROR
) {
2430 cdm
->status
= CAM_DEV_MATCH_ERROR
;
2435 * If the copy flag is set, copy this peripheral out.
2437 if (retval
& DM_RET_COPY
) {
2440 spaceleft
= cdm
->match_buf_len
- (cdm
->num_matches
*
2441 sizeof(struct dev_match_result
));
2444 * If we don't have enough space to put in another
2445 * match result, save our position and tell the
2446 * user there are more devices to check.
2448 if (spaceleft
< sizeof(struct dev_match_result
)) {
2449 struct periph_driver
**pdrv
;
2452 bzero(&cdm
->pos
, sizeof(cdm
->pos
));
2453 cdm
->pos
.position_type
=
2454 CAM_DEV_POS_PDRV
| CAM_DEV_POS_PDPTR
|
2458 * This may look a bit non-sensical, but it is
2459 * actually quite logical. There are very few
2460 * peripheral drivers, and bloating every peripheral
2461 * structure with a pointer back to its parent
2462 * peripheral driver linker set entry would cost
2463 * more in the long run than doing this quick lookup.
2465 for (pdrv
= periph_drivers
; *pdrv
!= NULL
; pdrv
++) {
2466 if (strcmp((*pdrv
)->driver_name
,
2467 periph
->periph_name
) == 0)
2471 if (*pdrv
== NULL
) {
2472 cdm
->status
= CAM_DEV_MATCH_ERROR
;
2476 cdm
->pos
.cookie
.pdrv
= pdrv
;
2478 * The periph generation slot does double duty, as
2479 * does the periph pointer slot. They are used for
2480 * both edt and pdrv lookups and positioning.
2482 cdm
->pos
.cookie
.periph
= periph
;
2483 cdm
->pos
.generations
[CAM_PERIPH_GENERATION
] =
2484 (*pdrv
)->generation
;
2485 cdm
->status
= CAM_DEV_MATCH_MORE
;
2489 j
= cdm
->num_matches
;
2491 cdm
->matches
[j
].type
= DEV_MATCH_PERIPH
;
2492 cdm
->matches
[j
].result
.periph_result
.path_id
=
2493 periph
->path
->bus
->path_id
;
2496 * The transport layer peripheral doesn't have a target or
2499 if (periph
->path
->target
)
2500 cdm
->matches
[j
].result
.periph_result
.target_id
=
2501 periph
->path
->target
->target_id
;
2503 cdm
->matches
[j
].result
.periph_result
.target_id
= -1;
2505 if (periph
->path
->device
)
2506 cdm
->matches
[j
].result
.periph_result
.target_lun
=
2507 periph
->path
->device
->lun_id
;
2509 cdm
->matches
[j
].result
.periph_result
.target_lun
= -1;
2511 cdm
->matches
[j
].result
.periph_result
.unit_number
=
2512 periph
->unit_number
;
2513 strncpy(cdm
->matches
[j
].result
.periph_result
.periph_name
,
2514 periph
->periph_name
, DEV_IDLEN
);
static int
xptperiphlistmatch(struct ccb_dev_match *cdm)
{
	int ret;

	cdm->num_matches = 0;

	/*
	 * At this point in the edt traversal function, we check the bus
	 * list generation to make sure that no busses have been added or
	 * removed since the user last sent a XPT_DEV_MATCH ccb through.
	 * For the peripheral driver list traversal function, however, we
	 * don't have to worry about new peripheral driver types coming or
	 * going; they're in a linker set, and therefore can't change
	 * without a recompile.
	 */

	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
	 && (cdm->pos.cookie.pdrv != NULL))
		ret = xptpdrvtraverse(
				(struct periph_driver **)cdm->pos.cookie.pdrv,
				xptplistpdrvfunc, cdm);
	else
		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);

	/*
	 * If we get back 0, that means that we had to stop before fully
	 * traversing the peripheral driver tree.  It also means that one of
	 * the subroutines has set the status field to the proper value.  If
	 * we get back 1, we've fully traversed the EDT and copied out any
	 * matching entries.
	 */
	if (ret == 1)
		cdm->status = CAM_DEV_MATCH_LAST;

	return(ret);
}
static int
xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
{
	struct cam_eb *bus, *next_bus;
	int retval;

	retval = 1;

	for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
	     bus != NULL;
	     bus = next_bus) {

		next_bus = TAILQ_NEXT(bus, links);

		retval = tr_func(bus, arg);
		if (retval == 0)
			return(retval);
	}

	return(retval);
}

static int
xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
		  xpt_targetfunc_t *tr_func, void *arg)
{
	struct cam_et *target, *next_target;
	int retval;

	retval = 1;
	for (target = (start_target ? start_target :
		       TAILQ_FIRST(&bus->et_entries));
	     target != NULL; target = next_target) {

		next_target = TAILQ_NEXT(target, links);

		retval = tr_func(target, arg);

		if (retval == 0)
			return(retval);
	}

	return(retval);
}

static int
xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
		  xpt_devicefunc_t *tr_func, void *arg)
{
	struct cam_ed *device, *next_device;
	int retval;

	retval = 1;
	for (device = (start_device ? start_device :
		       TAILQ_FIRST(&target->ed_entries));
	     device != NULL;
	     device = next_device) {

		next_device = TAILQ_NEXT(device, links);

		retval = tr_func(device, arg);

		if (retval == 0)
			return(retval);
	}

	return(retval);
}

static int
xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
		  xpt_periphfunc_t *tr_func, void *arg)
{
	struct cam_periph *periph, *next_periph;
	int retval;

	retval = 1;

	for (periph = (start_periph ? start_periph :
		       SLIST_FIRST(&device->periphs));
	     periph != NULL;
	     periph = next_periph) {

		next_periph = SLIST_NEXT(periph, periph_links);

		retval = tr_func(periph, arg);
		if (retval == 0)
			return(retval);
	}

	return(retval);
}

static int
xptpdrvtraverse(struct periph_driver **start_pdrv,
		xpt_pdrvfunc_t *tr_func, void *arg)
{
	struct periph_driver **pdrv;
	int retval;

	retval = 1;

	/*
	 * We don't traverse the peripheral driver list like we do the
	 * other lists, because it is a linker set, and therefore cannot be
	 * changed during runtime.  If the peripheral driver list is ever
	 * re-done to be something other than a linker set (i.e. it can
	 * change while the system is running), the list traversal should
	 * be modified to work like the other traversal functions.
	 */
	for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
	     *pdrv != NULL; pdrv++) {
		retval = tr_func(pdrv, arg);

		if (retval == 0)
			return(retval);
	}

	return(retval);
}

static int
xptpdperiphtraverse(struct periph_driver **pdrv,
		    struct cam_periph *start_periph,
		    xpt_periphfunc_t *tr_func, void *arg)
{
	struct cam_periph *periph, *next_periph;
	int retval;

	retval = 1;

	for (periph = (start_periph ? start_periph :
		       TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
	     periph = next_periph) {

		next_periph = TAILQ_NEXT(periph, unit_links);

		retval = tr_func(periph, arg);
		if (retval == 0)
			return(retval);
	}
	return(retval);
}
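/*
 * Note on the traversal helpers above: every tr_func callback follows
 * the same convention: return 0 to stop the walk immediately (the 0 is
 * propagated up through each traversal level), or non-zero to continue
 * with the next node.  A minimal, hypothetical callback shaped for
 * xptbustraverse might look like this (illustrative sketch only):
 *
 *	static int
 *	example_busfunc(struct cam_eb *bus, void *arg)
 *	{
 *		if (bus->path_id == *(path_id_t *)arg)
 *			return (0);	found it, stop traversing
 *		return (1);		keep going
 *	}
 */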
static int
xptdefbusfunc(struct cam_eb *bus, void *arg)
{
	struct xpt_traverse_config *tr_config;

	tr_config = (struct xpt_traverse_config *)arg;

	if (tr_config->depth == XPT_DEPTH_BUS) {
		xpt_busfunc_t *tr_func;

		tr_func = (xpt_busfunc_t *)tr_config->tr_func;

		return(tr_func(bus, tr_config->tr_arg));
	} else
		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
}

static int
xptdeftargetfunc(struct cam_et *target, void *arg)
{
	struct xpt_traverse_config *tr_config;

	tr_config = (struct xpt_traverse_config *)arg;

	if (tr_config->depth == XPT_DEPTH_TARGET) {
		xpt_targetfunc_t *tr_func;

		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;

		return(tr_func(target, tr_config->tr_arg));
	} else
		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
}

static int
xptdefdevicefunc(struct cam_ed *device, void *arg)
{
	struct xpt_traverse_config *tr_config;

	tr_config = (struct xpt_traverse_config *)arg;

	if (tr_config->depth == XPT_DEPTH_DEVICE) {
		xpt_devicefunc_t *tr_func;

		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;

		return(tr_func(device, tr_config->tr_arg));
	} else
		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
}

static int
xptdefperiphfunc(struct cam_periph *periph, void *arg)
{
	struct xpt_traverse_config *tr_config;
	xpt_periphfunc_t *tr_func;

	tr_config = (struct xpt_traverse_config *)arg;

	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;

	/*
	 * Unlike the other default functions, we don't check for depth
	 * here.  The peripheral driver level is the last level in the EDT,
	 * so if we're here, we should execute the function in question.
	 */
	return(tr_func(periph, tr_config->tr_arg));
}
/*
 * Execute the given function for every bus in the EDT.
 */
static int
xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
{
	struct xpt_traverse_config tr_config;

	tr_config.depth = XPT_DEPTH_BUS;

	tr_config.tr_func = tr_func;
	tr_config.tr_arg = arg;

	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
}

#ifdef notusedyet
/*
 * Execute the given function for every target in the EDT.
 */
static int
xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
{
	struct xpt_traverse_config tr_config;

	tr_config.depth = XPT_DEPTH_TARGET;

	tr_config.tr_func = tr_func;
	tr_config.tr_arg = arg;

	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
}
#endif /* notusedyet */

/*
 * Execute the given function for every device in the EDT.
 */
static int
xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
{
	struct xpt_traverse_config tr_config;

	tr_config.depth = XPT_DEPTH_DEVICE;

	tr_config.tr_func = tr_func;
	tr_config.tr_arg = arg;

	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
}

#ifdef notusedyet
/*
 * Execute the given function for every peripheral in the EDT.
 */
static int
xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
{
	struct xpt_traverse_config tr_config;

	tr_config.depth = XPT_DEPTH_PERIPH;

	tr_config.tr_func = tr_func;
	tr_config.tr_arg = arg;

	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
}
#endif /* notusedyet */
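/*
 * Usage sketch (illustrative): xpt_for_all_devices() is how this file
 * visits every cam_ed in the EDT, e.g. to bring a newly registered async
 * handler up to date with existing devices.  A hypothetical caller that
 * merely counts devices would look like:
 *
 *	static int
 *	count_devices(struct cam_ed *device, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (1);		non-zero: keep traversing
 *	}
 *
 *	int ndevs = 0;
 *	xpt_for_all_devices(count_devices, &ndevs);
 */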
2835 xptsetasyncfunc(struct cam_ed
*device
, void *arg
)
2837 struct cam_path path
;
2838 struct ccb_getdev cgd
;
2839 struct async_node
*cur_entry
;
2841 cur_entry
= (struct async_node
*)arg
;
2844 * Don't report unconfigured devices (Wildcard devs,
2845 * devices only for target mode, device instances
2846 * that have been invalidated but are waiting for
2847 * their last reference count to be released).
2849 if ((device
->flags
& CAM_DEV_UNCONFIGURED
) != 0)
2852 xpt_compile_path(&path
,
2854 device
->target
->bus
->path_id
,
2855 device
->target
->target_id
,
2857 xpt_setup_ccb(&cgd
.ccb_h
, &path
, /*priority*/1);
2858 cgd
.ccb_h
.func_code
= XPT_GDEV_TYPE
;
2859 xpt_action((union ccb
*)&cgd
);
2860 cur_entry
->callback(cur_entry
->callback_arg
,
2863 xpt_release_path(&path
);
2869 xptsetasyncbusfunc(struct cam_eb
*bus
, void *arg
)
2871 struct cam_path path
;
2872 struct ccb_pathinq cpi
;
2873 struct async_node
*cur_entry
;
2875 cur_entry
= (struct async_node
*)arg
;
2877 xpt_compile_path(&path
, /*periph*/NULL
,
2879 CAM_TARGET_WILDCARD
,
2881 xpt_setup_ccb(&cpi
.ccb_h
, &path
, /*priority*/1);
2882 cpi
.ccb_h
.func_code
= XPT_PATH_INQ
;
2883 xpt_action((union ccb
*)&cpi
);
2884 cur_entry
->callback(cur_entry
->callback_arg
,
2887 xpt_release_path(&path
);
2893 xpt_action(union ccb
*start_ccb
)
2895 CAM_DEBUG(start_ccb
->ccb_h
.path
, CAM_DEBUG_TRACE
, ("xpt_action\n"));
2897 start_ccb
->ccb_h
.status
= CAM_REQ_INPROG
;
2901 switch (start_ccb
->ccb_h
.func_code
) {
2904 struct cam_ed
*device
;
2906 char cdb_str
[(SCSI_MAX_CDBLEN
* 3) + 1];
2907 struct cam_path
*path
;
2909 path
= start_ccb
->ccb_h
.path
;
2913 * For the sake of compatibility with SCSI-1
2914 * devices that may not understand the identify
2915 * message, we include lun information in the
2916 * second byte of all commands. SCSI-1 specifies
2917 * that luns are a 3 bit value and reserves only 3
2918 * bits for lun information in the CDB. Later
2919 * revisions of the SCSI spec allow for more than 8
2920 * luns, but have deprecated lun information in the
2921 * CDB. So, if the lun won't fit, we must omit.
2923 * Also be aware that during initial probing for devices,
2924 * the inquiry information is unknown but initialized to 0.
2925 * This means that this code will be exercised while probing
2926 * devices with an ANSI revision greater than 2.
2928 device
= start_ccb
->ccb_h
.path
->device
;
2929 if (device
->protocol_version
<= SCSI_REV_2
2930 && start_ccb
->ccb_h
.target_lun
< 8
2931 && (start_ccb
->ccb_h
.flags
& CAM_CDB_POINTER
) == 0) {
2933 start_ccb
->csio
.cdb_io
.cdb_bytes
[1] |=
2934 start_ccb
->ccb_h
.target_lun
<< 5;
2936 start_ccb
->csio
.scsi_status
= SCSI_STATUS_OK
;
2937 CAM_DEBUG(path
, CAM_DEBUG_CDB
,("%s. CDB: %s\n",
2938 scsi_op_desc(start_ccb
->csio
.cdb_io
.cdb_bytes
[0],
2939 &path
->device
->inq_data
),
2940 scsi_cdb_string(start_ccb
->csio
.cdb_io
.cdb_bytes
,
2941 cdb_str
, sizeof(cdb_str
))));
2945 case XPT_CONT_TARGET_IO
:
2946 start_ccb
->csio
.sense_resid
= 0;
2947 start_ccb
->csio
.resid
= 0;
2952 struct cam_path
*path
;
2953 struct cam_sim
*sim
;
2956 path
= start_ccb
->ccb_h
.path
;
2958 sim
= path
->bus
->sim
;
2959 if (SIM_DEAD(sim
)) {
2960 /* The SIM has gone; just execute the CCB directly. */
2961 cam_ccbq_send_ccb(&path
->device
->ccbq
, start_ccb
);
2962 (*(sim
->sim_action
))(sim
, start_ccb
);
2966 cam_ccbq_insert_ccb(&path
->device
->ccbq
, start_ccb
);
2967 if (path
->device
->qfrozen_cnt
== 0)
2968 runq
= xpt_schedule_dev_sendq(path
->bus
, path
->device
);
2972 xpt_run_dev_sendq(path
->bus
);
2975 case XPT_SET_TRAN_SETTINGS
:
2977 xpt_set_transfer_settings(&start_ccb
->cts
,
2978 start_ccb
->ccb_h
.path
->device
,
2979 /*async_update*/FALSE
);
2982 case XPT_CALC_GEOMETRY
:
2984 struct cam_sim
*sim
;
2986 /* Filter out garbage */
2987 if (start_ccb
->ccg
.block_size
== 0
2988 || start_ccb
->ccg
.volume_size
== 0) {
2989 start_ccb
->ccg
.cylinders
= 0;
2990 start_ccb
->ccg
.heads
= 0;
2991 start_ccb
->ccg
.secs_per_track
= 0;
2992 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
2995 sim
= start_ccb
->ccb_h
.path
->bus
->sim
;
2996 (*(sim
->sim_action
))(sim
, start_ccb
);
3001 union ccb
* abort_ccb
;
3003 abort_ccb
= start_ccb
->cab
.abort_ccb
;
3004 if (XPT_FC_IS_DEV_QUEUED(abort_ccb
)) {
3006 if (abort_ccb
->ccb_h
.pinfo
.index
>= 0) {
3007 struct cam_ccbq
*ccbq
;
3009 ccbq
= &abort_ccb
->ccb_h
.path
->device
->ccbq
;
3010 cam_ccbq_remove_ccb(ccbq
, abort_ccb
);
3011 abort_ccb
->ccb_h
.status
=
3012 CAM_REQ_ABORTED
|CAM_DEV_QFRZN
;
3013 xpt_freeze_devq(abort_ccb
->ccb_h
.path
, 1);
3014 xpt_done(abort_ccb
);
3015 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3018 if (abort_ccb
->ccb_h
.pinfo
.index
== CAM_UNQUEUED_INDEX
3019 && (abort_ccb
->ccb_h
.status
& CAM_SIM_QUEUED
) == 0) {
3021 * We've caught this ccb en route to
3022 * the SIM. Flag it for abort and the
3023 * SIM will do so just before starting
3024 * real work on the CCB.
3026 abort_ccb
->ccb_h
.status
=
3027 CAM_REQ_ABORTED
|CAM_DEV_QFRZN
;
3028 xpt_freeze_devq(abort_ccb
->ccb_h
.path
, 1);
3029 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3033 if (XPT_FC_IS_QUEUED(abort_ccb
)
3034 && (abort_ccb
->ccb_h
.pinfo
.index
== CAM_DONEQ_INDEX
)) {
3036 * It's already completed but waiting
3037 * for our SWI to get to it.
3039 start_ccb
->ccb_h
.status
= CAM_UA_ABORT
;
3043 * If we weren't able to take care of the abort request
3044 * in the XPT, pass the request down to the SIM for processing.
3048 case XPT_ACCEPT_TARGET_IO
:
3050 case XPT_IMMED_NOTIFY
:
3051 case XPT_NOTIFY_ACK
:
3052 case XPT_GET_TRAN_SETTINGS
:
3055 struct cam_sim
*sim
;
3057 sim
= start_ccb
->ccb_h
.path
->bus
->sim
;
3058 (*(sim
->sim_action
))(sim
, start_ccb
);
3063 struct cam_sim
*sim
;
3065 sim
= start_ccb
->ccb_h
.path
->bus
->sim
;
3066 (*(sim
->sim_action
))(sim
, start_ccb
);
3069 case XPT_PATH_STATS
:
3070 start_ccb
->cpis
.last_reset
=
3071 start_ccb
->ccb_h
.path
->bus
->last_reset
;
3072 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3078 dev
= start_ccb
->ccb_h
.path
->device
;
3079 if ((dev
->flags
& CAM_DEV_UNCONFIGURED
) != 0) {
3080 start_ccb
->ccb_h
.status
= CAM_DEV_NOT_THERE
;
3082 struct ccb_getdev
*cgd
;
3086 cgd
= &start_ccb
->cgd
;
3087 bus
= cgd
->ccb_h
.path
->bus
;
3088 tar
= cgd
->ccb_h
.path
->target
;
3089 cgd
->inq_data
= dev
->inq_data
;
3090 cgd
->ccb_h
.status
= CAM_REQ_CMP
;
3091 cgd
->serial_num_len
= dev
->serial_num_len
;
3092 if ((dev
->serial_num_len
> 0)
3093 && (dev
->serial_num
!= NULL
))
3094 bcopy(dev
->serial_num
, cgd
->serial_num
,
3095 dev
->serial_num_len
);
3099 case XPT_GDEV_STATS
:
3103 dev
= start_ccb
->ccb_h
.path
->device
;
3104 if ((dev
->flags
& CAM_DEV_UNCONFIGURED
) != 0) {
3105 start_ccb
->ccb_h
.status
= CAM_DEV_NOT_THERE
;
3107 struct ccb_getdevstats
*cgds
;
3111 cgds
= &start_ccb
->cgds
;
3112 bus
= cgds
->ccb_h
.path
->bus
;
3113 tar
= cgds
->ccb_h
.path
->target
;
3114 cgds
->dev_openings
= dev
->ccbq
.dev_openings
;
3115 cgds
->dev_active
= dev
->ccbq
.dev_active
;
3116 cgds
->devq_openings
= dev
->ccbq
.devq_openings
;
3117 cgds
->devq_queued
= dev
->ccbq
.queue
.entries
;
3118 cgds
->held
= dev
->ccbq
.held
;
3119 cgds
->last_reset
= tar
->last_reset
;
3120 cgds
->maxtags
= dev
->quirk
->maxtags
;
3121 cgds
->mintags
= dev
->quirk
->mintags
;
3122 if (timevalcmp(&tar
->last_reset
, &bus
->last_reset
, <))
3123 cgds
->last_reset
= bus
->last_reset
;
3124 cgds
->ccb_h
.status
= CAM_REQ_CMP
;
3130 struct cam_periph
*nperiph
;
3131 struct periph_list
*periph_head
;
3132 struct ccb_getdevlist
*cgdl
;
3134 struct cam_ed
*device
;
3141 * Don't want anyone mucking with our data.
3143 device
= start_ccb
->ccb_h
.path
->device
;
3144 periph_head
= &device
->periphs
;
3145 cgdl
= &start_ccb
->cgdl
;
3148 * Check and see if the list has changed since the user
3149 * last requested a list member. If so, tell them that the
3150 * list has changed, and therefore they need to start over
3151 * from the beginning.
3153 if ((cgdl
->index
!= 0) &&
3154 (cgdl
->generation
!= device
->generation
)) {
3155 cgdl
->status
= CAM_GDEVLIST_LIST_CHANGED
;
3160 * Traverse the list of peripherals and attempt to find
3161 * the requested peripheral.
3163 for (nperiph
= SLIST_FIRST(periph_head
), i
= 0;
3164 (nperiph
!= NULL
) && (i
<= cgdl
->index
);
3165 nperiph
= SLIST_NEXT(nperiph
, periph_links
), i
++) {
3166 if (i
== cgdl
->index
) {
3167 strncpy(cgdl
->periph_name
,
3168 nperiph
->periph_name
,
3170 cgdl
->unit_number
= nperiph
->unit_number
;
3175 cgdl
->status
= CAM_GDEVLIST_ERROR
;
3179 if (nperiph
== NULL
)
3180 cgdl
->status
= CAM_GDEVLIST_LAST_DEVICE
;
3182 cgdl
->status
= CAM_GDEVLIST_MORE_DEVS
;
3185 cgdl
->generation
= device
->generation
;
3187 cgdl
->ccb_h
.status
= CAM_REQ_CMP
;
3192 dev_pos_type position_type
;
3193 struct ccb_dev_match
*cdm
;
3196 cdm
= &start_ccb
->cdm
;
3199 * Prevent EDT changes while we traverse it.
3202 * There are two ways of getting at information in the EDT.
3203 * The first way is via the primary EDT tree. It starts
3204 * with a list of busses, then a list of targets on a bus,
3205 * then devices/luns on a target, and then peripherals on a
3206 * device/lun. The "other" way is by the peripheral driver
3207 * lists. The peripheral driver lists are organized by
3208 * peripheral driver. (obviously) So it makes sense to
3209 * use the peripheral driver list if the user is looking
3210 * for something like "da1", or all "da" devices. If the
3211 * user is looking for something on a particular bus/target
3212 * or lun, it's generally better to go through the EDT tree.
3215 if (cdm
->pos
.position_type
!= CAM_DEV_POS_NONE
)
3216 position_type
= cdm
->pos
.position_type
;
3220 position_type
= CAM_DEV_POS_NONE
;
3222 for (i
= 0; i
< cdm
->num_patterns
; i
++) {
3223 if ((cdm
->patterns
[i
].type
== DEV_MATCH_BUS
)
3224 ||(cdm
->patterns
[i
].type
== DEV_MATCH_DEVICE
)){
3225 position_type
= CAM_DEV_POS_EDT
;
3230 if (cdm
->num_patterns
== 0)
3231 position_type
= CAM_DEV_POS_EDT
;
3232 else if (position_type
== CAM_DEV_POS_NONE
)
3233 position_type
= CAM_DEV_POS_PDRV
;
3236 switch(position_type
& CAM_DEV_POS_TYPEMASK
) {
3237 case CAM_DEV_POS_EDT
:
3238 ret
= xptedtmatch(cdm
);
3240 case CAM_DEV_POS_PDRV
:
3241 ret
= xptperiphlistmatch(cdm
);
3244 cdm
->status
= CAM_DEV_MATCH_ERROR
;
3248 if (cdm
->status
== CAM_DEV_MATCH_ERROR
)
3249 start_ccb
->ccb_h
.status
= CAM_REQ_CMP_ERR
;
3251 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3257 struct ccb_setasync
*csa
;
3258 struct async_node
*cur_entry
;
3259 struct async_list
*async_head
;
3262 csa
= &start_ccb
->csa
;
3263 added
= csa
->event_enable
;
3264 async_head
= &csa
->ccb_h
.path
->device
->asyncs
;
3267 * If there is already an entry for us, simply
3270 cur_entry
= SLIST_FIRST(async_head
);
3271 while (cur_entry
!= NULL
) {
3272 if ((cur_entry
->callback_arg
== csa
->callback_arg
)
3273 && (cur_entry
->callback
== csa
->callback
))
3275 cur_entry
= SLIST_NEXT(cur_entry
, links
);
3278 if (cur_entry
!= NULL
) {
3280 * If the request has no flags set,
3283 added
&= ~cur_entry
->event_enable
;
3284 if (csa
->event_enable
== 0) {
3285 SLIST_REMOVE(async_head
, cur_entry
,
3287 csa
->ccb_h
.path
->device
->refcount
--;
3288 kfree(cur_entry
, M_CAMXPT
);
3290 cur_entry
->event_enable
= csa
->event_enable
;
3293 cur_entry
= kmalloc(sizeof(*cur_entry
),
3294 M_CAMXPT
, M_INTWAIT
);
3295 cur_entry
->event_enable
= csa
->event_enable
;
3296 cur_entry
->callback_arg
= csa
->callback_arg
;
3297 cur_entry
->callback
= csa
->callback
;
3298 SLIST_INSERT_HEAD(async_head
, cur_entry
, links
);
3299 csa
->ccb_h
.path
->device
->refcount
++;
3302 if ((added
& AC_FOUND_DEVICE
) != 0) {
3304 * Get this peripheral up to date with all
3305 * the currently existing devices.
3307 xpt_for_all_devices(xptsetasyncfunc
, cur_entry
);
3309 if ((added
& AC_PATH_REGISTERED
) != 0) {
3311 * Get this peripheral up to date with all
3312 * the currently existing busses.
3314 xpt_for_all_busses(xptsetasyncbusfunc
, cur_entry
);
3316 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3321 struct ccb_relsim
*crs
;
3324 crs
= &start_ccb
->crs
;
3325 dev
= crs
->ccb_h
.path
->device
;
3328 crs
->ccb_h
.status
= CAM_DEV_NOT_THERE
;
3332 if ((crs
->release_flags
& RELSIM_ADJUST_OPENINGS
) != 0) {
3334 if (INQ_DATA_TQ_ENABLED(&dev
->inq_data
)) {
3335 /* Don't ever go below one opening */
3336 if (crs
->openings
> 0) {
3337 xpt_dev_ccbq_resize(crs
->ccb_h
.path
,
3341 xpt_print_path(crs
->ccb_h
.path
);
3342 kprintf("tagged openings "
3350 if ((crs
->release_flags
& RELSIM_RELEASE_AFTER_TIMEOUT
) != 0) {
3352 if ((dev
->flags
& CAM_DEV_REL_TIMEOUT_PENDING
) != 0) {
3355 * Just extend the old timeout and decrement
3356 * the freeze count so that a single timeout
3357 * is sufficient for releasing the queue.
3359 start_ccb
->ccb_h
.flags
&= ~CAM_DEV_QFREEZE
;
3360 callout_stop(&dev
->c_handle
);
3363 start_ccb
->ccb_h
.flags
|= CAM_DEV_QFREEZE
;
3366 callout_reset(&dev
->c_handle
,
3367 (crs
->release_timeout
* hz
) / 1000,
3368 xpt_release_devq_timeout
, dev
);
3370 dev
->flags
|= CAM_DEV_REL_TIMEOUT_PENDING
;
3374 if ((crs
->release_flags
& RELSIM_RELEASE_AFTER_CMDCMPLT
) != 0) {
3376 if ((dev
->flags
& CAM_DEV_REL_ON_COMPLETE
) != 0) {
3378 * Decrement the freeze count so that a single
3379 * completion is still sufficient to unfreeze
3382 start_ccb
->ccb_h
.flags
&= ~CAM_DEV_QFREEZE
;
3385 dev
->flags
|= CAM_DEV_REL_ON_COMPLETE
;
3386 start_ccb
->ccb_h
.flags
|= CAM_DEV_QFREEZE
;
3390 if ((crs
->release_flags
& RELSIM_RELEASE_AFTER_QEMPTY
) != 0) {
3392 if ((dev
->flags
& CAM_DEV_REL_ON_QUEUE_EMPTY
) != 0
3393 || (dev
->ccbq
.dev_active
== 0)) {
3395 start_ccb
->ccb_h
.flags
&= ~CAM_DEV_QFREEZE
;
3398 dev
->flags
|= CAM_DEV_REL_ON_QUEUE_EMPTY
;
3399 start_ccb
->ccb_h
.flags
|= CAM_DEV_QFREEZE
;
3403 if ((start_ccb
->ccb_h
.flags
& CAM_DEV_QFREEZE
) == 0) {
3405 xpt_release_devq(crs
->ccb_h
.path
, /*count*/1,
3408 start_ccb
->crs
.qfrozen_cnt
= dev
->qfrozen_cnt
;
3409 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3413 xpt_scan_bus(start_ccb
->ccb_h
.path
->periph
, start_ccb
);
3416 xpt_scan_lun(start_ccb
->ccb_h
.path
->periph
,
3417 start_ccb
->ccb_h
.path
, start_ccb
->crcn
.flags
,
3422 #ifdef CAM_DEBUG_DELAY
3423 cam_debug_delay
= CAM_DEBUG_DELAY
;
3425 cam_dflags
= start_ccb
->cdbg
.flags
;
3426 if (cam_dpath
!= NULL
) {
3427 xpt_free_path(cam_dpath
);
3431 if (cam_dflags
!= CAM_DEBUG_NONE
) {
3432 if (xpt_create_path(&cam_dpath
, xpt_periph
,
3433 start_ccb
->ccb_h
.path_id
,
3434 start_ccb
->ccb_h
.target_id
,
3435 start_ccb
->ccb_h
.target_lun
) !=
3437 start_ccb
->ccb_h
.status
= CAM_RESRC_UNAVAIL
;
3438 cam_dflags
= CAM_DEBUG_NONE
;
3440 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3441 xpt_print_path(cam_dpath
);
3442 kprintf("debugging flags now %x\n", cam_dflags
);
3446 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3448 #else /* !CAMDEBUG */
3449 start_ccb
->ccb_h
.status
= CAM_FUNC_NOTAVAIL
;
3450 #endif /* CAMDEBUG */
3454 if ((start_ccb
->ccb_h
.flags
& CAM_DEV_QFREEZE
) != 0)
3455 xpt_freeze_devq(start_ccb
->ccb_h
.path
, 1);
3456 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3463 start_ccb
->ccb_h
.status
= CAM_PROVIDE_FAIL
;
3470 xpt_polled_action(union ccb
*start_ccb
)
3473 struct cam_sim
*sim
;
3474 struct cam_devq
*devq
;
3477 timeout
= start_ccb
->ccb_h
.timeout
;
3478 sim
= start_ccb
->ccb_h
.path
->bus
->sim
;
3480 dev
= start_ccb
->ccb_h
.path
->device
;
3485 * Steal an opening so that no other queued requests
3486 * can get it before us while we simulate interrupts.
3488 dev
->ccbq
.devq_openings
--;
3489 dev
->ccbq
.dev_openings
--;
3491 while(((devq
&& devq
->send_openings
<= 0) || dev
->ccbq
.dev_openings
< 0)
3492 && (--timeout
> 0)) {
3494 (*(sim
->sim_poll
))(sim
);
3495 swi_cambio(NULL
, NULL
);
3498 dev
->ccbq
.devq_openings
++;
3499 dev
->ccbq
.dev_openings
++;
3502 xpt_action(start_ccb
);
3503 while(--timeout
> 0) {
3504 (*(sim
->sim_poll
))(sim
);
3505 swi_cambio(NULL
, NULL
);
3506 if ((start_ccb
->ccb_h
.status
& CAM_STATUS_MASK
)
3513 * XXX Is it worth adding a sim_timeout entry
3514 * point so we can attempt recovery? If
3515 * this is only used for dumps, I don't think
3518 start_ccb
->ccb_h
.status
= CAM_CMD_TIMEOUT
;
3521 start_ccb
->ccb_h
.status
= CAM_RESRC_UNAVAIL
;
3527 * Schedule a peripheral driver to receive a ccb when it's
3528 * target device has space for more transactions.
3531 xpt_schedule(struct cam_periph
*perph
, u_int32_t new_priority
)
3533 struct cam_ed
*device
;
3534 union ccb
*work_ccb
;
3537 CAM_DEBUG(perph
->path
, CAM_DEBUG_TRACE
, ("xpt_schedule\n"));
3538 device
= perph
->path
->device
;
3540 if (periph_is_queued(perph
)) {
3541 /* Simply reorder based on new priority */
3542 CAM_DEBUG(perph
->path
, CAM_DEBUG_SUBTRACE
,
3543 (" change priority to %d\n", new_priority
));
3544 if (new_priority
< perph
->pinfo
.priority
) {
3545 camq_change_priority(&device
->drvq
,
3550 } else if (SIM_DEAD(perph
->path
->bus
->sim
)) {
3551 /* The SIM is gone so just call periph_start directly. */
3552 work_ccb
= xpt_get_ccb(perph
->path
->device
);
3554 if (work_ccb
== NULL
)
3556 xpt_setup_ccb(&work_ccb
->ccb_h
, perph
->path
, new_priority
);
3557 perph
->pinfo
.priority
= new_priority
;
3558 perph
->periph_start(perph
, work_ccb
);
3561 /* New entry on the queue */
3562 CAM_DEBUG(perph
->path
, CAM_DEBUG_SUBTRACE
,
3563 (" added periph to queue\n"));
3564 perph
->pinfo
.priority
= new_priority
;
3565 perph
->pinfo
.generation
= ++device
->drvq
.generation
;
3566 camq_insert(&device
->drvq
, &perph
->pinfo
);
3567 runq
= xpt_schedule_dev_allocq(perph
->path
->bus
, device
);
3571 CAM_DEBUG(perph
->path
, CAM_DEBUG_SUBTRACE
,
3572 (" calling xpt_run_devq\n"));
3573 xpt_run_dev_allocq(perph
->path
->bus
);
3579 * Schedule a device to run on a given queue.
3580 * If the device was inserted as a new entry on the queue,
3581 * return 1 meaning the device queue should be run. If we
3582 * were already queued, implying someone else has already
3583 * started the queue, return 0 so the caller doesn't attempt
3584 * to run the queue. Must be run in a critical section.
3587 xpt_schedule_dev(struct camq
*queue
, cam_pinfo
*pinfo
,
3588 u_int32_t new_priority
)
3591 u_int32_t old_priority
;
3593 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
, ("xpt_schedule_dev\n"));
3595 old_priority
= pinfo
->priority
;
3598 * Are we already queued?
3600 if (pinfo
->index
!= CAM_UNQUEUED_INDEX
) {
3601 /* Simply reorder based on new priority */
3602 if (new_priority
< old_priority
) {
3603 camq_change_priority(queue
, pinfo
->index
,
3605 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
,
3606 ("changed priority to %d\n",
3611 /* New entry on the queue */
3612 if (new_priority
< old_priority
)
3613 pinfo
->priority
= new_priority
;
3615 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
,
3616 ("Inserting onto queue\n"));
3617 pinfo
->generation
= ++queue
->generation
;
3618 camq_insert(queue
, pinfo
);
3625 xpt_run_dev_allocq(struct cam_eb
*bus
)
3627 struct cam_devq
*devq
;
3629 if ((devq
= bus
->sim
->devq
) == NULL
) {
3630 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
, ("xpt_run_dev_allocq: NULL devq\n"));
3633 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
, ("xpt_run_dev_allocq\n"));
3635 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
,
3636 (" qfrozen_cnt == 0x%x, entries == %d, "
3637 "openings == %d, active == %d\n",
3638 devq
->alloc_queue
.qfrozen_cnt
,
3639 devq
->alloc_queue
.entries
,
3640 devq
->alloc_openings
,
3641 devq
->alloc_active
));
3644 devq
->alloc_queue
.qfrozen_cnt
++;
3645 while ((devq
->alloc_queue
.entries
> 0)
3646 && (devq
->alloc_openings
> 0)
3647 && (devq
->alloc_queue
.qfrozen_cnt
<= 1)) {
3648 struct cam_ed_qinfo
*qinfo
;
3649 struct cam_ed
*device
;
3650 union ccb
*work_ccb
;
3651 struct cam_periph
*drv
;
3654 qinfo
= (struct cam_ed_qinfo
*)camq_remove(&devq
->alloc_queue
,
3656 device
= qinfo
->device
;
3658 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
,
3659 ("running device %p\n", device
));
3661 drvq
= &device
->drvq
;
3664 if (drvq
->entries
<= 0) {
3665 panic("xpt_run_dev_allocq: "
3666 "Device on queue without any work to do");
3669 if ((work_ccb
= xpt_get_ccb(device
)) != NULL
) {
3670 devq
->alloc_openings
--;
3671 devq
->alloc_active
++;
3672 drv
= (struct cam_periph
*)camq_remove(drvq
, CAMQ_HEAD
);
3674 xpt_setup_ccb(&work_ccb
->ccb_h
, drv
->path
,
3675 drv
->pinfo
.priority
);
3676 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
,
3677 ("calling periph start\n"));
3678 drv
->periph_start(drv
, work_ccb
);
3681 * Malloc failure in alloc_ccb
3684 * XXX add us to a list to be run from free_ccb
3685 * if we don't have any ccbs active on this
3686 * device queue otherwise we may never get run
3692 /* Raise IPL for possible insertion and test at top of loop */
3695 if (drvq
->entries
> 0) {
3696 /* We have more work. Attempt to reschedule */
3697 xpt_schedule_dev_allocq(bus
, device
);
3700 devq
->alloc_queue
.qfrozen_cnt
--;
3705 xpt_run_dev_sendq(struct cam_eb
*bus
)
3707 struct cam_devq
*devq
;
3709 if ((devq
= bus
->sim
->devq
) == NULL
) {
3710 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
, ("xpt_run_dev_sendq: NULL devq\n"));
3713 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
, ("xpt_run_dev_sendq\n"));
3716 devq
->send_queue
.qfrozen_cnt
++;
3717 while ((devq
->send_queue
.entries
> 0)
3718 && (devq
->send_openings
> 0)) {
3719 struct cam_ed_qinfo
*qinfo
;
3720 struct cam_ed
*device
;
3721 union ccb
*work_ccb
;
3722 struct cam_sim
*sim
;
3724 if (devq
->send_queue
.qfrozen_cnt
> 1) {
3728 qinfo
= (struct cam_ed_qinfo
*)camq_remove(&devq
->send_queue
,
3730 device
= qinfo
->device
;
3733 * If the device has been "frozen", don't attempt
3736 if (device
->qfrozen_cnt
> 0) {
3740 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
,
3741 ("running device %p\n", device
));
3743 work_ccb
= cam_ccbq_peek_ccb(&device
->ccbq
, CAMQ_HEAD
);
3744 if (work_ccb
== NULL
) {
3745 kprintf("device on run queue with no ccbs???\n");
3749 if ((work_ccb
->ccb_h
.flags
& CAM_HIGH_POWER
) != 0) {
3751 if (num_highpower
<= 0) {
3753 * We got a high power command, but we
3754 * don't have any available slots. Freeze
3755 * the device queue until we have a slot
3758 device
->qfrozen_cnt
++;
3759 STAILQ_INSERT_TAIL(&highpowerq
,
3766 * Consume a high power slot while
3772 devq
->active_dev
= device
;
3773 cam_ccbq_remove_ccb(&device
->ccbq
, work_ccb
);
3775 cam_ccbq_send_ccb(&device
->ccbq
, work_ccb
);
3777 devq
->send_openings
--;
3778 devq
->send_active
++;
3780 if (device
->ccbq
.queue
.entries
> 0)
3781 xpt_schedule_dev_sendq(bus
, device
);
3783 if (work_ccb
&& (work_ccb
->ccb_h
.flags
& CAM_DEV_QFREEZE
) != 0){
3785 * The client wants to freeze the queue
3786 * after this CCB is sent.
3788 device
->qfrozen_cnt
++;
3791 /* In Target mode, the peripheral driver knows best... */
3792 if (work_ccb
->ccb_h
.func_code
== XPT_SCSI_IO
) {
3793 if ((device
->inq_flags
& SID_CmdQue
) != 0
3794 && work_ccb
->csio
.tag_action
!= CAM_TAG_ACTION_NONE
)
3795 work_ccb
->ccb_h
.flags
|= CAM_TAG_ACTION_VALID
;
3798 * Clear this in case of a retried CCB that
3799 * failed due to a rejected tag.
3801 work_ccb
->ccb_h
.flags
&= ~CAM_TAG_ACTION_VALID
;
3805 * Device queues can be shared among multiple sim instances
3806 * that reside on different busses. Use the SIM in the queue
3807 * CCB's path, rather than the one in the bus that was passed
3808 * into this function.
3810 sim
= work_ccb
->ccb_h
.path
->bus
->sim
;
3811 (*(sim
->sim_action
))(sim
, work_ccb
);
3813 devq
->active_dev
= NULL
;
3814 /* Raise IPL for possible insertion and test at top of loop */
3816 devq
->send_queue
.qfrozen_cnt
--;
/*
 * This function merges stuff from the slave ccb into the master ccb, while
 * keeping important fields in the master ccb constant.
 */
void
xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
{
	/*
	 * Pull fields that are valid for peripheral drivers to set
	 * into the master CCB along with the CCB "payload".
	 */
	master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
	master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
	master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
	master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
	bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
	      sizeof(union ccb) - sizeof(struct ccb_hdr));
}
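/*
 * Usage sketch (illustrative, not taken from this file): a typical
 * pattern is for error-recovery code to build a recovery request in a
 * separate, locally declared "slave" ccb and then fold it into the
 * original in-flight "master" ccb, so the queue bookkeeping in the
 * master's header survives.  "recovery_ccb", "done_ccb" and "priority"
 * below are hypothetical locals:
 *
 *	union ccb recovery_ccb;
 *
 *	xpt_setup_ccb(&recovery_ccb.ccb_h, done_ccb->ccb_h.path, priority);
 *	... fill in recovery_ccb, e.g. a TEST UNIT READY ...
 *	xpt_merge_ccb(done_ccb, &recovery_ccb);
 *	xpt_action(done_ccb);
 */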
void
xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
{
	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
	callout_init(&ccb_h->timeout_ch);
	ccb_h->pinfo.priority = priority;
	ccb_h->path = path;
	ccb_h->path_id = path->bus->path_id;
	if (path->target)
		ccb_h->target_id = path->target->target_id;
	else
		ccb_h->target_id = CAM_TARGET_WILDCARD;
	if (path->device) {
		ccb_h->target_lun = path->device->lun_id;
		ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
	} else {
		ccb_h->target_lun = CAM_TARGET_WILDCARD;
	}
	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
	ccb_h->flags = 0;
}
/* Path manipulation functions */
cam_status
xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
		path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
{
	struct cam_path *path;
	cam_status status;

	path = kmalloc(sizeof(*path), M_CAMXPT, M_INTWAIT);
	status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP) {
		kfree(path, M_CAMXPT);
		path = NULL;
	}
	*new_path_ptr = path;
	return (status);
}
static cam_status
xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
{
	struct cam_eb *bus;
	struct cam_et *target;
	struct cam_ed *device;
	cam_status status;

	status = CAM_REQ_CMP;	/* Completed without error */
	target = NULL;		/* Wildcarded */
	device = NULL;		/* Wildcarded */

	/*
	 * We will potentially modify the EDT, so block interrupts
	 * that may attempt to create cam paths.
	 */
	crit_enter();
	bus = xpt_find_bus(path_id);
	if (bus == NULL) {
		status = CAM_PATH_INVALID;
	} else {
		target = xpt_find_target(bus, target_id);
		if (target == NULL) {
			/* Create one */
			struct cam_et *new_target;

			new_target = xpt_alloc_target(bus, target_id);
			if (new_target == NULL) {
				status = CAM_RESRC_UNAVAIL;
			} else {
				target = new_target;
			}
		}
		if (target != NULL) {
			device = xpt_find_device(target, lun_id);
			if (device == NULL) {
				/* Create one */
				struct cam_ed *new_device;

				new_device = xpt_alloc_device(bus,
							      target,
							      lun_id);
				if (new_device == NULL) {
					status = CAM_RESRC_UNAVAIL;
				} else {
					device = new_device;
				}
			}
		}
	}
	crit_exit();

	/*
	 * Only touch the user's data if we are successful.
	 */
	if (status == CAM_REQ_CMP) {
		new_path->periph = perph;
		new_path->bus = bus;
		new_path->target = target;
		new_path->device = device;
		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
	} else {
		if (device != NULL)
			xpt_release_device(bus, target, device);
		if (target != NULL)
			xpt_release_target(bus, target);
		if (bus != NULL)
			xpt_release_bus(bus);
	}

	return (status);
}
void
xpt_release_path(struct cam_path *path)
{
	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
	if (path->device != NULL) {
		xpt_release_device(path->bus, path->target, path->device);
		path->device = NULL;
	}
	if (path->target != NULL) {
		xpt_release_target(path->bus, path->target);
		path->target = NULL;
	}
	if (path->bus != NULL) {
		xpt_release_bus(path->bus);
		path->bus = NULL;
	}
}

void
xpt_free_path(struct cam_path *path)
{
	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
	xpt_release_path(path);
	kfree(path, M_CAMXPT);
}
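/*
 * Usage sketch (illustrative): xpt_create_path()/xpt_free_path() are the
 * usual way for peripheral drivers to obtain and drop a counted
 * reference on a bus/target/lun triple:
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, path_id, target_id, lun_id) !=
 *	    CAM_REQ_CMP)
 *		return;			could not build the path
 *	... use the path: xpt_setup_ccb(), xpt_action(), ...
 *	xpt_free_path(path);
 */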
/*
 * Return -1 for failure, 0 for exact match, 1 for match with wildcards
 * in path1, 2 for match with wildcards in path2.
 */
int
xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
{
	int retval = 0;

	if (path1->bus != path2->bus) {
		if (path1->bus->path_id == CAM_BUS_WILDCARD)
			retval = 1;
		else if (path2->bus->path_id == CAM_BUS_WILDCARD)
			retval = 2;
		else
			return (-1);
	}
	if (path1->target != path2->target) {
		if (path1->target->target_id == CAM_TARGET_WILDCARD) {
			if (retval == 0)
				retval = 1;
		} else if (path2->target->target_id == CAM_TARGET_WILDCARD)
			retval = 2;
		else
			return (-1);
	}
	if (path1->device != path2->device) {
		if (path1->device->lun_id == CAM_LUN_WILDCARD) {
			if (retval == 0)
				retval = 1;
		} else if (path2->device->lun_id == CAM_LUN_WILDCARD)
			retval = 2;
		else
			return (-1);
	}
	return (retval);
}

void
xpt_print_path(struct cam_path *path)
{
	if (path == NULL)
		kprintf("(nopath): ");
	else {
		if (path->periph != NULL)
			kprintf("(%s%d:", path->periph->periph_name,
				path->periph->unit_number);
		else
			kprintf("(noperiph:");

		if (path->bus != NULL)
			kprintf("%s%d:%d:", path->bus->sim->sim_name,
				path->bus->sim->unit_number,
				path->bus->sim->bus_id);
		else
			kprintf("nobus:");

		if (path->target != NULL)
			kprintf("%d:", path->target->target_id);
		else
			kprintf("X:");

		if (path->device != NULL)
			kprintf("%d): ", path->device->lun_id);
		else
			kprintf("X): ");
	}
}
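/*
 * Worked example of the return convention above: comparing a fully
 * wildcarded path (CAM_BUS_WILDCARD, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
 * as path1 against a fully specified path as path2 yields 1 (wildcards on
 * the path1 side); swapping the arguments yields 2; two identical concrete
 * paths yield 0; two different concrete paths yield -1.
 */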
int
xpt_path_string(struct cam_path *path, char *str, size_t str_len)
{
	struct sbuf sb;

	sbuf_new(&sb, str, str_len, 0);

	if (path == NULL)
		sbuf_printf(&sb, "(nopath): ");
	else {
		if (path->periph != NULL)
			sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
				    path->periph->unit_number);
		else
			sbuf_printf(&sb, "(noperiph:");

		if (path->bus != NULL)
			sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
				    path->bus->sim->unit_number,
				    path->bus->sim->bus_id);
		else
			sbuf_printf(&sb, "nobus:");

		if (path->target != NULL)
			sbuf_printf(&sb, "%d:", path->target->target_id);
		else
			sbuf_printf(&sb, "X:");

		if (path->device != NULL)
			sbuf_printf(&sb, "%d): ", path->device->lun_id);
		else
			sbuf_printf(&sb, "X): ");
	}
	sbuf_finish(&sb);

	return(sbuf_len(&sb));
}

path_id_t
xpt_path_path_id(struct cam_path *path)
{
	return(path->bus->path_id);
}

target_id_t
xpt_path_target_id(struct cam_path *path)
{
	if (path->target != NULL)
		return (path->target->target_id);
	else
		return (CAM_TARGET_WILDCARD);
}

lun_id_t
xpt_path_lun_id(struct cam_path *path)
{
	if (path->device != NULL)
		return (path->device->lun_id);
	else
		return (CAM_LUN_WILDCARD);
}

struct cam_sim *
xpt_path_sim(struct cam_path *path)
{
	return (path->bus->sim);
}

struct cam_periph*
xpt_path_periph(struct cam_path *path)
{
	return (path->periph);
}
/*
 * Release a CAM control block for the caller.  Remit the cost of the structure
 * to the device referenced by the path.  If this device had no 'credits'
 * and peripheral drivers have registered async callbacks for this notification
 * call them now.
 */
4129 xpt_release_ccb(union ccb
*free_ccb
)
4131 struct cam_path
*path
;
4132 struct cam_ed
*device
;
4135 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
, ("xpt_release_ccb\n"));
4136 path
= free_ccb
->ccb_h
.path
;
4137 device
= path
->device
;
4140 cam_ccbq_release_opening(&device
->ccbq
);
4141 if (xpt_ccb_count
> xpt_max_ccbs
) {
4142 xpt_free_ccb(free_ccb
);
4145 SLIST_INSERT_HEAD(&ccb_freeq
, &free_ccb
->ccb_h
, xpt_links
.sle
);
4147 if (bus
->sim
->devq
== NULL
) {
4151 bus
->sim
->devq
->alloc_openings
++;
4152 bus
->sim
->devq
->alloc_active
--;
4153 /* XXX Turn this into an inline function - xpt_run_device?? */
4154 if ((device_is_alloc_queued(device
) == 0)
4155 && (device
->drvq
.entries
> 0)) {
4156 xpt_schedule_dev_allocq(bus
, device
);
4159 if (bus
->sim
->devq
&& dev_allocq_is_runnable(bus
->sim
->devq
))
4160 xpt_run_dev_allocq(bus
);
4163 /* Functions accessed by SIM drivers */
/*
 * A sim structure, listing the SIM entry points and instance
 * identification info is passed to xpt_bus_register to hook the SIM
 * into the CAM framework.  xpt_bus_register creates a cam_eb entry
 * for this new bus and places it in the array of busses and assigns
 * it a path_id.  The path_id may be influenced by "hard wiring"
 * information specified by the user.  Once interrupt services are
 * available, the bus will be probed.
 */
4175 xpt_bus_register(struct cam_sim
*sim
, u_int32_t bus
)
4177 struct cam_eb
*new_bus
;
4178 struct cam_eb
*old_bus
;
4179 struct ccb_pathinq cpi
;
4182 new_bus
= kmalloc(sizeof(*new_bus
), M_CAMXPT
, M_INTWAIT
);
4184 if (strcmp(sim
->sim_name
, "xpt") != 0) {
4186 xptpathid(sim
->sim_name
, sim
->unit_number
, sim
->bus_id
);
4189 TAILQ_INIT(&new_bus
->et_entries
);
4190 new_bus
->path_id
= sim
->path_id
;
4193 timevalclear(&new_bus
->last_reset
);
4195 new_bus
->refcount
= 1; /* Held until a bus_deregister event */
4196 new_bus
->generation
= 0;
4198 old_bus
= TAILQ_FIRST(&xpt_busses
);
4199 while (old_bus
!= NULL
4200 && old_bus
->path_id
< new_bus
->path_id
)
4201 old_bus
= TAILQ_NEXT(old_bus
, links
);
4202 if (old_bus
!= NULL
)
4203 TAILQ_INSERT_BEFORE(old_bus
, new_bus
, links
);
4205 TAILQ_INSERT_TAIL(&xpt_busses
, new_bus
, links
);
4209 /* Notify interested parties */
4210 if (sim
->path_id
!= CAM_XPT_PATH_ID
) {
4211 struct cam_path path
;
4213 xpt_compile_path(&path
, /*periph*/NULL
, sim
->path_id
,
4214 CAM_TARGET_WILDCARD
, CAM_LUN_WILDCARD
);
4215 xpt_setup_ccb(&cpi
.ccb_h
, &path
, /*priority*/1);
4216 cpi
.ccb_h
.func_code
= XPT_PATH_INQ
;
4217 xpt_action((union ccb
*)&cpi
);
4218 xpt_async(AC_PATH_REGISTERED
, &path
, &cpi
);
4219 xpt_release_path(&path
);
4221 return (CAM_SUCCESS
);
4225 * Deregister a bus. We must clean out all transactions pending on the bus.
4226 * This routine is typically called prior to cam_sim_free() (e.g. see
4227 * dev/usbmisc/umass/umass.c)
4230 xpt_bus_deregister(path_id_t pathid
)
4232 struct cam_path bus_path
;
4233 struct cam_ed
*device
;
4234 struct cam_ed_qinfo
*qinfo
;
4235 struct cam_devq
*devq
;
4236 struct cam_periph
*periph
;
4237 struct cam_sim
*ccbsim
;
4238 union ccb
*work_ccb
;
4241 status
= xpt_compile_path(&bus_path
, NULL
, pathid
,
4242 CAM_TARGET_WILDCARD
, CAM_LUN_WILDCARD
);
4243 if (status
!= CAM_REQ_CMP
)
4247 * This should clear out all pending requests and timeouts, but
4248 * the ccb's may be queued to a software interrupt.
4250 * XXX AC_LOST_DEVICE does not precisely abort the pending requests,
4251 * and it really ought to.
4253 xpt_async(AC_LOST_DEVICE
, &bus_path
, NULL
);
4254 xpt_async(AC_PATH_DEREGISTERED
, &bus_path
, NULL
);
4256 /* The SIM may be gone, so use a dummy SIM for any stray operations. */
4257 devq
= bus_path
.bus
->sim
->devq
;
4258 bus_path
.bus
->sim
= &cam_dead_sim
;
4260 /* Execute any pending operations now. */
4261 while ((qinfo
= (struct cam_ed_qinfo
*)camq_remove(&devq
->send_queue
,
4262 CAMQ_HEAD
)) != NULL
||
4263 (qinfo
= (struct cam_ed_qinfo
*)camq_remove(&devq
->alloc_queue
,
4264 CAMQ_HEAD
)) != NULL
) {
4266 device
= qinfo
->device
;
4267 work_ccb
= cam_ccbq_peek_ccb(&device
->ccbq
, CAMQ_HEAD
);
4268 if (work_ccb
!= NULL
) {
4269 devq
->active_dev
= device
;
4270 cam_ccbq_remove_ccb(&device
->ccbq
, work_ccb
);
4271 cam_ccbq_send_ccb(&device
->ccbq
, work_ccb
);
4272 ccbsim
= work_ccb
->ccb_h
.path
->bus
->sim
;
4273 (*(ccbsim
->sim_action
))(ccbsim
, work_ccb
);
4276 periph
= (struct cam_periph
*)camq_remove(&device
->drvq
,
4279 xpt_schedule(periph
, periph
->pinfo
.priority
);
4280 } while (work_ccb
!= NULL
|| periph
!= NULL
);
4283 /* Make sure all completed CCBs are processed. */
4284 while (!TAILQ_EMPTY(&cam_bioq
)) {
4287 /* Repeat the async's for the benefit of any new devices. */
4288 xpt_async(AC_LOST_DEVICE
, &bus_path
, NULL
);
4289 xpt_async(AC_PATH_DEREGISTERED
, &bus_path
, NULL
);
4292 /* Release the reference count held while registered. */
4293 xpt_release_bus(bus_path
.bus
);
4294 xpt_release_path(&bus_path
);
4296 /* Recheck for more completed CCBs. */
4297 while (!TAILQ_EMPTY(&cam_bioq
))
4300 return (CAM_REQ_CMP
);
static path_id_t
xptnextfreepathid(void)
{
	struct cam_eb *bus;
	path_id_t pathid;
	char *strval;

	pathid = 0;
	bus = TAILQ_FIRST(&xpt_busses);
retry:
	/* Find an unoccupied pathid */
	while (bus != NULL
	    && bus->path_id <= pathid) {
		if (bus->path_id == pathid)
			pathid++;
		bus = TAILQ_NEXT(bus, links);
	}

	/*
	 * Ensure that this pathid is not reserved for
	 * a bus that may be registered in the future.
	 */
	if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
		++pathid;
		/* Start the search over */
		goto retry;
	}
	return (pathid);
}
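/*
 * Illustrative note: the resource_string_value()/resource_int_value()
 * lookups here and in xptpathid() consult the statically configured
 * "scbus" wiring.  Assuming the usual hints syntax (shown only as an
 * example of the configuration mechanism, not something defined in this
 * file), wiring bus 0 of an ahc(4) controller to scbus0 would look like:
 *
 *	hint.scbus.0.at="ahc0"
 *	hint.scbus.0.bus="0"
 */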
4334 xptpathid(const char *sim_name
, int sim_unit
, int sim_bus
)
4340 pathid
= CAM_XPT_PATH_ID
;
4341 ksnprintf(buf
, sizeof(buf
), "%s%d", sim_name
, sim_unit
);
4343 while ((i
= resource_query_string(i
, "at", buf
)) != -1) {
4344 if (strcmp(resource_query_name(i
), "scbus")) {
4345 /* Avoid a bit of foot shooting. */
4348 dunit
= resource_query_unit(i
);
4349 if (dunit
< 0) /* unwired?! */
4351 if (resource_int_value("scbus", dunit
, "bus", &val
) == 0) {
4352 if (sim_bus
== val
) {
4356 } else if (sim_bus
== 0) {
4357 /* Unspecified matches bus 0 */
4361 kprintf("Ambiguous scbus configuration for %s%d "
4362 "bus %d, cannot wire down. The kernel "
4363 "config entry for scbus%d should "
4364 "specify a controller bus.\n"
4365 "Scbus will be assigned dynamically.\n",
4366 sim_name
, sim_unit
, sim_bus
, dunit
);
4371 if (pathid
== CAM_XPT_PATH_ID
)
4372 pathid
= xptnextfreepathid();
4377 xpt_async(u_int32_t async_code
, struct cam_path
*path
, void *async_arg
)
4380 struct cam_et
*target
, *next_target
;
4381 struct cam_ed
*device
, *next_device
;
4383 CAM_DEBUG(path
, CAM_DEBUG_TRACE
, ("xpt_async\n"));
4386 * Most async events come from a CAM interrupt context. In
4387 * a few cases, the error recovery code at the peripheral layer,
4388 * which may run from our SWI or a process context, may signal
4389 * deferred events with a call to xpt_async. Ensure async
4390 * notifications are serialized by blocking cam interrupts.
4396 if (async_code
== AC_BUS_RESET
) {
4397 /* Update our notion of when the last reset occurred */
4398 microuptime(&bus
->last_reset
);
4401 for (target
= TAILQ_FIRST(&bus
->et_entries
);
4403 target
= next_target
) {
4405 next_target
= TAILQ_NEXT(target
, links
);
4407 if (path
->target
!= target
4408 && path
->target
->target_id
!= CAM_TARGET_WILDCARD
4409 && target
->target_id
!= CAM_TARGET_WILDCARD
)
4412 if (async_code
== AC_SENT_BDR
) {
4413 /* Update our notion of when the last reset occurred */
4414 microuptime(&path
->target
->last_reset
);
4417 for (device
= TAILQ_FIRST(&target
->ed_entries
);
4419 device
= next_device
) {
4421 next_device
= TAILQ_NEXT(device
, links
);
4423 if (path
->device
!= device
4424 && path
->device
->lun_id
!= CAM_LUN_WILDCARD
4425 && device
->lun_id
!= CAM_LUN_WILDCARD
)
4428 xpt_dev_async(async_code
, bus
, target
,
4431 xpt_async_bcast(&device
->asyncs
, async_code
,
4437 * If this wasn't a fully wildcarded async, tell all
4438 * clients that want all async events.
4440 if (bus
!= xpt_periph
->path
->bus
)
4441 xpt_async_bcast(&xpt_periph
->path
->device
->asyncs
, async_code
,
static void
xpt_async_bcast(struct async_list *async_head,
		u_int32_t async_code,
		struct cam_path *path, void *async_arg)
{
	struct async_node *cur_entry;

	cur_entry = SLIST_FIRST(async_head);
	while (cur_entry != NULL) {
		struct async_node *next_entry;
		/*
		 * Grab the next list entry before we call the current
		 * entry's callback.  This is because the callback function
		 * can delete its async callback entry.
		 */
		next_entry = SLIST_NEXT(cur_entry, links);
		if ((cur_entry->event_enable & async_code) != 0)
			cur_entry->callback(cur_entry->callback_arg,
					    async_code, path,
					    async_arg);
		cur_entry = next_entry;
	}
}
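/*
 * Illustrative sketch of the receiving side of this broadcast: an async
 * callback registered via the XPT_SASYNC_CB handler in xpt_action() has
 * the same shape as the callback member of struct async_node and is only
 * invoked for codes present in its event_enable mask.  A hypothetical
 * handler that only cares about lost devices might look like:
 *
 *	static void
 *	example_async_cb(void *arg, u_int32_t code, struct cam_path *path,
 *			 void *async_arg)
 *	{
 *		if (code == AC_LOST_DEVICE) {
 *			... tear down per-device state for path ...
 *		}
 *	}
 */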
/*
 * Handle any per-device event notifications that require action by the XPT.
 */
static void
xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
	      struct cam_ed *device, void *async_arg)
{
	cam_status status;
	struct cam_path newpath;

	/*
	 * We only need to handle events for real devices.
	 */
	if (target->target_id == CAM_TARGET_WILDCARD
	 || device->lun_id == CAM_LUN_WILDCARD)
		return;

	/*
	 * We need our own path with wildcards expanded to
	 * handle certain types of events.
	 */
	if ((async_code == AC_SENT_BDR)
	 || (async_code == AC_BUS_RESET)
	 || (async_code == AC_INQ_CHANGED))
		status = xpt_compile_path(&newpath, NULL,
					  bus->path_id,
					  target->target_id,
					  device->lun_id);
	else
		status = CAM_REQ_CMP_ERR;

	if (status == CAM_REQ_CMP) {

		/*
		 * Allow transfer negotiation to occur in a
		 * tag free environment.
		 */
		if (async_code == AC_SENT_BDR
		 || async_code == AC_BUS_RESET)
			xpt_toggle_tags(&newpath);

		if (async_code == AC_INQ_CHANGED) {
			/*
			 * We've sent a start unit command, or
			 * something similar to a device that
			 * may have caused its inquiry data to
			 * change.  So we re-scan the device to
			 * refresh the inquiry data for it.
			 */
			xpt_scan_lun(newpath.periph, &newpath,
				     CAM_EXPECT_INQ_CHANGE, NULL);
		}
		xpt_release_path(&newpath);
	} else if (async_code == AC_LOST_DEVICE) {
		/*
		 * When we lose a device the device may be about to detach
		 * the sim, we have to clear out all pending timeouts and
		 * requests before that happens.  XXX it would be nice if
		 * we could abort the requests pertaining to the device.
		 */
		xpt_release_devq_timeout(device);
		if ((device->flags & CAM_DEV_UNCONFIGURED) == 0) {
			device->flags |= CAM_DEV_UNCONFIGURED;
			xpt_release_device(bus, target, device);
		}
	} else if (async_code == AC_TRANSFER_NEG) {
		struct ccb_trans_settings *settings;

		settings = (struct ccb_trans_settings *)async_arg;
		xpt_set_transfer_settings(settings, device,
					  /*async_update*/TRUE);
	}
}

u_int32_t
xpt_freeze_devq(struct cam_path *path, u_int count)
{
	struct ccb_hdr *ccbh;

	path->device->qfrozen_cnt += count;

	/*
	 * Mark the last CCB in the queue as needing
	 * to be requeued if the driver hasn't
	 * changed its state yet.  This fixes a race
	 * where a ccb is just about to be queued to
	 * a controller driver when its interrupt routine
	 * freezes the queue.  To completely close the
	 * hole, controller drivers must check to see
	 * if a ccb's status is still CAM_REQ_INPROG
	 * under critical section protection just before they queue
	 * the CCB.  See ahc_action/ahc_freeze_devq for
	 * an example.
	 */
	ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
	if (ccbh && ccbh->status == CAM_REQ_INPROG)
		ccbh->status = CAM_REQUEUE_REQ;
	return (path->device->qfrozen_cnt);
}

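/*
 * Freeze the SIM's send queue; the same CAM_REQ_INPROG requeue fixup
 * is applied to the active device's most recently queued CCB.
 */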
u_int32_t
xpt_freeze_simq(struct cam_sim *sim, u_int count)
{
	if (sim->devq == NULL)
		return (count);
	sim->devq->send_queue.qfrozen_cnt += count;
	if (sim->devq->active_dev != NULL) {
		struct ccb_hdr *ccbh;

		ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
				  ccb_hdr_tailq);
		if (ccbh && ccbh->status == CAM_REQ_INPROG)
			ccbh->status = CAM_REQUEUE_REQ;
	}
	return (sim->devq->send_queue.qfrozen_cnt);
}

/*
 * WARNING: most devices, especially USB/UMASS, may detach their sim early.
 * We ref-count the sim (and the bus only NULLs it out when the bus has been
 * freed, which is not the case here), but the device queue is also freed XXX
 * and we have to check that here.
 *
 * XXX fixme: could we simply not null-out the device queue via
 * cam_sim_free()?
 */
static void
xpt_release_devq_timeout(void *arg)
{
	struct cam_ed *device;

	device = (struct cam_ed *)arg;

	xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
}

void
xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
{
	xpt_release_devq_device(path->device, count, run_queue);
}

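/*
 * Drop the frozen count on a device queue.  When the count reaches zero,
 * cancel any pending release timeout and restart the device's send queue.
 */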
static void
xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
{
	if (dev->qfrozen_cnt > 0) {

		count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
		dev->qfrozen_cnt -= count;
		if (dev->qfrozen_cnt == 0) {

			/*
			 * No longer need to wait for a successful
			 * command completion.
			 */
			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;

			/*
			 * Remove any timeouts that might be scheduled
			 * to release this queue.
			 */
			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
				callout_stop(&dev->c_handle);
				dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
			}

			/*
			 * Now that we are unfrozen schedule the
			 * device so any pending transactions are
			 * run.
			 */
			if ((dev->ccbq.queue.entries > 0)
			 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
			 && (run_queue != 0)) {
				xpt_run_dev_sendq(dev->target->bus);
			}
		}
	}
}

void
xpt_release_simq(struct cam_sim *sim, int run_queue)
{
	struct camq *sendq;

	if (sim->devq == NULL)
		return;

	sendq = &(sim->devq->send_queue);

	if (sendq->qfrozen_cnt > 0) {
		sendq->qfrozen_cnt--;
		if (sendq->qfrozen_cnt == 0) {
			struct cam_eb *bus;

			/*
			 * If there is a timeout scheduled to release this
			 * sim queue, remove it.  The queue frozen count is
			 * already at 0.
			 */
			if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
				callout_stop(&sim->c_handle);
				sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
			}
			bus = xpt_find_bus(sim->path_id);

			if (run_queue) {
				/*
				 * Now that we are unfrozen run the send queue.
				 */
				xpt_run_dev_sendq(bus);
			}
			xpt_release_bus(bus);
		}
	}
}

void
xpt_done(union ccb *done_ccb)
{
	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
		/*
		 * Queue up the request for handling by our SWI handler;
		 * this covers the "non-immediate" types of ccbs.
		 */
		switch (done_ccb->ccb_h.path->periph->type) {
		case CAM_PERIPH_BIO:
			TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
					  sim_links.tqe);
			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
			setsoftcambio();
			break;
		default:
			panic("unknown periph type %d",
			      done_ccb->ccb_h.path->periph->type);
		}
	}
}

union ccb *
xpt_alloc_ccb(void)
{
	union ccb *new_ccb;

	new_ccb = kmalloc(sizeof(*new_ccb), M_CAMXPT, M_INTWAIT);
	return (new_ccb);
}

void
xpt_free_ccb(union ccb *free_ccb)
{
	kfree(free_ccb, M_CAMXPT);
}

/* Private XPT functions */

/*
 * Get a CAM control block for the caller.  Charge the structure to the device
 * referenced by the path.  If this device has no 'credits' then the
 * device already has the maximum number of outstanding operations under way
 * and we return NULL.  If we don't have sufficient resources to allocate more
 * ccbs, we also return NULL.
 */
static union ccb *
xpt_get_ccb(struct cam_ed *device)
{
	union ccb *new_ccb;

	if ((new_ccb = (union ccb *)SLIST_FIRST(&ccb_freeq)) == NULL) {
		new_ccb = kmalloc(sizeof(*new_ccb), M_CAMXPT, M_INTWAIT);
		SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
				  xpt_links.sle);
	}
	cam_ccbq_take_opening(&device->ccbq);
	SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
	return (new_ccb);
}

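/*
 * Drop a reference on a bus.  The final release unlinks the bus from the
 * EDT, releases its SIM reference, and frees the structure.
 */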
static void
xpt_release_bus(struct cam_eb *bus)
{
	if (bus->refcount == 1) {
		KKASSERT(TAILQ_FIRST(&bus->et_entries) == NULL);
		TAILQ_REMOVE(&xpt_busses, bus, links);
		cam_sim_release(bus->sim, 0);
		bus_generation++;
		KKASSERT(bus->refcount == 1);
		kfree(bus, M_CAMXPT);
	} else {
		--bus->refcount;
	}
}

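/*
 * Allocate a new target structure and insert it, sorted by target id,
 * into its parent bus's target list.
 */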
static struct cam_et *
xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
{
	struct cam_et *target;
	struct cam_et *cur_target;

	target = kmalloc(sizeof(*target), M_CAMXPT, M_INTWAIT);

	TAILQ_INIT(&target->ed_entries);
	target->bus = bus;
	target->target_id = target_id;
	target->refcount = 1;
	target->generation = 0;
	timevalclear(&target->last_reset);
	/*
	 * Hold a reference to our parent bus so it
	 * will not go away before we do.
	 */
	bus->refcount++;

	/* Insertion sort into our bus's target list */
	cur_target = TAILQ_FIRST(&bus->et_entries);
	while (cur_target != NULL && cur_target->target_id < target_id)
		cur_target = TAILQ_NEXT(cur_target, links);

	if (cur_target != NULL) {
		TAILQ_INSERT_BEFORE(cur_target, target, links);
	} else {
		TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
	}
	bus->generation++;
	return (target);
}

static void
xpt_release_target(struct cam_eb *bus, struct cam_et *target)
{
	if (target->refcount == 1) {
		KKASSERT(TAILQ_FIRST(&target->ed_entries) == NULL);
		TAILQ_REMOVE(&bus->et_entries, target, links);
		bus->generation++;
		xpt_release_bus(bus);
		KKASSERT(target->refcount == 1);
		kfree(target, M_CAMXPT);
	} else {
		--target->refcount;
	}
}

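/*
 * Allocate a new device structure, grow the bus's device queue to make
 * room for it, and insert it, sorted by lun, into the target's list.
 */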
static struct cam_ed *
xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
{
	struct cam_path path;
	struct cam_ed *device;
	struct cam_devq *devq;
	cam_status status;

	if (SIM_DEAD(bus->sim))
		return (NULL);

	/* Make space for us in the device queue on our bus */
	if (bus->sim->devq == NULL)
		return (NULL);
	devq = bus->sim->devq;
	status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);

	if (status != CAM_REQ_CMP) {
		device = NULL;
	} else {
		device = kmalloc(sizeof(*device), M_CAMXPT, M_INTWAIT);
	}

	if (device != NULL) {
		struct cam_ed *cur_device;

		cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
		device->alloc_ccb_entry.device = device;
		cam_init_pinfo(&device->send_ccb_entry.pinfo);
		device->send_ccb_entry.device = device;
		device->target = target;
		device->lun_id = lun_id;
		/* Initialize our queues */
		if (camq_init(&device->drvq, 0) != 0) {
			kfree(device, M_CAMXPT);
			return (NULL);
		}
		if (cam_ccbq_init(&device->ccbq,
				  bus->sim->max_dev_openings) != 0) {
			camq_fini(&device->drvq);
			kfree(device, M_CAMXPT);
			return (NULL);
		}
		SLIST_INIT(&device->asyncs);
		SLIST_INIT(&device->periphs);
		device->generation = 0;
		device->owner = NULL;
		/*
		 * Take the default quirk entry until we have inquiry
		 * data and can determine a better quirk to use.
		 */
		device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
		bzero(&device->inq_data, sizeof(device->inq_data));
		device->inq_flags = 0;
		device->queue_flags = 0;
		device->serial_num = NULL;
		device->serial_num_len = 0;
		device->qfrozen_cnt = 0;
		device->flags = CAM_DEV_UNCONFIGURED;
		device->tag_delay_count = 0;
		device->tag_saved_openings = 0;
		device->refcount = 1;
		callout_init(&device->c_handle);

		/*
		 * Hold a reference to our parent target so it
		 * will not go away before we do.
		 */
		target->refcount++;

		/*
		 * XXX should be limited by number of CCBs this bus can
		 * do.
		 */
		xpt_max_ccbs += device->ccbq.devq_openings;
		/* Insertion sort into our target's device list */
		cur_device = TAILQ_FIRST(&target->ed_entries);
		while (cur_device != NULL && cur_device->lun_id < lun_id)
			cur_device = TAILQ_NEXT(cur_device, links);
		if (cur_device != NULL) {
			TAILQ_INSERT_BEFORE(cur_device, device, links);
		} else {
			TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
		}
		target->generation++;
		if (lun_id != CAM_LUN_WILDCARD) {
			xpt_compile_path(&path,
					 NULL,
					 bus->path_id,
					 target->target_id,
					 lun_id);
			xpt_devise_transport(&path);
			xpt_release_path(&path);
		}
	}
	return (device);
}

static void
xpt_reference_device(struct cam_ed *device)
{
	++device->refcount;
}

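/*
 * Drop a reference on a device.  The final release removes the device
 * from its target, shrinks the bus's device queue, and frees it.
 */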
static void
xpt_release_device(struct cam_eb *bus, struct cam_et *target,
		   struct cam_ed *device)
{
	struct cam_devq *devq;

	if (device->refcount == 1) {
		KKASSERT(device->flags & CAM_DEV_UNCONFIGURED);

		if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
		 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
			panic("Removing device while still queued for ccbs");

		if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
			device->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
			callout_stop(&device->c_handle);
		}

		TAILQ_REMOVE(&target->ed_entries, device, links);
		target->generation++;
		xpt_max_ccbs -= device->ccbq.devq_openings;
		if (!SIM_DEAD(bus->sim)) {
			/* Release our slot in the devq */
			devq = bus->sim->devq;
			cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
		}
		camq_fini(&device->drvq);
		camq_fini(&device->ccbq.queue);
		xpt_release_target(bus, target);
		KKASSERT(device->refcount == 1);
		kfree(device, M_CAMXPT);
	} else {
		--device->refcount;
	}
}

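/*
 * Resize a device's ccb queue to the requested number of openings and
 * adjust the global CCB limit to match.
 */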
static u_int32_t
xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
{
	int	diff;
	int	result;
	struct	cam_ed *dev;

	dev = path->device;

	diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
	result = cam_ccbq_resize(&dev->ccbq, newopenings);
	if (result == CAM_REQ_CMP && (diff < 0)) {
		dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
	}
	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
	 || (dev->inq_flags & SID_CmdQue) != 0)
		dev->tag_saved_openings = newopenings;
	/* Adjust the global limit */
	xpt_max_ccbs += diff;
	return (result);
}

static struct cam_eb *
xpt_find_bus(path_id_t path_id)
{
	struct cam_eb *bus;

	TAILQ_FOREACH(bus, &xpt_busses, links) {
		if (bus->path_id == path_id) {
			bus->refcount++;
			break;
		}
	}
	return (bus);
}

static struct cam_et *
xpt_find_target(struct cam_eb *bus, target_id_t target_id)
{
	struct cam_et *target;

	TAILQ_FOREACH(target, &bus->et_entries, links) {
		if (target->target_id == target_id) {
			target->refcount++;
			break;
		}
	}
	return (target);
}

static struct cam_ed *
xpt_find_device(struct cam_et *target, lun_id_t lun_id)
{
	struct cam_ed *device;

	TAILQ_FOREACH(device, &target->ed_entries, links) {
		if (device->lun_id == lun_id) {
			device->refcount++;
			break;
		}
	}
	return (device);
}

typedef struct {
	union	ccb *request_ccb;
	struct	ccb_pathinq *cpi;
	int	counter;
} xpt_scan_bus_info;

/*
 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
 * As the scan progresses, xpt_scan_bus is used as the
 * callback on completion function.
 */
static void
xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
{
	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
		  ("xpt_scan_bus\n"));
	switch (request_ccb->ccb_h.func_code) {
	case XPT_SCAN_BUS:
	{
		xpt_scan_bus_info *scan_info;
		union	ccb *work_ccb;
		struct	cam_path *path;
		u_int	i;
		u_int	max_target;
		u_int	initiator_id;
		cam_status status;

		/* Find out the characteristics of the bus */
		work_ccb = xpt_alloc_ccb();
		xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
			      request_ccb->ccb_h.pinfo.priority);
		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
		xpt_action(work_ccb);
		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
			request_ccb->ccb_h.status = work_ccb->ccb_h.status;
			xpt_free_ccb(work_ccb);
			xpt_done(request_ccb);
			return;
		}

		if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
			/*
			 * Can't scan the bus on an adapter that
			 * cannot perform the initiator role.
			 */
			request_ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_free_ccb(work_ccb);
			xpt_done(request_ccb);
			return;
		}

		/* Save some state for use while we probe for devices */
		scan_info = (xpt_scan_bus_info *)
		    kmalloc(sizeof(xpt_scan_bus_info), M_TEMP, M_INTWAIT);
		scan_info->request_ccb = request_ccb;
		scan_info->cpi = &work_ccb->cpi;

		/* Cache on our stack so we can work asynchronously */
		max_target = scan_info->cpi->max_target;
		initiator_id = scan_info->cpi->initiator_id;

		/*
		 * We can scan all targets in parallel, or do it sequentially.
		 */
		if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
			max_target = 0;
			scan_info->counter = 0;
		} else {
			scan_info->counter = scan_info->cpi->max_target + 1;
			if (scan_info->cpi->initiator_id < scan_info->counter) {
				scan_info->counter--;
			}
		}

		for (i = 0; i <= max_target; i++) {
			if (i == initiator_id)
				continue;

			status = xpt_create_path(&path, xpt_periph,
						 request_ccb->ccb_h.path_id,
						 i, 0);
			if (status != CAM_REQ_CMP) {
				kprintf("xpt_scan_bus: xpt_create_path failed"
					" with status %#x, bus scan halted\n",
					status);
				kfree(scan_info, M_TEMP);
				request_ccb->ccb_h.status = status;
				xpt_free_ccb(work_ccb);
				xpt_done(request_ccb);
				break;
			}
			work_ccb = xpt_alloc_ccb();
			xpt_setup_ccb(&work_ccb->ccb_h, path,
				      request_ccb->ccb_h.pinfo.priority);
			work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
			work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
			work_ccb->ccb_h.ppriv_ptr0 = scan_info;
			work_ccb->crcn.flags = request_ccb->crcn.flags;
			xpt_action(work_ccb);
		}
		break;
	}
	case XPT_SCAN_LUN:
	{
		cam_status status;
		struct cam_path *path;
		xpt_scan_bus_info *scan_info;
		path_id_t path_id;
		target_id_t target_id;
		lun_id_t lun_id;

		/* Reuse the same CCB to query if a device was really found */
		scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
		xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
			      request_ccb->ccb_h.pinfo.priority);
		request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;

		path_id = request_ccb->ccb_h.path_id;
		target_id = request_ccb->ccb_h.target_id;
		lun_id = request_ccb->ccb_h.target_lun;
		xpt_action(request_ccb);

		if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
			struct cam_ed *device;
			struct cam_et *target;
			int phl;

			/*
			 * If we already probed lun 0 successfully, or
			 * we have additional configured luns on this
			 * target that might have "gone away", go onto
			 * the next lun.
			 */
			target = request_ccb->ccb_h.path->target;
			/*
			 * We may touch devices that we don't
			 * hold references to, so ensure they
			 * don't disappear out from under us.
			 * The target above is referenced by the
			 * path in the request ccb.
			 */
			phl = 0;
			device = TAILQ_FIRST(&target->ed_entries);
			if (device != NULL) {
				phl = CAN_SRCH_HI_SPARSE(device);
				if (device->lun_id == 0)
					device = TAILQ_NEXT(device, links);
			}

			if ((lun_id != 0) || (device != NULL)) {
				if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
					lun_id++;
			}
		} else {
			struct cam_ed *device;

			device = request_ccb->ccb_h.path->device;

			if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
				/* Try the next lun */
				if (lun_id < (CAM_SCSI2_MAXLUN-1)
				 || CAN_SRCH_HI_DENSE(device))
					lun_id++;
			}
		}

		/*
		 * Free the current request path- we're done with it.
		 */
		xpt_free_path(request_ccb->ccb_h.path);

		/*
		 * Check to see if we scan any further luns.
		 */
		if (lun_id == request_ccb->ccb_h.target_lun
		 || lun_id > scan_info->cpi->max_lun) {
			int done;

 hop_again:
			done = 0;
			if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
				scan_info->counter++;
				if (scan_info->counter ==
				    scan_info->cpi->initiator_id) {
					scan_info->counter++;
				}
				if (scan_info->counter >=
				    scan_info->cpi->max_target+1) {
					done = 1;
				}
			} else {
				scan_info->counter--;
				if (scan_info->counter == 0) {
					done = 1;
				}
			}
			if (done) {
				xpt_free_ccb(request_ccb);
				xpt_free_ccb((union ccb *)scan_info->cpi);
				request_ccb = scan_info->request_ccb;
				kfree(scan_info, M_TEMP);
				request_ccb->ccb_h.status = CAM_REQ_CMP;
				xpt_done(request_ccb);
				break;
			}

			if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) {
				break;
			}
			status = xpt_create_path(&path, xpt_periph,
			    scan_info->request_ccb->ccb_h.path_id,
			    scan_info->counter, 0);
			if (status != CAM_REQ_CMP) {
				kprintf("xpt_scan_bus: xpt_create_path failed"
				    " with status %#x, bus scan halted\n",
				    status);
				xpt_free_ccb(request_ccb);
				xpt_free_ccb((union ccb *)scan_info->cpi);
				request_ccb = scan_info->request_ccb;
				kfree(scan_info, M_TEMP);
				request_ccb->ccb_h.status = status;
				xpt_done(request_ccb);
				break;
			}
			xpt_setup_ccb(&request_ccb->ccb_h, path,
			    request_ccb->ccb_h.pinfo.priority);
			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
			request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
			request_ccb->crcn.flags =
			    scan_info->request_ccb->crcn.flags;
		} else {
			status = xpt_create_path(&path, xpt_periph,
						 path_id, target_id, lun_id);
			if (status != CAM_REQ_CMP) {
				kprintf("xpt_scan_bus: xpt_create_path failed "
					"with status %#x, halting LUN scan\n",
					status);
				goto hop_again;
			}
			xpt_setup_ccb(&request_ccb->ccb_h, path,
				      request_ccb->ccb_h.pinfo.priority);
			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
			request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
			request_ccb->crcn.flags =
				scan_info->request_ccb->crcn.flags;
		}
		xpt_action(request_ccb);
		break;
	}
	default:
		break;
	}
}

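/*
 * State and flag definitions for the per-LUN probe state machine
 * implemented by the "probe" peripheral driver below.
 */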
typedef enum {
	PROBE_TUR,
	PROBE_INQUIRY,
	PROBE_FULL_INQUIRY,
	PROBE_MODE_SENSE,
	PROBE_SERIAL_NUM,
	PROBE_TUR_FOR_NEGOTIATION
} probe_action;

typedef enum {
	PROBE_INQUIRY_CKSUM	= 0x01,
	PROBE_SERIAL_CKSUM	= 0x02,
	PROBE_NO_ANNOUNCE	= 0x04
} probe_flags;

typedef struct {
	TAILQ_HEAD(, ccb_hdr) request_ccbs;
	probe_action	action;
	union ccb	saved_ccb;
	probe_flags	flags;
	MD5_CTX		context;
	u_int8_t	digest[16];
} probe_softc;

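/*
 * Begin probing a single LUN.  If a probe is already in progress for
 * this path the request is queued behind it; otherwise a new "probe"
 * peripheral instance is allocated to run the state machine.
 */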
static void
xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
	     cam_flags flags, union ccb *request_ccb)
{
	struct ccb_pathinq cpi;
	cam_status status;
	struct cam_path *new_path;
	struct cam_periph *old_periph;

	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
		  ("xpt_scan_lun\n"));

	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	if (cpi.ccb_h.status != CAM_REQ_CMP) {
		if (request_ccb != NULL) {
			request_ccb->ccb_h.status = cpi.ccb_h.status;
			xpt_done(request_ccb);
		}
		return;
	}

	if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
		/*
		 * Can't scan the bus on an adapter that
		 * cannot perform the initiator role.
		 */
		if (request_ccb != NULL) {
			request_ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_done(request_ccb);
		}
		return;
	}

	if (request_ccb == NULL) {
		request_ccb = kmalloc(sizeof(union ccb), M_TEMP, M_INTWAIT);
		new_path = kmalloc(sizeof(*new_path), M_TEMP, M_INTWAIT);
		status = xpt_compile_path(new_path, xpt_periph,
					  path->bus->path_id,
					  path->target->target_id,
					  path->device->lun_id);

		if (status != CAM_REQ_CMP) {
			xpt_print_path(path);
			kprintf("xpt_scan_lun: can't compile path, can't "
				"continue\n");
			kfree(request_ccb, M_TEMP);
			kfree(new_path, M_TEMP);
			return;
		}
		xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
		request_ccb->ccb_h.cbfcnp = xptscandone;
		request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
		request_ccb->crcn.flags = flags;
	}

	if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
		probe_softc *softc;

		softc = (probe_softc *)old_periph->softc;
		TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
				  periph_links.tqe);
	} else {
		status = cam_periph_alloc(proberegister, NULL, probecleanup,
					  probestart, "probe",
					  CAM_PERIPH_BIO,
					  request_ccb->ccb_h.path, NULL, 0,
					  request_ccb);

		if (status != CAM_REQ_CMP) {
			xpt_print_path(path);
			kprintf("xpt_scan_lun: cam_alloc_periph returned an "
				"error, can't continue probe\n");
			request_ccb->ccb_h.status = status;
			xpt_done(request_ccb);
		}
	}
}

static void
xptscandone(struct cam_periph *periph, union ccb *done_ccb)
{
	xpt_release_path(done_ccb->ccb_h.path);
	kfree(done_ccb->ccb_h.path, M_TEMP);
	kfree(done_ccb, M_TEMP);
}

static cam_status
proberegister(struct cam_periph *periph, void *arg)
{
	union ccb *request_ccb;	/* CCB representing the probe request */
	probe_softc *softc;

	request_ccb = (union ccb *)arg;
	if (periph == NULL) {
		kprintf("proberegister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (request_ccb == NULL) {
		kprintf("proberegister: no probe CCB, "
			"can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = kmalloc(sizeof(*softc), M_TEMP, M_INTWAIT | M_ZERO);
	TAILQ_INIT(&softc->request_ccbs);
	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
			  periph_links.tqe);
	periph->softc = softc;
	cam_periph_acquire(periph);
	/*
	 * Ensure we've waited at least a bus settle
	 * delay before attempting to probe the device.
	 * For HBAs that don't do bus resets, this won't make a difference.
	 */
	cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
				      scsi_delay);
	probeschedule(periph);
	return(CAM_REQ_CMP);
}

static void
probeschedule(struct cam_periph *periph)
{
	struct ccb_pathinq cpi;
	union ccb *ccb;
	probe_softc *softc;

	softc = (probe_softc *)periph->softc;
	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);

	xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	/*
	 * If a device has gone away and another device, or the same one,
	 * is back in the same place, it should have a unit attention
	 * condition pending.  It will not report the unit attention in
	 * response to an inquiry, which may leave invalid transfer
	 * negotiations in effect.  The TUR will reveal the unit attention
	 * condition.  Only send the TUR for lun 0, since some devices
	 * will get confused by commands other than inquiry to non-existent
	 * luns.  If you think a device has gone away start your scan from
	 * lun 0.  This will ensure that any bogus transfer settings are
	 * invalidated.
	 *
	 * If we haven't seen the device before and the controller supports
	 * some kind of transfer negotiation, negotiate with the first
	 * sent command if no bus reset was performed at startup.  This
	 * ensures that the device is not confused by transfer negotiation
	 * settings left over by loader or BIOS action.
	 */
	if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
	 && (ccb->ccb_h.target_lun == 0)) {
		softc->action = PROBE_TUR;
	} else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
	      && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
		proberequestdefaultnegotiation(periph);
		softc->action = PROBE_INQUIRY;
	} else {
		softc->action = PROBE_INQUIRY;
	}

	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
		softc->flags |= PROBE_NO_ANNOUNCE;
	else
		softc->flags &= ~PROBE_NO_ANNOUNCE;

	xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
}

static void
probestart(struct cam_periph *periph, union ccb *start_ccb)
{
	/* Probe the device that our peripheral driver points to */
	struct ccb_scsiio *csio;
	probe_softc *softc;

	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));

	softc = (probe_softc *)periph->softc;
	csio = &start_ccb->csio;

	switch (softc->action) {
	case PROBE_TUR:
	case PROBE_TUR_FOR_NEGOTIATION:
	{
		scsi_test_unit_ready(csio,
				     /*retries*/4,
				     probedone,
				     MSG_SIMPLE_Q_TAG,
				     SSD_FULL_SIZE,
				     /*timeout*/60000);
		break;
	}
	case PROBE_INQUIRY:
	case PROBE_FULL_INQUIRY:
	{
		u_int inquiry_len;
		struct scsi_inquiry_data *inq_buf;

		inq_buf = &periph->path->device->inq_data;
		/*
		 * If the device is currently configured, we calculate an
		 * MD5 checksum of the inquiry data, and if the serial number
		 * length is greater than 0, add the serial number data
		 * into the checksum as well.  Once the inquiry and the
		 * serial number check finish, we attempt to figure out
		 * whether we still have the same device.
		 */
		if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {

			MD5Init(&softc->context);
			MD5Update(&softc->context, (unsigned char *)inq_buf,
				  sizeof(struct scsi_inquiry_data));
			softc->flags |= PROBE_INQUIRY_CKSUM;
			if (periph->path->device->serial_num_len > 0) {
				MD5Update(&softc->context,
					  periph->path->device->serial_num,
					  periph->path->device->serial_num_len);
				softc->flags |= PROBE_SERIAL_CKSUM;
			}
			MD5Final(softc->digest, &softc->context);
		}

		if (softc->action == PROBE_INQUIRY)
			inquiry_len = SHORT_INQUIRY_LENGTH;
		else
			inquiry_len = inq_buf->additional_length
				    + offsetof(struct scsi_inquiry_data,
					       additional_length) + 1;

		/*
		 * Some parallel SCSI devices fail to send an
		 * ignore wide residue message when dealing with
		 * odd length inquiry requests.  Round up to be
		 * safe.
		 */
		inquiry_len = roundup2(inquiry_len, 2);

		scsi_inquiry(csio,
			     /*retries*/4,
			     probedone,
			     MSG_SIMPLE_Q_TAG,
			     (u_int8_t *)inq_buf,
			     inquiry_len,
			     /*evpd*/FALSE,
			     /*page_code*/0,
			     SSD_MIN_SIZE,
			     /*timeout*/60 * 1000);
		break;
	}
	case PROBE_MODE_SENSE:
	{
		void *mode_buf;
		int mode_buf_len;

		mode_buf_len = sizeof(struct scsi_mode_header_6)
			     + sizeof(struct scsi_mode_blk_desc)
			     + sizeof(struct scsi_control_page);
		mode_buf = kmalloc(mode_buf_len, M_TEMP, M_INTWAIT);
		scsi_mode_sense(csio,
				/*retries*/4,
				probedone,
				MSG_SIMPLE_Q_TAG,
				/*dbd*/FALSE,
				SMS_PAGE_CTRL_CURRENT,
				SMS_CONTROL_MODE_PAGE,
				mode_buf,
				mode_buf_len,
				SSD_FULL_SIZE,
				/*timeout*/60000);
		break;
	}
	case PROBE_SERIAL_NUM:
	{
		struct scsi_vpd_unit_serial_number *serial_buf;
		struct cam_ed* device;

		serial_buf = NULL;
		device = periph->path->device;
		device->serial_num = NULL;
		device->serial_num_len = 0;

		if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0) {
			serial_buf = kmalloc(sizeof(*serial_buf), M_TEMP,
					     M_INTWAIT | M_ZERO);
		}

		if (serial_buf != NULL) {
			scsi_inquiry(csio,
				     /*retries*/4,
				     probedone,
				     MSG_SIMPLE_Q_TAG,
				     (u_int8_t *)serial_buf,
				     sizeof(*serial_buf),
				     /*evpd*/TRUE,
				     SVPD_UNIT_SERIAL_NUMBER,
				     SSD_MIN_SIZE,
				     /*timeout*/60 * 1000);
			break;
		}
		/*
		 * We'll have to do without, let our probedone
		 * routine finish up for us.
		 */
		start_ccb->csio.data_ptr = NULL;
		probedone(periph, start_ccb);
		return;
	}
	}
	xpt_action(start_ccb);
}

static void
proberequestdefaultnegotiation(struct cam_periph *periph)
{
	struct ccb_trans_settings cts;

	xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_USER_SETTINGS;
	xpt_action((union ccb *)&cts);
	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		return;
	}
	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb *)&cts);
}

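/*
 * Completion handler for the probe state machine.  Each probe step
 * finishes here and either schedules the next step or completes the
 * queued scan requests.
 */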
static void
probedone(struct cam_periph *periph, union ccb *done_ccb)
{
	probe_softc *softc;
	struct cam_path *path;
	u_int32_t  priority;

	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));

	softc = (probe_softc *)periph->softc;
	path = done_ccb->ccb_h.path;
	priority = done_ccb->ccb_h.pinfo.priority;

	switch (softc->action) {
	case PROBE_TUR:
	{
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {

			if (cam_periph_error(done_ccb, 0,
					     SF_NO_PRINT, NULL) == ERESTART)
				return;
			else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				/* Don't wedge the queue */
				xpt_release_devq(done_ccb->ccb_h.path,
						 /*count*/1,
						 /*run_queue*/TRUE);
		}
		softc->action = PROBE_INQUIRY;
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		return;
	}
	case PROBE_INQUIRY:
	case PROBE_FULL_INQUIRY:
	{
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			struct scsi_inquiry_data *inq_buf;
			u_int8_t periph_qual;

			path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
			inq_buf = &path->device->inq_data;

			periph_qual = SID_QUAL(inq_buf);

			switch(periph_qual) {
			case SID_QUAL_LU_CONNECTED:
			{
				u_int8_t len;

				/*
				 * We conservatively request only
				 * SHORT_INQUIRY_LEN bytes of inquiry
				 * information during our first try
				 * at sending an INQUIRY.  If the device
				 * has more information to give,
				 * perform a second request specifying
				 * the amount of information the device
				 * is willing to give.
				 */
				len = inq_buf->additional_length
				    + offsetof(struct scsi_inquiry_data,
					       additional_length) + 1;
				if (softc->action == PROBE_INQUIRY
				 && len > SHORT_INQUIRY_LENGTH) {
					softc->action = PROBE_FULL_INQUIRY;
					xpt_release_ccb(done_ccb);
					xpt_schedule(periph, priority);
					return;
				}

				xpt_find_quirk(path->device);

				xpt_devise_transport(path);
				if (INQ_DATA_TQ_ENABLED(inq_buf))
					softc->action = PROBE_MODE_SENSE;
				else
					softc->action = PROBE_SERIAL_NUM;

				path->device->flags &= ~CAM_DEV_UNCONFIGURED;
				xpt_reference_device(path->device);

				xpt_release_ccb(done_ccb);
				xpt_schedule(periph, priority);
				return;
			}
			default:
				break;
			}
		} else if (cam_periph_error(done_ccb, 0,
					    done_ccb->ccb_h.target_lun > 0
					    ? SF_RETRY_UA|SF_QUIET_IR
					    : SF_RETRY_UA,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}
		/*
		 * If we get to this point, we got an error status back
		 * from the inquiry and the error status doesn't require
		 * automatically retrying the command.  Therefore, the
		 * inquiry failed.  If we had inquiry information before
		 * for this device, but this latest inquiry command failed,
		 * the device has probably gone away.  If this device isn't
		 * already marked unconfigured, notify the peripheral
		 * drivers that this device is no more.
		 */
		if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
			/* Send the async notification. */
			xpt_async(AC_LOST_DEVICE, path, NULL);
		}

		xpt_release_ccb(done_ccb);
		break;
	}
	case PROBE_MODE_SENSE:
	{
		struct ccb_scsiio *csio;
		struct scsi_mode_header_6 *mode_hdr;

		csio = &done_ccb->csio;
		mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			struct scsi_control_page *page;
			u_int8_t *offset;

			offset = ((u_int8_t *)&mode_hdr[1])
			    + mode_hdr->blk_desc_len;
			page = (struct scsi_control_page *)offset;
			path->device->queue_flags = page->queue_flags;
		} else if (cam_periph_error(done_ccb, 0,
					    SF_RETRY_UA|SF_NO_PRINT,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path,
					 /*count*/1, /*run_queue*/TRUE);
		}
		xpt_release_ccb(done_ccb);
		kfree(mode_hdr, M_TEMP);
		softc->action = PROBE_SERIAL_NUM;
		xpt_schedule(periph, priority);
		return;
	}
	case PROBE_SERIAL_NUM:
	{
		struct ccb_scsiio *csio;
		struct scsi_vpd_unit_serial_number *serial_buf;
		u_int32_t  priority;
		int changed;
		int have_serialnum;

		changed = 1;
		have_serialnum = 0;
		csio = &done_ccb->csio;
		priority = done_ccb->ccb_h.pinfo.priority;
		serial_buf =
		    (struct scsi_vpd_unit_serial_number *)csio->data_ptr;

		/* Clean up from previous instance of this device */
		if (path->device->serial_num != NULL) {
			kfree(path->device->serial_num, M_CAMXPT);
			path->device->serial_num = NULL;
			path->device->serial_num_len = 0;
		}

		if (serial_buf == NULL) {
			/*
			 * Don't process the command as it was never sent
			 */
		} else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
			&& (serial_buf->length > 0)) {

			have_serialnum = 1;
			path->device->serial_num =
				kmalloc((serial_buf->length + 1),
					M_CAMXPT, M_INTWAIT);
			bcopy(serial_buf->serial_num,
			      path->device->serial_num,
			      serial_buf->length);
			path->device->serial_num_len = serial_buf->length;
			path->device->serial_num[serial_buf->length] = '\0';
		} else if (cam_periph_error(done_ccb, 0,
					    SF_RETRY_UA|SF_NO_PRINT,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}

		/*
		 * Let's see if we have seen this device before.
		 */
		if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
			MD5_CTX context;
			u_int8_t digest[16];

			MD5Init(&context);

			MD5Update(&context,
				  (unsigned char *)&path->device->inq_data,
				  sizeof(struct scsi_inquiry_data));

			if (have_serialnum)
				MD5Update(&context, serial_buf->serial_num,
					  serial_buf->length);

			MD5Final(digest, &context);
			if (bcmp(softc->digest, digest, 16) == 0)
				changed = 0;

			/*
			 * XXX Do we need to do a TUR in order to ensure
			 *     that the device really hasn't changed???
			 */
			if ((changed != 0)
			 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
				xpt_async(AC_LOST_DEVICE, path, NULL);
		}
		if (serial_buf != NULL)
			kfree(serial_buf, M_TEMP);

		if (changed != 0) {
			/*
			 * Now that we have all the necessary
			 * information to safely perform transfer
			 * negotiations... Controllers don't perform
			 * any negotiation or tagged queuing until
			 * after the first XPT_SET_TRAN_SETTINGS ccb is
			 * received.  So, on a new device, just retrieve
			 * the user settings, and set them as the current
			 * settings to set the device up.
			 */
			proberequestdefaultnegotiation(periph);
			xpt_release_ccb(done_ccb);

			/*
			 * Perform a TUR to allow the controller to
			 * perform any necessary transfer negotiation.
			 */
			softc->action = PROBE_TUR_FOR_NEGOTIATION;
			xpt_schedule(periph, priority);
			return;
		}
		xpt_release_ccb(done_ccb);
		break;
	}
	case PROBE_TUR_FOR_NEGOTIATION:
		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}

		path->device->flags &= ~CAM_DEV_UNCONFIGURED;
		xpt_reference_device(path->device);

		if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
			/* Inform the XPT that a new device has been found */
			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action(done_ccb);
			xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
				  done_ccb);
		}
		xpt_release_ccb(done_ccb);
		break;
	}
	done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
	TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
	done_ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(done_ccb);
	if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
		cam_periph_invalidate(periph);
		cam_periph_release(periph);
	} else {
		probeschedule(periph);
	}
}

static void
probecleanup(struct cam_periph *periph)
{
	kfree(periph->softc, M_TEMP);
}

static void
xpt_find_quirk(struct cam_ed *device)
{
	caddr_t	match;

	match = cam_quirkmatch((caddr_t)&device->inq_data,
			       (caddr_t)xpt_quirk_table,
			       sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
			       sizeof(*xpt_quirk_table), scsi_inquiry_match);

	if (match == NULL)
		panic("xpt_find_quirk: device didn't match wildcard entry!!");

	device->quirk = (struct xpt_quirk_entry *)match;
}

static int
sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS)
{
	int error, bool;

	bool = cam_srch_hi;
	error = sysctl_handle_int(oidp, &bool, sizeof(bool), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (bool == 0 || bool == 1) {
		cam_srch_hi = bool;
		return (0);
	} else {
		return (EINVAL);
	}
}

static void
xpt_devise_transport(struct cam_path *path)
{
	struct ccb_pathinq cpi;
	struct ccb_trans_settings cts;
	struct scsi_inquiry_data *inq_buf;

	/* Get transport information from the SIM */
	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	inq_buf = NULL;
	if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
		inq_buf = &path->device->inq_data;
	path->device->protocol = PROTO_SCSI;
	path->device->protocol_version =
	    inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
	path->device->transport = cpi.transport;
	path->device->transport_version = cpi.transport_version;

	/*
	 * Any device not using SPI3 features should
	 * be considered SPI2 or lower.
	 */
	if (inq_buf != NULL) {
		if (path->device->transport == XPORT_SPI
		 && (inq_buf->spi3data & SID_SPI_MASK) == 0
		 && path->device->transport_version > 2)
			path->device->transport_version = 2;
	} else {
		struct cam_ed* otherdev;

		for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
		     otherdev != NULL;
		     otherdev = TAILQ_NEXT(otherdev, links)) {
			if (otherdev != path->device)
				break;
		}

		if (otherdev != NULL) {
			/*
			 * Initially assume the same versioning as
			 * prior luns for this target.
			 */
			path->device->protocol_version =
			    otherdev->protocol_version;
			path->device->transport_version =
			    otherdev->transport_version;
		} else {
			/* Until we know better, opt for safety */
			path->device->protocol_version = 2;
			if (path->device->transport == XPORT_SPI)
				path->device->transport_version = 2;
			else
				path->device->transport_version = 0;
		}
	}

	/*
	 * XXX
	 * For a device compliant with SPC-2 we should be able
	 * to determine the transport version supported by
	 * scrutinizing the version descriptors in the
	 * inquiry buffer.
	 */

	/* Tell the controller what we think */
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	cts.transport = path->device->transport;
	cts.transport_version = path->device->transport_version;
	cts.protocol = path->device->protocol;
	cts.protocol_version = path->device->protocol_version;
	cts.proto_specific.valid = 0;
	cts.xport_specific.valid = 0;
	xpt_action((union ccb *)&cts);
}

static void
xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
			  int async_update)
{
	struct	ccb_pathinq cpi;
	struct	ccb_trans_settings cur_cts;
	struct	ccb_trans_settings_scsi *scsi;
	struct	ccb_trans_settings_scsi *cur_scsi;
	struct	cam_sim *sim;
	struct	scsi_inquiry_data *inq_data;

	if (device == NULL) {
		cts->ccb_h.status = CAM_PATH_INVALID;
		xpt_done((union ccb *)cts);
		return;
	}

	if (cts->protocol == PROTO_UNKNOWN
	 || cts->protocol == PROTO_UNSPECIFIED) {
		cts->protocol = device->protocol;
		cts->protocol_version = device->protocol_version;
	}

	if (cts->protocol_version == PROTO_VERSION_UNKNOWN
	 || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
		cts->protocol_version = device->protocol_version;

	if (cts->protocol != device->protocol) {
		xpt_print_path(cts->ccb_h.path);
		kprintf("Uninitialized Protocol %x:%x?\n",
		       cts->protocol, device->protocol);
		cts->protocol = device->protocol;
	}

	if (cts->protocol_version > device->protocol_version) {
		if (bootverbose) {
			xpt_print_path(cts->ccb_h.path);
			kprintf("Down reving Protocol Version from %d to %d?\n",
			       cts->protocol_version, device->protocol_version);
		}
		cts->protocol_version = device->protocol_version;
	}

	if (cts->transport == XPORT_UNKNOWN
	 || cts->transport == XPORT_UNSPECIFIED) {
		cts->transport = device->transport;
		cts->transport_version = device->transport_version;
	}

	if (cts->transport_version == XPORT_VERSION_UNKNOWN
	 || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
		cts->transport_version = device->transport_version;

	if (cts->transport != device->transport) {
		xpt_print_path(cts->ccb_h.path);
		kprintf("Uninitialized Transport %x:%x?\n",
		       cts->transport, device->transport);
		cts->transport = device->transport;
	}

	if (cts->transport_version > device->transport_version) {
		if (bootverbose) {
			xpt_print_path(cts->ccb_h.path);
			kprintf("Down reving Transport Version from %d to %d?\n",
			       cts->transport_version,
			       device->transport_version);
		}
		cts->transport_version = device->transport_version;
	}

	sim = cts->ccb_h.path->bus->sim;

	/*
	 * Nothing more of interest to do unless
	 * this is a device connected via the
	 * SCSI protocol.
	 */
	if (cts->protocol != PROTO_SCSI) {
		if (async_update == FALSE)
			(*(sim->sim_action))(sim, (union ccb *)cts);
		return;
	}

	inq_data = &device->inq_data;
	scsi = &cts->proto_specific.scsi;
	xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	/* SCSI specific sanity checking */
	if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
	 || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
	 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
	 || (device->quirk->mintags == 0)) {
		/*
		 * Can't tag on hardware that doesn't support tags,
		 * doesn't have it enabled, or has broken tag support.
		 */
		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	}

	if (async_update == FALSE) {
		/*
		 * Perform sanity checking against what the
		 * controller and device can do.
		 */
		xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
		cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
		cur_cts.type = cts->type;
		xpt_action((union ccb *)&cur_cts);

		if ((cur_cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			return;
		}
		cur_scsi = &cur_cts.proto_specific.scsi;
		if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
			scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
		}
		if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	}

	/* SPI specific sanity checking */
	if (cts->transport == XPORT_SPI && async_update == FALSE) {
		u_int spi3caps;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings_spi *cur_spi;

		spi = &cts->xport_specific.spi;

		cur_spi = &cur_cts.xport_specific.spi;

		/* Fill in any gaps in what the user gave us */
		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
			spi->sync_period = cur_spi->sync_period;
		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
			spi->sync_period = 0;
		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
			spi->sync_offset = cur_spi->sync_offset;
		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
			spi->sync_offset = 0;
		if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
			spi->ppr_options = cur_spi->ppr_options;
		if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
			spi->ppr_options = 0;
		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
			spi->bus_width = cur_spi->bus_width;
		if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
			spi->bus_width = 0;
		if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
			spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
		}
		if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
		if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
		  && (inq_data->flags & SID_Sync) == 0
		  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
		 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
		 || (spi->sync_offset == 0)
		 || (spi->sync_period == 0)) {
			/* Force async */
			spi->sync_period = 0;
			spi->sync_offset = 0;
		}

		switch (spi->bus_width) {
		case MSG_EXT_WDTR_BUS_32_BIT:
			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
			  || (inq_data->flags & SID_WBus32) != 0
			  || cts->type == CTS_TYPE_USER_SETTINGS)
			 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
				break;
			/* Fall Through to 16-bit */
		case MSG_EXT_WDTR_BUS_16_BIT:
			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
			  || (inq_data->flags & SID_WBus16) != 0
			  || cts->type == CTS_TYPE_USER_SETTINGS)
			 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
				break;
			}
			/* Fall Through to 8-bit */
		default: /* New bus width?? */
		case MSG_EXT_WDTR_BUS_8_BIT:
			/* All targets can do this */
			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
			break;
		}

		spi3caps = cpi.xport_specific.spi.ppr_options;
		if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
		 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
			spi3caps &= inq_data->spi3data;

		if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;

		if ((spi3caps & SID_SPI_IUS) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;

		if ((spi3caps & SID_SPI_QAS) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;

		/* No SPI Transfer settings are allowed unless we are wide */
		if (spi->bus_width == 0)
			spi->ppr_options = 0;

		if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
			/*
			 * Can't tag queue without disconnection.
			 */
			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
			scsi->valid |= CTS_SCSI_VALID_TQ;
		}

		/*
		 * If we are currently performing tagged transactions to
		 * this device and want to change its negotiation parameters,
		 * go non-tagged for a bit to give the controller a chance to
		 * negotiate unhampered by tag messages.
		 */
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS
		 && (device->inq_flags & SID_CmdQue) != 0
		 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
		 && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
				   CTS_SPI_VALID_SYNC_OFFSET|
				   CTS_SPI_VALID_BUS_WIDTH)) != 0)
			xpt_toggle_tags(cts->ccb_h.path);
	}

	if (cts->type == CTS_TYPE_CURRENT_SETTINGS
	 && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
		int device_tagenb;

		/*
		 * If we are transitioning from tags to no-tags or
		 * vice-versa, we need to carefully freeze and restart
		 * the queue so that we don't overlap tagged and non-tagged
		 * commands.  We also temporarily stop tags if there is
		 * a change in transfer negotiation settings to allow
		 * "tag-less" negotiation.
		 */
		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
		 || (device->inq_flags & SID_CmdQue) != 0)
			device_tagenb = TRUE;
		else
			device_tagenb = FALSE;

		if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
		  && device_tagenb == FALSE)
		 || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
		  && device_tagenb == TRUE)) {

			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
				/*
				 * Delay change to use tags until after a
				 * few commands have gone to this device so
				 * the controller has time to perform transfer
				 * negotiations without tagged messages getting
				 * in the way.
				 */
				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
			} else {
				struct ccb_relsim crs;

				xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
				device->inq_flags &= ~SID_CmdQue;
				xpt_dev_ccbq_resize(cts->ccb_h.path,
						    sim->max_dev_openings);
				device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
				device->tag_delay_count = 0;

				xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
					      /*priority*/1);
				crs.ccb_h.func_code = XPT_REL_SIMQ;
				crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
				crs.openings
				    = crs.release_timeout
				    = crs.qfrozen_cnt
				    = 0;
				xpt_action((union ccb *)&crs);
			}
		}
	}
	if (async_update == FALSE)
		(*(sim->sim_action))(sim, (union ccb *)cts);
}

static void
xpt_toggle_tags(struct cam_path *path)
{
	struct cam_ed *dev;

	/*
	 * Give controllers a chance to renegotiate
	 * before starting tag operations.  We
	 * "toggle" tagged queuing off then on
	 * which causes the tag enable command delay
	 * counter to come into effect.
	 */
	dev = path->device;
	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
	 || ((dev->inq_flags & SID_CmdQue) != 0
	  && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
		struct ccb_trans_settings cts;

		xpt_setup_ccb(&cts.ccb_h, path, 1);
		cts.protocol = PROTO_SCSI;
		cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
		cts.transport = XPORT_UNSPECIFIED;
		cts.transport_version = XPORT_VERSION_UNSPECIFIED;
		cts.proto_specific.scsi.flags = 0;
		cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
		xpt_set_transfer_settings(&cts, path->device,
					  /*async_update*/TRUE);
		cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
		xpt_set_transfer_settings(&cts, path->device,
					  /*async_update*/TRUE);
	}
}

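/*
 * Enable tagged queuing for a device, restoring any saved opening count
 * and releasing the device queue once it drains.
 */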
static void
xpt_start_tags(struct cam_path *path)
{
	struct ccb_relsim crs;
	struct cam_ed *device;
	struct cam_sim *sim;
	int    newopenings;

	device = path->device;
	sim = path->bus->sim;
	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
	xpt_freeze_devq(path, /*count*/1);
	device->inq_flags |= SID_CmdQue;
	if (device->tag_saved_openings != 0)
		newopenings = device->tag_saved_openings;
	else
		newopenings = min(device->quirk->maxtags,
				  sim->max_tagged_dev_openings);
	xpt_dev_ccbq_resize(path, newopenings);
	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
	crs.openings
	    = crs.release_timeout
	    = crs.qfrozen_cnt
	    = 0;
	xpt_action((union ccb *)&crs);
}

static int busses_to_config;
static int busses_to_reset;

static int
xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
{
	if (bus->path_id != CAM_XPT_PATH_ID) {
		struct cam_path path;
		struct ccb_pathinq cpi;
		int can_negotiate;

		busses_to_config++;
		xpt_compile_path(&path, NULL, bus->path_id,
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
		cpi.ccb_h.func_code = XPT_PATH_INQ;
		xpt_action((union ccb *)&cpi);
		can_negotiate = cpi.hba_inquiry;
		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
		if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
		 && can_negotiate)
			busses_to_reset++;
		xpt_release_path(&path);
	}

	return(1);
}

static int
xptconfigfunc(struct cam_eb *bus, void *arg)
{
	struct	cam_path *path;
	union	ccb *work_ccb;

	if (bus->path_id != CAM_XPT_PATH_ID) {
		cam_status status;
		int can_negotiate;

		work_ccb = xpt_alloc_ccb();
		if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
					      CAM_TARGET_WILDCARD,
					      CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
			kprintf("xptconfigfunc: xpt_create_path failed with "
			       "status %#x for bus %d\n", status, bus->path_id);
			kprintf("xptconfigfunc: halting bus configuration\n");
			xpt_free_ccb(work_ccb);
			busses_to_config--;
			xpt_finishconfig(xpt_periph, NULL);
			return(0);
		}
		xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
		xpt_action(work_ccb);
		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
			kprintf("xptconfigfunc: CPI failed on bus %d "
			       "with status %d\n", bus->path_id,
			       work_ccb->ccb_h.status);
			xpt_finishconfig(xpt_periph, work_ccb);
			return(1);
		}

		can_negotiate = work_ccb->cpi.hba_inquiry;
		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
		if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
		 && (can_negotiate != 0)) {
			xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
			work_ccb->ccb_h.cbfcnp = NULL;
			CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
				  ("Resetting Bus\n"));
			xpt_action(work_ccb);
			xpt_finishconfig(xpt_periph, work_ccb);
		} else {
			/* Act as though we performed a successful BUS RESET */
			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
			xpt_finishconfig(xpt_periph, work_ccb);
		}
	}

	return(1);
}

static void
xpt_config(void *arg)
{
	/*
	 * Now that interrupts are enabled, go find our devices
	 */

#ifdef CAMDEBUG
	/* Setup debugging flags and path */
#ifdef CAM_DEBUG_FLAGS
	cam_dflags = CAM_DEBUG_FLAGS;
#else /* !CAM_DEBUG_FLAGS */
	cam_dflags = CAM_DEBUG_NONE;
#endif /* CAM_DEBUG_FLAGS */
#ifdef CAM_DEBUG_BUS
	if (cam_dflags != CAM_DEBUG_NONE) {
		if (xpt_create_path(&cam_dpath, xpt_periph,
				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
			kprintf("xpt_config: xpt_create_path() failed for debug"
			       " target %d:%d:%d, debugging disabled\n",
			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
			cam_dflags = CAM_DEBUG_NONE;
		}
	} else {
		cam_dpath = NULL;
	}
#else /* !CAM_DEBUG_BUS */
	cam_dpath = NULL;
#endif /* CAM_DEBUG_BUS */
#endif /* CAMDEBUG */

	/*
	 * Scan all installed busses.
	 */
	xpt_for_all_busses(xptconfigbuscountfunc, NULL);

	if (busses_to_config == 0) {
		/* Call manually because we don't have any busses */
		xpt_finishconfig(xpt_periph, NULL);
	} else {
		if (busses_to_reset > 0 && scsi_delay >= 2000) {
			kprintf("Waiting %d seconds for SCSI "
			       "devices to settle\n", scsi_delay/1000);
		}
		xpt_for_all_busses(xptconfigfunc, NULL);
	}
}

/*
 * If the given device only has one peripheral attached to it, and if that
 * peripheral is the passthrough driver, announce it.  This ensures that the
 * user sees some sort of announcement for every peripheral in their system.
 */
static int
xptpassannouncefunc(struct cam_ed *device, void *arg)
{
	struct cam_periph *periph;
	int i;

	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
	     periph = SLIST_NEXT(periph, periph_links), i++);

	periph = SLIST_FIRST(&device->periphs);
	if ((i == 1)
	 && (strncmp(periph->periph_name, "pass", 4) == 0))
		xpt_announce_periph(periph, NULL);

	return(1);
}

static void
xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
{
	struct	periph_driver **p_drv;
	int	i;

	if (done_ccb != NULL) {
		CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
			  ("xpt_finishconfig\n"));
		switch(done_ccb->ccb_h.func_code) {
		case XPT_RESET_BUS:
			if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
				done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
				done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
				done_ccb->crcn.flags = 0;
				xpt_action(done_ccb);
				return;
			}
			/* FALLTHROUGH */
		case XPT_SCAN_BUS:
		default:
			xpt_free_path(done_ccb->ccb_h.path);
			busses_to_config--;
			break;
		}
	}

	if (busses_to_config == 0) {
		/* Register all the peripheral drivers */
		/* XXX This will have to change when we have loadable modules */
		p_drv = periph_drivers;
		for (i = 0; p_drv[i] != NULL; i++) {
			(*p_drv[i]->init)();
		}

		/*
		 * Check for devices with no "standard" peripheral driver
		 * attached.  For any devices like that, announce the
		 * passthrough driver so the user will see something.
		 */
		xpt_for_all_devices(xptpassannouncefunc, NULL);

		/* Release our hook so that the boot can continue. */
		config_intrhook_disestablish(xpt_config_hook);
		kfree(xpt_config_hook, M_TEMP);
		xpt_config_hook = NULL;
	}
	if (done_ccb != NULL)
		xpt_free_ccb(done_ccb);
}

static void
xptaction(struct cam_sim *sim, union ccb *work_ccb)
{
	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));

	switch (work_ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi;

		cpi = &work_ccb->cpi;
		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = 0;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 0;
		cpi->max_lun = 0;
		cpi->initiator_id = 0;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "", HBA_IDLEN);
		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
		cpi->unit_number = sim->unit_number;
		cpi->bus_id = sim->bus_id;
		cpi->base_transfer_speed = 0;
		cpi->protocol = PROTO_UNSPECIFIED;
		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
		cpi->transport = XPORT_UNSPECIFIED;
		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(work_ccb);
		break;
	}
	default:
		work_ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(work_ccb);
		break;
	}
}

/*
 * The xpt as a "controller" has no interrupt sources, so polling
 * is a no-op.
 */
static void
xptpoll(struct cam_sim *sim)
{
}

/*
 * Should only be called by the machine interrupt dispatch routines,
 * so put these prototypes here instead of in the header.
 */
static void
swi_cambio(void *arg, void *frame)
{
	camisr(&cam_bioq);
}

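/*
 * Process the completed-CCB queue: retire each CCB against its device,
 * handle high-power and queue-release bookkeeping, and invoke the
 * peripheral driver's completion callback.
 */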
static void
camisr(cam_isrq_t *queue)
{
	struct	ccb_hdr *ccb_h;

	while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
		int	runq;

		TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;

		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
			  ("camisr\n"));

		runq = FALSE;

		if (ccb_h->flags & CAM_HIGH_POWER) {
			struct highpowerlist	*hphead;
			struct cam_ed		*device;
			union ccb		*send_ccb;

			hphead = &highpowerq;

			send_ccb = (union ccb *)STAILQ_FIRST(hphead);

			/*
			 * Increment the count since this command is done.
			 */
			num_highpower++;

			/*
			 * Any high powered commands queued up?
			 */
			if (send_ccb != NULL) {
				device = send_ccb->ccb_h.path->device;

				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);

				xpt_release_devq(send_ccb->ccb_h.path,
						 /*count*/1, /*runqueue*/TRUE);
			}
		}
		if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
			struct cam_ed *dev;

			dev = ccb_h->path->device;

			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);

			if (!SIM_DEAD(ccb_h->path->bus->sim)) {
				ccb_h->path->bus->sim->devq->send_active--;
				ccb_h->path->bus->sim->devq->send_openings++;
			}

			if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
			  && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
			 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
			  && (dev->ccbq.dev_active == 0))) {

				xpt_release_devq(ccb_h->path, /*count*/1,
						 /*run_queue*/TRUE);
			}

			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
			 && (--dev->tag_delay_count == 0))
				xpt_start_tags(ccb_h->path);

			if ((dev->ccbq.queue.entries > 0)
			 && (dev->qfrozen_cnt == 0)
			 && (device_is_send_queued(dev) == 0)) {
				runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
							      dev);
			}

			if (ccb_h->status & CAM_RELEASE_SIMQ) {
				xpt_release_simq(ccb_h->path->bus->sim,
						 /*run_queue*/TRUE);
				ccb_h->status &= ~CAM_RELEASE_SIMQ;
				runq = FALSE;
			}

			if ((ccb_h->flags & CAM_DEV_QFRZDIS)
			 && (ccb_h->status & CAM_DEV_QFRZN)) {
				xpt_release_devq(ccb_h->path, /*count*/1,
						 /*run_queue*/TRUE);
				ccb_h->status &= ~CAM_DEV_QFRZN;
			} else if (runq) {
				xpt_run_dev_sendq(ccb_h->path->bus);
			}
		}

		/* Call the peripheral driver's callback */
		(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
	}
}

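/*
 * Stub SIM entry points used after a SIM has detached; any CCB issued
 * to a dead SIM completes with CAM_DEV_NOT_THERE.
 */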
static void
dead_sim_action(struct cam_sim *sim, union ccb *ccb)
{
	ccb->ccb_h.status = CAM_DEV_NOT_THERE;
	xpt_done(ccb);
}

static void
dead_sim_poll(struct cam_sim *sim)
{
}