/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/cam/cam_xpt.c,v 1.80.2.18 2002/12/09 17:31:55 gibbs Exp $
 * $DragonFly: src/sys/bus/cam/cam_xpt.c,v 1.61 2007/12/02 05:38:03 pavalos Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/devicestat.h>
#include <sys/interrupt.h>
#include <sys/thread.h>
#include <sys/thread2.h>

#include <machine/clock.h>

#include "cam_periph.h"
#include "cam_xpt_sim.h"
#include "cam_xpt_periph.h"
#include "cam_debug.h"

#include "scsi/scsi_all.h"
#include "scsi/scsi_message.h"
#include "scsi/scsi_pass.h"
/* Datastructures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
	SLIST_ENTRY(async_node)	links;
	u_int32_t	event_enable;	/* Async Event enables */
	void		(*callback)(void *arg, u_int32_t code,
				    struct cam_path *path, void *args);
	void		*callback_arg;
};

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;
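
/*
 * Illustrative note (not part of the original source): peripherals and SIMs
 * normally land on these async lists via an XPT_SASYNC_CB CCB.  A minimal
 * sketch of such a registration, assuming a valid 'path', a softc pointer
 * 'mysoftc', and a callback 'myasync' matching the signature above:
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = myasync;
 *	csa.callback_arg = mysoftc;
 *	xpt_action((union ccb *)&csa);
 *
 * The event mask, priority, and callback_arg here are placeholders chosen
 * for illustration only.
 */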
/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;
/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
	cam_pinfo pinfo;
	struct	  cam_ed *device;
};
/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
	TAILQ_ENTRY(cam_ed) links;
	struct	cam_ed_qinfo alloc_ccb_entry;
	struct	cam_ed_qinfo send_ccb_entry;
	struct	cam_et	 *target;
	lun_id_t	 lun_id;
	struct	camq drvq;		/*
					 * Queue of type drivers wanting to do
					 * work on this device.
					 */
	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
	struct	async_list asyncs;	/* Async callback info for this B/T/L */
	struct	periph_list periphs;	/* All attached devices */
	u_int	generation;		/* Generation number */
	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
					/* Storage for the inquiry data */
#ifdef CAM_NEW_TRAN_CODE
	u_int		 protocol_version;
	u_int		 transport_version;
#endif /* CAM_NEW_TRAN_CODE */
	struct		 scsi_inquiry_data inq_data;
	u_int8_t	 inq_flags;	/*
					 * Current settings for inquiry flags.
					 * This allows us to override settings
					 * like disconnection and tagged
					 * queuing for a device.
					 */
	u_int8_t	 queue_flags;	/* Queue flags from the control page */
	u_int8_t	 serial_num_len;
	u_int8_t	*serial_num;
	u_int32_t	 qfrozen_cnt;
	u_int32_t	 flags;
#define CAM_DEV_UNCONFIGURED		0x01
#define CAM_DEV_REL_TIMEOUT_PENDING	0x02
#define CAM_DEV_REL_ON_COMPLETE		0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
#define CAM_DEV_TAG_AFTER_COUNT		0x20
#define CAM_DEV_INQUIRY_DATA_VALID	0x40
	u_int32_t	 tag_delay_count;
#define	CAM_TAG_DELAY_COUNT		5
	u_int32_t	 tag_saved_openings;
	struct		 callout c_handle;
};
/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
	TAILQ_HEAD(, cam_ed) ed_entries;
	TAILQ_ENTRY(cam_et) links;
	struct	cam_eb	*bus;
	target_id_t	target_id;
	u_int		generation;
	struct		timeval last_reset;	/* uptime of last reset */
};
/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
	TAILQ_HEAD(, cam_et) et_entries;
	TAILQ_ENTRY(cam_eb)  links;
	path_id_t	     path_id;
	struct cam_sim	     *sim;
	struct timeval	     last_reset;	/* uptime of last reset */
	u_int32_t	     flags;
#define	CAM_EB_RUNQ_SCHEDULED	0x01
	u_int		     generation;
};
struct cam_path {
	struct cam_periph *periph;
	struct cam_eb	  *bus;
	struct cam_et	  *target;
	struct cam_ed	  *device;
};
struct xpt_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define	CAM_QUIRK_NOLUNS	0x01
#define	CAM_QUIRK_NOSERIAL	0x02
#define	CAM_QUIRK_HILUNS	0x04
#define	CAM_QUIRK_NOHILUNS	0x08
	u_int mintags;
	u_int maxtags;
};
static int cam_srch_hi = 0;
TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
    sysctl_cam_search_luns, "I",
    "allow search above LUN 7 for SCSI3 and greater devices");
#define	CAM_SCSI2_MAXLUN	8
/*
 * If we're not quirked to search <= the first 8 luns
 * and we are either quirked to search above lun 8,
 * or we're > SCSI-2 and we've enabled hilun searching,
 * or we're > SCSI-2 and the last lun was a success,
 * we can look for luns above lun 8.
 */
#define	CAN_SRCH_HI_SPARSE(dv)					\
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)		\
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)			\
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))

#define	CAN_SRCH_HI_DENSE(dv)					\
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)		\
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)			\
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))
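
/*
 * Illustrative example (not in the original source): for a device whose
 * INQUIRY data reports an ANSI revision above SCSI-2, with neither the
 * HILUNS nor the NOHILUNS quirk set and kern.cam.cam_srch_hi enabled,
 * CAN_SRCH_HI_SPARSE() evaluates true and the probe code may continue past
 * LUN 7.  The same device with CAM_QUIRK_NOHILUNS set always evaluates
 * false, regardless of the tunable.
 */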
typedef enum {
	XPT_FLAG_OPEN		= 0x01
} xpt_flags;

struct xpt_softc {
	xpt_flags	flags;
	u_int32_t	generation;
};
static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";
static struct xpt_quirk_entry xpt_quirk_table[] =
{
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Unfortunately, the Quantum Atlas III has the same
		 * problem as the Atlas II drives above.
		 * Reported by: "Johan Granlund" <johan@granlund.nu>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM39100TD-SW N1B0
		 *
		 * It's possible that Quantum will fix the problem in later
		 * firmware revisions.  If that happens, the quirk entry
		 * will need to be made specific to the firmware revisions
		 * with the problem.
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * 18 Gig Atlas III, same problem as the 9G version.
		 * Reported by: Andre Albsmeier
		 *		<andre.albsmeier@mchp.siemens.de>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM318000TD-S N491
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * Broken tagged queuing drive
		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
		 *         and: Martin Renters <martin@tdc.on.ca>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	/*
	 * The Seagate Medalist Pro drives have very poor write
	 * performance with anything more than 2 tags.
	 *
	 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
	 * Drive:  <SEAGATE ST36530N 1444>
	 *
	 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
	 * Drive:  <SEAGATE ST34520W 1281>
	 *
	 * No one has actually reported that the 9G version
	 * (ST39140*) of the Medalist Pro has the same problem, but
	 * we're assuming that it does because the 4G and 6.5G
	 * versions of the drive are broken.
	 */
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  Write performance
		 * steadily drops off with more and more concurrent
		 * transactions.  Best sequential write performance with
		 * tagged queueing turned off and write caching turned on.
		 *
		 * Submitted by: Hideaki Okada <hokada@isl.melco.co.jp>
		 * Drive:  DCAS-34330 w/ "S65A" firmware.
		 *
		 * The drive with the problem had the "S65A" firmware
		 * revision, and has also been reported (by Stephen J.
		 * Roznowski <sjr@home.net>) for a drive with the "S61A"
		 * firmware revision.
		 *
		 * Although no one has reported problems with the 2 gig
		 * version of the DCAS drive, the assumption is that it
		 * has the same problems as the 4 gig version.  Therefore
		 * this quirk entry disables tagged queueing for all
		 * DCAS drives.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Broken tagged queuing drive.
		 * Submitted by:
		 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Doesn't handle queue full condition correctly,
		 * so we need to limit maxtags to what the device
		 * can handle instead of determining this automatically.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
		/*quirks*/0, /*mintags*/2, /*maxtags*/32
	},
	{
		/* Really only one LUN */
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* I can't believe we need a quirk for DPT volumes. */
		{ T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
		/*mintags*/0, /*maxtags*/255
	},
	{
		/*
		 * Many Sony CDROM drives don't like multi-LUN probing.
		 */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This drive doesn't like multiple LUN probing.
		 * Submitted by: Parag Patel <parag@cgt.com>
		 */
		{ T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		{ T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * The 8200 doesn't like multi-lun probing, and probably
		 * doesn't like serial number requests either.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"EXB-8200*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Let's try the same as above, but for a drive that says
		 * it's an IPL-6860 but is actually an EXB 8200.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"IPL-6860*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These Hitachi drives don't like multi-lun probing.
		 * The PR submitter has a DK319H, but says that the Linux
		 * kernel has a similar work-around for the DK312 and DK314,
		 * so all DK31* drives are quirked here.
		 *
		 * Submitted by: Paul Haddad <paul@pth.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/*
		 * This old revision of the TDC3600 is also SCSI-1, and
		 * hangs upon serial number probing.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
			" TDC 3600", "U07:"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
			"CP150", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
			"96X2*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* TeraSolutions special settings for TRC-22 RAID */
		{ T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
		/*quirks*/0, /*mintags*/55, /*maxtags*/255
	},
	{
		/* Veritas Storage Appliance */
		{ T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
		CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
	},
	{
		/*
		 * Would respond to all LUNs.  Device type and removable
		 * flag are jumper-selectable.
		 */
		{
			T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
			"Tahiti 1", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Default tagged queuing parameters for all devices */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0, /*mintags*/2, /*maxtags*/255
	},
};
static const int xpt_quirk_table_size =
	sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
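
/*
 * Illustrative note (not part of the original source): a new quirk is added
 * by appending an entry above the catch-all default at the end of the table.
 * A hypothetical drive that cannot handle LUN probing would get an entry of
 * roughly this form (vendor/product strings below are placeholders):
 *
 *	{
 *		{ T_DIRECT, SIP_MEDIA_FIXED, "VENDOR", "PRODUCT*", "*" },
 *		CAM_QUIRK_NOLUNS, 0, 0
 *	},
 *
 * The inquiry patterns are matched against a device's INQUIRY data by
 * xpt_find_quirk() using cam_quirkmatch().
 */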
typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};
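
/*
 * Illustrative note (not in the original source): the low nibble of a
 * dev_match_ret value carries per-node flags and the high nibble carries
 * the traversal action.  A match routine that wants the current node copied
 * out and the search continued into its children would therefore return
 * DM_RET_DESCEND | DM_RET_COPY; masking with DM_RET_ACTION_MASK recovers
 * just the action.
 */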
typedef int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
static SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;	/*
				 * Maximum size of ccb pool.  Modified as
				 * devices are added/removed or have their
				 * opening counts changed.
				 */
static u_int xpt_ccb_count;	/* Current count of allocated ccbs */

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;
static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
	probe_periph_init, "probe",
	TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);
PERIPHDRIVER_DECLARE(probe, probe_driver);
#define XPT_CDEV_MAJOR 104

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct dev_ops xpt_ops = {
	{ "xpt", XPT_CDEV_MAJOR, 0 },
	.d_open = xptopen,
	.d_close = xptclose,
	.d_ioctl = xptioctl
};

static struct intr_config_hook *xpt_config_hook;
static void dead_sim_action(struct cam_sim *sim, union ccb *ccb);
static void dead_sim_poll(struct cam_sim *sim);

/* Dummy SIM that is used when the real one has gone. */
static struct cam_sim cam_dead_sim = {
	.sim_action =	dead_sim_action,
	.sim_poll =	dead_sim_poll,
	.sim_name =	"dead_sim",
};

#define SIM_DEAD(sim)	((sim) == &cam_dead_sim)
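
/*
 * Illustrative note (not part of the original source): paths can outlive
 * the SIM they point at after xpt_bus_deregister(), so code that is about
 * to poke a SIM typically guards itself along the lines of:
 *
 *	if (SIM_DEAD(bus->sim))
 *		return;
 *
 * (or fails the CCB instead of touching hardware), with the dummy
 * cam_dead_sim absorbing any stragglers.
 */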
/* Registered busses */
static TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging datastructures */
#ifdef CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif
#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static void	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);
static cam_status	xpt_compile_path(struct cam_path *new_path,
					 struct cam_periph *perph,
					 path_id_t path_id,
					 target_id_t target_id,
					 lun_id_t lun_id);

static void		xpt_release_path(struct cam_path *path);

static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static void		xpt_dev_async(u_int32_t async_code,
				      struct cam_eb *bus,
				      struct cam_et *target,
				      struct cam_ed *device,
				      void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				  u_int32_t new_priority);
static void	 xpt_run_dev_allocq(struct cam_eb *bus);
static void	 xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static void	 xpt_release_bus(struct cam_eb *bus);
static void	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
					 int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
				  lun_id_t lun_id);
static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
				    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void	 xpt_scan_lun(struct cam_periph *periph,
			      struct cam_path *path, cam_flags flags,
			      union ccb *ccb);
static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t	xptconfigbuscountfunc;
static xpt_busfunc_t	xptconfigfunc;
static void	 xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static inthand2_t swi_cambio;
static void	 camisr(cam_isrq_t *queue);
static void	 xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void	 xptasync(struct cam_periph *periph,
			  u_int32_t code, cam_path *path);
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
static int		xpt_for_all_targets(xpt_targetfunc_t *tr_func,
					    void *arg);
static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
					    void *arg);
static int		xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static cam_status	proberegister(struct cam_periph *periph,
				      void *arg);
static void	 probeschedule(struct cam_periph *probe_periph);
static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
static void	 proberequestdefaultnegotiation(struct cam_periph *periph);
static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
static void	 probecleanup(struct cam_periph *periph);
static void	 xpt_find_quirk(struct cam_ed *device);
#ifdef CAM_NEW_TRAN_CODE
static void	 xpt_devise_transport(struct cam_path *path);
#endif /* CAM_NEW_TRAN_CODE */
static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
					   struct cam_ed *device,
					   int async_update);
static void	 xpt_toggle_tags(struct cam_path *path);
static void	 xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
					    struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
					   struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (bus->sim->devq && dev->ccbq.devq_openings > 0) {
		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
			cam_ccbq_resize(&dev->ccbq,
					dev->ccbq.dev_openings
					+ dev->ccbq.dev_active);
			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
		}
		/*
		 * The priority of a device waiting for CCB resources
		 * is that of the highest priority peripheral driver
		 * enqueued.
		 */
		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
					  &dev->alloc_ccb_entry.pinfo,
					  CAMQ_GET_HEAD(&dev->drvq)->priority);
	} else {
		retval = 0;
	}

	return (retval);
}
static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	retval = 0;
	if (bus->sim->devq && dev->ccbq.dev_openings > 0) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&bus->sim->devq->send_queue,
				     &dev->send_ccb_entry.pinfo,
				     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
	}

	return (retval);
}
static __inline int
periph_is_queued(struct cam_periph *periph)
{
	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}
static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
	/*
	 * Have work to do.
	 * Have space to do more work.
	 * Allowed to do work.
	 */
	return ((devq->alloc_queue.qfrozen_cnt == 0)
	     && (devq->alloc_queue.entries > 0)
	     && (devq->alloc_openings > 0));
}
static void
xpt_periph_init(void)
{
	dev_ops_add(&xpt_ops, 0, 0);
	make_dev(&xpt_ops, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}
static void
probe_periph_init(void)
{
}

static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}
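
/*
 * Illustrative note (not part of the original source): the address of the
 * completion callback pointer doubles as the sleep channel, so a caller such
 * as cam_periph_runccb() can sleep on &ccb->ccb_h.cbfcnp and be woken when
 * this completion handler runs.
 */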
static int
xptopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * Only allow read-write access.
	 */
	if (((ap->a_oflags & FWRITE) == 0) || ((ap->a_oflags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((ap->a_oflags & O_NONBLOCK) != 0) {
		kprintf("xpt%d: can't do nonblocking access\n", unit);
		return(ENODEV);
	}

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 1, point out their
	 * mistake.
	 */
	if (unit != 0) {
		kprintf("xptopen: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves open */
	xsoftc.flags |= XPT_FLAG_OPEN;

	return(0);
}
static int
xptclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 1, point out their
	 * mistake.
	 */
	if (unit != 0) {
		kprintf("xptclose: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves closed */
	xsoftc.flags &= ~XPT_FLAG_OPEN;

	return(0);
}
static int
xptioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int unit, error;

	error = 0;
	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 1, point out their
	 * mistake.
	 */
	if (unit != 0) {
		kprintf("xptioctl: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	switch(ap->a_cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;

		inccb = (union ccb *)ap->a_data;

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
				error = EINVAL;
				break;
			}
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;
		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			ccb.ccb_h.cbfcnp = xptdone;
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;
		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		break;
	}
	/*
	 * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit name filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass"), and unit number are passed back in the ccb.
	 * The current device generation number, and the index into the
	 * device peripheral driver list, and the status are also passed
	 * back.  Note that since we do everything in one pass, unlike the
	 * XPT_GDEVLIST ccb, we never return a status of
	 * CAM_GDEVLIST_LIST_CHANGED.  It is (or rather should be) impossible
	 * for the device peripheral driver list to change since we look at
	 * the whole thing in one pass, and we do it within a critical
	 * section.
	 */
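	/*
	 * Illustrative note (not part of the original source): a userland
	 * caller (e.g. libcam/camcontrol) typically drives this by opening
	 * /dev/xpt0, filling in cgdl.periph_name ("da", "cd", ...) and
	 * cgdl.unit_number, and issuing ioctl(fd, CAMGETPASSTHRU, &ccb);
	 * on success the ccb comes back naming the matching "pass"
	 * peripheral and its unit number.
	 */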
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char   *name;
		u_int cur_generation;
		int base_periph_found;
		int splbreaknum;

		ccb = (union ccb *)ap->a_data;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		/*
		 * Every 100 devices, we want to call splz() to check for
		 * and allow the software interrupt handler a chance to run.
		 *
		 * Most systems won't run into this check, but this should
		 * avoid starvation in the software interrupt handler in
		 * large systems.
		 */
		splbreaknum = 100;

		ccb = (union ccb *)ap->a_data;

		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		crit_enter();
ptstartover:
		cur_generation = xsoftc.generation;

		/* first find our driver in the list of drivers */
		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;
		}

		if (*p_drv == NULL) {
			crit_exit();
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}
		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {

			if (periph->unit_number == unit) {
				break;
			} else if (--splbreaknum == 0) {
				splz();
				splbreaknum = 100;
				if (cur_generation != xsoftc.generation)
					goto ptstartover;
			}
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = SLIST_FIRST(&device->periphs);
			     periph != NULL;
			     periph = SLIST_NEXT(periph, periph_links), i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (SLIST_NEXT(periph, periph_links))
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}
		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				kprintf("xptioctl: pass driver is not in the "
				       "kernel\n");
				kprintf("xptioctl: put \"device pass0\" in "
				       "your kernel config file\n");
			}
		}
		crit_exit();
		break;
		}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}
static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
	if (what == MOD_LOAD) {
		xpt_init(NULL);
	} else if (what == MOD_UNLOAD) {
		return EBUSY;
	} else {
		return EOPNOTSUPP;
	}

	return 0;
}
/* Functions accessed by the peripheral drivers */
static void
xpt_init(void *dummy)
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;

	TAILQ_INIT(&xpt_busses);
	TAILQ_INIT(&cam_bioq);
	SLIST_INIT(&ccb_freeq);
	STAILQ_INIT(&highpowerq);

	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	cam_simq_release(devq);

	xpt_bus_register(xpt_sim, /*bus #*/0);

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		kprintf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return;
	}

	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, NULL);
	xpt_free_path(path);

	xpt_sim->softc = xpt_periph;

	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xpt_config_hook = kmalloc(sizeof(struct intr_config_hook),
				  M_TEMP, M_INTWAIT | M_ZERO);
	xpt_config_hook->ich_func = xpt_config;
	xpt_config_hook->ich_desc = "xpt";
	xpt_config_hook->ich_order = 1000;
	if (config_intrhook_establish(xpt_config_hook) != 0) {
		kfree (xpt_config_hook, M_TEMP);
		kprintf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	/* Install our software interrupt handlers */
	register_swi(SWI_CAMBIO, swi_cambio, NULL, "swi_cambio", NULL);
}
static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	if (periph == NULL) {
		kprintf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	periph->softc = NULL;

	xpt_periph = periph;

	return(CAM_REQ_CMP);
}
int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;
	struct periph_list *periph_head;

	device = periph->path->device;

	periph_head = &device->periphs;

	status = CAM_REQ_CMP;

	if (device != NULL) {
		/*
		 * Make room for this peripheral
		 * so it will fit in the queue
		 * when it's scheduled to run
		 */
		status = camq_resize(&device->drvq,
				     device->drvq.array_size + 1);

		device->generation++;

		SLIST_INSERT_HEAD(periph_head, periph, periph_links);
	}

	xsoftc.generation++;

	return (status);
}
void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	device = periph->path->device;

	if (device != NULL) {
		struct periph_list *periph_head;

		periph_head = &device->periphs;

		/* Release the slot for this peripheral */
		camq_resize(&device->drvq, device->drvq.array_size - 1);

		device->generation++;

		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
	}

	xsoftc.generation++;
}
1514 xpt_announce_periph(struct cam_periph
*periph
, char *announce_string
)
1516 struct ccb_pathinq cpi
;
1517 struct ccb_trans_settings cts
;
1518 struct cam_path
*path
;
1523 path
= periph
->path
;
1525 * To ensure that this is printed in one piece,
1526 * mask out CAM interrupts.
1529 printf("%s%d at %s%d bus %d target %d lun %d\n",
1530 periph
->periph_name
, periph
->unit_number
,
1531 path
->bus
->sim
->sim_name
,
1532 path
->bus
->sim
->unit_number
,
1533 path
->bus
->sim
->bus_id
,
1534 path
->target
->target_id
,
1535 path
->device
->lun_id
);
1536 printf("%s%d: ", periph
->periph_name
, periph
->unit_number
);
1537 scsi_print_inquiry(&path
->device
->inq_data
);
1538 if (bootverbose
&& path
->device
->serial_num_len
> 0) {
1539 /* Don't wrap the screen - print only the first 60 chars */
1540 printf("%s%d: Serial Number %.60s\n", periph
->periph_name
,
1541 periph
->unit_number
, path
->device
->serial_num
);
1543 xpt_setup_ccb(&cts
.ccb_h
, path
, /*priority*/1);
1544 cts
.ccb_h
.func_code
= XPT_GET_TRAN_SETTINGS
;
1545 cts
.type
= CTS_TYPE_CURRENT_SETTINGS
;
1546 xpt_action((union ccb
*)&cts
);
1547 if ((cts
.ccb_h
.status
& CAM_STATUS_MASK
) != CAM_REQ_CMP
) {
1551 /* Ask the SIM for its base transfer speed */
1552 xpt_setup_ccb(&cpi
.ccb_h
, path
, /*priority*/1);
1553 cpi
.ccb_h
.func_code
= XPT_PATH_INQ
;
1554 xpt_action((union ccb
*)&cpi
);
1556 speed
= cpi
.base_transfer_speed
;
1558 if (cts
.ccb_h
.status
== CAM_REQ_CMP
&& cts
.transport
== XPORT_SPI
) {
1559 struct ccb_trans_settings_spi
*spi
;
1561 spi
= &cts
.xport_specific
.spi
;
1562 if ((spi
->valid
& CTS_SPI_VALID_SYNC_OFFSET
) != 0
1563 && spi
->sync_offset
!= 0) {
1564 freq
= scsi_calc_syncsrate(spi
->sync_period
);
1568 if ((spi
->valid
& CTS_SPI_VALID_BUS_WIDTH
) != 0)
1569 speed
*= (0x01 << spi
->bus_width
);
1571 if (cts
.ccb_h
.status
== CAM_REQ_CMP
&& cts
.transport
== XPORT_FC
) {
1572 struct ccb_trans_settings_fc
*fc
= &cts
.xport_specific
.fc
;
1573 if (fc
->valid
& CTS_FC_VALID_SPEED
) {
1574 speed
= fc
->bitrate
;
1578 if (cts
.ccb_h
.status
== CAM_REQ_CMP
&& cts
.transport
== XPORT_SAS
) {
1579 struct ccb_trans_settings_sas
*sas
= &cts
.xport_specific
.sas
;
1580 if (sas
->valid
& CTS_SAS_VALID_SPEED
) {
1581 speed
= sas
->bitrate
;
1587 printf("%s%d: %d.%03dMB/s transfers",
1588 periph
->periph_name
, periph
->unit_number
,
1591 printf("%s%d: %dKB/s transfers", periph
->periph_name
,
1592 periph
->unit_number
, speed
);
1593 /* Report additional information about SPI connections */
1594 if (cts
.ccb_h
.status
== CAM_REQ_CMP
&& cts
.transport
== XPORT_SPI
) {
1595 struct ccb_trans_settings_spi
*spi
;
1597 spi
= &cts
.xport_specific
.spi
;
1599 printf(" (%d.%03dMHz%s, offset %d", freq
/ 1000,
1601 (spi
->ppr_options
& MSG_EXT_PPR_DT_REQ
) != 0
1605 if ((spi
->valid
& CTS_SPI_VALID_BUS_WIDTH
) != 0
1606 && spi
->bus_width
> 0) {
1612 printf("%dbit)", 8 * (0x01 << spi
->bus_width
));
1613 } else if (freq
!= 0) {
1617 if (cts
.ccb_h
.status
== CAM_REQ_CMP
&& cts
.transport
== XPORT_FC
) {
1618 struct ccb_trans_settings_fc
*fc
;
1620 fc
= &cts
.xport_specific
.fc
;
1621 if (fc
->valid
& CTS_FC_VALID_WWNN
)
1622 printf(" WWNN 0x%llx", (long long) fc
->wwnn
);
1623 if (fc
->valid
& CTS_FC_VALID_WWPN
)
1624 printf(" WWPN 0x%llx", (long long) fc
->wwpn
);
1625 if (fc
->valid
& CTS_FC_VALID_PORT
)
1626 printf(" PortID 0x%x", fc
->port
);
1629 if (path
->device
->inq_flags
& SID_CmdQue
1630 || path
->device
->flags
& CAM_DEV_TAG_AFTER_COUNT
) {
1631 printf("\n%s%d: Tagged Queueing Enabled",
1632 periph
->periph_name
, periph
->unit_number
);
1637 * We only want to print the caller's announce string if they've
1640 if (announce_string
!= NULL
)
1641 printf("%s%d: %s\n", periph
->periph_name
,
1642 periph
->unit_number
, announce_string
);
1645 #else /* CAM_NEW_TRAN_CODE */
void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	u_int	mb;
	struct	cam_path *path;
	struct	ccb_trans_settings cts;

	path = periph->path;
	/*
	 * To ensure that this is printed in one piece,
	 * mask out CAM interrupts.
	 */
	kprintf("%s%d at %s%d bus %d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->target->target_id,
	       path->device->lun_id);
	kprintf("%s%d: ", periph->periph_name, periph->unit_number);
	scsi_print_inquiry(&path->device->inq_data);
	if (bootverbose
	 && (path->device->serial_num_len > 0)) {
		/* Don't wrap the screen - print only the first 60 chars */
		kprintf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.flags = CCB_TRANS_CURRENT_SETTINGS;
	xpt_action((union ccb *)&cts);
	if (cts.ccb_h.status == CAM_REQ_CMP) {
		u_int speed;
		u_int freq;

		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		  && cts.sync_offset != 0) {
			freq = scsi_calc_syncsrate(cts.sync_period);
			speed = freq;
		} else {
			struct ccb_pathinq cpi;

			/* Ask the SIM for its base transfer speed */
			xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
			cpi.ccb_h.func_code = XPT_PATH_INQ;
			xpt_action((union ccb *)&cpi);

			speed = cpi.base_transfer_speed;
			freq = 0;
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
			speed *= (0x01 << cts.bus_width);
		mb = speed / 1000;
		if (mb > 0)
			kprintf("%s%d: %d.%03dMB/s transfers",
			       periph->periph_name, periph->unit_number,
			       mb, speed % 1000);
		else
			kprintf("%s%d: %dKB/s transfers", periph->periph_name,
			       periph->unit_number, speed);
		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		  && cts.sync_offset != 0) {
			kprintf(" (%d.%03dMHz, offset %d", freq / 1000,
			       freq % 1000, cts.sync_offset);
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
		  && cts.bus_width > 0) {
			if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			  && cts.sync_offset != 0) {
				kprintf(", ");
			} else {
				kprintf(" (");
			}
			kprintf("%dbit)", 8 * (0x01 << cts.bus_width));
		} else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			&& cts.sync_offset != 0) {
			kprintf(")");
		}

		if (path->device->inq_flags & SID_CmdQue
		 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
			kprintf(", Tagged Queueing Enabled");
		}

		kprintf("\n");
	} else if (path->device->inq_flags & SID_CmdQue
		|| path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		kprintf("%s%d: Tagged Queueing Enabled\n",
		       periph->periph_name, periph->unit_number);
	}

	/*
	 * We only want to print the caller's announce string if they've
	 * passed one in.
	 */
	if (announce_string != NULL)
		kprintf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
}

#endif /* CAM_NEW_TRAN_CODE */
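
/*
 * Illustrative example (not in the original source): with a sync period
 * factor of 25 (100ns), scsi_calc_syncsrate() yields roughly 10000, i.e.
 * 10.000MHz.  On a narrow bus that prints as "10.000MB/s transfers"; with
 * bus_width == 1 (16-bit wide) the speed doubles to 20.000MB/s before the
 * MB/KB formatting above is applied.
 */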
static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	u_int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}
static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_ed *device)
{
	dev_match_ret retval;
	u_int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (device == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this device matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct device_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a device node, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_DEVICE) {
			if ((patterns[i].type == DEV_MATCH_PERIPH)
			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.device_pattern;

		/*
		 * If they want to match any device node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == DEV_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == DEV_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != device->target->bus->path_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
		 && (cur_pattern->target_id != device->target->target_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
		 && (cur_pattern->target_lun != device->lun_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
		 && (cam_quirkmatch((caddr_t)&device->inq_data,
				    (caddr_t)&cur_pattern->inq_pat,
				    1, sizeof(cur_pattern->inq_pat),
				    scsi_static_inquiry_match) == NULL))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this device.  So tell the caller to copy
		 * the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a peripheral matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a peripheral
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen any peripheral matching patterns.  So tell the
	 * caller to stop descending the tree -- the user doesn't want to
	 * match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}
/*
 * Match a single peripheral against any number of match patterns.
 */
static dev_match_ret
xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_periph *periph)
{
	dev_match_ret retval;
	u_int i;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (periph == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this peripheral matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_STOP | DM_RET_COPY);

	/*
	 * There aren't any nodes below a peripheral node, so there's no
	 * reason to descend the tree any further.
	 */
	retval = DM_RET_STOP;

	for (i = 0; i < num_patterns; i++) {
		struct periph_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a peripheral, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_PERIPH)
			continue;

		cur_pattern = &patterns[i].pattern.periph_pattern;

		/*
		 * If they want to match on anything, then we will do so.
		 */
		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * We've already set the return action to stop,
			 * since there are no nodes below peripherals in
			 * the tree.
			 */
			return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == PERIPH_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != periph->path->bus->path_id))
			continue;

		/*
		 * For the target and lun id's, we have to make sure the
		 * target and lun pointers aren't NULL.  The xpt peripheral
		 * has a wildcard target and device.
		 */
		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
		 && ((periph->path->target == NULL)
		 ||(cur_pattern->target_id != periph->path->target->target_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
		 && ((periph->path->device == NULL)
		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != periph->unit_number))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this peripheral.  So tell the caller to
		 * copy the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * The return action has already been set to stop, since
		 * peripherals don't have any nodes below them in the EDT.
		 */
		return(retval);
	}

	/*
	 * If we get to this point, the peripheral that was passed in
	 * doesn't match any of the patterns.
	 */
	return(retval);
}
static int
xptedtbusfunc(struct cam_eb *bus, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);

	/*
	 * If we got an error, bail out of the search.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this bus out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;

			cdm->pos.cookie.bus = bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				bus_generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_BUS;
		cdm->matches[j].result.bus_result.path_id = bus->path_id;
		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
		cdm->matches[j].result.bus_result.unit_number =
			bus->sim->unit_number;
		strncpy(cdm->matches[j].result.bus_result.dev_name,
			bus->sim->sim_name, DEV_IDLEN);
	}

	/*
	 * If the user is only interested in busses, there's no
	 * reason to descend to the next level in the tree.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a target generation recorded, check it to
	 * make sure the target list hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (bus == cdm->pos.cookie.bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
	     bus->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		return(xpttargettraverse(bus,
					(struct cam_et *)cdm->pos.cookie.target,
					 xptedttargetfunc, arg));
	else
		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
}
2183 xptedttargetfunc(struct cam_et
*target
, void *arg
)
2185 struct ccb_dev_match
*cdm
;
2187 cdm
= (struct ccb_dev_match
*)arg
;
2190 * If there is a device list generation recorded, check it to
2191 * make sure the device list hasn't changed.
2193 if ((cdm
->pos
.position_type
& CAM_DEV_POS_BUS
)
2194 && (cdm
->pos
.cookie
.bus
== target
->bus
)
2195 && (cdm
->pos
.position_type
& CAM_DEV_POS_TARGET
)
2196 && (cdm
->pos
.cookie
.target
== target
)
2197 && (cdm
->pos
.position_type
& CAM_DEV_POS_DEVICE
)
2198 && (cdm
->pos
.generations
[CAM_DEV_GENERATION
] != 0)
2199 && (cdm
->pos
.generations
[CAM_DEV_GENERATION
] !=
2200 target
->generation
)) {
2201 cdm
->status
= CAM_DEV_MATCH_LIST_CHANGED
;
2205 if ((cdm
->pos
.position_type
& CAM_DEV_POS_BUS
)
2206 && (cdm
->pos
.cookie
.bus
== target
->bus
)
2207 && (cdm
->pos
.position_type
& CAM_DEV_POS_TARGET
)
2208 && (cdm
->pos
.cookie
.target
== target
)
2209 && (cdm
->pos
.position_type
& CAM_DEV_POS_DEVICE
)
2210 && (cdm
->pos
.cookie
.device
!= NULL
))
2211 return(xptdevicetraverse(target
,
2212 (struct cam_ed
*)cdm
->pos
.cookie
.device
,
2213 xptedtdevicefunc
, arg
));
2215 return(xptdevicetraverse(target
, NULL
, xptedtdevicefunc
, arg
));
2219 xptedtdevicefunc(struct cam_ed
*device
, void *arg
)
2222 struct ccb_dev_match
*cdm
;
2223 dev_match_ret retval
;
2225 cdm
= (struct ccb_dev_match
*)arg
;
2228 * If our position is for something deeper in the tree, that means
2229 * that we've already seen this node. So, we keep going down.
2231 if ((cdm
->pos
.position_type
& CAM_DEV_POS_DEVICE
)
2232 && (cdm
->pos
.cookie
.device
== device
)
2233 && (cdm
->pos
.position_type
& CAM_DEV_POS_PERIPH
)
2234 && (cdm
->pos
.cookie
.periph
!= NULL
))
2235 retval
= DM_RET_DESCEND
;
2237 retval
= xptdevicematch(cdm
->patterns
, cdm
->num_patterns
,
2240 if ((retval
& DM_RET_ACTION_MASK
) == DM_RET_ERROR
) {
2241 cdm
->status
= CAM_DEV_MATCH_ERROR
;
2246 * If the copy flag is set, copy this device out.
2248 if (retval
& DM_RET_COPY
) {
2251 spaceleft
= cdm
->match_buf_len
- (cdm
->num_matches
*
2252 sizeof(struct dev_match_result
));
2255 * If we don't have enough space to put in another
2256 * match result, save our position and tell the
2257 * user there are more devices to check.
2259 if (spaceleft
< sizeof(struct dev_match_result
)) {
2260 bzero(&cdm
->pos
, sizeof(cdm
->pos
));
2261 cdm
->pos
.position_type
=
2262 CAM_DEV_POS_EDT
| CAM_DEV_POS_BUS
|
2263 CAM_DEV_POS_TARGET
| CAM_DEV_POS_DEVICE
;
2265 cdm
->pos
.cookie
.bus
= device
->target
->bus
;
2266 cdm
->pos
.generations
[CAM_BUS_GENERATION
]=
2268 cdm
->pos
.cookie
.target
= device
->target
;
2269 cdm
->pos
.generations
[CAM_TARGET_GENERATION
] =
2270 device
->target
->bus
->generation
;
2271 cdm
->pos
.cookie
.device
= device
;
2272 cdm
->pos
.generations
[CAM_DEV_GENERATION
] =
2273 device
->target
->generation
;
2274 cdm
->status
= CAM_DEV_MATCH_MORE
;
2277 j
= cdm
->num_matches
;
2279 cdm
->matches
[j
].type
= DEV_MATCH_DEVICE
;
2280 cdm
->matches
[j
].result
.device_result
.path_id
=
2281 device
->target
->bus
->path_id
;
2282 cdm
->matches
[j
].result
.device_result
.target_id
=
2283 device
->target
->target_id
;
2284 cdm
->matches
[j
].result
.device_result
.target_lun
=
2286 bcopy(&device
->inq_data
,
2287 &cdm
->matches
[j
].result
.device_result
.inq_data
,
2288 sizeof(struct scsi_inquiry_data
));
2290 /* Let the user know whether this device is unconfigured */
2291 if (device
->flags
& CAM_DEV_UNCONFIGURED
)
2292 cdm
->matches
[j
].result
.device_result
.flags
=
2293 DEV_RESULT_UNCONFIGURED
;
2295 cdm
->matches
[j
].result
.device_result
.flags
=
2300 * If the user isn't interested in peripherals, don't descend
2301 * the tree any further.
2303 if ((retval
& DM_RET_ACTION_MASK
) == DM_RET_STOP
)
2307 * If there is a peripheral list generation recorded, make sure
2308 * it hasn't changed.
2310 if ((cdm
->pos
.position_type
& CAM_DEV_POS_BUS
)
2311 && (device
->target
->bus
== cdm
->pos
.cookie
.bus
)
2312 && (cdm
->pos
.position_type
& CAM_DEV_POS_TARGET
)
2313 && (device
->target
== cdm
->pos
.cookie
.target
)
2314 && (cdm
->pos
.position_type
& CAM_DEV_POS_DEVICE
)
2315 && (device
== cdm
->pos
.cookie
.device
)
2316 && (cdm
->pos
.position_type
& CAM_DEV_POS_PERIPH
)
2317 && (cdm
->pos
.generations
[CAM_PERIPH_GENERATION
] != 0)
2318 && (cdm
->pos
.generations
[CAM_PERIPH_GENERATION
] !=
2319 device
->generation
)){
2320 cdm
->status
= CAM_DEV_MATCH_LIST_CHANGED
;
2324 if ((cdm
->pos
.position_type
& CAM_DEV_POS_BUS
)
2325 && (cdm
->pos
.cookie
.bus
== device
->target
->bus
)
2326 && (cdm
->pos
.position_type
& CAM_DEV_POS_TARGET
)
2327 && (cdm
->pos
.cookie
.target
== device
->target
)
2328 && (cdm
->pos
.position_type
& CAM_DEV_POS_DEVICE
)
2329 && (cdm
->pos
.cookie
.device
== device
)
2330 && (cdm
->pos
.position_type
& CAM_DEV_POS_PERIPH
)
2331 && (cdm
->pos
.cookie
.periph
!= NULL
))
2332 return(xptperiphtraverse(device
,
2333 (struct cam_periph
*)cdm
->pos
.cookie
.periph
,
2334 xptedtperiphfunc
, arg
));
2336 return(xptperiphtraverse(device
, NULL
, xptedtperiphfunc
, arg
));
2340 xptedtperiphfunc(struct cam_periph
*periph
, void *arg
)
2342 struct ccb_dev_match
*cdm
;
2343 dev_match_ret retval
;
2345 cdm
= (struct ccb_dev_match
*)arg
;
2347 retval
= xptperiphmatch(cdm
->patterns
, cdm
->num_patterns
, periph
);
2349 if ((retval
& DM_RET_ACTION_MASK
) == DM_RET_ERROR
) {
2350 cdm
->status
= CAM_DEV_MATCH_ERROR
;
2355 * If the copy flag is set, copy this peripheral out.
2357 if (retval
& DM_RET_COPY
) {
2360 spaceleft
= cdm
->match_buf_len
- (cdm
->num_matches
*
2361 sizeof(struct dev_match_result
));
2364 * If we don't have enough space to put in another
2365 * match result, save our position and tell the
2366 * user there are more devices to check.
2368 if (spaceleft
< sizeof(struct dev_match_result
)) {
2369 bzero(&cdm
->pos
, sizeof(cdm
->pos
));
2370 cdm
->pos
.position_type
=
2371 CAM_DEV_POS_EDT
| CAM_DEV_POS_BUS
|
2372 CAM_DEV_POS_TARGET
| CAM_DEV_POS_DEVICE
|
2375 cdm
->pos
.cookie
.bus
= periph
->path
->bus
;
2376 cdm
->pos
.generations
[CAM_BUS_GENERATION
]=
2378 cdm
->pos
.cookie
.target
= periph
->path
->target
;
2379 cdm
->pos
.generations
[CAM_TARGET_GENERATION
] =
2380 periph
->path
->bus
->generation
;
2381 cdm
->pos
.cookie
.device
= periph
->path
->device
;
2382 cdm
->pos
.generations
[CAM_DEV_GENERATION
] =
2383 periph
->path
->target
->generation
;
2384 cdm
->pos
.cookie
.periph
= periph
;
2385 cdm
->pos
.generations
[CAM_PERIPH_GENERATION
] =
2386 periph
->path
->device
->generation
;
2387 cdm
->status
= CAM_DEV_MATCH_MORE
;
2391 j
= cdm
->num_matches
;
2393 cdm
->matches
[j
].type
= DEV_MATCH_PERIPH
;
2394 cdm
->matches
[j
].result
.periph_result
.path_id
=
2395 periph
->path
->bus
->path_id
;
2396 cdm
->matches
[j
].result
.periph_result
.target_id
=
2397 periph
->path
->target
->target_id
;
2398 cdm
->matches
[j
].result
.periph_result
.target_lun
=
2399 periph
->path
->device
->lun_id
;
2400 cdm
->matches
[j
].result
.periph_result
.unit_number
=
2401 periph
->unit_number
;
2402 strncpy(cdm
->matches
[j
].result
.periph_result
.periph_name
,
2403 periph
->periph_name
, DEV_IDLEN
);
static int
xptedtmatch(struct ccb_dev_match *cdm)
{
        int ret;

        cdm->num_matches = 0;

        /*
         * Check the bus list generation.  If it has changed, the user
         * needs to reset everything and start over.
         */
        if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
         && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
         && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
                cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
                return(0);
        }

        if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
         && (cdm->pos.cookie.bus != NULL))
                ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
                                     xptedtbusfunc, cdm);
        else
                ret = xptbustraverse(NULL, xptedtbusfunc, cdm);

        /*
         * If we get back 0, that means that we had to stop before fully
         * traversing the EDT.  It also means that one of the subroutines
         * has set the status field to the proper value.  If we get back 1,
         * we've fully traversed the EDT and copied out any matching entries.
         */
        if (ret == 1)
                cdm->status = CAM_DEV_MATCH_LAST;

        return(ret);
}
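/*
 * Illustrative sketch only (not part of the original source): roughly how a
 * caller could drive the matching code above through an XPT_DEV_MATCH CCB.
 * The buffer size, helper name and result handling are assumptions for the
 * example; the usual consumer of this interface is userland via the pass
 * driver and camlib.
 */
#if 0
static void
example_dev_match(struct cam_path *path)
{
        struct ccb_dev_match cdm;
        struct dev_match_result results[16];    /* assumed result buffer size */

        bzero(&cdm, sizeof(cdm));
        xpt_setup_ccb(&cdm.ccb_h, path, /*priority*/1);
        cdm.ccb_h.func_code = XPT_DEV_MATCH;
        cdm.num_patterns = 0;                   /* no patterns: match everything */
        cdm.pattern_buf_len = 0;
        cdm.patterns = NULL;
        cdm.num_matches = 0;
        cdm.match_buf_len = sizeof(results);
        cdm.matches = results;

        do {
                xpt_action((union ccb *)&cdm);
                /* ... consume cdm.num_matches entries from results[] ... */
        } while (cdm.ccb_h.status == CAM_REQ_CMP
              && cdm.status == CAM_DEV_MATCH_MORE);
}
#endif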
2447 xptplistpdrvfunc(struct periph_driver
**pdrv
, void *arg
)
2449 struct ccb_dev_match
*cdm
;
2451 cdm
= (struct ccb_dev_match
*)arg
;
2453 if ((cdm
->pos
.position_type
& CAM_DEV_POS_PDPTR
)
2454 && (cdm
->pos
.cookie
.pdrv
== pdrv
)
2455 && (cdm
->pos
.position_type
& CAM_DEV_POS_PERIPH
)
2456 && (cdm
->pos
.generations
[CAM_PERIPH_GENERATION
] != 0)
2457 && (cdm
->pos
.generations
[CAM_PERIPH_GENERATION
] !=
2458 (*pdrv
)->generation
)) {
2459 cdm
->status
= CAM_DEV_MATCH_LIST_CHANGED
;
2463 if ((cdm
->pos
.position_type
& CAM_DEV_POS_PDPTR
)
2464 && (cdm
->pos
.cookie
.pdrv
== pdrv
)
2465 && (cdm
->pos
.position_type
& CAM_DEV_POS_PERIPH
)
2466 && (cdm
->pos
.cookie
.periph
!= NULL
))
2467 return(xptpdperiphtraverse(pdrv
,
2468 (struct cam_periph
*)cdm
->pos
.cookie
.periph
,
2469 xptplistperiphfunc
, arg
));
2471 return(xptpdperiphtraverse(pdrv
, NULL
,xptplistperiphfunc
, arg
));
2475 xptplistperiphfunc(struct cam_periph
*periph
, void *arg
)
2477 struct ccb_dev_match
*cdm
;
2478 dev_match_ret retval
;
2480 cdm
= (struct ccb_dev_match
*)arg
;
2482 retval
= xptperiphmatch(cdm
->patterns
, cdm
->num_patterns
, periph
);
2484 if ((retval
& DM_RET_ACTION_MASK
) == DM_RET_ERROR
) {
2485 cdm
->status
= CAM_DEV_MATCH_ERROR
;
2490 * If the copy flag is set, copy this peripheral out.
2492 if (retval
& DM_RET_COPY
) {
2495 spaceleft
= cdm
->match_buf_len
- (cdm
->num_matches
*
2496 sizeof(struct dev_match_result
));
2499 * If we don't have enough space to put in another
2500 * match result, save our position and tell the
2501 * user there are more devices to check.
2503 if (spaceleft
< sizeof(struct dev_match_result
)) {
2504 struct periph_driver
**pdrv
;
2507 bzero(&cdm
->pos
, sizeof(cdm
->pos
));
2508 cdm
->pos
.position_type
=
2509 CAM_DEV_POS_PDRV
| CAM_DEV_POS_PDPTR
|
2513 * This may look a bit non-sensical, but it is
2514 * actually quite logical. There are very few
2515 * peripheral drivers, and bloating every peripheral
2516 * structure with a pointer back to its parent
2517 * peripheral driver linker set entry would cost
2518 * more in the long run than doing this quick lookup.
2520 for (pdrv
= periph_drivers
; *pdrv
!= NULL
; pdrv
++) {
2521 if (strcmp((*pdrv
)->driver_name
,
2522 periph
->periph_name
) == 0)
2526 if (*pdrv
== NULL
) {
2527 cdm
->status
= CAM_DEV_MATCH_ERROR
;
2531 cdm
->pos
.cookie
.pdrv
= pdrv
;
2533 * The periph generation slot does double duty, as
2534 * does the periph pointer slot. They are used for
2535 * both edt and pdrv lookups and positioning.
2537 cdm
->pos
.cookie
.periph
= periph
;
2538 cdm
->pos
.generations
[CAM_PERIPH_GENERATION
] =
2539 (*pdrv
)->generation
;
2540 cdm
->status
= CAM_DEV_MATCH_MORE
;
2544 j
= cdm
->num_matches
;
2546 cdm
->matches
[j
].type
= DEV_MATCH_PERIPH
;
2547 cdm
->matches
[j
].result
.periph_result
.path_id
=
2548 periph
->path
->bus
->path_id
;
2551 * The transport layer peripheral doesn't have a target or
2554 if (periph
->path
->target
)
2555 cdm
->matches
[j
].result
.periph_result
.target_id
=
2556 periph
->path
->target
->target_id
;
2558 cdm
->matches
[j
].result
.periph_result
.target_id
= -1;
2560 if (periph
->path
->device
)
2561 cdm
->matches
[j
].result
.periph_result
.target_lun
=
2562 periph
->path
->device
->lun_id
;
2564 cdm
->matches
[j
].result
.periph_result
.target_lun
= -1;
2566 cdm
->matches
[j
].result
.periph_result
.unit_number
=
2567 periph
->unit_number
;
2568 strncpy(cdm
->matches
[j
].result
.periph_result
.periph_name
,
2569 periph
->periph_name
, DEV_IDLEN
);
2576 xptperiphlistmatch(struct ccb_dev_match
*cdm
)
2580 cdm
->num_matches
= 0;
2583 * At this point in the edt traversal function, we check the bus
2584 * list generation to make sure that no busses have been added or
2585 * removed since the user last sent a XPT_DEV_MATCH ccb through.
2586 * For the peripheral driver list traversal function, however, we
2587 * don't have to worry about new peripheral driver types coming or
2588 * going; they're in a linker set, and therefore can't change
2589 * without a recompile.
2592 if ((cdm
->pos
.position_type
& CAM_DEV_POS_PDPTR
)
2593 && (cdm
->pos
.cookie
.pdrv
!= NULL
))
2594 ret
= xptpdrvtraverse(
2595 (struct periph_driver
**)cdm
->pos
.cookie
.pdrv
,
2596 xptplistpdrvfunc
, cdm
);
2598 ret
= xptpdrvtraverse(NULL
, xptplistpdrvfunc
, cdm
);
2601 * If we get back 0, that means that we had to stop before fully
2602 * traversing the peripheral driver tree. It also means that one of
2603 * the subroutines has set the status field to the proper value. If
2604 * we get back 1, we've fully traversed the EDT and copied out any
2608 cdm
->status
= CAM_DEV_MATCH_LAST
;
static int
xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
{
        struct cam_eb *bus, *next_bus;
        int retval;

        retval = 1;

        for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
             bus != NULL;
             bus = next_bus) {

                next_bus = TAILQ_NEXT(bus, links);

                retval = tr_func(bus, arg);
                if (retval == 0)
                        return(retval);
        }

        return(retval);
}

static int
xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
                  xpt_targetfunc_t *tr_func, void *arg)
{
        struct cam_et *target, *next_target;
        int retval;

        retval = 1;
        for (target = (start_target ? start_target :
                       TAILQ_FIRST(&bus->et_entries));
             target != NULL; target = next_target) {

                next_target = TAILQ_NEXT(target, links);

                retval = tr_func(target, arg);
                if (retval == 0)
                        return(retval);
        }

        return(retval);
}

static int
xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
                  xpt_devicefunc_t *tr_func, void *arg)
{
        struct cam_ed *device, *next_device;
        int retval;

        retval = 1;
        for (device = (start_device ? start_device :
                       TAILQ_FIRST(&target->ed_entries));
             device != NULL;
             device = next_device) {

                next_device = TAILQ_NEXT(device, links);

                retval = tr_func(device, arg);
                if (retval == 0)
                        return(retval);
        }

        return(retval);
}

static int
xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
                  xpt_periphfunc_t *tr_func, void *arg)
{
        struct cam_periph *periph, *next_periph;
        int retval;

        retval = 1;

        for (periph = (start_periph ? start_periph :
                       SLIST_FIRST(&device->periphs));
             periph != NULL;
             periph = next_periph) {

                next_periph = SLIST_NEXT(periph, periph_links);

                retval = tr_func(periph, arg);
                if (retval == 0)
                        return(retval);
        }

        return(retval);
}

static int
xptpdrvtraverse(struct periph_driver **start_pdrv,
                xpt_pdrvfunc_t *tr_func, void *arg)
{
        struct periph_driver **pdrv;
        int retval;

        retval = 1;

        /*
         * We don't traverse the peripheral driver list like we do the
         * other lists, because it is a linker set, and therefore cannot be
         * changed during runtime.  If the peripheral driver list is ever
         * re-done to be something other than a linker set (i.e. it can
         * change while the system is running), the list traversal should
         * be modified to work like the other traversal functions.
         */
        for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
             *pdrv != NULL; pdrv++) {
                retval = tr_func(pdrv, arg);
                if (retval == 0)
                        return(retval);
        }

        return(retval);
}

static int
xptpdperiphtraverse(struct periph_driver **pdrv,
                    struct cam_periph *start_periph,
                    xpt_periphfunc_t *tr_func, void *arg)
{
        struct cam_periph *periph, *next_periph;
        int retval;

        retval = 1;

        for (periph = (start_periph ? start_periph :
                       TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
             periph = next_periph) {

                next_periph = TAILQ_NEXT(periph, unit_links);

                retval = tr_func(periph, arg);
                if (retval == 0)
                        return(retval);
        }
        return(retval);
}
static int
xptdefbusfunc(struct cam_eb *bus, void *arg)
{
        struct xpt_traverse_config *tr_config;

        tr_config = (struct xpt_traverse_config *)arg;

        if (tr_config->depth == XPT_DEPTH_BUS) {
                xpt_busfunc_t *tr_func;

                tr_func = (xpt_busfunc_t *)tr_config->tr_func;

                return(tr_func(bus, tr_config->tr_arg));
        } else
                return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
}

static int
xptdeftargetfunc(struct cam_et *target, void *arg)
{
        struct xpt_traverse_config *tr_config;

        tr_config = (struct xpt_traverse_config *)arg;

        if (tr_config->depth == XPT_DEPTH_TARGET) {
                xpt_targetfunc_t *tr_func;

                tr_func = (xpt_targetfunc_t *)tr_config->tr_func;

                return(tr_func(target, tr_config->tr_arg));
        } else
                return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
}

static int
xptdefdevicefunc(struct cam_ed *device, void *arg)
{
        struct xpt_traverse_config *tr_config;

        tr_config = (struct xpt_traverse_config *)arg;

        if (tr_config->depth == XPT_DEPTH_DEVICE) {
                xpt_devicefunc_t *tr_func;

                tr_func = (xpt_devicefunc_t *)tr_config->tr_func;

                return(tr_func(device, tr_config->tr_arg));
        } else
                return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
}

static int
xptdefperiphfunc(struct cam_periph *periph, void *arg)
{
        struct xpt_traverse_config *tr_config;
        xpt_periphfunc_t *tr_func;

        tr_config = (struct xpt_traverse_config *)arg;

        tr_func = (xpt_periphfunc_t *)tr_config->tr_func;

        /*
         * Unlike the other default functions, we don't check for depth
         * here.  The peripheral driver level is the last level in the EDT,
         * so if we're here, we should execute the function in question.
         */
        return(tr_func(periph, tr_config->tr_arg));
}

/*
 * Execute the given function for every bus in the EDT.
 */
static int
xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
{
        struct xpt_traverse_config tr_config;

        tr_config.depth = XPT_DEPTH_BUS;
        tr_config.tr_func = tr_func;
        tr_config.tr_arg = arg;

        return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
}

#ifdef notusedyet
/*
 * Execute the given function for every target in the EDT.
 */
static int
xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
{
        struct xpt_traverse_config tr_config;

        tr_config.depth = XPT_DEPTH_TARGET;
        tr_config.tr_func = tr_func;
        tr_config.tr_arg = arg;

        return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
}
#endif /* notusedyet */

/*
 * Execute the given function for every device in the EDT.
 */
static int
xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
{
        struct xpt_traverse_config tr_config;

        tr_config.depth = XPT_DEPTH_DEVICE;
        tr_config.tr_func = tr_func;
        tr_config.tr_arg = arg;

        return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
}

#ifdef notusedyet
/*
 * Execute the given function for every peripheral in the EDT.
 */
static int
xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
{
        struct xpt_traverse_config tr_config;

        tr_config.depth = XPT_DEPTH_PERIPH;
        tr_config.tr_func = tr_func;
        tr_config.tr_arg = arg;

        return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
}
#endif /* notusedyet */
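/*
 * Illustrative sketch only (not in the original file): a minimal callback
 * for the xpt_for_all_devices() wrapper above.  Returning 1 continues the
 * EDT traversal; returning 0 stops it, matching the convention used by the
 * traversal helpers.  The counter and function names are assumptions for
 * the example.
 */
#if 0
static int
example_count_device(struct cam_ed *device, void *arg)
{
        int *count = (int *)arg;

        if ((device->flags & CAM_DEV_UNCONFIGURED) == 0)
                (*count)++;
        return (1);             /* keep traversing */
}

/* int ndevs = 0; xpt_for_all_devices(example_count_device, &ndevs); */
#endif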
static int
xptsetasyncfunc(struct cam_ed *device, void *arg)
{
        struct cam_path path;
        struct ccb_getdev cgd;
        struct async_node *cur_entry;

        cur_entry = (struct async_node *)arg;

        /*
         * Don't report unconfigured devices (Wildcard devs,
         * devices only for target mode, device instances
         * that have been invalidated but are waiting for
         * their last reference count to be released).
         */
        if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
                return (1);

        xpt_compile_path(&path,
                         NULL,
                         device->target->bus->path_id,
                         device->target->target_id,
                         device->lun_id);
        xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
        cgd.ccb_h.func_code = XPT_GDEV_TYPE;
        xpt_action((union ccb *)&cgd);
        cur_entry->callback(cur_entry->callback_arg,
                            AC_FOUND_DEVICE,
                            &path, &cgd);
        xpt_release_path(&path);

        return(1);
}

static int
xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
{
        struct cam_path path;
        struct ccb_pathinq cpi;
        struct async_node *cur_entry;

        cur_entry = (struct async_node *)arg;

        xpt_compile_path(&path, /*periph*/NULL,
                         bus->sim->path_id,
                         CAM_TARGET_WILDCARD,
                         CAM_LUN_WILDCARD);
        xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
        cpi.ccb_h.func_code = XPT_PATH_INQ;
        xpt_action((union ccb *)&cpi);
        cur_entry->callback(cur_entry->callback_arg,
                            AC_PATH_REGISTERED,
                            &path, &cpi);
        xpt_release_path(&path);

        return(1);
}
2948 xpt_action(union ccb
*start_ccb
)
2950 CAM_DEBUG(start_ccb
->ccb_h
.path
, CAM_DEBUG_TRACE
, ("xpt_action\n"));
2952 start_ccb
->ccb_h
.status
= CAM_REQ_INPROG
;
2956 switch (start_ccb
->ccb_h
.func_code
) {
2959 #ifdef CAM_NEW_TRAN_CODE
2960 struct cam_ed
*device
;
2961 #endif /* CAM_NEW_TRAN_CODE */
2963 char cdb_str
[(SCSI_MAX_CDBLEN
* 3) + 1];
2964 struct cam_path
*path
;
2966 path
= start_ccb
->ccb_h
.path
;
2970 * For the sake of compatibility with SCSI-1
2971 * devices that may not understand the identify
2972 * message, we include lun information in the
2973 * second byte of all commands. SCSI-1 specifies
2974 * that luns are a 3 bit value and reserves only 3
2975 * bits for lun information in the CDB. Later
2976 * revisions of the SCSI spec allow for more than 8
2977 * luns, but have deprecated lun information in the
2978 * CDB. So, if the lun won't fit, we must omit.
2980 * Also be aware that during initial probing for devices,
2981 * the inquiry information is unknown but initialized to 0.
2982 * This means that this code will be exercised while probing
2983 * devices with an ANSI revision greater than 2.
2985 #ifdef CAM_NEW_TRAN_CODE
2986 device
= start_ccb
->ccb_h
.path
->device
;
2987 if (device
->protocol_version
<= SCSI_REV_2
2988 #else /* CAM_NEW_TRAN_CODE */
2989 if (SID_ANSI_REV(&start_ccb
->ccb_h
.path
->device
->inq_data
) <= 2
2990 #endif /* CAM_NEW_TRAN_CODE */
2991 && start_ccb
->ccb_h
.target_lun
< 8
2992 && (start_ccb
->ccb_h
.flags
& CAM_CDB_POINTER
) == 0) {
2994 start_ccb
->csio
.cdb_io
.cdb_bytes
[1] |=
2995 start_ccb
->ccb_h
.target_lun
<< 5;
2997 start_ccb
->csio
.scsi_status
= SCSI_STATUS_OK
;
2998 CAM_DEBUG(path
, CAM_DEBUG_CDB
,("%s. CDB: %s\n",
2999 scsi_op_desc(start_ccb
->csio
.cdb_io
.cdb_bytes
[0],
3000 &path
->device
->inq_data
),
3001 scsi_cdb_string(start_ccb
->csio
.cdb_io
.cdb_bytes
,
3002 cdb_str
, sizeof(cdb_str
))));
3006 case XPT_CONT_TARGET_IO
:
3007 start_ccb
->csio
.sense_resid
= 0;
3008 start_ccb
->csio
.resid
= 0;
3013 struct cam_path
*path
;
3014 struct cam_sim
*sim
;
3017 path
= start_ccb
->ccb_h
.path
;
3019 sim
= path
->bus
->sim
;
3020 if (SIM_DEAD(sim
)) {
3021 /* The SIM has gone; just execute the CCB directly. */
3022 cam_ccbq_send_ccb(&path
->device
->ccbq
, start_ccb
);
3023 (*(sim
->sim_action
))(sim
, start_ccb
);
3027 cam_ccbq_insert_ccb(&path
->device
->ccbq
, start_ccb
);
3028 if (path
->device
->qfrozen_cnt
== 0)
3029 runq
= xpt_schedule_dev_sendq(path
->bus
, path
->device
);
3033 xpt_run_dev_sendq(path
->bus
);
3036 case XPT_SET_TRAN_SETTINGS
:
3038 xpt_set_transfer_settings(&start_ccb
->cts
,
3039 start_ccb
->ccb_h
.path
->device
,
3040 /*async_update*/FALSE
);
3043 case XPT_CALC_GEOMETRY
:
3045 struct cam_sim
*sim
;
3047 /* Filter out garbage */
3048 if (start_ccb
->ccg
.block_size
== 0
3049 || start_ccb
->ccg
.volume_size
== 0) {
3050 start_ccb
->ccg
.cylinders
= 0;
3051 start_ccb
->ccg
.heads
= 0;
3052 start_ccb
->ccg
.secs_per_track
= 0;
3053 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3056 sim
= start_ccb
->ccb_h
.path
->bus
->sim
;
3057 (*(sim
->sim_action
))(sim
, start_ccb
);
3062 union ccb
* abort_ccb
;
3064 abort_ccb
= start_ccb
->cab
.abort_ccb
;
3065 if (XPT_FC_IS_DEV_QUEUED(abort_ccb
)) {
3067 if (abort_ccb
->ccb_h
.pinfo
.index
>= 0) {
3068 struct cam_ccbq
*ccbq
;
3070 ccbq
= &abort_ccb
->ccb_h
.path
->device
->ccbq
;
3071 cam_ccbq_remove_ccb(ccbq
, abort_ccb
);
3072 abort_ccb
->ccb_h
.status
=
3073 CAM_REQ_ABORTED
|CAM_DEV_QFRZN
;
3074 xpt_freeze_devq(abort_ccb
->ccb_h
.path
, 1);
3075 xpt_done(abort_ccb
);
3076 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3079 if (abort_ccb
->ccb_h
.pinfo
.index
== CAM_UNQUEUED_INDEX
3080 && (abort_ccb
->ccb_h
.status
& CAM_SIM_QUEUED
) == 0) {
3082 * We've caught this ccb en route to
3083 * the SIM. Flag it for abort and the
3084 * SIM will do so just before starting
3085 * real work on the CCB.
3087 abort_ccb
->ccb_h
.status
=
3088 CAM_REQ_ABORTED
|CAM_DEV_QFRZN
;
3089 xpt_freeze_devq(abort_ccb
->ccb_h
.path
, 1);
3090 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3094 if (XPT_FC_IS_QUEUED(abort_ccb
)
3095 && (abort_ccb
->ccb_h
.pinfo
.index
== CAM_DONEQ_INDEX
)) {
3097 * It's already completed but waiting
3098 * for our SWI to get to it.
3100 start_ccb
->ccb_h
.status
= CAM_UA_ABORT
;
3104 * If we weren't able to take care of the abort request
3105 * in the XPT, pass the request down to the SIM for processing.
3109 case XPT_ACCEPT_TARGET_IO
:
3111 case XPT_IMMED_NOTIFY
:
3112 case XPT_NOTIFY_ACK
:
3113 case XPT_GET_TRAN_SETTINGS
:
3116 struct cam_sim
*sim
;
3118 sim
= start_ccb
->ccb_h
.path
->bus
->sim
;
3119 (*(sim
->sim_action
))(sim
, start_ccb
);
3124 struct cam_sim
*sim
;
3126 sim
= start_ccb
->ccb_h
.path
->bus
->sim
;
3127 (*(sim
->sim_action
))(sim
, start_ccb
);
3130 case XPT_PATH_STATS
:
3131 start_ccb
->cpis
.last_reset
=
3132 start_ccb
->ccb_h
.path
->bus
->last_reset
;
3133 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3139 dev
= start_ccb
->ccb_h
.path
->device
;
3140 if ((dev
->flags
& CAM_DEV_UNCONFIGURED
) != 0) {
3141 start_ccb
->ccb_h
.status
= CAM_DEV_NOT_THERE
;
3143 struct ccb_getdev
*cgd
;
3147 cgd
= &start_ccb
->cgd
;
3148 bus
= cgd
->ccb_h
.path
->bus
;
3149 tar
= cgd
->ccb_h
.path
->target
;
3150 cgd
->inq_data
= dev
->inq_data
;
3151 cgd
->ccb_h
.status
= CAM_REQ_CMP
;
3152 cgd
->serial_num_len
= dev
->serial_num_len
;
3153 if ((dev
->serial_num_len
> 0)
3154 && (dev
->serial_num
!= NULL
))
3155 bcopy(dev
->serial_num
, cgd
->serial_num
,
3156 dev
->serial_num_len
);
3160 case XPT_GDEV_STATS
:
3164 dev
= start_ccb
->ccb_h
.path
->device
;
3165 if ((dev
->flags
& CAM_DEV_UNCONFIGURED
) != 0) {
3166 start_ccb
->ccb_h
.status
= CAM_DEV_NOT_THERE
;
3168 struct ccb_getdevstats
*cgds
;
3172 cgds
= &start_ccb
->cgds
;
3173 bus
= cgds
->ccb_h
.path
->bus
;
3174 tar
= cgds
->ccb_h
.path
->target
;
3175 cgds
->dev_openings
= dev
->ccbq
.dev_openings
;
3176 cgds
->dev_active
= dev
->ccbq
.dev_active
;
3177 cgds
->devq_openings
= dev
->ccbq
.devq_openings
;
3178 cgds
->devq_queued
= dev
->ccbq
.queue
.entries
;
3179 cgds
->held
= dev
->ccbq
.held
;
3180 cgds
->last_reset
= tar
->last_reset
;
3181 cgds
->maxtags
= dev
->quirk
->maxtags
;
3182 cgds
->mintags
= dev
->quirk
->mintags
;
3183 if (timevalcmp(&tar
->last_reset
, &bus
->last_reset
, <))
3184 cgds
->last_reset
= bus
->last_reset
;
3185 cgds
->ccb_h
.status
= CAM_REQ_CMP
;
3191 struct cam_periph
*nperiph
;
3192 struct periph_list
*periph_head
;
3193 struct ccb_getdevlist
*cgdl
;
3195 struct cam_ed
*device
;
3202 * Don't want anyone mucking with our data.
3204 device
= start_ccb
->ccb_h
.path
->device
;
3205 periph_head
= &device
->periphs
;
3206 cgdl
= &start_ccb
->cgdl
;
3209 * Check and see if the list has changed since the user
3210 * last requested a list member. If so, tell them that the
3211 * list has changed, and therefore they need to start over
3212 * from the beginning.
3214 if ((cgdl
->index
!= 0) &&
3215 (cgdl
->generation
!= device
->generation
)) {
3216 cgdl
->status
= CAM_GDEVLIST_LIST_CHANGED
;
3221 * Traverse the list of peripherals and attempt to find
3222 * the requested peripheral.
3224 for (nperiph
= SLIST_FIRST(periph_head
), i
= 0;
3225 (nperiph
!= NULL
) && (i
<= cgdl
->index
);
3226 nperiph
= SLIST_NEXT(nperiph
, periph_links
), i
++) {
3227 if (i
== cgdl
->index
) {
3228 strncpy(cgdl
->periph_name
,
3229 nperiph
->periph_name
,
3231 cgdl
->unit_number
= nperiph
->unit_number
;
3236 cgdl
->status
= CAM_GDEVLIST_ERROR
;
3240 if (nperiph
== NULL
)
3241 cgdl
->status
= CAM_GDEVLIST_LAST_DEVICE
;
3243 cgdl
->status
= CAM_GDEVLIST_MORE_DEVS
;
3246 cgdl
->generation
= device
->generation
;
3248 cgdl
->ccb_h
.status
= CAM_REQ_CMP
;
3253 dev_pos_type position_type
;
3254 struct ccb_dev_match
*cdm
;
3257 cdm
= &start_ccb
->cdm
;
3260 * Prevent EDT changes while we traverse it.
3263 * There are two ways of getting at information in the EDT.
3264 * The first way is via the primary EDT tree. It starts
3265 * with a list of busses, then a list of targets on a bus,
3266 * then devices/luns on a target, and then peripherals on a
3267 * device/lun. The "other" way is by the peripheral driver
3268 * lists. The peripheral driver lists are organized by
3269 * peripheral driver. (obviously) So it makes sense to
3270 * use the peripheral driver list if the user is looking
3271 * for something like "da1", or all "da" devices. If the
3272 * user is looking for something on a particular bus/target
3273 * or lun, it's generally better to go through the EDT tree.
3276 if (cdm
->pos
.position_type
!= CAM_DEV_POS_NONE
)
3277 position_type
= cdm
->pos
.position_type
;
3281 position_type
= CAM_DEV_POS_NONE
;
3283 for (i
= 0; i
< cdm
->num_patterns
; i
++) {
3284 if ((cdm
->patterns
[i
].type
== DEV_MATCH_BUS
)
3285 ||(cdm
->patterns
[i
].type
== DEV_MATCH_DEVICE
)){
3286 position_type
= CAM_DEV_POS_EDT
;
3291 if (cdm
->num_patterns
== 0)
3292 position_type
= CAM_DEV_POS_EDT
;
3293 else if (position_type
== CAM_DEV_POS_NONE
)
3294 position_type
= CAM_DEV_POS_PDRV
;
3297 switch(position_type
& CAM_DEV_POS_TYPEMASK
) {
3298 case CAM_DEV_POS_EDT
:
3299 ret
= xptedtmatch(cdm
);
3301 case CAM_DEV_POS_PDRV
:
3302 ret
= xptperiphlistmatch(cdm
);
3305 cdm
->status
= CAM_DEV_MATCH_ERROR
;
3309 if (cdm
->status
== CAM_DEV_MATCH_ERROR
)
3310 start_ccb
->ccb_h
.status
= CAM_REQ_CMP_ERR
;
3312 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3318 struct ccb_setasync
*csa
;
3319 struct async_node
*cur_entry
;
3320 struct async_list
*async_head
;
3323 csa
= &start_ccb
->csa
;
3324 added
= csa
->event_enable
;
3325 async_head
= &csa
->ccb_h
.path
->device
->asyncs
;
3328 * If there is already an entry for us, simply
3331 cur_entry
= SLIST_FIRST(async_head
);
3332 while (cur_entry
!= NULL
) {
3333 if ((cur_entry
->callback_arg
== csa
->callback_arg
)
3334 && (cur_entry
->callback
== csa
->callback
))
3336 cur_entry
= SLIST_NEXT(cur_entry
, links
);
3339 if (cur_entry
!= NULL
) {
3341 * If the request has no flags set,
3344 added
&= ~cur_entry
->event_enable
;
3345 if (csa
->event_enable
== 0) {
3346 SLIST_REMOVE(async_head
, cur_entry
,
3348 csa
->ccb_h
.path
->device
->refcount
--;
3349 kfree(cur_entry
, M_CAMXPT
);
3351 cur_entry
->event_enable
= csa
->event_enable
;
3354 cur_entry
= kmalloc(sizeof(*cur_entry
),
3355 M_CAMXPT
, M_INTWAIT
);
3356 cur_entry
->event_enable
= csa
->event_enable
;
3357 cur_entry
->callback_arg
= csa
->callback_arg
;
3358 cur_entry
->callback
= csa
->callback
;
3359 SLIST_INSERT_HEAD(async_head
, cur_entry
, links
);
3360 csa
->ccb_h
.path
->device
->refcount
++;
3363 if ((added
& AC_FOUND_DEVICE
) != 0) {
3365 * Get this peripheral up to date with all
3366 * the currently existing devices.
3368 xpt_for_all_devices(xptsetasyncfunc
, cur_entry
);
3370 if ((added
& AC_PATH_REGISTERED
) != 0) {
3372 * Get this peripheral up to date with all
3373 * the currently existing busses.
3375 xpt_for_all_busses(xptsetasyncbusfunc
, cur_entry
);
3377 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3382 struct ccb_relsim
*crs
;
3385 crs
= &start_ccb
->crs
;
3386 dev
= crs
->ccb_h
.path
->device
;
3389 crs
->ccb_h
.status
= CAM_DEV_NOT_THERE
;
3393 if ((crs
->release_flags
& RELSIM_ADJUST_OPENINGS
) != 0) {
3395 if (INQ_DATA_TQ_ENABLED(&dev
->inq_data
)) {
3396 /* Don't ever go below one opening */
3397 if (crs
->openings
> 0) {
3398 xpt_dev_ccbq_resize(crs
->ccb_h
.path
,
3402 xpt_print_path(crs
->ccb_h
.path
);
3403 kprintf("tagged openings "
3411 if ((crs
->release_flags
& RELSIM_RELEASE_AFTER_TIMEOUT
) != 0) {
3413 if ((dev
->flags
& CAM_DEV_REL_TIMEOUT_PENDING
) != 0) {
3416 * Just extend the old timeout and decrement
3417 * the freeze count so that a single timeout
3418 * is sufficient for releasing the queue.
3420 start_ccb
->ccb_h
.flags
&= ~CAM_DEV_QFREEZE
;
3421 callout_stop(&dev
->c_handle
);
3424 start_ccb
->ccb_h
.flags
|= CAM_DEV_QFREEZE
;
3427 callout_reset(&dev
->c_handle
,
3428 (crs
->release_timeout
* hz
) / 1000,
3429 xpt_release_devq_timeout
, dev
);
3431 dev
->flags
|= CAM_DEV_REL_TIMEOUT_PENDING
;
3435 if ((crs
->release_flags
& RELSIM_RELEASE_AFTER_CMDCMPLT
) != 0) {
3437 if ((dev
->flags
& CAM_DEV_REL_ON_COMPLETE
) != 0) {
3439 * Decrement the freeze count so that a single
3440 * completion is still sufficient to unfreeze
3443 start_ccb
->ccb_h
.flags
&= ~CAM_DEV_QFREEZE
;
3446 dev
->flags
|= CAM_DEV_REL_ON_COMPLETE
;
3447 start_ccb
->ccb_h
.flags
|= CAM_DEV_QFREEZE
;
3451 if ((crs
->release_flags
& RELSIM_RELEASE_AFTER_QEMPTY
) != 0) {
3453 if ((dev
->flags
& CAM_DEV_REL_ON_QUEUE_EMPTY
) != 0
3454 || (dev
->ccbq
.dev_active
== 0)) {
3456 start_ccb
->ccb_h
.flags
&= ~CAM_DEV_QFREEZE
;
3459 dev
->flags
|= CAM_DEV_REL_ON_QUEUE_EMPTY
;
3460 start_ccb
->ccb_h
.flags
|= CAM_DEV_QFREEZE
;
3464 if ((start_ccb
->ccb_h
.flags
& CAM_DEV_QFREEZE
) == 0) {
3466 xpt_release_devq(crs
->ccb_h
.path
, /*count*/1,
3469 start_ccb
->crs
.qfrozen_cnt
= dev
->qfrozen_cnt
;
3470 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3474 xpt_scan_bus(start_ccb
->ccb_h
.path
->periph
, start_ccb
);
3477 xpt_scan_lun(start_ccb
->ccb_h
.path
->periph
,
3478 start_ccb
->ccb_h
.path
, start_ccb
->crcn
.flags
,
3483 #ifdef CAM_DEBUG_DELAY
3484 cam_debug_delay
= CAM_DEBUG_DELAY
;
3486 cam_dflags
= start_ccb
->cdbg
.flags
;
3487 if (cam_dpath
!= NULL
) {
3488 xpt_free_path(cam_dpath
);
3492 if (cam_dflags
!= CAM_DEBUG_NONE
) {
3493 if (xpt_create_path(&cam_dpath
, xpt_periph
,
3494 start_ccb
->ccb_h
.path_id
,
3495 start_ccb
->ccb_h
.target_id
,
3496 start_ccb
->ccb_h
.target_lun
) !=
3498 start_ccb
->ccb_h
.status
= CAM_RESRC_UNAVAIL
;
3499 cam_dflags
= CAM_DEBUG_NONE
;
3501 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3502 xpt_print_path(cam_dpath
);
3503 kprintf("debugging flags now %x\n", cam_dflags
);
3507 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3509 #else /* !CAMDEBUG */
3510 start_ccb
->ccb_h
.status
= CAM_FUNC_NOTAVAIL
;
3511 #endif /* CAMDEBUG */
3515 if ((start_ccb
->ccb_h
.flags
& CAM_DEV_QFREEZE
) != 0)
3516 xpt_freeze_devq(start_ccb
->ccb_h
.path
, 1);
3517 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3524 start_ccb
->ccb_h
.status
= CAM_PROVIDE_FAIL
;
3531 xpt_polled_action(union ccb
*start_ccb
)
3534 struct cam_sim
*sim
;
3535 struct cam_devq
*devq
;
3538 timeout
= start_ccb
->ccb_h
.timeout
;
3539 sim
= start_ccb
->ccb_h
.path
->bus
->sim
;
3541 dev
= start_ccb
->ccb_h
.path
->device
;
3546 * Steal an opening so that no other queued requests
3547 * can get it before us while we simulate interrupts.
3549 dev
->ccbq
.devq_openings
--;
3550 dev
->ccbq
.dev_openings
--;
3552 while(((devq
&& devq
->send_openings
<= 0) || dev
->ccbq
.dev_openings
< 0)
3553 && (--timeout
> 0)) {
3555 (*(sim
->sim_poll
))(sim
);
3556 swi_cambio(NULL
, NULL
);
3559 dev
->ccbq
.devq_openings
++;
3560 dev
->ccbq
.dev_openings
++;
3563 xpt_action(start_ccb
);
3564 while(--timeout
> 0) {
3565 (*(sim
->sim_poll
))(sim
);
3566 swi_cambio(NULL
, NULL
);
3567 if ((start_ccb
->ccb_h
.status
& CAM_STATUS_MASK
)
3574 * XXX Is it worth adding a sim_timeout entry
3575 * point so we can attempt recovery? If
3576 * this is only used for dumps, I don't think
3579 start_ccb
->ccb_h
.status
= CAM_CMD_TIMEOUT
;
3582 start_ccb
->ccb_h
.status
= CAM_RESRC_UNAVAIL
;
/*
 * Schedule a peripheral driver to receive a ccb when its
 * target device has space for more transactions.
 */
void
xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
{
        struct cam_ed *device;
        union ccb *work_ccb;
        int runq;

        CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
        device = perph->path->device;
        crit_enter();
        if (periph_is_queued(perph)) {
                /* Simply reorder based on new priority */
                CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
                          ("   change priority to %d\n", new_priority));
                if (new_priority < perph->pinfo.priority) {
                        camq_change_priority(&device->drvq,
                                             perph->pinfo.index,
                                             new_priority);
                }
                runq = 0;
        } else if (SIM_DEAD(perph->path->bus->sim)) {
                /* The SIM is gone; just call periph_start directly. */
                work_ccb = xpt_get_ccb(perph->path->device);
                crit_exit();
                if (work_ccb == NULL)
                        return; /* XXX */
                xpt_setup_ccb(&work_ccb->ccb_h, perph->path, new_priority);
                perph->pinfo.priority = new_priority;
                perph->periph_start(perph, work_ccb);
                return;
        } else {
                /* New entry on the queue */
                CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
                          ("   added periph to queue\n"));
                perph->pinfo.priority = new_priority;
                perph->pinfo.generation = ++device->drvq.generation;
                camq_insert(&device->drvq, &perph->pinfo);
                runq = xpt_schedule_dev_allocq(perph->path->bus, device);
        }
        crit_exit();
        if (runq != 0) {
                CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
                          ("   calling xpt_run_devq\n"));
                xpt_run_dev_allocq(perph->path->bus);
        }
}
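/*
 * Illustrative sketch only (not in the original file): how a peripheral
 * driver typically asks for a CCB.  It calls xpt_schedule() with a priority,
 * and the XPT later hands it a CCB by invoking the driver's periph_start
 * routine once a device slot is available.  The driver and function names
 * are assumptions for the example.
 */
#if 0
static void
exampledriver_kick(struct cam_periph *periph)
{
        /* Ask the transport layer to call our periph_start routine. */
        xpt_schedule(periph, /*priority*/1);
}

static void
exampledriver_start(struct cam_periph *periph, union ccb *start_ccb)
{
        /* Fill in start_ccb (e.g. a SCSI I/O) and hand it to xpt_action(). */
        xpt_action(start_ccb);
}
#endif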
/*
 * Schedule a device to run on a given queue.
 * If the device was inserted as a new entry on the queue,
 * return 1 meaning the device queue should be run.  If we
 * were already queued, implying someone else has already
 * started the queue, return 0 so the caller doesn't attempt
 * to run the queue.  Must be run in a critical section.
 */
static int
xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
                 u_int32_t new_priority)
{
        int retval;
        u_int32_t old_priority;

        CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));

        old_priority = pinfo->priority;

        /*
         * Are we already queued?
         */
        if (pinfo->index != CAM_UNQUEUED_INDEX) {
                /* Simply reorder based on new priority */
                if (new_priority < old_priority) {
                        camq_change_priority(queue, pinfo->index,
                                             new_priority);
                        CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
                                        ("changed priority to %d\n",
                                         new_priority));
                }
                retval = 0;
        } else {
                /* New entry on the queue */
                if (new_priority < old_priority)
                        pinfo->priority = new_priority;

                CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
                                ("Inserting onto queue\n"));
                pinfo->generation = ++queue->generation;
                camq_insert(queue, pinfo);
                retval = 1;
        }
        return (retval);
}
3686 xpt_run_dev_allocq(struct cam_eb
*bus
)
3688 struct cam_devq
*devq
;
3690 if ((devq
= bus
->sim
->devq
) == NULL
) {
3691 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
, ("xpt_run_dev_allocq: NULL devq\n"));
3694 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
, ("xpt_run_dev_allocq\n"));
3696 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
,
3697 (" qfrozen_cnt == 0x%x, entries == %d, "
3698 "openings == %d, active == %d\n",
3699 devq
->alloc_queue
.qfrozen_cnt
,
3700 devq
->alloc_queue
.entries
,
3701 devq
->alloc_openings
,
3702 devq
->alloc_active
));
3705 devq
->alloc_queue
.qfrozen_cnt
++;
3706 while ((devq
->alloc_queue
.entries
> 0)
3707 && (devq
->alloc_openings
> 0)
3708 && (devq
->alloc_queue
.qfrozen_cnt
<= 1)) {
3709 struct cam_ed_qinfo
*qinfo
;
3710 struct cam_ed
*device
;
3711 union ccb
*work_ccb
;
3712 struct cam_periph
*drv
;
3715 qinfo
= (struct cam_ed_qinfo
*)camq_remove(&devq
->alloc_queue
,
3717 device
= qinfo
->device
;
3719 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
,
3720 ("running device %p\n", device
));
3722 drvq
= &device
->drvq
;
3725 if (drvq
->entries
<= 0) {
3726 panic("xpt_run_dev_allocq: "
3727 "Device on queue without any work to do");
3730 if ((work_ccb
= xpt_get_ccb(device
)) != NULL
) {
3731 devq
->alloc_openings
--;
3732 devq
->alloc_active
++;
3733 drv
= (struct cam_periph
*)camq_remove(drvq
, CAMQ_HEAD
);
3735 xpt_setup_ccb(&work_ccb
->ccb_h
, drv
->path
,
3736 drv
->pinfo
.priority
);
3737 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
,
3738 ("calling periph start\n"));
3739 drv
->periph_start(drv
, work_ccb
);
3742 * Malloc failure in alloc_ccb
3745 * XXX add us to a list to be run from free_ccb
3746 * if we don't have any ccbs active on this
3747 * device queue otherwise we may never get run
3753 /* Raise IPL for possible insertion and test at top of loop */
3756 if (drvq
->entries
> 0) {
3757 /* We have more work. Attempt to reschedule */
3758 xpt_schedule_dev_allocq(bus
, device
);
3761 devq
->alloc_queue
.qfrozen_cnt
--;
3766 xpt_run_dev_sendq(struct cam_eb
*bus
)
3768 struct cam_devq
*devq
;
3770 if ((devq
= bus
->sim
->devq
) == NULL
) {
3771 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
, ("xpt_run_dev_sendq: NULL devq\n"));
3774 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
, ("xpt_run_dev_sendq\n"));
3777 devq
->send_queue
.qfrozen_cnt
++;
3778 while ((devq
->send_queue
.entries
> 0)
3779 && (devq
->send_openings
> 0)) {
3780 struct cam_ed_qinfo
*qinfo
;
3781 struct cam_ed
*device
;
3782 union ccb
*work_ccb
;
3783 struct cam_sim
*sim
;
3785 if (devq
->send_queue
.qfrozen_cnt
> 1) {
3789 qinfo
= (struct cam_ed_qinfo
*)camq_remove(&devq
->send_queue
,
3791 device
= qinfo
->device
;
3794 * If the device has been "frozen", don't attempt
3797 if (device
->qfrozen_cnt
> 0) {
3801 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
,
3802 ("running device %p\n", device
));
3804 work_ccb
= cam_ccbq_peek_ccb(&device
->ccbq
, CAMQ_HEAD
);
3805 if (work_ccb
== NULL
) {
3806 kprintf("device on run queue with no ccbs???\n");
3810 if ((work_ccb
->ccb_h
.flags
& CAM_HIGH_POWER
) != 0) {
3812 if (num_highpower
<= 0) {
3814 * We got a high power command, but we
3815 * don't have any available slots. Freeze
3816 * the device queue until we have a slot
3819 device
->qfrozen_cnt
++;
3820 STAILQ_INSERT_TAIL(&highpowerq
,
3827 * Consume a high power slot while
3833 devq
->active_dev
= device
;
3834 cam_ccbq_remove_ccb(&device
->ccbq
, work_ccb
);
3836 cam_ccbq_send_ccb(&device
->ccbq
, work_ccb
);
3838 devq
->send_openings
--;
3839 devq
->send_active
++;
3841 if (device
->ccbq
.queue
.entries
> 0)
3842 xpt_schedule_dev_sendq(bus
, device
);
3844 if (work_ccb
&& (work_ccb
->ccb_h
.flags
& CAM_DEV_QFREEZE
) != 0){
3846 * The client wants to freeze the queue
3847 * after this CCB is sent.
3849 device
->qfrozen_cnt
++;
3852 /* In Target mode, the peripheral driver knows best... */
3853 if (work_ccb
->ccb_h
.func_code
== XPT_SCSI_IO
) {
3854 if ((device
->inq_flags
& SID_CmdQue
) != 0
3855 && work_ccb
->csio
.tag_action
!= CAM_TAG_ACTION_NONE
)
3856 work_ccb
->ccb_h
.flags
|= CAM_TAG_ACTION_VALID
;
3859 * Clear this in case of a retried CCB that
3860 * failed due to a rejected tag.
3862 work_ccb
->ccb_h
.flags
&= ~CAM_TAG_ACTION_VALID
;
3866 * Device queues can be shared among multiple sim instances
3867 * that reside on different busses. Use the SIM in the queue
3868 * CCB's path, rather than the one in the bus that was passed
3869 * into this function.
3871 sim
= work_ccb
->ccb_h
.path
->bus
->sim
;
3872 (*(sim
->sim_action
))(sim
, work_ccb
);
3874 devq
->active_dev
= NULL
;
3875 /* Raise IPL for possible insertion and test at top of loop */
3877 devq
->send_queue
.qfrozen_cnt
--;
/*
 * This function merges stuff from the slave ccb into the master ccb, while
 * keeping important fields in the master ccb constant.
 */
void
xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
{
        /*
         * Pull fields that are valid for peripheral drivers to set
         * into the master CCB along with the CCB "payload".
         */
        master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
        master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
        master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
        master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
        bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
              sizeof(union ccb) - sizeof(struct ccb_hdr));
}

void
xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
{
        CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
        callout_init(&ccb_h->timeout_ch);
        ccb_h->pinfo.priority = priority;
        ccb_h->path = path;
        ccb_h->path_id = path->bus->path_id;
        if (path->target)
                ccb_h->target_id = path->target->target_id;
        else
                ccb_h->target_id = CAM_TARGET_WILDCARD;
        if (path->device) {
                ccb_h->target_lun = path->device->lun_id;
                ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
        } else {
                ccb_h->target_lun = CAM_TARGET_WILDCARD;
        }
        ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
        ccb_h->flags = 0;
}
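/*
 * Illustrative sketch only (not in the original file): the usual pattern for
 * an immediate CCB built on the stack with xpt_setup_ccb() and executed
 * synchronously through xpt_action(), here fetching a device's cached
 * inquiry data with XPT_GDEV_TYPE, much as xptsetasyncfunc() above does.
 */
#if 0
static void
example_get_inquiry(struct cam_path *path)
{
        struct ccb_getdev cgd;

        xpt_setup_ccb(&cgd.ccb_h, path, /*priority*/1);
        cgd.ccb_h.func_code = XPT_GDEV_TYPE;
        xpt_action((union ccb *)&cgd);
        if (cgd.ccb_h.status == CAM_REQ_CMP) {
                /* cgd.inq_data now holds the cached SCSI inquiry data. */
        }
}
#endif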
/* Path manipulation functions */
cam_status
xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
                path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
{
        struct cam_path *path;
        cam_status status;

        path = kmalloc(sizeof(*path), M_CAMXPT, M_INTWAIT);
        status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
        if (status != CAM_REQ_CMP) {
                kfree(path, M_CAMXPT);
                path = NULL;
        }
        *new_path_ptr = path;
        return (status);
}

static cam_status
xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
{
        struct cam_eb *bus;
        struct cam_et *target;
        struct cam_ed *device;
        cam_status status;

        status = CAM_REQ_CMP;   /* Completed without error */
        target = NULL;          /* Wildcarded */
        device = NULL;          /* Wildcarded */

        /*
         * We will potentially modify the EDT, so block interrupts
         * that may attempt to create cam paths.
         */
        crit_enter();
        bus = xpt_find_bus(path_id);
        if (bus == NULL) {
                status = CAM_PATH_INVALID;
        } else {
                target = xpt_find_target(bus, target_id);
                if (target == NULL) {
                        /* Create one */
                        struct cam_et *new_target;

                        new_target = xpt_alloc_target(bus, target_id);
                        if (new_target == NULL) {
                                status = CAM_RESRC_UNAVAIL;
                        } else {
                                target = new_target;
                        }
                }
                if (target != NULL) {
                        device = xpt_find_device(target, lun_id);
                        if (device == NULL) {
                                /* Create one */
                                struct cam_ed *new_device;

                                new_device = xpt_alloc_device(bus,
                                                              target,
                                                              lun_id);
                                if (new_device == NULL) {
                                        status = CAM_RESRC_UNAVAIL;
                                } else {
                                        device = new_device;
                                }
                        }
                }
        }
        crit_exit();

        /*
         * Only touch the user's data if we are successful.
         */
        if (status == CAM_REQ_CMP) {
                new_path->periph = perph;
                new_path->bus = bus;
                new_path->target = target;
                new_path->device = device;
                CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
        } else {
                if (device != NULL)
                        xpt_release_device(bus, target, device);
                if (target != NULL)
                        xpt_release_target(bus, target);
                if (bus != NULL)
                        xpt_release_bus(bus);
        }

        return (status);
}

static void
xpt_release_path(struct cam_path *path)
{
        CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
        if (path->device != NULL) {
                xpt_release_device(path->bus, path->target, path->device);
                path->device = NULL;
        }
        if (path->target != NULL) {
                xpt_release_target(path->bus, path->target);
                path->target = NULL;
        }
        if (path->bus != NULL) {
                xpt_release_bus(path->bus);
                path->bus = NULL;
        }
}

void
xpt_free_path(struct cam_path *path)
{
        CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
        xpt_release_path(path);
        kfree(path, M_CAMXPT);
}
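/*
 * Illustrative sketch only (not in the original file): the usual
 * xpt_create_path()/xpt_free_path() pairing.  A path compiled against a
 * specific bus/target/lun holds references on the corresponding EDT entries
 * until it is freed.  The numeric ids are placeholders.
 */
#if 0
static cam_status
example_with_path(void)
{
        struct cam_path *path;
        cam_status status;

        status = xpt_create_path(&path, /*periph*/NULL,
                                 /*path_id*/0, /*target_id*/1, /*lun_id*/0);
        if (status != CAM_REQ_CMP)
                return (status);

        xpt_print_path(path);
        kprintf("example path created\n");

        xpt_free_path(path);
        return (CAM_REQ_CMP);
}
#endif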
/*
 * Return -1 for failure, 0 for exact match, 1 for match with wildcards
 * in path1, 2 for match with wildcards in path2.
 */
int
xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
{
        int retval = 0;

        if (path1->bus != path2->bus) {
                if (path1->bus->path_id == CAM_BUS_WILDCARD)
                        retval = 1;
                else if (path2->bus->path_id == CAM_BUS_WILDCARD)
                        retval = 2;
                else
                        return (-1);
        }
        if (path1->target != path2->target) {
                if (path1->target->target_id == CAM_TARGET_WILDCARD) {
                        if (retval == 0)
                                retval = 1;
                } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
                        retval = 2;
                else
                        return (-1);
        }
        if (path1->device != path2->device) {
                if (path1->device->lun_id == CAM_LUN_WILDCARD) {
                        if (retval == 0)
                                retval = 1;
                } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
                        retval = 2;
                else
                        return (-1);
        }
        return (retval);
}
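/*
 * Illustrative sketch only (not in the original file): interpreting
 * xpt_path_comp() results.  A wildcard path (e.g. one compiled with
 * CAM_TARGET_WILDCARD/CAM_LUN_WILDCARD) "matches" a fully specified path,
 * with the return value telling the caller which side held the wildcards.
 */
#if 0
static int
example_path_covers(struct cam_path *wild, struct cam_path *specific)
{
        switch (xpt_path_comp(wild, specific)) {
        case 0:         /* exact match */
        case 1:         /* match, wildcards were in 'wild' */
                return (1);
        case 2:         /* match, wildcards were in 'specific' */
        default:        /* -1: no match */
                return (0);
        }
}
#endif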
void
xpt_print_path(struct cam_path *path)
{
        if (path == NULL)
                kprintf("(nopath): ");
        else {
                if (path->periph != NULL)
                        kprintf("(%s%d:", path->periph->periph_name,
                                path->periph->unit_number);
                else
                        kprintf("(noperiph:");

                if (path->bus != NULL)
                        kprintf("%s%d:%d:", path->bus->sim->sim_name,
                                path->bus->sim->unit_number,
                                path->bus->sim->bus_id);
                else
                        kprintf("nobus:");

                if (path->target != NULL)
                        kprintf("%d:", path->target->target_id);
                else
                        kprintf("X:");

                if (path->device != NULL)
                        kprintf("%d): ", path->device->lun_id);
                else
                        kprintf("X): ");
        }
}

int
xpt_path_string(struct cam_path *path, char *str, size_t str_len)
{
        struct sbuf sb;

        sbuf_new(&sb, str, str_len, 0);

        if (path == NULL)
                sbuf_printf(&sb, "(nopath): ");
        else {
                if (path->periph != NULL)
                        sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
                                    path->periph->unit_number);
                else
                        sbuf_printf(&sb, "(noperiph:");

                if (path->bus != NULL)
                        sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
                                    path->bus->sim->unit_number,
                                    path->bus->sim->bus_id);
                else
                        sbuf_printf(&sb, "nobus:");

                if (path->target != NULL)
                        sbuf_printf(&sb, "%d:", path->target->target_id);
                else
                        sbuf_printf(&sb, "X:");

                if (path->device != NULL)
                        sbuf_printf(&sb, "%d): ", path->device->lun_id);
                else
                        sbuf_printf(&sb, "X): ");
        }
        sbuf_finish(&sb);

        return(sbuf_len(&sb));
}

path_id_t
xpt_path_path_id(struct cam_path *path)
{
        return(path->bus->path_id);
}

target_id_t
xpt_path_target_id(struct cam_path *path)
{
        if (path->target != NULL)
                return (path->target->target_id);
        else
                return (CAM_TARGET_WILDCARD);
}

lun_id_t
xpt_path_lun_id(struct cam_path *path)
{
        if (path->device != NULL)
                return (path->device->lun_id);
        else
                return (CAM_LUN_WILDCARD);
}

struct cam_sim *
xpt_path_sim(struct cam_path *path)
{
        return (path->bus->sim);
}

struct cam_periph *
xpt_path_periph(struct cam_path *path)
{
        return (path->periph);
}
4184 * Release a CAM control block for the caller. Remit the cost of the structure
4185 * to the device referenced by the path. If the this device had no 'credits'
4186 * and peripheral drivers have registered async callbacks for this notification
4190 xpt_release_ccb(union ccb
*free_ccb
)
4192 struct cam_path
*path
;
4193 struct cam_ed
*device
;
4196 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
, ("xpt_release_ccb\n"));
4197 path
= free_ccb
->ccb_h
.path
;
4198 device
= path
->device
;
4201 cam_ccbq_release_opening(&device
->ccbq
);
4202 if (xpt_ccb_count
> xpt_max_ccbs
) {
4203 xpt_free_ccb(free_ccb
);
4206 SLIST_INSERT_HEAD(&ccb_freeq
, &free_ccb
->ccb_h
, xpt_links
.sle
);
4208 if (bus
->sim
->devq
== NULL
) {
4212 bus
->sim
->devq
->alloc_openings
++;
4213 bus
->sim
->devq
->alloc_active
--;
4214 /* XXX Turn this into an inline function - xpt_run_device?? */
4215 if ((device_is_alloc_queued(device
) == 0)
4216 && (device
->drvq
.entries
> 0)) {
4217 xpt_schedule_dev_allocq(bus
, device
);
4220 if (bus
->sim
->devq
&& dev_allocq_is_runnable(bus
->sim
->devq
))
4221 xpt_run_dev_allocq(bus
);
4224 /* Functions accessed by SIM drivers */
4227 * A sim structure, listing the SIM entry points and instance
4228 * identification info is passed to xpt_bus_register to hook the SIM
4229 * into the CAM framework. xpt_bus_register creates a cam_eb entry
4230 * for this new bus and places it in the array of busses and assigns
4231 * it a path_id. The path_id may be influenced by "hard wiring"
4232 * information specified by the user. Once interrupt services are
4233 * availible, the bus will be probed.
4236 xpt_bus_register(struct cam_sim
*sim
, u_int32_t bus
)
4238 struct cam_eb
*new_bus
;
4239 struct cam_eb
*old_bus
;
4240 struct ccb_pathinq cpi
;
4243 new_bus
= kmalloc(sizeof(*new_bus
), M_CAMXPT
, M_INTWAIT
);
4245 if (strcmp(sim
->sim_name
, "xpt") != 0) {
4247 xptpathid(sim
->sim_name
, sim
->unit_number
, sim
->bus_id
);
4250 TAILQ_INIT(&new_bus
->et_entries
);
4251 new_bus
->path_id
= sim
->path_id
;
4254 timevalclear(&new_bus
->last_reset
);
4256 new_bus
->refcount
= 1; /* Held until a bus_deregister event */
4257 new_bus
->generation
= 0;
4259 old_bus
= TAILQ_FIRST(&xpt_busses
);
4260 while (old_bus
!= NULL
4261 && old_bus
->path_id
< new_bus
->path_id
)
4262 old_bus
= TAILQ_NEXT(old_bus
, links
);
4263 if (old_bus
!= NULL
)
4264 TAILQ_INSERT_BEFORE(old_bus
, new_bus
, links
);
4266 TAILQ_INSERT_TAIL(&xpt_busses
, new_bus
, links
);
4270 /* Notify interested parties */
4271 if (sim
->path_id
!= CAM_XPT_PATH_ID
) {
4272 struct cam_path path
;
4274 xpt_compile_path(&path
, /*periph*/NULL
, sim
->path_id
,
4275 CAM_TARGET_WILDCARD
, CAM_LUN_WILDCARD
);
4276 xpt_setup_ccb(&cpi
.ccb_h
, &path
, /*priority*/1);
4277 cpi
.ccb_h
.func_code
= XPT_PATH_INQ
;
4278 xpt_action((union ccb
*)&cpi
);
4279 xpt_async(AC_PATH_REGISTERED
, &path
, &cpi
);
4280 xpt_release_path(&path
);
4282 return (CAM_SUCCESS
);
4286 * Deregister a bus. We must clean out all transactions pending on the bus.
4287 * This routine is typically called prior to cam_sim_free() (e.g. see
4288 * dev/usbmisc/umass/umass.c)
4291 xpt_bus_deregister(path_id_t pathid
)
4293 struct cam_path bus_path
;
4294 struct cam_ed
*device
;
4295 struct cam_ed_qinfo
*qinfo
;
4296 struct cam_devq
*devq
;
4297 struct cam_periph
*periph
;
4298 struct cam_sim
*ccbsim
;
4299 union ccb
*work_ccb
;
4302 status
= xpt_compile_path(&bus_path
, NULL
, pathid
,
4303 CAM_TARGET_WILDCARD
, CAM_LUN_WILDCARD
);
4304 if (status
!= CAM_REQ_CMP
)
4308 * This should clear out all pending requests and timeouts, but
4309 * the ccb's may be queued to a software interrupt.
4311 * XXX AC_LOST_DEVICE does not precisely abort the pending requests,
4312 * and it really ought to.
4314 xpt_async(AC_LOST_DEVICE
, &bus_path
, NULL
);
4315 xpt_async(AC_PATH_DEREGISTERED
, &bus_path
, NULL
);
4317 /* The SIM may be gone, so use a dummy SIM for any stray operations. */
4318 devq
= bus_path
.bus
->sim
->devq
;
4319 bus_path
.bus
->sim
= &cam_dead_sim
;
4321 /* Execute any pending operations now. */
4322 while ((qinfo
= (struct cam_ed_qinfo
*)camq_remove(&devq
->send_queue
,
4323 CAMQ_HEAD
)) != NULL
||
4324 (qinfo
= (struct cam_ed_qinfo
*)camq_remove(&devq
->alloc_queue
,
4325 CAMQ_HEAD
)) != NULL
) {
4327 device
= qinfo
->device
;
4328 work_ccb
= cam_ccbq_peek_ccb(&device
->ccbq
, CAMQ_HEAD
);
4329 if (work_ccb
!= NULL
) {
4330 devq
->active_dev
= device
;
4331 cam_ccbq_remove_ccb(&device
->ccbq
, work_ccb
);
4332 cam_ccbq_send_ccb(&device
->ccbq
, work_ccb
);
4333 ccbsim
= work_ccb
->ccb_h
.path
->bus
->sim
;
4334 (*(ccbsim
->sim_action
))(ccbsim
, work_ccb
);
4337 periph
= (struct cam_periph
*)camq_remove(&device
->drvq
,
4340 xpt_schedule(periph
, periph
->pinfo
.priority
);
4341 } while (work_ccb
!= NULL
|| periph
!= NULL
);
4344 /* Make sure all completed CCBs are processed. */
4345 while (!TAILQ_EMPTY(&cam_bioq
)) {
4348 /* Repeat the async's for the benefit of any new devices. */
4349 xpt_async(AC_LOST_DEVICE
, &bus_path
, NULL
);
4350 xpt_async(AC_PATH_DEREGISTERED
, &bus_path
, NULL
);
4353 /* Release the reference count held while registered. */
4354 xpt_release_bus(bus_path
.bus
);
4355 xpt_release_path(&bus_path
);
4357 /* Recheck for more completed CCBs. */
4358 while (!TAILQ_EMPTY(&cam_bioq
))
4361 return (CAM_REQ_CMP
);
4365 xptnextfreepathid(void)
4372 bus
= TAILQ_FIRST(&xpt_busses
);
4374 /* Find an unoccupied pathid */
4376 && bus
->path_id
<= pathid
) {
4377 if (bus
->path_id
== pathid
)
4379 bus
= TAILQ_NEXT(bus
, links
);
4383 * Ensure that this pathid is not reserved for
4384 * a bus that may be registered in the future.
4386 if (resource_string_value("scbus", pathid
, "at", &strval
) == 0) {
4388 /* Start the search over */
4395 xptpathid(const char *sim_name
, int sim_unit
, int sim_bus
)
4401 pathid
= CAM_XPT_PATH_ID
;
4402 ksnprintf(buf
, sizeof(buf
), "%s%d", sim_name
, sim_unit
);
4404 while ((i
= resource_query_string(i
, "at", buf
)) != -1) {
4405 if (strcmp(resource_query_name(i
), "scbus")) {
4406 /* Avoid a bit of foot shooting. */
4409 dunit
= resource_query_unit(i
);
4410 if (dunit
< 0) /* unwired?! */
4412 if (resource_int_value("scbus", dunit
, "bus", &val
) == 0) {
4413 if (sim_bus
== val
) {
4417 } else if (sim_bus
== 0) {
4418 /* Unspecified matches bus 0 */
4422 kprintf("Ambiguous scbus configuration for %s%d "
4423 "bus %d, cannot wire down. The kernel "
4424 "config entry for scbus%d should "
4425 "specify a controller bus.\n"
4426 "Scbus will be assigned dynamically.\n",
4427 sim_name
, sim_unit
, sim_bus
, dunit
);
4432 if (pathid
== CAM_XPT_PATH_ID
)
4433 pathid
= xptnextfreepathid();
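/*
 * The "hard wiring" consulted above normally comes from the kernel
 * configuration file; an entry along the lines of "device scbus0 at ahc0
 * bus 0" pins path id 0 to the first bus of the ahc0 controller.  The exact
 * syntax is an assumption here and varies between releases; see the
 * scsi(4)/cam(4) documentation for the system in question.
 */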
void
xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
{
	struct cam_eb *bus;
	struct cam_et *target, *next_target;
	struct cam_ed *device, *next_device;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));

	/*
	 * Most async events come from a CAM interrupt context.  In
	 * a few cases, the error recovery code at the peripheral layer,
	 * which may run from our SWI or a process context, may signal
	 * deferred events with a call to xpt_async.  Ensure async
	 * notifications are serialized by blocking cam interrupts.
	 */
	crit_enter();

	bus = path->bus;

	if (async_code == AC_BUS_RESET) {
		/* Update our notion of when the last reset occurred */
		microuptime(&bus->last_reset);
	}

	for (target = TAILQ_FIRST(&bus->et_entries);
	     target != NULL;
	     target = next_target) {

		next_target = TAILQ_NEXT(target, links);

		if (path->target != target
		 && path->target->target_id != CAM_TARGET_WILDCARD
		 && target->target_id != CAM_TARGET_WILDCARD)
			continue;

		if (async_code == AC_SENT_BDR) {
			/* Update our notion of when the last reset occurred */
			microuptime(&path->target->last_reset);
		}

		for (device = TAILQ_FIRST(&target->ed_entries);
		     device != NULL;
		     device = next_device) {

			next_device = TAILQ_NEXT(device, links);

			if (path->device != device
			 && path->device->lun_id != CAM_LUN_WILDCARD
			 && device->lun_id != CAM_LUN_WILDCARD)
				continue;

			xpt_dev_async(async_code, bus, target,
				      device, async_arg);

			xpt_async_bcast(&device->asyncs, async_code,
					path, async_arg);
		}
	}

	/*
	 * If this wasn't a fully wildcarded async, tell all
	 * clients that want all async events.
	 */
	if (bus != xpt_periph->path->bus)
		xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
				path, async_arg);
	crit_exit();
}
static void
xpt_async_bcast(struct async_list *async_head,
		u_int32_t async_code,
		struct cam_path *path, void *async_arg)
{
	struct async_node *cur_entry;

	cur_entry = SLIST_FIRST(async_head);
	while (cur_entry != NULL) {
		struct async_node *next_entry;
		/*
		 * Grab the next list entry before we call the current
		 * entry's callback.  This is because the callback function
		 * can delete its async callback entry.
		 */
		next_entry = SLIST_NEXT(cur_entry, links);
		if ((cur_entry->event_enable & async_code) != 0)
			cur_entry->callback(cur_entry->callback_arg,
					    async_code, path, async_arg);
		cur_entry = next_entry;
	}
}
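
/*
 * Illustrative sketch only (not part of this file): the shape of a
 * peripheral driver async callback as xpt_async_bcast() above invokes it.
 * The exampleasync() name is hypothetical; real drivers install their
 * handler with an XPT_SASYNC_CB ccb, which adds an async_node to the
 * device's async list.
 */
#if 0
static void
exampleasync(void *callback_arg, u_int32_t code, struct cam_path *path,
	     void *arg)
{
	struct cam_periph *periph = (struct cam_periph *)callback_arg;

	switch (code) {
	case AC_LOST_DEVICE:
		/*
		 * After checking that 'path' refers to this periph's device,
		 * tear down any state tied to the departed device.
		 */
		cam_periph_invalidate(periph);
		break;
	default:
		break;
	}
}
#endif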
/*
 * Handle any per-device event notifications that require action by the XPT.
 */
static void
xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
	      struct cam_ed *device, void *async_arg)
{
	cam_status status;
	struct cam_path newpath;

	/*
	 * We only need to handle events for real devices.
	 */
	if (target->target_id == CAM_TARGET_WILDCARD
	 || device->lun_id == CAM_LUN_WILDCARD)
		return;

	/*
	 * We need our own path with wildcards expanded to
	 * handle certain types of events.
	 */
	if ((async_code == AC_SENT_BDR)
	 || (async_code == AC_BUS_RESET)
	 || (async_code == AC_INQ_CHANGED))
		status = xpt_compile_path(&newpath, NULL, bus->path_id,
					  target->target_id, device->lun_id);
	else
		status = CAM_REQ_CMP_ERR;

	if (status == CAM_REQ_CMP) {

		/*
		 * Allow transfer negotiation to occur in a
		 * tag free environment.
		 */
		if (async_code == AC_SENT_BDR
		 || async_code == AC_BUS_RESET)
			xpt_toggle_tags(&newpath);

		if (async_code == AC_INQ_CHANGED) {
			/*
			 * We've sent a start unit command, or
			 * something similar to a device that
			 * may have caused its inquiry data to
			 * change. So we re-scan the device to
			 * refresh the inquiry data for it.
			 */
			xpt_scan_lun(newpath.periph, &newpath,
				     CAM_EXPECT_INQ_CHANGE, NULL);
		}
		xpt_release_path(&newpath);
	} else if (async_code == AC_LOST_DEVICE) {
		/*
		 * When we lose a device the device may be about to detach
		 * the sim, we have to clear out all pending timeouts and
		 * requests before that happens.  XXX it would be nice if
		 * we could abort the requests pertaining to the device.
		 */
		xpt_release_devq_timeout(device);
		if ((device->flags & CAM_DEV_UNCONFIGURED) == 0) {
			device->flags |= CAM_DEV_UNCONFIGURED;
			xpt_release_device(bus, target, device);
		}
	} else if (async_code == AC_TRANSFER_NEG) {
		struct ccb_trans_settings *settings;

		settings = (struct ccb_trans_settings *)async_arg;
		xpt_set_transfer_settings(settings, device,
					  /*async_update*/TRUE);
	}
}
u_int32_t
xpt_freeze_devq(struct cam_path *path, u_int count)
{
	struct ccb_hdr *ccbh;

	crit_enter();
	path->device->qfrozen_cnt += count;

	/*
	 * Mark the last CCB in the queue as needing
	 * to be requeued if the driver hasn't
	 * changed its state yet.  This fixes a race
	 * where a ccb is just about to be queued to
	 * a controller driver when its interrupt routine
	 * freezes the queue.  To completely close the
	 * hole, controller drivers must check to see
	 * if a ccb's status is still CAM_REQ_INPROG
	 * under critical section protection just before they queue
	 * the CCB.  See ahc_action/ahc_freeze_devq for
	 * an example.
	 */
	ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
	if (ccbh && ccbh->status == CAM_REQ_INPROG)
		ccbh->status = CAM_REQUEUE_REQ;
	crit_exit();
	return (path->device->qfrozen_cnt);
}
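
/*
 * Illustrative sketch only: how a controller driver's action routine can
 * close the race described above by re-checking the CCB status inside a
 * critical section just before committing the CCB to its hardware queue
 * (compare ahc_action()/ahc_freeze_devq()).  The foo_action() name is
 * hypothetical.
 */
#if 0
static void
foo_action(struct cam_sim *sim, union ccb *ccb)
{
	crit_enter();
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		/* The XPT froze the devq and already requeued this CCB. */
		crit_exit();
		xpt_done(ccb);
		return;
	}
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	/* ... hand the request to the hardware ... */
	crit_exit();
}
#endif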
u_int32_t
xpt_freeze_simq(struct cam_sim *sim, u_int count)
{
	if (sim->devq == NULL)
		return (count);
	sim->devq->send_queue.qfrozen_cnt += count;
	if (sim->devq->active_dev != NULL) {
		struct ccb_hdr *ccbh;

		ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
				  ccb_hdr_tailq);
		if (ccbh && ccbh->status == CAM_REQ_INPROG)
			ccbh->status = CAM_REQUEUE_REQ;
	}
	return (sim->devq->send_queue.qfrozen_cnt);
}
/*
 * WARNING: most devices, especially USB/UMASS, may detach their sim early.
 * We ref-count the sim (and the bus only NULLs it out when the bus has been
 * freed, which is not the case here), but the device queue is also freed XXX
 * and we have to check that here.
 *
 * XXX fixme: could we simply not null-out the device queue via
 * the sim release path?
 */
static void
xpt_release_devq_timeout(void *arg)
{
	struct cam_ed *device;

	device = (struct cam_ed *)arg;

	xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
}

void
xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
{
	xpt_release_devq_device(path->device, count, run_queue);
}
static void
xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
{
	crit_enter();
	if (dev->qfrozen_cnt > 0) {

		count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
		dev->qfrozen_cnt -= count;
		if (dev->qfrozen_cnt == 0) {

			/*
			 * No longer need to wait for a successful
			 * command completion.
			 */
			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;

			/*
			 * Remove any timeouts that might be scheduled
			 * to release this queue.
			 */
			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
				callout_stop(&dev->c_handle);
				dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
			}

			/*
			 * Now that we are unfrozen schedule the
			 * device so any pending transactions are
			 * run.
			 */
			if ((dev->ccbq.queue.entries > 0)
			 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
			 && (run_queue != 0)) {
				xpt_run_dev_sendq(dev->target->bus);
			}
		}
	}
	crit_exit();
}
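
/*
 * Illustrative sketch only: the usual pairing of xpt_freeze_devq() and
 * xpt_release_devq() as a peripheral's error recovery path would use it.
 * The example_requeue() name is hypothetical; real callers also rely on
 * the SIM setting CAM_DEV_QFRZN on failed CCBs.
 */
#if 0
static void
example_requeue(struct cam_path *path)
{
	xpt_freeze_devq(path, /*count*/1);	/* hold off new I/O */
	/* ... repair device state, requeue the failed CCB ... */
	xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
}
#endif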
void
xpt_release_simq(struct cam_sim *sim, int run_queue)
{
	struct camq *sendq;

	if (sim->devq == NULL)
		return;

	sendq = &(sim->devq->send_queue);
	crit_enter();

	if (sendq->qfrozen_cnt > 0) {
		sendq->qfrozen_cnt--;
		if (sendq->qfrozen_cnt == 0) {
			struct cam_eb *bus;

			/*
			 * If there is a timeout scheduled to release this
			 * sim queue, remove it.  The queue frozen count is
			 * already at 0.
			 */
			if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
				callout_stop(&sim->c_handle);
				sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
			}
			bus = xpt_find_bus(sim->path_id);
			crit_exit();

			if (run_queue) {
				/*
				 * Now that we are unfrozen run the send queue.
				 */
				xpt_run_dev_sendq(bus);
			}
			xpt_release_bus(bus);
		} else {
			crit_exit();
		}
	} else {
		crit_exit();
	}
}
void
xpt_done(union ccb *done_ccb)
{
	crit_enter();

	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
		/*
		 * Queue up the request for handling by our SWI handler
		 * for any of the "non-immediate" type of ccbs.
		 */
		switch (done_ccb->ccb_h.path->periph->type) {
		case CAM_PERIPH_BIO:
			TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
					  sim_links.tqe);
			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
			setsoftcambio();
			break;
		default:
			panic("unknown periph type %d",
			      done_ccb->ccb_h.path->periph->type);
		}
	}
	crit_exit();
}

union ccb *
xpt_alloc_ccb(void)
{
	union ccb *new_ccb;

	new_ccb = kmalloc(sizeof(*new_ccb), M_CAMXPT, M_INTWAIT);
	return (new_ccb);
}

void
xpt_free_ccb(union ccb *free_ccb)
{
	kfree(free_ccb, M_CAMXPT);
}
/* Private XPT functions */

/*
 * Get a CAM control block for the caller.  Charge the structure to the
 * device referenced by the path.  If this device has no 'credits' then the
 * device already has the maximum number of outstanding operations under way
 * and we return NULL.  If we don't have sufficient resources to allocate
 * more ccbs, we also return NULL.
 */
static union ccb *
xpt_get_ccb(struct cam_ed *device)
{
	union ccb *new_ccb;

	crit_enter();
	if ((new_ccb = (union ccb *)SLIST_FIRST(&ccb_freeq)) == NULL) {
		new_ccb = kmalloc(sizeof(*new_ccb), M_CAMXPT, M_INTWAIT);
		SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
				  xpt_links.sle);
	}
	cam_ccbq_take_opening(&device->ccbq);
	SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
	crit_exit();
	return (new_ccb);
}
void
xpt_release_bus(struct cam_eb *bus)
{
	crit_enter();
	if (bus->refcount == 1) {
		KKASSERT(TAILQ_FIRST(&bus->et_entries) == NULL);
		TAILQ_REMOVE(&xpt_busses, bus, links);
		if (bus->sim) {
			cam_sim_release(bus->sim, 0);
			bus->sim = NULL;
		}
		bus_generation++;
		KKASSERT(bus->refcount == 1);
		kfree(bus, M_CAMXPT);
	} else {
		--bus->refcount;
	}
	crit_exit();
}
static struct cam_et *
xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
{
	struct cam_et *target;
	struct cam_et *cur_target;

	target = kmalloc(sizeof(*target), M_CAMXPT, M_INTWAIT);

	TAILQ_INIT(&target->ed_entries);
	target->bus = bus;
	target->target_id = target_id;
	target->refcount = 1;
	target->generation = 0;
	timevalclear(&target->last_reset);
	/*
	 * Hold a reference to our parent bus so it
	 * will not go away before we do.
	 */
	bus->refcount++;

	/* Insertion sort into our bus's target list */
	cur_target = TAILQ_FIRST(&bus->et_entries);
	while (cur_target != NULL && cur_target->target_id < target_id)
		cur_target = TAILQ_NEXT(cur_target, links);

	if (cur_target != NULL) {
		TAILQ_INSERT_BEFORE(cur_target, target, links);
	} else {
		TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
	}
	bus->generation++;
	return (target);
}
void
xpt_release_target(struct cam_eb *bus, struct cam_et *target)
{
	crit_enter();
	if (target->refcount == 1) {
		KKASSERT(TAILQ_FIRST(&target->ed_entries) == NULL);
		TAILQ_REMOVE(&bus->et_entries, target, links);
		bus->generation++;
		xpt_release_bus(bus);
		KKASSERT(target->refcount == 1);
		kfree(target, M_CAMXPT);
	} else {
		--target->refcount;
	}
	crit_exit();
}
static struct cam_ed *
xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
{
#ifdef CAM_NEW_TRAN_CODE
	struct cam_path path;
#endif /* CAM_NEW_TRAN_CODE */
	struct cam_ed *device;
	struct cam_devq *devq;
	cam_status status;

	if (SIM_DEAD(bus->sim))
		return (NULL);

	/* Make space for us in the device queue on our bus */
	if (bus->sim->devq == NULL)
		return (NULL);
	devq = bus->sim->devq;
	status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);

	if (status != CAM_REQ_CMP) {
		device = NULL;
	} else {
		device = kmalloc(sizeof(*device), M_CAMXPT, M_INTWAIT);
	}

	if (device != NULL) {
		struct cam_ed *cur_device;

		cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
		device->alloc_ccb_entry.device = device;
		cam_init_pinfo(&device->send_ccb_entry.pinfo);
		device->send_ccb_entry.device = device;
		device->target = target;
		device->lun_id = lun_id;
		/* Initialize our queues */
		if (camq_init(&device->drvq, 0) != 0) {
			kfree(device, M_CAMXPT);
			return (NULL);
		}
		if (cam_ccbq_init(&device->ccbq,
				  bus->sim->max_dev_openings) != 0) {
			camq_fini(&device->drvq);
			kfree(device, M_CAMXPT);
			return (NULL);
		}
		SLIST_INIT(&device->asyncs);
		SLIST_INIT(&device->periphs);
		device->generation = 0;
		device->owner = NULL;
		/*
		 * Take the default quirk entry until we have inquiry
		 * data and can determine a better quirk to use.
		 */
		device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
		bzero(&device->inq_data, sizeof(device->inq_data));
		device->inq_flags = 0;
		device->queue_flags = 0;
		device->serial_num = NULL;
		device->serial_num_len = 0;
		device->qfrozen_cnt = 0;
		device->flags = CAM_DEV_UNCONFIGURED;
		device->tag_delay_count = 0;
		device->tag_saved_openings = 0;
		device->refcount = 1;
		callout_init(&device->c_handle);

		/*
		 * Hold a reference to our parent target so it
		 * will not go away before we do.
		 */
		target->refcount++;

		/*
		 * XXX should be limited by number of CCBs this bus can
		 * do.
		 */
		xpt_max_ccbs += device->ccbq.devq_openings;
		/* Insertion sort into our target's device list */
		cur_device = TAILQ_FIRST(&target->ed_entries);
		while (cur_device != NULL && cur_device->lun_id < lun_id)
			cur_device = TAILQ_NEXT(cur_device, links);
		if (cur_device != NULL) {
			TAILQ_INSERT_BEFORE(cur_device, device, links);
		} else {
			TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
		}
		target->generation++;
#ifdef CAM_NEW_TRAN_CODE
		if (lun_id != CAM_LUN_WILDCARD) {
			xpt_compile_path(&path,
					 NULL,
					 bus->path_id,
					 target->target_id,
					 lun_id);
			xpt_devise_transport(&path);
			xpt_release_path(&path);
		}
#endif /* CAM_NEW_TRAN_CODE */
	}
	return (device);
}
static void
xpt_reference_device(struct cam_ed *device)
{
	++device->refcount;
}
static void
xpt_release_device(struct cam_eb *bus, struct cam_et *target,
		   struct cam_ed *device)
{
	struct cam_devq *devq;

	crit_enter();
	if (device->refcount == 1) {
		KKASSERT(device->flags & CAM_DEV_UNCONFIGURED);

		if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
		 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
			panic("Removing device while still queued for ccbs");

		if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
			device->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
			callout_stop(&device->c_handle);
		}

		TAILQ_REMOVE(&target->ed_entries, device,links);
		target->generation++;
		xpt_max_ccbs -= device->ccbq.devq_openings;
		if (!SIM_DEAD(bus->sim)) {
			/* Release our slot in the devq */
			devq = bus->sim->devq;
			cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
		}
		camq_fini(&device->drvq);
		camq_fini(&device->ccbq.queue);
		xpt_release_target(bus, target);
		KKASSERT(device->refcount == 1);
		kfree(device, M_CAMXPT);
	} else {
		--device->refcount;
	}
	crit_exit();
}
static u_int32_t
xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
{
	int diff;
	int result;
	struct cam_ed *dev;

	dev = path->device;

	crit_enter();

	diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
	result = cam_ccbq_resize(&dev->ccbq, newopenings);
	if (result == CAM_REQ_CMP && (diff < 0)) {
		dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
	}
	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
	 || (dev->inq_flags & SID_CmdQue) != 0)
		dev->tag_saved_openings = newopenings;
	/* Adjust the global limit */
	xpt_max_ccbs += diff;
	crit_exit();
	return (result);
}
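
/*
 * Note: xpt_dev_ccbq_resize() is the hook both xpt_start_tags() and the
 * tag-disable path in xpt_set_transfer_settings() use to grow or shrink a
 * device's opening count; CAM_DEV_RESIZE_QUEUE_NEEDED records that a shrink
 * could not be applied while commands were still outstanding.
 */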
static struct cam_eb *
xpt_find_bus(path_id_t path_id)
{
	struct cam_eb *bus;

	TAILQ_FOREACH(bus, &xpt_busses, links) {
		if (bus->path_id == path_id) {
			bus->refcount++;
			break;
		}
	}
	return (bus);
}

static struct cam_et *
xpt_find_target(struct cam_eb *bus, target_id_t target_id)
{
	struct cam_et *target;

	TAILQ_FOREACH(target, &bus->et_entries, links) {
		if (target->target_id == target_id) {
			target->refcount++;
			break;
		}
	}
	return (target);
}

static struct cam_ed *
xpt_find_device(struct cam_et *target, lun_id_t lun_id)
{
	struct cam_ed *device;

	TAILQ_FOREACH(device, &target->ed_entries, links) {
		if (device->lun_id == lun_id) {
			device->refcount++;
			break;
		}
	}
	return (device);
}
typedef struct {
	union	ccb *request_ccb;
	struct	ccb_pathinq *cpi;
	int	counter;
} xpt_scan_bus_info;
/*
 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
 * As the scan progresses, xpt_scan_bus is used as the
 * callback on completion function.
 */
static void
xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
{
	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
		  ("xpt_scan_bus\n"));
	switch (request_ccb->ccb_h.func_code) {
	case XPT_SCAN_BUS:
	{
		xpt_scan_bus_info *scan_info;
		union	ccb *work_ccb;
		struct	cam_path *path;
		u_int	i;
		u_int	max_target;
		u_int	initiator_id;

		/* Find out the characteristics of the bus */
		work_ccb = xpt_alloc_ccb();
		xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
			      request_ccb->ccb_h.pinfo.priority);
		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
		xpt_action(work_ccb);
		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
			request_ccb->ccb_h.status = work_ccb->ccb_h.status;
			xpt_free_ccb(work_ccb);
			xpt_done(request_ccb);
			return;
		}

		if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
			/*
			 * Can't scan the bus on an adapter that
			 * cannot perform the initiator role.
			 */
			request_ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_free_ccb(work_ccb);
			xpt_done(request_ccb);
			return;
		}

		/* Save some state for use while we probe for devices */
		scan_info = (xpt_scan_bus_info *)
		    kmalloc(sizeof(xpt_scan_bus_info), M_TEMP, M_INTWAIT);
		scan_info->request_ccb = request_ccb;
		scan_info->cpi = &work_ccb->cpi;

		/* Cache on our stack so we can work asynchronously */
		max_target = scan_info->cpi->max_target;
		initiator_id = scan_info->cpi->initiator_id;

		/*
		 * We can scan all targets in parallel, or do it sequentially.
		 */
		if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
			max_target = 0;
			scan_info->counter = 0;
		} else {
			scan_info->counter = scan_info->cpi->max_target + 1;
			if (scan_info->cpi->initiator_id < scan_info->counter) {
				scan_info->counter--;
			}
		}

		for (i = 0; i <= max_target; i++) {
			cam_status status;

			if (i == initiator_id)
				continue;

			status = xpt_create_path(&path, xpt_periph,
						 request_ccb->ccb_h.path_id,
						 i, 0);
			if (status != CAM_REQ_CMP) {
				kprintf("xpt_scan_bus: xpt_create_path failed"
					" with status %#x, bus scan halted\n",
					status);
				kfree(scan_info, M_TEMP);
				request_ccb->ccb_h.status = status;
				xpt_free_ccb(work_ccb);
				xpt_done(request_ccb);
				break;
			}
			work_ccb = xpt_alloc_ccb();
			xpt_setup_ccb(&work_ccb->ccb_h, path,
				      request_ccb->ccb_h.pinfo.priority);
			work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
			work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
			work_ccb->ccb_h.ppriv_ptr0 = scan_info;
			work_ccb->crcn.flags = request_ccb->crcn.flags;
			xpt_action(work_ccb);
		}
		break;
	}
	case XPT_SCAN_LUN:
	{
		cam_status status;
		struct cam_path *path;
		xpt_scan_bus_info *scan_info;
		path_id_t path_id;
		target_id_t target_id;
		lun_id_t lun_id;

		/* Reuse the same CCB to query if a device was really found */
		scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
		xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
			      request_ccb->ccb_h.pinfo.priority);
		request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;

		path_id = request_ccb->ccb_h.path_id;
		target_id = request_ccb->ccb_h.target_id;
		lun_id = request_ccb->ccb_h.target_lun;
		xpt_action(request_ccb);

		if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
			struct cam_ed *device;
			struct cam_et *target;
			int phl;

			/*
			 * If we already probed lun 0 successfully, or
			 * we have additional configured luns on this
			 * target that might have "gone away", go on to
			 * the next lun.
			 */
			target = request_ccb->ccb_h.path->target;
			/*
			 * We may touch devices that we don't
			 * hold references to, so ensure they
			 * don't disappear out from under us.
			 * The target above is referenced by the
			 * path in the request ccb.
			 */
			phl = 0;
			crit_enter();
			device = TAILQ_FIRST(&target->ed_entries);
			if (device != NULL) {
				phl = CAN_SRCH_HI_SPARSE(device);
				if (device->lun_id == 0)
					device = TAILQ_NEXT(device, links);
			}
			crit_exit();
			if ((lun_id != 0) || (device != NULL)) {
				if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
					lun_id++;
			}
		} else {
			struct cam_ed *device;

			device = request_ccb->ccb_h.path->device;

			if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
				/* Try the next lun */
				if (lun_id < (CAM_SCSI2_MAXLUN-1)
				 || CAN_SRCH_HI_DENSE(device))
					lun_id++;
			}
		}

		/*
		 * Free the current request path- we're done with it.
		 */
		xpt_free_path(request_ccb->ccb_h.path);

		/*
		 * Check to see if we scan any further luns.
		 */
		if (lun_id == request_ccb->ccb_h.target_lun
		 || lun_id > scan_info->cpi->max_lun) {
			int done;

 hop_again:
			done = 0;
			if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
				scan_info->counter++;
				if (scan_info->counter ==
				    scan_info->cpi->initiator_id) {
					scan_info->counter++;
				}
				if (scan_info->counter >=
				    scan_info->cpi->max_target+1) {
					done = 1;
				}
			} else {
				scan_info->counter--;
				if (scan_info->counter == 0) {
					done = 1;
				}
			}
			if (done) {
				xpt_free_ccb(request_ccb);
				xpt_free_ccb((union ccb *)scan_info->cpi);
				request_ccb = scan_info->request_ccb;
				kfree(scan_info, M_TEMP);
				request_ccb->ccb_h.status = CAM_REQ_CMP;
				xpt_done(request_ccb);
				break;
			}

			if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0)
				break;

			status = xpt_create_path(&path, xpt_periph,
				    scan_info->request_ccb->ccb_h.path_id,
				    scan_info->counter, 0);
			if (status != CAM_REQ_CMP) {
				kprintf("xpt_scan_bus: xpt_create_path failed"
					" with status %#x, bus scan halted\n",
					status);
				xpt_free_ccb(request_ccb);
				xpt_free_ccb((union ccb *)scan_info->cpi);
				request_ccb = scan_info->request_ccb;
				kfree(scan_info, M_TEMP);
				request_ccb->ccb_h.status = status;
				xpt_done(request_ccb);
				break;
			}
			xpt_setup_ccb(&request_ccb->ccb_h, path,
				      request_ccb->ccb_h.pinfo.priority);
			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
			request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
			request_ccb->crcn.flags =
			    scan_info->request_ccb->crcn.flags;
		} else {
			status = xpt_create_path(&path, xpt_periph,
						 path_id, target_id, lun_id);
			if (status != CAM_REQ_CMP) {
				kprintf("xpt_scan_bus: xpt_create_path failed "
					"with status %#x, halting LUN scan\n",
					status);
				goto hop_again;
			}
			xpt_setup_ccb(&request_ccb->ccb_h, path,
				      request_ccb->ccb_h.pinfo.priority);
			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
			request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
			request_ccb->crcn.flags =
			    scan_info->request_ccb->crcn.flags;
		}
		xpt_action(request_ccb);
		break;
	}
	default:
		break;
	}
}
typedef enum {
	PROBE_TUR,
	PROBE_INQUIRY,
	PROBE_FULL_INQUIRY,
	PROBE_MODE_SENSE,
	PROBE_SERIAL_NUM,
	PROBE_TUR_FOR_NEGOTIATION
} probe_action;

typedef enum {
	PROBE_INQUIRY_CKSUM	= 0x01,
	PROBE_SERIAL_CKSUM	= 0x02,
	PROBE_NO_ANNOUNCE	= 0x04
} probe_flags;

typedef struct {
	TAILQ_HEAD(, ccb_hdr) request_ccbs;
	probe_action	action;
	union ccb	saved_ccb;
	probe_flags	flags;
	MD5_CTX		context;
	u_int8_t	digest[16];
} probe_softc;
static void
xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
	     cam_flags flags, union ccb *request_ccb)
{
	struct ccb_pathinq cpi;
	cam_status status;
	struct cam_path *new_path;
	struct cam_periph *old_periph;

	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
		  ("xpt_scan_lun\n"));

	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	if (cpi.ccb_h.status != CAM_REQ_CMP) {
		if (request_ccb != NULL) {
			request_ccb->ccb_h.status = cpi.ccb_h.status;
			xpt_done(request_ccb);
		}
		return;
	}

	if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
		/*
		 * Can't scan the bus on an adapter that
		 * cannot perform the initiator role.
		 */
		if (request_ccb != NULL) {
			request_ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_done(request_ccb);
		}
		return;
	}

	if (request_ccb == NULL) {
		request_ccb = kmalloc(sizeof(union ccb), M_TEMP, M_INTWAIT);
		new_path = kmalloc(sizeof(*new_path), M_TEMP, M_INTWAIT);
		status = xpt_compile_path(new_path, xpt_periph,
					  path->bus->path_id,
					  path->target->target_id,
					  path->device->lun_id);

		if (status != CAM_REQ_CMP) {
			xpt_print_path(path);
			kprintf("xpt_scan_lun: can't compile path, can't "
				"continue\n");
			kfree(request_ccb, M_TEMP);
			kfree(new_path, M_TEMP);
			return;
		}
		xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
		request_ccb->ccb_h.cbfcnp = xptscandone;
		request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
		request_ccb->crcn.flags = flags;
	}

	crit_enter();
	if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
		probe_softc *softc;

		softc = (probe_softc *)old_periph->softc;
		TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
				  periph_links.tqe);
	} else {
		status = cam_periph_alloc(proberegister, NULL, probecleanup,
					  probestart, "probe",
					  CAM_PERIPH_BIO,
					  request_ccb->ccb_h.path, NULL, 0,
					  request_ccb);

		if (status != CAM_REQ_CMP) {
			xpt_print_path(path);
			kprintf("xpt_scan_lun: cam_alloc_periph returned an "
				"error, can't continue probe\n");
			request_ccb->ccb_h.status = status;
			xpt_done(request_ccb);
		}
	}
	crit_exit();
}
static void
xptscandone(struct cam_periph *periph, union ccb *done_ccb)
{
	xpt_release_path(done_ccb->ccb_h.path);
	kfree(done_ccb->ccb_h.path, M_TEMP);
	kfree(done_ccb, M_TEMP);
}
static cam_status
proberegister(struct cam_periph *periph, void *arg)
{
	union ccb *request_ccb;	/* CCB representing the probe request */
	probe_softc *softc;

	request_ccb = (union ccb *)arg;
	if (periph == NULL) {
		kprintf("proberegister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (request_ccb == NULL) {
		kprintf("proberegister: no probe CCB, "
			"can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = kmalloc(sizeof(*softc), M_TEMP, M_INTWAIT | M_ZERO);
	TAILQ_INIT(&softc->request_ccbs);
	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
			  periph_links.tqe);
	periph->softc = softc;
	cam_periph_acquire(periph);
	/*
	 * Ensure we've waited at least a bus settle
	 * delay before attempting to probe the device.
	 * For HBAs that don't do bus resets, this won't make a difference.
	 */
	cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
				      SCSI_DELAY);
	probeschedule(periph);
	return(CAM_REQ_CMP);
}
static void
probeschedule(struct cam_periph *periph)
{
	struct ccb_pathinq cpi;
	union ccb *ccb;
	probe_softc *softc;

	softc = (probe_softc *)periph->softc;
	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);

	xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	/*
	 * If a device has gone away and another device, or the same one,
	 * is back in the same place, it should have a unit attention
	 * condition pending.  It will not report the unit attention in
	 * response to an inquiry, which may leave invalid transfer
	 * negotiations in effect.  The TUR will reveal the unit attention
	 * condition.  Only send the TUR for lun 0, since some devices
	 * will get confused by commands other than inquiry to non-existent
	 * luns.  If you think a device has gone away start your scan from
	 * lun 0.  This will ensure that any bogus transfer settings are
	 * invalidated.
	 *
	 * If we haven't seen the device before and the controller supports
	 * some kind of transfer negotiation, negotiate with the first
	 * sent command if no bus reset was performed at startup.  This
	 * ensures that the device is not confused by transfer negotiation
	 * settings left over by loader or BIOS action.
	 */
	if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
	 && (ccb->ccb_h.target_lun == 0)) {
		softc->action = PROBE_TUR;
	} else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
	      && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
		proberequestdefaultnegotiation(periph);
		softc->action = PROBE_INQUIRY;
	} else {
		softc->action = PROBE_INQUIRY;
	}

	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
		softc->flags |= PROBE_NO_ANNOUNCE;
	else
		softc->flags &= ~PROBE_NO_ANNOUNCE;

	xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
}
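
/*
 * Probe state machine summary (driven by probestart()/probedone() below):
 * PROBE_TUR -> PROBE_INQUIRY -> [PROBE_FULL_INQUIRY] -> PROBE_MODE_SENSE
 * (only if the device reports tagged queuing) -> PROBE_SERIAL_NUM ->
 * PROBE_TUR_FOR_NEGOTIATION, after which the probe periph announces the
 * device (unless PROBE_NO_ANNOUNCE is set) and completes the request ccbs
 * queued on softc->request_ccbs.
 */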
static void
probestart(struct cam_periph *periph, union ccb *start_ccb)
{
	/* Probe the device that our peripheral driver points to */
	struct ccb_scsiio *csio;
	probe_softc *softc;

	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));

	softc = (probe_softc *)periph->softc;
	csio = &start_ccb->csio;

	switch (softc->action) {
	case PROBE_TUR:
	case PROBE_TUR_FOR_NEGOTIATION:
	{
		scsi_test_unit_ready(csio, /*retries*/4, probedone,
				     MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE,
				     /*timeout*/60000);
		break;
	}
	case PROBE_INQUIRY:
	case PROBE_FULL_INQUIRY:
	{
		u_int inquiry_len;
		struct scsi_inquiry_data *inq_buf;

		inq_buf = &periph->path->device->inq_data;
		/*
		 * If the device is currently configured, we calculate an
		 * MD5 checksum of the inquiry data, and if the serial number
		 * length is greater than 0, add the serial number data
		 * into the checksum as well.  Once the inquiry and the
		 * serial number check finish, we attempt to figure out
		 * whether we still have the same device.
		 */
		if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {

			MD5Init(&softc->context);
			MD5Update(&softc->context, (unsigned char *)inq_buf,
				  sizeof(struct scsi_inquiry_data));
			softc->flags |= PROBE_INQUIRY_CKSUM;
			if (periph->path->device->serial_num_len > 0) {
				MD5Update(&softc->context,
					  periph->path->device->serial_num,
					  periph->path->device->serial_num_len);
				softc->flags |= PROBE_SERIAL_CKSUM;
			}
			MD5Final(softc->digest, &softc->context);
		}

		if (softc->action == PROBE_INQUIRY)
			inquiry_len = SHORT_INQUIRY_LENGTH;
		else
			inquiry_len = inq_buf->additional_length
				    + offsetof(struct scsi_inquiry_data,
					       additional_length) + 1;

		/*
		 * Some parallel SCSI devices fail to send an
		 * ignore wide residue message when dealing with
		 * odd length inquiry requests.  Round up to be
		 * safe.
		 */
		inquiry_len = roundup2(inquiry_len, 2);

		scsi_inquiry(csio, /*retries*/4, probedone, MSG_SIMPLE_Q_TAG,
			     (u_int8_t *)inq_buf, inquiry_len,
			     /*evpd*/FALSE, /*page_code*/0, SSD_FULL_SIZE,
			     /*timeout*/60 * 1000);
		break;
	}
	case PROBE_MODE_SENSE:
	{
		void *mode_buf;
		int mode_buf_len;

		mode_buf_len = sizeof(struct scsi_mode_header_6)
			     + sizeof(struct scsi_mode_blk_desc)
			     + sizeof(struct scsi_control_page);
		mode_buf = kmalloc(mode_buf_len, M_TEMP, M_INTWAIT);
		scsi_mode_sense(csio, /*retries*/4, probedone,
				MSG_SIMPLE_Q_TAG, /*dbd*/FALSE,
				SMS_PAGE_CTRL_CURRENT,
				SMS_CONTROL_MODE_PAGE,
				mode_buf, mode_buf_len, SSD_FULL_SIZE,
				/*timeout*/60000);
		break;
	}
	case PROBE_SERIAL_NUM:
	{
		struct scsi_vpd_unit_serial_number *serial_buf;
		struct cam_ed *device;

		device = periph->path->device;
		device->serial_num = NULL;
		device->serial_num_len = 0;

		if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0) {
			serial_buf = kmalloc(sizeof(*serial_buf), M_TEMP,
					     M_INTWAIT | M_ZERO);
			scsi_inquiry(csio, /*retries*/4, probedone,
				     MSG_SIMPLE_Q_TAG,
				     (u_int8_t *)serial_buf,
				     sizeof(*serial_buf),
				     /*evpd*/TRUE,
				     SVPD_UNIT_SERIAL_NUMBER,
				     SSD_FULL_SIZE,
				     /*timeout*/60 * 1000);
			break;
		}
		/*
		 * We'll have to do without, let our probedone
		 * routine finish up for us.
		 */
		start_ccb->csio.data_ptr = NULL;
		probedone(periph, start_ccb);
		return;
	}
	}
	xpt_action(start_ccb);
}
static void
proberequestdefaultnegotiation(struct cam_periph *periph)
{
	struct ccb_trans_settings cts;

	xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
#ifdef CAM_NEW_TRAN_CODE
	cts.type = CTS_TYPE_USER_SETTINGS;
#else /* CAM_NEW_TRAN_CODE */
	cts.flags = CCB_TRANS_USER_SETTINGS;
#endif /* CAM_NEW_TRAN_CODE */
	xpt_action((union ccb *)&cts);
	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		return;
	}
	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
#ifdef CAM_NEW_TRAN_CODE
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
#else /* CAM_NEW_TRAN_CODE */
	cts.flags &= ~CCB_TRANS_USER_SETTINGS;
	cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
#endif /* CAM_NEW_TRAN_CODE */
	xpt_action((union ccb *)&cts);
}
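
/*
 * The helper above simply reads back the user (default) transfer settings
 * and pushes them as the current settings; this is what arms the SIM to
 * renegotiate on the next command sent to a freshly found device.
 */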
static void
probedone(struct cam_periph *periph, union ccb *done_ccb)
{
	probe_softc *softc;
	struct cam_path *path;
	u_int32_t priority;

	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));

	softc = (probe_softc *)periph->softc;
	path = done_ccb->ccb_h.path;
	priority = done_ccb->ccb_h.pinfo.priority;

	switch (softc->action) {
	case PROBE_TUR:
	{
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {

			if (cam_periph_error(done_ccb, 0,
					     SF_NO_PRINT, NULL) == ERESTART)
				return;
			else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				/* Don't wedge the queue */
				xpt_release_devq(done_ccb->ccb_h.path,
						 /*count*/1,
						 /*run_queue*/TRUE);
		}
		softc->action = PROBE_INQUIRY;
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		return;
	}
	case PROBE_INQUIRY:
	case PROBE_FULL_INQUIRY:
	{
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			struct scsi_inquiry_data *inq_buf;
			u_int8_t periph_qual;

			path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
			inq_buf = &path->device->inq_data;

			periph_qual = SID_QUAL(inq_buf);

			switch(periph_qual) {
			case SID_QUAL_LU_CONNECTED:
			{
				u_int8_t len;

				/*
				 * We conservatively request only
				 * SHORT_INQUIRY_LEN bytes of inquiry
				 * information during our first try
				 * at sending an INQUIRY. If the device
				 * has more information to give,
				 * perform a second request specifying
				 * the amount of information the device
				 * is willing to give.
				 */
				len = inq_buf->additional_length
				    + offsetof(struct scsi_inquiry_data,
					       additional_length) + 1;
				if (softc->action == PROBE_INQUIRY
				 && len > SHORT_INQUIRY_LENGTH) {
					softc->action = PROBE_FULL_INQUIRY;
					xpt_release_ccb(done_ccb);
					xpt_schedule(periph, priority);
					return;
				}

				xpt_find_quirk(path->device);

#ifdef CAM_NEW_TRAN_CODE
				xpt_devise_transport(path);
#endif /* CAM_NEW_TRAN_CODE */
				if (INQ_DATA_TQ_ENABLED(inq_buf))
					softc->action = PROBE_MODE_SENSE;
				else
					softc->action = PROBE_SERIAL_NUM;

				path->device->flags &= ~CAM_DEV_UNCONFIGURED;
				xpt_reference_device(path->device);

				xpt_release_ccb(done_ccb);
				xpt_schedule(periph, priority);
				return;
			}
			default:
				break;
			}
		} else if (cam_periph_error(done_ccb, 0,
					    done_ccb->ccb_h.target_lun > 0
					    ? SF_RETRY_UA|SF_QUIET_IR
					    : SF_RETRY_UA,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}
		/*
		 * If we get to this point, we got an error status back
		 * from the inquiry and the error status doesn't require
		 * automatically retrying the command.  Therefore, the
		 * inquiry failed.  If we had inquiry information before
		 * for this device, but this latest inquiry command failed,
		 * the device has probably gone away.  If this device isn't
		 * already marked unconfigured, notify the peripheral
		 * drivers that this device is no more.
		 */
		if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
			/* Send the async notification. */
			xpt_async(AC_LOST_DEVICE, path, NULL);
		}

		xpt_release_ccb(done_ccb);
		break;
	}
	case PROBE_MODE_SENSE:
	{
		struct ccb_scsiio *csio;
		struct scsi_mode_header_6 *mode_hdr;

		csio = &done_ccb->csio;
		mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			struct scsi_control_page *page;
			u_int8_t *offset;

			offset = ((u_int8_t *)&mode_hdr[1])
			    + mode_hdr->blk_desc_len;
			page = (struct scsi_control_page *)offset;
			path->device->queue_flags = page->queue_flags;
		} else if (cam_periph_error(done_ccb, 0,
					    SF_RETRY_UA|SF_NO_PRINT,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path,
					 /*count*/1, /*run_queue*/TRUE);
		}
		xpt_release_ccb(done_ccb);
		kfree(mode_hdr, M_TEMP);
		softc->action = PROBE_SERIAL_NUM;
		xpt_schedule(periph, priority);
		return;
	}
	case PROBE_SERIAL_NUM:
	{
		struct ccb_scsiio *csio;
		struct scsi_vpd_unit_serial_number *serial_buf;
		int changed;
		int have_serialnum;

		changed = 1;
		have_serialnum = 0;
		csio = &done_ccb->csio;
		priority = done_ccb->ccb_h.pinfo.priority;
		serial_buf =
		    (struct scsi_vpd_unit_serial_number *)csio->data_ptr;

		/* Clean up from previous instance of this device */
		if (path->device->serial_num != NULL) {
			kfree(path->device->serial_num, M_CAMXPT);
			path->device->serial_num = NULL;
			path->device->serial_num_len = 0;
		}

		if (serial_buf == NULL) {
			/*
			 * Don't process the command as it was never sent
			 */
		} else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
			&& (serial_buf->length > 0)) {

			have_serialnum = 1;
			path->device->serial_num =
				kmalloc((serial_buf->length + 1),
					M_CAMXPT, M_INTWAIT);
			bcopy(serial_buf->serial_num,
			      path->device->serial_num,
			      serial_buf->length);
			path->device->serial_num_len = serial_buf->length;
			path->device->serial_num[serial_buf->length] = '\0';
		} else if (cam_periph_error(done_ccb, 0,
					    SF_RETRY_UA|SF_NO_PRINT,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}

		/*
		 * Let's see if we have seen this device before.
		 */
		if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
			MD5_CTX context;
			u_int8_t digest[16];

			MD5Init(&context);

			MD5Update(&context,
				  (unsigned char *)&path->device->inq_data,
				  sizeof(struct scsi_inquiry_data));

			if (have_serialnum)
				MD5Update(&context, serial_buf->serial_num,
					  serial_buf->length);

			MD5Final(digest, &context);
			if (bcmp(softc->digest, digest, 16) == 0)
				changed = 0;

			/*
			 * XXX Do we need to do a TUR in order to ensure
			 *     that the device really hasn't changed???
			 */
			if ((changed != 0)
			 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
				xpt_async(AC_LOST_DEVICE, path, NULL);
		}
		if (serial_buf != NULL)
			kfree(serial_buf, M_TEMP);

		if (changed != 0) {
			/*
			 * Now that we have all the necessary
			 * information to safely perform transfer
			 * negotiations... Controllers don't perform
			 * any negotiation or tagged queuing until
			 * after the first XPT_SET_TRAN_SETTINGS ccb is
			 * received.  So, on a new device, just retrieve
			 * the user settings, and set them as the current
			 * settings to set the device up.
			 */
			proberequestdefaultnegotiation(periph);
			xpt_release_ccb(done_ccb);

			/*
			 * Perform a TUR to allow the controller to
			 * perform any necessary transfer negotiation.
			 */
			softc->action = PROBE_TUR_FOR_NEGOTIATION;
			xpt_schedule(periph, priority);
			return;
		}
		xpt_release_ccb(done_ccb);
		break;
	}
	case PROBE_TUR_FOR_NEGOTIATION:
		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}

		path->device->flags &= ~CAM_DEV_UNCONFIGURED;
		xpt_reference_device(path->device);

		if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
			/* Inform the XPT that a new device has been found */
			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action(done_ccb);

			xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
				  done_ccb);
		}
		xpt_release_ccb(done_ccb);
		break;
	}
	done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
	TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
	done_ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(done_ccb);
	if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
		cam_periph_invalidate(periph);
		cam_periph_release(periph);
	} else {
		probeschedule(periph);
	}
}

static void
probecleanup(struct cam_periph *periph)
{
	kfree(periph->softc, M_TEMP);
}
static void
xpt_find_quirk(struct cam_ed *device)
{
	caddr_t match;

	match = cam_quirkmatch((caddr_t)&device->inq_data,
			       (caddr_t)xpt_quirk_table,
			       sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
			       sizeof(*xpt_quirk_table), scsi_inquiry_match);

	if (match == NULL)
		panic("xpt_find_quirk: device didn't match wildcard entry!!");

	device->quirk = (struct xpt_quirk_entry *)match;
}
static int
sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS)
{
	int error, bool;

	bool = cam_srch_hi;
	error = sysctl_handle_int(oidp, &bool, sizeof(bool), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (bool == 0 || bool == 1) {
		cam_srch_hi = bool;
		return (0);
	}
	return (EINVAL);
}
#ifdef CAM_NEW_TRAN_CODE

static void
xpt_devise_transport(struct cam_path *path)
{
	struct ccb_pathinq cpi;
	struct ccb_trans_settings cts;
	struct scsi_inquiry_data *inq_buf;

	/* Get transport information from the SIM */
	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	inq_buf = NULL;
	if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
		inq_buf = &path->device->inq_data;
	path->device->protocol = PROTO_SCSI;
	path->device->protocol_version =
	    inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
	path->device->transport = cpi.transport;
	path->device->transport_version = cpi.transport_version;

	/*
	 * Any device not using SPI3 features should
	 * be considered SPI2 or lower.
	 */
	if (inq_buf != NULL) {
		if (path->device->transport == XPORT_SPI
		 && (inq_buf->spi3data & SID_SPI_MASK) == 0
		 && path->device->transport_version > 2)
			path->device->transport_version = 2;
	} else {
		struct cam_ed *otherdev;

		for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
		     otherdev != NULL;
		     otherdev = TAILQ_NEXT(otherdev, links)) {
			if (otherdev != path->device)
				break;
		}

		if (otherdev != NULL) {
			/*
			 * Initially assume the same versioning as
			 * prior luns for this target.
			 */
			path->device->protocol_version =
			    otherdev->protocol_version;
			path->device->transport_version =
			    otherdev->transport_version;
		} else {
			/* Until we know better, opt for safety */
			path->device->protocol_version = 2;
			if (path->device->transport == XPORT_SPI)
				path->device->transport_version = 2;
			else
				path->device->transport_version = 0;
		}
	}

	/*
	 * XXX
	 * For a device compliant with SPC-2 we should be able
	 * to determine the transport version supported by
	 * scrutinizing the version descriptors in the
	 * inquiry buffer.
	 */

	/* Tell the controller what we think */
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	cts.transport = path->device->transport;
	cts.transport_version = path->device->transport_version;
	cts.protocol = path->device->protocol;
	cts.protocol_version = path->device->protocol_version;
	cts.proto_specific.valid = 0;
	cts.xport_specific.valid = 0;
	xpt_action((union ccb *)&cts);
}
static void
xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
			  int async_update)
{
	struct ccb_pathinq cpi;
	struct ccb_trans_settings cur_cts;
	struct ccb_trans_settings_scsi *scsi;
	struct ccb_trans_settings_scsi *cur_scsi;
	struct cam_sim *sim;
	struct scsi_inquiry_data *inq_data;

	if (device == NULL) {
		cts->ccb_h.status = CAM_PATH_INVALID;
		xpt_done((union ccb *)cts);
		return;
	}

	if (cts->protocol == PROTO_UNKNOWN
	 || cts->protocol == PROTO_UNSPECIFIED) {
		cts->protocol = device->protocol;
		cts->protocol_version = device->protocol_version;
	}

	if (cts->protocol_version == PROTO_VERSION_UNKNOWN
	 || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
		cts->protocol_version = device->protocol_version;

	if (cts->protocol != device->protocol) {
		xpt_print_path(cts->ccb_h.path);
		printf("Uninitialized Protocol %x:%x?\n",
		       cts->protocol, device->protocol);
		cts->protocol = device->protocol;
	}

	if (cts->protocol_version > device->protocol_version) {
		if (bootverbose) {
			xpt_print_path(cts->ccb_h.path);
			printf("Down reving Protocol Version from %d to %d?\n",
			       cts->protocol_version, device->protocol_version);
		}
		cts->protocol_version = device->protocol_version;
	}

	if (cts->transport == XPORT_UNKNOWN
	 || cts->transport == XPORT_UNSPECIFIED) {
		cts->transport = device->transport;
		cts->transport_version = device->transport_version;
	}

	if (cts->transport_version == XPORT_VERSION_UNKNOWN
	 || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
		cts->transport_version = device->transport_version;

	if (cts->transport != device->transport) {
		xpt_print_path(cts->ccb_h.path);
		printf("Uninitialized Transport %x:%x?\n",
		       cts->transport, device->transport);
		cts->transport = device->transport;
	}

	if (cts->transport_version > device->transport_version) {
		if (bootverbose) {
			xpt_print_path(cts->ccb_h.path);
			printf("Down reving Transport Version from %d to %d?\n",
			       cts->transport_version,
			       device->transport_version);
		}
		cts->transport_version = device->transport_version;
	}

	sim = cts->ccb_h.path->bus->sim;

	/*
	 * Nothing more of interest to do unless
	 * this is a device connected via the
	 * SCSI protocol.
	 */
	if (cts->protocol != PROTO_SCSI) {
		if (async_update == FALSE)
			(*(sim->sim_action))(sim, (union ccb *)cts);
		return;
	}

	inq_data = &device->inq_data;
	scsi = &cts->proto_specific.scsi;
	xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	/* SCSI specific sanity checking */
	if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
	 || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
	 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
	 || (device->quirk->mintags == 0)) {
		/*
		 * Can't tag on hardware that doesn't support tags,
		 * doesn't have it enabled, or has broken tag support.
		 */
		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	}

	if (async_update == FALSE) {
		/*
		 * Perform sanity checking against what the
		 * controller and device can do.
		 */
		xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
		cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
		cur_cts.type = cts->type;
		xpt_action((union ccb *)&cur_cts);

		if ((cur_cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			return;
		}
		cur_scsi = &cur_cts.proto_specific.scsi;
		if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
			scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
		}
		if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	}

	/* SPI specific sanity checking */
	if (cts->transport == XPORT_SPI && async_update == FALSE) {
		u_int spi3caps;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings_spi *cur_spi;

		spi = &cts->xport_specific.spi;

		cur_spi = &cur_cts.xport_specific.spi;

		/* Fill in any gaps in what the user gave us */
		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
			spi->sync_period = cur_spi->sync_period;
		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
			spi->sync_period = 0;
		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
			spi->sync_offset = cur_spi->sync_offset;
		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
			spi->sync_offset = 0;
		if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
			spi->ppr_options = cur_spi->ppr_options;
		if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
			spi->ppr_options = 0;
		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
			spi->bus_width = cur_spi->bus_width;
		if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
			spi->bus_width = 0;
		if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
			spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
		}
		if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
		if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
		  && (inq_data->flags & SID_Sync) == 0
		  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
		 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
		 || (spi->sync_offset == 0)
		 || (spi->sync_period == 0)) {
			/* Force async */
			spi->sync_period = 0;
			spi->sync_offset = 0;
		}

		switch (spi->bus_width) {
		case MSG_EXT_WDTR_BUS_32_BIT:
			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
			  || (inq_data->flags & SID_WBus32) != 0
			  || cts->type == CTS_TYPE_USER_SETTINGS)
			 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
				break;
			/* Fall Through to 16-bit */
		case MSG_EXT_WDTR_BUS_16_BIT:
			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
			  || (inq_data->flags & SID_WBus16) != 0
			  || cts->type == CTS_TYPE_USER_SETTINGS)
			 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
				break;
			}
			/* Fall Through to 8-bit */
		default: /* New bus width?? */
		case MSG_EXT_WDTR_BUS_8_BIT:
			/* All targets can do this */
			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
			break;
		}

		spi3caps = cpi.xport_specific.spi.ppr_options;
		if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
		 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
			spi3caps &= inq_data->spi3data;

		if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;

		if ((spi3caps & SID_SPI_IUS) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;

		if ((spi3caps & SID_SPI_QAS) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;

		/* No SPI Transfer settings are allowed unless we are wide */
		if (spi->bus_width == 0)
			spi->ppr_options = 0;

		if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
			/*
			 * Can't tag queue without disconnection.
			 */
			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
			scsi->valid |= CTS_SCSI_VALID_TQ;
		}

		/*
		 * If we are currently performing tagged transactions to
		 * this device and want to change its negotiation parameters,
		 * go non-tagged for a bit to give the controller a chance to
		 * negotiate unhampered by tag messages.
		 */
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS
		 && (device->inq_flags & SID_CmdQue) != 0
		 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
		 && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
				   CTS_SPI_VALID_SYNC_OFFSET|
				   CTS_SPI_VALID_BUS_WIDTH)) != 0)
			xpt_toggle_tags(cts->ccb_h.path);
	}

	if (cts->type == CTS_TYPE_CURRENT_SETTINGS
	 && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
		int device_tagenb;

		/*
		 * If we are transitioning from tags to no-tags or
		 * vice-versa, we need to carefully freeze and restart
		 * the queue so that we don't overlap tagged and non-tagged
		 * commands.  We also temporarily stop tags if there is
		 * a change in transfer negotiation settings to allow
		 * "tag-less" negotiation.
		 */
		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
		 || (device->inq_flags & SID_CmdQue) != 0)
			device_tagenb = TRUE;
		else
			device_tagenb = FALSE;

		if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
		  && device_tagenb == FALSE)
		 || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
		  && device_tagenb == TRUE)) {

			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
				/*
				 * Delay change to use tags until after a
				 * few commands have gone to this device so
				 * the controller has time to perform transfer
				 * negotiations without tagged messages getting
				 * in the way.
				 */
				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
			} else {
				struct ccb_relsim crs;

				xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
				device->inq_flags &= ~SID_CmdQue;
				xpt_dev_ccbq_resize(cts->ccb_h.path,
						    sim->max_dev_openings);
				device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
				device->tag_delay_count = 0;

				xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
					      /*priority*/1);
				crs.ccb_h.func_code = XPT_REL_SIMQ;
				crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
				crs.openings
				    = crs.release_timeout
				    = crs.qfrozen_cnt
				    = 0;
				xpt_action((union ccb *)&crs);
			}
		}
	}
	if (async_update == FALSE)
		(*(sim->sim_action))(sim, (union ccb *)cts);
}
#else /* CAM_NEW_TRAN_CODE */

static void
xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
			  int async_update)
{
	struct cam_sim *sim;
	int qfrozen;

	sim = cts->ccb_h.path->bus->sim;
	qfrozen = FALSE;
	if (async_update == FALSE) {
		struct scsi_inquiry_data *inq_data;
		struct ccb_pathinq cpi;
		struct ccb_trans_settings cur_cts;

		if (device == NULL) {
			cts->ccb_h.status = CAM_PATH_INVALID;
			xpt_done((union ccb *)cts);
			return;
		}

		/*
		 * Perform sanity checking against what the
		 * controller and device can do.
		 */
		xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
		cpi.ccb_h.func_code = XPT_PATH_INQ;
		xpt_action((union ccb *)&cpi);
		xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
		cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
		cur_cts.flags = CCB_TRANS_CURRENT_SETTINGS;
		xpt_action((union ccb *)&cur_cts);
		inq_data = &device->inq_data;

		/* Fill in any gaps in what the user gave us */
		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
			cts->sync_period = cur_cts.sync_period;
		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
			cts->sync_offset = cur_cts.sync_offset;
		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) == 0)
			cts->bus_width = cur_cts.bus_width;
		if ((cts->valid & CCB_TRANS_DISC_VALID) == 0) {
			cts->flags &= ~CCB_TRANS_DISC_ENB;
			cts->flags |= cur_cts.flags & CCB_TRANS_DISC_ENB;
		}
		if ((cts->valid & CCB_TRANS_TQ_VALID) == 0) {
			cts->flags &= ~CCB_TRANS_TAG_ENB;
			cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB;
		}

		if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
		  && (inq_data->flags & SID_Sync) == 0)
		 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
		 || (cts->sync_offset == 0)
		 || (cts->sync_period == 0)) {
			/* Force async */
			cts->sync_period = 0;
			cts->sync_offset = 0;
		} else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0) {

			if ((inq_data->spi3data & SID_SPI_CLOCK_DT) == 0
			 && cts->sync_period <= 0x9) {
				/*
				 * Don't allow DT transmission rates if the
				 * device does not support it.
				 */
				cts->sync_period = 0xa;
			}
			if ((inq_data->spi3data & SID_SPI_IUS) == 0
			 && cts->sync_period <= 0x8) {
				/*
				 * Don't allow PACE transmission rates
				 * if the device does not support packetized
				 * transfers.
				 */
				cts->sync_period = 0x9;
			}
		}

		switch (cts->bus_width) {
		case MSG_EXT_WDTR_BUS_32_BIT:
			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
			  || (inq_data->flags & SID_WBus32) != 0)
			 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
				break;
			/* Fall Through to 16-bit */
		case MSG_EXT_WDTR_BUS_16_BIT:
			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
			  || (inq_data->flags & SID_WBus16) != 0)
			 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
				break;
			}
			/* Fall Through to 8-bit */
		default: /* New bus width?? */
		case MSG_EXT_WDTR_BUS_8_BIT:
			/* All targets can do this */
			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
			break;
		}

		if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
			/*
			 * Can't tag queue without disconnection.
			 */
			cts->flags &= ~CCB_TRANS_TAG_ENB;
			cts->valid |= CCB_TRANS_TQ_VALID;
		}

		if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
		 || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
		 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
		 || (device->quirk->mintags == 0)) {
			/*
			 * Can't tag on hardware that doesn't support,
			 * doesn't have it enabled, or has broken tag support.
			 */
			cts->flags &= ~CCB_TRANS_TAG_ENB;
		}
	}

	if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
		int device_tagenb;

		/*
		 * If we are transitioning from tags to no-tags or
		 * vice-versa, we need to carefully freeze and restart
		 * the queue so that we don't overlap tagged and non-tagged
		 * commands.  We also temporarily stop tags if there is
		 * a change in transfer negotiation settings to allow
		 * "tag-less" negotiation.
		 */
		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
		 || (device->inq_flags & SID_CmdQue) != 0)
			device_tagenb = TRUE;
		else
			device_tagenb = FALSE;

		if (((cts->flags & CCB_TRANS_TAG_ENB) != 0
		  && device_tagenb == FALSE)
		 || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
		  && device_tagenb == TRUE)) {

			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
				/*
				 * Delay change to use tags until after a
				 * few commands have gone to this device so
				 * the controller has time to perform transfer
				 * negotiations without tagged messages getting
				 * in the way.
				 */
				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
			} else {
				xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
				qfrozen = TRUE;
				device->inq_flags &= ~SID_CmdQue;
				xpt_dev_ccbq_resize(cts->ccb_h.path,
						    sim->max_dev_openings);
				device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
				device->tag_delay_count = 0;
			}
		}
	}

	if (async_update == FALSE) {
		/*
		 * If we are currently performing tagged transactions to
		 * this device and want to change its negotiation parameters,
		 * go non-tagged for a bit to give the controller a chance to
		 * negotiate unhampered by tag messages.
		 */
		if ((device->inq_flags & SID_CmdQue) != 0
		 && (cts->flags & (CCB_TRANS_SYNC_RATE_VALID|
				   CCB_TRANS_SYNC_OFFSET_VALID|
				   CCB_TRANS_BUS_WIDTH_VALID)) != 0)
			xpt_toggle_tags(cts->ccb_h.path);

		(*(sim->sim_action))(sim, (union ccb *)cts);
	}

	if (qfrozen) {
		struct ccb_relsim crs;

		crs.ccb_h.status = CAM_REQ_CMP;
		xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
			      /*priority*/1);
		crs.ccb_h.func_code = XPT_REL_SIMQ;
		crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
		crs.openings
		    = crs.release_timeout
		    = crs.qfrozen_cnt
		    = 0;
		xpt_action((union ccb *)&crs);
	}
}

#endif /* CAM_NEW_TRAN_CODE */

static void
xpt_toggle_tags(struct cam_path *path)
{
	struct cam_ed *dev;

	/*
	 * Give controllers a chance to renegotiate
	 * before starting tag operations.  We
	 * "toggle" tagged queuing off then on
	 * which causes the tag enable command delay
	 * counter to come into effect.
	 */
	dev = path->device;
	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
	 || ((dev->inq_flags & SID_CmdQue) != 0
	  && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
		struct ccb_trans_settings cts;

		xpt_setup_ccb(&cts.ccb_h, path, 1);
#ifdef CAM_NEW_TRAN_CODE
		cts.protocol = PROTO_SCSI;
		cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
		cts.transport = XPORT_UNSPECIFIED;
		cts.transport_version = XPORT_VERSION_UNSPECIFIED;
		cts.proto_specific.scsi.flags = 0;
		cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
#else /* CAM_NEW_TRAN_CODE */
		cts.flags = 0;
		cts.valid = CCB_TRANS_TQ_VALID;
#endif /* CAM_NEW_TRAN_CODE */
		xpt_set_transfer_settings(&cts, path->device,
					  /*async_update*/TRUE);
#ifdef CAM_NEW_TRAN_CODE
		cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
#else /* CAM_NEW_TRAN_CODE */
		cts.flags = CCB_TRANS_TAG_ENB;
#endif /* CAM_NEW_TRAN_CODE */
		xpt_set_transfer_settings(&cts, path->device,
					  /*async_update*/TRUE);
	}
}
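
/*
 * Switch a device back to tagged queuing once its tag delay count has
 * expired: mark it tag capable again, resize its ccb queue to the tagged
 * depth, and release the frozen device queue once outstanding commands
 * drain.
 */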
static void
xpt_start_tags(struct cam_path *path)
{
	struct ccb_relsim crs;
	struct cam_ed *device;
	struct cam_sim *sim;
	int    newopenings;

	device = path->device;
	sim = path->bus->sim;
	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
	xpt_freeze_devq(path, /*count*/1);
	device->inq_flags |= SID_CmdQue;
	if (device->tag_saved_openings != 0)
		newopenings = device->tag_saved_openings;
	else
		newopenings = min(device->quirk->maxtags,
				  sim->max_tagged_dev_openings);
	xpt_dev_ccbq_resize(path, newopenings);
	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
	crs.openings
	    = crs.release_timeout
	    = crs.qfrozen_cnt
	    = 0;
	xpt_action((union ccb *)&crs);
}
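
/*
 * Bookkeeping for initial bus configuration: how many busses remain to
 * be configured and how many of them will be reset before scanning.
 */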
static int busses_to_config;
static int busses_to_reset;
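
/*
 * Per-bus callback used with xpt_for_all_busses() to count the busses
 * that need configuration and those that can usefully be reset first
 * (controllers that allow bus resets and can negotiate transfers).
 */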
static int
xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
{
	if (bus->path_id != CAM_XPT_PATH_ID) {
		struct cam_path path;
		struct ccb_pathinq cpi;
		int can_negotiate;

		busses_to_config++;
		xpt_compile_path(&path, NULL, bus->path_id,
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
		cpi.ccb_h.func_code = XPT_PATH_INQ;
		xpt_action((union ccb *)&cpi);
		can_negotiate = cpi.hba_inquiry;
		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
		if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
		 && can_negotiate)
			busses_to_reset++;
		xpt_release_path(&path);
	}

	return(1);
}
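
/*
 * Per-bus callback that starts configuration of a single bus: issue a
 * path inquiry and, when the controller permits it, a bus reset.  The
 * CCB is then handed to xpt_finishconfig(), which continues with the
 * bus scan.
 */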
static int
xptconfigfunc(struct cam_eb *bus, void *arg)
{
	struct	cam_path *path;
	union	ccb *work_ccb;

	if (bus->path_id != CAM_XPT_PATH_ID) {
		cam_status status;
		int can_negotiate;

		work_ccb = xpt_alloc_ccb();
		if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
					      CAM_TARGET_WILDCARD,
					      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
			kprintf("xptconfigfunc: xpt_create_path failed with "
				"status %#x for bus %d\n", status, bus->path_id);
			kprintf("xptconfigfunc: halting bus configuration\n");
			xpt_free_ccb(work_ccb);
			busses_to_config--;
			xpt_finishconfig(xpt_periph, NULL);
			return(0);
		}
		xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
		xpt_action(work_ccb);
		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
			kprintf("xptconfigfunc: CPI failed on bus %d "
				"with status %d\n", bus->path_id,
				work_ccb->ccb_h.status);
			xpt_finishconfig(xpt_periph, work_ccb);
			return(1);
		}

		can_negotiate = work_ccb->cpi.hba_inquiry;
		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
		if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
		 && (can_negotiate != 0)) {
			xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
			work_ccb->ccb_h.cbfcnp = NULL;
			CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
				  ("Resetting Bus\n"));
			xpt_action(work_ccb);
			xpt_finishconfig(xpt_periph, work_ccb);
		} else {
			/* Act as though we performed a successful BUS RESET */
			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
			xpt_finishconfig(xpt_periph, work_ccb);
		}
	}

	return(1);
}
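
/*
 * Run once interrupts are enabled to kick off device discovery: set up
 * optional CAM debugging, count the installed busses, and start
 * configuration of each one.
 */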
static void
xpt_config(void *arg)
{
	/*
	 * Now that interrupts are enabled, go find our devices
	 */

#ifdef CAMDEBUG
	/* Setup debugging flags and path */
#ifdef CAM_DEBUG_FLAGS
	cam_dflags = CAM_DEBUG_FLAGS;
#else /* !CAM_DEBUG_FLAGS */
	cam_dflags = CAM_DEBUG_NONE;
#endif /* CAM_DEBUG_FLAGS */
#ifdef CAM_DEBUG_BUS
	if (cam_dflags != CAM_DEBUG_NONE) {
		if (xpt_create_path(&cam_dpath, xpt_periph,
				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
			kprintf("xpt_config: xpt_create_path() failed for debug"
				" target %d:%d:%d, debugging disabled\n",
				CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
			cam_dflags = CAM_DEBUG_NONE;
		}
	} else
		cam_dpath = NULL;
#else /* !CAM_DEBUG_BUS */
	cam_dpath = NULL;
#endif /* CAM_DEBUG_BUS */
#endif /* CAMDEBUG */

	/*
	 * Scan all installed busses.
	 */
	xpt_for_all_busses(xptconfigbuscountfunc, NULL);

	if (busses_to_config == 0) {
		/* Call manually because we don't have any busses */
		xpt_finishconfig(xpt_periph, NULL);
	} else {
		if (busses_to_reset > 0 && scsi_delay >= 2000) {
			kprintf("Waiting %d seconds for SCSI "
				"devices to settle\n", scsi_delay/1000);
		}
		xpt_for_all_busses(xptconfigfunc, NULL);
	}
}

/*
 * If the given device only has one peripheral attached to it, and if that
 * peripheral is the passthrough driver, announce it.  This ensures that the
 * user sees some sort of announcement for every peripheral in their system.
 */
static int
xptpassannouncefunc(struct cam_ed *device, void *arg)
{
	struct cam_periph *periph;
	int i;

	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
	     periph = SLIST_NEXT(periph, periph_links), i++);

	periph = SLIST_FIRST(&device->periphs);
	if ((i == 1)
	 && (strncmp(periph->periph_name, "pass", 4) == 0))
		xpt_announce_periph(periph, NULL);

	return(1);
}
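
/*
 * Completion handling for the configuration-time bus reset/scan CCBs.
 * Once every bus has been configured, register the peripheral drivers,
 * announce passthrough-only devices, and release the boot-time
 * config_intrhook.
 */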
static void
xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
{
	struct	periph_driver **p_drv;
	int	i;

	if (done_ccb != NULL) {
		CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
			  ("xpt_finishconfig\n"));
		switch(done_ccb->ccb_h.func_code) {
		case XPT_RESET_BUS:
			if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
				done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
				done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
				done_ccb->crcn.flags = 0;
				xpt_action(done_ccb);
				return;
			}
			/* FALLTHROUGH */
		case XPT_SCAN_BUS:
		default:
			xpt_free_path(done_ccb->ccb_h.path);
			busses_to_config--;
			break;
		}
	}

	if (busses_to_config == 0) {
		/* Register all the peripheral drivers */
		/* XXX This will have to change when we have loadable modules */
		p_drv = periph_drivers;
		for (i = 0; p_drv[i] != NULL; i++) {
			(*p_drv[i]->init)();
		}

		/*
		 * Check for devices with no "standard" peripheral driver
		 * attached.  For any devices like that, announce the
		 * passthrough driver so the user will see something.
		 */
		xpt_for_all_devices(xptpassannouncefunc, NULL);

		/* Release our hook so that the boot can continue. */
		config_intrhook_disestablish(xpt_config_hook);
		kfree(xpt_config_hook, M_TEMP);
		xpt_config_hook = NULL;
	}
	if (done_ccb != NULL)
		xpt_free_ccb(done_ccb);
}
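
/*
 * Action routine for the xpt bus' own SIM.  Only path inquiries are
 * serviced here; any other function code is rejected as invalid.
 */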
static void
xptaction(struct cam_sim *sim, union ccb *work_ccb)
{
	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));

	switch (work_ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi;

		cpi = &work_ccb->cpi;
		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = 0;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 0;
		cpi->max_lun = 0;
		cpi->initiator_id = 0;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "", HBA_IDLEN);
		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
		cpi->unit_number = sim->unit_number;
		cpi->bus_id = sim->bus_id;
		cpi->base_transfer_speed = 0;
#ifdef CAM_NEW_TRAN_CODE
		cpi->protocol = PROTO_UNSPECIFIED;
		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
		cpi->transport = XPORT_UNSPECIFIED;
		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
#endif /* CAM_NEW_TRAN_CODE */
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(work_ccb);
		break;
	}
	default:
		work_ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(work_ccb);
		break;
	}
}

/*
 * The xpt as a "controller" has no interrupt sources, so polling
 * is a no-op.
 */
static void
xptpoll(struct cam_sim *sim)
{
}

/*
 * Should only be called by the machine interrupt dispatch routines,
 * so put these prototypes here instead of in the header.
 */
static void
swi_cambio(void *arg, void *frame)
{
	camisr(&cam_bioq);
}
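
/*
 * Process the queue of completed CCBs: retire each CCB from its device's
 * ccb queue, update device and SIM queue accounting, restart frozen
 * queues where appropriate, and finally invoke the peripheral driver's
 * completion callback.
 */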
static void
camisr(cam_isrq_t *queue)
{
	struct	ccb_hdr *ccb_h;

	while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
		int	runq;

		TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;

		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
			  ("camisr\n"));

		runq = FALSE;

		if (ccb_h->flags & CAM_HIGH_POWER) {
			struct highpowerlist	*hphead;
			struct cam_ed		*device;
			union ccb		*send_ccb;

			hphead = &highpowerq;

			send_ccb = (union ccb *)STAILQ_FIRST(hphead);

			/*
			 * Increment the count since this command is done.
			 */
			num_highpower++;

			/*
			 * Any high powered commands queued up?
			 */
			if (send_ccb != NULL) {

				device = send_ccb->ccb_h.path->device;

				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);

				xpt_release_devq(send_ccb->ccb_h.path,
						 /*count*/1, /*runqueue*/TRUE);
			}
		}
		if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
			struct cam_ed *dev;

			dev = ccb_h->path->device;

			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);

			if (!SIM_DEAD(ccb_h->path->bus->sim)) {
				ccb_h->path->bus->sim->devq->send_active--;
				ccb_h->path->bus->sim->devq->send_openings++;
			}

			if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
			  && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
			 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
			  && (dev->ccbq.dev_active == 0))) {

				xpt_release_devq(ccb_h->path, /*count*/1,
						 /*run_queue*/TRUE);
			}

			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
			 && (--dev->tag_delay_count == 0))
				xpt_start_tags(ccb_h->path);

			if ((dev->ccbq.queue.entries > 0)
			 && (dev->qfrozen_cnt == 0)
			 && (device_is_send_queued(dev) == 0)) {
				runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
							      dev);
			}
		}

		if (ccb_h->status & CAM_RELEASE_SIMQ) {
			xpt_release_simq(ccb_h->path->bus->sim,
					 /*run_queue*/TRUE);
			ccb_h->status &= ~CAM_RELEASE_SIMQ;
			runq = FALSE;
		}

		if ((ccb_h->flags & CAM_DEV_QFRZDIS)
		 && (ccb_h->status & CAM_DEV_QFRZN)) {
			xpt_release_devq(ccb_h->path, /*count*/1,
					 /*run_queue*/TRUE);
			ccb_h->status &= ~CAM_DEV_QFRZN;
		} else if (runq) {
			xpt_run_dev_sendq(ccb_h->path->bus);
		}

		/* Call the peripheral driver's callback */
		(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
	}
}
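
/*
 * Stub action and poll routines installed for a SIM whose controller
 * has gone away; every request completes with CAM_DEV_NOT_THERE.
 */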
static void
dead_sim_action(struct cam_sim *sim, union ccb *ccb)
{
	ccb->ccb_h.status = CAM_DEV_NOT_THERE;
	xpt_done(ccb);
}

static void
dead_sim_poll(struct cam_sim *sim)