/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/cam/cam_xpt.c,v 1.80.2.18 2002/12/09 17:31:55 gibbs Exp $
 * $DragonFly: src/sys/bus/cam/cam_xpt.c,v 1.67 2008/07/18 00:07:21 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/devicestat.h>
#include <sys/interrupt.h>
#include <sys/taskqueue.h>
#include <sys/thread.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/clock.h>
#include <machine/stdarg.h>

#include "cam_periph.h"
#include "cam_xpt_sim.h"
#include "cam_xpt_periph.h"
#include "cam_debug.h"

#include "scsi/scsi_all.h"
#include "scsi/scsi_message.h"
#include "scsi/scsi_pass.h"
#include <sys/kthread.h>
/* Data structures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");

/* Object for deferring XPT actions to a taskqueue */
struct xpt_task {
	struct task	task;
	void	       *data1;
	uintptr_t	data2;
};
/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
	SLIST_ENTRY(async_node)	links;
	u_int32_t	event_enable;	/* Async Event enables */
	void		(*callback)(void *arg, u_int32_t code,
				    struct cam_path *path, void *args);
	void		*callback_arg;
};

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif
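
/*
 * Illustrative only: the #ifndef guard above means the limit can be
 * overridden at build time.  A hypothetical kernel config line (assumes
 * the option is wired into the build's options file; verify before use):
 *
 *	options CAM_MAX_HIGHPOWER=8
 */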
/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
	cam_pinfo pinfo;
	struct	  cam_ed *device;
};
/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
	TAILQ_ENTRY(cam_ed) links;
	struct	cam_ed_qinfo alloc_ccb_entry;
	struct	cam_ed_qinfo send_ccb_entry;
	struct	cam_et	 *target;
	lun_id_t	 lun_id;
	struct	camq drvq;		/*
					 * Queue of type drivers wanting to do
					 * work on this device.
					 */
	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
	struct	async_list asyncs;	/* Async callback info for this B/T/L */
	struct	periph_list periphs;	/* All attached devices */
	u_int	generation;		/* Generation number */
	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
	/* Storage for the inquiry data */
	cam_proto	 protocol;
	u_int		 protocol_version;
	cam_xport	 transport;
	u_int		 transport_version;
	struct		 scsi_inquiry_data inq_data;
	u_int8_t	 inq_flags;	/*
					 * Current settings for inquiry flags.
					 * This allows us to override settings
					 * like disconnection and tagged
					 * queuing for a device.
					 */
	u_int8_t	 queue_flags;	/* Queue flags from the control page */
	u_int8_t	 serial_num_len;
	u_int8_t	*serial_num;
	u_int32_t	 qfrozen_cnt;
	u_int32_t	 flags;
#define CAM_DEV_UNCONFIGURED	 	0x01
#define CAM_DEV_REL_TIMEOUT_PENDING	0x02
#define CAM_DEV_REL_ON_COMPLETE		0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
#define CAM_DEV_TAG_AFTER_COUNT		0x20
#define CAM_DEV_INQUIRY_DATA_VALID	0x40
#define	CAM_DEV_IN_DV			0x80
#define	CAM_DEV_DV_HIT_BOTTOM		0x100
	u_int32_t	 tag_delay_count;
#define	CAM_TAG_DELAY_COUNT		5
	u_int32_t	 tag_saved_openings;
	u_int32_t	 refcount;
	struct callout	 callout;
};
/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
	TAILQ_HEAD(, cam_ed) ed_entries;
	TAILQ_ENTRY(cam_et) links;
	struct	cam_eb	*bus;
	target_id_t	target_id;
	u_int32_t	refcount;
	u_int		generation;
	struct		timeval last_reset;	/* uptime of last reset */
};
/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
	TAILQ_HEAD(, cam_et) et_entries;
	TAILQ_ENTRY(cam_eb)  links;
	path_id_t	     path_id;
	struct cam_sim	     *sim;
	struct timeval	     last_reset;	/* uptime of last reset */
	u_int32_t	     flags;
#define	CAM_EB_RUNQ_SCHEDULED	0x01
	u_int32_t	     refcount;
	u_int		     generation;
};
struct cam_path {
	struct cam_periph *periph;
	struct cam_eb	  *bus;
	struct cam_et	  *target;
	struct cam_ed	  *device;
};
struct xpt_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define	CAM_QUIRK_NOLUNS	0x01
#define	CAM_QUIRK_NOSERIAL	0x02
#define	CAM_QUIRK_HILUNS	0x04
#define	CAM_QUIRK_NOHILUNS	0x08
	u_int mintags;
	u_int maxtags;
};
static int cam_srch_hi = 0;
TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
    sysctl_cam_search_luns, "I",
    "allow search above LUN 7 for SCSI3 and greater devices");
#define	CAM_SCSI2_MAXLUN	8
/*
 * If we're not quirked to search <= the first 8 luns
 * and we are either quirked to search above lun 8,
 * or we're > SCSI-2 and we've enabled hilun searching,
 * or we're > SCSI-2 and the last lun was a success,
 * we can look for luns above lun 8.
 */
#define	CAN_SRCH_HI_SPARSE(dv)					\
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0) 		\
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)			\
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))

#define	CAN_SRCH_HI_DENSE(dv)					\
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0) 		\
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)			\
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))
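
/*
 * Illustrative sketch only ("device" and "lun" are hypothetical probe-loop
 * variables): per the comment above, DENSE gates continuing past LUN 7
 * when the previous LUN responded, SPARSE when it did not.  Roughly:
 */
#if 0
	if (lun + 1 >= CAM_SCSI2_MAXLUN &&
	    !CAN_SRCH_HI_DENSE(device) && !CAN_SRCH_HI_SPARSE(device))
		return;		/* stop after LUNs 0-7 */
#endif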
struct xpt_softc {
	u_int32_t	flags;
#define	XPT_FLAG_OPEN		0x01
	u_int32_t	xpt_generation;

	/* number of high powered commands that can go through right now */
	STAILQ_HEAD(highpowerlist, ccb_hdr)	highpowerq;
	int		num_highpower;

	/* queue for handling async rescan requests. */
	TAILQ_HEAD(, ccb_hdr) ccb_scanq;

	/* Registered busses */
	TAILQ_HEAD(,cam_eb)	xpt_busses;
	u_int			bus_generation;

	struct intr_config_hook	*xpt_config_hook;

	struct lock		xpt_topo_lock;
	struct lock		xpt_lock;
};
static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";
static struct xpt_quirk_entry xpt_quirk_table[] =
{
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Unfortunately, the Quantum Atlas III has the same
		 * problem as the Atlas II drives above.
		 * Reported by: "Johan Granlund" <johan@granlund.nu>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM39100TD-SW N1B0
		 *
		 * It's possible that Quantum will fix the problem in later
		 * firmware revisions.  If that happens, the quirk entry
		 * will need to be made specific to the firmware revisions
		 * with the problem.
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * 18 Gig Atlas III, same problem as the 9G version.
		 * Reported by: Andre Albsmeier
		 *		<andre.albsmeier@mchp.siemens.de>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM318000TD-S N491
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * Broken tagged queuing drive
		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
		 *         and: Martin Renters <martin@tdc.on.ca>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	/*
	 * The Seagate Medalist Pro drives have very poor write
	 * performance with anything more than 2 tags.
	 *
	 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
	 * Drive:  <SEAGATE ST36530N 1444>
	 *
	 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
	 * Drive:  <SEAGATE ST34520W 1281>
	 *
	 * No one has actually reported that the 9G version
	 * (ST39140*) of the Medalist Pro has the same problem, but
	 * we're assuming that it does because the 4G and 6.5G
	 * versions of the drive are broken.
	 */
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  Write performance
		 * steadily drops off with more and more concurrent
		 * transactions.  Best sequential write performance with
		 * tagged queueing turned off and write caching turned on.
		 *
		 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
		 * Drive:  DCAS-34330 w/ "S65A" firmware.
		 *
		 * The drive with the problem had the "S65A" firmware
		 * revision, and has also been reported (by Stephen J.
		 * Roznowski <sjr@home.net>) for a drive with the "S61A"
		 * firmware revision.
		 *
		 * Although no one has reported problems with the 2 gig
		 * version of the DCAS drive, the assumption is that it
		 * has the same problems as the 4 gig version.  Therefore
		 * this quirk entry disables tagged queueing for all
		 * DCAS drives.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* This does not support other than LUN 0 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/*
		 * Broken tagged queuing drive.
		 * Submitted by:
		 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Doesn't handle queue full condition correctly,
		 * so we need to limit maxtags to what the device
		 * can handle instead of determining this automatically.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
		/*quirks*/0, /*mintags*/2, /*maxtags*/32
	},
	{
		/* Really only one LUN */
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* I can't believe we need a quirk for DPT volumes. */
		{ T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
		CAM_QUIRK_NOLUNS,
		/*mintags*/0, /*maxtags*/255
	},
	{
		/*
		 * Many Sony CDROM drives don't like multi-LUN probing.
		 */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This drive doesn't like multiple LUN probing.
		 * Submitted by: Parag Patel <parag@cgt.com>
		 */
		{ T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		{ T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * The 8200 doesn't like multi-lun probing, and probably
		 * doesn't like serial number requests either.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"EXB-8200*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Let's try the same as above, but for a drive that says
		 * it's an IPL-6860 but is actually an EXB 8200.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"IPL-6860*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These Hitachi drives don't like multi-lun probing.
		 * The PR submitter has a DK319H, but says that the Linux
		 * kernel has a similar work-around for the DK312 and DK314,
		 * so all DK31* drives are quirked here.
		 *
		 * Submitted by: Paul Haddad <paul@pth.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/*
		 * The Hitachi CJ series with J8A8 firmware apparently has
		 * problems with tagged commands.
		 * Reported by: amagai@nue.org
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These are the large storage arrays.
		 * Submitted by: William Carrel <william.carrel@infospace.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
		CAM_QUIRK_HILUNS, 2, 1024
	},
	{
		/*
		 * This old revision of the TDC3600 is also SCSI-1, and
		 * hangs upon serial number probing.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
			" TDC 3600", "U07:"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
			"CP150", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
			"96X2*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* TeraSolutions special settings for TRC-22 RAID */
		{ T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
		/*quirks*/0, /*mintags*/55, /*maxtags*/255
	},
	{
		/* Veritas Storage Appliance */
		{ T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
		CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
	},
	{
		/*
		 * Would respond to all LUNs.  Device type and removable
		 * flag are jumper-selectable.
		 */
		{ T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
		  "Tahiti 1", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* EasyRAID E5A aka. areca ARC-6010 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
		CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Default tagged queuing parameters for all devices */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0, /*mintags*/2, /*maxtags*/255
	},
};
static const int xpt_quirk_table_size =
	sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
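
/*
 * Sketch only: xpt_find_quirk() (declared below) is expected to resolve a
 * device's quirk entry by pattern-matching its inquiry data against the
 * table above, roughly along these lines.  The final catch-all entry
 * guarantees every device resolves to something.
 */
#if 0
	match = cam_quirkmatch((caddr_t)&device->inq_data,
			       (caddr_t)xpt_quirk_table,
			       xpt_quirk_table_size,
			       sizeof(*xpt_quirk_table), scsi_inquiry_match);
	device->quirk = (struct xpt_quirk_entry *)match;
#endif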
typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};
typedef int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
static cam_simq_t cam_simq;
static struct spinlock cam_simq_spin;
struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
	probe_periph_init, "probe",
	TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);
PERIPHDRIVER_DECLARE(probe, probe_driver);
#define XPT_CDEV_MAJOR 104

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct dev_ops xpt_ops = {
	{ "xpt", XPT_CDEV_MAJOR, 0 },
	.d_open = xptopen,
	.d_close = xptclose,
	.d_ioctl = xptioctl
};
static void dead_sim_action(struct cam_sim *sim, union ccb *ccb);
static void dead_sim_poll(struct cam_sim *sim);

/* Dummy SIM that is used when the real one has gone. */
static struct cam_sim cam_dead_sim;
static struct lock    cam_dead_lock;
/* Storage for debugging data structures */
#ifdef	CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif
#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
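
/*
 * Illustrative kernel config fragment only (bus/target/lun values are
 * made up); this is the combination the checks above are policing:
 *
 *	options CAMDEBUG
 *	options CAM_DEBUG_BUS=0
 *	options CAM_DEBUG_TARGET=1
 *	options CAM_DEBUG_LUN=0
 *	options CAM_DEBUG_FLAGS=CAM_DEBUG_CDB
 */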
/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static int	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);
static cam_status	xpt_compile_path(struct cam_path *new_path,
					 struct cam_periph *perph,
					 path_id_t path_id,
					 target_id_t target_id,
					 lun_id_t lun_id);

static void		xpt_release_path(struct cam_path *path);

static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static void		xpt_dev_async(u_int32_t async_code,
				      struct cam_eb *bus,
				      struct cam_et *target,
				      struct cam_ed *device,
				      void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				  u_int32_t new_priority);
static void	 xpt_run_dev_allocq(struct cam_eb *bus);
static void	 xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static void	 xpt_release_bus(struct cam_eb *bus);
static void	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
					 int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
				  lun_id_t lun_id);
static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
				    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void	 xpt_scan_lun(struct cam_periph *periph,
			      struct cam_path *path, cam_flags flags,
			      union ccb *ccb);
static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t	xptconfigbuscountfunc;
static xpt_busfunc_t	xptconfigfunc;
static void	 xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static inthand2_t swi_cambio;
static void	 camisr(void *);
static void	 camisr_runqueue(struct cam_sim *);
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
					    void *arg);
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static cam_status	proberegister(struct cam_periph *periph,
				      void *arg);
static void	 probeschedule(struct cam_periph *probe_periph);
static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
static void	 proberequestdefaultnegotiation(struct cam_periph *periph);
static int       proberequestbackoff(struct cam_periph *periph,
				     struct cam_ed *device);
static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
static void	 probecleanup(struct cam_periph *periph);
static void	 xpt_find_quirk(struct cam_ed *device);
static void	 xpt_devise_transport(struct cam_path *path);
static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
					   struct cam_ed *device,
					   int async_update);
static void	 xpt_toggle_tags(struct cam_path *path);
static void	 xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
					    struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
					   struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
	int	retval;

	if (bus->sim->devq && dev->ccbq.devq_openings > 0) {
		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
			cam_ccbq_resize(&dev->ccbq,
					dev->ccbq.dev_openings
					+ dev->ccbq.dev_active);
			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
		}
		/*
		 * The priority of a device waiting for CCB resources
		 * is that of the highest priority peripheral driver
		 * with work to do.
		 */
		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
					  &dev->alloc_ccb_entry.pinfo,
					  CAMQ_GET_HEAD(&dev->drvq)->priority);
	} else {
		retval = 0;
	}

	return (retval);
}
static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
	int	retval;

	if (bus->sim->devq && dev->ccbq.dev_openings > 0) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * queued.
		 */
		retval = xpt_schedule_dev(&bus->sim->devq->send_queue,
					  &dev->send_ccb_entry.pinfo,
					  CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
	} else {
		retval = 0;
	}

	return (retval);
}
static __inline int
periph_is_queued(struct cam_periph *periph)
{
	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}
static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}
static __inline int
device_is_send_queued(struct cam_ed *device)
{
	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}
static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
	/*
	 * Have work to do.
	 * Have space to do more work.
	 * Allowed to do work.
	 */
	return ((devq->alloc_queue.qfrozen_cnt == 0)
	     && (devq->alloc_queue.entries > 0)
	     && (devq->alloc_openings > 0));
}
static void
xpt_periph_init(void)
{
	dev_ops_add(&xpt_ops, 0, 0);
	make_dev(&xpt_ops, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}
static void
probe_periph_init(void)
{
}
static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}
static int
xptopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;

	/*
	 * Only allow read-write access.
	 */
	if (((ap->a_oflags & FWRITE) == 0) || ((ap->a_oflags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((ap->a_oflags & O_NONBLOCK) != 0) {
		kprintf("%s: can't do nonblocking access\n", devtoname(dev));
		return(ENODEV);
	}

	/* Mark ourselves open */
	lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
	xsoftc.flags |= XPT_FLAG_OPEN;
	lockmgr(&xsoftc.xpt_lock, LK_RELEASE);

	return(0);
}
static int
xptclose(struct dev_close_args *ap)
{
	/* Mark ourselves closed */
	lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
	xsoftc.flags &= ~XPT_FLAG_OPEN;
	lockmgr(&xsoftc.xpt_lock, LK_RELEASE);

	return(0);
}
/*
 * Don't automatically grab the xpt softc lock here even though this is going
 * through the xpt device.  The xpt device is really just a back door for
 * accessing other devices and SIMs, so the right thing to do is to grab
 * the appropriate SIM lock once the bus/SIM is located.
 */
static int
xptioctl(struct dev_ioctl_args *ap)
{
	int error;

	error = 0;

	switch(ap->a_cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;
		struct cam_eb *bus;

		inccb = (union ccb *)ap->a_data;

		bus = xpt_find_bus(inccb->ccb_h.path_id);
		if (bus == NULL) {
			error = EINVAL;
			break;
		}

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
				error = EINVAL;
				break;
			}
			/* FALLTHROUGH */
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:

			ccb = xpt_alloc_ccb();

			CAM_SIM_LOCK(bus->sim);

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				CAM_SIM_UNLOCK(bus->sim);
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			CAM_SIM_UNLOCK(bus->sim);
			break;
		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			CAM_SIM_LOCK(bus->sim);

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				CAM_SIM_UNLOCK(bus->sim);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			ccb.ccb_h.cbfcnp = xptdone;
			xpt_action(&ccb);
			CAM_SIM_UNLOCK(bus->sim);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;
		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		xpt_release_bus(bus);
		break;
	}
	/*
	 * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit name filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass"), and unit number are passed back in the ccb.
	 * The current device generation number, the index into the device
	 * peripheral driver list, and the status are also passed back.
	 * Note that since we do everything in one pass, unlike the
	 * XPT_GDEVLIST ccb, we never return a status of
	 * CAM_GDEVLIST_LIST_CHANGED.  It is (or rather should be)
	 * impossible for the device peripheral driver list to change since
	 * we look at the whole thing in one pass, and we do it with lock
	 * protection.
	 */
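#if 0
	/*
	 * Userland usage sketch (not part of this file; headers and error
	 * handling omitted; "da", unit 0 are made-up example values):
	 */
	union ccb ccb;
	int fd = open("/dev/xpt0", O_RDWR);

	bzero(&ccb, sizeof(ccb));
	ccb.ccb_h.func_code = XPT_GDEVLIST;
	strncpy(ccb.cgdl.periph_name, "da", DEV_IDLEN);
	ccb.cgdl.unit_number = 0;
	if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0 &&
	    ccb.ccb_h.status == CAM_REQ_CMP) {
		/* ccb.cgdl now names the matching "pass" unit */
		printf("%s%d\n", ccb.cgdl.periph_name, ccb.cgdl.unit_number);
	}
#endif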
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char   *name;
		u_int unit;
		u_int cur_generation;
		int base_periph_found;
		int splbreaknum;

		ccb = (union ccb *)ap->a_data;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		/*
		 * Every 100 devices, we want to drop our lock protection to
		 * give the software interrupt handler a chance to run.
		 * Most systems won't run into this check, but this should
		 * avoid starvation in the software interrupt handler in
		 * large systems.
		 */
		splbreaknum = 100;

		ccb = (union ccb *)ap->a_data;

		base_periph_found = 0;
		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
ptstartover:
		cur_generation = xsoftc.xpt_generation;

		/* first find our driver in the list of drivers */
		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;
		}

		if (*p_drv == NULL) {
			lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}
		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {

			if (periph->unit_number == unit) {
				break;
			} else if (--splbreaknum == 0) {
				lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
				lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
				splbreaknum = 100;
				if (cur_generation != xsoftc.xpt_generation)
					goto ptstartover;
			}
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = SLIST_FIRST(&device->periphs);
			     periph != NULL;
			     periph = SLIST_NEXT(periph, periph_links), i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (SLIST_NEXT(periph, periph_links))
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}
		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				kprintf("xptioctl: pass driver is not in the "
				       "kernel\n");
				kprintf("xptioctl: put \"device pass\" in "
				       "your kernel config file\n");
			}
		}
		lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
		break;
	}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}
static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		if ((error = xpt_init(NULL)) != 0)
			return (error);
		break;
	case MOD_UNLOAD:
		return EBUSY;
	default:
		return EOPNOTSUPP;
	}

	return 0;
}
/* thread to handle bus rescans */
static void
xpt_scanner_thread(void *dummy)
{
	cam_isrq_t	queue;
	union ccb	*ccb;
	struct cam_sim	*sim;

	for (;;) {
		/*
		 * Wait for a rescan request to come in.  When it does, splice
		 * it onto a queue from local storage so that the xpt lock
		 * doesn't need to be held while the requests are being
		 * processed.
		 */
		lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
		tsleep_interlock(&xsoftc.ccb_scanq);
		lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
		tsleep(&xsoftc.ccb_scanq, 0, "ccb_scanq", 0);
		lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);

		TAILQ_INIT(&queue);
		TAILQ_CONCAT(&queue, &xsoftc.ccb_scanq, sim_links.tqe);
		lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);

		while ((ccb = (union ccb *)TAILQ_FIRST(&queue)) != NULL) {
			TAILQ_REMOVE(&queue, &ccb->ccb_h, sim_links.tqe);

			sim = ccb->ccb_h.path->bus->sim;
			CAM_SIM_LOCK(sim);

			ccb->ccb_h.func_code = XPT_SCAN_BUS;
			ccb->ccb_h.cbfcnp = xptdone;
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5);
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			CAM_SIM_UNLOCK(sim);
		}
	}
}
void
xpt_rescan(union ccb *ccb)
{
	struct ccb_hdr *hdr;

	/*
	 * Don't make duplicate entries for the same paths.
	 */
	lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
	TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
		if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
			lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
			xpt_print(ccb->ccb_h.path, "rescan already queued\n");
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			return;
		}
	}
	TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
	wakeup(&xsoftc.ccb_scanq);
	lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
}
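
#if 0
	/*
	 * Caller-side sketch (illustrative; "bus_path_id" is a hypothetical
	 * variable): a rescan request allocates its own CCB and path, both
	 * of which xpt_rescan() consumes.
	 */
	union ccb *ccb = xpt_alloc_ccb();

	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, bus_path_id,
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	    == CAM_REQ_CMP)
		xpt_rescan(ccb);	/* queued for xpt_scanner_thread */
#endif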
/* Functions accessed by the peripheral drivers */
static int
xpt_init(void *dummy)
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;
	TAILQ_INIT(&xsoftc.xpt_busses);
	TAILQ_INIT(&cam_simq);
	TAILQ_INIT(&xsoftc.ccb_scanq);
	STAILQ_INIT(&xsoftc.highpowerq);
	xsoftc.num_highpower = CAM_MAX_HIGHPOWER;

	spin_init(&cam_simq_spin);
	lockinit(&xsoftc.xpt_lock, "XPT lock", 0, LK_CANRECURSE);
	lockinit(&xsoftc.xpt_topo_lock, "XPT topology lock", 0, LK_CANRECURSE);

	SLIST_INIT(&cam_dead_sim.ccb_freeq);
	TAILQ_INIT(&cam_dead_sim.sim_doneq);
	spin_init(&cam_dead_sim.sim_spin);
	cam_dead_sim.sim_action = dead_sim_action;
	cam_dead_sim.sim_poll = dead_sim_poll;
	cam_dead_sim.sim_name = "dead_sim";
	cam_dead_sim.lock = &cam_dead_lock;
	lockinit(&cam_dead_lock, "XPT dead_sim lock", 0, LK_CANRECURSE);
	cam_dead_sim.flags |= CAM_SIM_DEREGISTERED;
	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*lock*/&xsoftc.xpt_lock,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	cam_simq_release(devq);
	if (xpt_sim == NULL)
		return (ENOMEM);

	xpt_sim->max_ccbs = 16;
	lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
	if ((status = xpt_bus_register(xpt_sim, /*bus #*/0)) != CAM_SUCCESS) {
		kprintf("xpt_init: xpt_bus_register failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		kprintf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}

	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, xpt_sim);
	xpt_free_path(path);

	lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xsoftc.xpt_config_hook = kmalloc(sizeof(struct intr_config_hook),
					 M_CAMXPT, M_INTWAIT | M_ZERO);
	xsoftc.xpt_config_hook->ich_func = xpt_config;
	xsoftc.xpt_config_hook->ich_desc = "xpt";
	xsoftc.xpt_config_hook->ich_order = 1000;
	if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
		kfree (xsoftc.xpt_config_hook, M_CAMXPT);
		kprintf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	/* fire up rescan thread */
	if (kthread_create(xpt_scanner_thread, NULL, NULL, "xpt_thrd")) {
		kprintf("xpt_init: failed to create rescan thread\n");
	}
	/* Install our software interrupt handlers */
	register_swi(SWI_CAMBIO, swi_cambio, NULL, "swi_cambio", NULL);

	return (0);
}
static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	struct cam_sim *xpt_sim;

	if (periph == NULL) {
		kprintf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	xpt_sim = (struct cam_sim *)arg;
	xpt_sim->softc = periph;
	xpt_periph = periph;
	periph->softc = NULL;

	return(CAM_REQ_CMP);
}
int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;
	struct periph_list *periph_head;

	sim_lock_assert_owned(periph->sim->lock);

	device = periph->path->device;

	periph_head = &device->periphs;

	status = CAM_REQ_CMP;

	if (device != NULL) {
		/*
		 * Make room for this peripheral
		 * so it will fit in the queue
		 * when it's scheduled to run
		 */
		status = camq_resize(&device->drvq,
				     device->drvq.array_size + 1);

		device->generation++;

		SLIST_INSERT_HEAD(periph_head, periph, periph_links);
	}

	lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
	xsoftc.xpt_generation++;
	lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);

	return (status);
}
void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	sim_lock_assert_owned(periph->sim->lock);

	device = periph->path->device;

	if (device != NULL) {
		struct periph_list *periph_head;

		periph_head = &device->periphs;

		/* Release the slot for this peripheral */
		camq_resize(&device->drvq, device->drvq.array_size - 1);

		device->generation++;

		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
	}

	lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
	xsoftc.xpt_generation++;
	lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
}
void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	struct	ccb_pathinq cpi;
	struct	ccb_trans_settings cts;
	struct	cam_path *path;
	u_int	speed;
	u_int	freq;
	u_int	mb;

	sim_lock_assert_owned(periph->sim->lock);

	path = periph->path;
	/*
	 * To ensure that this is printed in one piece,
	 * mask out CAM interrupts.
	 */
	kprintf("%s%d at %s%d bus %d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->target->target_id,
	       path->device->lun_id);
	kprintf("%s%d: ", periph->periph_name, periph->unit_number);
	scsi_print_inquiry(&path->device->inq_data);
	if (bootverbose && path->device->serial_num_len > 0) {
		/* Don't wrap the screen - print only the first 60 chars */
		kprintf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		return;
	}

	/* Ask the SIM for its base transfer speed */
	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	speed = cpi.base_transfer_speed;
	freq = 0;
.ccb_h
.status
== CAM_REQ_CMP
&& cts
.transport
== XPORT_SPI
) {
1680 struct ccb_trans_settings_spi
*spi
;
1682 spi
= &cts
.xport_specific
.spi
;
1683 if ((spi
->valid
& CTS_SPI_VALID_SYNC_OFFSET
) != 0
1684 && spi
->sync_offset
!= 0) {
1685 freq
= scsi_calc_syncsrate(spi
->sync_period
);
1689 if ((spi
->valid
& CTS_SPI_VALID_BUS_WIDTH
) != 0)
1690 speed
*= (0x01 << spi
->bus_width
);
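
	/*
	 * Worked example (illustrative numbers only): a sync_period factor
	 * of 0x0a is a 25ns period, so scsi_calc_syncsrate() yields
	 * freq = 40000 (kHz).  On a wide bus (bus_width == 1) the byte rate
	 * doubles: speed = 80000, printed below as "80.000MB/s".
	 */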
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
		struct	ccb_trans_settings_fc *fc = &cts.xport_specific.fc;

		if (fc->valid & CTS_FC_VALID_SPEED) {
			speed = fc->bitrate;
		}
	}

	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SAS) {
		struct	ccb_trans_settings_sas *sas = &cts.xport_specific.sas;

		if (sas->valid & CTS_SAS_VALID_SPEED) {
			speed = sas->bitrate;
		}
	}

	mb = speed / 1000;
	if (mb > 0)
		kprintf("%s%d: %d.%03dMB/s transfers",
		       periph->periph_name, periph->unit_number,
		       mb, speed % 1000);
	else
		kprintf("%s%d: %dKB/s transfers", periph->periph_name,
		       periph->unit_number, speed);
	/* Report additional information about SPI connections */
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
		struct	ccb_trans_settings_spi *spi;

		spi = &cts.xport_specific.spi;
		if (freq != 0) {
			kprintf(" (%d.%03dMHz%s, offset %d", freq / 1000,
			       freq % 1000,
			       (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
			     ? " DT" : "",
			       spi->sync_offset);
		}
		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
		 && spi->bus_width > 0) {
			if (freq != 0) {
				kprintf(", ");
			} else {
				kprintf(" (");
			}
			kprintf("%dbit)", 8 * (0x01 << spi->bus_width));
		} else if (freq != 0) {
			kprintf(")");
		}
	}
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
		struct	ccb_trans_settings_fc *fc;

		fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_WWNN)
			kprintf(" WWNN 0x%llx", (long long) fc->wwnn);
		if (fc->valid & CTS_FC_VALID_WWPN)
			kprintf(" WWPN 0x%llx", (long long) fc->wwpn);
		if (fc->valid & CTS_FC_VALID_PORT)
			kprintf(" PortID 0x%x", fc->port);
	}
->device
->inq_flags
& SID_CmdQue
1751 || path
->device
->flags
& CAM_DEV_TAG_AFTER_COUNT
) {
1752 kprintf("\n%s%d: Command Queueing Enabled",
1753 periph
->periph_name
, periph
->unit_number
);
1758 * We only want to print the caller's announce string if they've
1761 if (announce_string
!= NULL
)
1762 kprintf("%s%d: %s\n", periph
->periph_name
,
1763 periph
->unit_number
, announce_string
);
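
/*
 * Caller-side sketch (illustrative): a peripheral driver typically invokes
 * the routine above once its probe completes, e.g.:
 *
 *	xpt_announce_periph(periph, NULL);
 */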
static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them any
		 * bus node.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}
static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_ed *device)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (device == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this device matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct device_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a device node, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_DEVICE) {
			if ((patterns[i].type == DEV_MATCH_PERIPH)
			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.device_pattern;

		/*
		 * If they want to match any device node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == DEV_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == DEV_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != device->target->bus->path_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
		 && (cur_pattern->target_id != device->target->target_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
		 && (cur_pattern->target_lun != device->lun_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
		 && (cam_quirkmatch((caddr_t)&device->inq_data,
				    (caddr_t)&cur_pattern->inq_pat,
				    1, sizeof(cur_pattern->inq_pat),
				    scsi_static_inquiry_match) == NULL))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this device.  So tell the caller to copy
		 * the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a peripheral matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a peripheral
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen any peripheral matching patterns.  So tell the
	 * caller to stop descending the tree -- the user doesn't want to
	 * match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}
/*
 * Match a single peripheral against any number of match patterns.
 */
static dev_match_ret
xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_periph *periph)
{
	dev_match_ret retval;
	int i;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (periph == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this peripheral matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_STOP | DM_RET_COPY);

	/*
	 * There aren't any nodes below a peripheral node, so there's no
	 * reason to descend the tree any further.
	 */
	retval = DM_RET_STOP;

	for (i = 0; i < num_patterns; i++) {
		struct periph_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a peripheral, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_PERIPH)
			continue;

		cur_pattern = &patterns[i].pattern.periph_pattern;

		/*
		 * If they want to match on anything, then we will do so.
		 */
		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * We've already set the return action to stop,
			 * since there are no nodes below peripherals in
			 * the tree.
			 */
			return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == PERIPH_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != periph->path->bus->path_id))
			continue;

		/*
		 * For the target and lun id's, we have to make sure the
		 * target and lun pointers aren't NULL.  The xpt peripheral
		 * has a wildcard target and device.
		 */
		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
		 && ((periph->path->target == NULL)
		 ||(cur_pattern->target_id != periph->path->target->target_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
		 && ((periph->path->device == NULL)
		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != periph->unit_number))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this peripheral.  So tell the caller to
		 * copy the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * The return action has already been set to stop, since
		 * peripherals don't have any nodes below them in the EDT.
		 */
		return(retval);
	}

	/*
	 * If we get to this point, the peripheral that was passed in
	 * doesn't match any of the patterns.
	 */
	return(retval);
}
static int
xptedtbusfunc(struct cam_eb *bus, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);

	/*
	 * If we got an error, bail out of the search.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this bus out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;

			cdm->pos.cookie.bus = bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				xsoftc.bus_generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_BUS;
		cdm->matches[j].result.bus_result.path_id = bus->path_id;
		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
		cdm->matches[j].result.bus_result.unit_number =
			bus->sim->unit_number;
		strncpy(cdm->matches[j].result.bus_result.dev_name,
			bus->sim->sim_name, DEV_IDLEN);
	}

	/*
	 * If the user is only interested in busses, there's no
	 * reason to descend to the next level in the tree.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a target generation recorded, check it to
	 * make sure the target list hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (bus == cdm->pos.cookie.bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
	     bus->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		return(xpttargettraverse(bus,
					(struct cam_et *)cdm->pos.cookie.target,
					 xptedttargetfunc, arg));
	else
		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
}
static int
xptedttargetfunc(struct cam_et *target, void *arg)
{
	struct ccb_dev_match *cdm;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If there is a device list generation recorded, check it to
	 * make sure the device list hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
	     target->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device != NULL))
		return(xptdevicetraverse(target,
					(struct cam_ed *)cdm->pos.cookie.device,
					 xptedtdevicefunc, arg));
	else
		return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
}
static int
xptedtdevicefunc(struct cam_ed *device, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
					device);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this device out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;

			cdm->pos.cookie.bus = device->target->bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				xsoftc.bus_generation;
			cdm->pos.cookie.target = device->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				device->target->bus->generation;
			cdm->pos.cookie.device = device;
			cdm->pos.generations[CAM_DEV_GENERATION] =
				device->target->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_DEVICE;
		cdm->matches[j].result.device_result.path_id =
			device->target->bus->path_id;
		cdm->matches[j].result.device_result.target_id =
			device->target->target_id;
		cdm->matches[j].result.device_result.target_lun =
			device->lun_id;
		bcopy(&device->inq_data,
		      &cdm->matches[j].result.device_result.inq_data,
		      sizeof(struct scsi_inquiry_data));

		/* Let the user know whether this device is unconfigured */
		if (device->flags & CAM_DEV_UNCONFIGURED)
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_UNCONFIGURED;
		else
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_NOFLAG;
	}

	/*
	 * If the user isn't interested in peripherals, don't descend
	 * the tree any further.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a peripheral list generation recorded, make sure
	 * it hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (device->target->bus == cdm->pos.cookie.bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (device->target == cdm->pos.cookie.target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (device == cdm->pos.cookie.device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
	     device->generation)){
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == device->target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == device->target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		return(xptperiphtraverse(device,
				(struct cam_periph *)cdm->pos.cookie.periph,
				xptedtperiphfunc, arg));
	else
		return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
}
static int
xptedtperiphfunc(struct cam_periph *periph, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this peripheral out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
				CAM_DEV_POS_PERIPH;

			cdm->pos.cookie.bus = periph->path->bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				xsoftc.bus_generation;
			cdm->pos.cookie.target = periph->path->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				periph->path->bus->generation;
			cdm->pos.cookie.device = periph->path->device;
			cdm->pos.generations[CAM_DEV_GENERATION] =
				periph->path->target->generation;
			cdm->pos.cookie.periph = periph;
			cdm->pos.generations[CAM_PERIPH_GENERATION] =
				periph->path->device->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}

		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_PERIPH;
		cdm->matches[j].result.periph_result.path_id =
			periph->path->bus->path_id;
		cdm->matches[j].result.periph_result.target_id =
			periph->path->target->target_id;
		cdm->matches[j].result.periph_result.target_lun =
			periph->path->device->lun_id;
		cdm->matches[j].result.periph_result.unit_number =
			periph->unit_number;
		strncpy(cdm->matches[j].result.periph_result.periph_name,
			periph->periph_name, DEV_IDLEN);
	}

	return(1);
}
static int
xptedtmatch(struct ccb_dev_match *cdm)
{
	int ret;

	cdm->num_matches = 0;

	/*
	 * Check the bus list generation.  If it has changed, the user
	 * needs to reset everything and start over.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus != NULL))
		ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
				     xptedtbusfunc, cdm);
	else
		ret = xptbustraverse(NULL, xptedtbusfunc, cdm);

	/*
	 * If we get back 0, that means that we had to stop before fully
	 * traversing the EDT.  It also means that one of the subroutines
	 * has set the status field to the proper value.  If we get back 1,
	 * we've fully traversed the EDT and copied out any matching entries.
	 */
	if (ret == 1)
		cdm->status = CAM_DEV_MATCH_LAST;

	return(ret);
}
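
/*
 * Sketch of how a consumer is expected to drive xptedtmatch() via an
 * XPT_DEV_MATCH CCB: when the match buffer fills, status is set to
 * CAM_DEV_MATCH_MORE and cdm->pos records where the traversal stopped,
 * so resubmitting the same CCB resumes the walk.  The consume_matches()
 * helper below is hypothetical; only the ccb_dev_match fields are real.
 */
#if 0
	do {
		xpt_action((union ccb *)cdm);
		consume_matches(cdm->matches, cdm->num_matches);
	} while (cdm->status == CAM_DEV_MATCH_MORE);
	/* loop ends on CAM_DEV_MATCH_LAST, _ERROR or _LIST_CHANGED */
#endif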
static int
xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
{
	struct ccb_dev_match *cdm;

	cdm = (struct ccb_dev_match *)arg;

	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
	 && (cdm->pos.cookie.pdrv == pdrv)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
	     (*pdrv)->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
	 && (cdm->pos.cookie.pdrv == pdrv)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		return(xptpdperiphtraverse(pdrv,
				(struct cam_periph *)cdm->pos.cookie.periph,
				xptplistperiphfunc, arg));
	else
		return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
}
static int
xptplistperiphfunc(struct cam_periph *periph, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this peripheral out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			struct periph_driver **pdrv;

			pdrv = NULL;
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
				CAM_DEV_POS_PERIPH;

			/*
			 * This may look a bit non-sensical, but it is
			 * actually quite logical.  There are very few
			 * peripheral drivers, and bloating every peripheral
			 * structure with a pointer back to its parent
			 * peripheral driver linker set entry would cost
			 * more in the long run than doing this quick lookup.
			 */
			for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
				if (strcmp((*pdrv)->driver_name,
				    periph->periph_name) == 0)
					break;
			}

			if (*pdrv == NULL) {
				cdm->status = CAM_DEV_MATCH_ERROR;
				return(0);
			}

			cdm->pos.cookie.pdrv = pdrv;
			/*
			 * The periph generation slot does double duty, as
			 * does the periph pointer slot.  They are used for
			 * both edt and pdrv lookups and positioning.
			 */
			cdm->pos.cookie.periph = periph;
			cdm->pos.generations[CAM_PERIPH_GENERATION] =
				(*pdrv)->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}

		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_PERIPH;
		cdm->matches[j].result.periph_result.path_id =
			periph->path->bus->path_id;

		/*
		 * The transport layer peripheral doesn't have a target or
		 * lun.
		 */
		if (periph->path->target)
			cdm->matches[j].result.periph_result.target_id =
				periph->path->target->target_id;
		else
			cdm->matches[j].result.periph_result.target_id = -1;

		if (periph->path->device)
			cdm->matches[j].result.periph_result.target_lun =
				periph->path->device->lun_id;
		else
			cdm->matches[j].result.periph_result.target_lun = -1;

		cdm->matches[j].result.periph_result.unit_number =
			periph->unit_number;
		strncpy(cdm->matches[j].result.periph_result.periph_name,
			periph->periph_name, DEV_IDLEN);
	}

	return(1);
}
static int
xptperiphlistmatch(struct ccb_dev_match *cdm)
{
	int ret;

	cdm->num_matches = 0;

	/*
	 * At this point in the edt traversal function, we check the bus
	 * list generation to make sure that no busses have been added or
	 * removed since the user last sent a XPT_DEV_MATCH ccb through.
	 * For the peripheral driver list traversal function, however, we
	 * don't have to worry about new peripheral driver types coming or
	 * going; they're in a linker set, and therefore can't change
	 * without a recompile.
	 */

	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
	 && (cdm->pos.cookie.pdrv != NULL))
		ret = xptpdrvtraverse(
				(struct periph_driver **)cdm->pos.cookie.pdrv,
				xptplistpdrvfunc, cdm);
	else
		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);

	/*
	 * If we get back 0, that means that we had to stop before fully
	 * traversing the peripheral driver tree.  It also means that one of
	 * the subroutines has set the status field to the proper value.  If
	 * we get back 1, we've fully traversed the EDT and copied out any
	 * matching entries.
	 */
	if (ret == 1)
		cdm->status = CAM_DEV_MATCH_LAST;

	return(ret);
}
static int
xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
{
	struct cam_eb *bus, *next_bus;
	int retval;

	retval = 1;

	lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
	for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
	     bus != NULL;
	     bus = next_bus) {
		next_bus = TAILQ_NEXT(bus, links);

		lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
		CAM_SIM_LOCK(bus->sim);
		retval = tr_func(bus, arg);
		CAM_SIM_UNLOCK(bus->sim);
		if (retval == 0)
			return(retval);
		lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
	}
	lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);

	return(retval);
}
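
/*
 * All of the traversal routines here share the same callback contract:
 * tr_func returns non-zero to continue the walk and 0 to terminate it
 * early; the 0 propagates out as the traversal's return value, which is
 * how the match code above reports that it stopped before the end.  A
 * minimal illustrative callback (the function itself is hypothetical):
 */
#if 0
static int
example_busfunc(struct cam_eb *bus, void *arg)
{
	kprintf("saw bus %d\n", bus->path_id);
	return (1);		/* keep traversing */
}
#endif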
static int
xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
		  xpt_targetfunc_t *tr_func, void *arg)
{
	struct cam_et *target, *next_target;
	int retval;

	retval = 1;
	for (target = (start_target ? start_target :
		       TAILQ_FIRST(&bus->et_entries));
	     target != NULL; target = next_target) {

		next_target = TAILQ_NEXT(target, links);

		retval = tr_func(target, arg);
		if (retval == 0)
			return(retval);
	}

	return(retval);
}
static int
xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
		  xpt_devicefunc_t *tr_func, void *arg)
{
	struct cam_ed *device, *next_device;
	int retval;

	retval = 1;
	for (device = (start_device ? start_device :
		       TAILQ_FIRST(&target->ed_entries));
	     device != NULL;
	     device = next_device) {

		next_device = TAILQ_NEXT(device, links);

		retval = tr_func(device, arg);
		if (retval == 0)
			return(retval);
	}

	return(retval);
}
static int
xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
		  xpt_periphfunc_t *tr_func, void *arg)
{
	struct cam_periph *periph, *next_periph;
	int retval;

	retval = 1;
	for (periph = (start_periph ? start_periph :
		       SLIST_FIRST(&device->periphs));
	     periph != NULL;
	     periph = next_periph) {

		next_periph = SLIST_NEXT(periph, periph_links);

		retval = tr_func(periph, arg);
		if (retval == 0)
			return(retval);
	}

	return(retval);
}
static int
xptpdrvtraverse(struct periph_driver **start_pdrv,
		xpt_pdrvfunc_t *tr_func, void *arg)
{
	struct periph_driver **pdrv;
	int retval;

	retval = 1;

	/*
	 * We don't traverse the peripheral driver list like we do the
	 * other lists, because it is a linker set, and therefore cannot be
	 * changed during runtime.  If the peripheral driver list is ever
	 * re-done to be something other than a linker set (i.e. it can
	 * change while the system is running), the list traversal should
	 * be modified to work like the other traversal functions.
	 */
	for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
	     *pdrv != NULL; pdrv++) {
		retval = tr_func(pdrv, arg);
		if (retval == 0)
			return(retval);
	}

	return(retval);
}
static int
xptpdperiphtraverse(struct periph_driver **pdrv,
		    struct cam_periph *start_periph,
		    xpt_periphfunc_t *tr_func, void *arg)
{
	struct cam_periph *periph, *next_periph;
	int retval;

	retval = 1;
	for (periph = (start_periph ? start_periph :
		       TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
	     periph = next_periph) {

		next_periph = TAILQ_NEXT(periph, unit_links);

		retval = tr_func(periph, arg);
		if (retval == 0)
			return(retval);
	}

	return(retval);
}
static int
xptdefbusfunc(struct cam_eb *bus, void *arg)
{
	struct xpt_traverse_config *tr_config;

	tr_config = (struct xpt_traverse_config *)arg;

	if (tr_config->depth == XPT_DEPTH_BUS) {
		xpt_busfunc_t *tr_func;

		tr_func = (xpt_busfunc_t *)tr_config->tr_func;

		return(tr_func(bus, tr_config->tr_arg));
	} else
		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
}
static int
xptdeftargetfunc(struct cam_et *target, void *arg)
{
	struct xpt_traverse_config *tr_config;

	tr_config = (struct xpt_traverse_config *)arg;

	if (tr_config->depth == XPT_DEPTH_TARGET) {
		xpt_targetfunc_t *tr_func;

		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;

		return(tr_func(target, tr_config->tr_arg));
	} else
		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
}
static int
xptdefdevicefunc(struct cam_ed *device, void *arg)
{
	struct xpt_traverse_config *tr_config;

	tr_config = (struct xpt_traverse_config *)arg;

	if (tr_config->depth == XPT_DEPTH_DEVICE) {
		xpt_devicefunc_t *tr_func;

		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;

		return(tr_func(device, tr_config->tr_arg));
	} else
		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
}
static int
xptdefperiphfunc(struct cam_periph *periph, void *arg)
{
	struct xpt_traverse_config *tr_config;
	xpt_periphfunc_t *tr_func;

	tr_config = (struct xpt_traverse_config *)arg;

	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;

	/*
	 * Unlike the other default functions, we don't check for depth
	 * here.  The peripheral driver level is the last level in the EDT,
	 * so if we're here, we should execute the function in question.
	 */
	return(tr_func(periph, tr_config->tr_arg));
}
/*
 * Execute the given function for every bus in the EDT.
 */
static int
xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
{
	struct xpt_traverse_config tr_config;

	tr_config.depth = XPT_DEPTH_BUS;
	tr_config.tr_func = tr_func;
	tr_config.tr_arg = arg;

	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
}
/*
 * Execute the given function for every device in the EDT.
 */
static int
xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
{
	struct xpt_traverse_config tr_config;

	tr_config.depth = XPT_DEPTH_DEVICE;
	tr_config.tr_func = tr_func;
	tr_config.tr_arg = arg;

	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
}
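
/*
 * Usage sketch for the wrappers above, assuming a simple device-counting
 * callback (both the callback and the counter are hypothetical):
 */
#if 0
static int
count_device(struct cam_ed *device, void *arg)
{
	(*(int *)arg)++;
	return (1);		/* continue the traversal */
}

	/* ... in the caller ... */
	int ndevs = 0;

	xpt_for_all_devices(count_device, &ndevs);
#endif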
static int
xptsetasyncfunc(struct cam_ed *device, void *arg)
{
	struct cam_path path;
	struct ccb_getdev cgd;
	struct async_node *cur_entry;

	cur_entry = (struct async_node *)arg;

	/*
	 * Don't report unconfigured devices (Wildcard devs,
	 * devices only for target mode, device instances
	 * that have been invalidated but are waiting for
	 * their last reference count to be released).
	 */
	if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
		return (1);

	xpt_compile_path(&path,
			 NULL,
			 device->target->bus->path_id,
			 device->target->target_id,
			 device->lun_id);
	xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
	xpt_action((union ccb *)&cgd);
	cur_entry->callback(cur_entry->callback_arg,
			    AC_FOUND_DEVICE,
			    &path, &cgd);
	xpt_release_path(&path);

	return(1);
}
static int
xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
{
	struct cam_path path;
	struct ccb_pathinq cpi;
	struct async_node *cur_entry;

	cur_entry = (struct async_node *)arg;

	xpt_compile_path(&path, /*periph*/NULL,
			 bus->sim->path_id,
			 CAM_TARGET_WILDCARD,
			 CAM_LUN_WILDCARD);
	xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	cur_entry->callback(cur_entry->callback_arg,
			    AC_PATH_REGISTERED,
			    &path, &cpi);
	xpt_release_path(&path);

	return(1);
}
static void
xpt_action_sasync_cb(void *context, int pending)
{
	struct async_node *cur_entry;
	struct xpt_task *task;
	u_int32_t added;

	task = (struct xpt_task *)context;
	cur_entry = (struct async_node *)task->data1;
	added = task->data2;

	if ((added & AC_FOUND_DEVICE) != 0) {
		/*
		 * Get this peripheral up to date with all
		 * the currently existing devices.
		 */
		xpt_for_all_devices(xptsetasyncfunc, cur_entry);
	}
	if ((added & AC_PATH_REGISTERED) != 0) {
		/*
		 * Get this peripheral up to date with all
		 * the currently existing busses.
		 */
		xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
	}
	kfree(task, M_CAMXPT);
}
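
/*
 * The two helpers above exist to replay "device found" and "path
 * registered" events to a client that registers after the fact.
 * Registration itself is done with an XPT_SASYNC_CB CCB; a hedged sketch
 * of what a peripheral driver typically submits (the callback and its
 * argument are hypothetical; see the XPT_SASYNC_CB case in xpt_action()
 * below for the receiving side):
 */
#if 0
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
	csa.callback = my_async_callback;
	csa.callback_arg = my_softc;
	xpt_action((union ccb *)&csa);
#endif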
void
xpt_action(union ccb *start_ccb)
{
	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));

	start_ccb->ccb_h.status = CAM_REQ_INPROG;

	switch (start_ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
	{
		struct cam_ed *device;
#ifdef CAMDEBUG
		char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
		struct cam_path *path;

		path = start_ccb->ccb_h.path;
#endif

		/*
		 * For the sake of compatibility with SCSI-1
		 * devices that may not understand the identify
		 * message, we include lun information in the
		 * second byte of all commands.  SCSI-1 specifies
		 * that luns are a 3 bit value and reserves only 3
		 * bits for lun information in the CDB.  Later
		 * revisions of the SCSI spec allow for more than 8
		 * luns, but have deprecated lun information in the
		 * CDB.  So, if the lun won't fit, we must omit.
		 *
		 * Also be aware that during initial probing for devices,
		 * the inquiry information is unknown but initialized to 0.
		 * This means that this code will be exercised while probing
		 * devices with an ANSI revision greater than 2.
		 */
		device = start_ccb->ccb_h.path->device;
		if (device->protocol_version <= SCSI_REV_2
		 && start_ccb->ccb_h.target_lun < 8
		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {

			start_ccb->csio.cdb_io.cdb_bytes[1] |=
			    start_ccb->ccb_h.target_lun << 5;
		}
		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
		CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
			  scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
				       &path->device->inq_data),
			  scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
					  cdb_str, sizeof(cdb_str))));
	}
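	/*
	 * Worked example of the lun stuffing above: for target_lun == 3,
	 * (3 << 5) is 0x60, so the OR sets the top three bits of CDB byte
	 * one to the lun while preserving the low five bits.  Three bits
	 * cannot encode luns >= 8, hence the target_lun < 8 guard.
	 */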
	/* FALLTHROUGH */
	case XPT_TARGET_IO:
	case XPT_CONT_TARGET_IO:
		start_ccb->csio.sense_resid = 0;
		start_ccb->csio.resid = 0;
		/* FALLTHROUGH */
	case XPT_RESET_DEV:
	case XPT_ENG_EXEC:
	{
		struct cam_path *path;
		struct cam_sim *sim;
		int runq;

		path = start_ccb->ccb_h.path;

		sim = path->bus->sim;
		if (sim == &cam_dead_sim) {
			/* The SIM has gone; just execute the CCB directly. */
			cam_ccbq_send_ccb(&path->device->ccbq, start_ccb);
			(*(sim->sim_action))(sim, start_ccb);
			break;
		}

		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
		if (path->device->qfrozen_cnt == 0)
			runq = xpt_schedule_dev_sendq(path->bus, path->device);
		else
			runq = 0;
		if (runq != 0)
			xpt_run_dev_sendq(path->bus);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:
	{
		xpt_set_transfer_settings(&start_ccb->cts,
					  start_ccb->ccb_h.path->device,
					  /*async_update*/FALSE);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct cam_sim *sim;

		/* Filter out garbage */
		if (start_ccb->ccg.block_size == 0
		 || start_ccb->ccg.volume_size == 0) {
			start_ccb->ccg.cylinders = 0;
			start_ccb->ccg.heads = 0;
			start_ccb->ccg.secs_per_track = 0;
			start_ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		sim = start_ccb->ccb_h.path->bus->sim;
		(*(sim->sim_action))(sim, start_ccb);
		break;
	}
	case XPT_ABORT:
	{
		union ccb* abort_ccb;

		abort_ccb = start_ccb->cab.abort_ccb;
		if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {

			if (abort_ccb->ccb_h.pinfo.index >= 0) {
				struct cam_ccbq *ccbq;

				ccbq = &abort_ccb->ccb_h.path->device->ccbq;
				cam_ccbq_remove_ccb(ccbq, abort_ccb);
				abort_ccb->ccb_h.status =
				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
				xpt_done(abort_ccb);
				start_ccb->ccb_h.status = CAM_REQ_CMP;
				break;
			}
			if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
			 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
				/*
				 * We've caught this ccb en route to
				 * the SIM.  Flag it for abort and the
				 * SIM will do so just before starting
				 * real work on the CCB.
				 */
				abort_ccb->ccb_h.status =
				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
				start_ccb->ccb_h.status = CAM_REQ_CMP;
				break;
			}
		}
		if (XPT_FC_IS_QUEUED(abort_ccb)
		 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
			/*
			 * It's already completed but waiting
			 * for our SWI to get to it.
			 */
			start_ccb->ccb_h.status = CAM_UA_ABORT;
			break;
		}
		/*
		 * If we weren't able to take care of the abort request
		 * in the XPT, pass the request down to the SIM for processing.
		 */
	}
	/* FALLTHROUGH */
	case XPT_ACCEPT_TARGET_IO:
	case XPT_EN_LUN:
	case XPT_IMMED_NOTIFY:
	case XPT_NOTIFY_ACK:
	case XPT_GET_TRAN_SETTINGS:
	case XPT_RESET_BUS:
	{
		struct cam_sim *sim;

		sim = start_ccb->ccb_h.path->bus->sim;
		(*(sim->sim_action))(sim, start_ccb);
		break;
	}
	case XPT_PATH_INQ:
	{
		struct cam_sim *sim;

		sim = start_ccb->ccb_h.path->bus->sim;
		(*(sim->sim_action))(sim, start_ccb);
		break;
	}
	case XPT_PATH_STATS:
		start_ccb->cpis.last_reset =
			start_ccb->ccb_h.path->bus->last_reset;
		start_ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_GDEV_TYPE:
	{
		struct cam_ed *dev;

		dev = start_ccb->ccb_h.path->device;
		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		} else {
			struct ccb_getdev *cgd;
			struct cam_eb *bus;
			struct cam_et *tar;

			cgd = &start_ccb->cgd;
			bus = cgd->ccb_h.path->bus;
			tar = cgd->ccb_h.path->target;
			cgd->inq_data = dev->inq_data;
			cgd->ccb_h.status = CAM_REQ_CMP;
			cgd->serial_num_len = dev->serial_num_len;
			if ((dev->serial_num_len > 0)
			 && (dev->serial_num != NULL))
				bcopy(dev->serial_num, cgd->serial_num,
				      dev->serial_num_len);
		}
		break;
	}
	case XPT_GDEV_STATS:
	{
		struct cam_ed *dev;

		dev = start_ccb->ccb_h.path->device;
		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		} else {
			struct ccb_getdevstats *cgds;
			struct cam_eb *bus;
			struct cam_et *tar;

			cgds = &start_ccb->cgds;
			bus = cgds->ccb_h.path->bus;
			tar = cgds->ccb_h.path->target;
			cgds->dev_openings = dev->ccbq.dev_openings;
			cgds->dev_active = dev->ccbq.dev_active;
			cgds->devq_openings = dev->ccbq.devq_openings;
			cgds->devq_queued = dev->ccbq.queue.entries;
			cgds->held = dev->ccbq.held;
			cgds->last_reset = tar->last_reset;
			cgds->maxtags = dev->quirk->maxtags;
			cgds->mintags = dev->quirk->mintags;
			if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
				cgds->last_reset = bus->last_reset;
			cgds->ccb_h.status = CAM_REQ_CMP;
		}
		break;
	}
	case XPT_GDEVLIST:
	{
		struct cam_periph	*nperiph;
		struct periph_list	*periph_head;
		struct ccb_getdevlist	*cgdl;
		u_int			i;
		struct cam_ed		*device;
		int			found;

		found = 0;

		/*
		 * Don't want anyone mucking with our data.
		 */
		device = start_ccb->ccb_h.path->device;
		periph_head = &device->periphs;
		cgdl = &start_ccb->cgdl;

		/*
		 * Check and see if the list has changed since the user
		 * last requested a list member.  If so, tell them that the
		 * list has changed, and therefore they need to start over
		 * from the beginning.
		 */
		if ((cgdl->index != 0) &&
		    (cgdl->generation != device->generation)) {
			cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
			break;
		}

		/*
		 * Traverse the list of peripherals and attempt to find
		 * the requested peripheral.
		 */
		for (nperiph = SLIST_FIRST(periph_head), i = 0;
		     (nperiph != NULL) && (i <= cgdl->index);
		     nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
			if (i == cgdl->index) {
				strncpy(cgdl->periph_name,
					nperiph->periph_name,
					DEV_IDLEN);
				cgdl->unit_number = nperiph->unit_number;
				found = 1;
			}
		}
		if (found == 0) {
			cgdl->status = CAM_GDEVLIST_ERROR;
			break;
		}

		if (nperiph == NULL)
			cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
		else
			cgdl->status = CAM_GDEVLIST_MORE_DEVS;

		cgdl->index++;
		cgdl->generation = device->generation;

		cgdl->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_DEV_MATCH:
	{
		dev_pos_type position_type;
		struct ccb_dev_match *cdm;
		int ret;

		cdm = &start_ccb->cdm;

		/*
		 * There are two ways of getting at information in the EDT.
		 * The first way is via the primary EDT tree.  It starts
		 * with a list of busses, then a list of targets on a bus,
		 * then devices/luns on a target, and then peripherals on a
		 * device/lun.  The "other" way is by the peripheral driver
		 * lists.  The peripheral driver lists are organized by
		 * peripheral driver.  (obviously)  So it makes sense to
		 * use the peripheral driver list if the user is looking
		 * for something like "da1", or all "da" devices.  If the
		 * user is looking for something on a particular bus/target
		 * or lun, it's generally better to go through the EDT tree.
		 */

		if (cdm->pos.position_type != CAM_DEV_POS_NONE)
			position_type = cdm->pos.position_type;
		else {
			u_int i;

			position_type = CAM_DEV_POS_NONE;

			for (i = 0; i < cdm->num_patterns; i++) {
				if ((cdm->patterns[i].type == DEV_MATCH_BUS)
				 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
					position_type = CAM_DEV_POS_EDT;
					break;
				}
			}

			if (cdm->num_patterns == 0)
				position_type = CAM_DEV_POS_EDT;
			else if (position_type == CAM_DEV_POS_NONE)
				position_type = CAM_DEV_POS_PDRV;
		}

		switch(position_type & CAM_DEV_POS_TYPEMASK) {
		case CAM_DEV_POS_EDT:
			ret = xptedtmatch(cdm);
			break;
		case CAM_DEV_POS_PDRV:
			ret = xptperiphlistmatch(cdm);
			break;
		default:
			cdm->status = CAM_DEV_MATCH_ERROR;
			break;
		}

		if (cdm->status == CAM_DEV_MATCH_ERROR)
			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		else
			start_ccb->ccb_h.status = CAM_REQ_CMP;

		break;
	}
	case XPT_SASYNC_CB:
	{
		struct ccb_setasync *csa;
		struct async_node *cur_entry;
		struct async_list *async_head;
		u_int32_t added;

		csa = &start_ccb->csa;
		added = csa->event_enable;
		async_head = &csa->ccb_h.path->device->asyncs;

		/*
		 * If there is already an entry for us, simply
		 * update it.
		 */
		cur_entry = SLIST_FIRST(async_head);
		while (cur_entry != NULL) {
			if ((cur_entry->callback_arg == csa->callback_arg)
			 && (cur_entry->callback == csa->callback))
				break;
			cur_entry = SLIST_NEXT(cur_entry, links);
		}

		if (cur_entry != NULL) {
			/*
			 * If the request has no flags set,
			 * remove the entry.
			 */
			added &= ~cur_entry->event_enable;
			if (csa->event_enable == 0) {
				SLIST_REMOVE(async_head, cur_entry,
					     async_node, links);
				csa->ccb_h.path->device->refcount--;
				kfree(cur_entry, M_CAMXPT);
			} else {
				cur_entry->event_enable = csa->event_enable;
			}
		} else {
			cur_entry = kmalloc(sizeof(*cur_entry), M_CAMXPT,
					    M_INTWAIT);
			cur_entry->event_enable = csa->event_enable;
			cur_entry->callback_arg = csa->callback_arg;
			cur_entry->callback = csa->callback;
			SLIST_INSERT_HEAD(async_head, cur_entry, links);
			csa->ccb_h.path->device->refcount++;
		}

		/*
		 * Need to decouple this operation via a taskqueue so that
		 * the locking doesn't become a mess.
		 */
		if ((added & (AC_FOUND_DEVICE | AC_PATH_REGISTERED)) != 0) {
			struct xpt_task *task;

			task = kmalloc(sizeof(struct xpt_task), M_CAMXPT,
				       M_INTWAIT);

			TASK_INIT(&task->task, 0, xpt_action_sasync_cb, task);
			task->data1 = cur_entry;
			task->data2 = added;
			taskqueue_enqueue(taskqueue_thread[mycpuid],
					  &task->task);
		}

		start_ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_REL_SIMQ:
	{
		struct ccb_relsim *crs;
		struct cam_ed *dev;

		crs = &start_ccb->crs;
		dev = crs->ccb_h.path->device;
		if (dev == NULL) {
			crs->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		}

		if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {

			if (INQ_DATA_TQ_ENABLED(&dev->inq_data)) {
				/* Don't ever go below one opening */
				if (crs->openings > 0) {
					xpt_dev_ccbq_resize(crs->ccb_h.path,
							    crs->openings);

					if (bootverbose) {
						xpt_print(crs->ccb_h.path,
						    "tagged openings now %d\n",
						    crs->openings);
					}
				}
			}
		}

		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {

			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {

				/*
				 * Just extend the old timeout and decrement
				 * the freeze count so that a single timeout
				 * is sufficient for releasing the queue.
				 */
				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
				callout_stop(&dev->callout);
			} else {

				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			}

			callout_reset(&dev->callout,
				      (crs->release_timeout * hz) / 1000,
				      xpt_release_devq_timeout, dev);

			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
		}

		if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {

			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
				/*
				 * Decrement the freeze count so that a single
				 * completion is still sufficient to unfreeze
				 * the queue.
				 */
				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
			} else {

				dev->flags |= CAM_DEV_REL_ON_COMPLETE;
				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			}
		}

		if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {

			if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
			 || (dev->ccbq.dev_active == 0)) {

				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
			} else {

				dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			}
		}

		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {

			xpt_release_devq(crs->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}
		start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
		start_ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_SCAN_BUS:
		xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
		break;
	case XPT_SCAN_LUN:
		xpt_scan_lun(start_ccb->ccb_h.path->periph,
			     start_ccb->ccb_h.path, start_ccb->crcn.flags,
			     start_ccb);
		break;
	case XPT_DEBUG: {
#ifdef CAMDEBUG
#ifdef CAM_DEBUG_DELAY
		cam_debug_delay = CAM_DEBUG_DELAY;
#endif
		cam_dflags = start_ccb->cdbg.flags;
		if (cam_dpath != NULL) {
			xpt_free_path(cam_dpath);
			cam_dpath = NULL;
		}

		if (cam_dflags != CAM_DEBUG_NONE) {
			if (xpt_create_path(&cam_dpath, xpt_periph,
					    start_ccb->ccb_h.path_id,
					    start_ccb->ccb_h.target_id,
					    start_ccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP) {
				start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
				cam_dflags = CAM_DEBUG_NONE;
			} else {
				start_ccb->ccb_h.status = CAM_REQ_CMP;
				xpt_print(cam_dpath, "debugging flags now %x\n",
					  cam_dflags);
			}
		} else {
			cam_dpath = NULL;
			start_ccb->ccb_h.status = CAM_REQ_CMP;
		}
#else /* !CAMDEBUG */
		start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
#endif /* CAMDEBUG */
		break;
	}
	case XPT_NOOP:
		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
			xpt_freeze_devq(start_ccb->ccb_h.path, 1);
		start_ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	default:
	case XPT_SDEV_TYPE:
	case XPT_TERM_IO:
	case XPT_ENG_INQ:
		/* XXX Implement */
		start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		break;
	}
}
void
xpt_polled_action(union ccb *start_ccb)
{
	u_int32_t timeout;
	struct cam_sim *sim;
	struct cam_devq *devq;
	struct cam_ed *dev;

	timeout = start_ccb->ccb_h.timeout;
	sim = start_ccb->ccb_h.path->bus->sim;
	devq = sim->devq;
	dev = start_ccb->ccb_h.path->device;

	sim_lock_assert_owned(sim->lock);

	/*
	 * Steal an opening so that no other queued requests
	 * can get it before us while we simulate interrupts.
	 */
	dev->ccbq.devq_openings--;
	dev->ccbq.dev_openings--;

	while(((devq && devq->send_openings <= 0) || dev->ccbq.dev_openings < 0)
	   && (--timeout > 0)) {
		DELAY(1000);
		(*(sim->sim_poll))(sim);
		camisr_runqueue(sim);
	}

	dev->ccbq.devq_openings++;
	dev->ccbq.dev_openings++;

	if (timeout != 0) {
		xpt_action(start_ccb);
		while(--timeout > 0) {
			(*(sim->sim_poll))(sim);
			camisr_runqueue(sim);
			if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
			    != CAM_REQ_INPROG)
				break;
			DELAY(1000);
		}
		if (timeout == 0) {
			/*
			 * XXX Is it worth adding a sim_timeout entry
			 * point so we can attempt recovery?  If
			 * this is only used for dumps, I don't think
			 * it is.
			 */
			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
		}
	} else {
		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	}
}
/*
 * Schedule a peripheral driver to receive a ccb when its
 * target device has space for more transactions.
 */
void
xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
{
	struct cam_ed *device;
	union ccb *work_ccb;
	int runq;

	sim_lock_assert_owned(perph->sim->lock);

	CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
	device = perph->path->device;
	if (periph_is_queued(perph)) {
		/* Simply reorder based on new priority */
		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
			  ("   change priority to %d\n", new_priority));
		if (new_priority < perph->pinfo.priority) {
			camq_change_priority(&device->drvq,
					     perph->pinfo.index,
					     new_priority);
		}
		runq = 0;
	} else if (perph->path->bus->sim == &cam_dead_sim) {
		/* The SIM is gone so just call periph_start directly. */
		work_ccb = xpt_get_ccb(perph->path->device);
		if (work_ccb == NULL)
			return;
		xpt_setup_ccb(&work_ccb->ccb_h, perph->path, new_priority);
		perph->pinfo.priority = new_priority;
		perph->periph_start(perph, work_ccb);
		return;
	} else {
		/* New entry on the queue */
		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
			  ("   added periph to queue\n"));
		perph->pinfo.priority = new_priority;
		perph->pinfo.generation = ++device->drvq.generation;
		camq_insert(&device->drvq, &perph->pinfo);
		runq = xpt_schedule_dev_allocq(perph->path->bus, device);
	}
	if (runq != 0) {
		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
			  ("   calling xpt_run_devq\n"));
		xpt_run_dev_allocq(perph->path->bus);
	}
}
/*
 * Schedule a device to run on a given queue.
 * If the device was inserted as a new entry on the queue,
 * return 1 meaning the device queue should be run. If we
 * were already queued, implying someone else has already
 * started the queue, return 0 so the caller doesn't attempt
 * to run the queue.
 */
static int
xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
		 u_int32_t new_priority)
{
	int retval;
	u_int32_t old_priority;

	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));

	old_priority = pinfo->priority;

	/*
	 * Are we already queued?
	 */
	if (pinfo->index != CAM_UNQUEUED_INDEX) {
		/* Simply reorder based on new priority */
		if (new_priority < old_priority) {
			camq_change_priority(queue, pinfo->index,
					     new_priority);
			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
					("changed priority to %d\n",
					 new_priority));
		}
		retval = 0;
	} else {
		/* New entry on the queue */
		if (new_priority < old_priority)
			pinfo->priority = new_priority;

		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
				("Inserting onto queue\n"));
		pinfo->generation = ++queue->generation;
		camq_insert(queue, pinfo);
		retval = 1;
	}
	return (retval);
}
static void
xpt_run_dev_allocq(struct cam_eb *bus)
{
	struct cam_devq *devq;

	if ((devq = bus->sim->devq) == NULL) {
		CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq: NULL devq\n"));
		return;
	}
	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));

	CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
			("   qfrozen_cnt == 0x%x, entries == %d, "
			 "openings == %d, active == %d\n",
			 devq->alloc_queue.qfrozen_cnt,
			 devq->alloc_queue.entries,
			 devq->alloc_openings,
			 devq->alloc_active));

	devq->alloc_queue.qfrozen_cnt++;
	while ((devq->alloc_queue.entries > 0)
	    && (devq->alloc_openings > 0)
	    && (devq->alloc_queue.qfrozen_cnt <= 1)) {
		struct	cam_ed_qinfo *qinfo;
		struct	cam_ed *device;
		union	ccb *work_ccb;
		struct	cam_periph *drv;
		struct	camq *drvq;

		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
							   CAMQ_HEAD);
		device = qinfo->device;

		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
				("running device %p\n", device));

		drvq = &device->drvq;

#ifdef CAMDEBUG
		if (drvq->entries <= 0) {
			panic("xpt_run_dev_allocq: "
			      "Device on queue without any work to do");
		}
#endif
		if ((work_ccb = xpt_get_ccb(device)) != NULL) {
			devq->alloc_openings--;
			devq->alloc_active++;
			drv = (struct cam_periph *)camq_remove(drvq, CAMQ_HEAD);
			xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
				      drv->pinfo.priority);
			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
					("calling periph start\n"));
			drv->periph_start(drv, work_ccb);
		} else {
			/*
			 * Malloc failure in alloc_ccb
			 */
			/*
			 * XXX add us to a list to be run from free_ccb
			 * if we don't have any ccbs active on this
			 * device queue otherwise we may never get run
			 * again.
			 */
			break;
		}

		if (drvq->entries > 0) {
			/* We have more work.  Attempt to reschedule */
			xpt_schedule_dev_allocq(bus, device);
		}
	}
	devq->alloc_queue.qfrozen_cnt--;
}
static void
xpt_run_dev_sendq(struct cam_eb *bus)
{
	struct cam_devq *devq;

	if ((devq = bus->sim->devq) == NULL) {
		CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq: NULL devq\n"));
		return;
	}
	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));

	devq->send_queue.qfrozen_cnt++;
	while ((devq->send_queue.entries > 0)
	    && (devq->send_openings > 0)) {
		struct	cam_ed_qinfo *qinfo;
		struct	cam_ed *device;
		union ccb *work_ccb;
		struct	cam_sim *sim;

		if (devq->send_queue.qfrozen_cnt > 1) {
			break;
		}

		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
							   CAMQ_HEAD);
		device = qinfo->device;

		/*
		 * If the device has been "frozen", don't attempt
		 * to run it.
		 */
		if (device->qfrozen_cnt > 0) {
			continue;
		}

		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
				("running device %p\n", device));

		work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
		if (work_ccb == NULL) {
			kprintf("device on run queue with no ccbs???\n");
			continue;
		}

		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {

			lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
			if (xsoftc.num_highpower <= 0) {
				/*
				 * We got a high power command, but we
				 * don't have any available slots.  Freeze
				 * the device queue until we have a slot
				 * available.
				 */
				device->qfrozen_cnt++;
				STAILQ_INSERT_TAIL(&xsoftc.highpowerq,
						   &work_ccb->ccb_h,
						   xpt_links.stqe);

				lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
				continue;
			} else {
				/*
				 * Consume a high power slot while
				 * this ccb runs.
				 */
				xsoftc.num_highpower--;
			}
			lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
		}
		devq->active_dev = device;
		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);

		cam_ccbq_send_ccb(&device->ccbq, work_ccb);

		devq->send_openings--;
		devq->send_active++;

		if (device->ccbq.queue.entries > 0)
			xpt_schedule_dev_sendq(bus, device);

		if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
			/*
			 * The client wants to freeze the queue
			 * after this CCB is sent.
			 */
			device->qfrozen_cnt++;
		}

		/* In Target mode, the peripheral driver knows best... */
		if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			if ((device->inq_flags & SID_CmdQue) != 0
			 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
				work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
			else
				/*
				 * Clear this in case of a retried CCB that
				 * failed due to a rejected tag.
				 */
				work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
		}

		/*
		 * Device queues can be shared among multiple sim instances
		 * that reside on different busses.  Use the SIM in the queue
		 * CCB's path, rather than the one in the bus that was passed
		 * into this function.
		 */
		sim = work_ccb->ccb_h.path->bus->sim;
		(*(sim->sim_action))(sim, work_ccb);

		devq->active_dev = NULL;
	}
	devq->send_queue.qfrozen_cnt--;
}
/*
 * This function merges stuff from the slave ccb into the master ccb, while
 * keeping important fields in the master ccb constant.
 */
void
xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
{
	/*
	 * Pull fields that are valid for peripheral drivers to set
	 * into the master CCB along with the CCB "payload".
	 */
	master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
	master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
	master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
	master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
	bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
	      sizeof(union ccb) - sizeof(struct ccb_hdr));
}
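
/*
 * The bcopy in xpt_merge_ccb() relies on the layout shared by every CCB
 * type: a common struct ccb_hdr followed by the function-specific payload.
 * &(&slave_ccb->ccb_h)[1] is simply the first byte past the header, i.e.
 * (char *)slave_ccb + sizeof(struct ccb_hdr), so the copy moves the whole
 * payload while the explicit assignments above it preserve the few header
 * fields a peripheral driver is allowed to set.
 */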
void
xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
{
	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
	callout_init(&ccb_h->timeout_ch);
	ccb_h->pinfo.priority = priority;
	ccb_h->path = path;
	ccb_h->path_id = path->bus->path_id;
	if (path->target)
		ccb_h->target_id = path->target->target_id;
	else
		ccb_h->target_id = CAM_TARGET_WILDCARD;
	if (path->device) {
		ccb_h->target_lun = path->device->lun_id;
		ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
	} else {
		ccb_h->target_lun = CAM_TARGET_WILDCARD;
	}
	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
	ccb_h->flags = 0;
}
/* Path manipulation functions */
cam_status
xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
		path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
{
	struct cam_path *path;
	cam_status status;

	path = kmalloc(sizeof(*path), M_CAMXPT, M_INTWAIT);
	status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP) {
		kfree(path, M_CAMXPT);
		path = NULL;
	}
	*new_path_ptr = path;
	return (status);
}
cam_status
xpt_create_path_unlocked(struct cam_path **new_path_ptr,
			 struct cam_periph *periph, path_id_t path_id,
			 target_id_t target_id, lun_id_t lun_id)
{
	struct cam_path *path;
	struct cam_eb *bus = NULL;
	cam_status status;
	int need_unlock = 0;

	path = (struct cam_path *)kmalloc(sizeof(*path), M_CAMXPT, M_WAITOK);

	if (path_id != CAM_BUS_WILDCARD) {
		bus = xpt_find_bus(path_id);
		if (bus != NULL) {
			need_unlock = 1;
			CAM_SIM_LOCK(bus->sim);
		}
	}
	status = xpt_compile_path(path, periph, path_id, target_id, lun_id);
	if (need_unlock)
		CAM_SIM_UNLOCK(bus->sim);
	if (status != CAM_REQ_CMP) {
		kfree(path, M_CAMXPT);
		path = NULL;
	}
	*new_path_ptr = path;
	return (status);
}
cam_status
xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
{
	struct cam_et *target;
	struct cam_ed *device;
	struct cam_eb *bus;
	cam_status status;

	status = CAM_REQ_CMP;	/* Completed without error */
	target = NULL;		/* Wildcarded */
	device = NULL;		/* Wildcarded */

	/*
	 * We will potentially modify the EDT, so block interrupts
	 * that may attempt to create cam paths.
	 */
	bus = xpt_find_bus(path_id);
	if (bus == NULL) {
		status = CAM_PATH_INVALID;
	} else {
		target = xpt_find_target(bus, target_id);
		if (target == NULL) {
			/* Create one */
			struct cam_et *new_target;

			new_target = xpt_alloc_target(bus, target_id);
			if (new_target == NULL) {
				status = CAM_RESRC_UNAVAIL;
			} else {
				target = new_target;
			}
		}
		if (target != NULL) {
			device = xpt_find_device(target, lun_id);
			if (device == NULL) {
				/* Create one */
				struct cam_ed *new_device;

				new_device = xpt_alloc_device(bus,
							      target,
							      lun_id);
				if (new_device == NULL) {
					status = CAM_RESRC_UNAVAIL;
				} else {
					device = new_device;
				}
			}
		}
	}

	/*
	 * Only touch the user's data if we are successful.
	 */
	if (status == CAM_REQ_CMP) {
		new_path->periph = perph;
		new_path->bus = bus;
		new_path->target = target;
		new_path->device = device;
		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
	} else {
		if (device != NULL)
			xpt_release_device(bus, target, device);
		if (target != NULL)
			xpt_release_target(bus, target);
		if (bus != NULL)
			xpt_release_bus(bus);
	}
	return (status);
}
static void
xpt_release_path(struct cam_path *path)
{
	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
	if (path->device != NULL) {
		xpt_release_device(path->bus, path->target, path->device);
		path->device = NULL;
	}
	if (path->target != NULL) {
		xpt_release_target(path->bus, path->target);
		path->target = NULL;
	}
	if (path->bus != NULL) {
		xpt_release_bus(path->bus);
		path->bus = NULL;
	}
}
void
xpt_free_path(struct cam_path *path)
{
	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
	xpt_release_path(path);
	kfree(path, M_CAMXPT);
}
/*
 * Return -1 for failure, 0 for exact match, 1 for match with wildcards
 * in path1, 2 for match with wildcards in path2.
 */
int
xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
{
	int retval = 0;

	if (path1->bus != path2->bus) {
		if (path1->bus->path_id == CAM_BUS_WILDCARD)
			retval = 1;
		else if (path2->bus->path_id == CAM_BUS_WILDCARD)
			retval = 2;
		else
			return (-1);
	}
	if (path1->target != path2->target) {
		if (path1->target->target_id == CAM_TARGET_WILDCARD) {
			if (retval == 0)
				retval = 1;
		} else if (path2->target->target_id == CAM_TARGET_WILDCARD)
			retval = 2;
		else
			return (-1);
	}
	if (path1->device != path2->device) {
		if (path1->device->lun_id == CAM_LUN_WILDCARD) {
			if (retval == 0)
				retval = 1;
		} else if (path2->device->lun_id == CAM_LUN_WILDCARD)
			retval = 2;
		else
			return (-1);
	}
	return (retval);
}
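
/*
 * Illustrative decode of xpt_path_comp() results, for instance when
 * checking whether an async event's path overlaps a registered wildcard
 * path:
 *
 *	switch (xpt_path_comp(event_path, periph_path)) {
 *	case -1:	// no overlap, ignore the event
 *	case  0:	// exactly the same bus/target/lun
 *	case  1:	// event_path wildcards cover periph_path
 *	case  2:	// periph_path wildcards cover event_path
 *	}
 */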
void
xpt_print_path(struct cam_path *path)
{
	if (path == NULL)
		kprintf("(nopath): ");
	else {
		if (path->periph != NULL)
			kprintf("(%s%d:", path->periph->periph_name,
				path->periph->unit_number);
		else
			kprintf("(noperiph:");

		if (path->bus != NULL)
			kprintf("%s%d:%d:", path->bus->sim->sim_name,
				path->bus->sim->unit_number,
				path->bus->sim->bus_id);
		else
			kprintf("nobus:");

		if (path->target != NULL)
			kprintf("%d:", path->target->target_id);
		else
			kprintf("X:");

		if (path->device != NULL)
			kprintf("%d): ", path->device->lun_id);
		else
			kprintf("X): ");
	}
}
void
xpt_print(struct cam_path *path, const char *fmt, ...)
{
	__va_list ap;

	xpt_print_path(path);
	__va_start(ap, fmt);
	kvprintf(fmt, ap);
	__va_end(ap);
}
int
xpt_path_string(struct cam_path *path, char *str, size_t str_len)
{
	struct sbuf sb;

	sim_lock_assert_owned(path->bus->sim->lock);

	sbuf_new(&sb, str, str_len, 0);

	if (path == NULL)
		sbuf_printf(&sb, "(nopath): ");
	else {
		if (path->periph != NULL)
			sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
				    path->periph->unit_number);
		else
			sbuf_printf(&sb, "(noperiph:");

		if (path->bus != NULL)
			sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
				    path->bus->sim->unit_number,
				    path->bus->sim->bus_id);
		else
			sbuf_printf(&sb, "nobus:");

		if (path->target != NULL)
			sbuf_printf(&sb, "%d:", path->target->target_id);
		else
			sbuf_printf(&sb, "X:");

		if (path->device != NULL)
			sbuf_printf(&sb, "%d): ", path->device->lun_id);
		else
			sbuf_printf(&sb, "X): ");
	}
	sbuf_finish(&sb);

	return(sbuf_len(&sb));
}
path_id_t
xpt_path_path_id(struct cam_path *path)
{
	sim_lock_assert_owned(path->bus->sim->lock);

	return(path->bus->path_id);
}
target_id_t
xpt_path_target_id(struct cam_path *path)
{
	sim_lock_assert_owned(path->bus->sim->lock);

	if (path->target != NULL)
		return (path->target->target_id);
	else
		return (CAM_TARGET_WILDCARD);
}
lun_id_t
xpt_path_lun_id(struct cam_path *path)
{
	sim_lock_assert_owned(path->bus->sim->lock);

	if (path->device != NULL)
		return (path->device->lun_id);
	else
		return (CAM_LUN_WILDCARD);
}
struct cam_sim *
xpt_path_sim(struct cam_path *path)
{
	return (path->bus->sim);
}
struct cam_periph *
xpt_path_periph(struct cam_path *path)
{
	sim_lock_assert_owned(path->bus->sim->lock);

	return (path->periph);
}
/*
 * Release a CAM control block for the caller.  Remit the cost of the structure
 * to the device referenced by the path.  If this device had no 'credits'
 * and peripheral drivers have registered async callbacks for this notification
 * call them now.
 */
void
xpt_release_ccb(union ccb *free_ccb)
{
	struct cam_path *path;
	struct cam_ed *device;
	struct cam_eb *bus;
	struct cam_sim *sim;

	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
	path = free_ccb->ccb_h.path;
	device = path->device;
	bus = path->bus;
	sim = bus->sim;

	sim_lock_assert_owned(sim->lock);

	cam_ccbq_release_opening(&device->ccbq);
	if (sim->ccb_count > sim->max_ccbs) {
		xpt_free_ccb(free_ccb);
		sim->ccb_count--;
	} else if (sim == &cam_dead_sim) {
		xpt_free_ccb(free_ccb);
	} else {
		SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h,
				  xpt_links.sle);
	}
	if (sim->devq == NULL) {
		return;
	}
	sim->devq->alloc_openings++;
	sim->devq->alloc_active--;
	/* XXX Turn this into an inline function - xpt_run_device?? */
	if ((device_is_alloc_queued(device) == 0)
	 && (device->drvq.entries > 0)) {
		xpt_schedule_dev_allocq(bus, device);
	}
	if (dev_allocq_is_runnable(sim->devq))
		xpt_run_dev_allocq(bus);
}
/* Functions accessed by SIM drivers */

/*
 * A sim structure, listing the SIM entry points and instance
 * identification info is passed to xpt_bus_register to hook the SIM
 * into the CAM framework.  xpt_bus_register creates a cam_eb entry
 * for this new bus and places it in the array of busses and assigns
 * it a path_id.  The path_id may be influenced by "hard wiring"
 * information specified by the user.  Once interrupt services are
 * available, the bus will be probed.
 */
int32_t
xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
{
	struct cam_eb *new_bus;
	struct cam_eb *old_bus;
	struct ccb_pathinq cpi;

	sim_lock_assert_owned(sim->lock);

	sim->bus_id = bus;
	new_bus = kmalloc(sizeof(*new_bus), M_CAMXPT, M_INTWAIT);

	if (strcmp(sim->sim_name, "xpt") != 0) {
		sim->path_id =
		    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
	}

	TAILQ_INIT(&new_bus->et_entries);
	new_bus->path_id = sim->path_id;
	new_bus->sim = sim;
	timevalclear(&new_bus->last_reset);
	new_bus->flags = 0;
	new_bus->refcount = 1;	/* Held until a bus_deregister event */
	new_bus->generation = 0;
	lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
	old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
	while (old_bus != NULL
	    && old_bus->path_id < new_bus->path_id)
		old_bus = TAILQ_NEXT(old_bus, links);
	if (old_bus != NULL)
		TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
	else
		TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
	xsoftc.bus_generation++;
	lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);

	/* Notify interested parties */
	if (sim->path_id != CAM_XPT_PATH_ID) {
		struct cam_path path;

		xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
		cpi.ccb_h.func_code = XPT_PATH_INQ;
		xpt_action((union ccb *)&cpi);
		xpt_async(AC_PATH_REGISTERED, &path, &cpi);
		xpt_release_path(&path);
	}
	return (CAM_SUCCESS);
}
/*
 * Deregister a bus.  We must clean out all transactions pending on the bus.
 * This routine is typically called prior to cam_sim_free() (e.g. see
 * dev/usbmisc/umass/umass.c)
 */
int32_t
xpt_bus_deregister(path_id_t pathid)
{
	struct cam_path bus_path;
	struct cam_et *target;
	struct cam_ed *device;
	struct cam_ed_qinfo *qinfo;
	struct cam_devq *devq;
	struct cam_periph *periph;
	struct cam_sim *ccbsim;
	union ccb *work_ccb;
	cam_status status;
	int retries = 0;

	status = xpt_compile_path(&bus_path, NULL, pathid,
				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP)
		return (status);

	/*
	 * This should clear out all pending requests and timeouts, but
	 * the ccb's may be queued to a software interrupt.
	 *
	 * XXX AC_LOST_DEVICE does not precisely abort the pending requests,
	 * and it really ought to.
	 */
	xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
	xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);

	/*
	 * Mark the SIM as having been deregistered.  This prevents
	 * certain operations from re-queueing to it, stops new devices
	 * from being added, etc.
	 */
again:
	devq = bus_path.bus->sim->devq;
	ccbsim = bus_path.bus->sim;
	ccbsim->flags |= CAM_SIM_DEREGISTERED;

	/*
	 * Execute any pending operations now.
	 */
	while ((qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
			CAMQ_HEAD)) != NULL ||
	       (qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
			CAMQ_HEAD)) != NULL) {
		do {
			device = qinfo->device;
			work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
			if (work_ccb != NULL) {
				devq->active_dev = device;
				cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
				cam_ccbq_send_ccb(&device->ccbq, work_ccb);
				(*(ccbsim->sim_action))(ccbsim, work_ccb);
			}

			periph = (struct cam_periph *)camq_remove(&device->drvq,
								  CAMQ_HEAD);
			if (periph != NULL)
				xpt_schedule(periph, periph->pinfo.priority);
		} while (work_ccb != NULL || periph != NULL);
	}

	/*
	 * Make sure all completed CCBs are processed.
	 */
	while (!TAILQ_EMPTY(&ccbsim->sim_doneq)) {
		camisr_runqueue(ccbsim);
	}

	/*
	 * Check for requeues, reissues asyncs if necessary
	 */
	if (CAMQ_GET_HEAD(&devq->send_queue))
		kprintf("camq: devq send_queue still in use\n");
	if (CAMQ_GET_HEAD(&devq->alloc_queue))
		kprintf("camq: devq alloc_queue still in use\n");
	if (CAMQ_GET_HEAD(&devq->send_queue) ||
	    CAMQ_GET_HEAD(&devq->alloc_queue)) {
		if (++retries < 5) {
			xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
			xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
			goto again;
		}
	}

	/*
	 * Retarget the bus and all cached sim pointers to dead_sim.
	 *
	 * Various CAM subsystems may be holding on to targets, devices,
	 * and/or peripherals and may attempt to use the sim pointer cached
	 * in some of these structures during close.
	 */
	bus_path.bus->sim = &cam_dead_sim;
	TAILQ_FOREACH(target, &bus_path.bus->et_entries, links) {
		TAILQ_FOREACH(device, &target->ed_entries, links) {
			device->sim = &cam_dead_sim;
			SLIST_FOREACH(periph, &device->periphs, periph_links) {
				periph->sim = &cam_dead_sim;
			}
		}
	}

	/*
	 * Repeat the async's for the benefit of any new devices, such as
	 * might be created from completed probes.  Any new device
	 * ops will run on dead_sim.
	 *
	 * XXX There are probably races :-(
	 */
	CAM_SIM_LOCK(&cam_dead_sim);
	xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
	xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
	CAM_SIM_UNLOCK(&cam_dead_sim);

	/* Release the reference count held while registered. */
	xpt_release_bus(bus_path.bus);
	xpt_release_path(&bus_path);

	return (CAM_REQ_CMP);
}
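
/*
 * Find the lowest path id not already taken by a registered bus and not
 * reserved by an "scbus" wiring hint for a bus that may register later.
 */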
static path_id_t
xptnextfreepathid(void)
{
	struct cam_eb *bus;
	path_id_t pathid;
	const char *strval;

	pathid = 0;
	lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
	bus = TAILQ_FIRST(&xsoftc.xpt_busses);
retry:
	/* Find an unoccupied pathid */
	while (bus != NULL && bus->path_id <= pathid) {
		if (bus->path_id == pathid)
			pathid++;
		bus = TAILQ_NEXT(bus, links);
	}
	lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);

	/*
	 * Ensure that this pathid is not reserved for
	 * a bus that may be registered in the future.
	 */
	if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
		++pathid;
		/* Start the search over */
		lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
		goto retry;
	}
	return (pathid);
}
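
/*
 * Map a SIM instance (name/unit/bus) to a path id, honoring any "scbus"
 * wiring hints; fall back to dynamic assignment when unwired or ambiguous.
 */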
static path_id_t
xptpathid(const char *sim_name, int sim_unit, int sim_bus)
{
	path_id_t pathid;
	int i, dunit, val;
	char buf[32];

	pathid = CAM_XPT_PATH_ID;
	ksnprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
	i = -1;
	while ((i = resource_query_string(i, "at", buf)) != -1) {
		if (strcmp(resource_query_name(i), "scbus")) {
			/* Avoid a bit of foot shooting. */
			continue;
		}
		dunit = resource_query_unit(i);
		if (dunit < 0)		/* unwired?! */
			continue;
		if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
			if (sim_bus == val) {
				pathid = dunit;
				break;
			}
		} else if (sim_bus == 0) {
			/* Unspecified matches bus 0 */
			pathid = dunit;
			break;
		} else {
			kprintf("Ambiguous scbus configuration for %s%d "
			       "bus %d, cannot wire down.  The kernel "
			       "config entry for scbus%d should "
			       "specify a controller bus.\n"
			       "Scbus will be assigned dynamically.\n",
			       sim_name, sim_unit, sim_bus, dunit);
			break;
		}
	}

	if (pathid == CAM_XPT_PATH_ID)
		pathid = xptnextfreepathid();
	return (pathid);
}
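
/*
 * Broadcast an async event to every target and device matching the given
 * (possibly wildcarded) path, and to clients registered for all events.
 */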
void
xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
{
	struct cam_eb *bus;
	struct cam_et *target, *next_target;
	struct cam_ed *device, *next_device;

	sim_lock_assert_owned(path->bus->sim->lock);

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));

	/*
	 * Most async events come from a CAM interrupt context.  In
	 * a few cases, the error recovery code at the peripheral layer,
	 * which may run from our SWI or a process context, may signal
	 * deferred events with a call to xpt_async.
	 */

	bus = path->bus;

	if (async_code == AC_BUS_RESET) {
		/* Update our notion of when the last reset occurred */
		microuptime(&bus->last_reset);
	}

	for (target = TAILQ_FIRST(&bus->et_entries);
	     target != NULL;
	     target = next_target) {

		next_target = TAILQ_NEXT(target, links);

		if (path->target != target
		 && path->target->target_id != CAM_TARGET_WILDCARD
		 && target->target_id != CAM_TARGET_WILDCARD)
			continue;

		if (async_code == AC_SENT_BDR) {
			/* Update our notion of when the last reset occurred */
			microuptime(&path->target->last_reset);
		}

		for (device = TAILQ_FIRST(&target->ed_entries);
		     device != NULL;
		     device = next_device) {

			next_device = TAILQ_NEXT(device, links);

			if (path->device != device
			 && path->device->lun_id != CAM_LUN_WILDCARD
			 && device->lun_id != CAM_LUN_WILDCARD)
				continue;

			xpt_dev_async(async_code, bus, target,
				      device, async_arg);

			xpt_async_bcast(&device->asyncs, async_code,
					path, async_arg);
		}
	}

	/*
	 * If this wasn't a fully wildcarded async, tell all
	 * clients that want all async events.
	 */
	if (bus != xpt_periph->path->bus)
		xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
				path, async_arg);
}
static void
xpt_async_bcast(struct async_list *async_head,
		u_int32_t async_code,
		struct cam_path *path, void *async_arg)
{
	struct async_node *cur_entry;

	cur_entry = SLIST_FIRST(async_head);
	while (cur_entry != NULL) {
		struct async_node *next_entry;
		/*
		 * Grab the next list entry before we call the current
		 * entry's callback.  This is because the callback function
		 * can delete its async callback entry.
		 */
		next_entry = SLIST_NEXT(cur_entry, links);
		if ((cur_entry->event_enable & async_code) != 0)
			cur_entry->callback(cur_entry->callback_arg,
					    async_code, path,
					    async_arg);
		cur_entry = next_entry;
	}
}
/*
 * Handle any per-device event notifications that require action by the XPT.
 */
static void
xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
	      struct cam_ed *device, void *async_arg)
{
	cam_status status;
	struct cam_path newpath;

	/*
	 * We only need to handle events for real devices.
	 */
	if (target->target_id == CAM_TARGET_WILDCARD
	 || device->lun_id == CAM_LUN_WILDCARD)
		return;

	/*
	 * We need our own path with wildcards expanded to
	 * handle certain types of events.
	 */
	if ((async_code == AC_SENT_BDR)
	 || (async_code == AC_BUS_RESET)
	 || (async_code == AC_INQ_CHANGED))
		status = xpt_compile_path(&newpath, NULL,
					  bus->path_id,
					  target->target_id,
					  device->lun_id);
	else
		status = CAM_REQ_CMP_ERR;

	if (status == CAM_REQ_CMP) {

		/*
		 * Allow transfer negotiation to occur in a
		 * tag free environment.
		 */
		if (async_code == AC_SENT_BDR
		 || async_code == AC_BUS_RESET)
			xpt_toggle_tags(&newpath);

		if (async_code == AC_INQ_CHANGED) {
			/*
			 * We've sent a start unit command, or
			 * something similar to a device that
			 * may have caused its inquiry data to
			 * change. So we re-scan the device to
			 * refresh the inquiry data for it.
			 */
			xpt_scan_lun(newpath.periph, &newpath,
				     CAM_EXPECT_INQ_CHANGE, NULL);
		}
		xpt_release_path(&newpath);
	} else if (async_code == AC_LOST_DEVICE) {
		/*
		 * When we lose a device the device may be about to detach
		 * the sim, we have to clear out all pending timeouts and
		 * requests before that happens.  XXX it would be nice if
		 * we could abort the requests pertaining to the device.
		 */
		xpt_release_devq_timeout(device);
		if ((device->flags & CAM_DEV_UNCONFIGURED) == 0) {
			device->flags |= CAM_DEV_UNCONFIGURED;
			xpt_release_device(bus, target, device);
		}
	} else if (async_code == AC_TRANSFER_NEG) {
		struct ccb_trans_settings *settings;

		settings = (struct ccb_trans_settings *)async_arg;
		xpt_set_transfer_settings(settings, device,
					  /*async_update*/TRUE);
	}
}
u_int32_t
xpt_freeze_devq(struct cam_path *path, u_int count)
{
	struct ccb_hdr *ccbh;

	sim_lock_assert_owned(path->bus->sim->lock);

	path->device->qfrozen_cnt += count;

	/*
	 * Mark the last CCB in the queue as needing
	 * to be requeued if the driver hasn't
	 * changed its state yet.  This fixes a race
	 * where a ccb is just about to be queued to
	 * a controller driver when its interrupt routine
	 * freezes the queue.  To completely close the
	 * hole, controller drivers must check to see
	 * if a ccb's status is still CAM_REQ_INPROG
	 * just before they queue
	 * the CCB.  See ahc_action/ahc_freeze_devq for
	 * an example.
	 */
	ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
	if (ccbh && ccbh->status == CAM_REQ_INPROG)
		ccbh->status = CAM_REQUEUE_REQ;

	return (path->device->qfrozen_cnt);
}
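
/*
 * Freeze the SIM-wide send queue; the bus-wide analogue of
 * xpt_freeze_devq() above, including the same CAM_REQ_INPROG race fix.
 */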
u_int32_t
xpt_freeze_simq(struct cam_sim *sim, u_int count)
{
	sim_lock_assert_owned(sim->lock);

	if (sim->devq == NULL)
		return (count);
	sim->devq->send_queue.qfrozen_cnt += count;
	if (sim->devq->active_dev != NULL) {
		struct ccb_hdr *ccbh;

		ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
				  ccb_hdr_tailq);
		if (ccbh && ccbh->status == CAM_REQ_INPROG)
			ccbh->status = CAM_REQUEUE_REQ;
	}
	return (sim->devq->send_queue.qfrozen_cnt);
}
/*
 * WARNING: most devices, especially USB/UMASS, may detach their sim early.
 * We ref-count the sim (and the bus only NULLs it out when the bus has been
 * freed, which is not the case here), but the device queue is also freed XXX
 * and we have to check that here.
 *
 * XXX fixme: could we simply not null-out the device queue via
 * cam_sim_free()?
 */
static void
xpt_release_devq_timeout(void *arg)
{
	struct cam_ed *device;

	device = (struct cam_ed *)arg;

	xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
}
void
xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
{
	sim_lock_assert_owned(path->bus->sim->lock);

	xpt_release_devq_device(path->device, count, run_queue);
}

static void
xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
{
	int rundevq;

	rundevq = 0;

	if (dev->qfrozen_cnt > 0) {

		count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
		dev->qfrozen_cnt -= count;
		if (dev->qfrozen_cnt == 0) {

			/*
			 * No longer need to wait for a successful
			 * command completion.
			 */
			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;

			/*
			 * Remove any timeouts that might be scheduled
			 * to release this queue.
			 */
			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
				callout_stop(&dev->callout);
				dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
			}

			/*
			 * Now that we are unfrozen schedule the
			 * device so any pending transactions are
			 * run.
			 */
			if ((dev->ccbq.queue.entries > 0)
			 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
			 && (run_queue != 0)) {
				rundevq = 1;
			}
		}
	}
	if (rundevq != 0)
		xpt_run_dev_sendq(dev->target->bus);
}
void
xpt_release_simq(struct cam_sim *sim, int run_queue)
{
	struct camq *sendq;

	sim_lock_assert_owned(sim->lock);

	if (sim->devq == NULL)
		return;

	sendq = &(sim->devq->send_queue);
	if (sendq->qfrozen_cnt > 0) {
		sendq->qfrozen_cnt--;
		if (sendq->qfrozen_cnt == 0) {
			struct cam_eb *bus;

			/*
			 * If there is a timeout scheduled to release this
			 * sim queue, remove it.  The queue frozen count is
			 * already at 0.
			 */
			if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
				callout_stop(&sim->callout);
				sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
			}
			bus = xpt_find_bus(sim->path_id);

			if (run_queue) {
				/*
				 * Now that we are unfrozen run the send queue.
				 */
				xpt_run_dev_sendq(bus);
			}
			xpt_release_bus(bus);
		}
	}
}
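
/*
 * Completion entry point called by SIMs.  Queued (non-immediate) CCBs are
 * placed on the sim's done queue and handed to the CAM SWI for processing.
 */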
void
xpt_done(union ccb *done_ccb)
{
	struct cam_sim *sim;

	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
		/*
		 * Queue up the request for handling by our SWI handler
		 * any of the "non-immediate" type of ccbs.
		 */
		sim = done_ccb->ccb_h.path->bus->sim;
		switch (done_ccb->ccb_h.path->periph->type) {
		case CAM_PERIPH_BIO:
			spin_lock_wr(&sim->sim_spin);
			TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h,
					  sim_links.tqe);
			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
			spin_unlock_wr(&sim->sim_spin);
			if ((sim->flags & CAM_SIM_ON_DONEQ) == 0) {
				spin_lock_wr(&cam_simq_spin);
				if ((sim->flags & CAM_SIM_ON_DONEQ) == 0) {
					TAILQ_INSERT_TAIL(&cam_simq, sim,
							  links);
					sim->flags |= CAM_SIM_ON_DONEQ;
				}
				spin_unlock_wr(&cam_simq_spin);
			}
			if ((done_ccb->ccb_h.path->periph->flags &
			    CAM_PERIPH_POLLED) == 0)
				setsoftcambio();
			break;
		default:
			panic("unknown periph type %d",
			      done_ccb->ccb_h.path->periph->type);
		}
	}
}
union ccb *
xpt_alloc_ccb(void)
{
	union ccb *new_ccb;

	new_ccb = kmalloc(sizeof(*new_ccb), M_CAMXPT, M_INTWAIT | M_ZERO);
	return (new_ccb);
}

void
xpt_free_ccb(union ccb *free_ccb)
{
	kfree(free_ccb, M_CAMXPT);
}
/* Private XPT functions */

/*
 * Get a CAM control block for the caller. Charge the structure to the device
 * referenced by the path.  If this device has no 'credits' then the
 * device already has the maximum number of outstanding operations under way
 * and we return NULL. If we don't have sufficient resources to allocate more
 * ccbs, we also return NULL.
 */
static union ccb *
xpt_get_ccb(struct cam_ed *device)
{
	union ccb *new_ccb;
	struct cam_sim *sim;

	sim = device->sim;
	if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) {
		new_ccb = xpt_alloc_ccb();
		if ((sim->flags & CAM_SIM_MPSAFE) == 0)
			callout_init(&new_ccb->ccb_h.timeout_ch);
		SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h,
				  xpt_links.sle);
	}
	cam_ccbq_take_opening(&device->ccbq);
	SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
	return (new_ccb);
}
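
/*
 * Drop a reference on a bus.  The bus is unlinked from the topology and
 * freed once the last reference goes away and it has no remaining targets.
 */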
void
xpt_release_bus(struct cam_eb *bus)
{
	if ((--bus->refcount == 0)
	 && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
		lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
		TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
		xsoftc.bus_generation++;
		lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
		kfree(bus, M_CAMXPT);
	}
}
static struct cam_et *
xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
{
	struct cam_et *target;
	struct cam_et *cur_target;

	target = kmalloc(sizeof(*target), M_CAMXPT, M_INTWAIT);

	TAILQ_INIT(&target->ed_entries);
	target->bus = bus;
	target->target_id = target_id;
	target->refcount = 1;
	target->generation = 0;
	timevalclear(&target->last_reset);
	/*
	 * Hold a reference to our parent bus so it
	 * will not go away before we do.
	 */
	bus->refcount++;

	/* Insertion sort into our bus's target list */
	cur_target = TAILQ_FIRST(&bus->et_entries);
	while (cur_target != NULL && cur_target->target_id < target_id)
		cur_target = TAILQ_NEXT(cur_target, links);

	if (cur_target != NULL) {
		TAILQ_INSERT_BEFORE(cur_target, target, links);
	} else {
		TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
	}
	bus->generation++;
	return (target);
}
static void
xpt_release_target(struct cam_eb *bus, struct cam_et *target)
{
	if (target->refcount == 1) {
		KKASSERT(TAILQ_FIRST(&target->ed_entries) == NULL);
		TAILQ_REMOVE(&bus->et_entries, target, links);
		bus->generation++;
		xpt_release_bus(bus);
		KKASSERT(target->refcount == 1);
		kfree(target, M_CAMXPT);
	} else {
		--target->refcount;
	}
}
static struct cam_ed *
xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
{
	struct cam_path path;
	struct cam_ed *device;
	struct cam_devq *devq;
	cam_status status;

	/*
	 * Disallow new devices while trying to deregister a sim
	 */
	if (bus->sim->flags & CAM_SIM_DEREGISTERED)
		return (NULL);

	/*
	 * Make space for us in the device queue on our bus
	 */
	devq = bus->sim->devq;
	if (devq == NULL)
		return (NULL);
	status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);

	if (status != CAM_REQ_CMP) {
		device = NULL;
	} else {
		device = kmalloc(sizeof(*device), M_CAMXPT, M_INTWAIT);
	}

	if (device != NULL) {
		struct cam_ed *cur_device;

		cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
		device->alloc_ccb_entry.device = device;
		cam_init_pinfo(&device->send_ccb_entry.pinfo);
		device->send_ccb_entry.device = device;
		device->target = target;
		device->lun_id = lun_id;
		device->sim = bus->sim;
		/* Initialize our queues */
		if (camq_init(&device->drvq, 0) != 0) {
			kfree(device, M_CAMXPT);
			return (NULL);
		}
		if (cam_ccbq_init(&device->ccbq,
				  bus->sim->max_dev_openings) != 0) {
			camq_fini(&device->drvq);
			kfree(device, M_CAMXPT);
			return (NULL);
		}
		SLIST_INIT(&device->asyncs);
		SLIST_INIT(&device->periphs);
		device->generation = 0;
		device->owner = NULL;
		/*
		 * Take the default quirk entry until we have inquiry
		 * data and can determine a better quirk to use.
		 */
		device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
		bzero(&device->inq_data, sizeof(device->inq_data));
		device->inq_flags = 0;
		device->queue_flags = 0;
		device->serial_num = NULL;
		device->serial_num_len = 0;
		device->qfrozen_cnt = 0;
		device->flags = CAM_DEV_UNCONFIGURED;
		device->tag_delay_count = 0;
		device->tag_saved_openings = 0;
		device->refcount = 1;
		callout_init(&device->callout);

		/*
		 * Hold a reference to our parent target so it
		 * will not go away before we do.
		 */
		target->refcount++;

		/*
		 * XXX should be limited by number of CCBs this bus can
		 * do.
		 */
		bus->sim->max_ccbs += device->ccbq.devq_openings;
		/* Insertion sort into our target's device list */
		cur_device = TAILQ_FIRST(&target->ed_entries);
		while (cur_device != NULL && cur_device->lun_id < lun_id)
			cur_device = TAILQ_NEXT(cur_device, links);
		if (cur_device != NULL) {
			TAILQ_INSERT_BEFORE(cur_device, device, links);
		} else {
			TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
		}
		target->generation++;
		if (lun_id != CAM_LUN_WILDCARD) {
			xpt_compile_path(&path,
					 NULL,
					 bus->path_id,
					 target->target_id,
					 lun_id);
			xpt_devise_transport(&path);
			xpt_release_path(&path);
		}
	}
	return (device);
}
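
/*
 * Device reference counting.  Releasing the last reference tears down the
 * device's queues, shrinks the devq, and drops the reference on its target.
 */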
static void
xpt_reference_device(struct cam_ed *device)
{
	++device->refcount;
}

static void
xpt_release_device(struct cam_eb *bus, struct cam_et *target,
		   struct cam_ed *device)
{
	struct cam_devq *devq;

	if (device->refcount == 1) {
		KKASSERT(device->flags & CAM_DEV_UNCONFIGURED);

		if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
		 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
			panic("Removing device while still queued for ccbs");

		if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
			device->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
			callout_stop(&device->callout);
		}

		TAILQ_REMOVE(&target->ed_entries, device, links);
		target->generation++;
		bus->sim->max_ccbs -= device->ccbq.devq_openings;
		if ((devq = bus->sim->devq) != NULL) {
			/* Release our slot in the devq */
			cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
		}
		camq_fini(&device->drvq);
		camq_fini(&device->ccbq.queue);
		xpt_release_target(bus, target);
		KKASSERT(device->refcount == 1);
		kfree(device, M_CAMXPT);
	} else {
		--device->refcount;
	}
}
static u_int32_t
xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
{
	int diff;
	int result;
	struct cam_ed *dev;

	dev = path->device;

	diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
	result = cam_ccbq_resize(&dev->ccbq, newopenings);
	if (result == CAM_REQ_CMP && (diff < 0)) {
		dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
	}
	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
	 || (dev->inq_flags & SID_CmdQue) != 0)
		dev->tag_saved_openings = newopenings;
	/* Adjust the global limit */
	dev->sim->max_ccbs += diff;
	return (result);
}
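
/*
 * Topology lookup helpers.  Walk the bus, target, and device lists looking
 * for a matching id; the bus list walk is protected by the topology lock.
 */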
static struct cam_eb *
xpt_find_bus(path_id_t path_id)
{
	struct cam_eb *bus;

	lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
	TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
		if (bus->path_id == path_id) {
			bus->refcount++;
			break;
		}
	}
	lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
	return (bus);
}
static struct cam_et *
xpt_find_target(struct cam_eb *bus, target_id_t target_id)
{
	struct cam_et *target;

	TAILQ_FOREACH(target, &bus->et_entries, links) {
		if (target->target_id == target_id) {
			break;
		}
	}
	return (target);
}
static struct cam_ed *
xpt_find_device(struct cam_et *target, lun_id_t lun_id)
{
	struct cam_ed *device;

	TAILQ_FOREACH(device, &target->ed_entries, links) {
		if (device->lun_id == lun_id) {
			break;
		}
	}
	return (device);
}
typedef struct {
	union	ccb *request_ccb;
	struct	ccb_pathinq *cpi;
	int	counter;
} xpt_scan_bus_info;
/*
 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
 * As the scan progresses, xpt_scan_bus is used as the
 * callback on completion function.
 */
static void
xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
{
	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
		  ("xpt_scan_bus\n"));
	switch (request_ccb->ccb_h.func_code) {
	case XPT_SCAN_BUS:
	{
		xpt_scan_bus_info *scan_info;
		union	ccb *work_ccb;
		struct	cam_path *path;
		u_int	i;
		u_int	max_target;
		u_int	initiator_id;

		/* Find out the characteristics of the bus */
		work_ccb = xpt_alloc_ccb();
		xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
			      request_ccb->ccb_h.pinfo.priority);
		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
		xpt_action(work_ccb);
		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
			request_ccb->ccb_h.status = work_ccb->ccb_h.status;
			xpt_free_ccb(work_ccb);
			xpt_done(request_ccb);
			return;
		}

		if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
			/*
			 * Can't scan the bus on an adapter that
			 * cannot perform the initiator role.
			 */
			request_ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_free_ccb(work_ccb);
			xpt_done(request_ccb);
			return;
		}

		/* Save some state for use while we probe for devices */
		scan_info = (xpt_scan_bus_info *)
		    kmalloc(sizeof(xpt_scan_bus_info), M_CAMXPT, M_INTWAIT);
		scan_info->request_ccb = request_ccb;
		scan_info->cpi = &work_ccb->cpi;

		/* Cache on our stack so we can work asynchronously */
		max_target = scan_info->cpi->max_target;
		initiator_id = scan_info->cpi->initiator_id;

		/*
		 * We can scan all targets in parallel, or do it sequentially.
		 */
		if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
			max_target = 0;
			scan_info->counter = 0;
		} else {
			scan_info->counter = scan_info->cpi->max_target + 1;
			if (scan_info->cpi->initiator_id < scan_info->counter) {
				scan_info->counter--;
			}
		}

		for (i = 0; i <= max_target; i++) {
			cam_status status;
			if (i == initiator_id)
				continue;

			status = xpt_create_path(&path, xpt_periph,
						 request_ccb->ccb_h.path_id,
						 i, 0);
			if (status != CAM_REQ_CMP) {
				kprintf("xpt_scan_bus: xpt_create_path failed"
				       " with status %#x, bus scan halted\n",
				       status);
				kfree(scan_info, M_CAMXPT);
				request_ccb->ccb_h.status = status;
				xpt_free_ccb(work_ccb);
				xpt_done(request_ccb);
				break;
			}
			work_ccb = xpt_alloc_ccb();
			xpt_setup_ccb(&work_ccb->ccb_h, path,
				      request_ccb->ccb_h.pinfo.priority);
			work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
			work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
			work_ccb->ccb_h.ppriv_ptr0 = scan_info;
			work_ccb->crcn.flags = request_ccb->crcn.flags;
			xpt_action(work_ccb);
		}
		break;
	}
	case XPT_SCAN_LUN:
	{
		cam_status status;
		struct cam_path *path;
		xpt_scan_bus_info *scan_info;
		path_id_t path_id;
		target_id_t target_id;
		lun_id_t lun_id;

		/* Reuse the same CCB to query if a device was really found */
		scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
		xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
			      request_ccb->ccb_h.pinfo.priority);
		request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;

		path_id = request_ccb->ccb_h.path_id;
		target_id = request_ccb->ccb_h.target_id;
		lun_id = request_ccb->ccb_h.target_lun;
		xpt_action(request_ccb);

		if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
			struct cam_ed *device;
			struct cam_et *target;
			int phl;

			/*
			 * If we already probed lun 0 successfully, or
			 * we have additional configured luns on this
			 * target that might have "gone away", go onto
			 * the next lun.
			 */
			target = request_ccb->ccb_h.path->target;
			/*
			 * We may touch devices that we don't
			 * hold references too, so ensure they
			 * don't disappear out from under us.
			 * The target above is referenced by the
			 * path in the request ccb.
			 */
			phl = 0;
			device = TAILQ_FIRST(&target->ed_entries);
			if (device != NULL) {
				phl = CAN_SRCH_HI_SPARSE(device);
				if (device->lun_id == 0)
					device = TAILQ_NEXT(device, links);
			}
			if ((lun_id != 0) || (device != NULL)) {
				if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
					lun_id++;
			}
		} else {
			struct cam_ed *device;

			device = request_ccb->ccb_h.path->device;

			if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
				/* Try the next lun */
				if (lun_id < (CAM_SCSI2_MAXLUN-1)
				 || CAN_SRCH_HI_DENSE(device))
					lun_id++;
			}
		}

		/*
		 * Free the current request path- we're done with it.
		 */
		xpt_free_path(request_ccb->ccb_h.path);

		/*
		 * Check to see if we scan any further luns.
		 */
		if (lun_id == request_ccb->ccb_h.target_lun
		 || lun_id > scan_info->cpi->max_lun) {
			int done;

hop_again:
			done = 0;
			if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
				scan_info->counter++;
				if (scan_info->counter ==
				    scan_info->cpi->initiator_id) {
					scan_info->counter++;
				}
				if (scan_info->counter >=
				    scan_info->cpi->max_target+1) {
					done = 1;
				}
			} else {
				scan_info->counter--;
				if (scan_info->counter == 0) {
					done = 1;
				}
			}
			if (done) {
				xpt_free_ccb(request_ccb);
				xpt_free_ccb((union ccb *)scan_info->cpi);
				request_ccb = scan_info->request_ccb;
				kfree(scan_info, M_CAMXPT);
				request_ccb->ccb_h.status = CAM_REQ_CMP;
				xpt_done(request_ccb);
				break;
			}

			if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) {
				break;
			}
			status = xpt_create_path(&path, xpt_periph,
			    scan_info->request_ccb->ccb_h.path_id,
			    scan_info->counter, 0);
			if (status != CAM_REQ_CMP) {
				kprintf("xpt_scan_bus: xpt_create_path failed"
				    " with status %#x, bus scan halted\n",
				    status);
				xpt_free_ccb(request_ccb);
				xpt_free_ccb((union ccb *)scan_info->cpi);
				request_ccb = scan_info->request_ccb;
				kfree(scan_info, M_CAMXPT);
				request_ccb->ccb_h.status = status;
				xpt_done(request_ccb);
				break;
			}
			xpt_setup_ccb(&request_ccb->ccb_h, path,
			    request_ccb->ccb_h.pinfo.priority);
			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
			request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
			request_ccb->crcn.flags =
			    scan_info->request_ccb->crcn.flags;
		} else {
			status = xpt_create_path(&path, xpt_periph,
						 path_id, target_id, lun_id);
			if (status != CAM_REQ_CMP) {
				kprintf("xpt_scan_bus: xpt_create_path failed "
				       "with status %#x, halting LUN scan\n",
				       status);
				goto hop_again;
			}
			xpt_setup_ccb(&request_ccb->ccb_h, path,
				      request_ccb->ccb_h.pinfo.priority);
			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
			request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
			request_ccb->crcn.flags =
				scan_info->request_ccb->crcn.flags;
		}
		xpt_action(request_ccb);
		break;
	}
	default:
		break;
	}
}
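
/*
 * Probe peripheral state machine definitions.  A probe periph steps a
 * device through TUR, inquiry, mode sense, and serial number collection
 * (plus basic domain validation) before it is announced to peripherals.
 */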
typedef enum {
	PROBE_TUR,
	PROBE_INQUIRY,	/* this counts as DV0 for Basic Domain Validation */
	PROBE_FULL_INQUIRY,
	PROBE_MODE_SENSE,
	PROBE_SERIAL_NUM_0,
	PROBE_SERIAL_NUM_1,
	PROBE_TUR_FOR_NEGOTIATION,
	PROBE_INQUIRY_BASIC_DV1,
	PROBE_INQUIRY_BASIC_DV2,
	PROBE_DV_EXIT
} probe_action;

typedef enum {
	PROBE_INQUIRY_CKSUM	= 0x01,
	PROBE_SERIAL_CKSUM	= 0x02,
	PROBE_NO_ANNOUNCE	= 0x04
} probe_flags;

typedef struct {
	TAILQ_HEAD(, ccb_hdr) request_ccbs;
	probe_action	action;
	union ccb	saved_ccb;
	probe_flags	flags;
	MD5_CTX		context;
	u_int8_t	digest[16];
} probe_softc;
static void
xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
	     cam_flags flags, union ccb *request_ccb)
{
	struct ccb_pathinq cpi;
	cam_status status;
	struct cam_path *new_path;
	struct cam_periph *old_periph;

	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
		  ("xpt_scan_lun\n"));

	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	if (cpi.ccb_h.status != CAM_REQ_CMP) {
		if (request_ccb != NULL) {
			request_ccb->ccb_h.status = cpi.ccb_h.status;
			xpt_done(request_ccb);
		}
		return;
	}

	if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
		/*
		 * Can't scan the bus on an adapter that
		 * cannot perform the initiator role.
		 */
		if (request_ccb != NULL) {
			request_ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_done(request_ccb);
		}
		return;
	}

	if (request_ccb == NULL) {
		request_ccb = kmalloc(sizeof(union ccb), M_CAMXPT, M_INTWAIT);
		new_path = kmalloc(sizeof(*new_path), M_CAMXPT, M_INTWAIT);
		status = xpt_compile_path(new_path, xpt_periph,
					  path->bus->path_id,
					  path->target->target_id,
					  path->device->lun_id);

		if (status != CAM_REQ_CMP) {
			xpt_print(path, "xpt_scan_lun: can't compile path, "
				  "can't continue\n");
			kfree(request_ccb, M_CAMXPT);
			kfree(new_path, M_CAMXPT);
			return;
		}
		xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
		request_ccb->ccb_h.cbfcnp = xptscandone;
		request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
		request_ccb->crcn.flags = flags;
	}

	if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
		probe_softc *softc;

		softc = (probe_softc *)old_periph->softc;
		TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
				  periph_links.tqe);
	} else {
		status = cam_periph_alloc(proberegister, NULL, probecleanup,
					  probestart, "probe",
					  CAM_PERIPH_BIO,
					  request_ccb->ccb_h.path, NULL, 0,
					  request_ccb);

		if (status != CAM_REQ_CMP) {
			xpt_print(path, "xpt_scan_lun: cam_alloc_periph "
				  "returned an error, can't continue probe\n");
			request_ccb->ccb_h.status = status;
			xpt_done(request_ccb);
		}
	}
}

static void
xptscandone(struct cam_periph *periph, union ccb *done_ccb)
{
	xpt_release_path(done_ccb->ccb_h.path);
	kfree(done_ccb->ccb_h.path, M_CAMXPT);
	kfree(done_ccb, M_CAMXPT);
}
static cam_status
proberegister(struct cam_periph *periph, void *arg)
{
	union ccb *request_ccb;	/* CCB representing the probe request */
	cam_status status;
	probe_softc *softc;

	request_ccb = (union ccb *)arg;
	if (periph == NULL) {
		kprintf("proberegister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (request_ccb == NULL) {
		kprintf("proberegister: no probe CCB, "
			"can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = kmalloc(sizeof(*softc), M_CAMXPT, M_INTWAIT | M_ZERO);
	TAILQ_INIT(&softc->request_ccbs);
	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
			  periph_links.tqe);
	periph->softc = softc;
	status = cam_periph_acquire(periph);
	if (status != CAM_REQ_CMP) {
		return (status);
	}

	/*
	 * Ensure we've waited at least a bus settle
	 * delay before attempting to probe the device.
	 * For HBAs that don't do bus resets, this won't make a difference.
	 */
	cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
				      scsi_delay);
	probeschedule(periph);
	return(CAM_REQ_CMP);
}
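
/*
 * Pick the first probe action for the device and schedule the probe periph.
 */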
static void
probeschedule(struct cam_periph *periph)
{
	struct ccb_pathinq cpi;
	union ccb *ccb;
	probe_softc *softc;

	softc = (probe_softc *)periph->softc;
	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);

	xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	/*
	 * If a device has gone away and another device, or the same one,
	 * is back in the same place, it should have a unit attention
	 * condition pending.  It will not report the unit attention in
	 * response to an inquiry, which may leave invalid transfer
	 * negotiations in effect.  The TUR will reveal the unit attention
	 * condition.  Only send the TUR for lun 0, since some devices
	 * will get confused by commands other than inquiry to non-existent
	 * luns.  If you think a device has gone away start your scan from
	 * lun 0.  This will ensure that any bogus transfer settings are
	 * invalidated.
	 *
	 * If we haven't seen the device before and the controller supports
	 * some kind of transfer negotiation, negotiate with the first
	 * sent command if no bus reset was performed at startup.  This
	 * ensures that the device is not confused by transfer negotiation
	 * settings left over by loader or BIOS action.
	 */
	if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
	 && (ccb->ccb_h.target_lun == 0)) {
		softc->action = PROBE_TUR;
	} else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
	      && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
		proberequestdefaultnegotiation(periph);
		softc->action = PROBE_INQUIRY;
	} else {
		softc->action = PROBE_INQUIRY;
	}

	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
		softc->flags |= PROBE_NO_ANNOUNCE;
	else
		softc->flags &= ~PROBE_NO_ANNOUNCE;

	xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
}
static void
probestart(struct cam_periph *periph, union ccb *start_ccb)
{
	/* Probe the device that our peripheral driver points to */
	struct ccb_scsiio *csio;
	probe_softc *softc;

	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));

	softc = (probe_softc *)periph->softc;
	csio = &start_ccb->csio;

	switch (softc->action) {
	case PROBE_TUR:
	case PROBE_TUR_FOR_NEGOTIATION:
	case PROBE_DV_EXIT:
	{
		scsi_test_unit_ready(csio,
				     /*retries*/4,
				     probedone,
				     MSG_SIMPLE_Q_TAG,
				     SSD_FULL_SIZE,
				     /*timeout*/60000);
		break;
	}
	case PROBE_INQUIRY:
	case PROBE_FULL_INQUIRY:
	case PROBE_INQUIRY_BASIC_DV1:
	case PROBE_INQUIRY_BASIC_DV2:
	{
		u_int inquiry_len;
		struct scsi_inquiry_data *inq_buf;

		inq_buf = &periph->path->device->inq_data;

		/*
		 * If the device is currently configured, we calculate an
		 * MD5 checksum of the inquiry data, and if the serial number
		 * length is greater than 0, add the serial number data
		 * into the checksum as well.  Once the inquiry and the
		 * serial number check finish, we attempt to figure out
		 * whether we still have the same device.
		 */
		if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {

			MD5Init(&softc->context);
			MD5Update(&softc->context, (unsigned char *)inq_buf,
				  sizeof(struct scsi_inquiry_data));
			softc->flags |= PROBE_INQUIRY_CKSUM;
			if (periph->path->device->serial_num_len > 0) {
				MD5Update(&softc->context,
					  periph->path->device->serial_num,
					  periph->path->device->serial_num_len);
				softc->flags |= PROBE_SERIAL_CKSUM;
			}
			MD5Final(softc->digest, &softc->context);
		}

		if (softc->action == PROBE_INQUIRY)
			inquiry_len = SHORT_INQUIRY_LENGTH;
		else
			inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf);

		/*
		 * Some parallel SCSI devices fail to send an
		 * ignore wide residue message when dealing with
		 * odd length inquiry requests.  Round up to be
		 * safe.
		 */
		inquiry_len = roundup2(inquiry_len, 2);

		if (softc->action == PROBE_INQUIRY_BASIC_DV1
		 || softc->action == PROBE_INQUIRY_BASIC_DV2) {
			inq_buf = kmalloc(inquiry_len, M_CAMXPT, M_INTWAIT);
		}
		scsi_inquiry(csio,
			     /*retries*/4,
			     probedone,
			     MSG_SIMPLE_Q_TAG,
			     (u_int8_t *)inq_buf,
			     inquiry_len,
			     /*evpd*/FALSE,
			     /*page_code*/0,
			     SSD_MIN_SIZE,
			     /*timeout*/60 * 1000);
		break;
	}
	case PROBE_MODE_SENSE:
	{
		void *mode_buf;
		int mode_buf_len;

		mode_buf_len = sizeof(struct scsi_mode_header_6)
			     + sizeof(struct scsi_mode_blk_desc)
			     + sizeof(struct scsi_control_page);
		mode_buf = kmalloc(mode_buf_len, M_CAMXPT, M_INTWAIT);
		scsi_mode_sense(csio,
				/*retries*/4,
				probedone,
				MSG_SIMPLE_Q_TAG,
				/*dbd*/FALSE,
				SMS_PAGE_CTRL_CURRENT,
				SMS_CONTROL_MODE_PAGE,
				mode_buf,
				mode_buf_len,
				SSD_FULL_SIZE,
				/*timeout*/60000);
		break;
	}
	case PROBE_SERIAL_NUM_0:
	{
		struct scsi_vpd_supported_page_list *vpd_list = NULL;
		struct cam_ed *device;

		device = periph->path->device;
		if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0) {
			vpd_list = kmalloc(sizeof(*vpd_list), M_CAMXPT,
					   M_INTWAIT | M_ZERO);
		}

		if (vpd_list != NULL) {
			scsi_inquiry(csio,
				     /*retries*/4,
				     probedone,
				     MSG_SIMPLE_Q_TAG,
				     (u_int8_t *)vpd_list,
				     sizeof(*vpd_list),
				     /*evpd*/TRUE,
				     SVPD_SUPPORTED_PAGE_LIST,
				     SSD_MIN_SIZE,
				     /*timeout*/60 * 1000);
			break;
		}
		/*
		 * We'll have to do without, let our probedone
		 * routine finish up for us.
		 */
		start_ccb->csio.data_ptr = NULL;
		probedone(periph, start_ccb);
		return;
	}
	case PROBE_SERIAL_NUM_1:
	{
		struct scsi_vpd_unit_serial_number *serial_buf;
		struct cam_ed *device;

		device = periph->path->device;
		device->serial_num = NULL;
		device->serial_num_len = 0;

		serial_buf = (struct scsi_vpd_unit_serial_number *)
			kmalloc(sizeof(*serial_buf), M_CAMXPT,
				M_INTWAIT | M_ZERO);
		scsi_inquiry(csio,
			     /*retries*/4,
			     probedone,
			     MSG_SIMPLE_Q_TAG,
			     (u_int8_t *)serial_buf,
			     sizeof(*serial_buf),
			     /*evpd*/TRUE,
			     SVPD_UNIT_SERIAL_NUMBER,
			     SSD_MIN_SIZE,
			     /*timeout*/60 * 1000);
		break;
	}
	}
	xpt_action(start_ccb);
}
static void
proberequestdefaultnegotiation(struct cam_periph *periph)
{
	struct ccb_trans_settings cts;

	xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_USER_SETTINGS;
	xpt_action((union ccb *)&cts);
	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		return;
	}
	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb *)&cts);
}
/*
 * Backoff Negotiation Code- only pertinent for SPI devices.
 */
static int
proberequestbackoff(struct cam_periph *periph, struct cam_ed *device)
{
	struct ccb_trans_settings cts;
	struct ccb_trans_settings_spi *spi;

	memset(&cts, 0, sizeof (cts));
	xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb *)&cts);
	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		if (bootverbose) {
			xpt_print(periph->path,
			    "failed to get current device settings\n");
		}
		return (0);
	}
	if (cts.transport != XPORT_SPI) {
		if (bootverbose) {
			xpt_print(periph->path, "not SPI transport\n");
		}
		return (0);
	}
	spi = &cts.xport_specific.spi;

	/*
	 * We cannot renegotiate sync rate if we don't have one.
	 */
	if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
		if (bootverbose) {
			xpt_print(periph->path, "no sync rate known\n");
		}
		return (0);
	}

	/*
	 * We'll assert that we don't have to touch PPR options- the
	 * SIM will see what we do with period and offset and adjust
	 * the PPR options as appropriate.
	 */

	/*
	 * A sync rate with unknown or zero offset is nonsensical.
	 * A sync period of zero means Async.
	 */
	if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0
	 || spi->sync_offset == 0 || spi->sync_period == 0) {
		if (bootverbose) {
			xpt_print(periph->path, "no sync rate available\n");
		}
		return (0);
	}

	if (device->flags & CAM_DEV_DV_HIT_BOTTOM) {
		CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
		    ("hit async: giving up on DV\n"));
		return (0);
	}

	/*
	 * Jump sync_period up by one, but stop at 5MHz and fall back to Async.
	 * We don't try to remember 'last' settings to see if the SIM actually
	 * gets into the speed we want to set. We check on the SIM telling
	 * us that a requested speed is bad, but otherwise don't try and
	 * check the speed due to the asynchronous and handshake nature
	 * of speed setting.
	 */
	spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET;
	for (;;) {
		spi->sync_period++;
		if (spi->sync_period >= 0xf) {
			spi->sync_period = 0;
			spi->sync_offset = 0;
			CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
			    ("setting to async for DV\n"));
			/*
			 * Once we hit async, we don't want to try
			 * any more settings.
			 */
			device->flags |= CAM_DEV_DV_HIT_BOTTOM;
		} else if (bootverbose) {
			CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
			    ("DV: period 0x%x\n", spi->sync_period));
			kprintf("setting period to 0x%x\n", spi->sync_period);
		}
		cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
		cts.type = CTS_TYPE_CURRENT_SETTINGS;
		xpt_action((union ccb *)&cts);
		if ((cts.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			break;
		}
		CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
		    ("DV: failed to set period 0x%x\n", spi->sync_period));
		if (spi->sync_period == 0) {
			return (0);
		}
	}
	return (1);
}
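
/*
 * Completion handler for each probe step; advances the probe state machine
 * and finally completes the queued scan request CCBs.
 */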
static void
probedone(struct cam_periph *periph, union ccb *done_ccb)
{
	probe_softc *softc;
	struct cam_path *path;
	u_int32_t priority;

	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));

	softc = (probe_softc *)periph->softc;
	path = done_ccb->ccb_h.path;
	priority = done_ccb->ccb_h.pinfo.priority;

	switch (softc->action) {
	case PROBE_TUR:
	{
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {

			if (cam_periph_error(done_ccb, 0,
					     SF_NO_PRINT, NULL) == ERESTART)
				return;
			else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				/* Don't wedge the queue */
				xpt_release_devq(done_ccb->ccb_h.path,
						 /*count*/1,
						 /*run_queue*/TRUE);
		}
		softc->action = PROBE_INQUIRY;
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		return;
	}
	case PROBE_INQUIRY:
	case PROBE_FULL_INQUIRY:
	{
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			struct scsi_inquiry_data *inq_buf;
			u_int8_t periph_qual;

			path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
			inq_buf = &path->device->inq_data;

			periph_qual = SID_QUAL(inq_buf);

			switch(periph_qual) {
			case SID_QUAL_LU_CONNECTED:
			{
				u_int8_t len;

				/*
				 * We conservatively request only
				 * SHORT_INQUIRY_LEN bytes of inquiry
				 * information during our first try
				 * at sending an INQUIRY. If the device
				 * has more information to give,
				 * perform a second request specifying
				 * the amount of information the device
				 * is willing to give.
				 */
				len = inq_buf->additional_length
				    + offsetof(struct scsi_inquiry_data,
					       additional_length) + 1;
				if (softc->action == PROBE_INQUIRY
				 && len > SHORT_INQUIRY_LENGTH) {
					softc->action = PROBE_FULL_INQUIRY;
					xpt_release_ccb(done_ccb);
					xpt_schedule(periph, priority);
					return;
				}

				xpt_find_quirk(path->device);

				xpt_devise_transport(path);
				if (INQ_DATA_TQ_ENABLED(inq_buf))
					softc->action = PROBE_MODE_SENSE;
				else
					softc->action = PROBE_SERIAL_NUM_0;

				path->device->flags &= ~CAM_DEV_UNCONFIGURED;
				xpt_reference_device(path->device);

				xpt_release_ccb(done_ccb);
				xpt_schedule(periph, priority);
				return;
			}
			default:
				break;
			}
		} else if (cam_periph_error(done_ccb, 0,
					    done_ccb->ccb_h.target_lun > 0
					    ? SF_RETRY_UA|SF_QUIET_IR
					    : SF_RETRY_UA,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}
		/*
		 * If we get to this point, we got an error status back
		 * from the inquiry and the error status doesn't require
		 * automatically retrying the command.  Therefore, the
		 * inquiry failed.  If we had inquiry information before
		 * for this device, but this latest inquiry command failed,
		 * the device has probably gone away.  If this device isn't
		 * already marked unconfigured, notify the peripheral
		 * drivers that this device is no more.
		 */
		if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
			/* Send the async notification. */
			xpt_async(AC_LOST_DEVICE, path, NULL);
		}

		xpt_release_ccb(done_ccb);
		break;
	}
	case PROBE_MODE_SENSE:
	{
		struct ccb_scsiio *csio;
		struct scsi_mode_header_6 *mode_hdr;

		csio = &done_ccb->csio;
		mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			struct scsi_control_page *page;
			u_int8_t *offset;

			offset = ((u_int8_t *)&mode_hdr[1])
			    + mode_hdr->blk_desc_len;
			page = (struct scsi_control_page *)offset;
			path->device->queue_flags = page->queue_flags;
		} else if (cam_periph_error(done_ccb, 0,
					    SF_RETRY_UA|SF_NO_PRINT,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path,
					 /*count*/1, /*run_queue*/TRUE);
		}
		xpt_release_ccb(done_ccb);
		kfree(mode_hdr, M_CAMXPT);
		softc->action = PROBE_SERIAL_NUM_0;
		xpt_schedule(periph, priority);
		return;
	}
	case PROBE_SERIAL_NUM_0:
	{
		struct ccb_scsiio *csio;
		struct scsi_vpd_supported_page_list *page_list;
		int length, serialnum_supported, i;

		serialnum_supported = 0;
		csio = &done_ccb->csio;
		page_list =
		    (struct scsi_vpd_supported_page_list *)csio->data_ptr;

		if (page_list == NULL) {
			/*
			 * Don't process the command as it was never sent
			 */
		} else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
		    && (page_list->length > 0)) {
			length = min(page_list->length,
			    SVPD_SUPPORTED_PAGES_SIZE);
			for (i = 0; i < length; i++) {
				if (page_list->list[i] ==
				    SVPD_UNIT_SERIAL_NUMBER) {
					serialnum_supported = 1;
					break;
				}
			}
		} else if (cam_periph_error(done_ccb, 0,
					    SF_RETRY_UA|SF_NO_PRINT,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}

		if (page_list != NULL)
			kfree(page_list, M_DEVBUF);

		if (serialnum_supported) {
			xpt_release_ccb(done_ccb);
			softc->action = PROBE_SERIAL_NUM_1;
			xpt_schedule(periph, priority);
			return;
		}

		xpt_release_ccb(done_ccb);
		softc->action = PROBE_TUR_FOR_NEGOTIATION;
		xpt_schedule(periph, done_ccb->ccb_h.pinfo.priority);
		return;
	}
	case PROBE_SERIAL_NUM_1:
	{
		struct ccb_scsiio *csio;
		struct scsi_vpd_unit_serial_number *serial_buf;
		u_int32_t priority;
		int changed;
		int have_serialnum;

		changed = 1;
		have_serialnum = 0;
		csio = &done_ccb->csio;
		priority = done_ccb->ccb_h.pinfo.priority;
		serial_buf =
		    (struct scsi_vpd_unit_serial_number *)csio->data_ptr;

		/* Clean up from previous instance of this device */
		if (path->device->serial_num != NULL) {
			kfree(path->device->serial_num, M_CAMXPT);
			path->device->serial_num = NULL;
			path->device->serial_num_len = 0;
		}

		if (serial_buf == NULL) {
			/*
			 * Don't process the command as it was never sent
			 */
		} else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
			&& (serial_buf->length > 0)) {

			have_serialnum = 1;
			path->device->serial_num =
				kmalloc((serial_buf->length + 1),
					M_CAMXPT, M_INTWAIT);
			bcopy(serial_buf->serial_num,
			      path->device->serial_num,
			      serial_buf->length);
			path->device->serial_num_len = serial_buf->length;
			path->device->serial_num[serial_buf->length] = '\0';
		} else if (cam_periph_error(done_ccb, 0,
					    SF_RETRY_UA|SF_NO_PRINT,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}

		/*
		 * Let's see if we have seen this device before.
		 */
		if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
			MD5_CTX context;
			u_int8_t digest[16];

			MD5Init(&context);

			MD5Update(&context,
				  (unsigned char *)&path->device->inq_data,
				  sizeof(struct scsi_inquiry_data));

			if (have_serialnum)
				MD5Update(&context, serial_buf->serial_num,
					  serial_buf->length);

			MD5Final(digest, &context);
			if (bcmp(softc->digest, digest, 16) == 0)
				changed = 0;

			/*
			 * XXX Do we need to do a TUR in order to ensure
			 *     that the device really hasn't changed???
			 */
			if ((changed != 0)
			 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
				xpt_async(AC_LOST_DEVICE, path, NULL);
		}
		if (serial_buf != NULL)
			kfree(serial_buf, M_CAMXPT);

		if (changed != 0) {
			/*
			 * Now that we have all the necessary
			 * information to safely perform transfer
			 * negotiations... Controllers don't perform
			 * any negotiation or tagged queuing until
			 * after the first XPT_SET_TRAN_SETTINGS ccb is
			 * received.  So, on a new device, just retrieve
			 * the user settings, and set them as the current
			 * settings to set the device up.
			 */
			proberequestdefaultnegotiation(periph);
			xpt_release_ccb(done_ccb);

			/*
			 * Perform a TUR to allow the controller to
			 * perform any necessary transfer negotiation.
			 */
			softc->action = PROBE_TUR_FOR_NEGOTIATION;
			xpt_schedule(periph, priority);
			return;
		}
		xpt_release_ccb(done_ccb);
		break;
	}
	case PROBE_TUR_FOR_NEGOTIATION:
	case PROBE_DV_EXIT:
		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}

		xpt_reference_device(path->device);
		/*
		 * Do Domain Validation for lun 0 on devices that claim
		 * to support Synchronous Transfer modes.
		 */
		if (softc->action == PROBE_TUR_FOR_NEGOTIATION
		 && done_ccb->ccb_h.target_lun == 0
		 && (path->device->inq_data.flags & SID_Sync) != 0
		 && (path->device->flags & CAM_DEV_IN_DV) == 0) {
			CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
			    ("Begin Domain Validation\n"));
			path->device->flags |= CAM_DEV_IN_DV;
			xpt_release_ccb(done_ccb);
			softc->action = PROBE_INQUIRY_BASIC_DV1;
			xpt_schedule(periph, priority);
			return;
		}
		if (softc->action == PROBE_DV_EXIT) {
			CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
			    ("Leave Domain Validation\n"));
		}
		path->device->flags &=
		    ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
		if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
			/* Inform the XPT that a new device has been found */
			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action(done_ccb);
			xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
				  done_ccb);
		}
		xpt_release_ccb(done_ccb);
		break;
	case PROBE_INQUIRY_BASIC_DV1:
	case PROBE_INQUIRY_BASIC_DV2:
	{
		struct scsi_inquiry_data *nbuf;
		struct ccb_scsiio *csio;

		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}
		csio = &done_ccb->csio;
		nbuf = (struct scsi_inquiry_data *)csio->data_ptr;
		if (bcmp(nbuf, &path->device->inq_data, SHORT_INQUIRY_LENGTH)) {
			xpt_print(path,
			    "inquiry data fails comparison at DV%d step\n",
			    softc->action == PROBE_INQUIRY_BASIC_DV1 ? 1 : 2);
			if (proberequestbackoff(periph, path->device)) {
				path->device->flags &= ~CAM_DEV_IN_DV;
				softc->action = PROBE_TUR_FOR_NEGOTIATION;
			} else {
				/* give up */
				softc->action = PROBE_DV_EXIT;
			}
			kfree(nbuf, M_CAMXPT);
			xpt_release_ccb(done_ccb);
			xpt_schedule(periph, priority);
			return;
		}
		kfree(nbuf, M_CAMXPT);
		if (softc->action == PROBE_INQUIRY_BASIC_DV1) {
			softc->action = PROBE_INQUIRY_BASIC_DV2;
			xpt_release_ccb(done_ccb);
			xpt_schedule(periph, priority);
			return;
		}
		if (softc->action == PROBE_DV_EXIT) {
			CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
			    ("Leave Domain Validation Successfully\n"));
		}
		path->device->flags &=
		    ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
		if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
			/* Inform the XPT that a new device has been found */
			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action(done_ccb);
			xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
				  done_ccb);
		}
		xpt_release_ccb(done_ccb);
		break;
	}
	}
	done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
	TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
	done_ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(done_ccb);
	if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
		cam_periph_invalidate(periph);
		cam_periph_release(periph);
	} else {
		probeschedule(periph);
	}
}
static void
probecleanup(struct cam_periph *periph)
{
	kfree(periph->softc, M_CAMXPT);
}
static void
xpt_find_quirk(struct cam_ed *device)
{
	caddr_t	match;

	match = cam_quirkmatch((caddr_t)&device->inq_data,
			       (caddr_t)xpt_quirk_table,
			       sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
			       sizeof(*xpt_quirk_table), scsi_inquiry_match);

	if (match == NULL)
		panic("xpt_find_quirk: device didn't match wildcard entry!!");

	device->quirk = (struct xpt_quirk_entry *)match;
}
static int
sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS)
{
	int error, bool;

	bool = cam_srch_hi;
	error = sysctl_handle_int(oidp, &bool, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (bool == 0 || bool == 1) {
		cam_srch_hi = bool;
		return (0);
	} else {
		return (EINVAL);
	}
}
static void
xpt_devise_transport(struct cam_path *path)
{
	struct ccb_pathinq cpi;
	struct ccb_trans_settings cts;
	struct scsi_inquiry_data *inq_buf;

	/* Get transport information from the SIM */
	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	inq_buf = NULL;
	if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
		inq_buf = &path->device->inq_data;
	path->device->protocol = PROTO_SCSI;
	path->device->protocol_version =
	    inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
	path->device->transport = cpi.transport;
	path->device->transport_version = cpi.transport_version;

	/*
	 * Any device not using SPI3 features should
	 * be considered SPI2 or lower.
	 */
	if (inq_buf != NULL) {
		if (path->device->transport == XPORT_SPI
		 && (inq_buf->spi3data & SID_SPI_MASK) == 0
		 && path->device->transport_version > 2)
			path->device->transport_version = 2;
	} else {
		struct cam_ed* otherdev;

		for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
		     otherdev != NULL;
		     otherdev = TAILQ_NEXT(otherdev, links)) {
			if (otherdev != path->device)
				break;
		}

		if (otherdev != NULL) {
			/*
			 * Initially assume the same versioning as
			 * prior luns for this target.
			 */
			path->device->protocol_version =
			    otherdev->protocol_version;
			path->device->transport_version =
			    otherdev->transport_version;
		} else {
			/* Until we know better, opt for safety */
			path->device->protocol_version = 2;
			if (path->device->transport == XPORT_SPI)
				path->device->transport_version = 2;
			else
				path->device->transport_version = 0;
		}
	}

	/*
	 * XXX
	 * For a device compliant with SPC-2 we should be able
	 * to determine the transport version supported by
	 * scrutinizing the version descriptors in the
	 * inquiry buffer.
	 */

	/* Tell the controller what we think */
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	cts.transport = path->device->transport;
	cts.transport_version = path->device->transport_version;
	cts.protocol = path->device->protocol;
	cts.protocol_version = path->device->protocol_version;
	cts.proto_specific.valid = 0;
	cts.xport_specific.valid = 0;
	xpt_action((union ccb *)&cts);
}
6508 xpt_set_transfer_settings(struct ccb_trans_settings
*cts
, struct cam_ed
*device
,
6511 struct ccb_pathinq cpi
;
6512 struct ccb_trans_settings cur_cts
;
6513 struct ccb_trans_settings_scsi
*scsi
;
6514 struct ccb_trans_settings_scsi
*cur_scsi
;
6515 struct cam_sim
*sim
;
6516 struct scsi_inquiry_data
*inq_data
;
6518 if (device
== NULL
) {
6519 cts
->ccb_h
.status
= CAM_PATH_INVALID
;
6520 xpt_done((union ccb
*)cts
);
6524 if (cts
->protocol
== PROTO_UNKNOWN
6525 || cts
->protocol
== PROTO_UNSPECIFIED
) {
6526 cts
->protocol
= device
->protocol
;
6527 cts
->protocol_version
= device
->protocol_version
;
6530 if (cts
->protocol_version
== PROTO_VERSION_UNKNOWN
6531 || cts
->protocol_version
== PROTO_VERSION_UNSPECIFIED
)
6532 cts
->protocol_version
= device
->protocol_version
;
6534 if (cts
->protocol
!= device
->protocol
) {
6535 xpt_print(cts
->ccb_h
.path
, "Uninitialized Protocol %x:%x?\n",
6536 cts
->protocol
, device
->protocol
);
6537 cts
->protocol
= device
->protocol
;
6540 if (cts
->protocol_version
> device
->protocol_version
) {
6542 xpt_print(cts
->ccb_h
.path
, "Down reving Protocol "
6543 "Version from %d to %d?\n", cts
->protocol_version
,
6544 device
->protocol_version
);
6546 cts
->protocol_version
= device
->protocol_version
;
6549 if (cts
->transport
== XPORT_UNKNOWN
6550 || cts
->transport
== XPORT_UNSPECIFIED
) {
6551 cts
->transport
= device
->transport
;
6552 cts
->transport_version
= device
->transport_version
;
6555 if (cts
->transport_version
== XPORT_VERSION_UNKNOWN
6556 || cts
->transport_version
== XPORT_VERSION_UNSPECIFIED
)
6557 cts
->transport_version
= device
->transport_version
;
6559 if (cts
->transport
!= device
->transport
) {
6560 xpt_print(cts
->ccb_h
.path
, "Uninitialized Transport %x:%x?\n",
6561 cts
->transport
, device
->transport
);
6562 cts
->transport
= device
->transport
;
6565 if (cts
->transport_version
> device
->transport_version
) {
6567 xpt_print(cts
->ccb_h
.path
, "Down reving Transport "
6568 "Version from %d to %d?\n", cts
->transport_version
,
6569 device
->transport_version
);
6571 cts
->transport_version
= device
->transport_version
;
6574 sim
= cts
->ccb_h
.path
->bus
->sim
;
6577 * Nothing more of interest to do unless
6578 * this is a device connected via the
6581 if (cts
->protocol
!= PROTO_SCSI
) {
6582 if (async_update
== FALSE
)
6583 (*(sim
->sim_action
))(sim
, (union ccb
*)cts
);
6587 inq_data
= &device
->inq_data
;
6588 scsi
= &cts
->proto_specific
.scsi
;
6589 xpt_setup_ccb(&cpi
.ccb_h
, cts
->ccb_h
.path
, /*priority*/1);
6590 cpi
.ccb_h
.func_code
= XPT_PATH_INQ
;
6591 xpt_action((union ccb
*)&cpi
);
6593 /* SCSI specific sanity checking */
6594 if ((cpi
.hba_inquiry
& PI_TAG_ABLE
) == 0
6595 || (INQ_DATA_TQ_ENABLED(inq_data
)) == 0
6596 || (device
->queue_flags
& SCP_QUEUE_DQUE
) != 0
6597 || (device
->quirk
->mintags
== 0)) {
6599 * Can't tag on hardware that doesn't support tags,
6600 * doesn't have it enabled, or has broken tag support.
6602 scsi
->flags
&= ~CTS_SCSI_FLAGS_TAG_ENB
;
6605 if (async_update
== FALSE
) {
6607 * Perform sanity checking against what the
6608 * controller and device can do.
6610 xpt_setup_ccb(&cur_cts
.ccb_h
, cts
->ccb_h
.path
, /*priority*/1);
6611 cur_cts
.ccb_h
.func_code
= XPT_GET_TRAN_SETTINGS
;
6612 cur_cts
.type
= cts
->type
;
6613 xpt_action((union ccb
*)&cur_cts
);
6614 if ((cur_cts
.ccb_h
.status
& CAM_STATUS_MASK
) != CAM_REQ_CMP
) {
6617 cur_scsi
= &cur_cts
.proto_specific
.scsi
;
6618 if ((scsi
->valid
& CTS_SCSI_VALID_TQ
) == 0) {
6619 scsi
->flags
&= ~CTS_SCSI_FLAGS_TAG_ENB
;
6620 scsi
->flags
|= cur_scsi
->flags
& CTS_SCSI_FLAGS_TAG_ENB
;
6622 if ((cur_scsi
->valid
& CTS_SCSI_VALID_TQ
) == 0)
6623 scsi
->flags
&= ~CTS_SCSI_FLAGS_TAG_ENB
;
	/* SPI specific sanity checking */
	if (cts->transport == XPORT_SPI && async_update == FALSE) {
		u_int spi3caps;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings_spi *cur_spi;

		spi = &cts->xport_specific.spi;

		cur_spi = &cur_cts.xport_specific.spi;

		/* Fill in any gaps in what the user gave us */
		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
			spi->sync_period = cur_spi->sync_period;
		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
			spi->sync_period = 0;
		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
			spi->sync_offset = cur_spi->sync_offset;
		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
			spi->sync_offset = 0;
		if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
			spi->ppr_options = cur_spi->ppr_options;
		if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
			spi->ppr_options = 0;
		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
			spi->bus_width = cur_spi->bus_width;
		if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
			spi->bus_width = 0;
		if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
			spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
		}
		if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
		if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
		  && (inq_data->flags & SID_Sync) == 0
		  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
		 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
		 || (spi->sync_offset == 0)
		 || (spi->sync_period == 0)) {
			/* Force async */
			spi->sync_period = 0;
			spi->sync_offset = 0;
		}

		switch (spi->bus_width) {
		case MSG_EXT_WDTR_BUS_32_BIT:
			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
			  || (inq_data->flags & SID_WBus32) != 0
			  || cts->type == CTS_TYPE_USER_SETTINGS)
			 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
				break;
			/* Fall Through to 16-bit */
		case MSG_EXT_WDTR_BUS_16_BIT:
			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
			  || (inq_data->flags & SID_WBus16) != 0
			  || cts->type == CTS_TYPE_USER_SETTINGS)
			 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
				break;
			}
			/* Fall Through to 8-bit */
		default: /* New bus width?? */
		case MSG_EXT_WDTR_BUS_8_BIT:
			/* All targets can do this */
			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
			break;
		}

		spi3caps = cpi.xport_specific.spi.ppr_options;
		if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
		 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
			spi3caps &= inq_data->spi3data;

		if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;

		if ((spi3caps & SID_SPI_IUS) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;

		if ((spi3caps & SID_SPI_QAS) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;

		/* No SPI Transfer settings are allowed unless we are wide */
		if (spi->bus_width == 0)
			spi->ppr_options = 0;

		if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
			/*
			 * Can't tag queue without disconnection.
			 */
			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
			scsi->valid |= CTS_SCSI_VALID_TQ;
		}

		/*
		 * If we are currently performing tagged transactions to
		 * this device and want to change its negotiation parameters,
		 * go non-tagged for a bit to give the controller a chance to
		 * negotiate unhampered by tag messages.
		 */
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS
		 && (device->inq_flags & SID_CmdQue) != 0
		 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
		 && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
				   CTS_SPI_VALID_SYNC_OFFSET|
				   CTS_SPI_VALID_BUS_WIDTH)) != 0)
			xpt_toggle_tags(cts->ccb_h.path);
	}
	if (cts->type == CTS_TYPE_CURRENT_SETTINGS
	 && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
		int device_tagenb;

		/*
		 * If we are transitioning from tags to no-tags or
		 * vice-versa, we need to carefully freeze and restart
		 * the queue so that we don't overlap tagged and non-tagged
		 * commands.  We also temporarily stop tags if there is
		 * a change in transfer negotiation settings to allow
		 * "tag-less" negotiation.
		 */
		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
		 || (device->inq_flags & SID_CmdQue) != 0)
			device_tagenb = TRUE;
		else
			device_tagenb = FALSE;

		if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
		  && device_tagenb == FALSE)
		 || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
		  && device_tagenb == TRUE)) {

			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
				/*
				 * Delay change to use tags until after a
				 * few commands have gone to this device so
				 * the controller has time to perform transfer
				 * negotiations without tagged messages getting
				 * in the way.
				 */
				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
			} else {
				struct ccb_relsim crs;

				xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
				device->inq_flags &= ~SID_CmdQue;
				xpt_dev_ccbq_resize(cts->ccb_h.path,
						    sim->max_dev_openings);
				device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
				device->tag_delay_count = 0;

				xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
					      /*priority*/1);
				crs.ccb_h.func_code = XPT_REL_SIMQ;
				crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
				crs.openings
				    = crs.release_timeout
				    = crs.qfrozen_cnt
				    = 0;
				xpt_action((union ccb *)&crs);
			}
		}
	}
	if (async_update == FALSE)
		(*(sim->sim_action))(sim, (union ccb *)cts);
}

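/*
 * Called from xpt_set_transfer_settings() when negotiation parameters
 * change on a device that is currently using tagged queuing.
 */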
static void
xpt_toggle_tags(struct cam_path *path)
{
	struct cam_ed *dev;

	/*
	 * Give controllers a chance to renegotiate
	 * before starting tag operations.  We
	 * "toggle" tagged queuing off then on
	 * which causes the tag enable command delay
	 * counter to come into effect.
	 */
	dev = path->device;
	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
	 || ((dev->inq_flags & SID_CmdQue) != 0
	  && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
		struct ccb_trans_settings cts;

		xpt_setup_ccb(&cts.ccb_h, path, 1);
		cts.protocol = PROTO_SCSI;
		cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
		cts.transport = XPORT_UNSPECIFIED;
		cts.transport_version = XPORT_VERSION_UNSPECIFIED;
		cts.proto_specific.scsi.flags = 0;
		cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
		xpt_set_transfer_settings(&cts, path->device,
					  /*async_update*/TRUE);
		cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
		xpt_set_transfer_settings(&cts, path->device,
					  /*async_update*/TRUE);
	}
}

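/*
 * Turn tagged queuing back on for a device once its tag delay count has
 * expired, restoring the saved (or quirk-limited) number of openings.
 */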
static void
xpt_start_tags(struct cam_path *path)
{
	struct ccb_relsim crs;
	struct cam_ed *device;
	struct cam_sim *sim;
	int newopenings;

	device = path->device;
	sim = path->bus->sim;
	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
	xpt_freeze_devq(path, /*count*/1);
	device->inq_flags |= SID_CmdQue;
	if (device->tag_saved_openings != 0)
		newopenings = device->tag_saved_openings;
	else
		newopenings = min(device->quirk->maxtags,
				  sim->max_tagged_dev_openings);
	xpt_dev_ccbq_resize(path, newopenings);
	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
	crs.openings
	    = crs.release_timeout
	    = crs.qfrozen_cnt
	    = 0;
	xpt_action((union ccb *)&crs);
}

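/*
 * Bookkeeping for initial bus configuration: how many busses remain to be
 * configured and how many of them will receive an initial bus reset.
 * Both counters are maintained by the traversal callbacks below.
 */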
static int busses_to_config;
static int busses_to_reset;

static int
xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
{
	sim_lock_assert_owned(bus->sim->lock);

	if (bus->path_id != CAM_XPT_PATH_ID) {
		struct cam_path path;
		struct ccb_pathinq cpi;
		int can_negotiate;

		busses_to_config++;
		xpt_compile_path(&path, NULL, bus->path_id,
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
		cpi.ccb_h.func_code = XPT_PATH_INQ;
		xpt_action((union ccb *)&cpi);
		can_negotiate = cpi.hba_inquiry;
		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
		if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
		 && can_negotiate)
			busses_to_reset++;
		xpt_release_path(&path);
	}

	return(1);
}

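/*
 * Per-bus configuration step: issue an XPT_PATH_INQ and, when the
 * controller can negotiate transfer settings and tolerates bus resets,
 * start things off with an XPT_RESET_BUS; the resulting CCBs are funneled
 * into xpt_finishconfig().
 */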
static int
xptconfigfunc(struct cam_eb *bus, void *arg)
{
	struct cam_path *path;
	union ccb *work_ccb;

	sim_lock_assert_owned(bus->sim->lock);

	if (bus->path_id != CAM_XPT_PATH_ID) {
		cam_status status;
		int can_negotiate;

		work_ccb = xpt_alloc_ccb();
		if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
					      CAM_TARGET_WILDCARD,
					      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
			kprintf("xptconfigfunc: xpt_create_path failed with "
				"status %#x for bus %d\n", status, bus->path_id);
			kprintf("xptconfigfunc: halting bus configuration\n");
			xpt_free_ccb(work_ccb);
			busses_to_config--;
			xpt_finishconfig(xpt_periph, NULL);
			return(0);
		}
		xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
		xpt_action(work_ccb);
		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
			kprintf("xptconfigfunc: CPI failed on bus %d "
				"with status %d\n", bus->path_id,
				work_ccb->ccb_h.status);
			xpt_finishconfig(xpt_periph, work_ccb);
			return(1);
		}

		can_negotiate = work_ccb->cpi.hba_inquiry;
		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
		if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
		 && (can_negotiate != 0)) {
			xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
			work_ccb->ccb_h.cbfcnp = NULL;
			CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
				  ("Resetting Bus\n"));
			xpt_action(work_ccb);
			xpt_finishconfig(xpt_periph, work_ccb);
		} else {
			/* Act as though we performed a successful BUS RESET */
			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
			xpt_finishconfig(xpt_periph, work_ccb);
		}
	}

	return(1);
}

static void
xpt_config(void *arg)
{
	/*
	 * Now that interrupts are enabled, go find our devices
	 */

#ifdef CAMDEBUG
	/* Setup debugging flags and path */
#ifdef CAM_DEBUG_FLAGS
	cam_dflags = CAM_DEBUG_FLAGS;
#else /* !CAM_DEBUG_FLAGS */
	cam_dflags = CAM_DEBUG_NONE;
#endif /* CAM_DEBUG_FLAGS */
#ifdef CAM_DEBUG_BUS
	if (cam_dflags != CAM_DEBUG_NONE) {
		/*
		 * Locking is specifically omitted here.  No SIMs have
		 * registered yet, so xpt_create_path will only be searching
		 * empty lists of targets and devices.
		 */
		if (xpt_create_path(&cam_dpath, xpt_periph,
				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
			kprintf("xpt_config: xpt_create_path() failed for debug"
				" target %d:%d:%d, debugging disabled\n",
				CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
			cam_dflags = CAM_DEBUG_NONE;
		}
	} else
		cam_dpath = NULL;
#else /* !CAM_DEBUG_BUS */
	cam_dpath = NULL;
#endif /* CAM_DEBUG_BUS */
#endif /* CAMDEBUG */

	/*
	 * Scan all installed busses.
	 */
	xpt_for_all_busses(xptconfigbuscountfunc, NULL);

	if (busses_to_config == 0) {
		/* Call manually because we don't have any busses */
		xpt_finishconfig(xpt_periph, NULL);
	} else {
		if (busses_to_reset > 0 && scsi_delay >= 2000) {
			kprintf("Waiting %d seconds for SCSI "
				"devices to settle\n", scsi_delay/1000);
		}
		xpt_for_all_busses(xptconfigfunc, NULL);
	}
}

/*
 * If the given device only has one peripheral attached to it, and if that
 * peripheral is the passthrough driver, announce it.  This ensures that the
 * user sees some sort of announcement for every peripheral in their system.
 */
static int
xptpassannouncefunc(struct cam_ed *device, void *arg)
{
	struct cam_periph *periph;
	int i;

	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
	     periph = SLIST_NEXT(periph, periph_links), i++);

	periph = SLIST_FIRST(&device->periphs);
	if ((i == 1)
	 && (strncmp(periph->periph_name, "pass", 4) == 0))
		xpt_announce_periph(periph, NULL);

	return(1);
}

static void
xpt_finishconfig_task(void *context, int pending)
{
	struct periph_driver **p_drv;
	int i;

	if (busses_to_config == 0) {
		/* Register all the peripheral drivers */
		/* XXX This will have to change when we have loadable modules */
		p_drv = periph_drivers;
		for (i = 0; p_drv[i] != NULL; i++) {
			(*p_drv[i]->init)();
		}

		/*
		 * Check for devices with no "standard" peripheral driver
		 * attached.  For any devices like that, announce the
		 * passthrough driver so the user will see something.
		 */
		xpt_for_all_devices(xptpassannouncefunc, NULL);

		/* Release our hook so that the boot can continue. */
		config_intrhook_disestablish(xsoftc.xpt_config_hook);
		kfree(xsoftc.xpt_config_hook, M_CAMXPT);
		xsoftc.xpt_config_hook = NULL;
	}

	kfree(context, M_CAMXPT);
}

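/*
 * Completion handler for the initial configuration CCBs: a successful
 * XPT_RESET_BUS is turned into an XPT_SCAN_BUS request, and once the last
 * bus has been dealt with, xpt_finishconfig_task() is queued to register
 * the peripheral drivers.
 */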
static void
xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
{
	struct xpt_task *task;

	if (done_ccb != NULL) {
		CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
			  ("xpt_finishconfig\n"));
		switch(done_ccb->ccb_h.func_code) {
		case XPT_RESET_BUS:
			if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
				done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
				done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
				done_ccb->crcn.flags = 0;
				xpt_action(done_ccb);
				return;
			}
			/* FALLTHROUGH */
		case XPT_SCAN_BUS:
		default:
			xpt_free_path(done_ccb->ccb_h.path);
			busses_to_config--;
			break;
		}
	}

	if (busses_to_config == 0) {
		task = kmalloc(sizeof(struct xpt_task), M_CAMXPT, M_INTWAIT);
		TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
		taskqueue_enqueue(taskqueue_thread[mycpuid], &task->task);
	}

	if (done_ccb != NULL)
		xpt_free_ccb(done_ccb);
}

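/*
 * Arm an asynchronous event callback.  When no path is supplied, a
 * temporary wildcard path on the XPT pseudo-bus is created for the
 * registration and released again before returning.
 */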
cam_status
xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
		   struct cam_path *path)
{
	struct ccb_setasync csa;
	cam_status status;
	int xptpath = 0;

	if (path == NULL) {
		lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
		status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
					 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
		if (status != CAM_REQ_CMP) {
			lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
			return (status);
		}
		xptpath = 1;
	}

	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = event;
	csa.callback = cbfunc;
	csa.callback_arg = cbarg;
	xpt_action((union ccb *)&csa);
	status = csa.ccb_h.status;
	if (xptpath) {
		xpt_free_path(path);
		lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
	}
	return (status);
}

static void
xptaction(struct cam_sim *sim, union ccb *work_ccb)
{
	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));

	switch (work_ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi;

		cpi = &work_ccb->cpi;
		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = 0;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 0;
		cpi->max_lun = 0;
		cpi->initiator_id = 0;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "", HBA_IDLEN);
		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
		cpi->unit_number = sim->unit_number;
		cpi->bus_id = sim->bus_id;
		cpi->base_transfer_speed = 0;
		cpi->protocol = PROTO_UNSPECIFIED;
		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
		cpi->transport = XPORT_UNSPECIFIED;
		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(work_ccb);
		break;
	}
	default:
		work_ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(work_ccb);
		break;
	}
}

/*
 * The xpt as a "controller" has no interrupt sources, so polling
 * is a no-op.
 */
static void
xptpoll(struct cam_sim *sim)
{
}

void
xpt_lock_buses(void)
{
	lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
}

void
xpt_unlock_buses(void)
{
	lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
}

/*
 * Should only be called by the machine interrupt dispatch routines,
 * so put these prototypes here instead of in the header.
 */
static void
swi_cambio(void *arg, void *frame)
{
	camisr(NULL);
}

static void
camisr(void *dummy)
{
	cam_simq_t queue;
	struct cam_sim *sim;

	spin_lock_wr(&cam_simq_spin);
	TAILQ_INIT(&queue);
	TAILQ_CONCAT(&queue, &cam_simq, links);
	spin_unlock_wr(&cam_simq_spin);

	while ((sim = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, sim, links);
		CAM_SIM_LOCK(sim);
		sim->flags &= ~CAM_SIM_ON_DONEQ;
		camisr_runqueue(sim);
		CAM_SIM_UNLOCK(sim);
	}
}

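/*
 * Drain a single SIM's completion queue: retire each finished CCB from its
 * device's ccbq, return its send slot, release frozen queues as requested,
 * and finally hand the CCB to the owning peripheral's completion callback.
 */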
static void
camisr_runqueue(struct cam_sim *sim)
{
	struct ccb_hdr *ccb_h;
	int runq;

	spin_lock_wr(&sim->sim_spin);
	while ((ccb_h = TAILQ_FIRST(&sim->sim_doneq)) != NULL) {
		TAILQ_REMOVE(&sim->sim_doneq, ccb_h, sim_links.tqe);
		spin_unlock_wr(&sim->sim_spin);
		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;

		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
			  ("camisr\n"));

		runq = FALSE;

		if (ccb_h->flags & CAM_HIGH_POWER) {
			struct highpowerlist *hphead;
			struct cam_ed *device;
			union ccb *send_ccb;

			lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
			hphead = &xsoftc.highpowerq;

			send_ccb = (union ccb *)STAILQ_FIRST(hphead);

			/*
			 * Increment the count since this command is done.
			 */
			xsoftc.num_highpower++;

			/*
			 * Any high powered commands queued up?
			 */
			if (send_ccb != NULL) {
				device = send_ccb->ccb_h.path->device;

				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
				lockmgr(&xsoftc.xpt_lock, LK_RELEASE);

				xpt_release_devq(send_ccb->ccb_h.path,
						 /*count*/1, /*runqueue*/TRUE);
			} else {
				lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
			}
		}

		if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
			struct cam_ed *dev;

			dev = ccb_h->path->device;

			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);

			/*
			 * devq may be NULL if this is cam_dead_sim
			 */
			if (ccb_h->path->bus->sim->devq) {
				ccb_h->path->bus->sim->devq->send_active--;
				ccb_h->path->bus->sim->devq->send_openings++;
			}

			if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
			  && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
			 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
			  && (dev->ccbq.dev_active == 0))) {
				xpt_release_devq(ccb_h->path, /*count*/1,
						 /*run_queue*/TRUE);
			}

			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
			 && (--dev->tag_delay_count == 0))
				xpt_start_tags(ccb_h->path);

			if ((dev->ccbq.queue.entries > 0)
			 && (dev->qfrozen_cnt == 0)
			 && (device_is_send_queued(dev) == 0)) {
				runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
							      dev);
			}
		}

		if (ccb_h->status & CAM_RELEASE_SIMQ) {
			xpt_release_simq(ccb_h->path->bus->sim,
					 /*run_queue*/TRUE);
			ccb_h->status &= ~CAM_RELEASE_SIMQ;
			runq = FALSE;
		}

		if ((ccb_h->flags & CAM_DEV_QFRZDIS)
		 && (ccb_h->status & CAM_DEV_QFRZN)) {
			xpt_release_devq(ccb_h->path, /*count*/1,
					 /*run_queue*/TRUE);
			ccb_h->status &= ~CAM_DEV_QFRZN;
		} else if (runq) {
			xpt_run_dev_sendq(ccb_h->path->bus);
		}

		/* Call the peripheral driver's callback */
		(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
		spin_lock_wr(&sim->sim_spin);
	}
	spin_unlock_wr(&sim->sim_spin);
}

/*
 * The dead_sim isn't completely hooked into CAM, we have to make sure
 * the doneq is cleared after calling xpt_done() so cam_periph_ccbwait()
 * doesn't block.
 */
static void
dead_sim_action(struct cam_sim *sim, union ccb *ccb)
{
	ccb->ccb_h.status = CAM_DEV_NOT_THERE;
	xpt_done(ccb);
	camisr_runqueue(sim);
}

static void
dead_sim_poll(struct cam_sim *sim)