/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/cam/cam_xpt.c,v 1.80.2.18 2002/12/09 17:31:55 gibbs Exp $
 * $DragonFly: src/sys/bus/cam/cam_xpt.c,v 1.68 2008/08/23 17:13:31 pavalos Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/devicestat.h>
#include <sys/interrupt.h>
#include <sys/taskqueue.h>
#include <sys/thread.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/clock.h>
#include <machine/stdarg.h>

#include "cam_periph.h"
#include "cam_xpt_sim.h"
#include "cam_xpt_periph.h"
#include "cam_debug.h"

#include "scsi/scsi_all.h"
#include "scsi/scsi_message.h"
#include "scsi/scsi_pass.h"
#include <sys/kthread.h>
/* Datastructures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
/* Object for deferring XPT actions to a taskqueue */
/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
	SLIST_ENTRY(async_node)	links;
	u_int32_t	event_enable;	/* Async Event enables */
	void		(*callback)(void *arg, u_int32_t code,
				    struct cam_path *path, void *args);
	void		*callback_arg;
};

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
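/*
 * Illustrative sketch (not part of the original file): a peripheral driver
 * typically lands on one of the async callback lists above by issuing an
 * XPT_SASYNC_CB CCB, which is what ultimately allocates an async_node.
 * "mydriver_async" is a hypothetical handler matching the callback
 * signature in struct async_node.
 */
#if 0
static void
example_register_async(struct cam_periph *periph)
{
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
	csa.callback = mydriver_async;		/* hypothetical handler */
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);
}
#endif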
/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif
/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
	cam_pinfo pinfo;
	struct	  cam_ed *device;
};
/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
	TAILQ_ENTRY(cam_ed) links;
	struct	cam_ed_qinfo alloc_ccb_entry;
	struct	cam_ed_qinfo send_ccb_entry;
	struct	cam_et	 *target;
	lun_id_t	 lun_id;
	struct	camq drvq;		/*
					 * Queue of type drivers wanting to do
					 * work on this device.
					 */
	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
	struct	async_list asyncs;	/* Async callback info for this B/T/L */
	struct	periph_list periphs;	/* All attached devices */
	u_int	generation;		/* Generation number */
	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
	/* Storage for the inquiry data */
	cam_proto	 protocol;
	u_int		 protocol_version;
	cam_xport	 transport;
	u_int		 transport_version;
	struct		 scsi_inquiry_data inq_data;
	u_int8_t	 inq_flags;	/*
					 * Current settings for inquiry flags.
					 * This allows us to override settings
					 * like disconnection and tagged
					 * queuing for a device.
					 */
	u_int8_t	 queue_flags;	/* Queue flags from the control page */
	u_int8_t	 serial_num_len;
	u_int8_t	*serial_num;
	u_int32_t	 qfrozen_cnt;
	u_int32_t	 flags;
#define CAM_DEV_UNCONFIGURED		0x01
#define CAM_DEV_REL_TIMEOUT_PENDING	0x02
#define CAM_DEV_REL_ON_COMPLETE		0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
#define CAM_DEV_TAG_AFTER_COUNT		0x20
#define CAM_DEV_INQUIRY_DATA_VALID	0x40
#define	CAM_DEV_IN_DV			0x80
#define	CAM_DEV_DV_HIT_BOTTOM		0x100
	u_int32_t	 tag_delay_count;
#define	CAM_TAG_DELAY_COUNT		5
	u_int32_t	 tag_saved_openings;
	struct callout	 callout;
};
/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
	TAILQ_HEAD(, cam_ed) ed_entries;
	TAILQ_ENTRY(cam_et) links;
	struct	cam_eb	*bus;
	target_id_t	target_id;
	u_int		generation;
	struct		timeval last_reset;	/* uptime of last reset */
};
/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
	TAILQ_HEAD(, cam_et) et_entries;
	TAILQ_ENTRY(cam_eb)  links;
	path_id_t	     path_id;
	struct cam_sim	     *sim;
	struct timeval	     last_reset;	/* uptime of last reset */
	u_int32_t	     flags;
#define	CAM_EB_RUNQ_SCHEDULED	0x01
	u_int		     generation;
	int		     counted_to_config;	/* busses_to_config */
};
struct cam_path {
	struct cam_periph *periph;
	struct cam_eb	  *bus;
	struct cam_et	  *target;
	struct cam_ed	  *device;
};
struct xpt_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define	CAM_QUIRK_NOLUNS	0x01
#define	CAM_QUIRK_NOSERIAL	0x02
#define	CAM_QUIRK_HILUNS	0x04
#define	CAM_QUIRK_NOHILUNS	0x08
	u_int mintags;
	u_int maxtags;
};
static int cam_srch_hi = 0;
TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
    sysctl_cam_search_luns, "I",
    "allow search above LUN 7 for SCSI3 and greater devices");
#define	CAM_SCSI2_MAXLUN	8
/*
 * If we're not quirked to search <= the first 8 luns
 * and we are either quirked to search above lun 8,
 * or we're > SCSI-2 and we've enabled hilun searching,
 * or we're > SCSI-2 and the last lun was a success,
 * we can look for luns above lun 8.
 */
#define	CAN_SRCH_HI_SPARSE(dv)				\
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0) 	\
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)		\
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))

#define	CAN_SRCH_HI_DENSE(dv)				\
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0) 	\
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)		\
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))
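/*
 * Illustrative sketch (assumption, not in the original source): the LUN
 * probe code consults the two macros above when deciding whether a scan
 * may continue past CAM_SCSI2_MAXLUN, roughly as follows.
 */
#if 0
static int
example_can_probe_lun(struct cam_ed *dv, lun_id_t lun_id, int last_lun_ok)
{
	if (lun_id < CAM_SCSI2_MAXLUN)
		return (1);
	/* Sparse-LUN devices may be searched above LUN 7 unconditionally. */
	if (CAN_SRCH_HI_SPARSE(dv))
		return (1);
	/* Dense-LUN devices only while the previous LUN responded. */
	return (CAN_SRCH_HI_DENSE(dv) && last_lun_ok);
}
#endif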
struct xpt_softc {
	u_int32_t		flags;
#define	XPT_FLAG_OPEN		0x01
	u_int32_t		xpt_generation;

	/* number of high powered commands that can go through right now */
	STAILQ_HEAD(highpowerlist, ccb_hdr)	highpowerq;
	int			num_highpower;

	/* queue for handling async rescan requests. */
	TAILQ_HEAD(, ccb_hdr) ccb_scanq;
	int ccb_scanq_running;

	/* Registered busses */
	TAILQ_HEAD(,cam_eb)	xpt_busses;
	u_int			bus_generation;

	struct intr_config_hook	*xpt_config_hook;

	struct lock		xpt_topo_lock;
	struct lock		xpt_lock;
};
static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";
static struct xpt_quirk_entry xpt_quirk_table[] =
{
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Unfortunately, the Quantum Atlas III has the same
		 * problem as the Atlas II drives above.
		 * Reported by: "Johan Granlund" <johan@granlund.nu>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM39100TD-SW N1B0
		 *
		 * It's possible that Quantum will fix the problem in later
		 * firmware revisions.  If that happens, the quirk entry
		 * will need to be made specific to the firmware revisions
		 * with the problem.
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * 18 Gig Atlas III, same problem as the 9G version.
		 * Reported by: Andre Albsmeier
		 *		<andre.albsmeier@mchp.siemens.de>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM318000TD-S N491
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * Broken tagged queuing drive
		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
		 *         and: Martin Renters <martin@tdc.on.ca>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * The Seagate Medalist Pro drives have very poor write
		 * performance with anything more than 2 tags.
		 *
		 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
		 * Drive:  <SEAGATE ST36530N 1444>
		 *
		 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
		 * Drive:  <SEAGATE ST34520W 1281>
		 *
		 * No one has actually reported that the 9G version
		 * (ST39140*) of the Medalist Pro has the same problem, but
		 * we're assuming that it does because the 4G and 6.5G
		 * versions of the drive are broken.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  Write performance
		 * steadily drops off with more and more concurrent
		 * transactions.  Best sequential write performance with
		 * tagged queueing turned off and write caching turned on.
		 *
		 * Submitted by: Hideaki Okada <hokada@isl.melco.co.jp>
		 * Drive:  DCAS-34330 w/ "S65A" firmware.
		 *
		 * The drive with the problem had the "S65A" firmware
		 * revision, and has also been reported (by Stephen J.
		 * Roznowski <sjr@home.net>) for a drive with the "S61A"
		 * firmware revision.
		 *
		 * Although no one has reported problems with the 2 gig
		 * version of the DCAS drive, the assumption is that it
		 * has the same problems as the 4 gig version.  Therefore
		 * this quirk entry disables tagged queueing for all
		 * DCAS drives.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* This does not support other than LUN 0 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/*
		 * Broken tagged queuing drive.
		 * Submitted by:
		 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Doesn't handle queue full condition correctly,
		 * so we need to limit maxtags to what the device
		 * can handle instead of determining this automatically.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
		/*quirks*/0, /*mintags*/2, /*maxtags*/32
	},
	{
		/* Really only one LUN */
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* I can't believe we need a quirk for DPT volumes. */
		{ T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
		CAM_QUIRK_NOLUNS,
		/*mintags*/0, /*maxtags*/255
	},
	{
		/*
		 * Many Sony CDROM drives don't like multi-LUN probing.
		 */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This drive doesn't like multiple LUN probing.
		 * Submitted by: Parag Patel <parag@cgt.com>
		 */
		{ T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		{ T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * The 8200 doesn't like multi-lun probing, and probably
		 * doesn't like serial number requests either.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"EXB-8200*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Let's try the same as above, but for a drive that says
		 * it's an IPL-6860 but is actually an EXB 8200.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"IPL-6860*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These Hitachi drives don't like multi-lun probing.
		 * The PR submitter has a DK319H, but says that the Linux
		 * kernel has a similar work-around for the DK312 and DK314,
		 * so all DK31* drives are quirked here.
		 *
		 * Submitted by: Paul Haddad <paul@pth.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/*
		 * The Hitachi CJ series with J8A8 firmware apparently has
		 * problems with tagged commands.
		 *
		 * Reported by: amagai@nue.org
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These are the large storage arrays.
		 * Submitted by: William Carrel <william.carrel@infospace.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
		CAM_QUIRK_HILUNS, 2, 1024
	},
	{
		/*
		 * This old revision of the TDC3600 is also SCSI-1, and
		 * hangs upon serial number probing.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
			" TDC 3600", "U07:"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
			"CP150", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
			"96X2*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* TeraSolutions special settings for TRC-22 RAID */
		{ T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
		/*quirks*/0, /*mintags*/55, /*maxtags*/255
	},
	{
		/* Veritas Storage Appliance */
		{ T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
		CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
	},
	{
		/*
		 * Would respond to all LUNs.  Device type and removable
		 * flag are jumper-selectable.
		 */
		{ T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
		  "Tahiti 1", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* EasyRAID E5A aka. areca ARC-6010 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
		CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Default tagged queuing parameters for all devices */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0, /*mintags*/2, /*maxtags*/255
	},
};
static const int xpt_quirk_table_size =
	sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
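/*
 * Illustrative sketch (not part of the original file): inquiry data is
 * matched against xpt_quirk_table with cam_quirkmatch(), in the style of
 * xpt_find_quirk() declared below.  The terminating wildcard entry in the
 * table guarantees that a match is always found.
 */
#if 0
static struct xpt_quirk_entry *
example_quirk_lookup(struct cam_ed *device)
{
	caddr_t match;

	match = cam_quirkmatch((caddr_t)&device->inq_data,
			       (caddr_t)xpt_quirk_table,
			       xpt_quirk_table_size,
			       sizeof(*xpt_quirk_table),
			       scsi_inquiry_match);
	return ((struct xpt_quirk_entry *)match);
}
#endif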
typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;
struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};
typedef int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
static cam_simq_t cam_simq;
static struct spinlock cam_simq_spin;

struct cam_periph *xpt_periph;
static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
	probe_periph_init, "probe",
	TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);
PERIPHDRIVER_DECLARE(probe, probe_driver);
#define XPT_CDEV_MAJOR 104

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct dev_ops xpt_ops = {
	{ "xpt", XPT_CDEV_MAJOR, 0 },
	.d_open = xptopen,
	.d_close = xptclose,
	.d_ioctl = xptioctl
};
static void dead_sim_action(struct cam_sim *sim, union ccb *ccb);
static void dead_sim_poll(struct cam_sim *sim);

/* Dummy SIM that is used when the real one has gone. */
static struct cam_sim cam_dead_sim;
static struct lock    cam_dead_lock;
/* Storage for debugging datastructures */
#ifdef	CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif
#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static int	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);
static cam_status	xpt_compile_path(struct cam_path *new_path,
					 struct cam_periph *perph,
					 path_id_t path_id,
					 target_id_t target_id,
					 lun_id_t lun_id);

static void		xpt_release_path(struct cam_path *path);

static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static void		xpt_dev_async(u_int32_t async_code,
				      struct cam_eb *bus,
				      struct cam_et *target,
				      struct cam_ed *device,
				      void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				  u_int32_t new_priority);
static void	 xpt_run_dev_allocq(struct cam_eb *bus);
static void	 xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static void	 xpt_release_bus(struct cam_eb *bus);
static void	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
					 int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
				  lun_id_t lun_id);
static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
				    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void	 xpt_scan_lun(struct cam_periph *periph,
			      struct cam_path *path, cam_flags flags,
			      union ccb *ccb);
static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t	xptconfigbuscountfunc;
static xpt_busfunc_t	xptconfigfunc;
static void	 xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static inthand2_t swi_cambio;
static void	 camisr(void *);
static void	 camisr_runqueue(struct cam_sim *);
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
					    void *arg);
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static cam_status	proberegister(struct cam_periph *periph,
				      void *arg);
static void	 probeschedule(struct cam_periph *probe_periph);
static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
static void	 proberequestdefaultnegotiation(struct cam_periph *periph);
static int	 proberequestbackoff(struct cam_periph *periph,
				     struct cam_ed *device);
static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
static void	 probecleanup(struct cam_periph *periph);
static void	 xpt_find_quirk(struct cam_ed *device);
static void	 xpt_devise_transport(struct cam_path *path);
static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
					   struct cam_ed *device,
					   int async_update);
static void	 xpt_toggle_tags(struct cam_path *path);
static void	 xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
					    struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
					   struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
	int	retval;

	if (bus->sim->devq && dev->ccbq.devq_openings > 0) {
		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
			cam_ccbq_resize(&dev->ccbq,
					dev->ccbq.dev_openings
					+ dev->ccbq.dev_active);
			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
		}
		/*
		 * The priority of a device waiting for CCB resources
		 * is that of the highest priority peripheral driver
		 * enqueued.
		 */
		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
					  &dev->alloc_ccb_entry.pinfo,
					  CAMQ_GET_HEAD(&dev->drvq)->priority);
	} else {
		retval = 0;
	}

	return (retval);
}
static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
	int	retval;

	if (bus->sim->devq && dev->ccbq.dev_openings > 0) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&bus->sim->devq->send_queue,
				     &dev->send_ccb_entry.pinfo,
				     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
	} else {
		retval = 0;
	}

	return (retval);
}
static __inline int
periph_is_queued(struct cam_periph *periph)
{
	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
	/*
	 * Have work to do.
	 * Have space to do more work.
	 * Allowed to do work.
	 */
	return ((devq->alloc_queue.qfrozen_cnt == 0)
	     && (devq->alloc_queue.entries > 0)
	     && (devq->alloc_openings > 0));
}
static void
xpt_periph_init(void)
{
	make_dev(&xpt_ops, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init(void)
{
}
static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}
static int
xptopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;

	/*
	 * Only allow read-write access.
	 */
	if (((ap->a_oflags & FWRITE) == 0) || ((ap->a_oflags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((ap->a_oflags & O_NONBLOCK) != 0) {
		kprintf("%s: can't do nonblocking access\n", devtoname(dev));
		return(ENODEV);
	}

	/* Mark ourselves open */
	lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
	xsoftc.flags |= XPT_FLAG_OPEN;
	lockmgr(&xsoftc.xpt_lock, LK_RELEASE);

	return(0);
}
static int
xptclose(struct dev_close_args *ap)
{
	/* Mark ourselves closed */
	lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
	xsoftc.flags &= ~XPT_FLAG_OPEN;
	lockmgr(&xsoftc.xpt_lock, LK_RELEASE);

	return(0);
}
/*
 * Don't automatically grab the xpt softc lock here even though this is going
 * through the xpt device.  The xpt device is really just a back door for
 * accessing other devices and SIMs, so the right thing to do is to grab
 * the appropriate SIM lock once the bus/SIM is located.
 */
static int
xptioctl(struct dev_ioctl_args *ap)
{
	int error;

	error = 0;

	switch(ap->a_cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;
		struct cam_eb *bus;

		inccb = (union ccb *)ap->a_data;

		bus = xpt_find_bus(inccb->ccb_h.path_id);
		if (bus == NULL) {
			error = EINVAL;
			break;
		}

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
				error = EINVAL;
				break;
			}
			/* FALLTHROUGH */
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:
			ccb = xpt_alloc_ccb();

			CAM_SIM_LOCK(bus->sim);

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				CAM_SIM_UNLOCK(bus->sim);
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			CAM_SIM_UNLOCK(bus->sim);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			CAM_SIM_LOCK(bus->sim);

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				CAM_SIM_UNLOCK(bus->sim);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			ccb.ccb_h.cbfcnp = xptdone;
			xpt_action(&ccb);
			CAM_SIM_UNLOCK(bus->sim);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;
		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		xpt_release_bus(bus);
		break;
	}
	/*
	 * This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input,
	 * with the peripheral driver name and unit name filled in.  The other
	 * fields don't really matter as input.  The passthrough driver name
	 * ("pass"), and unit number are passed back in the ccb.  The current
	 * device generation number, and the index into the device peripheral
	 * driver list, and the status are also passed back.  Note that
	 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
	 * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
	 * (or rather should be) impossible for the device peripheral driver
	 * list to change since we look at the whole thing in one pass, and
	 * we do it with lock protection.
	 */
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char   *name;
		u_int unit;
		u_int cur_generation;
		int base_periph_found;
		int splbreaknum;

		ccb = (union ccb *)ap->a_data;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		/*
		 * Every 100 devices, we want to drop our lock protection to
		 * give the software interrupt handler a chance to run.
		 * Most systems won't run into this check, but this should
		 * avoid starvation in the software interrupt handler in
		 * large systems.
		 */
		splbreaknum = 100;

		ccb = (union ccb *)ap->a_data;

		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
ptstartover:
		cur_generation = xsoftc.xpt_generation;

		/* first find our driver in the list of drivers */
		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;
		}

		if (*p_drv == NULL) {
			lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {

			if (periph->unit_number == unit) {
				break;
			} else if (--splbreaknum == 0) {
				lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
				lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
				splbreaknum = 100;
				if (cur_generation != xsoftc.xpt_generation)
					goto ptstartover;
			}
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = SLIST_FIRST(&device->periphs);
			     periph != NULL;
			     periph = SLIST_NEXT(periph, periph_links), i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (SLIST_NEXT(periph, periph_links))
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				kprintf("xptioctl: pass driver is not in the "
				       "kernel\n");
				kprintf("xptioctl: put \"device pass\" in "
				       "your kernel config file\n");
			}
		}
		lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
		break;
		}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}
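/*
 * Illustrative userland sketch (assumption, not part of this file): this
 * is roughly how libcam-style code exercises the CAMGETPASSTHRU ioctl
 * handled above -- fill in the peripheral name and unit, and the kernel
 * hands back the corresponding "pass" device.  Compile in userland with
 * <fcntl.h>, <sys/ioctl.h> and the CAM headers.
 */
#if 0
static int
example_find_pass_device(const char *periph_name, int unit)
{
	union ccb ccb;
	int fd;

	if ((fd = open("/dev/xpt0", O_RDWR)) < 0)
		return (-1);
	bzero(&ccb, sizeof(ccb));
	ccb.ccb_h.func_code = XPT_GDEVLIST;
	strncpy(ccb.cgdl.periph_name, periph_name,
		sizeof(ccb.cgdl.periph_name));
	ccb.cgdl.unit_number = unit;
	if (ioctl(fd, CAMGETPASSTHRU, &ccb) == -1) {
		close(fd);
		return (-1);
	}
	/* ccb.cgdl.periph_name/unit_number now name the pass device. */
	close(fd);
	return (0);
}
#endif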
static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		if ((error = xpt_init(NULL)) != 0)
			return (error);
		break;
	case MOD_UNLOAD:
		return EBUSY;
	default:
		return EOPNOTSUPP;
	}

	return 0;
}
/*
 * Thread to handle asynchronous main-context requests.
 *
 * This function is typically used by drivers to perform complex actions
 * such as bus scans and engineering requests in a main context instead
 * of an interrupt context.
 */
static void
xpt_scanner_thread(void *dummy)
{
	union ccb	*ccb;
	struct cam_sim	*sim;

	for (;;) {
		xpt_lock_buses();
		xsoftc.ccb_scanq_running = 1;
		while ((ccb = (void *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
			TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h,
				     sim_links.tqe);
			xpt_unlock_buses();

			sim = ccb->ccb_h.path->bus->sim;
			CAM_SIM_LOCK(sim);
			xpt_action(ccb);
			CAM_SIM_UNLOCK(sim);

			xpt_lock_buses();
		}
		xsoftc.ccb_scanq_running = 0;
		tsleep_interlock(&xsoftc.ccb_scanq, 0);
		xpt_unlock_buses();
		tsleep(&xsoftc.ccb_scanq, PINTERLOCKED, "ccb_scanq", 0);
	}
}
/*
 * Issue an asynchronous action
 */
void
xpt_action_async(union ccb *ccb)
{
	xpt_lock_buses();
	TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
	if (xsoftc.ccb_scanq_running == 0) {
		xsoftc.ccb_scanq_running = 1;
		wakeup(&xsoftc.ccb_scanq);
	}
	xpt_unlock_buses();
}
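/*
 * Illustrative sketch (assumption, not part of the original file): a SIM
 * or peripheral wanting a deferred rescan would allocate a CCB and queue
 * it here so the scan runs in xpt_thrd context rather than the caller's
 * (possibly interrupt) context.  The completion handler choice is
 * hypothetical.
 */
#if 0
static void
example_async_bus_scan(struct cam_path *path)
{
	union ccb *ccb;

	ccb = xpt_alloc_ccb();
	xpt_setup_ccb(&ccb->ccb_h, path, /*priority*/5);
	ccb->ccb_h.func_code = XPT_SCAN_BUS;
	ccb->ccb_h.cbfcnp = xptdone;	/* hypothetical completion choice */
	ccb->crcn.flags = CAM_FLAG_NONE;
	xpt_action_async(ccb);
}
#endif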
/* Functions accessed by the peripheral drivers */
static int
xpt_init(void *dummy)
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;

	TAILQ_INIT(&xsoftc.xpt_busses);
	TAILQ_INIT(&cam_simq);
	TAILQ_INIT(&xsoftc.ccb_scanq);
	STAILQ_INIT(&xsoftc.highpowerq);
	xsoftc.num_highpower = CAM_MAX_HIGHPOWER;

	spin_init(&cam_simq_spin);
	lockinit(&xsoftc.xpt_lock, "XPT lock", 0, LK_CANRECURSE);
	lockinit(&xsoftc.xpt_topo_lock, "XPT topology lock", 0, LK_CANRECURSE);

	SLIST_INIT(&cam_dead_sim.ccb_freeq);
	TAILQ_INIT(&cam_dead_sim.sim_doneq);
	spin_init(&cam_dead_sim.sim_spin);
	cam_dead_sim.sim_action = dead_sim_action;
	cam_dead_sim.sim_poll = dead_sim_poll;
	cam_dead_sim.sim_name = "dead_sim";
	cam_dead_sim.lock = &cam_dead_lock;
	lockinit(&cam_dead_lock, "XPT dead_sim lock", 0, LK_CANRECURSE);
	cam_dead_sim.flags |= CAM_SIM_DEREGISTERED;

	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*lock*/&xsoftc.xpt_lock,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	cam_simq_release(devq);
	if (xpt_sim == NULL)
		return (ENOMEM);

	xpt_sim->max_ccbs = 16;

	lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
	if ((status = xpt_bus_register(xpt_sim, /*bus #*/0)) != CAM_SUCCESS) {
		kprintf("xpt_init: xpt_bus_register failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		kprintf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}

	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, xpt_sim);
	xpt_free_path(path);

	lockmgr(&xsoftc.xpt_lock, LK_RELEASE);

	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xsoftc.xpt_config_hook = kmalloc(sizeof(struct intr_config_hook),
				  M_CAMXPT, M_INTWAIT | M_ZERO);
	xsoftc.xpt_config_hook->ich_func = xpt_config;
	xsoftc.xpt_config_hook->ich_desc = "xpt";
	xsoftc.xpt_config_hook->ich_order = 1000;
	if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
		kfree (xsoftc.xpt_config_hook, M_CAMXPT);
		kprintf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	/* fire up rescan thread */
	if (kthread_create(xpt_scanner_thread, NULL, NULL, "xpt_thrd")) {
		kprintf("xpt_init: failed to create rescan thread\n");
	}
	/* Install our software interrupt handlers */
	register_swi(SWI_CAMBIO, swi_cambio, NULL, "swi_cambio", NULL);

	return (0);
}
static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	struct cam_sim *xpt_sim;

	if (periph == NULL) {
		kprintf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	xpt_sim = (struct cam_sim *)arg;
	xpt_sim->softc = periph;
	xpt_periph = periph;
	periph->softc = NULL;

	return(CAM_REQ_CMP);
}
int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;
	struct periph_list *periph_head;

	sim_lock_assert_owned(periph->sim->lock);

	device = periph->path->device;

	periph_head = &device->periphs;

	status = CAM_REQ_CMP;

	if (device != NULL) {
		/*
		 * Make room for this peripheral
		 * so it will fit in the queue
		 * when it's scheduled to run
		 */
		status = camq_resize(&device->drvq,
				     device->drvq.array_size + 1);

		device->generation++;

		SLIST_INSERT_HEAD(periph_head, periph, periph_links);
	}

	lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
	xsoftc.xpt_generation++;
	lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);

	return (status);
}
void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	sim_lock_assert_owned(periph->sim->lock);

	device = periph->path->device;

	if (device != NULL) {
		struct periph_list *periph_head;

		periph_head = &device->periphs;

		/* Release the slot for this peripheral */
		camq_resize(&device->drvq, device->drvq.array_size - 1);

		device->generation++;

		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
	}

	lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
	xsoftc.xpt_generation++;
	lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
}
void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	struct	ccb_pathinq cpi;
	struct	ccb_trans_settings cts;
	struct	cam_path *path;
	u_int	speed;
	u_int	freq;
	u_int	mb;

	sim_lock_assert_owned(periph->sim->lock);

	path = periph->path;

	/* Report basic attachment and inquiry data */
	kprintf("%s%d at %s%d bus %d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->target->target_id,
	       path->device->lun_id);
	kprintf("%s%d: ", periph->periph_name, periph->unit_number);
	scsi_print_inquiry(&path->device->inq_data);

	/* Report serial number */
	if (path->device->serial_num_len > 0) {
		/* Don't wrap the screen - print only the first 60 chars */
		kprintf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}

	/* Acquire and report transfer speed */
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb *)&cts);
	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		return;
	}

	/* Ask the SIM for its base transfer speed */
	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	speed = cpi.base_transfer_speed;
	freq = 0;
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
		struct	ccb_trans_settings_spi *spi;

		spi = &cts.xport_specific.spi;
		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
		  && spi->sync_offset != 0) {
			freq = scsi_calc_syncsrate(spi->sync_period);
			speed = freq;
		}

		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
			speed *= (0x01 << spi->bus_width);
	}
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
		struct	ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_SPEED) {
			speed = fc->bitrate;
		}
	}

	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SAS) {
		struct	ccb_trans_settings_sas *sas = &cts.xport_specific.sas;
		if (sas->valid & CTS_SAS_VALID_SPEED) {
			speed = sas->bitrate;
		}
	}

	mb = speed / 1000;
	if (mb > 0)
		kprintf("%s%d: %d.%03dMB/s transfers",
		       periph->periph_name, periph->unit_number,
		       mb, speed % 1000);
	else
		kprintf("%s%d: %dKB/s transfers", periph->periph_name,
		       periph->unit_number, speed);

	/* Report additional information about SPI connections */
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
		struct	ccb_trans_settings_spi *spi;

		spi = &cts.xport_specific.spi;
		if (freq != 0) {
			kprintf(" (%d.%03dMHz%s, offset %d", freq / 1000,
			       freq % 1000,
			       (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
			     ? " DT" : "",
			       spi->sync_offset);
		}
		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
		 && spi->bus_width > 0) {
			if (freq != 0) {
				kprintf(", ");
			} else {
				kprintf(" (");
			}
			kprintf("%dbit)", 8 * (0x01 << spi->bus_width));
		} else if (freq != 0) {
			kprintf(")");
		}
	}
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
		struct	ccb_trans_settings_fc *fc;

		fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_WWNN)
			kprintf(" WWNN 0x%llx", (long long) fc->wwnn);
		if (fc->valid & CTS_FC_VALID_WWPN)
			kprintf(" WWPN 0x%llx", (long long) fc->wwpn);
		if (fc->valid & CTS_FC_VALID_PORT)
			kprintf(" PortID 0x%x", fc->port);
	}

	if (path->device->inq_flags & SID_CmdQue
	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		kprintf("\n%s%d: Command Queueing Enabled",
		       periph->periph_name, periph->unit_number);
	}
	kprintf("\n");

	/*
	 * We only want to print the caller's announce string if they've
	 * passed one in..
	 */
	if (announce_string != NULL)
		kprintf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
}
static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * device types.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}
1872 xptdevicematch(struct dev_match_pattern
*patterns
, u_int num_patterns
,
1873 struct cam_ed
*device
)
1875 dev_match_ret retval
;
1878 retval
= DM_RET_NONE
;
1881 * If we aren't given something to match against, that's an error.
1884 return(DM_RET_ERROR
);
1887 * If there are no match entries, then this device matches no
1890 if ((patterns
== NULL
) || (num_patterns
== 0))
1891 return(DM_RET_DESCEND
| DM_RET_COPY
);
1893 for (i
= 0; i
< num_patterns
; i
++) {
1894 struct device_match_pattern
*cur_pattern
;
1897 * If the pattern in question isn't for a device node, we
1898 * aren't interested.
1900 if (patterns
[i
].type
!= DEV_MATCH_DEVICE
) {
1901 if ((patterns
[i
].type
== DEV_MATCH_PERIPH
)
1902 && ((retval
& DM_RET_ACTION_MASK
) == DM_RET_NONE
))
1903 retval
|= DM_RET_DESCEND
;
1907 cur_pattern
= &patterns
[i
].pattern
.device_pattern
;
1910 * If they want to match any device node, we give them any
1913 if (cur_pattern
->flags
== DEV_MATCH_ANY
) {
1914 /* set the copy flag */
1915 retval
|= DM_RET_COPY
;
1919 * If we've already decided on an action, go ahead
1922 if ((retval
& DM_RET_ACTION_MASK
) != DM_RET_NONE
)
1927 * Not sure why someone would do this...
1929 if (cur_pattern
->flags
== DEV_MATCH_NONE
)
1932 if (((cur_pattern
->flags
& DEV_MATCH_PATH
) != 0)
1933 && (cur_pattern
->path_id
!= device
->target
->bus
->path_id
))
1936 if (((cur_pattern
->flags
& DEV_MATCH_TARGET
) != 0)
1937 && (cur_pattern
->target_id
!= device
->target
->target_id
))
1940 if (((cur_pattern
->flags
& DEV_MATCH_LUN
) != 0)
1941 && (cur_pattern
->target_lun
!= device
->lun_id
))
1944 if (((cur_pattern
->flags
& DEV_MATCH_INQUIRY
) != 0)
1945 && (cam_quirkmatch((caddr_t
)&device
->inq_data
,
1946 (caddr_t
)&cur_pattern
->inq_pat
,
1947 1, sizeof(cur_pattern
->inq_pat
),
1948 scsi_static_inquiry_match
) == NULL
))
1952 * If we get to this point, the user definitely wants
1953 * information on this device. So tell the caller to copy
1956 retval
|= DM_RET_COPY
;
1959 * If the return action has been set to descend, then we
1960 * know that we've already seen a peripheral matching
1961 * expression, therefore we need to further descend the tree.
1962 * This won't change by continuing around the loop, so we
1963 * go ahead and return. If we haven't seen a peripheral
1964 * matching expression, we keep going around the loop until
1965 * we exhaust the matching expressions. We'll set the stop
1966 * flag once we fall out of the loop.
1968 if ((retval
& DM_RET_ACTION_MASK
) == DM_RET_DESCEND
)
1973 * If the return action hasn't been set to descend yet, that means
1974 * we haven't seen any peripheral matching patterns. So tell the
1975 * caller to stop descending the tree -- the user doesn't want to
1976 * match against lower level tree elements.
1978 if ((retval
& DM_RET_ACTION_MASK
) == DM_RET_NONE
)
1979 retval
|= DM_RET_STOP
;
/*
 * Match a single peripheral against any number of match patterns.
 */
static dev_match_ret
xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_periph *periph)
{
	dev_match_ret retval;
	int i;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (periph == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this peripheral matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_STOP | DM_RET_COPY);

	/*
	 * There aren't any nodes below a peripheral node, so there's no
	 * reason to descend the tree any further.
	 */
	retval = DM_RET_STOP;

	for (i = 0; i < num_patterns; i++) {
		struct periph_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a peripheral, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_PERIPH)
			continue;

		cur_pattern = &patterns[i].pattern.periph_pattern;

		/*
		 * If they want to match on anything, then we will do so.
		 */
		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * We've already set the return action to stop,
			 * since there are no nodes below peripherals in
			 * the tree.
			 */
			return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == PERIPH_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != periph->path->bus->path_id))
			continue;

		/*
		 * For the target and lun id's, we have to make sure the
		 * target and lun pointers aren't NULL.  The xpt peripheral
		 * has a wildcard target and device.
		 */
		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
		 && ((periph->path->target == NULL)
		 ||(cur_pattern->target_id != periph->path->target->target_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
		 && ((periph->path->device == NULL)
		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != periph->unit_number))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this peripheral.  So tell the caller to
		 * copy the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * The return action has already been set to stop, since
		 * peripherals don't have any nodes below them in the EDT.
		 */
		return(retval);
	}

	/*
	 * If we get to this point, the peripheral that was passed in
	 * doesn't match any of the patterns.
	 */
	return(retval);
}
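/*
 * Illustrative sketch (assumption, not part of the original file): the
 * match routines above are driven by XPT_DEV_MATCH CCBs whose pattern
 * buffers look roughly like this -- here, "match every peripheral
 * named 'da'".
 */
#if 0
static void
example_fill_periph_pattern(struct dev_match_pattern *pattern)
{
	pattern->type = DEV_MATCH_PERIPH;
	bzero(&pattern->pattern.periph_pattern,
	      sizeof(pattern->pattern.periph_pattern));
	pattern->pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
	strncpy(pattern->pattern.periph_pattern.periph_name, "da",
		DEV_IDLEN);
}
#endif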
static int
xptedtbusfunc(struct cam_eb *bus, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);

	/*
	 * If we got an error, bail out of the search.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this bus out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;

			cdm->pos.cookie.bus = bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				xsoftc.bus_generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_BUS;
		cdm->matches[j].result.bus_result.path_id = bus->path_id;
		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
		cdm->matches[j].result.bus_result.unit_number =
			bus->sim->unit_number;
		strncpy(cdm->matches[j].result.bus_result.dev_name,
			bus->sim->sim_name, DEV_IDLEN);
	}

	/*
	 * If the user is only interested in busses, there's no
	 * reason to descend to the next level in the tree.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a target generation recorded, check it to
	 * make sure the target list hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (bus == cdm->pos.cookie.bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
	     bus->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		return(xpttargettraverse(bus,
					(struct cam_et *)cdm->pos.cookie.target,
					 xptedttargetfunc, arg));
	else
		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
}
static int
xptedttargetfunc(struct cam_et *target, void *arg)
{
	struct ccb_dev_match *cdm;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If there is a device list generation recorded, check it to
	 * make sure the device list hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
	     target->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device != NULL))
		return(xptdevicetraverse(target,
					(struct cam_ed *)cdm->pos.cookie.device,
					 xptedtdevicefunc, arg));
	else
		return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
}
static int
xptedtdevicefunc(struct cam_ed *device, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
					device);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this device out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
			    CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
			    CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;

			cdm->pos.cookie.bus = device->target->bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				xsoftc.bus_generation;
			cdm->pos.cookie.target = device->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				device->target->bus->generation;
			cdm->pos.cookie.device = device;
			cdm->pos.generations[CAM_DEV_GENERATION] =
				device->target->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_DEVICE;
		cdm->matches[j].result.device_result.path_id =
			device->target->bus->path_id;
		cdm->matches[j].result.device_result.target_id =
			device->target->target_id;
		cdm->matches[j].result.device_result.target_lun =
			device->lun_id;
		bcopy(&device->inq_data,
		      &cdm->matches[j].result.device_result.inq_data,
		      sizeof(struct scsi_inquiry_data));

		/* Let the user know whether this device is unconfigured */
		if (device->flags & CAM_DEV_UNCONFIGURED)
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_UNCONFIGURED;
		else
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_NOFLAG;
	}

	/*
	 * If the user isn't interested in peripherals, don't descend
	 * the tree any further.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a peripheral list generation recorded, make sure
	 * it hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (device->target->bus == cdm->pos.cookie.bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (device->target == cdm->pos.cookie.target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (device == cdm->pos.cookie.device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
	     device->generation)){
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == device->target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == device->target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		return(xptperiphtraverse(device,
				(struct cam_periph *)cdm->pos.cookie.periph,
				xptedtperiphfunc, arg));
	else
		return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
}
static int
xptedtperiphfunc(struct cam_periph *periph, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this peripheral out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
			    CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
			    CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
			    CAM_DEV_POS_PERIPH;

			cdm->pos.cookie.bus = periph->path->bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				xsoftc.bus_generation;
			cdm->pos.cookie.target = periph->path->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				periph->path->bus->generation;
			cdm->pos.cookie.device = periph->path->device;
			cdm->pos.generations[CAM_DEV_GENERATION] =
				periph->path->target->generation;
			cdm->pos.cookie.periph = periph;
			cdm->pos.generations[CAM_PERIPH_GENERATION] =
				periph->path->device->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}

		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_PERIPH;
		cdm->matches[j].result.periph_result.path_id =
			periph->path->bus->path_id;
		cdm->matches[j].result.periph_result.target_id =
			periph->path->target->target_id;
		cdm->matches[j].result.periph_result.target_lun =
			periph->path->device->lun_id;
		cdm->matches[j].result.periph_result.unit_number =
			periph->unit_number;
		strncpy(cdm->matches[j].result.periph_result.periph_name,
			periph->periph_name, DEV_IDLEN);
	}

	return(1);
}
static int
xptedtmatch(struct ccb_dev_match *cdm)
{
	int ret;

	cdm->num_matches = 0;

	/*
	 * Check the bus list generation.  If it has changed, the user
	 * needs to reset everything and start over.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus != NULL))
		ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
				     xptedtbusfunc, cdm);
	else
		ret = xptbustraverse(NULL, xptedtbusfunc, cdm);

	/*
	 * If we get back 0, that means that we had to stop before fully
	 * traversing the EDT.  It also means that one of the subroutines
	 * has set the status field to the proper value.  If we get back 1,
	 * we've fully traversed the EDT and copied out any matching entries.
	 */
	if (ret == 1)
		cdm->status = CAM_DEV_MATCH_LAST;

	return(ret);
}
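/*
 * Because xptedtmatch() saves its place in cdm->pos and returns with a
 * CAM_DEV_MATCH_MORE status when the result buffer fills, a caller can
 * drain a large EDT with repeated XPT_DEV_MATCH ccbs.  Roughly, as an
 * illustrative pseudo-caller (issue_xpt_dev_match() is a hypothetical
 * submit helper; error handling omitted):
 *
 *	bzero(&cdm.pos, sizeof(cdm.pos));	   (start at the top)
 *	do {
 *		issue_xpt_dev_match(&cdm);
 *		consume(cdm.matches, cdm.num_matches);
 *	} while (cdm.status == CAM_DEV_MATCH_MORE);
 *
 * A CAM_DEV_MATCH_LIST_CHANGED status tells the caller to reset cdm.pos
 * and start over, since a generation count no longer matches.
 */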
static int
xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
{
	struct ccb_dev_match *cdm;

	cdm = (struct ccb_dev_match *)arg;

	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
	 && (cdm->pos.cookie.pdrv == pdrv)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
	     (*pdrv)->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
	 && (cdm->pos.cookie.pdrv == pdrv)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		return(xptpdperiphtraverse(pdrv,
				(struct cam_periph *)cdm->pos.cookie.periph,
				xptplistperiphfunc, arg));
	else
		return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
}
static int
xptplistperiphfunc(struct cam_periph *periph, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this peripheral out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			struct periph_driver **pdrv;

			pdrv = NULL;
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
			    CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
			    CAM_DEV_POS_PERIPH;

			/*
			 * This may look a bit non-sensical, but it is
			 * actually quite logical.  There are very few
			 * peripheral drivers, and bloating every peripheral
			 * structure with a pointer back to its parent
			 * peripheral driver linker set entry would cost
			 * more in the long run than doing this quick lookup.
			 */
			for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
				if (strcmp((*pdrv)->driver_name,
				    periph->periph_name) == 0)
					break;
			}

			if (*pdrv == NULL) {
				cdm->status = CAM_DEV_MATCH_ERROR;
				return(0);
			}

			cdm->pos.cookie.pdrv = pdrv;
			/*
			 * The periph generation slot does double duty, as
			 * does the periph pointer slot.  They are used for
			 * both edt and pdrv lookups and positioning.
			 */
			cdm->pos.cookie.periph = periph;
			cdm->pos.generations[CAM_PERIPH_GENERATION] =
				(*pdrv)->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}

		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_PERIPH;
		cdm->matches[j].result.periph_result.path_id =
			periph->path->bus->path_id;

		/*
		 * The transport layer peripheral doesn't have a target or
		 * lun.
		 */
		if (periph->path->target)
			cdm->matches[j].result.periph_result.target_id =
				periph->path->target->target_id;
		else
			cdm->matches[j].result.periph_result.target_id = -1;

		if (periph->path->device)
			cdm->matches[j].result.periph_result.target_lun =
				periph->path->device->lun_id;
		else
			cdm->matches[j].result.periph_result.target_lun = -1;

		cdm->matches[j].result.periph_result.unit_number =
			periph->unit_number;
		strncpy(cdm->matches[j].result.periph_result.periph_name,
			periph->periph_name, DEV_IDLEN);
	}

	return(1);
}
static int
xptperiphlistmatch(struct ccb_dev_match *cdm)
{
	int ret;

	cdm->num_matches = 0;

	/*
	 * At this point in the edt traversal function, we check the bus
	 * list generation to make sure that no busses have been added or
	 * removed since the user last sent an XPT_DEV_MATCH ccb through.
	 * For the peripheral driver list traversal function, however, we
	 * don't have to worry about new peripheral driver types coming or
	 * going; they're in a linker set, and therefore can't change
	 * without a recompile.
	 */

	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
	 && (cdm->pos.cookie.pdrv != NULL))
		ret = xptpdrvtraverse(
				(struct periph_driver **)cdm->pos.cookie.pdrv,
				xptplistpdrvfunc, cdm);
	else
		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);

	/*
	 * If we get back 0, that means that we had to stop before fully
	 * traversing the peripheral driver tree.  It also means that one of
	 * the subroutines has set the status field to the proper value.  If
	 * we get back 1, we've fully traversed the EDT and copied out any
	 * matching entries.
	 */
	if (ret == 1)
		cdm->status = CAM_DEV_MATCH_LAST;

	return(ret);
}
static int
xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
{
	struct cam_eb *bus, *next_bus;
	int retval;

	retval = 1;

	lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
	for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
	     bus != NULL;
	     bus = next_bus) {
		next_bus = TAILQ_NEXT(bus, links);

		lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
		CAM_SIM_LOCK(bus->sim);
		retval = tr_func(bus, arg);
		CAM_SIM_UNLOCK(bus->sim);
		if (retval == 0)
			return(retval);
		lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
	}
	lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);

	return(retval);
}
static int
xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
		  xpt_targetfunc_t *tr_func, void *arg)
{
	struct cam_et *target, *next_target;
	int retval;

	retval = 1;
	for (target = (start_target ? start_target :
		       TAILQ_FIRST(&bus->et_entries));
	     target != NULL; target = next_target) {
		next_target = TAILQ_NEXT(target, links);

		retval = tr_func(target, arg);

		if (retval == 0)
			return(retval);
	}

	return(retval);
}

static int
xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
		  xpt_devicefunc_t *tr_func, void *arg)
{
	struct cam_ed *device, *next_device;
	int retval;

	retval = 1;
	for (device = (start_device ? start_device :
		       TAILQ_FIRST(&target->ed_entries));
	     device != NULL;
	     device = next_device) {
		next_device = TAILQ_NEXT(device, links);

		retval = tr_func(device, arg);

		if (retval == 0)
			return(retval);
	}

	return(retval);
}

static int
xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
		  xpt_periphfunc_t *tr_func, void *arg)
{
	struct cam_periph *periph, *next_periph;
	int retval;

	retval = 1;

	for (periph = (start_periph ? start_periph :
		       SLIST_FIRST(&device->periphs));
	     periph != NULL;
	     periph = next_periph) {
		next_periph = SLIST_NEXT(periph, periph_links);

		retval = tr_func(periph, arg);
		if (retval == 0)
			return(retval);
	}

	return(retval);
}
static int
xptpdrvtraverse(struct periph_driver **start_pdrv,
		xpt_pdrvfunc_t *tr_func, void *arg)
{
	struct periph_driver **pdrv;
	int retval;

	retval = 1;

	/*
	 * We don't traverse the peripheral driver list like we do the
	 * other lists, because it is a linker set, and therefore cannot be
	 * changed during runtime.  If the peripheral driver list is ever
	 * re-done to be something other than a linker set (i.e. it can
	 * change while the system is running), the list traversal should
	 * be modified to work like the other traversal functions.
	 */
	for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
	     *pdrv != NULL; pdrv++) {
		retval = tr_func(pdrv, arg);

		if (retval == 0)
			return(retval);
	}

	return(retval);
}

static int
xptpdperiphtraverse(struct periph_driver **pdrv,
		    struct cam_periph *start_periph,
		    xpt_periphfunc_t *tr_func, void *arg)
{
	struct cam_periph *periph, *next_periph;
	int retval;

	retval = 1;

	for (periph = (start_periph ? start_periph :
	     TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
	     periph = next_periph) {
		next_periph = TAILQ_NEXT(periph, unit_links);

		retval = tr_func(periph, arg);
		if (retval == 0)
			return(retval);
	}
	return(retval);
}
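/*
 * All of the traversal functions above share one shape: optionally start
 * from a saved node, fetch the successor before invoking the callback, and
 * stop early when the callback returns 0.  Caching the successor first is
 * what lets a callback remove the node it was handed without breaking the
 * walk.
 */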
static int
xptdefbusfunc(struct cam_eb *bus, void *arg)
{
	struct xpt_traverse_config *tr_config;

	tr_config = (struct xpt_traverse_config *)arg;

	if (tr_config->depth == XPT_DEPTH_BUS) {
		xpt_busfunc_t *tr_func;

		tr_func = (xpt_busfunc_t *)tr_config->tr_func;

		return(tr_func(bus, tr_config->tr_arg));
	} else
		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
}

static int
xptdeftargetfunc(struct cam_et *target, void *arg)
{
	struct xpt_traverse_config *tr_config;

	tr_config = (struct xpt_traverse_config *)arg;

	if (tr_config->depth == XPT_DEPTH_TARGET) {
		xpt_targetfunc_t *tr_func;

		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;

		return(tr_func(target, tr_config->tr_arg));
	} else
		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
}

static int
xptdefdevicefunc(struct cam_ed *device, void *arg)
{
	struct xpt_traverse_config *tr_config;

	tr_config = (struct xpt_traverse_config *)arg;

	if (tr_config->depth == XPT_DEPTH_DEVICE) {
		xpt_devicefunc_t *tr_func;

		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;

		return(tr_func(device, tr_config->tr_arg));
	} else
		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
}

static int
xptdefperiphfunc(struct cam_periph *periph, void *arg)
{
	struct xpt_traverse_config *tr_config;
	xpt_periphfunc_t *tr_func;

	tr_config = (struct xpt_traverse_config *)arg;

	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;

	/*
	 * Unlike the other default functions, we don't check for depth
	 * here.  The peripheral driver level is the last level in the EDT,
	 * so if we're here, we should execute the function in question.
	 */
	return(tr_func(periph, tr_config->tr_arg));
}
/*
 * Execute the given function for every bus in the EDT.
 */
int
xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
{
	struct xpt_traverse_config tr_config;

	tr_config.depth = XPT_DEPTH_BUS;
	tr_config.tr_func = tr_func;
	tr_config.tr_arg = arg;

	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
}

/*
 * Execute the given function for every device in the EDT.
 */
int
xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
{
	struct xpt_traverse_config tr_config;

	tr_config.depth = XPT_DEPTH_DEVICE;
	tr_config.tr_func = tr_func;
	tr_config.tr_arg = arg;

	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
}
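/*
 * Example use of the iterator wrappers (a minimal, illustrative sketch;
 * count_dev() is a hypothetical callback).  The callback returns nonzero
 * to continue the traversal and 0 to abort it:
 *
 *	static int
 *	count_dev(struct cam_ed *device, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return(1);
 *	}
 *
 *	int ndevs = 0;
 *	xpt_for_all_devices(count_dev, &ndevs);
 */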
static int
xptsetasyncfunc(struct cam_ed *device, void *arg)
{
	struct cam_path path;
	struct ccb_getdev cgd;
	struct async_node *cur_entry;

	cur_entry = (struct async_node *)arg;

	/*
	 * Don't report unconfigured devices (Wildcard devs,
	 * devices only for target mode, device instances
	 * that have been invalidated but are waiting for
	 * their last reference count to be released).
	 */
	if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
		return (1);

	xpt_compile_path(&path,
			 NULL,
			 device->target->bus->path_id,
			 device->target->target_id,
			 device->lun_id);
	xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
	xpt_action((union ccb *)&cgd);
	cur_entry->callback(cur_entry->callback_arg,
			    AC_FOUND_DEVICE,
			    &path, &cgd);
	xpt_release_path(&path);

	return(1);
}

static int
xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
{
	struct cam_path path;
	struct ccb_pathinq cpi;
	struct async_node *cur_entry;

	cur_entry = (struct async_node *)arg;

	xpt_compile_path(&path, /*periph*/NULL,
			 bus->sim->path_id,
			 CAM_TARGET_WILDCARD,
			 CAM_LUN_WILDCARD);
	xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	cur_entry->callback(cur_entry->callback_arg,
			    AC_PATH_REGISTERED,
			    &path, &cpi);
	xpt_release_path(&path);

	return(1);
}
static void
xpt_action_sasync_cb(void *context, int pending)
{
	struct async_node *cur_entry;
	struct xpt_task *task;
	u_int32_t added;

	task = (struct xpt_task *)context;
	cur_entry = (struct async_node *)task->data1;
	added = task->data2;

	if ((added & AC_FOUND_DEVICE) != 0) {
		/*
		 * Get this peripheral up to date with all
		 * the currently existing devices.
		 */
		xpt_for_all_devices(xptsetasyncfunc, cur_entry);
	}
	if ((added & AC_PATH_REGISTERED) != 0) {
		/*
		 * Get this peripheral up to date with all
		 * the currently existing busses.
		 */
		xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
	}
	kfree(task, M_CAMXPT);
}
2958 xpt_action(union ccb
*start_ccb
)
2960 CAM_DEBUG(start_ccb
->ccb_h
.path
, CAM_DEBUG_TRACE
, ("xpt_action\n"));
2962 start_ccb
->ccb_h
.status
= CAM_REQ_INPROG
;
2964 switch (start_ccb
->ccb_h
.func_code
) {
2967 struct cam_ed
*device
;
2969 char cdb_str
[(SCSI_MAX_CDBLEN
* 3) + 1];
2970 struct cam_path
*path
;
2972 path
= start_ccb
->ccb_h
.path
;
2976 * For the sake of compatibility with SCSI-1
2977 * devices that may not understand the identify
2978 * message, we include lun information in the
2979 * second byte of all commands. SCSI-1 specifies
2980 * that luns are a 3 bit value and reserves only 3
2981 * bits for lun information in the CDB. Later
2982 * revisions of the SCSI spec allow for more than 8
2983 * luns, but have deprecated lun information in the
2984 * CDB. So, if the lun won't fit, we must omit.
2986 * Also be aware that during initial probing for devices,
2987 * the inquiry information is unknown but initialized to 0.
2988 * This means that this code will be exercised while probing
2989 * devices with an ANSI revision greater than 2.
2991 device
= start_ccb
->ccb_h
.path
->device
;
2992 if (device
->protocol_version
<= SCSI_REV_2
2993 && start_ccb
->ccb_h
.target_lun
< 8
2994 && (start_ccb
->ccb_h
.flags
& CAM_CDB_POINTER
) == 0) {
2996 start_ccb
->csio
.cdb_io
.cdb_bytes
[1] |=
2997 start_ccb
->ccb_h
.target_lun
<< 5;
2999 start_ccb
->csio
.scsi_status
= SCSI_STATUS_OK
;
3000 CAM_DEBUG(path
, CAM_DEBUG_CDB
,("%s. CDB: %s\n",
3001 scsi_op_desc(start_ccb
->csio
.cdb_io
.cdb_bytes
[0],
3002 &path
->device
->inq_data
),
3003 scsi_cdb_string(start_ccb
->csio
.cdb_io
.cdb_bytes
,
3004 cdb_str
, sizeof(cdb_str
))));
3008 case XPT_CONT_TARGET_IO
:
3009 start_ccb
->csio
.sense_resid
= 0;
3010 start_ccb
->csio
.resid
= 0;
3015 struct cam_path
*path
;
3016 struct cam_sim
*sim
;
3019 path
= start_ccb
->ccb_h
.path
;
3021 sim
= path
->bus
->sim
;
3022 if (sim
== &cam_dead_sim
) {
3023 /* The SIM has gone; just execute the CCB directly. */
3024 cam_ccbq_send_ccb(&path
->device
->ccbq
, start_ccb
);
3025 (*(sim
->sim_action
))(sim
, start_ccb
);
3029 cam_ccbq_insert_ccb(&path
->device
->ccbq
, start_ccb
);
3030 if (path
->device
->qfrozen_cnt
== 0)
3031 runq
= xpt_schedule_dev_sendq(path
->bus
, path
->device
);
3035 xpt_run_dev_sendq(path
->bus
);
3038 case XPT_SET_TRAN_SETTINGS
:
3040 xpt_set_transfer_settings(&start_ccb
->cts
,
3041 start_ccb
->ccb_h
.path
->device
,
3042 /*async_update*/FALSE
);
3045 case XPT_CALC_GEOMETRY
:
3047 struct cam_sim
*sim
;
3049 /* Filter out garbage */
3050 if (start_ccb
->ccg
.block_size
== 0
3051 || start_ccb
->ccg
.volume_size
== 0) {
3052 start_ccb
->ccg
.cylinders
= 0;
3053 start_ccb
->ccg
.heads
= 0;
3054 start_ccb
->ccg
.secs_per_track
= 0;
3055 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3058 sim
= start_ccb
->ccb_h
.path
->bus
->sim
;
3059 (*(sim
->sim_action
))(sim
, start_ccb
);
3064 union ccb
* abort_ccb
;
3066 abort_ccb
= start_ccb
->cab
.abort_ccb
;
3067 if (XPT_FC_IS_DEV_QUEUED(abort_ccb
)) {
3069 if (abort_ccb
->ccb_h
.pinfo
.index
>= 0) {
3070 struct cam_ccbq
*ccbq
;
3072 ccbq
= &abort_ccb
->ccb_h
.path
->device
->ccbq
;
3073 cam_ccbq_remove_ccb(ccbq
, abort_ccb
);
3074 abort_ccb
->ccb_h
.status
=
3075 CAM_REQ_ABORTED
|CAM_DEV_QFRZN
;
3076 xpt_freeze_devq(abort_ccb
->ccb_h
.path
, 1);
3077 xpt_done(abort_ccb
);
3078 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3081 if (abort_ccb
->ccb_h
.pinfo
.index
== CAM_UNQUEUED_INDEX
3082 && (abort_ccb
->ccb_h
.status
& CAM_SIM_QUEUED
) == 0) {
3084 * We've caught this ccb en route to
3085 * the SIM. Flag it for abort and the
3086 * SIM will do so just before starting
3087 * real work on the CCB.
3089 abort_ccb
->ccb_h
.status
=
3090 CAM_REQ_ABORTED
|CAM_DEV_QFRZN
;
3091 xpt_freeze_devq(abort_ccb
->ccb_h
.path
, 1);
3092 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3096 if (XPT_FC_IS_QUEUED(abort_ccb
)
3097 && (abort_ccb
->ccb_h
.pinfo
.index
== CAM_DONEQ_INDEX
)) {
3099 * It's already completed but waiting
3100 * for our SWI to get to it.
3102 start_ccb
->ccb_h
.status
= CAM_UA_ABORT
;
3106 * If we weren't able to take care of the abort request
3107 * in the XPT, pass the request down to the SIM for processing.
3111 case XPT_ACCEPT_TARGET_IO
:
3113 case XPT_IMMED_NOTIFY
:
3114 case XPT_NOTIFY_ACK
:
3115 case XPT_GET_TRAN_SETTINGS
:
3118 struct cam_sim
*sim
;
3120 sim
= start_ccb
->ccb_h
.path
->bus
->sim
;
3121 (*(sim
->sim_action
))(sim
, start_ccb
);
3126 struct cam_sim
*sim
;
3128 sim
= start_ccb
->ccb_h
.path
->bus
->sim
;
3129 (*(sim
->sim_action
))(sim
, start_ccb
);
3132 case XPT_PATH_STATS
:
3133 start_ccb
->cpis
.last_reset
=
3134 start_ccb
->ccb_h
.path
->bus
->last_reset
;
3135 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3141 dev
= start_ccb
->ccb_h
.path
->device
;
3142 if ((dev
->flags
& CAM_DEV_UNCONFIGURED
) != 0) {
3143 start_ccb
->ccb_h
.status
= CAM_DEV_NOT_THERE
;
3145 struct ccb_getdev
*cgd
;
3149 cgd
= &start_ccb
->cgd
;
3150 bus
= cgd
->ccb_h
.path
->bus
;
3151 tar
= cgd
->ccb_h
.path
->target
;
3152 cgd
->inq_data
= dev
->inq_data
;
3153 cgd
->ccb_h
.status
= CAM_REQ_CMP
;
3154 cgd
->serial_num_len
= dev
->serial_num_len
;
3155 if ((dev
->serial_num_len
> 0)
3156 && (dev
->serial_num
!= NULL
))
3157 bcopy(dev
->serial_num
, cgd
->serial_num
,
3158 dev
->serial_num_len
);
3162 case XPT_GDEV_STATS
:
3166 dev
= start_ccb
->ccb_h
.path
->device
;
3167 if ((dev
->flags
& CAM_DEV_UNCONFIGURED
) != 0) {
3168 start_ccb
->ccb_h
.status
= CAM_DEV_NOT_THERE
;
3170 struct ccb_getdevstats
*cgds
;
3174 cgds
= &start_ccb
->cgds
;
3175 bus
= cgds
->ccb_h
.path
->bus
;
3176 tar
= cgds
->ccb_h
.path
->target
;
3177 cgds
->dev_openings
= dev
->ccbq
.dev_openings
;
3178 cgds
->dev_active
= dev
->ccbq
.dev_active
;
3179 cgds
->devq_openings
= dev
->ccbq
.devq_openings
;
3180 cgds
->devq_queued
= dev
->ccbq
.queue
.entries
;
3181 cgds
->held
= dev
->ccbq
.held
;
3182 cgds
->last_reset
= tar
->last_reset
;
3183 cgds
->maxtags
= dev
->quirk
->maxtags
;
3184 cgds
->mintags
= dev
->quirk
->mintags
;
3185 if (timevalcmp(&tar
->last_reset
, &bus
->last_reset
, <))
3186 cgds
->last_reset
= bus
->last_reset
;
3187 cgds
->ccb_h
.status
= CAM_REQ_CMP
;
3193 struct cam_periph
*nperiph
;
3194 struct periph_list
*periph_head
;
3195 struct ccb_getdevlist
*cgdl
;
3197 struct cam_ed
*device
;
3204 * Don't want anyone mucking with our data.
3206 device
= start_ccb
->ccb_h
.path
->device
;
3207 periph_head
= &device
->periphs
;
3208 cgdl
= &start_ccb
->cgdl
;
3211 * Check and see if the list has changed since the user
3212 * last requested a list member. If so, tell them that the
3213 * list has changed, and therefore they need to start over
3214 * from the beginning.
3216 if ((cgdl
->index
!= 0) &&
3217 (cgdl
->generation
!= device
->generation
)) {
3218 cgdl
->status
= CAM_GDEVLIST_LIST_CHANGED
;
3223 * Traverse the list of peripherals and attempt to find
3224 * the requested peripheral.
3226 for (nperiph
= SLIST_FIRST(periph_head
), i
= 0;
3227 (nperiph
!= NULL
) && (i
<= cgdl
->index
);
3228 nperiph
= SLIST_NEXT(nperiph
, periph_links
), i
++) {
3229 if (i
== cgdl
->index
) {
3230 strncpy(cgdl
->periph_name
,
3231 nperiph
->periph_name
,
3233 cgdl
->unit_number
= nperiph
->unit_number
;
3238 cgdl
->status
= CAM_GDEVLIST_ERROR
;
3242 if (nperiph
== NULL
)
3243 cgdl
->status
= CAM_GDEVLIST_LAST_DEVICE
;
3245 cgdl
->status
= CAM_GDEVLIST_MORE_DEVS
;
3248 cgdl
->generation
= device
->generation
;
3250 cgdl
->ccb_h
.status
= CAM_REQ_CMP
;
3255 dev_pos_type position_type
;
3256 struct ccb_dev_match
*cdm
;
3259 cdm
= &start_ccb
->cdm
;
3262 * There are two ways of getting at information in the EDT.
3263 * The first way is via the primary EDT tree. It starts
3264 * with a list of busses, then a list of targets on a bus,
3265 * then devices/luns on a target, and then peripherals on a
3266 * device/lun. The "other" way is by the peripheral driver
3267 * lists. The peripheral driver lists are organized by
3268 * peripheral driver. (obviously) So it makes sense to
3269 * use the peripheral driver list if the user is looking
3270 * for something like "da1", or all "da" devices. If the
3271 * user is looking for something on a particular bus/target
3272 * or lun, it's generally better to go through the EDT tree.
3275 if (cdm
->pos
.position_type
!= CAM_DEV_POS_NONE
)
3276 position_type
= cdm
->pos
.position_type
;
3280 position_type
= CAM_DEV_POS_NONE
;
3282 for (i
= 0; i
< cdm
->num_patterns
; i
++) {
3283 if ((cdm
->patterns
[i
].type
== DEV_MATCH_BUS
)
3284 ||(cdm
->patterns
[i
].type
== DEV_MATCH_DEVICE
)){
3285 position_type
= CAM_DEV_POS_EDT
;
3290 if (cdm
->num_patterns
== 0)
3291 position_type
= CAM_DEV_POS_EDT
;
3292 else if (position_type
== CAM_DEV_POS_NONE
)
3293 position_type
= CAM_DEV_POS_PDRV
;
3296 switch(position_type
& CAM_DEV_POS_TYPEMASK
) {
3297 case CAM_DEV_POS_EDT
:
3298 ret
= xptedtmatch(cdm
);
3300 case CAM_DEV_POS_PDRV
:
3301 ret
= xptperiphlistmatch(cdm
);
3304 cdm
->status
= CAM_DEV_MATCH_ERROR
;
3308 if (cdm
->status
== CAM_DEV_MATCH_ERROR
)
3309 start_ccb
->ccb_h
.status
= CAM_REQ_CMP_ERR
;
3311 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3317 struct ccb_setasync
*csa
;
3318 struct async_node
*cur_entry
;
3319 struct async_list
*async_head
;
3322 csa
= &start_ccb
->csa
;
3323 added
= csa
->event_enable
;
3324 async_head
= &csa
->ccb_h
.path
->device
->asyncs
;
3327 * If there is already an entry for us, simply
3330 cur_entry
= SLIST_FIRST(async_head
);
3331 while (cur_entry
!= NULL
) {
3332 if ((cur_entry
->callback_arg
== csa
->callback_arg
)
3333 && (cur_entry
->callback
== csa
->callback
))
3335 cur_entry
= SLIST_NEXT(cur_entry
, links
);
3338 if (cur_entry
!= NULL
) {
3340 * If the request has no flags set,
3343 added
&= ~cur_entry
->event_enable
;
3344 if (csa
->event_enable
== 0) {
3345 SLIST_REMOVE(async_head
, cur_entry
,
3347 csa
->ccb_h
.path
->device
->refcount
--;
3348 kfree(cur_entry
, M_CAMXPT
);
3350 cur_entry
->event_enable
= csa
->event_enable
;
3353 cur_entry
= kmalloc(sizeof(*cur_entry
), M_CAMXPT
,
3355 cur_entry
->event_enable
= csa
->event_enable
;
3356 cur_entry
->callback_arg
= csa
->callback_arg
;
3357 cur_entry
->callback
= csa
->callback
;
3358 SLIST_INSERT_HEAD(async_head
, cur_entry
, links
);
3359 csa
->ccb_h
.path
->device
->refcount
++;
3363 * Need to decouple this operation via a taskqueue so that
3364 * the locking doesn't become a mess.
3366 if ((added
& (AC_FOUND_DEVICE
| AC_PATH_REGISTERED
)) != 0) {
3367 struct xpt_task
*task
;
3369 task
= kmalloc(sizeof(struct xpt_task
), M_CAMXPT
,
3372 TASK_INIT(&task
->task
, 0, xpt_action_sasync_cb
, task
);
3373 task
->data1
= cur_entry
;
3374 task
->data2
= added
;
3375 taskqueue_enqueue(taskqueue_thread
[mycpuid
],
3379 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3384 struct ccb_relsim
*crs
;
3387 crs
= &start_ccb
->crs
;
3388 dev
= crs
->ccb_h
.path
->device
;
3391 crs
->ccb_h
.status
= CAM_DEV_NOT_THERE
;
3395 if ((crs
->release_flags
& RELSIM_ADJUST_OPENINGS
) != 0) {
3397 if (INQ_DATA_TQ_ENABLED(&dev
->inq_data
)) {
3398 /* Don't ever go below one opening */
3399 if (crs
->openings
> 0) {
3400 xpt_dev_ccbq_resize(crs
->ccb_h
.path
,
3404 xpt_print(crs
->ccb_h
.path
,
3405 "tagged openings now %d\n",
3412 if ((crs
->release_flags
& RELSIM_RELEASE_AFTER_TIMEOUT
) != 0) {
3414 if ((dev
->flags
& CAM_DEV_REL_TIMEOUT_PENDING
) != 0) {
3417 * Just extend the old timeout and decrement
3418 * the freeze count so that a single timeout
3419 * is sufficient for releasing the queue.
3421 start_ccb
->ccb_h
.flags
&= ~CAM_DEV_QFREEZE
;
3422 callout_stop(&dev
->callout
);
3425 start_ccb
->ccb_h
.flags
|= CAM_DEV_QFREEZE
;
3428 callout_reset(&dev
->callout
,
3429 (crs
->release_timeout
* hz
) / 1000,
3430 xpt_release_devq_timeout
, dev
);
3432 dev
->flags
|= CAM_DEV_REL_TIMEOUT_PENDING
;
3436 if ((crs
->release_flags
& RELSIM_RELEASE_AFTER_CMDCMPLT
) != 0) {
3438 if ((dev
->flags
& CAM_DEV_REL_ON_COMPLETE
) != 0) {
3440 * Decrement the freeze count so that a single
3441 * completion is still sufficient to unfreeze
3444 start_ccb
->ccb_h
.flags
&= ~CAM_DEV_QFREEZE
;
3447 dev
->flags
|= CAM_DEV_REL_ON_COMPLETE
;
3448 start_ccb
->ccb_h
.flags
|= CAM_DEV_QFREEZE
;
3452 if ((crs
->release_flags
& RELSIM_RELEASE_AFTER_QEMPTY
) != 0) {
3454 if ((dev
->flags
& CAM_DEV_REL_ON_QUEUE_EMPTY
) != 0
3455 || (dev
->ccbq
.dev_active
== 0)) {
3457 start_ccb
->ccb_h
.flags
&= ~CAM_DEV_QFREEZE
;
3460 dev
->flags
|= CAM_DEV_REL_ON_QUEUE_EMPTY
;
3461 start_ccb
->ccb_h
.flags
|= CAM_DEV_QFREEZE
;
3465 if ((start_ccb
->ccb_h
.flags
& CAM_DEV_QFREEZE
) == 0) {
3467 xpt_release_devq(crs
->ccb_h
.path
, /*count*/1,
3470 start_ccb
->crs
.qfrozen_cnt
= dev
->qfrozen_cnt
;
3471 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3475 xpt_scan_bus(start_ccb
->ccb_h
.path
->periph
, start_ccb
);
3478 xpt_scan_lun(start_ccb
->ccb_h
.path
->periph
,
3479 start_ccb
->ccb_h
.path
, start_ccb
->crcn
.flags
,
3484 #ifdef CAM_DEBUG_DELAY
3485 cam_debug_delay
= CAM_DEBUG_DELAY
;
3487 cam_dflags
= start_ccb
->cdbg
.flags
;
3488 if (cam_dpath
!= NULL
) {
3489 xpt_free_path(cam_dpath
);
3493 if (cam_dflags
!= CAM_DEBUG_NONE
) {
3494 if (xpt_create_path(&cam_dpath
, xpt_periph
,
3495 start_ccb
->ccb_h
.path_id
,
3496 start_ccb
->ccb_h
.target_id
,
3497 start_ccb
->ccb_h
.target_lun
) !=
3499 start_ccb
->ccb_h
.status
= CAM_RESRC_UNAVAIL
;
3500 cam_dflags
= CAM_DEBUG_NONE
;
3502 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3503 xpt_print(cam_dpath
, "debugging flags now %x\n",
3508 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3510 #else /* !CAMDEBUG */
3511 start_ccb
->ccb_h
.status
= CAM_FUNC_NOTAVAIL
;
3512 #endif /* CAMDEBUG */
3516 if ((start_ccb
->ccb_h
.flags
& CAM_DEV_QFREEZE
) != 0)
3517 xpt_freeze_devq(start_ccb
->ccb_h
.path
, 1);
3518 start_ccb
->ccb_h
.status
= CAM_REQ_CMP
;
3525 start_ccb
->ccb_h
.status
= CAM_PROVIDE_FAIL
;
3531 xpt_polled_action(union ccb
*start_ccb
)
3534 struct cam_sim
*sim
;
3535 struct cam_devq
*devq
;
3538 timeout
= start_ccb
->ccb_h
.timeout
;
3539 sim
= start_ccb
->ccb_h
.path
->bus
->sim
;
3541 dev
= start_ccb
->ccb_h
.path
->device
;
3543 sim_lock_assert_owned(sim
->lock
);
3546 * Steal an opening so that no other queued requests
3547 * can get it before us while we simulate interrupts.
3549 dev
->ccbq
.devq_openings
--;
3550 dev
->ccbq
.dev_openings
--;
3552 while(((devq
&& devq
->send_openings
<= 0) || dev
->ccbq
.dev_openings
< 0)
3553 && (--timeout
> 0)) {
3555 (*(sim
->sim_poll
))(sim
);
3556 camisr_runqueue(sim
);
3559 dev
->ccbq
.devq_openings
++;
3560 dev
->ccbq
.dev_openings
++;
3563 xpt_action(start_ccb
);
3564 while(--timeout
> 0) {
3565 (*(sim
->sim_poll
))(sim
);
3566 camisr_runqueue(sim
);
3567 if ((start_ccb
->ccb_h
.status
& CAM_STATUS_MASK
)
			 * XXX Is it worth adding a sim_timeout entry
			 * point so we can attempt recovery?  If
			 * this is only used for dumps, I don't think
			 * it is.
3579 start_ccb
->ccb_h
.status
= CAM_CMD_TIMEOUT
;
3582 start_ccb
->ccb_h
.status
= CAM_RESRC_UNAVAIL
;
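/*
 * xpt_polled_action() is the path used when interrupts are not available,
 * e.g. while dumping to disk: it steals a device opening, then spins on
 * the SIM's poll routine and the completion queue in place of the normal
 * interrupt-driven flow.
 */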
 * Schedule a peripheral driver to receive a ccb when its
 * target device has space for more transactions.
3591 xpt_schedule(struct cam_periph
*perph
, u_int32_t new_priority
)
3593 struct cam_ed
*device
;
3594 union ccb
*work_ccb
;
3597 sim_lock_assert_owned(perph
->sim
->lock
);
3599 CAM_DEBUG(perph
->path
, CAM_DEBUG_TRACE
, ("xpt_schedule\n"));
3600 device
= perph
->path
->device
;
3601 if (periph_is_queued(perph
)) {
3602 /* Simply reorder based on new priority */
3603 CAM_DEBUG(perph
->path
, CAM_DEBUG_SUBTRACE
,
3604 (" change priority to %d\n", new_priority
));
3605 if (new_priority
< perph
->pinfo
.priority
) {
3606 camq_change_priority(&device
->drvq
,
3611 } else if (perph
->path
->bus
->sim
== &cam_dead_sim
) {
3612 /* The SIM is gone so just call periph_start directly. */
3613 work_ccb
= xpt_get_ccb(perph
->path
->device
);
3614 if (work_ccb
== NULL
)
3616 xpt_setup_ccb(&work_ccb
->ccb_h
, perph
->path
, new_priority
);
3617 perph
->pinfo
.priority
= new_priority
;
3618 perph
->periph_start(perph
, work_ccb
);
3621 /* New entry on the queue */
3622 CAM_DEBUG(perph
->path
, CAM_DEBUG_SUBTRACE
,
3623 (" added periph to queue\n"));
3624 perph
->pinfo
.priority
= new_priority
;
3625 perph
->pinfo
.generation
= ++device
->drvq
.generation
;
3626 camq_insert(&device
->drvq
, &perph
->pinfo
);
3627 runq
= xpt_schedule_dev_allocq(perph
->path
->bus
, device
);
3630 CAM_DEBUG(perph
->path
, CAM_DEBUG_SUBTRACE
,
3631 (" calling xpt_run_devq\n"));
3632 xpt_run_dev_allocq(perph
->path
->bus
);
3638 * Schedule a device to run on a given queue.
 * If the device was inserted as a new entry on the queue,
 * return 1 meaning the device queue should be run.  If we
 * were already queued, implying someone else has already
 * started the queue, return 0 so the caller doesn't attempt
 * to run the queue.
3646 xpt_schedule_dev(struct camq
*queue
, cam_pinfo
*pinfo
,
3647 u_int32_t new_priority
)
3650 u_int32_t old_priority
;
3652 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
, ("xpt_schedule_dev\n"));
3654 old_priority
= pinfo
->priority
;
3657 * Are we already queued?
3659 if (pinfo
->index
!= CAM_UNQUEUED_INDEX
) {
3660 /* Simply reorder based on new priority */
3661 if (new_priority
< old_priority
) {
3662 camq_change_priority(queue
, pinfo
->index
,
3664 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
,
3665 ("changed priority to %d\n",
3670 /* New entry on the queue */
3671 if (new_priority
< old_priority
)
3672 pinfo
->priority
= new_priority
;
3674 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
,
3675 ("Inserting onto queue\n"));
3676 pinfo
->generation
= ++queue
->generation
;
3677 camq_insert(queue
, pinfo
);
3684 xpt_run_dev_allocq(struct cam_eb
*bus
)
3686 struct cam_devq
*devq
;
3688 if ((devq
= bus
->sim
->devq
) == NULL
) {
3689 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
, ("xpt_run_dev_allocq: NULL devq\n"));
3692 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
, ("xpt_run_dev_allocq\n"));
3694 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
,
3695 (" qfrozen_cnt == 0x%x, entries == %d, "
3696 "openings == %d, active == %d\n",
3697 devq
->alloc_queue
.qfrozen_cnt
,
3698 devq
->alloc_queue
.entries
,
3699 devq
->alloc_openings
,
3700 devq
->alloc_active
));
3702 devq
->alloc_queue
.qfrozen_cnt
++;
3703 while ((devq
->alloc_queue
.entries
> 0)
3704 && (devq
->alloc_openings
> 0)
3705 && (devq
->alloc_queue
.qfrozen_cnt
<= 1)) {
3706 struct cam_ed_qinfo
*qinfo
;
3707 struct cam_ed
*device
;
3708 union ccb
*work_ccb
;
3709 struct cam_periph
*drv
;
3712 qinfo
= (struct cam_ed_qinfo
*)camq_remove(&devq
->alloc_queue
,
3714 device
= qinfo
->device
;
3716 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
,
3717 ("running device %p\n", device
));
3719 drvq
= &device
->drvq
;
3722 if (drvq
->entries
<= 0) {
3723 panic("xpt_run_dev_allocq: "
3724 "Device on queue without any work to do");
3727 if ((work_ccb
= xpt_get_ccb(device
)) != NULL
) {
3728 devq
->alloc_openings
--;
3729 devq
->alloc_active
++;
3730 drv
= (struct cam_periph
*)camq_remove(drvq
, CAMQ_HEAD
);
3731 xpt_setup_ccb(&work_ccb
->ccb_h
, drv
->path
,
3732 drv
->pinfo
.priority
);
3733 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
,
3734 ("calling periph start\n"));
3735 drv
->periph_start(drv
, work_ccb
);
3738 * Malloc failure in alloc_ccb
3741 * XXX add us to a list to be run from free_ccb
3742 * if we don't have any ccbs active on this
3743 * device queue otherwise we may never get run
3749 if (drvq
->entries
> 0) {
3750 /* We have more work. Attempt to reschedule */
3751 xpt_schedule_dev_allocq(bus
, device
);
3754 devq
->alloc_queue
.qfrozen_cnt
--;
3758 xpt_run_dev_sendq(struct cam_eb
*bus
)
3760 struct cam_devq
*devq
;
3762 if ((devq
= bus
->sim
->devq
) == NULL
) {
3763 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
, ("xpt_run_dev_sendq: NULL devq\n"));
3766 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
, ("xpt_run_dev_sendq\n"));
3768 devq
->send_queue
.qfrozen_cnt
++;
3769 while ((devq
->send_queue
.entries
> 0)
3770 && (devq
->send_openings
> 0)) {
3771 struct cam_ed_qinfo
*qinfo
;
3772 struct cam_ed
*device
;
3773 union ccb
*work_ccb
;
3774 struct cam_sim
*sim
;
3776 if (devq
->send_queue
.qfrozen_cnt
> 1) {
3780 qinfo
= (struct cam_ed_qinfo
*)camq_remove(&devq
->send_queue
,
3782 device
= qinfo
->device
;
3785 * If the device has been "frozen", don't attempt
3788 if (device
->qfrozen_cnt
> 0) {
3792 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
,
3793 ("running device %p\n", device
));
3795 work_ccb
= cam_ccbq_peek_ccb(&device
->ccbq
, CAMQ_HEAD
);
3796 if (work_ccb
== NULL
) {
3797 kprintf("device on run queue with no ccbs???\n");
3801 if ((work_ccb
->ccb_h
.flags
& CAM_HIGH_POWER
) != 0) {
3803 lockmgr(&xsoftc
.xpt_lock
, LK_EXCLUSIVE
);
3804 if (xsoftc
.num_highpower
<= 0) {
3806 * We got a high power command, but we
3807 * don't have any available slots. Freeze
3808 * the device queue until we have a slot
3811 device
->qfrozen_cnt
++;
3812 STAILQ_INSERT_TAIL(&xsoftc
.highpowerq
,
3816 lockmgr(&xsoftc
.xpt_lock
, LK_RELEASE
);
3820 * Consume a high power slot while
3823 xsoftc
.num_highpower
--;
3825 lockmgr(&xsoftc
.xpt_lock
, LK_RELEASE
);
3827 devq
->active_dev
= device
;
3828 cam_ccbq_remove_ccb(&device
->ccbq
, work_ccb
);
3830 cam_ccbq_send_ccb(&device
->ccbq
, work_ccb
);
3832 devq
->send_openings
--;
3833 devq
->send_active
++;
3835 if (device
->ccbq
.queue
.entries
> 0)
3836 xpt_schedule_dev_sendq(bus
, device
);
3838 if (work_ccb
&& (work_ccb
->ccb_h
.flags
& CAM_DEV_QFREEZE
) != 0){
3840 * The client wants to freeze the queue
3841 * after this CCB is sent.
3843 device
->qfrozen_cnt
++;
3846 /* In Target mode, the peripheral driver knows best... */
3847 if (work_ccb
->ccb_h
.func_code
== XPT_SCSI_IO
) {
3848 if ((device
->inq_flags
& SID_CmdQue
) != 0
3849 && work_ccb
->csio
.tag_action
!= CAM_TAG_ACTION_NONE
)
3850 work_ccb
->ccb_h
.flags
|= CAM_TAG_ACTION_VALID
;
3853 * Clear this in case of a retried CCB that
3854 * failed due to a rejected tag.
3856 work_ccb
->ccb_h
.flags
&= ~CAM_TAG_ACTION_VALID
;
3860 * Device queues can be shared among multiple sim instances
3861 * that reside on different busses. Use the SIM in the queue
3862 * CCB's path, rather than the one in the bus that was passed
3863 * into this function.
3865 sim
= work_ccb
->ccb_h
.path
->bus
->sim
;
3866 (*(sim
->sim_action
))(sim
, work_ccb
);
3868 devq
->active_dev
= NULL
;
3870 devq
->send_queue
.qfrozen_cnt
--;
/*
 * This function merges the relevant fields from the slave ccb into the
 * master ccb, while keeping important fields in the master ccb constant.
 */
void
xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
{
	/*
	 * Pull fields that are valid for peripheral drivers to set
	 * into the master CCB along with the CCB "payload".
	 */
	master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
	master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
	master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
	master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
	bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
	      sizeof(union ccb) - sizeof(struct ccb_hdr));
}
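/*
 * The &(&ccb_h)[1] expressions above address the first byte past the
 * header, so the bcopy() transfers only the function-specific "payload"
 * of the union ccb while the explicitly assigned header fields in the
 * master remain intact.
 */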
void
xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
{
	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
	callout_init(&ccb_h->timeout_ch);
	ccb_h->pinfo.priority = priority;
	ccb_h->path = path;
	ccb_h->path_id = path->bus->path_id;
	if (path->target)
		ccb_h->target_id = path->target->target_id;
	else
		ccb_h->target_id = CAM_TARGET_WILDCARD;
	if (path->device) {
		ccb_h->target_lun = path->device->lun_id;
		ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
	} else {
		ccb_h->target_lun = CAM_TARGET_WILDCARD;
	}
	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
	ccb_h->flags = 0;
}
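/*
 * Typical use, mirroring the XPT_PATH_INQ sequence elsewhere in this file
 * (an illustrative sketch; priority 1 is what the in-file callers pass):
 *
 *	struct ccb_pathinq cpi;
 *
 *	xpt_setup_ccb(&cpi.ccb_h, path, 1);
 *	cpi.ccb_h.func_code = XPT_PATH_INQ;
 *	xpt_action((union ccb *)&cpi);
 *
 * On return, cpi holds the path inquiry data when the status is
 * CAM_REQ_CMP.
 */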
3914 /* Path manipulation functions */
3916 xpt_create_path(struct cam_path
**new_path_ptr
, struct cam_periph
*perph
,
3917 path_id_t path_id
, target_id_t target_id
, lun_id_t lun_id
)
3919 struct cam_path
*path
;
3922 path
= kmalloc(sizeof(*path
), M_CAMXPT
, M_INTWAIT
);
3923 status
= xpt_compile_path(path
, perph
, path_id
, target_id
, lun_id
);
3924 if (status
!= CAM_REQ_CMP
) {
3925 kfree(path
, M_CAMXPT
);
3928 *new_path_ptr
= path
;
3933 xpt_create_path_unlocked(struct cam_path
**new_path_ptr
,
3934 struct cam_periph
*periph
, path_id_t path_id
,
3935 target_id_t target_id
, lun_id_t lun_id
)
3937 struct cam_path
*path
;
3938 struct cam_eb
*bus
= NULL
;
3940 int need_unlock
= 0;
3942 path
= (struct cam_path
*)kmalloc(sizeof(*path
), M_CAMXPT
, M_WAITOK
);
3944 if (path_id
!= CAM_BUS_WILDCARD
) {
3945 bus
= xpt_find_bus(path_id
);
3948 CAM_SIM_LOCK(bus
->sim
);
3951 status
= xpt_compile_path(path
, periph
, path_id
, target_id
, lun_id
);
3953 CAM_SIM_UNLOCK(bus
->sim
);
3954 if (status
!= CAM_REQ_CMP
) {
3955 kfree(path
, M_CAMXPT
);
3958 *new_path_ptr
= path
;
3963 xpt_compile_path(struct cam_path
*new_path
, struct cam_periph
*perph
,
3964 path_id_t path_id
, target_id_t target_id
, lun_id_t lun_id
)
3967 struct cam_et
*target
;
3968 struct cam_ed
*device
;
3971 status
= CAM_REQ_CMP
; /* Completed without error */
3972 target
= NULL
; /* Wildcarded */
3973 device
= NULL
; /* Wildcarded */
3976 * We will potentially modify the EDT, so block interrupts
3977 * that may attempt to create cam paths.
3979 bus
= xpt_find_bus(path_id
);
3981 status
= CAM_PATH_INVALID
;
3983 target
= xpt_find_target(bus
, target_id
);
3984 if (target
== NULL
) {
3986 struct cam_et
*new_target
;
3988 new_target
= xpt_alloc_target(bus
, target_id
);
3989 if (new_target
== NULL
) {
3990 status
= CAM_RESRC_UNAVAIL
;
3992 target
= new_target
;
3995 if (target
!= NULL
) {
3996 device
= xpt_find_device(target
, lun_id
);
3997 if (device
== NULL
) {
3999 struct cam_ed
*new_device
;
4001 new_device
= xpt_alloc_device(bus
,
4004 if (new_device
== NULL
) {
4005 status
= CAM_RESRC_UNAVAIL
;
4007 device
= new_device
;
4014 * Only touch the user's data if we are successful.
4016 if (status
== CAM_REQ_CMP
) {
4017 new_path
->periph
= perph
;
4018 new_path
->bus
= bus
;
4019 new_path
->target
= target
;
4020 new_path
->device
= device
;
4021 CAM_DEBUG(new_path
, CAM_DEBUG_TRACE
, ("xpt_compile_path\n"));
4024 xpt_release_device(bus
, target
, device
);
4026 xpt_release_target(bus
, target
);
4028 xpt_release_bus(bus
);
4034 xpt_release_path(struct cam_path
*path
)
4036 CAM_DEBUG(path
, CAM_DEBUG_TRACE
, ("xpt_release_path\n"));
4037 if (path
->device
!= NULL
) {
4038 xpt_release_device(path
->bus
, path
->target
, path
->device
);
4039 path
->device
= NULL
;
4041 if (path
->target
!= NULL
) {
4042 xpt_release_target(path
->bus
, path
->target
);
4043 path
->target
= NULL
;
4045 if (path
->bus
!= NULL
) {
4046 xpt_release_bus(path
->bus
);
4052 xpt_free_path(struct cam_path
*path
)
4054 CAM_DEBUG(path
, CAM_DEBUG_TRACE
, ("xpt_free_path\n"));
4055 xpt_release_path(path
);
4056 kfree(path
, M_CAMXPT
);
4061 * Return -1 for failure, 0 for exact match, 1 for match with wildcards
4062 * in path1, 2 for match with wildcards in path2.
4065 xpt_path_comp(struct cam_path
*path1
, struct cam_path
*path2
)
4069 if (path1
->bus
!= path2
->bus
) {
4070 if (path1
->bus
->path_id
== CAM_BUS_WILDCARD
)
4072 else if (path2
->bus
->path_id
== CAM_BUS_WILDCARD
)
4077 if (path1
->target
!= path2
->target
) {
4078 if (path1
->target
->target_id
== CAM_TARGET_WILDCARD
) {
4081 } else if (path2
->target
->target_id
== CAM_TARGET_WILDCARD
)
4086 if (path1
->device
!= path2
->device
) {
4087 if (path1
->device
->lun_id
== CAM_LUN_WILDCARD
) {
4090 } else if (path2
->device
->lun_id
== CAM_LUN_WILDCARD
)
4099 xpt_print_path(struct cam_path
*path
)
4103 kprintf("(nopath): ");
4105 if (path
->periph
!= NULL
)
4106 kprintf("(%s%d:", path
->periph
->periph_name
,
4107 path
->periph
->unit_number
);
4109 kprintf("(noperiph:");
4111 if (path
->bus
!= NULL
)
4112 kprintf("%s%d:%d:", path
->bus
->sim
->sim_name
,
4113 path
->bus
->sim
->unit_number
,
4114 path
->bus
->sim
->bus_id
);
4118 if (path
->target
!= NULL
)
4119 kprintf("%d:", path
->target
->target_id
);
4123 if (path
->device
!= NULL
)
4124 kprintf("%d): ", path
->device
->lun_id
);
4131 xpt_print(struct cam_path
*path
, const char *fmt
, ...)
4134 xpt_print_path(path
);
4135 __va_start(ap
, fmt
);
4141 xpt_path_string(struct cam_path
*path
, char *str
, size_t str_len
)
4145 sim_lock_assert_owned(path
->bus
->sim
->lock
);
4147 sbuf_new(&sb
, str
, str_len
, 0);
4150 sbuf_printf(&sb
, "(nopath): ");
4152 if (path
->periph
!= NULL
)
4153 sbuf_printf(&sb
, "(%s%d:", path
->periph
->periph_name
,
4154 path
->periph
->unit_number
);
4156 sbuf_printf(&sb
, "(noperiph:");
4158 if (path
->bus
!= NULL
)
4159 sbuf_printf(&sb
, "%s%d:%d:", path
->bus
->sim
->sim_name
,
4160 path
->bus
->sim
->unit_number
,
4161 path
->bus
->sim
->bus_id
);
4163 sbuf_printf(&sb
, "nobus:");
4165 if (path
->target
!= NULL
)
4166 sbuf_printf(&sb
, "%d:", path
->target
->target_id
);
4168 sbuf_printf(&sb
, "X:");
4170 if (path
->device
!= NULL
)
4171 sbuf_printf(&sb
, "%d): ", path
->device
->lun_id
);
4173 sbuf_printf(&sb
, "X): ");
4177 return(sbuf_len(&sb
));
4181 xpt_path_path_id(struct cam_path
*path
)
4183 sim_lock_assert_owned(path
->bus
->sim
->lock
);
4185 return(path
->bus
->path_id
);
4189 xpt_path_target_id(struct cam_path
*path
)
4191 sim_lock_assert_owned(path
->bus
->sim
->lock
);
4193 if (path
->target
!= NULL
)
4194 return (path
->target
->target_id
);
4196 return (CAM_TARGET_WILDCARD
);
4200 xpt_path_lun_id(struct cam_path
*path
)
4202 sim_lock_assert_owned(path
->bus
->sim
->lock
);
4204 if (path
->device
!= NULL
)
4205 return (path
->device
->lun_id
);
4207 return (CAM_LUN_WILDCARD
);
4211 xpt_path_sim(struct cam_path
*path
)
4213 return (path
->bus
->sim
);
4217 xpt_path_periph(struct cam_path
*path
)
4219 sim_lock_assert_owned(path
->bus
->sim
->lock
);
4221 return (path
->periph
);
4225 xpt_path_serialno(struct cam_path
*path
)
4227 return (path
->device
->serial_num
);
 * Release a CAM control block for the caller.  Remit the cost of the structure
 * to the device referenced by the path.  If this device had no 'credits'
 * and peripheral drivers have registered async callbacks for this notification,
 * call them now.
4237 xpt_release_ccb(union ccb
*free_ccb
)
4239 struct cam_path
*path
;
4240 struct cam_ed
*device
;
4242 struct cam_sim
*sim
;
4244 CAM_DEBUG_PRINT(CAM_DEBUG_XPT
, ("xpt_release_ccb\n"));
4245 path
= free_ccb
->ccb_h
.path
;
4246 device
= path
->device
;
4250 sim_lock_assert_owned(sim
->lock
);
4252 cam_ccbq_release_opening(&device
->ccbq
);
4253 if (sim
->ccb_count
> sim
->max_ccbs
) {
4254 xpt_free_ccb(free_ccb
);
4256 } else if (sim
== &cam_dead_sim
) {
4257 xpt_free_ccb(free_ccb
);
4259 SLIST_INSERT_HEAD(&sim
->ccb_freeq
, &free_ccb
->ccb_h
,
4262 if (sim
->devq
== NULL
) {
4265 sim
->devq
->alloc_openings
++;
4266 sim
->devq
->alloc_active
--;
4267 /* XXX Turn this into an inline function - xpt_run_device?? */
4268 if ((device_is_alloc_queued(device
) == 0)
4269 && (device
->drvq
.entries
> 0)) {
4270 xpt_schedule_dev_allocq(bus
, device
);
4272 if (dev_allocq_is_runnable(sim
->devq
))
4273 xpt_run_dev_allocq(bus
);
4276 /* Functions accessed by SIM drivers */
4279 * A sim structure, listing the SIM entry points and instance
4280 * identification info is passed to xpt_bus_register to hook the SIM
4281 * into the CAM framework. xpt_bus_register creates a cam_eb entry
4282 * for this new bus and places it in the array of busses and assigns
4283 * it a path_id. The path_id may be influenced by "hard wiring"
4284 * information specified by the user. Once interrupt services are
 * available, the bus will be probed.
4288 xpt_bus_register(struct cam_sim
*sim
, u_int32_t bus
)
4290 struct cam_eb
*new_bus
;
4291 struct cam_eb
*old_bus
;
4292 struct ccb_pathinq cpi
;
4294 sim_lock_assert_owned(sim
->lock
);
4297 new_bus
= kmalloc(sizeof(*new_bus
), M_CAMXPT
, M_INTWAIT
);
4299 if (strcmp(sim
->sim_name
, "xpt") != 0) {
4301 xptpathid(sim
->sim_name
, sim
->unit_number
, sim
->bus_id
);
4304 TAILQ_INIT(&new_bus
->et_entries
);
4305 new_bus
->path_id
= sim
->path_id
;
4308 timevalclear(&new_bus
->last_reset
);
4310 new_bus
->refcount
= 1; /* Held until a bus_deregister event */
4311 new_bus
->generation
= 0;
4312 lockmgr(&xsoftc
.xpt_topo_lock
, LK_EXCLUSIVE
);
4313 old_bus
= TAILQ_FIRST(&xsoftc
.xpt_busses
);
4314 while (old_bus
!= NULL
4315 && old_bus
->path_id
< new_bus
->path_id
)
4316 old_bus
= TAILQ_NEXT(old_bus
, links
);
4317 if (old_bus
!= NULL
)
4318 TAILQ_INSERT_BEFORE(old_bus
, new_bus
, links
);
4320 TAILQ_INSERT_TAIL(&xsoftc
.xpt_busses
, new_bus
, links
);
4321 xsoftc
.bus_generation
++;
4322 lockmgr(&xsoftc
.xpt_topo_lock
, LK_RELEASE
);
4324 /* Notify interested parties */
4325 if (sim
->path_id
!= CAM_XPT_PATH_ID
) {
4326 struct cam_path path
;
4328 xpt_compile_path(&path
, /*periph*/NULL
, sim
->path_id
,
4329 CAM_TARGET_WILDCARD
, CAM_LUN_WILDCARD
);
4330 xpt_setup_ccb(&cpi
.ccb_h
, &path
, /*priority*/1);
4331 cpi
.ccb_h
.func_code
= XPT_PATH_INQ
;
4332 xpt_action((union ccb
*)&cpi
);
4333 xpt_async(AC_PATH_REGISTERED
, &path
, &cpi
);
4334 xpt_release_path(&path
);
4336 return (CAM_SUCCESS
);
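/*
 * A SIM normally reaches xpt_bus_register() from its attach routine, after
 * cam_sim_alloc() has produced the sim structure; on success the assigned
 * path_id can be retrieved with cam_sim_path().  (Sketch of the caller
 * side, not code from this file.)
 */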
4340 * Deregister a bus. We must clean out all transactions pending on the bus.
4341 * This routine is typically called prior to cam_sim_free() (e.g. see
4342 * dev/usbmisc/umass/umass.c)
4345 xpt_bus_deregister(path_id_t pathid
)
4347 struct cam_path bus_path
;
4348 struct cam_et
*target
;
4349 struct cam_ed
*device
;
4350 struct cam_ed_qinfo
*qinfo
;
4351 struct cam_devq
*devq
;
4352 struct cam_periph
*periph
;
4353 struct cam_sim
*ccbsim
;
4354 union ccb
*work_ccb
;
4358 status
= xpt_compile_path(&bus_path
, NULL
, pathid
,
4359 CAM_TARGET_WILDCARD
, CAM_LUN_WILDCARD
);
4360 if (status
!= CAM_REQ_CMP
)
4364 * This should clear out all pending requests and timeouts, but
4365 * the ccb's may be queued to a software interrupt.
4367 * XXX AC_LOST_DEVICE does not precisely abort the pending requests,
4368 * and it really ought to.
4370 xpt_async(AC_LOST_DEVICE
, &bus_path
, NULL
);
4371 xpt_async(AC_PATH_DEREGISTERED
, &bus_path
, NULL
);
4374 * Mark the SIM as having been deregistered. This prevents
4375 * certain operations from re-queueing to it, stops new devices
4376 * from being added, etc.
4378 devq
= bus_path
.bus
->sim
->devq
;
4379 ccbsim
= bus_path
.bus
->sim
;
4380 ccbsim
->flags
|= CAM_SIM_DEREGISTERED
;
4384 * Execute any pending operations now.
4386 while ((qinfo
= (struct cam_ed_qinfo
*)camq_remove(&devq
->send_queue
,
4387 CAMQ_HEAD
)) != NULL
||
4388 (qinfo
= (struct cam_ed_qinfo
*)camq_remove(&devq
->alloc_queue
,
4389 CAMQ_HEAD
)) != NULL
) {
4391 device
= qinfo
->device
;
4392 work_ccb
= cam_ccbq_peek_ccb(&device
->ccbq
, CAMQ_HEAD
);
4393 if (work_ccb
!= NULL
) {
4394 devq
->active_dev
= device
;
4395 cam_ccbq_remove_ccb(&device
->ccbq
, work_ccb
);
4396 cam_ccbq_send_ccb(&device
->ccbq
, work_ccb
);
4397 (*(ccbsim
->sim_action
))(ccbsim
, work_ccb
);
4400 periph
= (struct cam_periph
*)camq_remove(&device
->drvq
,
4403 xpt_schedule(periph
, periph
->pinfo
.priority
);
4404 } while (work_ccb
!= NULL
|| periph
!= NULL
);
4408 * Make sure all completed CCBs are processed.
4410 while (!TAILQ_EMPTY(&ccbsim
->sim_doneq
)) {
4411 camisr_runqueue(ccbsim
);
 * Check for requeues; reissue asyncs if necessary.
4417 if (CAMQ_GET_HEAD(&devq
->send_queue
))
4418 kprintf("camq: devq send_queue still in use (%d entries)\n",
4419 devq
->send_queue
.entries
);
4420 if (CAMQ_GET_HEAD(&devq
->alloc_queue
))
4421 kprintf("camq: devq alloc_queue still in use (%d entries)\n",
4422 devq
->alloc_queue
.entries
);
4423 if (CAMQ_GET_HEAD(&devq
->send_queue
) ||
4424 CAMQ_GET_HEAD(&devq
->alloc_queue
)) {
4425 if (++retries
< 5) {
4426 xpt_async(AC_LOST_DEVICE
, &bus_path
, NULL
);
4427 xpt_async(AC_PATH_DEREGISTERED
, &bus_path
, NULL
);
4433 * Retarget the bus and all cached sim pointers to dead_sim.
4435 * Various CAM subsystems may be holding on to targets, devices,
4436 * and/or peripherals and may attempt to use the sim pointer cached
4437 * in some of these structures during close.
4439 bus_path
.bus
->sim
= &cam_dead_sim
;
4440 TAILQ_FOREACH(target
, &bus_path
.bus
->et_entries
, links
) {
4441 TAILQ_FOREACH(device
, &target
->ed_entries
, links
) {
4442 device
->sim
= &cam_dead_sim
;
4443 SLIST_FOREACH(periph
, &device
->periphs
, periph_links
) {
4444 periph
->sim
= &cam_dead_sim
;
4450 * Repeat the async's for the benefit of any new devices, such as
4451 * might be created from completed probes. Any new device
4452 * ops will run on dead_sim.
4454 * XXX There are probably races :-(
4456 CAM_SIM_LOCK(&cam_dead_sim
);
4457 xpt_async(AC_LOST_DEVICE
, &bus_path
, NULL
);
4458 xpt_async(AC_PATH_DEREGISTERED
, &bus_path
, NULL
);
4459 CAM_SIM_UNLOCK(&cam_dead_sim
);
4461 /* Release the reference count held while registered. */
4462 xpt_release_bus(bus_path
.bus
);
4463 xpt_release_path(&bus_path
);
4465 /* Release the ref we got when the bus was registered */
4466 cam_sim_release(ccbsim
, 0);
4468 return (CAM_REQ_CMP
);
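/*
 * Illustrative detach-side counterpart (sketch, hypothetical "foo" names):
 * deregister the bus first so pending work is flushed and cached sim
 * pointers are repointed at cam_dead_sim, then free the sim, mirroring the
 * umass example cited above.
 */
#if 0
static int
foo_detach(struct foo_softc *sc)
{
	xpt_bus_deregister(cam_sim_path(sc->sim));
	cam_sim_free(sc->sim);
	sc->sim = NULL;
	return (0);
}
#endif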
static path_id_t
xptnextfreepathid(void)
{
	struct cam_eb *bus;
	path_id_t pathid;
	char *strval;

	pathid = 0;
	lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
	bus = TAILQ_FIRST(&xsoftc.xpt_busses);
retry:
	/* Find an unoccupied pathid */
	while (bus != NULL && bus->path_id <= pathid) {
		if (bus->path_id == pathid)
			pathid++;
		bus = TAILQ_NEXT(bus, links);
	}
	lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);

	/*
	 * Ensure that this pathid is not reserved for
	 * a bus that may be registered in the future.
	 */
	if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
		++pathid;
		/* Start the search over */
		lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
		goto retry;
	}
	return (pathid);
}
static path_id_t
xptpathid(const char *sim_name, int sim_unit, int sim_bus)
{
	path_id_t pathid;
	int i, dunit, val;
	char buf[32];

	pathid = CAM_XPT_PATH_ID;
	ksnprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
	i = -1;
	while ((i = resource_query_string(i, "at", buf)) != -1) {
		if (strcmp(resource_query_name(i), "scbus")) {
			/* Avoid a bit of foot shooting. */
			continue;
		}
		dunit = resource_query_unit(i);
		if (dunit < 0)		/* unwired?! */
			continue;
		if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
			if (sim_bus == val) {
				pathid = dunit;
				break;
			}
		} else if (sim_bus == 0) {
			/* Unspecified matches bus 0 */
			pathid = dunit;
			break;
		} else {
			kprintf("Ambiguous scbus configuration for %s%d "
				"bus %d, cannot wire down.  The kernel "
				"config entry for scbus%d should "
				"specify a controller bus.\n"
				"Scbus will be assigned dynamically.\n",
				sim_name, sim_unit, sim_bus, dunit);
			break;
		}
	}

	if (pathid == CAM_XPT_PATH_ID)
		pathid = xptnextfreepathid();
	return (pathid);
}
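/*
 * Example of the wiring consulted above (sketch only): a kernel config
 * entry such as
 *
 *	device scbus0 at ahc0 bus 0
 *
 * creates the "scbus" resource hints that xptpathid() queries, pinning
 * path id 0 to bus 0 of ahc0; unwired busses fall through to
 * xptnextfreepathid().
 */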
void
xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
{
	struct cam_eb *bus;
	struct cam_et *target, *next_target;
	struct cam_ed *device, *next_device;

	sim_lock_assert_owned(path->bus->sim->lock);

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));

	/*
	 * Most async events come from a CAM interrupt context.  In
	 * a few cases, the error recovery code at the peripheral layer,
	 * which may run from our SWI or a process context, may signal
	 * deferred events with a call to xpt_async.
	 */

	bus = path->bus;

	if (async_code == AC_BUS_RESET) {
		/* Update our notion of when the last reset occurred */
		microuptime(&bus->last_reset);
	}

	for (target = TAILQ_FIRST(&bus->et_entries);
	     target != NULL;
	     target = next_target) {

		next_target = TAILQ_NEXT(target, links);

		if (path->target != target
		 && path->target->target_id != CAM_TARGET_WILDCARD
		 && target->target_id != CAM_TARGET_WILDCARD)
			continue;

		if (async_code == AC_SENT_BDR) {
			/* Update our notion of when the last reset occurred */
			microuptime(&path->target->last_reset);
		}

		for (device = TAILQ_FIRST(&target->ed_entries);
		     device != NULL;
		     device = next_device) {

			next_device = TAILQ_NEXT(device, links);

			if (path->device != device
			 && path->device->lun_id != CAM_LUN_WILDCARD
			 && device->lun_id != CAM_LUN_WILDCARD)
				continue;

			xpt_dev_async(async_code, bus, target,
				      device, async_arg);

			xpt_async_bcast(&device->asyncs, async_code,
					path, async_arg);
		}
	}

	/*
	 * If this wasn't a fully wildcarded async, tell all
	 * clients that want all async events.
	 */
	if (bus != xpt_periph->path->bus)
		xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
				path, async_arg);
}
static void
xpt_async_bcast(struct async_list *async_head,
		u_int32_t async_code,
		struct cam_path *path, void *async_arg)
{
	struct async_node *cur_entry;

	cur_entry = SLIST_FIRST(async_head);
	while (cur_entry != NULL) {
		struct async_node *next_entry;
		/*
		 * Grab the next list entry before we call the current
		 * entry's callback.  This is because the callback function
		 * can delete its async callback entry.
		 */
		next_entry = SLIST_NEXT(cur_entry, links);
		if ((cur_entry->event_enable & async_code) != 0)
			cur_entry->callback(cur_entry->callback_arg,
					    async_code, path, async_arg);
		cur_entry = next_entry;
	}
}
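/*
 * Illustrative sketch: how a peripheral driver typically lands on one of the
 * async lists walked above, using an XPT_SASYNC_CB ccb.  The "foo" names are
 * hypothetical; the ccb_setasync fields are the standard CAM ones.
 */
#if 0
static void
foo_register_async(struct cam_periph *periph)
{
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE | AC_BUS_RESET;
	csa.callback = foo_async_cb;	/* matches the async_node signature */
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);
}
#endif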
/*
 * Handle any per-device event notifications that require action by the XPT.
 */
static void
xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
	      struct cam_ed *device, void *async_arg)
{
	cam_status status;
	struct cam_path newpath;

	/*
	 * We only need to handle events for real devices.
	 */
	if (target->target_id == CAM_TARGET_WILDCARD
	 || device->lun_id == CAM_LUN_WILDCARD)
		return;

	/*
	 * We need our own path with wildcards expanded to
	 * handle certain types of events.
	 */
	if ((async_code == AC_SENT_BDR)
	 || (async_code == AC_BUS_RESET)
	 || (async_code == AC_INQ_CHANGED))
		status = xpt_compile_path(&newpath, NULL,
					  bus->path_id,
					  target->target_id,
					  device->lun_id);
	else
		status = CAM_REQ_CMP_ERR;

	if (status == CAM_REQ_CMP) {

		/*
		 * Allow transfer negotiation to occur in a
		 * tag free environment.
		 */
		if (async_code == AC_SENT_BDR
		 || async_code == AC_BUS_RESET)
			xpt_toggle_tags(&newpath);

		if (async_code == AC_INQ_CHANGED) {
			/*
			 * We've sent a start unit command, or
			 * something similar to a device that
			 * may have caused its inquiry data to
			 * change. So we re-scan the device to
			 * refresh the inquiry data for it.
			 */
			xpt_scan_lun(newpath.periph, &newpath,
				     CAM_EXPECT_INQ_CHANGE, NULL);
		}
		xpt_release_path(&newpath);
	} else if (async_code == AC_LOST_DEVICE) {
		/*
		 * When we lose a device the device may be about to detach
		 * the sim, we have to clear out all pending timeouts and
		 * requests before that happens.  XXX it would be nice if
		 * we could abort the requests pertaining to the device.
		 */
		xpt_release_devq_timeout(device);
		if ((device->flags & CAM_DEV_UNCONFIGURED) == 0) {
			device->flags |= CAM_DEV_UNCONFIGURED;
			xpt_release_device(bus, target, device);
		}
	} else if (async_code == AC_TRANSFER_NEG) {
		struct ccb_trans_settings *settings;

		settings = (struct ccb_trans_settings *)async_arg;
		xpt_set_transfer_settings(settings, device,
					  /*async_update*/TRUE);
	}
}
u_int32_t
xpt_freeze_devq(struct cam_path *path, u_int count)
{
	struct ccb_hdr *ccbh;

	sim_lock_assert_owned(path->bus->sim->lock);

	path->device->qfrozen_cnt += count;

	/*
	 * Mark the last CCB in the queue as needing
	 * to be requeued if the driver hasn't
	 * changed its state yet.  This fixes a race
	 * where a ccb is just about to be queued to
	 * a controller driver when its interrupt routine
	 * freezes the queue.  To completely close the
	 * hole, controller drivers must check to see
	 * if a ccb's status is still CAM_REQ_INPROG
	 * just before they queue
	 * the CCB.  See ahc_action/ahc_freeze_devq for
	 * an example.
	 */
	ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
	if (ccbh && ccbh->status == CAM_REQ_INPROG)
		ccbh->status = CAM_REQUEUE_REQ;
	return (path->device->qfrozen_cnt);
}
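/*
 * Illustrative controller-driver side of the race described above (sketch,
 * hypothetical "foo" names): re-check the status just before the CCB is
 * handed to the hardware, in the spirit of ahc_action/ahc_freeze_devq.
 */
#if 0
static void
foo_queue_ccb(struct foo_softc *sc, union ccb *ccb)
{
	crit_enter();
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		/* Frozen behind our back; requeue instead of starting it. */
		crit_exit();
		xpt_done(ccb);
		return;
	}
	foo_start_hw(sc, ccb);
	crit_exit();
}
#endif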
u_int32_t
xpt_freeze_simq(struct cam_sim *sim, u_int count)
{
	sim_lock_assert_owned(sim->lock);

	if (sim->devq == NULL)
		return (count);
	sim->devq->send_queue.qfrozen_cnt += count;
	if (sim->devq->active_dev != NULL) {
		struct ccb_hdr *ccbh;

		ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
				  ccb_hdr_tailq);
		if (ccbh && ccbh->status == CAM_REQ_INPROG)
			ccbh->status = CAM_REQUEUE_REQ;
	}
	return (sim->devq->send_queue.qfrozen_cnt);
}
/*
 * WARNING: most devices, especially USB/UMASS, may detach their sim early.
 * We ref-count the sim (and the bus only NULLs it out when the bus has been
 * freed, which is not the case here), but the device queue is also freed XXX
 * and we have to check that here.
 *
 * XXX fixme: could we simply not null-out the device queue via
 * cam_sim_free()?
 */
static void
xpt_release_devq_timeout(void *arg)
{
	struct cam_ed *device;

	device = (struct cam_ed *)arg;

	xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
}
void
xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
{
	sim_lock_assert_owned(path->bus->sim->lock);

	xpt_release_devq_device(path->device, count, run_queue);
}
static void
xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
{
	int rundevq;

	rundevq = 0;

	if (dev->qfrozen_cnt > 0) {

		count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
		dev->qfrozen_cnt -= count;
		if (dev->qfrozen_cnt == 0) {

			/*
			 * No longer need to wait for a successful
			 * command completion.
			 */
			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;

			/*
			 * Remove any timeouts that might be scheduled
			 * to release this queue.
			 */
			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
				callout_stop(&dev->callout);
				dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
			}

			/*
			 * Now that we are unfrozen schedule the
			 * device so any pending transactions are
			 * run.
			 */
			if ((dev->ccbq.queue.entries > 0)
			 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
			 && (run_queue != 0)) {
				rundevq = 1;
			}
		}
	}
	if (rundevq != 0)
		xpt_run_dev_sendq(dev->target->bus);
}
void
xpt_release_simq(struct cam_sim *sim, int run_queue)
{
	struct camq *sendq;

	sim_lock_assert_owned(sim->lock);

	if (sim->devq == NULL)
		return;

	sendq = &(sim->devq->send_queue);
	if (sendq->qfrozen_cnt > 0) {
		sendq->qfrozen_cnt--;
		if (sendq->qfrozen_cnt == 0) {
			struct cam_eb *bus;

			/*
			 * If there is a timeout scheduled to release this
			 * sim queue, remove it.  The queue frozen count is
			 * already at 0.
			 */
			if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
				callout_stop(&sim->callout);
				sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
			}
			bus = xpt_find_bus(sim->path_id);

			if (run_queue) {
				/*
				 * Now that we are unfrozen run the send queue.
				 */
				xpt_run_dev_sendq(bus);
			}
			xpt_release_bus(bus);
		}
	}
}
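/*
 * Illustrative usage pattern (sketch, hypothetical "foo" names): a SIM that
 * runs out of controller resources freezes its queue and requeues the CCB,
 * then thaws the queue from its completion interrupt once resources free up.
 */
#if 0
	/* In foo_action(), on resource shortage: */
	xpt_freeze_simq(sc->sim, /*count*/1);
	ccb->ccb_h.status = CAM_REQUEUE_REQ;
	xpt_done(ccb);

	/* Later, when resources are available again: */
	xpt_release_simq(sc->sim, /*run_queue*/TRUE);
#endif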
void
xpt_done(union ccb *done_ccb)
{
	struct cam_sim *sim;

	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
		/*
		 * Queue up the request for handling by our SWI handler
		 * any of the "non-immediate" type of ccbs.
		 */
		sim = done_ccb->ccb_h.path->bus->sim;
		switch (done_ccb->ccb_h.path->periph->type) {
		case CAM_PERIPH_BIO:
			spin_lock_wr(&sim->sim_spin);
			TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h,
					  sim_links.tqe);
			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
			spin_unlock_wr(&sim->sim_spin);
			if ((sim->flags & CAM_SIM_ON_DONEQ) == 0) {
				spin_lock_wr(&cam_simq_spin);
				if ((sim->flags & CAM_SIM_ON_DONEQ) == 0) {
					TAILQ_INSERT_TAIL(&cam_simq, sim,
							  links);
					sim->flags |= CAM_SIM_ON_DONEQ;
				}
				spin_unlock_wr(&cam_simq_spin);
			}
			if ((done_ccb->ccb_h.flags & CAM_POLLED) == 0)
				setsoftcambio();
			break;
		default:
			panic("unknown periph type %d",
				done_ccb->ccb_h.path->periph->type);
		}
	}
}
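/*
 * Illustrative completion path feeding the queue above (sketch, hypothetical
 * "foo" names): a SIM interrupt handler marks the CCB and hands it back.
 */
#if 0
static void
foo_intr_done(struct foo_softc *sc, union ccb *ccb)
{
	if (foo_hw_error(sc))
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
	else
		ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(ccb);	/* lands on sim_doneq; the SWI runs the camisr */
}
#endif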
union ccb *
xpt_alloc_ccb(void)
{
	union ccb *new_ccb;

	new_ccb = kmalloc(sizeof(*new_ccb), M_CAMXPT, M_INTWAIT | M_ZERO);
	return (new_ccb);
}

void
xpt_free_ccb(union ccb *free_ccb)
{
	kfree(free_ccb, M_CAMXPT);
}
/* Private XPT functions */

/*
 * Get a CAM control block for the caller. Charge the structure to the device
 * referenced by the path.  If this device has no 'credits' then the device
 * already has the maximum number of outstanding operations under way and we
 * return NULL.  If we don't have sufficient resources to allocate more ccbs,
 * we also return NULL.
 */
static union ccb *
xpt_get_ccb(struct cam_ed *device)
{
	union ccb *new_ccb;
	struct cam_sim *sim;

	sim = device->sim;
	if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) {
		new_ccb = xpt_alloc_ccb();
		if ((sim->flags & CAM_SIM_MPSAFE) == 0)
			callout_init(&new_ccb->ccb_h.timeout_ch);
		SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h,
				  xpt_links.sle);
	}
	cam_ccbq_take_opening(&device->ccbq);
	SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
	return (new_ccb);
}
static void
xpt_release_bus(struct cam_eb *bus)
{
	if ((--bus->refcount == 0)
	 && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
		lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
		TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
		xsoftc.bus_generation++;
		lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
		kfree(bus, M_CAMXPT);
	}
}
static struct cam_et *
xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
{
	struct cam_et *target;
	struct cam_et *cur_target;

	target = kmalloc(sizeof(*target), M_CAMXPT, M_INTWAIT);

	TAILQ_INIT(&target->ed_entries);
	target->bus = bus;
	target->target_id = target_id;
	target->refcount = 1;
	target->generation = 0;
	timevalclear(&target->last_reset);
	/*
	 * Hold a reference to our parent bus so it
	 * will not go away before we do.
	 */
	bus->refcount++;

	/* Insertion sort into our bus's target list */
	cur_target = TAILQ_FIRST(&bus->et_entries);
	while (cur_target != NULL && cur_target->target_id < target_id)
		cur_target = TAILQ_NEXT(cur_target, links);

	if (cur_target != NULL) {
		TAILQ_INSERT_BEFORE(cur_target, target, links);
	} else {
		TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
	}
	bus->generation++;
	return (target);
}
static void
xpt_release_target(struct cam_eb *bus, struct cam_et *target)
{
	if (target->refcount == 1) {
		KKASSERT(TAILQ_FIRST(&target->ed_entries) == NULL);
		TAILQ_REMOVE(&bus->et_entries, target, links);
		bus->generation++;
		xpt_release_bus(bus);
		KKASSERT(target->refcount == 1);
		kfree(target, M_CAMXPT);
	} else {
		--target->refcount;
	}
}
static struct cam_ed *
xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
{
	struct cam_path path;
	struct cam_ed *device;
	struct cam_devq *devq;
	cam_status status;

	/*
	 * Disallow new devices while trying to deregister a sim
	 */
	if (bus->sim->flags & CAM_SIM_DEREGISTERED)
		return (NULL);

	/*
	 * Make space for us in the device queue on our bus
	 */
	devq = bus->sim->devq;
	status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);

	if (status != CAM_REQ_CMP) {
		return (NULL);
	}

	device = kmalloc(sizeof(*device), M_CAMXPT, M_INTWAIT);

	if (device != NULL) {
		struct cam_ed *cur_device;

		cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
		device->alloc_ccb_entry.device = device;
		cam_init_pinfo(&device->send_ccb_entry.pinfo);
		device->send_ccb_entry.device = device;
		device->target = target;
		device->lun_id = lun_id;
		device->sim = bus->sim;
		/* Initialize our queues */
		if (camq_init(&device->drvq, 0) != 0) {
			kfree(device, M_CAMXPT);
			return (NULL);
		}
		if (cam_ccbq_init(&device->ccbq,
				  bus->sim->max_dev_openings) != 0) {
			camq_fini(&device->drvq);
			kfree(device, M_CAMXPT);
			return (NULL);
		}
		SLIST_INIT(&device->asyncs);
		SLIST_INIT(&device->periphs);
		device->generation = 0;
		device->owner = NULL;
		/*
		 * Take the default quirk entry until we have inquiry
		 * data and can determine a better quirk to use.
		 */
		device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
		bzero(&device->inq_data, sizeof(device->inq_data));
		device->inq_flags = 0;
		device->queue_flags = 0;
		device->serial_num = NULL;
		device->serial_num_len = 0;
		device->qfrozen_cnt = 0;
		device->flags = CAM_DEV_UNCONFIGURED;
		device->tag_delay_count = 0;
		device->tag_saved_openings = 0;
		device->refcount = 1;
		callout_init(&device->callout);

		/*
		 * Hold a reference to our parent target so it
		 * will not go away before we do.
		 */
		target->refcount++;

		/*
		 * XXX should be limited by number of CCBs this bus can
		 * do.
		 */
		bus->sim->max_ccbs += device->ccbq.devq_openings;
		/* Insertion sort into our target's device list */
		cur_device = TAILQ_FIRST(&target->ed_entries);
		while (cur_device != NULL && cur_device->lun_id < lun_id)
			cur_device = TAILQ_NEXT(cur_device, links);
		if (cur_device != NULL) {
			TAILQ_INSERT_BEFORE(cur_device, device, links);
		} else {
			TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
		}
		target->generation++;
		if (lun_id != CAM_LUN_WILDCARD) {
			xpt_compile_path(&path,
					 NULL,
					 bus->path_id,
					 target->target_id,
					 lun_id);
			xpt_devise_transport(&path);
			xpt_release_path(&path);
		}
	}
	return (device);
}
static void
xpt_reference_device(struct cam_ed *device)
{
	++device->refcount;
}
static void
xpt_release_device(struct cam_eb *bus, struct cam_et *target,
		   struct cam_ed *device)
{
	struct cam_devq *devq;

	if (device->refcount == 1) {
		KKASSERT(device->flags & CAM_DEV_UNCONFIGURED);

		if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
		 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
			panic("Removing device while still queued for ccbs");

		if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
			device->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
			callout_stop(&device->callout);
		}

		TAILQ_REMOVE(&target->ed_entries, device, links);
		target->generation++;
		bus->sim->max_ccbs -= device->ccbq.devq_openings;
		if ((devq = bus->sim->devq) != NULL) {
			/* Release our slot in the devq */
			cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
		}
		camq_fini(&device->drvq);
		camq_fini(&device->ccbq.queue);
		xpt_release_target(bus, target);
		KKASSERT(device->refcount == 1);
		kfree(device, M_CAMXPT);
	} else {
		--device->refcount;
	}
}
static u_int32_t
xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
{
	int diff;
	int result;
	struct cam_ed *dev;

	dev = path->device;

	diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
	result = cam_ccbq_resize(&dev->ccbq, newopenings);
	if (result == CAM_REQ_CMP && (diff < 0)) {
		dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
	}
	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
	 || (dev->inq_flags & SID_CmdQue) != 0)
		dev->tag_saved_openings = newopenings;
	/* Adjust the global limit */
	dev->sim->max_ccbs += diff;
	return (result);
}
static struct cam_eb *
xpt_find_bus(path_id_t path_id)
{
	struct cam_eb *bus;

	lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
	TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
		if (bus->path_id == path_id) {
			bus->refcount++;
			break;
		}
	}
	lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
	return (bus);
}
static struct cam_et *
xpt_find_target(struct cam_eb *bus, target_id_t target_id)
{
	struct cam_et *target;

	TAILQ_FOREACH(target, &bus->et_entries, links) {
		if (target->target_id == target_id) {
			target->refcount++;
			break;
		}
	}
	return (target);
}
static struct cam_ed *
xpt_find_device(struct cam_et *target, lun_id_t lun_id)
{
	struct cam_ed *device;

	TAILQ_FOREACH(device, &target->ed_entries, links) {
		if (device->lun_id == lun_id) {
			device->refcount++;
			break;
		}
	}
	return (device);
}
typedef struct {
	union	ccb *request_ccb;
	struct	ccb_pathinq *cpi;
	int	counter;
} xpt_scan_bus_info;
/*
 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
 * As the scan progresses, xpt_scan_bus is used as the
 * callback on completion function.
 */
static void
xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
{
	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
		  ("xpt_scan_bus\n"));
	switch (request_ccb->ccb_h.func_code) {
	case XPT_SCAN_BUS:
	{
		xpt_scan_bus_info *scan_info;
		union	ccb *work_ccb;
		struct	cam_path *path;
		u_int	i;
		u_int	max_target;
		u_int	initiator_id;

		/* Find out the characteristics of the bus */
		work_ccb = xpt_alloc_ccb();
		xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
			      request_ccb->ccb_h.pinfo.priority);
		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
		xpt_action(work_ccb);
		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
			request_ccb->ccb_h.status = work_ccb->ccb_h.status;
			xpt_free_ccb(work_ccb);
			xpt_done(request_ccb);
			return;
		}

		if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
			/*
			 * Can't scan the bus on an adapter that
			 * cannot perform the initiator role.
			 */
			request_ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_free_ccb(work_ccb);
			xpt_done(request_ccb);
			return;
		}

		/* Save some state for use while we probe for devices */
		scan_info = (xpt_scan_bus_info *)
		    kmalloc(sizeof(xpt_scan_bus_info), M_CAMXPT, M_INTWAIT);
		scan_info->request_ccb = request_ccb;
		scan_info->cpi = &work_ccb->cpi;

		/* Cache on our stack so we can work asynchronously */
		max_target = scan_info->cpi->max_target;
		initiator_id = scan_info->cpi->initiator_id;

		/*
		 * We can scan all targets in parallel, or do it sequentially.
		 */
		if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
			max_target = 0;
			scan_info->counter = 0;
		} else {
			scan_info->counter = scan_info->cpi->max_target + 1;
			if (scan_info->cpi->initiator_id < scan_info->counter) {
				scan_info->counter--;
			}
		}

		for (i = 0; i <= max_target; i++) {
			cam_status status;

			if (i == initiator_id)
				continue;

			status = xpt_create_path(&path, xpt_periph,
						 request_ccb->ccb_h.path_id,
						 i, 0);
			if (status != CAM_REQ_CMP) {
				kprintf("xpt_scan_bus: xpt_create_path failed"
					" with status %#x, bus scan halted\n",
					status);
				kfree(scan_info, M_CAMXPT);
				request_ccb->ccb_h.status = status;
				xpt_free_ccb(work_ccb);
				xpt_done(request_ccb);
				break;
			}
			work_ccb = xpt_alloc_ccb();
			xpt_setup_ccb(&work_ccb->ccb_h, path,
				      request_ccb->ccb_h.pinfo.priority);
			work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
			work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
			work_ccb->ccb_h.ppriv_ptr0 = scan_info;
			work_ccb->crcn.flags = request_ccb->crcn.flags;
			xpt_action(work_ccb);
		}
		break;
	}
	case XPT_SCAN_LUN:
	{
		cam_status status;
		struct cam_path *path;
		xpt_scan_bus_info *scan_info;
		path_id_t path_id;
		target_id_t target_id;
		lun_id_t lun_id;

		/* Reuse the same CCB to query if a device was really found */
		scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
		xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
			      request_ccb->ccb_h.pinfo.priority);
		request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;

		path_id = request_ccb->ccb_h.path_id;
		target_id = request_ccb->ccb_h.target_id;
		lun_id = request_ccb->ccb_h.target_lun;
		xpt_action(request_ccb);

		if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
			struct cam_ed *device;
			struct cam_et *target;
			int phl;

			/*
			 * If we already probed lun 0 successfully, or
			 * we have additional configured luns on this
			 * target that might have "gone away", go onto
			 * the next lun.
			 */
			target = request_ccb->ccb_h.path->target;
			/*
			 * We may touch devices that we don't
			 * hold references too, so ensure they
			 * don't disappear out from under us.
			 * The target above is referenced by the
			 * path in the request ccb.
			 */
			phl = 0;
			device = TAILQ_FIRST(&target->ed_entries);
			if (device != NULL) {
				phl = CAN_SRCH_HI_SPARSE(device);
				if (device->lun_id == 0)
					device = TAILQ_NEXT(device, links);
			}
			if ((lun_id != 0) || (device != NULL)) {
				if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
					lun_id++;
			}
		} else {
			struct cam_ed *device;

			device = request_ccb->ccb_h.path->device;

			if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
				/* Try the next lun */
				if (lun_id < (CAM_SCSI2_MAXLUN-1)
				 || CAN_SRCH_HI_DENSE(device))
					lun_id++;
			}
		}

		/*
		 * Free the current request path- we're done with it.
		 */
		xpt_free_path(request_ccb->ccb_h.path);

		/*
		 * Check to see if we scan any further luns.
		 */
		if (lun_id == request_ccb->ccb_h.target_lun
		 || lun_id > scan_info->cpi->max_lun) {
			int done;

hop_again:
			done = 0;
			if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
				scan_info->counter++;
				if (scan_info->counter ==
				    scan_info->cpi->initiator_id) {
					scan_info->counter++;
				}
				if (scan_info->counter >=
				    scan_info->cpi->max_target+1) {
					done = 1;
				}
			} else {
				scan_info->counter--;
				if (scan_info->counter == 0) {
					done = 1;
				}
			}
			if (done) {
				xpt_free_ccb(request_ccb);
				xpt_free_ccb((union ccb *)scan_info->cpi);
				request_ccb = scan_info->request_ccb;
				kfree(scan_info, M_CAMXPT);
				request_ccb->ccb_h.status = CAM_REQ_CMP;
				xpt_done(request_ccb);
				break;
			}

			if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) {
				break;
			}
			status = xpt_create_path(&path, xpt_periph,
			    scan_info->request_ccb->ccb_h.path_id,
			    scan_info->counter, 0);
			if (status != CAM_REQ_CMP) {
				kprintf("xpt_scan_bus: xpt_create_path failed"
					" with status %#x, bus scan halted\n",
					status);
				xpt_free_ccb(request_ccb);
				xpt_free_ccb((union ccb *)scan_info->cpi);
				request_ccb = scan_info->request_ccb;
				kfree(scan_info, M_CAMXPT);
				request_ccb->ccb_h.status = status;
				xpt_done(request_ccb);
				break;
			}
			xpt_setup_ccb(&request_ccb->ccb_h, path,
				      request_ccb->ccb_h.pinfo.priority);
			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
			request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
			request_ccb->crcn.flags =
			    scan_info->request_ccb->crcn.flags;
		} else {
			status = xpt_create_path(&path, xpt_periph,
						 path_id, target_id, lun_id);
			if (status != CAM_REQ_CMP) {
				kprintf("xpt_scan_bus: xpt_create_path failed "
					"with status %#x, halting LUN scan\n",
					status);
				goto hop_again;
			}
			xpt_setup_ccb(&request_ccb->ccb_h, path,
				      request_ccb->ccb_h.pinfo.priority);
			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
			request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
			request_ccb->crcn.flags =
			    scan_info->request_ccb->crcn.flags;
		}
		xpt_action(request_ccb);
		break;
	}
	default:
		break;
	}
}
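/*
 * Illustrative sketch of how a full bus scan is kicked off from elsewhere
 * (e.g. rescan logic); names other than the CAM calls themselves are
 * hypothetical.
 */
#if 0
static void
foo_rescan_bus(path_id_t path_id)
{
	union ccb *ccb = xpt_alloc_ccb();
	struct cam_path *path;

	if (xpt_create_path(&path, xpt_periph, path_id,
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	    != CAM_REQ_CMP) {
		xpt_free_ccb(ccb);
		return;
	}
	xpt_setup_ccb(&ccb->ccb_h, path, /*priority*/5);
	ccb->ccb_h.func_code = XPT_SCAN_BUS;
	ccb->ccb_h.cbfcnp = foo_rescan_done;	/* hypothetical callback */
	ccb->crcn.flags = CAM_FLAG_NONE;
	xpt_action(ccb);	/* completion re-enters xpt_scan_bus() */
}
#endif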
typedef enum {
	PROBE_TUR,
	PROBE_INQUIRY,	/* this counts as DV0 for Basic Domain Validation */
	PROBE_FULL_INQUIRY,
	PROBE_MODE_SENSE,
	PROBE_SERIAL_NUM_0,
	PROBE_SERIAL_NUM_1,
	PROBE_TUR_FOR_NEGOTIATION,
	PROBE_INQUIRY_BASIC_DV1,
	PROBE_INQUIRY_BASIC_DV2,
	PROBE_DV_EXIT
} probe_action;

typedef enum {
	PROBE_INQUIRY_CKSUM	= 0x01,
	PROBE_SERIAL_CKSUM	= 0x02,
	PROBE_NO_ANNOUNCE	= 0x04
} probe_flags;

typedef struct {
	TAILQ_HEAD(, ccb_hdr) request_ccbs;
	probe_action	action;
	union ccb	saved_ccb;
	probe_flags	flags;
	MD5_CTX		context;
	u_int8_t	digest[16];
} probe_softc;
static void
xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
	     cam_flags flags, union ccb *request_ccb)
{
	struct ccb_pathinq cpi;
	cam_status status;
	struct cam_path *new_path;
	struct cam_periph *old_periph;

	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
		  ("xpt_scan_lun\n"));

	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	if (cpi.ccb_h.status != CAM_REQ_CMP) {
		if (request_ccb != NULL) {
			request_ccb->ccb_h.status = cpi.ccb_h.status;
			xpt_done(request_ccb);
		}
		return;
	}

	if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
		/*
		 * Can't scan the bus on an adapter that
		 * cannot perform the initiator role.
		 */
		if (request_ccb != NULL) {
			request_ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_done(request_ccb);
		}
		return;
	}

	if (request_ccb == NULL) {
		request_ccb = kmalloc(sizeof(union ccb), M_CAMXPT, M_INTWAIT);
		new_path = kmalloc(sizeof(*new_path), M_CAMXPT, M_INTWAIT);
		status = xpt_compile_path(new_path, xpt_periph,
					  path->bus->path_id,
					  path->target->target_id,
					  path->device->lun_id);

		if (status != CAM_REQ_CMP) {
			xpt_print(path, "xpt_scan_lun: can't compile path, "
			    "can't continue\n");
			kfree(request_ccb, M_CAMXPT);
			kfree(new_path, M_CAMXPT);
			return;
		}
		xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
		request_ccb->ccb_h.cbfcnp = xptscandone;
		request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
		request_ccb->crcn.flags = flags;
	}

	if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
		probe_softc *softc;

		softc = (probe_softc *)old_periph->softc;
		TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
				  periph_links.tqe);
	} else {
		status = cam_periph_alloc(proberegister, NULL, probecleanup,
					  probestart, "probe",
					  CAM_PERIPH_BIO,
					  request_ccb->ccb_h.path, NULL, 0,
					  request_ccb);

		if (status != CAM_REQ_CMP) {
			xpt_print(path, "xpt_scan_lun: cam_alloc_periph "
			    "returned an error, can't continue probe\n");
			request_ccb->ccb_h.status = status;
			xpt_done(request_ccb);
		}
	}
}
static void
xptscandone(struct cam_periph *periph, union ccb *done_ccb)
{
	xpt_release_path(done_ccb->ccb_h.path);
	kfree(done_ccb->ccb_h.path, M_CAMXPT);
	kfree(done_ccb, M_CAMXPT);
}
static cam_status
proberegister(struct cam_periph *periph, void *arg)
{
	union ccb *request_ccb;	/* CCB representing the probe request */
	cam_status status;
	probe_softc *softc;

	request_ccb = (union ccb *)arg;
	if (periph == NULL) {
		kprintf("proberegister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (request_ccb == NULL) {
		kprintf("proberegister: no probe CCB, "
			"can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = kmalloc(sizeof(*softc), M_CAMXPT, M_INTWAIT | M_ZERO);
	TAILQ_INIT(&softc->request_ccbs);
	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
			  periph_links.tqe);
	periph->softc = softc;
	status = cam_periph_acquire(periph);
	if (status != CAM_REQ_CMP) {
		return (status);
	}

	/*
	 * Ensure we've waited at least a bus settle
	 * delay before attempting to probe the device.
	 * For HBAs that don't do bus resets, this won't make a difference.
	 */
	cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
				      scsi_delay);
	probeschedule(periph);
	return(CAM_REQ_CMP);
}
static void
probeschedule(struct cam_periph *periph)
{
	struct ccb_pathinq cpi;
	union ccb *ccb;
	probe_softc *softc;

	softc = (probe_softc *)periph->softc;
	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);

	xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	/*
	 * If a device has gone away and another device, or the same one,
	 * is back in the same place, it should have a unit attention
	 * condition pending.  It will not report the unit attention in
	 * response to an inquiry, which may leave invalid transfer
	 * negotiations in effect.  The TUR will reveal the unit attention
	 * condition.  Only send the TUR for lun 0, since some devices
	 * will get confused by commands other than inquiry to non-existent
	 * luns.  If you think a device has gone away start your scan from
	 * lun 0.  This will ensure that any bogus transfer settings are
	 * invalidated.
	 *
	 * If we haven't seen the device before and the controller supports
	 * some kind of transfer negotiation, negotiate with the first
	 * sent command if no bus reset was performed at startup.  This
	 * ensures that the device is not confused by transfer negotiation
	 * settings left over by loader or BIOS action.
	 */
	if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
	 && (ccb->ccb_h.target_lun == 0)) {
		softc->action = PROBE_TUR;
	} else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
	      && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
		proberequestdefaultnegotiation(periph);
		softc->action = PROBE_INQUIRY;
	} else {
		softc->action = PROBE_INQUIRY;
	}

	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
		softc->flags |= PROBE_NO_ANNOUNCE;
	else
		softc->flags &= ~PROBE_NO_ANNOUNCE;

	xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
}
static void
probestart(struct cam_periph *periph, union ccb *start_ccb)
{
	/* Probe the device that our peripheral driver points to */
	struct ccb_scsiio *csio;
	probe_softc *softc;

	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));

	softc = (probe_softc *)periph->softc;
	csio = &start_ccb->csio;

	switch (softc->action) {
	case PROBE_TUR:
	case PROBE_TUR_FOR_NEGOTIATION:
	case PROBE_DV_EXIT:
	{
		scsi_test_unit_ready(csio,
				     /*retries*/4,
				     probedone,
				     MSG_SIMPLE_Q_TAG,
				     SSD_FULL_SIZE,
				     /*timeout*/60000);
		break;
	}
	case PROBE_INQUIRY:
	case PROBE_FULL_INQUIRY:
	case PROBE_INQUIRY_BASIC_DV1:
	case PROBE_INQUIRY_BASIC_DV2:
	{
		u_int inquiry_len;
		struct scsi_inquiry_data *inq_buf;

		inq_buf = &periph->path->device->inq_data;

		/*
		 * If the device is currently configured, we calculate an
		 * MD5 checksum of the inquiry data, and if the serial number
		 * length is greater than 0, add the serial number data
		 * into the checksum as well.  Once the inquiry and the
		 * serial number check finish, we attempt to figure out
		 * whether we still have the same device.
		 */
		if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {

			MD5Init(&softc->context);
			MD5Update(&softc->context, (unsigned char *)inq_buf,
				  sizeof(struct scsi_inquiry_data));
			softc->flags |= PROBE_INQUIRY_CKSUM;
			if (periph->path->device->serial_num_len > 0) {
				MD5Update(&softc->context,
					  periph->path->device->serial_num,
					  periph->path->device->serial_num_len);
				softc->flags |= PROBE_SERIAL_CKSUM;
			}
			MD5Final(softc->digest, &softc->context);
		}

		if (softc->action == PROBE_INQUIRY)
			inquiry_len = SHORT_INQUIRY_LENGTH;
		else
			inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf);

		/*
		 * Some parallel SCSI devices fail to send an
		 * ignore wide residue message when dealing with
		 * odd length inquiry requests.  Round up to be
		 * safe.
		 */
		inquiry_len = roundup2(inquiry_len, 2);

		if (softc->action == PROBE_INQUIRY_BASIC_DV1
		 || softc->action == PROBE_INQUIRY_BASIC_DV2) {
			inq_buf = kmalloc(inquiry_len, M_CAMXPT, M_INTWAIT);
		}
		scsi_inquiry(csio,
			     /*retries*/4,
			     probedone,
			     MSG_SIMPLE_Q_TAG,
			     (u_int8_t *)inq_buf,
			     inquiry_len,
			     /*evpd*/FALSE,
			     /*page_code*/0,
			     SSD_MIN_SIZE,
			     /*timeout*/60 * 1000);
		break;
	}
	case PROBE_MODE_SENSE:
	{
		void *mode_buf;
		int mode_buf_len;

		mode_buf_len = sizeof(struct scsi_mode_header_6)
			     + sizeof(struct scsi_mode_blk_desc)
			     + sizeof(struct scsi_control_page);
		mode_buf = kmalloc(mode_buf_len, M_CAMXPT, M_INTWAIT);
		scsi_mode_sense(csio,
				/*retries*/4,
				probedone,
				MSG_SIMPLE_Q_TAG,
				/*dbd*/FALSE,
				SMS_PAGE_CTRL_CURRENT,
				SMS_CONTROL_MODE_PAGE,
				mode_buf,
				mode_buf_len,
				SSD_FULL_SIZE,
				/*timeout*/60000);
		break;
	}
	case PROBE_SERIAL_NUM_0:
	{
		struct scsi_vpd_supported_page_list *vpd_list = NULL;
		struct cam_ed *device;

		device = periph->path->device;
		if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0) {
			vpd_list = kmalloc(sizeof(*vpd_list), M_CAMXPT,
					   M_INTWAIT | M_ZERO);
		}

		if (vpd_list != NULL) {
			scsi_inquiry(csio,
				     /*retries*/4,
				     probedone,
				     MSG_SIMPLE_Q_TAG,
				     (u_int8_t *)vpd_list,
				     sizeof(*vpd_list),
				     /*evpd*/TRUE,
				     SVPD_SUPPORTED_PAGE_LIST,
				     SSD_MIN_SIZE,
				     /*timeout*/60 * 1000);
			break;
		}
		/*
		 * We'll have to do without, let our probedone
		 * routine finish up for us.
		 */
		start_ccb->csio.data_ptr = NULL;
		probedone(periph, start_ccb);
		return;
	}
	case PROBE_SERIAL_NUM_1:
	{
		struct scsi_vpd_unit_serial_number *serial_buf;
		struct cam_ed *device;

		device = periph->path->device;
		device->serial_num = NULL;
		device->serial_num_len = 0;

		serial_buf = (struct scsi_vpd_unit_serial_number *)
			kmalloc(sizeof(*serial_buf), M_CAMXPT,
				M_INTWAIT | M_ZERO);
		scsi_inquiry(csio,
			     /*retries*/4,
			     probedone,
			     MSG_SIMPLE_Q_TAG,
			     (u_int8_t *)serial_buf,
			     sizeof(*serial_buf),
			     /*evpd*/TRUE,
			     SVPD_UNIT_SERIAL_NUMBER,
			     SSD_MIN_SIZE,
			     /*timeout*/60 * 1000);
		break;
	}
	}
	xpt_action(start_ccb);
}
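/*
 * Sketch of the identity fingerprint computed above, pulled out of context
 * (standalone illustration only, hypothetical helper name): the digest
 * covers the full inquiry data plus any cached serial number, so a later
 * probe can distinguish "same device returned" from "different device in
 * the same slot".
 */
#if 0
static void
foo_device_digest(struct cam_ed *device, u_int8_t digest[16])
{
	MD5_CTX ctx;

	MD5Init(&ctx);
	MD5Update(&ctx, (unsigned char *)&device->inq_data,
		  sizeof(device->inq_data));
	if (device->serial_num_len > 0)
		MD5Update(&ctx, device->serial_num, device->serial_num_len);
	MD5Final(digest, &ctx);
}
#endif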
static void
proberequestdefaultnegotiation(struct cam_periph *periph)
{
	struct ccb_trans_settings cts;

	xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_USER_SETTINGS;
	xpt_action((union ccb *)&cts);
	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		return;
	}
	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb *)&cts);
}
/*
 * Backoff Negotiation Code- only pertinent for SPI devices.
 */
static int
proberequestbackoff(struct cam_periph *periph, struct cam_ed *device)
{
	struct ccb_trans_settings cts;
	struct ccb_trans_settings_spi *spi;

	memset(&cts, 0, sizeof (cts));
	xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb *)&cts);
	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		if (bootverbose) {
			xpt_print(periph->path,
			    "failed to get current device settings\n");
		}
		return (0);
	}
	if (cts.transport != XPORT_SPI) {
		if (bootverbose) {
			xpt_print(periph->path, "not SPI transport\n");
		}
		return (0);
	}
	spi = &cts.xport_specific.spi;

	/*
	 * We cannot renegotiate sync rate if we don't have one.
	 */
	if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
		if (bootverbose) {
			xpt_print(periph->path, "no sync rate known\n");
		}
		return (0);
	}

	/*
	 * We'll assert that we don't have to touch PPR options- the
	 * SIM will see what we do with period and offset and adjust
	 * the PPR options as appropriate.
	 */

	/*
	 * A sync rate with unknown or zero offset is nonsensical.
	 * A sync period of zero means Async.
	 */
	if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0
	 || spi->sync_offset == 0 || spi->sync_period == 0) {
		if (bootverbose) {
			xpt_print(periph->path, "no sync rate available\n");
		}
		return (0);
	}

	if (device->flags & CAM_DEV_DV_HIT_BOTTOM) {
		CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
		    ("hit async: giving up on DV\n"));
		return (0);
	}

	/*
	 * Jump sync_period up by one, but stop at 5MHz and fall back to Async.
	 * We don't try to remember 'last' settings to see if the SIM actually
	 * gets into the speed we want to set. We check on the SIM telling
	 * us that a requested speed is bad, but otherwise don't try and
	 * check the speed due to the asynchronous and handshake nature
	 * of speed setting.
	 */
	spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET;
	for (;;) {
		spi->sync_period++;
		if (spi->sync_period >= 0xf) {
			spi->sync_period = 0;
			spi->sync_offset = 0;
			CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
			    ("setting to async for DV\n"));
			/*
			 * Once we hit async, we don't want to try
			 * any more settings.
			 */
			device->flags |= CAM_DEV_DV_HIT_BOTTOM;
		} else if (bootverbose) {
			CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
			    ("DV: period 0x%x\n", spi->sync_period));
			kprintf("setting period to 0x%x\n", spi->sync_period);
		}
		cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
		cts.type = CTS_TYPE_CURRENT_SETTINGS;
		xpt_action((union ccb *)&cts);
		if ((cts.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			break;
		}
		CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
		    ("DV: failed to set period 0x%x\n", spi->sync_period));
		if (spi->sync_period == 0) {
			return (0);
		}
	}
	return (1);
}
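/*
 * Worked example of the backoff loop above (values only illustrative):
 * starting from a negotiated sync_period of 0xc, successive failed DV
 * passes step the period 0xc -> 0xd -> 0xe; once the increment reaches 0xf
 * the code gives up on sync entirely (period = offset = 0, i.e. async) and
 * sets CAM_DEV_DV_HIT_BOTTOM so no further backoff is attempted.
 */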
static void
probedone(struct cam_periph *periph, union ccb *done_ccb)
{
	probe_softc *softc;
	struct cam_path *path;
	u_int32_t priority;

	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));

	softc = (probe_softc *)periph->softc;
	path = done_ccb->ccb_h.path;
	priority = done_ccb->ccb_h.pinfo.priority;

	switch (softc->action) {
	case PROBE_TUR:
	{
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {

			if (cam_periph_error(done_ccb, 0,
					     SF_NO_PRINT, NULL) == ERESTART)
				return;
			else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				/* Don't wedge the queue */
				xpt_release_devq(done_ccb->ccb_h.path,
						 /*count*/1,
						 /*run_queue*/TRUE);
		}
		softc->action = PROBE_INQUIRY;
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		return;
	}
	case PROBE_INQUIRY:
	case PROBE_FULL_INQUIRY:
	{
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			struct scsi_inquiry_data *inq_buf;
			u_int8_t periph_qual;

			path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
			inq_buf = &path->device->inq_data;

			periph_qual = SID_QUAL(inq_buf);

			switch(periph_qual) {
			case SID_QUAL_LU_CONNECTED:
			{
				u_int8_t len;

				/*
				 * We conservatively request only
				 * SHORT_INQUIRY_LEN bytes of inquiry
				 * information during our first try
				 * at sending an INQUIRY. If the device
				 * has more information to give,
				 * perform a second request specifying
				 * the amount of information the device
				 * is willing to give.
				 */
				len = inq_buf->additional_length
				    + offsetof(struct scsi_inquiry_data,
					       additional_length) + 1;
				if (softc->action == PROBE_INQUIRY
				 && len > SHORT_INQUIRY_LENGTH) {
					softc->action = PROBE_FULL_INQUIRY;
					xpt_release_ccb(done_ccb);
					xpt_schedule(periph, priority);
					return;
				}

				xpt_find_quirk(path->device);

				xpt_devise_transport(path);
				if (INQ_DATA_TQ_ENABLED(inq_buf))
					softc->action = PROBE_MODE_SENSE;
				else
					softc->action = PROBE_SERIAL_NUM_0;

				path->device->flags &= ~CAM_DEV_UNCONFIGURED;
				xpt_reference_device(path->device);

				xpt_release_ccb(done_ccb);
				xpt_schedule(periph, priority);
				return;
			}
			default:
				break;
			}
		} else if (cam_periph_error(done_ccb, 0,
					    done_ccb->ccb_h.target_lun > 0
					    ? SF_RETRY_UA|SF_QUIET_IR
					    : SF_RETRY_UA,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}
		/*
		 * If we get to this point, we got an error status back
		 * from the inquiry and the error status doesn't require
		 * automatically retrying the command.  Therefore, the
		 * inquiry failed.  If we had inquiry information before
		 * for this device, but this latest inquiry command failed,
		 * the device has probably gone away.  If this device isn't
		 * already marked unconfigured, notify the peripheral
		 * drivers that this device is no more.
		 */
		if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
			/* Send the async notification. */
			xpt_async(AC_LOST_DEVICE, path, NULL);
		}

		xpt_release_ccb(done_ccb);
		break;
	}
	case PROBE_MODE_SENSE:
	{
		struct ccb_scsiio *csio;
		struct scsi_mode_header_6 *mode_hdr;

		csio = &done_ccb->csio;
		mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			struct scsi_control_page *page;
			u_int8_t *offset;

			offset = ((u_int8_t *)&mode_hdr[1])
			    + mode_hdr->blk_desc_len;
			page = (struct scsi_control_page *)offset;
			path->device->queue_flags = page->queue_flags;
		} else if (cam_periph_error(done_ccb, 0,
					    SF_RETRY_UA|SF_NO_PRINT,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path,
					 /*count*/1, /*run_queue*/TRUE);
		}
		xpt_release_ccb(done_ccb);
		kfree(mode_hdr, M_CAMXPT);
		softc->action = PROBE_SERIAL_NUM_0;
		xpt_schedule(periph, priority);
		return;
	}
	case PROBE_SERIAL_NUM_0:
	{
		struct ccb_scsiio *csio;
		struct scsi_vpd_supported_page_list *page_list;
		int length, serialnum_supported, i;

		serialnum_supported = 0;
		csio = &done_ccb->csio;
		page_list =
		    (struct scsi_vpd_supported_page_list *)csio->data_ptr;

		if (page_list == NULL) {
			/*
			 * Don't process the command as it was never sent
			 */
		} else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
		    && (page_list->length > 0)) {
			length = min(page_list->length,
				     SVPD_SUPPORTED_PAGES_SIZE);
			for (i = 0; i < length; i++) {
				if (page_list->list[i] ==
				    SVPD_UNIT_SERIAL_NUMBER) {
					serialnum_supported = 1;
					break;
				}
			}
		} else if (cam_periph_error(done_ccb, 0,
					    SF_RETRY_UA|SF_NO_PRINT,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}

		if (page_list != NULL)
			kfree(page_list, M_DEVBUF);

		if (serialnum_supported) {
			xpt_release_ccb(done_ccb);
			softc->action = PROBE_SERIAL_NUM_1;
			xpt_schedule(periph, priority);
			return;
		}

		xpt_release_ccb(done_ccb);
		softc->action = PROBE_TUR_FOR_NEGOTIATION;
		xpt_schedule(periph, done_ccb->ccb_h.pinfo.priority);
		return;
	}
	case PROBE_SERIAL_NUM_1:
	{
		struct ccb_scsiio *csio;
		struct scsi_vpd_unit_serial_number *serial_buf;
		u_int32_t priority;
		int changed;
		int have_serialnum;

		changed = 1;
		have_serialnum = 0;
		csio = &done_ccb->csio;
		priority = done_ccb->ccb_h.pinfo.priority;
		serial_buf =
		    (struct scsi_vpd_unit_serial_number *)csio->data_ptr;

		/* Clean up from previous instance of this device */
		if (path->device->serial_num != NULL) {
			kfree(path->device->serial_num, M_CAMXPT);
			path->device->serial_num = NULL;
			path->device->serial_num_len = 0;
		}

		if (serial_buf == NULL) {
			/*
			 * Don't process the command as it was never sent
			 */
		} else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
			&& (serial_buf->length > 0)) {

			have_serialnum = 1;
			path->device->serial_num =
			    kmalloc((serial_buf->length + 1),
				    M_CAMXPT, M_INTWAIT);
			bcopy(serial_buf->serial_num,
			      path->device->serial_num,
			      serial_buf->length);
			path->device->serial_num_len = serial_buf->length;
			path->device->serial_num[serial_buf->length] = '\0';
		} else if (cam_periph_error(done_ccb, 0,
					    SF_RETRY_UA|SF_NO_PRINT,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}

		/*
		 * Let's see if we have seen this device before.
		 */
		if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
			MD5_CTX context;
			u_int8_t digest[16];

			MD5Init(&context);

			MD5Update(&context,
				  (unsigned char *)&path->device->inq_data,
				  sizeof(struct scsi_inquiry_data));

			if (have_serialnum)
				MD5Update(&context, serial_buf->serial_num,
					  serial_buf->length);

			MD5Final(digest, &context);
			if (bcmp(softc->digest, digest, 16) == 0)
				changed = 0;

			/*
			 * XXX Do we need to do a TUR in order to ensure
			 *     that the device really hasn't changed???
			 */
			if ((changed != 0)
			 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
				xpt_async(AC_LOST_DEVICE, path, NULL);
		}
		if (serial_buf != NULL)
			kfree(serial_buf, M_CAMXPT);

		if (changed != 0) {
			/*
			 * Now that we have all the necessary
			 * information to safely perform transfer
			 * negotiations... Controllers don't perform
			 * any negotiation or tagged queuing until
			 * after the first XPT_SET_TRAN_SETTINGS ccb is
			 * received.  So, on a new device, just retrieve
			 * the user settings, and set them as the current
			 * settings to set the device up.
			 */
			proberequestdefaultnegotiation(periph);
			xpt_release_ccb(done_ccb);
			/*
			 * Perform a TUR to allow the controller to
			 * perform any necessary transfer negotiation.
			 */
			softc->action = PROBE_TUR_FOR_NEGOTIATION;
			xpt_schedule(periph, priority);
			return;
		}
		xpt_release_ccb(done_ccb);
		break;
	}
	case PROBE_TUR_FOR_NEGOTIATION:
	case PROBE_DV_EXIT:
	{
		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}

		xpt_reference_device(path->device);
		/*
		 * Do Domain Validation for lun 0 on devices that claim
		 * to support Synchronous Transfer modes.
		 */
		if (softc->action == PROBE_TUR_FOR_NEGOTIATION
		 && done_ccb->ccb_h.target_lun == 0
		 && (path->device->inq_data.flags & SID_Sync) != 0
		 && (path->device->flags & CAM_DEV_IN_DV) == 0) {
			CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
			    ("Begin Domain Validation\n"));
			path->device->flags |= CAM_DEV_IN_DV;
			xpt_release_ccb(done_ccb);
			softc->action = PROBE_INQUIRY_BASIC_DV1;
			xpt_schedule(periph, priority);
			return;
		}
		if (softc->action == PROBE_DV_EXIT) {
			CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
			    ("Leave Domain Validation\n"));
		}
		path->device->flags &=
		    ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
		if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
			/* Inform the XPT that a new device has been found */
			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action(done_ccb);
			xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
				  done_ccb);
		}
		xpt_release_ccb(done_ccb);
		break;
	}
	case PROBE_INQUIRY_BASIC_DV1:
	case PROBE_INQUIRY_BASIC_DV2:
	{
		struct scsi_inquiry_data *nbuf;
		struct ccb_scsiio *csio;

		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}
		csio = &done_ccb->csio;
		nbuf = (struct scsi_inquiry_data *)csio->data_ptr;
		if (bcmp(nbuf, &path->device->inq_data, SHORT_INQUIRY_LENGTH)) {
			xpt_print(path,
			    "inquiry data fails comparison at DV%d step\n",
			    softc->action == PROBE_INQUIRY_BASIC_DV1 ? 1 : 2);
			if (proberequestbackoff(periph, path->device)) {
				path->device->flags &= ~CAM_DEV_IN_DV;
				softc->action = PROBE_TUR_FOR_NEGOTIATION;
			} else {
				/* give up */
				softc->action = PROBE_DV_EXIT;
			}
			kfree(nbuf, M_CAMXPT);
			xpt_release_ccb(done_ccb);
			xpt_schedule(periph, priority);
			return;
		}
		kfree(nbuf, M_CAMXPT);
		if (softc->action == PROBE_INQUIRY_BASIC_DV1) {
			softc->action = PROBE_INQUIRY_BASIC_DV2;
			xpt_release_ccb(done_ccb);
			xpt_schedule(periph, priority);
			return;
		}
		if (softc->action == PROBE_DV_EXIT) {
			CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
			    ("Leave Domain Validation Successfully\n"));
		}
		path->device->flags &=
		    ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
		if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
			/* Inform the XPT that a new device has been found */
			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action(done_ccb);
			xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
				  done_ccb);
		}
		xpt_release_ccb(done_ccb);
		break;
	}
	}
	done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
	TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
	done_ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(done_ccb);
	if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
		cam_periph_invalidate(periph);
		cam_periph_release(periph);
	} else {
		probeschedule(periph);
	}
}
static void
probecleanup(struct cam_periph *periph)
{
	kfree(periph->softc, M_CAMXPT);
}
static void
xpt_find_quirk(struct cam_ed *device)
{
	caddr_t	match;

	match = cam_quirkmatch((caddr_t)&device->inq_data,
			       (caddr_t)xpt_quirk_table,
			       sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
			       sizeof(*xpt_quirk_table), scsi_inquiry_match);

	if (match == NULL)
		panic("xpt_find_quirk: device didn't match wildcard entry!!");

	device->quirk = (struct xpt_quirk_entry *)match;
}
static int
sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS)
{
	int error, bool;

	bool = cam_srch_hi;
	error = sysctl_handle_int(oidp, &bool, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (bool == 0 || bool == 1) {
		cam_srch_hi = bool;
		return (0);
	} else {
		return (EINVAL);
	}
}
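/*
 * Illustrative usage: the handler above backs a boolean sysctl, so the
 * high-lun search behaviour can be toggled at runtime with something like
 *
 *	sysctl kern.cam.cam_srch_hi=1
 *
 * (the exact node name is whatever the SYSCTL declaration elsewhere in this
 * file registers).
 */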
static void
xpt_devise_transport(struct cam_path *path)
{
	struct ccb_pathinq cpi;
	struct ccb_trans_settings cts;
	struct scsi_inquiry_data *inq_buf;

	/* Get transport information from the SIM */
	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	inq_buf = NULL;
	if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
		inq_buf = &path->device->inq_data;
	path->device->protocol = PROTO_SCSI;
	path->device->protocol_version =
	    inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
	path->device->transport = cpi.transport;
	path->device->transport_version = cpi.transport_version;

	/*
	 * Any device not using SPI3 features should
	 * be considered SPI2 or lower.
	 */
	if (inq_buf != NULL) {
		if (path->device->transport == XPORT_SPI
		 && (inq_buf->spi3data & SID_SPI_MASK) == 0
		 && path->device->transport_version > 2)
			path->device->transport_version = 2;
	} else {
		struct cam_ed *otherdev;

		for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
		     otherdev != NULL;
		     otherdev = TAILQ_NEXT(otherdev, links)) {
			if (otherdev != path->device)
				break;
		}

		if (otherdev != NULL) {
			/*
			 * Initially assume the same versioning as
			 * prior luns for this target.
			 */
			path->device->protocol_version =
			    otherdev->protocol_version;
			path->device->transport_version =
			    otherdev->transport_version;
		} else {
			/* Until we know better, opt for safety */
			path->device->protocol_version = 2;
			if (path->device->transport == XPORT_SPI)
				path->device->transport_version = 2;
			else
				path->device->transport_version = 0;
		}
	}

	/*
	 * XXX
	 * For a device compliant with SPC-2 we should be able
	 * to determine the transport version supported by
	 * scrutinizing the version descriptors in the
	 * inquiry buffer.
	 */

	/* Tell the controller what we think */
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	cts.transport = path->device->transport;
	cts.transport_version = path->device->transport_version;
	cts.protocol = path->device->protocol;
	cts.protocol_version = path->device->protocol_version;
	cts.proto_specific.valid = 0;
	cts.xport_specific.valid = 0;
	xpt_action((union ccb *)&cts);
}
static void
xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
			  int async_update)
{
	struct	ccb_pathinq cpi;
	struct	ccb_trans_settings cur_cts;
	struct	ccb_trans_settings_scsi *scsi;
	struct	ccb_trans_settings_scsi *cur_scsi;
	struct	cam_sim *sim;
	struct	scsi_inquiry_data *inq_data;

	if (device == NULL) {
		cts->ccb_h.status = CAM_PATH_INVALID;
		xpt_done((union ccb *)cts);
		return;
	}

	if (cts->protocol == PROTO_UNKNOWN
	 || cts->protocol == PROTO_UNSPECIFIED) {
		cts->protocol = device->protocol;
		cts->protocol_version = device->protocol_version;
	}

	if (cts->protocol_version == PROTO_VERSION_UNKNOWN
	 || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
		cts->protocol_version = device->protocol_version;

	if (cts->protocol != device->protocol) {
		xpt_print(cts->ccb_h.path, "Uninitialized Protocol %x:%x?\n",
			  cts->protocol, device->protocol);
		cts->protocol = device->protocol;
	}

	if (cts->protocol_version > device->protocol_version) {
		if (bootverbose) {
			xpt_print(cts->ccb_h.path, "Down reving Protocol "
				  "Version from %d to %d?\n",
				  cts->protocol_version,
				  device->protocol_version);
		}
		cts->protocol_version = device->protocol_version;
	}

	if (cts->transport == XPORT_UNKNOWN
	 || cts->transport == XPORT_UNSPECIFIED) {
		cts->transport = device->transport;
		cts->transport_version = device->transport_version;
	}

	if (cts->transport_version == XPORT_VERSION_UNKNOWN
	 || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
		cts->transport_version = device->transport_version;

	if (cts->transport != device->transport) {
		xpt_print(cts->ccb_h.path, "Uninitialized Transport %x:%x?\n",
			  cts->transport, device->transport);
		cts->transport = device->transport;
	}

	if (cts->transport_version > device->transport_version) {
		if (bootverbose) {
			xpt_print(cts->ccb_h.path, "Down reving Transport "
				  "Version from %d to %d?\n",
				  cts->transport_version,
				  device->transport_version);
		}
		cts->transport_version = device->transport_version;
	}

	sim = cts->ccb_h.path->bus->sim;

	/*
	 * Nothing more of interest to do unless
	 * this is a device connected via the
	 * SCSI protocol.
	 */
	if (cts->protocol != PROTO_SCSI) {
		if (async_update == FALSE)
			(*(sim->sim_action))(sim, (union ccb *)cts);
		return;
	}

	inq_data = &device->inq_data;
	scsi = &cts->proto_specific.scsi;
	xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	/* SCSI specific sanity checking */
	if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
	 || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
	 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
	 || (device->quirk->mintags == 0)) {
		/*
		 * Can't tag on hardware that doesn't support tags,
		 * doesn't have it enabled, or has broken tag support.
		 */
		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	}

	if (async_update == FALSE) {
		/*
		 * Perform sanity checking against what the
		 * controller and device can do.
		 */
		xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
		cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
		cur_cts.type = cts->type;
		xpt_action((union ccb *)&cur_cts);
		if ((cur_cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			return;
		}
		cur_scsi = &cur_cts.proto_specific.scsi;
		if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
			scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
		}
		if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	}
	/* SPI specific sanity checking */
	if (cts->transport == XPORT_SPI && async_update == FALSE) {
		u_int spi3caps;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings_spi *cur_spi;

		spi = &cts->xport_specific.spi;

		cur_spi = &cur_cts.xport_specific.spi;

		/* Fill in any gaps in what the user gave us */
		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
			spi->sync_period = cur_spi->sync_period;
		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
			spi->sync_period = 0;
		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
			spi->sync_offset = cur_spi->sync_offset;
		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
			spi->sync_offset = 0;
		if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
			spi->ppr_options = cur_spi->ppr_options;
		if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
			spi->ppr_options = 0;
		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
			spi->bus_width = cur_spi->bus_width;
		if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
			spi->bus_width = 0;
		if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
			spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
		}
		if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
		if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
		  && (inq_data->flags & SID_Sync) == 0
		  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
		 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
		 || (spi->sync_offset == 0)
		 || (spi->sync_period == 0)) {
			/* Force async */
			spi->sync_period = 0;
			spi->sync_offset = 0;
		}

		switch (spi->bus_width) {
		case MSG_EXT_WDTR_BUS_32_BIT:
			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
			  || (inq_data->flags & SID_WBus32) != 0
			  || cts->type == CTS_TYPE_USER_SETTINGS)
			 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
				break;
			/* Fall Through to 16-bit */
		case MSG_EXT_WDTR_BUS_16_BIT:
			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
			  || (inq_data->flags & SID_WBus16) != 0
			  || cts->type == CTS_TYPE_USER_SETTINGS)
			 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
				break;
			}
			/* Fall Through to 8-bit */
		default: /* New bus width?? */
		case MSG_EXT_WDTR_BUS_8_BIT:
			/* All targets can do this */
			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
			break;
		}

		spi3caps = cpi.xport_specific.spi.ppr_options;
		if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
		 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
			spi3caps &= inq_data->spi3data;

		if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;

		if ((spi3caps & SID_SPI_IUS) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;

		if ((spi3caps & SID_SPI_QAS) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;

		/* No SPI Transfer settings are allowed unless we are wide */
		if (spi->bus_width == 0)
			spi->ppr_options = 0;

		if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
			/*
			 * Can't tag queue without disconnection.
			 */
			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
			scsi->valid |= CTS_SCSI_VALID_TQ;
		}

		/*
		 * If we are currently performing tagged transactions to
		 * this device and want to change its negotiation parameters,
		 * go non-tagged for a bit to give the controller a chance to
		 * negotiate unhampered by tag messages.
		 */
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS
		 && (device->inq_flags & SID_CmdQue) != 0
		 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
		 && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
				   CTS_SPI_VALID_SYNC_OFFSET|
				   CTS_SPI_VALID_BUS_WIDTH)) != 0)
			xpt_toggle_tags(cts->ccb_h.path);
	}

	if (cts->type == CTS_TYPE_CURRENT_SETTINGS
	 && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
		int device_tagenb;

		/*
		 * If we are transitioning from tags to no-tags or
		 * vice-versa, we need to carefully freeze and restart
		 * the queue so that we don't overlap tagged and non-tagged
		 * commands.  We also temporarily stop tags if there is
		 * a change in transfer negotiation settings to allow
		 * "tag-less" negotiation.
		 */
		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
		 || (device->inq_flags & SID_CmdQue) != 0)
			device_tagenb = TRUE;
		else
			device_tagenb = FALSE;

		if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
		  && device_tagenb == FALSE)
		 || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB
) == 0
6759 && device_tagenb
== TRUE
)) {
6761 if ((scsi
->flags
& CTS_SCSI_FLAGS_TAG_ENB
) != 0) {
6763 * Delay change to use tags until after a
6764 * few commands have gone to this device so
6765 * the controller has time to perform transfer
6766 * negotiations without tagged messages getting
6769 device
->tag_delay_count
= CAM_TAG_DELAY_COUNT
;
6770 device
->flags
|= CAM_DEV_TAG_AFTER_COUNT
;
6772 struct ccb_relsim crs
;
6774 xpt_freeze_devq(cts
->ccb_h
.path
, /*count*/1);
6775 device
->inq_flags
&= ~SID_CmdQue
;
6776 xpt_dev_ccbq_resize(cts
->ccb_h
.path
,
6777 sim
->max_dev_openings
);
6778 device
->flags
&= ~CAM_DEV_TAG_AFTER_COUNT
;
6779 device
->tag_delay_count
= 0;
6781 xpt_setup_ccb(&crs
.ccb_h
, cts
->ccb_h
.path
,
6783 crs
.ccb_h
.func_code
= XPT_REL_SIMQ
;
6784 crs
.release_flags
= RELSIM_RELEASE_AFTER_QEMPTY
;
6786 = crs
.release_timeout
6789 xpt_action((union ccb
*)&crs
);
6793 if (async_update
== FALSE
)
6794 (*(sim
->sim_action
))(sim
, (union ccb
*)cts
);
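
/*
 * Illustrative sketch (not part of the driver proper): callers normally
 * reach xpt_set_transfer_settings() by filling in a ccb_trans_settings
 * and handing it to xpt_action() as an XPT_SET_TRAN_SETTINGS ccb,
 * roughly as below.  The particular field choices are only an example.
 *
 *      struct ccb_trans_settings cts;
 *
 *      xpt_setup_ccb(&cts.ccb_h, path, 1);
 *      cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
 *      cts.type = CTS_TYPE_CURRENT_SETTINGS;
 *      cts.protocol = PROTO_SCSI;
 *      cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
 *      cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
 *      xpt_action((union ccb *)&cts);
 */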
static void
xpt_toggle_tags(struct cam_path *path)
{
        struct cam_ed *dev;

        /*
         * Give controllers a chance to renegotiate
         * before starting tag operations.  We
         * "toggle" tagged queuing off then on
         * which causes the tag enable command delay
         * counter to come into effect.
         */
        dev = path->device;
        if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
         || ((dev->inq_flags & SID_CmdQue) != 0
          && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
                struct ccb_trans_settings cts;

                xpt_setup_ccb(&cts.ccb_h, path, 1);
                cts.protocol = PROTO_SCSI;
                cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
                cts.transport = XPORT_UNSPECIFIED;
                cts.transport_version = XPORT_VERSION_UNSPECIFIED;
                cts.proto_specific.scsi.flags = 0;
                cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
                xpt_set_transfer_settings(&cts, path->device,
                                          /*async_update*/TRUE);
                cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
                xpt_set_transfer_settings(&cts, path->device,
                                          /*async_update*/TRUE);
        }
}
static void
xpt_start_tags(struct cam_path *path)
{
        struct ccb_relsim crs;
        struct cam_ed *device;
        struct cam_sim *sim;
        int newopenings;

        device = path->device;
        sim = path->bus->sim;
        device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
        xpt_freeze_devq(path, /*count*/1);
        device->inq_flags |= SID_CmdQue;
        if (device->tag_saved_openings != 0)
                newopenings = device->tag_saved_openings;
        else
                newopenings = min(device->quirk->maxtags,
                                  sim->max_tagged_dev_openings);
        xpt_dev_ccbq_resize(path, newopenings);
        xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
        crs.ccb_h.func_code = XPT_REL_SIMQ;
        crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
        crs.openings
            = crs.release_timeout
            = crs.qfrozen_cnt
            = 0;
        xpt_action((union ccb *)&crs);
}
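
/*
 * Note on the sizing above: the tag depth is restored either from the
 * value saved when tagging was last disabled (tag_saved_openings) or,
 * failing that, clamped to min(quirk->maxtags,
 * sim->max_tagged_dev_openings).  This pairs with the mintags == 0
 * check in xpt_set_transfer_settings(), which strips
 * CTS_SCSI_FLAGS_TAG_ENB for devices whose quirk entry forbids tags.
 */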
static int busses_to_config;
static int busses_to_reset;

static int
xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
{
        sim_lock_assert_owned(bus->sim->lock);

        if (bus->counted_to_config == 0 && bus->path_id != CAM_XPT_PATH_ID) {
                struct cam_path path;
                struct ccb_pathinq cpi;
                int can_negotiate;

                kprintf("CAM: Configuring bus:");
                if (bus->sim) {
                        kprintf(" %s%d\n",
                                bus->sim->sim_name,
                                bus->sim->unit_number);
                } else {
                        kprintf(" (unknown)\n");
                }

                busses_to_config++;
                bus->counted_to_config = 1;
                xpt_compile_path(&path, NULL, bus->path_id,
                                 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
                xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
                cpi.ccb_h.func_code = XPT_PATH_INQ;
                xpt_action((union ccb *)&cpi);
                can_negotiate = cpi.hba_inquiry;
                can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
                if ((cpi.hba_misc & PIM_NOBUSRESET) == 0 && can_negotiate)
                        busses_to_reset++;
                xpt_release_path(&path);
        }

        return(1);
}
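
/*
 * Bus configuration runs in two passes over the bus list: the counting
 * pass above tallies busses_to_config and busses_to_reset, and
 * xptconfigfunc() below then issues the actual XPT_PATH_INQ and
 * XPT_RESET_BUS work.  The settle-delay message printed by xpt_config()
 * keys off the busses_to_reset total computed here.
 */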
static int
xptconfigfunc(struct cam_eb *bus, void *arg)
{
        struct cam_path *path;
        union ccb *work_ccb;

        sim_lock_assert_owned(bus->sim->lock);

        if (bus->path_id != CAM_XPT_PATH_ID) {
                cam_status status;
                int can_negotiate;

                work_ccb = xpt_alloc_ccb();
                if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
                                              CAM_TARGET_WILDCARD,
                                              CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
                        kprintf("xptconfigfunc: xpt_create_path failed with "
                                "status %#x for bus %d\n", status, bus->path_id);
                        kprintf("xptconfigfunc: halting bus configuration\n");
                        xpt_free_ccb(work_ccb);
                        if (bus->counted_to_config) {
                                bus->counted_to_config = 0;
                                busses_to_config--;
                        }
                        xpt_finishconfig(xpt_periph, NULL);
                        return(0);
                }
                xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
                work_ccb->ccb_h.func_code = XPT_PATH_INQ;
                xpt_action(work_ccb);
                if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
                        kprintf("xptconfigfunc: CPI failed on bus %d "
                                "with status %d\n", bus->path_id,
                                work_ccb->ccb_h.status);
                        xpt_finishconfig(xpt_periph, work_ccb);
                        return(1);
                }

                can_negotiate = work_ccb->cpi.hba_inquiry;
                can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
                if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
                 && (can_negotiate != 0)) {
                        xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
                        work_ccb->ccb_h.func_code = XPT_RESET_BUS;
                        work_ccb->ccb_h.cbfcnp = NULL;
                        CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
                                  ("Resetting Bus\n"));
                        xpt_action(work_ccb);
                        xpt_finishconfig(xpt_periph, work_ccb);
                } else {
                        /* Act as though we performed a successful BUS RESET */
                        work_ccb->ccb_h.func_code = XPT_RESET_BUS;
                        xpt_finishconfig(xpt_periph, work_ccb);
                }
        }

        return(1);
}
/*
 * Now that interrupts are enabled, go find our devices.
 *
 * This hook function is called once by run_interrupt_driven_config_hooks().
 * XPT is expected to disestablish its hook when done.
 */
static void
xpt_config(void *arg)
{
#ifdef CAMDEBUG
        /* Setup debugging flags and path */
#ifdef CAM_DEBUG_FLAGS
        cam_dflags = CAM_DEBUG_FLAGS;
#else /* !CAM_DEBUG_FLAGS */
        cam_dflags = CAM_DEBUG_NONE;
#endif /* CAM_DEBUG_FLAGS */
#ifdef CAM_DEBUG_BUS
        if (cam_dflags != CAM_DEBUG_NONE) {
                /*
                 * Locking is specifically omitted here.  No SIMs have
                 * registered yet, so xpt_create_path will only be searching
                 * empty lists of targets and devices.
                 */
                if (xpt_create_path(&cam_dpath, xpt_periph,
                                    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
                                    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
                        kprintf("xpt_config: xpt_create_path() failed for debug"
                                " target %d:%d:%d, debugging disabled\n",
                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
                        cam_dflags = CAM_DEBUG_NONE;
                }
        } else {
                cam_dpath = NULL;
        }
#else /* !CAM_DEBUG_BUS */
        cam_dpath = NULL;
#endif /* CAM_DEBUG_BUS */
#endif /* CAMDEBUG */

        /*
         * Scan all installed busses.
         */
        xpt_for_all_busses(xptconfigbuscountfunc, NULL);

        kprintf("CAM: Configuring %d busses\n", busses_to_config);

        if (busses_to_config == 0) {
                /* Call manually because we don't have any busses */
                xpt_finishconfig(xpt_periph, NULL);
        } else {
                if (busses_to_reset > 0 && scsi_delay >= 2000) {
                        kprintf("Waiting %d seconds for SCSI "
                                "devices to settle\n", scsi_delay/1000);
                }
                xpt_for_all_busses(xptconfigfunc, NULL);
        }
}
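
/*
 * For orientation only, a sketch of the hook registration this function
 * relies on (assumed to match the init-time setup elsewhere in this
 * file; not a verbatim copy of it):
 *
 *      xsoftc.xpt_config_hook = kmalloc(sizeof(struct intr_config_hook),
 *                                       M_CAMXPT, M_INTWAIT | M_ZERO);
 *      xsoftc.xpt_config_hook->ich_func = xpt_config;
 *      config_intrhook_establish(xsoftc.xpt_config_hook);
 *
 * xpt_finishconfig_task() disestablishes and frees the hook once every
 * bus has been configured.
 */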
/*
 * If the given device only has one peripheral attached to it, and if that
 * peripheral is the passthrough driver, announce it.  This ensures that the
 * user sees some sort of announcement for every peripheral in their system.
 */
static int
xptpassannouncefunc(struct cam_ed *device, void *arg)
{
        struct cam_periph *periph;
        int i;

        for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
             periph = SLIST_NEXT(periph, periph_links), i++)
                ;

        periph = SLIST_FIRST(&device->periphs);
        if ((i == 1)
         && (strncmp(periph->periph_name, "pass", 4) == 0))
                xpt_announce_periph(periph, NULL);

        return(1);
}
static void
xpt_finishconfig_task(void *context, int pending)
{
        struct periph_driver **p_drv;
        int i;

        kprintf("CAM: finished configuring all busses (%d left)\n",
                busses_to_config);

        if (busses_to_config == 0) {
                /* Register all the peripheral drivers */
                /* XXX This will have to change when we have loadable modules */
                p_drv = periph_drivers;
                for (i = 0; p_drv[i] != NULL; i++) {
                        (*p_drv[i]->init)();
                }

                /*
                 * Check for devices with no "standard" peripheral driver
                 * attached.  For any devices like that, announce the
                 * passthrough driver so the user will see something.
                 */
                xpt_for_all_devices(xptpassannouncefunc, NULL);

                /* Release our hook so that the boot can continue. */
                config_intrhook_disestablish(xsoftc.xpt_config_hook);
                kfree(xsoftc.xpt_config_hook, M_CAMXPT);
                xsoftc.xpt_config_hook = NULL;
        }

        kfree(context, M_CAMXPT);
}
static void
xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
{
        struct xpt_task *task;
        struct cam_path *path;

        if (done_ccb != NULL) {
                path = done_ccb->ccb_h.path;
                CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_finishconfig\n"));

                switch(done_ccb->ccb_h.func_code) {
                case XPT_RESET_BUS:
                        if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
                                done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
                                done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
                                done_ccb->crcn.flags = 0;
                                xpt_action(done_ccb);
                                return;
                        }
                        /* FALLTHROUGH */
                case XPT_SCAN_BUS:
                default:
                        kprintf("CAM: Finished configuring bus:");
                        if (path->bus->sim) {
                                kprintf(" %s%d\n",
                                        path->bus->sim->sim_name,
                                        path->bus->sim->unit_number);
                        } else {
                                kprintf(" (unknown)\n");
                        }
                        if (path->bus->counted_to_config) {
                                path->bus->counted_to_config = 0;
                                busses_to_config--;
                        }
                        xpt_free_path(path);
                        break;
                }
        }

        if (busses_to_config == 0) {
                task = kmalloc(sizeof(struct xpt_task), M_CAMXPT,
                               M_INTWAIT | M_ZERO);
                TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
                taskqueue_enqueue(taskqueue_thread[mycpuid], &task->task);
        }

        if (done_ccb != NULL)
                xpt_free_ccb(done_ccb);
}
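
/*
 * Completion chaining used above, summarized: a completed XPT_RESET_BUS
 * ccb is recycled in place as an XPT_SCAN_BUS request with this function
 * as its callback; when the scan in turn completes, the bus is removed
 * from busses_to_config, and once the count reaches zero the final
 * bookkeeping is pushed off to xpt_finishconfig_task() via the
 * taskqueue so it runs outside the completion path.
 */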
cam_status
xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
                   struct cam_path *path)
{
        struct ccb_setasync csa;
        cam_status status;
        int xptpath = 0;

        if (path == NULL) {
                lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
                status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
                                         CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
                if (status != CAM_REQ_CMP) {
                        lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
                        return (status);
                }
                xptpath = 1;
        }

        xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
        csa.ccb_h.func_code = XPT_SASYNC_CB;
        csa.event_enable = event;
        csa.callback = cbfunc;
        csa.callback_arg = cbarg;
        xpt_action((union ccb *)&csa);
        status = csa.ccb_h.status;
        if (xptpath) {
                xpt_free_path(path);
                lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
        }
        return (status);
}
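
/*
 * Typical use, sketched (the callback name is hypothetical): a
 * peripheral driver subscribes to async events with something like
 *
 *      static void foo_async(void *cbarg, u_int32_t code,
 *                            struct cam_path *path, void *arg);
 *      ...
 *      status = xpt_register_async(AC_FOUND_DEVICE | AC_LOST_DEVICE,
 *                                  foo_async, NULL, NULL);
 *
 * A NULL path registers against the XPT wildcard path constructed
 * above, so the callback sees events for all busses.
 */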
static void
xptaction(struct cam_sim *sim, union ccb *work_ccb)
{
        CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));

        switch (work_ccb->ccb_h.func_code) {
        /* Common cases first */
        case XPT_PATH_INQ:              /* Path routing inquiry */
        {
                struct ccb_pathinq *cpi;

                cpi = &work_ccb->cpi;
                cpi->version_num = 1; /* XXX??? */
                cpi->hba_inquiry = 0;
                cpi->target_sprt = 0;
                cpi->hba_misc = 0;
                cpi->hba_eng_cnt = 0;
                cpi->max_target = 0;
                cpi->max_lun = 0;
                cpi->initiator_id = 0;
                strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
                strncpy(cpi->hba_vid, "", HBA_IDLEN);
                strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
                cpi->unit_number = sim->unit_number;
                cpi->bus_id = sim->bus_id;
                cpi->base_transfer_speed = 0;
                cpi->protocol = PROTO_UNSPECIFIED;
                cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
                cpi->transport = XPORT_UNSPECIFIED;
                cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
                cpi->ccb_h.status = CAM_REQ_CMP;
                xpt_done(work_ccb);
                break;
        }
        default:
                work_ccb->ccb_h.status = CAM_REQ_INVALID;
                xpt_done(work_ccb);
                break;
        }
}
/*
 * The xpt as a "controller" has no interrupt sources, so polling
 * is a no-op.
 */
static void
xptpoll(struct cam_sim *sim)
{
}

void
xpt_lock_buses(void)
{
        lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
}

void
xpt_unlock_buses(void)
{
        lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
}
/*
 * Should only be called by the machine interrupt dispatch routines,
 * so put these prototypes here instead of in the header.
 */
static void
swi_cambio(void *arg, void *frame)
{
        camisr(NULL);
}

static void
camisr(void *dummy)
{
        cam_simq_t queue;
        struct cam_sim *sim;

        spin_lock_wr(&cam_simq_spin);
        TAILQ_INIT(&queue);
        TAILQ_CONCAT(&queue, &cam_simq, links);
        spin_unlock_wr(&cam_simq_spin);

        while ((sim = TAILQ_FIRST(&queue)) != NULL) {
                TAILQ_REMOVE(&queue, sim, links);
                CAM_SIM_LOCK(sim);
                sim->flags &= ~CAM_SIM_ON_DONEQ;
                camisr_runqueue(sim);
                CAM_SIM_UNLOCK(sim);
        }
}
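
/*
 * Design note: camisr() drains the global cam_simq onto a private list
 * while holding cam_simq_spin, then drops the spinlock before taking
 * each SIM's lock and running its done queue.  This keeps the global
 * spinlock hold time short and avoids acquiring a SIM lock while a
 * spinlock is held.
 */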
static void
camisr_runqueue(struct cam_sim *sim)
{
        struct ccb_hdr *ccb_h;
        int runq;

        spin_lock_wr(&sim->sim_spin);
        while ((ccb_h = TAILQ_FIRST(&sim->sim_doneq)) != NULL) {
                TAILQ_REMOVE(&sim->sim_doneq, ccb_h, sim_links.tqe);
                spin_unlock_wr(&sim->sim_spin);
                ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;

                CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
                          ("camisr\n"));

                runq = FALSE;

                if (ccb_h->flags & CAM_HIGH_POWER) {
                        struct highpowerlist *hphead;
                        struct cam_ed *device;
                        union ccb *send_ccb;

                        lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
                        hphead = &xsoftc.highpowerq;

                        send_ccb = (union ccb *)STAILQ_FIRST(hphead);

                        /*
                         * Increment the count since this command is done.
                         */
                        xsoftc.num_highpower++;

                        /*
                         * Any high powered commands queued up?
                         */
                        if (send_ccb != NULL) {
                                device = send_ccb->ccb_h.path->device;

                                STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
                                lockmgr(&xsoftc.xpt_lock, LK_RELEASE);

                                xpt_release_devq(send_ccb->ccb_h.path,
                                                 /*count*/1, /*runqueue*/TRUE);
                        } else {
                                lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
                        }
                }

                if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
                        struct cam_ed *dev;

                        dev = ccb_h->path->device;

                        cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);

                        /*
                         * devq may be NULL if this is cam_dead_sim
                         */
                        if (ccb_h->path->bus->sim->devq) {
                                ccb_h->path->bus->sim->devq->send_active--;
                                ccb_h->path->bus->sim->devq->send_openings++;
                        }

                        if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
                          && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
                         || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
                          && (dev->ccbq.dev_active == 0))) {
                                xpt_release_devq(ccb_h->path, /*count*/1,
                                                 /*run_queue*/TRUE);
                        }

                        if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
                         && (--dev->tag_delay_count == 0))
                                xpt_start_tags(ccb_h->path);

                        if ((dev->ccbq.queue.entries > 0)
                         && (dev->qfrozen_cnt == 0)
                         && (device_is_send_queued(dev) == 0)) {
                                runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
                                                              dev);
                        }
                }

                if (ccb_h->status & CAM_RELEASE_SIMQ) {
                        xpt_release_simq(ccb_h->path->bus->sim,
                                         /*run_queue*/TRUE);
                        ccb_h->status &= ~CAM_RELEASE_SIMQ;
                        runq = FALSE;
                }

                if ((ccb_h->flags & CAM_DEV_QFRZDIS)
                 && (ccb_h->status & CAM_DEV_QFRZN)) {
                        xpt_release_devq(ccb_h->path, /*count*/1,
                                         /*run_queue*/TRUE);
                        ccb_h->status &= ~CAM_DEV_QFRZN;
                } else if (runq) {
                        xpt_run_dev_sendq(ccb_h->path->bus);
                }

                /* Call the peripheral driver's callback */
                (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
                spin_lock_wr(&sim->sim_spin);
        }
        spin_unlock_wr(&sim->sim_spin);
}
/*
 * The dead_sim isn't completely hooked into CAM, we have to make sure
 * the doneq is cleared after calling xpt_done() so cam_periph_ccbwait()
 * doesn't block.
 */
static void
dead_sim_action(struct cam_sim *sim, union ccb *ccb)
{
        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
        xpt_done(ccb);
        camisr_runqueue(sim);
}

static void
dead_sim_poll(struct cam_sim *sim)
{
}