1 /* $FreeBSD: src/sys/dev/isp/isp_freebsd.c,v 1.32.2.20 2002/10/11 18:49:25 mjacob Exp $ */
2 /* $DragonFly: src/sys/dev/disk/isp/isp_freebsd.c,v 1.20 2008/02/10 00:01:02 pavalos Exp $ */
4 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
6 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice immediately at the beginning of the file, without modification,
13 * this list of conditions, and the following disclaimer.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/unistd.h>
30 #include <sys/kthread.h>
32 #include <sys/device.h>
33 #include <sys/ioccom.h>
34 #include <machine/stdarg.h> /* for use by isp_prt below */
36 #include "isp_ioctl.h"
37 #include "isp_freebsd.h"
39 static d_ioctl_t ispioctl
;
40 static void isp_intr_enable(void *);
41 static void isp_cam_async(void *, u_int32_t
, struct cam_path
*, void *);
42 static void isp_poll(struct cam_sim
*);
43 static timeout_t isp_watchdog
;
44 static void isp_kthread(void *);
45 static void isp_action(struct cam_sim
*, union ccb
*);
48 #define ISP_CDEV_MAJOR 248
49 static struct dev_ops isp_ops
= {
50 { "isp", ISP_CDEV_MAJOR
, D_TAPE
},
56 static struct ispsoftc
*isplist
= NULL
;
59 isp_attach(struct ispsoftc
*isp
)
61 int primary
, secondary
;
62 struct ccb_setasync csa
;
63 struct cam_devq
*devq
;
65 struct cam_path
*path
;
68 * Establish (in case of 12X0) which bus is the primary.
75 * Create the device queue for our SIM(s).
77 devq
= cam_simq_alloc(isp
->isp_maxcmds
);
83 * Construct our SIM entry.
85 ISPLOCK_2_CAMLOCK(isp
);
86 sim
= cam_sim_alloc(isp_action
, isp_poll
, "isp", isp
,
87 device_get_unit(isp
->isp_dev
), 1, isp
->isp_maxcmds
, devq
);
88 cam_simq_release(devq
); /* leaves 1 ref due to cam_sim_alloc */
90 CAMLOCK_2_ISPLOCK(isp
);
93 CAMLOCK_2_ISPLOCK(isp
);
95 isp
->isp_osinfo
.ehook
.ich_func
= isp_intr_enable
;
96 isp
->isp_osinfo
.ehook
.ich_arg
= isp
;
97 isp
->isp_osinfo
.ehook
.ich_desc
= "isp";
98 ISPLOCK_2_CAMLOCK(isp
);
99 if (config_intrhook_establish(&isp
->isp_osinfo
.ehook
) != 0) {
101 CAMLOCK_2_ISPLOCK(isp
);
102 isp_prt(isp
, ISP_LOGERR
,
103 "could not establish interrupt enable hook");
107 if (xpt_bus_register(sim
, primary
) != CAM_SUCCESS
) {
109 CAMLOCK_2_ISPLOCK(isp
);
113 if (xpt_create_path(&path
, NULL
, cam_sim_path(sim
),
114 CAM_TARGET_WILDCARD
, CAM_LUN_WILDCARD
) != CAM_REQ_CMP
) {
115 xpt_bus_deregister(cam_sim_path(sim
));
117 config_intrhook_disestablish(&isp
->isp_osinfo
.ehook
);
118 CAMLOCK_2_ISPLOCK(isp
);
122 xpt_setup_ccb(&csa
.ccb_h
, path
, 5);
123 csa
.ccb_h
.func_code
= XPT_SASYNC_CB
;
124 csa
.event_enable
= AC_LOST_DEVICE
;
125 csa
.callback
= isp_cam_async
;
126 csa
.callback_arg
= sim
;
127 xpt_action((union ccb
*)&csa
);
128 CAMLOCK_2_ISPLOCK(isp
);
130 isp
->isp_path
= path
;
132 * Create a kernel thread for fibre channel instances. We
133 * don't have dual channel FC cards.
136 ISPLOCK_2_CAMLOCK(isp
);
137 if (kthread_create(isp_kthread
, isp
, &isp
->isp_osinfo
.kthread
,
138 "%s: fc_thrd", device_get_nameunit(isp
->isp_dev
))) {
139 xpt_bus_deregister(cam_sim_path(sim
));
141 config_intrhook_disestablish(&isp
->isp_osinfo
.ehook
);
142 CAMLOCK_2_ISPLOCK(isp
);
143 isp_prt(isp
, ISP_LOGERR
, "could not create kthread");
146 CAMLOCK_2_ISPLOCK(isp
);
151 * If we have a second channel, construct SIM entry for that.
153 if (IS_DUALBUS(isp
)) {
154 ISPLOCK_2_CAMLOCK(isp
);
155 sim
= cam_sim_alloc(isp_action
, isp_poll
, "isp", isp
,
156 device_get_unit(isp
->isp_dev
), 1, isp
->isp_maxcmds
, devq
);
158 xpt_bus_deregister(cam_sim_path(isp
->isp_sim
));
159 xpt_free_path(isp
->isp_path
);
160 config_intrhook_disestablish(&isp
->isp_osinfo
.ehook
);
163 if (xpt_bus_register(sim
, secondary
) != CAM_SUCCESS
) {
164 xpt_bus_deregister(cam_sim_path(isp
->isp_sim
));
165 xpt_free_path(isp
->isp_path
);
167 config_intrhook_disestablish(&isp
->isp_osinfo
.ehook
);
168 CAMLOCK_2_ISPLOCK(isp
);
172 if (xpt_create_path(&path
, NULL
, cam_sim_path(sim
),
173 CAM_TARGET_WILDCARD
, CAM_LUN_WILDCARD
) != CAM_REQ_CMP
) {
174 xpt_bus_deregister(cam_sim_path(isp
->isp_sim
));
175 xpt_free_path(isp
->isp_path
);
176 xpt_bus_deregister(cam_sim_path(sim
));
178 config_intrhook_disestablish(&isp
->isp_osinfo
.ehook
);
179 CAMLOCK_2_ISPLOCK(isp
);
183 xpt_setup_ccb(&csa
.ccb_h
, path
, 5);
184 csa
.ccb_h
.func_code
= XPT_SASYNC_CB
;
185 csa
.event_enable
= AC_LOST_DEVICE
;
186 csa
.callback
= isp_cam_async
;
187 csa
.callback_arg
= sim
;
188 xpt_action((union ccb
*)&csa
);
189 CAMLOCK_2_ISPLOCK(isp
);
191 isp
->isp_path2
= path
;
194 * Create device nodes
196 dev_ops_add(&isp_ops
, -1, device_get_unit(isp
->isp_dev
));
197 make_dev(&isp_ops
, device_get_unit(isp
->isp_dev
), UID_ROOT
,
198 GID_OPERATOR
, 0600, "%s", device_get_nameunit(isp
->isp_dev
));
200 if (isp
->isp_role
!= ISP_ROLE_NONE
) {
201 isp
->isp_state
= ISP_RUNSTATE
;
203 if (isplist
== NULL
) {
206 struct ispsoftc
*tmp
= isplist
;
207 while (tmp
->isp_osinfo
.next
) {
208 tmp
= tmp
->isp_osinfo
.next
;
210 tmp
->isp_osinfo
.next
= isp
;
216 isp_freeze_loopdown(struct ispsoftc
*isp
, char *msg
)
218 if (isp
->isp_osinfo
.simqfrozen
== 0) {
219 isp_prt(isp
, ISP_LOGDEBUG0
, "%s: freeze simq (loopdown)", msg
);
220 isp
->isp_osinfo
.simqfrozen
|= SIMQFRZ_LOOPDOWN
;
221 ISPLOCK_2_CAMLOCK(isp
);
222 xpt_freeze_simq(isp
->isp_sim
, 1);
223 CAMLOCK_2_ISPLOCK(isp
);
225 isp_prt(isp
, ISP_LOGDEBUG0
, "%s: mark frozen (loopdown)", msg
);
226 isp
->isp_osinfo
.simqfrozen
|= SIMQFRZ_LOOPDOWN
;
231 ispioctl(struct dev_ioctl_args
*ap
)
233 cdev_t dev
= ap
->a_head
.a_dev
;
234 struct ispsoftc
*isp
;
239 if (minor(dev
) == device_get_unit(isp
->isp_dev
)) {
242 isp
= isp
->isp_osinfo
.next
;
248 #ifdef ISP_FW_CRASH_DUMP
249 case ISP_GET_FW_CRASH_DUMP
:
251 u_int16_t
*ptr
= FCPARAM(isp
)->isp_dump_data
;
256 sz
= QLA2200_RISC_IMAGE_DUMP_SIZE
;
258 sz
= QLA2300_RISC_IMAGE_DUMP_SIZE
;
261 void *uaddr
= *((void **) addr
);
262 if (copyout(ptr
, uaddr
, sz
)) {
274 case ISP_FORCE_CRASH_DUMP
:
276 isp_freeze_loopdown(isp
, "ispioctl(ISP_FORCE_CRASH_DUMP)");
285 int olddblev
= isp
->isp_dblev
;
286 isp
->isp_dblev
= *(int *)ap
->a_data
;
287 *(int *)ap
->a_data
= olddblev
;
300 if (isp_fc_runstate(isp
, 5 * 1000000)) {
311 if (isp_control(isp
, ISPCTL_SEND_LIP
, 0)) {
319 case ISP_FC_GETDINFO
:
321 struct isp_fc_device
*ifc
= (struct isp_fc_device
*) ap
->a_data
;
324 if (ifc
->loopid
< 0 || ifc
->loopid
>= MAX_FC_TARG
) {
329 lp
= &FCPARAM(isp
)->portdb
[ifc
->loopid
];
331 ifc
->loopid
= lp
->loopid
;
332 ifc
->portid
= lp
->portid
;
333 ifc
->node_wwn
= lp
->node_wwn
;
334 ifc
->port_wwn
= lp
->port_wwn
;
344 isp_stats_t
*sp
= (isp_stats_t
*) ap
->a_data
;
346 MEMZERO(sp
, sizeof (*sp
));
347 sp
->isp_stat_version
= ISP_STATS_VERSION
;
348 sp
->isp_type
= isp
->isp_type
;
349 sp
->isp_revision
= isp
->isp_revision
;
351 sp
->isp_stats
[ISP_INTCNT
] = isp
->isp_intcnt
;
352 sp
->isp_stats
[ISP_INTBOGUS
] = isp
->isp_intbogus
;
353 sp
->isp_stats
[ISP_INTMBOXC
] = isp
->isp_intmboxc
;
354 sp
->isp_stats
[ISP_INGOASYNC
] = isp
->isp_intoasync
;
355 sp
->isp_stats
[ISP_RSLTCCMPLT
] = isp
->isp_rsltccmplt
;
356 sp
->isp_stats
[ISP_FPHCCMCPLT
] = isp
->isp_fphccmplt
;
357 sp
->isp_stats
[ISP_RSCCHIWAT
] = isp
->isp_rscchiwater
;
358 sp
->isp_stats
[ISP_FPCCHIWAT
] = isp
->isp_fpcchiwater
;
366 isp
->isp_intbogus
= 0;
367 isp
->isp_intmboxc
= 0;
368 isp
->isp_intoasync
= 0;
369 isp
->isp_rsltccmplt
= 0;
370 isp
->isp_fphccmplt
= 0;
371 isp
->isp_rscchiwater
= 0;
372 isp
->isp_fpcchiwater
= 0;
376 case ISP_FC_GETHINFO
:
378 struct isp_hba_device
*hba
= (struct isp_hba_device
*) ap
->a_data
;
379 MEMZERO(hba
, sizeof (*hba
));
381 hba
->fc_speed
= FCPARAM(isp
)->isp_gbspeed
;
382 hba
->fc_scsi_supported
= 1;
383 hba
->fc_topology
= FCPARAM(isp
)->isp_topo
+ 1;
384 hba
->fc_loopid
= FCPARAM(isp
)->isp_loopid
;
385 hba
->active_node_wwn
= FCPARAM(isp
)->isp_nodewwn
;
386 hba
->active_port_wwn
= FCPARAM(isp
)->isp_portwwn
;
391 case ISP_GET_FC_PARAM
:
393 struct isp_fc_param
*f
= (struct isp_fc_param
*) ap
->a_data
;
400 if (strcmp(f
->param_name
, "framelength") == 0) {
401 f
->parameter
= FCPARAM(isp
)->isp_maxfrmlen
;
405 if (strcmp(f
->param_name
, "exec_throttle") == 0) {
406 f
->parameter
= FCPARAM(isp
)->isp_execthrottle
;
410 if (strcmp(f
->param_name
, "fullduplex") == 0) {
411 if (FCPARAM(isp
)->isp_fwoptions
& ICBOPT_FULL_DUPLEX
)
416 if (strcmp(f
->param_name
, "loopid") == 0) {
417 f
->parameter
= FCPARAM(isp
)->isp_loopid
;
424 case ISP_SET_FC_PARAM
:
426 struct isp_fc_param
*f
= (struct isp_fc_param
*) ap
->a_data
;
427 u_int32_t param
= f
->parameter
;
434 if (strcmp(f
->param_name
, "framelength") == 0) {
435 if (param
!= 512 && param
!= 1024 && param
!= 1024) {
439 FCPARAM(isp
)->isp_maxfrmlen
= param
;
443 if (strcmp(f
->param_name
, "exec_throttle") == 0) {
444 if (param
< 16 || param
> 255) {
448 FCPARAM(isp
)->isp_execthrottle
= param
;
452 if (strcmp(f
->param_name
, "fullduplex") == 0) {
453 if (param
!= 0 && param
!= 1) {
458 FCPARAM(isp
)->isp_fwoptions
|=
461 FCPARAM(isp
)->isp_fwoptions
&=
467 if (strcmp(f
->param_name
, "loopid") == 0) {
468 if (param
< 0 || param
> 125) {
472 FCPARAM(isp
)->isp_loopid
= param
;
486 isp_intr_enable(void *arg
)
488 struct ispsoftc
*isp
= arg
;
489 if (isp
->isp_role
!= ISP_ROLE_NONE
) {
492 /* Release our hook so that the boot can continue. */
493 config_intrhook_disestablish(&isp
->isp_osinfo
.ehook
);
497 * Put the target mode functions here, because some are inlines
500 #ifdef ISP_TARGET_MODE
502 static INLINE
int is_lun_enabled(struct ispsoftc
*, int, lun_id_t
);
503 static INLINE
int are_any_luns_enabled(struct ispsoftc
*, int);
504 static INLINE tstate_t
*get_lun_statep(struct ispsoftc
*, int, lun_id_t
);
505 static INLINE
void rls_lun_statep(struct ispsoftc
*, tstate_t
*);
506 static INLINE
int isp_psema_sig_rqe(struct ispsoftc
*, int);
507 static INLINE
int isp_cv_wait_timed_rqe(struct ispsoftc
*, int, int);
508 static INLINE
void isp_cv_signal_rqe(struct ispsoftc
*, int, int);
509 static INLINE
void isp_vsema_rqe(struct ispsoftc
*, int);
510 static INLINE atio_private_data_t
*isp_get_atpd(struct ispsoftc
*, int);
512 create_lun_state(struct ispsoftc
*, int, struct cam_path
*, tstate_t
**);
513 static void destroy_lun_state(struct ispsoftc
*, tstate_t
*);
514 static void isp_en_lun(struct ispsoftc
*, union ccb
*);
515 static cam_status
isp_abort_tgt_ccb(struct ispsoftc
*, union ccb
*);
516 static timeout_t isp_refire_putback_atio
;
517 static void isp_complete_ctio(union ccb
*);
518 static void isp_target_putback_atio(union ccb
*);
519 static cam_status
isp_target_start_ctio(struct ispsoftc
*, union ccb
*);
520 static int isp_handle_platform_atio(struct ispsoftc
*, at_entry_t
*);
521 static int isp_handle_platform_atio2(struct ispsoftc
*, at2_entry_t
*);
522 static int isp_handle_platform_ctio(struct ispsoftc
*, void *);
523 static int isp_handle_platform_notify_scsi(struct ispsoftc
*, in_entry_t
*);
524 static int isp_handle_platform_notify_fc(struct ispsoftc
*, in_fcentry_t
*);
527 is_lun_enabled(struct ispsoftc
*isp
, int bus
, lun_id_t lun
)
530 tptr
= isp
->isp_osinfo
.lun_hash
[LUN_HASH_FUNC(isp
, bus
, lun
)];
535 if (tptr
->lun
== (lun_id_t
) lun
&& tptr
->bus
== bus
) {
538 } while ((tptr
= tptr
->next
) != NULL
);
543 are_any_luns_enabled(struct ispsoftc
*isp
, int port
)
546 if (IS_DUALBUS(isp
)) {
547 lo
= (port
* (LUN_HASH_SIZE
>> 1));
548 hi
= lo
+ (LUN_HASH_SIZE
>> 1);
553 for (lo
= 0; lo
< hi
; lo
++) {
554 if (isp
->isp_osinfo
.lun_hash
[lo
]) {
561 static INLINE tstate_t
*
562 get_lun_statep(struct ispsoftc
*isp
, int bus
, lun_id_t lun
)
564 tstate_t
*tptr
= NULL
;
566 if (lun
== CAM_LUN_WILDCARD
) {
567 if (isp
->isp_osinfo
.tmflags
[bus
] & TM_WILDCARD_ENABLED
) {
568 tptr
= &isp
->isp_osinfo
.tsdflt
[bus
];
573 tptr
= isp
->isp_osinfo
.lun_hash
[LUN_HASH_FUNC(isp
, bus
, lun
)];
580 if (tptr
->lun
== lun
&& tptr
->bus
== bus
) {
584 } while ((tptr
= tptr
->next
) != NULL
);
589 rls_lun_statep(struct ispsoftc
*isp
, tstate_t
*tptr
)
596 isp_psema_sig_rqe(struct ispsoftc
*isp
, int bus
)
598 while (isp
->isp_osinfo
.tmflags
[bus
] & TM_BUSY
) {
599 isp
->isp_osinfo
.tmflags
[bus
] |= TM_WANTED
;
600 if (tsleep(&isp
->isp_osinfo
.tmflags
[bus
], PCATCH
, "i0", 0)) {
603 isp
->isp_osinfo
.tmflags
[bus
] |= TM_BUSY
;
609 isp_cv_wait_timed_rqe(struct ispsoftc
*isp
, int bus
, int timo
)
611 if (tsleep(&isp
->isp_osinfo
.rstatus
[bus
], 0, "qt1", timo
)) {
618 isp_cv_signal_rqe(struct ispsoftc
*isp
, int bus
, int status
)
620 isp
->isp_osinfo
.rstatus
[bus
] = status
;
621 wakeup(&isp
->isp_osinfo
.rstatus
[bus
]);
625 isp_vsema_rqe(struct ispsoftc
*isp
, int bus
)
627 if (isp
->isp_osinfo
.tmflags
[bus
] & TM_WANTED
) {
628 isp
->isp_osinfo
.tmflags
[bus
] &= ~TM_WANTED
;
629 wakeup(&isp
->isp_osinfo
.tmflags
[bus
]);
631 isp
->isp_osinfo
.tmflags
[bus
] &= ~TM_BUSY
;
634 static __inline atio_private_data_t
*
635 isp_get_atpd(struct ispsoftc
*isp
, int tag
)
637 atio_private_data_t
*atp
;
638 for (atp
= isp
->isp_osinfo
.atpdp
;
639 atp
< &isp
->isp_osinfo
.atpdp
[ATPDPSIZE
]; atp
++) {
647 create_lun_state(struct ispsoftc
*isp
, int bus
,
648 struct cam_path
*path
, tstate_t
**rslt
)
653 tstate_t
*tptr
, *new;
655 lun
= xpt_path_lun_id(path
);
657 return (CAM_LUN_INVALID
);
659 if (is_lun_enabled(isp
, bus
, lun
)) {
660 return (CAM_LUN_ALRDY_ENA
);
662 new = kmalloc(sizeof (tstate_t
), M_DEVBUF
, M_WAITOK
| M_ZERO
);
663 status
= xpt_create_path(&new->owner
, NULL
, xpt_path_path_id(path
),
664 xpt_path_target_id(path
), xpt_path_lun_id(path
));
665 if (status
!= CAM_REQ_CMP
) {
666 kfree(new, M_DEVBUF
);
671 SLIST_INIT(&new->atios
);
672 SLIST_INIT(&new->inots
);
675 hfx
= LUN_HASH_FUNC(isp
, new->bus
, new->lun
);
676 tptr
= isp
->isp_osinfo
.lun_hash
[hfx
];
678 isp
->isp_osinfo
.lun_hash
[hfx
] = new;
685 return (CAM_REQ_CMP
);
689 destroy_lun_state(struct ispsoftc
*isp
, tstate_t
*tptr
)
694 hfx
= LUN_HASH_FUNC(isp
, tptr
->bus
, tptr
->lun
);
698 pw
= isp
->isp_osinfo
.lun_hash
[hfx
];
701 } else if (pw
->lun
== tptr
->lun
&& pw
->bus
== tptr
->bus
) {
702 isp
->isp_osinfo
.lun_hash
[hfx
] = pw
->next
;
707 if (pw
->lun
== tptr
->lun
&& pw
->bus
== tptr
->bus
) {
718 kfree(tptr
, M_DEVBUF
);
722 * we enter with our locks held.
725 isp_en_lun(struct ispsoftc
*isp
, union ccb
*ccb
)
727 const char lfmt
[] = "Lun now %sabled for target mode on channel %d";
728 struct ccb_en_lun
*cel
= &ccb
->cel
;
731 int bus
, cmd
, av
, wildcard
;
736 bus
= XS_CHANNEL(ccb
) & 0x1;
737 tgt
= ccb
->ccb_h
.target_id
;
738 lun
= ccb
->ccb_h
.target_lun
;
741 * Do some sanity checking first.
744 if ((lun
!= CAM_LUN_WILDCARD
) &&
745 (lun
< 0 || lun
>= (lun_id_t
) isp
->isp_maxluns
)) {
746 ccb
->ccb_h
.status
= CAM_LUN_INVALID
;
751 sdparam
*sdp
= isp
->isp_param
;
753 if (tgt
!= CAM_TARGET_WILDCARD
&&
754 tgt
!= sdp
->isp_initiator_id
) {
755 ccb
->ccb_h
.status
= CAM_TID_INVALID
;
759 if (tgt
!= CAM_TARGET_WILDCARD
&&
760 tgt
!= FCPARAM(isp
)->isp_iid
) {
761 ccb
->ccb_h
.status
= CAM_TID_INVALID
;
765 * This is as a good a place as any to check f/w capabilities.
767 if ((FCPARAM(isp
)->isp_fwattr
& ISP_FW_ATTR_TMODE
) == 0) {
768 isp_prt(isp
, ISP_LOGERR
,
769 "firmware does not support target mode");
770 ccb
->ccb_h
.status
= CAM_FUNC_NOTAVAIL
;
774 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
775 * XXX: dorks with our already fragile enable/disable code.
777 if ((FCPARAM(isp
)->isp_fwattr
& ISP_FW_ATTR_SCCLUN
) == 0) {
778 isp_prt(isp
, ISP_LOGERR
,
779 "firmware not SCCLUN capable");
783 if (tgt
== CAM_TARGET_WILDCARD
) {
784 if (lun
== CAM_LUN_WILDCARD
) {
787 ccb
->ccb_h
.status
= CAM_LUN_INVALID
;
795 * Next check to see whether this is a target/lun wildcard action.
797 * If so, we know that we can accept commands for luns that haven't
798 * been enabled yet and send them upstream. Otherwise, we have to
799 * handle them locally (if we see them at all).
803 tptr
= &isp
->isp_osinfo
.tsdflt
[bus
];
805 if (isp
->isp_osinfo
.tmflags
[bus
] &
806 TM_WILDCARD_ENABLED
) {
807 ccb
->ccb_h
.status
= CAM_LUN_ALRDY_ENA
;
811 xpt_create_path(&tptr
->owner
, NULL
,
812 xpt_path_path_id(ccb
->ccb_h
.path
),
813 xpt_path_target_id(ccb
->ccb_h
.path
),
814 xpt_path_lun_id(ccb
->ccb_h
.path
));
815 if (ccb
->ccb_h
.status
!= CAM_REQ_CMP
) {
818 SLIST_INIT(&tptr
->atios
);
819 SLIST_INIT(&tptr
->inots
);
820 isp
->isp_osinfo
.tmflags
[bus
] |= TM_WILDCARD_ENABLED
;
822 if ((isp
->isp_osinfo
.tmflags
[bus
] &
823 TM_WILDCARD_ENABLED
) == 0) {
824 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
828 ccb
->ccb_h
.status
= CAM_SCSI_BUSY
;
831 xpt_free_path(tptr
->owner
);
832 isp
->isp_osinfo
.tmflags
[bus
] &= ~TM_WILDCARD_ENABLED
;
837 * Now check to see whether this bus needs to be
838 * enabled/disabled with respect to target mode.
841 if (cel
->enable
&& !(isp
->isp_osinfo
.tmflags
[bus
] & TM_TMODE_ENABLED
)) {
842 av
|= ENABLE_TARGET_FLAG
;
843 av
= isp_control(isp
, ISPCTL_TOGGLE_TMODE
, &av
);
845 ccb
->ccb_h
.status
= CAM_FUNC_NOTAVAIL
;
847 isp
->isp_osinfo
.tmflags
[bus
] &=
848 ~TM_WILDCARD_ENABLED
;
849 xpt_free_path(tptr
->owner
);
853 isp
->isp_osinfo
.tmflags
[bus
] |= TM_TMODE_ENABLED
;
854 isp_prt(isp
, ISP_LOGINFO
,
855 "Target Mode enabled on channel %d", bus
);
856 } else if (cel
->enable
== 0 &&
857 (isp
->isp_osinfo
.tmflags
[bus
] & TM_TMODE_ENABLED
) && wildcard
) {
858 if (are_any_luns_enabled(isp
, bus
)) {
859 ccb
->ccb_h
.status
= CAM_SCSI_BUSY
;
862 av
= isp_control(isp
, ISPCTL_TOGGLE_TMODE
, &av
);
864 ccb
->ccb_h
.status
= CAM_FUNC_NOTAVAIL
;
867 isp
->isp_osinfo
.tmflags
[bus
] &= ~TM_TMODE_ENABLED
;
868 isp_prt(isp
, ISP_LOGINFO
,
869 "Target Mode disabled on channel %d", bus
);
873 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
879 create_lun_state(isp
, bus
, ccb
->ccb_h
.path
, &tptr
);
880 if (ccb
->ccb_h
.status
!= CAM_REQ_CMP
) {
884 tptr
= get_lun_statep(isp
, bus
, lun
);
886 ccb
->ccb_h
.status
= CAM_LUN_INVALID
;
891 if (isp_psema_sig_rqe(isp
, bus
)) {
892 rls_lun_statep(isp
, tptr
);
894 destroy_lun_state(isp
, tptr
);
895 ccb
->ccb_h
.status
= CAM_REQ_CMP_ERR
;
900 u_int32_t seq
= isp
->isp_osinfo
.rollinfo
++;
901 int c
, n
, ulun
= lun
;
903 cmd
= RQSTYPE_ENABLE_LUN
;
906 if (IS_FC(isp
) && lun
!= 0) {
907 cmd
= RQSTYPE_MODIFY_LUN
;
910 * For SCC firmware, we only deal with setting
911 * (enabling or modifying) lun 0.
916 if (isp_lun_cmd(isp
, cmd
, bus
, tgt
, ulun
, c
, n
, seq
)) {
917 xpt_print_path(ccb
->ccb_h
.path
);
918 isp_prt(isp
, ISP_LOGWARN
, "isp_lun_cmd failed");
921 if (isp_cv_wait_timed_rqe(isp
, bus
, 30 * hz
)) {
922 xpt_print_path(ccb
->ccb_h
.path
);
923 isp_prt(isp
, ISP_LOGERR
,
924 "wait for ENABLE/MODIFY LUN timed out");
927 rstat
= isp
->isp_osinfo
.rstatus
[bus
];
928 if (rstat
!= LUN_OK
) {
929 xpt_print_path(ccb
->ccb_h
.path
);
930 isp_prt(isp
, ISP_LOGERR
,
931 "ENABLE/MODIFY LUN returned 0x%x", rstat
);
935 int c
, n
, ulun
= lun
;
939 seq
= isp
->isp_osinfo
.rollinfo
++;
940 cmd
= -RQSTYPE_MODIFY_LUN
;
944 if (IS_FC(isp
) && lun
!= 0) {
947 * For SCC firmware, we only deal with setting
948 * (enabling or modifying) lun 0.
952 if (isp_lun_cmd(isp
, cmd
, bus
, tgt
, ulun
, c
, n
, seq
)) {
953 xpt_print_path(ccb
->ccb_h
.path
);
954 isp_prt(isp
, ISP_LOGERR
, "isp_lun_cmd failed");
957 if (isp_cv_wait_timed_rqe(isp
, bus
, 30 * hz
)) {
958 xpt_print_path(ccb
->ccb_h
.path
);
959 isp_prt(isp
, ISP_LOGERR
,
960 "wait for MODIFY LUN timed out");
963 rstat
= isp
->isp_osinfo
.rstatus
[bus
];
964 if (rstat
!= LUN_OK
) {
965 xpt_print_path(ccb
->ccb_h
.path
);
966 isp_prt(isp
, ISP_LOGERR
,
967 "MODIFY LUN returned 0x%x", rstat
);
970 if (IS_FC(isp
) && lun
) {
974 seq
= isp
->isp_osinfo
.rollinfo
++;
977 cmd
= -RQSTYPE_ENABLE_LUN
;
978 if (isp_lun_cmd(isp
, cmd
, bus
, tgt
, lun
, 0, 0, seq
)) {
979 xpt_print_path(ccb
->ccb_h
.path
);
980 isp_prt(isp
, ISP_LOGERR
, "isp_lun_cmd failed");
983 if (isp_cv_wait_timed_rqe(isp
, bus
, 30 * hz
)) {
984 xpt_print_path(ccb
->ccb_h
.path
);
985 isp_prt(isp
, ISP_LOGERR
,
986 "wait for DISABLE LUN timed out");
989 rstat
= isp
->isp_osinfo
.rstatus
[bus
];
990 if (rstat
!= LUN_OK
) {
991 xpt_print_path(ccb
->ccb_h
.path
);
992 isp_prt(isp
, ISP_LOGWARN
,
993 "DISABLE LUN returned 0x%x", rstat
);
996 if (are_any_luns_enabled(isp
, bus
) == 0) {
997 av
= isp_control(isp
, ISPCTL_TOGGLE_TMODE
, &av
);
999 isp_prt(isp
, ISP_LOGWARN
,
1000 "disable target mode on channel %d failed",
1004 isp
->isp_osinfo
.tmflags
[bus
] &= ~TM_TMODE_ENABLED
;
1005 xpt_print_path(ccb
->ccb_h
.path
);
1006 isp_prt(isp
, ISP_LOGINFO
,
1007 "Target Mode disabled on channel %d", bus
);
1012 isp_vsema_rqe(isp
, bus
);
1014 if (rstat
!= LUN_OK
) {
1015 xpt_print_path(ccb
->ccb_h
.path
);
1016 isp_prt(isp
, ISP_LOGWARN
,
1017 "lun %sable failed", (cel
->enable
) ? "en" : "dis");
1018 ccb
->ccb_h
.status
= CAM_REQ_CMP_ERR
;
1019 rls_lun_statep(isp
, tptr
);
1021 destroy_lun_state(isp
, tptr
);
1023 xpt_print_path(ccb
->ccb_h
.path
);
1024 isp_prt(isp
, ISP_LOGINFO
, lfmt
,
1025 (cel
->enable
) ? "en" : "dis", bus
);
1026 rls_lun_statep(isp
, tptr
);
1027 if (cel
->enable
== 0) {
1028 destroy_lun_state(isp
, tptr
);
1030 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
1035 isp_abort_tgt_ccb(struct ispsoftc
*isp
, union ccb
*ccb
)
1038 struct ccb_hdr_slist
*lp
;
1039 struct ccb_hdr
*curelm
;
1041 union ccb
*accb
= ccb
->cab
.abort_ccb
;
1043 if (accb
->ccb_h
.target_id
!= CAM_TARGET_WILDCARD
) {
1044 if (IS_FC(isp
) && (accb
->ccb_h
.target_id
!=
1045 ((fcparam
*) isp
->isp_param
)->isp_loopid
)) {
1046 return (CAM_PATH_INVALID
);
1047 } else if (IS_SCSI(isp
) && (accb
->ccb_h
.target_id
!=
1048 ((sdparam
*) isp
->isp_param
)->isp_initiator_id
)) {
1049 return (CAM_PATH_INVALID
);
1052 tptr
= get_lun_statep(isp
, XS_CHANNEL(ccb
), accb
->ccb_h
.target_lun
);
1054 return (CAM_PATH_INVALID
);
1056 if (accb
->ccb_h
.func_code
== XPT_ACCEPT_TARGET_IO
) {
1058 } else if (accb
->ccb_h
.func_code
== XPT_IMMED_NOTIFY
) {
1061 rls_lun_statep(isp
, tptr
);
1062 return (CAM_UA_ABORT
);
1064 curelm
= SLIST_FIRST(lp
);
1066 if (curelm
== &accb
->ccb_h
) {
1068 SLIST_REMOVE_HEAD(lp
, sim_links
.sle
);
1070 while(curelm
!= NULL
) {
1071 struct ccb_hdr
*nextelm
;
1073 nextelm
= SLIST_NEXT(curelm
, sim_links
.sle
);
1074 if (nextelm
== &accb
->ccb_h
) {
1076 SLIST_NEXT(curelm
, sim_links
.sle
) =
1077 SLIST_NEXT(nextelm
, sim_links
.sle
);
1083 rls_lun_statep(isp
, tptr
);
1085 accb
->ccb_h
.status
= CAM_REQ_ABORTED
;
1086 return (CAM_REQ_CMP
);
1088 return(CAM_PATH_INVALID
);
1092 isp_target_start_ctio(struct ispsoftc
*isp
, union ccb
*ccb
)
1095 struct ccb_scsiio
*cso
= &ccb
->csio
;
1096 u_int16_t
*hp
, save_handle
;
1097 u_int16_t nxti
, optr
;
1098 u_int8_t local
[QENTRY_LEN
];
1101 if (isp_getrqentry(isp
, &nxti
, &optr
, &qe
)) {
1102 xpt_print_path(ccb
->ccb_h
.path
);
1103 kprintf("Request Queue Overflow in isp_target_start_ctio\n");
1104 return (CAM_RESRC_UNAVAIL
);
1106 bzero(local
, QENTRY_LEN
);
1109 * We're either moving data or completing a command here.
1113 atio_private_data_t
*atp
;
1114 ct2_entry_t
*cto
= (ct2_entry_t
*) local
;
1116 cto
->ct_header
.rqs_entry_type
= RQSTYPE_CTIO2
;
1117 cto
->ct_header
.rqs_entry_count
= 1;
1118 cto
->ct_iid
= cso
->init_id
;
1119 if ((FCPARAM(isp
)->isp_fwattr
& ISP_FW_ATTR_SCCLUN
) == 0) {
1120 cto
->ct_lun
= ccb
->ccb_h
.target_lun
;
1123 atp
= isp_get_atpd(isp
, cso
->tag_id
);
1125 isp_prt(isp
, ISP_LOGERR
,
1126 "cannot find private data adjunct for tag %x",
1131 cto
->ct_rxid
= cso
->tag_id
;
1132 if (cso
->dxfer_len
== 0) {
1133 cto
->ct_flags
|= CT2_FLAG_MODE1
| CT2_NO_DATA
;
1134 if (ccb
->ccb_h
.flags
& CAM_SEND_STATUS
) {
1135 cto
->ct_flags
|= CT2_SENDSTATUS
;
1136 cto
->rsp
.m1
.ct_scsi_status
= cso
->scsi_status
;
1138 atp
->orig_datalen
- atp
->bytes_xfered
;
1139 if (cto
->ct_resid
< 0) {
1140 cto
->rsp
.m1
.ct_scsi_status
|=
1142 } else if (cto
->ct_resid
> 0) {
1143 cto
->rsp
.m1
.ct_scsi_status
|=
1147 if ((ccb
->ccb_h
.flags
& CAM_SEND_SENSE
) != 0) {
1148 int m
= min(cso
->sense_len
, MAXRESPLEN
);
1149 bcopy(&cso
->sense_data
, cto
->rsp
.m1
.ct_resp
, m
);
1150 cto
->rsp
.m1
.ct_senselen
= m
;
1151 cto
->rsp
.m1
.ct_scsi_status
|= CT2_SNSLEN_VALID
;
1154 cto
->ct_flags
|= CT2_FLAG_MODE0
;
1155 if ((cso
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_IN
) {
1156 cto
->ct_flags
|= CT2_DATA_IN
;
1158 cto
->ct_flags
|= CT2_DATA_OUT
;
1160 cto
->ct_reloff
= atp
->bytes_xfered
;
1161 if ((ccb
->ccb_h
.flags
& CAM_SEND_STATUS
) != 0) {
1162 cto
->ct_flags
|= CT2_SENDSTATUS
;
1163 cto
->rsp
.m0
.ct_scsi_status
= cso
->scsi_status
;
1166 (atp
->bytes_xfered
+ cso
->dxfer_len
);
1167 if (cto
->ct_resid
< 0) {
1168 cto
->rsp
.m0
.ct_scsi_status
|=
1170 } else if (cto
->ct_resid
> 0) {
1171 cto
->rsp
.m0
.ct_scsi_status
|=
1175 atp
->last_xframt
= cso
->dxfer_len
;
1178 * If we're sending data and status back together,
1179 * we can't also send back sense data as well.
1181 ccb
->ccb_h
.flags
&= ~CAM_SEND_SENSE
;
1184 if (cto
->ct_flags
& CT2_SENDSTATUS
) {
1185 isp_prt(isp
, ISP_LOGTDEBUG0
,
1186 "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
1187 cto
->ct_rxid
, cso
->scsi_status
, atp
->orig_datalen
,
1188 cso
->dxfer_len
, cto
->ct_resid
);
1189 cto
->ct_flags
|= CT2_CCINCR
;
1190 atp
->state
= ATPD_STATE_LAST_CTIO
;
1192 atp
->state
= ATPD_STATE_CTIO
;
1193 cto
->ct_timeout
= 10;
1194 hp
= &cto
->ct_syshandle
;
1196 ct_entry_t
*cto
= (ct_entry_t
*) local
;
1198 cto
->ct_header
.rqs_entry_type
= RQSTYPE_CTIO
;
1199 cto
->ct_header
.rqs_entry_count
= 1;
1200 cto
->ct_iid
= cso
->init_id
;
1201 cto
->ct_iid
|= XS_CHANNEL(ccb
) << 7;
1202 cto
->ct_tgt
= ccb
->ccb_h
.target_id
;
1203 cto
->ct_lun
= ccb
->ccb_h
.target_lun
;
1204 cto
->ct_fwhandle
= AT_GET_HANDLE(cso
->tag_id
);
1205 if (AT_HAS_TAG(cso
->tag_id
)) {
1206 cto
->ct_tag_val
= (u_int8_t
) AT_GET_TAG(cso
->tag_id
);
1207 cto
->ct_flags
|= CT_TQAE
;
1209 if (ccb
->ccb_h
.flags
& CAM_DIS_DISCONNECT
) {
1210 cto
->ct_flags
|= CT_NODISC
;
1212 if (cso
->dxfer_len
== 0) {
1213 cto
->ct_flags
|= CT_NO_DATA
;
1214 } else if ((cso
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_IN
) {
1215 cto
->ct_flags
|= CT_DATA_IN
;
1217 cto
->ct_flags
|= CT_DATA_OUT
;
1219 if (ccb
->ccb_h
.flags
& CAM_SEND_STATUS
) {
1220 cto
->ct_flags
|= CT_SENDSTATUS
|CT_CCINCR
;
1221 cto
->ct_scsi_status
= cso
->scsi_status
;
1222 cto
->ct_resid
= cso
->resid
;
1223 isp_prt(isp
, ISP_LOGTDEBUG0
,
1224 "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
1225 cto
->ct_fwhandle
, cso
->scsi_status
, cso
->resid
,
1228 ccb
->ccb_h
.flags
&= ~CAM_SEND_SENSE
;
1229 cto
->ct_timeout
= 10;
1230 hp
= &cto
->ct_syshandle
;
1233 if (isp_save_xs(isp
, (XS_T
*)ccb
, hp
)) {
1234 xpt_print_path(ccb
->ccb_h
.path
);
1235 kprintf("No XFLIST pointers for isp_target_start_ctio\n");
1236 return (CAM_RESRC_UNAVAIL
);
1241 * Call the dma setup routines for this entry (and any subsequent
1242 * CTIOs) if there's data to move, and then tell the f/w it's got
1243 * new things to play with. As with isp_start's usage of DMA setup,
1244 * any swizzling is done in the machine dependent layer. Because
1245 * of this, we put the request onto the queue area first in native
1251 switch (ISP_DMASETUP(isp
, cso
, (ispreq_t
*) local
, &nxti
, optr
)) {
1253 ISP_ADD_REQUEST(isp
, nxti
);
1254 return (CAM_REQ_INPROG
);
1257 ccb
->ccb_h
.status
= CAM_RESRC_UNAVAIL
;
1258 isp_destroy_handle(isp
, save_handle
);
1259 return (CAM_RESRC_UNAVAIL
);
1262 isp_destroy_handle(isp
, save_handle
);
1263 return (XS_ERR(ccb
));
1268 isp_refire_putback_atio(void *arg
)
1271 isp_target_putback_atio(arg
);
1276 isp_target_putback_atio(union ccb
*ccb
)
1278 struct ispsoftc
*isp
;
1279 struct ccb_scsiio
*cso
;
1280 u_int16_t nxti
, optr
;
1285 if (isp_getrqentry(isp
, &nxti
, &optr
, &qe
)) {
1286 (void) timeout(isp_refire_putback_atio
, ccb
, 10);
1287 isp_prt(isp
, ISP_LOGWARN
,
1288 "isp_target_putback_atio: Request Queue Overflow");
1291 bzero(qe
, QENTRY_LEN
);
1294 at2_entry_t local
, *at
= &local
;
1295 MEMZERO(at
, sizeof (at2_entry_t
));
1296 at
->at_header
.rqs_entry_type
= RQSTYPE_ATIO2
;
1297 at
->at_header
.rqs_entry_count
= 1;
1298 if ((FCPARAM(isp
)->isp_fwattr
& ISP_FW_ATTR_SCCLUN
) != 0) {
1299 at
->at_scclun
= (uint16_t) ccb
->ccb_h
.target_lun
;
1301 at
->at_lun
= (uint8_t) ccb
->ccb_h
.target_lun
;
1303 at
->at_status
= CT_OK
;
1304 at
->at_rxid
= cso
->tag_id
;
1305 at
->at_iid
= cso
->ccb_h
.target_id
;
1306 isp_put_atio2(isp
, at
, qe
);
1308 at_entry_t local
, *at
= &local
;
1309 MEMZERO(at
, sizeof (at_entry_t
));
1310 at
->at_header
.rqs_entry_type
= RQSTYPE_ATIO
;
1311 at
->at_header
.rqs_entry_count
= 1;
1312 at
->at_iid
= cso
->init_id
;
1313 at
->at_iid
|= XS_CHANNEL(ccb
) << 7;
1314 at
->at_tgt
= cso
->ccb_h
.target_id
;
1315 at
->at_lun
= cso
->ccb_h
.target_lun
;
1316 at
->at_status
= CT_OK
;
1317 at
->at_tag_val
= AT_GET_TAG(cso
->tag_id
);
1318 at
->at_handle
= AT_GET_HANDLE(cso
->tag_id
);
1319 isp_put_atio(isp
, at
, qe
);
1321 ISP_TDQE(isp
, "isp_target_putback_atio", (int) optr
, qe
);
1322 ISP_ADD_REQUEST(isp
, nxti
);
1323 isp_complete_ctio(ccb
);
1327 isp_complete_ctio(union ccb
*ccb
)
1329 if ((ccb
->ccb_h
.status
& CAM_STATUS_MASK
) == CAM_REQ_INPROG
) {
1330 ccb
->ccb_h
.status
|= CAM_REQ_CMP
;
1332 ccb
->ccb_h
.status
&= ~CAM_SIM_QUEUED
;
1337 * Handle ATIO stuff that the generic code can't.
1338 * This means handling CDBs.
1342 isp_handle_platform_atio(struct ispsoftc
*isp
, at_entry_t
*aep
)
1345 int status
, bus
, iswildcard
;
1346 struct ccb_accept_tio
*atiop
;
1349 * The firmware status (except for the QLTM_SVALID bit)
1350 * indicates why this ATIO was sent to us.
1352 * If QLTM_SVALID is set, the firware has recommended Sense Data.
1354 * If the DISCONNECTS DISABLED bit is set in the flags field,
1355 * we're still connected on the SCSI bus.
1357 status
= aep
->at_status
;
1358 if ((status
& ~QLTM_SVALID
) == AT_PHASE_ERROR
) {
1360 * Bus Phase Sequence error. We should have sense data
1361 * suggested by the f/w. I'm not sure quite yet what
1362 * to do about this for CAM.
1364 isp_prt(isp
, ISP_LOGWARN
, "PHASE ERROR");
1365 isp_endcmd(isp
, aep
, SCSI_STATUS_BUSY
, 0);
1368 if ((status
& ~QLTM_SVALID
) != AT_CDB
) {
1369 isp_prt(isp
, ISP_LOGWARN
, "bad atio (0x%x) leaked to platform",
1371 isp_endcmd(isp
, aep
, SCSI_STATUS_BUSY
, 0);
1375 bus
= GET_BUS_VAL(aep
->at_iid
);
1376 tptr
= get_lun_statep(isp
, bus
, aep
->at_lun
);
1378 tptr
= get_lun_statep(isp
, bus
, CAM_LUN_WILDCARD
);
1386 * Because we can't autofeed sense data back with
1387 * a command for parallel SCSI, we can't give back
1388 * a CHECK CONDITION. We'll give back a BUSY status
1389 * instead. This works out okay because the only
1390 * time we should, in fact, get this, is in the
1391 * case that somebody configured us without the
1392 * blackhole driver, so they get what they deserve.
1394 isp_endcmd(isp
, aep
, SCSI_STATUS_BUSY
, 0);
1398 atiop
= (struct ccb_accept_tio
*) SLIST_FIRST(&tptr
->atios
);
1399 if (atiop
== NULL
) {
1401 * Because we can't autofeed sense data back with
1402 * a command for parallel SCSI, we can't give back
1403 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1404 * instead. This works out okay because the only time we
1405 * should, in fact, get this, is in the case that we've
1408 xpt_print_path(tptr
->owner
);
1409 isp_prt(isp
, ISP_LOGWARN
,
1410 "no ATIOS for lun %d from initiator %d on channel %d",
1411 aep
->at_lun
, GET_IID_VAL(aep
->at_iid
), bus
);
1412 if (aep
->at_flags
& AT_TQAE
)
1413 isp_endcmd(isp
, aep
, SCSI_STATUS_QUEUE_FULL
, 0);
1415 isp_endcmd(isp
, aep
, SCSI_STATUS_BUSY
, 0);
1416 rls_lun_statep(isp
, tptr
);
1419 SLIST_REMOVE_HEAD(&tptr
->atios
, sim_links
.sle
);
1421 atiop
->ccb_h
.target_id
= aep
->at_tgt
;
1422 atiop
->ccb_h
.target_lun
= aep
->at_lun
;
1424 if (aep
->at_flags
& AT_NODISC
) {
1425 atiop
->ccb_h
.flags
= CAM_DIS_DISCONNECT
;
1427 atiop
->ccb_h
.flags
= 0;
1430 if (status
& QLTM_SVALID
) {
1431 size_t amt
= imin(QLTM_SENSELEN
, sizeof (atiop
->sense_data
));
1432 atiop
->sense_len
= amt
;
1433 MEMCPY(&atiop
->sense_data
, aep
->at_sense
, amt
);
1435 atiop
->sense_len
= 0;
1438 atiop
->init_id
= GET_IID_VAL(aep
->at_iid
);
1439 atiop
->cdb_len
= aep
->at_cdblen
;
1440 MEMCPY(atiop
->cdb_io
.cdb_bytes
, aep
->at_cdb
, aep
->at_cdblen
);
1441 atiop
->ccb_h
.status
= CAM_CDB_RECVD
;
1443 * Construct a tag 'id' based upon tag value (which may be 0..255)
1444 * and the handle (which we have to preserve).
1446 AT_MAKE_TAGID(atiop
->tag_id
, aep
);
1447 if (aep
->at_flags
& AT_TQAE
) {
1448 atiop
->tag_action
= aep
->at_tag_type
;
1449 atiop
->ccb_h
.status
|= CAM_TAG_ACTION_VALID
;
1451 xpt_done((union ccb
*)atiop
);
1452 isp_prt(isp
, ISP_LOGTDEBUG0
,
1453 "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
1454 aep
->at_handle
, aep
->at_cdb
[0] & 0xff, GET_BUS_VAL(aep
->at_iid
),
1455 GET_IID_VAL(aep
->at_iid
), aep
->at_lun
, aep
->at_tag_val
& 0xff,
1456 aep
->at_tag_type
, (aep
->at_flags
& AT_NODISC
)?
1457 "nondisc" : "disconnecting");
1458 rls_lun_statep(isp
, tptr
);
1463 isp_handle_platform_atio2(struct ispsoftc
*isp
, at2_entry_t
*aep
)
1467 struct ccb_accept_tio
*atiop
;
1468 atio_private_data_t
*atp
;
1471 * The firmware status (except for the QLTM_SVALID bit)
1472 * indicates why this ATIO was sent to us.
1474 * If QLTM_SVALID is set, the firware has recommended Sense Data.
1476 if ((aep
->at_status
& ~QLTM_SVALID
) != AT_CDB
) {
1477 isp_prt(isp
, ISP_LOGWARN
,
1478 "bogus atio (0x%x) leaked to platform", aep
->at_status
);
1479 isp_endcmd(isp
, aep
, SCSI_STATUS_BUSY
, 0);
1483 if ((FCPARAM(isp
)->isp_fwattr
& ISP_FW_ATTR_SCCLUN
) != 0) {
1484 lun
= aep
->at_scclun
;
1488 tptr
= get_lun_statep(isp
, 0, lun
);
1490 isp_prt(isp
, ISP_LOGWARN
, "no state pointer for lun %d", lun
);
1491 tptr
= get_lun_statep(isp
, 0, CAM_LUN_WILDCARD
);
1496 * What we'd like to know is whether or not we have a listener
1497 * upstream that really hasn't configured yet. If we do, then
1498 * we can give a more sensible reply here. If not, then we can
1499 * reject this out of hand.
1501 * Choices for what to send were
1503 * Not Ready, Unit Not Self-Configured Yet
1506 * for the former and
1508 * Illegal Request, Logical Unit Not Supported
1513 * We used to decide whether there was at least one listener
1514 * based upon whether the black hole driver was configured.
1515 * However, recent config(8) changes have made this hard to do
1519 isp_endcmd(isp
, aep
, SCSI_STATUS_BUSY
, 0);
1523 atp
= isp_get_atpd(isp
, 0);
1524 atiop
= (struct ccb_accept_tio
*) SLIST_FIRST(&tptr
->atios
);
1525 if (atiop
== NULL
|| atp
== NULL
) {
1527 * Because we can't autofeed sense data back with
1528 * a command for parallel SCSI, we can't give back
1529 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1530 * instead. This works out okay because the only time we
1531 * should, in fact, get this, is in the case that we've
1534 xpt_print_path(tptr
->owner
);
1535 isp_prt(isp
, ISP_LOGWARN
,
1536 "no %s for lun %d from initiator %d",
1537 (atp
== NULL
&& atiop
== NULL
)? "ATIO2s *or* ATPS" :
1538 ((atp
== NULL
)? "ATPs" : "ATIO2s"), lun
, aep
->at_iid
);
1539 rls_lun_statep(isp
, tptr
);
1540 isp_endcmd(isp
, aep
, SCSI_STATUS_QUEUE_FULL
, 0);
1543 atp
->state
= ATPD_STATE_ATIO
;
1544 SLIST_REMOVE_HEAD(&tptr
->atios
, sim_links
.sle
);
1546 isp_prt(isp
, ISP_LOGTDEBUG0
, "Take FREE ATIO2 lun %d, count now %d",
1547 lun
, tptr
->atio_count
);
1549 if (tptr
== &isp
->isp_osinfo
.tsdflt
[0]) {
1550 atiop
->ccb_h
.target_id
=
1551 ((fcparam
*)isp
->isp_param
)->isp_loopid
;
1552 atiop
->ccb_h
.target_lun
= lun
;
1555 * We don't get 'suggested' sense data as we do with SCSI cards.
1557 atiop
->sense_len
= 0;
1559 atiop
->init_id
= aep
->at_iid
;
1560 atiop
->cdb_len
= ATIO2_CDBLEN
;
1561 MEMCPY(atiop
->cdb_io
.cdb_bytes
, aep
->at_cdb
, ATIO2_CDBLEN
);
1562 atiop
->ccb_h
.status
= CAM_CDB_RECVD
;
1563 atiop
->tag_id
= aep
->at_rxid
;
1564 switch (aep
->at_taskflags
& ATIO2_TC_ATTR_MASK
) {
1565 case ATIO2_TC_ATTR_SIMPLEQ
:
1566 atiop
->tag_action
= MSG_SIMPLE_Q_TAG
;
1568 case ATIO2_TC_ATTR_HEADOFQ
:
1569 atiop
->tag_action
= MSG_HEAD_OF_Q_TAG
;
1571 case ATIO2_TC_ATTR_ORDERED
:
1572 atiop
->tag_action
= MSG_ORDERED_Q_TAG
;
1574 case ATIO2_TC_ATTR_ACAQ
: /* ?? */
1575 case ATIO2_TC_ATTR_UNTAGGED
:
1577 atiop
->tag_action
= 0;
1580 atiop
->ccb_h
.flags
= CAM_TAG_ACTION_VALID
;
1582 atp
->tag
= atiop
->tag_id
;
1584 atp
->orig_datalen
= aep
->at_datalen
;
1585 atp
->last_xframt
= 0;
1586 atp
->bytes_xfered
= 0;
1587 atp
->state
= ATPD_STATE_CAM
;
1588 xpt_done((union ccb
*)atiop
);
1590 isp_prt(isp
, ISP_LOGTDEBUG0
,
1591 "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
1592 aep
->at_rxid
, aep
->at_cdb
[0] & 0xff, aep
->at_iid
,
1593 lun
, aep
->at_taskflags
, aep
->at_datalen
);
1594 rls_lun_statep(isp
, tptr
);
1599 isp_handle_platform_ctio(struct ispsoftc
*isp
, void *arg
)
1602 int sentstatus
, ok
, notify_cam
, resid
= 0;
1606 * CTIO and CTIO2 are close enough....
1609 ccb
= (union ccb
*) isp_find_xs(isp
, ((ct_entry_t
*)arg
)->ct_syshandle
);
1610 KASSERT((ccb
!= NULL
), ("null ccb in isp_handle_platform_ctio"));
1611 isp_destroy_handle(isp
, ((ct_entry_t
*)arg
)->ct_syshandle
);
1614 ct2_entry_t
*ct
= arg
;
1615 atio_private_data_t
*atp
= isp_get_atpd(isp
, ct
->ct_rxid
);
1617 isp_prt(isp
, ISP_LOGERR
,
1618 "cannot find adjunct for %x after I/O",
1622 sentstatus
= ct
->ct_flags
& CT2_SENDSTATUS
;
1623 ok
= (ct
->ct_status
& ~QLTM_SVALID
) == CT_OK
;
1624 if (ok
&& sentstatus
&& (ccb
->ccb_h
.flags
& CAM_SEND_SENSE
)) {
1625 ccb
->ccb_h
.status
|= CAM_SENT_SENSE
;
1627 notify_cam
= ct
->ct_header
.rqs_seqno
& 0x1;
1628 if ((ct
->ct_flags
& CT2_DATAMASK
) != CT2_NO_DATA
) {
1629 resid
= ct
->ct_resid
;
1630 atp
->bytes_xfered
+= (atp
->last_xframt
- resid
);
1631 atp
->last_xframt
= 0;
1633 if (sentstatus
|| !ok
) {
1636 isp_prt(isp
, ok
? ISP_LOGTDEBUG0
: ISP_LOGWARN
,
1637 "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
1638 ct
->ct_rxid
, ct
->ct_status
, ct
->ct_flags
,
1639 (ccb
->ccb_h
.status
& CAM_SENT_SENSE
) != 0,
1640 resid
, sentstatus
? "FIN" : "MID");
1643 /* XXX: should really come after isp_complete_ctio */
1644 atp
->state
= ATPD_STATE_PDON
;
1646 ct_entry_t
*ct
= arg
;
1647 sentstatus
= ct
->ct_flags
& CT_SENDSTATUS
;
1648 ok
= (ct
->ct_status
& ~QLTM_SVALID
) == CT_OK
;
1650 * We *ought* to be able to get back to the original ATIO
1651 * here, but for some reason this gets lost. It's just as
1652 * well because it's squirrelled away as part of periph
1655 * We can live without it as long as we continue to use
1656 * the auto-replenish feature for CTIOs.
1658 notify_cam
= ct
->ct_header
.rqs_seqno
& 0x1;
1659 if (ct
->ct_status
& QLTM_SVALID
) {
1660 char *sp
= (char *)ct
;
1661 sp
+= CTIO_SENSE_OFFSET
;
1662 ccb
->csio
.sense_len
=
1663 min(sizeof (ccb
->csio
.sense_data
), QLTM_SENSELEN
);
1664 MEMCPY(&ccb
->csio
.sense_data
, sp
, ccb
->csio
.sense_len
);
1665 ccb
->ccb_h
.status
|= CAM_AUTOSNS_VALID
;
1667 if ((ct
->ct_flags
& CT_DATAMASK
) != CT_NO_DATA
) {
1668 resid
= ct
->ct_resid
;
1670 isp_prt(isp
, ISP_LOGTDEBUG0
,
1671 "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
1672 ct
->ct_fwhandle
, ct
->ct_tag_val
, ct
->ct_iid
, ct
->ct_lun
,
1673 ct
->ct_status
, ct
->ct_flags
, resid
,
1674 sentstatus
? "FIN" : "MID");
1675 tval
= ct
->ct_fwhandle
;
1677 ccb
->csio
.resid
+= resid
;
1680 * We're here either because intermediate data transfers are done
1681 * and/or the final status CTIO (which may have joined with a
1682 * Data Transfer) is done.
1684 * In any case, for this platform, the upper layers figure out
1685 * what to do next, so all we do here is collect status and
1686 * pass information along. Any DMA handles have already been
1689 if (notify_cam
== 0) {
1690 isp_prt(isp
, ISP_LOGTDEBUG0
, " INTER CTIO[0x%x] done", tval
);
1694 isp_prt(isp
, ISP_LOGTDEBUG0
, "%s CTIO[0x%x] done",
1695 (sentstatus
)? " FINAL " : "MIDTERM ", tval
);
1698 isp_target_putback_atio(ccb
);
1700 isp_complete_ctio(ccb
);
1707 isp_handle_platform_notify_scsi(struct ispsoftc
*isp
, in_entry_t
*inp
)
1709 return (0); /* XXXX */
1713 isp_handle_platform_notify_fc(struct ispsoftc
*isp
, in_fcentry_t
*inp
)
1716 switch (inp
->in_status
) {
1717 case IN_PORT_LOGOUT
:
1718 isp_prt(isp
, ISP_LOGWARN
, "port logout of iid %d",
1721 case IN_PORT_CHANGED
:
1722 isp_prt(isp
, ISP_LOGWARN
, "port changed for iid %d",
1725 case IN_GLOBAL_LOGO
:
1726 isp_prt(isp
, ISP_LOGINFO
, "all ports logged out");
1730 atio_private_data_t
*atp
= isp_get_atpd(isp
, inp
->in_seqid
);
1731 struct ccb_immed_notify
*inot
= NULL
;
1734 tstate_t
*tptr
= get_lun_statep(isp
, 0, atp
->lun
);
1736 inot
= (struct ccb_immed_notify
*)
1737 SLIST_FIRST(&tptr
->inots
);
1739 SLIST_REMOVE_HEAD(&tptr
->inots
,
1743 isp_prt(isp
, ISP_LOGWARN
,
1744 "abort task RX_ID %x IID %d state %d",
1745 inp
->in_seqid
, inp
->in_iid
, atp
->state
);
1747 isp_prt(isp
, ISP_LOGWARN
,
1748 "abort task RX_ID %x from iid %d, state unknown",
1749 inp
->in_seqid
, inp
->in_iid
);
1752 inot
->initiator_id
= inp
->in_iid
;
1753 inot
->sense_len
= 0;
1754 inot
->message_args
[0] = MSG_ABORT_TAG
;
1755 inot
->message_args
[1] = inp
->in_seqid
& 0xff;
1756 inot
->message_args
[2] = (inp
->in_seqid
>> 8) & 0xff;
1757 inot
->ccb_h
.status
= CAM_MESSAGE_RECV
|CAM_DEV_QFRZN
;
1758 xpt_done((union ccb
*)inot
);
1770 isp_cam_async(void *cbarg
, u_int32_t code
, struct cam_path
*path
, void *arg
)
1772 struct cam_sim
*sim
;
1773 struct ispsoftc
*isp
;
1775 sim
= (struct cam_sim
*)cbarg
;
1776 isp
= (struct ispsoftc
*) cam_sim_softc(sim
);
1778 case AC_LOST_DEVICE
:
1780 u_int16_t oflags
, nflags
;
1781 sdparam
*sdp
= isp
->isp_param
;
1784 tgt
= xpt_path_target_id(path
);
1786 sdp
+= cam_sim_bus(sim
);
1788 nflags
= sdp
->isp_devparam
[tgt
].nvrm_flags
;
1789 #ifndef ISP_TARGET_MODE
1790 nflags
&= DPARM_SAFE_DFLT
;
1791 if (isp
->isp_loaded_fw
) {
1792 nflags
|= DPARM_NARROW
| DPARM_ASYNC
;
1795 nflags
= DPARM_DEFAULT
;
1797 oflags
= sdp
->isp_devparam
[tgt
].goal_flags
;
1798 sdp
->isp_devparam
[tgt
].goal_flags
= nflags
;
1799 sdp
->isp_devparam
[tgt
].dev_update
= 1;
1800 isp
->isp_update
|= (1 << cam_sim_bus(sim
));
1801 (void) isp_control(isp
,
1802 ISPCTL_UPDATE_PARAMS
, NULL
);
1803 sdp
->isp_devparam
[tgt
].goal_flags
= oflags
;
1809 isp_prt(isp
, ISP_LOGWARN
, "isp_cam_async: Code 0x%x", code
);
1815 isp_poll(struct cam_sim
*sim
)
1817 struct ispsoftc
*isp
= cam_sim_softc(sim
);
1818 u_int16_t isr
, sema
, mbox
;
1821 if (ISP_READ_ISR(isp
, &isr
, &sema
, &mbox
)) {
1822 isp_intr(isp
, isr
, sema
, mbox
);
1829 isp_watchdog(void *arg
)
1832 struct ispsoftc
*isp
= XS_ISP(xs
);
1837 * We've decided this command is dead. Make sure we're not trying
1838 * to kill a command that's already dead by getting it's handle and
1839 * and seeing whether it's still alive.
1842 iok
= isp
->isp_osinfo
.intsok
;
1843 isp
->isp_osinfo
.intsok
= 0;
1844 handle
= isp_find_handle(isp
, xs
);
1846 u_int16_t isr
, sema
, mbox
;
1848 if (XS_CMD_DONE_P(xs
)) {
1849 isp_prt(isp
, ISP_LOGDEBUG1
,
1850 "watchdog found done cmd (handle 0x%x)", handle
);
1855 if (XS_CMD_WDOG_P(xs
)) {
1856 isp_prt(isp
, ISP_LOGDEBUG2
,
1857 "recursive watchdog (handle 0x%x)", handle
);
1863 if (ISP_READ_ISR(isp
, &isr
, &sema
, &mbox
)) {
1864 isp_intr(isp
, isr
, sema
, mbox
);
1866 if (XS_CMD_DONE_P(xs
)) {
1867 isp_prt(isp
, ISP_LOGDEBUG2
,
1868 "watchdog cleanup for handle 0x%x", handle
);
1869 xpt_done((union ccb
*) xs
);
1870 } else if (XS_CMD_GRACE_P(xs
)) {
1872 * Make sure the command is *really* dead before we
1873 * release the handle (and DMA resources) for reuse.
1875 (void) isp_control(isp
, ISPCTL_ABORT_CMD
, arg
);
1878 * After this point, the comamnd is really dead.
1880 if (XS_XFRLEN(xs
)) {
1881 ISP_DMAFREE(isp
, xs
, handle
);
1883 isp_destroy_handle(isp
, handle
);
1884 xpt_print_path(xs
->ccb_h
.path
);
1885 isp_prt(isp
, ISP_LOGWARN
,
1886 "watchdog timeout for handle 0x%x", handle
);
1887 XS_SETERR(xs
, CAM_CMD_TIMEOUT
);
1891 u_int16_t nxti
, optr
;
1892 ispreq_t local
, *mp
= &local
, *qe
;
1895 callout_reset(&xs
->ccb_h
.timeout_ch
, hz
,
1897 if (isp_getrqentry(isp
, &nxti
, &optr
, (void **) &qe
)) {
1902 MEMZERO((void *) mp
, sizeof (*mp
));
1903 mp
->req_header
.rqs_entry_count
= 1;
1904 mp
->req_header
.rqs_entry_type
= RQSTYPE_MARKER
;
1905 mp
->req_modifier
= SYNC_ALL
;
1906 mp
->req_target
= XS_CHANNEL(xs
) << 7;
1907 isp_put_request(isp
, mp
, qe
);
1908 ISP_ADD_REQUEST(isp
, nxti
);
1911 isp_prt(isp
, ISP_LOGDEBUG2
, "watchdog with no command");
1913 isp
->isp_osinfo
.intsok
= iok
;
1918 isp_kthread(void *arg
)
1920 struct ispsoftc
*isp
= arg
;
1923 isp
->isp_osinfo
.intsok
= 1;
1926 * The first loop is for our usage where we have yet to have
1927 * gotten good fibre channel state.
1932 isp_prt(isp
, ISP_LOGDEBUG0
, "kthread: checking FC state");
1933 while (isp_fc_runstate(isp
, 2 * 1000000) != 0) {
1934 isp_prt(isp
, ISP_LOGDEBUG0
, "kthread: FC state ungood");
1935 if (FCPARAM(isp
)->isp_fwstate
!= FW_READY
||
1936 FCPARAM(isp
)->isp_loopstate
< LOOP_PDB_RCVD
) {
1937 if (FCPARAM(isp
)->loop_seen_once
== 0 ||
1938 isp
->isp_osinfo
.ktmature
== 0) {
1942 tsleep(isp_kthread
, 0, "isp_fcthrd", hz
);
1947 * Even if we didn't get good loop state we may be
1948 * unfreezing the SIMQ so that we can kill off
1949 * commands (if we've never seen loop before, for example).
1951 isp
->isp_osinfo
.ktmature
= 1;
1952 wasfrozen
= isp
->isp_osinfo
.simqfrozen
& SIMQFRZ_LOOPDOWN
;
1953 isp
->isp_osinfo
.simqfrozen
&= ~SIMQFRZ_LOOPDOWN
;
1954 if (wasfrozen
&& isp
->isp_osinfo
.simqfrozen
== 0) {
1955 isp_prt(isp
, ISP_LOGDEBUG0
, "kthread: releasing simq");
1956 ISPLOCK_2_CAMLOCK(isp
);
1957 xpt_release_simq(isp
->isp_sim
, 1);
1958 CAMLOCK_2_ISPLOCK(isp
);
1960 tsleep(&isp
->isp_osinfo
.kthread
, 0, "isp_fc_worker", 0);
1961 isp_prt(isp
, ISP_LOGDEBUG0
, "kthread: waiting until called");
1966 isp_action(struct cam_sim
*sim
, union ccb
*ccb
)
1968 int bus
, tgt
, error
;
1969 struct ispsoftc
*isp
;
1970 struct ccb_trans_settings
*cts
;
1972 CAM_DEBUG(ccb
->ccb_h
.path
, CAM_DEBUG_TRACE
, ("isp_action\n"));
1974 isp
= (struct ispsoftc
*)cam_sim_softc(sim
);
1975 ccb
->ccb_h
.sim_priv
.entries
[0].field
= 0;
1976 ccb
->ccb_h
.sim_priv
.entries
[1].ptr
= isp
;
1977 if (isp
->isp_state
!= ISP_RUNSTATE
&&
1978 ccb
->ccb_h
.func_code
== XPT_SCSI_IO
) {
1979 CAMLOCK_2_ISPLOCK(isp
);
1981 if (isp
->isp_state
!= ISP_INITSTATE
) {
1984 * Lie. Say it was a selection timeout.
1986 ccb
->ccb_h
.status
= CAM_SEL_TIMEOUT
| CAM_DEV_QFRZN
;
1987 xpt_freeze_devq(ccb
->ccb_h
.path
, 1);
1991 isp
->isp_state
= ISP_RUNSTATE
;
1992 ISPLOCK_2_CAMLOCK(isp
);
1994 isp_prt(isp
, ISP_LOGDEBUG2
, "isp_action code %x", ccb
->ccb_h
.func_code
);
1997 switch (ccb
->ccb_h
.func_code
) {
1998 case XPT_SCSI_IO
: /* Execute the requested I/O operation */
2000 * Do a couple of preliminary checks...
2002 if ((ccb
->ccb_h
.flags
& CAM_CDB_POINTER
) != 0) {
2003 if ((ccb
->ccb_h
.flags
& CAM_CDB_PHYS
) != 0) {
2004 ccb
->ccb_h
.status
= CAM_REQ_INVALID
;
2010 if (ccb
->ccb_h
.target_id
> (ISP_MAX_TARGETS(isp
) - 1)) {
2011 ccb
->ccb_h
.status
= CAM_PATH_INVALID
;
2012 } else if (ccb
->ccb_h
.target_lun
> (ISP_MAX_LUNS(isp
) - 1)) {
2013 ccb
->ccb_h
.status
= CAM_PATH_INVALID
;
2015 if (ccb
->ccb_h
.status
== CAM_PATH_INVALID
) {
2016 isp_prt(isp
, ISP_LOGERR
,
2017 "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
2018 ccb
->ccb_h
.target_id
, ccb
->ccb_h
.target_lun
);
2023 ((struct ccb_scsiio
*) ccb
)->scsi_status
= SCSI_STATUS_OK
;
2024 CAMLOCK_2_ISPLOCK(isp
);
2025 error
= isp_start((XS_T
*) ccb
);
2028 ccb
->ccb_h
.status
|= CAM_SIM_QUEUED
;
2029 if (ccb
->ccb_h
.timeout
!= CAM_TIME_INFINITY
) {
2030 u_int64_t ticks
= (u_int64_t
) hz
;
2031 if (ccb
->ccb_h
.timeout
== CAM_TIME_DEFAULT
)
2032 ticks
= 60 * 1000 * ticks
;
2034 ticks
= ccb
->ccb_h
.timeout
* hz
;
2035 ticks
= ((ticks
+ 999) / 1000) + hz
+ hz
;
2036 if (ticks
>= 0x80000000) {
2037 isp_prt(isp
, ISP_LOGERR
,
2038 "timeout overflow");
2041 callout_reset(&ccb
->ccb_h
.timeout_ch
, ticks
,
2044 ISPLOCK_2_CAMLOCK(isp
);
2048 * This can only happen for Fibre Channel
2050 KASSERT((IS_FC(isp
)), ("CMD_RQLATER for FC only"));
2051 if (FCPARAM(isp
)->loop_seen_once
== 0 &&
2052 isp
->isp_osinfo
.ktmature
) {
2053 ISPLOCK_2_CAMLOCK(isp
);
2054 XS_SETERR(ccb
, CAM_SEL_TIMEOUT
);
2058 wakeup(&isp
->isp_osinfo
.kthread
);
2059 isp_freeze_loopdown(isp
, "isp_action(RQLATER)");
2060 isp
->isp_osinfo
.simqfrozen
|= SIMQFRZ_LOOPDOWN
;
2061 XS_SETERR(ccb
, CAM_REQUEUE_REQ
);
2062 ISPLOCK_2_CAMLOCK(isp
);
2066 XS_SETERR(ccb
, CAM_REQUEUE_REQ
);
2067 ISPLOCK_2_CAMLOCK(isp
);
2071 isp_done((struct ccb_scsiio
*) ccb
);
2072 ISPLOCK_2_CAMLOCK(isp
);
2075 isp_prt(isp
, ISP_LOGERR
,
2076 "What's this? 0x%x at %d in file %s",
2077 error
, __LINE__
, __FILE__
);
2078 XS_SETERR(ccb
, CAM_REQ_CMP_ERR
);
2080 ISPLOCK_2_CAMLOCK(isp
);
2084 #ifdef ISP_TARGET_MODE
2085 case XPT_EN_LUN
: /* Enable LUN as a target */
2088 CAMLOCK_2_ISPLOCK(isp
);
2089 iok
= isp
->isp_osinfo
.intsok
;
2090 isp
->isp_osinfo
.intsok
= 0;
2091 isp_en_lun(isp
, ccb
);
2092 isp
->isp_osinfo
.intsok
= iok
;
2093 ISPLOCK_2_CAMLOCK(isp
);
2097 case XPT_NOTIFY_ACK
: /* recycle notify ack */
2098 case XPT_IMMED_NOTIFY
: /* Add Immediate Notify Resource */
2099 case XPT_ACCEPT_TARGET_IO
: /* Add Accept Target IO Resource */
2102 get_lun_statep(isp
, XS_CHANNEL(ccb
), ccb
->ccb_h
.target_lun
);
2104 ccb
->ccb_h
.status
= CAM_LUN_INVALID
;
2108 ccb
->ccb_h
.sim_priv
.entries
[0].field
= 0;
2109 ccb
->ccb_h
.sim_priv
.entries
[1].ptr
= isp
;
2110 ccb
->ccb_h
.flags
= 0;
2112 CAMLOCK_2_ISPLOCK(isp
);
2113 if (ccb
->ccb_h
.func_code
== XPT_ACCEPT_TARGET_IO
) {
2115 * Note that the command itself may not be done-
2116 * it may not even have had the first CTIO sent.
2119 isp_prt(isp
, ISP_LOGTDEBUG0
,
2120 "Put FREE ATIO2, lun %d, count now %d",
2121 ccb
->ccb_h
.target_lun
, tptr
->atio_count
);
2122 SLIST_INSERT_HEAD(&tptr
->atios
, &ccb
->ccb_h
,
2124 } else if (ccb
->ccb_h
.func_code
== XPT_IMMED_NOTIFY
) {
2125 SLIST_INSERT_HEAD(&tptr
->inots
, &ccb
->ccb_h
,
2130 rls_lun_statep(isp
, tptr
);
2131 ccb
->ccb_h
.status
= CAM_REQ_INPROG
;
2132 ISPLOCK_2_CAMLOCK(isp
);
2135 case XPT_CONT_TARGET_IO
:
2137 CAMLOCK_2_ISPLOCK(isp
);
2138 ccb
->ccb_h
.status
= isp_target_start_ctio(isp
, ccb
);
2139 if (ccb
->ccb_h
.status
!= CAM_REQ_INPROG
) {
2140 isp_prt(isp
, ISP_LOGWARN
,
2141 "XPT_CONT_TARGET_IO: status 0x%x",
2143 XS_SETERR(ccb
, CAM_REQUEUE_REQ
);
2144 ISPLOCK_2_CAMLOCK(isp
);
2147 ISPLOCK_2_CAMLOCK(isp
);
2148 ccb
->ccb_h
.status
|= CAM_SIM_QUEUED
;
2153 case XPT_RESET_DEV
: /* BDR the specified SCSI device */
2155 bus
= cam_sim_bus(xpt_path_sim(ccb
->ccb_h
.path
));
2156 tgt
= ccb
->ccb_h
.target_id
;
2159 CAMLOCK_2_ISPLOCK(isp
);
2160 error
= isp_control(isp
, ISPCTL_RESET_DEV
, &tgt
);
2161 ISPLOCK_2_CAMLOCK(isp
);
2163 ccb
->ccb_h
.status
= CAM_REQ_CMP_ERR
;
2165 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
2169 case XPT_ABORT
: /* Abort the specified CCB */
2171 union ccb
*accb
= ccb
->cab
.abort_ccb
;
2172 CAMLOCK_2_ISPLOCK(isp
);
2173 switch (accb
->ccb_h
.func_code
) {
2174 #ifdef ISP_TARGET_MODE
2175 case XPT_ACCEPT_TARGET_IO
:
2176 case XPT_IMMED_NOTIFY
:
2177 ccb
->ccb_h
.status
= isp_abort_tgt_ccb(isp
, ccb
);
2179 case XPT_CONT_TARGET_IO
:
2180 isp_prt(isp
, ISP_LOGERR
, "cannot abort CTIOs yet");
2181 ccb
->ccb_h
.status
= CAM_UA_ABORT
;
2185 error
= isp_control(isp
, ISPCTL_ABORT_CMD
, ccb
);
2187 ccb
->ccb_h
.status
= CAM_UA_ABORT
;
2189 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
2193 ccb
->ccb_h
.status
= CAM_REQ_INVALID
;
2196 ISPLOCK_2_CAMLOCK(isp
);
2200 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS)
2201 case XPT_SET_TRAN_SETTINGS
: /* Nexus Settings */
2203 if (!IS_CURRENT_SETTINGS(cts
)) {
2204 ccb
->ccb_h
.status
= CAM_REQ_INVALID
;
2208 tgt
= cts
->ccb_h
.target_id
;
2209 CAMLOCK_2_ISPLOCK(isp
);
2211 struct ccb_trans_settings_scsi
*scsi
=
2212 &cts
->proto_specific
.scsi
;
2213 struct ccb_trans_settings_spi
*spi
=
2214 &cts
->xport_specific
.spi
;
2215 sdparam
*sdp
= isp
->isp_param
;
2218 bus
= cam_sim_bus(xpt_path_sim(cts
->ccb_h
.path
));
2221 * We always update (internally) from dev_flags
2222 * so any request to change settings just gets
2223 * vectored to that location.
2225 dptr
= &sdp
->isp_devparam
[tgt
].goal_flags
;
2227 if ((spi
->valid
& CTS_SPI_VALID_DISC
) != 0) {
2228 if ((spi
->flags
& CTS_SPI_FLAGS_DISC_ENB
) != 0)
2229 *dptr
|= DPARM_DISC
;
2231 *dptr
&= ~DPARM_DISC
;
2234 if ((scsi
->valid
& CTS_SCSI_VALID_TQ
) != 0) {
2235 if ((scsi
->flags
& CTS_SCSI_FLAGS_TAG_ENB
) != 0)
2236 *dptr
|= DPARM_TQING
;
2238 *dptr
&= ~DPARM_TQING
;
2241 if ((spi
->valid
& CTS_SPI_VALID_BUS_WIDTH
) != 0) {
2242 if (spi
->bus_width
== MSG_EXT_WDTR_BUS_16_BIT
)
2243 *dptr
|= DPARM_WIDE
;
2245 *dptr
&= ~DPARM_WIDE
;
2251 if ((spi
->valid
& CTS_SPI_VALID_SYNC_OFFSET
) &&
2252 (spi
->valid
& CTS_SPI_VALID_SYNC_RATE
)) {
2253 *dptr
|= DPARM_SYNC
;
2254 isp_prt(isp
, ISP_LOGDEBUG0
,
2255 "enabling synchronous mode, but ignoring "
2256 "setting to period 0x%x offset 0x%x",
2257 spi
->sync_period
, spi
->sync_offset
);
2258 } else if (spi
->sync_period
&& spi
->sync_offset
) {
2259 *dptr
|= DPARM_SYNC
;
2260 isp_prt(isp
, ISP_LOGDEBUG0
,
2261 "enabling synchronous mode (1), but ignoring"
2262 " setting to period 0x%x offset 0x%x",
2263 spi
->sync_period
, spi
->sync_offset
);
2265 *dptr
&= ~DPARM_SYNC
;
2267 isp_prt(isp
, ISP_LOGDEBUG0
,
2268 "SET bus %d targ %d to flags %x off %x per %x",
2269 bus
, tgt
, sdp
->isp_devparam
[tgt
].goal_flags
,
2270 sdp
->isp_devparam
[tgt
].goal_offset
,
2271 sdp
->isp_devparam
[tgt
].goal_period
);
2272 sdp
->isp_devparam
[tgt
].dev_update
= 1;
2273 isp
->isp_update
|= (1 << bus
);
2275 ISPLOCK_2_CAMLOCK(isp
);
2276 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
2279 case XPT_GET_TRAN_SETTINGS
:
2281 tgt
= cts
->ccb_h
.target_id
;
2282 CAMLOCK_2_ISPLOCK(isp
);
2284 fcparam
*fcp
= isp
->isp_param
;
2285 struct ccb_trans_settings_fc
*fc
=
2286 &cts
->xport_specific
.fc
;
2288 cts
->protocol
= PROTO_SCSI
;
2289 cts
->protocol_version
= SCSI_REV_2
;
2290 cts
->transport
= XPORT_FC
;
2291 cts
->transport_version
= 0;
2293 fc
->valid
= CTS_FC_VALID_SPEED
;
2294 fc
->bitrate
= 100000;
2295 if (tgt
> 0 && tgt
< MAX_FC_TARG
) {
2296 struct lportdb
*lp
= &fcp
->portdb
[tgt
];
2297 fc
->wwnn
= lp
->node_wwn
;
2298 fc
->wwpn
= lp
->port_wwn
;
2299 fc
->port
= lp
->portid
;
2300 fc
->valid
|= CTS_FC_VALID_WWNN
|
2301 CTS_FC_VALID_WWPN
| CTS_FC_VALID_PORT
;
2304 struct ccb_trans_settings_scsi
*scsi
=
2305 &cts
->proto_specific
.scsi
;
2306 struct ccb_trans_settings_spi
*spi
=
2307 &cts
->xport_specific
.spi
;
2308 sdparam
*sdp
= isp
->isp_param
;
2309 int bus
= cam_sim_bus(xpt_path_sim(cts
->ccb_h
.path
));
2310 u_int16_t dval
, pval
, oval
;
2314 if (IS_CURRENT_SETTINGS(cts
)) {
2315 sdp
->isp_devparam
[tgt
].dev_refresh
= 1;
2316 isp
->isp_update
|= (1 << bus
);
2317 (void) isp_control(isp
, ISPCTL_UPDATE_PARAMS
,
2319 dval
= sdp
->isp_devparam
[tgt
].actv_flags
;
2320 oval
= sdp
->isp_devparam
[tgt
].actv_offset
;
2321 pval
= sdp
->isp_devparam
[tgt
].actv_period
;
2323 dval
= sdp
->isp_devparam
[tgt
].nvrm_flags
;
2324 oval
= sdp
->isp_devparam
[tgt
].nvrm_offset
;
2325 pval
= sdp
->isp_devparam
[tgt
].nvrm_period
;
2328 cts
->protocol
= PROTO_SCSI
;
2329 cts
->protocol_version
= SCSI_REV_2
;
2330 cts
->transport
= XPORT_SPI
;
2331 cts
->transport_version
= 2;
2333 scsi
->flags
&= ~CTS_SCSI_FLAGS_TAG_ENB
;
2334 spi
->flags
&= ~CTS_SPI_FLAGS_DISC_ENB
;
2335 if (dval
& DPARM_DISC
) {
2336 spi
->flags
|= CTS_SPI_FLAGS_DISC_ENB
;
2338 if (dval
& DPARM_TQING
) {
2339 scsi
->flags
|= CTS_SCSI_FLAGS_TAG_ENB
;
2341 if ((dval
& DPARM_SYNC
) && oval
!= 0) {
2342 spi
->sync_offset
= oval
;
2343 spi
->sync_period
= pval
;
2344 spi
->valid
|= CTS_SPI_VALID_SYNC_OFFSET
;
2345 spi
->valid
|= CTS_SPI_VALID_SYNC_RATE
;
2347 spi
->valid
|= CTS_SPI_VALID_BUS_WIDTH
;
2348 if (dval
& DPARM_WIDE
) {
2349 spi
->bus_width
= MSG_EXT_WDTR_BUS_16_BIT
;
2351 spi
->bus_width
= MSG_EXT_WDTR_BUS_8_BIT
;
2353 if (cts
->ccb_h
.target_lun
!= CAM_LUN_WILDCARD
) {
2354 scsi
->valid
= CTS_SCSI_VALID_TQ
;
2355 spi
->valid
|= CTS_SPI_VALID_DISC
;
2359 isp_prt(isp
, ISP_LOGDEBUG0
,
2360 "GET %s bus %d targ %d to flags %x off %x per %x",
2361 IS_CURRENT_SETTINGS(cts
)? "ACTIVE" : "NVRAM",
2362 bus
, tgt
, dval
, oval
, pval
);
2364 ISPLOCK_2_CAMLOCK(isp
);
2365 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
2369 case XPT_CALC_GEOMETRY
:
2371 struct ccb_calc_geometry
*ccg
;
2372 u_int32_t secs_per_cylinder
;
2376 if (ccg
->block_size
== 0) {
2377 isp_prt(isp
, ISP_LOGERR
,
2378 "%d.%d XPT_CALC_GEOMETRY block size 0?",
2379 ccg
->ccb_h
.target_id
, ccg
->ccb_h
.target_lun
);
2380 ccb
->ccb_h
.status
= CAM_REQ_INVALID
;
2384 size_mb
= ccg
->volume_size
/((1024L * 1024L) / ccg
->block_size
);
2385 if (size_mb
> 1024) {
2387 ccg
->secs_per_track
= 63;
2390 ccg
->secs_per_track
= 32;
2392 secs_per_cylinder
= ccg
->heads
* ccg
->secs_per_track
;
2393 ccg
->cylinders
= ccg
->volume_size
/ secs_per_cylinder
;
2394 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
2398 case XPT_RESET_BUS
: /* Reset the specified bus */
2399 bus
= cam_sim_bus(sim
);
2400 CAMLOCK_2_ISPLOCK(isp
);
2401 error
= isp_control(isp
, ISPCTL_RESET_BUS
, &bus
);
2402 ISPLOCK_2_CAMLOCK(isp
);
2404 ccb
->ccb_h
.status
= CAM_REQ_CMP_ERR
;
2406 if (cam_sim_bus(sim
) && isp
->isp_path2
!= NULL
)
2407 xpt_async(AC_BUS_RESET
, isp
->isp_path2
, NULL
);
2408 else if (isp
->isp_path
!= NULL
)
2409 xpt_async(AC_BUS_RESET
, isp
->isp_path
, NULL
);
2410 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
2415 case XPT_TERM_IO
: /* Terminate the I/O process */
2416 ccb
->ccb_h
.status
= CAM_REQ_INVALID
;
2420 case XPT_PATH_INQ
: /* Path routing inquiry */
2422 struct ccb_pathinq
*cpi
= &ccb
->cpi
;
2424 cpi
->version_num
= 1;
2425 #ifdef ISP_TARGET_MODE
2426 cpi
->target_sprt
= PIT_PROCESSOR
| PIT_DISCONNECT
| PIT_TERM_IO
;
2428 cpi
->target_sprt
= 0;
2430 cpi
->hba_eng_cnt
= 0;
2431 cpi
->max_target
= ISP_MAX_TARGETS(isp
) - 1;
2432 cpi
->max_lun
= ISP_MAX_LUNS(isp
) - 1;
2433 cpi
->bus_id
= cam_sim_bus(sim
);
2435 cpi
->hba_misc
= PIM_NOBUSRESET
;
2437 * Because our loop ID can shift from time to time,
2438 * make our initiator ID out of range of our bus.
2440 cpi
->initiator_id
= cpi
->max_target
+ 1;
2443 * Set base transfer capabilities for Fibre Channel.
2444 * Technically not correct because we don't know
2445 * what media we're running on top of- but we'll
2446 * look good if we always say 100MB/s.
2448 if (FCPARAM(isp
)->isp_gbspeed
== 2)
2449 cpi
->base_transfer_speed
= 200000;
2451 cpi
->base_transfer_speed
= 100000;
2452 cpi
->hba_inquiry
= PI_TAG_ABLE
;
2453 cpi
->transport
= XPORT_FC
;
2454 cpi
->transport_version
= 0; /* WHAT'S THIS FOR? */
2456 sdparam
*sdp
= isp
->isp_param
;
2457 sdp
+= cam_sim_bus(xpt_path_sim(cpi
->ccb_h
.path
));
2458 cpi
->hba_inquiry
= PI_SDTR_ABLE
|PI_TAG_ABLE
|PI_WIDE_16
;
2460 cpi
->initiator_id
= sdp
->isp_initiator_id
;
2461 cpi
->base_transfer_speed
= 3300;
2462 cpi
->transport
= XPORT_SPI
;
2463 cpi
->transport_version
= 2; /* WHAT'S THIS FOR? */
2465 cpi
->protocol
= PROTO_SCSI
;
2466 cpi
->protocol_version
= SCSI_REV_2
;
2467 strncpy(cpi
->sim_vid
, "FreeBSD", SIM_IDLEN
);
2468 strncpy(cpi
->hba_vid
, "Qlogic", HBA_IDLEN
);
2469 strncpy(cpi
->dev_name
, cam_sim_name(sim
), DEV_IDLEN
);
2470 cpi
->unit_number
= cam_sim_unit(sim
);
2471 cpi
->ccb_h
.status
= CAM_REQ_CMP
;
2476 ccb
->ccb_h
.status
= CAM_REQ_INVALID
;
2482 #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
2484 isp_done(struct ccb_scsiio
*sccb
)
2486 struct ispsoftc
*isp
= XS_ISP(sccb
);
2489 XS_SETERR(sccb
, CAM_REQ_CMP
);
2491 if ((sccb
->ccb_h
.status
& CAM_STATUS_MASK
) == CAM_REQ_CMP
&&
2492 (sccb
->scsi_status
!= SCSI_STATUS_OK
)) {
2493 sccb
->ccb_h
.status
&= ~CAM_STATUS_MASK
;
2494 if ((sccb
->scsi_status
== SCSI_STATUS_CHECK_COND
) &&
2495 (sccb
->ccb_h
.status
& CAM_AUTOSNS_VALID
) == 0) {
2496 sccb
->ccb_h
.status
|= CAM_AUTOSENSE_FAIL
;
2498 sccb
->ccb_h
.status
|= CAM_SCSI_STATUS_ERROR
;
2502 sccb
->ccb_h
.status
&= ~CAM_SIM_QUEUED
;
2503 if ((sccb
->ccb_h
.status
& CAM_STATUS_MASK
) != CAM_REQ_CMP
) {
2504 if ((sccb
->ccb_h
.status
& CAM_DEV_QFRZN
) == 0) {
2505 sccb
->ccb_h
.status
|= CAM_DEV_QFRZN
;
2506 xpt_freeze_devq(sccb
->ccb_h
.path
, 1);
2507 isp_prt(isp
, ISP_LOGDEBUG0
,
2508 "freeze devq %d.%d cam sts %x scsi sts %x",
2509 sccb
->ccb_h
.target_id
, sccb
->ccb_h
.target_lun
,
2510 sccb
->ccb_h
.status
, sccb
->scsi_status
);
2514 if ((CAM_DEBUGGED(sccb
->ccb_h
.path
, ISPDDB
)) &&
2515 (sccb
->ccb_h
.status
& CAM_STATUS_MASK
) != CAM_REQ_CMP
) {
2516 xpt_print_path(sccb
->ccb_h
.path
);
2517 isp_prt(isp
, ISP_LOGINFO
,
2518 "cam completion status 0x%x", sccb
->ccb_h
.status
);
2521 XS_CMD_S_DONE(sccb
);
2522 if (XS_CMD_WDOG_P(sccb
) == 0) {
2523 callout_stop(&sccb
->ccb_h
.timeout_ch
);
2524 if (XS_CMD_GRACE_P(sccb
)) {
2525 isp_prt(isp
, ISP_LOGDEBUG2
,
2526 "finished command on borrowed time");
2528 XS_CMD_S_CLEAR(sccb
);
2529 ISPLOCK_2_CAMLOCK(isp
);
2530 xpt_done((union ccb
*) sccb
);
2531 CAMLOCK_2_ISPLOCK(isp
);
/*
 * isp_async: platform (CAM) handler for asynchronous events posted by
 * the core Qlogic driver.  Dispatches on `cmd` (ispasync_t); `arg` is
 * an event-specific payload (int bus/target id, lportdb pointer,
 * request-queue entry, ...).
 *
 * NOTE(review): this region of the file is a lossy extraction.  The
 * statement layout is shattered, the original file's line numbers are
 * fused into the text, and many original lines are missing outright
 * (e.g. the `switch (cmd) {` header, `break;` statements, closing
 * braces, argument lines around orig. 2562-2565, the port_type->pt
 * string switch at orig. 2703-2728).  The comments below annotate the
 * visible fragments only; restore the text from the upstream
 * isp_freebsd.c before attempting to compile.
 */
2536 isp_async(struct ispsoftc
*isp
, ispasync_t cmd
, void *arg
)
/*
 * ISPASYNC_NEW_TGT_PARAMS: negotiated SPI transfer parameters changed
 * for a target.  Build a CAM ccb_trans_settings from the active device
 * parameters and broadcast AC_TRANSFER_NEG on a temporary path.
 * `arg` points to an int encoding bus (upper 16 bits) and target
 * (presumably the low 16 bits; the masking line is not visible here —
 * confirm against the full source).
 */
2540 case ISPASYNC_NEW_TGT_PARAMS
:
2542 struct ccb_trans_settings_scsi
*scsi
;
2543 struct ccb_trans_settings_spi
*spi
;
2545 sdparam
*sdp
= isp
->isp_param
;
2546 struct ccb_trans_settings cts
;
2547 struct cam_path
*tmppath
;
2549 bzero(&cts
, sizeof (struct ccb_trans_settings
));
2551 tgt
= *((int *)arg
);
2552 bus
= (tgt
>> 16) & 0xffff;
/* Drop the ISP lock while calling into CAM (lock-order convention
 * used throughout this file: ISPLOCK_2_CAMLOCK / CAMLOCK_2_ISPLOCK). */
2555 ISPLOCK_2_CAMLOCK(isp
);
2556 if (xpt_create_path(&tmppath
, NULL
,
2557 cam_sim_path(bus
? isp
->isp_sim2
: isp
->isp_sim
),
2558 tgt
, CAM_LUN_WILDCARD
) != CAM_REQ_CMP
) {
2559 CAMLOCK_2_ISPLOCK(isp
);
2560 isp_prt(isp
, ISP_LOGWARN
,
2561 "isp_async cannot make temp path for %d.%d",
/* NOTE(review): the warning's argument list and the early exit from
 * this case (orig. lines 2562-2565) were lost in extraction. */
2566 CAMLOCK_2_ISPLOCK(isp
);
/* Read the active (negotiated) flags for this target and translate
 * them into CAM transfer-settings fields. */
2567 flags
= sdp
->isp_devparam
[tgt
].actv_flags
;
2568 cts
.type
= CTS_TYPE_CURRENT_SETTINGS
;
2569 cts
.protocol
= PROTO_SCSI
;
2570 cts
.transport
= XPORT_SPI
;
2572 scsi
= &cts
.proto_specific
.scsi
;
2573 spi
= &cts
.xport_specific
.spi
;
2575 if (flags
& DPARM_TQING
) {
2576 scsi
->valid
|= CTS_SCSI_VALID_TQ
;
2577 scsi
->flags
|= CTS_SCSI_FLAGS_TAG_ENB
;
2580 if (flags
& DPARM_DISC
) {
2581 spi
->valid
|= CTS_SPI_VALID_DISC
;
2582 spi
->flags
|= CTS_SPI_FLAGS_DISC_ENB
;
/* NOTE(review): CTS_SPI_VALID_BUS_WIDTH is a `valid`-mask bit, yet the
 * line below ORs it into spi->flags.  Upstream reads
 * `spi->valid |= CTS_SPI_VALID_BUS_WIDTH;` — likely a corruption or an
 * injected bug; confirm and fix against the full source. */
2584 spi
->flags
|= CTS_SPI_VALID_BUS_WIDTH
;
2585 if (flags
& DPARM_WIDE
) {
2586 spi
->bus_width
= MSG_EXT_WDTR_BUS_16_BIT
;
2588 spi
->bus_width
= MSG_EXT_WDTR_BUS_8_BIT
;
2590 if (flags
& DPARM_SYNC
) {
2591 spi
->valid
|= CTS_SPI_VALID_SYNC_RATE
;
2592 spi
->valid
|= CTS_SPI_VALID_SYNC_OFFSET
;
2593 spi
->sync_period
= sdp
->isp_devparam
[tgt
].actv_period
;
2594 spi
->sync_offset
= sdp
->isp_devparam
[tgt
].actv_offset
;
2596 isp_prt(isp
, ISP_LOGDEBUG2
,
2597 "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2598 bus
, tgt
, sdp
->isp_devparam
[tgt
].actv_period
,
2599 sdp
->isp_devparam
[tgt
].actv_offset
, flags
);
/* Announce the negotiation result to CAM and release the temp path. */
2600 xpt_setup_ccb(&cts
.ccb_h
, tmppath
, 1);
2601 ISPLOCK_2_CAMLOCK(isp
);
2602 xpt_async(AC_TRANSFER_NEG
, tmppath
, &cts
);
2603 xpt_free_path(tmppath
);
2604 CAMLOCK_2_ISPLOCK(isp
);
/*
 * ISPASYNC_BUS_RESET: a SCSI bus reset was detected; log it and
 * broadcast AC_BUS_RESET on the path for the affected bus (second
 * bus uses isp_path2 on dual-bus adapters).
 */
2607 case ISPASYNC_BUS_RESET
:
2608 bus
= *((int *)arg
);
2609 isp_prt(isp
, ISP_LOGINFO
, "SCSI bus reset on bus %d detected",
2611 if (bus
> 0 && isp
->isp_path2
) {
2612 ISPLOCK_2_CAMLOCK(isp
);
2613 xpt_async(AC_BUS_RESET
, isp
->isp_path2
, NULL
);
2614 CAMLOCK_2_ISPLOCK(isp
);
2615 } else if (isp
->isp_path
) {
2616 ISPLOCK_2_CAMLOCK(isp
);
2617 xpt_async(AC_BUS_RESET
, isp
->isp_path
, NULL
);
2618 CAMLOCK_2_ISPLOCK(isp
);
/*
 * Fibre Channel loop transition events.  The fragment below (freeze
 * with tag "ISPASYNC_LIP" and the "LIP Received" message) belongs to
 * the ISPASYNC_LIP case, whose `case` label (orig. ~2621) was lost in
 * extraction.  Each of LIP / LOOP_RESET / LOOP_DOWN freezes the SIM
 * queue via isp_freeze_loopdown until the loop state is re-evaluated.
 */
2622 if (isp
->isp_path
) {
2623 isp_freeze_loopdown(isp
, "ISPASYNC_LIP");
2625 isp_prt(isp
, ISP_LOGINFO
, "LIP Received");
2627 case ISPASYNC_LOOP_RESET
:
2628 if (isp
->isp_path
) {
2629 isp_freeze_loopdown(isp
, "ISPASYNC_LOOP_RESET");
2631 isp_prt(isp
, ISP_LOGINFO
, "Loop Reset Received");
2633 case ISPASYNC_LOOP_DOWN
:
2634 if (isp
->isp_path
) {
2635 isp_freeze_loopdown(isp
, "ISPASYNC_LOOP_DOWN");
2637 isp_prt(isp
, ISP_LOGINFO
, "Loop DOWN");
2639 case ISPASYNC_LOOP_UP
:
2641 * Now we just note that Loop has come up. We don't
2642 * actually do anything because we're waiting for a
2643 * Change Notify before activating the FC cleanup
2644 * thread to look at the state of the loop again.
2646 isp_prt(isp
, ISP_LOGINFO
, "Loop UP");
/*
 * ISPASYNC_PROMENADE: a device arrived on or departed from the loop.
 * `arg` is the target index into the local port database; log the
 * device's WWNs/role and, if it is (or was) an initiator-visible
 * target, announce AC_FOUND_DEVICE / AC_LOST_DEVICE to CAM.
 */
2648 case ISPASYNC_PROMENADE
:
2650 struct cam_path
*tmppath
;
2651 const char *fmt
= "Target %d (Loop 0x%x) Port ID 0x%x "
2652 "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2653 static const char *roles
[4] = {
2654 "(none)", "Target", "Initiator", "Target/Initiator"
2656 fcparam
*fcp
= isp
->isp_param
;
2657 int tgt
= *((int *) arg
);
2658 struct lportdb
*lp
= &fcp
->portdb
[tgt
];
/* 64-bit WWNs are printed as two 32-bit halves. */
2660 isp_prt(isp
, ISP_LOGINFO
, fmt
, tgt
, lp
->loopid
, lp
->portid
,
2661 roles
[lp
->roles
& 0x3],
2662 (lp
->valid
)? "Arrived" : "Departed",
2663 (u_int32_t
) (lp
->port_wwn
>> 32),
2664 (u_int32_t
) (lp
->port_wwn
& 0xffffffffLL
),
2665 (u_int32_t
) (lp
->node_wwn
>> 32),
2666 (u_int32_t
) (lp
->node_wwn
& 0xffffffffLL
));
2668 if (xpt_create_path(&tmppath
, NULL
, cam_sim_path(isp
->isp_sim
),
2669 (target_id_t
)tgt
, CAM_LUN_WILDCARD
) != CAM_REQ_CMP
) {
/* Valid entry with an initiator-role bit set => device arrived. */
2672 if (lp
->valid
&& (lp
->roles
&
2673 (SVC3_INI_ROLE
>> SVC3_ROLE_SHIFT
))) {
2674 ISPLOCK_2_CAMLOCK(isp
);
2675 xpt_async(AC_FOUND_DEVICE
, tmppath
, NULL
);
2677 ISPLOCK_2_CAMLOCK(isp
);
2678 xpt_async(AC_LOST_DEVICE
, tmppath
, NULL
);
2680 CAMLOCK_2_ISPLOCK(isp
);
2681 xpt_free_path(tmppath
);
/*
 * ISPASYNC_CHANGE_NOTIFY: port or name-server database changed.
 * Log which, then wake the FC kthread to re-evaluate loop state.
 */
2684 case ISPASYNC_CHANGE_NOTIFY
:
2685 if (arg
== ISPASYNC_CHANGE_PDB
) {
2686 isp_prt(isp
, ISP_LOGINFO
,
2687 "Port Database Changed");
2688 } else if (arg
== ISPASYNC_CHANGE_SNS
) {
2689 isp_prt(isp
, ISP_LOGINFO
,
2690 "Name Server Database Changed");
2692 wakeup(&isp
->isp_osinfo
.kthread
);
/*
 * ISPASYNC_FABRIC_DEV: a fabric device was reported by the name
 * server.  Announce it, then (if we have the initiator role and the
 * entry is not our own port) find or allocate a slot for it in the
 * local port database; isp_pdb_sync announces it later.
 * NOTE(review): the switch on clp->port_type that selects the `pt`
 * description string (orig. lines 2703-2728) was lost in extraction.
 */
2694 case ISPASYNC_FABRIC_DEV
:
2696 int target
, base
, lim
;
2697 fcparam
*fcp
= isp
->isp_param
;
2698 struct lportdb
*lp
= NULL
;
2699 struct lportdb
*clp
= (struct lportdb
*) arg
;
2702 switch (clp
->port_type
) {
2729 isp_prt(isp
, ISP_LOGINFO
,
2730 "%s Fabric Device @ PortID 0x%x", pt
, clp
->portid
);
2733 * If we don't have an initiator role we bail.
2735 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
2738 if ((isp
->isp_role
& ISP_ROLE_INITIATOR
) == 0) {
2743 * Is this entry for us? If so, we bail.
2746 if (fcp
->isp_portid
== clp
->portid
) {
2751 * Else, the default policy is to find room for it in
2752 * our local port database. Later, when we execute
2753 * the call to isp_pdb_sync either this newly arrived
2754 * or already logged in device will be (re)announced.
/* Choose the portdb scan range based on topology; the assignments to
 * base/lim (orig. 2758-2766) are not visible here. */
2757 if (fcp
->isp_topo
== TOPO_FL_PORT
)
2762 if (fcp
->isp_topo
== TOPO_N_PORT
)
2768 * Is it already in our list?
/* First pass: look for an existing entry with matching WWNs,
 * skipping the reserved loop/SNS ids. */
2770 for (target
= base
; target
< lim
; target
++) {
2771 if (target
>= FL_PORT_ID
&& target
<= FC_SNS_ID
) {
2774 lp
= &fcp
->portdb
[target
];
2775 if (lp
->port_wwn
== clp
->port_wwn
&&
2776 lp
->node_wwn
== clp
->node_wwn
) {
/* Second pass: find the first free slot (port_wwn == 0). */
2784 for (target
= base
; target
< lim
; target
++) {
2785 if (target
>= FL_PORT_ID
&& target
<= FC_SNS_ID
) {
2788 lp
= &fcp
->portdb
[target
];
2789 if (lp
->port_wwn
== 0) {
2793 if (target
== lim
) {
2794 isp_prt(isp
, ISP_LOGWARN
,
2795 "out of space for fabric devices");
/* Populate the claimed slot from the reported entry. */
2798 lp
->port_type
= clp
->port_type
;
2799 lp
->fc4_type
= clp
->fc4_type
;
2800 lp
->node_wwn
= clp
->node_wwn
;
2801 lp
->port_wwn
= clp
->port_wwn
;
2802 lp
->portid
= clp
->portid
;
/*
 * Target-mode events (compiled only with ISP_TARGET_MODE).
 */
2806 #ifdef ISP_TARGET_MODE
2807 case ISPASYNC_TARGET_MESSAGE
:
2809 tmd_msg_t
*mp
= arg
;
2810 isp_prt(isp
, ISP_LOGALL
,
2811 "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
2812 mp
->nt_bus
, (int) mp
->nt_iid
, (int) mp
->nt_tgt
,
2813 (int) mp
->nt_lun
, mp
->nt_tagtype
, mp
->nt_tagval
,
2817 case ISPASYNC_TARGET_EVENT
:
2819 tmd_event_t
*ep
= arg
;
2820 isp_prt(isp
, ISP_LOGALL
,
2821 "bus %d event code 0x%x", ep
->ev_bus
, ep
->ev_event
);
/*
 * ISPASYNC_TARGET_ACTION: dispatch an incoming request-queue entry to
 * the matching platform target-mode handler, keyed on the entry type
 * in the isphdr_t.  The `case` labels for the ATIO/ATIO2/CTIO arms
 * (orig. 2839-2847) were lost in extraction; the SCSI-vs-FC split for
 * RQSTYPE_NOTIFY (the IS_SCSI/IS_FC test around orig. 2832-2835) is
 * likewise not visible — confirm against the full source.
 */
2824 case ISPASYNC_TARGET_ACTION
:
2825 switch (((isphdr_t
*)arg
)->rqs_entry_type
) {
2827 isp_prt(isp
, ISP_LOGWARN
,
2828 "event 0x%x for unhandled target action",
2829 ((isphdr_t
*)arg
)->rqs_entry_type
);
2831 case RQSTYPE_NOTIFY
:
2833 rv
= isp_handle_platform_notify_scsi(isp
,
2834 (in_entry_t
*) arg
);
2836 rv
= isp_handle_platform_notify_fc(isp
,
2837 (in_fcentry_t
*) arg
);
2841 rv
= isp_handle_platform_atio(isp
, (at_entry_t
*) arg
);
2844 rv
= isp_handle_platform_atio2(isp
, (at2_entry_t
*)arg
);
2848 rv
= isp_handle_platform_ctio(isp
, arg
);
/* Lun enable/modify completion: recover the bus from the reserved
 * field on dual-bus adapters, then signal the waiter. */
2850 case RQSTYPE_ENABLE_LUN
:
2851 case RQSTYPE_MODIFY_LUN
:
2852 if (IS_DUALBUS(isp
)) {
2854 GET_BUS_VAL(((lun_entry_t
*)arg
)->le_rsvd
);
2858 isp_cv_signal_rqe(isp
, bus
,
2859 ((lun_entry_t
*)arg
)->le_status
);
/*
 * ISPASYNC_FW_CRASH: firmware reported an internal error.  Read the
 * crash address from the outgoing mailbox registers (mailbox 6 carries
 * the bus on dual-bus parts), log it, optionally stage a firmware
 * dump (ISP_FW_CRASH_DUMP), and re-init via a recursive FW_RESTARTED
 * notification.
 */
2864 case ISPASYNC_FW_CRASH
:
2866 u_int16_t mbox1
, mbox6
;
2867 mbox1
= ISP_READ(isp
, OUTMAILBOX1
);
2868 if (IS_DUALBUS(isp
)) {
2869 mbox6
= ISP_READ(isp
, OUTMAILBOX6
);
2873 isp_prt(isp
, ISP_LOGERR
,
2874 "Internal Firmware Error on bus %d @ RISC Address 0x%x",
2876 #ifdef ISP_FW_CRASH_DUMP
2878 * XXX: really need a thread to do this right.
/* Mark the FC state machines down and freeze while the dump/restart
 * proceeds. */
2881 FCPARAM(isp
)->isp_fwstate
= FW_CONFIG_WAIT
;
2882 FCPARAM(isp
)->isp_loopstate
= LOOP_NIL
;
2883 isp_freeze_loopdown(isp
, "f/w crash");
2887 isp_async(isp
, ISPASYNC_FW_RESTARTED
, NULL
);
2891 case ISPASYNC_UNHANDLED_RESPONSE
:
/* default: unknown event code — log and fall out. */
2894 isp_prt(isp
, ISP_LOGERR
, "unknown isp_async event %d", cmd
);
2902 * Locks are held before coming here.
/*
 * isp_uninit: quiesce the adapter at detach/shutdown time.  Caller
 * holds the required locks (per the block comment above).  Visible
 * here: a command reset of the RISC processor via the HCCR register.
 * NOTE(review): extraction dropped the surrounding braces and the
 * function's remaining statement(s) (orig. lines 2906, 2908-2911);
 * upstream also masks chip interrupts here (DISABLE_INTS) — restore
 * from the full source.
 */
2905 isp_uninit(struct ispsoftc
*isp
)
2907 ISP_WRITE(isp
, HCCR
, HCCR_CMD_RESET
);
2912 isp_prt(struct ispsoftc
*isp
, int level
, const char *fmt
, ...)
2915 if (level
!= ISP_LOGALL
&& (level
& isp
->isp_dblev
) == 0) {
2918 kprintf("%s: ", device_get_nameunit(isp
->isp_dev
));
2919 __va_start(ap
, fmt
);