Sync CAM with FreeBSD using lockmgr locks instead of mutexes.
[dragonfly.git] / sys / dev / raid / asr / asr.c
blob0e98c5e6fd2a5c8a59a4d8a4dd18918e27c7ce50
1 /* $FreeBSD: src/sys/dev/asr/asr.c,v 1.3.2.2 2001/08/23 05:21:29 scottl Exp $ */
2 /* $DragonFly: src/sys/dev/raid/asr/asr.c,v 1.35 2008/05/18 20:30:23 pavalos Exp $ */
3 /*
4 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
5 * Copyright (c) 2000-2001 Adaptec Corporation
6 * All rights reserved.
8 * TERMS AND CONDITIONS OF USE
10 * Redistribution and use in source form, with or without modification, are
11 * permitted provided that redistributions of source code must retain the
12 * above copyright notice, this list of conditions and the following disclaimer.
14 * This software is provided `as is' by Adaptec and any express or implied
15 * warranties, including, but not limited to, the implied warranties of
16 * merchantability and fitness for a particular purpose, are disclaimed. In no
17 * event shall Adaptec be liable for any direct, indirect, incidental, special,
18 * exemplary or consequential damages (including, but not limited to,
19 * procurement of substitute goods or services; loss of use, data, or profits;
20 * or business interruptions) however caused and on any theory of liability,
21 * whether in contract, strict liability, or tort (including negligence or
22 * otherwise) arising in any way out of the use of this driver software, even
23 * if advised of the possibility of such damage.
25 * SCSI I2O host adapter driver
27 * V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
28 * - The 2000S and 2005S do not initialize on some machines,
29 * increased timeout to 255ms from 50ms for the StatusGet
30 * command.
31 * V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
32 * - I knew this one was too good to be true. The error return
33 * on ioctl commands needs to be compared to CAM_REQ_CMP, not
34 * to the bit masked status.
35 * V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
36 * - The 2005S that was supported is affectionately called the
37 * Conjoined BAR Firmware. In order to support RAID-5 in a
38 * 16MB low-cost configuration, Firmware was forced to go
39 * to a Split BAR Firmware. This requires a separate IOP and
40 * Messaging base address.
41 * V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
42 * - Handle support for 2005S Zero Channel RAID solution.
43 * - System locked up if the Adapter locked up. Do not try
44 * to send other commands if the resetIOP command fails. The
45 * fail outstanding command discovery loop was flawed as the
46 * removal of the command from the list prevented discovering
47 * all the commands.
48 * - Comment changes to clarify driver.
49 * - SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
50 * - We do not use the AC_FOUND_DEV event because of I2O.
51 * Removed asr_async.
52 * V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
53 * lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
54 * - Removed support for PM1554, PM2554 and PM2654 in Mode-0
55 * mode as this is confused with competitor adapters in run
56 * mode.
57 * - critical locking needed in ASR_ccbAdd and ASR_ccbRemove
58 * to prevent operating system panic.
59 * - moved default major number to 154 from 97.
60 * V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
61 * - The controller is not actually an ASR (Adaptec SCSI RAID)
62 * series that is visible, it's more of an internal code name.
63 * remove any visible references within reason for now.
64 * - bus_ptr->LUN was not correctly zeroed when initially
65 * allocated causing a possible panic of the operating system
66 * during boot.
67 * V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
68 * - Code always fails for ASR_getTid affecting performance.
69 * - initiated a set of changes that resulted from a formal
70 * code inspection by Mark_Salyzyn@adaptec.com,
71 * George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
72 * Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
73 * Their findings were focussed on the LCT & TID handler, and
74 * all resulting changes were to improve code readability,
75 * consistency or have a positive effect on performance.
76 * V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
77 * - Passthrough returned an incorrect error.
78 * - Passthrough did not migrate the intrinsic scsi layer wakeup
79 * on command completion.
80 * - generate control device nodes using make_dev and delete_dev.
81 * - Performance affected by TID caching reallocing.
82 * - Made suggested changes by Justin_Gibbs@adaptec.com
83 * - use splcam instead of splbio.
84 * - use u_int8_t instead of u_char.
85 * - use u_int16_t instead of u_short.
86 * - use u_int32_t instead of u_long where appropriate.
87 * - use 64 bit context handler instead of 32 bit.
88 * - create_ccb should only allocate the worst case
89 * requirements for the driver since CAM may evolve
90 * making union ccb much larger than needed here.
91 * renamed create_ccb to asr_alloc_ccb.
92 * - go nutz justifying all debug prints as macros
93 * defined at the top and remove unsightly ifdefs.
94 * - INLINE STATIC viewed as confusing. Historically
95 * utilized to affect code performance and debug
96 * issues in OS, Compiler or OEM specific situations.
97 * V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
98 * - Ported from FreeBSD 2.2.X DPT I2O driver.
99 * changed struct scsi_xfer to union ccb/struct ccb_hdr
100 * changed variable name xs to ccb
101 * changed struct scsi_link to struct cam_path
102 * changed struct scsibus_data to struct cam_sim
103 * stopped using fordriver for holding on to the TID
104 * use proprietary packet creation instead of scsi_inquire
105 * CAM layer sends synchronize commands.
108 #define ASR_VERSION 1
109 #define ASR_REVISION '0'
110 #define ASR_SUBREVISION '8'
111 #define ASR_MONTH 8
112 #define ASR_DAY 21
113 #define ASR_YEAR 2001 - 1980
116 * Debug macros to reduce the unsightly ifdefs
118 #if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
119 # define debug_asr_message(message) \
121 u_int32_t * pointer = (u_int32_t *)message; \
122 u_int32_t length = I2O_MESSAGE_FRAME_getMessageSize(message);\
123 u_int32_t counter = 0; \
125 while (length--) { \
126 kprintf ("%08lx%c", (u_long)*(pointer++), \
127 (((++counter & 7) == 0) || (length == 0)) \
128 ? '\n' \
129 : ' '); \
132 #endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
134 #if (defined(DEBUG_ASR))
135 /* Breaks on none STDC based compilers :-( */
136 # define debug_asr_printf(fmt,args...) kprintf(fmt, ##args)
137 # define debug_asr_dump_message(message) debug_asr_message(message)
138 # define debug_asr_print_path(ccb) xpt_print_path(ccb->ccb_h.path);
139 /* None fatal version of the ASSERT macro */
140 # if (defined(__STDC__))
141 # define ASSERT(phrase) if(!(phrase))kprintf(#phrase " at line %d file %s\n",__LINE__,__FILE__)
142 # else
143 # define ASSERT(phrase) if(!(phrase))kprintf("phrase" " at line %d file %s\n",__LINE__,__FILE__)
144 # endif
145 #else /* DEBUG_ASR */
146 # define debug_asr_printf(fmt,args...)
147 # define debug_asr_dump_message(message)
148 # define debug_asr_print_path(ccb)
149 # define ASSERT(x)
150 #endif /* DEBUG_ASR */
153 * If DEBUG_ASR_CMD is defined:
154 * 0 - Display incoming SCSI commands
155 * 1 - add in a quick character before queueing.
156 * 2 - add in outgoing message frames.
158 #if (defined(DEBUG_ASR_CMD))
159 # define debug_asr_cmd_printf(fmt,args...) kprintf(fmt,##args)
160 # define debug_asr_dump_ccb(ccb) \
162 u_int8_t * cp = (unsigned char *)&(ccb->csio.cdb_io); \
163 int len = ccb->csio.cdb_len; \
165 while (len) { \
166 debug_asr_cmd_printf (" %02x", *(cp++)); \
167 --len; \
170 # if (DEBUG_ASR_CMD > 0)
171 # define debug_asr_cmd1_printf debug_asr_cmd_printf
172 # else
173 # define debug_asr_cmd1_printf(fmt,args...)
174 # endif
175 # if (DEBUG_ASR_CMD > 1)
176 # define debug_asr_cmd2_printf debug_asr_cmd_printf
177 # define debug_asr_cmd2_dump_message(message) debug_asr_message(message)
178 # else
179 # define debug_asr_cmd2_printf(fmt,args...)
180 # define debug_asr_cmd2_dump_message(message)
181 # endif
182 #else /* DEBUG_ASR_CMD */
183 # define debug_asr_cmd_printf(fmt,args...)
184 # define debug_asr_cmd_dump_ccb(ccb)
185 # define debug_asr_cmd1_printf(fmt,args...)
186 # define debug_asr_cmd2_printf(fmt,args...)
187 # define debug_asr_cmd2_dump_message(message)
188 #endif /* DEBUG_ASR_CMD */
190 #if (defined(DEBUG_ASR_USR_CMD))
191 # define debug_usr_cmd_printf(fmt,args...) kprintf(fmt,##args)
192 # define debug_usr_cmd_dump_message(message) debug_usr_message(message)
193 #else /* DEBUG_ASR_USR_CMD */
194 # define debug_usr_cmd_printf(fmt,args...)
195 # define debug_usr_cmd_dump_message(message)
196 #endif /* DEBUG_ASR_USR_CMD */
198 #define dsDescription_size 46 /* Snug as a bug in a rug */
199 #include "dptsig.h"
201 static dpt_sig_S ASR_sig = {
202 { 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
203 PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
204 OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL,
205 ADF_ALL_SC5,
206 0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
207 ASR_MONTH, ASR_DAY, ASR_YEAR,
208 /* 01234567890123456789012345678901234567890123456789 < 50 chars */
209 "Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
210 /* ^^^^^ asr_attach alters these to match OS */
213 #include <sys/param.h> /* TRUE=1 and FALSE=0 defined here */
214 #include <sys/kernel.h>
215 #include <sys/systm.h>
216 #include <sys/malloc.h>
217 #include <sys/proc.h>
218 #include <sys/conf.h>
219 #include <sys/bus.h>
220 #include <sys/rman.h>
221 #include <sys/stat.h>
222 #include <sys/device.h>
223 #include <sys/thread2.h>
224 #include <sys/ioccom.h>
226 #include <bus/cam/cam.h>
227 #include <bus/cam/cam_ccb.h>
228 #include <bus/cam/cam_sim.h>
229 #include <bus/cam/cam_xpt_sim.h>
230 #include <bus/cam/cam_xpt_periph.h>
232 #include <bus/cam/scsi/scsi_all.h>
233 #include <bus/cam/scsi/scsi_message.h>
235 #include <vm/vm.h>
236 #include <vm/pmap.h>
237 #include <machine/cputypes.h>
238 #include <machine/clock.h>
239 #include <machine/vmparam.h>
241 #include <bus/pci/pcivar.h>
242 #include <bus/pci/pcireg.h>
244 #define STATIC static
245 #define INLINE
247 #if (defined(DEBUG_ASR) && (DEBUG_ASR > 0))
248 # undef STATIC
249 # define STATIC
250 # undef INLINE
251 # define INLINE
252 #endif
253 #define IN
254 #define OUT
255 #define INOUT
257 #define osdSwap4(x) ((u_long)ntohl((u_long)(x)))
258 #define KVTOPHYS(x) vtophys(x)
259 #include "dptalign.h"
260 #include "i2oexec.h"
261 #include "i2obscsi.h"
262 #include "i2odpt.h"
263 #include "i2oadptr.h"
264 #include "sys_info.h"
266 /* Configuration Definitions */
268 #define SG_SIZE 58 /* Scatter Gather list Size */
269 #define MAX_TARGET_ID 126 /* Maximum Target ID supported */
270 #define MAX_LUN 255 /* Maximum LUN Supported */
271 #define MAX_CHANNEL 7 /* Maximum Channel # Supported by driver */
272 #define MAX_INBOUND 2000 /* Max CCBs, Also Max Queue Size */
273 #define MAX_OUTBOUND 256 /* Maximum outbound frames/adapter */
274 #define MAX_INBOUND_SIZE 512 /* Maximum inbound frame size */
275 #define MAX_MAP 4194304L /* Maximum mapping size of IOP */
276 /* Also serves as the minimum map for */
277 /* the 2005S zero channel RAID product */
279 /**************************************************************************
280 ** ASR Host Adapter structure - One Structure For Each Host Adapter That **
281 ** Is Configured Into The System. The Structure Supplies Configuration **
282 ** Information, Status Info, Queue Info And An Active CCB List Pointer. **
283 ***************************************************************************/
285 /* I2O register set */
286 typedef struct {
287 U8 Address[0x30];
288 volatile U32 Status;
289 volatile U32 Mask;
290 # define Mask_InterruptsDisabled 0x08
291 U32 x[2];
292 volatile U32 ToFIFO; /* In Bound FIFO */
293 volatile U32 FromFIFO; /* Out Bound FIFO */
294 } i2oRegs_t;
297 * A MIX of performance and space considerations for TID lookups
299 typedef u_int16_t tid_t;
301 typedef struct {
302 u_int32_t size; /* up to MAX_LUN */
303 tid_t TID[1];
304 } lun2tid_t;
306 typedef struct {
307 u_int32_t size; /* up to MAX_TARGET */
308 lun2tid_t * LUN[1];
309 } target2lun_t;
312 * To ensure that we only allocate and use the worst case ccb here, lets
313 * make our own local ccb union. If asr_alloc_ccb is utilized for another
314 * ccb type, ensure that you add the additional structures into our local
315 * ccb union. To ensure strict type checking, we will utilize the local
316 * ccb definition wherever possible.
318 union asr_ccb {
319 struct ccb_hdr ccb_h; /* For convenience */
320 struct ccb_scsiio csio;
321 struct ccb_setasync csa;
324 typedef struct Asr_softc {
325 u_int16_t ha_irq;
326 void * ha_Base; /* base port for each board */
327 u_int8_t * volatile ha_blinkLED;
328 i2oRegs_t * ha_Virt; /* Base address of IOP */
329 U8 * ha_Fvirt; /* Base address of Frames */
330 I2O_IOP_ENTRY ha_SystemTable;
331 LIST_HEAD(,ccb_hdr) ha_ccb; /* ccbs in use */
332 struct cam_path * ha_path[MAX_CHANNEL+1];
333 struct cam_sim * ha_sim[MAX_CHANNEL+1];
334 struct resource * ha_mem_res;
335 struct resource * ha_mes_res;
336 struct resource * ha_irq_res;
337 void * ha_intr;
338 PI2O_LCT ha_LCT; /* Complete list of devices */
339 # define le_type IdentityTag[0]
340 # define I2O_BSA 0x20
341 # define I2O_FCA 0x40
342 # define I2O_SCSI 0x00
343 # define I2O_PORT 0x80
344 # define I2O_UNKNOWN 0x7F
345 # define le_bus IdentityTag[1]
346 # define le_target IdentityTag[2]
347 # define le_lun IdentityTag[3]
348 target2lun_t * ha_targets[MAX_CHANNEL+1];
349 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
350 u_long ha_Msgs_Phys;
352 u_int8_t ha_in_reset;
353 # define HA_OPERATIONAL 0
354 # define HA_IN_RESET 1
355 # define HA_OFF_LINE 2
356 # define HA_OFF_LINE_RECOVERY 3
357 /* Configuration information */
358 /* The target id maximums we take */
359 u_int8_t ha_MaxBus; /* Maximum bus */
360 u_int8_t ha_MaxId; /* Maximum target ID */
361 u_int8_t ha_MaxLun; /* Maximum target LUN */
362 u_int8_t ha_SgSize; /* Max SG elements */
363 u_int8_t ha_pciBusNum;
364 u_int8_t ha_pciDeviceNum;
365 u_int8_t ha_adapter_target[MAX_CHANNEL+1];
366 u_int16_t ha_QueueSize; /* Max outstanding commands */
367 u_int16_t ha_Msgs_Count;
369 /* Links into other parents and HBAs */
370 struct Asr_softc * ha_next; /* HBA list */
371 } Asr_softc_t;
373 STATIC Asr_softc_t * Asr_softc;
376 * Prototypes of the routines we have in this object.
379 /* Externally callable routines */
380 #define PROBE_ARGS IN device_t tag
381 #define PROBE_RET int
382 #define PROBE_SET() u_long id = (pci_get_device(tag)<<16)|pci_get_vendor(tag)
383 #define PROBE_RETURN(retval) if(retval){device_set_desc(tag,retval);return(0);}else{return(ENXIO);}
384 #define ATTACH_ARGS IN device_t tag
385 #define ATTACH_RET int
386 #define ATTACH_SET() int unit = device_get_unit(tag)
387 #define ATTACH_RETURN(retval) return(retval)
388 /* I2O HDM interface */
389 STATIC PROBE_RET asr_probe (PROBE_ARGS);
390 STATIC ATTACH_RET asr_attach (ATTACH_ARGS);
391 /* DOMINO placeholder */
392 STATIC PROBE_RET domino_probe (PROBE_ARGS);
393 STATIC ATTACH_RET domino_attach (ATTACH_ARGS);
394 /* MODE0 adapter placeholder */
395 STATIC PROBE_RET mode0_probe (PROBE_ARGS);
396 STATIC ATTACH_RET mode0_attach (ATTACH_ARGS);
398 STATIC Asr_softc_t * ASR_get_sc (cdev_t dev);
399 STATIC d_ioctl_t asr_ioctl;
400 STATIC d_open_t asr_open;
401 STATIC d_close_t asr_close;
402 STATIC int asr_intr (IN Asr_softc_t *sc);
403 STATIC void asr_timeout (INOUT void *arg);
404 STATIC int ASR_init (IN Asr_softc_t *sc);
405 STATIC INLINE int ASR_acquireLct (INOUT Asr_softc_t *sc);
406 STATIC INLINE int ASR_acquireHrt (INOUT Asr_softc_t *sc);
407 STATIC void asr_action (IN struct cam_sim *sim,
408 IN union ccb *ccb);
409 STATIC void asr_poll (IN struct cam_sim * sim);
412 * Here is the auto-probe structure used to nest our tests appropriately
413 * during the startup phase of the operating system.
415 STATIC device_method_t asr_methods[] = {
416 DEVMETHOD(device_probe, asr_probe),
417 DEVMETHOD(device_attach, asr_attach),
418 { 0, 0 }
421 STATIC driver_t asr_driver = {
422 "asr",
423 asr_methods,
424 sizeof(Asr_softc_t)
427 STATIC devclass_t asr_devclass;
429 DECLARE_DUMMY_MODULE(asr);
430 DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);
432 STATIC device_method_t domino_methods[] = {
433 DEVMETHOD(device_probe, domino_probe),
434 DEVMETHOD(device_attach, domino_attach),
435 { 0, 0 }
438 STATIC driver_t domino_driver = {
439 "domino",
440 domino_methods,
444 STATIC devclass_t domino_devclass;
446 DRIVER_MODULE(domino, pci, domino_driver, domino_devclass, 0, 0);
448 STATIC device_method_t mode0_methods[] = {
449 DEVMETHOD(device_probe, mode0_probe),
450 DEVMETHOD(device_attach, mode0_attach),
451 { 0, 0 }
454 STATIC driver_t mode0_driver = {
455 "mode0",
456 mode0_methods,
460 STATIC devclass_t mode0_devclass;
462 DRIVER_MODULE(mode0, pci, mode0_driver, mode0_devclass, 0, 0);
465 * devsw for asr hba driver
467 * only ioctl is used. the sd driver provides all other access.
469 STATIC struct dev_ops asr_ops = {
470 { "asr", -1, 0 },
471 .d_open = asr_open,
472 .d_close = asr_close,
473 .d_ioctl = asr_ioctl,
477 * Initialize the dynamic dev_ops hooks.
479 STATIC void
480 asr_drvinit (void * unused)
482 static int asr_devsw_installed = 0;
484 if (asr_devsw_installed) {
485 return;
487 asr_devsw_installed++;
490 * Adding the ops will dynamically assign a major number.
492 dev_ops_add(&asr_ops, 0, 0);
493 } /* asr_drvinit */
495 /* XXX Must initialize before CAM layer picks up our HBA driver */
496 SYSINIT(asrdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE,asr_drvinit,NULL)
498 /* I2O support routines */
499 #define defAlignLong(STRUCT,NAME) char NAME[sizeof(STRUCT)]
500 #define getAlignLong(STRUCT,NAME) ((STRUCT *)(NAME))
503 * Fill message with default.
505 STATIC PI2O_MESSAGE_FRAME
506 ASR_fillMessage (
507 IN char * Message,
508 IN u_int16_t size)
510 OUT PI2O_MESSAGE_FRAME Message_Ptr;
512 Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message);
513 bzero ((void *)Message_Ptr, size);
514 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
515 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
516 (size + sizeof(U32) - 1) >> 2);
517 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
518 return (Message_Ptr);
519 } /* ASR_fillMessage */
521 #define EMPTY_QUEUE ((U32)-1L)
523 STATIC INLINE U32
524 ASR_getMessage(
525 IN i2oRegs_t * virt)
527 OUT U32 MessageOffset;
529 if ((MessageOffset = virt->ToFIFO) == EMPTY_QUEUE) {
530 MessageOffset = virt->ToFIFO;
532 return (MessageOffset);
533 } /* ASR_getMessage */
535 /* Issue a polled command */
536 STATIC U32
537 ASR_initiateCp (
538 INOUT i2oRegs_t * virt,
539 INOUT U8 * fvirt,
540 IN PI2O_MESSAGE_FRAME Message)
542 OUT U32 Mask = -1L;
543 U32 MessageOffset;
544 u_int Delay = 1500;
547 * ASR_initiateCp is only used for synchronous commands and will
548 * be made more resiliant to adapter delays since commands like
549 * resetIOP can cause the adapter to be deaf for a little time.
551 while (((MessageOffset = ASR_getMessage(virt)) == EMPTY_QUEUE)
552 && (--Delay != 0)) {
553 DELAY (10000);
555 if (MessageOffset != EMPTY_QUEUE) {
556 bcopy (Message, fvirt + MessageOffset,
557 I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
559 * Disable the Interrupts
561 virt->Mask = (Mask = virt->Mask) | Mask_InterruptsDisabled;
562 virt->ToFIFO = MessageOffset;
564 return (Mask);
565 } /* ASR_initiateCp */
568 * Reset the adapter.
570 STATIC U32
571 ASR_resetIOP (
572 INOUT i2oRegs_t * virt,
573 INOUT U8 * fvirt)
575 struct resetMessage {
576 I2O_EXEC_IOP_RESET_MESSAGE M;
577 U32 R;
579 defAlignLong(struct resetMessage,Message);
580 PI2O_EXEC_IOP_RESET_MESSAGE Message_Ptr;
581 OUT U32 * volatile Reply_Ptr;
582 U32 Old;
585 * Build up our copy of the Message.
587 Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(Message,
588 sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
589 I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
591 * Reset the Reply Status
593 *(Reply_Ptr = (U32 *)((char *)Message_Ptr
594 + sizeof(I2O_EXEC_IOP_RESET_MESSAGE))) = 0;
595 I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
596 KVTOPHYS((void *)Reply_Ptr));
598 * Send the Message out
600 if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
602 * Wait for a response (Poll), timeouts are dangerous if
603 * the card is truly responsive. We assume response in 2s.
605 u_int8_t Delay = 200;
607 while ((*Reply_Ptr == 0) && (--Delay != 0)) {
608 DELAY (10000);
611 * Re-enable the interrupts.
613 virt->Mask = Old;
614 ASSERT (*Reply_Ptr);
615 return (*Reply_Ptr);
617 ASSERT (Old != (U32)-1L);
618 return (0);
619 } /* ASR_resetIOP */
622 * Get the curent state of the adapter
624 STATIC INLINE PI2O_EXEC_STATUS_GET_REPLY
625 ASR_getStatus (
626 INOUT i2oRegs_t * virt,
627 INOUT U8 * fvirt,
628 OUT PI2O_EXEC_STATUS_GET_REPLY buffer)
630 defAlignLong(I2O_EXEC_STATUS_GET_MESSAGE,Message);
631 PI2O_EXEC_STATUS_GET_MESSAGE Message_Ptr;
632 U32 Old;
635 * Build up our copy of the Message.
637 Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(Message,
638 sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
639 I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
640 I2O_EXEC_STATUS_GET);
641 I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
642 KVTOPHYS((void *)buffer));
643 /* This one is a Byte Count */
644 I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
645 sizeof(I2O_EXEC_STATUS_GET_REPLY));
647 * Reset the Reply Status
649 bzero ((void *)buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
651 * Send the Message out
653 if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
655 * Wait for a response (Poll), timeouts are dangerous if
656 * the card is truly responsive. We assume response in 50ms.
658 u_int8_t Delay = 255;
660 while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
661 if (--Delay == 0) {
662 buffer = (PI2O_EXEC_STATUS_GET_REPLY)NULL;
663 break;
665 DELAY (1000);
668 * Re-enable the interrupts.
670 virt->Mask = Old;
671 return (buffer);
673 return ((PI2O_EXEC_STATUS_GET_REPLY)NULL);
674 } /* ASR_getStatus */
677 * Check if the device is a SCSI I2O HBA, and add it to the list.
681 * Probe for ASR controller. If we find it, we will use it.
682 * virtual adapters.
684 STATIC PROBE_RET
685 asr_probe(PROBE_ARGS)
687 PROBE_SET();
688 if ((id == 0xA5011044) || (id == 0xA5111044)) {
689 PROBE_RETURN ("Adaptec Caching SCSI RAID");
691 PROBE_RETURN (NULL);
692 } /* asr_probe */
695 * Probe/Attach for DOMINO chipset.
697 STATIC PROBE_RET
698 domino_probe(PROBE_ARGS)
700 PROBE_SET();
701 if (id == 0x10121044) {
702 PROBE_RETURN ("Adaptec Caching Memory Controller");
704 PROBE_RETURN (NULL);
705 } /* domino_probe */
707 STATIC ATTACH_RET
708 domino_attach (ATTACH_ARGS)
710 ATTACH_RETURN (0);
711 } /* domino_attach */
714 * Probe/Attach for MODE0 adapters.
716 STATIC PROBE_RET
717 mode0_probe(PROBE_ARGS)
719 PROBE_SET();
722 * If/When we can get a business case to commit to a
723 * Mode0 driver here, we can make all these tests more
724 * specific and robust. Mode0 adapters have their processors
725 * turned off, this the chips are in a raw state.
728 /* This is a PLX9054 */
729 if (id == 0x905410B5) {
730 PROBE_RETURN ("Adaptec Mode0 PM3757");
732 /* This is a PLX9080 */
733 if (id == 0x908010B5) {
734 PROBE_RETURN ("Adaptec Mode0 PM3754/PM3755");
736 /* This is a ZION 80303 */
737 if (id == 0x53098086) {
738 PROBE_RETURN ("Adaptec Mode0 3010S");
740 /* This is an i960RS */
741 if (id == 0x39628086) {
742 PROBE_RETURN ("Adaptec Mode0 2100S");
744 /* This is an i960RN */
745 if (id == 0x19648086) {
746 PROBE_RETURN ("Adaptec Mode0 PM2865/2400A/3200S/3400S");
748 #if 0 /* this would match any generic i960 -- mjs */
749 /* This is an i960RP (typically also on Motherboards) */
750 if (id == 0x19608086) {
751 PROBE_RETURN ("Adaptec Mode0 PM2554/PM1554/PM2654");
753 #endif
754 PROBE_RETURN (NULL);
755 } /* mode0_probe */
757 STATIC ATTACH_RET
758 mode0_attach (ATTACH_ARGS)
760 ATTACH_RETURN (0);
761 } /* mode0_attach */
763 STATIC INLINE union asr_ccb *
764 asr_alloc_ccb (
765 IN Asr_softc_t * sc)
767 OUT union asr_ccb * new_ccb;
769 new_ccb = (union asr_ccb *)kmalloc(sizeof(*new_ccb), M_DEVBUF,
770 M_WAITOK | M_ZERO);
771 new_ccb->ccb_h.pinfo.priority = 1;
772 new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
773 new_ccb->ccb_h.spriv_ptr0 = sc;
774 return (new_ccb);
775 } /* asr_alloc_ccb */
777 STATIC INLINE void
778 asr_free_ccb (
779 IN union asr_ccb * free_ccb)
781 kfree(free_ccb, M_DEVBUF);
782 } /* asr_free_ccb */
785 * Print inquiry data `carefully'
787 STATIC void
788 ASR_prstring (
789 u_int8_t * s,
790 int len)
792 while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
793 kprintf ("%c", *(s++));
795 } /* ASR_prstring */
798 * Prototypes
800 STATIC INLINE int ASR_queue (
801 IN Asr_softc_t * sc,
802 IN PI2O_MESSAGE_FRAME Message);
804 * Send a message synchronously and without Interrupt to a ccb.
806 STATIC int
807 ASR_queue_s (
808 INOUT union asr_ccb * ccb,
809 IN PI2O_MESSAGE_FRAME Message)
811 U32 Mask;
812 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
815 * We do not need any (optional byteswapping) method access to
816 * the Initiator context field.
818 I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
820 /* Prevent interrupt service */
821 crit_enter();
822 sc->ha_Virt->Mask = (Mask = sc->ha_Virt->Mask)
823 | Mask_InterruptsDisabled;
825 if (ASR_queue (sc, Message) == EMPTY_QUEUE) {
826 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
827 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
831 * Wait for this board to report a finished instruction.
833 while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
834 (void)asr_intr (sc);
837 /* Re-enable Interrupts */
838 sc->ha_Virt->Mask = Mask;
839 crit_exit();
841 return (ccb->ccb_h.status);
842 } /* ASR_queue_s */
845 * Send a message synchronously to a Asr_softc_t
847 STATIC int
848 ASR_queue_c (
849 IN Asr_softc_t * sc,
850 IN PI2O_MESSAGE_FRAME Message)
852 union asr_ccb * ccb;
853 OUT int status;
855 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
856 return (CAM_REQUEUE_REQ);
859 status = ASR_queue_s (ccb, Message);
861 asr_free_ccb(ccb);
863 return (status);
864 } /* ASR_queue_c */
867 * Add the specified ccb to the active queue
869 STATIC INLINE void
870 ASR_ccbAdd (
871 IN Asr_softc_t * sc,
872 INOUT union asr_ccb * ccb)
874 crit_enter();
875 LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
876 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
877 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
879 * RAID systems can take considerable time to
880 * complete some commands given the large cache
881 * flashes switching from write back to write thru.
883 ccb->ccb_h.timeout = 6 * 60 * 1000;
885 callout_reset(&ccb->ccb_h.timeout_ch,
886 (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
888 crit_exit();
889 } /* ASR_ccbAdd */
892 * Remove the specified ccb from the active queue.
894 STATIC INLINE void
895 ASR_ccbRemove (
896 IN Asr_softc_t * sc,
897 INOUT union asr_ccb * ccb)
899 crit_enter();
900 callout_stop(&ccb->ccb_h.timeout_ch);
901 LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
902 crit_exit();
903 } /* ASR_ccbRemove */
906 * Fail all the active commands, so they get re-issued by the operating
907 * system.
909 STATIC INLINE void
910 ASR_failActiveCommands (
911 IN Asr_softc_t * sc)
913 struct ccb_hdr * ccb;
915 #if 0 /* Currently handled by callers, unnecessary paranoia currently */
916 /* Left in for historical perspective. */
917 defAlignLong(I2O_EXEC_LCT_NOTIFY_MESSAGE,Message);
918 PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;
920 /* Send a blind LCT command to wait for the enableSys to complete */
921 Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)ASR_fillMessage(Message,
922 sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT));
923 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
924 I2O_EXEC_LCT_NOTIFY);
925 I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
926 I2O_CLASS_MATCH_ANYCLASS);
927 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
928 #endif
930 crit_enter();
932 * We do not need to inform the CAM layer that we had a bus
933 * reset since we manage it on our own, this also prevents the
934 * SCSI_DELAY settling that would be required on other systems.
935 * The `SCSI_DELAY' has already been handled by the card via the
936 * acquisition of the LCT table while we are at CAM priority level.
937 * for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
938 * xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
941 while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != (struct ccb_hdr *)NULL) {
942 ASR_ccbRemove (sc, (union asr_ccb *)ccb);
944 ccb->status &= ~CAM_STATUS_MASK;
945 ccb->status |= CAM_REQUEUE_REQ;
946 /* Nothing Transfered */
947 ((struct ccb_scsiio *)ccb)->resid
948 = ((struct ccb_scsiio *)ccb)->dxfer_len;
950 if (ccb->path) {
951 xpt_done ((union ccb *)ccb);
952 } else {
953 wakeup ((caddr_t)ccb);
956 crit_exit();
957 } /* ASR_failActiveCommands */
960 * The following command causes the HBA to reset the specific bus
962 STATIC INLINE void
963 ASR_resetBus(
964 IN Asr_softc_t * sc,
965 IN int bus)
967 defAlignLong(I2O_HBA_BUS_RESET_MESSAGE,Message);
968 I2O_HBA_BUS_RESET_MESSAGE * Message_Ptr;
969 PI2O_LCT_ENTRY Device;
971 Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(Message,
972 sizeof(I2O_HBA_BUS_RESET_MESSAGE));
973 I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
974 I2O_HBA_BUS_RESET);
975 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
976 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
977 ++Device) {
978 if (((Device->le_type & I2O_PORT) != 0)
979 && (Device->le_bus == bus)) {
980 I2O_MESSAGE_FRAME_setTargetAddress(
981 &Message_Ptr->StdMessageFrame,
982 I2O_LCT_ENTRY_getLocalTID(Device));
983 /* Asynchronous command, with no expectations */
984 (void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
985 break;
988 } /* ASR_resetBus */
990 STATIC INLINE int
991 ASR_getBlinkLedCode (
992 IN Asr_softc_t * sc)
994 if ((sc != (Asr_softc_t *)NULL)
995 && (sc->ha_blinkLED != (u_int8_t *)NULL)
996 && (sc->ha_blinkLED[1] == 0xBC)) {
997 return (sc->ha_blinkLED[0]);
999 return (0);
1000 } /* ASR_getBlinkCode */
1003 * Determine the address of an TID lookup. Must be done at high priority
1004 * since the address can be changed by other threads of execution.
1006 * Returns NULL pointer if not indexible (but will attempt to generate
1007 * an index if `new_entry' flag is set to TRUE).
1009 * All addressible entries are to be guaranteed zero if never initialized.
/*
 * Map a (bus, target, lun) tuple to the address of its cached TID slot.
 * The per-bus target table and per-target LUN table are grown lazily in
 * power-of-two chunks (BUS_CHUNK / TARGET_CHUNK) when `new_entry' is
 * TRUE; when FALSE, a missing table means failure.  Returns NULL when
 * the tuple is out of range or the slot does not exist and may not be
 * created.  Callers serialize access with crit_enter()/crit_exit(),
 * since the tables here can be reallocated and freed.
 * NOTE(review): kmalloc(M_WAITOK) does not return NULL on DragonFly, so
 * the NULL checks on the allocations are effectively dead -- harmless,
 * but worth confirming against kmalloc(9) before relying on them.
 */
1011 STATIC INLINE tid_t *
1012 ASR_getTidAddress(
1013 INOUT Asr_softc_t * sc,
1014 IN int bus,
1015 IN int target,
1016 IN int lun,
1017 IN int new_entry)
1019 target2lun_t * bus_ptr;
1020 lun2tid_t * target_ptr;
1021 unsigned new_size;
1024 * Validity checking of incoming parameters. More of a bound
1025 * expansion limit than an issue with the code dealing with the
1026 * values.
1028 * sc must be valid before it gets here, so that check could be
1029 * dropped if speed a critical issue.
1031 if ((sc == (Asr_softc_t *)NULL)
1032 || (bus > MAX_CHANNEL)
1033 || (target > sc->ha_MaxId)
1034 || (lun > sc->ha_MaxLun)) {
1035 debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
1036 (u_long)sc, bus, target, lun);
1037 return ((tid_t *)NULL);
1040 * See if there is an associated bus list.
1042 * for performance, allocate in size of BUS_CHUNK chunks.
1043 * BUS_CHUNK must be a power of two. This is to reduce
1044 * fragmentation effects on the allocations.
1046 # define BUS_CHUNK 8
1047 new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
1048 if ((bus_ptr = sc->ha_targets[bus]) == (target2lun_t *)NULL) {
1050 * Allocate a new structure?
1051 * Since one element in structure, the +1
1052 * needed for size has been abstracted.
1054 if ((new_entry == FALSE)
1055 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)kmalloc (
1056 sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
1057 M_TEMP, M_WAITOK))
1058 == (target2lun_t *)NULL)) {
1059 debug_asr_printf("failed to allocate bus list\n");
1060 return ((tid_t *)NULL);
1062 bzero (bus_ptr, sizeof(*bus_ptr)
1063 + (sizeof(bus_ptr->LUN) * new_size));
1064 bus_ptr->size = new_size + 1;
1065 } else if (bus_ptr->size <= new_size) {
1066 target2lun_t * new_bus_ptr;
1069 * Reallocate a new structure?
1070 * Since one element in structure, the +1
1071 * needed for size has been abstracted.
1073 if ((new_entry == FALSE)
1074 || ((new_bus_ptr = (target2lun_t *)kmalloc (
1075 sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
1076 M_TEMP, M_WAITOK))
1077 == (target2lun_t *)NULL)) {
1078 debug_asr_printf("failed to reallocate bus list\n");
1079 return ((tid_t *)NULL);
1082 * Zero and copy the whole thing, safer, simpler coding
1083 * and not really performance critical at this point.
1085 bzero (new_bus_ptr, sizeof(*bus_ptr)
1086 + (sizeof(bus_ptr->LUN) * new_size));
1087 bcopy (bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
1088 + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
1089 sc->ha_targets[bus] = new_bus_ptr;
1090 kfree (bus_ptr, M_TEMP);
1091 bus_ptr = new_bus_ptr;
1092 bus_ptr->size = new_size + 1;
1095 * We now have the bus list, lets get to the target list.
1096 * Since most systems have only *one* lun, we do not allocate
1097 * in chunks as above, here we allow one, then in chunk sizes.
1098 * TARGET_CHUNK must be a power of two. This is to reduce
1099 * fragmentation effects on the allocations.
1101 # define TARGET_CHUNK 8
1102 if ((new_size = lun) != 0) {
1103 new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
1105 if ((target_ptr = bus_ptr->LUN[target]) == (lun2tid_t *)NULL) {
1107 * Allocate a new structure?
1108 * Since one element in structure, the +1
1109 * needed for size has been abstracted.
1111 if ((new_entry == FALSE)
1112 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)kmalloc (
1113 sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
1114 M_TEMP, M_WAITOK))
1115 == (lun2tid_t *)NULL)) {
1116 debug_asr_printf("failed to allocate target list\n");
1117 return ((tid_t *)NULL);
1119 bzero (target_ptr, sizeof(*target_ptr)
1120 + (sizeof(target_ptr->TID) * new_size));
1121 target_ptr->size = new_size + 1;
1122 } else if (target_ptr->size <= new_size) {
1123 lun2tid_t * new_target_ptr;
1126 * Reallocate a new structure?
1127 * Since one element in structure, the +1
1128 * needed for size has been abstracted.
1130 if ((new_entry == FALSE)
1131 || ((new_target_ptr = (lun2tid_t *)kmalloc (
1132 sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
1133 M_TEMP, M_WAITOK))
1134 == (lun2tid_t *)NULL)) {
1135 debug_asr_printf("failed to reallocate target list\n");
1136 return ((tid_t *)NULL);
1139 * Zero and copy the whole thing, safer, simpler coding
1140 * and not really performance critical at this point.
1142 bzero (new_target_ptr, sizeof(*target_ptr)
1143 + (sizeof(target_ptr->TID) * new_size));
1144 bcopy (target_ptr, new_target_ptr,
1145 sizeof(*target_ptr)
1146 + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
1147 bus_ptr->LUN[target] = new_target_ptr;
1148 kfree (target_ptr, M_TEMP);
1149 target_ptr = new_target_ptr;
1150 target_ptr->size = new_size + 1;
1153 * Now, acquire the TID address from the LUN indexed list.
1155 return (&(target_ptr->TID[lun]));
1156 } /* ASR_getTidAddress */
1159 * Get a pre-existing TID relationship.
1161 * If the TID was never set, return (tid_t)-1.
1163 * should use mutex rather than spl.
1165 STATIC INLINE tid_t
1166 ASR_getTid (
1167 IN Asr_softc_t * sc,
1168 IN int bus,
1169 IN int target,
1170 IN int lun)
1172 tid_t * tid_ptr;
1173 OUT tid_t retval;
1175 crit_enter();
1176 if (((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, FALSE))
1177 == (tid_t *)NULL)
1178 /* (tid_t)0 or (tid_t)-1 indicate no TID */
1179 || (*tid_ptr == (tid_t)0)) {
1180 crit_exit();
1181 return ((tid_t)-1);
1183 retval = *tid_ptr;
1184 crit_exit();
1185 return (retval);
1186 } /* ASR_getTid */
1189 * Set a TID relationship.
1191 * If the TID was not set, return (tid_t)-1.
1193 * should use mutex rather than spl.
1195 STATIC INLINE tid_t
1196 ASR_setTid (
1197 INOUT Asr_softc_t * sc,
1198 IN int bus,
1199 IN int target,
1200 IN int lun,
1201 INOUT tid_t TID)
1203 tid_t * tid_ptr;
1205 if (TID != (tid_t)-1) {
1206 if (TID == 0) {
1207 return ((tid_t)-1);
1209 crit_enter();
1210 if ((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, TRUE))
1211 == (tid_t *)NULL) {
1212 crit_exit();
1213 return ((tid_t)-1);
1215 *tid_ptr = TID;
1216 crit_exit();
1218 return (TID);
1219 } /* ASR_setTid */
1221 /*-------------------------------------------------------------------------*/
1222 /* Function ASR_rescan */
1223 /*-------------------------------------------------------------------------*/
1224 /* The Parameters Passed To This Function Are : */
1225 /* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1226 /* */
1227 /* This Function Will rescan the adapter and resynchronize any data */
1228 /* */
1229 /* Return : 0 For OK, Error Code Otherwise */
1230 /*-------------------------------------------------------------------------*/
/*
 * Re-acquire the LCT and HRT from the adapter, then walk every
 * (bus, target, lun) to detect devices whose cached TID changed.
 * Changed entries generate CAM async events (AC_LOST_DEVICE,
 * AC_FOUND_DEVICE, AC_INQ_CHANGED, AC_GETDEV_CHANGED) either on a
 * per-device path or, when a path cannot be created, batched on the
 * bus path.  Returns 0 on success or the ASR_acquireLct/ASR_acquireHrt
 * error code.
 * NOTE(review): the path created by xpt_create_path is presumably
 * released by an xpt_free_path call elided from this view -- confirm
 * against the full source before assuming a leak.
 */
1232 STATIC INLINE int
1233 ASR_rescan(
1234 IN Asr_softc_t * sc)
1236 int bus;
1237 OUT int error;
1240 * Re-acquire the LCT table and synchronize us to the adapter.
1242 if ((error = ASR_acquireLct(sc)) == 0) {
1243 error = ASR_acquireHrt(sc);
1246 if (error != 0) {
1247 return error;
1250 bus = sc->ha_MaxBus;
1251 /* Reset all existing cached TID lookups */
1252 do {
1253 int target, event = 0;
1256 * Scan for all targets on this bus to see if they
1257 * got affected by the rescan.
1259 for (target = 0; target <= sc->ha_MaxId; ++target) {
1260 int lun;
1262 /* Stay away from the controller ID */
1263 if (target == sc->ha_adapter_target[bus]) {
1264 continue;
1266 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
1267 PI2O_LCT_ENTRY Device;
1268 tid_t TID = (tid_t)-1;
1269 tid_t LastTID;
1272 * See if the cached TID changed. Search for
1273 * the device in our new LCT.
1275 for (Device = sc->ha_LCT->LCTEntry;
1276 Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
1277 + I2O_LCT_getTableSize(sc->ha_LCT));
1278 ++Device) {
1279 if ((Device->le_type != I2O_UNKNOWN)
1280 && (Device->le_bus == bus)
1281 && (Device->le_target == target)
1282 && (Device->le_lun == lun)
1283 && (I2O_LCT_ENTRY_getUserTID(Device)
1284 == 0xFFF)) {
1285 TID = I2O_LCT_ENTRY_getLocalTID(
1286 Device);
1287 break;
1291 * Indicate to the OS that the label needs
1292 * to be recalculated, or that the specific
1293 * open device is no longer valid (Merde)
1294 * because the cached TID changed.
1296 LastTID = ASR_getTid (sc, bus, target, lun);
1297 if (LastTID != TID) {
1298 struct cam_path * path;
1300 if (xpt_create_path(&path,
1301 /*periph*/NULL,
1302 cam_sim_path(sc->ha_sim[bus]),
1303 target, lun) != CAM_REQ_CMP) {
1304 if (TID == (tid_t)-1) {
1305 event |= AC_LOST_DEVICE;
1306 } else {
1307 event |= AC_INQ_CHANGED
1308 | AC_GETDEV_CHANGED;
1310 } else {
1311 if (TID == (tid_t)-1) {
1312 xpt_async(
1313 AC_LOST_DEVICE,
1314 path, NULL);
1315 } else if (LastTID == (tid_t)-1) {
1316 struct ccb_getdev ccb;
1318 xpt_setup_ccb(
1319 &(ccb.ccb_h),
1320 path, /*priority*/5);
1321 xpt_async(
1322 AC_FOUND_DEVICE,
1323 path,
1324 &ccb);
1325 } else {
1326 xpt_async(
1327 AC_INQ_CHANGED,
1328 path, NULL);
1329 xpt_async(
1330 AC_GETDEV_CHANGED,
1331 path, NULL);
1336 * We have the option of clearing the
1337 * cached TID for it to be rescanned, or to
1338 * set it now even if the device never got
1339 * accessed. We chose the later since we
1340 * currently do not use the condition that
1341 * the TID ever got cached.
1343 ASR_setTid (sc, bus, target, lun, TID);
1347 * The xpt layer can not handle multiple events at the
1348 * same call.
1350 if (event & AC_LOST_DEVICE) {
1351 xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
1353 if (event & AC_INQ_CHANGED) {
1354 xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
1356 if (event & AC_GETDEV_CHANGED) {
1357 xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
1359 } while (--bus >= 0);
1360 return (error);
1361 } /* ASR_rescan */
1363 /*-------------------------------------------------------------------------*/
1364 /* Function ASR_reset */
1365 /*-------------------------------------------------------------------------*/
1366 /* The Parameters Passed To This Function Are : */
1367 /* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1368 /* */
1369 /* This Function Will reset the adapter and resynchronize any data */
1370 /* */
1371 /* Return : None */
1372 /*-------------------------------------------------------------------------*/
1374 STATIC INLINE int
1375 ASR_reset(
1376 IN Asr_softc_t * sc)
1378 int retVal;
1380 crit_enter();
1381 if ((sc->ha_in_reset == HA_IN_RESET)
1382 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
1383 crit_exit();
1384 return (EBUSY);
1387 * Promotes HA_OPERATIONAL to HA_IN_RESET,
1388 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1390 ++(sc->ha_in_reset);
1391 if (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0) {
1392 debug_asr_printf ("ASR_resetIOP failed\n");
1394 * We really need to take this card off-line, easier said
1395 * than make sense. Better to keep retrying for now since if a
1396 * UART cable is connected the blinkLEDs the adapter is now in
1397 * a hard state requiring action from the monitor commands to
1398 * the HBA to continue. For debugging waiting forever is a
1399 * good thing. In a production system, however, one may wish
1400 * to instead take the card off-line ...
1402 # if 0 && (defined(HA_OFF_LINE))
1404 * Take adapter off-line.
1406 kprintf ("asr%d: Taking adapter off-line\n",
1407 sc->ha_path[0]
1408 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1409 : 0);
1410 sc->ha_in_reset = HA_OFF_LINE;
1411 crit_exit();
1412 return (ENXIO);
1413 # else
1414 /* Wait Forever */
1415 while (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0);
1416 # endif
1418 retVal = ASR_init (sc);
1419 crit_exit();
1420 if (retVal != 0) {
1421 debug_asr_printf ("ASR_init failed\n");
1422 sc->ha_in_reset = HA_OFF_LINE;
1423 return (ENXIO);
1425 if (ASR_rescan (sc) != 0) {
1426 debug_asr_printf ("ASR_rescan failed\n");
1428 ASR_failActiveCommands (sc);
1429 if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
1430 kprintf ("asr%d: Brining adapter back on-line\n",
1431 sc->ha_path[0]
1432 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1433 : 0);
1435 sc->ha_in_reset = HA_OPERATIONAL;
1436 return (0);
1437 } /* ASR_reset */
1440 * Device timeout handler.
/*
 * Per-command timeout handler (callout).
 * Escalation policy visible in the code below:
 *   1. If the adapter is flashing a Blink LED fault code, reset the
 *      whole adapter (retrying the callout if the reset fails).
 *   2. On the first timeout of a command, mark it CAM_CMD_TIMEOUT,
 *      re-arm the callout and issue a SCSI bus reset, since command
 *      abort "does not function on the ASR card".
 *   3. On a second timeout of the same command, reset the adapter.
 */
1442 STATIC void
1443 asr_timeout(
1444 INOUT void * arg)
1446 union asr_ccb * ccb = (union asr_ccb *)arg;
1447 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1448 int s;
1450 debug_asr_print_path(ccb);
1451 debug_asr_printf("timed out");
1454 * Check if the adapter has locked up?
1456 if ((s = ASR_getBlinkLedCode(sc)) != 0) {
1457 /* Reset Adapter */
1458 kprintf ("asr%d: Blink LED 0x%x resetting adapter\n",
1459 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
1460 if (ASR_reset (sc) == ENXIO) {
1461 /* Try again later */
1462 callout_reset(&ccb->ccb_h.timeout_ch,
1463 (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
1465 return;
1468 * Abort does not function on the ASR card!!! Walking away from
1469 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
1470 * our best bet, followed by a complete adapter reset if that fails.
1472 crit_enter();
1473 /* Check if we already timed out once to raise the issue */
1474 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
1475 debug_asr_printf (" AGAIN\nreinitializing adapter\n");
1476 if (ASR_reset (sc) == ENXIO) {
1477 callout_reset(&ccb->ccb_h.timeout_ch,
1478 (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
1480 crit_exit();
1481 return;
1483 debug_asr_printf ("\nresetting bus\n");
1484 /* If the BUS reset does not take, then an adapter reset is next! */
1485 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1486 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
1487 callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
1488 asr_timeout, ccb);
1489 ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
1490 xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
1491 crit_exit();
1492 } /* asr_timeout */
1495 * send a message asynchronously
1497 STATIC INLINE int
1498 ASR_queue(
1499 IN Asr_softc_t * sc,
1500 IN PI2O_MESSAGE_FRAME Message)
1502 OUT U32 MessageOffset;
1503 union asr_ccb * ccb;
1505 debug_asr_printf ("Host Command Dump:\n");
1506 debug_asr_dump_message (Message);
1508 ccb = (union asr_ccb *)(long)
1509 I2O_MESSAGE_FRAME_getInitiatorContext64(Message);
1511 if ((MessageOffset = ASR_getMessage(sc->ha_Virt)) != EMPTY_QUEUE) {
1512 bcopy (Message, sc->ha_Fvirt + MessageOffset,
1513 I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
1514 if (ccb) {
1515 ASR_ccbAdd (sc, ccb);
1517 /* Post the command */
1518 sc->ha_Virt->ToFIFO = MessageOffset;
1519 } else {
1520 if (ASR_getBlinkLedCode(sc)) {
1522 * Unlikely we can do anything if we can't grab a
1523 * message frame :-(, but lets give it a try.
1525 (void)ASR_reset (sc);
1528 return (MessageOffset);
1529 } /* ASR_queue */
/*
 * SG(SGL, Index, Flags, Buffer, Size) -- fill in simple scatter/gather
 * element `Index' of `SGL': set the byte count to Size, OR `Flags'
 * into I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT, and store the physical
 * address of `Buffer' (a NULL Buffer stores NULL unconverted).
 * NOTE(review): the macro expands to three separate statements, so it
 * must only be invoked as a full statement inside braces, never as an
 * unbraced if/else body.
 */
1532 /* Simple Scatter Gather elements */
1533 #define SG(SGL,Index,Flags,Buffer,Size) \
1534 I2O_FLAGS_COUNT_setCount( \
1535 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
1536 Size); \
1537 I2O_FLAGS_COUNT_setFlags( \
1538 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
1539 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags)); \
1540 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress( \
1541 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]), \
1542 (Buffer == NULL) ? NULL : KVTOPHYS(Buffer))
1545 * Retrieve Parameter Group.
1546 * Buffer must be allocated using defAlignLong macro.
/*
 * Issue an I2O UTIL_PARAMS_GET for parameter group `Group' of device
 * `TID' and return a pointer to the result payload inside `Buffer',
 * or NULL on failure.  Two SG elements are used: one for the operation
 * template (FIELD_GET of all fields), one for the reply buffer.
 * `Buffer' must have been allocated with the defAlignLong macro; the
 * returned pointer aliases Buffer's Info area, so its lifetime is the
 * caller's buffer lifetime.  This is a synchronous call (ASR_queue_c).
 */
1548 STATIC void *
1549 ASR_getParams(
1550 IN Asr_softc_t * sc,
1551 IN tid_t TID,
1552 IN int Group,
1553 OUT void * Buffer,
1554 IN unsigned BufferSize)
1556 struct paramGetMessage {
1557 I2O_UTIL_PARAMS_GET_MESSAGE M;
1558 char F[
1559 sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
1560 struct Operations {
1561 I2O_PARAM_OPERATIONS_LIST_HEADER Header;
1562 I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
1563 } O;
1565 defAlignLong(struct paramGetMessage, Message);
1566 struct Operations * Operations_Ptr;
1567 I2O_UTIL_PARAMS_GET_MESSAGE * Message_Ptr;
1568 struct ParamBuffer {
1569 I2O_PARAM_RESULTS_LIST_HEADER Header;
1570 I2O_PARAM_READ_OPERATION_RESULT Read;
1571 char Info[1];
1572 } * Buffer_Ptr;
1574 Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(Message,
1575 sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1576 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1577 Operations_Ptr = (struct Operations *)((char *)Message_Ptr
1578 + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1579 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1580 bzero ((void *)Operations_Ptr, sizeof(struct Operations));
1581 I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
1582 &(Operations_Ptr->Header), 1);
1583 I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
1584 &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
1585 I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
1586 &(Operations_Ptr->Template[0]), 0xFFFF);
1587 I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
1588 &(Operations_Ptr->Template[0]), Group);
1589 bzero ((void *)(Buffer_Ptr = getAlignLong(struct ParamBuffer, Buffer)),
1590 BufferSize);
1592 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1593 I2O_VERSION_11
1594 + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1595 / sizeof(U32)) << 4));
1596 I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
1597 TID);
1598 I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
1599 I2O_UTIL_PARAMS_GET);
1601 * Set up the buffers as scatter gather elements.
1603 SG(&(Message_Ptr->SGL), 0,
1604 I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
1605 Operations_Ptr, sizeof(struct Operations));
1606 SG(&(Message_Ptr->SGL), 1,
1607 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
1608 Buffer_Ptr, BufferSize);
1610 if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
1611 && (Buffer_Ptr->Header.ResultCount)) {
1612 return ((void *)(Buffer_Ptr->Info));
1614 return ((void *)NULL);
1615 } /* ASR_getParams */
1618 * Acquire the LCT information.
/*
 * Acquire the adapter's Logical Configuration Table (LCT).
 * Two-pass protocol: a first EXEC_LCT_NOTIFY with a minimal buffer
 * obtains the real table size; the table is then allocated (replacing
 * any previous sc->ha_LCT), described to the adapter as a page-by-page
 * SG list (growing the message frame as SG elements are appended), and
 * fetched with a second, synchronous EXEC_LCT_NOTIFY.  Finally each
 * entry's le_type/le_bus/le_target/le_lun fields are derived from its
 * I2O class via ASR_getParams.  Returns 0 on success, EINVAL for an
 * implausible table size, ENODEV when the fetch fails.
 */
1620 STATIC INLINE int
1621 ASR_acquireLct (
1622 INOUT Asr_softc_t * sc)
1624 PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;
1625 PI2O_SGE_SIMPLE_ELEMENT sg;
1626 int MessageSizeInBytes;
1627 caddr_t v;
1628 int len;
1629 I2O_LCT Table;
1630 PI2O_LCT_ENTRY Entry;
1633 * sc value assumed valid
1635 MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE)
1636 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
1637 Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)kmalloc (
1638 MessageSizeInBytes, M_TEMP, M_WAITOK);
1639 (void)ASR_fillMessage((char *)Message_Ptr, MessageSizeInBytes);
1640 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1641 (I2O_VERSION_11 +
1642 (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1643 / sizeof(U32)) << 4)));
1644 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1645 I2O_EXEC_LCT_NOTIFY);
1646 I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
1647 I2O_CLASS_MATCH_ANYCLASS);
1649 * Call the LCT table to determine the number of device entries
1650 * to reserve space for.
1652 SG(&(Message_Ptr->SGL), 0,
1653 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
1654 sizeof(I2O_LCT));
1656 * since this code is reused in several systems, code efficiency
1657 * is greater by using a shift operation rather than a divide by
1658 * sizeof(u_int32_t).
1660 I2O_LCT_setTableSize(&Table,
1661 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1662 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1664 * Determine the size of the LCT table.
1666 if (sc->ha_LCT) {
1667 kfree (sc->ha_LCT, M_TEMP);
1670 * kmalloc only generates contiguous memory when less than a
1671 * page is expected. We must break the request up into an SG list ...
1673 if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
1674 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
1675 || (len > (128 * 1024))) { /* Arbitrary */
1676 kfree (Message_Ptr, M_TEMP);
1677 return (EINVAL);
1679 sc->ha_LCT = (PI2O_LCT)kmalloc (len, M_TEMP, M_WAITOK);
1681 * since this code is reused in several systems, code efficiency
1682 * is greater by using a shift operation rather than a divide by
1683 * sizeof(u_int32_t).
1685 I2O_LCT_setTableSize(sc->ha_LCT,
1686 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1688 * Convert the access to the LCT table into a SG list.
1690 sg = Message_Ptr->SGL.u.Simple;
1691 v = (caddr_t)(sc->ha_LCT);
1692 for (;;) {
1693 int next, base, span;
1695 span = 0;
1696 next = base = KVTOPHYS(v);
1697 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
1699 /* How far can we go contiguously */
1700 while ((len > 0) && (base == next)) {
1701 int size;
1703 next = trunc_page(base) + PAGE_SIZE;
1704 size = next - base;
1705 if (size > len) {
1706 size = len;
1708 span += size;
1709 v += size;
1710 len -= size;
1711 base = KVTOPHYS(v);
1714 /* Construct the Flags */
1715 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
1717 int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
1718 if (len <= 0) {
1719 rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
1720 | I2O_SGL_FLAGS_LAST_ELEMENT
1721 | I2O_SGL_FLAGS_END_OF_BUFFER);
1723 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
1726 if (len <= 0) {
1727 break;
1731 * Incrementing requires resizing of the packet.
1733 ++sg;
1734 MessageSizeInBytes += sizeof(*sg);
1735 I2O_MESSAGE_FRAME_setMessageSize(
1736 &(Message_Ptr->StdMessageFrame),
1737 I2O_MESSAGE_FRAME_getMessageSize(
1738 &(Message_Ptr->StdMessageFrame))
1739 + (sizeof(*sg) / sizeof(U32)));
1741 PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;
1743 NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
1744 kmalloc (MessageSizeInBytes, M_TEMP, M_WAITOK);
1745 span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
1746 bcopy ((caddr_t)Message_Ptr,
1747 (caddr_t)NewMessage_Ptr, span);
1748 kfree (Message_Ptr, M_TEMP);
1749 sg = (PI2O_SGE_SIMPLE_ELEMENT)
1750 (((caddr_t)NewMessage_Ptr) + span);
1751 Message_Ptr = NewMessage_Ptr;
1754 { int retval;
1756 retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1757 kfree (Message_Ptr, M_TEMP);
1758 if (retval != CAM_REQ_CMP) {
1759 return (ENODEV);
1762 /* If the LCT table grew, lets truncate accesses */
1763 if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
1764 I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
1766 for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
1767 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1768 ++Entry) {
1769 Entry->le_type = I2O_UNKNOWN;
1770 switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {
1772 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
1773 Entry->le_type = I2O_BSA;
1774 break;
1776 case I2O_CLASS_SCSI_PERIPHERAL:
1777 Entry->le_type = I2O_SCSI;
1778 break;
1780 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
1781 Entry->le_type = I2O_FCA;
1782 break;
1784 case I2O_CLASS_BUS_ADAPTER_PORT:
1785 Entry->le_type = I2O_PORT | I2O_SCSI;
1786 /* FALLTHRU */
1787 case I2O_CLASS_FIBRE_CHANNEL_PORT:
1788 if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
1789 I2O_CLASS_FIBRE_CHANNEL_PORT) {
1790 Entry->le_type = I2O_PORT | I2O_FCA;
1792 { struct ControllerInfo {
1793 I2O_PARAM_RESULTS_LIST_HEADER Header;
1794 I2O_PARAM_READ_OPERATION_RESULT Read;
1795 I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1797 defAlignLong(struct ControllerInfo, Buffer);
1798 PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1800 Entry->le_bus = 0xff;
1801 Entry->le_target = 0xff;
1802 Entry->le_lun = 0xff;
1804 if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
1805 ASR_getParams(sc,
1806 I2O_LCT_ENTRY_getLocalTID(Entry),
1807 I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
1808 Buffer, sizeof(struct ControllerInfo)))
1809 == (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)NULL) {
1810 continue;
1812 Entry->le_target
1813 = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
1814 Info);
1815 Entry->le_lun = 0;
1816 } /* FALLTHRU */
1817 default:
1818 continue;
1820 { struct DeviceInfo {
1821 I2O_PARAM_RESULTS_LIST_HEADER Header;
1822 I2O_PARAM_READ_OPERATION_RESULT Read;
1823 I2O_DPT_DEVICE_INFO_SCALAR Info;
1825 defAlignLong (struct DeviceInfo, Buffer);
1826 PI2O_DPT_DEVICE_INFO_SCALAR Info;
1828 Entry->le_bus = 0xff;
1829 Entry->le_target = 0xff;
1830 Entry->le_lun = 0xff;
1832 if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
1833 ASR_getParams(sc,
1834 I2O_LCT_ENTRY_getLocalTID(Entry),
1835 I2O_DPT_DEVICE_INFO_GROUP_NO,
1836 Buffer, sizeof(struct DeviceInfo)))
1837 == (PI2O_DPT_DEVICE_INFO_SCALAR)NULL) {
1838 continue;
1840 Entry->le_type
1841 |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
1842 Entry->le_bus
1843 = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
1844 if ((Entry->le_bus > sc->ha_MaxBus)
1845 && (Entry->le_bus <= MAX_CHANNEL)) {
1846 sc->ha_MaxBus = Entry->le_bus;
1848 Entry->le_target
1849 = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
1850 Entry->le_lun
1851 = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
1855 * A zero return value indicates success.
1857 return (0);
1858 } /* ASR_acquireLct */
1861 * Initialize a message frame.
1862 * We assume that the CDB has already been set up, so all we do here is
1863 * generate the Scatter Gather list.
/*
 * Build a PRIVATE_SCSI_SCB_EXECUTE message frame for the ccb: resolve
 * (bus, target, lun) to a TID (consulting and priming the TID cache
 * from the LCT), fill in the private-message header and SCB flags,
 * copy the CDB, then translate the ccb data buffer into a
 * page-granular scatter/gather list, finishing with a mandatory
 * request-sense SG element.  Returns the populated frame, or NULL when
 * no TID can be found for the device.
 * NOTE(review): the data SG loop stops at SG_SIZE elements; handling
 * of transfers that exceed that many elements is not visible in this
 * view -- confirm against the full source.
 */
1865 STATIC INLINE PI2O_MESSAGE_FRAME
1866 ASR_init_message(
1867 IN union asr_ccb * ccb,
1868 OUT PI2O_MESSAGE_FRAME Message)
1870 int next, span, base, rw;
1871 OUT PI2O_MESSAGE_FRAME Message_Ptr;
1872 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1873 PI2O_SGE_SIMPLE_ELEMENT sg;
1874 caddr_t v;
1875 vm_size_t size, len;
1876 U32 MessageSize;
1878 /* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
1879 bzero (Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message),
1880 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT)));
1883 int target = ccb->ccb_h.target_id;
1884 int lun = ccb->ccb_h.target_lun;
1885 int bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1886 tid_t TID;
1888 if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
1889 PI2O_LCT_ENTRY Device;
1891 TID = (tid_t)0;
1892 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
1893 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1894 ++Device) {
1895 if ((Device->le_type != I2O_UNKNOWN)
1896 && (Device->le_bus == bus)
1897 && (Device->le_target == target)
1898 && (Device->le_lun == lun)
1899 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
1900 TID = I2O_LCT_ENTRY_getLocalTID(Device);
1901 ASR_setTid (sc, Device->le_bus,
1902 Device->le_target, Device->le_lun,
1903 TID);
1904 break;
1908 if (TID == (tid_t)0) {
1909 return ((PI2O_MESSAGE_FRAME)NULL);
1911 I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
1912 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
1913 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
1915 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
1916 (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1917 / sizeof(U32)) << 4));
1918 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
1919 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
1920 - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
1921 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
1922 I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
1923 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
1924 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
1925 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
1926 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
1927 I2O_SCB_FLAG_ENABLE_DISCONNECT
1928 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1929 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
1931 * We do not need any (optional byteswapping) method access to
1932 * the Initiator & Transaction context field.
1934 I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
1936 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
1937 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
1939 * copy the cdb over
1941 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
1942 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
1943 bcopy (&(ccb->csio.cdb_io),
1944 ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB, ccb->csio.cdb_len);
1947 * Given a buffer describing a transfer, set up a scatter/gather map
1948 * in a ccb to map that SCSI transfer.
1951 rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;
1953 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
1954 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
1955 (ccb->csio.dxfer_len)
1956 ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
1957 | I2O_SCB_FLAG_ENABLE_DISCONNECT
1958 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1959 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
1960 : (I2O_SCB_FLAG_XFER_FROM_DEVICE
1961 | I2O_SCB_FLAG_ENABLE_DISCONNECT
1962 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1963 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
1964 : (I2O_SCB_FLAG_ENABLE_DISCONNECT
1965 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1966 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
1969 * Given a transfer described by a `data', fill in the SG list.
1971 sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];
1973 len = ccb->csio.dxfer_len;
1974 v = ccb->csio.data_ptr;
1975 ASSERT (ccb->csio.dxfer_len >= 0);
1976 MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
1977 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
1978 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
1979 while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
1980 Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
1981 span = 0;
1982 next = base = KVTOPHYS(v);
1983 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
1985 /* How far can we go contiguously */
1986 while ((len > 0) && (base == next)) {
1987 next = trunc_page(base) + PAGE_SIZE;
1988 size = next - base;
1989 if (size > len) {
1990 size = len;
1992 span += size;
1993 v += size;
1994 len -= size;
1995 base = KVTOPHYS(v);
1998 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
1999 if (len == 0) {
2000 rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
2002 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
2003 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
2004 ++sg;
2005 MessageSize += sizeof(*sg) / sizeof(U32);
2007 /* We always do the request sense ... */
2008 if ((span = ccb->csio.sense_len) == 0) {
2009 span = sizeof(ccb->csio.sense_data);
2011 SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2012 &(ccb->csio.sense_data), span);
2013 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
2014 MessageSize + (sizeof(*sg) / sizeof(U32)));
2015 return (Message_Ptr);
2016 } /* ASR_init_message */
2019 * Reset the adapter.
/*
 * Initialize the adapter's outbound (reply) FIFO: send an
 * EXEC_OUTBOUND_INIT message, busy-poll the reply status until the
 * adapter responds, allocate the reply-frame pool with contigmalloc
 * (once only -- reused across resets), and post every frame's physical
 * address to the FromFIFO.  Returns the final outbound-init status, or
 * 0 when the message could not be initiated.
 * NOTE(review): the `while (*Reply_Ptr < ...);' poll spins with
 * interrupts masked and no timeout -- intentional here per the driver's
 * "wait forever" philosophy, but worth knowing when debugging hangs.
 */
2021 STATIC INLINE U32
2022 ASR_initOutBound (
2023 INOUT Asr_softc_t * sc)
2025 struct initOutBoundMessage {
2026 I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
2027 U32 R;
2029 defAlignLong(struct initOutBoundMessage,Message);
2030 PI2O_EXEC_OUTBOUND_INIT_MESSAGE Message_Ptr;
2031 OUT U32 * volatile Reply_Ptr;
2032 U32 Old;
2035 * Build up our copy of the Message.
2037 Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(Message,
2038 sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
2039 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2040 I2O_EXEC_OUTBOUND_INIT);
2041 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
2042 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
2043 sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
2045 * Reset the Reply Status
2047 *(Reply_Ptr = (U32 *)((char *)Message_Ptr
2048 + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
2049 SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
2050 sizeof(U32));
2052 * Send the Message out
2054 if ((Old = ASR_initiateCp (sc->ha_Virt, sc->ha_Fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
2055 u_long size, addr;
2058 * Wait for a response (Poll).
2060 while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
2062 * Re-enable the interrupts.
2064 sc->ha_Virt->Mask = Old;
2066 * Populate the outbound table.
2068 if (sc->ha_Msgs == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
2070 /* Allocate the reply frames */
2071 size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
2072 * sc->ha_Msgs_Count;
2075 * contigmalloc only works reliably at
2076 * initialization time.
2078 if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
2079 contigmalloc (size, M_DEVBUF, M_WAITOK | M_ZERO, 0ul,
2080 0xFFFFFFFFul, (u_long)sizeof(U32), 0ul))
2081 != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
2082 sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
2086 /* Initialize the outbound FIFO */
2087 if (sc->ha_Msgs != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL)
2088 for (size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
2089 size; --size) {
2090 sc->ha_Virt->FromFIFO = addr;
2091 addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
2093 return (*Reply_Ptr);
2095 return (0);
2096 } /* ASR_initOutBound */
2099 * Set the system table
/*
 * Send the I2O system table to the adapter (EXEC_SYS_TAB_SET).
 * Builds a header counting every attached asr controller (walking the
 * global Asr_softc list), then an SG list with one element per
 * controller's ha_SystemTable plus two zero-length trailing elements
 * (visible below as the memory and private space descriptors required
 * by the SysTabSet layout).  Synchronous; returns the ASR_queue_c
 * status.  Temporary allocations are freed before returning.
 */
2101 STATIC INLINE int
2102 ASR_setSysTab(
2103 IN Asr_softc_t * sc)
2105 PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
2106 PI2O_SET_SYSTAB_HEADER SystemTable;
2107 Asr_softc_t * ha;
2108 PI2O_SGE_SIMPLE_ELEMENT sg;
2109 int retVal;
2111 SystemTable = (PI2O_SET_SYSTAB_HEADER)kmalloc (
2112 sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK | M_ZERO);
2113 for (ha = Asr_softc; ha; ha = ha->ha_next) {
2114 ++SystemTable->NumberEntries;
2116 Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)kmalloc (
2117 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2118 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
2119 M_TEMP, M_WAITOK);
2120 (void)ASR_fillMessage((char *)Message_Ptr,
2121 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2122 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
2123 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
2124 (I2O_VERSION_11 +
2125 (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
2126 / sizeof(U32)) << 4)));
2127 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2128 I2O_EXEC_SYS_TAB_SET);
2130 * Call the LCT table to determine the number of device entries
2131 * to reserve space for.
2132 * since this code is reused in several systems, code efficiency
2133 * is greater by using a shift operation rather than a divide by
2134 * sizeof(u_int32_t).
2136 sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
2137 + ((I2O_MESSAGE_FRAME_getVersionOffset(
2138 &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
2139 SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
2140 ++sg;
2141 for (ha = Asr_softc; ha; ha = ha->ha_next) {
2142 SG(sg, 0,
2143 ((ha->ha_next)
2144 ? (I2O_SGL_FLAGS_DIR)
2145 : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
2146 &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
2147 ++sg;
2149 SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
2150 SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
2151 | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
2152 retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2153 kfree (Message_Ptr, M_TEMP);
2154 kfree (SystemTable, M_TEMP);
2155 return (retVal);
2156 } /* ASR_setSysTab */
2158 STATIC INLINE int
2159 ASR_acquireHrt (
2160 INOUT Asr_softc_t * sc)
2162 defAlignLong(I2O_EXEC_HRT_GET_MESSAGE,Message);
2163 I2O_EXEC_HRT_GET_MESSAGE * Message_Ptr;
2164 struct {
2165 I2O_HRT Header;
2166 I2O_HRT_ENTRY Entry[MAX_CHANNEL];
2167 } Hrt;
2168 u_int8_t NumberOfEntries;
2169 PI2O_HRT_ENTRY Entry;
2171 bzero ((void *)&Hrt, sizeof (Hrt));
2172 Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(Message,
2173 sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2174 + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2175 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
2176 (I2O_VERSION_11
2177 + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
2178 / sizeof(U32)) << 4)));
2179 I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
2180 I2O_EXEC_HRT_GET);
2183 * Set up the buffers as scatter gather elements.
2185 SG(&(Message_Ptr->SGL), 0,
2186 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2187 &Hrt, sizeof(Hrt));
2188 if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
2189 return (ENODEV);
2191 if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
2192 > (MAX_CHANNEL + 1)) {
2193 NumberOfEntries = MAX_CHANNEL + 1;
2195 for (Entry = Hrt.Header.HRTEntry;
2196 NumberOfEntries != 0;
2197 ++Entry, --NumberOfEntries) {
2198 PI2O_LCT_ENTRY Device;
2200 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2201 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
2202 ++Device) {
2203 if (I2O_LCT_ENTRY_getLocalTID(Device)
2204 == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
2205 Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
2206 Entry) >> 16;
2207 if ((Device->le_bus > sc->ha_MaxBus)
2208 && (Device->le_bus <= MAX_CHANNEL)) {
2209 sc->ha_MaxBus = Device->le_bus;
2214 return (0);
2215 } /* ASR_acquireHrt */
2218 * Enable the adapter.
2220 STATIC INLINE int
2221 ASR_enableSys (
2222 IN Asr_softc_t * sc)
2224 defAlignLong(I2O_EXEC_SYS_ENABLE_MESSAGE,Message);
2225 PI2O_EXEC_SYS_ENABLE_MESSAGE Message_Ptr;
2227 Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(Message,
2228 sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2229 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2230 I2O_EXEC_SYS_ENABLE);
2231 return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2232 } /* ASR_enableSys */
2235 * Perform the stages necessary to initialize the adapter
2237 STATIC int
2238 ASR_init(
2239 IN Asr_softc_t * sc)
2241 return ((ASR_initOutBound(sc) == 0)
2242 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2243 || (ASR_enableSys(sc) != CAM_REQ_CMP));
2244 } /* ASR_init */
2247 * Send a Synchronize Cache command to the target device.
2249 STATIC INLINE void
2250 ASR_sync (
2251 IN Asr_softc_t * sc,
2252 IN int bus,
2253 IN int target,
2254 IN int lun)
2256 tid_t TID;
2259 * We will not synchronize the device when there are outstanding
2260 * commands issued by the OS (this is due to a locked up device,
2261 * as the OS normally would flush all outstanding commands before
2262 * issuing a shutdown or an adapter reset).
2264 if ((sc != (Asr_softc_t *)NULL)
2265 && (LIST_FIRST(&(sc->ha_ccb)) != (struct ccb_hdr *)NULL)
2266 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
2267 && (TID != (tid_t)0)) {
2268 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
2269 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
2271 bzero (Message_Ptr
2272 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
2273 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2274 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2276 I2O_MESSAGE_FRAME_setVersionOffset(
2277 (PI2O_MESSAGE_FRAME)Message_Ptr,
2278 I2O_VERSION_11
2279 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2280 - sizeof(I2O_SG_ELEMENT))
2281 / sizeof(U32)) << 4));
2282 I2O_MESSAGE_FRAME_setMessageSize(
2283 (PI2O_MESSAGE_FRAME)Message_Ptr,
2284 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2285 - sizeof(I2O_SG_ELEMENT))
2286 / sizeof(U32));
2287 I2O_MESSAGE_FRAME_setInitiatorAddress (
2288 (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2289 I2O_MESSAGE_FRAME_setFunction(
2290 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2291 I2O_MESSAGE_FRAME_setTargetAddress(
2292 (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
2293 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2294 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2295 I2O_SCSI_SCB_EXEC);
2296 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
2297 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2298 I2O_SCB_FLAG_ENABLE_DISCONNECT
2299 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2300 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2301 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2302 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2303 DPT_ORGANIZATION_ID);
2304 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2305 Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
2306 Message_Ptr->CDB[1] = (lun << 5);
2308 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2309 (I2O_SCB_FLAG_XFER_FROM_DEVICE
2310 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2311 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2312 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2314 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2319 STATIC INLINE void
2320 ASR_synchronize (
2321 IN Asr_softc_t * sc)
2323 int bus, target, lun;
2325 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2326 for (target = 0; target <= sc->ha_MaxId; ++target) {
2327 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2328 ASR_sync(sc,bus,target,lun);
2335 * Reset the HBA, targets and BUS.
2336 * Currently this resets *all* the SCSI busses.
2338 STATIC INLINE void
2339 asr_hbareset(
2340 IN Asr_softc_t * sc)
2342 ASR_synchronize (sc);
2343 (void)ASR_reset (sc);
2344 } /* asr_hbareset */
2347 * A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2348 * limit and a reduction in error checking (in the pre 4.0 case).
2350 STATIC int
2351 asr_pci_map_mem (
2352 IN device_t tag,
2353 IN Asr_softc_t * sc)
2355 int rid;
2356 u_int32_t p, l, s;
2359 * I2O specification says we must find first *memory* mapped BAR
2361 for (rid = PCIR_MAPS;
2362 rid < (PCIR_MAPS + 4 * sizeof(u_int32_t));
2363 rid += sizeof(u_int32_t)) {
2364 p = pci_read_config(tag, rid, sizeof(p));
2365 if ((p & 1) == 0) {
2366 break;
2370 * Give up?
2372 if (rid >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
2373 rid = PCIR_MAPS;
2375 p = pci_read_config(tag, rid, sizeof(p));
2376 pci_write_config(tag, rid, -1, sizeof(p));
2377 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
2378 pci_write_config(tag, rid, p, sizeof(p));
2379 if (l > MAX_MAP) {
2380 l = MAX_MAP;
2383 * The 2005S Zero Channel RAID solution is not a perfect PCI
2384 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
2385 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
2386 * BAR0+2MB and sets it's size to 2MB. The IOP registers are
2387 * accessible via BAR0, the messaging registers are accessible
2388 * via BAR1. If the subdevice code is 50 to 59 decimal.
2390 s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s));
2391 if (s != 0xA5111044) {
2392 s = pci_read_config(tag, PCIR_SUBVEND_0, sizeof(s));
2393 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
2394 && (ADPTDOMINATOR_SUB_ID_START <= s)
2395 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
2396 l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
2399 p &= ~15;
2400 sc->ha_mem_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
2401 p, p + l, l, RF_ACTIVE);
2402 if (sc->ha_mem_res == (struct resource *)NULL) {
2403 return (0);
2405 sc->ha_Base = (void *)rman_get_start(sc->ha_mem_res);
2406 if (sc->ha_Base == (void *)NULL) {
2407 return (0);
2409 sc->ha_Virt = (i2oRegs_t *) rman_get_virtual(sc->ha_mem_res);
2410 if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
2411 if ((rid += sizeof(u_int32_t))
2412 >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
2413 return (0);
2415 p = pci_read_config(tag, rid, sizeof(p));
2416 pci_write_config(tag, rid, -1, sizeof(p));
2417 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
2418 pci_write_config(tag, rid, p, sizeof(p));
2419 if (l > MAX_MAP) {
2420 l = MAX_MAP;
2422 p &= ~15;
2423 sc->ha_mes_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
2424 p, p + l, l, RF_ACTIVE);
2425 if (sc->ha_mes_res == (struct resource *)NULL) {
2426 return (0);
2428 if ((void *)rman_get_start(sc->ha_mes_res) == (void *)NULL) {
2429 return (0);
2431 sc->ha_Fvirt = (U8 *) rman_get_virtual(sc->ha_mes_res);
2432 } else {
2433 sc->ha_Fvirt = (U8 *)(sc->ha_Virt);
2435 return (1);
2436 } /* asr_pci_map_mem */
2439 * A simplified copy of the real pci_map_int with additional
2440 * registration requirements.
2442 STATIC int
2443 asr_pci_map_int (
2444 IN device_t tag,
2445 IN Asr_softc_t * sc)
2447 int rid = 0;
2448 int error;
2450 sc->ha_irq_res = bus_alloc_resource(tag, SYS_RES_IRQ, &rid,
2451 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE);
2452 if (sc->ha_irq_res == (struct resource *)NULL) {
2453 return (0);
2455 error = bus_setup_intr(tag, sc->ha_irq_res, 0,
2456 (driver_intr_t *)asr_intr, (void *)sc,
2457 &(sc->ha_intr), NULL);
2458 if (error) {
2459 return (0);
2461 sc->ha_irq = pci_read_config(tag, PCIR_INTLINE, sizeof(char));
2462 return (1);
2463 } /* asr_pci_map_int */
2466 * Attach the devices, and virtual devices to the driver list.
2468 STATIC ATTACH_RET
2469 asr_attach (ATTACH_ARGS)
2471 Asr_softc_t * sc;
2472 struct scsi_inquiry_data * iq;
2473 ATTACH_SET();
2475 sc = kmalloc(sizeof(*sc), M_DEVBUF, M_INTWAIT | M_ZERO);
2476 if (Asr_softc == (Asr_softc_t *)NULL) {
2478 * Fixup the OS revision as saved in the dptsig for the
2479 * engine (dptioctl.h) to pick up.
2481 bcopy (osrelease, &ASR_sig.dsDescription[16], 5);
2482 kprintf ("asr%d: major=%d\n", unit, asr_ops.head.maj);
2485 * Initialize the software structure
2487 LIST_INIT(&(sc->ha_ccb));
2488 /* Link us into the HA list */
2490 Asr_softc_t **ha;
2492 for (ha = &Asr_softc; *ha; ha = &((*ha)->ha_next));
2493 *(ha) = sc;
2496 PI2O_EXEC_STATUS_GET_REPLY status;
2497 int size;
2500 * This is the real McCoy!
2502 if (!asr_pci_map_mem(tag, sc)) {
2503 kprintf ("asr%d: could not map memory\n", unit);
2504 ATTACH_RETURN(ENXIO);
2506 /* Enable if not formerly enabled */
2507 pci_write_config (tag, PCIR_COMMAND,
2508 pci_read_config (tag, PCIR_COMMAND, sizeof(char))
2509 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char));
2510 /* Knowledge is power, responsibility is direct */
2512 struct pci_devinfo {
2513 STAILQ_ENTRY(pci_devinfo) pci_links;
2514 struct resource_list resources;
2515 pcicfgregs cfg;
2516 } * dinfo = device_get_ivars(tag);
2517 sc->ha_pciBusNum = dinfo->cfg.bus;
2518 sc->ha_pciDeviceNum = (dinfo->cfg.slot << 3)
2519 | dinfo->cfg.func;
2521 /* Check if the device is there? */
2522 if ((ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt) == 0)
2523 || ((status = (PI2O_EXEC_STATUS_GET_REPLY)kmalloc (
2524 sizeof(I2O_EXEC_STATUS_GET_REPLY), M_TEMP, M_WAITOK))
2525 == (PI2O_EXEC_STATUS_GET_REPLY)NULL)
2526 || (ASR_getStatus(sc->ha_Virt, sc->ha_Fvirt, status) == NULL)) {
2527 kprintf ("asr%d: could not initialize hardware\n", unit);
2528 ATTACH_RETURN(ENODEV); /* Get next, maybe better luck */
2530 sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2531 sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2532 sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2533 sc->ha_SystemTable.IopState = status->IopState;
2534 sc->ha_SystemTable.MessengerType = status->MessengerType;
2535 sc->ha_SystemTable.InboundMessageFrameSize
2536 = status->InboundMFrameSize;
2537 sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow
2538 = (U32)(sc->ha_Base) + (U32)(&(((i2oRegs_t *)NULL)->ToFIFO));
2540 if (!asr_pci_map_int(tag, (void *)sc)) {
2541 kprintf ("asr%d: could not map interrupt\n", unit);
2542 ATTACH_RETURN(ENXIO);
2545 /* Adjust the maximim inbound count */
2546 if (((sc->ha_QueueSize
2547 = I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status))
2548 > MAX_INBOUND)
2549 || (sc->ha_QueueSize == 0)) {
2550 sc->ha_QueueSize = MAX_INBOUND;
2553 /* Adjust the maximum outbound count */
2554 if (((sc->ha_Msgs_Count
2555 = I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status))
2556 > MAX_OUTBOUND)
2557 || (sc->ha_Msgs_Count == 0)) {
2558 sc->ha_Msgs_Count = MAX_OUTBOUND;
2560 if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2561 sc->ha_Msgs_Count = sc->ha_QueueSize;
2564 /* Adjust the maximum SG size to adapter */
2565 if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(
2566 status) << 2)) > MAX_INBOUND_SIZE) {
2567 size = MAX_INBOUND_SIZE;
2569 kfree (status, M_TEMP);
2570 sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2571 + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
2575 * Only do a bus/HBA reset on the first time through. On this
2576 * first time through, we do not send a flush to the devices.
2578 if (ASR_init(sc) == 0) {
2579 struct BufferInfo {
2580 I2O_PARAM_RESULTS_LIST_HEADER Header;
2581 I2O_PARAM_READ_OPERATION_RESULT Read;
2582 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2584 defAlignLong (struct BufferInfo, Buffer);
2585 PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2586 # define FW_DEBUG_BLED_OFFSET 8
2588 if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2589 ASR_getParams(sc, 0,
2590 I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2591 Buffer, sizeof(struct BufferInfo)))
2592 != (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)NULL) {
2593 sc->ha_blinkLED = sc->ha_Fvirt
2594 + I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info)
2595 + FW_DEBUG_BLED_OFFSET;
2597 if (ASR_acquireLct(sc) == 0) {
2598 (void)ASR_acquireHrt(sc);
2600 } else {
2601 kprintf ("asr%d: failed to initialize\n", unit);
2602 ATTACH_RETURN(ENXIO);
2605 * Add in additional probe responses for more channels. We
2606 * are reusing the variable `target' for a channel loop counter.
2607 * Done here because of we need both the acquireLct and
2608 * acquireHrt data.
2610 { PI2O_LCT_ENTRY Device;
2612 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2613 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
2614 ++Device) {
2615 if (Device->le_type == I2O_UNKNOWN) {
2616 continue;
2618 if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
2619 if (Device->le_target > sc->ha_MaxId) {
2620 sc->ha_MaxId = Device->le_target;
2622 if (Device->le_lun > sc->ha_MaxLun) {
2623 sc->ha_MaxLun = Device->le_lun;
2626 if (((Device->le_type & I2O_PORT) != 0)
2627 && (Device->le_bus <= MAX_CHANNEL)) {
2628 /* Do not increase MaxId for efficiency */
2629 sc->ha_adapter_target[Device->le_bus]
2630 = Device->le_target;
2637 * Print the HBA model number as inquired from the card.
2640 kprintf ("asr%d:", unit);
2642 iq = (struct scsi_inquiry_data *)kmalloc (
2643 sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK | M_ZERO);
2644 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
2645 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
2646 int posted = 0;
2648 bzero (Message_Ptr
2649 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
2650 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2651 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2653 I2O_MESSAGE_FRAME_setVersionOffset(
2654 (PI2O_MESSAGE_FRAME)Message_Ptr,
2655 I2O_VERSION_11
2656 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2657 - sizeof(I2O_SG_ELEMENT))
2658 / sizeof(U32)) << 4));
2659 I2O_MESSAGE_FRAME_setMessageSize(
2660 (PI2O_MESSAGE_FRAME)Message_Ptr,
2661 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2662 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT))
2663 / sizeof(U32));
2664 I2O_MESSAGE_FRAME_setInitiatorAddress (
2665 (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2666 I2O_MESSAGE_FRAME_setFunction(
2667 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2668 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2669 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2670 I2O_SCSI_SCB_EXEC);
2671 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2672 I2O_SCB_FLAG_ENABLE_DISCONNECT
2673 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2674 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2675 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
2676 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2677 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2678 DPT_ORGANIZATION_ID);
2679 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2680 Message_Ptr->CDB[0] = INQUIRY;
2681 Message_Ptr->CDB[4] = (unsigned char)sizeof(struct scsi_inquiry_data);
2682 if (Message_Ptr->CDB[4] == 0) {
2683 Message_Ptr->CDB[4] = 255;
2686 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2687 (I2O_SCB_FLAG_XFER_FROM_DEVICE
2688 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2689 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2690 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2692 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
2693 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
2694 sizeof(struct scsi_inquiry_data));
2695 SG(&(Message_Ptr->SGL), 0,
2696 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2697 iq, sizeof(struct scsi_inquiry_data));
2698 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2700 if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
2701 kprintf (" ");
2702 ASR_prstring (iq->vendor, 8);
2703 ++posted;
2705 if (iq->product[0] && (iq->product[0] != ' ')) {
2706 kprintf (" ");
2707 ASR_prstring (iq->product, 16);
2708 ++posted;
2710 if (iq->revision[0] && (iq->revision[0] != ' ')) {
2711 kprintf (" FW Rev. ");
2712 ASR_prstring (iq->revision, 4);
2713 ++posted;
2715 kfree ((caddr_t)iq, M_TEMP);
2716 if (posted) {
2717 kprintf (",");
2719 kprintf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
2720 (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
2723 * fill in the prototype cam_path.
2726 int bus;
2727 union asr_ccb * ccb;
2729 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
2730 kprintf ("asr%d: CAM could not be notified of asynchronous callback parameters\n", unit);
2731 ATTACH_RETURN(ENOMEM);
2733 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2734 int QueueSize = sc->ha_QueueSize;
2736 if (QueueSize > MAX_INBOUND) {
2737 QueueSize = MAX_INBOUND;
2741 * Construct our first channel SIM entry
2743 sc->ha_sim[bus] = cam_sim_alloc(
2744 asr_action, asr_poll, "asr", sc,
2745 unit, &sim_mplock, 1, QueueSize, NULL);
2746 if (sc->ha_sim[bus] == NULL)
2747 continue;
2749 if (xpt_bus_register(sc->ha_sim[bus], bus)
2750 != CAM_SUCCESS) {
2751 cam_sim_free(sc->ha_sim[bus]);
2752 sc->ha_sim[bus] = NULL;
2753 continue;
2756 if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
2757 cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
2758 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2759 xpt_bus_deregister(
2760 cam_sim_path(sc->ha_sim[bus]));
2761 cam_sim_free(sc->ha_sim[bus]);
2762 sc->ha_sim[bus] = NULL;
2763 continue;
2766 asr_free_ccb (ccb);
2769 * Generate the device node information
2771 make_dev(&asr_ops, unit, 0, 0, S_IRWXU, "rasr%d", unit);
2772 ATTACH_RETURN(0);
2773 } /* asr_attach */
2775 STATIC void
2776 asr_poll(
2777 IN struct cam_sim *sim)
2779 asr_intr(cam_sim_softc(sim));
2780 } /* asr_poll */
2782 STATIC void
2783 asr_action(
2784 IN struct cam_sim * sim,
2785 IN union ccb * ccb)
2787 struct Asr_softc * sc;
2789 debug_asr_printf ("asr_action(%lx,%lx{%x})\n",
2790 (u_long)sim, (u_long)ccb, ccb->ccb_h.func_code);
2792 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));
2794 ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);
2796 switch (ccb->ccb_h.func_code) {
2798 /* Common cases first */
2799 case XPT_SCSI_IO: /* Execute the requested I/O operation */
2801 struct Message {
2802 char M[MAX_INBOUND_SIZE];
2804 defAlignLong(struct Message,Message);
2805 PI2O_MESSAGE_FRAME Message_Ptr;
2807 /* Reject incoming commands while we are resetting the card */
2808 if (sc->ha_in_reset != HA_OPERATIONAL) {
2809 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2810 if (sc->ha_in_reset >= HA_OFF_LINE) {
2811 /* HBA is now off-line */
2812 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
2813 } else {
2814 /* HBA currently resetting, try again later. */
2815 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2817 debug_asr_cmd_printf (" e\n");
2818 xpt_done(ccb);
2819 debug_asr_cmd_printf (" q\n");
2820 break;
2822 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2823 kprintf(
2824 "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
2825 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
2826 ccb->csio.cdb_io.cdb_bytes[0],
2827 cam_sim_bus(sim),
2828 ccb->ccb_h.target_id,
2829 ccb->ccb_h.target_lun);
2831 debug_asr_cmd_printf ("(%d,%d,%d,%d)",
2832 cam_sim_unit(sim),
2833 cam_sim_bus(sim),
2834 ccb->ccb_h.target_id,
2835 ccb->ccb_h.target_lun);
2836 debug_asr_cmd_dump_ccb(ccb);
2838 if ((Message_Ptr = ASR_init_message ((union asr_ccb *)ccb,
2839 (PI2O_MESSAGE_FRAME)Message)) != (PI2O_MESSAGE_FRAME)NULL) {
2840 debug_asr_cmd2_printf ("TID=%x:\n",
2841 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
2842 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
2843 debug_asr_cmd2_dump_message(Message_Ptr);
2844 debug_asr_cmd1_printf (" q");
2846 if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
2847 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2848 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2849 debug_asr_cmd_printf (" E\n");
2850 xpt_done(ccb);
2852 debug_asr_cmd_printf (" Q\n");
2853 break;
2856 * We will get here if there is no valid TID for the device
2857 * referenced in the scsi command packet.
2859 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2860 ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
2861 debug_asr_cmd_printf (" B\n");
2862 xpt_done(ccb);
2863 break;
2866 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
2867 /* Rese HBA device ... */
2868 asr_hbareset (sc);
2869 ccb->ccb_h.status = CAM_REQ_CMP;
2870 xpt_done(ccb);
2871 break;
2873 # if (defined(REPORT_LUNS))
2874 case REPORT_LUNS:
2875 # endif
2876 case XPT_ABORT: /* Abort the specified CCB */
2877 /* XXX Implement */
2878 ccb->ccb_h.status = CAM_REQ_INVALID;
2879 xpt_done(ccb);
2880 break;
2882 case XPT_SET_TRAN_SETTINGS:
2883 /* XXX Implement */
2884 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2885 xpt_done(ccb);
2886 break;
2888 case XPT_GET_TRAN_SETTINGS:
2889 /* Get default/user set transfer settings for the target */
2891 struct ccb_trans_settings *cts = &(ccb->cts);
2892 struct ccb_trans_settings_scsi *scsi =
2893 &cts->proto_specific.scsi;
2894 struct ccb_trans_settings_spi *spi =
2895 &cts->xport_specific.spi;
2897 if (cts->type == CTS_TYPE_USER_SETTINGS) {
2898 cts->protocol = PROTO_SCSI;
2899 cts->protocol_version = SCSI_REV_2;
2900 cts->transport = XPORT_SPI;
2901 cts->transport_version = 2;
2903 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2904 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
2905 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2906 spi->sync_period = 6; /* 40MHz */
2907 spi->sync_offset = 15;
2908 spi->valid = CTS_SPI_VALID_SYNC_RATE
2909 | CTS_SPI_VALID_SYNC_OFFSET
2910 | CTS_SPI_VALID_BUS_WIDTH
2911 | CTS_SPI_VALID_DISC;
2912 scsi->valid = CTS_SCSI_VALID_TQ;
2914 ccb->ccb_h.status = CAM_REQ_CMP;
2915 } else {
2916 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2918 xpt_done(ccb);
2919 break;
2922 case XPT_CALC_GEOMETRY:
2924 struct ccb_calc_geometry *ccg;
2925 u_int32_t size_mb;
2926 u_int32_t secs_per_cylinder;
2928 ccg = &(ccb->ccg);
2929 size_mb = ccg->volume_size
2930 / ((1024L * 1024L) / ccg->block_size);
2932 if (size_mb > 4096) {
2933 ccg->heads = 255;
2934 ccg->secs_per_track = 63;
2935 } else if (size_mb > 2048) {
2936 ccg->heads = 128;
2937 ccg->secs_per_track = 63;
2938 } else if (size_mb > 1024) {
2939 ccg->heads = 65;
2940 ccg->secs_per_track = 63;
2941 } else {
2942 ccg->heads = 64;
2943 ccg->secs_per_track = 32;
2945 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2946 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2947 ccb->ccb_h.status = CAM_REQ_CMP;
2948 xpt_done(ccb);
2949 break;
2952 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
2953 ASR_resetBus (sc, cam_sim_bus(sim));
2954 ccb->ccb_h.status = CAM_REQ_CMP;
2955 xpt_done(ccb);
2956 break;
2958 case XPT_TERM_IO: /* Terminate the I/O process */
2959 /* XXX Implement */
2960 ccb->ccb_h.status = CAM_REQ_INVALID;
2961 xpt_done(ccb);
2962 break;
2964 case XPT_PATH_INQ: /* Path routing inquiry */
2966 struct ccb_pathinq *cpi = &(ccb->cpi);
2968 cpi->version_num = 1; /* XXX??? */
2969 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2970 cpi->target_sprt = 0;
2971 /* Not necessary to reset bus, done by HDM initialization */
2972 cpi->hba_misc = PIM_NOBUSRESET;
2973 cpi->hba_eng_cnt = 0;
2974 cpi->max_target = sc->ha_MaxId;
2975 cpi->max_lun = sc->ha_MaxLun;
2976 cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
2977 cpi->bus_id = cam_sim_bus(sim);
2978 cpi->base_transfer_speed = 3300;
2979 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2980 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
2981 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2982 cpi->unit_number = cam_sim_unit(sim);
2983 cpi->ccb_h.status = CAM_REQ_CMP;
2984 cpi->transport = XPORT_SPI;
2985 cpi->transport_version = 2;
2986 cpi->protocol = PROTO_SCSI;
2987 cpi->protocol_version = SCSI_REV_2;
2988 xpt_done(ccb);
2989 break;
2991 default:
2992 ccb->ccb_h.status = CAM_REQ_INVALID;
2993 xpt_done(ccb);
2994 break;
2996 } /* asr_action */
3000 * Handle processing of current CCB as pointed to by the Status.
3002 STATIC int
3003 asr_intr (
3004 IN Asr_softc_t * sc)
3006 OUT int processed;
3008 for (processed = 0;
3009 sc->ha_Virt->Status & Mask_InterruptsDisabled;
3010 processed = 1) {
3011 union asr_ccb * ccb;
3012 U32 ReplyOffset;
3013 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
3015 if (((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)
3016 && ((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)) {
3017 break;
3019 Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
3020 - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
3022 * We do not need any (optional byteswapping) method access to
3023 * the Initiator context field.
3025 ccb = (union asr_ccb *)(long)
3026 I2O_MESSAGE_FRAME_getInitiatorContext64(
3027 &(Reply->StdReplyFrame.StdMessageFrame));
3028 if (I2O_MESSAGE_FRAME_getMsgFlags(
3029 &(Reply->StdReplyFrame.StdMessageFrame))
3030 & I2O_MESSAGE_FLAGS_FAIL) {
3031 defAlignLong(I2O_UTIL_NOP_MESSAGE,Message);
3032 PI2O_UTIL_NOP_MESSAGE Message_Ptr;
3033 U32 MessageOffset;
3035 MessageOffset = (u_long)
3036 I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
3037 (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
3039 * Get the Original Message Frame's address, and get
3040 * it's Transaction Context into our space. (Currently
3041 * unused at original authorship, but better to be
3042 * safe than sorry). Straight copy means that we
3043 * need not concern ourselves with the (optional
3044 * byteswapping) method access.
3046 Reply->StdReplyFrame.TransactionContext
3047 = ((PI2O_SINGLE_REPLY_MESSAGE_FRAME)
3048 (sc->ha_Fvirt + MessageOffset))->TransactionContext;
3050 * For 64 bit machines, we need to reconstruct the
3051 * 64 bit context.
3053 ccb = (union asr_ccb *)(long)
3054 I2O_MESSAGE_FRAME_getInitiatorContext64(
3055 &(Reply->StdReplyFrame.StdMessageFrame));
3057 * Unique error code for command failure.
3059 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3060 &(Reply->StdReplyFrame), (u_int16_t)-2);
3062 * Modify the message frame to contain a NOP and
3063 * re-issue it to the controller.
3065 Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
3066 Message, sizeof(I2O_UTIL_NOP_MESSAGE));
3067 # if (I2O_UTIL_NOP != 0)
3068 I2O_MESSAGE_FRAME_setFunction (
3069 &(Message_Ptr->StdMessageFrame),
3070 I2O_UTIL_NOP);
3071 # endif
3073 * Copy the packet out to the Original Message
3075 bcopy ((caddr_t)Message_Ptr,
3076 sc->ha_Fvirt + MessageOffset,
3077 sizeof(I2O_UTIL_NOP_MESSAGE));
3079 * Issue the NOP
3081 sc->ha_Virt->ToFIFO = MessageOffset;
3085 * Asynchronous command with no return requirements,
3086 * and a generic handler for immunity against odd error
3087 * returns from the adapter.
3089 if (ccb == (union asr_ccb *)NULL) {
3091 * Return Reply so that it can be used for the
3092 * next command
3094 sc->ha_Virt->FromFIFO = ReplyOffset;
3095 continue;
3098 /* Welease Wadjah! (and stop timeouts) */
3099 ASR_ccbRemove (sc, ccb);
3101 switch (
3102 I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
3103 &(Reply->StdReplyFrame))) {
3105 case I2O_SCSI_DSC_SUCCESS:
3106 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3107 ccb->ccb_h.status |= CAM_REQ_CMP;
3108 break;
3110 case I2O_SCSI_DSC_CHECK_CONDITION:
3111 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3112 ccb->ccb_h.status |= CAM_REQ_CMP|CAM_AUTOSNS_VALID;
3113 break;
3115 case I2O_SCSI_DSC_BUSY:
3116 /* FALLTHRU */
3117 case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
3118 /* FALLTHRU */
3119 case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
3120 /* FALLTHRU */
3121 case I2O_SCSI_HBA_DSC_BUS_BUSY:
3122 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3123 ccb->ccb_h.status |= CAM_SCSI_BUSY;
3124 break;
3126 case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
3127 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3128 ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
3129 break;
3131 case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
3132 /* FALLTHRU */
3133 case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
3134 /* FALLTHRU */
3135 case I2O_SCSI_HBA_DSC_LUN_INVALID:
3136 /* FALLTHRU */
3137 case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
3138 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3139 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
3140 break;
3142 case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
3143 /* FALLTHRU */
3144 case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
3145 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3146 ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
3147 break;
3149 default:
3150 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3151 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
3152 break;
3154 if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
3155 ccb->csio.resid -=
3156 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
3157 Reply);
3160 /* Sense data in reply packet */
3161 if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
3162 u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);
3164 if (size) {
3165 if (size > sizeof(ccb->csio.sense_data)) {
3166 size = sizeof(ccb->csio.sense_data);
3168 if (size > I2O_SCSI_SENSE_DATA_SZ) {
3169 size = I2O_SCSI_SENSE_DATA_SZ;
3171 if ((ccb->csio.sense_len)
3172 && (size > ccb->csio.sense_len)) {
3173 size = ccb->csio.sense_len;
3175 bcopy ((caddr_t)Reply->SenseData,
3176 (caddr_t)&(ccb->csio.sense_data), size);
3181 * Return Reply so that it can be used for the next command
3182 * since we have no more need for it now
3184 sc->ha_Virt->FromFIFO = ReplyOffset;
3186 if (ccb->ccb_h.path) {
3187 xpt_done ((union ccb *)ccb);
3188 } else {
3189 wakeup ((caddr_t)ccb);
3192 return (processed);
3193 } /* asr_intr */
3195 #undef QueueSize /* Grrrr */
3196 #undef SG_Size /* Grrrr */
3199 * Meant to be included at the bottom of asr.c !!!
3203 * Included here as hard coded. Done because other necessary include
3204 * files utilize C++ comment structures which make them a nuisance to
3205 * included here just to pick up these three typedefs.
3207 typedef U32 DPT_TAG_T;
3208 typedef U32 DPT_MSG_T;
3209 typedef U32 DPT_RTN_T;
3211 #undef SCSI_RESET /* Conflicts with "scsi/scsiconf.h" defintion */
3212 #include "osd_unix.h"
3214 #define asr_unit(dev) minor(dev)
3216 STATIC INLINE Asr_softc_t *
3217 ASR_get_sc (
3218 IN cdev_t dev)
3220 int unit = asr_unit(dev);
3221 OUT Asr_softc_t * sc = Asr_softc;
3223 while (sc && sc->ha_sim[0] && (cam_sim_unit(sc->ha_sim[0]) != unit)) {
3224 sc = sc->ha_next;
3226 return (sc);
3227 } /* ASR_get_sc */
3229 STATIC u_int8_t ASR_ctlr_held;
3230 #if (!defined(UNREFERENCED_PARAMETER))
3231 # define UNREFERENCED_PARAMETER(x) (void)(x)
3232 #endif
/*
 * Open entry point for the DPT/Adaptec control device.
 *
 * Grants *exclusive* access: only one opener at a time (tracked by the
 * file-scope ASR_ctlr_held flag) and only to sufficiently privileged
 * credentials.  Returns 0 on success, ENODEV when no adapter backs the
 * unit, EBUSY when already held, or the suser_cred() error.
 */
STATIC int
asr_open(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	OUT int error;

	/* No adapter registered for this unit: nothing to open. */
	if (ASR_get_sc (dev) == (Asr_softc_t *)NULL) {
		return (ENODEV);
	}
	/* Critical section serializes the test-and-set of the hold flag. */
	crit_enter();
	if (ASR_ctlr_held) {
		error = EBUSY;
	} else if ((error = suser_cred(ap->a_cred, 0)) == 0) {
		/* Privilege check passed; claim the controller. */
		++ASR_ctlr_held;
	}
	crit_exit();
	return (error);
} /* asr_open */
3253 STATIC int
3254 asr_close(struct dev_close_args *ap)
3256 ASR_ctlr_held = 0;
3257 return (0);
3258 } /* asr_close */
3261 /*-------------------------------------------------------------------------*/
3262 /* Function ASR_queue_i */
3263 /*-------------------------------------------------------------------------*/
3264 /* The Parameters Passed To This Function Are : */
3265 /* Asr_softc_t * : HBA miniport driver's adapter data storage. */
3266 /* PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command */
3267 /* I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure */
3268 /* */
3269 /* This Function Will Take The User Request Packet And Convert It To An */
3270 /* I2O MSG And Send It Off To The Adapter. */
3271 /* */
3272 /* Return : 0 For OK, Error Code Otherwise */
3273 /*-------------------------------------------------------------------------*/
/*
 * Hand a user-constructed I2O message frame (arriving via the I2OUSRCMD
 * ioctl) to the adapter, wait for completion, and copy the reply frame,
 * any scatter/gather data and any auto-sense data back to user space.
 *
 * sc     - adapter softc.
 * Packet - user-space pointer to message frame; the reply area follows
 *          it immediately (at the message-size offset).
 *
 * Returns 0 on success, otherwise an errno (EIO when the board is in
 * BlinkLed fault state or resets, EINVAL on malformed frames, ENXIO,
 * ENOMEM, or a copyin/copyout error).
 */
STATIC INLINE int
ASR_queue_i(
	IN Asr_softc_t	  * sc,
	INOUT PI2O_MESSAGE_FRAME	Packet)
{
	union asr_ccb * ccb;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
	PI2O_MESSAGE_FRAME Message_Ptr;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply_Ptr;
	int MessageSizeInBytes;
	int ReplySizeInBytes;
	int error;
	int s;
	/* Scatter Gather buffer list */
	struct ioctlSgList_S {
		SLIST_ENTRY(ioctlSgList_S) link;
		caddr_t UserSpace;
		I2O_FLAGS_COUNT FlagsCount;
		char KernelSpace[sizeof(long)];
	} * elm;
	/* Generates a `first' entry */
	SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;

	/* Refuse new user commands while the board reports a fault code. */
	if (ASR_getBlinkLedCode(sc)) {
		debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
		  ASR_getBlinkLedCode(sc));
		return (EIO);
	}
	/* Copy in the message into a local allocation */
	Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK);
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		kfree (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
		return (error);
	}
	/* Acquire information to determine type of packet */
	MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
	/* The offset of the reply information within the user packet */
	Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
	  + MessageSizeInBytes);

	/* Check if the message is a synchronous initialization command */
	s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
	kfree (Message_Ptr, M_TEMP);
	switch (s) {

	case I2O_EXEC_IOP_RESET:
	{	U32 status;

		status = ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("resetIOP done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_STATUS_GET:
	{	I2O_EXEC_STATUS_GET_REPLY status;

		if (ASR_getStatus (sc->ha_Virt, sc->ha_Fvirt, &status)
		  == (PI2O_EXEC_STATUS_GET_REPLY)NULL) {
			debug_usr_cmd_printf ("getStatus failed\n");
			return (ENXIO);
		}
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("getStatus done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_OUTBOUND_INIT:
	{	U32 status;

		status = ASR_initOutBound(sc);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("intOutBound done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}
	}

	/* Determine if the message size is valid */
	if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
	 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
		debug_usr_cmd_printf ("Packet size %d incorrect\n",
		  MessageSizeInBytes);
		return (EINVAL);
	}

	/* Re-copy the full frame now that the claimed size is trusted. */
	Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (MessageSizeInBytes,
	  M_TEMP, M_WAITOK);
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  MessageSizeInBytes)) != 0) {
		kfree (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
		  MessageSizeInBytes, error);
		return (error);
	}

	/* Check the size of the reply frame, and start constructing */

	Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK);
	if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		kfree (Reply_Ptr, M_TEMP);
		kfree (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame, errno=%d\n",
		  error);
		return (error);
	}
	ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
	kfree (Reply_Ptr, M_TEMP);
	if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
		kfree (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame[%d], errno=%d\n",
		  ReplySizeInBytes, error);
		return (EINVAL);
	}

	/* Allocate at least a full SCSI error reply, pre-fill header. */
	Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
	  ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
	    ? ReplySizeInBytes
	    : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
	  M_TEMP, M_WAITOK);
	(void)ASR_fillMessage ((char *)Reply_Ptr, ReplySizeInBytes);
	Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
	  = Message_Ptr->InitiatorContext;
	Reply_Ptr->StdReplyFrame.TransactionContext
	  = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
	I2O_MESSAGE_FRAME_setMsgFlags(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
	  I2O_MESSAGE_FRAME_getMsgFlags(
	    &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
	      | I2O_MESSAGE_FLAGS_REPLY);

	/* Check if the message is a special case command */
	switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
	case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
		if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
		  Message_Ptr) & 0xF0) >> 2)) {
			kfree (Message_Ptr, M_TEMP);
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply_Ptr->StdReplyFrame),
			  (ASR_setSysTab(sc) != CAM_REQ_CMP));
			I2O_MESSAGE_FRAME_setMessageSize(
			  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
			  sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
			error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
			  ReplySizeInBytes);
			kfree (Reply_Ptr, M_TEMP);
			return (error);
		}
	}

	/* Deal in the general case */
	/* First allocate and optionally copy in each scatter gather element */
	SLIST_INIT(&sgList);
	if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
		PI2O_SGE_SIMPLE_ELEMENT sg;

		/*
		 *	since this code is reused in several systems, code
		 * efficiency is greater by using a shift operation rather
		 * than a divide by sizeof(u_int32_t).
		 */
		sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
		  + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
		    >> 2));
		while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
		  + MessageSizeInBytes)) {
			caddr_t v;
			int len;

			if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
				error = EINVAL;
				break;
			}
			len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
			debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
			  sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
			  + ((I2O_MESSAGE_FRAME_getVersionOffset(
				Message_Ptr) & 0xF0) >> 2)),
			  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);

			/* Mirror each user SG buffer in kernel space. */
			elm = (struct ioctlSgList_S *)kmalloc (
			  sizeof(*elm) - sizeof(elm->KernelSpace) + len,
			  M_TEMP, M_WAITOK);
			SLIST_INSERT_HEAD(&sgList, elm, link);
			elm->FlagsCount = sg->FlagsCount;
			elm->UserSpace = (caddr_t)
			  (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
			v = elm->KernelSpace;
			/* Copy in outgoing data (DIR bit could be invalid) */
			if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
			 != 0) {
				break;
			}

			/*
			 * If the buffer is not contiguous, lets
			 * break up the scatter/gather entries.
			 */
			while ((len > 0)
			 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
				/* NOTE(review): physical addresses are held
				 * in plain ints here — assumes a 32-bit
				 * physical address space; confirm before
				 * any PAE/64-bit use. */
				int next, base, span;

				span = 0;
				next = base = KVTOPHYS(v);
				I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
				  base);

				/* How far can we go physically contiguously */
				while ((len > 0) && (base == next)) {
					int size;

					next = trunc_page(base) + PAGE_SIZE;
					size = next - base;
					if (size > len) {
						size = len;
					}
					span += size;
					v += size;
					len -= size;
					base = KVTOPHYS(v);
				}

				/* Construct the Flags */
				I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
				  span);
				{
					int flags = I2O_FLAGS_COUNT_getFlags(
					  &(elm->FlagsCount));
					/* Any remaining length? */
					if (len > 0) {
					    flags &=
						~(I2O_SGL_FLAGS_END_OF_BUFFER
						 | I2O_SGL_FLAGS_LAST_ELEMENT);
					}
					I2O_FLAGS_COUNT_setFlags(
					  &(sg->FlagsCount), flags);
				}

				debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
				  sg - (PI2O_SGE_SIMPLE_ELEMENT)
				    ((char *)Message_Ptr
				  + ((I2O_MESSAGE_FRAME_getVersionOffset(
					Message_Ptr) & 0xF0) >> 2)),
				  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
				  span);
				if (len <= 0) {
					break;
				}

				/*
				 * Incrementing requires resizing of the
				 * packet, and moving up the existing SG
				 * elements.
				 */
				++sg;
				MessageSizeInBytes += sizeof(*sg);
				I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
				  I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
				  + (sizeof(*sg) / sizeof(U32)));
				{
					PI2O_MESSAGE_FRAME NewMessage_Ptr;

					/* Grow the frame by one SG element,
					 * duplicating the current element
					 * into the new slot. */
					NewMessage_Ptr
					  = (PI2O_MESSAGE_FRAME)
					    kmalloc (MessageSizeInBytes,
					      M_TEMP, M_WAITOK);
					span = ((caddr_t)sg)
					     - (caddr_t)Message_Ptr;
					bcopy ((caddr_t)Message_Ptr,
					  (caddr_t)NewMessage_Ptr, span);
					bcopy ((caddr_t)(sg-1),
					  ((caddr_t)NewMessage_Ptr) + span,
					  MessageSizeInBytes - span);
					kfree (Message_Ptr, M_TEMP);
					sg = (PI2O_SGE_SIMPLE_ELEMENT)
					  (((caddr_t)NewMessage_Ptr) + span);
					Message_Ptr = NewMessage_Ptr;
				}
			}
			if ((error)
			 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			  & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
				break;
			}
			++sg;
		}
		if (error) {
			/* Unwind all mirrored SG buffers on failure. */
			while ((elm = SLIST_FIRST(&sgList))
			  != (struct ioctlSgList_S *)NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				kfree (elm, M_TEMP);
			}
			kfree (Reply_Ptr, M_TEMP);
			kfree (Message_Ptr, M_TEMP);
			return (error);
		}
	}

	debug_usr_cmd_printf ("Inbound: ");
	debug_usr_cmd_dump_message(Message_Ptr);

	/* Send the command */
	if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
		/* Free up in-kernel buffers */
		while ((elm = SLIST_FIRST(&sgList))
		  != (struct ioctlSgList_S *)NULL) {
			SLIST_REMOVE_HEAD(&sgList, link);
			kfree (elm, M_TEMP);
		}
		kfree (Reply_Ptr, M_TEMP);
		kfree (Message_Ptr, M_TEMP);
		return (ENOMEM);
	}

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(
	  (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);

	(void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	kfree (Message_Ptr, M_TEMP);

	/*
	 * Wait for the board to report a finished instruction.
	 */
	crit_enter();
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		if (ASR_getBlinkLedCode(sc)) {
			/* Reset Adapter */
			kprintf ("asr%d: Blink LED 0x%x resetting adapter\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ASR_getBlinkLedCode(sc));
			if (ASR_reset (sc) == ENXIO) {
				/* Command Cleanup */
				ASR_ccbRemove(sc, ccb);
			}
			crit_exit();
			/* Free up in-kernel buffers */
			while ((elm = SLIST_FIRST(&sgList))
			  != (struct ioctlSgList_S *)NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				kfree (elm, M_TEMP);
			}
			kfree (Reply_Ptr, M_TEMP);
			asr_free_ccb(ccb);
			return (EIO);
		}
		/* Check every second for BlinkLed */
		tsleep((caddr_t)ccb, 0, "asr", hz);
	}
	crit_exit();

	debug_usr_cmd_printf ("Outbound: ");
	debug_usr_cmd_dump_message(Reply_Ptr);

	/* Non-zero detailed status for anything but a clean completion. */
	I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
	  &(Reply_Ptr->StdReplyFrame),
	  (ccb->ccb_h.status != CAM_REQ_CMP));

	if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	  - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
		  ccb->csio.dxfer_len - ccb->csio.resid);
	}
	if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
	 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	 - I2O_SCSI_SENSE_DATA_SZ))) {
		int size = ReplySizeInBytes
		  - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
		  - I2O_SCSI_SENSE_DATA_SZ;

		if (size > sizeof(ccb->csio.sense_data)) {
			size = sizeof(ccb->csio.sense_data);
		}
		bcopy ((caddr_t)&(ccb->csio.sense_data), (caddr_t)Reply_Ptr->SenseData,
		    size);
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
		  Reply_Ptr, size);
	}

	/* Free up in-kernel buffers */
	while ((elm = SLIST_FIRST(&sgList)) != (struct ioctlSgList_S *)NULL) {
		/* Copy out as necessary */
		if ((error == 0)
		/* DIR bit considered `valid', error due to ignorance works */
		 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
		  & I2O_SGL_FLAGS_DIR) == 0)) {
			error = copyout ((caddr_t)(elm->KernelSpace),
			  elm->UserSpace,
			  I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
		}
		SLIST_REMOVE_HEAD(&sgList, link);
		kfree (elm, M_TEMP);
	}
	if (error == 0) {
	/* Copy reply frame to user space */
		error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
		  ReplySizeInBytes);
	}
	kfree (Reply_Ptr, M_TEMP);
	asr_free_ccb(ccb);

	return (error);
} /* ASR_queue_i */
3693 /*----------------------------------------------------------------------*/
3694 /* Function asr_ioctl */
3695 /*----------------------------------------------------------------------*/
3696 /* The parameters passed to this function are : */
3697 /* dev : Device number. */
3698 /* cmd : Ioctl Command */
3699 /* data : User Argument Passed In. */
3700 /* flag : Mode Parameter */
3701 /* proc : Process Parameter */
3702 /* */
3703 /* This function is the user interface into this adapter driver */
3704 /* */
3705 /* Return : zero if OK, error code if not */
3706 /*----------------------------------------------------------------------*/
3708 STATIC int
3709 asr_ioctl(struct dev_ioctl_args *ap)
3711 cdev_t dev = ap->a_head.a_dev;
3712 caddr_t data = ap->a_data;
3713 int i, j;
3714 OUT int error = 0;
3715 Asr_softc_t * sc = ASR_get_sc (dev);
3717 if (sc != (Asr_softc_t *)NULL)
3718 switch(ap->a_cmd) {
3720 case DPT_SIGNATURE:
3721 # if (dsDescription_size != 50)
3722 case DPT_SIGNATURE + ((50 - dsDescription_size) << 16):
3723 # endif
3724 if (ap->a_cmd & 0xFFFF0000) {
3725 (void)bcopy ((caddr_t)(&ASR_sig), data,
3726 sizeof(dpt_sig_S));
3727 return (0);
3729 /* Traditional version of the ioctl interface */
3730 case DPT_SIGNATURE & 0x0000FFFF:
3731 return (copyout ((caddr_t)(&ASR_sig), *((caddr_t *)data),
3732 sizeof(dpt_sig_S)));
3734 /* Traditional version of the ioctl interface */
3735 case DPT_CTRLINFO & 0x0000FFFF:
3736 case DPT_CTRLINFO: {
3737 struct {
3738 u_int16_t length;
3739 u_int16_t drvrHBAnum;
3740 u_int32_t baseAddr;
3741 u_int16_t blinkState;
3742 u_int8_t pciBusNum;
3743 u_int8_t pciDeviceNum;
3744 u_int16_t hbaFlags;
3745 u_int16_t Interrupt;
3746 u_int32_t reserved1;
3747 u_int32_t reserved2;
3748 u_int32_t reserved3;
3749 } CtlrInfo;
3751 bzero (&CtlrInfo, sizeof(CtlrInfo));
3752 CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t);
3753 CtlrInfo.drvrHBAnum = asr_unit(dev);
3754 CtlrInfo.baseAddr = (u_long)sc->ha_Base;
3755 i = ASR_getBlinkLedCode (sc);
3756 if (i == -1) {
3757 i = 0;
3759 CtlrInfo.blinkState = i;
3760 CtlrInfo.pciBusNum = sc->ha_pciBusNum;
3761 CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum;
3762 #define FLG_OSD_PCI_VALID 0x0001
3763 #define FLG_OSD_DMA 0x0002
3764 #define FLG_OSD_I2O 0x0004
3765 CtlrInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
3766 CtlrInfo.Interrupt = sc->ha_irq;
3767 if (ap->a_cmd & 0xFFFF0000) {
3768 bcopy (&CtlrInfo, data, sizeof(CtlrInfo));
3769 } else {
3770 error = copyout (&CtlrInfo, *(caddr_t *)data, sizeof(CtlrInfo));
3772 } return (error);
3774 /* Traditional version of the ioctl interface */
3775 case DPT_SYSINFO & 0x0000FFFF:
3776 case DPT_SYSINFO: {
3777 sysInfo_S Info;
3778 char * cp;
3779 /* Kernel Specific ptok `hack' */
3780 # define ptok(a) ((char *)(a) + KERNBASE)
3782 bzero (&Info, sizeof(Info));
3784 /* Appears I am the only person in the Kernel doing this */
3785 outb (0x70, 0x12);
3786 i = inb(0x71);
3787 j = i >> 4;
3788 if (i == 0x0f) {
3789 outb (0x70, 0x19);
3790 j = inb (0x71);
3792 Info.drive0CMOS = j;
3794 j = i & 0x0f;
3795 if (i == 0x0f) {
3796 outb (0x70, 0x1a);
3797 j = inb (0x71);
3799 Info.drive1CMOS = j;
3801 Info.numDrives = *((char *)ptok(0x475));
3803 Info.processorFamily = ASR_sig.dsProcessorFamily;
3804 switch (cpu) {
3805 case CPU_386SX: case CPU_386:
3806 Info.processorType = PROC_386; break;
3807 case CPU_486SX: case CPU_486:
3808 Info.processorType = PROC_486; break;
3809 case CPU_586:
3810 Info.processorType = PROC_PENTIUM; break;
3811 case CPU_686:
3812 Info.processorType = PROC_SEXIUM; break;
3814 Info.osType = OS_BSDI_UNIX;
3815 Info.osMajorVersion = osrelease[0] - '0';
3816 Info.osMinorVersion = osrelease[2] - '0';
3817 /* Info.osRevision = 0; */
3818 /* Info.osSubRevision = 0; */
3819 Info.busType = SI_PCI_BUS;
3820 Info.flags = SI_CMOS_Valid | SI_NumDrivesValid
3821 | SI_OSversionValid | SI_BusTypeValid | SI_NO_SmartROM;
3823 /* Go Out And Look For I2O SmartROM */
3824 for(j = 0xC8000; j < 0xE0000; j += 2048) {
3825 int k;
3827 cp = ptok(j);
3828 if (*((unsigned short *)cp) != 0xAA55) {
3829 continue;
3831 j += (cp[2] * 512) - 2048;
3832 if ((*((u_long *)(cp + 6))
3833 != ('S' + (' ' * 256) + (' ' * 65536L)))
3834 || (*((u_long *)(cp + 10))
3835 != ('I' + ('2' * 256) + ('0' * 65536L)))) {
3836 continue;
3838 cp += 0x24;
3839 for (k = 0; k < 64; ++k) {
3840 if (*((unsigned short *)cp)
3841 == (' ' + ('v' * 256))) {
3842 break;
3845 if (k < 64) {
3846 Info.smartROMMajorVersion
3847 = *((unsigned char *)(cp += 4)) - '0';
3848 Info.smartROMMinorVersion
3849 = *((unsigned char *)(cp += 2));
3850 Info.smartROMRevision
3851 = *((unsigned char *)(++cp));
3852 Info.flags |= SI_SmartROMverValid;
3853 Info.flags &= ~SI_NO_SmartROM;
3854 break;
3857 /* Get The Conventional Memory Size From CMOS */
3858 outb (0x70, 0x16);
3859 j = inb (0x71);
3860 j <<= 8;
3861 outb (0x70, 0x15);
3862 j |= inb(0x71);
3863 Info.conventionalMemSize = j;
3865 /* Get The Extended Memory Found At Power On From CMOS */
3866 outb (0x70, 0x31);
3867 j = inb (0x71);
3868 j <<= 8;
3869 outb (0x70, 0x30);
3870 j |= inb(0x71);
3871 Info.extendedMemSize = j;
3872 Info.flags |= SI_MemorySizeValid;
3874 # if (defined(THIS_IS_BROKEN))
3875 /* If There Is 1 or 2 Drives Found, Set Up Drive Parameters */
3876 if (Info.numDrives > 0) {
3878 * Get The Pointer From Int 41 For The First
3879 * Drive Parameters
3881 j = ((unsigned)(*((unsigned short *)ptok(0x104+2))) << 4)
3882 + (unsigned)(*((unsigned short *)ptok(0x104+0)));
3884 * It appears that SmartROM's Int41/Int46 pointers
3885 * use memory that gets stepped on by the kernel
3886 * loading. We no longer have access to this
3887 * geometry information but try anyways (!?)
3889 Info.drives[0].cylinders = *((unsigned char *)ptok(j));
3890 ++j;
3891 Info.drives[0].cylinders += ((int)*((unsigned char *)
3892 ptok(j))) << 8;
3893 ++j;
3894 Info.drives[0].heads = *((unsigned char *)ptok(j));
3895 j += 12;
3896 Info.drives[0].sectors = *((unsigned char *)ptok(j));
3897 Info.flags |= SI_DriveParamsValid;
3898 if ((Info.drives[0].cylinders == 0)
3899 || (Info.drives[0].heads == 0)
3900 || (Info.drives[0].sectors == 0)) {
3901 Info.flags &= ~SI_DriveParamsValid;
3903 if (Info.numDrives > 1) {
3905 * Get The Pointer From Int 46 For The
3906 * Second Drive Parameters
3908 j = ((unsigned)(*((unsigned short *)ptok(0x118+2))) << 4)
3909 + (unsigned)(*((unsigned short *)ptok(0x118+0)));
3910 Info.drives[1].cylinders = *((unsigned char *)
3911 ptok(j));
3912 ++j;
3913 Info.drives[1].cylinders += ((int)
3914 *((unsigned char *)ptok(j))) << 8;
3915 ++j;
3916 Info.drives[1].heads = *((unsigned char *)
3917 ptok(j));
3918 j += 12;
3919 Info.drives[1].sectors = *((unsigned char *)
3920 ptok(j));
3921 if ((Info.drives[1].cylinders == 0)
3922 || (Info.drives[1].heads == 0)
3923 || (Info.drives[1].sectors == 0)) {
3924 Info.flags &= ~SI_DriveParamsValid;
3928 # endif
3929 /* Copy Out The Info Structure To The User */
3930 if (ap->a_cmd & 0xFFFF0000) {
3931 bcopy (&Info, data, sizeof(Info));
3932 } else {
3933 error = copyout (&Info, *(caddr_t *)data, sizeof(Info));
3935 return (error); }
3937 /* Get The BlinkLED State */
3938 case DPT_BLINKLED:
3939 i = ASR_getBlinkLedCode (sc);
3940 if (i == -1) {
3941 i = 0;
3943 if (ap->a_cmd & 0xFFFF0000) {
3944 bcopy ((caddr_t)(&i), data, sizeof(i));
3945 } else {
3946 error = copyout (&i, *(caddr_t *)data, sizeof(i));
3948 break;
3950 /* Send an I2O command */
3951 case I2OUSRCMD:
3952 return (ASR_queue_i (sc, *((PI2O_MESSAGE_FRAME *)data)));
3954 /* Reset and re-initialize the adapter */
3955 case I2ORESETCMD:
3956 return (ASR_reset (sc));
3958 /* Rescan the LCT table and resynchronize the information */
3959 case I2ORESCANCMD:
3960 return (ASR_rescan (sc));
3962 return (EINVAL);
3963 } /* asr_ioctl */