/* drivers/scsi/aacraid/aachba.c, from Linux 2.5.74 (linux-2.6/linux-mips.git) */
1 /*
2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux.
7 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2, or (at your option)
12 * any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, write to
21 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
25 #include <linux/config.h>
26 #include <linux/kernel.h>
27 #include <linux/init.h>
28 #include <linux/types.h>
29 #include <linux/sched.h>
30 #include <linux/pci.h>
31 #include <linux/spinlock.h>
32 #include <linux/slab.h>
33 #include <linux/completion.h>
34 #include <asm/semaphore.h>
35 #include <asm/uaccess.h>
36 #include <linux/blk.h>
37 #include "scsi.h"
38 #include "hosts.h"
40 #include "aacraid.h"
42 /* values for inqd_pdt: Peripheral device type in plain English */
43 #define INQD_PDT_DA 0x00 /* Direct-access (DISK) device */
44 #define INQD_PDT_PROC 0x03 /* Processor device */
45 #define INQD_PDT_CHNGR 0x08 /* Changer (jukebox, scsi2) */
46 #define INQD_PDT_COMM 0x09 /* Communication device (scsi2) */
47 #define INQD_PDT_NOLUN2 0x1f /* Unknown Device (scsi2) */
48 #define INQD_PDT_NOLUN 0x7f /* Logical Unit Not Present */
50 #define INQD_PDT_DMASK 0x1F /* Peripheral Device Type Mask */
51 #define INQD_PDT_QMASK 0xE0 /* Peripheral Device Qualifier Mask */
53 #define TARGET_LUN_TO_CONTAINER(target, lun) (target)
54 #define CONTAINER_TO_TARGET(cont) ((cont))
55 #define CONTAINER_TO_LUN(cont) (0)
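/*
 * For illustration: with the identity mapping above, container 2 is
 * presented as SCSI target 2, LUN 0 on channel 0, i.e.
 * TARGET_LUN_TO_CONTAINER(2, 0) == 2, CONTAINER_TO_TARGET(2) == 2 and
 * CONTAINER_TO_LUN(2) == 0.
 */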
57 #define MAX_FIB_DATA (sizeof(struct hw_fib) - sizeof(FIB_HEADER))
59 #define MAX_DRIVER_SG_SEGMENT_COUNT 17
62 /* Sense keys */
64 #define SENKEY_NO_SENSE 0x00
65 #define SENKEY_UNDEFINED 0x01
66 #define SENKEY_NOT_READY 0x02
67 #define SENKEY_MEDIUM_ERR 0x03
68 #define SENKEY_HW_ERR 0x04
69 #define SENKEY_ILLEGAL 0x05
70 #define SENKEY_ATTENTION 0x06
71 #define SENKEY_PROTECTED 0x07
72 #define SENKEY_BLANK 0x08
73 #define SENKEY_V_UNIQUE 0x09
74 #define SENKEY_CPY_ABORT 0x0A
75 #define SENKEY_ABORT 0x0B
76 #define SENKEY_EQUAL 0x0C
77 #define SENKEY_VOL_OVERFLOW 0x0D
78 #define SENKEY_MISCOMP 0x0E
79 #define SENKEY_RESERVED 0x0F
82 /* Sense codes */
85 #define SENCODE_NO_SENSE 0x00
86 #define SENCODE_END_OF_DATA 0x00
87 #define SENCODE_BECOMING_READY 0x04
88 #define SENCODE_INIT_CMD_REQUIRED 0x04
89 #define SENCODE_PARAM_LIST_LENGTH_ERROR 0x1A
90 #define SENCODE_INVALID_COMMAND 0x20
91 #define SENCODE_LBA_OUT_OF_RANGE 0x21
92 #define SENCODE_INVALID_CDB_FIELD 0x24
93 #define SENCODE_LUN_NOT_SUPPORTED 0x25
94 #define SENCODE_INVALID_PARAM_FIELD 0x26
95 #define SENCODE_PARAM_NOT_SUPPORTED 0x26
96 #define SENCODE_PARAM_VALUE_INVALID 0x26
97 #define SENCODE_RESET_OCCURRED 0x29
98 #define SENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x3E
99 #define SENCODE_INQUIRY_DATA_CHANGED 0x3F
100 #define SENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x39
101 #define SENCODE_DIAGNOSTIC_FAILURE 0x40
102 #define SENCODE_INTERNAL_TARGET_FAILURE 0x44
103 #define SENCODE_INVALID_MESSAGE_ERROR 0x49
104 #define SENCODE_LUN_FAILED_SELF_CONFIG 0x4c
105 #define SENCODE_OVERLAPPED_COMMAND 0x4E
108 /* Additional sense codes */
111 #define ASENCODE_NO_SENSE 0x00
112 #define ASENCODE_END_OF_DATA 0x05
113 #define ASENCODE_BECOMING_READY 0x01
114 #define ASENCODE_INIT_CMD_REQUIRED 0x02
115 #define ASENCODE_PARAM_LIST_LENGTH_ERROR 0x00
116 #define ASENCODE_INVALID_COMMAND 0x00
117 #define ASENCODE_LBA_OUT_OF_RANGE 0x00
118 #define ASENCODE_INVALID_CDB_FIELD 0x00
119 #define ASENCODE_LUN_NOT_SUPPORTED 0x00
120 #define ASENCODE_INVALID_PARAM_FIELD 0x00
121 #define ASENCODE_PARAM_NOT_SUPPORTED 0x01
122 #define ASENCODE_PARAM_VALUE_INVALID 0x02
123 #define ASENCODE_RESET_OCCURRED 0x00
124 #define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x00
125 #define ASENCODE_INQUIRY_DATA_CHANGED 0x03
126 #define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x00
127 #define ASENCODE_DIAGNOSTIC_FAILURE 0x80
128 #define ASENCODE_INTERNAL_TARGET_FAILURE 0x00
129 #define ASENCODE_INVALID_MESSAGE_ERROR 0x00
130 #define ASENCODE_LUN_FAILED_SELF_CONFIG 0x00
131 #define ASENCODE_OVERLAPPED_COMMAND 0x00
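/*
 * Each ASENCODE_* value is the additional-sense-code qualifier that pairs
 * with the SENCODE_* define of the same name; set_sense() below stores the
 * SENCODE_* value in byte 12 (ASC) and the ASENCODE_* value in byte 13
 * (ASCQ) of the sense buffer.
 */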
133 #define BYTE0(x) (unsigned char)(x)
134 #define BYTE1(x) (unsigned char)((x) >> 8)
135 #define BYTE2(x) (unsigned char)((x) >> 16)
136 #define BYTE3(x) (unsigned char)((x) >> 24)
138 /*------------------------------------------------------------------------------
139 * S T R U C T S / T Y P E D E F S
140 *----------------------------------------------------------------------------*/
141 /* SCSI inquiry data */
142 struct inquiry_data {
143 u8 inqd_pdt; /* Peripheral qualifier | Peripheral Device Type */
144 u8 inqd_dtq; /* RMB | Device Type Qualifier */
145 u8 inqd_ver; /* ISO version | ECMA version | ANSI-approved version */
146 u8 inqd_rdf; /* AENC | TrmIOP | Response data format */
147 u8 inqd_len; /* Additional length (n-4) */
148 u8 inqd_pad1[2];/* Reserved - must be zero */
149 u8 inqd_pad2; /* RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
150 u8 inqd_vid[8]; /* Vendor ID */
151 u8 inqd_pid[16];/* Product ID */
152 u8 inqd_prl[4]; /* Product Revision Level */
};
155 struct sense_data {
156 u8 error_code; /* 70h (current errors), 71h(deferred errors) */
157 u8 valid:1; /* A valid bit of one indicates that the information */
158 /* field contains valid information as defined in the
159 * SCSI-2 Standard.
161 u8 segment_number; /* Only used for COPY, COMPARE, or COPY AND VERIFY Commands */
162 u8 sense_key:4; /* Sense Key */
163 u8 reserved:1;
164 u8 ILI:1; /* Incorrect Length Indicator */
165 u8 EOM:1; /* End Of Medium - reserved for random access devices */
166 u8 filemark:1; /* Filemark - reserved for random access devices */
168 u8 information[4]; /* for direct-access devices, contains the unsigned
169 * logical block address or residue associated with
170 * the sense key
172 u8 add_sense_len; /* number of additional sense bytes to follow this field */
173 u8 cmnd_info[4]; /* not used */
174 u8 ASC; /* Additional Sense Code */
175 u8 ASCQ; /* Additional Sense Code Qualifier */
176 u8 FRUC; /* Field Replaceable Unit Code - not used */
177 u8 bit_ptr:3; /* indicates which byte of the CDB or parameter data
178 * was in error
180 u8 BPV:1; /* bit pointer valid (BPV): 1- indicates that
181 * the bit_ptr field has valid value
183 u8 reserved2:2;
184 u8 CD:1; /* command data bit: 1- illegal parameter in CDB.
185 * 0- illegal parameter in data.
187 u8 SKSV:1;
188 u8 field_ptr[2]; /* byte of the CDB or parameter data in error */
};
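/*
 * The flat buffer filled in by set_sense() below follows this same
 * fixed-format layout: byte 0 = 0x70 plus the valid bit, byte 2 = sense key
 * (ORed with ILI when a residue is reported), bytes 3-6 = information/residue,
 * byte 7 = additional sense length, byte 12 = ASC, byte 13 = ASCQ, and
 * bytes 15-17 = sense-key-specific data (bit pointer and field pointer).
 */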
192 /* M O D U L E   G L O B A L S */
195 static struct fsa_scsi_hba *fsa_dev[MAXIMUM_NUM_ADAPTERS]; /* SCSI Device Instance Pointers */
196 static struct sense_data sense_data[MAXIMUM_NUM_CONTAINERS];
197 static unsigned long aac_build_sg(Scsi_Cmnd* scsicmd, struct sgmap* sgmap);
198 static unsigned long aac_build_sg64(Scsi_Cmnd* scsicmd, struct sgmap64* psg);
199 static int aac_send_srb_fib(Scsi_Cmnd* scsicmd);
200 #ifdef AAC_DETAILED_STATUS_INFO
201 static char *aac_get_status_string(u32 status);
202 #endif
205 * aac_get_containers - list containers
206 * @dev: adapter to probe
208 * Make a list of all containers on this controller
210 int aac_get_containers(struct aac_dev *dev)
212 struct fsa_scsi_hba *fsa_dev_ptr;
213 u32 index;
214 int status = 0;
215 struct aac_query_mount *dinfo;
216 struct aac_mount *dresp;
217 struct fib * fibptr;
218 unsigned instance;
220 fsa_dev_ptr = &(dev->fsa_dev);
221 instance = dev->scsi_host_ptr->unique_id;
223 if (!(fibptr = fib_alloc(dev)))
224 return -ENOMEM;
226 for (index = 0; index < MAXIMUM_NUM_CONTAINERS; index++) {
227 fib_init(fibptr);
228 dinfo = (struct aac_query_mount *) fib_data(fibptr);
230 dinfo->command = cpu_to_le32(VM_NameServe);
231 dinfo->count = cpu_to_le32(index);
232 dinfo->type = cpu_to_le32(FT_FILESYS);
234 status = fib_send(ContainerCommand,
235 fibptr,
236 sizeof (struct aac_query_mount),
237 FsaNormal,
238 1, 1,
239 NULL, NULL);
240 if (status < 0 ) {
241 printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
242 break;
244 dresp = (struct aac_mount *)fib_data(fibptr);
246 if ((le32_to_cpu(dresp->status) == ST_OK) &&
247 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
248 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
249 fsa_dev_ptr->valid[index] = 1;
250 fsa_dev_ptr->type[index] = le32_to_cpu(dresp->mnt[0].vol);
251 fsa_dev_ptr->size[index] = le32_to_cpu(dresp->mnt[0].capacity);
252 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
253 fsa_dev_ptr->ro[index] = 1;
255 fib_complete(fibptr);
257 * If there are no more containers, then stop asking.
259 if ((index + 1) >= le32_to_cpu(dresp->count)){
260 break;
263 fib_free(fibptr);
264 fsa_dev[instance] = fsa_dev_ptr;
265 return status;
269 * probe_container - query a logical volume
270 * @dev: device to query
271 * @cid: container identifier
273 * Queries the controller about the given volume. The volume information
274 * is updated in the struct fsa_scsi_hba structure rather than returned.
277 static int probe_container(struct aac_dev *dev, int cid)
279 struct fsa_scsi_hba *fsa_dev_ptr;
280 int status;
281 struct aac_query_mount *dinfo;
282 struct aac_mount *dresp;
283 struct fib * fibptr;
284 unsigned instance;
286 fsa_dev_ptr = &(dev->fsa_dev);
287 instance = dev->scsi_host_ptr->unique_id;
289 if (!(fibptr = fib_alloc(dev)))
290 return -ENOMEM;
292 fib_init(fibptr);
294 dinfo = (struct aac_query_mount *)fib_data(fibptr);
296 dinfo->command = cpu_to_le32(VM_NameServe);
297 dinfo->count = cpu_to_le32(cid);
298 dinfo->type = cpu_to_le32(FT_FILESYS);
300 status = fib_send(ContainerCommand,
301 fibptr,
302 sizeof(struct aac_query_mount),
303 FsaNormal,
304 1, 1,
305 NULL, NULL);
306 if (status < 0) {
307 printk(KERN_WARNING "aacraid: probe_containers query failed.\n");
308 goto error;
311 dresp = (struct aac_mount *) fib_data(fibptr);
313 if ((le32_to_cpu(dresp->status) == ST_OK) &&
314 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
315 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
316 fsa_dev_ptr->valid[cid] = 1;
317 fsa_dev_ptr->type[cid] = le32_to_cpu(dresp->mnt[0].vol);
318 fsa_dev_ptr->size[cid] = le32_to_cpu(dresp->mnt[0].capacity);
319 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
320 fsa_dev_ptr->ro[cid] = 1;
323 error:
324 fib_complete(fibptr);
325 fib_free(fibptr);
327 return status;
330 /* Local Structure to set SCSI inquiry data strings */
331 struct scsi_inq {
332 char vid[8]; /* Vendor ID */
333 char pid[16]; /* Product ID */
334 char prl[4]; /* Product Revision Level */
338 * InqStrCopy - string merge
339 * @a: string to copy from
340 * @b: string to copy to
342 * Copy a String from one location to another
343 * without copying \0
346 static void inqstrcpy(char *a, char *b)
349 while(*a != (char)0)
350 *b++ = *a++;
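/*
 * Note that inqstrcpy() copies up to, but not including, the terminating
 * NUL; callers such as the INQUIRY handler below zero the destination
 * inquiry_data block first, so unfilled bytes stay zero rather than relying
 * on NUL termination.
 */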
353 static char *container_types[] = {
354 "None",
355 "Volume",
356 "Mirror",
357 "Stripe",
358 "RAID5",
359 "SSRW",
360 "SSRO",
361 "Morph",
362 "Legacy",
363 "RAID4",
364 "RAID10",
365 "RAID00",
366 "V-MIRRORS",
367 "PSEUDO R4",
368 "RAID50",
369 "Unknown"
374 /* Function: setinqstr
376 * Arguments: [1] int devtype [2] pointer to void [3] int tindex
378 * Purpose: Sets SCSI inquiry data strings for vendor, product
379 * and revision level. Allows strings to be set in platform dependent
380 * files instead of in OS dependent driver source.
383 static void setinqstr(int devtype, void *data, int tindex)
385 struct scsi_inq *str;
386 char *findit;
387 struct aac_driver_ident *mp;
389 mp = aac_get_driver_ident(devtype);
391 str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
393 inqstrcpy (mp->vname, str->vid);
394 inqstrcpy (mp->model, str->pid); /* last six chars reserved for vol type */
396 findit = str->pid;
398 for ( ; *findit != ' '; findit++); /* walk till we find a space then incr by 1 */
399 findit++;
401 if (tindex < (sizeof(container_types)/sizeof(char *))){
402 inqstrcpy (container_types[tindex], findit);
404 inqstrcpy ("V1.0", str->prl);
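/*
 * Illustrative result (the vendor and model strings come from the
 * per-adapter ident table returned by aac_get_driver_ident()): vid holds
 * the vendor name, pid holds the model with the container-type name
 * written just after its first space (a type 1 container appends
 * "Volume", for example), and prl is set to "V1.0".
 */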
407 void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code,
408 u8 a_sense_code, u8 incorrect_length,
409 u8 bit_pointer, u16 field_pointer,
410 u32 residue)
412 sense_buf[0] = 0xF0; /* Sense data valid, err code 70h (current error) */
413 sense_buf[1] = 0; /* Segment number, always zero */
415 if (incorrect_length) {
416 sense_buf[2] = sense_key | 0x20;/* Set ILI bit | sense key */
417 sense_buf[3] = BYTE3(residue);
418 sense_buf[4] = BYTE2(residue);
419 sense_buf[5] = BYTE1(residue);
420 sense_buf[6] = BYTE0(residue);
421 } else
422 sense_buf[2] = sense_key; /* Sense key */
424 if (sense_key == SENKEY_ILLEGAL)
425 sense_buf[7] = 10; /* Additional sense length */
426 else
427 sense_buf[7] = 6; /* Additional sense length */
429 sense_buf[12] = sense_code; /* Additional sense code */
430 sense_buf[13] = a_sense_code; /* Additional sense code qualifier */
431 if (sense_key == SENKEY_ILLEGAL) {
432 sense_buf[15] = 0;
434 if (sense_code == SENCODE_INVALID_PARAM_FIELD)
435 sense_buf[15] = 0x80;/* Std sense key specific field */
436 /* Illegal parameter is in the parameter block */
438 if (sense_code == SENCODE_INVALID_CDB_FIELD)
439 sense_buf[15] = 0xc0;/* Std sense key specific field */
440 /* Illegal parameter is in the CDB block */
441 sense_buf[15] |= bit_pointer;
442 sense_buf[16] = field_pointer >> 8; /* MSB */
443 sense_buf[17] = field_pointer; /* LSB */
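/*
 * Worked example (values follow from the code above): the call used in
 * aac_scsi_cmd() for an unsupported opcode,
 *
 *	set_sense((u8 *)&sense_data[cid], SENKEY_ILLEGAL,
 *		  SENCODE_INVALID_COMMAND, ASENCODE_INVALID_COMMAND,
 *		  0, 0, 0, 0);
 *
 * leaves sense_buf[0] = 0xF0, sense_buf[2] = 0x05, sense_buf[7] = 10,
 * sense_buf[12] = 0x20, sense_buf[13] = 0x00 and sense_buf[15]-[17] = 0,
 * with no residue recorded.
 */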
447 static void aac_io_done(Scsi_Cmnd * scsicmd)
449 unsigned long cpu_flags;
450 struct Scsi_Host *host = scsicmd->device->host;
451 spin_lock_irqsave(host->host_lock, cpu_flags);
452 scsicmd->scsi_done(scsicmd);
453 spin_unlock_irqrestore(host->host_lock, cpu_flags);
456 static void __aac_io_done(Scsi_Cmnd * scsicmd)
458 scsicmd->scsi_done(scsicmd);
461 int aac_get_adapter_info(struct aac_dev* dev)
463 struct fib* fibptr;
464 struct aac_adapter_info* info;
465 int rcode;
466 u32 tmp;
467 if (!(fibptr = fib_alloc(dev)))
468 return -ENOMEM;
470 fib_init(fibptr);
471 info = (struct aac_adapter_info*) fib_data(fibptr);
473 memset(info,0,sizeof(struct aac_adapter_info));
475 rcode = fib_send(RequestAdapterInfo,
476 fibptr,
477 sizeof(struct aac_adapter_info),
478 FsaNormal,
479 1, 1,
480 NULL,
481 NULL);
483 memcpy(&dev->adapter_info, info, sizeof(struct aac_adapter_info));
485 tmp = dev->adapter_info.kernelrev;
486 printk(KERN_INFO"%s%d: kernel %d.%d.%d build %d\n",
487 dev->name, dev->id,
488 tmp>>24,(tmp>>16)&0xff,(tmp>>8)&0xff,
489 dev->adapter_info.kernelbuild);
490 tmp = dev->adapter_info.monitorrev;
491 printk(KERN_INFO"%s%d: monitor %d.%d.%d build %d\n",
492 dev->name, dev->id,
493 tmp>>24,(tmp>>16)&0xff,(tmp>>8)&0xff,
494 dev->adapter_info.monitorbuild);
495 tmp = dev->adapter_info.biosrev;
496 printk(KERN_INFO"%s%d: bios %d.%d.%d build %d\n",
497 dev->name, dev->id,
498 tmp>>24,(tmp>>16)&0xff,(tmp>>8)&0xff,
499 dev->adapter_info.biosbuild);
500 printk(KERN_INFO"%s%d: serial %x%x\n",
501 dev->name, dev->id,
502 dev->adapter_info.serial[0],
503 dev->adapter_info.serial[1]);
505 dev->nondasd_support = 0;
506 if(dev->adapter_info.options & AAC_OPT_NONDASD){
507 // dev->nondasd_support = 1;
508 // dmb - temporarily disable nondasd
510 if(nondasd != -1) {
511 dev->nondasd_support = (nondasd!=0);
513 if(dev->nondasd_support != 0){
514 printk(KERN_INFO"%s%d: Non-DASD support enabled\n",dev->name, dev->id);
517 dev->pae_support = 0;
518 if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){
519 dev->pae_support = 1;
522 if(paemode != -1){
523 dev->pae_support = (paemode!=0);
525 if(dev->pae_support != 0) {
526 printk(KERN_INFO"%s%d: 64 Bit PAE enabled\n", dev->name, dev->id);
527 pci_set_dma_mask(dev->pdev, (dma_addr_t)0xFFFFFFFFFFFFFFFFULL);
530 fib_complete(fibptr);
531 fib_free(fibptr);
533 return rcode;
537 static void read_callback(void *context, struct fib * fibptr)
539 struct aac_dev *dev;
540 struct aac_read_reply *readreply;
541 Scsi_Cmnd *scsicmd;
542 u32 lba;
543 u32 cid;
545 scsicmd = (Scsi_Cmnd *) context;
547 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
548 cid =TARGET_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
550 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
551 dprintk((KERN_DEBUG "read_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
553 if (fibptr == NULL)
554 BUG();
556 if(scsicmd->use_sg)
557 pci_unmap_sg(dev->pdev,
558 (struct scatterlist *)scsicmd->buffer,
559 scsicmd->use_sg,
560 scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
561 else if(scsicmd->request_bufflen)
562 pci_unmap_single(dev->pdev, (dma_addr_t)(ulong)scsicmd->SCp.ptr,
563 scsicmd->request_bufflen,
564 scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
565 readreply = (struct aac_read_reply *)fib_data(fibptr);
566 if (le32_to_cpu(readreply->status) == ST_OK)
567 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
568 else {
569 printk(KERN_WARNING "read_callback: read failed, status = %d\n", readreply->status);
570 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
571 set_sense((u8 *) &sense_data[cid],
572 SENKEY_HW_ERR,
573 SENCODE_INTERNAL_TARGET_FAILURE,
574 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
575 0, 0);
577 fib_complete(fibptr);
578 fib_free(fibptr);
580 aac_io_done(scsicmd);
583 static void write_callback(void *context, struct fib * fibptr)
585 struct aac_dev *dev;
586 struct aac_write_reply *writereply;
587 Scsi_Cmnd *scsicmd;
588 u32 lba;
589 u32 cid;
591 scsicmd = (Scsi_Cmnd *) context;
592 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
593 cid = TARGET_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
595 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
596 dprintk((KERN_DEBUG "write_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
597 if (fibptr == NULL)
598 BUG();
600 if(scsicmd->use_sg)
601 pci_unmap_sg(dev->pdev,
602 (struct scatterlist *)scsicmd->buffer,
603 scsicmd->use_sg,
604 scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
605 else if(scsicmd->request_bufflen)
606 pci_unmap_single(dev->pdev, (dma_addr_t)(ulong)scsicmd->SCp.ptr,
607 scsicmd->request_bufflen,
608 scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
610 writereply = (struct aac_write_reply *) fib_data(fibptr);
611 if (le32_to_cpu(writereply->status) == ST_OK)
612 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
613 else {
614 printk(KERN_WARNING "write_callback: write failed, status = %d\n", writereply->status);
615 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
616 set_sense((u8 *) &sense_data[cid],
617 SENKEY_HW_ERR,
618 SENCODE_INTERNAL_TARGET_FAILURE,
619 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
620 0, 0);
623 fib_complete(fibptr);
624 fib_free(fibptr);
625 aac_io_done(scsicmd);
628 int aac_read(Scsi_Cmnd * scsicmd, int cid)
630 u32 lba;
631 u32 count;
632 int status;
634 u16 fibsize;
635 struct aac_dev *dev;
636 struct fib * cmd_fibcontext;
638 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
640 * Get block address and transfer length
642 if (scsicmd->cmnd[0] == READ_6) /* 6 byte command */
644 dprintk((KERN_DEBUG "aachba: received a read(6) command on target %d.\n", cid));
646 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
647 count = scsicmd->cmnd[4];
649 if (count == 0)
650 count = 256;
651 } else {
652 dprintk((KERN_DEBUG "aachba: received a read(10) command on target %d.\n", cid));
654 lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
655 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
657 dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
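	/*
	 * CDB layout decoded above (SCSI-2): READ(6) carries a 21-bit LBA in
	 * bytes 1-3 (the low five bits of byte 1 are the most significant)
	 * and an 8-bit transfer length in byte 4, where 0 means 256 blocks;
	 * READ(10) carries a 32-bit LBA in bytes 2-5 and a 16-bit length in
	 * bytes 7-8. aac_write() below decodes WRITE(6)/WRITE(10) the same way.
	 */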
659 * Allocate and initialize a Fib
661 if (!(cmd_fibcontext = fib_alloc(dev))) {
662 scsicmd->result = DID_ERROR << 16;
663 aac_io_done(scsicmd);
664 return (-1);
667 fib_init(cmd_fibcontext);
669 if(dev->pae_support == 1){
670 struct aac_read64 *readcmd;
671 readcmd = (struct aac_read64 *) fib_data(cmd_fibcontext);
672 readcmd->command = cpu_to_le32(VM_CtHostRead64);
673 readcmd->cid = cpu_to_le16(cid);
674 readcmd->sector_count = cpu_to_le16(count);
675 readcmd->block = cpu_to_le32(lba);
676 readcmd->pad = cpu_to_le16(0);
677 readcmd->flags = cpu_to_le16(0);
679 aac_build_sg64(scsicmd, &readcmd->sg);
680 if(readcmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
681 BUG();
682 fibsize = sizeof(struct aac_read64) + ((readcmd->sg.count - 1) * sizeof (struct sgentry64));
684 * Now send the Fib to the adapter
686 status = fib_send(ContainerCommand64,
687 cmd_fibcontext,
688 fibsize,
689 FsaNormal,
690 0, 1,
691 (fib_callback) read_callback,
692 (void *) scsicmd);
693 } else {
694 struct aac_read *readcmd;
695 readcmd = (struct aac_read *) fib_data(cmd_fibcontext);
696 readcmd->command = cpu_to_le32(VM_CtBlockRead);
697 readcmd->cid = cpu_to_le32(cid);
698 readcmd->block = cpu_to_le32(lba);
699 readcmd->count = cpu_to_le32(count * 512);
701 if (count * 512 > (64 * 1024))
702 BUG();
704 aac_build_sg(scsicmd, &readcmd->sg);
705 if(readcmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
706 BUG();
707 fibsize = sizeof(struct aac_read) + ((readcmd->sg.count - 1) * sizeof (struct sgentry));
709 * Now send the Fib to the adapter
711 status = fib_send(ContainerCommand,
712 cmd_fibcontext,
713 fibsize,
714 FsaNormal,
715 0, 1,
716 (fib_callback) read_callback,
717 (void *) scsicmd);
723 * Check that the command was queued to the controller
725 if (status == -EINPROGRESS)
726 return 0;
728 printk(KERN_WARNING "aac_read: fib_send failed with status: %d.\n", status);
730 * For some reason, the Fib didn't queue, return QUEUE_FULL
732 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
733 aac_io_done(scsicmd);
734 fib_complete(cmd_fibcontext);
735 fib_free(cmd_fibcontext);
736 return -1;
739 static int aac_write(Scsi_Cmnd * scsicmd, int cid)
741 u32 lba;
742 u32 count;
743 int status;
744 u16 fibsize;
745 struct aac_dev *dev;
746 struct fib * cmd_fibcontext;
748 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
750 * Get block address and transfer length
752 if (scsicmd->cmnd[0] == WRITE_6) /* 6 byte command */
754 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
755 count = scsicmd->cmnd[4];
756 if (count == 0)
757 count = 256;
758 } else {
759 dprintk((KERN_DEBUG "aachba: received a write(10) command on target %d.\n", cid));
760 lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
761 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
763 dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
765 * Allocate and initialize a Fib then setup a BlockWrite command
767 if (!(cmd_fibcontext = fib_alloc(dev))) {
768 scsicmd->result = DID_ERROR << 16;
769 aac_io_done(scsicmd);
770 return -1;
772 fib_init(cmd_fibcontext);
774 if(dev->pae_support == 1){
775 struct aac_write64 *writecmd;
776 writecmd = (struct aac_write64 *) fib_data(cmd_fibcontext);
777 writecmd->command = cpu_to_le32(VM_CtHostWrite64);
778 writecmd->cid = cpu_to_le16(cid);
779 writecmd->sector_count = cpu_to_le16(count);
780 writecmd->block = cpu_to_le32(lba);
781 writecmd->pad = cpu_to_le16(0);
782 writecmd->flags = cpu_to_le16(0);
784 aac_build_sg64(scsicmd, &writecmd->sg);
785 if(writecmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
786 BUG();
787 fibsize = sizeof(struct aac_write64) + ((writecmd->sg.count - 1) * sizeof (struct sgentry64));
789 * Now send the Fib to the adapter
791 status = fib_send(ContainerCommand64,
792 cmd_fibcontext,
793 fibsize,
794 FsaNormal,
795 0, 1,
796 (fib_callback) write_callback,
797 (void *) scsicmd);
798 } else {
799 struct aac_write *writecmd;
800 writecmd = (struct aac_write *) fib_data(cmd_fibcontext);
801 writecmd->command = cpu_to_le32(VM_CtBlockWrite);
802 writecmd->cid = cpu_to_le32(cid);
803 writecmd->block = cpu_to_le32(lba);
804 writecmd->count = cpu_to_le32(count * 512);
805 writecmd->sg.count = cpu_to_le32(1);
806 /* ->stable is not used; it used to select the type of write */
808 if (count * 512 > (64 * 1024)) {
809 BUG();
812 aac_build_sg(scsicmd, &writecmd->sg);
813 if(writecmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
814 BUG();
815 fibsize = sizeof(struct aac_write) + ((writecmd->sg.count - 1) * sizeof (struct sgentry));
817 * Now send the Fib to the adapter
819 status = fib_send(ContainerCommand,
820 cmd_fibcontext,
821 fibsize,
822 FsaNormal,
823 0, 1,
824 (fib_callback) write_callback,
825 (void *) scsicmd);
829 * Check that the command was queued to the controller
831 if (status == -EINPROGRESS)
832 return 0;
834 printk(KERN_WARNING "aac_write: fib_send failed with status: %d\n", status);
836 * For some reason, the Fib didn't queue, return QUEUE_FULL
838 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
839 aac_io_done(scsicmd);
841 fib_complete(cmd_fibcontext);
842 fib_free(cmd_fibcontext);
843 return -1;
848 * aac_scsi_cmd() - Process SCSI command
849 * @scsicmd: SCSI command block
852 * Emulate a SCSI command and queue the required request for the
853 * aacraid firmware.
856 int aac_scsi_cmd(Scsi_Cmnd * scsicmd)
858 u32 cid = 0;
859 struct fsa_scsi_hba *fsa_dev_ptr;
860 int cardtype;
861 int ret;
862 struct Scsi_Host *host = scsicmd->device->host;
863 struct aac_dev *dev = (struct aac_dev *)host->hostdata;
865 cardtype = dev->cardtype;
867 fsa_dev_ptr = fsa_dev[host->unique_id];
870 * If the bus, target or lun is out of range, return fail
871 * Test does not apply to ID 16, the pseudo id for the controller
872 * itself.
874 if (scsicmd->device->id != host->this_id) {
875 if ((scsicmd->device->channel == 0) ){
876 if( (scsicmd->device->id >= AAC_MAX_TARGET) || (scsicmd->device->lun != 0)){
877 scsicmd->result = DID_NO_CONNECT << 16;
878 __aac_io_done(scsicmd);
879 return 0;
881 cid = TARGET_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
884 * If the target container doesn't exist, it may have
885 * been newly created
887 if (fsa_dev_ptr->valid[cid] == 0) {
888 switch (scsicmd->cmnd[0]) {
889 case INQUIRY:
890 case READ_CAPACITY:
891 case TEST_UNIT_READY:
892 spin_unlock_irq(host->host_lock);
893 probe_container(dev, cid);
894 spin_lock_irq(host->host_lock);
895 if (fsa_dev_ptr->valid[cid] == 0) {
896 scsicmd->result = DID_NO_CONNECT << 16;
897 __aac_io_done(scsicmd);
898 return 0;
900 default:
901 break;
905 * If the target container still doesn't exist,
906 * return failure
908 if (fsa_dev_ptr->valid[cid] == 0) {
909 scsicmd->result = DID_BAD_TARGET << 16;
910 __aac_io_done(scsicmd);
911 return -1;
913 } else { /* check for physical non-dasd devices */
914 if(dev->nondasd_support == 1){
915 return aac_send_srb_fib(scsicmd);
916 } else {
917 scsicmd->result = DID_NO_CONNECT << 16;
918 __aac_io_done(scsicmd);
919 return 0;
924 * else Command for the controller itself
926 else if ((scsicmd->cmnd[0] != INQUIRY) && /* only INQUIRY & TUR cmnd supported for controller */
927 (scsicmd->cmnd[0] != TEST_UNIT_READY))
929 dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
930 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
931 set_sense((u8 *) &sense_data[cid],
932 SENKEY_ILLEGAL,
933 SENCODE_INVALID_COMMAND,
934 ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
935 __aac_io_done(scsicmd);
936 return -1;
940 /* Handle commands here that don't really require going out to the adapter */
941 switch (scsicmd->cmnd[0]) {
942 case INQUIRY:
944 struct inquiry_data *inq_data_ptr;
946 dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->device->id));
947 inq_data_ptr = (struct inquiry_data *)scsicmd->request_buffer;
948 memset(inq_data_ptr, 0, sizeof (struct inquiry_data));
950 inq_data_ptr->inqd_ver = 2; /* claim compliance to SCSI-2 */
951 inq_data_ptr->inqd_dtq = 0x80; /* set RMB bit to one indicating that the medium is removable */
952 inq_data_ptr->inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
953 inq_data_ptr->inqd_len = 31;
954 /*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
955 inq_data_ptr->inqd_pad2= 0x32 ; /*WBus16|Sync|CmdQue */
957 * Set the Vendor, Product, and Revision Level
958 * see: <vendor>.c i.e. aac.c
960 setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), fsa_dev_ptr->type[cid]);
961 if (scsicmd->device->id == host->this_id)
962 inq_data_ptr->inqd_pdt = INQD_PDT_PROC; /* Processor device */
963 else
964 inq_data_ptr->inqd_pdt = INQD_PDT_DA; /* Direct/random access device */
965 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
966 __aac_io_done(scsicmd);
967 return 0;
969 case READ_CAPACITY:
971 int capacity;
972 char *cp;
974 dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
975 capacity = fsa_dev_ptr->size[cid] - 1;
976 cp = scsicmd->request_buffer;
977 cp[0] = (capacity >> 24) & 0xff;
978 cp[1] = (capacity >> 16) & 0xff;
979 cp[2] = (capacity >> 8) & 0xff;
980 cp[3] = (capacity >> 0) & 0xff;
981 cp[4] = 0;
982 cp[5] = 0;
983 cp[6] = 2;
984 cp[7] = 0;
986 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
987 __aac_io_done(scsicmd);
989 return 0;
992 case MODE_SENSE:
994 char *mode_buf;
996 dprintk((KERN_DEBUG "MODE SENSE command.\n"));
997 mode_buf = scsicmd->request_buffer;
998 mode_buf[0] = 3; /* Mode data length */
999 mode_buf[1] = 0; /* Medium type - default */
1000 mode_buf[2] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */
1001 mode_buf[3] = 0; /* Block descriptor length */
1003 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1004 __aac_io_done(scsicmd);
1006 return 0;
1008 case MODE_SENSE_10:
1010 char *mode_buf;
1012 dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
1013 mode_buf = scsicmd->request_buffer;
1014 mode_buf[0] = 0; /* Mode data length (MSB) */
1015 mode_buf[1] = 6; /* Mode data length (LSB) */
1016 mode_buf[2] = 0; /* Medium type - default */
1017 mode_buf[3] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */
1018 mode_buf[4] = 0; /* reserved */
1019 mode_buf[5] = 0; /* reserved */
1020 mode_buf[6] = 0; /* Block descriptor length (MSB) */
1021 mode_buf[7] = 0; /* Block descriptor length (LSB) */
1023 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1024 __aac_io_done(scsicmd);
1026 return 0;
1028 case REQUEST_SENSE:
1029 dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
1030 memcpy(scsicmd->sense_buffer, &sense_data[cid], sizeof (struct sense_data));
1031 memset(&sense_data[cid], 0, sizeof (struct sense_data));
1032 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1033 __aac_io_done(scsicmd);
1034 return (0);
1036 case ALLOW_MEDIUM_REMOVAL:
1037 dprintk((KERN_DEBUG "LOCK command.\n"));
1038 if (scsicmd->cmnd[4])
1039 fsa_dev_ptr->locked[cid] = 1;
1040 else
1041 fsa_dev_ptr->locked[cid] = 0;
1043 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1044 __aac_io_done(scsicmd);
1045 return 0;
1047 * These commands are all No-Ops
1049 case TEST_UNIT_READY:
1050 case RESERVE:
1051 case RELEASE:
1052 case REZERO_UNIT:
1053 case REASSIGN_BLOCKS:
1054 case SEEK_10:
1055 case START_STOP:
1056 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1057 __aac_io_done(scsicmd);
1058 return (0);
1061 switch (scsicmd->cmnd[0])
1063 case READ_6:
1064 case READ_10:
1066 * Hack to keep track of ordinal number of the device that
1067 * corresponds to a container. Needed to convert
1068 * containers to /dev/sd device names
1071 spin_unlock_irq(host->host_lock);
1072 if (scsicmd->request->rq_disk)
1073 memcpy(fsa_dev_ptr->devname[cid],
1074 scsicmd->request->rq_disk->disk_name,
1077 ret = aac_read(scsicmd, cid);
1078 spin_lock_irq(host->host_lock);
1079 return ret;
1081 case WRITE_6:
1082 case WRITE_10:
1083 spin_unlock_irq(host->host_lock);
1084 ret = aac_write(scsicmd, cid);
1085 spin_lock_irq(host->host_lock);
1086 return ret;
1087 default:
1089 * Unhandled commands
1091 printk(KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]);
1092 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1093 set_sense((u8 *) &sense_data[cid],
1094 SENKEY_ILLEGAL, SENCODE_INVALID_COMMAND,
1095 ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
1096 __aac_io_done(scsicmd);
1097 return -1;
1101 static int query_disk(struct aac_dev *dev, void *arg)
1103 struct aac_query_disk qd;
1104 struct fsa_scsi_hba *fsa_dev_ptr;
1106 fsa_dev_ptr = &(dev->fsa_dev);
1107 if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
1108 return -EFAULT;
1109 if (qd.cnum == -1)
1110 qd.cnum = TARGET_LUN_TO_CONTAINER(qd.target, qd.lun);
1111 else if ((qd.bus == -1) && (qd.target == -1) && (qd.lun == -1))
1113 if (qd.cnum < 0 || qd.cnum >= MAXIMUM_NUM_CONTAINERS)
1114 return -EINVAL;
1115 qd.instance = dev->scsi_host_ptr->host_no;
1116 qd.bus = 0;
1117 qd.target = CONTAINER_TO_TARGET(qd.cnum);
1118 qd.lun = CONTAINER_TO_LUN(qd.cnum);
1120 else return -EINVAL;
1122 qd.valid = fsa_dev_ptr->valid[qd.cnum];
1123 qd.locked = fsa_dev_ptr->locked[qd.cnum];
1124 qd.deleted = fsa_dev_ptr->deleted[qd.cnum];
1126 if (fsa_dev_ptr->devname[qd.cnum][0] == '\0')
1127 qd.unmapped = 1;
1128 else
1129 qd.unmapped = 0;
1131 strlcpy(qd.name, fsa_dev_ptr->devname[qd.cnum], sizeof(qd.name));
1133 if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
1134 return -EFAULT;
1135 return 0;
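/*
 * For example, an FSACTL_QUERY_DISK request with cnum == -1 and target == 3
 * resolves to container 3 via TARGET_LUN_TO_CONTAINER() and returns that
 * container's valid/locked/deleted flags, whether it has been mapped to a
 * /dev name yet, and the recorded name.
 */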
1138 static int force_delete_disk(struct aac_dev *dev, void *arg)
1140 struct aac_delete_disk dd;
1141 struct fsa_scsi_hba *fsa_dev_ptr;
1143 fsa_dev_ptr = &(dev->fsa_dev);
1145 if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
1146 return -EFAULT;
1148 if (dd.cnum >= MAXIMUM_NUM_CONTAINERS)
1149 return -EINVAL;
1151 * Mark this container as being deleted.
1153 fsa_dev_ptr->deleted[dd.cnum] = 1;
1155 * Mark the container as no longer valid
1157 fsa_dev_ptr->valid[dd.cnum] = 0;
1158 return 0;
1161 static int delete_disk(struct aac_dev *dev, void *arg)
1163 struct aac_delete_disk dd;
1164 struct fsa_scsi_hba *fsa_dev_ptr;
1166 fsa_dev_ptr = &(dev->fsa_dev);
1168 if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
1169 return -EFAULT;
1171 if (dd.cnum >= MAXIMUM_NUM_CONTAINERS)
1172 return -EINVAL;
1174 * If the container is locked, it can not be deleted by the API.
1176 if (fsa_dev_ptr->locked[dd.cnum])
1177 return -EBUSY;
1178 else {
1180 * Mark the container as no longer being valid.
1182 fsa_dev_ptr->valid[dd.cnum] = 0;
1183 fsa_dev_ptr->devname[dd.cnum][0] = '\0';
1184 return 0;
1188 int aac_dev_ioctl(struct aac_dev *dev, int cmd, void *arg)
1190 switch (cmd) {
1191 case FSACTL_QUERY_DISK:
1192 return query_disk(dev, arg);
1193 case FSACTL_DELETE_DISK:
1194 return delete_disk(dev, arg);
1195 case FSACTL_FORCE_DELETE_DISK:
1196 return force_delete_disk(dev, arg);
1197 case 2131:
1198 return aac_get_containers(dev);
1199 default:
1200 return -ENOTTY;
1206 * aac_srb_callback
1207 * @context: the context set in the fib - here it is scsi cmd
1208 * @fibptr: pointer to the fib
1210 * Handles the completion of a scsi command to a non dasd device
1214 static void aac_srb_callback(void *context, struct fib * fibptr)
1216 struct aac_dev *dev;
1217 struct aac_srb_reply *srbreply;
1218 Scsi_Cmnd *scsicmd;
1220 scsicmd = (Scsi_Cmnd *) context;
1221 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1223 if (fibptr == NULL)
1224 BUG();
1226 srbreply = (struct aac_srb_reply *) fib_data(fibptr);
1228 scsicmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
1229 // calculate resid for sg
1230 scsicmd->resid = scsicmd->request_bufflen - srbreply->data_xfer_length;
1232 if(scsicmd->use_sg)
1233 pci_unmap_sg(dev->pdev,
1234 (struct scatterlist *)scsicmd->buffer,
1235 scsicmd->use_sg,
1236 scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
1237 else if(scsicmd->request_bufflen)
1238 pci_unmap_single(dev->pdev, (ulong)scsicmd->SCp.ptr, scsicmd->request_bufflen,
1239 scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
1242 * First check the fib status
1245 if (le32_to_cpu(srbreply->status) != ST_OK){
1246 int len;
1247 printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
1248 len = (srbreply->sense_data_size > sizeof(scsicmd->sense_buffer))?
1249 sizeof(scsicmd->sense_buffer):srbreply->sense_data_size;
1250 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1251 memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
1255 * Next check the srb status
1257 switch( (le32_to_cpu(srbreply->srb_status))&0x3f){
1258 case SRB_STATUS_ERROR_RECOVERY:
1259 case SRB_STATUS_PENDING:
1260 case SRB_STATUS_SUCCESS:
1261 if(scsicmd->cmnd[0] == INQUIRY ){
1262 u8 b;
1263 u8 b1;
1264 /* We can't expose disk devices because we can't tell whether they
1265 * are the raw container drives or stand alone drives. If they have
1266 * the removable bit set then we should expose them though.
1268 b = (*(u8*)scsicmd->buffer)&0x1f;
1269 b1 = ((u8*)scsicmd->buffer)[1];
1270 if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
1271 || (b==TYPE_DISK && (b1&0x80)) ){
1272 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1273 } else {
1274 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
1276 } else {
1277 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1279 break;
1280 case SRB_STATUS_DATA_OVERRUN:
1281 switch(scsicmd->cmnd[0]){
1282 case READ_6:
1283 case WRITE_6:
1284 case READ_10:
1285 case WRITE_10:
1286 case READ_12:
1287 case WRITE_12:
1288 if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
1289 printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
1290 } else {
1291 printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
1293 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
1294 break;
1295 case INQUIRY: {
1296 u8 b;
1297 u8 b1;
1298 /* We can't expose disk devices because we can't tell whether they
1299 * are the raw container drives or stand alone drives
1301 b = (*(u8*)scsicmd->buffer)&0x0f;
1302 b1 = ((u8*)scsicmd->buffer)[1];
1303 if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
1304 || (b==TYPE_DISK && (b1&0x80)) ){
1305 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1306 } else {
1307 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
1309 break;
1311 default:
1312 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1313 break;
1315 break;
1316 case SRB_STATUS_ABORTED:
1317 scsicmd->result = DID_ABORT << 16 | ABORT << 8;
1318 break;
1319 case SRB_STATUS_ABORT_FAILED:
1320 // Not sure about this one - but assuming the hba was trying to abort for some reason
1321 scsicmd->result = DID_ERROR << 16 | ABORT << 8;
1322 break;
1323 case SRB_STATUS_PARITY_ERROR:
1324 scsicmd->result = DID_PARITY << 16 | MSG_PARITY_ERROR << 8;
1325 break;
1326 case SRB_STATUS_NO_DEVICE:
1327 case SRB_STATUS_INVALID_PATH_ID:
1328 case SRB_STATUS_INVALID_TARGET_ID:
1329 case SRB_STATUS_INVALID_LUN:
1330 case SRB_STATUS_SELECTION_TIMEOUT:
1331 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
1332 break;
1334 case SRB_STATUS_COMMAND_TIMEOUT:
1335 case SRB_STATUS_TIMEOUT:
1336 scsicmd->result = DID_TIME_OUT << 16 | COMMAND_COMPLETE << 8;
1337 break;
1339 case SRB_STATUS_BUSY:
1340 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
1341 break;
1343 case SRB_STATUS_BUS_RESET:
1344 scsicmd->result = DID_RESET << 16 | COMMAND_COMPLETE << 8;
1345 break;
1347 case SRB_STATUS_MESSAGE_REJECTED:
1348 scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
1349 break;
1350 case SRB_STATUS_REQUEST_FLUSHED:
1351 case SRB_STATUS_ERROR:
1352 case SRB_STATUS_INVALID_REQUEST:
1353 case SRB_STATUS_REQUEST_SENSE_FAILED:
1354 case SRB_STATUS_NO_HBA:
1355 case SRB_STATUS_UNEXPECTED_BUS_FREE:
1356 case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
1357 case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
1358 case SRB_STATUS_DELAYED_RETRY:
1359 case SRB_STATUS_BAD_FUNCTION:
1360 case SRB_STATUS_NOT_STARTED:
1361 case SRB_STATUS_NOT_IN_USE:
1362 case SRB_STATUS_FORCE_ABORT:
1363 case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
1364 default:
1365 #ifdef AAC_DETAILED_STATUS_INFO
1366 printk("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",le32_to_cpu(srbreply->srb_status&0x3f),aac_get_status_string(le32_to_cpu(srbreply->srb_status)), scsicmd->cmnd[0], le32_to_cpu(srbreply->scsi_status) );
1367 #endif
1368 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
1369 break;
1371 if (le32_to_cpu(srbreply->scsi_status) == 0x02 ){ // Check Condition
1372 int len;
1373 scsicmd->result |= SAM_STAT_CHECK_CONDITION;
1374 len = (srbreply->sense_data_size > sizeof(scsicmd->sense_buffer))?
1375 sizeof(scsicmd->sense_buffer):srbreply->sense_data_size;
1376 printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n", le32_to_cpu(srbreply->status), len);
1377 memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
1381 * OR in the scsi status (already shifted up a bit)
1383 scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
1385 fib_complete(fibptr);
1386 fib_free(fibptr);
1387 aac_io_done(scsicmd);
1392 * aac_send_srb_fib
1393 * @scsicmd: the scsi command block
1395 * This routine will form a FIB and fill in the aac_srb from the
1396 * scsicmd passed in.
1399 static int aac_send_srb_fib(Scsi_Cmnd* scsicmd)
1401 struct fib* cmd_fibcontext;
1402 struct aac_dev* dev;
1403 int status;
1404 struct aac_srb *srbcmd;
1405 u16 fibsize;
1406 u32 flag;
1407 u32 timeout;
1409 if( scsicmd->device->id > 15 || scsicmd->device->lun > 7) {
1410 scsicmd->result = DID_NO_CONNECT << 16;
1411 __aac_io_done(scsicmd);
1412 return 0;
1415 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1416 switch(scsicmd->sc_data_direction){
1417 case SCSI_DATA_WRITE:
1418 flag = SRB_DataOut;
1419 break;
1420 case SCSI_DATA_UNKNOWN:
1421 flag = SRB_DataIn | SRB_DataOut;
1422 break;
1423 case SCSI_DATA_READ:
1424 flag = SRB_DataIn;
1425 break;
1426 case SCSI_DATA_NONE:
1427 default:
1428 flag = SRB_NoDataXfer;
1429 break;
1434 * Allocate and initialize a Fib then set up the SRB command
1436 if (!(cmd_fibcontext = fib_alloc(dev))) {
1437 scsicmd->result = DID_ERROR << 16;
1438 __aac_io_done(scsicmd);
1439 return -1;
1441 fib_init(cmd_fibcontext);
1443 srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
1444 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
1445 srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scsicmd->device->channel));
1446 srbcmd->target = cpu_to_le32(scsicmd->device->id);
1447 srbcmd->lun = cpu_to_le32(scsicmd->device->lun);
1448 srbcmd->flags = cpu_to_le32(flag);
1449 timeout = (scsicmd->timeout-jiffies)/HZ;
1450 if(timeout == 0){
1451 timeout = 1;
1453 srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
1454 srbcmd->retry_limit =cpu_to_le32(0); // Obsolete parameter
1455 srbcmd->cdb_size = cpu_to_le32(scsicmd->cmd_len);
1457 if( dev->pae_support ==1 ) {
1458 aac_build_sg64(scsicmd, (struct sgmap64*) &srbcmd->sg);
1459 srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
1461 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
1462 memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
1464 * Build Scatter/Gather list
1466 fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry64));
1469 * Now send the Fib to the adapter
1471 status = fib_send(ScsiPortCommand64, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
1472 (fib_callback) aac_srb_callback, (void *) scsicmd);
1473 } else {
1474 aac_build_sg(scsicmd, (struct sgmap*)&srbcmd->sg);
1475 srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
1477 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
1478 memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
1480 * Build Scatter/Gather list
1482 fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry));
1485 * Now send the Fib to the adapter
1487 status = fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
1488 (fib_callback) aac_srb_callback, (void *) scsicmd);
1491 * Check that the command was queued to the controller
1493 if (status == -EINPROGRESS){
1494 return 0;
1497 printk(KERN_WARNING "aac_srb: fib_send failed with status: %d\n", status);
1499 * For some reason, the Fib didn't queue, return QUEUE_FULL
1501 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
1502 __aac_io_done(scsicmd);
1504 fib_complete(cmd_fibcontext);
1505 fib_free(cmd_fibcontext);
1507 return -1;
1510 static unsigned long aac_build_sg(Scsi_Cmnd* scsicmd, struct sgmap* psg)
1512 struct aac_dev *dev;
1513 unsigned long byte_count = 0;
1515 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1516 // Get rid of old data
1517 psg->count = cpu_to_le32(0);
1518 psg->sg[0].addr = cpu_to_le32(0);
1519 psg->sg[0].count = cpu_to_le32(0);
1520 if (scsicmd->use_sg) {
1521 struct scatterlist *sg;
1522 int i;
1523 int sg_count;
1524 sg = (struct scatterlist *) scsicmd->request_buffer;
1526 sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
1527 scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
1528 psg->count = cpu_to_le32(sg_count);
1530 byte_count = 0;
1532 for (i = 0; i < sg_count; i++) {
1533 psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
1534 psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
1535 byte_count += sg_dma_len(sg);
1536 sg++;
1538 /* hba wants the size to be exact */
1539 if(byte_count > scsicmd->request_bufflen){
1540 psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen);
1541 byte_count = scsicmd->request_bufflen;
1543 /* Check for command underflow */
1544 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
1545 printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
1546 byte_count, scsicmd->underflow);
1549 else if(scsicmd->request_bufflen) {
1550 dma_addr_t addr;
1551 addr = pci_map_single(dev->pdev,
1552 scsicmd->request_buffer,
1553 scsicmd->request_bufflen,
1554 scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
1555 psg->count = cpu_to_le32(1);
1556 psg->sg[0].addr = cpu_to_le32(addr);
1557 psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
1558 scsicmd->SCp.ptr = (char *)(ulong)addr;
1559 byte_count = scsicmd->request_bufflen;
1561 return byte_count;
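/*
 * Sketch of the sgmap built here: psg->count holds the number of entries
 * (little-endian) and each psg->sg[i] holds a 32-bit DMA address plus a
 * byte count; when the mapped scatterlist comes out longer than
 * request_bufflen the final entry is trimmed so the total matches exactly,
 * since the adapter expects the exact transfer size.
 */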
1565 static unsigned long aac_build_sg64(Scsi_Cmnd* scsicmd, struct sgmap64* psg)
1567 struct aac_dev *dev;
1568 unsigned long byte_count = 0;
1569 u64 le_addr;
1571 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1572 // Get rid of old data
1573 psg->count = cpu_to_le32(0);
1574 psg->sg[0].addr[0] = cpu_to_le32(0);
1575 psg->sg[0].addr[1] = cpu_to_le32(0);
1576 psg->sg[0].count = cpu_to_le32(0);
1577 if (scsicmd->use_sg) {
1578 struct scatterlist *sg;
1579 int i;
1580 int sg_count;
1581 sg = (struct scatterlist *) scsicmd->request_buffer;
1583 sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
1584 scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
1585 psg->count = cpu_to_le32(sg_count);
1587 byte_count = 0;
1589 for (i = 0; i < sg_count; i++) {
1590 le_addr = cpu_to_le64(sg_dma_address(sg));
1591 psg->sg[i].addr[1] = (u32)(le_addr>>32);
1592 psg->sg[i].addr[0] = (u32)(le_addr & 0xffffffff);
1593 psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
1594 byte_count += sg_dma_len(sg);
1595 sg++;
1597 /* hba wants the size to be exact */
1598 if(byte_count > scsicmd->request_bufflen){
1599 psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen);
1600 byte_count = scsicmd->request_bufflen;
1602 /* Check for command underflow */
1603 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
1604 printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
1605 byte_count, scsicmd->underflow);
1608 else if(scsicmd->request_bufflen) {
1609 dma_addr_t addr;
1610 addr = pci_map_single(dev->pdev,
1611 scsicmd->request_buffer,
1612 scsicmd->request_bufflen,
1613 scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
1614 psg->count = cpu_to_le32(1);
1615 le_addr = cpu_to_le64(addr);
1616 psg->sg[0].addr[1] = (u32)(le_addr>>32);
1617 psg->sg[0].addr[0] = (u32)(le_addr & 0xffffffff);
1618 psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
1619 scsicmd->SCp.ptr = (char *)(ulong)addr;
1620 byte_count = scsicmd->request_bufflen;
1622 return byte_count;
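/*
 * The 64-bit variant splits each DMA address across two 32-bit words,
 * addr[0] for the low half and addr[1] for the high half; on a
 * little-endian host a mapping at 0x123456000 is stored as
 * addr[0] = 0x23456000, addr[1] = 0x00000001.
 */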
1625 #ifdef AAC_DETAILED_STATUS_INFO
1627 struct aac_srb_status_info {
1628 u32 status;
1629 char *str;
1633 static struct aac_srb_status_info srb_status_info[] = {
1634 { SRB_STATUS_PENDING, "Pending Status"},
1635 { SRB_STATUS_SUCCESS, "Success"},
1636 { SRB_STATUS_ABORTED, "Aborted Command"},
1637 { SRB_STATUS_ABORT_FAILED, "Abort Failed"},
1638 { SRB_STATUS_ERROR, "Error Event"},
1639 { SRB_STATUS_BUSY, "Device Busy"},
1640 { SRB_STATUS_INVALID_REQUEST, "Invalid Request"},
1641 { SRB_STATUS_INVALID_PATH_ID, "Invalid Path ID"},
1642 { SRB_STATUS_NO_DEVICE, "No Device"},
1643 { SRB_STATUS_TIMEOUT, "Timeout"},
1644 { SRB_STATUS_SELECTION_TIMEOUT, "Selection Timeout"},
1645 { SRB_STATUS_COMMAND_TIMEOUT, "Command Timeout"},
1646 { SRB_STATUS_MESSAGE_REJECTED, "Message Rejected"},
1647 { SRB_STATUS_BUS_RESET, "Bus Reset"},
1648 { SRB_STATUS_PARITY_ERROR, "Parity Error"},
1649 { SRB_STATUS_REQUEST_SENSE_FAILED,"Request Sense Failed"},
1650 { SRB_STATUS_NO_HBA, "No HBA"},
1651 { SRB_STATUS_DATA_OVERRUN, "Data Overrun/Data Underrun"},
1652 { SRB_STATUS_UNEXPECTED_BUS_FREE,"Unexpected Bus Free"},
1653 { SRB_STATUS_PHASE_SEQUENCE_FAILURE,"Phase Error"},
1654 { SRB_STATUS_BAD_SRB_BLOCK_LENGTH,"Bad Srb Block Length"},
1655 { SRB_STATUS_REQUEST_FLUSHED, "Request Flushed"},
1656 { SRB_STATUS_DELAYED_RETRY, "Delayed Retry"},
1657 { SRB_STATUS_INVALID_LUN, "Invalid LUN"},
1658 { SRB_STATUS_INVALID_TARGET_ID, "Invalid TARGET ID"},
1659 { SRB_STATUS_BAD_FUNCTION, "Bad Function"},
1660 { SRB_STATUS_ERROR_RECOVERY, "Error Recovery"},
1661 { SRB_STATUS_NOT_STARTED, "Not Started"},
1662 { SRB_STATUS_NOT_IN_USE, "Not In Use"},
1663 { SRB_STATUS_FORCE_ABORT, "Force Abort"},
1664 { SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"},
1665 { 0xff, "Unknown Error"}
1668 char *aac_get_status_string(u32 status)
1670 int i;
1672 for(i=0; i < (sizeof(srb_status_info)/sizeof(struct aac_srb_status_info)); i++ ){
1673 if(srb_status_info[i].status == status){
1674 return srb_status_info[i].str;
1678 return "Bad Status Code";
1681 #endif