MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
[linux-2.6.9-moxart.git] / drivers / scsi / aacraid / aachba.c
1 /*
2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux.
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; see the file COPYING. If not, write to
22 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 #include <linux/kernel.h>
27 #include <linux/init.h>
28 #include <linux/types.h>
29 #include <linux/sched.h>
30 #include <linux/pci.h>
31 #include <linux/spinlock.h>
32 #include <linux/slab.h>
33 #include <linux/completion.h>
34 #include <linux/blkdev.h>
35 #include <asm/semaphore.h>
36 #include <asm/uaccess.h>
38 #include <scsi/scsi.h>
39 #include <scsi/scsi_cmnd.h>
40 #include <scsi/scsi_device.h>
41 #include <scsi/scsi_host.h>
43 #include "aacraid.h"
45 /* values for inqd_pdt: Peripheral device type in plain English */
46 #define INQD_PDT_DA 0x00 /* Direct-access (DISK) device */
47 #define INQD_PDT_PROC 0x03 /* Processor device */
48 #define INQD_PDT_CHNGR 0x08 /* Changer (jukebox, scsi2) */
49 #define INQD_PDT_COMM 0x09 /* Communication device (scsi2) */
50 #define INQD_PDT_NOLUN2 0x1f /* Unknown Device (scsi2) */
51 #define INQD_PDT_NOLUN 0x7f /* Logical Unit Not Present */
53 #define INQD_PDT_DMASK 0x1F /* Peripheral Device Type Mask */
 54 #define INQD_PDT_QMASK 0xE0 /* Peripheral Device Qualifier Mask */
56 #define MAX_FIB_DATA (sizeof(struct hw_fib) - sizeof(FIB_HEADER))
58 #define MAX_DRIVER_SG_SEGMENT_COUNT 17
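/* aac_read()/aac_write() BUG() if a request maps to more scatter-gather
 * entries than this, presumably so that the request header plus its sg
 * list always fits inside a single FIB. */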
 61 /* Sense codes */
64 #define SENCODE_NO_SENSE 0x00
65 #define SENCODE_END_OF_DATA 0x00
66 #define SENCODE_BECOMING_READY 0x04
67 #define SENCODE_INIT_CMD_REQUIRED 0x04
68 #define SENCODE_PARAM_LIST_LENGTH_ERROR 0x1A
69 #define SENCODE_INVALID_COMMAND 0x20
70 #define SENCODE_LBA_OUT_OF_RANGE 0x21
71 #define SENCODE_INVALID_CDB_FIELD 0x24
72 #define SENCODE_LUN_NOT_SUPPORTED 0x25
73 #define SENCODE_INVALID_PARAM_FIELD 0x26
74 #define SENCODE_PARAM_NOT_SUPPORTED 0x26
75 #define SENCODE_PARAM_VALUE_INVALID 0x26
76 #define SENCODE_RESET_OCCURRED 0x29
77 #define SENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x3E
78 #define SENCODE_INQUIRY_DATA_CHANGED 0x3F
79 #define SENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x39
80 #define SENCODE_DIAGNOSTIC_FAILURE 0x40
81 #define SENCODE_INTERNAL_TARGET_FAILURE 0x44
82 #define SENCODE_INVALID_MESSAGE_ERROR 0x49
83 #define SENCODE_LUN_FAILED_SELF_CONFIG 0x4c
84 #define SENCODE_OVERLAPPED_COMMAND 0x4E
 87 /* Additional sense codes */
90 #define ASENCODE_NO_SENSE 0x00
91 #define ASENCODE_END_OF_DATA 0x05
92 #define ASENCODE_BECOMING_READY 0x01
93 #define ASENCODE_INIT_CMD_REQUIRED 0x02
94 #define ASENCODE_PARAM_LIST_LENGTH_ERROR 0x00
95 #define ASENCODE_INVALID_COMMAND 0x00
96 #define ASENCODE_LBA_OUT_OF_RANGE 0x00
97 #define ASENCODE_INVALID_CDB_FIELD 0x00
98 #define ASENCODE_LUN_NOT_SUPPORTED 0x00
99 #define ASENCODE_INVALID_PARAM_FIELD 0x00
100 #define ASENCODE_PARAM_NOT_SUPPORTED 0x01
101 #define ASENCODE_PARAM_VALUE_INVALID 0x02
102 #define ASENCODE_RESET_OCCURRED 0x00
103 #define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x00
104 #define ASENCODE_INQUIRY_DATA_CHANGED 0x03
105 #define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x00
106 #define ASENCODE_DIAGNOSTIC_FAILURE 0x80
107 #define ASENCODE_INTERNAL_TARGET_FAILURE 0x00
108 #define ASENCODE_INVALID_MESSAGE_ERROR 0x00
109 #define ASENCODE_LUN_FAILED_SELF_CONFIG 0x00
110 #define ASENCODE_OVERLAPPED_COMMAND 0x00
112 #define BYTE0(x) (unsigned char)(x)
113 #define BYTE1(x) (unsigned char)((x) >> 8)
114 #define BYTE2(x) (unsigned char)((x) >> 16)
115 #define BYTE3(x) (unsigned char)((x) >> 24)
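/* Byte-extraction helpers used by set_sense() to store the 32-bit residue
 * MSB-first in the information field of the sense data. */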
117 /*------------------------------------------------------------------------------
118 * S T R U C T S / T Y P E D E F S
119 *----------------------------------------------------------------------------*/
120 /* SCSI inquiry data */
121 struct inquiry_data {
122 u8 inqd_pdt; /* Peripheral qualifier | Peripheral Device Type */
123 u8 inqd_dtq; /* RMB | Device Type Qualifier */
124 u8 inqd_ver; /* ISO version | ECMA version | ANSI-approved version */
125 u8 inqd_rdf; /* AENC | TrmIOP | Response data format */
126 u8 inqd_len; /* Additional length (n-4) */
127 u8 inqd_pad1[2];/* Reserved - must be zero */
128 u8 inqd_pad2; /* RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
129 u8 inqd_vid[8]; /* Vendor ID */
130 u8 inqd_pid[16];/* Product ID */
131 u8 inqd_prl[4]; /* Product Revision Level */
134 struct sense_data {
135 u8 error_code; /* 70h (current errors), 71h(deferred errors) */
136 u8 valid:1; /* A valid bit of one indicates that the information */
137 /* field contains valid information as defined in the
138 * SCSI-2 Standard.
140 u8 segment_number; /* Only used for COPY, COMPARE, or COPY AND VERIFY Commands */
141 u8 sense_key:4; /* Sense Key */
142 u8 reserved:1;
143 u8 ILI:1; /* Incorrect Length Indicator */
144 u8 EOM:1; /* End Of Medium - reserved for random access devices */
145 u8 filemark:1; /* Filemark - reserved for random access devices */
147 u8 information[4]; /* for direct-access devices, contains the unsigned
148 * logical block address or residue associated with
149 * the sense key
151 u8 add_sense_len; /* number of additional sense bytes to follow this field */
152 u8 cmnd_info[4]; /* not used */
153 u8 ASC; /* Additional Sense Code */
154 u8 ASCQ; /* Additional Sense Code Qualifier */
155 u8 FRUC; /* Field Replaceable Unit Code - not used */
156 u8 bit_ptr:3; /* indicates which byte of the CDB or parameter data
157 * was in error
159 u8 BPV:1; /* bit pointer valid (BPV): 1- indicates that
160 * the bit_ptr field has valid value
162 u8 reserved2:2;
163 u8 CD:1; /* command data bit: 1- illegal parameter in CDB.
164 * 0- illegal parameter in data.
166 u8 SKSV:1;
167 u8 field_ptr[2]; /* byte of the CDB or parameter data in error */
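/* One sense_data entry is kept per container (see sense_data[] below); it is
 * filled in by set_sense() whenever a command is failed with CHECK CONDITION
 * and then copied into scsicmd->sense_buffer. */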
171 /* M O D U L E   G L O B A L S */
174 static struct sense_data sense_data[MAXIMUM_NUM_CONTAINERS];
175 static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap);
176 static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg);
177 static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
178 #ifdef AAC_DETAILED_STATUS_INFO
179 static char *aac_get_status_string(u32 status);
180 #endif
183 /* Non dasd selection is handled entirely in aachba now */
186 MODULE_PARM(nondasd, "i");
187 MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on");
188 MODULE_PARM(dacmode, "i");
189 MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC. 0=off, 1=on");
190 MODULE_PARM(commit, "i");
191 MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the adapter for foreign arrays.\nThis is typically needed in systems that do not have a BIOS. 0=off, 1=on");
193 static int nondasd = -1;
194 static int dacmode = -1;
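/* nondasd and dacmode default to -1, i.e. "not set on the module command
 * line"; aac_get_adapter_info() then derives the setting from the adapter's
 * option flags and only overrides it when a parameter was actually given. */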
197 * aac_get_containers - list containers
198 * @dev: adapter to probe
200 * Make a list of all containers on this controller
202 int aac_get_containers(struct aac_dev *dev)
204 struct fsa_scsi_hba *fsa_dev_ptr;
205 u32 index;
206 int status = 0;
207 struct aac_query_mount *dinfo;
208 struct aac_mount *dresp;
209 struct fib * fibptr;
210 unsigned instance;
212 fsa_dev_ptr = &(dev->fsa_dev);
213 instance = dev->scsi_host_ptr->unique_id;
215 if (!(fibptr = fib_alloc(dev)))
216 return -ENOMEM;
218 for (index = 0; index < MAXIMUM_NUM_CONTAINERS; index++) {
219 fib_init(fibptr);
220 dinfo = (struct aac_query_mount *) fib_data(fibptr);
222 dinfo->command = cpu_to_le32(VM_NameServe);
223 dinfo->count = cpu_to_le32(index);
224 dinfo->type = cpu_to_le32(FT_FILESYS);
226 status = fib_send(ContainerCommand,
227 fibptr,
228 sizeof (struct aac_query_mount),
229 FsaNormal,
230 1, 1,
231 NULL, NULL);
232 if (status < 0 ) {
233 printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
234 break;
236 dresp = (struct aac_mount *)fib_data(fibptr);
238 if ((le32_to_cpu(dresp->status) == ST_OK) &&
239 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
240 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
241 fsa_dev_ptr->valid[index] = 1;
242 fsa_dev_ptr->type[index] = le32_to_cpu(dresp->mnt[0].vol);
243 fsa_dev_ptr->size[index] = le32_to_cpu(dresp->mnt[0].capacity);
244 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
245 fsa_dev_ptr->ro[index] = 1;
247 fib_complete(fibptr);
249 * If there are no more containers, then stop asking.
251 if ((index + 1) >= le32_to_cpu(dresp->count)){
252 break;
255 fib_free(fibptr);
256 return status;
260 * probe_container - query a logical volume
261 * @dev: device to query
262 * @cid: container identifier
264 * Queries the controller about the given volume. The volume information
265 * is updated in the struct fsa_scsi_hba structure rather than returned.
268 static int probe_container(struct aac_dev *dev, int cid)
270 struct fsa_scsi_hba *fsa_dev_ptr;
271 int status;
272 struct aac_query_mount *dinfo;
273 struct aac_mount *dresp;
274 struct fib * fibptr;
275 unsigned instance;
277 fsa_dev_ptr = &(dev->fsa_dev);
278 instance = dev->scsi_host_ptr->unique_id;
280 if (!(fibptr = fib_alloc(dev)))
281 return -ENOMEM;
283 fib_init(fibptr);
285 dinfo = (struct aac_query_mount *)fib_data(fibptr);
287 dinfo->command = cpu_to_le32(VM_NameServe);
288 dinfo->count = cpu_to_le32(cid);
289 dinfo->type = cpu_to_le32(FT_FILESYS);
291 status = fib_send(ContainerCommand,
292 fibptr,
293 sizeof(struct aac_query_mount),
294 FsaNormal,
295 1, 1,
296 NULL, NULL);
297 if (status < 0) {
298 printk(KERN_WARNING "aacraid: probe_containers query failed.\n");
299 goto error;
302 dresp = (struct aac_mount *) fib_data(fibptr);
304 if ((le32_to_cpu(dresp->status) == ST_OK) &&
305 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
306 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
307 fsa_dev_ptr->valid[cid] = 1;
308 fsa_dev_ptr->type[cid] = le32_to_cpu(dresp->mnt[0].vol);
309 fsa_dev_ptr->size[cid] = le32_to_cpu(dresp->mnt[0].capacity);
310 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
311 fsa_dev_ptr->ro[cid] = 1;
314 error:
315 fib_complete(fibptr);
316 fib_free(fibptr);
318 return status;
321 /* Local Structure to set SCSI inquiry data strings */
322 struct scsi_inq {
323 char vid[8]; /* Vendor ID */
324 char pid[16]; /* Product ID */
325 char prl[4]; /* Product Revision Level */
329 * InqStrCopy - string merge
330 * @a: string to copy from
331 * @b: string to copy to
333 * Copy a String from one location to another
334 * without copying \0
337 static void inqstrcpy(char *a, char *b)
340 while(*a != (char)0)
341 *b++ = *a++;
344 static char *container_types[] = {
345 "None",
346 "Volume",
347 "Mirror",
348 "Stripe",
349 "RAID5",
350 "SSRW",
351 "SSRO",
352 "Morph",
353 "Legacy",
354 "RAID4",
355 "RAID10",
356 "RAID00",
357 "V-MIRRORS",
358 "PSEUDO R4",
359 "RAID50",
360 "Unknown"
365 /* Function: setinqstr
367 * Arguments: device type, pointer to the inquiry data buffer, container type index
369 * Purpose: Sets SCSI inquiry data strings for vendor, product
370 * and revision level. Allows strings to be set in platform dependent
371 * files instead of in OS dependent driver source.
374 static void setinqstr(int devtype, void *data, int tindex)
376 struct scsi_inq *str;
377 char *findit;
378 struct aac_driver_ident *mp;
380 mp = aac_get_driver_ident(devtype);
382 str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
384 inqstrcpy (mp->vname, str->vid);
385 inqstrcpy (mp->model, str->pid); /* last six chars reserved for vol type */
387 findit = str->pid;
389 for ( ; *findit != ' '; findit++); /* walk till we find a space then incr by 1 */
390 findit++;
392 if (tindex < (sizeof(container_types)/sizeof(char *))){
393 inqstrcpy (container_types[tindex], findit);
395 inqstrcpy ("V1.0", str->prl);
398 void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code,
399 u8 a_sense_code, u8 incorrect_length,
400 u8 bit_pointer, u16 field_pointer,
401 u32 residue)
403 sense_buf[0] = 0xF0; /* Sense data valid, err code 70h (current error) */
404 sense_buf[1] = 0; /* Segment number, always zero */
406 if (incorrect_length) {
407 sense_buf[2] = sense_key | 0x20;/* Set ILI bit | sense key */
408 sense_buf[3] = BYTE3(residue);
409 sense_buf[4] = BYTE2(residue);
410 sense_buf[5] = BYTE1(residue);
411 sense_buf[6] = BYTE0(residue);
412 } else
413 sense_buf[2] = sense_key; /* Sense key */
415 if (sense_key == ILLEGAL_REQUEST)
416 sense_buf[7] = 10; /* Additional sense length */
417 else
418 sense_buf[7] = 6; /* Additional sense length */
420 sense_buf[12] = sense_code; /* Additional sense code */
421 sense_buf[13] = a_sense_code; /* Additional sense code qualifier */
422 if (sense_key == ILLEGAL_REQUEST) {
423 sense_buf[15] = 0;
425 if (sense_code == SENCODE_INVALID_PARAM_FIELD)
426 sense_buf[15] = 0x80;/* Std sense key specific field */
427 /* Illegal parameter is in the parameter block */
429 if (sense_code == SENCODE_INVALID_CDB_FIELD)
430 sense_buf[15] = 0xc0;/* Std sense key specific field */
431 /* Illegal parameter is in the CDB block */
432 sense_buf[15] |= bit_pointer;
433 sense_buf[16] = field_pointer >> 8; /* MSB */
434 sense_buf[17] = field_pointer; /* LSB */
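/* Completion helpers: aac_io_done() takes the host lock around scsi_done()
 * and is used from FIB callbacks; __aac_io_done() is for paths such as
 * aac_scsi_cmd() that are entered with the host lock already held. */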
438 static void aac_io_done(struct scsi_cmnd * scsicmd)
440 unsigned long cpu_flags;
441 struct Scsi_Host *host = scsicmd->device->host;
442 spin_lock_irqsave(host->host_lock, cpu_flags);
443 scsicmd->scsi_done(scsicmd);
444 spin_unlock_irqrestore(host->host_lock, cpu_flags);
447 static void __aac_io_done(struct scsi_cmnd * scsicmd)
449 scsicmd->scsi_done(scsicmd);
452 int aac_get_adapter_info(struct aac_dev* dev)
454 struct fib* fibptr;
455 struct aac_adapter_info* info;
456 int rcode;
457 u32 tmp;
458 if (!(fibptr = fib_alloc(dev)))
459 return -ENOMEM;
461 fib_init(fibptr);
462 info = (struct aac_adapter_info*) fib_data(fibptr);
464 memset(info,0,sizeof(struct aac_adapter_info));
466 rcode = fib_send(RequestAdapterInfo,
467 fibptr,
468 sizeof(struct aac_adapter_info),
469 FsaNormal,
470 1, 1,
471 NULL,
472 NULL);
474 memcpy(&dev->adapter_info, info, sizeof(struct aac_adapter_info));
476 tmp = dev->adapter_info.kernelrev;
477 printk(KERN_INFO"%s%d: kernel %d.%d.%d build %d\n",
478 dev->name, dev->id,
479 tmp>>24,(tmp>>16)&0xff,(tmp>>8)&0xff,
480 dev->adapter_info.kernelbuild);
481 tmp = dev->adapter_info.monitorrev;
482 printk(KERN_INFO"%s%d: monitor %d.%d.%d build %d\n",
483 dev->name, dev->id,
484 tmp>>24,(tmp>>16)&0xff,(tmp>>8)&0xff,
485 dev->adapter_info.monitorbuild);
486 tmp = dev->adapter_info.biosrev;
487 printk(KERN_INFO"%s%d: bios %d.%d.%d build %d\n",
488 dev->name, dev->id,
489 tmp>>24,(tmp>>16)&0xff,(tmp>>8)&0xff,
490 dev->adapter_info.biosbuild);
491 printk(KERN_INFO"%s%d: serial %x%x\n",
492 dev->name, dev->id,
493 dev->adapter_info.serial[0],
494 dev->adapter_info.serial[1]);
496 dev->nondasd_support = 0;
497 if(dev->adapter_info.options & AAC_OPT_NONDASD){
498 dev->nondasd_support = 1;
500 if(nondasd != -1) {
501 dev->nondasd_support = (nondasd!=0);
503 if(dev->nondasd_support != 0){
504 printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
507 dev->dac_support = 0;
508 if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){
509 printk(KERN_INFO "%s%d: 64bit support enabled.\n", dev->name, dev->id);
510 dev->dac_support = 1;
513 if(dacmode != -1) {
514 dev->dac_support = (dacmode!=0);
516 if(dev->dac_support != 0) {
517 if (!pci_set_dma_mask(dev->pdev, 0xFFFFFFFFFFFFFFFFULL) &&
518 !pci_set_consistent_dma_mask(dev->pdev, 0xFFFFFFFFFFFFFFFFULL)) {
519 printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n",
520 dev->name, dev->id);
521 } else if (!pci_set_dma_mask(dev->pdev, 0xFFFFFFFFULL) &&
522 !pci_set_consistent_dma_mask(dev->pdev, 0xFFFFFFFFULL)) {
523 printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n",
524 dev->name, dev->id);
525 dev->dac_support = 0;
526 } else {
527 printk(KERN_WARNING"%s%d: No suitable DMA available.\n",
528 dev->name, dev->id);
529 rcode = -ENOMEM;
533 fib_complete(fibptr);
534 fib_free(fibptr);
536 return rcode;
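/* read_callback()/write_callback() run when the adapter completes a block
 * I/O FIB: they unmap the DMA buffers, turn the FIB status into a SCSI
 * result (raising a hardware-error CHECK CONDITION on failure), free the
 * FIB and complete the command. */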
540 static void read_callback(void *context, struct fib * fibptr)
542 struct aac_dev *dev;
543 struct aac_read_reply *readreply;
544 struct scsi_cmnd *scsicmd;
545 u32 lba;
546 u32 cid;
548 scsicmd = (struct scsi_cmnd *) context;
550 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
551 cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
553 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
554 dprintk((KERN_DEBUG "read_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
556 if (fibptr == NULL)
557 BUG();
559 if(scsicmd->use_sg)
560 pci_unmap_sg(dev->pdev,
561 (struct scatterlist *)scsicmd->buffer,
562 scsicmd->use_sg,
563 scsicmd->sc_data_direction);
564 else if(scsicmd->request_bufflen)
565 pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle,
566 scsicmd->request_bufflen,
567 scsicmd->sc_data_direction);
568 readreply = (struct aac_read_reply *)fib_data(fibptr);
569 if (le32_to_cpu(readreply->status) == ST_OK)
570 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
571 else {
572 printk(KERN_WARNING "read_callback: read failed, status = %d\n", readreply->status);
573 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
574 set_sense((u8 *) &sense_data[cid],
575 HARDWARE_ERROR,
576 SENCODE_INTERNAL_TARGET_FAILURE,
577 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
578 0, 0);
579 memcpy(scsicmd->sense_buffer, &sense_data[cid], sizeof(struct sense_data));
581 fib_complete(fibptr);
582 fib_free(fibptr);
584 aac_io_done(scsicmd);
587 static void write_callback(void *context, struct fib * fibptr)
589 struct aac_dev *dev;
590 struct aac_write_reply *writereply;
591 struct scsi_cmnd *scsicmd;
592 u32 lba;
593 u32 cid;
595 scsicmd = (struct scsi_cmnd *) context;
596 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
597 cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
599 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
600 dprintk((KERN_DEBUG "write_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
601 if (fibptr == NULL)
602 BUG();
604 if(scsicmd->use_sg)
605 pci_unmap_sg(dev->pdev,
606 (struct scatterlist *)scsicmd->buffer,
607 scsicmd->use_sg,
608 scsicmd->sc_data_direction);
609 else if(scsicmd->request_bufflen)
610 pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle,
611 scsicmd->request_bufflen,
612 scsicmd->sc_data_direction);
614 writereply = (struct aac_write_reply *) fib_data(fibptr);
615 if (le32_to_cpu(writereply->status) == ST_OK)
616 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
617 else {
618 printk(KERN_WARNING "write_callback: write failed, status = %d\n", writereply->status);
619 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
620 set_sense((u8 *) &sense_data[cid],
621 HARDWARE_ERROR,
622 SENCODE_INTERNAL_TARGET_FAILURE,
623 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
624 0, 0);
625 memcpy(scsicmd->sense_buffer, &sense_data[cid], sizeof(struct sense_data));
628 fib_complete(fibptr);
629 fib_free(fibptr);
630 aac_io_done(scsicmd);
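/* aac_read() decodes READ(6)/READ(10) into an lba/count pair and then sends
 * either a 64-bit VM_CtHostRead64 or a 32-bit VM_CtBlockRead container
 * command, depending on dac_support, completing asynchronously through
 * read_callback(). aac_write() below mirrors this for WRITE(6)/WRITE(10). */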
633 int aac_read(struct scsi_cmnd * scsicmd, int cid)
635 u32 lba;
636 u32 count;
637 int status;
639 u16 fibsize;
640 struct aac_dev *dev;
641 struct fib * cmd_fibcontext;
643 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
645 * Get block address and transfer length
647 if (scsicmd->cmnd[0] == READ_6) /* 6 byte command */
649 dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", cid));
651 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
652 count = scsicmd->cmnd[4];
654 if (count == 0)
655 count = 256;
656 } else {
657 dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", cid));
659 lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
660 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
662 dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
664 * Allocate and initialize a Fib
666 if (!(cmd_fibcontext = fib_alloc(dev))) {
667 return -1;
670 fib_init(cmd_fibcontext);
672 if(dev->dac_support == 1) {
673 struct aac_read64 *readcmd;
674 readcmd = (struct aac_read64 *) fib_data(cmd_fibcontext);
675 readcmd->command = cpu_to_le32(VM_CtHostRead64);
676 readcmd->cid = cpu_to_le16(cid);
677 readcmd->sector_count = cpu_to_le16(count);
678 readcmd->block = cpu_to_le32(lba);
679 readcmd->pad = cpu_to_le16(0);
680 readcmd->flags = cpu_to_le16(0);
682 aac_build_sg64(scsicmd, &readcmd->sg);
683 if(readcmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
684 BUG();
685 fibsize = sizeof(struct aac_read64) + ((readcmd->sg.count - 1) * sizeof (struct sgentry64));
687 * Now send the Fib to the adapter
689 status = fib_send(ContainerCommand64,
690 cmd_fibcontext,
691 fibsize,
692 FsaNormal,
693 0, 1,
694 (fib_callback) read_callback,
695 (void *) scsicmd);
696 } else {
697 struct aac_read *readcmd;
698 readcmd = (struct aac_read *) fib_data(cmd_fibcontext);
699 readcmd->command = cpu_to_le32(VM_CtBlockRead);
700 readcmd->cid = cpu_to_le32(cid);
701 readcmd->block = cpu_to_le32(lba);
702 readcmd->count = cpu_to_le32(count * 512);
704 if (count * 512 > (64 * 1024))
705 BUG();
707 aac_build_sg(scsicmd, &readcmd->sg);
708 if(readcmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
709 BUG();
710 fibsize = sizeof(struct aac_read) + ((readcmd->sg.count - 1) * sizeof (struct sgentry));
712 * Now send the Fib to the adapter
714 status = fib_send(ContainerCommand,
715 cmd_fibcontext,
716 fibsize,
717 FsaNormal,
718 0, 1,
719 (fib_callback) read_callback,
720 (void *) scsicmd);
726 * Check that the command queued to the controller
728 if (status == -EINPROGRESS)
730 dprintk("read queued.\n");
731 return 0;
734 printk(KERN_WARNING "aac_read: fib_send failed with status: %d.\n", status);
736 * For some reason, the Fib didn't queue, return QUEUE_FULL
738 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
739 aac_io_done(scsicmd);
740 fib_complete(cmd_fibcontext);
741 fib_free(cmd_fibcontext);
742 return -1;
745 static int aac_write(struct scsi_cmnd * scsicmd, int cid)
747 u32 lba;
748 u32 count;
749 int status;
750 u16 fibsize;
751 struct aac_dev *dev;
752 struct fib * cmd_fibcontext;
754 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
756 * Get block address and transfer length
758 if (scsicmd->cmnd[0] == WRITE_6) /* 6 byte command */
760 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
761 count = scsicmd->cmnd[4];
762 if (count == 0)
763 count = 256;
764 } else {
765 dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", cid));
766 lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
767 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
769 dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
771 * Allocate and initialize a Fib then setup a BlockWrite command
773 if (!(cmd_fibcontext = fib_alloc(dev))) {
774 scsicmd->result = DID_ERROR << 16;
775 aac_io_done(scsicmd);
776 return -1;
778 fib_init(cmd_fibcontext);
780 if(dev->dac_support == 1) {
781 struct aac_write64 *writecmd;
782 writecmd = (struct aac_write64 *) fib_data(cmd_fibcontext);
783 writecmd->command = cpu_to_le32(VM_CtHostWrite64);
784 writecmd->cid = cpu_to_le16(cid);
785 writecmd->sector_count = cpu_to_le16(count);
786 writecmd->block = cpu_to_le32(lba);
787 writecmd->pad = cpu_to_le16(0);
788 writecmd->flags = cpu_to_le16(0);
790 aac_build_sg64(scsicmd, &writecmd->sg);
791 if(writecmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
792 BUG();
793 fibsize = sizeof(struct aac_write64) + ((writecmd->sg.count - 1) * sizeof (struct sgentry64));
795 * Now send the Fib to the adapter
797 status = fib_send(ContainerCommand64,
798 cmd_fibcontext,
799 fibsize,
800 FsaNormal,
801 0, 1,
802 (fib_callback) write_callback,
803 (void *) scsicmd);
804 } else {
805 struct aac_write *writecmd;
806 writecmd = (struct aac_write *) fib_data(cmd_fibcontext);
807 writecmd->command = cpu_to_le32(VM_CtBlockWrite);
808 writecmd->cid = cpu_to_le32(cid);
809 writecmd->block = cpu_to_le32(lba);
810 writecmd->count = cpu_to_le32(count * 512);
811 writecmd->sg.count = cpu_to_le32(1);
812 /* ->stable is not used - it did mean which type of write */
814 if (count * 512 > (64 * 1024)) {
815 BUG();
818 aac_build_sg(scsicmd, &writecmd->sg);
819 if(writecmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
820 BUG();
821 fibsize = sizeof(struct aac_write) + ((writecmd->sg.count - 1) * sizeof (struct sgentry));
823 * Now send the Fib to the adapter
825 status = fib_send(ContainerCommand,
826 cmd_fibcontext,
827 fibsize,
828 FsaNormal,
829 0, 1,
830 (fib_callback) write_callback,
831 (void *) scsicmd);
835 * Check that the command queued to the controller
837 if (status == -EINPROGRESS)
839 dprintk("write queued.\n");
840 return 0;
843 printk(KERN_WARNING "aac_write: fib_send failed with status: %d\n", status);
845 * For some reason, the Fib didn't queue, return QUEUE_FULL
847 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
848 aac_io_done(scsicmd);
850 fib_complete(cmd_fibcontext);
851 fib_free(cmd_fibcontext);
852 return -1;
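/* aac_scsi_cmd() answers INQUIRY, READ CAPACITY, MODE SENSE, REQUEST SENSE,
 * ALLOW_MEDIUM_REMOVAL and several no-op commands directly from driver state;
 * only reads and writes (and SRB pass-through for non-dasd devices) are
 * forwarded to the adapter. */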
857 * aac_scsi_cmd() - Process SCSI command
858 * @scsicmd: SCSI command block
860 * Emulate a SCSI command and queue the required request for the
861 * aacraid firmware.
864 int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
866 u32 cid = 0;
867 int ret;
868 struct Scsi_Host *host = scsicmd->device->host;
869 struct aac_dev *dev = (struct aac_dev *)host->hostdata;
870 struct fsa_scsi_hba *fsa_dev_ptr = &dev->fsa_dev;
871 int cardtype = dev->cardtype;
874 * If the bus, id or lun is out of range, return fail
875 * Test does not apply to ID 16, the pseudo id for the controller
876 * itself.
878 if (scsicmd->device->id != host->this_id) {
879 if ((scsicmd->device->channel == 0) ){
880 if( (scsicmd->device->id >= MAXIMUM_NUM_CONTAINERS) || (scsicmd->device->lun != 0)){
881 scsicmd->result = DID_NO_CONNECT << 16;
882 __aac_io_done(scsicmd);
883 return 0;
885 cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
888 * If the target container doesn't exist, it may have
889 * been newly created
891 if (fsa_dev_ptr->valid[cid] == 0) {
892 switch (scsicmd->cmnd[0]) {
893 case INQUIRY:
894 case READ_CAPACITY:
895 case TEST_UNIT_READY:
896 spin_unlock_irq(host->host_lock);
897 probe_container(dev, cid);
898 spin_lock_irq(host->host_lock);
899 if (fsa_dev_ptr->valid[cid] == 0) {
900 scsicmd->result = DID_NO_CONNECT << 16;
901 __aac_io_done(scsicmd);
902 return 0;
904 default:
905 break;
909 * If the target container still doesn't exist,
910 * return failure
912 if (fsa_dev_ptr->valid[cid] == 0) {
913 scsicmd->result = DID_BAD_TARGET << 16;
914 __aac_io_done(scsicmd);
915 return 0;
917 } else { /* check for physical non-dasd devices */
918 if(dev->nondasd_support == 1){
919 return aac_send_srb_fib(scsicmd);
920 } else {
921 scsicmd->result = DID_NO_CONNECT << 16;
922 __aac_io_done(scsicmd);
923 return 0;
928 * else Command for the controller itself
930 else if ((scsicmd->cmnd[0] != INQUIRY) && /* only INQUIRY & TUR cmnd supported for controller */
931 (scsicmd->cmnd[0] != TEST_UNIT_READY))
933 dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
934 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
935 set_sense((u8 *) &sense_data[cid],
936 ILLEGAL_REQUEST,
937 SENCODE_INVALID_COMMAND,
938 ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
939 __aac_io_done(scsicmd);
940 memcpy(scsicmd->sense_buffer, &sense_data[cid], sizeof(struct sense_data));
941 return 0;
945 /* Handle commands here that don't really require going out to the adapter */
946 switch (scsicmd->cmnd[0]) {
947 case INQUIRY:
949 struct inquiry_data *inq_data_ptr;
951 dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->device->id));
952 inq_data_ptr = (struct inquiry_data *)scsicmd->request_buffer;
953 memset(inq_data_ptr, 0, sizeof (struct inquiry_data));
955 inq_data_ptr->inqd_ver = 2; /* claim compliance to SCSI-2 */
956 inq_data_ptr->inqd_dtq = 0x80; /* set RMB bit to one indicating that the medium is removable */
957 inq_data_ptr->inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
958 inq_data_ptr->inqd_len = 31;
959 /*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
960 inq_data_ptr->inqd_pad2= 0x32 ; /*WBus16|Sync|CmdQue */
962 * Set the Vendor, Product, and Revision Level
963 * see: <vendor>.c i.e. aac.c
965 setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), fsa_dev_ptr->type[cid]);
966 if (scsicmd->device->id == host->this_id)
967 inq_data_ptr->inqd_pdt = INQD_PDT_PROC; /* Processor device */
968 else
969 inq_data_ptr->inqd_pdt = INQD_PDT_DA; /* Direct/random access device */
970 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
971 __aac_io_done(scsicmd);
972 return 0;
974 case READ_CAPACITY:
976 int capacity;
977 char *cp;
979 dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
980 capacity = fsa_dev_ptr->size[cid] - 1;
981 cp = scsicmd->request_buffer;
982 cp[0] = (capacity >> 24) & 0xff;
983 cp[1] = (capacity >> 16) & 0xff;
984 cp[2] = (capacity >> 8) & 0xff;
985 cp[3] = (capacity >> 0) & 0xff;
986 cp[4] = 0;
987 cp[5] = 0;
988 cp[6] = 2;
989 cp[7] = 0;
991 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
992 __aac_io_done(scsicmd);
994 return 0;
997 case MODE_SENSE:
999 char *mode_buf;
1001 dprintk((KERN_DEBUG "MODE SENSE command.\n"));
1002 mode_buf = scsicmd->request_buffer;
1003 mode_buf[0] = 3; /* Mode data length */
1004 mode_buf[1] = 0; /* Medium type - default */
1005 mode_buf[2] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */
1006 mode_buf[3] = 0; /* Block descriptor length */
1008 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1009 __aac_io_done(scsicmd);
1011 return 0;
1013 case MODE_SENSE_10:
1015 char *mode_buf;
1017 dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
1018 mode_buf = scsicmd->request_buffer;
1019 mode_buf[0] = 0; /* Mode data length (MSB) */
1020 mode_buf[1] = 6; /* Mode data length (LSB) */
1021 mode_buf[2] = 0; /* Medium type - default */
1022 mode_buf[3] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */
1023 mode_buf[4] = 0; /* reserved */
1024 mode_buf[5] = 0; /* reserved */
1025 mode_buf[6] = 0; /* Block descriptor length (MSB) */
1026 mode_buf[7] = 0; /* Block descriptor length (LSB) */
1028 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1029 __aac_io_done(scsicmd);
1031 return 0;
1033 case REQUEST_SENSE:
1034 dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
1035 memcpy(scsicmd->sense_buffer, &sense_data[cid], sizeof (struct sense_data));
1036 memset(&sense_data[cid], 0, sizeof (struct sense_data));
1037 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1038 __aac_io_done(scsicmd);
1039 return 0;
1041 case ALLOW_MEDIUM_REMOVAL:
1042 dprintk((KERN_DEBUG "LOCK command.\n"));
1043 if (scsicmd->cmnd[4])
1044 fsa_dev_ptr->locked[cid] = 1;
1045 else
1046 fsa_dev_ptr->locked[cid] = 0;
1048 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1049 __aac_io_done(scsicmd);
1050 return 0;
1052 * These commands are all No-Ops
1054 case TEST_UNIT_READY:
1055 case RESERVE:
1056 case RELEASE:
1057 case REZERO_UNIT:
1058 case REASSIGN_BLOCKS:
1059 case SEEK_10:
1060 case START_STOP:
1061 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1062 __aac_io_done(scsicmd);
1063 return 0;
1066 switch (scsicmd->cmnd[0])
1068 case READ_6:
1069 case READ_10:
1071 * Hack to keep track of ordinal number of the device that
1072 * corresponds to a container. Needed to convert
1073 * containers to /dev/sd device names
1076 spin_unlock_irq(host->host_lock);
1077 if (scsicmd->request->rq_disk)
1078 memcpy(fsa_dev_ptr->devname[cid],
1079 scsicmd->request->rq_disk->disk_name,
1082 ret = aac_read(scsicmd, cid);
1083 spin_lock_irq(host->host_lock);
1084 return ret;
1086 case WRITE_6:
1087 case WRITE_10:
1088 spin_unlock_irq(host->host_lock);
1089 ret = aac_write(scsicmd, cid);
1090 spin_lock_irq(host->host_lock);
1091 return ret;
1092 default:
1094 * Unhandled commands
1096 printk(KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]);
1097 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1098 set_sense((u8 *) &sense_data[cid],
1099 ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
1100 ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
1101 memcpy(scsicmd->sense_buffer, &sense_data[cid],
1102 sizeof(struct sense_data));
1103 __aac_io_done(scsicmd);
1104 return 0;
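/* The ioctl helpers below back aac_dev_ioctl(): query_disk() reports the
 * valid/locked/deleted state and device name kept in fsa_scsi_hba, while
 * delete_disk()/force_delete_disk() just clear that state (delete_disk()
 * refuses to remove a locked container). */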
1108 static int query_disk(struct aac_dev *dev, void __user *arg)
1110 struct aac_query_disk qd;
1111 struct fsa_scsi_hba *fsa_dev_ptr;
1113 fsa_dev_ptr = &(dev->fsa_dev);
1114 if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
1115 return -EFAULT;
1116 if (qd.cnum == -1)
1117 qd.cnum = ID_LUN_TO_CONTAINER(qd.id, qd.lun);
1118 else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1))
1120 if (qd.cnum < 0 || qd.cnum >= MAXIMUM_NUM_CONTAINERS)
1121 return -EINVAL;
1122 qd.instance = dev->scsi_host_ptr->host_no;
1123 qd.bus = 0;
1124 qd.id = CONTAINER_TO_ID(qd.cnum);
1125 qd.lun = CONTAINER_TO_LUN(qd.cnum);
1127 else return -EINVAL;
1129 qd.valid = fsa_dev_ptr->valid[qd.cnum];
1130 qd.locked = fsa_dev_ptr->locked[qd.cnum];
1131 qd.deleted = fsa_dev_ptr->deleted[qd.cnum];
1133 if (fsa_dev_ptr->devname[qd.cnum][0] == '\0')
1134 qd.unmapped = 1;
1135 else
1136 qd.unmapped = 0;
1138 strlcpy(qd.name, fsa_dev_ptr->devname[qd.cnum], sizeof(qd.name));
1140 if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
1141 return -EFAULT;
1142 return 0;
1145 static int force_delete_disk(struct aac_dev *dev, void __user *arg)
1147 struct aac_delete_disk dd;
1148 struct fsa_scsi_hba *fsa_dev_ptr;
1150 fsa_dev_ptr = &(dev->fsa_dev);
1152 if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
1153 return -EFAULT;
1155 if (dd.cnum >= MAXIMUM_NUM_CONTAINERS)
1156 return -EINVAL;
1158 * Mark this container as being deleted.
1160 fsa_dev_ptr->deleted[dd.cnum] = 1;
1162 * Mark the container as no longer valid
1164 fsa_dev_ptr->valid[dd.cnum] = 0;
1165 return 0;
1168 static int delete_disk(struct aac_dev *dev, void __user *arg)
1170 struct aac_delete_disk dd;
1171 struct fsa_scsi_hba *fsa_dev_ptr;
1173 fsa_dev_ptr = &(dev->fsa_dev);
1175 if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
1176 return -EFAULT;
1178 if (dd.cnum >= MAXIMUM_NUM_CONTAINERS)
1179 return -EINVAL;
1181 * If the container is locked, it can not be deleted by the API.
1183 if (fsa_dev_ptr->locked[dd.cnum])
1184 return -EBUSY;
1185 else {
1187 * Mark the container as no longer being valid.
1189 fsa_dev_ptr->valid[dd.cnum] = 0;
1190 fsa_dev_ptr->devname[dd.cnum][0] = '\0';
1191 return 0;
1195 int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
1197 switch (cmd) {
1198 case FSACTL_QUERY_DISK:
1199 return query_disk(dev, arg);
1200 case FSACTL_DELETE_DISK:
1201 return delete_disk(dev, arg);
1202 case FSACTL_FORCE_DELETE_DISK:
1203 return force_delete_disk(dev, arg);
1204 case FSACTL_GET_CONTAINERS:
1205 return aac_get_containers(dev);
1206 default:
1207 return -ENOTTY;
1213 * aac_srb_callback
1214 * @context: the context set in the fib - here it is scsi cmd
1215 * @fibptr: pointer to the fib
1217 * Handles the completion of a scsi command to a non dasd device
1221 static void aac_srb_callback(void *context, struct fib * fibptr)
1223 struct aac_dev *dev;
1224 struct aac_srb_reply *srbreply;
1225 struct scsi_cmnd *scsicmd;
1227 scsicmd = (struct scsi_cmnd *) context;
1228 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1230 if (fibptr == NULL)
1231 BUG();
1233 srbreply = (struct aac_srb_reply *) fib_data(fibptr);
1235 scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */
1237 * Calculate resid for sg
1240 scsicmd->resid = scsicmd->request_bufflen - srbreply->data_xfer_length;
1242 if(scsicmd->use_sg)
1243 pci_unmap_sg(dev->pdev,
1244 (struct scatterlist *)scsicmd->buffer,
1245 scsicmd->use_sg,
1246 scsicmd->sc_data_direction);
1247 else if(scsicmd->request_bufflen)
1248 pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle, scsicmd->request_bufflen,
1249 scsicmd->sc_data_direction);
1252 * First check the fib status
1255 if (le32_to_cpu(srbreply->status) != ST_OK){
1256 int len;
1257 printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
1258 len = (srbreply->sense_data_size > sizeof(scsicmd->sense_buffer))?
1259 sizeof(scsicmd->sense_buffer):srbreply->sense_data_size;
1260 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1261 memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
1265 * Next check the srb status
1267 switch( (le32_to_cpu(srbreply->srb_status))&0x3f){
1268 case SRB_STATUS_ERROR_RECOVERY:
1269 case SRB_STATUS_PENDING:
1270 case SRB_STATUS_SUCCESS:
1271 if(scsicmd->cmnd[0] == INQUIRY ){
1272 u8 b;
1273 u8 b1;
1274 /* We can't expose disk devices because we can't tell whether they
1275 * are the raw container drives or stand alone drives. If they have
1276 * the removable bit set then we should expose them though.
1278 b = (*(u8*)scsicmd->buffer)&0x1f;
1279 b1 = ((u8*)scsicmd->buffer)[1];
1280 if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
1281 || (b==TYPE_DISK && (b1&0x80)) ){
1282 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1283 } else {
1284 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
1286 } else {
1287 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1289 break;
1290 case SRB_STATUS_DATA_OVERRUN:
1291 switch(scsicmd->cmnd[0]){
1292 case READ_6:
1293 case WRITE_6:
1294 case READ_10:
1295 case WRITE_10:
1296 case READ_12:
1297 case WRITE_12:
1298 if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
1299 printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
1300 } else {
1301 printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
1303 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
1304 break;
1305 case INQUIRY: {
1306 u8 b;
1307 u8 b1;
1308 /* We can't expose disk devices because we can't tell whether they
1309 * are the raw container drives or stand alone drives
1311 b = (*(u8*)scsicmd->buffer)&0x0f;
1312 b1 = ((u8*)scsicmd->buffer)[1];
1313 if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
1314 || (b==TYPE_DISK && (b1&0x80)) ){
1315 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1316 } else {
1317 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
1319 break;
1321 default:
1322 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1323 break;
1325 break;
1326 case SRB_STATUS_ABORTED:
1327 scsicmd->result = DID_ABORT << 16 | ABORT << 8;
1328 break;
1329 case SRB_STATUS_ABORT_FAILED:
1330 // Not sure about this one - but assuming the hba was trying to abort for some reason
1331 scsicmd->result = DID_ERROR << 16 | ABORT << 8;
1332 break;
1333 case SRB_STATUS_PARITY_ERROR:
1334 scsicmd->result = DID_PARITY << 16 | MSG_PARITY_ERROR << 8;
1335 break;
1336 case SRB_STATUS_NO_DEVICE:
1337 case SRB_STATUS_INVALID_PATH_ID:
1338 case SRB_STATUS_INVALID_TARGET_ID:
1339 case SRB_STATUS_INVALID_LUN:
1340 case SRB_STATUS_SELECTION_TIMEOUT:
1341 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
1342 break;
1344 case SRB_STATUS_COMMAND_TIMEOUT:
1345 case SRB_STATUS_TIMEOUT:
1346 scsicmd->result = DID_TIME_OUT << 16 | COMMAND_COMPLETE << 8;
1347 break;
1349 case SRB_STATUS_BUSY:
1350 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
1351 break;
1353 case SRB_STATUS_BUS_RESET:
1354 scsicmd->result = DID_RESET << 16 | COMMAND_COMPLETE << 8;
1355 break;
1357 case SRB_STATUS_MESSAGE_REJECTED:
1358 scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
1359 break;
1360 case SRB_STATUS_REQUEST_FLUSHED:
1361 case SRB_STATUS_ERROR:
1362 case SRB_STATUS_INVALID_REQUEST:
1363 case SRB_STATUS_REQUEST_SENSE_FAILED:
1364 case SRB_STATUS_NO_HBA:
1365 case SRB_STATUS_UNEXPECTED_BUS_FREE:
1366 case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
1367 case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
1368 case SRB_STATUS_DELAYED_RETRY:
1369 case SRB_STATUS_BAD_FUNCTION:
1370 case SRB_STATUS_NOT_STARTED:
1371 case SRB_STATUS_NOT_IN_USE:
1372 case SRB_STATUS_FORCE_ABORT:
1373 case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
1374 default:
1375 #ifdef AAC_DETAILED_STATUS_INFO
1376 printk("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",
1377 le32_to_cpu(srbreply->srb_status) & 0x3F,
1378 aac_get_status_string(
1379 le32_to_cpu(srbreply->srb_status) & 0x3F),
1380 scsicmd->cmnd[0],
1381 le32_to_cpu(srbreply->scsi_status));
1382 #endif
1383 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
1384 break;
1386 if (le32_to_cpu(srbreply->scsi_status) == 0x02 ){ // Check Condition
1387 int len;
1388 scsicmd->result |= SAM_STAT_CHECK_CONDITION;
1389 len = (srbreply->sense_data_size > sizeof(scsicmd->sense_buffer))?
1390 sizeof(scsicmd->sense_buffer):srbreply->sense_data_size;
1391 #ifdef AAC_DETAILED_STATUS_INFO
1392 dprintk((KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
1393 le32_to_cpu(srbreply->status), len));
1394 #endif
1395 memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
1399 * OR in the scsi status (already shifted up a bit)
1401 scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
1403 fib_complete(fibptr);
1404 fib_free(fibptr);
1405 aac_io_done(scsicmd);
1410 * aac_send_scb_fib
1411 * @scsicmd: the scsi command block
1413 * This routine will form a FIB and fill in the aac_srb from the
1414 * scsicmd passed in.
1417 static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
1419 struct fib* cmd_fibcontext;
1420 struct aac_dev* dev;
1421 int status;
1422 struct aac_srb *srbcmd;
1423 u16 fibsize;
1424 u32 flag;
1425 u32 timeout;
1427 if( scsicmd->device->id > 15 || scsicmd->device->lun > 7) {
1428 scsicmd->result = DID_NO_CONNECT << 16;
1429 __aac_io_done(scsicmd);
1430 return 0;
1433 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1434 switch(scsicmd->sc_data_direction){
1435 case DMA_TO_DEVICE:
1436 flag = SRB_DataOut;
1437 break;
1438 case DMA_BIDIRECTIONAL:
1439 flag = SRB_DataIn | SRB_DataOut;
1440 break;
1441 case DMA_FROM_DEVICE:
1442 flag = SRB_DataIn;
1443 break;
1444 case DMA_NONE:
1445 default: /* shuts up some versions of gcc */
1446 flag = SRB_NoDataXfer;
1447 break;
1452 * Allocate and initialize a Fib then setup a BlockWrite command
1454 if (!(cmd_fibcontext = fib_alloc(dev))) {
1455 return -1;
1457 fib_init(cmd_fibcontext);
1459 srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
1460 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
1461 srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scsicmd->device->channel));
1462 srbcmd->id = cpu_to_le32(scsicmd->device->id);
1463 srbcmd->lun = cpu_to_le32(scsicmd->device->lun);
1464 srbcmd->flags = cpu_to_le32(flag);
1465 timeout = (scsicmd->timeout-jiffies)/HZ;
1466 if(timeout == 0){
1467 timeout = 1;
1469 srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
1470 srbcmd->retry_limit =cpu_to_le32(0); // Obsolete parameter
1471 srbcmd->cdb_size = cpu_to_le32(scsicmd->cmd_len);
1473 if( dev->dac_support == 1 ) {
1474 aac_build_sg64(scsicmd, (struct sgmap64*) &srbcmd->sg);
1475 srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
1477 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
1478 memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
1480 * Build Scatter/Gather list
1482 fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry64));
1485 * Now send the Fib to the adapter
1487 status = fib_send(ScsiPortCommand64, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
1488 (fib_callback) aac_srb_callback, (void *) scsicmd);
1489 } else {
1490 aac_build_sg(scsicmd, (struct sgmap*)&srbcmd->sg);
1491 srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
1493 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
1494 memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
1496 * Build Scatter/Gather list
1498 fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry));
1501 * Now send the Fib to the adapter
1503 status = fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
1504 (fib_callback) aac_srb_callback, (void *) scsicmd);
1507 * Check that the command queued to the controller
1509 if (status == -EINPROGRESS){
1510 return 0;
1513 printk(KERN_WARNING "aac_srb: fib_send failed with status: %d\n", status);
1514 fib_complete(cmd_fibcontext);
1515 fib_free(cmd_fibcontext);
1517 return -1;
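/* aac_build_sg()/aac_build_sg64() map the command's buffer (scatterlist or
 * single buffer) with the PCI DMA API, fill in the 32- or 64-bit sgmap that
 * is carried in the FIB, trim the last entry so the total matches
 * request_bufflen exactly, and return the mapped byte count. */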
1520 static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
1522 struct aac_dev *dev;
1523 unsigned long byte_count = 0;
1525 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1526 // Get rid of old data
1527 psg->count = cpu_to_le32(0);
1528 psg->sg[0].addr = cpu_to_le32(0);
1529 psg->sg[0].count = cpu_to_le32(0);
1530 if (scsicmd->use_sg) {
1531 struct scatterlist *sg;
1532 int i;
1533 int sg_count;
1534 sg = (struct scatterlist *) scsicmd->request_buffer;
1536 sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
1537 scsicmd->sc_data_direction);
1538 psg->count = cpu_to_le32(sg_count);
1540 byte_count = 0;
1542 for (i = 0; i < sg_count; i++) {
1543 psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
1544 psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
1545 byte_count += sg_dma_len(sg);
1546 sg++;
1548 /* hba wants the size to be exact */
1549 if(byte_count > scsicmd->request_bufflen){
1550 psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen);
1551 byte_count = scsicmd->request_bufflen;
1553 /* Check for command underflow */
1554 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
1555 printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
1556 byte_count, scsicmd->underflow);
1559 else if(scsicmd->request_bufflen) {
1560 dma_addr_t addr;
1561 addr = pci_map_single(dev->pdev,
1562 scsicmd->request_buffer,
1563 scsicmd->request_bufflen,
1564 scsicmd->sc_data_direction);
1565 psg->count = cpu_to_le32(1);
1566 psg->sg[0].addr = cpu_to_le32(addr);
1567 psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
1568 scsicmd->SCp.dma_handle = addr;
1569 byte_count = scsicmd->request_bufflen;
1571 return byte_count;
1575 static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg)
1577 struct aac_dev *dev;
1578 unsigned long byte_count = 0;
1579 u64 le_addr;
1581 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1582 // Get rid of old data
1583 psg->count = cpu_to_le32(0);
1584 psg->sg[0].addr[0] = cpu_to_le32(0);
1585 psg->sg[0].addr[1] = cpu_to_le32(0);
1586 psg->sg[0].count = cpu_to_le32(0);
1587 if (scsicmd->use_sg) {
1588 struct scatterlist *sg;
1589 int i;
1590 int sg_count;
1591 sg = (struct scatterlist *) scsicmd->request_buffer;
1593 sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
1594 scsicmd->sc_data_direction);
1595 psg->count = cpu_to_le32(sg_count);
1597 byte_count = 0;
1599 for (i = 0; i < sg_count; i++) {
1600 le_addr = cpu_to_le64(sg_dma_address(sg));
1601 psg->sg[i].addr[1] = (u32)(le_addr>>32);
1602 psg->sg[i].addr[0] = (u32)(le_addr & 0xffffffff);
1603 psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
1604 byte_count += sg_dma_len(sg);
1605 sg++;
1607 /* hba wants the size to be exact */
1608 if(byte_count > scsicmd->request_bufflen){
1609 psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen);
1610 byte_count = scsicmd->request_bufflen;
1612 /* Check for command underflow */
1613 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
1614 printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
1615 byte_count, scsicmd->underflow);
1618 else if(scsicmd->request_bufflen) {
1619 dma_addr_t addr;
1620 addr = pci_map_single(dev->pdev,
1621 scsicmd->request_buffer,
1622 scsicmd->request_bufflen,
1623 scsicmd->sc_data_direction);
1624 psg->count = cpu_to_le32(1);
1625 le_addr = cpu_to_le64(addr);
1626 psg->sg[0].addr[1] = (u32)(le_addr>>32);
1627 psg->sg[0].addr[0] = (u32)(le_addr & 0xffffffff);
1628 psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
1629 scsicmd->SCp.dma_handle = addr;
1630 byte_count = scsicmd->request_bufflen;
1632 return byte_count;
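/* Optional debug aid: with AAC_DETAILED_STATUS_INFO defined,
 * aac_get_status_string() maps an SRB status code to readable text for the
 * error printk in aac_srb_callback(). */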
1635 #ifdef AAC_DETAILED_STATUS_INFO
1637 struct aac_srb_status_info {
1638 u32 status;
1639 char *str;
1643 static struct aac_srb_status_info srb_status_info[] = {
1644 { SRB_STATUS_PENDING, "Pending Status"},
1645 { SRB_STATUS_SUCCESS, "Success"},
1646 { SRB_STATUS_ABORTED, "Aborted Command"},
1647 { SRB_STATUS_ABORT_FAILED, "Abort Failed"},
1648 { SRB_STATUS_ERROR, "Error Event"},
1649 { SRB_STATUS_BUSY, "Device Busy"},
1650 { SRB_STATUS_INVALID_REQUEST, "Invalid Request"},
1651 { SRB_STATUS_INVALID_PATH_ID, "Invalid Path ID"},
1652 { SRB_STATUS_NO_DEVICE, "No Device"},
1653 { SRB_STATUS_TIMEOUT, "Timeout"},
1654 { SRB_STATUS_SELECTION_TIMEOUT, "Selection Timeout"},
1655 { SRB_STATUS_COMMAND_TIMEOUT, "Command Timeout"},
1656 { SRB_STATUS_MESSAGE_REJECTED, "Message Rejected"},
1657 { SRB_STATUS_BUS_RESET, "Bus Reset"},
1658 { SRB_STATUS_PARITY_ERROR, "Parity Error"},
1659 { SRB_STATUS_REQUEST_SENSE_FAILED,"Request Sense Failed"},
1660 { SRB_STATUS_NO_HBA, "No HBA"},
1661 { SRB_STATUS_DATA_OVERRUN, "Data Overrun/Data Underrun"},
1662 { SRB_STATUS_UNEXPECTED_BUS_FREE,"Unexpected Bus Free"},
1663 { SRB_STATUS_PHASE_SEQUENCE_FAILURE,"Phase Error"},
1664 { SRB_STATUS_BAD_SRB_BLOCK_LENGTH,"Bad Srb Block Length"},
1665 { SRB_STATUS_REQUEST_FLUSHED, "Request Flushed"},
1666 { SRB_STATUS_DELAYED_RETRY, "Delayed Retry"},
1667 { SRB_STATUS_INVALID_LUN, "Invalid LUN"},
1668 { SRB_STATUS_INVALID_TARGET_ID, "Invalid TARGET ID"},
1669 { SRB_STATUS_BAD_FUNCTION, "Bad Function"},
1670 { SRB_STATUS_ERROR_RECOVERY, "Error Recovery"},
1671 { SRB_STATUS_NOT_STARTED, "Not Started"},
1672 { SRB_STATUS_NOT_IN_USE, "Not In Use"},
1673 { SRB_STATUS_FORCE_ABORT, "Force Abort"},
1674 { SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"},
1675 { 0xff, "Unknown Error"}
1678 char *aac_get_status_string(u32 status)
1680 int i;
1682 for(i=0; i < (sizeof(srb_status_info)/sizeof(struct aac_srb_status_info)); i++ ){
1683 if(srb_status_info[i].status == status){
1684 return srb_status_info[i].str;
1688 return "Bad Status Code";
1691 #endif