scsi: Push down BKL into ioctl functions
[linux-2.6-xlnx.git] / drivers / scsi / 3w-9xxx.c
blob 4f74850560fe83e843879559bf821bb498796043
/*
   3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.

   Written By: Adam Radford <linuxraid@amcc.com>
   Modifications By: Tom Couch <linuxraid@amcc.com>

   Copyright (C) 2004-2009 Applied Micro Circuits Corporation.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   NO WARRANTY
   THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
   CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
   LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
   solely responsible for determining the appropriateness of using and
   distributing the Program and assumes all risks associated with its
   exercise of rights under this Agreement, including but not limited to
   the risks and costs of program errors, damage to or loss of data,
   programs or equipment, and unavailability or interruption of operations.

   DISCLAIMER OF LIABILITY
   NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
   ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
   TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
   USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
   HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

   Bugs/Comments/Suggestions should be mailed to:
   linuxraid@amcc.com

   For more information, goto:
   http://www.amcc.com

   Note: This version of the driver does not contain a bundled firmware
         image.
   History
   -------
   2.26.02.000 - Driver cleanup for kernel submission.
   2.26.02.001 - Replace schedule_timeout() calls with msleep().
   2.26.02.002 - Add support for PAE mode.
                 Add lun support.
                 Fix twa_remove() to free irq handler/unregister_chrdev()
                 before shutting down card.
                 Change to new 'change_queue_depth' api.
                 Fix 'handled=1' ISR usage, remove bogus IRQ check.
                 Remove un-needed eh_abort handler.
                 Add support for embedded firmware error strings.
   2.26.02.003 - Correctly handle single sgl's with use_sg=1.
   2.26.02.004 - Add support for 9550SX controllers.
   2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
   2.26.02.006 - Fix 9550SX pchip reset timeout.
                 Add big endian support.
   2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
   2.26.02.008 - Free irq handler in __twa_shutdown().
                 Serialize reset code.
                 Add support for 9650SE controllers.
   2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
   2.26.02.010 - Add support for 9690SA controllers.
   2.26.02.011 - Increase max AENs drained to 256.
                 Add MSI support and "use_msi" module parameter.
                 Fix bug in twa_get_param() on 4GB+.
                 Use pci_resource_len() for ioremap().
   2.26.02.012 - Add power management support.
   2.26.02.013 - Fix bug in twa_load_sgl().
*/
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_cmnd.h>
#include "3w-9xxx.h"
/* Globals */
#define TW_DRIVER_VERSION "2.26.02.013"
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;
extern struct timezone sys_tz;

/* Module parameters */
MODULE_AUTHOR ("AMCC");
MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(TW_DRIVER_VERSION);

static int use_msi = 0;
module_param(use_msi, int, S_IRUGO);
MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
/* Function prototypes */
static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
static char *twa_aen_severity_lookup(unsigned char severity_code);
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static int twa_chrdev_open(struct inode *inode, struct file *file);
static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id);
static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
			      u32 set_features, unsigned short current_fw_srl,
			      unsigned short current_fw_arch_id,
			      unsigned short current_fw_branch,
			      unsigned short current_fw_build,
			      unsigned short *fw_on_ctlr_srl,
			      unsigned short *fw_on_ctlr_arch_id,
			      unsigned short *fw_on_ctlr_branch,
			      unsigned short *fw_on_ctlr_build,
			      u32 *init_connect_result);
static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);

/* Functions */
/* Show some statistics about the card */
static ssize_t twa_show_stats(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
	unsigned long flags = 0;
	ssize_t len;

	spin_lock_irqsave(tw_dev->host->host_lock, flags);
	len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
		       "Current commands posted:   %4d\n"
		       "Max commands posted:       %4d\n"
		       "Current pending commands:  %4d\n"
		       "Max pending commands:      %4d\n"
		       "Last sgl length:           %4d\n"
		       "Max sgl length:            %4d\n"
		       "Last sector count:         %4d\n"
		       "Max sector count:          %4d\n"
		       "SCSI Host Resets:          %4d\n"
		       "AEN's:                     %4d\n",
		       TW_DRIVER_VERSION,
		       tw_dev->posted_request_count,
		       tw_dev->max_posted_request_count,
		       tw_dev->pending_request_count,
		       tw_dev->max_pending_request_count,
		       tw_dev->sgl_entries,
		       tw_dev->max_sgl_entries,
		       tw_dev->sector_count,
		       tw_dev->max_sector_count,
		       tw_dev->num_resets,
		       tw_dev->aen_count);
	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
	return len;
} /* End twa_show_stats() */
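
/*
 * Note: the twa_host_stats_attr attribute below exposes this handler as a
 * read-only 'stats' file under the SCSI host's sysfs directory, so the
 * counters above can be read from userspace with something like:
 *   cat /sys/class/scsi_host/host<N>/stats
 * (the host number depends on probe order).
 */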
/* This function will set a device's queue depth */
static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth,
				  int reason)
{
	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	if (queue_depth > TW_Q_LENGTH-2)
		queue_depth = TW_Q_LENGTH-2;
	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
	return queue_depth;
} /* End twa_change_queue_depth() */
/* Create sysfs 'stats' entry */
static struct device_attribute twa_host_stats_attr = {
	.attr = {
		.name =		"stats",
		.mode =		S_IRUGO,
	},
	.show = twa_show_stats
};

/* Host attributes initializer */
static struct device_attribute *twa_host_attrs[] = {
	&twa_host_stats_attr,
	NULL,
};

/* File operations struct for character device */
static const struct file_operations twa_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= twa_chrdev_ioctl,
	.open		= twa_chrdev_open,
	.release	= NULL
};
/* This function will complete an aen request from the isr */
static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
{
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Command_Apache_Header *header;
	unsigned short aen;
	int retval = 1;

	header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
	tw_dev->posted_request_count--;
	aen = le16_to_cpu(header->status_block.error);
	full_command_packet = tw_dev->command_packet_virt[request_id];
	command_packet = &full_command_packet->command.oldcommand;

	/* First check for internal completion of set param for time sync */
	if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
		/* Keep reading the queue in case there are more aen's */
		if (twa_aen_read_queue(tw_dev, request_id))
			goto out2;
		else {
			retval = 0;
			goto out;
		}
	}

	switch (aen) {
	case TW_AEN_QUEUE_EMPTY:
		/* Quit reading the queue if this is the last one */
		break;
	case TW_AEN_SYNC_TIME_WITH_HOST:
		twa_aen_sync_time(tw_dev, request_id);
		retval = 0;
		goto out;
	default:
		twa_aen_queue_event(tw_dev, header);

		/* If there are more aen's, keep reading the queue */
		if (twa_aen_read_queue(tw_dev, request_id))
			goto out2;
		else {
			retval = 0;
			goto out;
		}
	}
	retval = 0;
out2:
	tw_dev->state[request_id] = TW_S_COMPLETED;
	twa_free_request_id(tw_dev, request_id);
	clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
out:
	return retval;
} /* End twa_aen_complete() */
/* This function will drain aen queue */
static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
{
	int request_id = 0;
	char cdb[TW_MAX_CDB_LEN];
	TW_SG_Entry sglist[1];
	int finished = 0, count = 0;
	TW_Command_Full *full_command_packet;
	TW_Command_Apache_Header *header;
	unsigned short aen;
	int first_reset = 0, queue = 0, retval = 1;

	if (no_check_reset)
		first_reset = 0;
	else
		first_reset = 1;

	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));

	/* Initialize cdb */
	memset(&cdb, 0, TW_MAX_CDB_LEN);
	cdb[0] = REQUEST_SENSE; /* opcode */
	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */

	/* Initialize sglist */
	memset(&sglist, 0, sizeof(TW_SG_Entry));
	sglist[0].length = TW_SECTOR_SIZE;
	sglist[0].address = tw_dev->generic_buffer_phys[request_id];

	if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
		goto out;
	}

	/* Mark internal command */
	tw_dev->srb[request_id] = NULL;

	do {
		/* Send command to the board */
		if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
			goto out;
		}

		/* Now poll for completion */
		if (twa_poll_response(tw_dev, request_id, 30)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
			tw_dev->posted_request_count--;
			goto out;
		}

		tw_dev->posted_request_count--;
		header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
		aen = le16_to_cpu(header->status_block.error);
		queue = 0;
		count++;

		switch (aen) {
		case TW_AEN_QUEUE_EMPTY:
			if (first_reset != 1)
				goto out;
			else
				finished = 1;
			break;
		case TW_AEN_SOFT_RESET:
			if (first_reset == 0)
				first_reset = 1;
			else
				queue = 1;
			break;
		case TW_AEN_SYNC_TIME_WITH_HOST:
			break;
		default:
			queue = 1;
		}

		/* Now queue an event info */
		if (queue)
			twa_aen_queue_event(tw_dev, header);
	} while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));

	if (count == TW_MAX_AEN_DRAIN)
		goto out;

	retval = 0;
out:
	tw_dev->state[request_id] = TW_S_INITIAL;
	return retval;
} /* End twa_aen_drain_queue() */
/* This function will queue an event */
static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
{
	u32 local_time;
	struct timeval time;
	TW_Event *event;
	unsigned short aen;
	char host[16];
	char *error_str;

	tw_dev->aen_count++;

	/* Fill out event info */
	event = tw_dev->event_queue[tw_dev->error_index];

	/* Check for clobber */
	host[0] = '\0';
	if (tw_dev->host) {
		sprintf(host, " scsi%d:", tw_dev->host->host_no);
		if (event->retrieved == TW_AEN_NOT_RETRIEVED)
			tw_dev->aen_clobber = 1;
	}

	aen = le16_to_cpu(header->status_block.error);
	memset(event, 0, sizeof(TW_Event));

	event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
	do_gettimeofday(&time);
	local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
	event->time_stamp_sec = local_time;
	event->aen_code = aen;
	event->retrieved = TW_AEN_NOT_RETRIEVED;
	event->sequence_id = tw_dev->error_sequence_id;
	tw_dev->error_sequence_id++;

	/* Check for embedded error string */
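	/*
	 * The firmware may append a second, human-readable string directly
	 * after the NUL terminator of err_specific_desc; error_str below
	 * points at that trailing string (an empty string when none is
	 * present), and the memcpy() further down copies both strings into
	 * the event's parameter data.
	 */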
	error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);

	header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
	event->parameter_len = strlen(header->err_specific_desc);
	memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
	if (event->severity != TW_AEN_SEVERITY_DEBUG)
		printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
		       host,
		       twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
		       TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
		       error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
		       header->err_specific_desc);
	else
		tw_dev->aen_count--;

	if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
		tw_dev->event_queue_wrapped = 1;
	tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
} /* End twa_aen_queue_event() */
/* This function will read the aen queue from the isr */
static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
{
	char cdb[TW_MAX_CDB_LEN];
	TW_SG_Entry sglist[1];
	TW_Command_Full *full_command_packet;
	int retval = 1;

	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));

	/* Initialize cdb */
	memset(&cdb, 0, TW_MAX_CDB_LEN);
	cdb[0] = REQUEST_SENSE; /* opcode */
	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */

	/* Initialize sglist */
	memset(&sglist, 0, sizeof(TW_SG_Entry));
	sglist[0].length = TW_SECTOR_SIZE;
	sglist[0].address = tw_dev->generic_buffer_phys[request_id];

	/* Mark internal command */
	tw_dev->srb[request_id] = NULL;

	/* Now post the command packet */
	if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
		goto out;
	}
	retval = 0;
out:
	return retval;
} /* End twa_aen_read_queue() */
/* This function will look up an AEN severity string */
static char *twa_aen_severity_lookup(unsigned char severity_code)
{
	char *retval = NULL;

	if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
	    (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
		goto out;

	retval = twa_aen_severity_table[severity_code];
out:
	return retval;
} /* End twa_aen_severity_lookup() */
/* This function will sync firmware time with the host time */
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
{
	u32 schedulertime;
	struct timeval utc;
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Param_Apache *param;
	u32 local_time;

	/* Fill out the command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	command_packet = &full_command_packet->command.oldcommand;
	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
	command_packet->request_id = request_id;
	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
	command_packet->size = TW_COMMAND_SIZE;
	command_packet->byte6_offset.parameter_count = cpu_to_le16(1);

	/* Setup the param */
	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
	memset(param, 0, TW_SECTOR_SIZE);
	param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
	param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
	param->parameter_size_bytes = cpu_to_le16(4);

	/* Convert system time in UTC to local time seconds since last
	   Sunday 12:00AM */
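	/*
	 * The Unix epoch (Jan 1 1970, 00:00) fell on a Thursday, so
	 * subtracting 3 days (3 * 86400 s) below shifts the time base to a
	 * Sunday; the remainder modulo one week (604800 s) is then the number
	 * of seconds elapsed since the most recent Sunday 12:00AM, which is
	 * what the controller's SchedulerTime parameter expects.
	 */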
	do_gettimeofday(&utc);
	local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
	schedulertime = local_time - (3 * 86400);
	schedulertime = cpu_to_le32(schedulertime % 604800);

	memcpy(param->data, &schedulertime, sizeof(u32));

	/* Mark internal command */
	tw_dev->srb[request_id] = NULL;

	/* Now post the command */
	twa_post_command_packet(tw_dev, request_id, 1);
} /* End twa_aen_sync_time() */
519 /* This function will allocate memory and check if it is correctly aligned */
520 static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
522 int i;
523 dma_addr_t dma_handle;
524 unsigned long *cpu_addr;
525 int retval = 1;
527 cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
528 if (!cpu_addr) {
529 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
530 goto out;
533 if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
534 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
535 pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
536 goto out;
539 memset(cpu_addr, 0, size*TW_Q_LENGTH);
541 for (i = 0; i < TW_Q_LENGTH; i++) {
542 switch(which) {
543 case 0:
544 tw_dev->command_packet_phys[i] = dma_handle+(i*size);
545 tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
546 break;
547 case 1:
548 tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
549 tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
550 break;
553 retval = 0;
554 out:
555 return retval;
556 } /* End twa_allocate_memory() */
558 /* This function will check the status register for unexpected bits */
559 static int twa_check_bits(u32 status_reg_value)
561 int retval = 1;
563 if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
564 goto out;
565 if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
566 goto out;
568 retval = 0;
569 out:
570 return retval;
571 } /* End twa_check_bits() */
573 /* This function will check the srl and decide if we are compatible */
574 static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
576 int retval = 1;
577 unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
578 unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
579 u32 init_connect_result = 0;
581 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
582 TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
583 TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
584 TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
585 &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
586 &fw_on_ctlr_build, &init_connect_result)) {
587 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
588 goto out;
591 tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
592 tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
593 tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;
595 /* Try base mode compatibility */
596 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
597 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
598 TW_EXTENDED_INIT_CONNECT,
599 TW_BASE_FW_SRL, TW_9000_ARCH_ID,
600 TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
601 &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
602 &fw_on_ctlr_branch, &fw_on_ctlr_build,
603 &init_connect_result)) {
604 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
605 goto out;
607 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
608 if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
609 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
610 } else {
611 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
613 goto out;
615 tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
616 tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
617 tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
620 /* Load rest of compatibility struct */
621 strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
622 tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
623 tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
624 tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
625 tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
626 tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
627 tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
628 tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
629 tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
630 tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
632 retval = 0;
633 out:
634 return retval;
635 } /* End twa_check_srl() */
637 /* This function handles ioctl for the character device */
638 static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
640 struct inode *inode = file->f_path.dentry->d_inode;
641 long timeout;
642 unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
643 dma_addr_t dma_handle;
644 int request_id = 0;
645 unsigned int sequence_id = 0;
646 unsigned char event_index, start_index;
647 TW_Ioctl_Driver_Command driver_command;
648 TW_Ioctl_Buf_Apache *tw_ioctl;
649 TW_Lock *tw_lock;
650 TW_Command_Full *full_command_packet;
651 TW_Compatibility_Info *tw_compat_info;
652 TW_Event *event;
653 struct timeval current_time;
654 u32 current_time_ms;
655 TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
656 int retval = TW_IOCTL_ERROR_OS_EFAULT;
657 void __user *argp = (void __user *)arg;
659 lock_kernel();
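	/*
	 * With the move from the VFS-level ioctl to unlocked_ioctl, the Big
	 * Kernel Lock is no longer taken by the caller; it is taken explicitly
	 * here (and released at 'out') to preserve the previous serialization
	 * until the driver's own ioctl_lock mutex is shown to be sufficient.
	 */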
661 /* Only let one of these through at a time */
662 if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
663 retval = TW_IOCTL_ERROR_OS_EINTR;
664 goto out;
667 /* First copy down the driver command */
668 if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
669 goto out2;
671 /* Check data buffer size */
672 if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
673 retval = TW_IOCTL_ERROR_OS_EINVAL;
674 goto out2;
677 /* Hardware can only do multiple of 512 byte transfers */
678 data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
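	/*
	 * (buffer_length + 511) & ~511 rounds the requested length up to the
	 * next multiple of 512, e.g. 1..512 -> 512 and 513 -> 1024, while a
	 * length of 0 stays 0.
	 */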
680 /* Now allocate ioctl buf memory */
681 cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
682 if (!cpu_addr) {
683 retval = TW_IOCTL_ERROR_OS_ENOMEM;
684 goto out2;
687 tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
689 /* Now copy down the entire ioctl */
690 if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
691 goto out3;
693 /* See which ioctl we are doing */
694 switch (cmd) {
695 case TW_IOCTL_FIRMWARE_PASS_THROUGH:
696 spin_lock_irqsave(tw_dev->host->host_lock, flags);
697 twa_get_request_id(tw_dev, &request_id);
699 /* Flag internal command */
700 tw_dev->srb[request_id] = NULL;
702 /* Flag chrdev ioctl */
703 tw_dev->chrdev_request_id = request_id;
705 full_command_packet = &tw_ioctl->firmware_command;
707 /* Load request id and sglist for both command types */
708 twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
710 memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
712 /* Now post the command packet to the controller */
713 twa_post_command_packet(tw_dev, request_id, 1);
714 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
716 timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
718 /* Now wait for command to complete */
719 timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
721 /* We timed out, and didn't get an interrupt */
722 if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
723 /* Now we need to reset the board */
724 printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
725 tw_dev->host->host_no, TW_DRIVER, 0x37,
726 cmd);
727 retval = TW_IOCTL_ERROR_OS_EIO;
728 twa_reset_device_extension(tw_dev);
729 goto out3;
732 /* Now copy in the command packet response */
733 memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
735 /* Now complete the io */
736 spin_lock_irqsave(tw_dev->host->host_lock, flags);
737 tw_dev->posted_request_count--;
738 tw_dev->state[request_id] = TW_S_COMPLETED;
739 twa_free_request_id(tw_dev, request_id);
740 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
741 break;
742 case TW_IOCTL_GET_COMPATIBILITY_INFO:
743 tw_ioctl->driver_command.status = 0;
744 /* Copy compatibility struct into ioctl data buffer */
745 tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
746 memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
747 break;
748 case TW_IOCTL_GET_LAST_EVENT:
749 if (tw_dev->event_queue_wrapped) {
750 if (tw_dev->aen_clobber) {
751 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
752 tw_dev->aen_clobber = 0;
753 } else
754 tw_ioctl->driver_command.status = 0;
755 } else {
756 if (!tw_dev->error_index) {
757 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
758 break;
760 tw_ioctl->driver_command.status = 0;
762 event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
763 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
764 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
765 break;
766 case TW_IOCTL_GET_FIRST_EVENT:
767 if (tw_dev->event_queue_wrapped) {
768 if (tw_dev->aen_clobber) {
769 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
770 tw_dev->aen_clobber = 0;
771 } else
772 tw_ioctl->driver_command.status = 0;
773 event_index = tw_dev->error_index;
774 } else {
775 if (!tw_dev->error_index) {
776 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
777 break;
779 tw_ioctl->driver_command.status = 0;
780 event_index = 0;
782 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
783 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
784 break;
785 case TW_IOCTL_GET_NEXT_EVENT:
786 event = (TW_Event *)tw_ioctl->data_buffer;
787 sequence_id = event->sequence_id;
788 tw_ioctl->driver_command.status = 0;
790 if (tw_dev->event_queue_wrapped) {
791 if (tw_dev->aen_clobber) {
792 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
793 tw_dev->aen_clobber = 0;
795 start_index = tw_dev->error_index;
796 } else {
797 if (!tw_dev->error_index) {
798 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
799 break;
801 start_index = 0;
803 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;
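	/*
	 * Sequence ids increase by one per queued event, so the slot holding
	 * the event that follows 'sequence_id' is start_index plus the
	 * distance from the oldest retained event's sequence id plus one,
	 * taken modulo the TW_Q_LENGTH ring.
	 */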
805 if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
806 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
807 tw_dev->aen_clobber = 1;
808 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
809 break;
811 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
812 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
813 break;
814 case TW_IOCTL_GET_PREVIOUS_EVENT:
815 event = (TW_Event *)tw_ioctl->data_buffer;
816 sequence_id = event->sequence_id;
817 tw_ioctl->driver_command.status = 0;
819 if (tw_dev->event_queue_wrapped) {
820 if (tw_dev->aen_clobber) {
821 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
822 tw_dev->aen_clobber = 0;
824 start_index = tw_dev->error_index;
825 } else {
826 if (!tw_dev->error_index) {
827 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
828 break;
830 start_index = 0;
832 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;
834 if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
835 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
836 tw_dev->aen_clobber = 1;
837 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
838 break;
840 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
841 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
842 break;
843 case TW_IOCTL_GET_LOCK:
844 tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
845 do_gettimeofday(&current_time);
846 current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);
848 if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
849 tw_dev->ioctl_sem_lock = 1;
850 tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
851 tw_ioctl->driver_command.status = 0;
852 tw_lock->time_remaining_msec = tw_lock->timeout_msec;
853 } else {
854 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
855 tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
857 break;
858 case TW_IOCTL_RELEASE_LOCK:
859 if (tw_dev->ioctl_sem_lock == 1) {
860 tw_dev->ioctl_sem_lock = 0;
861 tw_ioctl->driver_command.status = 0;
862 } else {
863 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
865 break;
866 default:
867 retval = TW_IOCTL_ERROR_OS_ENOTTY;
868 goto out3;
871 /* Now copy the entire response to userspace */
872 if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
873 retval = 0;
874 out3:
875 /* Now free ioctl buf memory */
876 dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
877 out2:
878 mutex_unlock(&tw_dev->ioctl_lock);
879 out:
880 unlock_kernel();
881 return retval;
882 } /* End twa_chrdev_ioctl() */
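
/*
 * For reference, a userspace management tool drives the ioctls above roughly
 * as in this sketch (illustrative only: the device node name and DATA_LEN are
 * assumptions here; TW_Ioctl_Buf_Apache and the TW_IOCTL_* numbers come from
 * 3w-9xxx.h):
 *
 *	TW_Ioctl_Buf_Apache *buf = calloc(1, sizeof(*buf) + DATA_LEN - 1);
 *	int fd = open("/dev/twa0", O_RDWR);
 *
 *	buf->driver_command.buffer_length = DATA_LEN;
 *	if (fd >= 0 && ioctl(fd, TW_IOCTL_GET_COMPATIBILITY_INFO, buf) == 0)
 *		use_compat_info(buf->data_buffer);  // filled in by the driver
 *
 * The same buffer layout is used by the firmware pass-through, event
 * retrieval and lock ioctls handled in the switch statement above.
 */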
/* This function handles open for the character device */
/* NOTE that this function will race with remove. */
static int twa_chrdev_open(struct inode *inode, struct file *file)
{
	unsigned int minor_number;
	int retval = TW_IOCTL_ERROR_OS_ENODEV;

	cycle_kernel_lock();
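	/*
	 * cycle_kernel_lock() briefly takes and releases the BKL; it is kept
	 * from the old locked open() path so that an open cannot complete
	 * while early initialization that still holds the BKL is in progress.
	 */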
	minor_number = iminor(inode);
	if (minor_number >= twa_device_extension_count)
		goto out;
	retval = 0;
out:
	return retval;
} /* End twa_chrdev_open() */
900 /* This function will print readable messages from status register errors */
901 static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
903 int retval = 1;
905 /* Check for various error conditions and handle them appropriately */
906 if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
907 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
908 writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
911 if (status_reg_value & TW_STATUS_PCI_ABORT) {
912 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
913 writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
914 pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
917 if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
918 if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
919 (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
920 (!test_bit(TW_IN_RESET, &tw_dev->flags)))
921 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
922 writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
925 if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
926 if (tw_dev->reset_print == 0) {
927 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
928 tw_dev->reset_print = 1;
930 goto out;
932 retval = 0;
933 out:
934 return retval;
935 } /* End twa_decode_bits() */
937 /* This function will empty the response queue */
938 static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
940 u32 status_reg_value, response_que_value;
941 int count = 0, retval = 1;
943 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
945 while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
946 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
947 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
948 count++;
950 if (count == TW_MAX_RESPONSE_DRAIN)
951 goto out;
953 retval = 0;
954 out:
955 return retval;
956 } /* End twa_empty_response_queue() */
958 /* This function will clear the pchip/response queue on 9550SX */
959 static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
961 u32 response_que_value = 0;
962 unsigned long before;
963 int retval = 1;
965 if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
966 before = jiffies;
967 while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
968 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
969 msleep(1);
970 if (time_after(jiffies, before + HZ * 30))
971 goto out;
973 /* P-chip settle time */
974 msleep(500);
975 retval = 0;
976 } else
977 retval = 0;
978 out:
979 return retval;
980 } /* End twa_empty_response_queue_large() */
982 /* This function passes sense keys from firmware to scsi layer */
983 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
985 TW_Command_Full *full_command_packet;
986 unsigned short error;
987 int retval = 1;
988 char *error_str;
990 full_command_packet = tw_dev->command_packet_virt[request_id];
992 /* Check for embedded error string */
993 error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);
995 /* Don't print error for Logical unit not supported during rollcall */
996 error = le16_to_cpu(full_command_packet->header.status_block.error);
997 if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
998 if (print_host)
999 printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1000 tw_dev->host->host_no,
1001 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1002 full_command_packet->header.status_block.error,
1003 error_str[0] == '\0' ?
1004 twa_string_lookup(twa_error_table,
1005 full_command_packet->header.status_block.error) : error_str,
1006 full_command_packet->header.err_specific_desc);
1007 else
1008 printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1009 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1010 full_command_packet->header.status_block.error,
1011 error_str[0] == '\0' ?
1012 twa_string_lookup(twa_error_table,
1013 full_command_packet->header.status_block.error) : error_str,
1014 full_command_packet->header.err_specific_desc);
1017 if (copy_sense) {
1018 memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
1019 tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
1020 retval = TW_ISR_DONT_RESULT;
1021 goto out;
1023 retval = 0;
1024 out:
1025 return retval;
1026 } /* End twa_fill_sense() */
1028 /* This function will free up device extension resources */
1029 static void twa_free_device_extension(TW_Device_Extension *tw_dev)
1031 if (tw_dev->command_packet_virt[0])
1032 pci_free_consistent(tw_dev->tw_pci_dev,
1033 sizeof(TW_Command_Full)*TW_Q_LENGTH,
1034 tw_dev->command_packet_virt[0],
1035 tw_dev->command_packet_phys[0]);
1037 if (tw_dev->generic_buffer_virt[0])
1038 pci_free_consistent(tw_dev->tw_pci_dev,
1039 TW_SECTOR_SIZE*TW_Q_LENGTH,
1040 tw_dev->generic_buffer_virt[0],
1041 tw_dev->generic_buffer_phys[0]);
1043 kfree(tw_dev->event_queue[0]);
1044 } /* End twa_free_device_extension() */
1046 /* This function will free a request id */
1047 static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
1049 tw_dev->free_queue[tw_dev->free_tail] = request_id;
1050 tw_dev->state[request_id] = TW_S_FINISHED;
1051 tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
1052 } /* End twa_free_request_id() */
1054 /* This function will get parameter table entries from the firmware */
1055 static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
1057 TW_Command_Full *full_command_packet;
1058 TW_Command *command_packet;
1059 TW_Param_Apache *param;
1060 void *retval = NULL;
1062 /* Setup the command packet */
1063 full_command_packet = tw_dev->command_packet_virt[request_id];
1064 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1065 command_packet = &full_command_packet->command.oldcommand;
1067 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
1068 command_packet->size = TW_COMMAND_SIZE;
1069 command_packet->request_id = request_id;
1070 command_packet->byte6_offset.block_count = cpu_to_le16(1);
1072 /* Now setup the param */
1073 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
1074 memset(param, 0, TW_SECTOR_SIZE);
1075 param->table_id = cpu_to_le16(table_id | 0x8000);
1076 param->parameter_id = cpu_to_le16(parameter_id);
1077 param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
1079 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1080 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
1082 /* Post the command packet to the board */
1083 twa_post_command_packet(tw_dev, request_id, 1);
1085 /* Poll for completion */
1086 if (twa_poll_response(tw_dev, request_id, 30))
1087 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
1088 else
1089 retval = (void *)&(param->data[0]);
1091 tw_dev->posted_request_count--;
1092 tw_dev->state[request_id] = TW_S_INITIAL;
1094 return retval;
1095 } /* End twa_get_param() */
1097 /* This function will assign an available request id */
1098 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
1100 *request_id = tw_dev->free_queue[tw_dev->free_head];
1101 tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
1102 tw_dev->state[*request_id] = TW_S_STARTED;
1103 } /* End twa_get_request_id() */
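
/*
 * Request ids are managed as a ring of TW_Q_LENGTH entries: free_queue[]
 * holds the ids that are currently unused, twa_get_request_id() pops from
 * free_head and twa_free_request_id() pushes at free_tail.  Since no more
 * than TW_Q_LENGTH ids are ever in circulation, neither helper needs a
 * full/empty check.
 */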
1105 /* This function will send an initconnection command to controller */
1106 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
1107 u32 set_features, unsigned short current_fw_srl,
1108 unsigned short current_fw_arch_id,
1109 unsigned short current_fw_branch,
1110 unsigned short current_fw_build,
1111 unsigned short *fw_on_ctlr_srl,
1112 unsigned short *fw_on_ctlr_arch_id,
1113 unsigned short *fw_on_ctlr_branch,
1114 unsigned short *fw_on_ctlr_build,
1115 u32 *init_connect_result)
1117 TW_Command_Full *full_command_packet;
1118 TW_Initconnect *tw_initconnect;
1119 int request_id = 0, retval = 1;
1121 /* Initialize InitConnection command packet */
1122 full_command_packet = tw_dev->command_packet_virt[request_id];
1123 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1124 full_command_packet->header.header_desc.size_header = 128;
1126 tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
1127 tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
1128 tw_initconnect->request_id = request_id;
1129 tw_initconnect->message_credits = cpu_to_le16(message_credits);
1130 tw_initconnect->features = set_features;
1132 /* Turn on 64-bit sgl support if we need to */
1133 tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
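	/*
	 * Bit 0 of the feature word asks the firmware for 64-bit
	 * scatter-gather addresses; it is set whenever dma_addr_t is wider
	 * than 32 bits (64-bit kernels, or 32-bit kernels built with 64-bit
	 * DMA addressing).
	 */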
1135 tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
1137 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1138 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
1139 tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
1140 tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
1141 tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
1142 tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
1143 } else
1144 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
1146 /* Send command packet to the board */
1147 twa_post_command_packet(tw_dev, request_id, 1);
1149 /* Poll for completion */
1150 if (twa_poll_response(tw_dev, request_id, 30)) {
1151 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
1152 } else {
1153 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1154 *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
1155 *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
1156 *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
1157 *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
1158 *init_connect_result = le32_to_cpu(tw_initconnect->result);
1160 retval = 0;
1163 tw_dev->posted_request_count--;
1164 tw_dev->state[request_id] = TW_S_INITIAL;
1166 return retval;
1167 } /* End twa_initconnection() */
1169 /* This function will initialize the fields of a device extension */
1170 static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
1172 int i, retval = 1;
1174 /* Initialize command packet buffers */
1175 if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
1176 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
1177 goto out;
1180 /* Initialize generic buffer */
1181 if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
1182 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
1183 goto out;
1186 /* Allocate event info space */
1187 tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
1188 if (!tw_dev->event_queue[0]) {
1189 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
1190 goto out;
1194 for (i = 0; i < TW_Q_LENGTH; i++) {
1195 tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
1196 tw_dev->free_queue[i] = i;
1197 tw_dev->state[i] = TW_S_INITIAL;
1200 tw_dev->pending_head = TW_Q_START;
1201 tw_dev->pending_tail = TW_Q_START;
1202 tw_dev->free_head = TW_Q_START;
1203 tw_dev->free_tail = TW_Q_START;
1204 tw_dev->error_sequence_id = 1;
1205 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1207 mutex_init(&tw_dev->ioctl_lock);
1208 init_waitqueue_head(&tw_dev->ioctl_wqueue);
1210 retval = 0;
1211 out:
1212 return retval;
1213 } /* End twa_initialize_device_extension() */
1215 /* This function is the interrupt service routine */
1216 static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1218 int request_id, error = 0;
1219 u32 status_reg_value;
1220 TW_Response_Queue response_que;
1221 TW_Command_Full *full_command_packet;
1222 TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
1223 int handled = 0;
1225 /* Get the per adapter lock */
1226 spin_lock(tw_dev->host->host_lock);
1228 /* Read the registers */
1229 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1231 /* Check if this is our interrupt, otherwise bail */
1232 if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
1233 goto twa_interrupt_bail;
1235 handled = 1;
1237 /* If we are resetting, bail */
1238 if (test_bit(TW_IN_RESET, &tw_dev->flags))
1239 goto twa_interrupt_bail;
1241 /* Check controller for errors */
1242 if (twa_check_bits(status_reg_value)) {
1243 if (twa_decode_bits(tw_dev, status_reg_value)) {
1244 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1245 goto twa_interrupt_bail;
1249 /* Handle host interrupt */
1250 if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
1251 TW_CLEAR_HOST_INTERRUPT(tw_dev);
1253 /* Handle attention interrupt */
1254 if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
1255 TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
1256 if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
1257 twa_get_request_id(tw_dev, &request_id);
1259 error = twa_aen_read_queue(tw_dev, request_id);
1260 if (error) {
1261 tw_dev->state[request_id] = TW_S_COMPLETED;
1262 twa_free_request_id(tw_dev, request_id);
1263 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
1268 /* Handle command interrupt */
1269 if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
1270 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1271 /* Drain as many pending commands as we can */
1272 while (tw_dev->pending_request_count > 0) {
1273 request_id = tw_dev->pending_queue[tw_dev->pending_head];
1274 if (tw_dev->state[request_id] != TW_S_PENDING) {
1275 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
1276 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1277 goto twa_interrupt_bail;
1279 if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
1280 tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
1281 tw_dev->pending_request_count--;
1282 } else {
1283 /* If we get here, we will continue re-posting on the next command interrupt */
1284 break;
1289 /* Handle response interrupt */
1290 if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
1292 /* Drain the response queue from the board */
1293 while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
1294 /* Complete the response */
1295 response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1296 request_id = TW_RESID_OUT(response_que.response_id);
1297 full_command_packet = tw_dev->command_packet_virt[request_id];
1298 error = 0;
1299 /* Check for command packet errors */
1300 if (full_command_packet->command.newcommand.status != 0) {
1301 if (tw_dev->srb[request_id] != NULL) {
1302 error = twa_fill_sense(tw_dev, request_id, 1, 1);
1303 } else {
1304 /* Skip ioctl error prints */
1305 if (request_id != tw_dev->chrdev_request_id) {
1306 error = twa_fill_sense(tw_dev, request_id, 0, 1);
1311 /* Check for correct state */
1312 if (tw_dev->state[request_id] != TW_S_POSTED) {
1313 if (tw_dev->srb[request_id] != NULL) {
1314 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
1315 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1316 goto twa_interrupt_bail;
1320 /* Check for internal command completion */
1321 if (tw_dev->srb[request_id] == NULL) {
1322 if (request_id != tw_dev->chrdev_request_id) {
1323 if (twa_aen_complete(tw_dev, request_id))
1324 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
1325 } else {
1326 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1327 wake_up(&tw_dev->ioctl_wqueue);
1329 } else {
1330 struct scsi_cmnd *cmd;
1332 cmd = tw_dev->srb[request_id];
1334 twa_scsiop_execute_scsi_complete(tw_dev, request_id);
1335 /* If no error command was a success */
1336 if (error == 0) {
1337 cmd->result = (DID_OK << 16);
1340 /* If error, command failed */
1341 if (error == 1) {
1342 /* Ask for a host reset */
1343 cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1346 /* Report residual bytes for single sgl */
1347 if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
1348 if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
1349 scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
1352 /* Now complete the io */
1353 tw_dev->state[request_id] = TW_S_COMPLETED;
1354 twa_free_request_id(tw_dev, request_id);
1355 tw_dev->posted_request_count--;
1356 tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
1357 twa_unmap_scsi_data(tw_dev, request_id);
1360 /* Check for valid status after each drain */
1361 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1362 if (twa_check_bits(status_reg_value)) {
1363 if (twa_decode_bits(tw_dev, status_reg_value)) {
1364 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1365 goto twa_interrupt_bail;
1371 twa_interrupt_bail:
1372 spin_unlock(tw_dev->host->host_lock);
1373 return IRQ_RETVAL(handled);
1374 } /* End twa_interrupt() */
1376 /* This function will load the request id and various sgls for ioctls */
1377 static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
1379 TW_Command *oldcommand;
1380 TW_Command_Apache *newcommand;
1381 TW_SG_Entry *sgl;
1382 unsigned int pae = 0;
1384 if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
1385 pae = 1;
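	/*
	 * 'pae' flags a 32-bit kernel that nevertheless uses 64-bit DMA
	 * addresses: each scatter-gather entry then carries an extra 32-bit
	 * word, which is why the 9690SA sgl offset calculation adds 'pae' and
	 * oldcommand->size is bumped by one dword further down.
	 */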
1387 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1388 newcommand = &full_command_packet->command.newcommand;
1389 newcommand->request_id__lunl =
1390 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
1391 if (length) {
1392 newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1393 newcommand->sg_list[0].length = cpu_to_le32(length);
1395 newcommand->sgl_entries__lunh =
1396 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
1397 } else {
1398 oldcommand = &full_command_packet->command.oldcommand;
1399 oldcommand->request_id = request_id;
1401 if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
1402 /* Load the sg list */
1403 if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
1404 sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
1405 else
1406 sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
1407 sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1408 sgl->length = cpu_to_le32(length);
1410 oldcommand->size += pae;
1413 } /* End twa_load_sgl() */
1415 /* This function will perform a pci-dma mapping for a scatter gather list */
1416 static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
1418 int use_sg;
1419 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1421 use_sg = scsi_dma_map(cmd);
1422 if (!use_sg)
1423 return 0;
1424 else if (use_sg < 0) {
1425 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
1426 return 0;
1429 cmd->SCp.phase = TW_PHASE_SGLIST;
1430 cmd->SCp.have_data_in = use_sg;
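	/*
	 * The scsi_cmnd scratch area (SCp) is reused to remember that this
	 * command's buffers are DMA-mapped: TW_PHASE_SGLIST marks the mapping
	 * so twa_unmap_scsi_data() knows to call scsi_dma_unmap() later.
	 */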
1432 return use_sg;
1433 } /* End twa_map_scsi_sg_data() */
1435 /* This function will poll for a response interrupt of a request */
1436 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1438 int retval = 1, found = 0, response_request_id;
1439 TW_Response_Queue response_queue;
1440 TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
1442 if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
1443 response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1444 response_request_id = TW_RESID_OUT(response_queue.response_id);
1445 if (request_id != response_request_id) {
1446 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
1447 goto out;
1449 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1450 if (full_command_packet->command.newcommand.status != 0) {
1451 /* bad response */
1452 twa_fill_sense(tw_dev, request_id, 0, 0);
1453 goto out;
1455 found = 1;
1456 } else {
1457 if (full_command_packet->command.oldcommand.status != 0) {
1458 /* bad response */
1459 twa_fill_sense(tw_dev, request_id, 0, 0);
1460 goto out;
1462 found = 1;
1466 if (found)
1467 retval = 0;
1468 out:
1469 return retval;
1470 } /* End twa_poll_response() */
1472 /* This function will poll the status register for a flag */
1473 static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1475 u32 status_reg_value;
1476 unsigned long before;
1477 int retval = 1;
1479 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1480 before = jiffies;
1482 if (twa_check_bits(status_reg_value))
1483 twa_decode_bits(tw_dev, status_reg_value);
1485 while ((status_reg_value & flag) != flag) {
1486 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1488 if (twa_check_bits(status_reg_value))
1489 twa_decode_bits(tw_dev, status_reg_value);
1491 if (time_after(jiffies, before + HZ * seconds))
1492 goto out;
1494 msleep(50);
1496 retval = 0;
1497 out:
1498 return retval;
1499 } /* End twa_poll_status() */
1501 /* This function will poll the status register for disappearance of a flag */
1502 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1504 u32 status_reg_value;
1505 unsigned long before;
1506 int retval = 1;
1508 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1509 before = jiffies;
1511 if (twa_check_bits(status_reg_value))
1512 twa_decode_bits(tw_dev, status_reg_value);
1514 while ((status_reg_value & flag) != 0) {
1515 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1516 if (twa_check_bits(status_reg_value))
1517 twa_decode_bits(tw_dev, status_reg_value);
1519 if (time_after(jiffies, before + HZ * seconds))
1520 goto out;
1522 msleep(50);
1524 retval = 0;
1525 out:
1526 return retval;
1527 } /* End twa_poll_status_gone() */
1529 /* This function will attempt to post a command packet to the board */
1530 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
1532 u32 status_reg_value;
1533 dma_addr_t command_que_value;
1534 int retval = 1;
1536 command_que_value = tw_dev->command_packet_phys[request_id];
1538 /* For 9650SE write low 4 bytes first */
1539 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1540 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1541 command_que_value += TW_COMMAND_OFFSET;
1542 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
1545 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1547 if (twa_check_bits(status_reg_value))
1548 twa_decode_bits(tw_dev, status_reg_value);
1550 if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
1552 /* Only pend internal driver commands */
1553 if (!internal) {
1554 retval = SCSI_MLQUEUE_HOST_BUSY;
1555 goto out;
1558 /* Couldn't post the command packet, so we do it later */
1559 if (tw_dev->state[request_id] != TW_S_PENDING) {
1560 tw_dev->state[request_id] = TW_S_PENDING;
1561 tw_dev->pending_request_count++;
1562 if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
1563 tw_dev->max_pending_request_count = tw_dev->pending_request_count;
1565 tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
1566 tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
1568 TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
1569 goto out;
1570 } else {
1571 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1572 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1573 /* Now write upper 4 bytes */
1574 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
1575 } else {
1576 if (sizeof(dma_addr_t) > 4) {
1577 command_que_value += TW_COMMAND_OFFSET;
1578 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1579 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
1580 } else {
1581 writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1584 tw_dev->state[request_id] = TW_S_POSTED;
1585 tw_dev->posted_request_count++;
1586 if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
1587 tw_dev->max_posted_request_count = tw_dev->posted_request_count;
1590 retval = 0;
1591 out:
1592 return retval;
1593 } /* End twa_post_command_packet() */
1595 /* This function will reset a device extension */
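/*
 * Host-reset helper: completes every outstanding command with DID_RESET,
 * reinitializes the free and pending queues under the host lock, then
 * re-runs the controller reset sequence and re-enables interrupts.
 */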
1596 static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1598 int i = 0;
1599 int retval = 1;
1600 unsigned long flags = 0;
1602 set_bit(TW_IN_RESET, &tw_dev->flags);
1603 TW_DISABLE_INTERRUPTS(tw_dev);
1604 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1605 spin_lock_irqsave(tw_dev->host->host_lock, flags);
1607 /* Abort all requests that are in progress */
1608 for (i = 0; i < TW_Q_LENGTH; i++) {
1609 if ((tw_dev->state[i] != TW_S_FINISHED) &&
1610 (tw_dev->state[i] != TW_S_INITIAL) &&
1611 (tw_dev->state[i] != TW_S_COMPLETED)) {
1612 if (tw_dev->srb[i]) {
1613 tw_dev->srb[i]->result = (DID_RESET << 16);
1614 tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
1615 twa_unmap_scsi_data(tw_dev, i);
1620 /* Reset queues and counts */
1621 for (i = 0; i < TW_Q_LENGTH; i++) {
1622 tw_dev->free_queue[i] = i;
1623 tw_dev->state[i] = TW_S_INITIAL;
1625 tw_dev->free_head = TW_Q_START;
1626 tw_dev->free_tail = TW_Q_START;
1627 tw_dev->posted_request_count = 0;
1628 tw_dev->pending_request_count = 0;
1629 tw_dev->pending_head = TW_Q_START;
1630 tw_dev->pending_tail = TW_Q_START;
1631 tw_dev->reset_print = 0;
1633 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
1635 if (twa_reset_sequence(tw_dev, 1))
1636 goto out;
1638 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
1639 clear_bit(TW_IN_RESET, &tw_dev->flags);
1640 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1642 retval = 0;
1643 out:
1644 return retval;
1645 } /* End twa_reset_device_extension() */
1647 /* This function will reset a controller */
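/*
 * Each pass optionally soft-resets the card, waits for the
 * microcontroller-ready bit, empties the response queue(s), checks
 * firmware SRL compatibility and drains the AEN queue; any failure causes
 * the pass to be retried, up to TW_MAX_RESET_TRIES attempts in total.
 */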
1648 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1650 int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1652 while (tries < TW_MAX_RESET_TRIES) {
1653 if (do_soft_reset) {
1654 TW_SOFT_RESET(tw_dev);
1655 /* Clear pchip/response queue on 9550SX */
1656 if (twa_empty_response_queue_large(tw_dev)) {
1657 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1658 do_soft_reset = 1;
1659 tries++;
1660 continue;
1664 /* Make sure controller is in a good state */
1665 if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
1666 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
1667 do_soft_reset = 1;
1668 tries++;
1669 continue;
1672 /* Empty response queue */
1673 if (twa_empty_response_queue(tw_dev)) {
1674 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
1675 do_soft_reset = 1;
1676 tries++;
1677 continue;
1680 flashed = 0;
1682 /* Check for compatibility/flash */
1683 if (twa_check_srl(tw_dev, &flashed)) {
1684 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
1685 do_soft_reset = 1;
1686 tries++;
1687 continue;
1688 } else {
1689 if (flashed) {
1690 tries++;
1691 continue;
1695 /* Drain the AEN queue */
1696 if (twa_aen_drain_queue(tw_dev, soft_reset)) {
1697 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
1698 do_soft_reset = 1;
1699 tries++;
1700 continue;
1703 /* If we got here, controller is in a good state */
1704 retval = 0;
1705 goto out;
1707 out:
1708 return retval;
1709 } /* End twa_reset_sequence() */
1711 /* This function returns unit geometry in cylinders/heads/sectors */
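/*
 * Geometry heuristic: units of at least 0x200000 sectors (1 GiB) report a
 * 255-head, 63-sector geometry, smaller units 64 x 32.  As a worked example
 * with a hypothetical unit of 143,000,000 sectors, the cylinder count for
 * the 255 x 63 case would be 143000000 / (255 * 63) = 8901.  Note that
 * sector_div() divides its first argument in place and returns the
 * remainder, so the quotient is left in 'capacity' after the call.
 */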
1712 static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1714 int heads, sectors, cylinders;
1715 TW_Device_Extension *tw_dev;
1717 tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
1719 if (capacity >= 0x200000) {
1720 heads = 255;
1721 sectors = 63;
1722 cylinders = sector_div(capacity, heads * sectors);
1723 } else {
1724 heads = 64;
1725 sectors = 32;
1726 cylinders = sector_div(capacity, heads * sectors);
1729 geom[0] = heads;
1730 geom[1] = sectors;
1731 geom[2] = cylinders;
1733 return 0;
1734 } /* End twa_scsi_biosparam() */
1736 /* This is the new scsi eh reset function */
1737 static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1739 TW_Device_Extension *tw_dev = NULL;
1740 int retval = FAILED;
1742 tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1744 tw_dev->num_resets++;
1746 sdev_printk(KERN_WARNING, SCpnt->device,
1747 "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1748 TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1750 /* Make sure we are not issuing an ioctl or resetting from ioctl */
1751 mutex_lock(&tw_dev->ioctl_lock);
1753 /* Now reset the card and some of the device extension data */
1754 if (twa_reset_device_extension(tw_dev)) {
1755 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1756 goto out;
1759 retval = SUCCESS;
1760 out:
1761 mutex_unlock(&tw_dev->ioctl_lock);
1762 return retval;
1763 } /* End twa_scsi_eh_reset() */
1765 /* This is the main scsi queue function to handle scsi opcodes */
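/*
 * Fast-path notes: while a reset is in progress the command is bounced
 * back with SCSI_MLQUEUE_HOST_BUSY; non-zero LUNs are rejected with
 * DID_BAD_TARGET when the firmware SRL is too old to support LUNs.
 * Otherwise a free request id is claimed and the command is handed to
 * twa_scsiop_execute_scsi() for packet building and posting.
 */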
1766 static int twa_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1768 int request_id, retval;
1769 TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1771 	/* If we are resetting due to a timed-out ioctl, report as busy */
1772 if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
1773 retval = SCSI_MLQUEUE_HOST_BUSY;
1774 goto out;
1777 /* Check if this FW supports luns */
1778 if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
1779 SCpnt->result = (DID_BAD_TARGET << 16);
1780 done(SCpnt);
1781 retval = 0;
1782 goto out;
1785 /* Save done function into scsi_cmnd struct */
1786 SCpnt->scsi_done = done;
1788 /* Get a free request id */
1789 twa_get_request_id(tw_dev, &request_id);
1791 /* Save the scsi command for use by the ISR */
1792 tw_dev->srb[request_id] = SCpnt;
1794 /* Initialize phase to zero */
1795 SCpnt->SCp.phase = TW_PHASE_INITIAL;
1797 retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1798 switch (retval) {
1799 case SCSI_MLQUEUE_HOST_BUSY:
1800 twa_free_request_id(tw_dev, request_id);
1801 break;
1802 case 1:
1803 tw_dev->state[request_id] = TW_S_COMPLETED;
1804 twa_free_request_id(tw_dev, request_id);
1805 SCpnt->result = (DID_ERROR << 16);
1806 done(SCpnt);
1807 retval = 0;
1809 out:
1810 return retval;
1811 } /* End twa_scsi_queue() */
1813 /* This function hands scsi cdb's to the firmware */
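/*
 * Builds a TW_Command_Apache packet for either a midlayer scsi_cmnd or an
 * internal CDB (sglistarg != NULL).  Single-element SG lists shorter than
 * TW_MIN_SGL_LENGTH are bounced through the per-request generic buffer;
 * everything else is DMA-mapped and each SG address is checked against the
 * controller's alignment requirement before the packet is posted.
 */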
1814 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
1816 TW_Command_Full *full_command_packet;
1817 TW_Command_Apache *command_packet;
1818 u32 num_sectors = 0x0;
1819 int i, sg_count;
1820 struct scsi_cmnd *srb = NULL;
1821 struct scatterlist *sglist = NULL, *sg;
1822 int retval = 1;
1824 if (tw_dev->srb[request_id]) {
1825 srb = tw_dev->srb[request_id];
1826 if (scsi_sglist(srb))
1827 sglist = scsi_sglist(srb);
1830 /* Initialize command packet */
1831 full_command_packet = tw_dev->command_packet_virt[request_id];
1832 full_command_packet->header.header_desc.size_header = 128;
1833 full_command_packet->header.status_block.error = 0;
1834 full_command_packet->header.status_block.severity__reserved = 0;
1836 command_packet = &full_command_packet->command.newcommand;
1837 command_packet->status = 0;
1838 command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
1840 	/* We forced 16-byte CDB use earlier */
1841 if (!cdb)
1842 memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
1843 else
1844 memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
1846 if (srb) {
1847 command_packet->unit = srb->device->id;
1848 command_packet->request_id__lunl =
1849 cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
1850 } else {
1851 command_packet->request_id__lunl =
1852 cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
1853 command_packet->unit = 0;
1856 command_packet->sgl_offset = 16;
1858 if (!sglistarg) {
1859 /* Map sglist from scsi layer to cmd packet */
1861 if (scsi_sg_count(srb)) {
1862 if ((scsi_sg_count(srb) == 1) &&
1863 (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
1864 if (srb->sc_data_direction == DMA_TO_DEVICE ||
1865 srb->sc_data_direction == DMA_BIDIRECTIONAL)
1866 scsi_sg_copy_to_buffer(srb,
1867 tw_dev->generic_buffer_virt[request_id],
1868 TW_SECTOR_SIZE);
1869 command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1870 command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1871 } else {
1872 sg_count = twa_map_scsi_sg_data(tw_dev, request_id);
1873 if (sg_count == 0)
1874 goto out;
1876 scsi_for_each_sg(srb, sg, sg_count, i) {
1877 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
1878 command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
1879 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1880 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
1881 goto out;
1885 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
1887 } else {
1888 /* Internal cdb post */
1889 for (i = 0; i < use_sg; i++) {
1890 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
1891 command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
1892 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1893 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
1894 goto out;
1897 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
1900 if (srb) {
1901 if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
1902 num_sectors = (u32)srb->cmnd[4];
1904 if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
1905 num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
1908 /* Update sector statistic */
1909 tw_dev->sector_count = num_sectors;
1910 if (tw_dev->sector_count > tw_dev->max_sector_count)
1911 tw_dev->max_sector_count = tw_dev->sector_count;
1913 /* Update SG statistics */
1914 if (srb) {
1915 tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
1916 if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
1917 tw_dev->max_sgl_entries = tw_dev->sgl_entries;
1920 /* Now post the command to the board */
1921 if (srb) {
1922 retval = twa_post_command_packet(tw_dev, request_id, 0);
1923 } else {
1924 twa_post_command_packet(tw_dev, request_id, 1);
1925 retval = 0;
1927 out:
1928 return retval;
1929 } /* End twa_scsiop_execute_scsi() */
1931 /* This function completes an execute scsi operation */
1932 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1934 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1936 if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
1937 (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1938 cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1939 if (scsi_sg_count(cmd) == 1) {
1940 void *buf = tw_dev->generic_buffer_virt[request_id];
1942 scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
1945 } /* End twa_scsiop_execute_scsi_complete() */
1947 /* This function tells the controller to shut down */
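/*
 * Disables and clears the controller interrupts, releases the IRQ, and
 * sends a final init-connection message so the firmware knows the host is
 * going away before the device is torn down.
 */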
1948 static void __twa_shutdown(TW_Device_Extension *tw_dev)
1950 /* Disable interrupts */
1951 TW_DISABLE_INTERRUPTS(tw_dev);
1953 /* Free up the IRQ */
1954 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
1956 printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);
1958 /* Tell the card we are shutting down */
1959 if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
1960 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
1961 } else {
1962 printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
1965 /* Clear all interrupts just before exit */
1966 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1967 } /* End __twa_shutdown() */
1969 /* Wrapper for __twa_shutdown */
1970 static void twa_shutdown(struct pci_dev *pdev)
1972 struct Scsi_Host *host = pci_get_drvdata(pdev);
1973 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1975 __twa_shutdown(tw_dev);
1976 } /* End twa_shutdown() */
1978 /* This function will look up a message string by code in a twa_message_type table */
1979 static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1981 int index;
1983 for (index = 0; ((code != table[index].code) &&
1984 (table[index].text != (char *)0)); index++);
1985 return(table[index].text);
1986 } /* End twa_string_lookup() */
1988 /* This function will perform a pci-dma unmap */
1989 static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
1991 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1993 if (cmd->SCp.phase == TW_PHASE_SGLIST)
1994 scsi_dma_unmap(cmd);
1995 } /* End twa_unmap_scsi_data() */
1997 /* scsi_host_template initializer */
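/*
 * can_queue is TW_Q_LENGTH - 2, which presumably keeps a couple of request
 * ids free for internal driver use (AEN and ioctl processing) rather than
 * exposing the whole queue to the midlayer.
 */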
1998 static struct scsi_host_template driver_template = {
1999 .module = THIS_MODULE,
2000 .name = "3ware 9000 Storage Controller",
2001 .queuecommand = twa_scsi_queue,
2002 .eh_host_reset_handler = twa_scsi_eh_reset,
2003 .bios_param = twa_scsi_biosparam,
2004 .change_queue_depth = twa_change_queue_depth,
2005 .can_queue = TW_Q_LENGTH-2,
2006 .this_id = -1,
2007 .sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
2008 .max_sectors = TW_MAX_SECTORS,
2009 .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
2010 .use_clustering = ENABLE_CLUSTERING,
2011 .shost_attrs = twa_host_attrs,
2012 .emulated = 1
2015 /* This function will probe and initialize a card */
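/*
 * Probe order: enable the PCI device, set a 64-bit DMA mask (falling back
 * to 32-bit), allocate the Scsi_Host and device extension, ioremap the
 * register window (BAR 1 on 9000-series, BAR 2 otherwise), run the reset
 * sequence, register with the SCSI midlayer, request the (optionally MSI)
 * interrupt, and finally register the "twa" character device.
 */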
2016 static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2018 struct Scsi_Host *host = NULL;
2019 TW_Device_Extension *tw_dev;
2020 unsigned long mem_addr, mem_len;
2021 int retval = -ENODEV;
2023 retval = pci_enable_device(pdev);
2024 if (retval) {
2025 TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
2026 goto out_disable_device;
2029 pci_set_master(pdev);
2030 pci_try_set_mwi(pdev);
2032 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2033 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2034 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2035 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2036 TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
2037 retval = -ENODEV;
2038 goto out_disable_device;
2041 host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
2042 if (!host) {
2043 TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
2044 retval = -ENOMEM;
2045 goto out_disable_device;
2047 tw_dev = (TW_Device_Extension *)host->hostdata;
2049 /* Save values to device extension */
2050 tw_dev->host = host;
2051 tw_dev->tw_pci_dev = pdev;
2053 if (twa_initialize_device_extension(tw_dev)) {
2054 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2055 goto out_free_device_extension;
2058 /* Request IO regions */
2059 retval = pci_request_regions(pdev, "3w-9xxx");
2060 if (retval) {
2061 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
2062 goto out_free_device_extension;
2065 if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
2066 mem_addr = pci_resource_start(pdev, 1);
2067 mem_len = pci_resource_len(pdev, 1);
2068 } else {
2069 mem_addr = pci_resource_start(pdev, 2);
2070 mem_len = pci_resource_len(pdev, 2);
2073 /* Save base address */
2074 tw_dev->base_addr = ioremap(mem_addr, mem_len);
2075 if (!tw_dev->base_addr) {
2076 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2077 goto out_release_mem_region;
2080 /* Disable interrupts on the card */
2081 TW_DISABLE_INTERRUPTS(tw_dev);
2083 /* Initialize the card */
2084 if (twa_reset_sequence(tw_dev, 0))
2085 goto out_iounmap;
2087 /* Set host specific parameters */
2088 if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2089 (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
2090 host->max_id = TW_MAX_UNITS_9650SE;
2091 else
2092 host->max_id = TW_MAX_UNITS;
2094 host->max_cmd_len = TW_MAX_CDB_LEN;
2096 	/* Channels aren't supported by the adapter */
2097 host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
2098 host->max_channel = 0;
2100 /* Register the card with the kernel SCSI layer */
2101 retval = scsi_add_host(host, &pdev->dev);
2102 if (retval) {
2103 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
2104 goto out_iounmap;
2107 pci_set_drvdata(pdev, host);
2109 printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
2110 host->host_no, mem_addr, pdev->irq);
2111 printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
2112 host->host_no,
2113 (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
2114 TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
2115 (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
2116 TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
2117 le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
2118 TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
2120 /* Try to enable MSI */
2121 if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
2122 !pci_enable_msi(pdev))
2123 set_bit(TW_USING_MSI, &tw_dev->flags);
2125 /* Now setup the interrupt handler */
2126 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2127 if (retval) {
2128 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
2129 goto out_remove_host;
2132 twa_device_extension_list[twa_device_extension_count] = tw_dev;
2133 twa_device_extension_count++;
2135 /* Re-enable interrupts on the card */
2136 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2138 /* Finally, scan the host */
2139 scsi_scan_host(host);
2141 if (twa_major == -1) {
2142 if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0)
2143 TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
2145 return 0;
2147 out_remove_host:
2148 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2149 pci_disable_msi(pdev);
2150 scsi_remove_host(host);
2151 out_iounmap:
2152 iounmap(tw_dev->base_addr);
2153 out_release_mem_region:
2154 pci_release_regions(pdev);
2155 out_free_device_extension:
2156 twa_free_device_extension(tw_dev);
2157 scsi_host_put(host);
2158 out_disable_device:
2159 pci_disable_device(pdev);
2161 return retval;
2162 } /* End twa_probe() */
2164 /* This function is called to remove a device */
2165 static void twa_remove(struct pci_dev *pdev)
2167 struct Scsi_Host *host = pci_get_drvdata(pdev);
2168 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2170 scsi_remove_host(tw_dev->host);
2172 /* Unregister character device */
2173 if (twa_major >= 0) {
2174 unregister_chrdev(twa_major, "twa");
2175 twa_major = -1;
2178 /* Shutdown the card */
2179 __twa_shutdown(tw_dev);
2181 /* Disable MSI if enabled */
2182 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2183 pci_disable_msi(pdev);
2185 /* Free IO remapping */
2186 iounmap(tw_dev->base_addr);
2188 /* Free up the mem region */
2189 pci_release_regions(pdev);
2191 /* Free up device extension resources */
2192 twa_free_device_extension(tw_dev);
2194 scsi_host_put(tw_dev->host);
2195 pci_disable_device(pdev);
2196 twa_device_extension_count--;
2197 } /* End twa_remove() */
2199 #ifdef CONFIG_PM
2200 /* This function is called on PCI suspend */
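/*
 * Suspend quiesces the card much like shutdown: interrupts off, IRQ and
 * MSI released, a shutdown init-connection sent to the firmware, then the
 * PCI state is saved and the device placed in the requested power state.
 */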
2201 static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
2203 struct Scsi_Host *host = pci_get_drvdata(pdev);
2204 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2206 printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);
2208 TW_DISABLE_INTERRUPTS(tw_dev);
2209 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
2211 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2212 pci_disable_msi(pdev);
2214 /* Tell the card we are shutting down */
2215 if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
2216 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
2217 } else {
2218 printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
2220 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
2222 pci_save_state(pdev);
2223 pci_disable_device(pdev);
2224 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2226 return 0;
2227 } /* End twa_suspend() */
2229 /* This function is called on PCI resume */
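/*
 * Resume mirrors probe's hardware bring-up: PCI state is restored, the DMA
 * mask is re-applied, the controller is re-run through the reset sequence,
 * the IRQ handler is reinstalled (re-enabling MSI if it was in use), and
 * interrupts are re-enabled on the card.
 */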
2230 static int twa_resume(struct pci_dev *pdev)
2232 int retval = 0;
2233 struct Scsi_Host *host = pci_get_drvdata(pdev);
2234 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2236 printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
2237 pci_set_power_state(pdev, PCI_D0);
2238 pci_enable_wake(pdev, PCI_D0, 0);
2239 pci_restore_state(pdev);
2241 retval = pci_enable_device(pdev);
2242 if (retval) {
2243 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
2244 return retval;
2247 pci_set_master(pdev);
2248 pci_try_set_mwi(pdev);
2250 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2251 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2252 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2253 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2254 TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
2255 retval = -ENODEV;
2256 goto out_disable_device;
2259 /* Initialize the card */
2260 if (twa_reset_sequence(tw_dev, 0)) {
2261 retval = -ENODEV;
2262 goto out_disable_device;
2265 /* Now setup the interrupt handler */
2266 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2267 if (retval) {
2268 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
2269 retval = -ENODEV;
2270 goto out_disable_device;
2273 /* Now enable MSI if enabled */
2274 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2275 pci_enable_msi(pdev);
2277 /* Re-enable interrupts on the card */
2278 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2280 printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
2281 return 0;
2283 out_disable_device:
2284 scsi_remove_host(host);
2285 pci_disable_device(pdev);
2287 return retval;
2288 } /* End twa_resume() */
2289 #endif
2291 /* PCI Devices supported by this driver */
2292 static struct pci_device_id twa_pci_tbl[] __devinitdata = {
2293 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2294 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2295 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2296 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2297 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
2298 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2299 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
2300 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2303 MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
2305 /* pci_driver initializer */
2306 static struct pci_driver twa_driver = {
2307 .name = "3w-9xxx",
2308 .id_table = twa_pci_tbl,
2309 .probe = twa_probe,
2310 .remove = twa_remove,
2311 #ifdef CONFIG_PM
2312 .suspend = twa_suspend,
2313 .resume = twa_resume,
2314 #endif
2315 .shutdown = twa_shutdown
2318 /* This function is called on driver initialization */
2319 static int __init twa_init(void)
2321 printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
2323 return pci_register_driver(&twa_driver);
2324 } /* End twa_init() */
2326 /* This function is called on driver exit */
2327 static void __exit twa_exit(void)
2329 pci_unregister_driver(&twa_driver);
2330 } /* End twa_exit() */
2332 module_init(twa_init);
2333 module_exit(twa_exit);