1 /*
2 3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
4 Written By: Adam Radford <aradford@gmail.com>
5 Modifications By: Tom Couch
7 Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
8 Copyright (C) 2010 LSI Corporation.
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; version 2 of the License.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 NO WARRANTY
20 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 solely responsible for determining the appropriateness of using and
25 distributing the Program and assumes all risks associated with its
26 exercise of rights under this Agreement, including but not limited to
27 the risks and costs of program errors, damage to or loss of data,
28 programs or equipment, and unavailability or interruption of operations.
30 DISCLAIMER OF LIABILITY
31 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39 You should have received a copy of the GNU General Public License
40 along with this program; if not, write to the Free Software
41 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
43 Bugs/Comments/Suggestions should be mailed to:
44 aradford@gmail.com
46 Note: This version of the driver does not contain a bundled firmware
47 image.
49 History
50 -------
51 2.26.02.000 - Driver cleanup for kernel submission.
52 2.26.02.001 - Replace schedule_timeout() calls with msleep().
53 2.26.02.002 - Add support for PAE mode.
54 Add lun support.
55 Fix twa_remove() to free irq handler/unregister_chrdev()
56 before shutting down card.
57 Change to new 'change_queue_depth' api.
58 Fix 'handled=1' ISR usage, remove bogus IRQ check.
59 Remove un-needed eh_abort handler.
60 Add support for embedded firmware error strings.
61 2.26.02.003 - Correctly handle single sgl's with use_sg=1.
62 2.26.02.004 - Add support for 9550SX controllers.
63 2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
64 2.26.02.006 - Fix 9550SX pchip reset timeout.
65 Add big endian support.
66 2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
67 2.26.02.008 - Free irq handler in __twa_shutdown().
68 Serialize reset code.
69 Add support for 9650SE controllers.
70 2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
71 2.26.02.010 - Add support for 9690SA controllers.
72 2.26.02.011 - Increase max AENs drained to 256.
73 Add MSI support and "use_msi" module parameter.
74 Fix bug in twa_get_param() on 4GB+.
75 Use pci_resource_len() for ioremap().
76 2.26.02.012 - Add power management support.
77 2.26.02.013 - Fix bug in twa_load_sgl().
78 2.26.02.014 - Force 60 second timeout default.
81 #include <linux/module.h>
82 #include <linux/reboot.h>
83 #include <linux/spinlock.h>
84 #include <linux/interrupt.h>
85 #include <linux/moduleparam.h>
86 #include <linux/errno.h>
87 #include <linux/types.h>
88 #include <linux/delay.h>
89 #include <linux/pci.h>
90 #include <linux/time.h>
91 #include <linux/mutex.h>
92 #include <linux/slab.h>
93 #include <asm/io.h>
94 #include <asm/irq.h>
95 #include <linux/uaccess.h>
96 #include <scsi/scsi.h>
97 #include <scsi/scsi_host.h>
98 #include <scsi/scsi_tcq.h>
99 #include <scsi/scsi_cmnd.h>
100 #include "3w-9xxx.h"
102 /* Globals */
103 #define TW_DRIVER_VERSION "2.26.02.014"
104 static DEFINE_MUTEX(twa_chrdev_mutex);
105 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
106 static unsigned int twa_device_extension_count;
107 static int twa_major = -1;
108 extern struct timezone sys_tz;
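/* sys_tz provides the host's UTC offset; it is used below when stamping AEN
   events and when syncing the controller clock to local time. */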
110 /* Module parameters */
111 MODULE_AUTHOR ("LSI");
112 MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
113 MODULE_LICENSE("GPL");
114 MODULE_VERSION(TW_DRIVER_VERSION);
116 static int use_msi = 0;
117 module_param(use_msi, int, S_IRUGO);
118 MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
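/*
 * Usage note (illustrative, not part of the original source): use_msi is a
 * read-only (S_IRUGO) module parameter, so it is set at load time, e.g.
 *   modprobe 3w-9xxx use_msi=1
 */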
120 /* Function prototypes */
121 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
122 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
123 static char *twa_aen_severity_lookup(unsigned char severity_code);
124 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
125 static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
126 static int twa_chrdev_open(struct inode *inode, struct file *file);
127 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
128 static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
129 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
130 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
131 u32 set_features, unsigned short current_fw_srl,
132 unsigned short current_fw_arch_id,
133 unsigned short current_fw_branch,
134 unsigned short current_fw_build,
135 unsigned short *fw_on_ctlr_srl,
136 unsigned short *fw_on_ctlr_arch_id,
137 unsigned short *fw_on_ctlr_branch,
138 unsigned short *fw_on_ctlr_build,
139 u32 *init_connect_result);
140 static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
141 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
142 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
143 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
144 static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
145 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
146 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
147 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
148 static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
150 /* Functions */
152 /* Show some statistics about the card */
153 static ssize_t twa_show_stats(struct device *dev,
154 struct device_attribute *attr, char *buf)
156 struct Scsi_Host *host = class_to_shost(dev);
157 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
158 unsigned long flags = 0;
159 ssize_t len;
161 spin_lock_irqsave(tw_dev->host->host_lock, flags);
162 len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
163 "Current commands posted: %4d\n"
164 "Max commands posted: %4d\n"
165 "Current pending commands: %4d\n"
166 "Max pending commands: %4d\n"
167 "Last sgl length: %4d\n"
168 "Max sgl length: %4d\n"
169 "Last sector count: %4d\n"
170 "Max sector count: %4d\n"
171 "SCSI Host Resets: %4d\n"
172 "AEN's: %4d\n",
173 TW_DRIVER_VERSION,
174 tw_dev->posted_request_count,
175 tw_dev->max_posted_request_count,
176 tw_dev->pending_request_count,
177 tw_dev->max_pending_request_count,
178 tw_dev->sgl_entries,
179 tw_dev->max_sgl_entries,
180 tw_dev->sector_count,
181 tw_dev->max_sector_count,
182 tw_dev->num_resets,
183 tw_dev->aen_count);
184 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
185 return len;
186 } /* End twa_show_stats() */
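/*
 * Usage sketch (assumption: standard Scsi_Host sysfs layout): once the
 * 'stats' attribute below is registered through the host template, the
 * counters printed by twa_show_stats() can be read from userspace, e.g.
 *   cat /sys/class/scsi_host/host<N>/stats
 */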
188 /* Create sysfs 'stats' entry */
189 static struct device_attribute twa_host_stats_attr = {
190 .attr = {
191 .name = "stats",
192 .mode = S_IRUGO,
194 .show = twa_show_stats
197 /* Host attributes initializer */
198 static struct device_attribute *twa_host_attrs[] = {
199 &twa_host_stats_attr,
200 NULL,
203 /* File operations struct for character device */
204 static const struct file_operations twa_fops = {
205 .owner = THIS_MODULE,
206 .unlocked_ioctl = twa_chrdev_ioctl,
207 .open = twa_chrdev_open,
208 .release = NULL,
209 .llseek = noop_llseek,
212 /*
213  * The controllers use an inline buffer instead of a mapped SGL for small,
214  * single entry buffers.  Note that we treat a zero-length transfer like
215  * a mapped SGL.
216  */
217 static bool twa_command_mapped(struct scsi_cmnd *cmd)
218 {
219 	return scsi_sg_count(cmd) != 1 ||
220 		scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
221 }
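/* Callers use this to decide whether scsi_dma_unmap() is needed on
   completion/reset; commands that fail this test were never DMA-mapped and
   travel through the controller's inline buffer instead (see the comment
   above). */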
223 /* This function will complete an aen request from the isr */
224 static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
226 TW_Command_Full *full_command_packet;
227 TW_Command *command_packet;
228 TW_Command_Apache_Header *header;
229 unsigned short aen;
230 int retval = 1;
232 header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
233 tw_dev->posted_request_count--;
234 aen = le16_to_cpu(header->status_block.error);
235 full_command_packet = tw_dev->command_packet_virt[request_id];
236 command_packet = &full_command_packet->command.oldcommand;
238 /* First check for internal completion of set param for time sync */
239 if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
240 /* Keep reading the queue in case there are more aen's */
241 if (twa_aen_read_queue(tw_dev, request_id))
242 goto out2;
243 else {
244 retval = 0;
245 goto out;
249 switch (aen) {
250 case TW_AEN_QUEUE_EMPTY:
251 /* Quit reading the queue if this is the last one */
252 break;
253 case TW_AEN_SYNC_TIME_WITH_HOST:
254 twa_aen_sync_time(tw_dev, request_id);
255 retval = 0;
256 goto out;
257 default:
258 twa_aen_queue_event(tw_dev, header);
260 /* If there are more aen's, keep reading the queue */
261 if (twa_aen_read_queue(tw_dev, request_id))
262 goto out2;
263 else {
264 retval = 0;
265 goto out;
268 retval = 0;
269 out2:
270 tw_dev->state[request_id] = TW_S_COMPLETED;
271 twa_free_request_id(tw_dev, request_id);
272 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
273 out:
274 return retval;
275 } /* End twa_aen_complete() */
277 /* This function will drain aen queue */
278 static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
280 int request_id = 0;
281 char cdb[TW_MAX_CDB_LEN];
282 TW_SG_Entry sglist[1];
283 int finished = 0, count = 0;
284 TW_Command_Full *full_command_packet;
285 TW_Command_Apache_Header *header;
286 unsigned short aen;
287 int first_reset = 0, queue = 0, retval = 1;
289 if (no_check_reset)
290 first_reset = 0;
291 else
292 first_reset = 1;
294 full_command_packet = tw_dev->command_packet_virt[request_id];
295 memset(full_command_packet, 0, sizeof(TW_Command_Full));
297 /* Initialize cdb */
298 memset(&cdb, 0, TW_MAX_CDB_LEN);
299 cdb[0] = REQUEST_SENSE; /* opcode */
300 cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
302 /* Initialize sglist */
303 memset(&sglist, 0, sizeof(TW_SG_Entry));
304 sglist[0].length = TW_SECTOR_SIZE;
305 sglist[0].address = tw_dev->generic_buffer_phys[request_id];
307 if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
308 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
309 goto out;
312 /* Mark internal command */
313 tw_dev->srb[request_id] = NULL;
315 do {
316 /* Send command to the board */
317 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
318 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
319 goto out;
322 /* Now poll for completion */
323 if (twa_poll_response(tw_dev, request_id, 30)) {
324 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
325 tw_dev->posted_request_count--;
326 goto out;
329 tw_dev->posted_request_count--;
330 header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
331 aen = le16_to_cpu(header->status_block.error);
332 queue = 0;
333 count++;
335 switch (aen) {
336 case TW_AEN_QUEUE_EMPTY:
337 if (first_reset != 1)
338 goto out;
339 else
340 finished = 1;
341 break;
342 case TW_AEN_SOFT_RESET:
343 if (first_reset == 0)
344 first_reset = 1;
345 else
346 queue = 1;
347 break;
348 case TW_AEN_SYNC_TIME_WITH_HOST:
349 break;
350 default:
351 queue = 1;
354 /* Now queue an event info */
355 if (queue)
356 twa_aen_queue_event(tw_dev, header);
357 } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
359 if (count == TW_MAX_AEN_DRAIN)
360 goto out;
362 retval = 0;
363 out:
364 tw_dev->state[request_id] = TW_S_INITIAL;
365 return retval;
366 } /* End twa_aen_drain_queue() */
368 /* This function will queue an event */
369 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
371 u32 local_time;
372 TW_Event *event;
373 unsigned short aen;
374 char host[16];
375 char *error_str;
377 tw_dev->aen_count++;
379 /* Fill out event info */
380 event = tw_dev->event_queue[tw_dev->error_index];
382 /* Check for clobber */
383 host[0] = '\0';
384 if (tw_dev->host) {
385 sprintf(host, " scsi%d:", tw_dev->host->host_no);
386 if (event->retrieved == TW_AEN_NOT_RETRIEVED)
387 tw_dev->aen_clobber = 1;
390 aen = le16_to_cpu(header->status_block.error);
391 memset(event, 0, sizeof(TW_Event));
393 event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
394 /* event->time_stamp_sec overflows in y2106 */
395 local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
396 event->time_stamp_sec = local_time;
397 event->aen_code = aen;
398 event->retrieved = TW_AEN_NOT_RETRIEVED;
399 event->sequence_id = tw_dev->error_sequence_id;
400 tw_dev->error_sequence_id++;
402 /* Check for embedded error string */
403 error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
405 header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
406 event->parameter_len = strlen(header->err_specific_desc);
407 memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
408 if (event->severity != TW_AEN_SEVERITY_DEBUG)
409 printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
410 host,
411 twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
412 TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
413 error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
414 header->err_specific_desc);
415 else
416 tw_dev->aen_count--;
418 if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
419 tw_dev->event_queue_wrapped = 1;
420 tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
421 } /* End twa_aen_queue_event() */
423 /* This function will read the aen queue from the isr */
424 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
426 char cdb[TW_MAX_CDB_LEN];
427 TW_SG_Entry sglist[1];
428 TW_Command_Full *full_command_packet;
429 int retval = 1;
431 full_command_packet = tw_dev->command_packet_virt[request_id];
432 memset(full_command_packet, 0, sizeof(TW_Command_Full));
434 /* Initialize cdb */
435 memset(&cdb, 0, TW_MAX_CDB_LEN);
436 cdb[0] = REQUEST_SENSE; /* opcode */
437 cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
439 /* Initialize sglist */
440 memset(&sglist, 0, sizeof(TW_SG_Entry));
441 sglist[0].length = TW_SECTOR_SIZE;
442 sglist[0].address = tw_dev->generic_buffer_phys[request_id];
444 /* Mark internal command */
445 tw_dev->srb[request_id] = NULL;
447 /* Now post the command packet */
448 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
449 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
450 goto out;
452 retval = 0;
453 out:
454 return retval;
455 } /* End twa_aen_read_queue() */
457 /* This function will look up an AEN severity string */
458 static char *twa_aen_severity_lookup(unsigned char severity_code)
460 char *retval = NULL;
462 if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
463 (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
464 goto out;
466 retval = twa_aen_severity_table[severity_code];
467 out:
468 return retval;
469 } /* End twa_aen_severity_lookup() */
471 /* This function will sync firmware time with the host time */
472 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
474 u32 schedulertime;
475 TW_Command_Full *full_command_packet;
476 TW_Command *command_packet;
477 TW_Param_Apache *param;
478 time64_t local_time;
480 /* Fill out the command packet */
481 full_command_packet = tw_dev->command_packet_virt[request_id];
482 memset(full_command_packet, 0, sizeof(TW_Command_Full));
483 command_packet = &full_command_packet->command.oldcommand;
484 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
485 command_packet->request_id = request_id;
486 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
487 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
488 command_packet->size = TW_COMMAND_SIZE;
489 command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
491 /* Setup the param */
492 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
493 memset(param, 0, TW_SECTOR_SIZE);
494 param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
495 param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
496 param->parameter_size_bytes = cpu_to_le16(4);
498 /* Convert system time in UTC to local time seconds since last
499 Sunday 12:00AM */
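/* (1970-01-01, the epoch, fell on a Thursday; taking (local_time - 3 days)
   mod 604800 (seconds per week) therefore yields the number of seconds
   since the most recent Sunday 00:00.) */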
500 local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
501 div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
502 schedulertime = cpu_to_le32(schedulertime % 604800);
504 memcpy(param->data, &schedulertime, sizeof(u32));
506 /* Mark internal command */
507 tw_dev->srb[request_id] = NULL;
509 /* Now post the command */
510 twa_post_command_packet(tw_dev, request_id, 1);
511 } /* End twa_aen_sync_time() */
513 /* This function will allocate memory and check if it is correctly aligned */
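/* which == 0 allocates the per-request command packet pool;
   which == 1 allocates the per-request generic (parameter/AEN) buffer pool. */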
514 static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
516 int i;
517 dma_addr_t dma_handle;
518 unsigned long *cpu_addr;
519 int retval = 1;
521 cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
522 if (!cpu_addr) {
523 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
524 goto out;
527 if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
528 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
529 pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
530 goto out;
533 memset(cpu_addr, 0, size*TW_Q_LENGTH);
535 for (i = 0; i < TW_Q_LENGTH; i++) {
536 switch(which) {
537 case 0:
538 tw_dev->command_packet_phys[i] = dma_handle+(i*size);
539 tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
540 break;
541 case 1:
542 tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
543 tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
544 break;
547 retval = 0;
548 out:
549 return retval;
550 } /* End twa_allocate_memory() */
552 /* This function will check the status register for unexpected bits */
553 static int twa_check_bits(u32 status_reg_value)
555 int retval = 1;
557 if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
558 goto out;
559 if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
560 goto out;
562 retval = 0;
563 out:
564 return retval;
565 } /* End twa_check_bits() */
567 /* This function will check the srl and decide if we are compatible */
568 static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
570 int retval = 1;
571 unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
572 unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
573 u32 init_connect_result = 0;
575 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
576 TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
577 TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
578 TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
579 &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
580 &fw_on_ctlr_build, &init_connect_result)) {
581 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
582 goto out;
585 tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
586 tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
587 tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;
589 /* Try base mode compatibility */
590 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
591 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
592 TW_EXTENDED_INIT_CONNECT,
593 TW_BASE_FW_SRL, TW_9000_ARCH_ID,
594 TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
595 &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
596 &fw_on_ctlr_branch, &fw_on_ctlr_build,
597 &init_connect_result)) {
598 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
599 goto out;
601 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
602 if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
603 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
604 } else {
605 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
607 goto out;
609 tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
610 tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
611 tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
614 /* Load rest of compatibility struct */
615 strlcpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION,
616 sizeof(tw_dev->tw_compat_info.driver_version));
617 tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
618 tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
619 tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
620 tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
621 tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
622 tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
623 tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
624 tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
625 tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
627 retval = 0;
628 out:
629 return retval;
630 } /* End twa_check_srl() */
632 /* This function handles ioctl for the character device */
633 static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
635 struct inode *inode = file_inode(file);
636 long timeout;
637 unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
638 dma_addr_t dma_handle;
639 int request_id = 0;
640 unsigned int sequence_id = 0;
641 unsigned char event_index, start_index;
642 TW_Ioctl_Driver_Command driver_command;
643 TW_Ioctl_Buf_Apache *tw_ioctl;
644 TW_Lock *tw_lock;
645 TW_Command_Full *full_command_packet;
646 TW_Compatibility_Info *tw_compat_info;
647 TW_Event *event;
648 ktime_t current_time;
649 TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
650 int retval = TW_IOCTL_ERROR_OS_EFAULT;
651 void __user *argp = (void __user *)arg;
653 mutex_lock(&twa_chrdev_mutex);
655 /* Only let one of these through at a time */
656 if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
657 retval = TW_IOCTL_ERROR_OS_EINTR;
658 goto out;
661 /* First copy down the driver command */
662 if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
663 goto out2;
665 /* Check data buffer size */
666 if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
667 retval = TW_IOCTL_ERROR_OS_EINVAL;
668 goto out2;
671 /* Hardware can only do multiple of 512 byte transfers */
672 data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
674 /* Now allocate ioctl buf memory */
675 cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
676 if (!cpu_addr) {
677 retval = TW_IOCTL_ERROR_OS_ENOMEM;
678 goto out2;
681 tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
683 /* Now copy down the entire ioctl */
684 if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
685 goto out3;
687 /* See which ioctl we are doing */
688 switch (cmd) {
689 case TW_IOCTL_FIRMWARE_PASS_THROUGH:
690 spin_lock_irqsave(tw_dev->host->host_lock, flags);
691 twa_get_request_id(tw_dev, &request_id);
693 /* Flag internal command */
694 tw_dev->srb[request_id] = NULL;
696 /* Flag chrdev ioctl */
697 tw_dev->chrdev_request_id = request_id;
699 full_command_packet = &tw_ioctl->firmware_command;
701 /* Load request id and sglist for both command types */
702 twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
704 memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
706 /* Now post the command packet to the controller */
707 twa_post_command_packet(tw_dev, request_id, 1);
708 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
710 timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
712 /* Now wait for command to complete */
713 timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
715 /* We timed out, and didn't get an interrupt */
716 if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
717 /* Now we need to reset the board */
718 printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
719 tw_dev->host->host_no, TW_DRIVER, 0x37,
720 cmd);
721 retval = TW_IOCTL_ERROR_OS_EIO;
722 twa_reset_device_extension(tw_dev);
723 goto out3;
726 /* Now copy in the command packet response */
727 memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
729 /* Now complete the io */
730 spin_lock_irqsave(tw_dev->host->host_lock, flags);
731 tw_dev->posted_request_count--;
732 tw_dev->state[request_id] = TW_S_COMPLETED;
733 twa_free_request_id(tw_dev, request_id);
734 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
735 break;
736 case TW_IOCTL_GET_COMPATIBILITY_INFO:
737 tw_ioctl->driver_command.status = 0;
738 /* Copy compatibility struct into ioctl data buffer */
739 tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
740 memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
741 break;
742 case TW_IOCTL_GET_LAST_EVENT:
743 if (tw_dev->event_queue_wrapped) {
744 if (tw_dev->aen_clobber) {
745 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
746 tw_dev->aen_clobber = 0;
747 } else
748 tw_ioctl->driver_command.status = 0;
749 } else {
750 if (!tw_dev->error_index) {
751 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
752 break;
754 tw_ioctl->driver_command.status = 0;
756 event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
757 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
758 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
759 break;
760 case TW_IOCTL_GET_FIRST_EVENT:
761 if (tw_dev->event_queue_wrapped) {
762 if (tw_dev->aen_clobber) {
763 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
764 tw_dev->aen_clobber = 0;
765 } else
766 tw_ioctl->driver_command.status = 0;
767 event_index = tw_dev->error_index;
768 } else {
769 if (!tw_dev->error_index) {
770 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
771 break;
773 tw_ioctl->driver_command.status = 0;
774 event_index = 0;
776 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
777 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
778 break;
779 case TW_IOCTL_GET_NEXT_EVENT:
780 event = (TW_Event *)tw_ioctl->data_buffer;
781 sequence_id = event->sequence_id;
782 tw_ioctl->driver_command.status = 0;
784 if (tw_dev->event_queue_wrapped) {
785 if (tw_dev->aen_clobber) {
786 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
787 tw_dev->aen_clobber = 0;
789 start_index = tw_dev->error_index;
790 } else {
791 if (!tw_dev->error_index) {
792 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
793 break;
795 start_index = 0;
797 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;
799 if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
800 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
801 tw_dev->aen_clobber = 1;
802 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
803 break;
805 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
806 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
807 break;
808 case TW_IOCTL_GET_PREVIOUS_EVENT:
809 event = (TW_Event *)tw_ioctl->data_buffer;
810 sequence_id = event->sequence_id;
811 tw_ioctl->driver_command.status = 0;
813 if (tw_dev->event_queue_wrapped) {
814 if (tw_dev->aen_clobber) {
815 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
816 tw_dev->aen_clobber = 0;
818 start_index = tw_dev->error_index;
819 } else {
820 if (!tw_dev->error_index) {
821 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
822 break;
824 start_index = 0;
826 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;
828 if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
829 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
830 tw_dev->aen_clobber = 1;
831 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
832 break;
834 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
835 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
836 break;
837 case TW_IOCTL_GET_LOCK:
838 tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
839 current_time = ktime_get();
841 if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) ||
842 ktime_after(current_time, tw_dev->ioctl_time)) {
843 tw_dev->ioctl_sem_lock = 1;
844 tw_dev->ioctl_time = ktime_add_ms(current_time, tw_lock->timeout_msec);
845 tw_ioctl->driver_command.status = 0;
846 tw_lock->time_remaining_msec = tw_lock->timeout_msec;
847 } else {
848 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
849 tw_lock->time_remaining_msec = ktime_ms_delta(tw_dev->ioctl_time, current_time);
851 break;
852 case TW_IOCTL_RELEASE_LOCK:
853 if (tw_dev->ioctl_sem_lock == 1) {
854 tw_dev->ioctl_sem_lock = 0;
855 tw_ioctl->driver_command.status = 0;
856 } else {
857 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
859 break;
860 default:
861 retval = TW_IOCTL_ERROR_OS_ENOTTY;
862 goto out3;
865 /* Now copy the entire response to userspace */
866 if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
867 retval = 0;
868 out3:
869 /* Now free ioctl buf memory */
870 dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
871 out2:
872 mutex_unlock(&tw_dev->ioctl_lock);
873 out:
874 mutex_unlock(&twa_chrdev_mutex);
875 return retval;
876 } /* End twa_chrdev_ioctl() */
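/*
 * Note (illustrative): this ioctl path is reached through the driver's
 * character device; the minor number of the opened node indexes
 * twa_device_extension_list[] to select the controller, as seen in the
 * iminor() lookup above and the bounds check in twa_chrdev_open() below.
 */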
878 /* This function handles open for the character device */
879 /* NOTE that this function will race with remove. */
880 static int twa_chrdev_open(struct inode *inode, struct file *file)
882 unsigned int minor_number;
883 int retval = TW_IOCTL_ERROR_OS_ENODEV;
885 minor_number = iminor(inode);
886 if (minor_number >= twa_device_extension_count)
887 goto out;
888 retval = 0;
889 out:
890 return retval;
891 } /* End twa_chrdev_open() */
893 /* This function will print readable messages from status register errors */
894 static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
896 int retval = 1;
898 /* Check for various error conditions and handle them appropriately */
899 if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
900 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
901 writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
904 if (status_reg_value & TW_STATUS_PCI_ABORT) {
905 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
906 writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
907 pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
910 if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
911 if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
912 (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
913 (!test_bit(TW_IN_RESET, &tw_dev->flags)))
914 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
915 writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
918 if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
919 if (tw_dev->reset_print == 0) {
920 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
921 tw_dev->reset_print = 1;
923 goto out;
925 retval = 0;
926 out:
927 return retval;
928 } /* End twa_decode_bits() */
930 /* This function will empty the response queue */
931 static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
933 u32 status_reg_value, response_que_value;
934 int count = 0, retval = 1;
936 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
938 while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
939 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
940 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
941 count++;
943 if (count == TW_MAX_RESPONSE_DRAIN)
944 goto out;
946 retval = 0;
947 out:
948 return retval;
949 } /* End twa_empty_response_queue() */
951 /* This function will clear the pchip/response queue on 9550SX */
952 static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
954 u32 response_que_value = 0;
955 unsigned long before;
956 int retval = 1;
958 if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
959 before = jiffies;
960 while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
961 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
962 msleep(1);
963 if (time_after(jiffies, before + HZ * 30))
964 goto out;
966 /* P-chip settle time */
967 msleep(500);
968 retval = 0;
969 } else
970 retval = 0;
971 out:
972 return retval;
973 } /* End twa_empty_response_queue_large() */
975 /* This function passes sense keys from firmware to scsi layer */
976 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
978 TW_Command_Full *full_command_packet;
979 unsigned short error;
980 int retval = 1;
981 char *error_str;
983 full_command_packet = tw_dev->command_packet_virt[request_id];
985 /* Check for embedded error string */
986 error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);
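/* (The firmware can pack a second NUL-terminated string immediately after
   the first one in err_specific_desc; when present it is used as the
   human-readable error string instead of the table lookup.) */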
988 /* Don't print error for Logical unit not supported during rollcall */
989 error = le16_to_cpu(full_command_packet->header.status_block.error);
990 if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
991 if (print_host)
992 printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
993 tw_dev->host->host_no,
994 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
995 full_command_packet->header.status_block.error,
996 error_str[0] == '\0' ?
997 twa_string_lookup(twa_error_table,
998 full_command_packet->header.status_block.error) : error_str,
999 full_command_packet->header.err_specific_desc);
1000 else
1001 printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1002 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1003 full_command_packet->header.status_block.error,
1004 error_str[0] == '\0' ?
1005 twa_string_lookup(twa_error_table,
1006 full_command_packet->header.status_block.error) : error_str,
1007 full_command_packet->header.err_specific_desc);
1010 if (copy_sense) {
1011 memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
1012 tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
1013 retval = TW_ISR_DONT_RESULT;
1014 goto out;
1016 retval = 0;
1017 out:
1018 return retval;
1019 } /* End twa_fill_sense() */
1021 /* This function will free up device extension resources */
1022 static void twa_free_device_extension(TW_Device_Extension *tw_dev)
1024 if (tw_dev->command_packet_virt[0])
1025 pci_free_consistent(tw_dev->tw_pci_dev,
1026 sizeof(TW_Command_Full)*TW_Q_LENGTH,
1027 tw_dev->command_packet_virt[0],
1028 tw_dev->command_packet_phys[0]);
1030 if (tw_dev->generic_buffer_virt[0])
1031 pci_free_consistent(tw_dev->tw_pci_dev,
1032 TW_SECTOR_SIZE*TW_Q_LENGTH,
1033 tw_dev->generic_buffer_virt[0],
1034 tw_dev->generic_buffer_phys[0]);
1036 kfree(tw_dev->event_queue[0]);
1037 } /* End twa_free_device_extension() */
1039 /* This function will free a request id */
1040 static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
1042 tw_dev->free_queue[tw_dev->free_tail] = request_id;
1043 tw_dev->state[request_id] = TW_S_FINISHED;
1044 tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
1045 } /* End twa_free_request_id() */
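/* Request ids are recycled through a simple circular free list: free_queue[]
   holds TW_Q_LENGTH entries, with free_head consumed by twa_get_request_id()
   and free_tail refilled here. */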
1047 /* This function will get parameter table entries from the firmware */
1048 static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
1050 TW_Command_Full *full_command_packet;
1051 TW_Command *command_packet;
1052 TW_Param_Apache *param;
1053 void *retval = NULL;
1055 /* Setup the command packet */
1056 full_command_packet = tw_dev->command_packet_virt[request_id];
1057 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1058 command_packet = &full_command_packet->command.oldcommand;
1060 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
1061 command_packet->size = TW_COMMAND_SIZE;
1062 command_packet->request_id = request_id;
1063 command_packet->byte6_offset.block_count = cpu_to_le16(1);
1065 /* Now setup the param */
1066 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
1067 memset(param, 0, TW_SECTOR_SIZE);
1068 param->table_id = cpu_to_le16(table_id | 0x8000);
1069 param->parameter_id = cpu_to_le16(parameter_id);
1070 param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
1072 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1073 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
1075 /* Post the command packet to the board */
1076 twa_post_command_packet(tw_dev, request_id, 1);
1078 /* Poll for completion */
1079 if (twa_poll_response(tw_dev, request_id, 30))
1080 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
1081 else
1082 retval = (void *)&(param->data[0]);
1084 tw_dev->posted_request_count--;
1085 tw_dev->state[request_id] = TW_S_INITIAL;
1087 return retval;
1088 } /* End twa_get_param() */
1090 /* This function will assign an available request id */
1091 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
1093 *request_id = tw_dev->free_queue[tw_dev->free_head];
1094 tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
1095 tw_dev->state[*request_id] = TW_S_STARTED;
1096 } /* End twa_get_request_id() */
1098 /* This function will send an initconnection command to controller */
1099 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
1100 u32 set_features, unsigned short current_fw_srl,
1101 unsigned short current_fw_arch_id,
1102 unsigned short current_fw_branch,
1103 unsigned short current_fw_build,
1104 unsigned short *fw_on_ctlr_srl,
1105 unsigned short *fw_on_ctlr_arch_id,
1106 unsigned short *fw_on_ctlr_branch,
1107 unsigned short *fw_on_ctlr_build,
1108 u32 *init_connect_result)
1110 TW_Command_Full *full_command_packet;
1111 TW_Initconnect *tw_initconnect;
1112 int request_id = 0, retval = 1;
1114 /* Initialize InitConnection command packet */
1115 full_command_packet = tw_dev->command_packet_virt[request_id];
1116 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1117 full_command_packet->header.header_desc.size_header = 128;
1119 tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
1120 tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
1121 tw_initconnect->request_id = request_id;
1122 tw_initconnect->message_credits = cpu_to_le16(message_credits);
1123 tw_initconnect->features = set_features;
1125 /* Turn on 64-bit sgl support if we need to */
1126 tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
1128 tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
1130 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1131 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
1132 tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
1133 tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
1134 tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
1135 tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
1136 } else
1137 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
1139 /* Send command packet to the board */
1140 twa_post_command_packet(tw_dev, request_id, 1);
1142 /* Poll for completion */
1143 if (twa_poll_response(tw_dev, request_id, 30)) {
1144 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
1145 } else {
1146 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1147 *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
1148 *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
1149 *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
1150 *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
1151 *init_connect_result = le32_to_cpu(tw_initconnect->result);
1153 retval = 0;
1156 tw_dev->posted_request_count--;
1157 tw_dev->state[request_id] = TW_S_INITIAL;
1159 return retval;
1160 } /* End twa_initconnection() */
1162 /* This function will initialize the fields of a device extension */
1163 static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
1165 int i, retval = 1;
1167 /* Initialize command packet buffers */
1168 if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
1169 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
1170 goto out;
1173 /* Initialize generic buffer */
1174 if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
1175 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
1176 goto out;
1179 /* Allocate event info space */
1180 tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
1181 if (!tw_dev->event_queue[0]) {
1182 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
1183 goto out;
1187 for (i = 0; i < TW_Q_LENGTH; i++) {
1188 tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
1189 tw_dev->free_queue[i] = i;
1190 tw_dev->state[i] = TW_S_INITIAL;
1193 tw_dev->pending_head = TW_Q_START;
1194 tw_dev->pending_tail = TW_Q_START;
1195 tw_dev->free_head = TW_Q_START;
1196 tw_dev->free_tail = TW_Q_START;
1197 tw_dev->error_sequence_id = 1;
1198 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1200 mutex_init(&tw_dev->ioctl_lock);
1201 init_waitqueue_head(&tw_dev->ioctl_wqueue);
1203 retval = 0;
1204 out:
1205 return retval;
1206 } /* End twa_initialize_device_extension() */
1208 /* This function is the interrupt service routine */
1209 static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1211 int request_id, error = 0;
1212 u32 status_reg_value;
1213 TW_Response_Queue response_que;
1214 TW_Command_Full *full_command_packet;
1215 TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
1216 int handled = 0;
1218 /* Get the per adapter lock */
1219 spin_lock(tw_dev->host->host_lock);
1221 /* Read the registers */
1222 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1224 /* Check if this is our interrupt, otherwise bail */
1225 if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
1226 goto twa_interrupt_bail;
1228 handled = 1;
1230 /* If we are resetting, bail */
1231 if (test_bit(TW_IN_RESET, &tw_dev->flags))
1232 goto twa_interrupt_bail;
1234 /* Check controller for errors */
1235 if (twa_check_bits(status_reg_value)) {
1236 if (twa_decode_bits(tw_dev, status_reg_value)) {
1237 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1238 goto twa_interrupt_bail;
1242 /* Handle host interrupt */
1243 if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
1244 TW_CLEAR_HOST_INTERRUPT(tw_dev);
1246 /* Handle attention interrupt */
1247 if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
1248 TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
1249 if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
1250 twa_get_request_id(tw_dev, &request_id);
1252 error = twa_aen_read_queue(tw_dev, request_id);
1253 if (error) {
1254 tw_dev->state[request_id] = TW_S_COMPLETED;
1255 twa_free_request_id(tw_dev, request_id);
1256 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
1261 /* Handle command interrupt */
1262 if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
1263 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1264 /* Drain as many pending commands as we can */
1265 while (tw_dev->pending_request_count > 0) {
1266 request_id = tw_dev->pending_queue[tw_dev->pending_head];
1267 if (tw_dev->state[request_id] != TW_S_PENDING) {
1268 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
1269 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1270 goto twa_interrupt_bail;
1272 if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
1273 tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
1274 tw_dev->pending_request_count--;
1275 } else {
1276 /* If we get here, we will continue re-posting on the next command interrupt */
1277 break;
1282 /* Handle response interrupt */
1283 if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
1285 /* Drain the response queue from the board */
1286 while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
1287 /* Complete the response */
1288 response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1289 request_id = TW_RESID_OUT(response_que.response_id);
1290 full_command_packet = tw_dev->command_packet_virt[request_id];
1291 error = 0;
1292 /* Check for command packet errors */
1293 if (full_command_packet->command.newcommand.status != 0) {
1294 if (tw_dev->srb[request_id] != NULL) {
1295 error = twa_fill_sense(tw_dev, request_id, 1, 1);
1296 } else {
1297 /* Skip ioctl error prints */
1298 if (request_id != tw_dev->chrdev_request_id) {
1299 error = twa_fill_sense(tw_dev, request_id, 0, 1);
1304 /* Check for correct state */
1305 if (tw_dev->state[request_id] != TW_S_POSTED) {
1306 if (tw_dev->srb[request_id] != NULL) {
1307 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
1308 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1309 goto twa_interrupt_bail;
1313 /* Check for internal command completion */
1314 if (tw_dev->srb[request_id] == NULL) {
1315 if (request_id != tw_dev->chrdev_request_id) {
1316 if (twa_aen_complete(tw_dev, request_id))
1317 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
1318 } else {
1319 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1320 wake_up(&tw_dev->ioctl_wqueue);
1322 } else {
1323 struct scsi_cmnd *cmd;
1325 cmd = tw_dev->srb[request_id];
1327 twa_scsiop_execute_scsi_complete(tw_dev, request_id);
1328 /* If no error, the command was a success */
1329 if (error == 0) {
1330 cmd->result = (DID_OK << 16);
1333 /* If error, command failed */
1334 if (error == 1) {
1335 /* Ask for a host reset */
1336 cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1339 /* Report residual bytes for single sgl */
1340 if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
1341 if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
1342 scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
1345 /* Now complete the io */
1346 if (twa_command_mapped(cmd))
1347 scsi_dma_unmap(cmd);
1348 cmd->scsi_done(cmd);
1349 tw_dev->state[request_id] = TW_S_COMPLETED;
1350 twa_free_request_id(tw_dev, request_id);
1351 tw_dev->posted_request_count--;
1354 /* Check for valid status after each drain */
1355 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1356 if (twa_check_bits(status_reg_value)) {
1357 if (twa_decode_bits(tw_dev, status_reg_value)) {
1358 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1359 goto twa_interrupt_bail;
1365 twa_interrupt_bail:
1366 spin_unlock(tw_dev->host->host_lock);
1367 return IRQ_RETVAL(handled);
1368 } /* End twa_interrupt() */
1370 /* This function will load the request id and various sgls for ioctls */
1371 static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
1373 TW_Command *oldcommand;
1374 TW_Command_Apache *newcommand;
1375 TW_SG_Entry *sgl;
1376 unsigned int pae = 0;
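/* pae is set on 32-bit kernels with 64-bit DMA addressing; the wider SG
   entries shift the ioctl SGL location by one extra 32-bit word below. */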
1378 if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
1379 pae = 1;
1381 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1382 newcommand = &full_command_packet->command.newcommand;
1383 newcommand->request_id__lunl =
1384 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
1385 if (length) {
1386 newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1387 newcommand->sg_list[0].length = cpu_to_le32(length);
1389 newcommand->sgl_entries__lunh =
1390 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
1391 } else {
1392 oldcommand = &full_command_packet->command.oldcommand;
1393 oldcommand->request_id = request_id;
1395 if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
1396 /* Load the sg list */
1397 if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
1398 sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
1399 else
1400 sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
1401 sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1402 sgl->length = cpu_to_le32(length);
1404 oldcommand->size += pae;
1407 } /* End twa_load_sgl() */
1409 /* This function will poll for a response interrupt of a request */
1410 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1412 int retval = 1, found = 0, response_request_id;
1413 TW_Response_Queue response_queue;
1414 TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
1416 if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
1417 response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1418 response_request_id = TW_RESID_OUT(response_queue.response_id);
1419 if (request_id != response_request_id) {
1420 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
1421 goto out;
1423 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1424 if (full_command_packet->command.newcommand.status != 0) {
1425 /* bad response */
1426 twa_fill_sense(tw_dev, request_id, 0, 0);
1427 goto out;
1429 found = 1;
1430 } else {
1431 if (full_command_packet->command.oldcommand.status != 0) {
1432 /* bad response */
1433 twa_fill_sense(tw_dev, request_id, 0, 0);
1434 goto out;
1436 found = 1;
1440 if (found)
1441 retval = 0;
1442 out:
1443 return retval;
1444 } /* End twa_poll_response() */
1446 /* This function will poll the status register for a flag */
1447 static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1449 u32 status_reg_value;
1450 unsigned long before;
1451 int retval = 1;
1453 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1454 before = jiffies;
1456 if (twa_check_bits(status_reg_value))
1457 twa_decode_bits(tw_dev, status_reg_value);
1459 while ((status_reg_value & flag) != flag) {
1460 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1462 if (twa_check_bits(status_reg_value))
1463 twa_decode_bits(tw_dev, status_reg_value);
1465 if (time_after(jiffies, before + HZ * seconds))
1466 goto out;
1468 msleep(50);
1470 retval = 0;
1471 out:
1472 return retval;
1473 } /* End twa_poll_status() */
1475 /* This function will poll the status register for disappearance of a flag */
1476 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1478 u32 status_reg_value;
1479 unsigned long before;
1480 int retval = 1;
1482 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1483 before = jiffies;
1485 if (twa_check_bits(status_reg_value))
1486 twa_decode_bits(tw_dev, status_reg_value);
1488 while ((status_reg_value & flag) != 0) {
1489 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1490 if (twa_check_bits(status_reg_value))
1491 twa_decode_bits(tw_dev, status_reg_value);
1493 if (time_after(jiffies, before + HZ * seconds))
1494 goto out;
1496 msleep(50);
1498 retval = 0;
1499 out:
1500 return retval;
1501 } /* End twa_poll_status_gone() */
1503 /* This function will attempt to post a command packet to the board */
1504 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
1506 u32 status_reg_value;
1507 dma_addr_t command_que_value;
1508 int retval = 1;
1510 command_que_value = tw_dev->command_packet_phys[request_id];
1512 /* For 9650SE write low 4 bytes first */
1513 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1514 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1515 command_que_value += TW_COMMAND_OFFSET;
1516 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
1519 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1521 if (twa_check_bits(status_reg_value))
1522 twa_decode_bits(tw_dev, status_reg_value);
1524 if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
1526 /* Only pend internal driver commands */
1527 if (!internal) {
1528 retval = SCSI_MLQUEUE_HOST_BUSY;
1529 goto out;
1532 /* Couldn't post the command packet, so we do it later */
1533 if (tw_dev->state[request_id] != TW_S_PENDING) {
1534 tw_dev->state[request_id] = TW_S_PENDING;
1535 tw_dev->pending_request_count++;
1536 if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
1537 tw_dev->max_pending_request_count = tw_dev->pending_request_count;
1539 tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
1540 tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
1542 TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
1543 goto out;
1544 } else {
1545 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1546 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1547 /* Now write upper 4 bytes */
1548 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
1549 } else {
1550 if (sizeof(dma_addr_t) > 4) {
1551 command_que_value += TW_COMMAND_OFFSET;
1552 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1553 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
1554 } else {
1555 writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1558 tw_dev->state[request_id] = TW_S_POSTED;
1559 tw_dev->posted_request_count++;
1560 if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
1561 tw_dev->max_posted_request_count = tw_dev->posted_request_count;
1564 retval = 0;
1565 out:
1566 return retval;
1567 } /* End twa_post_command_packet() */
1569 /* This function will reset a device extension */
1570 static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1572 int i = 0;
1573 int retval = 1;
1574 unsigned long flags = 0;
1576 set_bit(TW_IN_RESET, &tw_dev->flags);
1577 TW_DISABLE_INTERRUPTS(tw_dev);
1578 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1579 spin_lock_irqsave(tw_dev->host->host_lock, flags);
1581 /* Abort all requests that are in progress */
1582 for (i = 0; i < TW_Q_LENGTH; i++) {
1583 if ((tw_dev->state[i] != TW_S_FINISHED) &&
1584 (tw_dev->state[i] != TW_S_INITIAL) &&
1585 (tw_dev->state[i] != TW_S_COMPLETED)) {
1586 if (tw_dev->srb[i]) {
1587 struct scsi_cmnd *cmd = tw_dev->srb[i];
1589 cmd->result = (DID_RESET << 16);
1590 if (twa_command_mapped(cmd))
1591 scsi_dma_unmap(cmd);
1592 cmd->scsi_done(cmd);
1597 /* Reset queues and counts */
1598 for (i = 0; i < TW_Q_LENGTH; i++) {
1599 tw_dev->free_queue[i] = i;
1600 tw_dev->state[i] = TW_S_INITIAL;
1602 tw_dev->free_head = TW_Q_START;
1603 tw_dev->free_tail = TW_Q_START;
1604 tw_dev->posted_request_count = 0;
1605 tw_dev->pending_request_count = 0;
1606 tw_dev->pending_head = TW_Q_START;
1607 tw_dev->pending_tail = TW_Q_START;
1608 tw_dev->reset_print = 0;
1610 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
1612 if (twa_reset_sequence(tw_dev, 1))
1613 goto out;
1615 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
1616 clear_bit(TW_IN_RESET, &tw_dev->flags);
1617 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1619 retval = 0;
1620 out:
1621 return retval;
1622 } /* End twa_reset_device_extension() */
1624 /* This function will reset a controller */
1625 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1627 int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1629 while (tries < TW_MAX_RESET_TRIES) {
1630 if (do_soft_reset) {
1631 TW_SOFT_RESET(tw_dev);
1632 /* Clear pchip/response queue on 9550SX */
1633 if (twa_empty_response_queue_large(tw_dev)) {
1634 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1635 do_soft_reset = 1;
1636 tries++;
1637 continue;
1641 /* Make sure controller is in a good state */
1642 if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
1643 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
1644 do_soft_reset = 1;
1645 tries++;
1646 continue;
1649 /* Empty response queue */
1650 if (twa_empty_response_queue(tw_dev)) {
1651 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
1652 do_soft_reset = 1;
1653 tries++;
1654 continue;
1657 flashed = 0;
1659 /* Check for compatibility/flash */
1660 if (twa_check_srl(tw_dev, &flashed)) {
1661 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
1662 do_soft_reset = 1;
1663 tries++;
1664 continue;
1665 } else {
1666 if (flashed) {
1667 tries++;
1668 continue;
1672 /* Drain the AEN queue */
1673 if (twa_aen_drain_queue(tw_dev, soft_reset)) {
1674 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
1675 do_soft_reset = 1;
1676 tries++;
1677 continue;
1680 /* If we got here, controller is in a good state */
1681 retval = 0;
1682 goto out;
1684 out:
1685 return retval;
1686 } /* End twa_reset_sequence() */
1688 /* This function returns unit geometry in heads/sectors/cylinders */
1689 static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1691 int heads, sectors, cylinders;
1692 TW_Device_Extension *tw_dev;
1694 tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
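/*
 * 0x200000 512-byte sectors is 1 GiB: bigger units get the usual
 * 255-head/63-sector translated geometry, smaller ones 64/32.
 */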
1696 if (capacity >= 0x200000) {
1697 heads = 255;
1698 sectors = 63;
1699 cylinders = sector_div(capacity, heads * sectors);
1700 } else {
1701 heads = 64;
1702 sectors = 32;
1703 cylinders = sector_div(capacity, heads * sectors);
1706 geom[0] = heads;
1707 geom[1] = sectors;
1708 geom[2] = cylinders;
1710 return 0;
1711 } /* End twa_scsi_biosparam() */
1713 /* This is the scsi eh host reset function */
1714 static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1716 TW_Device_Extension *tw_dev = NULL;
1717 int retval = FAILED;
1719 tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1721 tw_dev->num_resets++;
1723 sdev_printk(KERN_WARNING, SCpnt->device,
1724 "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1725 TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1727 /* Make sure we are not issuing an ioctl or resetting from ioctl */
1728 mutex_lock(&tw_dev->ioctl_lock);
1730 /* Now reset the card and some of the device extension data */
1731 if (twa_reset_device_extension(tw_dev)) {
1732 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1733 goto out;
1736 retval = SUCCESS;
1737 out:
1738 mutex_unlock(&tw_dev->ioctl_lock);
1739 return retval;
1740 } /* End twa_scsi_eh_reset() */
1742 /* This is the main scsi queue function to handle scsi opcodes */
1743 static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1745 int request_id, retval;
1746 TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1748 /* If we are resetting due to a timed-out ioctl, report as busy */
1749 if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
1750 retval = SCSI_MLQUEUE_HOST_BUSY;
1751 goto out;
1754 /* Check if this FW supports luns */
1755 if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
1756 SCpnt->result = (DID_BAD_TARGET << 16);
1757 done(SCpnt);
1758 retval = 0;
1759 goto out;
1762 /* Save done function into scsi_cmnd struct */
1763 SCpnt->scsi_done = done;
1765 /* Get a free request id */
1766 twa_get_request_id(tw_dev, &request_id);
1768 /* Save the scsi command for use by the ISR */
1769 tw_dev->srb[request_id] = SCpnt;
1771 retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
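/*
 * twa_scsiop_execute_scsi() returns 0 when the command was posted,
 * SCSI_MLQUEUE_HOST_BUSY when the midlayer should retry later, or 1 on a
 * hard failure, which is completed below with DID_ERROR.
 */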
1772 switch (retval) {
1773 case SCSI_MLQUEUE_HOST_BUSY:
1774 if (twa_command_mapped(SCpnt))
1775 scsi_dma_unmap(SCpnt);
1776 twa_free_request_id(tw_dev, request_id);
1777 break;
1778 case 1:
1779 SCpnt->result = (DID_ERROR << 16);
1780 if (twa_command_mapped(SCpnt))
1781 scsi_dma_unmap(SCpnt);
1782 done(SCpnt);
1783 tw_dev->state[request_id] = TW_S_COMPLETED;
1784 twa_free_request_id(tw_dev, request_id);
1785 retval = 0;
1787 out:
1788 return retval;
1789 } /* End twa_scsi_queue_lck() */
1791 static DEF_SCSI_QCMD(twa_scsi_queue)
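/*
 * DEF_SCSI_QCMD() generates twa_scsi_queue(): a wrapper that takes the
 * Scsi_Host lock and then calls twa_scsi_queue_lck() above; that wrapper
 * is what driver_template.queuecommand points at.
 */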
1793 /* This function hands scsi cdb's to the firmware */
1794 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
1796 TW_Command_Full *full_command_packet;
1797 TW_Command_Apache *command_packet;
1798 u32 num_sectors = 0x0;
1799 int i, sg_count;
1800 struct scsi_cmnd *srb = NULL;
1801 struct scatterlist *sglist = NULL, *sg;
1802 int retval = 1;
1804 if (tw_dev->srb[request_id]) {
1805 srb = tw_dev->srb[request_id];
1806 if (scsi_sglist(srb))
1807 sglist = scsi_sglist(srb);
1810 /* Initialize command packet */
1811 full_command_packet = tw_dev->command_packet_virt[request_id];
1812 full_command_packet->header.header_desc.size_header = 128;
1813 full_command_packet->header.status_block.error = 0;
1814 full_command_packet->header.status_block.severity__reserved = 0;
1816 command_packet = &full_command_packet->command.newcommand;
1817 command_packet->status = 0;
1818 command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
1820 /* We forced 16-byte CDB use earlier */
1821 if (!cdb)
1822 memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
1823 else
1824 memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
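/*
 * The LUN is split across two packet fields: its low 4 bits travel with
 * the request id in request_id__lunl, and the bits above that with the
 * SGL entry count in sgl_entries__lunh (see TW_REQ_LUN_IN() usage below).
 */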
1826 if (srb) {
1827 command_packet->unit = srb->device->id;
1828 command_packet->request_id__lunl =
1829 cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
1830 } else {
1831 command_packet->request_id__lunl =
1832 cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
1833 command_packet->unit = 0;
1836 command_packet->sgl_offset = 16;
1838 if (!sglistarg) {
1839 /* Map sglist from scsi layer to cmd packet */
1841 if (scsi_sg_count(srb)) {
1842 if (!twa_command_mapped(srb)) {
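/*
 * Commands that are not DMA-mapped are bounced through the per-request
 * generic buffer: written data is copied into it here, and read data is
 * copied back out in twa_scsiop_execute_scsi_complete().
 */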
1843 if (srb->sc_data_direction == DMA_TO_DEVICE ||
1844 srb->sc_data_direction == DMA_BIDIRECTIONAL)
1845 scsi_sg_copy_to_buffer(srb,
1846 tw_dev->generic_buffer_virt[request_id],
1847 TW_SECTOR_SIZE);
1848 command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1849 command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1850 } else {
1851 sg_count = scsi_dma_map(srb);
1852 if (sg_count < 0)
1853 goto out;
1855 scsi_for_each_sg(srb, sg, sg_count, i) {
1856 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
1857 command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
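/*
 * Scatter/gather addresses must be aligned to TW_ALIGNMENT_9000_SGL; an
 * unaligned entry is treated as a driver error and fails the command
 * rather than being handed to the firmware.
 */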
1858 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1859 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
1860 goto out;
1864 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
1866 } else {
1867 /* Internal cdb post */
1868 for (i = 0; i < use_sg; i++) {
1869 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
1870 command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
1871 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1872 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
1873 goto out;
1876 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
1879 if (srb) {
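/*
 * Pull the transfer length out of the CDB for the statistics below:
 * byte 4 for 6-byte READ/WRITE, bytes 7-8 (big-endian) for the
 * 10-byte variants.
 */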
1880 if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
1881 num_sectors = (u32)srb->cmnd[4];
1883 if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
1884 num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
1887 /* Update sector statistics */
1888 tw_dev->sector_count = num_sectors;
1889 if (tw_dev->sector_count > tw_dev->max_sector_count)
1890 tw_dev->max_sector_count = tw_dev->sector_count;
1892 /* Update SG statistics */
1893 if (srb) {
1894 tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
1895 if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
1896 tw_dev->max_sgl_entries = tw_dev->sgl_entries;
1899 /* Now post the command to the board */
1900 if (srb) {
1901 retval = twa_post_command_packet(tw_dev, request_id, 0);
1902 } else {
1903 twa_post_command_packet(tw_dev, request_id, 1);
1904 retval = 0;
1906 out:
1907 return retval;
1908 } /* End twa_scsiop_execute_scsi() */
1910 /* This function completes an execute scsi operation */
1911 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1913 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1915 if (!twa_command_mapped(cmd) &&
1916 (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1917 cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1918 if (scsi_sg_count(cmd) == 1) {
1919 void *buf = tw_dev->generic_buffer_virt[request_id];
1921 scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
1924 } /* End twa_scsiop_execute_scsi_complete() */
1926 /* This function tells the controller to shut down */
1927 static void __twa_shutdown(TW_Device_Extension *tw_dev)
1929 /* Disable interrupts */
1930 TW_DISABLE_INTERRUPTS(tw_dev);
1932 /* Free up the IRQ */
1933 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
1935 printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);
1937 /* Tell the card we are shutting down */
1938 if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
1939 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
1940 } else {
1941 printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
1944 /* Clear all interrupts just before exit */
1945 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1946 } /* End __twa_shutdown() */
1948 /* Wrapper for __twa_shutdown */
1949 static void twa_shutdown(struct pci_dev *pdev)
1951 struct Scsi_Host *host = pci_get_drvdata(pdev);
1952 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1954 __twa_shutdown(tw_dev);
1955 } /* End twa_shutdown() */
1957 /* This function will look up a message string by code */
1958 static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1960 int index;
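/* Scan until the code matches or the end-of-table entry (NULL text) is hit */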
1962 for (index = 0; ((code != table[index].code) &&
1963 (table[index].text != (char *)0)); index++);
1964 return(table[index].text);
1965 } /* End twa_string_lookup() */
1967 /* This function gets called when a disk is coming on-line */
1968 static int twa_slave_configure(struct scsi_device *sdev)
1970 /* Force 60 second timeout */
1971 blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
1973 return 0;
1974 } /* End twa_slave_configure() */
1976 /* scsi_host_template initializer */
1977 static struct scsi_host_template driver_template = {
1978 .module = THIS_MODULE,
1979 .name = "3ware 9000 Storage Controller",
1980 .queuecommand = twa_scsi_queue,
1981 .eh_host_reset_handler = twa_scsi_eh_reset,
1982 .bios_param = twa_scsi_biosparam,
1983 .change_queue_depth = scsi_change_queue_depth,
1984 .can_queue = TW_Q_LENGTH-2,
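/* Two of the TW_Q_LENGTH request slots are kept back from the midlayer,
   presumably for internal driver/ioctl requests */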
1985 .slave_configure = twa_slave_configure,
1986 .this_id = -1,
1987 .sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
1988 .max_sectors = TW_MAX_SECTORS,
1989 .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
1990 .use_clustering = ENABLE_CLUSTERING,
1991 .shost_attrs = twa_host_attrs,
1992 .emulated = 1,
1993 .no_write_same = 1,
1996 /* This function will probe and initialize a card */
1997 static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
1999 struct Scsi_Host *host = NULL;
2000 TW_Device_Extension *tw_dev;
2001 unsigned long mem_addr, mem_len;
2002 int retval = -ENODEV;
2004 retval = pci_enable_device(pdev);
2005 if (retval) {
2006 TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
2007 goto out_disable_device;
2010 pci_set_master(pdev);
2011 pci_try_set_mwi(pdev);
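/* Prefer a 64-bit DMA mask, falling back to 32-bit if that fails */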
2013 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2014 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2015 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2016 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2017 TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
2018 retval = -ENODEV;
2019 goto out_disable_device;
2022 host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
2023 if (!host) {
2024 TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
2025 retval = -ENOMEM;
2026 goto out_disable_device;
2028 tw_dev = (TW_Device_Extension *)host->hostdata;
2030 /* Save values to device extension */
2031 tw_dev->host = host;
2032 tw_dev->tw_pci_dev = pdev;
2034 if (twa_initialize_device_extension(tw_dev)) {
2035 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2036 goto out_free_device_extension;
2039 /* Request IO regions */
2040 retval = pci_request_regions(pdev, "3w-9xxx");
2041 if (retval) {
2042 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
2043 goto out_free_device_extension;
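/*
 * The original 9000-series board exposes its registers through BAR 1;
 * the later 9550SX/9650SE/9690SA boards use BAR 2.
 */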
2046 if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
2047 mem_addr = pci_resource_start(pdev, 1);
2048 mem_len = pci_resource_len(pdev, 1);
2049 } else {
2050 mem_addr = pci_resource_start(pdev, 2);
2051 mem_len = pci_resource_len(pdev, 2);
2054 /* Save base address */
2055 tw_dev->base_addr = ioremap(mem_addr, mem_len);
2056 if (!tw_dev->base_addr) {
2057 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2058 goto out_release_mem_region;
2061 /* Disable interrupts on the card */
2062 TW_DISABLE_INTERRUPTS(tw_dev);
2064 /* Initialize the card */
2065 if (twa_reset_sequence(tw_dev, 0))
2066 goto out_iounmap;
2068 /* Set host specific parameters */
2069 if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2070 (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
2071 host->max_id = TW_MAX_UNITS_9650SE;
2072 else
2073 host->max_id = TW_MAX_UNITS;
2075 host->max_cmd_len = TW_MAX_CDB_LEN;
2077 /* Channels aren't supported by the adapter */
2078 host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
2079 host->max_channel = 0;
2081 /* Register the card with the kernel SCSI layer */
2082 retval = scsi_add_host(host, &pdev->dev);
2083 if (retval) {
2084 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
2085 goto out_iounmap;
2088 pci_set_drvdata(pdev, host);
2090 printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
2091 host->host_no, mem_addr, pdev->irq);
2092 printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
2093 host->host_no,
2094 (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
2095 TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
2096 (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
2097 TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
2098 le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
2099 TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
2101 /* Try to enable MSI */
2102 if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
2103 !pci_enable_msi(pdev))
2104 set_bit(TW_USING_MSI, &tw_dev->flags);
2106 /* Now setup the interrupt handler */
2107 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2108 if (retval) {
2109 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
2110 goto out_remove_host;
2113 twa_device_extension_list[twa_device_extension_count] = tw_dev;
2114 twa_device_extension_count++;
2116 /* Re-enable interrupts on the card */
2117 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2119 /* Finally, scan the host */
2120 scsi_scan_host(host);
2122 if (twa_major == -1) {
2123 if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0)
2124 TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
2126 return 0;
2128 out_remove_host:
2129 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2130 pci_disable_msi(pdev);
2131 scsi_remove_host(host);
2132 out_iounmap:
2133 iounmap(tw_dev->base_addr);
2134 out_release_mem_region:
2135 pci_release_regions(pdev);
2136 out_free_device_extension:
2137 twa_free_device_extension(tw_dev);
2138 scsi_host_put(host);
2139 out_disable_device:
2140 pci_disable_device(pdev);
2142 return retval;
2143 } /* End twa_probe() */
2145 /* This function is called to remove a device */
2146 static void twa_remove(struct pci_dev *pdev)
2148 struct Scsi_Host *host = pci_get_drvdata(pdev);
2149 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2151 scsi_remove_host(tw_dev->host);
2153 /* Unregister character device */
2154 if (twa_major >= 0) {
2155 unregister_chrdev(twa_major, "twa");
2156 twa_major = -1;
2159 /* Shutdown the card */
2160 __twa_shutdown(tw_dev);
2162 /* Disable MSI if enabled */
2163 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2164 pci_disable_msi(pdev);
2166 /* Free IO remapping */
2167 iounmap(tw_dev->base_addr);
2169 /* Free up the mem region */
2170 pci_release_regions(pdev);
2172 /* Free up device extension resources */
2173 twa_free_device_extension(tw_dev);
2175 scsi_host_put(tw_dev->host);
2176 pci_disable_device(pdev);
2177 twa_device_extension_count--;
2178 } /* End twa_remove() */
2180 #ifdef CONFIG_PM
2181 /* This function is called on PCI suspend */
2182 static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
2184 struct Scsi_Host *host = pci_get_drvdata(pdev);
2185 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2187 printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);
2189 TW_DISABLE_INTERRUPTS(tw_dev);
2190 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
2192 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2193 pci_disable_msi(pdev);
2195 /* Tell the card we are shutting down */
2196 if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
2197 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
2198 } else {
2199 printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
2201 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
2203 pci_save_state(pdev);
2204 pci_disable_device(pdev);
2205 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2207 return 0;
2208 } /* End twa_suspend() */
2210 /* This function is called on PCI resume */
2211 static int twa_resume(struct pci_dev *pdev)
2213 int retval = 0;
2214 struct Scsi_Host *host = pci_get_drvdata(pdev);
2215 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2217 printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
2218 pci_set_power_state(pdev, PCI_D0);
2219 pci_enable_wake(pdev, PCI_D0, 0);
2220 pci_restore_state(pdev);
2222 retval = pci_enable_device(pdev);
2223 if (retval) {
2224 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
2225 return retval;
2228 pci_set_master(pdev);
2229 pci_try_set_mwi(pdev);
2231 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2232 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2233 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2234 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2235 TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
2236 retval = -ENODEV;
2237 goto out_disable_device;
2240 /* Initialize the card */
2241 if (twa_reset_sequence(tw_dev, 0)) {
2242 retval = -ENODEV;
2243 goto out_disable_device;
2246 /* Now setup the interrupt handler */
2247 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2248 if (retval) {
2249 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
2250 retval = -ENODEV;
2251 goto out_disable_device;
2254 /* Re-enable MSI if it was enabled before suspend */
2255 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2256 pci_enable_msi(pdev);
2258 /* Re-enable interrupts on the card */
2259 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2261 printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
2262 return 0;
2264 out_disable_device:
2265 scsi_remove_host(host);
2266 pci_disable_device(pdev);
2268 return retval;
2269 } /* End twa_resume() */
2270 #endif
2272 /* PCI Devices supported by this driver */
2273 static struct pci_device_id twa_pci_tbl[] = {
2274 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2275 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2276 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2277 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2278 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
2279 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2280 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
2281 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2284 MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
2286 /* pci_driver initializer */
2287 static struct pci_driver twa_driver = {
2288 .name = "3w-9xxx",
2289 .id_table = twa_pci_tbl,
2290 .probe = twa_probe,
2291 .remove = twa_remove,
2292 #ifdef CONFIG_PM
2293 .suspend = twa_suspend,
2294 .resume = twa_resume,
2295 #endif
2296 .shutdown = twa_shutdown
2299 /* This function is called on driver initialization */
2300 static int __init twa_init(void)
2302 printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
2304 return pci_register_driver(&twa_driver);
2305 } /* End twa_init() */
2307 /* This function is called on driver exit */
2308 static void __exit twa_exit(void)
2310 pci_unregister_driver(&twa_driver);
2311 } /* End twa_exit() */
2313 module_init(twa_init);
2314 module_exit(twa_exit);