cciss: fix residual count for block pc requests
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / drivers / block / cciss.c
blobf15b177085370b6bab16e4f4d67cf1f2df65f14e
1 /*
2 * Disk Array driver for HP Smart Array controllers.
3 * (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
17 * 02111-1307, USA.
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/major.h>
31 #include <linux/fs.h>
32 #include <linux/bio.h>
33 #include <linux/blkpg.h>
34 #include <linux/timer.h>
35 #include <linux/proc_fs.h>
36 #include <linux/seq_file.h>
37 #include <linux/init.h>
38 #include <linux/hdreg.h>
39 #include <linux/spinlock.h>
40 #include <linux/compat.h>
41 #include <linux/blktrace_api.h>
42 #include <asm/uaccess.h>
43 #include <asm/io.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/blkdev.h>
47 #include <linux/genhd.h>
48 #include <linux/completion.h>
49 #include <scsi/scsi.h>
50 #include <scsi/sg.h>
51 #include <scsi/scsi_ioctl.h>
52 #include <linux/cdrom.h>
53 #include <linux/scatterlist.h>
55 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
56 #define DRIVER_NAME "HP CISS Driver (v 3.6.20)"
57 #define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 20)
59 /* Embedded module documentation macros - see modules.h */
60 MODULE_AUTHOR("Hewlett-Packard Company");
61 MODULE_DESCRIPTION("Driver for HP Smart Array Controllers");
62 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
63 " SA6i P600 P800 P400 P400i E200 E200i E500 P700m"
64 " Smart Array G2 Series SAS/SATA Controllers");
65 MODULE_VERSION("3.6.20");
66 MODULE_LICENSE("GPL");
68 #include "cciss_cmd.h"
69 #include "cciss.h"
70 #include <linux/cciss_ioctl.h>
72 /* define the PCI info for the cards we can control */
/*
 * PCI match table: explicit subsystem vendor/device pairs for each
 * supported Compaq (0x0E11) and HP (0x103C) Smart Array board, plus a
 * trailing class-based wildcard so any HP device with a RAID storage
 * class code will bind to this driver.
 */
73 static const struct pci_device_id cciss_pci_device_id[] = {
74 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
75 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
76 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
77 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
78 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
79 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
80 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
81 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
82 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
86 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
90 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
91 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
92 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
93 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
94 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
95 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
96 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
97 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
98 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
99 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
100 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
/* Wildcard: any HP device whose PCI class is STORAGE_RAID. */
101 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
102 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
/* Terminating entry. */
103 {0,}
106 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
108 /* board_id = Subsystem Device ID & Vendor ID
109 * product = Marketing Name for the board
110 * access = Address of the struct of function pointers
/*
 * Marketing-name and register-access-method lookup, keyed by
 * board_id (subsystem device ID in the high half, subsystem vendor ID
 * in the low half — e.g. 0x3225103C = device 0x3225, vendor 0x103C).
 * The final 0xFFFF103C entry is the fallback for boards matched only
 * by the class wildcard in cciss_pci_device_id.
 */
112 static struct board_type products[] = {
113 {0x40700E11, "Smart Array 5300", &SA5_access},
114 {0x40800E11, "Smart Array 5i", &SA5B_access},
115 {0x40820E11, "Smart Array 532", &SA5B_access},
116 {0x40830E11, "Smart Array 5312", &SA5B_access},
117 {0x409A0E11, "Smart Array 641", &SA5_access},
118 {0x409B0E11, "Smart Array 642", &SA5_access},
119 {0x409C0E11, "Smart Array 6400", &SA5_access},
120 {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
121 {0x40910E11, "Smart Array 6i", &SA5_access},
122 {0x3225103C, "Smart Array P600", &SA5_access},
123 {0x3223103C, "Smart Array P800", &SA5_access},
124 {0x3234103C, "Smart Array P400", &SA5_access},
125 {0x3235103C, "Smart Array P400i", &SA5_access},
126 {0x3211103C, "Smart Array E200i", &SA5_access},
127 {0x3212103C, "Smart Array E200", &SA5_access},
128 {0x3213103C, "Smart Array E200i", &SA5_access},
129 {0x3214103C, "Smart Array E200i", &SA5_access},
130 {0x3215103C, "Smart Array E200i", &SA5_access},
131 {0x3237103C, "Smart Array E500", &SA5_access},
132 {0x323D103C, "Smart Array P700m", &SA5_access},
133 {0x3241103C, "Smart Array P212", &SA5_access},
134 {0x3243103C, "Smart Array P410", &SA5_access},
135 {0x3245103C, "Smart Array P410i", &SA5_access},
136 {0x3247103C, "Smart Array P411", &SA5_access},
137 {0x3249103C, "Smart Array P812", &SA5_access},
138 {0x324A103C, "Smart Array P712m", &SA5_access},
139 {0x324B103C, "Smart Array P711m", &SA5_access},
/* Fallback name for class-wildcard matches. */
140 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
143 /* How long to wait (in milliseconds) for board to go into simple mode */
144 #define MAX_CONFIG_WAIT 30000
145 #define MAX_IOCTL_CONFIG_WAIT 1000
147 /*define how many times we will try a command because of bus resets */
148 #define MAX_CMD_RETRIES 3
150 #define MAX_CTLR 32
152 /* Originally cciss driver only supports 8 major numbers */
153 #define MAX_CTLR_ORIG 8
155 static ctlr_info_t *hba[MAX_CTLR];
157 static void do_cciss_request(struct request_queue *q);
158 static irqreturn_t do_cciss_intr(int irq, void *dev_id);
159 static int cciss_open(struct block_device *bdev, fmode_t mode);
160 static int cciss_release(struct gendisk *disk, fmode_t mode);
161 static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
162 unsigned int cmd, unsigned long arg);
163 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
165 static int cciss_revalidate(struct gendisk *disk);
166 static int rebuild_lun_table(ctlr_info_t *h, int first_time);
167 static int deregister_disk(ctlr_info_t *h, int drv_index,
168 int clear_all);
170 static void cciss_read_capacity(int ctlr, int logvol, int withirq,
171 sector_t *total_size, unsigned int *block_size);
172 static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
173 sector_t *total_size, unsigned int *block_size);
174 static void cciss_geometry_inquiry(int ctlr, int logvol,
175 int withirq, sector_t total_size,
176 unsigned int block_size, InquiryData_struct *inq_buff,
177 drive_info_struct *drv);
178 static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
179 __u32);
180 static void start_io(ctlr_info_t *h);
181 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
182 unsigned int use_unit_num, unsigned int log_unit,
183 __u8 page_code, unsigned char *scsi3addr, int cmd_type);
184 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
185 unsigned int use_unit_num, unsigned int log_unit,
186 __u8 page_code, int cmd_type);
188 static void fail_all_cmds(unsigned long ctlr);
190 #ifdef CONFIG_PROC_FS
191 static void cciss_procinit(int i);
192 #else
193 static void cciss_procinit(int i)
196 #endif /* CONFIG_PROC_FS */
198 #ifdef CONFIG_COMPAT
199 static int cciss_compat_ioctl(struct block_device *, fmode_t,
200 unsigned, unsigned long);
201 #endif
/*
 * Block-device methods for cciss disks.  ioctl is registered as
 * locked_ioctl, i.e. the block layer calls it under the BKL; the
 * compat path is only compiled in on CONFIG_COMPAT kernels.
 */
203 static struct block_device_operations cciss_fops = {
204 .owner = THIS_MODULE,
205 .open = cciss_open,
206 .release = cciss_release,
207 .locked_ioctl = cciss_ioctl,
208 .getgeo = cciss_getgeo,
209 #ifdef CONFIG_COMPAT
210 .compat_ioctl = cciss_compat_ioctl,
211 #endif
212 .revalidate_disk = cciss_revalidate,
216 * Enqueuing and dequeuing functions for cmdlists.
218 static inline void addQ(struct hlist_head *list, CommandList_struct *c)
220 hlist_add_head(&c->list, list);
223 static inline void removeQ(CommandList_struct *c)
225 if (WARN_ON(hlist_unhashed(&c->list)))
226 return;
228 hlist_del_init(&c->list);
231 #include "cciss_scsi.c" /* For SCSI tape support */
233 #define RAID_UNKNOWN 6
235 #ifdef CONFIG_PROC_FS
238 * Report information about this controller.
240 #define ENG_GIG 1000000000
241 #define ENG_GIG_FACTOR (ENG_GIG/512)
242 #define ENGAGE_SCSI "engage scsi"
243 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
244 "UNKNOWN"
247 static struct proc_dir_entry *proc_cciss;
/*
 * Emit the fixed header for one controller's /proc file: identity,
 * firmware revision, IRQ, and queue-depth statistics, followed by the
 * SCSI tape report when tape support is compiled in.  Counters are
 * read without locking — display only.
 */
249 static void cciss_seq_show_header(struct seq_file *seq)
251 ctlr_info_t *h = seq->private;
253 seq_printf(seq, "%s: HP %s Controller\n"
254 "Board ID: 0x%08lx\n"
255 "Firmware Version: %c%c%c%c\n"
256 "IRQ: %d\n"
257 "Logical drives: %d\n"
258 "Current Q depth: %d\n"
259 "Current # commands on controller: %d\n"
260 "Max Q depth since init: %d\n"
261 "Max # commands on controller since init: %d\n"
262 "Max SG entries since init: %d\n",
263 h->devname,
264 h->product_name,
265 (unsigned long)h->board_id,
266 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
267 h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
268 h->num_luns,
269 h->Qdepth, h->commands_outstanding,
270 h->maxQsinceinit, h->max_outstanding, h->maxSG);
272 #ifdef CONFIG_CISS_SCSI_TAPE
273 cciss_seq_tape_report(seq, h->ctlr);
274 #endif /* CONFIG_CISS_SCSI_TAPE */
/*
 * seq_file start callback: claim h->busy_configuring (under the
 * controller lock) so logical-volume (de)configuration cannot change
 * the drive tables while we are printing, then emit the header on the
 * first iteration only (*pos == 0).  Returns the position cookie, or
 * ERR_PTR(-EBUSY) if a reconfiguration already holds the flag —
 * cciss_seq_stop() uses that sentinel to know not to clear it.
 */
277 static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
279 ctlr_info_t *h = seq->private;
280 unsigned ctlr = h->ctlr;
281 unsigned long flags;
283 /* prevent displaying bogus info during configuration
284 * or deconfiguration of a logical volume
286 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
287 if (h->busy_configuring) {
288 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
289 return ERR_PTR(-EBUSY);
291 h->busy_configuring = 1;
292 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
294 if (*pos == 0)
295 cciss_seq_show_header(seq);
297 return pos;
/*
 * Emit one /proc line per configured logical volume: device name, size
 * in engineering gigabytes (integer part plus two fractional digits,
 * computed with sector_div since nr_blocks is a sector_t), and RAID
 * level.  Positions past highest_lun and unconfigured slots
 * (heads == 0) print nothing.  NOTE(review): clamping drv->raid_level
 * to RAID_UNKNOWN here mutates driver state from a read-side path.
 */
300 static int cciss_seq_show(struct seq_file *seq, void *v)
302 sector_t vol_sz, vol_sz_frac;
303 ctlr_info_t *h = seq->private;
304 unsigned ctlr = h->ctlr;
305 loff_t *pos = v;
306 drive_info_struct *drv = &h->drv[*pos];
308 if (*pos > h->highest_lun)
309 return 0;
311 if (drv->heads == 0)
312 return 0;
314 vol_sz = drv->nr_blocks;
315 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
316 vol_sz_frac *= 100;
317 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
319 if (drv->raid_level > 5)
320 drv->raid_level = RAID_UNKNOWN;
321 seq_printf(seq, "cciss/c%dd%d:"
322 "\t%4u.%02uGB\tRAID %s\n",
323 ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac,
324 raid_label[drv->raid_level]);
325 return 0;
328 static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos)
330 ctlr_info_t *h = seq->private;
332 if (*pos > h->highest_lun)
333 return NULL;
334 *pos += 1;
336 return pos;
339 static void cciss_seq_stop(struct seq_file *seq, void *v)
341 ctlr_info_t *h = seq->private;
343 /* Only reset h->busy_configuring if we succeeded in setting
344 * it during cciss_seq_start. */
345 if (v == ERR_PTR(-EBUSY))
346 return;
348 h->busy_configuring = 0;
/* Iterator ops backing /proc/driver/cciss/<devname>. */
351 static struct seq_operations cciss_seq_ops = {
352 .start = cciss_seq_start,
353 .show = cciss_seq_show,
354 .next = cciss_seq_next,
355 .stop = cciss_seq_stop,
358 static int cciss_seq_open(struct inode *inode, struct file *file)
360 int ret = seq_open(file, &cciss_seq_ops);
361 struct seq_file *seq = file->private_data;
363 if (!ret)
364 seq->private = PDE(inode)->data;
366 return ret;
/*
 * Write handler for the per-controller proc file.  The only command
 * recognized is "engage scsi" (and only when CONFIG_CISS_SCSI_TAPE is
 * set — otherwise every write fails -EINVAL immediately).  Input is
 * copied into one freshly allocated page and NUL-terminated, so writes
 * are bounded to PAGE_SIZE - 1 bytes.  Returns the byte count written
 * on success, or a negative errno.
 */
369 static ssize_t
370 cciss_proc_write(struct file *file, const char __user *buf,
371 size_t length, loff_t *ppos)
373 int err;
374 char *buffer;
376 #ifndef CONFIG_CISS_SCSI_TAPE
377 return -EINVAL;
378 #endif
380 if (!buf || length > PAGE_SIZE - 1)
381 return -EINVAL;
383 buffer = (char *)__get_free_page(GFP_KERNEL);
384 if (!buffer)
385 return -ENOMEM;
387 err = -EFAULT;
388 if (copy_from_user(buffer, buf, length))
389 goto out;
390 buffer[length] = '\0';
392 #ifdef CONFIG_CISS_SCSI_TAPE
393 if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) {
394 struct seq_file *seq = file->private_data;
395 ctlr_info_t *h = seq->private;
396 int rc;
398 rc = cciss_engage_scsi(h->ctlr);
399 if (rc != 0)
400 err = -rc;
401 else
402 err = length;
403 } else
404 #endif /* CONFIG_CISS_SCSI_TAPE */
405 err = -EINVAL;
406 /* might be nice to have "disengage" too, but it's not
407 safely possible. (only 1 module use count, lock issues.) */
409 out:
410 free_page((unsigned long)buffer);
411 return err;
/*
 * file_operations for /proc/driver/cciss/<devname>: reads go through
 * the seq_file iterator, writes through cciss_proc_write().
 */
414 static struct file_operations cciss_proc_fops = {
415 .owner = THIS_MODULE,
416 .open = cciss_seq_open,
417 .read = seq_read,
418 .llseek = seq_lseek,
419 .release = seq_release,
420 .write = cciss_proc_write,
/*
 * Create /proc/driver/cciss/<devname> for controller @i, creating the
 * shared parent directory on first use.  Silently gives up if the
 * directory cannot be made.  NOTE(review): the proc_create_data()
 * result is stored in pde but never checked or used.
 */
423 static void __devinit cciss_procinit(int i)
425 struct proc_dir_entry *pde;
427 if (proc_cciss == NULL)
428 proc_cciss = proc_mkdir("driver/cciss", NULL);
429 if (!proc_cciss)
430 return;
431 pde = proc_create_data(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
432 S_IROTH, proc_cciss,
433 &cciss_proc_fops, hba[i]);
435 #endif /* CONFIG_PROC_FS */
438 * For operations that cannot sleep, a command block is allocated at init,
439 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
440 * which ones are free or in use. For operations that can wait for kmalloc
441 * to possible sleep, this routine can be called with get_from_pool set to 0.
442 * cmd_free() MUST be called with a got_from_pool set to 0 if cmd_alloc was.
/*
 * Allocate a zeroed command block and its matching error-info buffer.
 *
 * get_from_pool == 0: both pieces come from fresh DMA-coherent
 *   allocations (pci_alloc_consistent, may sleep); cmdindex is set to
 *   -1 to mark the command as non-pool for cmd_free().
 * get_from_pool != 0: a free slot is claimed from the controller's
 *   preallocated pool via a find-bit/test_and_set_bit retry loop, so
 *   it is safe without sleeping.
 *
 * In both cases the error descriptor is filled with the 64-bit DMA
 * address of the error buffer, split into lower/upper halves.
 * Returns NULL if allocation fails or the pool is exhausted.
 */
444 static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
446 CommandList_struct *c;
447 int i;
448 u64bit temp64;
449 dma_addr_t cmd_dma_handle, err_dma_handle;
451 if (!get_from_pool) {
452 c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
453 sizeof(CommandList_struct), &cmd_dma_handle);
454 if (c == NULL)
455 return NULL;
456 memset(c, 0, sizeof(CommandList_struct));
458 c->cmdindex = -1;
460 c->err_info = (ErrorInfo_struct *)
461 pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
462 &err_dma_handle);
464 if (c->err_info == NULL) {
/* Undo the command allocation if the error buffer failed. */
465 pci_free_consistent(h->pdev,
466 sizeof(CommandList_struct), c, cmd_dma_handle);
467 return NULL;
469 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
470 } else { /* get it out of the controllers pool */
/* Retry until we atomically win a bit another CPU didn't take first. */
472 do {
473 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
474 if (i == h->nr_cmds)
475 return NULL;
476 } while (test_and_set_bit
477 (i & (BITS_PER_LONG - 1),
478 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
479 #ifdef CCISS_DEBUG
480 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
481 #endif
482 c = h->cmd_pool + i;
483 memset(c, 0, sizeof(CommandList_struct));
/* Pool slots and their DMA addresses are laid out contiguously. */
484 cmd_dma_handle = h->cmd_pool_dhandle
485 + i * sizeof(CommandList_struct);
486 c->err_info = h->errinfo_pool + i;
487 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
488 err_dma_handle = h->errinfo_pool_dhandle
489 + i * sizeof(ErrorInfo_struct);
490 h->nr_allocs++;
492 c->cmdindex = i;
/* Common setup: queue node, bus address, error descriptor. */
495 INIT_HLIST_NODE(&c->list);
496 c->busaddr = (__u32) cmd_dma_handle;
497 temp64.val = (__u64) err_dma_handle;
498 c->ErrDesc.Addr.lower = temp64.val32.lower;
499 c->ErrDesc.Addr.upper = temp64.val32.upper;
500 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
502 c->ctlr = h->ctlr;
503 return c;
507 * Frees a command block that was previously allocated with cmd_alloc().
/*
 * Release a command block obtained from cmd_alloc().  got_from_pool
 * must match the get_from_pool flag used at allocation: standalone
 * commands free both DMA-coherent buffers back to the PCI layer
 * (reassembling the 64-bit error-buffer address from its halves),
 * while pool commands just clear their bitmap bit — the backing
 * storage stays in the pool for reuse.
 */
509 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
511 int i;
512 u64bit temp64;
514 if (!got_from_pool) {
515 temp64.val32.lower = c->ErrDesc.Addr.lower;
516 temp64.val32.upper = c->ErrDesc.Addr.upper;
517 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
518 c->err_info, (dma_addr_t) temp64.val);
519 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
520 c, (dma_addr_t) c->busaddr);
521 } else {
/* Pool index recovered by pointer arithmetic from the pool base. */
522 i = c - h->cmd_pool;
523 clear_bit(i & (BITS_PER_LONG - 1),
524 h->cmd_pool_bits + (i / BITS_PER_LONG));
525 h->nr_frees++;
529 static inline ctlr_info_t *get_host(struct gendisk *disk)
531 return disk->queue->queuedata;
534 static inline drive_info_struct *get_drv(struct gendisk *disk)
536 return disk->private_data;
540 * Open. Make sure the device is really there.
/*
 * Block-device open.  Fails -EBUSY while the controller or this drive
 * is being (re)configured.  An unconfigured volume (heads == 0) may
 * still be opened by CAP_SYS_ADMIN — on node 0, or on partition 0 of a
 * volume that has a LUN ID — so array-config tools can reach the
 * controller and reread disk information.  Bumps the per-drive and
 * per-host open counts on success (dropped again in cciss_release()).
 */
542 static int cciss_open(struct block_device *bdev, fmode_t mode)
544 ctlr_info_t *host = get_host(bdev->bd_disk);
545 drive_info_struct *drv = get_drv(bdev->bd_disk);
547 #ifdef CCISS_DEBUG
548 printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name);
549 #endif /* CCISS_DEBUG */
551 if (host->busy_initializing || drv->busy_configuring)
552 return -EBUSY;
554 * Root is allowed to open raw volume zero even if it's not configured
555 * so array config can still work. Root is also allowed to open any
556 * volume that has a LUN ID, so it can issue IOCTL to reread the
557 * disk information. I don't think I really like this
558 * but I'm already using way to many device nodes to claim another one
559 * for "raw controller".
561 if (drv->heads == 0) {
562 if (MINOR(bdev->bd_dev) != 0) { /* not node 0? */
563 /* if not node 0 make sure it is a partition = 0 */
564 if (MINOR(bdev->bd_dev) & 0x0f) {
565 return -ENXIO;
566 /* if it is, make sure we have a LUN ID */
567 } else if (drv->LunID == 0) {
568 return -ENXIO;
571 if (!capable(CAP_SYS_ADMIN))
572 return -EPERM;
574 drv->usage_count++;
575 host->usage_count++;
576 return 0;
580 * Close. Sync first.
582 static int cciss_release(struct gendisk *disk, fmode_t mode)
584 ctlr_info_t *host = get_host(disk);
585 drive_info_struct *drv = get_drv(disk);
587 #ifdef CCISS_DEBUG
588 printk(KERN_DEBUG "cciss_release %s\n", disk->disk_name);
589 #endif /* CCISS_DEBUG */
591 drv->usage_count--;
592 host->usage_count--;
593 return 0;
596 #ifdef CONFIG_COMPAT
598 static int do_ioctl(struct block_device *bdev, fmode_t mode,
599 unsigned cmd, unsigned long arg)
601 int ret;
602 lock_kernel();
603 ret = cciss_ioctl(bdev, mode, cmd, arg);
604 unlock_kernel();
605 return ret;
608 static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
609 unsigned cmd, unsigned long arg);
610 static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
611 unsigned cmd, unsigned long arg);
/*
 * 32-bit compat ioctl entry point.  Commands whose structures have
 * identical 32/64-bit layouts are forwarded to the native handler
 * unchanged; the two passthru commands embed a user-space buffer
 * pointer and need explicit 32->64 translation.
 */
613 static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode,
614 unsigned cmd, unsigned long arg)
616 switch (cmd) {
617 case CCISS_GETPCIINFO:
618 case CCISS_GETINTINFO:
619 case CCISS_SETINTINFO:
620 case CCISS_GETNODENAME:
621 case CCISS_SETNODENAME:
622 case CCISS_GETHEARTBEAT:
623 case CCISS_GETBUSTYPES:
624 case CCISS_GETFIRMVER:
625 case CCISS_GETDRIVVER:
626 case CCISS_REVALIDVOLS:
627 case CCISS_DEREGDISK:
628 case CCISS_REGNEWDISK:
629 case CCISS_REGNEWD:
630 case CCISS_RESCANDISK:
631 case CCISS_GETLUNINFO:
632 return do_ioctl(bdev, mode, cmd, arg);
634 case CCISS_PASSTHRU32:
635 return cciss_ioctl32_passthru(bdev, mode, cmd, arg);
636 case CCISS_BIG_PASSTHRU32:
637 return cciss_ioctl32_big_passthru(bdev, mode, cmd, arg);
639 default:
640 return -ENOIOCTLCMD;
/*
 * Translate a 32-bit CCISS_PASSTHRU: copy the 32-bit struct field by
 * field into a 64-bit image placed on the compat user stack
 * (compat_alloc_user_space), widening the buffer pointer via
 * compat_ptr(), issue the native CCISS_PASSTHRU, then copy error_info
 * back into the caller's 32-bit struct.  Any copy failure maps to
 * -EFAULT; the ORed err is known zero when reused after do_ioctl().
 */
644 static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
645 unsigned cmd, unsigned long arg)
647 IOCTL32_Command_struct __user *arg32 =
648 (IOCTL32_Command_struct __user *) arg;
649 IOCTL_Command_struct arg64;
650 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
651 int err;
652 u32 cp;
654 err = 0;
655 err |=
656 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
657 sizeof(arg64.LUN_info));
658 err |=
659 copy_from_user(&arg64.Request, &arg32->Request,
660 sizeof(arg64.Request));
661 err |=
662 copy_from_user(&arg64.error_info, &arg32->error_info,
663 sizeof(arg64.error_info));
664 err |= get_user(arg64.buf_size, &arg32->buf_size);
665 err |= get_user(cp, &arg32->buf);
666 arg64.buf = compat_ptr(cp);
667 err |= copy_to_user(p, &arg64, sizeof(arg64));
669 if (err)
670 return -EFAULT;
672 err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
673 if (err)
674 return err;
675 err |=
676 copy_in_user(&arg32->error_info, &p->error_info,
677 sizeof(arg32->error_info));
678 if (err)
679 return -EFAULT;
680 return err;
/*
 * Same 32->64 translation as cciss_ioctl32_passthru(), but for
 * CCISS_BIG_PASSTHRU, which additionally carries malloc_size for the
 * multi-SG transfer.  The 64-bit image lives on the compat user stack;
 * error_info is copied back to the 32-bit caller afterwards.
 */
683 static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
684 unsigned cmd, unsigned long arg)
686 BIG_IOCTL32_Command_struct __user *arg32 =
687 (BIG_IOCTL32_Command_struct __user *) arg;
688 BIG_IOCTL_Command_struct arg64;
689 BIG_IOCTL_Command_struct __user *p =
690 compat_alloc_user_space(sizeof(arg64));
691 int err;
692 u32 cp;
694 err = 0;
695 err |=
696 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
697 sizeof(arg64.LUN_info));
698 err |=
699 copy_from_user(&arg64.Request, &arg32->Request,
700 sizeof(arg64.Request));
701 err |=
702 copy_from_user(&arg64.error_info, &arg32->error_info,
703 sizeof(arg64.error_info));
704 err |= get_user(arg64.buf_size, &arg32->buf_size);
705 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
706 err |= get_user(cp, &arg32->buf);
707 arg64.buf = compat_ptr(cp);
708 err |= copy_to_user(p, &arg64, sizeof(arg64));
710 if (err)
711 return -EFAULT;
713 err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
714 if (err)
715 return err;
716 err |=
717 copy_in_user(&arg32->error_info, &p->error_info,
718 sizeof(arg32->error_info));
719 if (err)
720 return -EFAULT;
721 return err;
723 #endif
725 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
727 drive_info_struct *drv = get_drv(bdev->bd_disk);
729 if (!drv->cylinders)
730 return -ENXIO;
732 geo->heads = drv->heads;
733 geo->sectors = drv->sectors;
734 geo->cylinders = drv->cylinders;
735 return 0;
739 * ioctl
741 static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
742 unsigned int cmd, unsigned long arg)
744 struct gendisk *disk = bdev->bd_disk;
745 ctlr_info_t *host = get_host(disk);
746 drive_info_struct *drv = get_drv(disk);
747 int ctlr = host->ctlr;
748 void __user *argp = (void __user *)arg;
750 #ifdef CCISS_DEBUG
751 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
752 #endif /* CCISS_DEBUG */
754 switch (cmd) {
755 case CCISS_GETPCIINFO:
757 cciss_pci_info_struct pciinfo;
759 if (!arg)
760 return -EINVAL;
761 pciinfo.domain = pci_domain_nr(host->pdev->bus);
762 pciinfo.bus = host->pdev->bus->number;
763 pciinfo.dev_fn = host->pdev->devfn;
764 pciinfo.board_id = host->board_id;
765 if (copy_to_user
766 (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
767 return -EFAULT;
768 return 0;
770 case CCISS_GETINTINFO:
772 cciss_coalint_struct intinfo;
773 if (!arg)
774 return -EINVAL;
775 intinfo.delay =
776 readl(&host->cfgtable->HostWrite.CoalIntDelay);
777 intinfo.count =
778 readl(&host->cfgtable->HostWrite.CoalIntCount);
779 if (copy_to_user
780 (argp, &intinfo, sizeof(cciss_coalint_struct)))
781 return -EFAULT;
782 return 0;
784 case CCISS_SETINTINFO:
786 cciss_coalint_struct intinfo;
787 unsigned long flags;
788 int i;
790 if (!arg)
791 return -EINVAL;
792 if (!capable(CAP_SYS_ADMIN))
793 return -EPERM;
794 if (copy_from_user
795 (&intinfo, argp, sizeof(cciss_coalint_struct)))
796 return -EFAULT;
797 if ((intinfo.delay == 0) && (intinfo.count == 0))
799 // printk("cciss_ioctl: delay and count cannot be 0\n");
800 return -EINVAL;
802 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
803 /* Update the field, and then ring the doorbell */
804 writel(intinfo.delay,
805 &(host->cfgtable->HostWrite.CoalIntDelay));
806 writel(intinfo.count,
807 &(host->cfgtable->HostWrite.CoalIntCount));
808 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
810 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
811 if (!(readl(host->vaddr + SA5_DOORBELL)
812 & CFGTBL_ChangeReq))
813 break;
814 /* delay and try again */
815 udelay(1000);
817 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
818 if (i >= MAX_IOCTL_CONFIG_WAIT)
819 return -EAGAIN;
820 return 0;
822 case CCISS_GETNODENAME:
824 NodeName_type NodeName;
825 int i;
827 if (!arg)
828 return -EINVAL;
829 for (i = 0; i < 16; i++)
830 NodeName[i] =
831 readb(&host->cfgtable->ServerName[i]);
832 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
833 return -EFAULT;
834 return 0;
836 case CCISS_SETNODENAME:
838 NodeName_type NodeName;
839 unsigned long flags;
840 int i;
842 if (!arg)
843 return -EINVAL;
844 if (!capable(CAP_SYS_ADMIN))
845 return -EPERM;
847 if (copy_from_user
848 (NodeName, argp, sizeof(NodeName_type)))
849 return -EFAULT;
851 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
853 /* Update the field, and then ring the doorbell */
854 for (i = 0; i < 16; i++)
855 writeb(NodeName[i],
856 &host->cfgtable->ServerName[i]);
858 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
860 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
861 if (!(readl(host->vaddr + SA5_DOORBELL)
862 & CFGTBL_ChangeReq))
863 break;
864 /* delay and try again */
865 udelay(1000);
867 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
868 if (i >= MAX_IOCTL_CONFIG_WAIT)
869 return -EAGAIN;
870 return 0;
873 case CCISS_GETHEARTBEAT:
875 Heartbeat_type heartbeat;
877 if (!arg)
878 return -EINVAL;
879 heartbeat = readl(&host->cfgtable->HeartBeat);
880 if (copy_to_user
881 (argp, &heartbeat, sizeof(Heartbeat_type)))
882 return -EFAULT;
883 return 0;
885 case CCISS_GETBUSTYPES:
887 BusTypes_type BusTypes;
889 if (!arg)
890 return -EINVAL;
891 BusTypes = readl(&host->cfgtable->BusTypes);
892 if (copy_to_user
893 (argp, &BusTypes, sizeof(BusTypes_type)))
894 return -EFAULT;
895 return 0;
897 case CCISS_GETFIRMVER:
899 FirmwareVer_type firmware;
901 if (!arg)
902 return -EINVAL;
903 memcpy(firmware, host->firm_ver, 4);
905 if (copy_to_user
906 (argp, firmware, sizeof(FirmwareVer_type)))
907 return -EFAULT;
908 return 0;
910 case CCISS_GETDRIVVER:
912 DriverVer_type DriverVer = DRIVER_VERSION;
914 if (!arg)
915 return -EINVAL;
917 if (copy_to_user
918 (argp, &DriverVer, sizeof(DriverVer_type)))
919 return -EFAULT;
920 return 0;
923 case CCISS_DEREGDISK:
924 case CCISS_REGNEWD:
925 case CCISS_REVALIDVOLS:
926 return rebuild_lun_table(host, 0);
928 case CCISS_GETLUNINFO:{
929 LogvolInfo_struct luninfo;
931 luninfo.LunID = drv->LunID;
932 luninfo.num_opens = drv->usage_count;
933 luninfo.num_parts = 0;
934 if (copy_to_user(argp, &luninfo,
935 sizeof(LogvolInfo_struct)))
936 return -EFAULT;
937 return 0;
939 case CCISS_PASSTHRU:
941 IOCTL_Command_struct iocommand;
942 CommandList_struct *c;
943 char *buff = NULL;
944 u64bit temp64;
945 unsigned long flags;
946 DECLARE_COMPLETION_ONSTACK(wait);
948 if (!arg)
949 return -EINVAL;
951 if (!capable(CAP_SYS_RAWIO))
952 return -EPERM;
954 if (copy_from_user
955 (&iocommand, argp, sizeof(IOCTL_Command_struct)))
956 return -EFAULT;
957 if ((iocommand.buf_size < 1) &&
958 (iocommand.Request.Type.Direction != XFER_NONE)) {
959 return -EINVAL;
961 #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
962 /* Check kmalloc limits */
963 if (iocommand.buf_size > 128000)
964 return -EINVAL;
965 #endif
966 if (iocommand.buf_size > 0) {
967 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
968 if (buff == NULL)
969 return -EFAULT;
971 if (iocommand.Request.Type.Direction == XFER_WRITE) {
972 /* Copy the data into the buffer we created */
973 if (copy_from_user
974 (buff, iocommand.buf, iocommand.buf_size)) {
975 kfree(buff);
976 return -EFAULT;
978 } else {
979 memset(buff, 0, iocommand.buf_size);
981 if ((c = cmd_alloc(host, 0)) == NULL) {
982 kfree(buff);
983 return -ENOMEM;
985 // Fill in the command type
986 c->cmd_type = CMD_IOCTL_PEND;
987 // Fill in Command Header
988 c->Header.ReplyQueue = 0; // unused in simple mode
989 if (iocommand.buf_size > 0) // buffer to fill
991 c->Header.SGList = 1;
992 c->Header.SGTotal = 1;
993 } else // no buffers to fill
995 c->Header.SGList = 0;
996 c->Header.SGTotal = 0;
998 c->Header.LUN = iocommand.LUN_info;
999 c->Header.Tag.lower = c->busaddr; // use the kernel address the cmd block for tag
1001 // Fill in Request block
1002 c->Request = iocommand.Request;
1004 // Fill in the scatter gather information
1005 if (iocommand.buf_size > 0) {
1006 temp64.val = pci_map_single(host->pdev, buff,
1007 iocommand.buf_size,
1008 PCI_DMA_BIDIRECTIONAL);
1009 c->SG[0].Addr.lower = temp64.val32.lower;
1010 c->SG[0].Addr.upper = temp64.val32.upper;
1011 c->SG[0].Len = iocommand.buf_size;
1012 c->SG[0].Ext = 0; // we are not chaining
1014 c->waiting = &wait;
1016 /* Put the request on the tail of the request queue */
1017 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1018 addQ(&host->reqQ, c);
1019 host->Qdepth++;
1020 start_io(host);
1021 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1023 wait_for_completion(&wait);
1025 /* unlock the buffers from DMA */
1026 temp64.val32.lower = c->SG[0].Addr.lower;
1027 temp64.val32.upper = c->SG[0].Addr.upper;
1028 pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
1029 iocommand.buf_size,
1030 PCI_DMA_BIDIRECTIONAL);
1032 /* Copy the error information out */
1033 iocommand.error_info = *(c->err_info);
1034 if (copy_to_user
1035 (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
1036 kfree(buff);
1037 cmd_free(host, c, 0);
1038 return -EFAULT;
1041 if (iocommand.Request.Type.Direction == XFER_READ) {
1042 /* Copy the data out of the buffer we created */
1043 if (copy_to_user
1044 (iocommand.buf, buff, iocommand.buf_size)) {
1045 kfree(buff);
1046 cmd_free(host, c, 0);
1047 return -EFAULT;
1050 kfree(buff);
1051 cmd_free(host, c, 0);
1052 return 0;
1054 case CCISS_BIG_PASSTHRU:{
1055 BIG_IOCTL_Command_struct *ioc;
1056 CommandList_struct *c;
1057 unsigned char **buff = NULL;
1058 int *buff_size = NULL;
1059 u64bit temp64;
1060 unsigned long flags;
1061 BYTE sg_used = 0;
1062 int status = 0;
1063 int i;
1064 DECLARE_COMPLETION_ONSTACK(wait);
1065 __u32 left;
1066 __u32 sz;
1067 BYTE __user *data_ptr;
1069 if (!arg)
1070 return -EINVAL;
1071 if (!capable(CAP_SYS_RAWIO))
1072 return -EPERM;
1073 ioc = (BIG_IOCTL_Command_struct *)
1074 kmalloc(sizeof(*ioc), GFP_KERNEL);
1075 if (!ioc) {
1076 status = -ENOMEM;
1077 goto cleanup1;
1079 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
1080 status = -EFAULT;
1081 goto cleanup1;
1083 if ((ioc->buf_size < 1) &&
1084 (ioc->Request.Type.Direction != XFER_NONE)) {
1085 status = -EINVAL;
1086 goto cleanup1;
1088 /* Check kmalloc limits using all SGs */
1089 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
1090 status = -EINVAL;
1091 goto cleanup1;
1093 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
1094 status = -EINVAL;
1095 goto cleanup1;
1097 buff =
1098 kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1099 if (!buff) {
1100 status = -ENOMEM;
1101 goto cleanup1;
1103 buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
1104 GFP_KERNEL);
1105 if (!buff_size) {
1106 status = -ENOMEM;
1107 goto cleanup1;
1109 left = ioc->buf_size;
1110 data_ptr = ioc->buf;
1111 while (left) {
1112 sz = (left >
1113 ioc->malloc_size) ? ioc->
1114 malloc_size : left;
1115 buff_size[sg_used] = sz;
1116 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1117 if (buff[sg_used] == NULL) {
1118 status = -ENOMEM;
1119 goto cleanup1;
1121 if (ioc->Request.Type.Direction == XFER_WRITE) {
1122 if (copy_from_user
1123 (buff[sg_used], data_ptr, sz)) {
1124 status = -EFAULT;
1125 goto cleanup1;
1127 } else {
1128 memset(buff[sg_used], 0, sz);
1130 left -= sz;
1131 data_ptr += sz;
1132 sg_used++;
1134 if ((c = cmd_alloc(host, 0)) == NULL) {
1135 status = -ENOMEM;
1136 goto cleanup1;
1138 c->cmd_type = CMD_IOCTL_PEND;
1139 c->Header.ReplyQueue = 0;
1141 if (ioc->buf_size > 0) {
1142 c->Header.SGList = sg_used;
1143 c->Header.SGTotal = sg_used;
1144 } else {
1145 c->Header.SGList = 0;
1146 c->Header.SGTotal = 0;
1148 c->Header.LUN = ioc->LUN_info;
1149 c->Header.Tag.lower = c->busaddr;
1151 c->Request = ioc->Request;
1152 if (ioc->buf_size > 0) {
1153 int i;
1154 for (i = 0; i < sg_used; i++) {
1155 temp64.val =
1156 pci_map_single(host->pdev, buff[i],
1157 buff_size[i],
1158 PCI_DMA_BIDIRECTIONAL);
1159 c->SG[i].Addr.lower =
1160 temp64.val32.lower;
1161 c->SG[i].Addr.upper =
1162 temp64.val32.upper;
1163 c->SG[i].Len = buff_size[i];
1164 c->SG[i].Ext = 0; /* we are not chaining */
1167 c->waiting = &wait;
1168 /* Put the request on the tail of the request queue */
1169 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1170 addQ(&host->reqQ, c);
1171 host->Qdepth++;
1172 start_io(host);
1173 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1174 wait_for_completion(&wait);
1175 /* unlock the buffers from DMA */
1176 for (i = 0; i < sg_used; i++) {
1177 temp64.val32.lower = c->SG[i].Addr.lower;
1178 temp64.val32.upper = c->SG[i].Addr.upper;
1179 pci_unmap_single(host->pdev,
1180 (dma_addr_t) temp64.val, buff_size[i],
1181 PCI_DMA_BIDIRECTIONAL);
1183 /* Copy the error information out */
1184 ioc->error_info = *(c->err_info);
1185 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1186 cmd_free(host, c, 0);
1187 status = -EFAULT;
1188 goto cleanup1;
1190 if (ioc->Request.Type.Direction == XFER_READ) {
1191 /* Copy the data out of the buffer we created */
1192 BYTE __user *ptr = ioc->buf;
1193 for (i = 0; i < sg_used; i++) {
1194 if (copy_to_user
1195 (ptr, buff[i], buff_size[i])) {
1196 cmd_free(host, c, 0);
1197 status = -EFAULT;
1198 goto cleanup1;
1200 ptr += buff_size[i];
1203 cmd_free(host, c, 0);
1204 status = 0;
1205 cleanup1:
1206 if (buff) {
1207 for (i = 0; i < sg_used; i++)
1208 kfree(buff[i]);
1209 kfree(buff);
1211 kfree(buff_size);
1212 kfree(ioc);
1213 return status;
1216 /* scsi_cmd_ioctl handles these, below, though some are not */
1217 /* very meaningful for cciss. SG_IO is the main one people want. */
1219 case SG_GET_VERSION_NUM:
1220 case SG_SET_TIMEOUT:
1221 case SG_GET_TIMEOUT:
1222 case SG_GET_RESERVED_SIZE:
1223 case SG_SET_RESERVED_SIZE:
1224 case SG_EMULATED_HOST:
1225 case SG_IO:
1226 case SCSI_IOCTL_SEND_COMMAND:
1227 return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
1229 /* scsi_cmd_ioctl would normally handle these, below, but */
1230 /* they aren't a good fit for cciss, as CD-ROMs are */
1231 /* not supported, and we don't have any bus/target/lun */
1232 /* which we present to the kernel. */
1234 case CDROM_SEND_PACKET:
1235 case CDROMCLOSETRAY:
1236 case CDROMEJECT:
1237 case SCSI_IOCTL_GET_IDLUN:
1238 case SCSI_IOCTL_GET_BUS_NUMBER:
1239 default:
1240 return -ENOTTY;
/*
 * cciss_check_queues() - restart per-logical-drive block queues once
 * command slots are free again.
 *
 * Called after a command completes (e.g. from cciss_softirq_done(),
 * which holds h->lock while calling us).  h->next_to_run rotates the
 * starting queue across calls so each drive gets a fair chance to be
 * the first one restarted.
 */
1244 static void cciss_check_queues(ctlr_info_t *h)
1246 int start_queue = h->next_to_run;
1247 int i;
1249 /* check to see if we have maxed out the number of commands that can
1250 * be placed on the queue. If so then exit. We do this check here
1251 * in case the interrupt we serviced was from an ioctl and did not
1252 * free any new commands.
/* find_first_zero_bit() returning nr_cmds means the pool is exhausted */
1254 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
1255 return;
1257 /* We have room on the queue for more commands. Now we need to queue
1258 * them up. We will also keep track of the next queue to run so
1259 * that every queue gets a chance to be started first.
1261 for (i = 0; i < h->highest_lun + 1; i++) {
1262 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1263 /* make sure the disk has been added and the drive is real
1264 * because this can be called from the middle of init_one.
1266 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1267 continue;
1268 blk_start_queue(h->gendisk[curr_queue]->queue);
1270 /* check to see if we have maxed out the number of commands
1271 * that can be placed on the queue.
1273 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
/* Pool ran dry again: record where to resume next time so queue
 * starting stays round-robin fair, then stop. */
1274 if (curr_queue == start_queue) {
1275 h->next_to_run =
1276 (start_queue + 1) % (h->highest_lun + 1);
1277 break;
1278 } else {
1279 h->next_to_run = curr_queue;
1280 break;
/*
 * cciss_softirq_done() - block-layer softirq completion handler for a
 * finished command (registered via blk_queue_softirq_done() in
 * cciss_add_disk()).
 *
 * Unmaps the scatter-gather DMA buffers, completes the request, returns
 * the command to the pool and restarts any drive queues that stalled
 * waiting for a free command slot.
 */
1286 static void cciss_softirq_done(struct request *rq)
1288 CommandList_struct *cmd = rq->completion_data;
1289 ctlr_info_t *h = hba[cmd->ctlr];
1290 unsigned int nr_bytes;
1291 unsigned long flags;
1292 u64bit temp64;
1293 int i, ddir;
/* Unmap direction must mirror how do_cciss_request mapped the pages. */
1295 if (cmd->Request.Type.Direction == XFER_READ)
1296 ddir = PCI_DMA_FROMDEVICE;
1297 else
1298 ddir = PCI_DMA_TODEVICE;
1300 /* command did not need to be retried */
1301 /* unmap the DMA mapping for all the scatter gather elements */
1302 for (i = 0; i < cmd->Header.SGList; i++) {
1303 temp64.val32.lower = cmd->SG[i].Addr.lower;
1304 temp64.val32.upper = cmd->SG[i].Addr.upper;
1305 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1308 #ifdef CCISS_DEBUG
1309 printk("Done with %p\n", rq);
1310 #endif /* CCISS_DEBUG */
/* Capture the full request size BEFORE overwriting rq->data_len:
 * for BLOCK_PC (SG_IO passthrough) requests, data_len must report the
 * residual byte count from the controller, but blk_end_request()
 * still needs the original size to complete the whole request. */
1313 * Store the full size and set the residual count for pc requests
1315 nr_bytes = blk_rq_bytes(rq);
1316 if (blk_pc_request(rq))
1317 rq->data_len = cmd->err_info->ResidualCnt;
/* nr_bytes covers the entire request, so completion must be total;
 * a non-zero return would mean bytes were left over. */
1319 if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, nr_bytes))
1320 BUG();
/* Return the command slot and kick any stalled drive queues. */
1322 spin_lock_irqsave(&h->lock, flags);
1323 cmd_free(h, cmd, 1);
1324 cciss_check_queues(h);
1325 spin_unlock_irqrestore(&h->lock, flags);
1328 /* This function gets the serial number of a logical drive via
1329 * inquiry page 0x83. Serial no. is 16 bytes. If the serial
1330 * number cannot be had, for whatever reason, 16 bytes of 0xff
1331 * are returned instead.
1333 static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
1334 unsigned char *serial_no, int buflen)
1336 #define PAGE_83_INQ_BYTES 64
1337 int rc;
1338 unsigned char *buf;
1340 if (buflen > 16)
1341 buflen = 16;
1342 memset(serial_no, 0xff, buflen);
1343 buf = kzalloc(PAGE_83_INQ_BYTES, GFP_KERNEL);
1344 if (!buf)
1345 return;
1346 memset(serial_no, 0, buflen);
1347 if (withirq)
1348 rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf,
1349 PAGE_83_INQ_BYTES, 1, logvol, 0x83, TYPE_CMD);
1350 else
1351 rc = sendcmd(CISS_INQUIRY, ctlr, buf,
1352 PAGE_83_INQ_BYTES, 1, logvol, 0x83, NULL, TYPE_CMD);
1353 if (rc == IO_OK)
1354 memcpy(serial_no, &buf[8], buflen);
1355 kfree(buf);
1356 return;
1359 static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
1360 int drv_index)
1362 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1363 sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index);
1364 disk->major = h->major;
1365 disk->first_minor = drv_index << NWD_SHIFT;
1366 disk->fops = &cciss_fops;
1367 disk->private_data = &h->drv[drv_index];
1368 disk->driverfs_dev = &h->pdev->dev;
1370 /* Set up queue information */
1371 blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
1373 /* This is a hardware imposed limit. */
1374 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1376 /* This is a limit in the driver and could be eliminated. */
1377 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1379 blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
1381 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1383 disk->queue->queuedata = h;
1385 blk_queue_hardsect_size(disk->queue,
1386 h->drv[drv_index].block_size);
1388 /* Make sure all queue data is written out before */
1389 /* setting h->drv[drv_index].queue, as setting this */
1390 /* allows the interrupt handler to start the queue */
1391 wmb();
1392 h->drv[drv_index].queue = disk->queue;
1393 add_disk(disk);
1396 /* This function will check the usage_count of the drive to be updated/added.
1397 * If the usage_count is zero and it is a heretofore unknown drive, or,
1398 * the drive's capacity, geometry, or serial number has changed,
1399 * then the drive information will be updated and the disk will be
1400 * re-registered with the kernel. If these conditions don't hold,
1401 * then it will be left alone for the next reboot. The exception to this
1402 * is disk 0 which will always be left registered with the kernel since it
1403 * is also the controller node. Any changes to disk 0 will show up on
1404 * the next reboot.
1406 static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1408 ctlr_info_t *h = hba[ctlr];
1409 struct gendisk *disk;
1410 InquiryData_struct *inq_buff = NULL;
1411 unsigned int block_size;
1412 sector_t total_size;
1413 unsigned long flags = 0;
1414 int ret = 0;
1415 drive_info_struct *drvinfo;
1416 int was_only_controller_node;
1418 /* Get information about the disk and modify the driver structure */
1419 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1420 drvinfo = kmalloc(sizeof(*drvinfo), GFP_KERNEL);
1421 if (inq_buff == NULL || drvinfo == NULL)
1422 goto mem_msg;
1424 /* See if we're trying to update the "controller node"
1425 * this will happen the when the first logical drive gets
1426 * created by ACU.
/* NOTE(review): was_only_controller_node is assigned below but never
 * read anywhere in this function — verify whether it is dead code. */
1428 was_only_controller_node = (drv_index == 0 &&
1429 h->drv[0].raid_level == -1);
1431 /* testing to see if 16-byte CDBs are already being used */
/* NOTE(review): this branch passes h->ctlr where the else branch uses
 * the local ctlr; they are the same value here, but the mix is
 * inconsistent. */
1432 if (h->cciss_read == CCISS_READ_16) {
1433 cciss_read_capacity_16(h->ctlr, drv_index, 1,
1434 &total_size, &block_size);
1436 } else {
1437 cciss_read_capacity(ctlr, drv_index, 1,
1438 &total_size, &block_size);
1440 /* if read_capacity returns all F's this volume is >2TB */
1441 /* in size so we switch to 16-byte CDB's for all */
1442 /* read/write ops */
1443 if (total_size == 0xFFFFFFFFULL) {
1444 cciss_read_capacity_16(ctlr, drv_index, 1,
1445 &total_size, &block_size);
1446 h->cciss_read = CCISS_READ_16;
1447 h->cciss_write = CCISS_WRITE_16;
1448 } else {
1449 h->cciss_read = CCISS_READ_10;
1450 h->cciss_write = CCISS_WRITE_10;
/* Gather geometry/serial data into the scratch drvinfo so we can
 * compare it against what the driver already knows. */
1454 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1455 inq_buff, drvinfo);
1456 drvinfo->block_size = block_size;
/* READ CAPACITY reports the last LBA, hence the +1 for block count. */
1457 drvinfo->nr_blocks = total_size + 1;
1459 cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no,
1460 sizeof(drvinfo->serial_no));
1462 /* Is it the same disk we already know, and nothing's changed? */
1463 if (h->drv[drv_index].raid_level != -1 &&
1464 ((memcmp(drvinfo->serial_no,
1465 h->drv[drv_index].serial_no, 16) == 0) &&
1466 drvinfo->block_size == h->drv[drv_index].block_size &&
1467 drvinfo->nr_blocks == h->drv[drv_index].nr_blocks &&
1468 drvinfo->heads == h->drv[drv_index].heads &&
1469 drvinfo->sectors == h->drv[drv_index].sectors &&
1470 drvinfo->cylinders == h->drv[drv_index].cylinders))
1471 /* The disk is unchanged, nothing to update */
1472 goto freeret;
1474 /* If we get here it's not the same disk, or something's changed,
1475 * so we need to * deregister it, and re-register it, if it's not
1476 * in use.
1477 * If the disk already exists then deregister it before proceeding
1478 * (unless it's the first disk (for the controller node).
1480 if (h->drv[drv_index].raid_level != -1 && drv_index != 0) {
1481 printk(KERN_WARNING "disk %d has changed.\n", drv_index);
1482 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1483 h->drv[drv_index].busy_configuring = 1;
1484 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1486 /* deregister_disk sets h->drv[drv_index].queue = NULL
1487 * which keeps the interrupt handler from starting
1488 * the queue.
1490 ret = deregister_disk(h, drv_index, 0);
1491 h->drv[drv_index].busy_configuring = 0;
1494 /* If the disk is in use return */
1495 if (ret)
1496 goto freeret;
1498 /* Save the new information from cciss_geometry_inquiry
1499 * and serial number inquiry.
1501 h->drv[drv_index].block_size = drvinfo->block_size;
1502 h->drv[drv_index].nr_blocks = drvinfo->nr_blocks;
1503 h->drv[drv_index].heads = drvinfo->heads;
1504 h->drv[drv_index].sectors = drvinfo->sectors;
1505 h->drv[drv_index].cylinders = drvinfo->cylinders;
1506 h->drv[drv_index].raid_level = drvinfo->raid_level;
1507 memcpy(h->drv[drv_index].serial_no, drvinfo->serial_no, 16);
1509 ++h->num_luns;
1510 disk = h->gendisk[drv_index];
1511 set_capacity(disk, h->drv[drv_index].nr_blocks);
1513 /* If it's not disk 0 (drv_index != 0)
1514 * or if it was disk 0, but there was previously
1515 * no actual corresponding configured logical drive
1516 * (raid_leve == -1) then we want to update the
1517 * logical drive's information.
1519 if (drv_index || first_time)
1520 cciss_add_disk(h, disk, drv_index);
1522 freeret:
1523 kfree(inq_buff);
1524 kfree(drvinfo);
1525 return;
1526 mem_msg:
1527 printk(KERN_ERR "cciss: out of memory\n");
1528 goto freeret;
1531 /* This function will find the first index of the controllers drive array
1532 * that has a -1 for the raid_level and will return that index. This is
1533 * where new drives will be added. If the index to be returned is greater
1534 * than the highest_lun index for the controller then highest_lun is set
1535 * to this new index. If there are no available indexes then -1 is returned.
1536 * "controller_node" is used to know if this is a real logical drive, or just
1537 * the controller node, which determines if this counts towards highest_lun.
1539 static int cciss_find_free_drive_index(int ctlr, int controller_node)
1541 int i;
1543 for (i = 0; i < CISS_MAX_LUN; i++) {
1544 if (hba[ctlr]->drv[i].raid_level == -1) {
1545 if (i > hba[ctlr]->highest_lun)
1546 if (!controller_node)
1547 hba[ctlr]->highest_lun = i;
1548 return i;
1551 return -1;
1554 /* cciss_add_gendisk finds a free hba[]->drv structure
1555 * and allocates a gendisk if needed, and sets the lunid
1556 * in the drvinfo structure. It returns the index into
1557 * the ->drv[] array, or -1 if none are free.
1558 * is_controller_node indicates whether highest_lun should
1559 * count this disk, or if it's only being added to provide
1560 * a means to talk to the controller in case no logical
1561 * drives have yet been configured.
1563 static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node)
1565 int drv_index;
1567 drv_index = cciss_find_free_drive_index(h->ctlr, controller_node);
1568 if (drv_index == -1)
1569 return -1;
1570 /*Check if the gendisk needs to be allocated */
1571 if (!h->gendisk[drv_index]) {
1572 h->gendisk[drv_index] =
1573 alloc_disk(1 << NWD_SHIFT);
1574 if (!h->gendisk[drv_index]) {
1575 printk(KERN_ERR "cciss%d: could not "
1576 "allocate a new disk %d\n",
1577 h->ctlr, drv_index);
1578 return -1;
1581 h->drv[drv_index].LunID = lunid;
1583 /* Don't need to mark this busy because nobody */
1584 /* else knows about this disk yet to contend */
1585 /* for access to it. */
1586 h->drv[drv_index].busy_configuring = 0;
1587 wmb();
1588 return drv_index;
1591 /* This is for the special case of a controller which
1592 * has no logical drives. In this case, we still need
1593 * to register a disk so the controller can be accessed
1594 * by the Array Config Utility.
1596 static void cciss_add_controller_node(ctlr_info_t *h)
1598 struct gendisk *disk;
1599 int drv_index;
1601 if (h->gendisk[0] != NULL) /* already did this? Then bail. */
1602 return;
1604 drv_index = cciss_add_gendisk(h, 0, 1);
1605 if (drv_index == -1) {
1606 printk(KERN_WARNING "cciss%d: could not "
1607 "add disk 0.\n", h->ctlr);
1608 return;
1610 h->drv[drv_index].block_size = 512;
1611 h->drv[drv_index].nr_blocks = 0;
1612 h->drv[drv_index].heads = 0;
1613 h->drv[drv_index].sectors = 0;
1614 h->drv[drv_index].cylinders = 0;
1615 h->drv[drv_index].raid_level = -1;
1616 memset(h->drv[drv_index].serial_no, 0, 16);
1617 disk = h->gendisk[drv_index];
1618 cciss_add_disk(h, disk, drv_index);
1621 /* This function will add and remove logical drives from the Logical
1622 * drive array of the controller and maintain persistency of ordering
1623 * so that mount points are preserved until the next reboot. This allows
1624 * for the removal of logical drives in the middle of the drive array
1625 * without a re-ordering of those drives.
1626 * INPUT
1627 * h = The controller to perform the operations on
1629 static int rebuild_lun_table(ctlr_info_t *h, int first_time)
1631 int ctlr = h->ctlr;
1632 int num_luns;
1633 ReportLunData_struct *ld_buff = NULL;
1634 int return_code;
1635 int listlength = 0;
1636 int i;
1637 int drv_found;
1638 int drv_index = 0;
1639 __u32 lunid = 0;
1640 unsigned long flags;
1642 if (!capable(CAP_SYS_RAWIO))
1643 return -EPERM;
1645 /* Set busy_configuring flag for this operation */
/* Only one rebuild may run at a time; busy_configuring is the gate. */
1646 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1647 if (h->busy_configuring) {
1648 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1649 return -EBUSY;
1651 h->busy_configuring = 1;
1652 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
/* Ask the controller for its current list of logical volumes. */
1654 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1655 if (ld_buff == NULL)
1656 goto mem_msg;
1658 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1659 sizeof(ReportLunData_struct), 0,
1660 0, 0, TYPE_CMD);
1662 if (return_code == IO_OK)
1663 listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
1664 else { /* reading number of logical volumes failed */
1665 printk(KERN_WARNING "cciss: report logical volume"
1666 " command failed\n");
1667 listlength = 0;
1668 goto freeret;
1671 num_luns = listlength / 8; /* 8 bytes per entry */
1672 if (num_luns > CISS_MAX_LUN) {
1673 num_luns = CISS_MAX_LUN;
1674 printk(KERN_WARNING "cciss: more luns configured"
1675 " on controller than can be handled by"
1676 " this driver.\n");
/* No logical drives at all: register the bare controller node so the
 * Array Config Utility can still reach the controller. */
1679 if (num_luns == 0)
1680 cciss_add_controller_node(h);
1682 /* Compare controller drive array to driver's drive array
1683 * to see if any drives are missing on the controller due
1684 * to action of Array Config Utility (user deletes drive)
1685 * and deregister logical drives which have disappeared.
1687 for (i = 0; i <= h->highest_lun; i++) {
1688 int j;
1689 drv_found = 0;
1691 /* skip holes in the array from already deleted drives */
1692 if (h->drv[i].raid_level == -1)
1693 continue;
1695 for (j = 0; j < num_luns; j++) {
/* LUN ids in the report are little-endian 32-bit values. */
1696 memcpy(&lunid, &ld_buff->LUN[j][0], 4);
1697 lunid = le32_to_cpu(lunid);
1698 if (h->drv[i].LunID == lunid) {
1699 drv_found = 1;
1700 break;
1703 if (!drv_found) {
1704 /* Deregister it from the OS, it's gone. */
1705 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1706 h->drv[i].busy_configuring = 1;
1707 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1708 return_code = deregister_disk(h, i, 1);
1709 h->drv[i].busy_configuring = 0;
1713 /* Compare controller drive array to driver's drive array.
1714 * Check for updates in the drive information and any new drives
1715 * on the controller due to ACU adding logical drives, or changing
1716 * a logical drive's size, etc. Reregister any new/changed drives
1718 for (i = 0; i < num_luns; i++) {
1719 int j;
1721 drv_found = 0;
1723 memcpy(&lunid, &ld_buff->LUN[i][0], 4);
1724 lunid = le32_to_cpu(lunid);
1726 /* Find if the LUN is already in the drive array
1727 * of the driver. If so then update its info
1728 * if not in use. If it does not exist then find
1729 * the first free index and add it.
1731 for (j = 0; j <= h->highest_lun; j++) {
1732 if (h->drv[j].raid_level != -1 &&
1733 h->drv[j].LunID == lunid) {
1734 drv_index = j;
1735 drv_found = 1;
1736 break;
1740 /* check if the drive was found already in the array */
1741 if (!drv_found) {
1742 drv_index = cciss_add_gendisk(h, lunid, 0);
1743 if (drv_index == -1)
1744 goto freeret;
1746 cciss_update_drive_info(ctlr, drv_index, first_time);
1747 } /* end for */
1749 freeret:
1750 kfree(ld_buff);
1751 h->busy_configuring = 0;
1752 /* We return -1 here to tell the ACU that we have registered/updated
1753 * all of the drives that we can and to keep it from calling us
1754 * additional times.
1756 return -1;
1757 mem_msg:
1758 printk(KERN_ERR "cciss: out of memory\n");
1759 h->busy_configuring = 0;
1760 goto freeret;
1763 /* This function will deregister the disk and it's queue from the
1764 * kernel. It must be called with the controller lock held and the
1765 * drv structures busy_configuring flag set. It's parameters are:
1767 * disk = This is the disk to be deregistered
1768 * drv = This is the drive_info_struct associated with the disk to be
1769 * deregistered. It contains information about the disk used
1770 * by the driver.
1771 * clear_all = This flag determines whether or not the disk information
1772 * is going to be completely cleared out and the highest_lun
1773 * reset. Sometimes we want to clear out information about
1774 * the disk in preparation for re-adding it. In this case
1775 * the highest_lun should be left unchanged and the LunID
1776 * should not be cleared.
1778 static int deregister_disk(ctlr_info_t *h, int drv_index,
1779 int clear_all)
1781 int i;
1782 struct gendisk *disk;
1783 drive_info_struct *drv;
1785 if (!capable(CAP_SYS_RAWIO))
1786 return -EPERM;
1788 drv = &h->drv[drv_index];
1789 disk = h->gendisk[drv_index];
1791 /* make sure logical volume is NOT in use */
/* Disk 0 (the controller node) always has a usage count of at least 1
 * from the driver itself, hence the higher threshold there. */
1792 if (clear_all || (h->gendisk[0] == disk)) {
1793 if (drv->usage_count > 1)
1794 return -EBUSY;
1795 } else if (drv->usage_count > 0)
1796 return -EBUSY;
1798 /* invalidate the devices and deregister the disk. If it is disk
1799 * zero do not deregister it but just zero out it's values. This
1800 * allows us to delete disk zero but keep the controller registered.
1802 if (h->gendisk[0] != disk) {
1803 struct request_queue *q = disk->queue;
1804 if (disk->flags & GENHD_FL_UP)
1805 del_gendisk(disk);
1806 if (q) {
1807 blk_cleanup_queue(q);
1808 /* Set drv->queue to NULL so that we do not try
1809 * to call blk_start_queue on this queue in the
1810 * interrupt handler
1812 drv->queue = NULL;
1814 /* If clear_all is set then we are deleting the logical
1815 * drive, not just refreshing its info. For drives
1816 * other than disk 0 we will call put_disk. We do not
1817 * do this for disk 0 as we need it to be able to
1818 * configure the controller.
1820 if (clear_all){
1821 /* This isn't pretty, but we need to find the
1822 * disk in our array and NULL our the pointer.
1823 * This is so that we will call alloc_disk if
1824 * this index is used again later.
1826 for (i=0; i < CISS_MAX_LUN; i++){
1827 if (h->gendisk[i] == disk) {
1828 h->gendisk[i] = NULL;
1829 break;
1832 put_disk(disk);
1834 } else {
/* Disk 0: keep it registered, just report a zero capacity. */
1835 set_capacity(disk, 0);
1838 --h->num_luns;
1839 /* zero out the disk size info */
1840 drv->nr_blocks = 0;
1841 drv->block_size = 0;
1842 drv->heads = 0;
1843 drv->sectors = 0;
1844 drv->cylinders = 0;
1845 drv->raid_level = -1; /* This can be used as a flag variable to
1846 * indicate that this element of the drive
1847 * array is free.
1850 if (clear_all) {
1851 /* check to see if it was the last disk */
1852 if (drv == h->drv + h->highest_lun) {
1853 /* if so, find the new hightest lun */
1854 int i, newhighest = -1;
1855 for (i = 0; i <= h->highest_lun; i++) {
1856 /* if the disk has size > 0, it is available */
1857 if (h->drv[i].heads)
1858 newhighest = i;
1860 h->highest_lun = newhighest;
1863 drv->LunID = 0;
1865 return 0;
/*
 * fill_cmd() - populate a CommandList_struct for either a firmware
 * command (TYPE_CMD) or a message (TYPE_MSG), including addressing
 * mode, CDB contents and the single scatter-gather entry for buff.
 * Returns IO_OK, or IO_ERROR for an unrecognized cmd/cmd_type.
 * On success with size > 0, buff is left DMA-mapped; the caller
 * (e.g. sendcmd_withirq) is responsible for unmapping it.
 */
1868 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
1869 1: address logical volume log_unit,
1870 2: periph device address is scsi3addr */
1871 unsigned int log_unit, __u8 page_code,
1872 unsigned char *scsi3addr, int cmd_type)
1874 ctlr_info_t *h = hba[ctlr];
1875 u64bit buff_dma_handle;
1876 int status = IO_OK;
1878 c->cmd_type = CMD_IOCTL_PEND;
1879 c->Header.ReplyQueue = 0;
/* One SG entry when a data buffer is supplied, none otherwise. */
1880 if (buff != NULL) {
1881 c->Header.SGList = 1;
1882 c->Header.SGTotal = 1;
1883 } else {
1884 c->Header.SGList = 0;
1885 c->Header.SGTotal = 0;
/* The command tag is the command's own bus address, so completions
 * can be matched back to this CommandList_struct. */
1887 c->Header.Tag.lower = c->busaddr;
1889 c->Request.Type.Type = cmd_type;
1890 if (cmd_type == TYPE_CMD) {
1891 switch (cmd) {
1892 case CISS_INQUIRY:
1893 /* If the logical unit number is 0 then, this is going
1894 to controller so It's a physical command
1895 mode = 0 target = 0. So we have nothing to write.
1896 otherwise, if use_unit_num == 1,
1897 mode = 1(volume set addressing) target = LUNID
1898 otherwise, if use_unit_num == 2,
1899 mode = 0(periph dev addr) target = scsi3addr */
1900 if (use_unit_num == 1) {
1901 c->Header.LUN.LogDev.VolId =
1902 h->drv[log_unit].LunID;
1903 c->Header.LUN.LogDev.Mode = 1;
1904 } else if (use_unit_num == 2) {
/* NOTE(review): the memcpy length argument appears to have been lost
 * in this extract (a source line is missing here) — verify against
 * the upstream file before relying on this path. */
1905 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
1907 c->Header.LUN.LogDev.Mode = 0;
1909 /* are we trying to read a vital product page */
1910 if (page_code != 0) {
1911 c->Request.CDB[1] = 0x01;
1912 c->Request.CDB[2] = page_code;
1914 c->Request.CDBLen = 6;
1915 c->Request.Type.Attribute = ATTR_SIMPLE;
1916 c->Request.Type.Direction = XFER_READ;
1917 c->Request.Timeout = 0;
1918 c->Request.CDB[0] = CISS_INQUIRY;
1919 c->Request.CDB[4] = size & 0xFF;
1920 break;
1921 case CISS_REPORT_LOG:
1922 case CISS_REPORT_PHYS:
1923 /* Talking to controller so It's a physical command
1924 mode = 00 target = 0. Nothing to write.
1926 c->Request.CDBLen = 12;
1927 c->Request.Type.Attribute = ATTR_SIMPLE;
1928 c->Request.Type.Direction = XFER_READ;
1929 c->Request.Timeout = 0;
1930 c->Request.CDB[0] = cmd;
/* Allocation length is a big-endian 32-bit value in CDB[6..9]. */
1931 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1932 c->Request.CDB[7] = (size >> 16) & 0xFF;
1933 c->Request.CDB[8] = (size >> 8) & 0xFF;
1934 c->Request.CDB[9] = size & 0xFF;
1935 break;
1937 case CCISS_READ_CAPACITY:
1938 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1939 c->Header.LUN.LogDev.Mode = 1;
1940 c->Request.CDBLen = 10;
1941 c->Request.Type.Attribute = ATTR_SIMPLE;
1942 c->Request.Type.Direction = XFER_READ;
1943 c->Request.Timeout = 0;
1944 c->Request.CDB[0] = cmd;
1945 break;
1946 case CCISS_READ_CAPACITY_16:
1947 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1948 c->Header.LUN.LogDev.Mode = 1;
1949 c->Request.CDBLen = 16;
1950 c->Request.Type.Attribute = ATTR_SIMPLE;
1951 c->Request.Type.Direction = XFER_READ;
1952 c->Request.Timeout = 0;
1953 c->Request.CDB[0] = cmd;
1954 c->Request.CDB[1] = 0x10;
1955 c->Request.CDB[10] = (size >> 24) & 0xFF;
1956 c->Request.CDB[11] = (size >> 16) & 0xFF;
1957 c->Request.CDB[12] = (size >> 8) & 0xFF;
1958 c->Request.CDB[13] = size & 0xFF;
/* NOTE(review): Timeout and CDB[0] are assigned twice in this case
 * (also set a few lines above) — harmless, but redundant. */
1959 c->Request.Timeout = 0;
1960 c->Request.CDB[0] = cmd;
1961 break;
1962 case CCISS_CACHE_FLUSH:
1963 c->Request.CDBLen = 12;
1964 c->Request.Type.Attribute = ATTR_SIMPLE;
1965 c->Request.Type.Direction = XFER_WRITE;
1966 c->Request.Timeout = 0;
1967 c->Request.CDB[0] = BMIC_WRITE;
1968 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1969 break;
1970 default:
/* NOTE(review): "%c" prints the command byte as a character;
 * "0x%02x" would be more readable for an opcode. */
1971 printk(KERN_WARNING
1972 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1973 return IO_ERROR;
1975 } else if (cmd_type == TYPE_MSG) {
1976 switch (cmd) {
1977 case 0: /* ABORT message */
1978 c->Request.CDBLen = 12;
1979 c->Request.Type.Attribute = ATTR_SIMPLE;
1980 c->Request.Type.Direction = XFER_WRITE;
1981 c->Request.Timeout = 0;
1982 c->Request.CDB[0] = cmd; /* abort */
1983 c->Request.CDB[1] = 0; /* abort a command */
1984 /* buff contains the tag of the command to abort */
1985 memcpy(&c->Request.CDB[4], buff, 8);
1986 break;
1987 case 1: /* RESET message */
1988 c->Request.CDBLen = 12;
1989 c->Request.Type.Attribute = ATTR_SIMPLE;
1990 c->Request.Type.Direction = XFER_WRITE;
1991 c->Request.Timeout = 0;
1992 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1993 c->Request.CDB[0] = cmd; /* reset */
1994 c->Request.CDB[1] = 0x04; /* reset a LUN */
1995 break;
1996 case 3: /* No-Op message */
1997 c->Request.CDBLen = 1;
1998 c->Request.Type.Attribute = ATTR_SIMPLE;
1999 c->Request.Type.Direction = XFER_WRITE;
2000 c->Request.Timeout = 0;
2001 c->Request.CDB[0] = cmd;
2002 break;
2003 default:
2004 printk(KERN_WARNING
2005 "cciss%d: unknown message type %d\n", ctlr, cmd);
2006 return IO_ERROR;
2008 } else {
2009 printk(KERN_WARNING
2010 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
2011 return IO_ERROR;
2013 /* Fill in the scatter gather information */
2014 if (size > 0) {
2015 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
2016 buff, size,
2017 PCI_DMA_BIDIRECTIONAL);
2018 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
2019 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
2020 c->SG[0].Len = size;
2021 c->SG[0].Ext = 0; /* we are not chaining */
2023 return status;
/*
 * sendcmd_withirq() - build a command via fill_cmd(), queue it to the
 * controller, and sleep until the interrupt path signals completion.
 * Retries up to MAX_CMD_RETRIES times on CMD_UNSOLICITED_ABORT.
 * Returns IO_OK on success, IO_ERROR on command failure, or -ENOMEM
 * when no command slot is available.
 */
2026 static int sendcmd_withirq(__u8 cmd,
2027 int ctlr,
2028 void *buff,
2029 size_t size,
2030 unsigned int use_unit_num,
2031 unsigned int log_unit, __u8 page_code, int cmd_type)
2033 ctlr_info_t *h = hba[ctlr];
2034 CommandList_struct *c;
2035 u64bit buff_dma_handle;
2036 unsigned long flags;
2037 int return_status;
2038 DECLARE_COMPLETION_ONSTACK(wait);
2040 if ((c = cmd_alloc(h, 0)) == NULL)
2041 return -ENOMEM;
2042 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2043 log_unit, page_code, NULL, cmd_type);
2044 if (return_status != IO_OK) {
2045 cmd_free(h, c, 0);
2046 return return_status;
2048 resend_cmd2:
/* The interrupt handler completes &wait when the command finishes. */
2049 c->waiting = &wait;
2051 /* Put the request on the tail of the queue and send it */
2052 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
2053 addQ(&h->reqQ, c);
2054 h->Qdepth++;
2055 start_io(h);
2056 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
2058 wait_for_completion(&wait);
2060 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
2061 switch (c->err_info->CommandStatus) {
2062 case CMD_TARGET_STATUS:
2063 printk(KERN_WARNING "cciss: cmd %p has "
2064 " completed with errors\n", c);
2065 if (c->err_info->ScsiStatus) {
2066 printk(KERN_WARNING "cciss: cmd %p "
2067 "has SCSI Status = %x\n",
2068 c, c->err_info->ScsiStatus);
2071 break;
2072 case CMD_DATA_UNDERRUN:
2073 case CMD_DATA_OVERRUN:
2074 /* expected for inquire and report lun commands */
2075 break;
2076 case CMD_INVALID:
2077 printk(KERN_WARNING "cciss: Cmd %p is "
2078 "reported invalid\n", c);
2079 return_status = IO_ERROR;
2080 break;
2081 case CMD_PROTOCOL_ERR:
2082 printk(KERN_WARNING "cciss: cmd %p has "
2083 "protocol error \n", c);
2084 return_status = IO_ERROR;
2085 break;
2086 case CMD_HARDWARE_ERR:
2087 printk(KERN_WARNING "cciss: cmd %p had "
2088 " hardware error\n", c);
2089 return_status = IO_ERROR;
2090 break;
2091 case CMD_CONNECTION_LOST:
2092 printk(KERN_WARNING "cciss: cmd %p had "
2093 "connection lost\n", c);
2094 return_status = IO_ERROR;
2095 break;
2096 case CMD_ABORTED:
2097 printk(KERN_WARNING "cciss: cmd %p was "
2098 "aborted\n", c);
2099 return_status = IO_ERROR;
2100 break;
2101 case CMD_ABORT_FAILED:
2102 printk(KERN_WARNING "cciss: cmd %p reports "
2103 "abort failed\n", c);
2104 return_status = IO_ERROR;
2105 break;
2106 case CMD_UNSOLICITED_ABORT:
2107 printk(KERN_WARNING
2108 "cciss%d: unsolicited abort %p\n", ctlr, c);
/* Only unsolicited aborts are retried; the completion must be
 * re-armed (INIT_COMPLETION) before requeueing the command. */
2109 if (c->retry_count < MAX_CMD_RETRIES) {
2110 printk(KERN_WARNING
2111 "cciss%d: retrying %p\n", ctlr, c);
2112 c->retry_count++;
2113 /* erase the old error information */
2114 memset(c->err_info, 0,
2115 sizeof(ErrorInfo_struct));
2116 return_status = IO_OK;
2117 INIT_COMPLETION(wait);
2118 goto resend_cmd2;
2120 return_status = IO_ERROR;
2121 break;
2122 default:
2123 printk(KERN_WARNING "cciss: cmd %p returned "
2124 "unknown status %x\n", c,
2125 c->err_info->CommandStatus);
2126 return_status = IO_ERROR;
2129 /* unlock the buffers from DMA */
/* fill_cmd() mapped buff into SG[0]; undo that mapping here. */
2130 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2131 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2132 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
2133 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2134 cmd_free(h, c, 0);
2135 return return_status;
/*
 * cciss_geometry_inquiry() - read vendor-specific inquiry page 0xC1 to
 * fill drv with the logical drive's geometry (heads, sectors,
 * cylinders) and RAID level.  Volumes that do not support the page
 * (data_byte[8] == 0xFF) get a synthetic 255-head / 32-sector
 * geometry.  Cylinders are recomputed from total_size so that
 * heads * sectors * cylinders covers the whole volume.
 */
2138 static void cciss_geometry_inquiry(int ctlr, int logvol,
2139 int withirq, sector_t total_size,
2140 unsigned int block_size,
2141 InquiryData_struct *inq_buff,
2142 drive_info_struct *drv)
2144 int return_code;
2145 unsigned long t;
2147 memset(inq_buff, 0, sizeof(InquiryData_struct));
2148 if (withirq)
2149 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
2150 inq_buff, sizeof(*inq_buff), 1,
2151 logvol, 0xC1, TYPE_CMD);
2152 else
2153 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
2154 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
2155 TYPE_CMD);
2156 if (return_code == IO_OK) {
2157 if (inq_buff->data_byte[8] == 0xFF) {
2158 printk(KERN_WARNING
2159 "cciss: reading geometry failed, volume "
2160 "does not support reading geometry\n");
2161 drv->heads = 255;
2162 drv->sectors = 32; // Sectors per track
2163 drv->cylinders = total_size + 1;
2164 drv->raid_level = RAID_UNKNOWN;
2165 } else {
2166 drv->heads = inq_buff->data_byte[6];
2167 drv->sectors = inq_buff->data_byte[7];
/* Cylinder count is a big-endian 16-bit value in bytes 4..5. */
2168 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
2169 drv->cylinders += inq_buff->data_byte[5];
2170 drv->raid_level = inq_buff->data_byte[8];
2172 drv->block_size = block_size;
/* total_size is the last LBA, hence the +1 for block count. */
2173 drv->nr_blocks = total_size + 1;
2174 t = drv->heads * drv->sectors;
2175 if (t > 1) {
/* Round cylinders up so the CHS product covers the full capacity;
 * sector_div() is required for 64-bit sector_t on 32-bit hosts. */
2176 sector_t real_size = total_size + 1;
2177 unsigned long rem = sector_div(real_size, t);
2178 if (rem)
2179 real_size++;
2180 drv->cylinders = real_size;
2182 } else { /* Get geometry failed */
2183 printk(KERN_WARNING "cciss: reading geometry failed\n");
2185 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
2186 drv->heads, drv->sectors, drv->cylinders);
/*
 * cciss_read_capacity: 10-byte READ CAPACITY for one logical volume.
 * On success *total_size holds the index of the last block (big-endian
 * value converted to host order) and *block_size the sector size.
 * On failure the outputs are forced to 0 and BLOCK_SIZE so callers
 * always receive sane values.
 */
2189 static void
2190 cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
2191 unsigned int *block_size)
2193 ReadCapdata_struct *buf;
2194 int return_code;
2196 buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
2197 if (!buf) {
2198 printk(KERN_WARNING "cciss: out of memory\n");
2199 return;
2202 if (withirq)
2203 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
2204 ctlr, buf, sizeof(ReadCapdata_struct),
2205 1, logvol, 0, TYPE_CMD);
2206 else
2207 return_code = sendcmd(CCISS_READ_CAPACITY,
2208 ctlr, buf, sizeof(ReadCapdata_struct),
2209 1, logvol, 0, NULL, TYPE_CMD);
2210 if (return_code == IO_OK) {
/* controller data is big-endian; convert to host byte order */
2211 *total_size = be32_to_cpu(*(__be32 *) buf->total_size);
2212 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2213 } else { /* read capacity command failed */
2214 printk(KERN_WARNING "cciss: read capacity failed\n");
2215 *total_size = 0;
2216 *block_size = BLOCK_SIZE;
2218 if (*total_size != 0)
2219 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2220 (unsigned long long)*total_size+1, *block_size);
2221 kfree(buf);
/*
 * cciss_read_capacity_16: 16-byte READ CAPACITY variant (64-bit block
 * count) used for volumes too large for the 10-byte command.  Same
 * output contract as cciss_read_capacity(): on failure *total_size is
 * 0 and *block_size is BLOCK_SIZE.
 */
2224 static void
2225 cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
2227 ReadCapdata_struct_16 *buf;
2228 int return_code;
2230 buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2231 if (!buf) {
2232 printk(KERN_WARNING "cciss: out of memory\n");
2233 return;
2236 if (withirq) {
2237 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2238 ctlr, buf, sizeof(ReadCapdata_struct_16),
2239 1, logvol, 0, TYPE_CMD);
2241 else {
2242 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2243 ctlr, buf, sizeof(ReadCapdata_struct_16),
2244 1, logvol, 0, NULL, TYPE_CMD);
2246 if (return_code == IO_OK) {
/* 64-bit big-endian last-block index, 32-bit block size */
2247 *total_size = be64_to_cpu(*(__be64 *) buf->total_size);
2248 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2249 } else { /* read capacity command failed */
2250 printk(KERN_WARNING "cciss: read capacity failed\n");
2251 *total_size = 0;
2252 *block_size = BLOCK_SIZE;
2254 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2255 (unsigned long long)*total_size+1, *block_size);
2256 kfree(buf);
/*
 * cciss_revalidate: gendisk revalidate hook.  Re-reads the capacity and
 * geometry of the logical volume backing 'disk' and refreshes the queue
 * sector size and gendisk capacity.  Returns 0 on success, 1 if the
 * volume is no longer present on the controller or memory is exhausted.
 */
2259 static int cciss_revalidate(struct gendisk *disk)
2261 ctlr_info_t *h = get_host(disk);
2262 drive_info_struct *drv = get_drv(disk);
2263 int logvol;
2264 int FOUND = 0;
2265 unsigned int block_size;
2266 sector_t total_size;
2267 InquiryData_struct *inq_buff = NULL;
/* Verify the drive's LunID still exists on this controller and find
 * its current logical volume index. */
2269 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2270 if (h->drv[logvol].LunID == drv->LunID) {
2271 FOUND = 1;
2272 break;
2276 if (!FOUND)
2277 return 1;
2279 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2280 if (inq_buff == NULL) {
2281 printk(KERN_WARNING "cciss: out of memory\n");
2282 return 1;
/* 10-byte vs 16-byte READ CAPACITY chosen by the controller's
 * addressing mode (matches the READ command family in use). */
2284 if (h->cciss_read == CCISS_READ_10) {
2285 cciss_read_capacity(h->ctlr, logvol, 1,
2286 &total_size, &block_size);
2287 } else {
2288 cciss_read_capacity_16(h->ctlr, logvol, 1,
2289 &total_size, &block_size);
2291 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2292 inq_buff, drv);
2294 blk_queue_hardsect_size(drv->queue, drv->block_size);
2295 set_capacity(disk, drv->nr_blocks);
2297 kfree(inq_buff);
2298 return 0;
2302 * Wait polling for a command to complete.
2303 * The memory mapped FIFO is polled for the completion.
2304 * Used only at init time, interrupts from the HBA are disabled.
2306 static unsigned long pollcomplete(int ctlr)
2308 unsigned long done;
2309 int i;
2311 /* Wait (up to 20 seconds) for a command to complete */
2313 for (i = 20 * HZ; i > 0; i--) {
2314 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2315 if (done == FIFO_EMPTY)
2316 schedule_timeout_uninterruptible(1);
2317 else
2318 return done;
2320 /* Invalid address to tell caller we ran out of time */
2321 return 1;
/*
 * add_sendcmd_reject: stash an unexpected completion picked up by the
 * polled sendcmd() path so the interrupt handler can process it later.
 * Returns 0 if the completion was saved (or handled), 1 if it had to be
 * dropped (unexpected tag, or reject list full) -- in that case the
 * completion is lost.
 */
2324 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2326 /* We get in here if sendcmd() is polling for completions
2327 and gets some command back that it wasn't expecting --
2328 something other than that which it just sent down.
2329 Ordinarily, that shouldn't happen, but it can happen when
2330 the scsi tape stuff gets into error handling mode, and
2331 starts using sendcmd() to try to abort commands and
2332 reset tape drives. In that case, sendcmd may pick up
2333 completions of commands that were sent to logical drives
2334 through the block i/o system, or cciss ioctls completing, etc.
2335 In that case, we need to save those completions for later
2336 processing by the interrupt handler.
2339 #ifdef CONFIG_CISS_SCSI_TAPE
2340 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2342 /* If it's not the scsi tape stuff doing error handling, (abort */
2343 /* or reset) then we don't expect anything weird. */
2344 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2345 #endif
2346 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2347 "Invalid command list address returned! (%lx)\n",
2348 ctlr, complete);
2349 /* not much we can do. */
2350 #ifdef CONFIG_CISS_SCSI_TAPE
2351 return 1;
2354 /* We've sent down an abort or reset, but something else
2355 has completed */
/* +2 slack: sendcmd()'s own command and one ioctl may be in flight
 * on top of the nr_cmds block commands. */
2356 if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
2357 /* Uh oh. No room to save it for later... */
2358 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2359 "reject list overflow, command lost!\n", ctlr);
2360 return 1;
2362 /* Save it for later */
2363 srl->complete[srl->ncompletions] = complete;
2364 srl->ncompletions++;
2365 #endif
2366 return 0;
2370 * Send a command to the controller, and wait for it to complete.
2371 * Only used at init time.
/*
 * sendcmd: build a command, submit it with interrupts disabled, and
 * busy-poll the completion FIFO for the result.  Init-time only.
 * Returns IO_OK on success or IO_ERROR on any failure; the data buffer
 * mapped by fill_cmd() is always DMA-unmapped before returning.
 * Unexpected completions (scsi-tape error recovery) are parked via
 * add_sendcmd_reject() and replayed through do_cciss_intr() at the end.
 */
2373 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2374 1: address logical volume log_unit,
2375 2: periph device address is scsi3addr */
2376 unsigned int log_unit,
2377 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2379 CommandList_struct *c;
2380 int i;
2381 unsigned long complete;
2382 ctlr_info_t *info_p = hba[ctlr];
2383 u64bit buff_dma_handle;
2384 int status, done = 0;
2386 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2387 printk(KERN_WARNING "cciss: unable to get memory");
2388 return IO_ERROR;
2390 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2391 log_unit, page_code, scsi3addr, cmd_type);
2392 if (status != IO_OK) {
2393 cmd_free(info_p, c, 1);
2394 return status;
2396 resend_cmd1:
2398 * Disable interrupt
2400 #ifdef CCISS_DEBUG
2401 printk(KERN_DEBUG "cciss: turning intr off\n");
2402 #endif /* CCISS_DEBUG */
2403 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2405 /* Make sure there is room in the command FIFO */
2406 /* Actually it should be completely empty at this time */
2407 /* unless we are in here doing error handling for the scsi */
2408 /* tape side of the driver. */
2409 for (i = 200000; i > 0; i--) {
2410 /* if fifo isn't full go */
2411 if (!(info_p->access.fifo_full(info_p))) {
2413 break;
2415 udelay(10);
2416 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2417 " waiting!\n", ctlr);
2420 * Send the cmd
2422 info_p->access.submit_command(info_p, c);
2423 done = 0;
2424 do {
2425 complete = pollcomplete(ctlr);
2427 #ifdef CCISS_DEBUG
2428 printk(KERN_DEBUG "cciss: command completed\n");
2429 #endif /* CCISS_DEBUG */
/* pollcomplete() returns 1 (never a valid tag) on timeout */
2431 if (complete == 1) {
2432 printk(KERN_WARNING
2433 "cciss cciss%d: SendCmd Timeout out, "
2434 "No command list address returned!\n", ctlr);
2435 status = IO_ERROR;
2436 done = 1;
2437 break;
2440 /* This will need to change for direct lookup completions */
/* Error bit set and the tag is ours: inspect err_info.  Over/underrun
 * on Report/Inquiry commands is benign and treated as success. */
2441 if ((complete & CISS_ERROR_BIT)
2442 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2443 /* if data overrun or underun on Report command
2444 ignore it
2446 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2447 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2448 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2449 ((c->err_info->CommandStatus ==
2450 CMD_DATA_OVERRUN) ||
2451 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2452 )) {
2453 complete = c->busaddr;
2454 } else {
2455 if (c->err_info->CommandStatus ==
2456 CMD_UNSOLICITED_ABORT) {
2457 printk(KERN_WARNING "cciss%d: "
2458 "unsolicited abort %p\n",
2459 ctlr, c);
2460 if (c->retry_count < MAX_CMD_RETRIES) {
2461 printk(KERN_WARNING
2462 "cciss%d: retrying %p\n",
2463 ctlr, c);
2464 c->retry_count++;
2465 /* erase the old error */
2466 /* information */
2467 memset(c->err_info, 0,
2468 sizeof
2469 (ErrorInfo_struct));
2470 goto resend_cmd1;
2471 } else {
2472 printk(KERN_WARNING
2473 "cciss%d: retried %p too "
2474 "many times\n", ctlr, c);
2475 status = IO_ERROR;
2476 goto cleanup1;
2478 } else if (c->err_info->CommandStatus ==
2479 CMD_UNABORTABLE) {
2480 printk(KERN_WARNING
2481 "cciss%d: command could not be aborted.\n",
2482 ctlr);
2483 status = IO_ERROR;
2484 goto cleanup1;
2486 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2487 " Error %x \n", ctlr,
2488 c->err_info->CommandStatus);
2489 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2490 " offensive info\n"
2491 " size %x\n num %x value %x\n",
2492 ctlr,
2493 c->err_info->MoreErrInfo.Invalid_Cmd.
2494 offense_size,
2495 c->err_info->MoreErrInfo.Invalid_Cmd.
2496 offense_num,
2497 c->err_info->MoreErrInfo.Invalid_Cmd.
2498 offense_value);
2499 status = IO_ERROR;
2500 goto cleanup1;
2503 /* This will need changing for direct lookup completions */
/* Completion for some other command: save it for the interrupt
 * handler and keep polling for our own. */
2504 if (complete != c->busaddr) {
2505 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2506 BUG(); /* we are pretty much hosed if we get here. */
2508 continue;
2509 } else
2510 done = 1;
2511 } while (!done);
2513 cleanup1:
2514 /* unlock the data buffer from DMA */
2515 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2516 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2517 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2518 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2519 #ifdef CONFIG_CISS_SCSI_TAPE
2520 /* if we saved some commands for later, process them now. */
2521 if (info_p->scsi_rejects.ncompletions > 0)
2522 do_cciss_intr(0, info_p);
2523 #endif
2524 cmd_free(info_p, c, 1);
2525 return status;
2529 * Map (physical) PCI mem into (virtual) kernel space
2531 static void __iomem *remap_pci_mem(ulong base, ulong size)
2533 ulong page_base = ((ulong) base) & PAGE_MASK;
2534 ulong page_offs = ((ulong) base) - page_base;
2535 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2537 return page_remapped ? (page_remapped + page_offs) : NULL;
2541 * Takes jobs of the Q and sends them to the hardware, then puts it on
2542 * the Q to wait for completion.
/*
 * start_io: drain the software request queue (h->reqQ) into the
 * controller until the hardware FIFO fills.  Each submitted command is
 * moved onto h->cmpQ to await its completion interrupt.
 * NOTE(review): visible callers (resend_cciss_cmd, do_cciss_request)
 * appear to hold the controller lock around this -- confirm before
 * adding new call sites.
 */
2544 static void start_io(ctlr_info_t *h)
2546 CommandList_struct *c;
2548 while (!hlist_empty(&h->reqQ)) {
2549 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
2550 /* can't do anything if fifo is full */
2551 if ((h->access.fifo_full(h))) {
2552 printk(KERN_WARNING "cciss: fifo full\n");
2553 break;
2556 /* Get the first entry from the Request Q */
2557 removeQ(c);
2558 h->Qdepth--;
2560 /* Tell the controller execute command */
2561 h->access.submit_command(h, c);
2563 /* Put job onto the completed Q */
2564 addQ(&h->cmpQ, c);
2568 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2569 /* Zeros out the error record and then resends the command back */
2570 /* to the controller */
2571 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2573 /* erase the old error information */
2574 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2576 /* add it to software queue and then send it to the controller */
2577 addQ(&h->reqQ, c);
2578 h->Qdepth++;
2579 if (h->Qdepth > h->maxQsinceinit)
2580 h->maxQsinceinit = h->Qdepth;
2582 start_io(h);
static inline unsigned int make_status_bytes(unsigned int scsi_status_byte,
	unsigned int msg_byte, unsigned int host_byte,
	unsigned int driver_byte)
{
	/* Pack the four bytes the way the scsi.h accessor macros expect:
	 * driver (31:24) | host (23:16) | msg (15:8) | scsi status (7:0). */
	unsigned int packed = driver_byte & 0xff;

	packed = (packed << 8) | (host_byte & 0xff);
	packed = (packed << 8) | (msg_byte & 0xff);
	packed = (packed << 8) | (scsi_status_byte & 0xff);
	return packed;
}
/*
 * evaluate_target_status: translate a CMD_TARGET_STATUS completion into
 * the packed block-layer error word (see make_status_bytes).  For
 * SG_IO-style (blk_pc) requests the SCSI sense data is also copied back
 * into the request; for filesystem requests a recovered/no-sense
 * CHECK CONDITION is downgraded to success.
 */
2596 static inline int evaluate_target_status(CommandList_struct *cmd)
2598 unsigned char sense_key;
2599 unsigned char status_byte, msg_byte, host_byte, driver_byte;
2600 int error_value;
2602 /* If we get in here, it means we got "target status", that is, scsi status */
2603 status_byte = cmd->err_info->ScsiStatus;
2604 driver_byte = DRIVER_OK;
2605 msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */
2607 if (blk_pc_request(cmd->rq))
2608 host_byte = DID_PASSTHROUGH;
2609 else
2610 host_byte = DID_OK;
2612 error_value = make_status_bytes(status_byte, msg_byte,
2613 host_byte, driver_byte);
/* anything other than CHECK CONDITION carries no sense data; done */
2615 if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
2616 if (!blk_pc_request(cmd->rq))
2617 printk(KERN_WARNING "cciss: cmd %p "
2618 "has SCSI Status 0x%x\n",
2619 cmd, cmd->err_info->ScsiStatus);
2620 return error_value;
2623 /* check the sense key */
2624 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2625 /* no status or recovered error */
2626 if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq))
2627 error_value = 0;
2629 if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
2630 if (error_value != 0)
2631 printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
2632 " sense key = 0x%x\n", cmd, sense_key);
2633 return error_value;
2636 /* SG_IO or similar, copy sense data back */
2637 if (cmd->rq->sense) {
/* never copy more than the caller's sense buffer can hold */
2638 if (cmd->rq->sense_len > cmd->err_info->SenseLen)
2639 cmd->rq->sense_len = cmd->err_info->SenseLen;
2640 memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
2641 cmd->rq->sense_len);
2642 } else
2643 cmd->rq->sense_len = 0;
2645 return error_value;
2648 /* checks the status of the job and calls complete buffers to mark all
2649 * buffers for the completed job. Note that this function does not need
2650 * to hold the hba/queue lock.
2652 static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2653 int timeout)
2655 int retry_cmd = 0;
2656 struct request *rq = cmd->rq;
2658 rq->errors = 0;
2660 if (timeout)
2661 rq->errors = make_status_bytes(0, 0, 0, DRIVER_TIMEOUT);
2663 if (cmd->err_info->CommandStatus == 0) /* no error has occurred */
2664 goto after_error_processing;
2666 switch (cmd->err_info->CommandStatus) {
2667 case CMD_TARGET_STATUS:
2668 rq->errors = evaluate_target_status(cmd);
2669 break;
2670 case CMD_DATA_UNDERRUN:
2671 if (blk_fs_request(cmd->rq)) {
2672 printk(KERN_WARNING "cciss: cmd %p has"
2673 " completed with data underrun "
2674 "reported\n", cmd);
2675 cmd->rq->data_len = cmd->err_info->ResidualCnt;
2677 break;
2678 case CMD_DATA_OVERRUN:
2679 if (blk_fs_request(cmd->rq))
2680 printk(KERN_WARNING "cciss: cmd %p has"
2681 " completed with data overrun "
2682 "reported\n", cmd);
2683 break;
2684 case CMD_INVALID:
2685 printk(KERN_WARNING "cciss: cmd %p is "
2686 "reported invalid\n", cmd);
2687 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2688 cmd->err_info->CommandStatus, DRIVER_OK,
2689 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2690 break;
2691 case CMD_PROTOCOL_ERR:
2692 printk(KERN_WARNING "cciss: cmd %p has "
2693 "protocol error \n", cmd);
2694 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2695 cmd->err_info->CommandStatus, DRIVER_OK,
2696 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2697 break;
2698 case CMD_HARDWARE_ERR:
2699 printk(KERN_WARNING "cciss: cmd %p had "
2700 " hardware error\n", cmd);
2701 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2702 cmd->err_info->CommandStatus, DRIVER_OK,
2703 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2704 break;
2705 case CMD_CONNECTION_LOST:
2706 printk(KERN_WARNING "cciss: cmd %p had "
2707 "connection lost\n", cmd);
2708 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2709 cmd->err_info->CommandStatus, DRIVER_OK,
2710 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2711 break;
2712 case CMD_ABORTED:
2713 printk(KERN_WARNING "cciss: cmd %p was "
2714 "aborted\n", cmd);
2715 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2716 cmd->err_info->CommandStatus, DRIVER_OK,
2717 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
2718 break;
2719 case CMD_ABORT_FAILED:
2720 printk(KERN_WARNING "cciss: cmd %p reports "
2721 "abort failed\n", cmd);
2722 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2723 cmd->err_info->CommandStatus, DRIVER_OK,
2724 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2725 break;
2726 case CMD_UNSOLICITED_ABORT:
2727 printk(KERN_WARNING "cciss%d: unsolicited "
2728 "abort %p\n", h->ctlr, cmd);
2729 if (cmd->retry_count < MAX_CMD_RETRIES) {
2730 retry_cmd = 1;
2731 printk(KERN_WARNING
2732 "cciss%d: retrying %p\n", h->ctlr, cmd);
2733 cmd->retry_count++;
2734 } else
2735 printk(KERN_WARNING
2736 "cciss%d: %p retried too "
2737 "many times\n", h->ctlr, cmd);
2738 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2739 cmd->err_info->CommandStatus, DRIVER_OK,
2740 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
2741 break;
2742 case CMD_TIMEOUT:
2743 printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd);
2744 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2745 cmd->err_info->CommandStatus, DRIVER_OK,
2746 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2747 break;
2748 default:
2749 printk(KERN_WARNING "cciss: cmd %p returned "
2750 "unknown status %x\n", cmd,
2751 cmd->err_info->CommandStatus);
2752 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2753 cmd->err_info->CommandStatus, DRIVER_OK,
2754 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2757 after_error_processing:
2759 /* We need to return this command */
2760 if (retry_cmd) {
2761 resend_cciss_cmd(h, cmd);
2762 return;
2764 cmd->rq->completion_data = cmd;
2765 blk_complete_request(cmd->rq);
2769 * Get a request and submit it to the controller.
/*
 * do_cciss_request: block-layer request_fn.  Pulls requests off the
 * elevator, builds a CISS read/write (or passes through an SG_IO CDB),
 * DMA-maps the data, and queues the command for start_io().  Called with
 * q->queue_lock held; the lock is dropped while building each command
 * and re-taken before queueing it.
 */
2771 static void do_cciss_request(struct request_queue *q)
2773 ctlr_info_t *h = q->queuedata;
2774 CommandList_struct *c;
2775 sector_t start_blk;
2776 int seg;
2777 struct request *creq;
2778 u64bit temp64;
2779 struct scatterlist tmp_sg[MAXSGENTRIES];
2780 drive_info_struct *drv;
2781 int i, dir;
2783 /* We call start_io here in case there is a command waiting on the
2784 * queue that has not been sent.
2786 if (blk_queue_plugged(q))
2787 goto startio;
2789 queue:
2790 creq = elv_next_request(q);
2791 if (!creq)
2792 goto startio;
2794 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2796 if ((c = cmd_alloc(h, 1)) == NULL)
2797 goto full;
2799 blkdev_dequeue_request(creq);
/* drop the queue lock while we build and DMA-map the command */
2801 spin_unlock_irq(q->queue_lock);
2803 c->cmd_type = CMD_RWREQ;
2804 c->rq = creq;
2806 /* fill in the request */
2807 drv = creq->rq_disk->private_data;
2808 c->Header.ReplyQueue = 0; // unused in simple mode
2809 /* got command from pool, so use the command block index instead */
2810 /* for direct lookups. */
2811 /* The first 2 bits are reserved for controller error reporting. */
2812 c->Header.Tag.lower = (c->cmdindex << 3);
2813 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
2814 c->Header.LUN.LogDev.VolId = drv->LunID;
2815 c->Header.LUN.LogDev.Mode = 1;
2816 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2817 c->Request.Type.Type = TYPE_CMD; // It is a command.
2818 c->Request.Type.Attribute = ATTR_SIMPLE;
2819 c->Request.Type.Direction =
2820 (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
2821 c->Request.Timeout = 0; // Don't time out
2822 c->Request.CDB[0] =
2823 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2824 start_blk = creq->sector;
2825 #ifdef CCISS_DEBUG
2826 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
2827 (int)creq->nr_sectors);
2828 #endif /* CCISS_DEBUG */
2830 sg_init_table(tmp_sg, MAXSGENTRIES);
2831 seg = blk_rq_map_sg(q, creq, tmp_sg);
2833 /* get the DMA records for the setup */
2834 if (c->Request.Type.Direction == XFER_READ)
2835 dir = PCI_DMA_FROMDEVICE;
2836 else
2837 dir = PCI_DMA_TODEVICE;
/* map each scatter-gather segment and record the 64-bit bus address
 * split into lower/upper halves for the controller */
2839 for (i = 0; i < seg; i++) {
2840 c->SG[i].Len = tmp_sg[i].length;
2841 temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
2842 tmp_sg[i].offset,
2843 tmp_sg[i].length, dir);
2844 c->SG[i].Addr.lower = temp64.val32.lower;
2845 c->SG[i].Addr.upper = temp64.val32.upper;
2846 c->SG[i].Ext = 0; // we are not chaining
2848 /* track how many SG entries we are using */
2849 if (seg > h->maxSG)
2850 h->maxSG = seg;
2852 #ifdef CCISS_DEBUG
2853 printk(KERN_DEBUG "cciss: Submitting %lu sectors in %d segments\n",
2854 creq->nr_sectors, seg);
2855 #endif /* CCISS_DEBUG */
2857 c->Header.SGList = c->Header.SGTotal = seg;
/* filesystem I/O: build a 10-byte or 16-byte READ/WRITE CDB depending
 * on whether the controller addresses >2TB volumes */
2858 if (likely(blk_fs_request(creq))) {
2859 if(h->cciss_read == CCISS_READ_10) {
2860 c->Request.CDB[1] = 0;
2861 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2862 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2863 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2864 c->Request.CDB[5] = start_blk & 0xff;
2865 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2866 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2867 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2868 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2869 } else {
2870 u32 upper32 = upper_32_bits(start_blk);
2872 c->Request.CDBLen = 16;
2873 c->Request.CDB[1]= 0;
2874 c->Request.CDB[2]= (upper32 >> 24) & 0xff; //MSB
2875 c->Request.CDB[3]= (upper32 >> 16) & 0xff;
2876 c->Request.CDB[4]= (upper32 >> 8) & 0xff;
2877 c->Request.CDB[5]= upper32 & 0xff;
2878 c->Request.CDB[6]= (start_blk >> 24) & 0xff;
2879 c->Request.CDB[7]= (start_blk >> 16) & 0xff;
2880 c->Request.CDB[8]= (start_blk >> 8) & 0xff;
2881 c->Request.CDB[9]= start_blk & 0xff;
2882 c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
2883 c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
2884 c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
2885 c->Request.CDB[13]= creq->nr_sectors & 0xff;
2886 c->Request.CDB[14] = c->Request.CDB[15] = 0;
/* SG_IO-style request: pass the caller's CDB straight through */
2888 } else if (blk_pc_request(creq)) {
2889 c->Request.CDBLen = creq->cmd_len;
2890 memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
2891 } else {
2892 printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type);
2893 BUG();
2896 spin_lock_irq(q->queue_lock);
2898 addQ(&h->reqQ, c);
2899 h->Qdepth++;
2900 if (h->Qdepth > h->maxQsinceinit)
2901 h->maxQsinceinit = h->Qdepth;
2903 goto queue;
2904 full:
2905 blk_stop_queue(q);
2906 startio:
2907 /* We will already have the driver lock here so not need
2908 * to lock it.
2910 start_io(h);
/*
 * get_next_completion: fetch the next completed-command tag, preferring
 * any completions that the polled sendcmd() path saved on the reject
 * list over fresh hardware FIFO entries.  Returns FIFO_EMPTY when
 * nothing is pending.
 */
2913 static inline unsigned long get_next_completion(ctlr_info_t *h)
2915 #ifdef CONFIG_CISS_SCSI_TAPE
2916 /* Any rejects from sendcmd() lying around? Process them first */
2917 if (h->scsi_rejects.ncompletions == 0)
2918 return h->access.command_completed(h);
2919 else {
2920 struct sendcmd_reject_list *srl;
2921 int n;
2922 srl = &h->scsi_rejects;
/* consume saved completions LIFO from the end of the list */
2923 n = --srl->ncompletions;
2924 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2925 printk("p");
2926 return srl->complete[n];
2928 #else
2929 return h->access.command_completed(h);
2930 #endif
/*
 * interrupt_pending: true if the controller has an interrupt asserted,
 * or (with scsi-tape support) if saved sendcmd() rejects still need to
 * be drained.
 */
2933 static inline int interrupt_pending(ctlr_info_t *h)
2935 #ifdef CONFIG_CISS_SCSI_TAPE
2936 return (h->access.intr_pending(h)
2937 || (h->scsi_rejects.ncompletions > 0));
2938 #else
2939 return h->access.intr_pending(h);
2940 #endif
/*
 * interrupt_not_for_us: true when this device has nothing for us --
 * no interrupt asserted (or our interrupts are masked) and, with
 * scsi-tape support, no saved sendcmd() rejects either.  Lets the IRQ
 * handler bail out early on shared interrupt lines.
 */
2943 static inline long interrupt_not_for_us(ctlr_info_t *h)
2945 #ifdef CONFIG_CISS_SCSI_TAPE
2946 return (((h->access.intr_pending(h) == 0) ||
2947 (h->interrupts_enabled == 0))
2948 && (h->scsi_rejects.ncompletions == 0));
2949 #else
2950 return (((h->access.intr_pending(h) == 0) ||
2951 (h->interrupts_enabled == 0)));
2952 #endif
/*
 * do_cciss_intr: interrupt handler.  Drains completion tags and
 * dispatches each finished command by its cmd_type (block request,
 * ioctl waiter, or scsi-tape command).  Tags with bit 2 set are
 * "direct lookup" tags carrying the command-pool index in bits 3+;
 * older-style tags are matched against busaddr on the completion queue.
 */
2955 static irqreturn_t do_cciss_intr(int irq, void *dev_id)
2957 ctlr_info_t *h = dev_id;
2958 CommandList_struct *c;
2959 unsigned long flags;
2960 __u32 a, a1, a2;
2962 if (interrupt_not_for_us(h))
2963 return IRQ_NONE;
2965 * If there are completed commands in the completion queue,
2966 * we had better do something about it.
2968 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2969 while (interrupt_pending(h)) {
2970 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
2971 a1 = a;
/* direct-lookup tag: bits 3+ index into the command pool */
2972 if ((a & 0x04)) {
2973 a2 = (a >> 3);
/* an out-of-range index means the controller is returning
 * garbage -- treat it as a fatal controller failure */
2974 if (a2 >= h->nr_cmds) {
2975 printk(KERN_WARNING
2976 "cciss: controller cciss%d failed, stopping.\n",
2977 h->ctlr);
2978 fail_all_cmds(h->ctlr);
2979 return IRQ_HANDLED;
2982 c = h->cmd_pool + a2;
2983 a = c->busaddr;
2985 } else {
2986 struct hlist_node *tmp;
2988 a &= ~3;
2989 c = NULL;
2990 hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
2991 if (c->busaddr == a)
2992 break;
2996 * If we've found the command, take it off the
2997 * completion Q and free it
2999 if (c && c->busaddr == a) {
3000 removeQ(c);
3001 if (c->cmd_type == CMD_RWREQ) {
3002 complete_command(h, c, 0);
3003 } else if (c->cmd_type == CMD_IOCTL_PEND) {
3004 complete(c->waiting);
3006 # ifdef CONFIG_CISS_SCSI_TAPE
3007 else if (c->cmd_type == CMD_SCSI)
3008 complete_scsi_command(c, 0, a1);
3009 # endif
3010 continue;
3015 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
3016 return IRQ_HANDLED;
3020 * We cannot read the structure directly, for portability we must use
3021 * the io functions.
3022 * This is for debug only.
#ifdef CCISS_DEBUG
/*
 * print_cfg_table: dump the controller configuration table (debug
 * builds only).  The table lives in memory-mapped PCI space, so every
 * field is fetched with readl()/readb() rather than dereferenced.
 *
 * Fix: the "Max outstanding commands" line used the format "0x%d",
 * printing a decimal value behind a hex "0x" prefix; it now prints
 * plain decimal.
 */
static void print_cfg_table(CfgTable_struct *tb)
{
	int i;
	char temp_name[17];

	printk("Controller Configuration information\n");
	printk("------------------------------------\n");
	for (i = 0; i < 4; i++)
		temp_name[i] = readb(&(tb->Signature[i]));
	temp_name[4] = '\0';
	printk("   Signature = %s\n", temp_name);
	printk("   Spec Number = %d\n", readl(&(tb->SpecValence)));
	printk("   Transport methods supported = 0x%x\n",
	       readl(&(tb->TransportSupport)));
	printk("   Transport methods active = 0x%x\n",
	       readl(&(tb->TransportActive)));
	printk("   Requested transport Method = 0x%x\n",
	       readl(&(tb->HostWrite.TransportRequest)));
	printk("   Coalesce Interrupt Delay = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntDelay)));
	printk("   Coalesce Interrupt Count = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntCount)));
	printk("   Max outstanding commands = %d\n",
	       readl(&(tb->CmdsOutMax)));
	printk("   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
	for (i = 0; i < 16; i++)
		temp_name[i] = readb(&(tb->ServerName[i]));
	temp_name[16] = '\0';
	printk("   Server Name = %s\n", temp_name);
	printk("   Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
}
#endif				/* CCISS_DEBUG */
/*
 * find_PCI_BAR_index: map a config-space BAR register offset
 * (PCI_BASE_ADDRESS_0..5) to the corresponding resource index by
 * walking the device's resources and accounting for the fact that
 * 64-bit memory BARs occupy two 4-byte config slots.  Returns the
 * resource index, or -1 if the offset does not land on a BAR boundary
 * or a reserved BAR type is encountered.
 */
3058 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3060 int i, offset, mem_type, bar_type;
3061 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
3062 return 0;
3063 offset = 0;
3064 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3065 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
3066 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
3067 offset += 4;
3068 else {
3069 mem_type = pci_resource_flags(pdev, i) &
3070 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
3071 switch (mem_type) {
3072 case PCI_BASE_ADDRESS_MEM_TYPE_32:
3073 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
3074 offset += 4; /* 32 bit */
3075 break;
3076 case PCI_BASE_ADDRESS_MEM_TYPE_64:
3077 offset += 8;
3078 break;
3079 default: /* reserved in PCI 2.2 */
3080 printk(KERN_WARNING
3081 "Base address is invalid\n");
3082 return -1;
3083 break;
/* the offset accumulated so far equals the requested register's
 * distance from BAR0 exactly when resource i+1 is the match */
3086 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
3087 return i + 1;
3089 return -1;
3092 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
3093 * controllers that are capable. If not, we use IO-APIC mode.
/*
 * cciss_interrupt_mode: pick the interrupt delivery mechanism.  Tries
 * MSI-X (4 vectors), then MSI, and falls back to the legacy IO-APIC
 * pin interrupt.  Boards known to advertise but not support MSI are
 * forced straight to the default mode.  Results are recorded in
 * c->intr[] and the msix_vector/msi_vector flags.
 */
3096 static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
3097 struct pci_dev *pdev, __u32 board_id)
3099 #ifdef CONFIG_PCI_MSI
3100 int err;
3101 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
3102 {0, 2}, {0, 3}
3105 /* Some boards advertise MSI but don't really support it */
3106 if ((board_id == 0x40700E11) ||
3107 (board_id == 0x40800E11) ||
3108 (board_id == 0x40820E11) || (board_id == 0x40830E11))
3109 goto default_int_mode;
3111 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
3112 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
3113 if (!err) {
3114 c->intr[0] = cciss_msix_entries[0].vector;
3115 c->intr[1] = cciss_msix_entries[1].vector;
3116 c->intr[2] = cciss_msix_entries[2].vector;
3117 c->intr[3] = cciss_msix_entries[3].vector;
3118 c->msix_vector = 1;
3119 return;
/* err > 0 means fewer than 4 vectors were available; either way
 * we fall back rather than retry with a smaller count */
3121 if (err > 0) {
3122 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
3123 "available\n", err);
3124 goto default_int_mode;
3125 } else {
3126 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
3127 err);
3128 goto default_int_mode;
3131 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
3132 if (!pci_enable_msi(pdev)) {
3133 c->msi_vector = 1;
3134 } else {
3135 printk(KERN_WARNING "cciss: MSI init failed\n");
3138 default_int_mode:
3139 #endif /* CONFIG_PCI_MSI */
3140 /* if we get here we're going to use the default interrupt mode */
3141 c->intr[SIMPLE_MODE_INT] = pdev->irq;
3142 return;
3145 static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
3147 ushort subsystem_vendor_id, subsystem_device_id, command;
3148 __u32 board_id, scratchpad = 0;
3149 __u64 cfg_offset;
3150 __u32 cfg_base_addr;
3151 __u64 cfg_base_addr_index;
3152 int i, err;
3154 /* check to see if controller has been disabled */
3155 /* BEFORE trying to enable it */
3156 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
3157 if (!(command & 0x02)) {
3158 printk(KERN_WARNING
3159 "cciss: controller appears to be disabled\n");
3160 return -ENODEV;
3163 err = pci_enable_device(pdev);
3164 if (err) {
3165 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
3166 return err;
3169 err = pci_request_regions(pdev, "cciss");
3170 if (err) {
3171 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
3172 "aborting\n");
3173 return err;
3176 subsystem_vendor_id = pdev->subsystem_vendor;
3177 subsystem_device_id = pdev->subsystem_device;
3178 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
3179 subsystem_vendor_id);
3181 #ifdef CCISS_DEBUG
3182 printk("command = %x\n", command);
3183 printk("irq = %x\n", pdev->irq);
3184 printk("board_id = %x\n", board_id);
3185 #endif /* CCISS_DEBUG */
3187 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
3188 * else we use the IO-APIC interrupt assigned to us by system ROM.
3190 cciss_interrupt_mode(c, pdev, board_id);
3193 * Memory base addr is first addr , the second points to the config
3194 * table
3197 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
3198 #ifdef CCISS_DEBUG
3199 printk("address 0 = %lx\n", c->paddr);
3200 #endif /* CCISS_DEBUG */
3201 c->vaddr = remap_pci_mem(c->paddr, 0x250);
3203 /* Wait for the board to become ready. (PCI hotplug needs this.)
3204 * We poll for up to 120 secs, once per 100ms. */
3205 for (i = 0; i < 1200; i++) {
3206 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
3207 if (scratchpad == CCISS_FIRMWARE_READY)
3208 break;
3209 set_current_state(TASK_INTERRUPTIBLE);
3210 schedule_timeout(HZ / 10); /* wait 100ms */
3212 if (scratchpad != CCISS_FIRMWARE_READY) {
3213 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
3214 err = -ENODEV;
3215 goto err_out_free_res;
3218 /* get the address index number */
3219 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
3220 cfg_base_addr &= (__u32) 0x0000ffff;
3221 #ifdef CCISS_DEBUG
3222 printk("cfg base address = %x\n", cfg_base_addr);
3223 #endif /* CCISS_DEBUG */
3224 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
3225 #ifdef CCISS_DEBUG
3226 printk("cfg base address index = %llx\n",
3227 (unsigned long long)cfg_base_addr_index);
3228 #endif /* CCISS_DEBUG */
3229 if (cfg_base_addr_index == -1) {
3230 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
3231 err = -ENODEV;
3232 goto err_out_free_res;
3235 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
3236 #ifdef CCISS_DEBUG
3237 printk("cfg offset = %llx\n", (unsigned long long)cfg_offset);
3238 #endif /* CCISS_DEBUG */
3239 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
3240 cfg_base_addr_index) +
3241 cfg_offset, sizeof(CfgTable_struct));
3242 c->board_id = board_id;
3244 #ifdef CCISS_DEBUG
3245 print_cfg_table(c->cfgtable);
3246 #endif /* CCISS_DEBUG */
3248 /* Some controllers support Zero Memory Raid (ZMR).
3249 * When configured in ZMR mode the number of supported
3250 * commands drops to 64. So instead of just setting an
3251 * arbitrary value we make the driver a little smarter.
3252 * We read the config table to tell us how many commands
3253 * are supported on the controller then subtract 4 to
3254 * leave a little room for ioctl calls.
3256 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
3257 for (i = 0; i < ARRAY_SIZE(products); i++) {
3258 if (board_id == products[i].board_id) {
3259 c->product_name = products[i].product_name;
3260 c->access = *(products[i].access);
3261 c->nr_cmds = c->max_commands - 4;
3262 break;
3265 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
3266 (readb(&c->cfgtable->Signature[1]) != 'I') ||
3267 (readb(&c->cfgtable->Signature[2]) != 'S') ||
3268 (readb(&c->cfgtable->Signature[3]) != 'S')) {
3269 printk("Does not appear to be a valid CISS config table\n");
3270 err = -ENODEV;
3271 goto err_out_free_res;
3273 /* We didn't find the controller in our list. We know the
3274 * signature is valid. If it's an HP device let's try to
3275 * bind to the device and fire it up. Otherwise we bail.
3277 if (i == ARRAY_SIZE(products)) {
3278 if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
3279 c->product_name = products[i-1].product_name;
3280 c->access = *(products[i-1].access);
3281 c->nr_cmds = c->max_commands - 4;
3282 printk(KERN_WARNING "cciss: This is an unknown "
3283 "Smart Array controller.\n"
3284 "cciss: Please update to the latest driver "
3285 "available from www.hp.com.\n");
3286 } else {
3287 printk(KERN_WARNING "cciss: Sorry, I don't know how"
3288 " to access the Smart Array controller %08lx\n"
3289 , (unsigned long)board_id);
3290 err = -ENODEV;
3291 goto err_out_free_res;
3294 #ifdef CONFIG_X86
3296 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3297 __u32 prefetch;
3298 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
3299 prefetch |= 0x100;
3300 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
3302 #endif
3304 /* Disabling DMA prefetch and refetch for the P600.
3305 * An ASIC bug may result in accesses to invalid memory addresses.
3306 * We've disabled prefetch for some time now. Testing with XEN
3307 * kernels revealed a bug in the refetch if dom0 resides on a P600.
3309 if(board_id == 0x3225103C) {
3310 __u32 dma_prefetch;
3311 __u32 dma_refetch;
3312 dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
3313 dma_prefetch |= 0x8000;
3314 writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
3315 pci_read_config_dword(pdev, PCI_COMMAND_PARITY, &dma_refetch);
3316 dma_refetch |= 0x1;
3317 pci_write_config_dword(pdev, PCI_COMMAND_PARITY, dma_refetch);
3320 #ifdef CCISS_DEBUG
3321 printk("Trying to put board into Simple mode\n");
3322 #endif /* CCISS_DEBUG */
3323 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
3324 /* Update the field, and then ring the doorbell */
3325 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
3326 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
3328 /* under certain very rare conditions, this can take awhile.
3329 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3330 * as we enter this code.) */
3331 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3332 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3333 break;
3334 /* delay and try again */
3335 set_current_state(TASK_INTERRUPTIBLE);
3336 schedule_timeout(10);
3339 #ifdef CCISS_DEBUG
3340 printk(KERN_DEBUG "I counter got to %d %x\n", i,
3341 readl(c->vaddr + SA5_DOORBELL));
3342 #endif /* CCISS_DEBUG */
3343 #ifdef CCISS_DEBUG
3344 print_cfg_table(c->cfgtable);
3345 #endif /* CCISS_DEBUG */
3347 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3348 printk(KERN_WARNING "cciss: unable to get board into"
3349 " simple mode\n");
3350 err = -ENODEV;
3351 goto err_out_free_res;
3353 return 0;
3355 err_out_free_res:
3357 * Deliberately omit pci_disable_device(): it does something nasty to
3358 * Smart Array controllers that pci_enable_device does not undo
3360 pci_release_regions(pdev);
3361 return err;
3364 /* Function to find the first free pointer into our hba[] array
3365 * Returns -1 if no free entries are left.
3367 static int alloc_cciss_hba(void)
3369 int i;
3371 for (i = 0; i < MAX_CTLR; i++) {
3372 if (!hba[i]) {
3373 ctlr_info_t *p;
3375 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3376 if (!p)
3377 goto Enomem;
3378 hba[i] = p;
3379 return i;
3382 printk(KERN_WARNING "cciss: This driver supports a maximum"
3383 " of %d controllers.\n", MAX_CTLR);
3384 return -1;
3385 Enomem:
3386 printk(KERN_ERR "cciss: out of memory.\n");
3387 return -1;
3390 static void free_hba(int i)
3392 ctlr_info_t *p = hba[i];
3393 int n;
3395 hba[i] = NULL;
3396 for (n = 0; n < CISS_MAX_LUN; n++)
3397 put_disk(p->gendisk[n]);
3398 kfree(p);
/* Send a message CDB to the firmware.
 *
 * Used before the normal command infrastructure exists (e.g. soft reset /
 * no-op during probe, see the macros below).  Builds a single TYPE_MSG
 * command in a DMA-coherent buffer, posts its 32-bit physical address to
 * the inbound post queue, and polls the reply queue for up to ~10 seconds.
 * Returns 0 on success, -ENOMEM on mapping/allocation failure, -ETIMEDOUT
 * if the controller never replies, -EIO if the reply tag flags an error.
 */
static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, unsigned char type)
{
	/* Command plus its error descriptor, laid out contiguously. */
	typedef struct {
		CommandListHeader_struct CommandHeader;
		RequestBlock_struct Request;
		ErrDescriptor_struct ErrorDescriptor;
	} Command;
	/* ErrorInfo_struct is tacked onto the end of the same allocation. */
	static const size_t cmd_sz = sizeof(Command) + sizeof(ErrorInfo_struct);
	Command *cmd;
	dma_addr_t paddr64;
	uint32_t paddr32, tag;
	void __iomem *vaddr;
	int i, err;

	vaddr = ioremap_nocache(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
	if (vaddr == NULL)
		return -ENOMEM;

	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
	   CCISS commands, so they must be allocated from the lower 4GiB of
	   memory. */
	err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	if (err) {
		iounmap(vaddr);
		return -ENOMEM;
	}

	cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
	if (cmd == NULL) {
		iounmap(vaddr);
		return -ENOMEM;
	}

	/* This must fit, because of the 32-bit consistent DMA mask.  Also,
	   although there's no guarantee, we assume that the address is at
	   least 4-byte aligned (most likely, it's page-aligned). */
	paddr32 = paddr64;

	/* No scatter-gather; the buffer's own address doubles as the tag
	 * we match replies against. */
	cmd->CommandHeader.ReplyQueue = 0;
	cmd->CommandHeader.SGList = 0;
	cmd->CommandHeader.SGTotal = 0;
	cmd->CommandHeader.Tag.lower = paddr32;
	cmd->CommandHeader.Tag.upper = 0;
	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);

	cmd->Request.CDBLen = 16;
	cmd->Request.Type.Type = TYPE_MSG;
	cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
	cmd->Request.Type.Direction = XFER_NONE;
	cmd->Request.Timeout = 0; /* Don't time out */
	cmd->Request.CDB[0] = opcode;
	cmd->Request.CDB[1] = type;
	memset(&cmd->Request.CDB[2], 0, 14); /* the rest of the CDB is reserved */

	/* Error info lives right after the Command in the same buffer. */
	cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(Command);
	cmd->ErrorDescriptor.Addr.upper = 0;
	cmd->ErrorDescriptor.Len = sizeof(ErrorInfo_struct);

	/* Post the command, then poll the reply queue once per second. */
	writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);

	for (i = 0; i < 10; i++) {
		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
		/* low two tag bits carry status, mask them for the match */
		if ((tag & ~3) == paddr32)
			break;
		schedule_timeout_uninterruptible(HZ);
	}

	iounmap(vaddr);

	/* we leak the DMA buffer here ... no choice since the controller could
	   still complete the command. */
	if (i == 10) {
		printk(KERN_ERR "cciss: controller message %02x:%02x timed out\n",
		       opcode, type);
		return -ETIMEDOUT;
	}

	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);

	/* tag bit 1 set means the controller reported an error */
	if (tag & 2) {
		printk(KERN_ERR "cciss: controller message %02x:%02x failed\n",
		       opcode, type);
		return -EIO;
	}

	printk(KERN_INFO "cciss: controller message %02x:%02x succeeded\n",
	       opcode, type);
	return 0;
}
3492 #define cciss_soft_reset_controller(p) cciss_message(p, 1, 0)
3493 #define cciss_noop(p) cciss_message(p, 3, 0)
3495 static __devinit int cciss_reset_msi(struct pci_dev *pdev)
3497 /* the #defines are stolen from drivers/pci/msi.h. */
3498 #define msi_control_reg(base) (base + PCI_MSI_FLAGS)
3499 #define PCI_MSIX_FLAGS_ENABLE (1 << 15)
3501 int pos;
3502 u16 control = 0;
3504 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
3505 if (pos) {
3506 pci_read_config_word(pdev, msi_control_reg(pos), &control);
3507 if (control & PCI_MSI_FLAGS_ENABLE) {
3508 printk(KERN_INFO "cciss: resetting MSI\n");
3509 pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE);
3513 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
3514 if (pos) {
3515 pci_read_config_word(pdev, msi_control_reg(pos), &control);
3516 if (control & PCI_MSIX_FLAGS_ENABLE) {
3517 printk(KERN_INFO "cciss: resetting MSI-X\n");
3518 pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE);
3522 return 0;
/* This does a hard reset of the controller using PCI power management
 * states.
 *
 * Saves the first 64 bytes of PCI config space, bounces the device through
 * D3hot and back to D0 (which triggers a secondary PCI reset per the Open
 * CISS spec), then restores config space in the spec-mandated order.
 * Returns 0 on success, -ENODEV if the device has no PCI PM capability.
 */
static __devinit int cciss_hard_reset_controller(struct pci_dev *pdev)
{
	u16 pmcsr, saved_config_space[32];
	int i, pos;

	printk(KERN_INFO "cciss: using PCI PM to reset controller\n");

	/* This is very nearly the same thing as

	   pci_save_state(pci_dev);
	   pci_set_power_state(pci_dev, PCI_D3hot);
	   pci_set_power_state(pci_dev, PCI_D0);
	   pci_restore_state(pci_dev);

	   but we can't use these nice canned kernel routines on
	   kexec, because they also check the MSI/MSI-X state in PCI
	   configuration space and do the wrong thing when it is
	   set/cleared.  Also, the pci_save/restore_state functions
	   violate the ordering requirements for restoring the
	   configuration space from the CCISS document (see the
	   comment below).  So we roll our own .... */

	/* save config space as 32 16-bit words (offsets 00h-3Eh) */
	for (i = 0; i < 32; i++)
		pci_read_config_word(pdev, 2*i, &saved_config_space[i]);

	pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pos == 0) {
		printk(KERN_ERR "cciss_reset_controller: PCI PM not supported\n");
		return -ENODEV;
	}

	/* Quoting from the Open CISS Specification: "The Power
	 * Management Control/Status Register (CSR) controls the power
	 * state of the device.  The normal operating state is D0,
	 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
	 * the controller, place the interface device in D3 then to
	 * D0, this causes a secondary PCI reset which will reset the
	 * controller." */

	/* enter the D3hot power management state */
	pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pmcsr |= PCI_D3hot;
	pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);

	schedule_timeout_uninterruptible(HZ >> 1);	/* settle for 500ms */

	/* enter the D0 power management state */
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pmcsr |= PCI_D0;
	pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);

	schedule_timeout_uninterruptible(HZ >> 1);	/* settle for 500ms */

	/* Restore the PCI configuration space.  The Open CISS
	 * Specification says, "Restore the PCI Configuration
	 * Registers, offsets 00h through 60h. It is important to
	 * restore the command register, 16-bits at offset 04h,
	 * last. Do not restore the configuration status register,
	 * 16-bits at offset 06h."  Note that the offset is 2*i. */
	for (i = 0; i < 32; i++) {
		if (i == 2 || i == 3)	/* skip command (04h) and status (06h) */
			continue;
		pci_write_config_word(pdev, 2*i, saved_config_space[i]);
	}
	wmb();	/* order all restores before the final command-register write */
	pci_write_config_word(pdev, 4, saved_config_space[2]);

	return 0;
}
/*
 * This is it.  Find all the controllers and register them.  I really hate
 * stealing all these major device numbers.
 * returns the number of block devices registered.
 *
 * PCI probe entry point.  Optionally hard-resets the controller first
 * (reset_devices / kdump path), then allocates an hba[] slot, initializes
 * the PCI side, DMA masks, block major, IRQ, command/error pools, SCSI tape
 * support, firmware version, /proc entry, and finally the logical drives.
 * Cleanup labels unwind in reverse order of acquisition.
 */
static int __devinit cciss_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	int i;
	int j = 0;
	int rc;
	int dac, return_code;
	InquiryData_struct *inq_buff = NULL;

	if (reset_devices) {
		/* Reset the controller with a PCI power-cycle */
		if (cciss_hard_reset_controller(pdev) || cciss_reset_msi(pdev))
			return -ENODEV;

		/* Now try to get the controller to respond to a no-op. Some
		   devices (notably the HP Smart Array 5i Controller) need
		   up to 30 seconds to respond. */
		for (i=0; i<30; i++) {
			if (cciss_noop(pdev) == 0)
				break;

			schedule_timeout_uninterruptible(HZ);
		}
		if (i == 30) {
			printk(KERN_ERR "cciss: controller seems dead\n");
			return -EBUSY;
		}
	}

	i = alloc_cciss_hba();	/* grab a free hba[] slot */
	if (i < 0)
		return -1;

	hba[i]->busy_initializing = 1;
	INIT_HLIST_HEAD(&hba[i]->cmpQ);
	INIT_HLIST_HEAD(&hba[i]->reqQ);

	if (cciss_pci_init(hba[i], pdev) != 0)
		goto clean1;

	sprintf(hba[i]->devname, "cciss%d", i);
	hba[i]->ctlr = i;
	hba[i]->pdev = pdev;

	/* configure PCI DMA stuff */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
		dac = 1;
	else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
		dac = 0;
	else {
		printk(KERN_ERR "cciss: no suitable DMA available\n");
		goto clean1;
	}

	/*
	 * register with the major number, or get a dynamic major number
	 * by passing 0 as argument.  This is done for greater than
	 * 8 controller support.
	 */
	if (i < MAX_CTLR_ORIG)
		hba[i]->major = COMPAQ_CISS_MAJOR + i;
	rc = register_blkdev(hba[i]->major, hba[i]->devname);
	if (rc == -EBUSY || rc == -EINVAL) {
		printk(KERN_ERR
		       "cciss:  Unable to get major number %d for %s "
		       "on hba %d\n", hba[i]->major, hba[i]->devname, i);
		goto clean1;
	} else {
		if (i >= MAX_CTLR_ORIG)
			hba[i]->major = rc;	/* dynamic major was assigned */
	}

	/* make sure the board interrupts are off */
	hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
	if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
			IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
		printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
		       hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
		goto clean2;
	}

	printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
	       hba[i]->devname, pdev->device, pci_name(pdev),
	       hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");

	/* allocate the command/error pools and the free-slot bitmap */
	hba[i]->cmd_pool_bits =
	    kmalloc(DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
			* sizeof(unsigned long), GFP_KERNEL);
	hba[i]->cmd_pool = (CommandList_struct *)
	    pci_alloc_consistent(hba[i]->pdev,
		    hba[i]->nr_cmds * sizeof(CommandList_struct),
		    &(hba[i]->cmd_pool_dhandle));
	hba[i]->errinfo_pool = (ErrorInfo_struct *)
	    pci_alloc_consistent(hba[i]->pdev,
		    hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
		    &(hba[i]->errinfo_pool_dhandle));
	if ((hba[i]->cmd_pool_bits == NULL)
	    || (hba[i]->cmd_pool == NULL)
	    || (hba[i]->errinfo_pool == NULL)) {
		printk(KERN_ERR "cciss: out of memory");
		goto clean4;
	}
#ifdef CONFIG_CISS_SCSI_TAPE
	hba[i]->scsi_rejects.complete =
	    kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
		    (hba[i]->nr_cmds + 5), GFP_KERNEL);
	if (hba[i]->scsi_rejects.complete == NULL) {
		printk(KERN_ERR "cciss: out of memory");
		goto clean4;
	}
#endif
	spin_lock_init(&hba[i]->lock);

	/* Initialize the pdev driver private data.
	   have it point to hba[i].  */
	pci_set_drvdata(pdev, hba[i]);
	/* command and error info recs zeroed out before
	   they are used */
	memset(hba[i]->cmd_pool_bits, 0,
	       DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
			* sizeof(unsigned long));

	hba[i]->num_luns = 0;
	hba[i]->highest_lun = -1;
	for (j = 0; j < CISS_MAX_LUN; j++) {
		hba[i]->drv[j].raid_level = -1;
		hba[i]->drv[j].queue = NULL;
		hba[i]->gendisk[j] = NULL;
	}

	cciss_scsi_setup(i);

	/* Turn the interrupts on so we can service requests */
	hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);

	/* Get the firmware version */
	inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
	if (inq_buff == NULL) {
		printk(KERN_ERR "cciss: out of memory\n");
		goto clean4;
	}

	return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff,
		sizeof(InquiryData_struct), 0, 0 , 0, TYPE_CMD);
	if (return_code == IO_OK) {
		/* firmware version lives at bytes 32-35 of the inquiry data */
		hba[i]->firm_ver[0] = inq_buff->data_byte[32];
		hba[i]->firm_ver[1] = inq_buff->data_byte[33];
		hba[i]->firm_ver[2] = inq_buff->data_byte[34];
		hba[i]->firm_ver[3] = inq_buff->data_byte[35];
	} else {	 /* send command failed */
		printk(KERN_WARNING "cciss: unable to determine firmware"
		       " version of controller\n");
	}

	cciss_procinit(i);

	hba[i]->cciss_max_sectors = 2048;

	hba[i]->busy_initializing = 0;

	rebuild_lun_table(hba[i], 1);
	return 1;

clean4:
	/* kfree(NULL) is a no-op, so this is safe before inq_buff exists */
	kfree(inq_buff);
#ifdef CONFIG_CISS_SCSI_TAPE
	kfree(hba[i]->scsi_rejects.complete);
#endif
	kfree(hba[i]->cmd_pool_bits);
	if (hba[i]->cmd_pool)
		pci_free_consistent(hba[i]->pdev,
				    hba[i]->nr_cmds * sizeof(CommandList_struct),
				    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
	if (hba[i]->errinfo_pool)
		pci_free_consistent(hba[i]->pdev,
				    hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
				    hba[i]->errinfo_pool,
				    hba[i]->errinfo_pool_dhandle);
	free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
clean2:
	unregister_blkdev(hba[i]->major, hba[i]->devname);
clean1:
	hba[i]->busy_initializing = 0;
	/* cleanup any queues that may have been initialized */
	for (j=0; j <= hba[i]->highest_lun; j++){
		drive_info_struct *drv = &(hba[i]->drv[j]);
		if (drv->queue)
			blk_cleanup_queue(drv->queue);
	}
	/*
	 * Deliberately omit pci_disable_device(): it does something nasty to
	 * Smart Array controllers that pci_enable_device does not undo
	 */
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_hba(i);
	return -1;
}
3802 static void cciss_shutdown(struct pci_dev *pdev)
3804 ctlr_info_t *tmp_ptr;
3805 int i;
3806 char flush_buf[4];
3807 int return_code;
3809 tmp_ptr = pci_get_drvdata(pdev);
3810 if (tmp_ptr == NULL)
3811 return;
3812 i = tmp_ptr->ctlr;
3813 if (hba[i] == NULL)
3814 return;
3816 /* Turn board interrupts off and send the flush cache command */
3817 /* sendcmd will turn off interrupt, and send the flush...
3818 * To write all data in the battery backed cache to disks */
3819 memset(flush_buf, 0, 4);
3820 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3821 TYPE_CMD);
3822 if (return_code == IO_OK) {
3823 printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
3824 } else {
3825 printk(KERN_WARNING "Error flushing cache on controller %d\n", i);
3827 free_irq(hba[i]->intr[2], hba[i]);
/*
 * PCI remove entry point: tear down everything cciss_init_one() set up,
 * in reverse order — /proc entry, block major, gendisks and their queues,
 * SCSI tape hookup, cache flush + IRQ (via cciss_shutdown), MSI/MSI-X,
 * register mapping, DMA pools, and finally the hba[] slot itself.
 */
static void __devexit cciss_remove_one(struct pci_dev *pdev)
{
	ctlr_info_t *tmp_ptr;
	int i, j;

	if (pci_get_drvdata(pdev) == NULL) {
		printk(KERN_ERR "cciss: Unable to remove device \n");
		return;
	}
	tmp_ptr = pci_get_drvdata(pdev);
	i = tmp_ptr->ctlr;
	if (hba[i] == NULL) {
		printk(KERN_ERR "cciss: device appears to "
		       "already be removed \n");
		return;
	}

	remove_proc_entry(hba[i]->devname, proc_cciss);
	unregister_blkdev(hba[i]->major, hba[i]->devname);

	/* remove it from the disk list */
	for (j = 0; j < CISS_MAX_LUN; j++) {
		struct gendisk *disk = hba[i]->gendisk[j];
		if (disk) {
			struct request_queue *q = disk->queue;

			if (disk->flags & GENHD_FL_UP)
				del_gendisk(disk);
			if (q)
				blk_cleanup_queue(q);
		}
	}

#ifdef CONFIG_CISS_SCSI_TAPE
	cciss_unregister_scsi(i);	/* unhook from SCSI subsystem */
#endif

	/* flushes the cache and frees the controller IRQ */
	cciss_shutdown(pdev);

#ifdef CONFIG_PCI_MSI
	if (hba[i]->msix_vector)
		pci_disable_msix(hba[i]->pdev);
	else if (hba[i]->msi_vector)
		pci_disable_msi(hba[i]->pdev);
#endif				/* CONFIG_PCI_MSI */

	iounmap(hba[i]->vaddr);

	pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
			    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
	pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
			    hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
	kfree(hba[i]->cmd_pool_bits);
#ifdef CONFIG_CISS_SCSI_TAPE
	kfree(hba[i]->scsi_rejects.complete);
#endif
	/*
	 * Deliberately omit pci_disable_device(): it does something nasty to
	 * Smart Array controllers that pci_enable_device does not undo
	 */
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_hba(i);
}
/* PCI driver glue: ties our probe/remove/shutdown callbacks and the
 * supported-device table to the PCI core. */
static struct pci_driver cciss_pci_driver = {
	.name = "cciss",
	.probe = cciss_init_one,
	.remove = __devexit_p(cciss_remove_one),
	.id_table = cciss_pci_device_id,	/* id_table */
	.shutdown = cciss_shutdown,
};
/*
 * This is it.  Register the PCI driver information for the cards we control
 * the OS will call our registered routines when it finds one of our cards.
 *
 * Module init entry point.  Returns 0 on success or the negative errno
 * from pci_register_driver().
 */
static int __init cciss_init(void)
{
	/*
	 * The hardware requires that commands are aligned on a 64-bit
	 * boundary. Given that we use pci_alloc_consistent() to allocate an
	 * array of them, the size must be a multiple of 8 bytes.
	 */
	BUILD_BUG_ON(sizeof(CommandList_struct) % 8);

	printk(KERN_INFO DRIVER_NAME "\n");

	/* Register for our PCI devices */
	return pci_register_driver(&cciss_pci_driver);
}
3922 static void __exit cciss_cleanup(void)
3924 int i;
3926 pci_unregister_driver(&cciss_pci_driver);
3927 /* double check that all controller entrys have been removed */
3928 for (i = 0; i < MAX_CTLR; i++) {
3929 if (hba[i] != NULL) {
3930 printk(KERN_WARNING "cciss: had to remove"
3931 " controller %d\n", i);
3932 cciss_remove_one(hba[i]->pdev);
3935 remove_proc_entry("driver/cciss", NULL);
/* Abort every command on a dead controller.
 *
 * Marks the controller not alive, disables the PCI device, then — under the
 * controller lock — moves every queued request to the completion queue and
 * fails each completed-queue entry with CMD_HARDWARE_ERR, notifying the
 * appropriate waiter for its command type.
 */
static void fail_all_cmds(unsigned long ctlr)
{
	/* If we get here, the board is apparently dead. */
	ctlr_info_t *h = hba[ctlr];
	CommandList_struct *c;
	unsigned long flags;

	printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
	h->alive = 0;	/* the controller apparently died... */

	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);

	pci_disable_device(h->pdev);	/* Make sure it is really dead. */

	/* move everything off the request queue onto the completed queue */
	while (!hlist_empty(&h->reqQ)) {
		c = hlist_entry(h->reqQ.first, CommandList_struct, list);
		removeQ(c);
		h->Qdepth--;
		addQ(&h->cmpQ, c);
	}

	/* Now, fail everything on the completed queue with a HW error */
	while (!hlist_empty(&h->cmpQ)) {
		c = hlist_entry(h->cmpQ.first, CommandList_struct, list);
		removeQ(c);
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
		if (c->cmd_type == CMD_RWREQ) {
			/* block-layer request: complete it with an error */
			complete_command(h, c, 0);
		} else if (c->cmd_type == CMD_IOCTL_PEND)
			/* ioctl path is sleeping on this completion */
			complete(c->waiting);
#ifdef CONFIG_CISS_SCSI_TAPE
		else if (c->cmd_type == CMD_SCSI)
			complete_scsi_command(c, 0, 0);
#endif
	}
	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
	return;
}
/* Register the module's entry and exit points with the kernel. */
module_init(cciss_init);
module_exit(cciss_cleanup);