cciss: new hardware support
drivers/block/cciss.c
/*
 *    Disk Array driver for HP Smart Array controllers.
 *    (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *    General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
 *    02111-1307, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/blkpg.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/hdreg.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <asm/uaccess.h>
#include <asm/io.h>

#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/completion.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <scsi/scsi_ioctl.h>
#include <linux/cdrom.h>
#include <linux/scatterlist.h>
#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
#define DRIVER_NAME "HP CISS Driver (v 3.6.20)"
#define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 20)
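/*
 * Worked example (illustrative aside, not part of the original source):
 * CCISS_DRIVER_VERSION packs major/minor/subminor into a single integer,
 * so CCISS_DRIVER_VERSION(3, 6, 20) evaluates to
 * (3 << 16) | (6 << 8) | 20 = 0x30614 = 198164.  This is the value the
 * CCISS_GETDRIVVER ioctl below copies out as DriverVer_type.
 */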
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controllers");
MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
			" SA6i P600 P800 P400 P400i E200 E200i E500 P700m"
			" Smart Array G2 Series SAS/SATA Controllers");
MODULE_VERSION("3.6.20");
MODULE_LICENSE("GPL");

#include "cciss_cmd.h"
#include "cciss.h"
#include <linux/cciss_ioctl.h>
/* define the PCI info for the cards we can control */
static const struct pci_device_id cciss_pci_device_id[] = {
        {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
        {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
        {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
        {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
        {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
        {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
        {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
        {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
        {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
        {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
                PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
        {0,}
};

MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
        {0x40700E11, "Smart Array 5300", &SA5_access},
        {0x40800E11, "Smart Array 5i", &SA5B_access},
        {0x40820E11, "Smart Array 532", &SA5B_access},
        {0x40830E11, "Smart Array 5312", &SA5B_access},
        {0x409A0E11, "Smart Array 641", &SA5_access},
        {0x409B0E11, "Smart Array 642", &SA5_access},
        {0x409C0E11, "Smart Array 6400", &SA5_access},
        {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
        {0x40910E11, "Smart Array 6i", &SA5_access},
        {0x3225103C, "Smart Array P600", &SA5_access},
        {0x3223103C, "Smart Array P800", &SA5_access},
        {0x3234103C, "Smart Array P400", &SA5_access},
        {0x3235103C, "Smart Array P400i", &SA5_access},
        {0x3211103C, "Smart Array E200i", &SA5_access},
        {0x3212103C, "Smart Array E200", &SA5_access},
        {0x3213103C, "Smart Array E200i", &SA5_access},
        {0x3214103C, "Smart Array E200i", &SA5_access},
        {0x3215103C, "Smart Array E200i", &SA5_access},
        {0x3237103C, "Smart Array E500", &SA5_access},
        {0x323D103C, "Smart Array P700m", &SA5_access},
        {0x3241103C, "Smart Array P212", &SA5_access},
        {0x3243103C, "Smart Array P410", &SA5_access},
        {0x3245103C, "Smart Array P410i", &SA5_access},
        {0x3247103C, "Smart Array P411", &SA5_access},
        {0x3249103C, "Smart Array P812", &SA5_access},
        {0x324A103C, "Smart Array P712m", &SA5_access},
        {0x324B103C, "Smart Array P711m", &SA5_access},
        {0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

#define MAX_CTLR 32

/* Originally cciss driver only supports 8 major numbers */
#define MAX_CTLR_ORIG 8

static ctlr_info_t *hba[MAX_CTLR];
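/*
 * Note (assumed from the registration path, which is outside this listing):
 * the first MAX_CTLR_ORIG controllers are expected to use the statically
 * assigned cciss block majors, while any controllers beyond that fall back
 * to dynamically allocated majors from register_blkdev().
 */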
static void do_cciss_request(struct request_queue *q);
static irqreturn_t do_cciss_intr(int irq, void *dev_id);
static int cciss_open(struct block_device *bdev, fmode_t mode);
static int cciss_release(struct gendisk *disk, fmode_t mode);
static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
                       unsigned int cmd, unsigned long arg);
static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);

static int cciss_revalidate(struct gendisk *disk);
static int rebuild_lun_table(ctlr_info_t *h, int first_time);
static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
                           int clear_all);

static void cciss_read_capacity(int ctlr, int logvol, int withirq,
                                sector_t *total_size, unsigned int *block_size);
static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
                                   sector_t *total_size, unsigned int *block_size);
static void cciss_geometry_inquiry(int ctlr, int logvol,
                                   int withirq, sector_t total_size,
                                   unsigned int block_size, InquiryData_struct *inq_buff,
                                   drive_info_struct *drv);
static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
                                           __u32);
static void start_io(ctlr_info_t *h);
static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
                   unsigned int use_unit_num, unsigned int log_unit,
                   __u8 page_code, unsigned char *scsi3addr, int cmd_type);
static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
                           unsigned int use_unit_num, unsigned int log_unit,
                           __u8 page_code, int cmd_type);

static void fail_all_cmds(unsigned long ctlr);

#ifdef CONFIG_PROC_FS
static void cciss_procinit(int i);
#else
static void cciss_procinit(int i)
{
}
#endif                          /* CONFIG_PROC_FS */

#ifdef CONFIG_COMPAT
static int cciss_compat_ioctl(struct block_device *, fmode_t,
                              unsigned, unsigned long);
#endif
static struct block_device_operations cciss_fops = {
        .owner = THIS_MODULE,
        .open = cciss_open,
        .release = cciss_release,
        .locked_ioctl = cciss_ioctl,
        .getgeo = cciss_getgeo,
#ifdef CONFIG_COMPAT
        .compat_ioctl = cciss_compat_ioctl,
#endif
        .revalidate_disk = cciss_revalidate,
};
/*
 * Enqueuing and dequeuing functions for cmdlists.
 */
static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
{
        if (*Qptr == NULL) {
                *Qptr = c;
                c->next = c->prev = c;
        } else {
                c->prev = (*Qptr)->prev;
                c->next = (*Qptr);
                (*Qptr)->prev->next = c;
                (*Qptr)->prev = c;
        }
}

static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
                                          CommandList_struct *c)
{
        if (c && c->next != c) {
                if (*Qptr == c)
                        *Qptr = c->next;
                c->prev->next = c->next;
                c->next->prev = c->prev;
        } else {
                *Qptr = NULL;
        }
        return c;
}
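/*
 * The two helpers above keep a command list as a circular, doubly linked
 * ring anchored by a single head pointer (h->reqQ, for example), so both
 * enqueue and dequeue are O(1) and an empty queue is simply a NULL head.
 * A minimal usage sketch, mirroring how the ioctl paths below submit a
 * command (names as used elsewhere in this file):
 *
 *      spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
 *      addQ(&h->reqQ, c);              // queue the command block
 *      h->Qdepth++;
 *      start_io(h);                    // kick the controller
 *      spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
 */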
#include "cciss_scsi.c"         /* For SCSI tape support */

#define RAID_UNKNOWN 6

#ifdef CONFIG_PROC_FS

/*
 * Report information about this controller.
 */
#define ENG_GIG 1000000000
#define ENG_GIG_FACTOR (ENG_GIG/512)
#define ENGAGE_SCSI "engage scsi"
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
        "UNKNOWN"
};

static struct proc_dir_entry *proc_cciss;
static void cciss_seq_show_header(struct seq_file *seq)
{
        ctlr_info_t *h = seq->private;

        seq_printf(seq, "%s: HP %s Controller\n"
                "Board ID: 0x%08lx\n"
                "Firmware Version: %c%c%c%c\n"
                "IRQ: %d\n"
                "Logical drives: %d\n"
                "Current Q depth: %d\n"
                "Current # commands on controller: %d\n"
                "Max Q depth since init: %d\n"
                "Max # commands on controller since init: %d\n"
                "Max SG entries since init: %d\n",
                h->devname,
                h->product_name,
                (unsigned long)h->board_id,
                h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
                h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
                h->num_luns,
                h->Qdepth, h->commands_outstanding,
                h->maxQsinceinit, h->max_outstanding, h->maxSG);

#ifdef CONFIG_CISS_SCSI_TAPE
        cciss_seq_tape_report(seq, h->ctlr);
#endif                          /* CONFIG_CISS_SCSI_TAPE */
}
static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
{
        ctlr_info_t *h = seq->private;
        unsigned ctlr = h->ctlr;
        unsigned long flags;

        /* prevent displaying bogus info during configuration
         * or deconfiguration of a logical volume
         */
        spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
        if (h->busy_configuring) {
                spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
                return ERR_PTR(-EBUSY);
        }
        h->busy_configuring = 1;
        spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);

        if (*pos == 0)
                cciss_seq_show_header(seq);

        return pos;
}
static int cciss_seq_show(struct seq_file *seq, void *v)
{
        sector_t vol_sz, vol_sz_frac;
        ctlr_info_t *h = seq->private;
        unsigned ctlr = h->ctlr;
        loff_t *pos = v;
        drive_info_struct *drv = &h->drv[*pos];

        if (*pos > h->highest_lun)
                return 0;

        if (drv->heads == 0)
                return 0;

        vol_sz = drv->nr_blocks;
        vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
        vol_sz_frac *= 100;
        sector_div(vol_sz_frac, ENG_GIG_FACTOR);

        if (drv->raid_level > 5)
                drv->raid_level = RAID_UNKNOWN;
        seq_printf(seq, "cciss/c%dd%d:"
                        "\t%4u.%02uGB\tRAID %s\n",
                        ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac,
                        raid_label[drv->raid_level]);
        return 0;
}
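/*
 * Worked example (illustrative only): ENG_GIG_FACTOR is 10^9/512 = 1953125
 * sectors per decimal gigabyte, so a logical drive with nr_blocks =
 * 142264000 (512-byte sectors, roughly 72.8 GB) reports as
 *
 *      vol_sz      = 142264000 / 1953125 = 72   (remainder 1639000)
 *      vol_sz_frac = 1639000 * 100 / 1953125 = 83
 *
 * and the line printed above reads something like
 * "cciss/c0d1:   72.83GB  RAID 5" (controller, drive number and RAID level
 * chosen arbitrarily for the example).
 */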
static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ctlr_info_t *h = seq->private;

        if (*pos > h->highest_lun)
                return NULL;
        *pos += 1;

        return pos;
}

static void cciss_seq_stop(struct seq_file *seq, void *v)
{
        ctlr_info_t *h = seq->private;

        /* Only reset h->busy_configuring if we succeeded in setting
         * it during cciss_seq_start. */
        if (v == ERR_PTR(-EBUSY))
                return;

        h->busy_configuring = 0;
}

static struct seq_operations cciss_seq_ops = {
        .start = cciss_seq_start,
        .show = cciss_seq_show,
        .next = cciss_seq_next,
        .stop = cciss_seq_stop,
};

static int cciss_seq_open(struct inode *inode, struct file *file)
{
        int ret = seq_open(file, &cciss_seq_ops);
        struct seq_file *seq = file->private_data;

        if (!ret)
                seq->private = PDE(inode)->data;

        return ret;
}
static ssize_t
cciss_proc_write(struct file *file, const char __user *buf,
                 size_t length, loff_t *ppos)
{
        int err;
        char *buffer;

#ifndef CONFIG_CISS_SCSI_TAPE
        return -EINVAL;
#endif

        if (!buf || length > PAGE_SIZE - 1)
                return -EINVAL;

        buffer = (char *)__get_free_page(GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        err = -EFAULT;
        if (copy_from_user(buffer, buf, length))
                goto out;
        buffer[length] = '\0';

#ifdef CONFIG_CISS_SCSI_TAPE
        if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) {
                struct seq_file *seq = file->private_data;
                ctlr_info_t *h = seq->private;
                int rc;

                rc = cciss_engage_scsi(h->ctlr);
                if (rc != 0)
                        err = -rc;
                else
                        err = length;
        } else
#endif                          /* CONFIG_CISS_SCSI_TAPE */
                err = -EINVAL;
        /* might be nice to have "disengage" too, but it's not
           safely possible. (only 1 module use count, lock issues.) */

out:
        free_page((unsigned long)buffer);
        return err;
}
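/*
 * The only write this proc file accepts is the ENGAGE_SCSI keyword, which
 * brings the SCSI tape/medium-changer side of the controller online.  A
 * typical invocation from userspace (controller number assumed to be 0):
 *
 *      echo "engage scsi" > /proc/driver/cciss/cciss0
 *
 * There is deliberately no "disengage" counterpart, per the comment above.
 */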
static struct file_operations cciss_proc_fops = {
        .owner = THIS_MODULE,
        .open = cciss_seq_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
        .write = cciss_proc_write,
};

static void __devinit cciss_procinit(int i)
{
        struct proc_dir_entry *pde;

        if (proc_cciss == NULL)
                proc_cciss = proc_mkdir("driver/cciss", NULL);
        if (!proc_cciss)
                return;
        pde = proc_create_data(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
                               S_IROTH, proc_cciss,
                               &cciss_proc_fops, hba[i]);
}

#endif                          /* CONFIG_PROC_FS */
/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use.  For operations that can wait, and so can
 * tolerate kmalloc possibly sleeping, this routine can be called with
 * get_from_pool set to 0.  cmd_free() MUST then be called with
 * got_from_pool set to 0 as well.
 */
static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
{
        CommandList_struct *c;
        int i;
        u64bit temp64;
        dma_addr_t cmd_dma_handle, err_dma_handle;

        if (!get_from_pool) {
                c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
                        sizeof(CommandList_struct), &cmd_dma_handle);
                if (c == NULL)
                        return NULL;
                memset(c, 0, sizeof(CommandList_struct));

                c->cmdindex = -1;

                c->err_info = (ErrorInfo_struct *)
                    pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
                            &err_dma_handle);

                if (c->err_info == NULL) {
                        pci_free_consistent(h->pdev,
                                sizeof(CommandList_struct), c, cmd_dma_handle);
                        return NULL;
                }
                memset(c->err_info, 0, sizeof(ErrorInfo_struct));
        } else {                /* get it out of the controller's pool */
                do {
                        i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
                        if (i == h->nr_cmds)
                                return NULL;
                } while (test_and_set_bit
                         (i & (BITS_PER_LONG - 1),
                          h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
#ifdef CCISS_DEBUG
                printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
#endif
                c = h->cmd_pool + i;
                memset(c, 0, sizeof(CommandList_struct));
                cmd_dma_handle = h->cmd_pool_dhandle
                    + i * sizeof(CommandList_struct);
                c->err_info = h->errinfo_pool + i;
                memset(c->err_info, 0, sizeof(ErrorInfo_struct));
                err_dma_handle = h->errinfo_pool_dhandle
                    + i * sizeof(ErrorInfo_struct);
                h->nr_allocs++;

                c->cmdindex = i;
        }

        c->busaddr = (__u32) cmd_dma_handle;
        temp64.val = (__u64) err_dma_handle;
        c->ErrDesc.Addr.lower = temp64.val32.lower;
        c->ErrDesc.Addr.upper = temp64.val32.upper;
        c->ErrDesc.Len = sizeof(ErrorInfo_struct);

        c->ctlr = h->ctlr;
        return c;
}
/*
 * Frees a command block that was previously allocated with cmd_alloc().
 */
static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
{
        int i;
        u64bit temp64;

        if (!got_from_pool) {
                temp64.val32.lower = c->ErrDesc.Addr.lower;
                temp64.val32.upper = c->ErrDesc.Addr.upper;
                pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
                                    c->err_info, (dma_addr_t) temp64.val);
                pci_free_consistent(h->pdev, sizeof(CommandList_struct),
                                    c, (dma_addr_t) c->busaddr);
        } else {
                i = c - h->cmd_pool;
                clear_bit(i & (BITS_PER_LONG - 1),
                          h->cmd_pool_bits + (i / BITS_PER_LONG));
                h->nr_frees++;
        }
}
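/*
 * The get_from_pool/got_from_pool flags must always be paired.  A minimal
 * sketch of the non-pool variant, as used by the ioctl paths further down
 * in this file (error handling trimmed for brevity):
 *
 *      CommandList_struct *c = cmd_alloc(h, 0);   // fresh DMA-coherent block
 *      if (c) {
 *              ...fill in c->Header and c->Request, submit, wait...
 *              cmd_free(h, c, 0);                 // 0 here too, never 1
 *      }
 *
 * The normal I/O path is assumed to use the preallocated pool instead
 * (cmd_alloc(h, 1) / cmd_free(h, c, 1)), since those contexts cannot sleep.
 */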
static inline ctlr_info_t *get_host(struct gendisk *disk)
{
        return disk->queue->queuedata;
}

static inline drive_info_struct *get_drv(struct gendisk *disk)
{
        return disk->private_data;
}

/*
 * Open.  Make sure the device is really there.
 */
static int cciss_open(struct block_device *bdev, fmode_t mode)
{
        ctlr_info_t *host = get_host(bdev->bd_disk);
        drive_info_struct *drv = get_drv(bdev->bd_disk);

#ifdef CCISS_DEBUG
        printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name);
#endif                          /* CCISS_DEBUG */

        if (host->busy_initializing || drv->busy_configuring)
                return -EBUSY;
        /*
         * Root is allowed to open raw volume zero even if it's not configured
         * so array config can still work.  Root is also allowed to open any
         * volume that has a LUN ID, so it can issue IOCTL to reread the
         * disk information.  I don't think I really like this
         * but I'm already using way too many device nodes to claim another one
         * for "raw controller".
         */
        if (drv->heads == 0) {
                if (MINOR(bdev->bd_dev) != 0) { /* not node 0? */
                        /* if not node 0 make sure it is a partition = 0 */
                        if (MINOR(bdev->bd_dev) & 0x0f) {
                                return -ENXIO;
                                /* if it is, make sure we have a LUN ID */
                        } else if (drv->LunID == 0) {
                                return -ENXIO;
                        }
                }
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
        }
        drv->usage_count++;
        host->usage_count++;
        return 0;
}
/*
 * Close.  Sync first.
 */
static int cciss_release(struct gendisk *disk, fmode_t mode)
{
        ctlr_info_t *host = get_host(disk);
        drive_info_struct *drv = get_drv(disk);

#ifdef CCISS_DEBUG
        printk(KERN_DEBUG "cciss_release %s\n", disk->disk_name);
#endif                          /* CCISS_DEBUG */

        drv->usage_count--;
        host->usage_count--;
        return 0;
}
#ifdef CONFIG_COMPAT

static int do_ioctl(struct block_device *bdev, fmode_t mode,
                    unsigned cmd, unsigned long arg)
{
        int ret;
        lock_kernel();
        ret = cciss_ioctl(bdev, mode, cmd, arg);
        unlock_kernel();
        return ret;
}

static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
                                  unsigned cmd, unsigned long arg);
static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
                                      unsigned cmd, unsigned long arg);

static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode,
                              unsigned cmd, unsigned long arg)
{
        switch (cmd) {
        case CCISS_GETPCIINFO:
        case CCISS_GETINTINFO:
        case CCISS_SETINTINFO:
        case CCISS_GETNODENAME:
        case CCISS_SETNODENAME:
        case CCISS_GETHEARTBEAT:
        case CCISS_GETBUSTYPES:
        case CCISS_GETFIRMVER:
        case CCISS_GETDRIVVER:
        case CCISS_REVALIDVOLS:
        case CCISS_DEREGDISK:
        case CCISS_REGNEWDISK:
        case CCISS_REGNEWD:
        case CCISS_RESCANDISK:
        case CCISS_GETLUNINFO:
                return do_ioctl(bdev, mode, cmd, arg);

        case CCISS_PASSTHRU32:
                return cciss_ioctl32_passthru(bdev, mode, cmd, arg);
        case CCISS_BIG_PASSTHRU32:
                return cciss_ioctl32_big_passthru(bdev, mode, cmd, arg);

        default:
                return -ENOIOCTLCMD;
        }
}

static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
                                  unsigned cmd, unsigned long arg)
{
        IOCTL32_Command_struct __user *arg32 =
            (IOCTL32_Command_struct __user *) arg;
        IOCTL_Command_struct arg64;
        IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
        int err;
        u32 cp;

        err = 0;
        err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
                              sizeof(arg64.LUN_info));
        err |= copy_from_user(&arg64.Request, &arg32->Request,
                              sizeof(arg64.Request));
        err |= copy_from_user(&arg64.error_info, &arg32->error_info,
                              sizeof(arg64.error_info));
        err |= get_user(arg64.buf_size, &arg32->buf_size);
        err |= get_user(cp, &arg32->buf);
        arg64.buf = compat_ptr(cp);
        err |= copy_to_user(p, &arg64, sizeof(arg64));

        if (err)
                return -EFAULT;

        err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
        if (err)
                return err;
        err |= copy_in_user(&arg32->error_info, &p->error_info,
                            sizeof(arg32->error_info));
        if (err)
                return -EFAULT;
        return err;
}

static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
                                      unsigned cmd, unsigned long arg)
{
        BIG_IOCTL32_Command_struct __user *arg32 =
            (BIG_IOCTL32_Command_struct __user *) arg;
        BIG_IOCTL_Command_struct arg64;
        BIG_IOCTL_Command_struct __user *p =
            compat_alloc_user_space(sizeof(arg64));
        int err;
        u32 cp;

        err = 0;
        err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
                              sizeof(arg64.LUN_info));
        err |= copy_from_user(&arg64.Request, &arg32->Request,
                              sizeof(arg64.Request));
        err |= copy_from_user(&arg64.error_info, &arg32->error_info,
                              sizeof(arg64.error_info));
        err |= get_user(arg64.buf_size, &arg32->buf_size);
        err |= get_user(arg64.malloc_size, &arg32->malloc_size);
        err |= get_user(cp, &arg32->buf);
        arg64.buf = compat_ptr(cp);
        err |= copy_to_user(p, &arg64, sizeof(arg64));

        if (err)
                return -EFAULT;

        err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
        if (err)
                return err;
        err |= copy_in_user(&arg32->error_info, &p->error_info,
                            sizeof(arg32->error_info));
        if (err)
                return -EFAULT;
        return err;
}
#endif
static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        drive_info_struct *drv = get_drv(bdev->bd_disk);

        if (!drv->cylinders)
                return -ENXIO;

        geo->heads = drv->heads;
        geo->sectors = drv->sectors;
        geo->cylinders = drv->cylinders;
        return 0;
}
752 * ioctl
754 static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
755 unsigned int cmd, unsigned long arg)
757 struct gendisk *disk = bdev->bd_disk;
758 ctlr_info_t *host = get_host(disk);
759 drive_info_struct *drv = get_drv(disk);
760 int ctlr = host->ctlr;
761 void __user *argp = (void __user *)arg;
763 #ifdef CCISS_DEBUG
764 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
765 #endif /* CCISS_DEBUG */
767 switch (cmd) {
768 case CCISS_GETPCIINFO:
770 cciss_pci_info_struct pciinfo;
772 if (!arg)
773 return -EINVAL;
774 pciinfo.domain = pci_domain_nr(host->pdev->bus);
775 pciinfo.bus = host->pdev->bus->number;
776 pciinfo.dev_fn = host->pdev->devfn;
777 pciinfo.board_id = host->board_id;
778 if (copy_to_user
779 (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
780 return -EFAULT;
781 return 0;
783 case CCISS_GETINTINFO:
785 cciss_coalint_struct intinfo;
786 if (!arg)
787 return -EINVAL;
788 intinfo.delay =
789 readl(&host->cfgtable->HostWrite.CoalIntDelay);
790 intinfo.count =
791 readl(&host->cfgtable->HostWrite.CoalIntCount);
792 if (copy_to_user
793 (argp, &intinfo, sizeof(cciss_coalint_struct)))
794 return -EFAULT;
795 return 0;
797 case CCISS_SETINTINFO:
799 cciss_coalint_struct intinfo;
800 unsigned long flags;
801 int i;
803 if (!arg)
804 return -EINVAL;
805 if (!capable(CAP_SYS_ADMIN))
806 return -EPERM;
807 if (copy_from_user
808 (&intinfo, argp, sizeof(cciss_coalint_struct)))
809 return -EFAULT;
810 if ((intinfo.delay == 0) && (intinfo.count == 0))
812 // printk("cciss_ioctl: delay and count cannot be 0\n");
813 return -EINVAL;
815 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
816 /* Update the field, and then ring the doorbell */
817 writel(intinfo.delay,
818 &(host->cfgtable->HostWrite.CoalIntDelay));
819 writel(intinfo.count,
820 &(host->cfgtable->HostWrite.CoalIntCount));
821 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
823 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
824 if (!(readl(host->vaddr + SA5_DOORBELL)
825 & CFGTBL_ChangeReq))
826 break;
827 /* delay and try again */
828 udelay(1000);
830 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
831 if (i >= MAX_IOCTL_CONFIG_WAIT)
832 return -EAGAIN;
833 return 0;
835 case CCISS_GETNODENAME:
837 NodeName_type NodeName;
838 int i;
840 if (!arg)
841 return -EINVAL;
842 for (i = 0; i < 16; i++)
843 NodeName[i] =
844 readb(&host->cfgtable->ServerName[i]);
845 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
846 return -EFAULT;
847 return 0;
849 case CCISS_SETNODENAME:
851 NodeName_type NodeName;
852 unsigned long flags;
853 int i;
855 if (!arg)
856 return -EINVAL;
857 if (!capable(CAP_SYS_ADMIN))
858 return -EPERM;
860 if (copy_from_user
861 (NodeName, argp, sizeof(NodeName_type)))
862 return -EFAULT;
864 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
866 /* Update the field, and then ring the doorbell */
867 for (i = 0; i < 16; i++)
868 writeb(NodeName[i],
869 &host->cfgtable->ServerName[i]);
871 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
873 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
874 if (!(readl(host->vaddr + SA5_DOORBELL)
875 & CFGTBL_ChangeReq))
876 break;
877 /* delay and try again */
878 udelay(1000);
880 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
881 if (i >= MAX_IOCTL_CONFIG_WAIT)
882 return -EAGAIN;
883 return 0;
886 case CCISS_GETHEARTBEAT:
888 Heartbeat_type heartbeat;
890 if (!arg)
891 return -EINVAL;
892 heartbeat = readl(&host->cfgtable->HeartBeat);
893 if (copy_to_user
894 (argp, &heartbeat, sizeof(Heartbeat_type)))
895 return -EFAULT;
896 return 0;
898 case CCISS_GETBUSTYPES:
900 BusTypes_type BusTypes;
902 if (!arg)
903 return -EINVAL;
904 BusTypes = readl(&host->cfgtable->BusTypes);
905 if (copy_to_user
906 (argp, &BusTypes, sizeof(BusTypes_type)))
907 return -EFAULT;
908 return 0;
910 case CCISS_GETFIRMVER:
912 FirmwareVer_type firmware;
914 if (!arg)
915 return -EINVAL;
916 memcpy(firmware, host->firm_ver, 4);
918 if (copy_to_user
919 (argp, firmware, sizeof(FirmwareVer_type)))
920 return -EFAULT;
921 return 0;
923 case CCISS_GETDRIVVER:
925 DriverVer_type DriverVer = DRIVER_VERSION;
927 if (!arg)
928 return -EINVAL;
930 if (copy_to_user
931 (argp, &DriverVer, sizeof(DriverVer_type)))
932 return -EFAULT;
933 return 0;
936 case CCISS_DEREGDISK:
937 case CCISS_REGNEWD:
938 case CCISS_REVALIDVOLS:
939 return rebuild_lun_table(host, 0);
941 case CCISS_GETLUNINFO:{
942 LogvolInfo_struct luninfo;
944 luninfo.LunID = drv->LunID;
945 luninfo.num_opens = drv->usage_count;
946 luninfo.num_parts = 0;
947 if (copy_to_user(argp, &luninfo,
948 sizeof(LogvolInfo_struct)))
949 return -EFAULT;
950 return 0;
952 case CCISS_PASSTHRU:
954 IOCTL_Command_struct iocommand;
955 CommandList_struct *c;
956 char *buff = NULL;
957 u64bit temp64;
958 unsigned long flags;
959 DECLARE_COMPLETION_ONSTACK(wait);
961 if (!arg)
962 return -EINVAL;
964 if (!capable(CAP_SYS_RAWIO))
965 return -EPERM;
967 if (copy_from_user
968 (&iocommand, argp, sizeof(IOCTL_Command_struct)))
969 return -EFAULT;
970 if ((iocommand.buf_size < 1) &&
971 (iocommand.Request.Type.Direction != XFER_NONE)) {
972 return -EINVAL;
974 #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
975 /* Check kmalloc limits */
976 if (iocommand.buf_size > 128000)
977 return -EINVAL;
978 #endif
979 if (iocommand.buf_size > 0) {
980 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
981 if (buff == NULL)
982 return -EFAULT;
984 if (iocommand.Request.Type.Direction == XFER_WRITE) {
985 /* Copy the data into the buffer we created */
986 if (copy_from_user
987 (buff, iocommand.buf, iocommand.buf_size)) {
988 kfree(buff);
989 return -EFAULT;
991 } else {
992 memset(buff, 0, iocommand.buf_size);
994 if ((c = cmd_alloc(host, 0)) == NULL) {
995 kfree(buff);
996 return -ENOMEM;
998 // Fill in the command type
999 c->cmd_type = CMD_IOCTL_PEND;
1000 // Fill in Command Header
1001 c->Header.ReplyQueue = 0; // unused in simple mode
1002 if (iocommand.buf_size > 0) // buffer to fill
1004 c->Header.SGList = 1;
1005 c->Header.SGTotal = 1;
1006 } else // no buffers to fill
1008 c->Header.SGList = 0;
1009 c->Header.SGTotal = 0;
1011 c->Header.LUN = iocommand.LUN_info;
1012 c->Header.Tag.lower = c->busaddr; // use the kernel address of the cmd block for the tag
1014 // Fill in Request block
1015 c->Request = iocommand.Request;
1017 // Fill in the scatter gather information
1018 if (iocommand.buf_size > 0) {
1019 temp64.val = pci_map_single(host->pdev, buff,
1020 iocommand.buf_size,
1021 PCI_DMA_BIDIRECTIONAL);
1022 c->SG[0].Addr.lower = temp64.val32.lower;
1023 c->SG[0].Addr.upper = temp64.val32.upper;
1024 c->SG[0].Len = iocommand.buf_size;
1025 c->SG[0].Ext = 0; // we are not chaining
1027 c->waiting = &wait;
1029 /* Put the request on the tail of the request queue */
1030 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1031 addQ(&host->reqQ, c);
1032 host->Qdepth++;
1033 start_io(host);
1034 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1036 wait_for_completion(&wait);
1038 /* unlock the buffers from DMA */
1039 temp64.val32.lower = c->SG[0].Addr.lower;
1040 temp64.val32.upper = c->SG[0].Addr.upper;
1041 pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
1042 iocommand.buf_size,
1043 PCI_DMA_BIDIRECTIONAL);
1045 /* Copy the error information out */
1046 iocommand.error_info = *(c->err_info);
1047 if (copy_to_user
1048 (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
1049 kfree(buff);
1050 cmd_free(host, c, 0);
1051 return -EFAULT;
1054 if (iocommand.Request.Type.Direction == XFER_READ) {
1055 /* Copy the data out of the buffer we created */
1056 if (copy_to_user
1057 (iocommand.buf, buff, iocommand.buf_size)) {
1058 kfree(buff);
1059 cmd_free(host, c, 0);
1060 return -EFAULT;
1063 kfree(buff);
1064 cmd_free(host, c, 0);
1065 return 0;
1067 case CCISS_BIG_PASSTHRU:{
1068 BIG_IOCTL_Command_struct *ioc;
1069 CommandList_struct *c;
1070 unsigned char **buff = NULL;
1071 int *buff_size = NULL;
1072 u64bit temp64;
1073 unsigned long flags;
1074 BYTE sg_used = 0;
1075 int status = 0;
1076 int i;
1077 DECLARE_COMPLETION_ONSTACK(wait);
1078 __u32 left;
1079 __u32 sz;
1080 BYTE __user *data_ptr;
1082 if (!arg)
1083 return -EINVAL;
1084 if (!capable(CAP_SYS_RAWIO))
1085 return -EPERM;
1086 ioc = (BIG_IOCTL_Command_struct *)
1087 kmalloc(sizeof(*ioc), GFP_KERNEL);
1088 if (!ioc) {
1089 status = -ENOMEM;
1090 goto cleanup1;
1092 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
1093 status = -EFAULT;
1094 goto cleanup1;
1096 if ((ioc->buf_size < 1) &&
1097 (ioc->Request.Type.Direction != XFER_NONE)) {
1098 status = -EINVAL;
1099 goto cleanup1;
1101 /* Check kmalloc limits using all SGs */
1102 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
1103 status = -EINVAL;
1104 goto cleanup1;
1106 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
1107 status = -EINVAL;
1108 goto cleanup1;
1110 buff =
1111 kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1112 if (!buff) {
1113 status = -ENOMEM;
1114 goto cleanup1;
1116 buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
1117 GFP_KERNEL);
1118 if (!buff_size) {
1119 status = -ENOMEM;
1120 goto cleanup1;
1122 left = ioc->buf_size;
1123 data_ptr = ioc->buf;
1124 while (left) {
1125 sz = (left >
1126 ioc->malloc_size) ? ioc->
1127 malloc_size : left;
1128 buff_size[sg_used] = sz;
1129 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1130 if (buff[sg_used] == NULL) {
1131 status = -ENOMEM;
1132 goto cleanup1;
1134 if (ioc->Request.Type.Direction == XFER_WRITE) {
1135 if (copy_from_user
1136 (buff[sg_used], data_ptr, sz)) {
1137 status = -EFAULT;
1138 goto cleanup1;
1140 } else {
1141 memset(buff[sg_used], 0, sz);
1143 left -= sz;
1144 data_ptr += sz;
1145 sg_used++;
1147 if ((c = cmd_alloc(host, 0)) == NULL) {
1148 status = -ENOMEM;
1149 goto cleanup1;
1151 c->cmd_type = CMD_IOCTL_PEND;
1152 c->Header.ReplyQueue = 0;
1154 if (ioc->buf_size > 0) {
1155 c->Header.SGList = sg_used;
1156 c->Header.SGTotal = sg_used;
1157 } else {
1158 c->Header.SGList = 0;
1159 c->Header.SGTotal = 0;
1161 c->Header.LUN = ioc->LUN_info;
1162 c->Header.Tag.lower = c->busaddr;
1164 c->Request = ioc->Request;
1165 if (ioc->buf_size > 0) {
1166 int i;
1167 for (i = 0; i < sg_used; i++) {
1168 temp64.val =
1169 pci_map_single(host->pdev, buff[i],
1170 buff_size[i],
1171 PCI_DMA_BIDIRECTIONAL);
1172 c->SG[i].Addr.lower =
1173 temp64.val32.lower;
1174 c->SG[i].Addr.upper =
1175 temp64.val32.upper;
1176 c->SG[i].Len = buff_size[i];
1177 c->SG[i].Ext = 0; /* we are not chaining */
1180 c->waiting = &wait;
1181 /* Put the request on the tail of the request queue */
1182 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1183 addQ(&host->reqQ, c);
1184 host->Qdepth++;
1185 start_io(host);
1186 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1187 wait_for_completion(&wait);
1188 /* unlock the buffers from DMA */
1189 for (i = 0; i < sg_used; i++) {
1190 temp64.val32.lower = c->SG[i].Addr.lower;
1191 temp64.val32.upper = c->SG[i].Addr.upper;
1192 pci_unmap_single(host->pdev,
1193 (dma_addr_t) temp64.val, buff_size[i],
1194 PCI_DMA_BIDIRECTIONAL);
1196 /* Copy the error information out */
1197 ioc->error_info = *(c->err_info);
1198 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1199 cmd_free(host, c, 0);
1200 status = -EFAULT;
1201 goto cleanup1;
1203 if (ioc->Request.Type.Direction == XFER_READ) {
1204 /* Copy the data out of the buffer we created */
1205 BYTE __user *ptr = ioc->buf;
1206 for (i = 0; i < sg_used; i++) {
1207 if (copy_to_user
1208 (ptr, buff[i], buff_size[i])) {
1209 cmd_free(host, c, 0);
1210 status = -EFAULT;
1211 goto cleanup1;
1213 ptr += buff_size[i];
1216 cmd_free(host, c, 0);
1217 status = 0;
1218 cleanup1:
1219 if (buff) {
1220 for (i = 0; i < sg_used; i++)
1221 kfree(buff[i]);
1222 kfree(buff);
1224 kfree(buff_size);
1225 kfree(ioc);
1226 return status;
1229 /* scsi_cmd_ioctl handles these, below, though some are not */
1230 /* very meaningful for cciss. SG_IO is the main one people want. */
1232 case SG_GET_VERSION_NUM:
1233 case SG_SET_TIMEOUT:
1234 case SG_GET_TIMEOUT:
1235 case SG_GET_RESERVED_SIZE:
1236 case SG_SET_RESERVED_SIZE:
1237 case SG_EMULATED_HOST:
1238 case SG_IO:
1239 case SCSI_IOCTL_SEND_COMMAND:
1240 return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
1242 /* scsi_cmd_ioctl would normally handle these, below, but */
1243 /* they aren't a good fit for cciss, as CD-ROMs are */
1244 /* not supported, and we don't have any bus/target/lun */
1245 /* which we present to the kernel. */
1247 case CDROM_SEND_PACKET:
1248 case CDROMCLOSETRAY:
1249 case CDROMEJECT:
1250 case SCSI_IOCTL_GET_IDLUN:
1251 case SCSI_IOCTL_GET_BUS_NUMBER:
1252 default:
1253 return -ENOTTY;
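/*
 * Hedged userspace sketch (not part of the driver): the CCISS_PASSTHRU case
 * above expects an IOCTL_Command_struct filled in by the caller.  Assuming
 * the cciss ioctl headers are included and ignoring error handling, a plain
 * INQUIRY of logical volume 0 could look roughly like this:
 *
 *      IOCTL_Command_struct ic;
 *      unsigned char inq[96];
 *      memset(&ic, 0, sizeof(ic));
 *      ic.LUN_info.LogDev.VolId = 0;           // logical drive 0
 *      ic.LUN_info.LogDev.Mode = 1;            // volume set addressing
 *      ic.Request.CDBLen = 6;
 *      ic.Request.Type.Type = TYPE_CMD;
 *      ic.Request.Type.Attribute = ATTR_SIMPLE;
 *      ic.Request.Type.Direction = XFER_READ;
 *      ic.Request.CDB[0] = CISS_INQUIRY;
 *      ic.Request.CDB[4] = sizeof(inq);
 *      ic.buf_size = sizeof(inq);
 *      ic.buf = inq;
 *      ioctl(open("/dev/cciss/c0d0", O_RDWR), CCISS_PASSTHRU, &ic);
 */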
1257 static void cciss_check_queues(ctlr_info_t *h)
1259 int start_queue = h->next_to_run;
1260 int i;
1262 /* check to see if we have maxed out the number of commands that can
1263 * be placed on the queue. If so then exit. We do this check here
1264 * in case the interrupt we serviced was from an ioctl and did not
1265 * free any new commands.
1267 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
1268 return;
1270 /* We have room on the queue for more commands. Now we need to queue
1271 * them up. We will also keep track of the next queue to run so
1272 * that every queue gets a chance to be started first.
1274 for (i = 0; i < h->highest_lun + 1; i++) {
1275 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1276 /* make sure the disk has been added and the drive is real
1277 * because this can be called from the middle of init_one.
1279 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1280 continue;
1281 blk_start_queue(h->gendisk[curr_queue]->queue);
1283 /* check to see if we have maxed out the number of commands
1284 * that can be placed on the queue.
1286 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
1287 if (curr_queue == start_queue) {
1288 h->next_to_run =
1289 (start_queue + 1) % (h->highest_lun + 1);
1290 break;
1291 } else {
1292 h->next_to_run = curr_queue;
1293 break;
1299 static void cciss_softirq_done(struct request *rq)
1301 CommandList_struct *cmd = rq->completion_data;
1302 ctlr_info_t *h = hba[cmd->ctlr];
1303 unsigned long flags;
1304 u64bit temp64;
1305 int i, ddir;
1307 if (cmd->Request.Type.Direction == XFER_READ)
1308 ddir = PCI_DMA_FROMDEVICE;
1309 else
1310 ddir = PCI_DMA_TODEVICE;
1312 /* command did not need to be retried */
1313 /* unmap the DMA mapping for all the scatter gather elements */
1314 for (i = 0; i < cmd->Header.SGList; i++) {
1315 temp64.val32.lower = cmd->SG[i].Addr.lower;
1316 temp64.val32.upper = cmd->SG[i].Addr.upper;
1317 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1320 #ifdef CCISS_DEBUG
1321 printk("Done with %p\n", rq);
1322 #endif /* CCISS_DEBUG */
1324 if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, blk_rq_bytes(rq)))
1325 BUG();
1327 spin_lock_irqsave(&h->lock, flags);
1328 cmd_free(h, cmd, 1);
1329 cciss_check_queues(h);
1330 spin_unlock_irqrestore(&h->lock, flags);
1333 /* This function gets the serial number of a logical drive via
1334 * inquiry page 0x83. Serial no. is 16 bytes. If the serial
1335 * number cannot be had, for whatever reason, 16 bytes of 0xff
1336 * are returned instead.
1338 static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
1339 unsigned char *serial_no, int buflen)
1341 #define PAGE_83_INQ_BYTES 64
1342 int rc;
1343 unsigned char *buf;
1345 if (buflen > 16)
1346 buflen = 16;
1347 memset(serial_no, 0xff, buflen);
1348 buf = kzalloc(PAGE_83_INQ_BYTES, GFP_KERNEL);
1349 if (!buf)
1350 return;
1351 memset(serial_no, 0, buflen);
1352 if (withirq)
1353 rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf,
1354 PAGE_83_INQ_BYTES, 1, logvol, 0x83, TYPE_CMD);
1355 else
1356 rc = sendcmd(CISS_INQUIRY, ctlr, buf,
1357 PAGE_83_INQ_BYTES, 1, logvol, 0x83, NULL, TYPE_CMD);
1358 if (rc == IO_OK)
1359 memcpy(serial_no, &buf[8], buflen);
1360 kfree(buf);
1361 return;
1364 static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
1365 int drv_index)
1367 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1368 sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index);
1369 disk->major = h->major;
1370 disk->first_minor = drv_index << NWD_SHIFT;
1371 disk->fops = &cciss_fops;
1372 disk->private_data = &h->drv[drv_index];
1374 /* Set up queue information */
1375 blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
1377 /* This is a hardware imposed limit. */
1378 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1380 /* This is a limit in the driver and could be eliminated. */
1381 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1383 blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
1385 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1387 disk->queue->queuedata = h;
1389 blk_queue_hardsect_size(disk->queue,
1390 h->drv[drv_index].block_size);
1392 /* Make sure all queue data is written out before */
1393 /* setting h->drv[drv_index].queue, as setting this */
1394 /* allows the interrupt handler to start the queue */
1395 wmb();
1396 h->drv[drv_index].queue = disk->queue;
1397 add_disk(disk);
1400 /* This function will check the usage_count of the drive to be updated/added.
1401 * If the usage_count is zero and it is a heretofore unknown drive, or,
1402 * the drive's capacity, geometry, or serial number has changed,
1403 * then the drive information will be updated and the disk will be
1404 * re-registered with the kernel. If these conditions don't hold,
1405 * then it will be left alone for the next reboot. The exception to this
1406 * is disk 0 which will always be left registered with the kernel since it
1407 * is also the controller node. Any changes to disk 0 will show up on
1408 * the next reboot.
1410 static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1412 ctlr_info_t *h = hba[ctlr];
1413 struct gendisk *disk;
1414 InquiryData_struct *inq_buff = NULL;
1415 unsigned int block_size;
1416 sector_t total_size;
1417 unsigned long flags = 0;
1418 int ret = 0;
1419 drive_info_struct *drvinfo;
1420 int was_only_controller_node;
1422 /* Get information about the disk and modify the driver structure */
1423 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1424 drvinfo = kmalloc(sizeof(*drvinfo), GFP_KERNEL);
1425 if (inq_buff == NULL || drvinfo == NULL)
1426 goto mem_msg;
1428 /* See if we're trying to update the "controller node"
1429 * this will happen when the first logical drive gets
1430 * created by ACU.
1432 was_only_controller_node = (drv_index == 0 &&
1433 h->drv[0].raid_level == -1);
1435 /* testing to see if 16-byte CDBs are already being used */
1436 if (h->cciss_read == CCISS_READ_16) {
1437 cciss_read_capacity_16(h->ctlr, drv_index, 1,
1438 &total_size, &block_size);
1440 } else {
1441 cciss_read_capacity(ctlr, drv_index, 1,
1442 &total_size, &block_size);
1444 /* if read_capacity returns all F's this volume is >2TB */
1445 /* in size so we switch to 16-byte CDB's for all */
1446 /* read/write ops */
1447 if (total_size == 0xFFFFFFFFULL) {
1448 cciss_read_capacity_16(ctlr, drv_index, 1,
1449 &total_size, &block_size);
1450 h->cciss_read = CCISS_READ_16;
1451 h->cciss_write = CCISS_WRITE_16;
1452 } else {
1453 h->cciss_read = CCISS_READ_10;
1454 h->cciss_write = CCISS_WRITE_10;
1458 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1459 inq_buff, drvinfo);
1460 drvinfo->block_size = block_size;
1461 drvinfo->nr_blocks = total_size + 1;
1463 cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no,
1464 sizeof(drvinfo->serial_no));
1466 /* Is it the same disk we already know, and nothing's changed? */
1467 if (h->drv[drv_index].raid_level != -1 &&
1468 ((memcmp(drvinfo->serial_no,
1469 h->drv[drv_index].serial_no, 16) == 0) &&
1470 drvinfo->block_size == h->drv[drv_index].block_size &&
1471 drvinfo->nr_blocks == h->drv[drv_index].nr_blocks &&
1472 drvinfo->heads == h->drv[drv_index].heads &&
1473 drvinfo->sectors == h->drv[drv_index].sectors &&
1474 drvinfo->cylinders == h->drv[drv_index].cylinders))
1475 /* The disk is unchanged, nothing to update */
1476 goto freeret;
1478 /* If we get here it's not the same disk, or something's changed,
1479 * so we need to deregister it, and re-register it, if it's not
1480 * in use.
1481 * If the disk already exists then deregister it before proceeding
1482 * (unless it's the first disk, for the controller node).
1484 if (h->drv[drv_index].raid_level != -1 && drv_index != 0) {
1485 printk(KERN_WARNING "disk %d has changed.\n", drv_index);
1486 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1487 h->drv[drv_index].busy_configuring = 1;
1488 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1490 /* deregister_disk sets h->drv[drv_index].queue = NULL
1491 * which keeps the interrupt handler from starting
1492 * the queue.
1494 ret = deregister_disk(h->gendisk[drv_index],
1495 &h->drv[drv_index], 0);
1496 h->drv[drv_index].busy_configuring = 0;
1499 /* If the disk is in use return */
1500 if (ret)
1501 goto freeret;
1503 /* Save the new information from cciss_geometry_inquiry
1504 * and serial number inquiry.
1506 h->drv[drv_index].block_size = drvinfo->block_size;
1507 h->drv[drv_index].nr_blocks = drvinfo->nr_blocks;
1508 h->drv[drv_index].heads = drvinfo->heads;
1509 h->drv[drv_index].sectors = drvinfo->sectors;
1510 h->drv[drv_index].cylinders = drvinfo->cylinders;
1511 h->drv[drv_index].raid_level = drvinfo->raid_level;
1512 memcpy(h->drv[drv_index].serial_no, drvinfo->serial_no, 16);
1514 ++h->num_luns;
1515 disk = h->gendisk[drv_index];
1516 set_capacity(disk, h->drv[drv_index].nr_blocks);
1518 /* If it's not disk 0 (drv_index != 0)
1519 * or if it was disk 0, but there was previously
1520 * no actual corresponding configured logical drive
1521 * (raid_level == -1) then we want to update the
1522 * logical drive's information.
1524 if (drv_index || first_time)
1525 cciss_add_disk(h, disk, drv_index);
1527 freeret:
1528 kfree(inq_buff);
1529 kfree(drvinfo);
1530 return;
1531 mem_msg:
1532 printk(KERN_ERR "cciss: out of memory\n");
1533 goto freeret;
1536 /* This function will find the first index of the controllers drive array
1537 * that has a -1 for the raid_level and will return that index. This is
1538 * where new drives will be added. If the index to be returned is greater
1539 * than the highest_lun index for the controller then highest_lun is set
1540 * to this new index. If there are no available indexes then -1 is returned.
1541 * "controller_node" is used to know if this is a real logical drive, or just
1542 * the controller node, which determines if this counts towards highest_lun.
1544 static int cciss_find_free_drive_index(int ctlr, int controller_node)
1546 int i;
1548 for (i = 0; i < CISS_MAX_LUN; i++) {
1549 if (hba[ctlr]->drv[i].raid_level == -1) {
1550 if (i > hba[ctlr]->highest_lun)
1551 if (!controller_node)
1552 hba[ctlr]->highest_lun = i;
1553 return i;
1556 return -1;
1559 /* cciss_add_gendisk finds a free hba[]->drv structure
1560 * and allocates a gendisk if needed, and sets the lunid
1561 * in the drvinfo structure. It returns the index into
1562 * the ->drv[] array, or -1 if none are free.
1563 * is_controller_node indicates whether highest_lun should
1564 * count this disk, or if it's only being added to provide
1565 * a means to talk to the controller in case no logical
1566 * drives have yet been configured.
1568 static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node)
1570 int drv_index;
1572 drv_index = cciss_find_free_drive_index(h->ctlr, controller_node);
1573 if (drv_index == -1)
1574 return -1;
1575 /*Check if the gendisk needs to be allocated */
1576 if (!h->gendisk[drv_index]) {
1577 h->gendisk[drv_index] =
1578 alloc_disk(1 << NWD_SHIFT);
1579 if (!h->gendisk[drv_index]) {
1580 printk(KERN_ERR "cciss%d: could not "
1581 "allocate a new disk %d\n",
1582 h->ctlr, drv_index);
1583 return -1;
1586 h->drv[drv_index].LunID = lunid;
1588 /* Don't need to mark this busy because nobody */
1589 /* else knows about this disk yet to contend */
1590 /* for access to it. */
1591 h->drv[drv_index].busy_configuring = 0;
1592 wmb();
1593 return drv_index;
1596 /* This is for the special case of a controller which
1597 * has no logical drives. In this case, we still need
1598 * to register a disk so the controller can be accessed
1599 * by the Array Config Utility.
1601 static void cciss_add_controller_node(ctlr_info_t *h)
1603 struct gendisk *disk;
1604 int drv_index;
1606 if (h->gendisk[0] != NULL) /* already did this? Then bail. */
1607 return;
1609 drv_index = cciss_add_gendisk(h, 0, 1);
1610 if (drv_index == -1) {
1611 printk(KERN_WARNING "cciss%d: could not "
1612 "add disk 0.\n", h->ctlr);
1613 return;
1615 h->drv[drv_index].block_size = 512;
1616 h->drv[drv_index].nr_blocks = 0;
1617 h->drv[drv_index].heads = 0;
1618 h->drv[drv_index].sectors = 0;
1619 h->drv[drv_index].cylinders = 0;
1620 h->drv[drv_index].raid_level = -1;
1621 memset(h->drv[drv_index].serial_no, 0, 16);
1622 disk = h->gendisk[drv_index];
1623 cciss_add_disk(h, disk, drv_index);
1626 /* This function will add and remove logical drives from the Logical
1627 * drive array of the controller and maintain persistency of ordering
1628 * so that mount points are preserved until the next reboot. This allows
1629 * for the removal of logical drives in the middle of the drive array
1630 * without a re-ordering of those drives.
1631 * INPUT
1632 * h = The controller to perform the operations on
1634 static int rebuild_lun_table(ctlr_info_t *h, int first_time)
1636 int ctlr = h->ctlr;
1637 int num_luns;
1638 ReportLunData_struct *ld_buff = NULL;
1639 int return_code;
1640 int listlength = 0;
1641 int i;
1642 int drv_found;
1643 int drv_index = 0;
1644 __u32 lunid = 0;
1645 unsigned long flags;
1647 if (!capable(CAP_SYS_RAWIO))
1648 return -EPERM;
1650 /* Set busy_configuring flag for this operation */
1651 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1652 if (h->busy_configuring) {
1653 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1654 return -EBUSY;
1656 h->busy_configuring = 1;
1657 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1659 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1660 if (ld_buff == NULL)
1661 goto mem_msg;
1663 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1664 sizeof(ReportLunData_struct), 0,
1665 0, 0, TYPE_CMD);
1667 if (return_code == IO_OK)
1668 listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
1669 else { /* reading number of logical volumes failed */
1670 printk(KERN_WARNING "cciss: report logical volume"
1671 " command failed\n");
1672 listlength = 0;
1673 goto freeret;
1676 num_luns = listlength / 8; /* 8 bytes per entry */
1677 if (num_luns > CISS_MAX_LUN) {
1678 num_luns = CISS_MAX_LUN;
1679 printk(KERN_WARNING "cciss: more luns configured"
1680 " on controller than can be handled by"
1681 " this driver.\n");
1684 if (num_luns == 0)
1685 cciss_add_controller_node(h);
1687 /* Compare controller drive array to driver's drive array
1688 * to see if any drives are missing on the controller due
1689 * to action of Array Config Utility (user deletes drive)
1690 * and deregister logical drives which have disappeared.
1692 for (i = 0; i <= h->highest_lun; i++) {
1693 int j;
1694 drv_found = 0;
1695 for (j = 0; j < num_luns; j++) {
1696 memcpy(&lunid, &ld_buff->LUN[j][0], 4);
1697 lunid = le32_to_cpu(lunid);
1698 if (h->drv[i].LunID == lunid) {
1699 drv_found = 1;
1700 break;
1703 if (!drv_found) {
1704 /* Deregister it from the OS, it's gone. */
1705 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1706 h->drv[i].busy_configuring = 1;
1707 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1708 return_code = deregister_disk(h->gendisk[i],
1709 &h->drv[i], 1);
1710 h->drv[i].busy_configuring = 0;
1714 /* Compare controller drive array to driver's drive array.
1715 * Check for updates in the drive information and any new drives
1716 * on the controller due to ACU adding logical drives, or changing
1717 * a logical drive's size, etc. Reregister any new/changed drives
1719 for (i = 0; i < num_luns; i++) {
1720 int j;
1722 drv_found = 0;
1724 memcpy(&lunid, &ld_buff->LUN[i][0], 4);
1725 lunid = le32_to_cpu(lunid);
1727 /* Find if the LUN is already in the drive array
1728 * of the driver. If so then update its info
1729 * if not in use. If it does not exist then find
1730 * the first free index and add it.
1732 for (j = 0; j <= h->highest_lun; j++) {
1733 if (h->drv[j].raid_level != -1 &&
1734 h->drv[j].LunID == lunid) {
1735 drv_index = j;
1736 drv_found = 1;
1737 break;
1741 /* check if the drive was found already in the array */
1742 if (!drv_found) {
1743 drv_index = cciss_add_gendisk(h, lunid, 0);
1744 if (drv_index == -1)
1745 goto freeret;
1747 cciss_update_drive_info(ctlr, drv_index, first_time);
1748 } /* end for */
1750 freeret:
1751 kfree(ld_buff);
1752 h->busy_configuring = 0;
1753 /* We return -1 here to tell the ACU that we have registered/updated
1754 * all of the drives that we can and to keep it from calling us
1755 * additional times.
1757 return -1;
1758 mem_msg:
1759 printk(KERN_ERR "cciss: out of memory\n");
1760 h->busy_configuring = 0;
1761 goto freeret;
1764 /* This function will deregister the disk and its queue from the
1765 * kernel. It must be called with the controller lock held and the
1766 * drv structure's busy_configuring flag set. Its parameters are:
1768 * disk = This is the disk to be deregistered
1769 * drv = This is the drive_info_struct associated with the disk to be
1770 * deregistered. It contains information about the disk used
1771 * by the driver.
1772 * clear_all = This flag determines whether or not the disk information
1773 * is going to be completely cleared out and the highest_lun
1774 * reset. Sometimes we want to clear out information about
1775 * the disk in preparation for re-adding it. In this case
1776 * the highest_lun should be left unchanged and the LunID
1777 * should not be cleared.
1779 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1780 int clear_all)
1782 int i;
1783 ctlr_info_t *h = get_host(disk);
1785 if (!capable(CAP_SYS_RAWIO))
1786 return -EPERM;
1788 /* make sure logical volume is NOT in use */
1789 if (clear_all || (h->gendisk[0] == disk)) {
1790 if (drv->usage_count > 1)
1791 return -EBUSY;
1792 } else if (drv->usage_count > 0)
1793 return -EBUSY;
1795 /* invalidate the devices and deregister the disk. If it is disk
1796 * zero, do not deregister it but just zero out its values. This
1797 * allows us to delete disk zero but keep the controller registered.
1799 if (h->gendisk[0] != disk) {
1800 struct request_queue *q = disk->queue;
1801 if (disk->flags & GENHD_FL_UP)
1802 del_gendisk(disk);
1803 if (q) {
1804 blk_cleanup_queue(q);
1805 /* Set drv->queue to NULL so that we do not try
1806 * to call blk_start_queue on this queue in the
1807 * interrupt handler
1809 drv->queue = NULL;
1811 /* If clear_all is set then we are deleting the logical
1812 * drive, not just refreshing its info. For drives
1813 * other than disk 0 we will call put_disk. We do not
1814 * do this for disk 0 as we need it to be able to
1815 * configure the controller.
1817 if (clear_all){
1818 /* This isn't pretty, but we need to find the
1819 * disk in our array and NULL out the pointer.
1820 * This is so that we will call alloc_disk if
1821 * this index is used again later.
1823 for (i=0; i < CISS_MAX_LUN; i++){
1824 if (h->gendisk[i] == disk) {
1825 h->gendisk[i] = NULL;
1826 break;
1829 put_disk(disk);
1831 } else {
1832 set_capacity(disk, 0);
1835 --h->num_luns;
1836 /* zero out the disk size info */
1837 drv->nr_blocks = 0;
1838 drv->block_size = 0;
1839 drv->heads = 0;
1840 drv->sectors = 0;
1841 drv->cylinders = 0;
1842 drv->raid_level = -1; /* This can be used as a flag variable to
1843 * indicate that this element of the drive
1844 * array is free.
1847 if (clear_all) {
1848 /* check to see if it was the last disk */
1849 if (drv == h->drv + h->highest_lun) {
1850 /* if so, find the new highest lun */
1851 int i, newhighest = -1;
1852 for (i = 0; i <= h->highest_lun; i++) {
1853 /* if the disk has size > 0, it is available */
1854 if (h->drv[i].heads)
1855 newhighest = i;
1857 h->highest_lun = newhighest;
1860 drv->LunID = 0;
1862 return 0;
1865 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
1866 1: address logical volume log_unit,
1867 2: periph device address is scsi3addr */
1868 unsigned int log_unit, __u8 page_code,
1869 unsigned char *scsi3addr, int cmd_type)
1871 ctlr_info_t *h = hba[ctlr];
1872 u64bit buff_dma_handle;
1873 int status = IO_OK;
1875 c->cmd_type = CMD_IOCTL_PEND;
1876 c->Header.ReplyQueue = 0;
1877 if (buff != NULL) {
1878 c->Header.SGList = 1;
1879 c->Header.SGTotal = 1;
1880 } else {
1881 c->Header.SGList = 0;
1882 c->Header.SGTotal = 0;
1884 c->Header.Tag.lower = c->busaddr;
1886 c->Request.Type.Type = cmd_type;
1887 if (cmd_type == TYPE_CMD) {
1888 switch (cmd) {
1889 case CISS_INQUIRY:
1890 /* If the logical unit number is 0 then this is going
1891 to the controller, so it's a physical command:
1892 mode = 0, target = 0, and we have nothing to write.
1893 Otherwise, if use_unit_num == 1:
1894 mode = 1 (volume set addressing), target = LUNID.
1895 Otherwise, if use_unit_num == 2:
1896 mode = 0 (peripheral device addressing), target = scsi3addr */
1897 if (use_unit_num == 1) {
1898 c->Header.LUN.LogDev.VolId =
1899 h->drv[log_unit].LunID;
1900 c->Header.LUN.LogDev.Mode = 1;
1901 } else if (use_unit_num == 2) {
1902 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
1904 c->Header.LUN.LogDev.Mode = 0;
1906 /* are we trying to read a vital product page */
1907 if (page_code != 0) {
1908 c->Request.CDB[1] = 0x01;
1909 c->Request.CDB[2] = page_code;
1911 c->Request.CDBLen = 6;
1912 c->Request.Type.Attribute = ATTR_SIMPLE;
1913 c->Request.Type.Direction = XFER_READ;
1914 c->Request.Timeout = 0;
1915 c->Request.CDB[0] = CISS_INQUIRY;
1916 c->Request.CDB[4] = size & 0xFF;
1917 break;
1918 case CISS_REPORT_LOG:
1919 case CISS_REPORT_PHYS:
1920 /* Talking to the controller, so it's a physical command:
1921 mode = 00, target = 0. Nothing to write.
1923 c->Request.CDBLen = 12;
1924 c->Request.Type.Attribute = ATTR_SIMPLE;
1925 c->Request.Type.Direction = XFER_READ;
1926 c->Request.Timeout = 0;
1927 c->Request.CDB[0] = cmd;
1928 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1929 c->Request.CDB[7] = (size >> 16) & 0xFF;
1930 c->Request.CDB[8] = (size >> 8) & 0xFF;
1931 c->Request.CDB[9] = size & 0xFF;
1932 break;
1934 case CCISS_READ_CAPACITY:
1935 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1936 c->Header.LUN.LogDev.Mode = 1;
1937 c->Request.CDBLen = 10;
1938 c->Request.Type.Attribute = ATTR_SIMPLE;
1939 c->Request.Type.Direction = XFER_READ;
1940 c->Request.Timeout = 0;
1941 c->Request.CDB[0] = cmd;
1942 break;
1943 case CCISS_READ_CAPACITY_16:
1944 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1945 c->Header.LUN.LogDev.Mode = 1;
1946 c->Request.CDBLen = 16;
1947 c->Request.Type.Attribute = ATTR_SIMPLE;
1948 c->Request.Type.Direction = XFER_READ;
1949 c->Request.Timeout = 0;
1950 c->Request.CDB[0] = cmd;
1951 c->Request.CDB[1] = 0x10;
1952 c->Request.CDB[10] = (size >> 24) & 0xFF;
1953 c->Request.CDB[11] = (size >> 16) & 0xFF;
1954 c->Request.CDB[12] = (size >> 8) & 0xFF;
1955 c->Request.CDB[13] = size & 0xFF;
1958 break;
1959 case CCISS_CACHE_FLUSH:
1960 c->Request.CDBLen = 12;
1961 c->Request.Type.Attribute = ATTR_SIMPLE;
1962 c->Request.Type.Direction = XFER_WRITE;
1963 c->Request.Timeout = 0;
1964 c->Request.CDB[0] = BMIC_WRITE;
1965 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1966 break;
1967 default:
1968 printk(KERN_WARNING
1969 "cciss%d: Unknown Command 0x%02x\n", ctlr, cmd);
1970 return IO_ERROR;
1972 } else if (cmd_type == TYPE_MSG) {
1973 switch (cmd) {
1974 case 0: /* ABORT message */
1975 c->Request.CDBLen = 12;
1976 c->Request.Type.Attribute = ATTR_SIMPLE;
1977 c->Request.Type.Direction = XFER_WRITE;
1978 c->Request.Timeout = 0;
1979 c->Request.CDB[0] = cmd; /* abort */
1980 c->Request.CDB[1] = 0; /* abort a command */
1981 /* buff contains the tag of the command to abort */
1982 memcpy(&c->Request.CDB[4], buff, 8);
1983 break;
1984 case 1: /* RESET message */
1985 c->Request.CDBLen = 12;
1986 c->Request.Type.Attribute = ATTR_SIMPLE;
1987 c->Request.Type.Direction = XFER_WRITE;
1988 c->Request.Timeout = 0;
1989 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1990 c->Request.CDB[0] = cmd; /* reset */
1991 c->Request.CDB[1] = 0x04; /* reset a LUN */
1992 break;
1993 case 3: /* No-Op message */
1994 c->Request.CDBLen = 1;
1995 c->Request.Type.Attribute = ATTR_SIMPLE;
1996 c->Request.Type.Direction = XFER_WRITE;
1997 c->Request.Timeout = 0;
1998 c->Request.CDB[0] = cmd;
1999 break;
2000 default:
2001 printk(KERN_WARNING
2002 "cciss%d: unknown message type %d\n", ctlr, cmd);
2003 return IO_ERROR;
2005 } else {
2006 printk(KERN_WARNING
2007 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
2008 return IO_ERROR;
2010 /* Fill in the scatter gather information */
2011 if (size > 0) {
2012 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
2013 buff, size,
2014 PCI_DMA_BIDIRECTIONAL);
2015 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
2016 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
2017 c->SG[0].Len = size;
2018 c->SG[0].Ext = 0; /* we are not chaining */
2020 return status;
2021 }
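/* sendcmd_withirq: send a command and sleep until the interrupt
 * handler signals its completion. Unsolicited aborts are retried up
 * to MAX_CMD_RETRIES; most other error statuses map to IO_ERROR.
 * The data buffer is unmapped and the command freed before return.
 */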
2023 static int sendcmd_withirq(__u8 cmd,
2024 int ctlr,
2025 void *buff,
2026 size_t size,
2027 unsigned int use_unit_num,
2028 unsigned int log_unit, __u8 page_code, int cmd_type)
2030 ctlr_info_t *h = hba[ctlr];
2031 CommandList_struct *c;
2032 u64bit buff_dma_handle;
2033 unsigned long flags;
2034 int return_status;
2035 DECLARE_COMPLETION_ONSTACK(wait);
2037 if ((c = cmd_alloc(h, 0)) == NULL)
2038 return -ENOMEM;
2039 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2040 log_unit, page_code, NULL, cmd_type);
2041 if (return_status != IO_OK) {
2042 cmd_free(h, c, 0);
2043 return return_status;
2045 resend_cmd2:
2046 c->waiting = &wait;
2048 /* Put the request on the tail of the queue and send it */
2049 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
2050 addQ(&h->reqQ, c);
2051 h->Qdepth++;
2052 start_io(h);
2053 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
2055 wait_for_completion(&wait);
2057 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
2058 switch (c->err_info->CommandStatus) {
2059 case CMD_TARGET_STATUS:
2060 printk(KERN_WARNING "cciss: cmd %p has "
2061 " completed with errors\n", c);
2062 if (c->err_info->ScsiStatus) {
2063 printk(KERN_WARNING "cciss: cmd %p "
2064 "has SCSI Status = %x\n",
2065 c, c->err_info->ScsiStatus);
2068 break;
2069 case CMD_DATA_UNDERRUN:
2070 case CMD_DATA_OVERRUN:
2071 /* expected for inquire and report lun commands */
2072 break;
2073 case CMD_INVALID:
2074 printk(KERN_WARNING "cciss: Cmd %p is "
2075 "reported invalid\n", c);
2076 return_status = IO_ERROR;
2077 break;
2078 case CMD_PROTOCOL_ERR:
2079 printk(KERN_WARNING "cciss: cmd %p has "
2080 "protocol error \n", c);
2081 return_status = IO_ERROR;
2082 break;
2083 case CMD_HARDWARE_ERR:
2084 printk(KERN_WARNING "cciss: cmd %p had "
2085 " hardware error\n", c);
2086 return_status = IO_ERROR;
2087 break;
2088 case CMD_CONNECTION_LOST:
2089 printk(KERN_WARNING "cciss: cmd %p had "
2090 "connection lost\n", c);
2091 return_status = IO_ERROR;
2092 break;
2093 case CMD_ABORTED:
2094 printk(KERN_WARNING "cciss: cmd %p was "
2095 "aborted\n", c);
2096 return_status = IO_ERROR;
2097 break;
2098 case CMD_ABORT_FAILED:
2099 printk(KERN_WARNING "cciss: cmd %p reports "
2100 "abort failed\n", c);
2101 return_status = IO_ERROR;
2102 break;
2103 case CMD_UNSOLICITED_ABORT:
2104 printk(KERN_WARNING
2105 "cciss%d: unsolicited abort %p\n", ctlr, c);
2106 if (c->retry_count < MAX_CMD_RETRIES) {
2107 printk(KERN_WARNING
2108 "cciss%d: retrying %p\n", ctlr, c);
2109 c->retry_count++;
2110 /* erase the old error information */
2111 memset(c->err_info, 0,
2112 sizeof(ErrorInfo_struct));
2113 return_status = IO_OK;
2114 INIT_COMPLETION(wait);
2115 goto resend_cmd2;
2117 return_status = IO_ERROR;
2118 break;
2119 default:
2120 printk(KERN_WARNING "cciss: cmd %p returned "
2121 "unknown status %x\n", c,
2122 c->err_info->CommandStatus);
2123 return_status = IO_ERROR;
2126 /* unlock the buffers from DMA */
2127 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2128 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2129 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
2130 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2131 cmd_free(h, c, 0);
2132 return return_status;
2133 }
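/* cciss_geometry_inquiry: fetch the vendor-specific inquiry page 0xC1
 * to learn heads, sectors per track, cylinders and RAID level for a
 * logical volume. If the volume does not report geometry (byte 8 is
 * 0xFF), a default of 255 heads and 32 sectors per track is assumed
 * and the cylinder count is derived from the total size.
 */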
2135 static void cciss_geometry_inquiry(int ctlr, int logvol,
2136 int withirq, sector_t total_size,
2137 unsigned int block_size,
2138 InquiryData_struct *inq_buff,
2139 drive_info_struct *drv)
2141 int return_code;
2142 unsigned long t;
2144 memset(inq_buff, 0, sizeof(InquiryData_struct));
2145 if (withirq)
2146 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
2147 inq_buff, sizeof(*inq_buff), 1,
2148 logvol, 0xC1, TYPE_CMD);
2149 else
2150 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
2151 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
2152 TYPE_CMD);
2153 if (return_code == IO_OK) {
2154 if (inq_buff->data_byte[8] == 0xFF) {
2155 printk(KERN_WARNING
2156 "cciss: reading geometry failed, volume "
2157 "does not support reading geometry\n");
2158 drv->heads = 255;
2159 drv->sectors = 32; // Sectors per track
2160 drv->cylinders = total_size + 1;
2161 drv->raid_level = RAID_UNKNOWN;
2162 } else {
2163 drv->heads = inq_buff->data_byte[6];
2164 drv->sectors = inq_buff->data_byte[7];
2165 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
2166 drv->cylinders += inq_buff->data_byte[5];
2167 drv->raid_level = inq_buff->data_byte[8];
2169 drv->block_size = block_size;
2170 drv->nr_blocks = total_size + 1;
2171 t = drv->heads * drv->sectors;
2172 if (t > 1) {
2173 sector_t real_size = total_size + 1;
2174 unsigned long rem = sector_div(real_size, t);
2175 if (rem)
2176 real_size++;
2177 drv->cylinders = real_size;
2179 } else { /* Get geometry failed */
2180 printk(KERN_WARNING "cciss: reading geometry failed\n");
2182 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
2183 drv->heads, drv->sectors, drv->cylinders);
2184 }
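/* cciss_read_capacity: issue a 10-byte READ CAPACITY and convert the
 * big-endian block count and block size to host order. On failure the
 * size is reported as 0 with a default BLOCK_SIZE.
 */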
2186 static void
2187 cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
2188 unsigned int *block_size)
2190 ReadCapdata_struct *buf;
2191 int return_code;
2193 buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
2194 if (!buf) {
2195 printk(KERN_WARNING "cciss: out of memory\n");
2196 return;
2199 if (withirq)
2200 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
2201 ctlr, buf, sizeof(ReadCapdata_struct),
2202 1, logvol, 0, TYPE_CMD);
2203 else
2204 return_code = sendcmd(CCISS_READ_CAPACITY,
2205 ctlr, buf, sizeof(ReadCapdata_struct),
2206 1, logvol, 0, NULL, TYPE_CMD);
2207 if (return_code == IO_OK) {
2208 *total_size = be32_to_cpu(*(__be32 *) buf->total_size);
2209 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2210 } else { /* read capacity command failed */
2211 printk(KERN_WARNING "cciss: read capacity failed\n");
2212 *total_size = 0;
2213 *block_size = BLOCK_SIZE;
2215 if (*total_size != 0)
2216 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2217 (unsigned long long)*total_size+1, *block_size);
2218 kfree(buf);
2219 }
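/* cciss_read_capacity_16: 16-byte READ CAPACITY variant, used when the
 * driver is in 16-byte read/write mode (h->cciss_read != CCISS_READ_10),
 * i.e. for volumes too large for the 10-byte command.
 */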
2221 static void
2222 cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
2224 ReadCapdata_struct_16 *buf;
2225 int return_code;
2227 buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2228 if (!buf) {
2229 printk(KERN_WARNING "cciss: out of memory\n");
2230 return;
2233 if (withirq) {
2234 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2235 ctlr, buf, sizeof(ReadCapdata_struct_16),
2236 1, logvol, 0, TYPE_CMD);
2238 else {
2239 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2240 ctlr, buf, sizeof(ReadCapdata_struct_16),
2241 1, logvol, 0, NULL, TYPE_CMD);
2243 if (return_code == IO_OK) {
2244 *total_size = be64_to_cpu(*(__be64 *) buf->total_size);
2245 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2246 } else { /* read capacity command failed */
2247 printk(KERN_WARNING "cciss: read capacity failed\n");
2248 *total_size = 0;
2249 *block_size = BLOCK_SIZE;
2251 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2252 (unsigned long long)*total_size+1, *block_size);
2253 kfree(buf);
2254 }
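/* cciss_revalidate: re-read capacity and geometry for an existing
 * logical drive and update the block queue and gendisk to match.
 * Returns 1 if the drive's LUN can no longer be found.
 */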
2256 static int cciss_revalidate(struct gendisk *disk)
2258 ctlr_info_t *h = get_host(disk);
2259 drive_info_struct *drv = get_drv(disk);
2260 int logvol;
2261 int FOUND = 0;
2262 unsigned int block_size;
2263 sector_t total_size;
2264 InquiryData_struct *inq_buff = NULL;
2266 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2267 if (h->drv[logvol].LunID == drv->LunID) {
2268 FOUND = 1;
2269 break;
2273 if (!FOUND)
2274 return 1;
2276 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2277 if (inq_buff == NULL) {
2278 printk(KERN_WARNING "cciss: out of memory\n");
2279 return 1;
2281 if (h->cciss_read == CCISS_READ_10) {
2282 cciss_read_capacity(h->ctlr, logvol, 1,
2283 &total_size, &block_size);
2284 } else {
2285 cciss_read_capacity_16(h->ctlr, logvol, 1,
2286 &total_size, &block_size);
2288 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2289 inq_buff, drv);
2291 blk_queue_hardsect_size(drv->queue, drv->block_size);
2292 set_capacity(disk, drv->nr_blocks);
2294 kfree(inq_buff);
2295 return 0;
2298 /*
2299 * Poll for a command to complete.
2300 * The memory-mapped FIFO is polled for the completion.
2301 * Used only at init time; interrupts from the HBA are disabled.
2302 */
2303 static unsigned long pollcomplete(int ctlr)
2305 unsigned long done;
2306 int i;
2308 /* Wait (up to 20 seconds) for a command to complete */
2310 for (i = 20 * HZ; i > 0; i--) {
2311 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2312 if (done == FIFO_EMPTY)
2313 schedule_timeout_uninterruptible(1);
2314 else
2315 return done;
2317 /* Invalid address to tell caller we ran out of time */
2318 return 1;
2321 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2323 /* We get in here if sendcmd() is polling for completions
2324 and gets some command back that it wasn't expecting --
2325 something other than that which it just sent down.
2326 Ordinarily, that shouldn't happen, but it can happen when
2327 the scsi tape stuff gets into error handling mode, and
2328 starts using sendcmd() to try to abort commands and
2329 reset tape drives. In that case, sendcmd may pick up
2330 completions of commands that were sent to logical drives
2331 through the block i/o system, or cciss ioctls completing, etc.
2332 In that case, we need to save those completions for later
2333 processing by the interrupt handler.
2334 */
2336 #ifdef CONFIG_CISS_SCSI_TAPE
2337 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2339 /* If it's not the scsi tape stuff doing error handling, (abort */
2340 /* or reset) then we don't expect anything weird. */
2341 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2342 #endif
2343 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2344 "Invalid command list address returned! (%lx)\n",
2345 ctlr, complete);
2346 /* not much we can do. */
2347 #ifdef CONFIG_CISS_SCSI_TAPE
2348 return 1;
2351 /* We've sent down an abort or reset, but something else
2352 has completed */
2353 if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
2354 /* Uh oh. No room to save it for later... */
2355 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2356 "reject list overflow, command lost!\n", ctlr);
2357 return 1;
2359 /* Save it for later */
2360 srl->complete[srl->ncompletions] = complete;
2361 srl->ncompletions++;
2362 #endif
2363 return 0;
2366 /*
2367 * Send a command to the controller, and wait for it to complete.
2368 * Only used at init time.
2369 */
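/* Unlike sendcmd_withirq(), this path polls the completion FIFO with
 * interrupts masked. Completions that belong to other commands (which
 * can happen during SCSI tape error handling) are parked on the
 * scsi_rejects list and replayed through do_cciss_intr() afterwards.
 */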
2370 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2371 1: address logical volume log_unit,
2372 2: periph device address is scsi3addr */
2373 unsigned int log_unit,
2374 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2376 CommandList_struct *c;
2377 int i;
2378 unsigned long complete;
2379 ctlr_info_t *info_p = hba[ctlr];
2380 u64bit buff_dma_handle;
2381 int status, done = 0;
2383 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2384 printk(KERN_WARNING "cciss: unable to get memory");
2385 return IO_ERROR;
2387 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2388 log_unit, page_code, scsi3addr, cmd_type);
2389 if (status != IO_OK) {
2390 cmd_free(info_p, c, 1);
2391 return status;
2393 resend_cmd1:
2395 * Disable interrupt
2397 #ifdef CCISS_DEBUG
2398 printk(KERN_DEBUG "cciss: turning intr off\n");
2399 #endif /* CCISS_DEBUG */
2400 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2402 /* Make sure there is room in the command FIFO */
2403 /* Actually it should be completely empty at this time */
2404 /* unless we are in here doing error handling for the scsi */
2405 /* tape side of the driver. */
2406 for (i = 200000; i > 0; i--) {
2407 /* if fifo isn't full go */
2408 if (!(info_p->access.fifo_full(info_p))) {
2410 break;
2412 udelay(10);
2413 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2414 " waiting!\n", ctlr);
2417 * Send the cmd
2419 info_p->access.submit_command(info_p, c);
2420 done = 0;
2421 do {
2422 complete = pollcomplete(ctlr);
2424 #ifdef CCISS_DEBUG
2425 printk(KERN_DEBUG "cciss: command completed\n");
2426 #endif /* CCISS_DEBUG */
2428 if (complete == 1) {
2429 printk(KERN_WARNING
2430 "cciss cciss%d: SendCmd Timed out, "
2431 "No command list address returned!\n", ctlr);
2432 status = IO_ERROR;
2433 done = 1;
2434 break;
2437 /* This will need to change for direct lookup completions */
2438 if ((complete & CISS_ERROR_BIT)
2439 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2440 /* if data overrun or underrun on Report command
2441 ignore it
2442 */
2443 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2444 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2445 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2446 ((c->err_info->CommandStatus ==
2447 CMD_DATA_OVERRUN) ||
2448 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2449 )) {
2450 complete = c->busaddr;
2451 } else {
2452 if (c->err_info->CommandStatus ==
2453 CMD_UNSOLICITED_ABORT) {
2454 printk(KERN_WARNING "cciss%d: "
2455 "unsolicited abort %p\n",
2456 ctlr, c);
2457 if (c->retry_count < MAX_CMD_RETRIES) {
2458 printk(KERN_WARNING
2459 "cciss%d: retrying %p\n",
2460 ctlr, c);
2461 c->retry_count++;
2462 /* erase the old error */
2463 /* information */
2464 memset(c->err_info, 0,
2465 sizeof
2466 (ErrorInfo_struct));
2467 goto resend_cmd1;
2468 } else {
2469 printk(KERN_WARNING
2470 "cciss%d: retried %p too "
2471 "many times\n", ctlr, c);
2472 status = IO_ERROR;
2473 goto cleanup1;
2475 } else if (c->err_info->CommandStatus ==
2476 CMD_UNABORTABLE) {
2477 printk(KERN_WARNING
2478 "cciss%d: command could not be aborted.\n",
2479 ctlr);
2480 status = IO_ERROR;
2481 goto cleanup1;
2483 printk(KERN_WARNING "cciss cciss%d: sendcmd"
2484 " Error %x\n", ctlr,
2485 c->err_info->CommandStatus);
2486 printk(KERN_WARNING "cciss cciss%d: sendcmd"
2487 " offensive info\n"
2488 " size %x\n num %x value %x\n",
2489 ctlr,
2490 c->err_info->MoreErrInfo.Invalid_Cmd.
2491 offense_size,
2492 c->err_info->MoreErrInfo.Invalid_Cmd.
2493 offense_num,
2494 c->err_info->MoreErrInfo.Invalid_Cmd.
2495 offense_value);
2496 status = IO_ERROR;
2497 goto cleanup1;
2500 /* This will need changing for direct lookup completions */
2501 if (complete != c->busaddr) {
2502 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2503 BUG(); /* we are pretty much hosed if we get here. */
2505 continue;
2506 } else
2507 done = 1;
2508 } while (!done);
2510 cleanup1:
2511 /* unlock the data buffer from DMA */
2512 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2513 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2514 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2515 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2516 #ifdef CONFIG_CISS_SCSI_TAPE
2517 /* if we saved some commands for later, process them now. */
2518 if (info_p->scsi_rejects.ncompletions > 0)
2519 do_cciss_intr(0, info_p);
2520 #endif
2521 cmd_free(info_p, c, 1);
2522 return status;
2525 /*
2526 * Map (physical) PCI mem into (virtual) kernel space
2527 */
2528 static void __iomem *remap_pci_mem(ulong base, ulong size)
2530 ulong page_base = ((ulong) base) & PAGE_MASK;
2531 ulong page_offs = ((ulong) base) - page_base;
2532 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2534 return page_remapped ? (page_remapped + page_offs) : NULL;
2537 /*
2538 * Takes jobs off the Q and sends them to the hardware, then puts them on
2539 * the completion Q to wait for completion.
2540 */
2541 static void start_io(ctlr_info_t *h)
2543 CommandList_struct *c;
2545 while ((c = h->reqQ) != NULL) {
2546 /* can't do anything if fifo is full */
2547 if ((h->access.fifo_full(h))) {
2548 printk(KERN_WARNING "cciss: fifo full\n");
2549 break;
2552 /* Get the first entry from the Request Q */
2553 removeQ(&(h->reqQ), c);
2554 h->Qdepth--;
2556 /* Tell the controller execute command */
2557 h->access.submit_command(h, c);
2559 /* Put job onto the completed Q */
2560 addQ(&(h->cmpQ), c);
2564 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2565 /* Zeros out the error record and then resends the command back */
2566 /* to the controller */
2567 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2569 /* erase the old error information */
2570 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2572 /* add it to software queue and then send it to the controller */
2573 addQ(&(h->reqQ), c);
2574 h->Qdepth++;
2575 if (h->Qdepth > h->maxQsinceinit)
2576 h->maxQsinceinit = h->Qdepth;
2578 start_io(h);
2581 static inline unsigned int make_status_bytes(unsigned int scsi_status_byte,
2582 unsigned int msg_byte, unsigned int host_byte,
2583 unsigned int driver_byte)
2585 /* inverse of macros in scsi.h */
2586 return (scsi_status_byte & 0xff) |
2587 ((msg_byte & 0xff) << 8) |
2588 ((host_byte & 0xff) << 16) |
2589 ((driver_byte & 0xff) << 24);
2590 }
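/* evaluate_target_status: fold the controller's SCSI status for a
 * completed request into block-layer error bytes. For SG_IO style
 * (blk_pc) requests the sense data is copied back to the request;
 * for filesystem requests a no-sense or recovered CHECK CONDITION is
 * treated as success.
 */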
2592 static inline int evaluate_target_status(CommandList_struct *cmd)
2594 unsigned char sense_key;
2595 unsigned char status_byte, msg_byte, host_byte, driver_byte;
2596 int error_value;
2598 /* If we get in here, it means we got "target status", that is, scsi status */
2599 status_byte = cmd->err_info->ScsiStatus;
2600 driver_byte = DRIVER_OK;
2601 msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */
2603 if (blk_pc_request(cmd->rq))
2604 host_byte = DID_PASSTHROUGH;
2605 else
2606 host_byte = DID_OK;
2608 error_value = make_status_bytes(status_byte, msg_byte,
2609 host_byte, driver_byte);
2611 if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
2612 if (!blk_pc_request(cmd->rq))
2613 printk(KERN_WARNING "cciss: cmd %p "
2614 "has SCSI Status 0x%x\n",
2615 cmd, cmd->err_info->ScsiStatus);
2616 return error_value;
2619 /* check the sense key */
2620 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2621 /* no status or recovered error */
2622 if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq))
2623 error_value = 0;
2625 if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
2626 if (error_value != 0)
2627 printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
2628 " sense key = 0x%x\n", cmd, sense_key);
2629 return error_value;
2632 /* SG_IO or similar, copy sense data back */
2633 if (cmd->rq->sense) {
2634 if (cmd->rq->sense_len > cmd->err_info->SenseLen)
2635 cmd->rq->sense_len = cmd->err_info->SenseLen;
2636 memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
2637 cmd->rq->sense_len);
2638 } else
2639 cmd->rq->sense_len = 0;
2641 return error_value;
2644 /* checks the status of the job and calls complete buffers to mark all
2645 * buffers for the completed job. Note that this function does not need
2646 * to hold the hba/queue lock.
2647 */
2648 static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2649 int timeout)
2651 int retry_cmd = 0;
2652 struct request *rq = cmd->rq;
2654 rq->errors = 0;
2656 if (timeout)
2657 rq->errors = make_status_bytes(0, 0, 0, DRIVER_TIMEOUT);
2659 if (cmd->err_info->CommandStatus == 0) /* no error has occurred */
2660 goto after_error_processing;
2662 switch (cmd->err_info->CommandStatus) {
2663 case CMD_TARGET_STATUS:
2664 rq->errors = evaluate_target_status(cmd);
2665 break;
2666 case CMD_DATA_UNDERRUN:
2667 if (blk_fs_request(cmd->rq)) {
2668 printk(KERN_WARNING "cciss: cmd %p has"
2669 " completed with data underrun "
2670 "reported\n", cmd);
2671 cmd->rq->data_len = cmd->err_info->ResidualCnt;
2673 break;
2674 case CMD_DATA_OVERRUN:
2675 if (blk_fs_request(cmd->rq))
2676 printk(KERN_WARNING "cciss: cmd %p has"
2677 " completed with data overrun "
2678 "reported\n", cmd);
2679 break;
2680 case CMD_INVALID:
2681 printk(KERN_WARNING "cciss: cmd %p is "
2682 "reported invalid\n", cmd);
2683 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2684 cmd->err_info->CommandStatus, DRIVER_OK,
2685 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2686 break;
2687 case CMD_PROTOCOL_ERR:
2688 printk(KERN_WARNING "cciss: cmd %p has "
2689 "protocol error \n", cmd);
2690 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2691 cmd->err_info->CommandStatus, DRIVER_OK,
2692 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2693 break;
2694 case CMD_HARDWARE_ERR:
2695 printk(KERN_WARNING "cciss: cmd %p had "
2696 " hardware error\n", cmd);
2697 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2698 cmd->err_info->CommandStatus, DRIVER_OK,
2699 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2700 break;
2701 case CMD_CONNECTION_LOST:
2702 printk(KERN_WARNING "cciss: cmd %p had "
2703 "connection lost\n", cmd);
2704 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2705 cmd->err_info->CommandStatus, DRIVER_OK,
2706 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2707 break;
2708 case CMD_ABORTED:
2709 printk(KERN_WARNING "cciss: cmd %p was "
2710 "aborted\n", cmd);
2711 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2712 cmd->err_info->CommandStatus, DRIVER_OK,
2713 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
2714 break;
2715 case CMD_ABORT_FAILED:
2716 printk(KERN_WARNING "cciss: cmd %p reports "
2717 "abort failed\n", cmd);
2718 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2719 cmd->err_info->CommandStatus, DRIVER_OK,
2720 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2721 break;
2722 case CMD_UNSOLICITED_ABORT:
2723 printk(KERN_WARNING "cciss%d: unsolicited "
2724 "abort %p\n", h->ctlr, cmd);
2725 if (cmd->retry_count < MAX_CMD_RETRIES) {
2726 retry_cmd = 1;
2727 printk(KERN_WARNING
2728 "cciss%d: retrying %p\n", h->ctlr, cmd);
2729 cmd->retry_count++;
2730 } else
2731 printk(KERN_WARNING
2732 "cciss%d: %p retried too "
2733 "many times\n", h->ctlr, cmd);
2734 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2735 cmd->err_info->CommandStatus, DRIVER_OK,
2736 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
2737 break;
2738 case CMD_TIMEOUT:
2739 printk(KERN_WARNING "cciss: cmd %p timed out\n", cmd);
2740 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2741 cmd->err_info->CommandStatus, DRIVER_OK,
2742 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2743 break;
2744 default:
2745 printk(KERN_WARNING "cciss: cmd %p returned "
2746 "unknown status %x\n", cmd,
2747 cmd->err_info->CommandStatus);
2748 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2749 cmd->err_info->CommandStatus, DRIVER_OK,
2750 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2753 after_error_processing:
2755 /* We need to return this command */
2756 if (retry_cmd) {
2757 resend_cciss_cmd(h, cmd);
2758 return;
2760 cmd->rq->completion_data = cmd;
2761 blk_complete_request(cmd->rq);
2762 }
2764 /*
2765 * Get a request and submit it to the controller.
2766 */
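/* Requests are taken off the elevator, mapped for DMA and turned into
 * 10- or 16-byte READ/WRITE CDBs (or passed through verbatim for
 * blk_pc requests), then queued for start_io(). The queue lock is
 * dropped while the scatter-gather list is being built.
 */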
2767 static void do_cciss_request(struct request_queue *q)
2769 ctlr_info_t *h = q->queuedata;
2770 CommandList_struct *c;
2771 sector_t start_blk;
2772 int seg;
2773 struct request *creq;
2774 u64bit temp64;
2775 struct scatterlist tmp_sg[MAXSGENTRIES];
2776 drive_info_struct *drv;
2777 int i, dir;
2779 /* We call start_io here in case there is a command waiting on the
2780 * queue that has not been sent.
2781 */
2782 if (blk_queue_plugged(q))
2783 goto startio;
2785 queue:
2786 creq = elv_next_request(q);
2787 if (!creq)
2788 goto startio;
2790 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2792 if ((c = cmd_alloc(h, 1)) == NULL)
2793 goto full;
2795 blkdev_dequeue_request(creq);
2797 spin_unlock_irq(q->queue_lock);
2799 c->cmd_type = CMD_RWREQ;
2800 c->rq = creq;
2802 /* fill in the request */
2803 drv = creq->rq_disk->private_data;
2804 c->Header.ReplyQueue = 0; // unused in simple mode
2805 /* got command from pool, so use the command block index instead */
2806 /* for direct lookups. */
2807 /* The first 2 bits are reserved for controller error reporting. */
2808 c->Header.Tag.lower = (c->cmdindex << 3);
2809 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
2810 c->Header.LUN.LogDev.VolId = drv->LunID;
2811 c->Header.LUN.LogDev.Mode = 1;
2812 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2813 c->Request.Type.Type = TYPE_CMD; // It is a command.
2814 c->Request.Type.Attribute = ATTR_SIMPLE;
2815 c->Request.Type.Direction =
2816 (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
2817 c->Request.Timeout = 0; // Don't time out
2818 c->Request.CDB[0] =
2819 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2820 start_blk = creq->sector;
2821 #ifdef CCISS_DEBUG
2822 printk(KERN_DEBUG "cciss: sector=%d nr_sectors=%d\n", (int)creq->sector,
2823 (int)creq->nr_sectors);
2824 #endif /* CCISS_DEBUG */
2826 sg_init_table(tmp_sg, MAXSGENTRIES);
2827 seg = blk_rq_map_sg(q, creq, tmp_sg);
2829 /* get the DMA records for the setup */
2830 if (c->Request.Type.Direction == XFER_READ)
2831 dir = PCI_DMA_FROMDEVICE;
2832 else
2833 dir = PCI_DMA_TODEVICE;
2835 for (i = 0; i < seg; i++) {
2836 c->SG[i].Len = tmp_sg[i].length;
2837 temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
2838 tmp_sg[i].offset,
2839 tmp_sg[i].length, dir);
2840 c->SG[i].Addr.lower = temp64.val32.lower;
2841 c->SG[i].Addr.upper = temp64.val32.upper;
2842 c->SG[i].Ext = 0; // we are not chaining
2844 /* track how many SG entries we are using */
2845 if (seg > h->maxSG)
2846 h->maxSG = seg;
2848 #ifdef CCISS_DEBUG
2849 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
2850 creq->nr_sectors, seg);
2851 #endif /* CCISS_DEBUG */
2853 c->Header.SGList = c->Header.SGTotal = seg;
2854 if (likely(blk_fs_request(creq))) {
2855 if(h->cciss_read == CCISS_READ_10) {
2856 c->Request.CDB[1] = 0;
2857 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2858 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2859 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2860 c->Request.CDB[5] = start_blk & 0xff;
2861 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2862 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2863 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2864 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2865 } else {
2866 u32 upper32 = upper_32_bits(start_blk);
2868 c->Request.CDBLen = 16;
2869 c->Request.CDB[1]= 0;
2870 c->Request.CDB[2]= (upper32 >> 24) & 0xff; //MSB
2871 c->Request.CDB[3]= (upper32 >> 16) & 0xff;
2872 c->Request.CDB[4]= (upper32 >> 8) & 0xff;
2873 c->Request.CDB[5]= upper32 & 0xff;
2874 c->Request.CDB[6]= (start_blk >> 24) & 0xff;
2875 c->Request.CDB[7]= (start_blk >> 16) & 0xff;
2876 c->Request.CDB[8]= (start_blk >> 8) & 0xff;
2877 c->Request.CDB[9]= start_blk & 0xff;
2878 c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
2879 c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
2880 c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
2881 c->Request.CDB[13]= creq->nr_sectors & 0xff;
2882 c->Request.CDB[14] = c->Request.CDB[15] = 0;
2884 } else if (blk_pc_request(creq)) {
2885 c->Request.CDBLen = creq->cmd_len;
2886 memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
2887 } else {
2888 printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type);
2889 BUG();
2892 spin_lock_irq(q->queue_lock);
2894 addQ(&(h->reqQ), c);
2895 h->Qdepth++;
2896 if (h->Qdepth > h->maxQsinceinit)
2897 h->maxQsinceinit = h->Qdepth;
2899 goto queue;
2900 full:
2901 blk_stop_queue(q);
2902 startio:
2903 /* We will already have the driver lock here, so no need
2904 * to lock it.
2905 */
2906 start_io(h);
2907 }
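/* get_next_completion: return the next completed command tag, giving
 * priority to completions that sendcmd() parked on the scsi_rejects
 * list when CONFIG_CISS_SCSI_TAPE is enabled.
 */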
2909 static inline unsigned long get_next_completion(ctlr_info_t *h)
2911 #ifdef CONFIG_CISS_SCSI_TAPE
2912 /* Any rejects from sendcmd() lying around? Process them first */
2913 if (h->scsi_rejects.ncompletions == 0)
2914 return h->access.command_completed(h);
2915 else {
2916 struct sendcmd_reject_list *srl;
2917 int n;
2918 srl = &h->scsi_rejects;
2919 n = --srl->ncompletions;
2920 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2921 printk("p");
2922 return srl->complete[n];
2924 #else
2925 return h->access.command_completed(h);
2926 #endif
2929 static inline int interrupt_pending(ctlr_info_t *h)
2931 #ifdef CONFIG_CISS_SCSI_TAPE
2932 return (h->access.intr_pending(h)
2933 || (h->scsi_rejects.ncompletions > 0));
2934 #else
2935 return h->access.intr_pending(h);
2936 #endif
2939 static inline long interrupt_not_for_us(ctlr_info_t *h)
2941 #ifdef CONFIG_CISS_SCSI_TAPE
2942 return (((h->access.intr_pending(h) == 0) ||
2943 (h->interrupts_enabled == 0))
2944 && (h->scsi_rejects.ncompletions == 0));
2945 #else
2946 return (((h->access.intr_pending(h) == 0) ||
2947 (h->interrupts_enabled == 0)));
2948 #endif
2949 }
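/* do_cciss_intr: the interrupt handler. Drains the completion FIFO;
 * tags with bit 0x04 set are direct command-pool indexes (tag >> 3),
 * otherwise the completion queue is searched by bus address. Block
 * requests, ioctls and SCSI tape commands are completed accordingly.
 */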
2951 static irqreturn_t do_cciss_intr(int irq, void *dev_id)
2953 ctlr_info_t *h = dev_id;
2954 CommandList_struct *c;
2955 unsigned long flags;
2956 __u32 a, a1, a2;
2958 if (interrupt_not_for_us(h))
2959 return IRQ_NONE;
2960 /*
2961 * If there are completed commands in the completion queue,
2962 * we had better do something about it.
2963 */
2964 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2965 while (interrupt_pending(h)) {
2966 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
2967 a1 = a;
2968 if ((a & 0x04)) {
2969 a2 = (a >> 3);
2970 if (a2 >= h->nr_cmds) {
2971 printk(KERN_WARNING
2972 "cciss: controller cciss%d failed, stopping.\n",
2973 h->ctlr);
2974 fail_all_cmds(h->ctlr);
2975 return IRQ_HANDLED;
2978 c = h->cmd_pool + a2;
2979 a = c->busaddr;
2981 } else {
2982 a &= ~3;
2983 if ((c = h->cmpQ) == NULL) {
2984 printk(KERN_WARNING
2985 "cciss: Completion of %08x ignored\n",
2986 a1);
2987 continue;
2989 while (c->busaddr != a) {
2990 c = c->next;
2991 if (c == h->cmpQ)
2992 break;
2996 * If we've found the command, take it off the
2997 * completion Q and free it
2999 if (c->busaddr == a) {
3000 removeQ(&h->cmpQ, c);
3001 if (c->cmd_type == CMD_RWREQ) {
3002 complete_command(h, c, 0);
3003 } else if (c->cmd_type == CMD_IOCTL_PEND) {
3004 complete(c->waiting);
3006 # ifdef CONFIG_CISS_SCSI_TAPE
3007 else if (c->cmd_type == CMD_SCSI)
3008 complete_scsi_command(c, 0, a1);
3009 # endif
3010 continue;
3015 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
3016 return IRQ_HANDLED;
3019 /*
3020 * We cannot read the structure directly, for portability we must use
3021 * the io functions.
3022 * This is for debug only.
3023 */
3024 #ifdef CCISS_DEBUG
3025 static void print_cfg_table(CfgTable_struct *tb)
3027 int i;
3028 char temp_name[17];
3030 printk("Controller Configuration information\n");
3031 printk("------------------------------------\n");
3032 for (i = 0; i < 4; i++)
3033 temp_name[i] = readb(&(tb->Signature[i]));
3034 temp_name[4] = '\0';
3035 printk(" Signature = %s\n", temp_name);
3036 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
3037 printk(" Transport methods supported = 0x%x\n",
3038 readl(&(tb->TransportSupport)));
3039 printk(" Transport methods active = 0x%x\n",
3040 readl(&(tb->TransportActive)));
3041 printk(" Requested transport Method = 0x%x\n",
3042 readl(&(tb->HostWrite.TransportRequest)));
3043 printk(" Coalesce Interrupt Delay = 0x%x\n",
3044 readl(&(tb->HostWrite.CoalIntDelay)));
3045 printk(" Coalesce Interrupt Count = 0x%x\n",
3046 readl(&(tb->HostWrite.CoalIntCount)));
3047 printk(" Max outstanding commands = %d\n",
3048 readl(&(tb->CmdsOutMax)));
3049 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
3050 for (i = 0; i < 16; i++)
3051 temp_name[i] = readb(&(tb->ServerName[i]));
3052 temp_name[16] = '\0';
3053 printk(" Server Name = %s\n", temp_name);
3054 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
3056 #endif /* CCISS_DEBUG */
3058 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3060 int i, offset, mem_type, bar_type;
3061 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
3062 return 0;
3063 offset = 0;
3064 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3065 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
3066 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
3067 offset += 4;
3068 else {
3069 mem_type = pci_resource_flags(pdev, i) &
3070 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
3071 switch (mem_type) {
3072 case PCI_BASE_ADDRESS_MEM_TYPE_32:
3073 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
3074 offset += 4; /* 32 bit */
3075 break;
3076 case PCI_BASE_ADDRESS_MEM_TYPE_64:
3077 offset += 8;
3078 break;
3079 default: /* reserved in PCI 2.2 */
3080 printk(KERN_WARNING
3081 "Base address is invalid\n");
3082 return -1;
3083 break;
3086 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
3087 return i + 1;
3089 return -1;
3092 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
3093 * controllers that are capable. If not, we use IO-APIC mode.
3094 */
3096 static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
3097 struct pci_dev *pdev, __u32 board_id)
3099 #ifdef CONFIG_PCI_MSI
3100 int err;
3101 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
3102 {0, 2}, {0, 3}
3105 /* Some boards advertise MSI but don't really support it */
3106 if ((board_id == 0x40700E11) ||
3107 (board_id == 0x40800E11) ||
3108 (board_id == 0x40820E11) || (board_id == 0x40830E11))
3109 goto default_int_mode;
3111 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
3112 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
3113 if (!err) {
3114 c->intr[0] = cciss_msix_entries[0].vector;
3115 c->intr[1] = cciss_msix_entries[1].vector;
3116 c->intr[2] = cciss_msix_entries[2].vector;
3117 c->intr[3] = cciss_msix_entries[3].vector;
3118 c->msix_vector = 1;
3119 return;
3121 if (err > 0) {
3122 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
3123 "available\n", err);
3124 goto default_int_mode;
3125 } else {
3126 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
3127 err);
3128 goto default_int_mode;
3131 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
3132 if (!pci_enable_msi(pdev)) {
3133 c->msi_vector = 1;
3134 } else {
3135 printk(KERN_WARNING "cciss: MSI init failed\n");
3138 default_int_mode:
3139 #endif /* CONFIG_PCI_MSI */
3140 /* if we get here we're going to use the default interrupt mode */
3141 c->intr[SIMPLE_MODE_INT] = pdev->irq;
3142 return;
3143 }
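/* cciss_pci_init: enable the PCI device, map BAR 0, wait up to 120
 * seconds for the firmware-ready scratchpad value, locate and map the
 * config table, pick the product entry by board id, and switch the
 * controller into simple transport mode.
 */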
3145 static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
3147 ushort subsystem_vendor_id, subsystem_device_id, command;
3148 __u32 board_id, scratchpad = 0;
3149 __u64 cfg_offset;
3150 __u32 cfg_base_addr;
3151 __u64 cfg_base_addr_index;
3152 int i, err;
3154 /* check to see if controller has been disabled */
3155 /* BEFORE trying to enable it */
3156 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
3157 if (!(command & 0x02)) {
3158 printk(KERN_WARNING
3159 "cciss: controller appears to be disabled\n");
3160 return -ENODEV;
3163 err = pci_enable_device(pdev);
3164 if (err) {
3165 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
3166 return err;
3169 err = pci_request_regions(pdev, "cciss");
3170 if (err) {
3171 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
3172 "aborting\n");
3173 return err;
3176 subsystem_vendor_id = pdev->subsystem_vendor;
3177 subsystem_device_id = pdev->subsystem_device;
3178 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
3179 subsystem_vendor_id);
3181 #ifdef CCISS_DEBUG
3182 printk("command = %x\n", command);
3183 printk("irq = %x\n", pdev->irq);
3184 printk("board_id = %x\n", board_id);
3185 #endif /* CCISS_DEBUG */
3187 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
3188 * else we use the IO-APIC interrupt assigned to us by system ROM.
3190 cciss_interrupt_mode(c, pdev, board_id);
3193 * Memory base addr is first addr , the second points to the config
3194 * table
3197 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
3198 #ifdef CCISS_DEBUG
3199 printk("address 0 = %x\n", c->paddr);
3200 #endif /* CCISS_DEBUG */
3201 c->vaddr = remap_pci_mem(c->paddr, 0x250);
3203 /* Wait for the board to become ready. (PCI hotplug needs this.)
3204 * We poll for up to 120 secs, once per 100ms. */
3205 for (i = 0; i < 1200; i++) {
3206 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
3207 if (scratchpad == CCISS_FIRMWARE_READY)
3208 break;
3209 set_current_state(TASK_INTERRUPTIBLE);
3210 schedule_timeout(HZ / 10); /* wait 100ms */
3212 if (scratchpad != CCISS_FIRMWARE_READY) {
3213 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
3214 err = -ENODEV;
3215 goto err_out_free_res;
3218 /* get the address index number */
3219 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
3220 cfg_base_addr &= (__u32) 0x0000ffff;
3221 #ifdef CCISS_DEBUG
3222 printk("cfg base address = %x\n", cfg_base_addr);
3223 #endif /* CCISS_DEBUG */
3224 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
3225 #ifdef CCISS_DEBUG
3226 printk("cfg base address index = %x\n", cfg_base_addr_index);
3227 #endif /* CCISS_DEBUG */
3228 if (cfg_base_addr_index == -1) {
3229 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
3230 err = -ENODEV;
3231 goto err_out_free_res;
3234 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
3235 #ifdef CCISS_DEBUG
3236 printk("cfg offset = %x\n", cfg_offset);
3237 #endif /* CCISS_DEBUG */
3238 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
3239 cfg_base_addr_index) +
3240 cfg_offset, sizeof(CfgTable_struct));
3241 c->board_id = board_id;
3243 #ifdef CCISS_DEBUG
3244 print_cfg_table(c->cfgtable);
3245 #endif /* CCISS_DEBUG */
3247 /* Some controllers support Zero Memory Raid (ZMR).
3248 * When configured in ZMR mode the number of supported
3249 * commands drops to 64. So instead of just setting an
3250 * arbitrary value we make the driver a little smarter.
3251 * We read the config table to tell us how many commands
3252 * are supported on the controller then subtract 4 to
3253 * leave a little room for ioctl calls.
3255 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
3256 for (i = 0; i < ARRAY_SIZE(products); i++) {
3257 if (board_id == products[i].board_id) {
3258 c->product_name = products[i].product_name;
3259 c->access = *(products[i].access);
3260 c->nr_cmds = c->max_commands - 4;
3261 break;
3264 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
3265 (readb(&c->cfgtable->Signature[1]) != 'I') ||
3266 (readb(&c->cfgtable->Signature[2]) != 'S') ||
3267 (readb(&c->cfgtable->Signature[3]) != 'S')) {
3268 printk(KERN_WARNING "Does not appear to be a valid CISS config table\n");
3269 err = -ENODEV;
3270 goto err_out_free_res;
3272 /* We didn't find the controller in our list. We know the
3273 * signature is valid. If it's an HP device let's try to
3274 * bind to the device and fire it up. Otherwise we bail.
3276 if (i == ARRAY_SIZE(products)) {
3277 if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
3278 c->product_name = products[i-1].product_name;
3279 c->access = *(products[i-1].access);
3280 c->nr_cmds = c->max_commands - 4;
3281 printk(KERN_WARNING "cciss: This is an unknown "
3282 "Smart Array controller.\n"
3283 "cciss: Please update to the latest driver "
3284 "available from www.hp.com.\n");
3285 } else {
3286 printk(KERN_WARNING "cciss: Sorry, I don't know how"
3287 " to access the Smart Array controller %08lx\n"
3288 , (unsigned long)board_id);
3289 err = -ENODEV;
3290 goto err_out_free_res;
3293 #ifdef CONFIG_X86
3295 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3296 __u32 prefetch;
3297 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
3298 prefetch |= 0x100;
3299 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
3301 #endif
3303 /* Disabling DMA prefetch and refetch for the P600.
3304 * An ASIC bug may result in accesses to invalid memory addresses.
3305 * We've disabled prefetch for some time now. Testing with XEN
3306 * kernels revealed a bug in the refetch if dom0 resides on a P600.
3308 if(board_id == 0x3225103C) {
3309 __u32 dma_prefetch;
3310 __u32 dma_refetch;
3311 dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
3312 dma_prefetch |= 0x8000;
3313 writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
3314 pci_read_config_dword(pdev, PCI_COMMAND_PARITY, &dma_refetch);
3315 dma_refetch |= 0x1;
3316 pci_write_config_dword(pdev, PCI_COMMAND_PARITY, dma_refetch);
3319 #ifdef CCISS_DEBUG
3320 printk("Trying to put board into Simple mode\n");
3321 #endif /* CCISS_DEBUG */
3322 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
3323 /* Update the field, and then ring the doorbell */
3324 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
3325 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
3327 /* under certain very rare conditions, this can take awhile.
3328 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3329 * as we enter this code.) */
3330 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3331 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3332 break;
3333 /* delay and try again */
3334 set_current_state(TASK_INTERRUPTIBLE);
3335 schedule_timeout(10);
3338 #ifdef CCISS_DEBUG
3339 printk(KERN_DEBUG "I counter got to %d %x\n", i,
3340 readl(c->vaddr + SA5_DOORBELL));
3341 #endif /* CCISS_DEBUG */
3342 #ifdef CCISS_DEBUG
3343 print_cfg_table(c->cfgtable);
3344 #endif /* CCISS_DEBUG */
3346 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3347 printk(KERN_WARNING "cciss: unable to get board into"
3348 " simple mode\n");
3349 err = -ENODEV;
3350 goto err_out_free_res;
3352 return 0;
3354 err_out_free_res:
3355 /*
3356 * Deliberately omit pci_disable_device(): it does something nasty to
3357 * Smart Array controllers that pci_enable_device does not undo
3358 */
3359 pci_release_regions(pdev);
3360 return err;
3363 /* Function to find the first free pointer into our hba[] array
3364 * Returns -1 if no free entries are left.
3365 */
3366 static int alloc_cciss_hba(void)
3368 int i;
3370 for (i = 0; i < MAX_CTLR; i++) {
3371 if (!hba[i]) {
3372 ctlr_info_t *p;
3374 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3375 if (!p)
3376 goto Enomem;
3377 hba[i] = p;
3378 return i;
3381 printk(KERN_WARNING "cciss: This driver supports a maximum"
3382 " of %d controllers.\n", MAX_CTLR);
3383 return -1;
3384 Enomem:
3385 printk(KERN_ERR "cciss: out of memory.\n");
3386 return -1;
3389 static void free_hba(int i)
3391 ctlr_info_t *p = hba[i];
3392 int n;
3394 hba[i] = NULL;
3395 for (n = 0; n < CISS_MAX_LUN; n++)
3396 put_disk(p->gendisk[n]);
3397 kfree(p);
3400 /*
3401 * This is it. Find all the controllers and register them. I really hate
3402 * stealing all these major device numbers.
3403 * Returns the number of block devices registered.
3404 */
3405 static int __devinit cciss_init_one(struct pci_dev *pdev,
3406 const struct pci_device_id *ent)
3408 int i;
3409 int j = 0;
3410 int rc;
3411 int dac;
3413 i = alloc_cciss_hba();
3414 if (i < 0)
3415 return -1;
3417 hba[i]->busy_initializing = 1;
3419 if (cciss_pci_init(hba[i], pdev) != 0)
3420 goto clean1;
3422 sprintf(hba[i]->devname, "cciss%d", i);
3423 hba[i]->ctlr = i;
3424 hba[i]->pdev = pdev;
3426 /* configure PCI DMA stuff */
3427 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3428 dac = 1;
3429 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3430 dac = 0;
3431 else {
3432 printk(KERN_ERR "cciss: no suitable DMA available\n");
3433 goto clean1;
3437 * register with the major number, or get a dynamic major number
3438 * by passing 0 as argument. This is done for greater than
3439 * 8 controller support.
3441 if (i < MAX_CTLR_ORIG)
3442 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3443 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3444 if (rc == -EBUSY || rc == -EINVAL) {
3445 printk(KERN_ERR
3446 "cciss: Unable to get major number %d for %s "
3447 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3448 goto clean1;
3449 } else {
3450 if (i >= MAX_CTLR_ORIG)
3451 hba[i]->major = rc;
3454 /* make sure the board interrupts are off */
3455 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3456 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3457 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
3458 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3459 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3460 goto clean2;
3463 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3464 hba[i]->devname, pdev->device, pci_name(pdev),
3465 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3467 hba[i]->cmd_pool_bits =
3468 kmalloc(DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
3469 * sizeof(unsigned long), GFP_KERNEL);
3470 hba[i]->cmd_pool = (CommandList_struct *)
3471 pci_alloc_consistent(hba[i]->pdev,
3472 hba[i]->nr_cmds * sizeof(CommandList_struct),
3473 &(hba[i]->cmd_pool_dhandle));
3474 hba[i]->errinfo_pool = (ErrorInfo_struct *)
3475 pci_alloc_consistent(hba[i]->pdev,
3476 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3477 &(hba[i]->errinfo_pool_dhandle));
3478 if ((hba[i]->cmd_pool_bits == NULL)
3479 || (hba[i]->cmd_pool == NULL)
3480 || (hba[i]->errinfo_pool == NULL)) {
3481 printk(KERN_ERR "cciss: out of memory");
3482 goto clean4;
3484 #ifdef CONFIG_CISS_SCSI_TAPE
3485 hba[i]->scsi_rejects.complete =
3486 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3487 (hba[i]->nr_cmds + 5), GFP_KERNEL);
3488 if (hba[i]->scsi_rejects.complete == NULL) {
3489 printk(KERN_ERR "cciss: out of memory");
3490 goto clean4;
3492 #endif
3493 spin_lock_init(&hba[i]->lock);
3495 /* Initialize the pdev driver private data.
3496 have it point to hba[i]. */
3497 pci_set_drvdata(pdev, hba[i]);
3498 /* command and error info recs zeroed out before
3499 they are used */
3500 memset(hba[i]->cmd_pool_bits, 0,
3501 DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
3502 * sizeof(unsigned long));
3504 hba[i]->num_luns = 0;
3505 hba[i]->highest_lun = -1;
3506 for (j = 0; j < CISS_MAX_LUN; j++) {
3507 hba[i]->drv[j].raid_level = -1;
3508 hba[i]->drv[j].queue = NULL;
3509 hba[i]->gendisk[j] = NULL;
3512 cciss_scsi_setup(i);
3514 /* Turn the interrupts on so we can service requests */
3515 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3517 cciss_procinit(i);
3519 hba[i]->cciss_max_sectors = 2048;
3521 hba[i]->busy_initializing = 0;
3523 rebuild_lun_table(hba[i], 1);
3524 return 1;
3526 clean4:
3527 #ifdef CONFIG_CISS_SCSI_TAPE
3528 kfree(hba[i]->scsi_rejects.complete);
3529 #endif
3530 kfree(hba[i]->cmd_pool_bits);
3531 if (hba[i]->cmd_pool)
3532 pci_free_consistent(hba[i]->pdev,
3533 hba[i]->nr_cmds * sizeof(CommandList_struct),
3534 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3535 if (hba[i]->errinfo_pool)
3536 pci_free_consistent(hba[i]->pdev,
3537 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3538 hba[i]->errinfo_pool,
3539 hba[i]->errinfo_pool_dhandle);
3540 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3541 clean2:
3542 unregister_blkdev(hba[i]->major, hba[i]->devname);
3543 clean1:
3544 hba[i]->busy_initializing = 0;
3545 /* cleanup any queues that may have been initialized */
3546 for (j=0; j <= hba[i]->highest_lun; j++){
3547 drive_info_struct *drv = &(hba[i]->drv[j]);
3548 if (drv->queue)
3549 blk_cleanup_queue(drv->queue);
3551 /*
3552 * Deliberately omit pci_disable_device(): it does something nasty to
3553 * Smart Array controllers that pci_enable_device does not undo
3554 */
3555 pci_release_regions(pdev);
3556 pci_set_drvdata(pdev, NULL);
3557 free_hba(i);
3558 return -1;
3559 }
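/* cciss_shutdown: polled cache-flush path used at shutdown/remove time.
 * Interrupts are masked by sendcmd(), the battery-backed write cache is
 * flushed to disk, and the controller's IRQ is released.
 */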
3561 static void cciss_shutdown(struct pci_dev *pdev)
3563 ctlr_info_t *tmp_ptr;
3564 int i;
3565 char flush_buf[4];
3566 int return_code;
3568 tmp_ptr = pci_get_drvdata(pdev);
3569 if (tmp_ptr == NULL)
3570 return;
3571 i = tmp_ptr->ctlr;
3572 if (hba[i] == NULL)
3573 return;
3575 /* Turn board interrupts off and send the flush cache command */
3576 /* sendcmd will turn off interrupt, and send the flush...
3577 * To write all data in the battery backed cache to disks */
3578 memset(flush_buf, 0, 4);
3579 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3580 TYPE_CMD);
3581 if (return_code == IO_OK) {
3582 printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
3583 } else {
3584 printk(KERN_WARNING "Error flushing cache on controller %d\n", i);
3586 free_irq(hba[i]->intr[2], hba[i]);
3589 static void __devexit cciss_remove_one(struct pci_dev *pdev)
3591 ctlr_info_t *tmp_ptr;
3592 int i, j;
3594 if (pci_get_drvdata(pdev) == NULL) {
3595 printk(KERN_ERR "cciss: Unable to remove device \n");
3596 return;
3598 tmp_ptr = pci_get_drvdata(pdev);
3599 i = tmp_ptr->ctlr;
3600 if (hba[i] == NULL) {
3601 printk(KERN_ERR "cciss: device appears to "
3602 "already be removed \n");
3603 return;
3606 remove_proc_entry(hba[i]->devname, proc_cciss);
3607 unregister_blkdev(hba[i]->major, hba[i]->devname);
3609 /* remove it from the disk list */
3610 for (j = 0; j < CISS_MAX_LUN; j++) {
3611 struct gendisk *disk = hba[i]->gendisk[j];
3612 if (disk) {
3613 struct request_queue *q = disk->queue;
3615 if (disk->flags & GENHD_FL_UP)
3616 del_gendisk(disk);
3617 if (q)
3618 blk_cleanup_queue(q);
3622 #ifdef CONFIG_CISS_SCSI_TAPE
3623 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3624 #endif
3626 cciss_shutdown(pdev);
3628 #ifdef CONFIG_PCI_MSI
3629 if (hba[i]->msix_vector)
3630 pci_disable_msix(hba[i]->pdev);
3631 else if (hba[i]->msi_vector)
3632 pci_disable_msi(hba[i]->pdev);
3633 #endif /* CONFIG_PCI_MSI */
3635 iounmap(hba[i]->vaddr);
3637 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
3638 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3639 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3640 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3641 kfree(hba[i]->cmd_pool_bits);
3642 #ifdef CONFIG_CISS_SCSI_TAPE
3643 kfree(hba[i]->scsi_rejects.complete);
3644 #endif
3645 /*
3646 * Deliberately omit pci_disable_device(): it does something nasty to
3647 * Smart Array controllers that pci_enable_device does not undo
3648 */
3649 pci_release_regions(pdev);
3650 pci_set_drvdata(pdev, NULL);
3651 free_hba(i);
3654 static struct pci_driver cciss_pci_driver = {
3655 .name = "cciss",
3656 .probe = cciss_init_one,
3657 .remove = __devexit_p(cciss_remove_one),
3658 .id_table = cciss_pci_device_id, /* id_table */
3659 .shutdown = cciss_shutdown,
3660 };
3662 /*
3663 * This is it. Register the PCI driver information for the cards we control;
3664 * the OS will call our registered routines when it finds one of our cards.
3665 */
3666 static int __init cciss_init(void)
3668 printk(KERN_INFO DRIVER_NAME "\n");
3670 /* Register for our PCI devices */
3671 return pci_register_driver(&cciss_pci_driver);
3674 static void __exit cciss_cleanup(void)
3676 int i;
3678 pci_unregister_driver(&cciss_pci_driver);
3679 /* double check that all controller entries have been removed */
3680 for (i = 0; i < MAX_CTLR; i++) {
3681 if (hba[i] != NULL) {
3682 printk(KERN_WARNING "cciss: had to remove"
3683 " controller %d\n", i);
3684 cciss_remove_one(hba[i]->pdev);
3687 remove_proc_entry("driver/cciss", NULL);
3688 }
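/* fail_all_cmds: called when the controller stops responding. The
 * device is disabled and every queued command is completed with
 * CMD_HARDWARE_ERR so upper layers see the failure promptly.
 */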
3690 static void fail_all_cmds(unsigned long ctlr)
3692 /* If we get here, the board is apparently dead. */
3693 ctlr_info_t *h = hba[ctlr];
3694 CommandList_struct *c;
3695 unsigned long flags;
3697 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3698 h->alive = 0; /* the controller apparently died... */
3700 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3702 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3704 /* move everything off the request queue onto the completed queue */
3705 while ((c = h->reqQ) != NULL) {
3706 removeQ(&(h->reqQ), c);
3707 h->Qdepth--;
3708 addQ(&(h->cmpQ), c);
3711 /* Now, fail everything on the completed queue with a HW error */
3712 while ((c = h->cmpQ) != NULL) {
3713 removeQ(&h->cmpQ, c);
3714 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3715 if (c->cmd_type == CMD_RWREQ) {
3716 complete_command(h, c, 0);
3717 } else if (c->cmd_type == CMD_IOCTL_PEND)
3718 complete(c->waiting);
3719 #ifdef CONFIG_CISS_SCSI_TAPE
3720 else if (c->cmd_type == CMD_SCSI)
3721 complete_scsi_command(c, 0, 0);
3722 #endif
3724 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3725 return;
3728 module_init(cciss_init);
3729 module_exit(cciss_cleanup);