drivers/block/cciss.c
/*
 *    Disk Array driver for HP Smart Array controllers.
 *    (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *    General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
 *    02111-1307, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/blkpg.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/hdreg.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <asm/uaccess.h>
#include <asm/io.h>

#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/completion.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <scsi/scsi_ioctl.h>
#include <linux/cdrom.h>
#include <linux/scatterlist.h>
#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
#define DRIVER_NAME "HP CISS Driver (v 3.6.20)"
#define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 20)

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controllers");
MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
			" SA6i P600 P800 P400 P400i E200 E200i E500 P700m"
			" Smart Array G2 Series SAS/SATA Controllers");
MODULE_VERSION("3.6.20");
MODULE_LICENSE("GPL");

#include "cciss_cmd.h"
#include "cciss.h"
#include <linux/cciss_ioctl.h>
/* define the PCI info for the cards we can control */
static const struct pci_device_id cciss_pci_device_id[] = {
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x40700E11, "Smart Array 5300", &SA5_access},
	{0x40800E11, "Smart Array 5i", &SA5B_access},
	{0x40820E11, "Smart Array 532", &SA5B_access},
	{0x40830E11, "Smart Array 5312", &SA5B_access},
	{0x409A0E11, "Smart Array 641", &SA5_access},
	{0x409B0E11, "Smart Array 642", &SA5_access},
	{0x409C0E11, "Smart Array 6400", &SA5_access},
	{0x409D0E11, "Smart Array 6400 EM", &SA5_access},
	{0x40910E11, "Smart Array 6i", &SA5_access},
	{0x3225103C, "Smart Array P600", &SA5_access},
	{0x3223103C, "Smart Array P800", &SA5_access},
	{0x3234103C, "Smart Array P400", &SA5_access},
	{0x3235103C, "Smart Array P400i", &SA5_access},
	{0x3211103C, "Smart Array E200i", &SA5_access},
	{0x3212103C, "Smart Array E200", &SA5_access},
	{0x3213103C, "Smart Array E200i", &SA5_access},
	{0x3214103C, "Smart Array E200i", &SA5_access},
	{0x3215103C, "Smart Array E200i", &SA5_access},
	{0x3237103C, "Smart Array E500", &SA5_access},
	{0x323D103C, "Smart Array P700m", &SA5_access},
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/*define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

#define MAX_CTLR	32

/* Originally cciss driver only supports 8 major numbers */
#define MAX_CTLR_ORIG	8

static ctlr_info_t *hba[MAX_CTLR];
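
/* One ctlr_info_t per detected controller, indexed by controller number. */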
static void do_cciss_request(struct request_queue *q);
static irqreturn_t do_cciss_intr(int irq, void *dev_id);
static int cciss_open(struct block_device *bdev, fmode_t mode);
static int cciss_release(struct gendisk *disk, fmode_t mode);
static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned int cmd, unsigned long arg);
static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);

static int cciss_revalidate(struct gendisk *disk);
static int rebuild_lun_table(ctlr_info_t *h, int first_time);
static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
			   int clear_all);

static void cciss_read_capacity(int ctlr, int logvol, int withirq,
				sector_t *total_size, unsigned int *block_size);
static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
				   sector_t *total_size, unsigned int *block_size);
static void cciss_geometry_inquiry(int ctlr, int logvol,
				   int withirq, sector_t total_size,
				   unsigned int block_size, InquiryData_struct *inq_buff,
				   drive_info_struct *drv);
static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
					   __u32);
static void start_io(ctlr_info_t *h);
static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
		   unsigned int use_unit_num, unsigned int log_unit,
		   __u8 page_code, unsigned char *scsi3addr, int cmd_type);
static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
			   unsigned int use_unit_num, unsigned int log_unit,
			   __u8 page_code, int cmd_type);

static void fail_all_cmds(unsigned long ctlr);

#ifdef CONFIG_PROC_FS
static void cciss_procinit(int i);
#else
static void cciss_procinit(int i)
{
}
#endif				/* CONFIG_PROC_FS */

#ifdef CONFIG_COMPAT
static int cciss_compat_ioctl(struct block_device *, fmode_t,
			      unsigned, unsigned long);
#endif
static struct block_device_operations cciss_fops = {
	.owner = THIS_MODULE,
	.open = cciss_open,
	.release = cciss_release,
	.locked_ioctl = cciss_ioctl,
	.getgeo = cciss_getgeo,
#ifdef CONFIG_COMPAT
	.compat_ioctl = cciss_compat_ioctl,
#endif
	.revalidate_disk = cciss_revalidate,
};
/*
 * Enqueuing and dequeuing functions for cmdlists.
 */
static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
{
	if (*Qptr == NULL) {
		*Qptr = c;
		c->next = c->prev = c;
	} else {
		c->prev = (*Qptr)->prev;
		c->next = (*Qptr);
		(*Qptr)->prev->next = c;
		(*Qptr)->prev = c;
	}
}

static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
					  CommandList_struct *c)
{
	if (c && c->next != c) {
		if (*Qptr == c)
			*Qptr = c->next;
		c->prev->next = c->next;
		c->next->prev = c->prev;
	} else {
		*Qptr = NULL;
	}
	return c;
}
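
/* Both helpers keep the request/completion queues as circular, doubly
 * linked lists with *Qptr pointing at the oldest entry; callers are
 * expected to hold the per-controller spinlock (CCISS_LOCK) around them.
 */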
#include "cciss_scsi.c"		/* For SCSI tape support */

#define RAID_UNKNOWN 6

#ifdef CONFIG_PROC_FS

/*
 * Report information about this controller.
 */
#define ENG_GIG 1000000000
#define ENG_GIG_FACTOR (ENG_GIG/512)
#define ENGAGE_SCSI	"engage scsi"
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"UNKNOWN"
};

static struct proc_dir_entry *proc_cciss;
static void cciss_seq_show_header(struct seq_file *seq)
{
	ctlr_info_t *h = seq->private;

	seq_printf(seq, "%s: HP %s Controller\n"
		"Board ID: 0x%08lx\n"
		"Firmware Version: %c%c%c%c\n"
		"IRQ: %d\n"
		"Logical drives: %d\n"
		"Current Q depth: %d\n"
		"Current # commands on controller: %d\n"
		"Max Q depth since init: %d\n"
		"Max # commands on controller since init: %d\n"
		"Max SG entries since init: %d\n",
		h->devname,
		h->product_name,
		(unsigned long)h->board_id,
		h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
		h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
		h->num_luns,
		h->Qdepth, h->commands_outstanding,
		h->maxQsinceinit, h->max_outstanding, h->maxSG);

#ifdef CONFIG_CISS_SCSI_TAPE
	cciss_seq_tape_report(seq, h->ctlr);
#endif /* CONFIG_CISS_SCSI_TAPE */
}
static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
{
	ctlr_info_t *h = seq->private;
	unsigned ctlr = h->ctlr;
	unsigned long flags;

	/* prevent displaying bogus info during configuration
	 * or deconfiguration of a logical volume
	 */
	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
	if (h->busy_configuring) {
		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
		return ERR_PTR(-EBUSY);
	}
	h->busy_configuring = 1;
	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);

	if (*pos == 0)
		cciss_seq_show_header(seq);

	return pos;
}
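
/* The seq_file iterator walks h->drv[] by logical drive number: *pos is
 * the drive index, start() prints the header on the first pass and takes
 * the busy_configuring flag, and stop() drops it again.
 */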
static int cciss_seq_show(struct seq_file *seq, void *v)
{
	sector_t vol_sz, vol_sz_frac;
	ctlr_info_t *h = seq->private;
	unsigned ctlr = h->ctlr;
	loff_t *pos = v;
	drive_info_struct *drv = &h->drv[*pos];

	if (*pos > h->highest_lun)
		return 0;

	if (drv->heads == 0)
		return 0;

	vol_sz = drv->nr_blocks;
	vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
	vol_sz_frac *= 100;
	sector_div(vol_sz_frac, ENG_GIG_FACTOR);

	if (drv->raid_level > 5)
		drv->raid_level = RAID_UNKNOWN;
	seq_printf(seq, "cciss/c%dd%d:"
			"\t%4u.%02uGB\tRAID %s\n",
			ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac,
			raid_label[drv->raid_level]);
	return 0;
}
static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	ctlr_info_t *h = seq->private;

	if (*pos > h->highest_lun)
		return NULL;
	*pos += 1;

	return pos;
}

static void cciss_seq_stop(struct seq_file *seq, void *v)
{
	ctlr_info_t *h = seq->private;

	/* Only reset h->busy_configuring if we succeeded in setting
	 * it during cciss_seq_start. */
	if (v == ERR_PTR(-EBUSY))
		return;

	h->busy_configuring = 0;
}
static struct seq_operations cciss_seq_ops = {
	.start = cciss_seq_start,
	.show  = cciss_seq_show,
	.next  = cciss_seq_next,
	.stop  = cciss_seq_stop,
};

static int cciss_seq_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &cciss_seq_ops);
	struct seq_file *seq = file->private_data;

	if (!ret)
		seq->private = PDE(inode)->data;

	return ret;
}
static ssize_t
cciss_proc_write(struct file *file, const char __user *buf,
		 size_t length, loff_t *ppos)
{
	int err;
	char *buffer;

#ifndef CONFIG_CISS_SCSI_TAPE
	return -EINVAL;
#endif

	if (!buf || length > PAGE_SIZE - 1)
		return -EINVAL;

	buffer = (char *)__get_free_page(GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(buffer, buf, length))
		goto out;
	buffer[length] = '\0';

#ifdef CONFIG_CISS_SCSI_TAPE
	if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) {
		struct seq_file *seq = file->private_data;
		ctlr_info_t *h = seq->private;
		int rc;

		rc = cciss_engage_scsi(h->ctlr);
		if (rc != 0)
			err = -rc;
		else
			err = length;
	} else
#endif /* CONFIG_CISS_SCSI_TAPE */
		err = -EINVAL;
	/* might be nice to have "disengage" too, but it's not
	   safely possible. (only 1 module use count, lock issues.) */

out:
	free_page((unsigned long)buffer);
	return err;
}
static struct file_operations cciss_proc_fops = {
	.owner	 = THIS_MODULE,
	.open    = cciss_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
	.write	 = cciss_proc_write,
};

static void __devinit cciss_procinit(int i)
{
	struct proc_dir_entry *pde;

	if (proc_cciss == NULL)
		proc_cciss = proc_mkdir("driver/cciss", NULL);
	if (!proc_cciss)
		return;
	pde = proc_create_data(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
			       S_IROTH, proc_cciss,
			       &cciss_proc_fops, hba[i]);
}
#endif				/* CONFIG_PROC_FS */
/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use.  For operations that can wait for kmalloc
 * to possibly sleep, this routine can be called with get_from_pool set to 0.
 * cmd_free() MUST be called with got_from_pool set to 0 if cmd_alloc was.
 */
static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
{
	CommandList_struct *c;
	int i;
	u64bit temp64;
	dma_addr_t cmd_dma_handle, err_dma_handle;

	if (!get_from_pool) {
		c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
			sizeof(CommandList_struct), &cmd_dma_handle);
		if (c == NULL)
			return NULL;
		memset(c, 0, sizeof(CommandList_struct));

		c->cmdindex = -1;

		c->err_info = (ErrorInfo_struct *)
		    pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
			    &err_dma_handle);

		if (c->err_info == NULL) {
			pci_free_consistent(h->pdev,
				sizeof(CommandList_struct), c, cmd_dma_handle);
			return NULL;
		}
		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
	} else {		/* get it out of the controllers pool */

		do {
			i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
			if (i == h->nr_cmds)
				return NULL;
		} while (test_and_set_bit
			 (i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
#ifdef CCISS_DEBUG
		printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
#endif
		c = h->cmd_pool + i;
		memset(c, 0, sizeof(CommandList_struct));
		cmd_dma_handle = h->cmd_pool_dhandle
		    + i * sizeof(CommandList_struct);
		c->err_info = h->errinfo_pool + i;
		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
		err_dma_handle = h->errinfo_pool_dhandle
		    + i * sizeof(ErrorInfo_struct);
		h->nr_allocs++;

		c->cmdindex = i;
	}

	c->busaddr = (__u32) cmd_dma_handle;
	temp64.val = (__u64) err_dma_handle;
	c->ErrDesc.Addr.lower = temp64.val32.lower;
	c->ErrDesc.Addr.upper = temp64.val32.upper;
	c->ErrDesc.Len = sizeof(ErrorInfo_struct);

	c->ctlr = h->ctlr;
	return c;
}
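
/* Whichever path was taken, c->ErrDesc ends up pointing at the DMA address
 * of the matching ErrorInfo_struct so the controller can post error/sense
 * data for this command, and callers later use c->busaddr as the command tag.
 */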
/*
 * Frees a command block that was previously allocated with cmd_alloc().
 */
static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
{
	int i;
	u64bit temp64;

	if (!got_from_pool) {
		temp64.val32.lower = c->ErrDesc.Addr.lower;
		temp64.val32.upper = c->ErrDesc.Addr.upper;
		pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
				    c->err_info, (dma_addr_t) temp64.val);
		pci_free_consistent(h->pdev, sizeof(CommandList_struct),
				    c, (dma_addr_t) c->busaddr);
	} else {
		i = c - h->cmd_pool;
		clear_bit(i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG));
		h->nr_frees++;
	}
}
static inline ctlr_info_t *get_host(struct gendisk *disk)
{
	return disk->queue->queuedata;
}

static inline drive_info_struct *get_drv(struct gendisk *disk)
{
	return disk->private_data;
}
/*
 * Open.  Make sure the device is really there.
 */
static int cciss_open(struct block_device *bdev, fmode_t mode)
{
	ctlr_info_t *host = get_host(bdev->bd_disk);
	drive_info_struct *drv = get_drv(bdev->bd_disk);

#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name);
#endif				/* CCISS_DEBUG */

	if (host->busy_initializing || drv->busy_configuring)
		return -EBUSY;
	/*
	 * Root is allowed to open raw volume zero even if it's not configured
	 * so array config can still work. Root is also allowed to open any
	 * volume that has a LUN ID, so it can issue IOCTL to reread the
	 * disk information.  I don't think I really like this
	 * but I'm already using way too many device nodes to claim another one
	 * for "raw controller".
	 */
	if (drv->heads == 0) {
		if (MINOR(bdev->bd_dev) != 0) {	/* not node 0? */
			/* if not node 0 make sure it is a partition = 0 */
			if (MINOR(bdev->bd_dev) & 0x0f) {
				return -ENXIO;
				/* if it is, make sure we have a LUN ID */
			} else if (drv->LunID == 0) {
				return -ENXIO;
			}
		}
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}
	drv->usage_count++;
	host->usage_count++;
	return 0;
}
/*
 * Close.  Sync first.
 */
static int cciss_release(struct gendisk *disk, fmode_t mode)
{
	ctlr_info_t *host = get_host(disk);
	drive_info_struct *drv = get_drv(disk);

#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "cciss_release %s\n", disk->disk_name);
#endif				/* CCISS_DEBUG */

	drv->usage_count--;
	host->usage_count--;
	return 0;
}
#ifdef CONFIG_COMPAT

static int do_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned cmd, unsigned long arg)
{
	int ret;
	lock_kernel();
	ret = cciss_ioctl(bdev, mode, cmd, arg);
	unlock_kernel();
	return ret;
}
static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
				  unsigned cmd, unsigned long arg);
static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
				      unsigned cmd, unsigned long arg);

static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned cmd, unsigned long arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return do_ioctl(bdev, mode, cmd, arg);

	case CCISS_PASSTHRU32:
		return cciss_ioctl32_passthru(bdev, mode, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return cciss_ioctl32_big_passthru(bdev, mode, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}
static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
				  unsigned cmd, unsigned long arg)
{
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	err = 0;
	err |=
	    copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |=
	    copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |=
	    copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
	if (err)
		return err;
	err |=
	    copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
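
/* Both 32-bit passthru shims work the same way: widen the user's 32-bit
 * structure into the native layout in a compat_alloc_user_space() area
 * (translating the buffer pointer with compat_ptr()), forward to the
 * regular CCISS_PASSTHRU/CCISS_BIG_PASSTHRU handler, then copy the
 * error_info back to the 32-bit caller.
 */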
static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
				      unsigned cmd, unsigned long arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	err = 0;
	err |=
	    copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |=
	    copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |=
	    copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
	if (err)
		return err;
	err |=
	    copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
#endif
static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	drive_info_struct *drv = get_drv(bdev->bd_disk);

	if (!drv->cylinders)
		return -ENXIO;

	geo->heads = drv->heads;
	geo->sectors = drv->sectors;
	geo->cylinders = drv->cylinders;
	return 0;
}
752 * ioctl
754 static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
755 unsigned int cmd, unsigned long arg)
757 struct gendisk *disk = bdev->bd_disk;
758 ctlr_info_t *host = get_host(disk);
759 drive_info_struct *drv = get_drv(disk);
760 int ctlr = host->ctlr;
761 void __user *argp = (void __user *)arg;
763 #ifdef CCISS_DEBUG
764 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
765 #endif /* CCISS_DEBUG */
767 switch (cmd) {
768 case CCISS_GETPCIINFO:
770 cciss_pci_info_struct pciinfo;
772 if (!arg)
773 return -EINVAL;
774 pciinfo.domain = pci_domain_nr(host->pdev->bus);
775 pciinfo.bus = host->pdev->bus->number;
776 pciinfo.dev_fn = host->pdev->devfn;
777 pciinfo.board_id = host->board_id;
778 if (copy_to_user
779 (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
780 return -EFAULT;
781 return 0;
783 case CCISS_GETINTINFO:
785 cciss_coalint_struct intinfo;
786 if (!arg)
787 return -EINVAL;
788 intinfo.delay =
789 readl(&host->cfgtable->HostWrite.CoalIntDelay);
790 intinfo.count =
791 readl(&host->cfgtable->HostWrite.CoalIntCount);
792 if (copy_to_user
793 (argp, &intinfo, sizeof(cciss_coalint_struct)))
794 return -EFAULT;
795 return 0;
797 case CCISS_SETINTINFO:
799 cciss_coalint_struct intinfo;
800 unsigned long flags;
801 int i;
803 if (!arg)
804 return -EINVAL;
805 if (!capable(CAP_SYS_ADMIN))
806 return -EPERM;
807 if (copy_from_user
808 (&intinfo, argp, sizeof(cciss_coalint_struct)))
809 return -EFAULT;
810 if ((intinfo.delay == 0) && (intinfo.count == 0))
812 // printk("cciss_ioctl: delay and count cannot be 0\n");
813 return -EINVAL;
815 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
816 /* Update the field, and then ring the doorbell */
817 writel(intinfo.delay,
818 &(host->cfgtable->HostWrite.CoalIntDelay));
819 writel(intinfo.count,
820 &(host->cfgtable->HostWrite.CoalIntCount));
821 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
823 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
824 if (!(readl(host->vaddr + SA5_DOORBELL)
825 & CFGTBL_ChangeReq))
826 break;
827 /* delay and try again */
828 udelay(1000);
830 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
831 if (i >= MAX_IOCTL_CONFIG_WAIT)
832 return -EAGAIN;
833 return 0;
835 case CCISS_GETNODENAME:
837 NodeName_type NodeName;
838 int i;
840 if (!arg)
841 return -EINVAL;
842 for (i = 0; i < 16; i++)
843 NodeName[i] =
844 readb(&host->cfgtable->ServerName[i]);
845 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
846 return -EFAULT;
847 return 0;
849 case CCISS_SETNODENAME:
851 NodeName_type NodeName;
852 unsigned long flags;
853 int i;
855 if (!arg)
856 return -EINVAL;
857 if (!capable(CAP_SYS_ADMIN))
858 return -EPERM;
860 if (copy_from_user
861 (NodeName, argp, sizeof(NodeName_type)))
862 return -EFAULT;
864 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
866 /* Update the field, and then ring the doorbell */
867 for (i = 0; i < 16; i++)
868 writeb(NodeName[i],
869 &host->cfgtable->ServerName[i]);
871 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
873 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
874 if (!(readl(host->vaddr + SA5_DOORBELL)
875 & CFGTBL_ChangeReq))
876 break;
877 /* delay and try again */
878 udelay(1000);
880 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
881 if (i >= MAX_IOCTL_CONFIG_WAIT)
882 return -EAGAIN;
883 return 0;
886 case CCISS_GETHEARTBEAT:
888 Heartbeat_type heartbeat;
890 if (!arg)
891 return -EINVAL;
892 heartbeat = readl(&host->cfgtable->HeartBeat);
893 if (copy_to_user
894 (argp, &heartbeat, sizeof(Heartbeat_type)))
895 return -EFAULT;
896 return 0;
898 case CCISS_GETBUSTYPES:
900 BusTypes_type BusTypes;
902 if (!arg)
903 return -EINVAL;
904 BusTypes = readl(&host->cfgtable->BusTypes);
905 if (copy_to_user
906 (argp, &BusTypes, sizeof(BusTypes_type)))
907 return -EFAULT;
908 return 0;
910 case CCISS_GETFIRMVER:
912 FirmwareVer_type firmware;
914 if (!arg)
915 return -EINVAL;
916 memcpy(firmware, host->firm_ver, 4);
918 if (copy_to_user
919 (argp, firmware, sizeof(FirmwareVer_type)))
920 return -EFAULT;
921 return 0;
923 case CCISS_GETDRIVVER:
925 DriverVer_type DriverVer = DRIVER_VERSION;
927 if (!arg)
928 return -EINVAL;
930 if (copy_to_user
931 (argp, &DriverVer, sizeof(DriverVer_type)))
932 return -EFAULT;
933 return 0;
936 case CCISS_DEREGDISK:
937 case CCISS_REGNEWD:
938 case CCISS_REVALIDVOLS:
939 return rebuild_lun_table(host, 0);
941 case CCISS_GETLUNINFO:{
942 LogvolInfo_struct luninfo;
944 luninfo.LunID = drv->LunID;
945 luninfo.num_opens = drv->usage_count;
946 luninfo.num_parts = 0;
947 if (copy_to_user(argp, &luninfo,
948 sizeof(LogvolInfo_struct)))
949 return -EFAULT;
950 return 0;
952 case CCISS_PASSTHRU:
954 IOCTL_Command_struct iocommand;
955 CommandList_struct *c;
956 char *buff = NULL;
957 u64bit temp64;
958 unsigned long flags;
959 DECLARE_COMPLETION_ONSTACK(wait);
961 if (!arg)
962 return -EINVAL;
964 if (!capable(CAP_SYS_RAWIO))
965 return -EPERM;
967 if (copy_from_user
968 (&iocommand, argp, sizeof(IOCTL_Command_struct)))
969 return -EFAULT;
970 if ((iocommand.buf_size < 1) &&
971 (iocommand.Request.Type.Direction != XFER_NONE)) {
972 return -EINVAL;
974 #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
975 /* Check kmalloc limits */
976 if (iocommand.buf_size > 128000)
977 return -EINVAL;
978 #endif
979 if (iocommand.buf_size > 0) {
980 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
981 if (buff == NULL)
982 return -EFAULT;
984 if (iocommand.Request.Type.Direction == XFER_WRITE) {
985 /* Copy the data into the buffer we created */
986 if (copy_from_user
987 (buff, iocommand.buf, iocommand.buf_size)) {
988 kfree(buff);
989 return -EFAULT;
991 } else {
992 memset(buff, 0, iocommand.buf_size);
994 if ((c = cmd_alloc(host, 0)) == NULL) {
995 kfree(buff);
996 return -ENOMEM;
998 // Fill in the command type
999 c->cmd_type = CMD_IOCTL_PEND;
1000 // Fill in Command Header
1001 c->Header.ReplyQueue = 0; // unused in simple mode
1002 if (iocommand.buf_size > 0) // buffer to fill
1004 c->Header.SGList = 1;
1005 c->Header.SGTotal = 1;
1006 } else // no buffers to fill
1008 c->Header.SGList = 0;
1009 c->Header.SGTotal = 0;
1011 c->Header.LUN = iocommand.LUN_info;
1012 c->Header.Tag.lower = c->busaddr; // use the kernel address the cmd block for tag
1014 // Fill in Request block
1015 c->Request = iocommand.Request;
1017 // Fill in the scatter gather information
1018 if (iocommand.buf_size > 0) {
1019 temp64.val = pci_map_single(host->pdev, buff,
1020 iocommand.buf_size,
1021 PCI_DMA_BIDIRECTIONAL);
1022 c->SG[0].Addr.lower = temp64.val32.lower;
1023 c->SG[0].Addr.upper = temp64.val32.upper;
1024 c->SG[0].Len = iocommand.buf_size;
1025 c->SG[0].Ext = 0; // we are not chaining
1027 c->waiting = &wait;
1029 /* Put the request on the tail of the request queue */
1030 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1031 addQ(&host->reqQ, c);
1032 host->Qdepth++;
1033 start_io(host);
1034 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1036 wait_for_completion(&wait);
1038 /* unlock the buffers from DMA */
1039 temp64.val32.lower = c->SG[0].Addr.lower;
1040 temp64.val32.upper = c->SG[0].Addr.upper;
1041 pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
1042 iocommand.buf_size,
1043 PCI_DMA_BIDIRECTIONAL);
1045 /* Copy the error information out */
1046 iocommand.error_info = *(c->err_info);
1047 if (copy_to_user
1048 (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
1049 kfree(buff);
1050 cmd_free(host, c, 0);
1051 return -EFAULT;
1054 if (iocommand.Request.Type.Direction == XFER_READ) {
1055 /* Copy the data out of the buffer we created */
1056 if (copy_to_user
1057 (iocommand.buf, buff, iocommand.buf_size)) {
1058 kfree(buff);
1059 cmd_free(host, c, 0);
1060 return -EFAULT;
1063 kfree(buff);
1064 cmd_free(host, c, 0);
1065 return 0;
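	/* CCISS_BIG_PASSTHRU is the same idea as CCISS_PASSTHRU, but the user
	 * buffer may be larger than a single kmalloc()able chunk, so it is
	 * split into up to MAXSGENTRIES pieces of at most ioc->malloc_size
	 * bytes each and handed to the controller as a scatter-gather list.
	 */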
1067 case CCISS_BIG_PASSTHRU:{
1068 BIG_IOCTL_Command_struct *ioc;
1069 CommandList_struct *c;
1070 unsigned char **buff = NULL;
1071 int *buff_size = NULL;
1072 u64bit temp64;
1073 unsigned long flags;
1074 BYTE sg_used = 0;
1075 int status = 0;
1076 int i;
1077 DECLARE_COMPLETION_ONSTACK(wait);
1078 __u32 left;
1079 __u32 sz;
1080 BYTE __user *data_ptr;
1082 if (!arg)
1083 return -EINVAL;
1084 if (!capable(CAP_SYS_RAWIO))
1085 return -EPERM;
1086 ioc = (BIG_IOCTL_Command_struct *)
1087 kmalloc(sizeof(*ioc), GFP_KERNEL);
1088 if (!ioc) {
1089 status = -ENOMEM;
1090 goto cleanup1;
1092 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
1093 status = -EFAULT;
1094 goto cleanup1;
1096 if ((ioc->buf_size < 1) &&
1097 (ioc->Request.Type.Direction != XFER_NONE)) {
1098 status = -EINVAL;
1099 goto cleanup1;
1101 /* Check kmalloc limits using all SGs */
1102 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
1103 status = -EINVAL;
1104 goto cleanup1;
1106 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
1107 status = -EINVAL;
1108 goto cleanup1;
1110 buff =
1111 kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1112 if (!buff) {
1113 status = -ENOMEM;
1114 goto cleanup1;
1116 buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
1117 GFP_KERNEL);
1118 if (!buff_size) {
1119 status = -ENOMEM;
1120 goto cleanup1;
1122 left = ioc->buf_size;
1123 data_ptr = ioc->buf;
1124 while (left) {
1125 sz = (left >
1126 ioc->malloc_size) ? ioc->
1127 malloc_size : left;
1128 buff_size[sg_used] = sz;
1129 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1130 if (buff[sg_used] == NULL) {
1131 status = -ENOMEM;
1132 goto cleanup1;
1134 if (ioc->Request.Type.Direction == XFER_WRITE) {
1135 if (copy_from_user
1136 (buff[sg_used], data_ptr, sz)) {
1137 status = -EFAULT;
1138 goto cleanup1;
1140 } else {
1141 memset(buff[sg_used], 0, sz);
1143 left -= sz;
1144 data_ptr += sz;
1145 sg_used++;
1147 if ((c = cmd_alloc(host, 0)) == NULL) {
1148 status = -ENOMEM;
1149 goto cleanup1;
1151 c->cmd_type = CMD_IOCTL_PEND;
1152 c->Header.ReplyQueue = 0;
1154 if (ioc->buf_size > 0) {
1155 c->Header.SGList = sg_used;
1156 c->Header.SGTotal = sg_used;
1157 } else {
1158 c->Header.SGList = 0;
1159 c->Header.SGTotal = 0;
1161 c->Header.LUN = ioc->LUN_info;
1162 c->Header.Tag.lower = c->busaddr;
1164 c->Request = ioc->Request;
1165 if (ioc->buf_size > 0) {
1166 int i;
1167 for (i = 0; i < sg_used; i++) {
1168 temp64.val =
1169 pci_map_single(host->pdev, buff[i],
1170 buff_size[i],
1171 PCI_DMA_BIDIRECTIONAL);
1172 c->SG[i].Addr.lower =
1173 temp64.val32.lower;
1174 c->SG[i].Addr.upper =
1175 temp64.val32.upper;
1176 c->SG[i].Len = buff_size[i];
1177 c->SG[i].Ext = 0; /* we are not chaining */
1180 c->waiting = &wait;
1181 /* Put the request on the tail of the request queue */
1182 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1183 addQ(&host->reqQ, c);
1184 host->Qdepth++;
1185 start_io(host);
1186 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1187 wait_for_completion(&wait);
1188 /* unlock the buffers from DMA */
1189 for (i = 0; i < sg_used; i++) {
1190 temp64.val32.lower = c->SG[i].Addr.lower;
1191 temp64.val32.upper = c->SG[i].Addr.upper;
1192 pci_unmap_single(host->pdev,
1193 (dma_addr_t) temp64.val, buff_size[i],
1194 PCI_DMA_BIDIRECTIONAL);
1196 /* Copy the error information out */
1197 ioc->error_info = *(c->err_info);
1198 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1199 cmd_free(host, c, 0);
1200 status = -EFAULT;
1201 goto cleanup1;
1203 if (ioc->Request.Type.Direction == XFER_READ) {
1204 /* Copy the data out of the buffer we created */
1205 BYTE __user *ptr = ioc->buf;
1206 for (i = 0; i < sg_used; i++) {
1207 if (copy_to_user
1208 (ptr, buff[i], buff_size[i])) {
1209 cmd_free(host, c, 0);
1210 status = -EFAULT;
1211 goto cleanup1;
1213 ptr += buff_size[i];
1216 cmd_free(host, c, 0);
1217 status = 0;
1218 cleanup1:
1219 if (buff) {
1220 for (i = 0; i < sg_used; i++)
1221 kfree(buff[i]);
1222 kfree(buff);
1224 kfree(buff_size);
1225 kfree(ioc);
1226 return status;
1229 /* scsi_cmd_ioctl handles these, below, though some are not */
1230 /* very meaningful for cciss. SG_IO is the main one people want. */
1232 case SG_GET_VERSION_NUM:
1233 case SG_SET_TIMEOUT:
1234 case SG_GET_TIMEOUT:
1235 case SG_GET_RESERVED_SIZE:
1236 case SG_SET_RESERVED_SIZE:
1237 case SG_EMULATED_HOST:
1238 case SG_IO:
1239 case SCSI_IOCTL_SEND_COMMAND:
1240 return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
1242 /* scsi_cmd_ioctl would normally handle these, below, but */
1243 /* they aren't a good fit for cciss, as CD-ROMs are */
1244 /* not supported, and we don't have any bus/target/lun */
1245 /* which we present to the kernel. */
1247 case CDROM_SEND_PACKET:
1248 case CDROMCLOSETRAY:
1249 case CDROMEJECT:
1250 case SCSI_IOCTL_GET_IDLUN:
1251 case SCSI_IOCTL_GET_BUS_NUMBER:
1252 default:
1253 return -ENOTTY;
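/* cciss_check_queues() runs after a command completes: if command slots are
 * free it restarts the per-drive block queues in round-robin order,
 * remembering where it stopped in h->next_to_run so every logical drive
 * eventually gets a turn.
 */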
1257 static void cciss_check_queues(ctlr_info_t *h)
1259 int start_queue = h->next_to_run;
1260 int i;
1262 /* check to see if we have maxed out the number of commands that can
1263 * be placed on the queue. If so then exit. We do this check here
1264 * in case the interrupt we serviced was from an ioctl and did not
1265 * free any new commands.
1267 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
1268 return;
1270 /* We have room on the queue for more commands. Now we need to queue
1271 * them up. We will also keep track of the next queue to run so
1272 * that every queue gets a chance to be started first.
1274 for (i = 0; i < h->highest_lun + 1; i++) {
1275 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1276 /* make sure the disk has been added and the drive is real
1277 * because this can be called from the middle of init_one.
1279 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1280 continue;
1281 blk_start_queue(h->gendisk[curr_queue]->queue);
1283 /* check to see if we have maxed out the number of commands
1284 * that can be placed on the queue.
1286 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
1287 if (curr_queue == start_queue) {
1288 h->next_to_run =
1289 (start_queue + 1) % (h->highest_lun + 1);
1290 break;
1291 } else {
1292 h->next_to_run = curr_queue;
1293 break;
1299 static void cciss_softirq_done(struct request *rq)
1301 CommandList_struct *cmd = rq->completion_data;
1302 ctlr_info_t *h = hba[cmd->ctlr];
1303 unsigned long flags;
1304 u64bit temp64;
1305 int i, ddir;
1307 if (cmd->Request.Type.Direction == XFER_READ)
1308 ddir = PCI_DMA_FROMDEVICE;
1309 else
1310 ddir = PCI_DMA_TODEVICE;
1312 /* command did not need to be retried */
1313 /* unmap the DMA mapping for all the scatter gather elements */
1314 for (i = 0; i < cmd->Header.SGList; i++) {
1315 temp64.val32.lower = cmd->SG[i].Addr.lower;
1316 temp64.val32.upper = cmd->SG[i].Addr.upper;
1317 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1320 #ifdef CCISS_DEBUG
1321 printk("Done with %p\n", rq);
1322 #endif /* CCISS_DEBUG */
1324 if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, blk_rq_bytes(rq)))
1325 BUG();
1327 spin_lock_irqsave(&h->lock, flags);
1328 cmd_free(h, cmd, 1);
1329 cciss_check_queues(h);
1330 spin_unlock_irqrestore(&h->lock, flags);
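/* cciss_softirq_done() above is wired up via blk_queue_softirq_done(), so
 * request completion (DMA unmap, blk_end_request, command free, queue
 * restart) runs in BLOCK_SOFTIRQ context rather than in the hard IRQ path.
 */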
1333 /* This function gets the serial number of a logical drive via
1334 * inquiry page 0x83. Serial no. is 16 bytes. If the serial
1335 * number cannot be had, for whatever reason, 16 bytes of 0xff
1336 * are returned instead.
1338 static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
1339 unsigned char *serial_no, int buflen)
1341 #define PAGE_83_INQ_BYTES 64
1342 int rc;
1343 unsigned char *buf;
1345 if (buflen > 16)
1346 buflen = 16;
1347 memset(serial_no, 0xff, buflen);
1348 buf = kzalloc(PAGE_83_INQ_BYTES, GFP_KERNEL);
1349 if (!buf)
1350 return;
1351 memset(serial_no, 0, buflen);
1352 if (withirq)
1353 rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf,
1354 PAGE_83_INQ_BYTES, 1, logvol, 0x83, TYPE_CMD);
1355 else
1356 rc = sendcmd(CISS_INQUIRY, ctlr, buf,
1357 PAGE_83_INQ_BYTES, 1, logvol, 0x83, NULL, TYPE_CMD);
1358 if (rc == IO_OK)
1359 memcpy(serial_no, &buf[8], buflen);
1360 kfree(buf);
1361 return;
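/* cciss_add_disk() sets up the request queue and queue limits for one
 * logical drive and registers its gendisk with the block layer.
 */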
1364 static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
1365 int drv_index)
1367 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1368 sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index);
1369 disk->major = h->major;
1370 disk->first_minor = drv_index << NWD_SHIFT;
1371 disk->fops = &cciss_fops;
1372 disk->private_data = &h->drv[drv_index];
1373 disk->driverfs_dev = &h->pdev->dev;
1375 /* Set up queue information */
1376 blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
1378 /* This is a hardware imposed limit. */
1379 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1381 /* This is a limit in the driver and could be eliminated. */
1382 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1384 blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
1386 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1388 disk->queue->queuedata = h;
1390 blk_queue_hardsect_size(disk->queue,
1391 h->drv[drv_index].block_size);
1393 /* Make sure all queue data is written out before */
1394 /* setting h->drv[drv_index].queue, as setting this */
1395 /* allows the interrupt handler to start the queue */
1396 wmb();
1397 h->drv[drv_index].queue = disk->queue;
1398 add_disk(disk);
1401 /* This function will check the usage_count of the drive to be updated/added.
1402 * If the usage_count is zero and it is a heretofore unknown drive, or,
1403 * the drive's capacity, geometry, or serial number has changed,
1404 * then the drive information will be updated and the disk will be
1405 * re-registered with the kernel. If these conditions don't hold,
1406 * then it will be left alone for the next reboot. The exception to this
1407 * is disk 0 which will always be left registered with the kernel since it
1408 * is also the controller node. Any changes to disk 0 will show up on
1409 * the next reboot.
1411 static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1413 ctlr_info_t *h = hba[ctlr];
1414 struct gendisk *disk;
1415 InquiryData_struct *inq_buff = NULL;
1416 unsigned int block_size;
1417 sector_t total_size;
1418 unsigned long flags = 0;
1419 int ret = 0;
1420 drive_info_struct *drvinfo;
1421 int was_only_controller_node;
1423 /* Get information about the disk and modify the driver structure */
1424 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1425 drvinfo = kmalloc(sizeof(*drvinfo), GFP_KERNEL);
1426 if (inq_buff == NULL || drvinfo == NULL)
1427 goto mem_msg;
1429 /* See if we're trying to update the "controller node"
1430 * this will happen the when the first logical drive gets
1431 * created by ACU.
1433 was_only_controller_node = (drv_index == 0 &&
1434 h->drv[0].raid_level == -1);
1436 /* testing to see if 16-byte CDBs are already being used */
1437 if (h->cciss_read == CCISS_READ_16) {
1438 cciss_read_capacity_16(h->ctlr, drv_index, 1,
1439 &total_size, &block_size);
1441 } else {
1442 cciss_read_capacity(ctlr, drv_index, 1,
1443 &total_size, &block_size);
1445 /* if read_capacity returns all F's this volume is >2TB */
1446 /* in size so we switch to 16-byte CDB's for all */
1447 /* read/write ops */
1448 if (total_size == 0xFFFFFFFFULL) {
1449 cciss_read_capacity_16(ctlr, drv_index, 1,
1450 &total_size, &block_size);
1451 h->cciss_read = CCISS_READ_16;
1452 h->cciss_write = CCISS_WRITE_16;
1453 } else {
1454 h->cciss_read = CCISS_READ_10;
1455 h->cciss_write = CCISS_WRITE_10;
1459 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1460 inq_buff, drvinfo);
1461 drvinfo->block_size = block_size;
1462 drvinfo->nr_blocks = total_size + 1;
1464 cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no,
1465 sizeof(drvinfo->serial_no));
1467 /* Is it the same disk we already know, and nothing's changed? */
1468 if (h->drv[drv_index].raid_level != -1 &&
1469 ((memcmp(drvinfo->serial_no,
1470 h->drv[drv_index].serial_no, 16) == 0) &&
1471 drvinfo->block_size == h->drv[drv_index].block_size &&
1472 drvinfo->nr_blocks == h->drv[drv_index].nr_blocks &&
1473 drvinfo->heads == h->drv[drv_index].heads &&
1474 drvinfo->sectors == h->drv[drv_index].sectors &&
1475 drvinfo->cylinders == h->drv[drv_index].cylinders))
1476 /* The disk is unchanged, nothing to update */
1477 goto freeret;
1479 /* If we get here it's not the same disk, or something's changed,
1480 * so we need to * deregister it, and re-register it, if it's not
1481 * in use.
1482 * If the disk already exists then deregister it before proceeding
1483 * (unless it's the first disk (for the controller node).
1485 if (h->drv[drv_index].raid_level != -1 && drv_index != 0) {
1486 printk(KERN_WARNING "disk %d has changed.\n", drv_index);
1487 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1488 h->drv[drv_index].busy_configuring = 1;
1489 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1491 /* deregister_disk sets h->drv[drv_index].queue = NULL
1492 * which keeps the interrupt handler from starting
1493 * the queue.
1495 ret = deregister_disk(h->gendisk[drv_index],
1496 &h->drv[drv_index], 0);
1497 h->drv[drv_index].busy_configuring = 0;
1500 /* If the disk is in use return */
1501 if (ret)
1502 goto freeret;
1504 /* Save the new information from cciss_geometry_inquiry
1505 * and serial number inquiry.
1507 h->drv[drv_index].block_size = drvinfo->block_size;
1508 h->drv[drv_index].nr_blocks = drvinfo->nr_blocks;
1509 h->drv[drv_index].heads = drvinfo->heads;
1510 h->drv[drv_index].sectors = drvinfo->sectors;
1511 h->drv[drv_index].cylinders = drvinfo->cylinders;
1512 h->drv[drv_index].raid_level = drvinfo->raid_level;
1513 memcpy(h->drv[drv_index].serial_no, drvinfo->serial_no, 16);
1515 ++h->num_luns;
1516 disk = h->gendisk[drv_index];
1517 set_capacity(disk, h->drv[drv_index].nr_blocks);
1519 /* If it's not disk 0 (drv_index != 0)
1520 * or if it was disk 0, but there was previously
1521 * no actual corresponding configured logical drive
1522 * (raid_leve == -1) then we want to update the
1523 * logical drive's information.
1525 if (drv_index || first_time)
1526 cciss_add_disk(h, disk, drv_index);
1528 freeret:
1529 kfree(inq_buff);
1530 kfree(drvinfo);
1531 return;
1532 mem_msg:
1533 printk(KERN_ERR "cciss: out of memory\n");
1534 goto freeret;
1537 /* This function will find the first index of the controllers drive array
1538 * that has a -1 for the raid_level and will return that index. This is
1539 * where new drives will be added. If the index to be returned is greater
1540 * than the highest_lun index for the controller then highest_lun is set
1541 * to this new index. If there are no available indexes then -1 is returned.
1542 * "controller_node" is used to know if this is a real logical drive, or just
1543 * the controller node, which determines if this counts towards highest_lun.
1545 static int cciss_find_free_drive_index(int ctlr, int controller_node)
1547 int i;
1549 for (i = 0; i < CISS_MAX_LUN; i++) {
1550 if (hba[ctlr]->drv[i].raid_level == -1) {
1551 if (i > hba[ctlr]->highest_lun)
1552 if (!controller_node)
1553 hba[ctlr]->highest_lun = i;
1554 return i;
1557 return -1;
1560 /* cciss_add_gendisk finds a free hba[]->drv structure
1561 * and allocates a gendisk if needed, and sets the lunid
1562 * in the drvinfo structure. It returns the index into
1563 * the ->drv[] array, or -1 if none are free.
1564 * is_controller_node indicates whether highest_lun should
1565 * count this disk, or if it's only being added to provide
1566 * a means to talk to the controller in case no logical
1567 * drives have yet been configured.
1569 static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node)
1571 int drv_index;
1573 drv_index = cciss_find_free_drive_index(h->ctlr, controller_node);
1574 if (drv_index == -1)
1575 return -1;
1576 /*Check if the gendisk needs to be allocated */
1577 if (!h->gendisk[drv_index]) {
1578 h->gendisk[drv_index] =
1579 alloc_disk(1 << NWD_SHIFT);
1580 if (!h->gendisk[drv_index]) {
1581 printk(KERN_ERR "cciss%d: could not "
1582 "allocate a new disk %d\n",
1583 h->ctlr, drv_index);
1584 return -1;
1587 h->drv[drv_index].LunID = lunid;
1589 /* Don't need to mark this busy because nobody */
1590 /* else knows about this disk yet to contend */
1591 /* for access to it. */
1592 h->drv[drv_index].busy_configuring = 0;
1593 wmb();
1594 return drv_index;
1597 /* This is for the special case of a controller which
1598 * has no logical drives. In this case, we still need
1599 * to register a disk so the controller can be accessed
1600 * by the Array Config Utility.
1602 static void cciss_add_controller_node(ctlr_info_t *h)
1604 struct gendisk *disk;
1605 int drv_index;
1607 if (h->gendisk[0] != NULL) /* already did this? Then bail. */
1608 return;
1610 drv_index = cciss_add_gendisk(h, 0, 1);
1611 if (drv_index == -1) {
1612 printk(KERN_WARNING "cciss%d: could not "
1613 "add disk 0.\n", h->ctlr);
1614 return;
1616 h->drv[drv_index].block_size = 512;
1617 h->drv[drv_index].nr_blocks = 0;
1618 h->drv[drv_index].heads = 0;
1619 h->drv[drv_index].sectors = 0;
1620 h->drv[drv_index].cylinders = 0;
1621 h->drv[drv_index].raid_level = -1;
1622 memset(h->drv[drv_index].serial_no, 0, 16);
1623 disk = h->gendisk[drv_index];
1624 cciss_add_disk(h, disk, drv_index);
1627 /* This function will add and remove logical drives from the Logical
1628 * drive array of the controller and maintain persistency of ordering
1629 * so that mount points are preserved until the next reboot. This allows
1630 * for the removal of logical drives in the middle of the drive array
1631 * without a re-ordering of those drives.
1632 * INPUT
1633 * h = The controller to perform the operations on
1635 static int rebuild_lun_table(ctlr_info_t *h, int first_time)
1637 int ctlr = h->ctlr;
1638 int num_luns;
1639 ReportLunData_struct *ld_buff = NULL;
1640 int return_code;
1641 int listlength = 0;
1642 int i;
1643 int drv_found;
1644 int drv_index = 0;
1645 __u32 lunid = 0;
1646 unsigned long flags;
1648 if (!capable(CAP_SYS_RAWIO))
1649 return -EPERM;
1651 /* Set busy_configuring flag for this operation */
1652 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1653 if (h->busy_configuring) {
1654 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1655 return -EBUSY;
1657 h->busy_configuring = 1;
1658 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1660 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1661 if (ld_buff == NULL)
1662 goto mem_msg;
1664 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1665 sizeof(ReportLunData_struct), 0,
1666 0, 0, TYPE_CMD);
1668 if (return_code == IO_OK)
1669 listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
1670 else { /* reading number of logical volumes failed */
1671 printk(KERN_WARNING "cciss: report logical volume"
1672 " command failed\n");
1673 listlength = 0;
1674 goto freeret;
1677 num_luns = listlength / 8; /* 8 bytes per entry */
1678 if (num_luns > CISS_MAX_LUN) {
1679 num_luns = CISS_MAX_LUN;
1680 printk(KERN_WARNING "cciss: more luns configured"
1681 " on controller than can be handled by"
1682 " this driver.\n");
1685 if (num_luns == 0)
1686 cciss_add_controller_node(h);
1688 /* Compare controller drive array to driver's drive array
1689 * to see if any drives are missing on the controller due
1690 * to action of Array Config Utility (user deletes drive)
1691 * and deregister logical drives which have disappeared.
1693 for (i = 0; i <= h->highest_lun; i++) {
1694 int j;
1695 drv_found = 0;
1697 /* skip holes in the array from already deleted drives */
1698 if (h->drv[i].raid_level == -1)
1699 continue;
1701 for (j = 0; j < num_luns; j++) {
1702 memcpy(&lunid, &ld_buff->LUN[j][0], 4);
1703 lunid = le32_to_cpu(lunid);
1704 if (h->drv[i].LunID == lunid) {
1705 drv_found = 1;
1706 break;
1709 if (!drv_found) {
1710 /* Deregister it from the OS, it's gone. */
1711 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1712 h->drv[i].busy_configuring = 1;
1713 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1714 return_code = deregister_disk(h->gendisk[i],
1715 &h->drv[i], 1);
1716 h->drv[i].busy_configuring = 0;
1720 /* Compare controller drive array to driver's drive array.
1721 * Check for updates in the drive information and any new drives
1722 * on the controller due to ACU adding logical drives, or changing
1723 * a logical drive's size, etc. Reregister any new/changed drives
1725 for (i = 0; i < num_luns; i++) {
1726 int j;
1728 drv_found = 0;
1730 memcpy(&lunid, &ld_buff->LUN[i][0], 4);
1731 lunid = le32_to_cpu(lunid);
1733 /* Find if the LUN is already in the drive array
1734 * of the driver. If so then update its info
1735 * if not in use. If it does not exist then find
1736 * the first free index and add it.
1738 for (j = 0; j <= h->highest_lun; j++) {
1739 if (h->drv[j].raid_level != -1 &&
1740 h->drv[j].LunID == lunid) {
1741 drv_index = j;
1742 drv_found = 1;
1743 break;
1747 /* check if the drive was found already in the array */
1748 if (!drv_found) {
1749 drv_index = cciss_add_gendisk(h, lunid, 0);
1750 if (drv_index == -1)
1751 goto freeret;
1753 cciss_update_drive_info(ctlr, drv_index, first_time);
1754 } /* end for */
1756 freeret:
1757 kfree(ld_buff);
1758 h->busy_configuring = 0;
1759 /* We return -1 here to tell the ACU that we have registered/updated
1760 * all of the drives that we can and to keep it from calling us
1761 * additional times.
1763 return -1;
1764 mem_msg:
1765 printk(KERN_ERR "cciss: out of memory\n");
1766 h->busy_configuring = 0;
1767 goto freeret;
1770 /* This function will deregister the disk and it's queue from the
1771 * kernel. It must be called with the controller lock held and the
1772 * drv structures busy_configuring flag set. It's parameters are:
1774 * disk = This is the disk to be deregistered
1775 * drv = This is the drive_info_struct associated with the disk to be
1776 * deregistered. It contains information about the disk used
1777 * by the driver.
1778 * clear_all = This flag determines whether or not the disk information
1779 * is going to be completely cleared out and the highest_lun
1780 * reset. Sometimes we want to clear out information about
1781 * the disk in preparation for re-adding it. In this case
1782 * the highest_lun should be left unchanged and the LunID
1783 * should not be cleared.
1785 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1786 int clear_all)
1788 int i;
1789 ctlr_info_t *h = get_host(disk);
1791 if (!capable(CAP_SYS_RAWIO))
1792 return -EPERM;
1794 /* make sure logical volume is NOT is use */
1795 if (clear_all || (h->gendisk[0] == disk)) {
1796 if (drv->usage_count > 1)
1797 return -EBUSY;
1798 } else if (drv->usage_count > 0)
1799 return -EBUSY;
1801 /* invalidate the devices and deregister the disk. If it is disk
1802 * zero do not deregister it but just zero out it's values. This
1803 * allows us to delete disk zero but keep the controller registered.
1805 if (h->gendisk[0] != disk) {
1806 struct request_queue *q = disk->queue;
1807 if (disk->flags & GENHD_FL_UP)
1808 del_gendisk(disk);
1809 if (q) {
1810 blk_cleanup_queue(q);
1811 /* Set drv->queue to NULL so that we do not try
1812 * to call blk_start_queue on this queue in the
1813 * interrupt handler
1815 drv->queue = NULL;
1817 /* If clear_all is set then we are deleting the logical
1818 * drive, not just refreshing its info. For drives
1819 * other than disk 0 we will call put_disk. We do not
1820 * do this for disk 0 as we need it to be able to
1821 * configure the controller.
1823 if (clear_all){
1824 /* This isn't pretty, but we need to find the
1825 * disk in our array and NULL our the pointer.
1826 * This is so that we will call alloc_disk if
1827 * this index is used again later.
1829 for (i=0; i < CISS_MAX_LUN; i++){
1830 if (h->gendisk[i] == disk) {
1831 h->gendisk[i] = NULL;
1832 break;
1835 put_disk(disk);
1837 } else {
1838 set_capacity(disk, 0);
1841 --h->num_luns;
1842 /* zero out the disk size info */
1843 drv->nr_blocks = 0;
1844 drv->block_size = 0;
1845 drv->heads = 0;
1846 drv->sectors = 0;
1847 drv->cylinders = 0;
1848 drv->raid_level = -1; /* This can be used as a flag variable to
1849 * indicate that this element of the drive
1850 * array is free.
1853 if (clear_all) {
1854 /* check to see if it was the last disk */
1855 if (drv == h->drv + h->highest_lun) {
1856 /* if so, find the new hightest lun */
1857 int i, newhighest = -1;
1858 for (i = 0; i <= h->highest_lun; i++) {
1859 /* if the disk has size > 0, it is available */
1860 if (h->drv[i].heads)
1861 newhighest = i;
1863 h->highest_lun = newhighest;
1866 drv->LunID = 0;
1868 return 0;
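/* fill_cmd() builds a controller command in-place: for TYPE_CMD it encodes
 * the CDB for the given CISS/BMIC opcode and addresses either the controller,
 * a logical volume, or a SCSI-3 address; for TYPE_MSG it builds an abort,
 * reset, or no-op message. In either case "buff", if non-NULL, is mapped for
 * DMA as the single scatter-gather entry.
 */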
1871 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
1872 1: address logical volume log_unit,
1873 2: periph device address is scsi3addr */
1874 unsigned int log_unit, __u8 page_code,
1875 unsigned char *scsi3addr, int cmd_type)
1877 ctlr_info_t *h = hba[ctlr];
1878 u64bit buff_dma_handle;
1879 int status = IO_OK;
1881 c->cmd_type = CMD_IOCTL_PEND;
1882 c->Header.ReplyQueue = 0;
1883 if (buff != NULL) {
1884 c->Header.SGList = 1;
1885 c->Header.SGTotal = 1;
1886 } else {
1887 c->Header.SGList = 0;
1888 c->Header.SGTotal = 0;
1890 c->Header.Tag.lower = c->busaddr;
1892 c->Request.Type.Type = cmd_type;
1893 if (cmd_type == TYPE_CMD) {
1894 switch (cmd) {
1895 case CISS_INQUIRY:
1896 /* If the logical unit number is 0 then, this is going
1897 to controller so It's a physical command
1898 mode = 0 target = 0. So we have nothing to write.
1899 otherwise, if use_unit_num == 1,
1900 mode = 1(volume set addressing) target = LUNID
1901 otherwise, if use_unit_num == 2,
1902 mode = 0(periph dev addr) target = scsi3addr */
1903 if (use_unit_num == 1) {
1904 c->Header.LUN.LogDev.VolId =
1905 h->drv[log_unit].LunID;
1906 c->Header.LUN.LogDev.Mode = 1;
1907 } else if (use_unit_num == 2) {
			memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
			c->Header.LUN.LogDev.Mode = 0;
1912 /* are we trying to read a vital product page */
1913 if (page_code != 0) {
1914 c->Request.CDB[1] = 0x01;
1915 c->Request.CDB[2] = page_code;
1917 c->Request.CDBLen = 6;
1918 c->Request.Type.Attribute = ATTR_SIMPLE;
1919 c->Request.Type.Direction = XFER_READ;
1920 c->Request.Timeout = 0;
1921 c->Request.CDB[0] = CISS_INQUIRY;
1922 c->Request.CDB[4] = size & 0xFF;
1923 break;
1924 case CISS_REPORT_LOG:
1925 case CISS_REPORT_PHYS:
1926 /* Talking to the controller, so it's a physical command:
1927 mode = 00, target = 0. Nothing to write.
1929 c->Request.CDBLen = 12;
1930 c->Request.Type.Attribute = ATTR_SIMPLE;
1931 c->Request.Type.Direction = XFER_READ;
1932 c->Request.Timeout = 0;
1933 c->Request.CDB[0] = cmd;
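/* Allocation length for the 12-byte report command goes in
 * CDB bytes 6-9, big-endian (MSB first). */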
1934 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1935 c->Request.CDB[7] = (size >> 16) & 0xFF;
1936 c->Request.CDB[8] = (size >> 8) & 0xFF;
1937 c->Request.CDB[9] = size & 0xFF;
1938 break;
1940 case CCISS_READ_CAPACITY:
1941 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1942 c->Header.LUN.LogDev.Mode = 1;
1943 c->Request.CDBLen = 10;
1944 c->Request.Type.Attribute = ATTR_SIMPLE;
1945 c->Request.Type.Direction = XFER_READ;
1946 c->Request.Timeout = 0;
1947 c->Request.CDB[0] = cmd;
1948 break;
1949 case CCISS_READ_CAPACITY_16:
1950 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1951 c->Header.LUN.LogDev.Mode = 1;
1952 c->Request.CDBLen = 16;
1953 c->Request.Type.Attribute = ATTR_SIMPLE;
1954 c->Request.Type.Direction = XFER_READ;
1955 c->Request.Timeout = 0;
1956 c->Request.CDB[0] = cmd;
1957 c->Request.CDB[1] = 0x10;
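/* CCISS_READ_CAPACITY_16 uses the SCSI SERVICE ACTION IN opcode;
 * 0x10 is the READ CAPACITY(16) service action, and the allocation
 * length is carried in CDB bytes 10-13 below. */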
1958 c->Request.CDB[10] = (size >> 24) & 0xFF;
1959 c->Request.CDB[11] = (size >> 16) & 0xFF;
1960 c->Request.CDB[12] = (size >> 8) & 0xFF;
1961 c->Request.CDB[13] = size & 0xFF;
1964 break;
1965 case CCISS_CACHE_FLUSH:
1966 c->Request.CDBLen = 12;
1967 c->Request.Type.Attribute = ATTR_SIMPLE;
1968 c->Request.Type.Direction = XFER_WRITE;
1969 c->Request.Timeout = 0;
1970 c->Request.CDB[0] = BMIC_WRITE;
1971 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1972 break;
1973 default:
1974 printk(KERN_WARNING
1975 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1976 return IO_ERROR;
1978 } else if (cmd_type == TYPE_MSG) {
1979 switch (cmd) {
1980 case 0: /* ABORT message */
1981 c->Request.CDBLen = 12;
1982 c->Request.Type.Attribute = ATTR_SIMPLE;
1983 c->Request.Type.Direction = XFER_WRITE;
1984 c->Request.Timeout = 0;
1985 c->Request.CDB[0] = cmd; /* abort */
1986 c->Request.CDB[1] = 0; /* abort a command */
1987 /* buff contains the tag of the command to abort */
1988 memcpy(&c->Request.CDB[4], buff, 8);
1989 break;
1990 case 1: /* RESET message */
1991 c->Request.CDBLen = 12;
1992 c->Request.Type.Attribute = ATTR_SIMPLE;
1993 c->Request.Type.Direction = XFER_WRITE;
1994 c->Request.Timeout = 0;
1995 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1996 c->Request.CDB[0] = cmd; /* reset */
1997 c->Request.CDB[1] = 0x04; /* reset a LUN */
1998 break;
1999 case 3: /* No-Op message */
2000 c->Request.CDBLen = 1;
2001 c->Request.Type.Attribute = ATTR_SIMPLE;
2002 c->Request.Type.Direction = XFER_WRITE;
2003 c->Request.Timeout = 0;
2004 c->Request.CDB[0] = cmd;
2005 break;
2006 default:
2007 printk(KERN_WARNING
2008 "cciss%d: unknown message type %d\n", ctlr, cmd);
2009 return IO_ERROR;
2011 } else {
2012 printk(KERN_WARNING
2013 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
2014 return IO_ERROR;
2016 /* Fill in the scatter gather information */
2017 if (size > 0) {
2018 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
2019 buff, size,
2020 PCI_DMA_BIDIRECTIONAL);
2021 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
2022 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
2023 c->SG[0].Len = size;
2024 c->SG[0].Ext = 0; /* we are not chaining */
2026 return status;
2029 static int sendcmd_withirq(__u8 cmd,
2030 int ctlr,
2031 void *buff,
2032 size_t size,
2033 unsigned int use_unit_num,
2034 unsigned int log_unit, __u8 page_code, int cmd_type)
2036 ctlr_info_t *h = hba[ctlr];
2037 CommandList_struct *c;
2038 u64bit buff_dma_handle;
2039 unsigned long flags;
2040 int return_status;
2041 DECLARE_COMPLETION_ONSTACK(wait);
2043 if ((c = cmd_alloc(h, 0)) == NULL)
2044 return -ENOMEM;
2045 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2046 log_unit, page_code, NULL, cmd_type);
2047 if (return_status != IO_OK) {
2048 cmd_free(h, c, 0);
2049 return return_status;
2051 resend_cmd2:
2052 c->waiting = &wait;
2054 /* Put the request on the tail of the queue and send it */
2055 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
2056 addQ(&h->reqQ, c);
2057 h->Qdepth++;
2058 start_io(h);
2059 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
2061 wait_for_completion(&wait);
2063 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
2064 switch (c->err_info->CommandStatus) {
2065 case CMD_TARGET_STATUS:
2066 printk(KERN_WARNING "cciss: cmd %p has"
2067 " completed with errors\n", c);
2068 if (c->err_info->ScsiStatus) {
2069 printk(KERN_WARNING "cciss: cmd %p "
2070 "has SCSI Status = %x\n",
2071 c, c->err_info->ScsiStatus);
2074 break;
2075 case CMD_DATA_UNDERRUN:
2076 case CMD_DATA_OVERRUN:
2077 /* expected for inquiry and report lun commands */
2078 break;
2079 case CMD_INVALID:
2080 printk(KERN_WARNING "cciss: Cmd %p is "
2081 "reported invalid\n", c);
2082 return_status = IO_ERROR;
2083 break;
2084 case CMD_PROTOCOL_ERR:
2085 printk(KERN_WARNING "cciss: cmd %p has "
2086 "protocol error \n", c);
2087 return_status = IO_ERROR;
2088 break;
2089 case CMD_HARDWARE_ERR:
2090 printk(KERN_WARNING "cciss: cmd %p had "
2091 " hardware error\n", c);
2092 return_status = IO_ERROR;
2093 break;
2094 case CMD_CONNECTION_LOST:
2095 printk(KERN_WARNING "cciss: cmd %p had "
2096 "connection lost\n", c);
2097 return_status = IO_ERROR;
2098 break;
2099 case CMD_ABORTED:
2100 printk(KERN_WARNING "cciss: cmd %p was "
2101 "aborted\n", c);
2102 return_status = IO_ERROR;
2103 break;
2104 case CMD_ABORT_FAILED:
2105 printk(KERN_WARNING "cciss: cmd %p reports "
2106 "abort failed\n", c);
2107 return_status = IO_ERROR;
2108 break;
2109 case CMD_UNSOLICITED_ABORT:
2110 printk(KERN_WARNING
2111 "cciss%d: unsolicited abort %p\n", ctlr, c);
2112 if (c->retry_count < MAX_CMD_RETRIES) {
2113 printk(KERN_WARNING
2114 "cciss%d: retrying %p\n", ctlr, c);
2115 c->retry_count++;
2116 /* erase the old error information */
2117 memset(c->err_info, 0,
2118 sizeof(ErrorInfo_struct));
2119 return_status = IO_OK;
2120 INIT_COMPLETION(wait);
2121 goto resend_cmd2;
2123 return_status = IO_ERROR;
2124 break;
2125 default:
2126 printk(KERN_WARNING "cciss: cmd %p returned "
2127 "unknown status %x\n", c,
2128 c->err_info->CommandStatus);
2129 return_status = IO_ERROR;
2132 /* unlock the buffers from DMA */
2133 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2134 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2135 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
2136 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2137 cmd_free(h, c, 0);
2138 return return_status;
2141 static void cciss_geometry_inquiry(int ctlr, int logvol,
2142 int withirq, sector_t total_size,
2143 unsigned int block_size,
2144 InquiryData_struct *inq_buff,
2145 drive_info_struct *drv)
2147 int return_code;
2148 unsigned long t;
2150 memset(inq_buff, 0, sizeof(InquiryData_struct));
2151 if (withirq)
2152 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
2153 inq_buff, sizeof(*inq_buff), 1,
2154 logvol, 0xC1, TYPE_CMD);
2155 else
2156 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
2157 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
2158 TYPE_CMD);
2159 if (return_code == IO_OK) {
2160 if (inq_buff->data_byte[8] == 0xFF) {
2161 printk(KERN_WARNING
2162 "cciss: reading geometry failed, volume "
2163 "does not support reading geometry\n");
2164 drv->heads = 255;
2165 drv->sectors = 32; // Sectors per track
2166 drv->cylinders = total_size + 1;
2167 drv->raid_level = RAID_UNKNOWN;
2168 } else {
2169 drv->heads = inq_buff->data_byte[6];
2170 drv->sectors = inq_buff->data_byte[7];
2171 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
2172 drv->cylinders += inq_buff->data_byte[5];
2173 drv->raid_level = inq_buff->data_byte[8];
2175 drv->block_size = block_size;
2176 drv->nr_blocks = total_size + 1;
2177 t = drv->heads * drv->sectors;
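/* cylinders = ceil((total_size + 1) / (heads * sectors per track)) */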
2178 if (t > 1) {
2179 sector_t real_size = total_size + 1;
2180 unsigned long rem = sector_div(real_size, t);
2181 if (rem)
2182 real_size++;
2183 drv->cylinders = real_size;
2185 } else { /* Get geometry failed */
2186 printk(KERN_WARNING "cciss: reading geometry failed\n");
2188 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
2189 drv->heads, drv->sectors, drv->cylinders);
2192 static void
2193 cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
2194 unsigned int *block_size)
2196 ReadCapdata_struct *buf;
2197 int return_code;
2199 buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
2200 if (!buf) {
2201 printk(KERN_WARNING "cciss: out of memory\n");
2202 return;
2205 if (withirq)
2206 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
2207 ctlr, buf, sizeof(ReadCapdata_struct),
2208 1, logvol, 0, TYPE_CMD);
2209 else
2210 return_code = sendcmd(CCISS_READ_CAPACITY,
2211 ctlr, buf, sizeof(ReadCapdata_struct),
2212 1, logvol, 0, NULL, TYPE_CMD);
2213 if (return_code == IO_OK) {
2214 *total_size = be32_to_cpu(*(__be32 *) buf->total_size);
2215 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2216 } else { /* read capacity command failed */
2217 printk(KERN_WARNING "cciss: read capacity failed\n");
2218 *total_size = 0;
2219 *block_size = BLOCK_SIZE;
2221 if (*total_size != 0)
2222 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2223 (unsigned long long)*total_size+1, *block_size);
2224 kfree(buf);
2227 static void
2228 cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
2230 ReadCapdata_struct_16 *buf;
2231 int return_code;
2233 buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2234 if (!buf) {
2235 printk(KERN_WARNING "cciss: out of memory\n");
2236 return;
2239 if (withirq) {
2240 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2241 ctlr, buf, sizeof(ReadCapdata_struct_16),
2242 1, logvol, 0, TYPE_CMD);
2244 else {
2245 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2246 ctlr, buf, sizeof(ReadCapdata_struct_16),
2247 1, logvol, 0, NULL, TYPE_CMD);
2249 if (return_code == IO_OK) {
2250 *total_size = be64_to_cpu(*(__be64 *) buf->total_size);
2251 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2252 } else { /* read capacity command failed */
2253 printk(KERN_WARNING "cciss: read capacity failed\n");
2254 *total_size = 0;
2255 *block_size = BLOCK_SIZE;
2257 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2258 (unsigned long long)*total_size+1, *block_size);
2259 kfree(buf);
2262 static int cciss_revalidate(struct gendisk *disk)
2264 ctlr_info_t *h = get_host(disk);
2265 drive_info_struct *drv = get_drv(disk);
2266 int logvol;
2267 int FOUND = 0;
2268 unsigned int block_size;
2269 sector_t total_size;
2270 InquiryData_struct *inq_buff = NULL;
2272 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2273 if (h->drv[logvol].LunID == drv->LunID) {
2274 FOUND = 1;
2275 break;
2279 if (!FOUND)
2280 return 1;
2282 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2283 if (inq_buff == NULL) {
2284 printk(KERN_WARNING "cciss: out of memory\n");
2285 return 1;
2287 if (h->cciss_read == CCISS_READ_10) {
2288 cciss_read_capacity(h->ctlr, logvol, 1,
2289 &total_size, &block_size);
2290 } else {
2291 cciss_read_capacity_16(h->ctlr, logvol, 1,
2292 &total_size, &block_size);
2294 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2295 inq_buff, drv);
2297 blk_queue_hardsect_size(drv->queue, drv->block_size);
2298 set_capacity(disk, drv->nr_blocks);
2300 kfree(inq_buff);
2301 return 0;
2305 * Wait polling for a command to complete.
2306 * The memory mapped FIFO is polled for the completion.
2307 * Used only at init time, interrupts from the HBA are disabled.
2309 static unsigned long pollcomplete(int ctlr)
2311 unsigned long done;
2312 int i;
2314 /* Wait (up to 20 seconds) for a command to complete */
2316 for (i = 20 * HZ; i > 0; i--) {
2317 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2318 if (done == FIFO_EMPTY)
2319 schedule_timeout_uninterruptible(1);
2320 else
2321 return done;
2323 /* Invalid address to tell caller we ran out of time */
2324 return 1;
2327 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2329 /* We get in here if sendcmd() is polling for completions
2330 and gets some command back that it wasn't expecting --
2331 something other than that which it just sent down.
2332 Ordinarily, that shouldn't happen, but it can happen when
2333 the scsi tape stuff gets into error handling mode, and
2334 starts using sendcmd() to try to abort commands and
2335 reset tape drives. In that case, sendcmd may pick up
2336 completions of commands that were sent to logical drives
2337 through the block i/o system, or cciss ioctls completing, etc.
2338 In that case, we need to save those completions for later
2339 processing by the interrupt handler.
2342 #ifdef CONFIG_CISS_SCSI_TAPE
2343 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2345 /* If it's not the scsi tape stuff doing error handling (abort */
2346 /* or reset), then we don't expect anything weird. */
2347 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2348 #endif
2349 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2350 "Invalid command list address returned! (%lx)\n",
2351 ctlr, complete);
2352 /* not much we can do. */
2353 #ifdef CONFIG_CISS_SCSI_TAPE
2354 return 1;
2357 /* We've sent down an abort or reset, but something else
2358 has completed */
2359 if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
2360 /* Uh oh. No room to save it for later... */
2361 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2362 "reject list overflow, command lost!\n", ctlr);
2363 return 1;
2365 /* Save it for later */
2366 srl->complete[srl->ncompletions] = complete;
2367 srl->ncompletions++;
2368 #endif
2369 return 0;
2373 * Send a command to the controller, and wait for it to complete.
2374 * Only used at init time.
2376 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2377 1: address logical volume log_unit,
2378 2: periph device address is scsi3addr */
2379 unsigned int log_unit,
2380 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2382 CommandList_struct *c;
2383 int i;
2384 unsigned long complete;
2385 ctlr_info_t *info_p = hba[ctlr];
2386 u64bit buff_dma_handle;
2387 int status, done = 0;
2389 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2390 printk(KERN_WARNING "cciss: unable to get memory");
2391 return IO_ERROR;
2393 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2394 log_unit, page_code, scsi3addr, cmd_type);
2395 if (status != IO_OK) {
2396 cmd_free(info_p, c, 1);
2397 return status;
2399 resend_cmd1:
2401 * Disable interrupt
2403 #ifdef CCISS_DEBUG
2404 printk(KERN_DEBUG "cciss: turning intr off\n");
2405 #endif /* CCISS_DEBUG */
2406 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2408 /* Make sure there is room in the command FIFO */
2409 /* Actually it should be completely empty at this time */
2410 /* unless we are in here doing error handling for the scsi */
2411 /* tape side of the driver. */
2412 for (i = 200000; i > 0; i--) {
2413 /* if fifo isn't full go */
2414 if (!(info_p->access.fifo_full(info_p))) {
2416 break;
2418 udelay(10);
2419 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2420 " waiting!\n", ctlr);
2423 * Send the cmd
2425 info_p->access.submit_command(info_p, c);
2426 done = 0;
2427 do {
2428 complete = pollcomplete(ctlr);
2430 #ifdef CCISS_DEBUG
2431 printk(KERN_DEBUG "cciss: command completed\n");
2432 #endif /* CCISS_DEBUG */
2434 if (complete == 1) {
2435 printk(KERN_WARNING
2436 "cciss cciss%d: SendCmd Timeout out, "
2437 "No command list address returned!\n", ctlr);
2438 status = IO_ERROR;
2439 done = 1;
2440 break;
2443 /* This will need to change for direct lookup completions */
2444 if ((complete & CISS_ERROR_BIT)
2445 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2446 /* if data overrun or underrun on Report command,
2447 ignore it
2449 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2450 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2451 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2452 ((c->err_info->CommandStatus ==
2453 CMD_DATA_OVERRUN) ||
2454 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2455 )) {
2456 complete = c->busaddr;
2457 } else {
2458 if (c->err_info->CommandStatus ==
2459 CMD_UNSOLICITED_ABORT) {
2460 printk(KERN_WARNING "cciss%d: "
2461 "unsolicited abort %p\n",
2462 ctlr, c);
2463 if (c->retry_count < MAX_CMD_RETRIES) {
2464 printk(KERN_WARNING
2465 "cciss%d: retrying %p\n",
2466 ctlr, c);
2467 c->retry_count++;
2468 /* erase the old error */
2469 /* information */
2470 memset(c->err_info, 0,
2471 sizeof
2472 (ErrorInfo_struct));
2473 goto resend_cmd1;
2474 } else {
2475 printk(KERN_WARNING
2476 "cciss%d: retried %p too "
2477 "many times\n", ctlr, c);
2478 status = IO_ERROR;
2479 goto cleanup1;
2481 } else if (c->err_info->CommandStatus ==
2482 CMD_UNABORTABLE) {
2483 printk(KERN_WARNING
2484 "cciss%d: command could not be aborted.\n",
2485 ctlr);
2486 status = IO_ERROR;
2487 goto cleanup1;
2489 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2490 " Error %x \n", ctlr,
2491 c->err_info->CommandStatus);
2492 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2493 " offensive info\n"
2494 " size %x\n num %x value %x\n",
2495 ctlr,
2496 c->err_info->MoreErrInfo.Invalid_Cmd.
2497 offense_size,
2498 c->err_info->MoreErrInfo.Invalid_Cmd.
2499 offense_num,
2500 c->err_info->MoreErrInfo.Invalid_Cmd.
2501 offense_value);
2502 status = IO_ERROR;
2503 goto cleanup1;
2506 /* This will need changing for direct lookup completions */
2507 if (complete != c->busaddr) {
2508 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2509 BUG(); /* we are pretty much hosed if we get here. */
2511 continue;
2512 } else
2513 done = 1;
2514 } while (!done);
2516 cleanup1:
2517 /* unlock the data buffer from DMA */
2518 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2519 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2520 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2521 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2522 #ifdef CONFIG_CISS_SCSI_TAPE
2523 /* if we saved some commands for later, process them now. */
2524 if (info_p->scsi_rejects.ncompletions > 0)
2525 do_cciss_intr(0, info_p);
2526 #endif
2527 cmd_free(info_p, c, 1);
2528 return status;
2532 * Map (physical) PCI mem into (virtual) kernel space
2534 static void __iomem *remap_pci_mem(ulong base, ulong size)
2536 ulong page_base = ((ulong) base) & PAGE_MASK;
2537 ulong page_offs = ((ulong) base) - page_base;
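/* the base address need not be page aligned: map from the page
 * boundary and add the offset back into the returned pointer */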
2538 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2540 return page_remapped ? (page_remapped + page_offs) : NULL;
2544 * Takes jobs off the Q and sends them to the hardware, then puts them on
2545 * the completion Q to wait for completion.
2547 static void start_io(ctlr_info_t *h)
2549 CommandList_struct *c;
2551 while ((c = h->reqQ) != NULL) {
2552 /* can't do anything if fifo is full */
2553 if ((h->access.fifo_full(h))) {
2554 printk(KERN_WARNING "cciss: fifo full\n");
2555 break;
2558 /* Get the first entry from the Request Q */
2559 removeQ(&(h->reqQ), c);
2560 h->Qdepth--;
2562 /* Tell the controller execute command */
2563 h->access.submit_command(h, c);
2565 /* Put job onto the completed Q */
2566 addQ(&(h->cmpQ), c);
2570 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2571 /* Zeros out the error record and then resends the command back */
2572 /* to the controller */
2573 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2575 /* erase the old error information */
2576 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2578 /* add it to software queue and then send it to the controller */
2579 addQ(&(h->reqQ), c);
2580 h->Qdepth++;
2581 if (h->Qdepth > h->maxQsinceinit)
2582 h->maxQsinceinit = h->Qdepth;
2584 start_io(h);
2587 static inline unsigned int make_status_bytes(unsigned int scsi_status_byte,
2588 unsigned int msg_byte, unsigned int host_byte,
2589 unsigned int driver_byte)
2591 /* inverse of macros in scsi.h */
2592 return (scsi_status_byte & 0xff) |
2593 ((msg_byte & 0xff) << 8) |
2594 ((host_byte & 0xff) << 16) |
2595 ((driver_byte & 0xff) << 24);
2598 static inline int evaluate_target_status(CommandList_struct *cmd)
2600 unsigned char sense_key;
2601 unsigned char status_byte, msg_byte, host_byte, driver_byte;
2602 int error_value;
2604 /* If we get in here, it means we got "target status", that is, scsi status */
2605 status_byte = cmd->err_info->ScsiStatus;
2606 driver_byte = DRIVER_OK;
2607 msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */
2609 if (blk_pc_request(cmd->rq))
2610 host_byte = DID_PASSTHROUGH;
2611 else
2612 host_byte = DID_OK;
2614 error_value = make_status_bytes(status_byte, msg_byte,
2615 host_byte, driver_byte);
2617 if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
2618 if (!blk_pc_request(cmd->rq))
2619 printk(KERN_WARNING "cciss: cmd %p "
2620 "has SCSI Status 0x%x\n",
2621 cmd, cmd->err_info->ScsiStatus);
2622 return error_value;
2625 /* check the sense key */
2626 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2627 /* no status or recovered error */
2628 if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq))
2629 error_value = 0;
2631 if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
2632 if (error_value != 0)
2633 printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
2634 " sense key = 0x%x\n", cmd, sense_key);
2635 return error_value;
2638 /* SG_IO or similar, copy sense data back */
2639 if (cmd->rq->sense) {
2640 if (cmd->rq->sense_len > cmd->err_info->SenseLen)
2641 cmd->rq->sense_len = cmd->err_info->SenseLen;
2642 memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
2643 cmd->rq->sense_len);
2644 } else
2645 cmd->rq->sense_len = 0;
2647 return error_value;
2650 /* checks the status of the job and calls complete buffers to mark all
2651 * buffers for the completed job. Note that this function does not need
2652 * to hold the hba/queue lock.
2654 static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2655 int timeout)
2657 int retry_cmd = 0;
2658 struct request *rq = cmd->rq;
2660 rq->errors = 0;
2662 if (timeout)
2663 rq->errors = make_status_bytes(0, 0, 0, DRIVER_TIMEOUT);
2665 if (cmd->err_info->CommandStatus == 0) /* no error has occurred */
2666 goto after_error_processing;
2668 switch (cmd->err_info->CommandStatus) {
2669 case CMD_TARGET_STATUS:
2670 rq->errors = evaluate_target_status(cmd);
2671 break;
2672 case CMD_DATA_UNDERRUN:
2673 if (blk_fs_request(cmd->rq)) {
2674 printk(KERN_WARNING "cciss: cmd %p has"
2675 " completed with data underrun "
2676 "reported\n", cmd);
2677 cmd->rq->data_len = cmd->err_info->ResidualCnt;
2679 break;
2680 case CMD_DATA_OVERRUN:
2681 if (blk_fs_request(cmd->rq))
2682 printk(KERN_WARNING "cciss: cmd %p has"
2683 " completed with data overrun "
2684 "reported\n", cmd);
2685 break;
2686 case CMD_INVALID:
2687 printk(KERN_WARNING "cciss: cmd %p is "
2688 "reported invalid\n", cmd);
2689 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2690 cmd->err_info->CommandStatus, DRIVER_OK,
2691 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2692 break;
2693 case CMD_PROTOCOL_ERR:
2694 printk(KERN_WARNING "cciss: cmd %p has "
2695 "protocol error \n", cmd);
2696 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2697 cmd->err_info->CommandStatus, DRIVER_OK,
2698 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2699 break;
2700 case CMD_HARDWARE_ERR:
2701 printk(KERN_WARNING "cciss: cmd %p had "
2702 " hardware error\n", cmd);
2703 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2704 cmd->err_info->CommandStatus, DRIVER_OK,
2705 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2706 break;
2707 case CMD_CONNECTION_LOST:
2708 printk(KERN_WARNING "cciss: cmd %p had "
2709 "connection lost\n", cmd);
2710 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2711 cmd->err_info->CommandStatus, DRIVER_OK,
2712 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2713 break;
2714 case CMD_ABORTED:
2715 printk(KERN_WARNING "cciss: cmd %p was "
2716 "aborted\n", cmd);
2717 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2718 cmd->err_info->CommandStatus, DRIVER_OK,
2719 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
2720 break;
2721 case CMD_ABORT_FAILED:
2722 printk(KERN_WARNING "cciss: cmd %p reports "
2723 "abort failed\n", cmd);
2724 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2725 cmd->err_info->CommandStatus, DRIVER_OK,
2726 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2727 break;
2728 case CMD_UNSOLICITED_ABORT:
2729 printk(KERN_WARNING "cciss%d: unsolicited "
2730 "abort %p\n", h->ctlr, cmd);
2731 if (cmd->retry_count < MAX_CMD_RETRIES) {
2732 retry_cmd = 1;
2733 printk(KERN_WARNING
2734 "cciss%d: retrying %p\n", h->ctlr, cmd);
2735 cmd->retry_count++;
2736 } else
2737 printk(KERN_WARNING
2738 "cciss%d: %p retried too "
2739 "many times\n", h->ctlr, cmd);
2740 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2741 cmd->err_info->CommandStatus, DRIVER_OK,
2742 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
2743 break;
2744 case CMD_TIMEOUT:
2745 printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd);
2746 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2747 cmd->err_info->CommandStatus, DRIVER_OK,
2748 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2749 break;
2750 default:
2751 printk(KERN_WARNING "cciss: cmd %p returned "
2752 "unknown status %x\n", cmd,
2753 cmd->err_info->CommandStatus);
2754 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2755 cmd->err_info->CommandStatus, DRIVER_OK,
2756 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2759 after_error_processing:
2761 /* We need to return this command */
2762 if (retry_cmd) {
2763 resend_cciss_cmd(h, cmd);
2764 return;
2766 cmd->rq->completion_data = cmd;
2767 blk_complete_request(cmd->rq);
2771 * Get a request and submit it to the controller.
2773 static void do_cciss_request(struct request_queue *q)
2775 ctlr_info_t *h = q->queuedata;
2776 CommandList_struct *c;
2777 sector_t start_blk;
2778 int seg;
2779 struct request *creq;
2780 u64bit temp64;
2781 struct scatterlist tmp_sg[MAXSGENTRIES];
2782 drive_info_struct *drv;
2783 int i, dir;
2785 /* We call start_io here in case there is a command waiting on the
2786 * queue that has not been sent.
2788 if (blk_queue_plugged(q))
2789 goto startio;
2791 queue:
2792 creq = elv_next_request(q);
2793 if (!creq)
2794 goto startio;
2796 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2798 if ((c = cmd_alloc(h, 1)) == NULL)
2799 goto full;
2801 blkdev_dequeue_request(creq);
2803 spin_unlock_irq(q->queue_lock);
2805 c->cmd_type = CMD_RWREQ;
2806 c->rq = creq;
2808 /* fill in the request */
2809 drv = creq->rq_disk->private_data;
2810 c->Header.ReplyQueue = 0; // unused in simple mode
2811 /* got command from pool, so use the command block index instead */
2812 /* for direct lookups. */
2813 /* The first 2 bits are reserved for controller error reporting. */
2814 c->Header.Tag.lower = (c->cmdindex << 3);
2815 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
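/* the interrupt handler tests bit 2 of the returned tag and, when set,
 * uses (tag >> 3) as an index into h->cmd_pool instead of walking the
 * completion queue */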
2816 c->Header.LUN.LogDev.VolId = drv->LunID;
2817 c->Header.LUN.LogDev.Mode = 1;
2818 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2819 c->Request.Type.Type = TYPE_CMD; // It is a command.
2820 c->Request.Type.Attribute = ATTR_SIMPLE;
2821 c->Request.Type.Direction =
2822 (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
2823 c->Request.Timeout = 0; // Don't time out
2824 c->Request.CDB[0] =
2825 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2826 start_blk = creq->sector;
2827 #ifdef CCISS_DEBUG
2828 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
2829 (int)creq->nr_sectors);
2830 #endif /* CCISS_DEBUG */
2832 sg_init_table(tmp_sg, MAXSGENTRIES);
2833 seg = blk_rq_map_sg(q, creq, tmp_sg);
2835 /* get the DMA records for the setup */
2836 if (c->Request.Type.Direction == XFER_READ)
2837 dir = PCI_DMA_FROMDEVICE;
2838 else
2839 dir = PCI_DMA_TODEVICE;
2841 for (i = 0; i < seg; i++) {
2842 c->SG[i].Len = tmp_sg[i].length;
2843 temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
2844 tmp_sg[i].offset,
2845 tmp_sg[i].length, dir);
2846 c->SG[i].Addr.lower = temp64.val32.lower;
2847 c->SG[i].Addr.upper = temp64.val32.upper;
2848 c->SG[i].Ext = 0; // we are not chaining
2850 /* track how many SG entries we are using */
2851 if (seg > h->maxSG)
2852 h->maxSG = seg;
2854 #ifdef CCISS_DEBUG
2855 printk(KERN_DEBUG "cciss: Submitting %lu sectors in %d segments\n",
2856 creq->nr_sectors, seg);
2857 #endif /* CCISS_DEBUG */
2859 c->Header.SGList = c->Header.SGTotal = seg;
2860 if (likely(blk_fs_request(creq))) {
2861 if(h->cciss_read == CCISS_READ_10) {
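/* 10-byte CDB: 32-bit LBA in bytes 2-5, transfer length in bytes 7-8,
 * both big-endian */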
2862 c->Request.CDB[1] = 0;
2863 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2864 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2865 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2866 c->Request.CDB[5] = start_blk & 0xff;
2867 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2868 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2869 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2870 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2871 } else {
2872 u32 upper32 = upper_32_bits(start_blk);
2874 c->Request.CDBLen = 16;
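/* 16-byte CDB: 64-bit LBA in bytes 2-9, transfer length in bytes 10-13,
 * both big-endian */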
2875 c->Request.CDB[1]= 0;
2876 c->Request.CDB[2]= (upper32 >> 24) & 0xff; //MSB
2877 c->Request.CDB[3]= (upper32 >> 16) & 0xff;
2878 c->Request.CDB[4]= (upper32 >> 8) & 0xff;
2879 c->Request.CDB[5]= upper32 & 0xff;
2880 c->Request.CDB[6]= (start_blk >> 24) & 0xff;
2881 c->Request.CDB[7]= (start_blk >> 16) & 0xff;
2882 c->Request.CDB[8]= (start_blk >> 8) & 0xff;
2883 c->Request.CDB[9]= start_blk & 0xff;
2884 c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
2885 c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
2886 c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
2887 c->Request.CDB[13]= creq->nr_sectors & 0xff;
2888 c->Request.CDB[14] = c->Request.CDB[15] = 0;
2890 } else if (blk_pc_request(creq)) {
2891 c->Request.CDBLen = creq->cmd_len;
2892 memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
2893 } else {
2894 printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type);
2895 BUG();
2898 spin_lock_irq(q->queue_lock);
2900 addQ(&(h->reqQ), c);
2901 h->Qdepth++;
2902 if (h->Qdepth > h->maxQsinceinit)
2903 h->maxQsinceinit = h->Qdepth;
2905 goto queue;
2906 full:
2907 blk_stop_queue(q);
2908 startio:
2909 /* We will already have the driver lock here so there is no need
2910 * to lock it.
2912 start_io(h);
2915 static inline unsigned long get_next_completion(ctlr_info_t *h)
2917 #ifdef CONFIG_CISS_SCSI_TAPE
2918 /* Any rejects from sendcmd() lying around? Process them first */
2919 if (h->scsi_rejects.ncompletions == 0)
2920 return h->access.command_completed(h);
2921 else {
2922 struct sendcmd_reject_list *srl;
2923 int n;
2924 srl = &h->scsi_rejects;
2925 n = --srl->ncompletions;
2926 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2927 printk("p");
2928 return srl->complete[n];
2930 #else
2931 return h->access.command_completed(h);
2932 #endif
2935 static inline int interrupt_pending(ctlr_info_t *h)
2937 #ifdef CONFIG_CISS_SCSI_TAPE
2938 return (h->access.intr_pending(h)
2939 || (h->scsi_rejects.ncompletions > 0));
2940 #else
2941 return h->access.intr_pending(h);
2942 #endif
2945 static inline long interrupt_not_for_us(ctlr_info_t *h)
2947 #ifdef CONFIG_CISS_SCSI_TAPE
2948 return (((h->access.intr_pending(h) == 0) ||
2949 (h->interrupts_enabled == 0))
2950 && (h->scsi_rejects.ncompletions == 0));
2951 #else
2952 return (((h->access.intr_pending(h) == 0) ||
2953 (h->interrupts_enabled == 0)));
2954 #endif
2957 static irqreturn_t do_cciss_intr(int irq, void *dev_id)
2959 ctlr_info_t *h = dev_id;
2960 CommandList_struct *c;
2961 unsigned long flags;
2962 __u32 a, a1, a2;
2964 if (interrupt_not_for_us(h))
2965 return IRQ_NONE;
2967 * If there are completed commands in the completion queue,
2968 * we had better do something about it.
2970 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2971 while (interrupt_pending(h)) {
2972 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
2973 a1 = a;
2974 if ((a & 0x04)) {
2975 a2 = (a >> 3);
2976 if (a2 >= h->nr_cmds) {
2977 printk(KERN_WARNING
2978 "cciss: controller cciss%d failed, stopping.\n",
2979 h->ctlr);
2980 fail_all_cmds(h->ctlr);
2981 return IRQ_HANDLED;
2984 c = h->cmd_pool + a2;
2985 a = c->busaddr;
2987 } else {
2988 a &= ~3;
2989 if ((c = h->cmpQ) == NULL) {
2990 printk(KERN_WARNING
2991 "cciss: Completion of %08x ignored\n",
2992 a1);
2993 continue;
2995 while (c->busaddr != a) {
2996 c = c->next;
2997 if (c == h->cmpQ)
2998 break;
3002 * If we've found the command, take it off the
3003 * completion Q and free it
3005 if (c->busaddr == a) {
3006 removeQ(&h->cmpQ, c);
3007 if (c->cmd_type == CMD_RWREQ) {
3008 complete_command(h, c, 0);
3009 } else if (c->cmd_type == CMD_IOCTL_PEND) {
3010 complete(c->waiting);
3012 # ifdef CONFIG_CISS_SCSI_TAPE
3013 else if (c->cmd_type == CMD_SCSI)
3014 complete_scsi_command(c, 0, a1);
3015 # endif
3016 continue;
3021 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
3022 return IRQ_HANDLED;
3026 * We cannot read the structure directly, for portability we must use
3027 * the io functions.
3028 * This is for debug only.
3030 #ifdef CCISS_DEBUG
3031 static void print_cfg_table(CfgTable_struct *tb)
3033 int i;
3034 char temp_name[17];
3036 printk("Controller Configuration information\n");
3037 printk("------------------------------------\n");
3038 for (i = 0; i < 4; i++)
3039 temp_name[i] = readb(&(tb->Signature[i]));
3040 temp_name[4] = '\0';
3041 printk(" Signature = %s\n", temp_name);
3042 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
3043 printk(" Transport methods supported = 0x%x\n",
3044 readl(&(tb->TransportSupport)));
3045 printk(" Transport methods active = 0x%x\n",
3046 readl(&(tb->TransportActive)));
3047 printk(" Requested transport Method = 0x%x\n",
3048 readl(&(tb->HostWrite.TransportRequest)));
3049 printk(" Coalesce Interrupt Delay = 0x%x\n",
3050 readl(&(tb->HostWrite.CoalIntDelay)));
3051 printk(" Coalesce Interrupt Count = 0x%x\n",
3052 readl(&(tb->HostWrite.CoalIntCount)));
3053 printk(" Max outstanding commands = 0x%d\n",
3054 readl(&(tb->CmdsOutMax)));
3055 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
3056 for (i = 0; i < 16; i++)
3057 temp_name[i] = readb(&(tb->ServerName[i]));
3058 temp_name[16] = '\0';
3059 printk(" Server Name = %s\n", temp_name);
3060 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
3062 #endif /* CCISS_DEBUG */
3064 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3066 int i, offset, mem_type, bar_type;
3067 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
3068 return 0;
3069 offset = 0;
3070 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3071 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
3072 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
3073 offset += 4;
3074 else {
3075 mem_type = pci_resource_flags(pdev, i) &
3076 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
3077 switch (mem_type) {
3078 case PCI_BASE_ADDRESS_MEM_TYPE_32:
3079 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
3080 offset += 4; /* 32 bit */
3081 break;
3082 case PCI_BASE_ADDRESS_MEM_TYPE_64:
3083 offset += 8;
3084 break;
3085 default: /* reserved in PCI 2.2 */
3086 printk(KERN_WARNING
3087 "Base address is invalid\n");
3088 return -1;
3089 break;
3092 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
3093 return i + 1;
3095 return -1;
3098 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
3099 * controllers that are capable. If not, we use IO-APIC mode.
3102 static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
3103 struct pci_dev *pdev, __u32 board_id)
3105 #ifdef CONFIG_PCI_MSI
3106 int err;
3107 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
3108 {0, 2}, {0, 3}
3111 /* Some boards advertise MSI but don't really support it */
3112 if ((board_id == 0x40700E11) ||
3113 (board_id == 0x40800E11) ||
3114 (board_id == 0x40820E11) || (board_id == 0x40830E11))
3115 goto default_int_mode;
3117 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
3118 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
3119 if (!err) {
3120 c->intr[0] = cciss_msix_entries[0].vector;
3121 c->intr[1] = cciss_msix_entries[1].vector;
3122 c->intr[2] = cciss_msix_entries[2].vector;
3123 c->intr[3] = cciss_msix_entries[3].vector;
3124 c->msix_vector = 1;
3125 return;
3127 if (err > 0) {
3128 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
3129 "available\n", err);
3130 goto default_int_mode;
3131 } else {
3132 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
3133 err);
3134 goto default_int_mode;
3137 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
3138 if (!pci_enable_msi(pdev)) {
3139 c->msi_vector = 1;
3140 } else {
3141 printk(KERN_WARNING "cciss: MSI init failed\n");
3144 default_int_mode:
3145 #endif /* CONFIG_PCI_MSI */
3146 /* if we get here we're going to use the default interrupt mode */
3147 c->intr[SIMPLE_MODE_INT] = pdev->irq;
3148 return;
3151 static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
3153 ushort subsystem_vendor_id, subsystem_device_id, command;
3154 __u32 board_id, scratchpad = 0;
3155 __u64 cfg_offset;
3156 __u32 cfg_base_addr;
3157 __u64 cfg_base_addr_index;
3158 int i, err;
3160 /* check to see if controller has been disabled */
3161 /* BEFORE trying to enable it */
3162 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
3163 if (!(command & 0x02)) {
3164 printk(KERN_WARNING
3165 "cciss: controller appears to be disabled\n");
3166 return -ENODEV;
3169 err = pci_enable_device(pdev);
3170 if (err) {
3171 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
3172 return err;
3175 err = pci_request_regions(pdev, "cciss");
3176 if (err) {
3177 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
3178 "aborting\n");
3179 return err;
3182 subsystem_vendor_id = pdev->subsystem_vendor;
3183 subsystem_device_id = pdev->subsystem_device;
3184 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
3185 subsystem_vendor_id);
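/* board_id packs the PCI subsystem device ID into the upper 16 bits
 * and the subsystem vendor ID into the lower 16 bits */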
3187 #ifdef CCISS_DEBUG
3188 printk("command = %x\n", command);
3189 printk("irq = %x\n", pdev->irq);
3190 printk("board_id = %x\n", board_id);
3191 #endif /* CCISS_DEBUG */
3193 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
3194 * else we use the IO-APIC interrupt assigned to us by system ROM.
3196 cciss_interrupt_mode(c, pdev, board_id);
3199 * Memory base addr is first addr, the second points to the config
3200 * table
3203 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
3204 #ifdef CCISS_DEBUG
3205 printk("address 0 = %lx\n", c->paddr);
3206 #endif /* CCISS_DEBUG */
3207 c->vaddr = remap_pci_mem(c->paddr, 0x250);
3209 /* Wait for the board to become ready. (PCI hotplug needs this.)
3210 * We poll for up to 120 secs, once per 100ms. */
3211 for (i = 0; i < 1200; i++) {
3212 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
3213 if (scratchpad == CCISS_FIRMWARE_READY)
3214 break;
3215 set_current_state(TASK_INTERRUPTIBLE);
3216 schedule_timeout(HZ / 10); /* wait 100ms */
3218 if (scratchpad != CCISS_FIRMWARE_READY) {
3219 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
3220 err = -ENODEV;
3221 goto err_out_free_res;
3224 /* get the address index number */
3225 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
3226 cfg_base_addr &= (__u32) 0x0000ffff;
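/* the low 16 bits identify the PCI BAR (as a config-space offset relative
 * to PCI_BASE_ADDRESS_0) that holds the config table; find_PCI_BAR_index()
 * converts that offset into a resource index */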
3227 #ifdef CCISS_DEBUG
3228 printk("cfg base address = %x\n", cfg_base_addr);
3229 #endif /* CCISS_DEBUG */
3230 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
3231 #ifdef CCISS_DEBUG
3232 printk("cfg base address index = %llx\n",
3233 (unsigned long long)cfg_base_addr_index);
3234 #endif /* CCISS_DEBUG */
3235 if (cfg_base_addr_index == -1) {
3236 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
3237 err = -ENODEV;
3238 goto err_out_free_res;
3241 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
3242 #ifdef CCISS_DEBUG
3243 printk("cfg offset = %llx\n", (unsigned long long)cfg_offset);
3244 #endif /* CCISS_DEBUG */
3245 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
3246 cfg_base_addr_index) +
3247 cfg_offset, sizeof(CfgTable_struct));
3248 c->board_id = board_id;
3250 #ifdef CCISS_DEBUG
3251 print_cfg_table(c->cfgtable);
3252 #endif /* CCISS_DEBUG */
3254 /* Some controllers support Zero Memory Raid (ZMR).
3255 * When configured in ZMR mode the number of supported
3256 * commands drops to 64. So instead of just setting an
3257 * arbitrary value we make the driver a little smarter.
3258 * We read the config table to tell us how many commands
3259 * are supported on the controller then subtract 4 to
3260 * leave a little room for ioctl calls.
3262 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
3263 for (i = 0; i < ARRAY_SIZE(products); i++) {
3264 if (board_id == products[i].board_id) {
3265 c->product_name = products[i].product_name;
3266 c->access = *(products[i].access);
3267 c->nr_cmds = c->max_commands - 4;
3268 break;
3271 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
3272 (readb(&c->cfgtable->Signature[1]) != 'I') ||
3273 (readb(&c->cfgtable->Signature[2]) != 'S') ||
3274 (readb(&c->cfgtable->Signature[3]) != 'S')) {
3275 printk("Does not appear to be a valid CISS config table\n");
3276 err = -ENODEV;
3277 goto err_out_free_res;
3279 /* We didn't find the controller in our list. We know the
3280 * signature is valid. If it's an HP device let's try to
3281 * bind to the device and fire it up. Otherwise we bail.
3283 if (i == ARRAY_SIZE(products)) {
3284 if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
3285 c->product_name = products[i-1].product_name;
3286 c->access = *(products[i-1].access);
3287 c->nr_cmds = c->max_commands - 4;
3288 printk(KERN_WARNING "cciss: This is an unknown "
3289 "Smart Array controller.\n"
3290 "cciss: Please update to the latest driver "
3291 "available from www.hp.com.\n");
3292 } else {
3293 printk(KERN_WARNING "cciss: Sorry, I don't know how"
3294 " to access the Smart Array controller %08lx\n"
3295 , (unsigned long)board_id);
3296 err = -ENODEV;
3297 goto err_out_free_res;
3300 #ifdef CONFIG_X86
3302 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3303 __u32 prefetch;
3304 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
3305 prefetch |= 0x100;
3306 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
3308 #endif
3310 /* Disabling DMA prefetch and refetch for the P600.
3311 * An ASIC bug may result in accesses to invalid memory addresses.
3312 * We've disabled prefetch for some time now. Testing with XEN
3313 * kernels revealed a bug in the refetch if dom0 resides on a P600.
3315 if(board_id == 0x3225103C) {
3316 __u32 dma_prefetch;
3317 __u32 dma_refetch;
3318 dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
3319 dma_prefetch |= 0x8000;
3320 writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
3321 pci_read_config_dword(pdev, PCI_COMMAND_PARITY, &dma_refetch);
3322 dma_refetch |= 0x1;
3323 pci_write_config_dword(pdev, PCI_COMMAND_PARITY, dma_refetch);
3326 #ifdef CCISS_DEBUG
3327 printk("Trying to put board into Simple mode\n");
3328 #endif /* CCISS_DEBUG */
3329 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
3330 /* Update the field, and then ring the doorbell */
3331 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
3332 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
3334 /* under certain very rare conditions, this can take a while.
3335 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3336 * as we enter this code.) */
3337 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3338 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3339 break;
3340 /* delay and try again */
3341 set_current_state(TASK_INTERRUPTIBLE);
3342 schedule_timeout(10);
3345 #ifdef CCISS_DEBUG
3346 printk(KERN_DEBUG "I counter got to %d %x\n", i,
3347 readl(c->vaddr + SA5_DOORBELL));
3348 #endif /* CCISS_DEBUG */
3349 #ifdef CCISS_DEBUG
3350 print_cfg_table(c->cfgtable);
3351 #endif /* CCISS_DEBUG */
3353 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3354 printk(KERN_WARNING "cciss: unable to get board into"
3355 " simple mode\n");
3356 err = -ENODEV;
3357 goto err_out_free_res;
3359 return 0;
3361 err_out_free_res:
3363 * Deliberately omit pci_disable_device(): it does something nasty to
3364 * Smart Array controllers that pci_enable_device does not undo
3366 pci_release_regions(pdev);
3367 return err;
3370 /* Function to find the first free pointer into our hba[] array
3371 * Returns -1 if no free entries are left.
3373 static int alloc_cciss_hba(void)
3375 int i;
3377 for (i = 0; i < MAX_CTLR; i++) {
3378 if (!hba[i]) {
3379 ctlr_info_t *p;
3381 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3382 if (!p)
3383 goto Enomem;
3384 hba[i] = p;
3385 return i;
3388 printk(KERN_WARNING "cciss: This driver supports a maximum"
3389 " of %d controllers.\n", MAX_CTLR);
3390 return -1;
3391 Enomem:
3392 printk(KERN_ERR "cciss: out of memory.\n");
3393 return -1;
3396 static void free_hba(int i)
3398 ctlr_info_t *p = hba[i];
3399 int n;
3401 hba[i] = NULL;
3402 for (n = 0; n < CISS_MAX_LUN; n++)
3403 put_disk(p->gendisk[n]);
3404 kfree(p);
3408 * This is it. Find all the controllers and register them. I really hate
3409 * stealing all these major device numbers.
3410 * returns the number of block devices registered.
3412 static int __devinit cciss_init_one(struct pci_dev *pdev,
3413 const struct pci_device_id *ent)
3415 int i;
3416 int j = 0;
3417 int rc;
3418 int dac, return_code;
3419 InquiryData_struct *inq_buff = NULL;
3421 i = alloc_cciss_hba();
3422 if (i < 0)
3423 return -1;
3425 hba[i]->busy_initializing = 1;
3427 if (cciss_pci_init(hba[i], pdev) != 0)
3428 goto clean1;
3430 sprintf(hba[i]->devname, "cciss%d", i);
3431 hba[i]->ctlr = i;
3432 hba[i]->pdev = pdev;
3434 /* configure PCI DMA stuff */
3435 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3436 dac = 1;
3437 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3438 dac = 0;
3439 else {
3440 printk(KERN_ERR "cciss: no suitable DMA available\n");
3441 goto clean1;
3445 * register with the major number, or get a dynamic major number
3446 * by passing 0 as argument. This is done for greater than
3447 * 8 controller support.
3449 if (i < MAX_CTLR_ORIG)
3450 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3451 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3452 if (rc == -EBUSY || rc == -EINVAL) {
3453 printk(KERN_ERR
3454 "cciss: Unable to get major number %d for %s "
3455 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3456 goto clean1;
3457 } else {
3458 if (i >= MAX_CTLR_ORIG)
3459 hba[i]->major = rc;
3462 /* make sure the board interrupts are off */
3463 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3464 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3465 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
3466 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3467 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3468 goto clean2;
3471 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3472 hba[i]->devname, pdev->device, pci_name(pdev),
3473 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3475 hba[i]->cmd_pool_bits =
3476 kmalloc(DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
3477 * sizeof(unsigned long), GFP_KERNEL);
3478 hba[i]->cmd_pool = (CommandList_struct *)
3479 pci_alloc_consistent(hba[i]->pdev,
3480 hba[i]->nr_cmds * sizeof(CommandList_struct),
3481 &(hba[i]->cmd_pool_dhandle));
3482 hba[i]->errinfo_pool = (ErrorInfo_struct *)
3483 pci_alloc_consistent(hba[i]->pdev,
3484 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3485 &(hba[i]->errinfo_pool_dhandle));
3486 if ((hba[i]->cmd_pool_bits == NULL)
3487 || (hba[i]->cmd_pool == NULL)
3488 || (hba[i]->errinfo_pool == NULL)) {
3489 printk(KERN_ERR "cciss: out of memory");
3490 goto clean4;
3492 #ifdef CONFIG_CISS_SCSI_TAPE
3493 hba[i]->scsi_rejects.complete =
3494 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3495 (hba[i]->nr_cmds + 5), GFP_KERNEL);
3496 if (hba[i]->scsi_rejects.complete == NULL) {
3497 printk(KERN_ERR "cciss: out of memory");
3498 goto clean4;
3500 #endif
3501 spin_lock_init(&hba[i]->lock);
3503 /* Initialize the pdev driver private data;
3504 have it point to hba[i]. */
3505 pci_set_drvdata(pdev, hba[i]);
3506 /* command and error info recs zeroed out before
3507 they are used */
3508 memset(hba[i]->cmd_pool_bits, 0,
3509 DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
3510 * sizeof(unsigned long));
3512 hba[i]->num_luns = 0;
3513 hba[i]->highest_lun = -1;
3514 for (j = 0; j < CISS_MAX_LUN; j++) {
3515 hba[i]->drv[j].raid_level = -1;
3516 hba[i]->drv[j].queue = NULL;
3517 hba[i]->gendisk[j] = NULL;
3520 cciss_scsi_setup(i);
3522 /* Turn the interrupts on so we can service requests */
3523 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3525 /* Get the firmware version */
3526 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
3527 if (inq_buff == NULL) {
3528 printk(KERN_ERR "cciss: out of memory\n");
3529 goto clean4;
3532 return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff,
3533 sizeof(InquiryData_struct), 0, 0 , 0, TYPE_CMD);
3534 if (return_code == IO_OK) {
3535 hba[i]->firm_ver[0] = inq_buff->data_byte[32];
3536 hba[i]->firm_ver[1] = inq_buff->data_byte[33];
3537 hba[i]->firm_ver[2] = inq_buff->data_byte[34];
3538 hba[i]->firm_ver[3] = inq_buff->data_byte[35];
3539 } else { /* send command failed */
3540 printk(KERN_WARNING "cciss: unable to determine firmware"
3541 " version of controller\n");
3544 cciss_procinit(i);
3546 hba[i]->cciss_max_sectors = 2048;
3548 hba[i]->busy_initializing = 0;
3550 rebuild_lun_table(hba[i], 1);
3551 return 1;
3553 clean4:
3554 kfree(inq_buff);
3555 #ifdef CONFIG_CISS_SCSI_TAPE
3556 kfree(hba[i]->scsi_rejects.complete);
3557 #endif
3558 kfree(hba[i]->cmd_pool_bits);
3559 if (hba[i]->cmd_pool)
3560 pci_free_consistent(hba[i]->pdev,
3561 hba[i]->nr_cmds * sizeof(CommandList_struct),
3562 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3563 if (hba[i]->errinfo_pool)
3564 pci_free_consistent(hba[i]->pdev,
3565 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3566 hba[i]->errinfo_pool,
3567 hba[i]->errinfo_pool_dhandle);
3568 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3569 clean2:
3570 unregister_blkdev(hba[i]->major, hba[i]->devname);
3571 clean1:
3572 hba[i]->busy_initializing = 0;
3573 /* cleanup any queues that may have been initialized */
3574 for (j=0; j <= hba[i]->highest_lun; j++){
3575 drive_info_struct *drv = &(hba[i]->drv[j]);
3576 if (drv->queue)
3577 blk_cleanup_queue(drv->queue);
3580 * Deliberately omit pci_disable_device(): it does something nasty to
3581 * Smart Array controllers that pci_enable_device does not undo
3583 pci_release_regions(pdev);
3584 pci_set_drvdata(pdev, NULL);
3585 free_hba(i);
3586 return -1;
3589 static void cciss_shutdown(struct pci_dev *pdev)
3591 ctlr_info_t *tmp_ptr;
3592 int i;
3593 char flush_buf[4];
3594 int return_code;
3596 tmp_ptr = pci_get_drvdata(pdev);
3597 if (tmp_ptr == NULL)
3598 return;
3599 i = tmp_ptr->ctlr;
3600 if (hba[i] == NULL)
3601 return;
3603 /* Turn board interrupts off and send the flush cache command */
3604 /* sendcmd will turn off interrupts and send the flush...
3605 * to write all data in the battery backed cache to disk */
3606 memset(flush_buf, 0, 4);
3607 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3608 TYPE_CMD);
3609 if (return_code == IO_OK) {
3610 printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
3611 } else {
3612 printk(KERN_WARNING "Error flushing cache on controller %d\n", i);
3614 free_irq(hba[i]->intr[2], hba[i]);
3617 static void __devexit cciss_remove_one(struct pci_dev *pdev)
3619 ctlr_info_t *tmp_ptr;
3620 int i, j;
3622 if (pci_get_drvdata(pdev) == NULL) {
3623 printk(KERN_ERR "cciss: Unable to remove device \n");
3624 return;
3626 tmp_ptr = pci_get_drvdata(pdev);
3627 i = tmp_ptr->ctlr;
3628 if (hba[i] == NULL) {
3629 printk(KERN_ERR "cciss: device appears to "
3630 "already be removed \n");
3631 return;
3634 remove_proc_entry(hba[i]->devname, proc_cciss);
3635 unregister_blkdev(hba[i]->major, hba[i]->devname);
3637 /* remove it from the disk list */
3638 for (j = 0; j < CISS_MAX_LUN; j++) {
3639 struct gendisk *disk = hba[i]->gendisk[j];
3640 if (disk) {
3641 struct request_queue *q = disk->queue;
3643 if (disk->flags & GENHD_FL_UP)
3644 del_gendisk(disk);
3645 if (q)
3646 blk_cleanup_queue(q);
3650 #ifdef CONFIG_CISS_SCSI_TAPE
3651 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3652 #endif
3654 cciss_shutdown(pdev);
3656 #ifdef CONFIG_PCI_MSI
3657 if (hba[i]->msix_vector)
3658 pci_disable_msix(hba[i]->pdev);
3659 else if (hba[i]->msi_vector)
3660 pci_disable_msi(hba[i]->pdev);
3661 #endif /* CONFIG_PCI_MSI */
3663 iounmap(hba[i]->vaddr);
3665 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
3666 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3667 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3668 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3669 kfree(hba[i]->cmd_pool_bits);
3670 #ifdef CONFIG_CISS_SCSI_TAPE
3671 kfree(hba[i]->scsi_rejects.complete);
3672 #endif
3674 * Deliberately omit pci_disable_device(): it does something nasty to
3675 * Smart Array controllers that pci_enable_device does not undo
3677 pci_release_regions(pdev);
3678 pci_set_drvdata(pdev, NULL);
3679 free_hba(i);
3682 static struct pci_driver cciss_pci_driver = {
3683 .name = "cciss",
3684 .probe = cciss_init_one,
3685 .remove = __devexit_p(cciss_remove_one),
3686 .id_table = cciss_pci_device_id, /* id_table */
3687 .shutdown = cciss_shutdown,
3691 * This is it. Register the PCI driver information for the cards we control;
3692 * the OS will call our registered routines when it finds one of our cards.
3694 static int __init cciss_init(void)
3696 printk(KERN_INFO DRIVER_NAME "\n");
3698 /* Register for our PCI devices */
3699 return pci_register_driver(&cciss_pci_driver);
3702 static void __exit cciss_cleanup(void)
3704 int i;
3706 pci_unregister_driver(&cciss_pci_driver);
3707 /* double check that all controller entries have been removed */
3708 for (i = 0; i < MAX_CTLR; i++) {
3709 if (hba[i] != NULL) {
3710 printk(KERN_WARNING "cciss: had to remove"
3711 " controller %d\n", i);
3712 cciss_remove_one(hba[i]->pdev);
3715 remove_proc_entry("driver/cciss", NULL);
3718 static void fail_all_cmds(unsigned long ctlr)
3720 /* If we get here, the board is apparently dead. */
3721 ctlr_info_t *h = hba[ctlr];
3722 CommandList_struct *c;
3723 unsigned long flags;
3725 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3726 h->alive = 0; /* the controller apparently died... */
3728 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3730 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3732 /* move everything off the request queue onto the completed queue */
3733 while ((c = h->reqQ) != NULL) {
3734 removeQ(&(h->reqQ), c);
3735 h->Qdepth--;
3736 addQ(&(h->cmpQ), c);
3739 /* Now, fail everything on the completed queue with a HW error */
3740 while ((c = h->cmpQ) != NULL) {
3741 removeQ(&h->cmpQ, c);
3742 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3743 if (c->cmd_type == CMD_RWREQ) {
3744 complete_command(h, c, 0);
3745 } else if (c->cmd_type == CMD_IOCTL_PEND)
3746 complete(c->waiting);
3747 #ifdef CONFIG_CISS_SCSI_TAPE
3748 else if (c->cmd_type == CMD_SCSI)
3749 complete_scsi_command(c, 0, 0);
3750 #endif
3752 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3753 return;
3756 module_init(cciss_init);
3757 module_exit(cciss_cleanup);