1 /*
2 * Disk Array driver for HP Smart Array controllers.
3 * (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
17 * 02111-1307, USA.
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/major.h>
31 #include <linux/fs.h>
32 #include <linux/bio.h>
33 #include <linux/blkpg.h>
34 #include <linux/timer.h>
35 #include <linux/proc_fs.h>
36 #include <linux/seq_file.h>
37 #include <linux/init.h>
38 #include <linux/hdreg.h>
39 #include <linux/spinlock.h>
40 #include <linux/compat.h>
41 #include <linux/blktrace_api.h>
42 #include <asm/uaccess.h>
43 #include <asm/io.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/blkdev.h>
47 #include <linux/genhd.h>
48 #include <linux/completion.h>
49 #include <scsi/scsi.h>
50 #include <scsi/sg.h>
51 #include <scsi/scsi_ioctl.h>
52 #include <linux/cdrom.h>
53 #include <linux/scatterlist.h>
55 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
56 #define DRIVER_NAME "HP CISS Driver (v 3.6.14)"
57 #define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,14)
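/* Worked example of the version encoding above:
 * DRIVER_VERSION = CCISS_DRIVER_VERSION(3,6,14)
 *                = (3 << 16) | (6 << 8) | 14 = 0x0003060E
 */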
59 /* Embedded module documentation macros - see modules.h */
60 MODULE_AUTHOR("Hewlett-Packard Company");
61 MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.14");
62 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
63 " SA6i P600 P800 P400 P400i E200 E200i E500");
64 MODULE_VERSION("3.6.14");
65 MODULE_LICENSE("GPL");
67 #include "cciss_cmd.h"
68 #include "cciss.h"
69 #include <linux/cciss_ioctl.h>
71 /* define the PCI info for the cards we can control */
72 static const struct pci_device_id cciss_pci_device_id[] = {
73 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
74 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
75 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
76 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
77 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
78 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
79 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
80 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
81 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
86 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
90 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
91 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
92 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
93 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
94 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
95 {0,}
98 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
100 /* board_id = Subsystem Device ID and Vendor ID
101 * product = Marketing Name for the board
102 * access = Address of the struct of function pointers
103 * nr_cmds = Number of commands supported by controller
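* e.g. the Smart Array 5300 has PCI subsystem vendor ID 0x0E11 and
* subsystem device ID 0x4070, giving the 0x40700E11 board_id below.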
105 static struct board_type products[] = {
106 {0x40700E11, "Smart Array 5300", &SA5_access, 512},
107 {0x40800E11, "Smart Array 5i", &SA5B_access, 512},
108 {0x40820E11, "Smart Array 532", &SA5B_access, 512},
109 {0x40830E11, "Smart Array 5312", &SA5B_access, 512},
110 {0x409A0E11, "Smart Array 641", &SA5_access, 512},
111 {0x409B0E11, "Smart Array 642", &SA5_access, 512},
112 {0x409C0E11, "Smart Array 6400", &SA5_access, 512},
113 {0x409D0E11, "Smart Array 6400 EM", &SA5_access, 512},
114 {0x40910E11, "Smart Array 6i", &SA5_access, 512},
115 {0x3225103C, "Smart Array P600", &SA5_access, 512},
116 {0x3223103C, "Smart Array P800", &SA5_access, 512},
117 {0x3234103C, "Smart Array P400", &SA5_access, 512},
118 {0x3235103C, "Smart Array P400i", &SA5_access, 512},
119 {0x3211103C, "Smart Array E200i", &SA5_access, 120},
120 {0x3212103C, "Smart Array E200", &SA5_access, 120},
121 {0x3213103C, "Smart Array E200i", &SA5_access, 120},
122 {0x3214103C, "Smart Array E200i", &SA5_access, 120},
123 {0x3215103C, "Smart Array E200i", &SA5_access, 120},
124 {0x3237103C, "Smart Array E500", &SA5_access, 512},
125 {0x323D103C, "Smart Array P700m", &SA5_access, 512},
126 {0xFFFF103C, "Unknown Smart Array", &SA5_access, 120},
129 /* How long to wait (in milliseconds) for board to go into simple mode */
130 #define MAX_CONFIG_WAIT 30000
131 #define MAX_IOCTL_CONFIG_WAIT 1000
133 /* define how many times we will try a command because of bus resets */
134 #define MAX_CMD_RETRIES 3
136 #define MAX_CTLR 32
138 /* Originally the cciss driver only supported 8 major numbers */
139 #define MAX_CTLR_ORIG 8
141 static ctlr_info_t *hba[MAX_CTLR];
143 static void do_cciss_request(struct request_queue *q);
144 static irqreturn_t do_cciss_intr(int irq, void *dev_id);
145 static int cciss_open(struct inode *inode, struct file *filep);
146 static int cciss_release(struct inode *inode, struct file *filep);
147 static int cciss_ioctl(struct inode *inode, struct file *filep,
148 unsigned int cmd, unsigned long arg);
149 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
151 static int cciss_revalidate(struct gendisk *disk);
152 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
153 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
154 int clear_all);
156 static void cciss_read_capacity(int ctlr, int logvol, int withirq,
157 sector_t *total_size, unsigned int *block_size);
158 static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
159 sector_t *total_size, unsigned int *block_size);
160 static void cciss_geometry_inquiry(int ctlr, int logvol,
161 int withirq, sector_t total_size,
162 unsigned int block_size, InquiryData_struct *inq_buff,
163 drive_info_struct *drv);
164 static void cciss_getgeometry(int cntl_num);
165 static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
166 __u32);
167 static void start_io(ctlr_info_t *h);
168 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
169 unsigned int use_unit_num, unsigned int log_unit,
170 __u8 page_code, unsigned char *scsi3addr, int cmd_type);
171 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
172 unsigned int use_unit_num, unsigned int log_unit,
173 __u8 page_code, int cmd_type);
175 static void fail_all_cmds(unsigned long ctlr);
177 #ifdef CONFIG_PROC_FS
178 static void cciss_procinit(int i);
179 #else
180 static void cciss_procinit(int i)
183 #endif /* CONFIG_PROC_FS */
185 #ifdef CONFIG_COMPAT
186 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
187 #endif
189 static struct block_device_operations cciss_fops = {
190 .owner = THIS_MODULE,
191 .open = cciss_open,
192 .release = cciss_release,
193 .ioctl = cciss_ioctl,
194 .getgeo = cciss_getgeo,
195 #ifdef CONFIG_COMPAT
196 .compat_ioctl = cciss_compat_ioctl,
197 #endif
198 .revalidate_disk = cciss_revalidate,
202 * Enqueuing and dequeuing functions for cmdlists.
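* Commands are kept on a circular, doubly-linked list of CommandList_structs;
* *Qptr points at the current head (or is NULL when the queue is empty).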
204 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
206 if (*Qptr == NULL) {
207 *Qptr = c;
208 c->next = c->prev = c;
209 } else {
210 c->prev = (*Qptr)->prev;
211 c->next = (*Qptr);
212 (*Qptr)->prev->next = c;
213 (*Qptr)->prev = c;
217 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
218 CommandList_struct *c)
220 if (c && c->next != c) {
221 if (*Qptr == c)
222 *Qptr = c->next;
223 c->prev->next = c->next;
224 c->next->prev = c->prev;
225 } else {
226 *Qptr = NULL;
228 return c;
231 #include "cciss_scsi.c" /* For SCSI tape support */
233 #define RAID_UNKNOWN 6
235 #ifdef CONFIG_PROC_FS
238 * Report information about this controller.
240 #define ENG_GIG 1000000000
241 #define ENG_GIG_FACTOR (ENG_GIG/512)
242 #define ENGAGE_SCSI "engage scsi"
243 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
244 "UNKNOWN"
247 static struct proc_dir_entry *proc_cciss;
249 static void cciss_seq_show_header(struct seq_file *seq)
251 ctlr_info_t *h = seq->private;
253 seq_printf(seq, "%s: HP %s Controller\n"
254 "Board ID: 0x%08lx\n"
255 "Firmware Version: %c%c%c%c\n"
256 "IRQ: %d\n"
257 "Logical drives: %d\n"
258 "Current Q depth: %d\n"
259 "Current # commands on controller: %d\n"
260 "Max Q depth since init: %d\n"
261 "Max # commands on controller since init: %d\n"
262 "Max SG entries since init: %d\n",
263 h->devname,
264 h->product_name,
265 (unsigned long)h->board_id,
266 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
267 h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
268 h->num_luns,
269 h->Qdepth, h->commands_outstanding,
270 h->maxQsinceinit, h->max_outstanding, h->maxSG);
272 #ifdef CONFIG_CISS_SCSI_TAPE
273 cciss_seq_tape_report(seq, h->ctlr);
274 #endif /* CONFIG_CISS_SCSI_TAPE */
277 static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
279 ctlr_info_t *h = seq->private;
280 unsigned ctlr = h->ctlr;
281 unsigned long flags;
283 /* prevent displaying bogus info during configuration
284 * or deconfiguration of a logical volume
286 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
287 if (h->busy_configuring) {
288 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
289 return ERR_PTR(-EBUSY);
291 h->busy_configuring = 1;
292 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
294 if (*pos == 0)
295 cciss_seq_show_header(seq);
297 return pos;
300 static int cciss_seq_show(struct seq_file *seq, void *v)
302 sector_t vol_sz, vol_sz_frac;
303 ctlr_info_t *h = seq->private;
304 unsigned ctlr = h->ctlr;
305 loff_t *pos = v;
306 drive_info_struct *drv = &h->drv[*pos];
308 if (*pos > h->highest_lun)
309 return 0;
311 if (drv->heads == 0)
312 return 0;
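/* nr_blocks counts 512-byte sectors; dividing by ENG_GIG_FACTOR
 * (10^9 / 512) yields whole (decimal) gigabytes, and the remainder is
 * rescaled below to hundredths of a gigabyte for display.
 */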
314 vol_sz = drv->nr_blocks;
315 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
316 vol_sz_frac *= 100;
317 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
319 if (drv->raid_level > 5)
320 drv->raid_level = RAID_UNKNOWN;
321 seq_printf(seq, "cciss/c%dd%d:"
322 "\t%4u.%02uGB\tRAID %s\n",
323 ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac,
324 raid_label[drv->raid_level]);
325 return 0;
328 static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos)
330 ctlr_info_t *h = seq->private;
332 if (*pos > h->highest_lun)
333 return NULL;
334 *pos += 1;
336 return pos;
339 static void cciss_seq_stop(struct seq_file *seq, void *v)
341 ctlr_info_t *h = seq->private;
343 /* Only reset h->busy_configuring if we succeeded in setting
344 * it during cciss_seq_start. */
345 if (v == ERR_PTR(-EBUSY))
346 return;
348 h->busy_configuring = 0;
351 static struct seq_operations cciss_seq_ops = {
352 .start = cciss_seq_start,
353 .show = cciss_seq_show,
354 .next = cciss_seq_next,
355 .stop = cciss_seq_stop,
358 static int cciss_seq_open(struct inode *inode, struct file *file)
360 int ret = seq_open(file, &cciss_seq_ops);
361 struct seq_file *seq = file->private_data;
363 if (!ret)
364 seq->private = PDE(inode)->data;
366 return ret;
369 static ssize_t
370 cciss_proc_write(struct file *file, const char __user *buf,
371 size_t length, loff_t *ppos)
373 int err;
374 char *buffer;
376 #ifndef CONFIG_CISS_SCSI_TAPE
377 return -EINVAL;
378 #endif
380 if (!buf || length > PAGE_SIZE - 1)
381 return -EINVAL;
383 buffer = (char *)__get_free_page(GFP_KERNEL);
384 if (!buffer)
385 return -ENOMEM;
387 err = -EFAULT;
388 if (copy_from_user(buffer, buf, length))
389 goto out;
390 buffer[length] = '\0';
392 #ifdef CONFIG_CISS_SCSI_TAPE
393 if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) {
394 struct seq_file *seq = file->private_data;
395 ctlr_info_t *h = seq->private;
396 int rc;
398 rc = cciss_engage_scsi(h->ctlr);
399 if (rc != 0)
400 err = -rc;
401 else
402 err = length;
403 } else
404 #endif /* CONFIG_CISS_SCSI_TAPE */
405 err = -EINVAL;
406 /* might be nice to have "disengage" too, but it's not
407 safely possible. (only 1 module use count, lock issues.) */
409 out:
410 free_page((unsigned long)buffer);
411 return err;
414 static struct file_operations cciss_proc_fops = {
415 .owner = THIS_MODULE,
416 .open = cciss_seq_open,
417 .read = seq_read,
418 .llseek = seq_lseek,
419 .release = seq_release,
420 .write = cciss_proc_write,
423 static void __devinit cciss_procinit(int i)
425 struct proc_dir_entry *pde;
427 if (proc_cciss == NULL)
428 proc_cciss = proc_mkdir("driver/cciss", NULL);
429 if (!proc_cciss)
430 return;
431 pde = proc_create_data(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
432 S_IROTH, proc_cciss,
433 &cciss_proc_fops, hba[i]);
435 #endif /* CONFIG_PROC_FS */
438 * For operations that cannot sleep, a command block is allocated at init,
439 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
440 * which ones are free or in use. For operations that can wait for kmalloc
441 * to possibly sleep, this routine can be called with get_from_pool set to 0.
442 * cmd_free() MUST then be called with got_from_pool set to 0 as well.
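* Illustrative only: a caller that must not sleep takes a command from the
* preallocated pool and returns it with the matching flag, e.g.
*	c = cmd_alloc(h, 1);
*	... build and submit the command ...
*	cmd_free(h, c, 1);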
444 static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
446 CommandList_struct *c;
447 int i;
448 u64bit temp64;
449 dma_addr_t cmd_dma_handle, err_dma_handle;
451 if (!get_from_pool) {
452 c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
453 sizeof(CommandList_struct), &cmd_dma_handle);
454 if (c == NULL)
455 return NULL;
456 memset(c, 0, sizeof(CommandList_struct));
458 c->cmdindex = -1;
460 c->err_info = (ErrorInfo_struct *)
461 pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
462 &err_dma_handle);
464 if (c->err_info == NULL) {
465 pci_free_consistent(h->pdev,
466 sizeof(CommandList_struct), c, cmd_dma_handle);
467 return NULL;
469 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
470 } else { /* get it out of the controller's pool */
472 do {
473 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
474 if (i == h->nr_cmds)
475 return NULL;
476 } while (test_and_set_bit
477 (i & (BITS_PER_LONG - 1),
478 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
479 #ifdef CCISS_DEBUG
480 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
481 #endif
482 c = h->cmd_pool + i;
483 memset(c, 0, sizeof(CommandList_struct));
484 cmd_dma_handle = h->cmd_pool_dhandle
485 + i * sizeof(CommandList_struct);
486 c->err_info = h->errinfo_pool + i;
487 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
488 err_dma_handle = h->errinfo_pool_dhandle
489 + i * sizeof(ErrorInfo_struct);
490 h->nr_allocs++;
492 c->cmdindex = i;
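/* The command's bus address doubles as the tag handed to the controller,
 * and ErrDesc tells the controller where to deposit error info for it.
 */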
495 c->busaddr = (__u32) cmd_dma_handle;
496 temp64.val = (__u64) err_dma_handle;
497 c->ErrDesc.Addr.lower = temp64.val32.lower;
498 c->ErrDesc.Addr.upper = temp64.val32.upper;
499 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
501 c->ctlr = h->ctlr;
502 return c;
506 * Frees a command block that was previously allocated with cmd_alloc().
508 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
510 int i;
511 u64bit temp64;
513 if (!got_from_pool) {
514 temp64.val32.lower = c->ErrDesc.Addr.lower;
515 temp64.val32.upper = c->ErrDesc.Addr.upper;
516 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
517 c->err_info, (dma_addr_t) temp64.val);
518 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
519 c, (dma_addr_t) c->busaddr);
520 } else {
521 i = c - h->cmd_pool;
522 clear_bit(i & (BITS_PER_LONG - 1),
523 h->cmd_pool_bits + (i / BITS_PER_LONG));
524 h->nr_frees++;
528 static inline ctlr_info_t *get_host(struct gendisk *disk)
530 return disk->queue->queuedata;
533 static inline drive_info_struct *get_drv(struct gendisk *disk)
535 return disk->private_data;
539 * Open. Make sure the device is really there.
541 static int cciss_open(struct inode *inode, struct file *filep)
543 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
544 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
546 #ifdef CCISS_DEBUG
547 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
548 #endif /* CCISS_DEBUG */
550 if (host->busy_initializing || drv->busy_configuring)
551 return -EBUSY;
553 * Root is allowed to open raw volume zero even if it's not configured
554 * so array config can still work. Root is also allowed to open any
555 * volume that has a LUN ID, so it can issue IOCTL to reread the
556 * disk information. I don't think I really like this
557 * but I'm already using way too many device nodes to claim another one
558 * for "raw controller".
560 if (drv->heads == 0) {
561 if (iminor(inode) != 0) { /* not node 0? */
562 /* if not node 0 make sure it is a partition = 0 */
563 if (iminor(inode) & 0x0f) {
564 return -ENXIO;
565 /* if it is, make sure we have a LUN ID */
566 } else if (drv->LunID == 0) {
567 return -ENXIO;
570 if (!capable(CAP_SYS_ADMIN))
571 return -EPERM;
573 drv->usage_count++;
574 host->usage_count++;
575 return 0;
579 * Close. Sync first.
581 static int cciss_release(struct inode *inode, struct file *filep)
583 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
584 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
586 #ifdef CCISS_DEBUG
587 printk(KERN_DEBUG "cciss_release %s\n",
588 inode->i_bdev->bd_disk->disk_name);
589 #endif /* CCISS_DEBUG */
591 drv->usage_count--;
592 host->usage_count--;
593 return 0;
596 #ifdef CONFIG_COMPAT
598 static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
600 int ret;
601 lock_kernel();
602 ret = cciss_ioctl(f->f_path.dentry->d_inode, f, cmd, arg);
603 unlock_kernel();
604 return ret;
607 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
608 unsigned long arg);
609 static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
610 unsigned long arg);
612 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
614 switch (cmd) {
615 case CCISS_GETPCIINFO:
616 case CCISS_GETINTINFO:
617 case CCISS_SETINTINFO:
618 case CCISS_GETNODENAME:
619 case CCISS_SETNODENAME:
620 case CCISS_GETHEARTBEAT:
621 case CCISS_GETBUSTYPES:
622 case CCISS_GETFIRMVER:
623 case CCISS_GETDRIVVER:
624 case CCISS_REVALIDVOLS:
625 case CCISS_DEREGDISK:
626 case CCISS_REGNEWDISK:
627 case CCISS_REGNEWD:
628 case CCISS_RESCANDISK:
629 case CCISS_GETLUNINFO:
630 return do_ioctl(f, cmd, arg);
632 case CCISS_PASSTHRU32:
633 return cciss_ioctl32_passthru(f, cmd, arg);
634 case CCISS_BIG_PASSTHRU32:
635 return cciss_ioctl32_big_passthru(f, cmd, arg);
637 default:
638 return -ENOIOCTLCMD;
642 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
643 unsigned long arg)
645 IOCTL32_Command_struct __user *arg32 =
646 (IOCTL32_Command_struct __user *) arg;
647 IOCTL_Command_struct arg64;
648 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
649 int err;
650 u32 cp;
652 err = 0;
653 err |=
654 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
655 sizeof(arg64.LUN_info));
656 err |=
657 copy_from_user(&arg64.Request, &arg32->Request,
658 sizeof(arg64.Request));
659 err |=
660 copy_from_user(&arg64.error_info, &arg32->error_info,
661 sizeof(arg64.error_info));
662 err |= get_user(arg64.buf_size, &arg32->buf_size);
663 err |= get_user(cp, &arg32->buf);
664 arg64.buf = compat_ptr(cp);
665 err |= copy_to_user(p, &arg64, sizeof(arg64));
667 if (err)
668 return -EFAULT;
670 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
671 if (err)
672 return err;
673 err |=
674 copy_in_user(&arg32->error_info, &p->error_info,
675 sizeof(arg32->error_info));
676 if (err)
677 return -EFAULT;
678 return err;
681 static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
682 unsigned long arg)
684 BIG_IOCTL32_Command_struct __user *arg32 =
685 (BIG_IOCTL32_Command_struct __user *) arg;
686 BIG_IOCTL_Command_struct arg64;
687 BIG_IOCTL_Command_struct __user *p =
688 compat_alloc_user_space(sizeof(arg64));
689 int err;
690 u32 cp;
692 err = 0;
693 err |=
694 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
695 sizeof(arg64.LUN_info));
696 err |=
697 copy_from_user(&arg64.Request, &arg32->Request,
698 sizeof(arg64.Request));
699 err |=
700 copy_from_user(&arg64.error_info, &arg32->error_info,
701 sizeof(arg64.error_info));
702 err |= get_user(arg64.buf_size, &arg32->buf_size);
703 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
704 err |= get_user(cp, &arg32->buf);
705 arg64.buf = compat_ptr(cp);
706 err |= copy_to_user(p, &arg64, sizeof(arg64));
708 if (err)
709 return -EFAULT;
711 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
712 if (err)
713 return err;
714 err |=
715 copy_in_user(&arg32->error_info, &p->error_info,
716 sizeof(arg32->error_info));
717 if (err)
718 return -EFAULT;
719 return err;
721 #endif
723 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
725 drive_info_struct *drv = get_drv(bdev->bd_disk);
727 if (!drv->cylinders)
728 return -ENXIO;
730 geo->heads = drv->heads;
731 geo->sectors = drv->sectors;
732 geo->cylinders = drv->cylinders;
733 return 0;
737 * ioctl
739 static int cciss_ioctl(struct inode *inode, struct file *filep,
740 unsigned int cmd, unsigned long arg)
742 struct block_device *bdev = inode->i_bdev;
743 struct gendisk *disk = bdev->bd_disk;
744 ctlr_info_t *host = get_host(disk);
745 drive_info_struct *drv = get_drv(disk);
746 int ctlr = host->ctlr;
747 void __user *argp = (void __user *)arg;
749 #ifdef CCISS_DEBUG
750 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
751 #endif /* CCISS_DEBUG */
753 switch (cmd) {
754 case CCISS_GETPCIINFO:
756 cciss_pci_info_struct pciinfo;
758 if (!arg)
759 return -EINVAL;
760 pciinfo.domain = pci_domain_nr(host->pdev->bus);
761 pciinfo.bus = host->pdev->bus->number;
762 pciinfo.dev_fn = host->pdev->devfn;
763 pciinfo.board_id = host->board_id;
764 if (copy_to_user
765 (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
766 return -EFAULT;
767 return 0;
769 case CCISS_GETINTINFO:
771 cciss_coalint_struct intinfo;
772 if (!arg)
773 return -EINVAL;
774 intinfo.delay =
775 readl(&host->cfgtable->HostWrite.CoalIntDelay);
776 intinfo.count =
777 readl(&host->cfgtable->HostWrite.CoalIntCount);
778 if (copy_to_user
779 (argp, &intinfo, sizeof(cciss_coalint_struct)))
780 return -EFAULT;
781 return 0;
783 case CCISS_SETINTINFO:
785 cciss_coalint_struct intinfo;
786 unsigned long flags;
787 int i;
789 if (!arg)
790 return -EINVAL;
791 if (!capable(CAP_SYS_ADMIN))
792 return -EPERM;
793 if (copy_from_user
794 (&intinfo, argp, sizeof(cciss_coalint_struct)))
795 return -EFAULT;
796 if ((intinfo.delay == 0) && (intinfo.count == 0))
798 // printk("cciss_ioctl: delay and count cannot be 0\n");
799 return -EINVAL;
801 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
802 /* Update the field, and then ring the doorbell */
803 writel(intinfo.delay,
804 &(host->cfgtable->HostWrite.CoalIntDelay));
805 writel(intinfo.count,
806 &(host->cfgtable->HostWrite.CoalIntCount));
807 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
809 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
810 if (!(readl(host->vaddr + SA5_DOORBELL)
811 & CFGTBL_ChangeReq))
812 break;
813 /* delay and try again */
814 udelay(1000);
816 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
817 if (i >= MAX_IOCTL_CONFIG_WAIT)
818 return -EAGAIN;
819 return 0;
821 case CCISS_GETNODENAME:
823 NodeName_type NodeName;
824 int i;
826 if (!arg)
827 return -EINVAL;
828 for (i = 0; i < 16; i++)
829 NodeName[i] =
830 readb(&host->cfgtable->ServerName[i]);
831 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
832 return -EFAULT;
833 return 0;
835 case CCISS_SETNODENAME:
837 NodeName_type NodeName;
838 unsigned long flags;
839 int i;
841 if (!arg)
842 return -EINVAL;
843 if (!capable(CAP_SYS_ADMIN))
844 return -EPERM;
846 if (copy_from_user
847 (NodeName, argp, sizeof(NodeName_type)))
848 return -EFAULT;
850 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
852 /* Update the field, and then ring the doorbell */
853 for (i = 0; i < 16; i++)
854 writeb(NodeName[i],
855 &host->cfgtable->ServerName[i]);
857 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
859 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
860 if (!(readl(host->vaddr + SA5_DOORBELL)
861 & CFGTBL_ChangeReq))
862 break;
863 /* delay and try again */
864 udelay(1000);
866 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
867 if (i >= MAX_IOCTL_CONFIG_WAIT)
868 return -EAGAIN;
869 return 0;
872 case CCISS_GETHEARTBEAT:
874 Heartbeat_type heartbeat;
876 if (!arg)
877 return -EINVAL;
878 heartbeat = readl(&host->cfgtable->HeartBeat);
879 if (copy_to_user
880 (argp, &heartbeat, sizeof(Heartbeat_type)))
881 return -EFAULT;
882 return 0;
884 case CCISS_GETBUSTYPES:
886 BusTypes_type BusTypes;
888 if (!arg)
889 return -EINVAL;
890 BusTypes = readl(&host->cfgtable->BusTypes);
891 if (copy_to_user
892 (argp, &BusTypes, sizeof(BusTypes_type)))
893 return -EFAULT;
894 return 0;
896 case CCISS_GETFIRMVER:
898 FirmwareVer_type firmware;
900 if (!arg)
901 return -EINVAL;
902 memcpy(firmware, host->firm_ver, 4);
904 if (copy_to_user
905 (argp, firmware, sizeof(FirmwareVer_type)))
906 return -EFAULT;
907 return 0;
909 case CCISS_GETDRIVVER:
911 DriverVer_type DriverVer = DRIVER_VERSION;
913 if (!arg)
914 return -EINVAL;
916 if (copy_to_user
917 (argp, &DriverVer, sizeof(DriverVer_type)))
918 return -EFAULT;
919 return 0;
922 case CCISS_REVALIDVOLS:
923 return rebuild_lun_table(host, NULL);
925 case CCISS_GETLUNINFO:{
926 LogvolInfo_struct luninfo;
928 luninfo.LunID = drv->LunID;
929 luninfo.num_opens = drv->usage_count;
930 luninfo.num_parts = 0;
931 if (copy_to_user(argp, &luninfo,
932 sizeof(LogvolInfo_struct)))
933 return -EFAULT;
934 return 0;
936 case CCISS_DEREGDISK:
937 return rebuild_lun_table(host, disk);
939 case CCISS_REGNEWD:
940 return rebuild_lun_table(host, NULL);
942 case CCISS_PASSTHRU:
944 IOCTL_Command_struct iocommand;
945 CommandList_struct *c;
946 char *buff = NULL;
947 u64bit temp64;
948 unsigned long flags;
949 DECLARE_COMPLETION_ONSTACK(wait);
951 if (!arg)
952 return -EINVAL;
954 if (!capable(CAP_SYS_RAWIO))
955 return -EPERM;
957 if (copy_from_user
958 (&iocommand, argp, sizeof(IOCTL_Command_struct)))
959 return -EFAULT;
960 if ((iocommand.buf_size < 1) &&
961 (iocommand.Request.Type.Direction != XFER_NONE)) {
962 return -EINVAL;
964 #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
965 /* Check kmalloc limits */
966 if (iocommand.buf_size > 128000)
967 return -EINVAL;
968 #endif
969 if (iocommand.buf_size > 0) {
970 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
971 if (buff == NULL)
972 return -EFAULT;
974 if (iocommand.Request.Type.Direction == XFER_WRITE) {
975 /* Copy the data into the buffer we created */
976 if (copy_from_user
977 (buff, iocommand.buf, iocommand.buf_size)) {
978 kfree(buff);
979 return -EFAULT;
981 } else {
982 memset(buff, 0, iocommand.buf_size);
984 if ((c = cmd_alloc(host, 0)) == NULL) {
985 kfree(buff);
986 return -ENOMEM;
988 // Fill in the command type
989 c->cmd_type = CMD_IOCTL_PEND;
990 // Fill in Command Header
991 c->Header.ReplyQueue = 0; // unused in simple mode
992 if (iocommand.buf_size > 0) // buffer to fill
994 c->Header.SGList = 1;
995 c->Header.SGTotal = 1;
996 } else // no buffers to fill
998 c->Header.SGList = 0;
999 c->Header.SGTotal = 0;
1001 c->Header.LUN = iocommand.LUN_info;
1002 c->Header.Tag.lower = c->busaddr; // use the kernel address of the cmd block for tag
1004 // Fill in Request block
1005 c->Request = iocommand.Request;
1007 // Fill in the scatter gather information
1008 if (iocommand.buf_size > 0) {
1009 temp64.val = pci_map_single(host->pdev, buff,
1010 iocommand.buf_size,
1011 PCI_DMA_BIDIRECTIONAL);
1012 c->SG[0].Addr.lower = temp64.val32.lower;
1013 c->SG[0].Addr.upper = temp64.val32.upper;
1014 c->SG[0].Len = iocommand.buf_size;
1015 c->SG[0].Ext = 0; // we are not chaining
1017 c->waiting = &wait;
1019 /* Put the request on the tail of the request queue */
1020 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1021 addQ(&host->reqQ, c);
1022 host->Qdepth++;
1023 start_io(host);
1024 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1026 wait_for_completion(&wait);
1028 /* unlock the buffers from DMA */
1029 temp64.val32.lower = c->SG[0].Addr.lower;
1030 temp64.val32.upper = c->SG[0].Addr.upper;
1031 pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
1032 iocommand.buf_size,
1033 PCI_DMA_BIDIRECTIONAL);
1035 /* Copy the error information out */
1036 iocommand.error_info = *(c->err_info);
1037 if (copy_to_user
1038 (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
1039 kfree(buff);
1040 cmd_free(host, c, 0);
1041 return -EFAULT;
1044 if (iocommand.Request.Type.Direction == XFER_READ) {
1045 /* Copy the data out of the buffer we created */
1046 if (copy_to_user
1047 (iocommand.buf, buff, iocommand.buf_size)) {
1048 kfree(buff);
1049 cmd_free(host, c, 0);
1050 return -EFAULT;
1053 kfree(buff);
1054 cmd_free(host, c, 0);
1055 return 0;
1057 case CCISS_BIG_PASSTHRU:{
1058 BIG_IOCTL_Command_struct *ioc;
1059 CommandList_struct *c;
1060 unsigned char **buff = NULL;
1061 int *buff_size = NULL;
1062 u64bit temp64;
1063 unsigned long flags;
1064 BYTE sg_used = 0;
1065 int status = 0;
1066 int i;
1067 DECLARE_COMPLETION_ONSTACK(wait);
1068 __u32 left;
1069 __u32 sz;
1070 BYTE __user *data_ptr;
1072 if (!arg)
1073 return -EINVAL;
1074 if (!capable(CAP_SYS_RAWIO))
1075 return -EPERM;
1076 ioc = (BIG_IOCTL_Command_struct *)
1077 kmalloc(sizeof(*ioc), GFP_KERNEL);
1078 if (!ioc) {
1079 status = -ENOMEM;
1080 goto cleanup1;
1082 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
1083 status = -EFAULT;
1084 goto cleanup1;
1086 if ((ioc->buf_size < 1) &&
1087 (ioc->Request.Type.Direction != XFER_NONE)) {
1088 status = -EINVAL;
1089 goto cleanup1;
1091 /* Check kmalloc limits using all SGs */
1092 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
1093 status = -EINVAL;
1094 goto cleanup1;
1096 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
1097 status = -EINVAL;
1098 goto cleanup1;
1100 buff =
1101 kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1102 if (!buff) {
1103 status = -ENOMEM;
1104 goto cleanup1;
1106 buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
1107 GFP_KERNEL);
1108 if (!buff_size) {
1109 status = -ENOMEM;
1110 goto cleanup1;
1112 left = ioc->buf_size;
1113 data_ptr = ioc->buf;
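/* Copy the user buffer in chunks of at most ioc->malloc_size bytes,
 * using one kernel buffer and one scatter-gather entry per chunk.
 */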
1114 while (left) {
1115 sz = (left >
1116 ioc->malloc_size) ? ioc->
1117 malloc_size : left;
1118 buff_size[sg_used] = sz;
1119 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1120 if (buff[sg_used] == NULL) {
1121 status = -ENOMEM;
1122 goto cleanup1;
1124 if (ioc->Request.Type.Direction == XFER_WRITE) {
1125 if (copy_from_user
1126 (buff[sg_used], data_ptr, sz)) {
1127 status = -ENOMEM;
1128 goto cleanup1;
1130 } else {
1131 memset(buff[sg_used], 0, sz);
1133 left -= sz;
1134 data_ptr += sz;
1135 sg_used++;
1137 if ((c = cmd_alloc(host, 0)) == NULL) {
1138 status = -ENOMEM;
1139 goto cleanup1;
1141 c->cmd_type = CMD_IOCTL_PEND;
1142 c->Header.ReplyQueue = 0;
1144 if (ioc->buf_size > 0) {
1145 c->Header.SGList = sg_used;
1146 c->Header.SGTotal = sg_used;
1147 } else {
1148 c->Header.SGList = 0;
1149 c->Header.SGTotal = 0;
1151 c->Header.LUN = ioc->LUN_info;
1152 c->Header.Tag.lower = c->busaddr;
1154 c->Request = ioc->Request;
1155 if (ioc->buf_size > 0) {
1156 int i;
1157 for (i = 0; i < sg_used; i++) {
1158 temp64.val =
1159 pci_map_single(host->pdev, buff[i],
1160 buff_size[i],
1161 PCI_DMA_BIDIRECTIONAL);
1162 c->SG[i].Addr.lower =
1163 temp64.val32.lower;
1164 c->SG[i].Addr.upper =
1165 temp64.val32.upper;
1166 c->SG[i].Len = buff_size[i];
1167 c->SG[i].Ext = 0; /* we are not chaining */
1170 c->waiting = &wait;
1171 /* Put the request on the tail of the request queue */
1172 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1173 addQ(&host->reqQ, c);
1174 host->Qdepth++;
1175 start_io(host);
1176 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1177 wait_for_completion(&wait);
1178 /* unlock the buffers from DMA */
1179 for (i = 0; i < sg_used; i++) {
1180 temp64.val32.lower = c->SG[i].Addr.lower;
1181 temp64.val32.upper = c->SG[i].Addr.upper;
1182 pci_unmap_single(host->pdev,
1183 (dma_addr_t) temp64.val, buff_size[i],
1184 PCI_DMA_BIDIRECTIONAL);
1186 /* Copy the error information out */
1187 ioc->error_info = *(c->err_info);
1188 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1189 cmd_free(host, c, 0);
1190 status = -EFAULT;
1191 goto cleanup1;
1193 if (ioc->Request.Type.Direction == XFER_READ) {
1194 /* Copy the data out of the buffer we created */
1195 BYTE __user *ptr = ioc->buf;
1196 for (i = 0; i < sg_used; i++) {
1197 if (copy_to_user
1198 (ptr, buff[i], buff_size[i])) {
1199 cmd_free(host, c, 0);
1200 status = -EFAULT;
1201 goto cleanup1;
1203 ptr += buff_size[i];
1206 cmd_free(host, c, 0);
1207 status = 0;
1208 cleanup1:
1209 if (buff) {
1210 for (i = 0; i < sg_used; i++)
1211 kfree(buff[i]);
1212 kfree(buff);
1214 kfree(buff_size);
1215 kfree(ioc);
1216 return status;
1219 /* scsi_cmd_ioctl handles these, below, though some are not */
1220 /* very meaningful for cciss. SG_IO is the main one people want. */
1222 case SG_GET_VERSION_NUM:
1223 case SG_SET_TIMEOUT:
1224 case SG_GET_TIMEOUT:
1225 case SG_GET_RESERVED_SIZE:
1226 case SG_SET_RESERVED_SIZE:
1227 case SG_EMULATED_HOST:
1228 case SG_IO:
1229 case SCSI_IOCTL_SEND_COMMAND:
1230 return scsi_cmd_ioctl(filep, disk->queue, disk, cmd, argp);
1232 /* scsi_cmd_ioctl would normally handle these, below, but */
1233 /* they aren't a good fit for cciss, as CD-ROMs are */
1234 /* not supported, and we don't have any bus/target/lun */
1235 /* which we present to the kernel. */
1237 case CDROM_SEND_PACKET:
1238 case CDROMCLOSETRAY:
1239 case CDROMEJECT:
1240 case SCSI_IOCTL_GET_IDLUN:
1241 case SCSI_IOCTL_GET_BUS_NUMBER:
1242 default:
1243 return -ENOTTY;
1247 static void cciss_check_queues(ctlr_info_t *h)
1249 int start_queue = h->next_to_run;
1250 int i;
1252 /* check to see if we have maxed out the number of commands that can
1253 * be placed on the queue. If so then exit. We do this check here
1254 * in case the interrupt we serviced was from an ioctl and did not
1255 * free any new commands.
1257 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
1258 return;
1260 /* We have room on the queue for more commands. Now we need to queue
1261 * them up. We will also keep track of the next queue to run so
1262 * that every queue gets a chance to be started first.
1264 for (i = 0; i < h->highest_lun + 1; i++) {
1265 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1266 /* make sure the disk has been added and the drive is real
1267 * because this can be called from the middle of init_one.
1269 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1270 continue;
1271 blk_start_queue(h->gendisk[curr_queue]->queue);
1273 /* check to see if we have maxed out the number of commands
1274 * that can be placed on the queue.
1276 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
1277 if (curr_queue == start_queue) {
1278 h->next_to_run =
1279 (start_queue + 1) % (h->highest_lun + 1);
1280 break;
1281 } else {
1282 h->next_to_run = curr_queue;
1283 break;
1285 } else {
1286 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
1291 static void cciss_softirq_done(struct request *rq)
1293 CommandList_struct *cmd = rq->completion_data;
1294 ctlr_info_t *h = hba[cmd->ctlr];
1295 unsigned long flags;
1296 u64bit temp64;
1297 int i, ddir;
1299 if (cmd->Request.Type.Direction == XFER_READ)
1300 ddir = PCI_DMA_FROMDEVICE;
1301 else
1302 ddir = PCI_DMA_TODEVICE;
1304 /* command did not need to be retried */
1305 /* unmap the DMA mapping for all the scatter gather elements */
1306 for (i = 0; i < cmd->Header.SGList; i++) {
1307 temp64.val32.lower = cmd->SG[i].Addr.lower;
1308 temp64.val32.upper = cmd->SG[i].Addr.upper;
1309 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1312 #ifdef CCISS_DEBUG
1313 printk("Done with %p\n", rq);
1314 #endif /* CCISS_DEBUG */
1316 if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, blk_rq_bytes(rq)))
1317 BUG();
1319 spin_lock_irqsave(&h->lock, flags);
1320 cmd_free(h, cmd, 1);
1321 cciss_check_queues(h);
1322 spin_unlock_irqrestore(&h->lock, flags);
1325 /* This function will check the usage_count of the drive to be updated/added.
1326 * If the usage_count is zero then the drive information will be updated and
1327 * the disk will be re-registered with the kernel. If not then it will be
1328 * left alone for the next reboot. The exception to this is disk 0 which
1329 * will always be left registered with the kernel since it is also the
1330 * controller node. Any changes to disk 0 will show up on the next
1331 * reboot.
1333 static void cciss_update_drive_info(int ctlr, int drv_index)
1335 ctlr_info_t *h = hba[ctlr];
1336 struct gendisk *disk;
1337 InquiryData_struct *inq_buff = NULL;
1338 unsigned int block_size;
1339 sector_t total_size;
1340 unsigned long flags = 0;
1341 int ret = 0;
1343 /* if the disk already exists then deregister it before proceeding */
1344 if (h->drv[drv_index].raid_level != -1) {
1345 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1346 h->drv[drv_index].busy_configuring = 1;
1347 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1349 /* deregister_disk sets h->drv[drv_index].queue = NULL */
1350 /* which keeps the interrupt handler from starting */
1351 /* the queue. */
1352 ret = deregister_disk(h->gendisk[drv_index],
1353 &h->drv[drv_index], 0);
1354 h->drv[drv_index].busy_configuring = 0;
1357 /* If the disk is in use return */
1358 if (ret)
1359 return;
1361 /* Get information about the disk and modify the driver structure */
1362 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1363 if (inq_buff == NULL)
1364 goto mem_msg;
1366 /* testing to see if 16-byte CDBs are already being used */
1367 if (h->cciss_read == CCISS_READ_16) {
1368 cciss_read_capacity_16(h->ctlr, drv_index, 1,
1369 &total_size, &block_size);
1370 goto geo_inq;
1373 cciss_read_capacity(ctlr, drv_index, 1,
1374 &total_size, &block_size);
1376 /* if read_capacity returns all F's this volume is >2TB in size */
1377 /* so we switch to 16-byte CDB's for all read/write ops */
1378 if (total_size == 0xFFFFFFFFULL) {
1379 cciss_read_capacity_16(ctlr, drv_index, 1,
1380 &total_size, &block_size);
1381 h->cciss_read = CCISS_READ_16;
1382 h->cciss_write = CCISS_WRITE_16;
1383 } else {
1384 h->cciss_read = CCISS_READ_10;
1385 h->cciss_write = CCISS_WRITE_10;
1387 geo_inq:
1388 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1389 inq_buff, &h->drv[drv_index]);
1391 ++h->num_luns;
1392 disk = h->gendisk[drv_index];
1393 set_capacity(disk, h->drv[drv_index].nr_blocks);
1395 /* if it's the controller it's already added */
1396 if (drv_index) {
1397 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1398 sprintf(disk->disk_name, "cciss/c%dd%d", ctlr, drv_index);
1399 disk->major = h->major;
1400 disk->first_minor = drv_index << NWD_SHIFT;
1401 disk->fops = &cciss_fops;
1402 disk->private_data = &h->drv[drv_index];
1404 /* Set up queue information */
1405 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1407 /* This is a hardware imposed limit. */
1408 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1410 /* This is a limit in the driver and could be eliminated. */
1411 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1413 blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
1415 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1417 disk->queue->queuedata = hba[ctlr];
1419 blk_queue_hardsect_size(disk->queue,
1420 hba[ctlr]->drv[drv_index].block_size);
1422 /* Make sure all queue data is written out before */
1423 /* setting h->drv[drv_index].queue, as setting this */
1424 /* allows the interrupt handler to start the queue */
1425 wmb();
1426 h->drv[drv_index].queue = disk->queue;
1427 add_disk(disk);
1430 freeret:
1431 kfree(inq_buff);
1432 return;
1433 mem_msg:
1434 printk(KERN_ERR "cciss: out of memory\n");
1435 goto freeret;
1438 /* This function will find the first index of the controllers drive array
1439 * that has a -1 for the raid_level and will return that index. This is
1440 * where new drives will be added. If the index to be returned is greater
1441 * than the highest_lun index for the controller then highest_lun is set
1442 * to this new index. If there are no available indexes then -1 is returned.
1444 static int cciss_find_free_drive_index(int ctlr)
1446 int i;
1448 for (i = 0; i < CISS_MAX_LUN; i++) {
1449 if (hba[ctlr]->drv[i].raid_level == -1) {
1450 if (i > hba[ctlr]->highest_lun)
1451 hba[ctlr]->highest_lun = i;
1452 return i;
1455 return -1;
1458 /* This function will add and remove logical drives from the Logical
1459 * drive array of the controller and maintain persistency of ordering
1460 * so that mount points are preserved until the next reboot. This allows
1461 * for the removal of logical drives in the middle of the drive array
1462 * without a re-ordering of those drives.
1463 * INPUT
1464 * h = The controller to perform the operations on
1465 * del_disk = The disk to remove if specified. If the value given
1466 * is NULL then no disk is removed.
1468 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1470 int ctlr = h->ctlr;
1471 int num_luns;
1472 ReportLunData_struct *ld_buff = NULL;
1473 drive_info_struct *drv = NULL;
1474 int return_code;
1475 int listlength = 0;
1476 int i;
1477 int drv_found;
1478 int drv_index = 0;
1479 __u32 lunid = 0;
1480 unsigned long flags;
1482 /* Set busy_configuring flag for this operation */
1483 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1484 if (h->busy_configuring) {
1485 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1486 return -EBUSY;
1488 h->busy_configuring = 1;
1490 /* if del_disk is NULL then we are being called to add a new disk
1491 * and update the logical drive table. If it is not NULL then
1492 * we will check if the disk is in use or not.
1494 if (del_disk != NULL) {
1495 drv = get_drv(del_disk);
1496 drv->busy_configuring = 1;
1497 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1498 return_code = deregister_disk(del_disk, drv, 1);
1499 drv->busy_configuring = 0;
1500 h->busy_configuring = 0;
1501 return return_code;
1502 } else {
1503 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1504 if (!capable(CAP_SYS_RAWIO))
1505 return -EPERM;
1507 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1508 if (ld_buff == NULL)
1509 goto mem_msg;
1511 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1512 sizeof(ReportLunData_struct), 0,
1513 0, 0, TYPE_CMD);
1515 if (return_code == IO_OK) {
1516 listlength =
1517 be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
1518 } else { /* reading number of logical volumes failed */
1519 printk(KERN_WARNING "cciss: report logical volume"
1520 " command failed\n");
1521 listlength = 0;
1522 goto freeret;
1525 num_luns = listlength / 8; /* 8 bytes per entry */
1526 if (num_luns > CISS_MAX_LUN) {
1527 num_luns = CISS_MAX_LUN;
1528 printk(KERN_WARNING "cciss: more luns configured"
1529 " on controller than can be handled by"
1530 " this driver.\n");
1533 /* Compare the controller's drive array to the driver's drive array.
1534 * Check for updates in the drive information and any new drives
1535 * on the controller.
1537 for (i = 0; i < num_luns; i++) {
1538 int j;
1540 drv_found = 0;
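/* Assemble the 32-bit LUN ID from the first four bytes of this
 * ReportLUNs entry (byte 0 is the least significant byte).
 */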
1542 lunid = (0xff &
1543 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1544 lunid |= (0xff &
1545 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1546 lunid |= (0xff &
1547 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1548 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1550 /* Find if the LUN is already in the drive array
1551 * of the controller. If so then update its info
1552 * if it is not in use. If it does not exist then find
1553 * the first free index and add it.
1555 for (j = 0; j <= h->highest_lun; j++) {
1556 if (h->drv[j].LunID == lunid) {
1557 drv_index = j;
1558 drv_found = 1;
1562 /* check if the drive was found already in the array */
1563 if (!drv_found) {
1564 drv_index = cciss_find_free_drive_index(ctlr);
1565 if (drv_index == -1)
1566 goto freeret;
1568 /* Check if the gendisk needs to be allocated */
1569 if (!h->gendisk[drv_index]){
1570 h->gendisk[drv_index] = alloc_disk(1 << NWD_SHIFT);
1571 if (!h->gendisk[drv_index]){
1572 printk(KERN_ERR "cciss: could not allocate new disk %d\n", drv_index);
1573 goto mem_msg;
1577 h->drv[drv_index].LunID = lunid;
1578 cciss_update_drive_info(ctlr, drv_index);
1579 } /* end for */
1580 } /* end else */
1582 freeret:
1583 kfree(ld_buff);
1584 h->busy_configuring = 0;
1585 /* We return -1 here to tell the ACU that we have registered/updated
1586 * all of the drives that we can and to keep it from calling us
1587 * additional times.
1589 return -1;
1590 mem_msg:
1591 printk(KERN_ERR "cciss: out of memory\n");
1592 goto freeret;
1595 /* This function will deregister the disk and its queue from the
1596 * kernel. It must be called with the controller lock held and the
1597 * drv structure's busy_configuring flag set. Its parameters are:
1599 * disk = This is the disk to be deregistered
1600 * drv = This is the drive_info_struct associated with the disk to be
1601 * deregistered. It contains information about the disk used
1602 * by the driver.
1603 * clear_all = This flag determines whether or not the disk information
1604 * is going to be completely cleared out and the highest_lun
1605 * reset. Sometimes we want to clear out information about
1606 * the disk in preparation for re-adding it. In this case
1607 * the highest_lun should be left unchanged and the LunID
1608 * should not be cleared.
1610 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1611 int clear_all)
1613 int i;
1614 ctlr_info_t *h = get_host(disk);
1616 if (!capable(CAP_SYS_RAWIO))
1617 return -EPERM;
1619 /* make sure logical volume is NOT in use */
1620 if (clear_all || (h->gendisk[0] == disk)) {
1621 if (drv->usage_count > 1)
1622 return -EBUSY;
1623 } else if (drv->usage_count > 0)
1624 return -EBUSY;
1626 /* invalidate the devices and deregister the disk. If it is disk
1627 * zero do not deregister it but just zero out its values. This
1628 * allows us to delete disk zero but keep the controller registered.
1630 if (h->gendisk[0] != disk) {
1631 struct request_queue *q = disk->queue;
1632 if (disk->flags & GENHD_FL_UP)
1633 del_gendisk(disk);
1634 if (q) {
1635 blk_cleanup_queue(q);
1636 /* Set drv->queue to NULL so that we do not try
1637 * to call blk_start_queue on this queue in the
1638 * interrupt handler
1640 drv->queue = NULL;
1642 /* If clear_all is set then we are deleting the logical
1643 * drive, not just refreshing its info. For drives
1644 * other than disk 0 we will call put_disk. We do not
1645 * do this for disk 0 as we need it to be able to
1646 * configure the controller.
1648 if (clear_all){
1649 /* This isn't pretty, but we need to find the
1650 * disk in our array and NULL out the pointer.
1651 * This is so that we will call alloc_disk if
1652 * this index is used again later.
1654 for (i=0; i < CISS_MAX_LUN; i++){
1655 if(h->gendisk[i] == disk){
1656 h->gendisk[i] = NULL;
1657 break;
1660 put_disk(disk);
1662 } else {
1663 set_capacity(disk, 0);
1666 --h->num_luns;
1667 /* zero out the disk size info */
1668 drv->nr_blocks = 0;
1669 drv->block_size = 0;
1670 drv->heads = 0;
1671 drv->sectors = 0;
1672 drv->cylinders = 0;
1673 drv->raid_level = -1; /* This can be used as a flag variable to
1674 * indicate that this element of the drive
1675 * array is free.
1678 if (clear_all) {
1679 /* check to see if it was the last disk */
1680 if (drv == h->drv + h->highest_lun) {
1681 /* if so, find the new highest lun */
1682 int i, newhighest = -1;
1683 for (i = 0; i < h->highest_lun; i++) {
1684 /* if the disk has size > 0, it is available */
1685 if (h->drv[i].heads)
1686 newhighest = i;
1688 h->highest_lun = newhighest;
1691 drv->LunID = 0;
1693 return 0;
1696 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
1697 1: address logical volume log_unit,
1698 2: periph device address is scsi3addr */
1699 unsigned int log_unit, __u8 page_code,
1700 unsigned char *scsi3addr, int cmd_type)
1702 ctlr_info_t *h = hba[ctlr];
1703 u64bit buff_dma_handle;
1704 int status = IO_OK;
1706 c->cmd_type = CMD_IOCTL_PEND;
1707 c->Header.ReplyQueue = 0;
1708 if (buff != NULL) {
1709 c->Header.SGList = 1;
1710 c->Header.SGTotal = 1;
1711 } else {
1712 c->Header.SGList = 0;
1713 c->Header.SGTotal = 0;
1715 c->Header.Tag.lower = c->busaddr;
1717 c->Request.Type.Type = cmd_type;
1718 if (cmd_type == TYPE_CMD) {
1719 switch (cmd) {
1720 case CISS_INQUIRY:
1721 /* If the logical unit number is 0 then this is going
1722 to the controller, so it's a physical command:
1723 mode = 0, target = 0, and we have nothing to write.
1724 Otherwise, if use_unit_num == 1,
1725 mode = 1 (volume set addressing), target = LUNID;
1726 otherwise, if use_unit_num == 2,
1727 mode = 0 (periph dev addr), target = scsi3addr */
1728 if (use_unit_num == 1) {
1729 c->Header.LUN.LogDev.VolId =
1730 h->drv[log_unit].LunID;
1731 c->Header.LUN.LogDev.Mode = 1;
1732 } else if (use_unit_num == 2) {
1733 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
1735 c->Header.LUN.LogDev.Mode = 0;
1737 /* are we trying to read a vital product page */
1738 if (page_code != 0) {
1739 c->Request.CDB[1] = 0x01;
1740 c->Request.CDB[2] = page_code;
1742 c->Request.CDBLen = 6;
1743 c->Request.Type.Attribute = ATTR_SIMPLE;
1744 c->Request.Type.Direction = XFER_READ;
1745 c->Request.Timeout = 0;
1746 c->Request.CDB[0] = CISS_INQUIRY;
1747 c->Request.CDB[4] = size & 0xFF;
1748 break;
1749 case CISS_REPORT_LOG:
1750 case CISS_REPORT_PHYS:
1751 /* Talking to the controller, so it's a physical command:
1752 mode = 00, target = 0. Nothing to write.
1754 c->Request.CDBLen = 12;
1755 c->Request.Type.Attribute = ATTR_SIMPLE;
1756 c->Request.Type.Direction = XFER_READ;
1757 c->Request.Timeout = 0;
1758 c->Request.CDB[0] = cmd;
1759 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1760 c->Request.CDB[7] = (size >> 16) & 0xFF;
1761 c->Request.CDB[8] = (size >> 8) & 0xFF;
1762 c->Request.CDB[9] = size & 0xFF;
1763 break;
1765 case CCISS_READ_CAPACITY:
1766 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1767 c->Header.LUN.LogDev.Mode = 1;
1768 c->Request.CDBLen = 10;
1769 c->Request.Type.Attribute = ATTR_SIMPLE;
1770 c->Request.Type.Direction = XFER_READ;
1771 c->Request.Timeout = 0;
1772 c->Request.CDB[0] = cmd;
1773 break;
1774 case CCISS_READ_CAPACITY_16:
1775 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1776 c->Header.LUN.LogDev.Mode = 1;
1777 c->Request.CDBLen = 16;
1778 c->Request.Type.Attribute = ATTR_SIMPLE;
1779 c->Request.Type.Direction = XFER_READ;
1780 c->Request.Timeout = 0;
1781 c->Request.CDB[0] = cmd;
1782 c->Request.CDB[1] = 0x10;
1783 c->Request.CDB[10] = (size >> 24) & 0xFF;
1784 c->Request.CDB[11] = (size >> 16) & 0xFF;
1785 c->Request.CDB[12] = (size >> 8) & 0xFF;
1786 c->Request.CDB[13] = size & 0xFF;
1787 c->Request.Timeout = 0;
1788 c->Request.CDB[0] = cmd;
1789 break;
1790 case CCISS_CACHE_FLUSH:
1791 c->Request.CDBLen = 12;
1792 c->Request.Type.Attribute = ATTR_SIMPLE;
1793 c->Request.Type.Direction = XFER_WRITE;
1794 c->Request.Timeout = 0;
1795 c->Request.CDB[0] = BMIC_WRITE;
1796 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1797 break;
1798 default:
1799 printk(KERN_WARNING
1800 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1801 return IO_ERROR;
1803 } else if (cmd_type == TYPE_MSG) {
1804 switch (cmd) {
1805 case 0: /* ABORT message */
1806 c->Request.CDBLen = 12;
1807 c->Request.Type.Attribute = ATTR_SIMPLE;
1808 c->Request.Type.Direction = XFER_WRITE;
1809 c->Request.Timeout = 0;
1810 c->Request.CDB[0] = cmd; /* abort */
1811 c->Request.CDB[1] = 0; /* abort a command */
1812 /* buff contains the tag of the command to abort */
1813 memcpy(&c->Request.CDB[4], buff, 8);
1814 break;
1815 case 1: /* RESET message */
1816 c->Request.CDBLen = 12;
1817 c->Request.Type.Attribute = ATTR_SIMPLE;
1818 c->Request.Type.Direction = XFER_WRITE;
1819 c->Request.Timeout = 0;
1820 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1821 c->Request.CDB[0] = cmd; /* reset */
1822 c->Request.CDB[1] = 0x04; /* reset a LUN */
1823 break;
1824 case 3: /* No-Op message */
1825 c->Request.CDBLen = 1;
1826 c->Request.Type.Attribute = ATTR_SIMPLE;
1827 c->Request.Type.Direction = XFER_WRITE;
1828 c->Request.Timeout = 0;
1829 c->Request.CDB[0] = cmd;
1830 break;
1831 default:
1832 printk(KERN_WARNING
1833 "cciss%d: unknown message type %d\n", ctlr, cmd);
1834 return IO_ERROR;
1836 } else {
1837 printk(KERN_WARNING
1838 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1839 return IO_ERROR;
1841 /* Fill in the scatter gather information */
1842 if (size > 0) {
1843 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1844 buff, size,
1845 PCI_DMA_BIDIRECTIONAL);
1846 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1847 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1848 c->SG[0].Len = size;
1849 c->SG[0].Ext = 0; /* we are not chaining */
1851 return status;
1854 static int sendcmd_withirq(__u8 cmd,
1855 int ctlr,
1856 void *buff,
1857 size_t size,
1858 unsigned int use_unit_num,
1859 unsigned int log_unit, __u8 page_code, int cmd_type)
1861 ctlr_info_t *h = hba[ctlr];
1862 CommandList_struct *c;
1863 u64bit buff_dma_handle;
1864 unsigned long flags;
1865 int return_status;
1866 DECLARE_COMPLETION_ONSTACK(wait);
1868 if ((c = cmd_alloc(h, 0)) == NULL)
1869 return -ENOMEM;
1870 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1871 log_unit, page_code, NULL, cmd_type);
1872 if (return_status != IO_OK) {
1873 cmd_free(h, c, 0);
1874 return return_status;
1876 resend_cmd2:
1877 c->waiting = &wait;
1879 /* Put the request on the tail of the queue and send it */
1880 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1881 addQ(&h->reqQ, c);
1882 h->Qdepth++;
1883 start_io(h);
1884 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1886 wait_for_completion(&wait);
1888 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
1889 switch (c->err_info->CommandStatus) {
1890 case CMD_TARGET_STATUS:
1891 printk(KERN_WARNING "cciss: cmd %p has "
1892 " completed with errors\n", c);
1893 if (c->err_info->ScsiStatus) {
1894 printk(KERN_WARNING "cciss: cmd %p "
1895 "has SCSI Status = %x\n",
1896 c, c->err_info->ScsiStatus);
1899 break;
1900 case CMD_DATA_UNDERRUN:
1901 case CMD_DATA_OVERRUN:
1902 /* expected for inquiry and report LUN commands */
1903 break;
1904 case CMD_INVALID:
1905 printk(KERN_WARNING "cciss: Cmd %p is "
1906 "reported invalid\n", c);
1907 return_status = IO_ERROR;
1908 break;
1909 case CMD_PROTOCOL_ERR:
1910 printk(KERN_WARNING "cciss: cmd %p has "
1911 "protocol error \n", c);
1912 return_status = IO_ERROR;
1913 break;
1914 case CMD_HARDWARE_ERR:
1915 printk(KERN_WARNING "cciss: cmd %p had "
1916 " hardware error\n", c);
1917 return_status = IO_ERROR;
1918 break;
1919 case CMD_CONNECTION_LOST:
1920 printk(KERN_WARNING "cciss: cmd %p had "
1921 "connection lost\n", c);
1922 return_status = IO_ERROR;
1923 break;
1924 case CMD_ABORTED:
1925 printk(KERN_WARNING "cciss: cmd %p was "
1926 "aborted\n", c);
1927 return_status = IO_ERROR;
1928 break;
1929 case CMD_ABORT_FAILED:
1930 printk(KERN_WARNING "cciss: cmd %p reports "
1931 "abort failed\n", c);
1932 return_status = IO_ERROR;
1933 break;
1934 case CMD_UNSOLICITED_ABORT:
1935 printk(KERN_WARNING
1936 "cciss%d: unsolicited abort %p\n", ctlr, c);
1937 if (c->retry_count < MAX_CMD_RETRIES) {
1938 printk(KERN_WARNING
1939 "cciss%d: retrying %p\n", ctlr, c);
1940 c->retry_count++;
1941 /* erase the old error information */
1942 memset(c->err_info, 0,
1943 sizeof(ErrorInfo_struct));
1944 return_status = IO_OK;
1945 INIT_COMPLETION(wait);
1946 goto resend_cmd2;
1948 return_status = IO_ERROR;
1949 break;
1950 default:
1951 printk(KERN_WARNING "cciss: cmd %p returned "
1952 "unknown status %x\n", c,
1953 c->err_info->CommandStatus);
1954 return_status = IO_ERROR;
1957 /* unlock the buffers from DMA */
1958 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1959 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1960 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
1961 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1962 cmd_free(h, c, 0);
1963 return return_status;
1966 static void cciss_geometry_inquiry(int ctlr, int logvol,
1967 int withirq, sector_t total_size,
1968 unsigned int block_size,
1969 InquiryData_struct *inq_buff,
1970 drive_info_struct *drv)
1972 int return_code;
1973 unsigned long t;
1975 memset(inq_buff, 0, sizeof(InquiryData_struct));
1976 if (withirq)
1977 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1978 inq_buff, sizeof(*inq_buff), 1,
1979 logvol, 0xC1, TYPE_CMD);
1980 else
1981 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1982 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
1983 TYPE_CMD);
1984 if (return_code == IO_OK) {
1985 if (inq_buff->data_byte[8] == 0xFF) {
1986 printk(KERN_WARNING
1987 "cciss: reading geometry failed, volume "
1988 "does not support reading geometry\n");
1989 drv->heads = 255;
1990 drv->sectors = 32; // Sectors per track
1991 drv->cylinders = total_size + 1;
1992 drv->raid_level = RAID_UNKNOWN;
1993 } else {
1994 drv->heads = inq_buff->data_byte[6];
1995 drv->sectors = inq_buff->data_byte[7];
1996 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1997 drv->cylinders += inq_buff->data_byte[5];
1998 drv->raid_level = inq_buff->data_byte[8];
2000 drv->block_size = block_size;
2001 drv->nr_blocks = total_size + 1;
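/*
 * Recompute cylinders from the total block count so that
 * heads * sectors * cylinders covers the whole volume:
 * cylinders = ceil((total_size + 1) / (heads * sectors)),
 * using sector_div() because total_size may be a 64-bit sector_t.
 */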
2002 t = drv->heads * drv->sectors;
2003 if (t > 1) {
2004 sector_t real_size = total_size + 1;
2005 unsigned long rem = sector_div(real_size, t);
2006 if (rem)
2007 real_size++;
2008 drv->cylinders = real_size;
2010 } else { /* Get geometry failed */
2011 printk(KERN_WARNING "cciss: reading geometry failed\n");
2013 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
2014 drv->heads, drv->sectors, drv->cylinders);
2017 static void
2018 cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
2019 unsigned int *block_size)
2021 ReadCapdata_struct *buf;
2022 int return_code;
2024 buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
2025 if (!buf) {
2026 printk(KERN_WARNING "cciss: out of memory\n");
2027 return;
2030 if (withirq)
2031 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
2032 ctlr, buf, sizeof(ReadCapdata_struct),
2033 1, logvol, 0, TYPE_CMD);
2034 else
2035 return_code = sendcmd(CCISS_READ_CAPACITY,
2036 ctlr, buf, sizeof(ReadCapdata_struct),
2037 1, logvol, 0, NULL, TYPE_CMD);
2038 if (return_code == IO_OK) {
2039 *total_size = be32_to_cpu(*(__be32 *) buf->total_size);
2040 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2041 } else { /* read capacity command failed */
2042 printk(KERN_WARNING "cciss: read capacity failed\n");
2043 *total_size = 0;
2044 *block_size = BLOCK_SIZE;
2046 if (*total_size != 0)
2047 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2048 (unsigned long long)*total_size+1, *block_size);
2049 kfree(buf);
2052 static void
2053 cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
2055 ReadCapdata_struct_16 *buf;
2056 int return_code;
2058 buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2059 if (!buf) {
2060 printk(KERN_WARNING "cciss: out of memory\n");
2061 return;
2064 if (withirq) {
2065 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2066 ctlr, buf, sizeof(ReadCapdata_struct_16),
2067 1, logvol, 0, TYPE_CMD);
2069 else {
2070 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2071 ctlr, buf, sizeof(ReadCapdata_struct_16),
2072 1, logvol, 0, NULL, TYPE_CMD);
2074 if (return_code == IO_OK) {
2075 *total_size = be64_to_cpu(*(__be64 *) buf->total_size);
2076 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2077 } else { /* read capacity command failed */
2078 printk(KERN_WARNING "cciss: read capacity failed\n");
2079 *total_size = 0;
2080 *block_size = BLOCK_SIZE;
2082 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2083 (unsigned long long)*total_size+1, *block_size);
2084 kfree(buf);
2087 static int cciss_revalidate(struct gendisk *disk)
2089 ctlr_info_t *h = get_host(disk);
2090 drive_info_struct *drv = get_drv(disk);
2091 int logvol;
2092 int FOUND = 0;
2093 unsigned int block_size;
2094 sector_t total_size;
2095 InquiryData_struct *inq_buff = NULL;
2097 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2098 if (h->drv[logvol].LunID == drv->LunID) {
2099 FOUND = 1;
2100 break;
2104 if (!FOUND)
2105 return 1;
2107 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2108 if (inq_buff == NULL) {
2109 printk(KERN_WARNING "cciss: out of memory\n");
2110 return 1;
2112 if (h->cciss_read == CCISS_READ_10) {
2113 cciss_read_capacity(h->ctlr, logvol, 1,
2114 &total_size, &block_size);
2115 } else {
2116 cciss_read_capacity_16(h->ctlr, logvol, 1,
2117 &total_size, &block_size);
2119 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2120 inq_buff, drv);
2122 blk_queue_hardsect_size(drv->queue, drv->block_size);
2123 set_capacity(disk, drv->nr_blocks);
2125 kfree(inq_buff);
2126 return 0;
2130 * Poll for a command to complete.
2131 * The memory-mapped FIFO is polled for the completion.
2132 * Used only at init time; interrupts from the HBA are disabled.
2134 static unsigned long pollcomplete(int ctlr)
2136 unsigned long done;
2137 int i;
2139 /* Wait (up to 20 seconds) for a command to complete */
2141 for (i = 20 * HZ; i > 0; i--) {
2142 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2143 if (done == FIFO_EMPTY)
2144 schedule_timeout_uninterruptible(1);
2145 else
2146 return done;
2148 /* Invalid address to tell caller we ran out of time */
2149 return 1;
2152 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2154 /* We get in here if sendcmd() is polling for completions
2155 and gets some command back that it wasn't expecting --
2156 something other than that which it just sent down.
2157 Ordinarily, that shouldn't happen, but it can happen when
2158 the scsi tape stuff gets into error handling mode, and
2159 starts using sendcmd() to try to abort commands and
2160 reset tape drives. In that case, sendcmd may pick up
2161 completions of commands that were sent to logical drives
2162 through the block i/o system, or cciss ioctls completing, etc.
2163 In that case, we need to save those completions for later
2164 processing by the interrupt handler.
2167 #ifdef CONFIG_CISS_SCSI_TAPE
2168 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2170 /* If it's not the scsi tape stuff doing error handling, (abort */
2171 /* or reset) then we don't expect anything weird. */
2172 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2173 #endif
2174 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2175 "Invalid command list address returned! (%lx)\n",
2176 ctlr, complete);
2177 /* not much we can do. */
2178 #ifdef CONFIG_CISS_SCSI_TAPE
2179 return 1;
2182 /* We've sent down an abort or reset, but something else
2183 has completed */
2184 if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
2185 /* Uh oh. No room to save it for later... */
2186 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2187 "reject list overflow, command lost!\n", ctlr);
2188 return 1;
2190 /* Save it for later */
2191 srl->complete[srl->ncompletions] = complete;
2192 srl->ncompletions++;
2193 #endif
2194 return 0;
2198 * Send a command to the controller, and wait for it to complete.
2199 * Only used at init time.
2201 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2202 1: address logical volume log_unit,
2203 2: periph device address is scsi3addr */
2204 unsigned int log_unit,
2205 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2207 CommandList_struct *c;
2208 int i;
2209 unsigned long complete;
2210 ctlr_info_t *info_p = hba[ctlr];
2211 u64bit buff_dma_handle;
2212 int status, done = 0;
2214 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2215 printk(KERN_WARNING "cciss: unable to get memory\n");
2216 return IO_ERROR;
2218 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2219 log_unit, page_code, scsi3addr, cmd_type);
2220 if (status != IO_OK) {
2221 cmd_free(info_p, c, 1);
2222 return status;
2224 resend_cmd1:
2226 /* Disable interrupt */
2228 #ifdef CCISS_DEBUG
2229 printk(KERN_DEBUG "cciss: turning intr off\n");
2230 #endif /* CCISS_DEBUG */
2231 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2233 /* Make sure there is room in the command FIFO */
2234 /* Actually it should be completely empty at this time */
2235 /* unless we are in here doing error handling for the scsi */
2236 /* tape side of the driver. */
2237 for (i = 200000; i > 0; i--) {
2238 /* if fifo isn't full go */
2239 if (!(info_p->access.fifo_full(info_p))) {
2241 break;
2243 udelay(10);
2244 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2245 " waiting!\n", ctlr);
2248 /* Send the cmd */
2250 info_p->access.submit_command(info_p, c);
2251 done = 0;
2252 do {
2253 complete = pollcomplete(ctlr);
2255 #ifdef CCISS_DEBUG
2256 printk(KERN_DEBUG "cciss: command completed\n");
2257 #endif /* CCISS_DEBUG */
2259 if (complete == 1) {
2260 printk(KERN_WARNING
2261 "cciss cciss%d: SendCmd Timeout out, "
2262 "No command list address returned!\n", ctlr);
2263 status = IO_ERROR;
2264 done = 1;
2265 break;
2268 /* This will need to change for direct lookup completions */
2269 if ((complete & CISS_ERROR_BIT)
2270 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2271 /* if data overrun or underrun on a Report command,
2272 ignore it */
2274 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2275 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2276 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2277 ((c->err_info->CommandStatus ==
2278 CMD_DATA_OVERRUN) ||
2279 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2280 )) {
2281 complete = c->busaddr;
2282 } else {
2283 if (c->err_info->CommandStatus ==
2284 CMD_UNSOLICITED_ABORT) {
2285 printk(KERN_WARNING "cciss%d: "
2286 "unsolicited abort %p\n",
2287 ctlr, c);
2288 if (c->retry_count < MAX_CMD_RETRIES) {
2289 printk(KERN_WARNING
2290 "cciss%d: retrying %p\n",
2291 ctlr, c);
2292 c->retry_count++;
2293 /* erase the old error */
2294 /* information */
2295 memset(c->err_info, 0,
2296 sizeof
2297 (ErrorInfo_struct));
2298 goto resend_cmd1;
2299 } else {
2300 printk(KERN_WARNING
2301 "cciss%d: retried %p too "
2302 "many times\n", ctlr, c);
2303 status = IO_ERROR;
2304 goto cleanup1;
2306 } else if (c->err_info->CommandStatus ==
2307 CMD_UNABORTABLE) {
2308 printk(KERN_WARNING
2309 "cciss%d: command could not be aborted.\n",
2310 ctlr);
2311 status = IO_ERROR;
2312 goto cleanup1;
2314 printk(KERN_WARNING "cciss cciss%d: sendcmd"
2315 " Error %x\n", ctlr,
2316 c->err_info->CommandStatus);
2317 printk(KERN_WARNING "cciss cciss%d: sendcmd"
2318 " offensive info\n"
2319 " size %x\n num %x value %x\n",
2320 ctlr,
2321 c->err_info->MoreErrInfo.Invalid_Cmd.
2322 offense_size,
2323 c->err_info->MoreErrInfo.Invalid_Cmd.
2324 offense_num,
2325 c->err_info->MoreErrInfo.Invalid_Cmd.
2326 offense_value);
2327 status = IO_ERROR;
2328 goto cleanup1;
2331 /* This will need changing for direct lookup completions */
2332 if (complete != c->busaddr) {
2333 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2334 BUG(); /* we are pretty much hosed if we get here. */
2336 continue;
2337 } else
2338 done = 1;
2339 } while (!done);
2341 cleanup1:
2342 /* unlock the data buffer from DMA */
2343 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2344 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2345 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2346 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2347 #ifdef CONFIG_CISS_SCSI_TAPE
2348 /* if we saved some commands for later, process them now. */
2349 if (info_p->scsi_rejects.ncompletions > 0)
2350 do_cciss_intr(0, info_p);
2351 #endif
2352 cmd_free(info_p, c, 1);
2353 return status;
2357 * Map (physical) PCI mem into (virtual) kernel space
2359 static void __iomem *remap_pci_mem(ulong base, ulong size)
2361 ulong page_base = ((ulong) base) & PAGE_MASK;
2362 ulong page_offs = ((ulong) base) - page_base;
2363 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2365 return page_remapped ? (page_remapped + page_offs) : NULL;
2369 * Takes jobs off the Q and sends them to the hardware, then puts them on
2370 * the completion Q to wait for completion.
2372 static void start_io(ctlr_info_t *h)
2374 CommandList_struct *c;
2376 while ((c = h->reqQ) != NULL) {
2377 /* can't do anything if fifo is full */
2378 if ((h->access.fifo_full(h))) {
2379 printk(KERN_WARNING "cciss: fifo full\n");
2380 break;
2383 /* Get the first entry from the Request Q */
2384 removeQ(&(h->reqQ), c);
2385 h->Qdepth--;
2387 /* Tell the controller execute command */
2388 h->access.submit_command(h, c);
2390 /* Put job onto the completed Q */
2391 addQ(&(h->cmpQ), c);
2395 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2396 /* Zeros out the error record and then resends the command back */
2397 /* to the controller */
2398 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2400 /* erase the old error information */
2401 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2403 /* add it to software queue and then send it to the controller */
2404 addQ(&(h->reqQ), c);
2405 h->Qdepth++;
2406 if (h->Qdepth > h->maxQsinceinit)
2407 h->maxQsinceinit = h->Qdepth;
2409 start_io(h);
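/*
 * Pack the four result bytes into the 32-bit value stored in rq->errors:
 * SCSI status in bits 0-7, msg byte in bits 8-15, host byte in bits 16-23
 * and driver byte in bits 24-31 (the inverse of the status_byte()/
 * msg_byte()/host_byte()/driver_byte() macros in scsi.h).
 */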
2412 static inline unsigned int make_status_bytes(unsigned int scsi_status_byte,
2413 unsigned int msg_byte, unsigned int host_byte,
2414 unsigned int driver_byte)
2416 /* inverse of macros in scsi.h */
2417 return (scsi_status_byte & 0xff) |
2418 ((msg_byte & 0xff) << 8) |
2419 ((host_byte & 0xff) << 16) |
2420 ((driver_byte & 0xff) << 24);
2423 static inline int evaluate_target_status(CommandList_struct *cmd)
2425 unsigned char sense_key;
2426 unsigned char status_byte, msg_byte, host_byte, driver_byte;
2427 int error_value;
2429 /* If we get in here, it means we got "target status", that is, scsi status */
2430 status_byte = cmd->err_info->ScsiStatus;
2431 driver_byte = DRIVER_OK;
2432 msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */
2434 if (blk_pc_request(cmd->rq))
2435 host_byte = DID_PASSTHROUGH;
2436 else
2437 host_byte = DID_OK;
2439 error_value = make_status_bytes(status_byte, msg_byte,
2440 host_byte, driver_byte);
2442 if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
2443 if (!blk_pc_request(cmd->rq))
2444 printk(KERN_WARNING "cciss: cmd %p "
2445 "has SCSI Status 0x%x\n",
2446 cmd, cmd->err_info->ScsiStatus);
2447 return error_value;
2450 /* check the sense key */
2451 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2452 /* no status or recovered error */
2453 if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq))
2454 error_value = 0;
2456 if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
2457 if (error_value != 0)
2458 printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
2459 " sense key = 0x%x\n", cmd, sense_key);
2460 return error_value;
2463 /* SG_IO or similar, copy sense data back */
2464 if (cmd->rq->sense) {
2465 if (cmd->rq->sense_len > cmd->err_info->SenseLen)
2466 cmd->rq->sense_len = cmd->err_info->SenseLen;
2467 memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
2468 cmd->rq->sense_len);
2469 } else
2470 cmd->rq->sense_len = 0;
2472 return error_value;
2475 /* checks the status of the job and calls complete buffers to mark all
2476 * buffers for the completed job. Note that this function does not need
2477 * to hold the hba/queue lock.
2479 static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2480 int timeout)
2482 int retry_cmd = 0;
2483 struct request *rq = cmd->rq;
2485 rq->errors = 0;
2487 if (timeout)
2488 rq->errors = make_status_bytes(0, 0, 0, DRIVER_TIMEOUT);
2490 if (cmd->err_info->CommandStatus == 0) /* no error has occurred */
2491 goto after_error_processing;
2493 switch (cmd->err_info->CommandStatus) {
2494 case CMD_TARGET_STATUS:
2495 rq->errors = evaluate_target_status(cmd);
2496 break;
2497 case CMD_DATA_UNDERRUN:
2498 if (blk_fs_request(cmd->rq)) {
2499 printk(KERN_WARNING "cciss: cmd %p has"
2500 " completed with data underrun "
2501 "reported\n", cmd);
2502 cmd->rq->data_len = cmd->err_info->ResidualCnt;
2504 break;
2505 case CMD_DATA_OVERRUN:
2506 if (blk_fs_request(cmd->rq))
2507 printk(KERN_WARNING "cciss: cmd %p has"
2508 " completed with data overrun "
2509 "reported\n", cmd);
2510 break;
2511 case CMD_INVALID:
2512 printk(KERN_WARNING "cciss: cmd %p is "
2513 "reported invalid\n", cmd);
2514 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2515 cmd->err_info->CommandStatus, DRIVER_OK,
2516 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2517 break;
2518 case CMD_PROTOCOL_ERR:
2519 printk(KERN_WARNING "cciss: cmd %p has "
2520 "protocol error \n", cmd);
2521 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2522 cmd->err_info->CommandStatus, DRIVER_OK,
2523 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2524 break;
2525 case CMD_HARDWARE_ERR:
2526 printk(KERN_WARNING "cciss: cmd %p had "
2527 "hardware error\n", cmd);
2528 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2529 cmd->err_info->CommandStatus, DRIVER_OK,
2530 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2531 break;
2532 case CMD_CONNECTION_LOST:
2533 printk(KERN_WARNING "cciss: cmd %p had "
2534 "connection lost\n", cmd);
2535 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2536 cmd->err_info->CommandStatus, DRIVER_OK,
2537 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2538 break;
2539 case CMD_ABORTED:
2540 printk(KERN_WARNING "cciss: cmd %p was "
2541 "aborted\n", cmd);
2542 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2543 cmd->err_info->CommandStatus, DRIVER_OK,
2544 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
2545 break;
2546 case CMD_ABORT_FAILED:
2547 printk(KERN_WARNING "cciss: cmd %p reports "
2548 "abort failed\n", cmd);
2549 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2550 cmd->err_info->CommandStatus, DRIVER_OK,
2551 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2552 break;
2553 case CMD_UNSOLICITED_ABORT:
2554 printk(KERN_WARNING "cciss%d: unsolicited "
2555 "abort %p\n", h->ctlr, cmd);
2556 if (cmd->retry_count < MAX_CMD_RETRIES) {
2557 retry_cmd = 1;
2558 printk(KERN_WARNING
2559 "cciss%d: retrying %p\n", h->ctlr, cmd);
2560 cmd->retry_count++;
2561 } else
2562 printk(KERN_WARNING
2563 "cciss%d: %p retried too "
2564 "many times\n", h->ctlr, cmd);
2565 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2566 cmd->err_info->CommandStatus, DRIVER_OK,
2567 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
2568 break;
2569 case CMD_TIMEOUT:
2570 printk(KERN_WARNING "cciss: cmd %p timed out\n", cmd);
2571 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2572 cmd->err_info->CommandStatus, DRIVER_OK,
2573 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2574 break;
2575 default:
2576 printk(KERN_WARNING "cciss: cmd %p returned "
2577 "unknown status %x\n", cmd,
2578 cmd->err_info->CommandStatus);
2579 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2580 cmd->err_info->CommandStatus, DRIVER_OK,
2581 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2584 after_error_processing:
2586 /* We need to return this command */
2587 if (retry_cmd) {
2588 resend_cciss_cmd(h, cmd);
2589 return;
2591 cmd->rq->completion_data = cmd;
2592 blk_complete_request(cmd->rq);
2596 * Get a request and submit it to the controller.
2598 static void do_cciss_request(struct request_queue *q)
2600 ctlr_info_t *h = q->queuedata;
2601 CommandList_struct *c;
2602 sector_t start_blk;
2603 int seg;
2604 struct request *creq;
2605 u64bit temp64;
2606 struct scatterlist tmp_sg[MAXSGENTRIES];
2607 drive_info_struct *drv;
2608 int i, dir;
2610 /* We call start_io here in case there is a command waiting on the
2611 * queue that has not been sent. */
2613 if (blk_queue_plugged(q))
2614 goto startio;
2616 queue:
2617 creq = elv_next_request(q);
2618 if (!creq)
2619 goto startio;
2621 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2623 if ((c = cmd_alloc(h, 1)) == NULL)
2624 goto full;
2626 blkdev_dequeue_request(creq);
2628 spin_unlock_irq(q->queue_lock);
2630 c->cmd_type = CMD_RWREQ;
2631 c->rq = creq;
2633 /* fill in the request */
2634 drv = creq->rq_disk->private_data;
2635 c->Header.ReplyQueue = 0; // unused in simple mode
2636 /* got command from pool, so use the command block index instead */
2637 /* for direct lookups. */
2638 /* The first 2 bits are reserved for controller error reporting. */
2639 c->Header.Tag.lower = (c->cmdindex << 3);
2640 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
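/*
 * Tag layout: bits 1:0 are reserved for controller error reporting,
 * bit 2 marks a direct-lookup tag, and bits 31:3 carry the command pool
 * index, so do_cciss_intr() can locate the command with (tag >> 3)
 * instead of walking the completion queue.
 */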
2641 c->Header.LUN.LogDev.VolId = drv->LunID;
2642 c->Header.LUN.LogDev.Mode = 1;
2643 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2644 c->Request.Type.Type = TYPE_CMD; // It is a command.
2645 c->Request.Type.Attribute = ATTR_SIMPLE;
2646 c->Request.Type.Direction =
2647 (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
2648 c->Request.Timeout = 0; // Don't time out
2649 c->Request.CDB[0] =
2650 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2651 start_blk = creq->sector;
2652 #ifdef CCISS_DEBUG
2653 printk(KERN_DEBUG "cciss: sector=%d nr_sectors=%d\n", (int)creq->sector,
2654 (int)creq->nr_sectors);
2655 #endif /* CCISS_DEBUG */
2657 sg_init_table(tmp_sg, MAXSGENTRIES);
2658 seg = blk_rq_map_sg(q, creq, tmp_sg);
2660 /* get the DMA records for the setup */
2661 if (c->Request.Type.Direction == XFER_READ)
2662 dir = PCI_DMA_FROMDEVICE;
2663 else
2664 dir = PCI_DMA_TODEVICE;
2666 for (i = 0; i < seg; i++) {
2667 c->SG[i].Len = tmp_sg[i].length;
2668 temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
2669 tmp_sg[i].offset,
2670 tmp_sg[i].length, dir);
2671 c->SG[i].Addr.lower = temp64.val32.lower;
2672 c->SG[i].Addr.upper = temp64.val32.upper;
2673 c->SG[i].Ext = 0; // we are not chaining
2675 /* track how many SG entries we are using */
2676 if (seg > h->maxSG)
2677 h->maxSG = seg;
2679 #ifdef CCISS_DEBUG
2680 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
2681 creq->nr_sectors, seg);
2682 #endif /* CCISS_DEBUG */
2684 c->Header.SGList = c->Header.SGTotal = seg;
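/*
 * The starting LBA and sector count are encoded big-endian into the CDB
 * below: 10-byte READ/WRITE CDBs while h->cciss_read is CCISS_READ_10,
 * or 16-byte CDBs once the driver has switched to READ/WRITE(16) for
 * volumes larger than 2TB (see cciss_getgeometry()).
 */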
2685 if (likely(blk_fs_request(creq))) {
2686 if(h->cciss_read == CCISS_READ_10) {
2687 c->Request.CDB[1] = 0;
2688 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2689 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2690 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2691 c->Request.CDB[5] = start_blk & 0xff;
2692 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2693 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2694 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2695 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2696 } else {
2697 u32 upper32 = upper_32_bits(start_blk);
2699 c->Request.CDBLen = 16;
2700 c->Request.CDB[1]= 0;
2701 c->Request.CDB[2]= (upper32 >> 24) & 0xff; //MSB
2702 c->Request.CDB[3]= (upper32 >> 16) & 0xff;
2703 c->Request.CDB[4]= (upper32 >> 8) & 0xff;
2704 c->Request.CDB[5]= upper32 & 0xff;
2705 c->Request.CDB[6]= (start_blk >> 24) & 0xff;
2706 c->Request.CDB[7]= (start_blk >> 16) & 0xff;
2707 c->Request.CDB[8]= (start_blk >> 8) & 0xff;
2708 c->Request.CDB[9]= start_blk & 0xff;
2709 c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
2710 c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
2711 c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
2712 c->Request.CDB[13]= creq->nr_sectors & 0xff;
2713 c->Request.CDB[14] = c->Request.CDB[15] = 0;
2715 } else if (blk_pc_request(creq)) {
2716 c->Request.CDBLen = creq->cmd_len;
2717 memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
2718 } else {
2719 printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type);
2720 BUG();
2723 spin_lock_irq(q->queue_lock);
2725 addQ(&(h->reqQ), c);
2726 h->Qdepth++;
2727 if (h->Qdepth > h->maxQsinceinit)
2728 h->maxQsinceinit = h->Qdepth;
2730 goto queue;
2731 full:
2732 blk_stop_queue(q);
2733 startio:
2734 /* We will already have the driver lock here, so there is no need
2735 * to lock it. */
2737 start_io(h);
2740 static inline unsigned long get_next_completion(ctlr_info_t *h)
2742 #ifdef CONFIG_CISS_SCSI_TAPE
2743 /* Any rejects from sendcmd() lying around? Process them first */
2744 if (h->scsi_rejects.ncompletions == 0)
2745 return h->access.command_completed(h);
2746 else {
2747 struct sendcmd_reject_list *srl;
2748 int n;
2749 srl = &h->scsi_rejects;
2750 n = --srl->ncompletions;
2751 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2752 printk("p");
2753 return srl->complete[n];
2755 #else
2756 return h->access.command_completed(h);
2757 #endif
2760 static inline int interrupt_pending(ctlr_info_t *h)
2762 #ifdef CONFIG_CISS_SCSI_TAPE
2763 return (h->access.intr_pending(h)
2764 || (h->scsi_rejects.ncompletions > 0));
2765 #else
2766 return h->access.intr_pending(h);
2767 #endif
2770 static inline long interrupt_not_for_us(ctlr_info_t *h)
2772 #ifdef CONFIG_CISS_SCSI_TAPE
2773 return (((h->access.intr_pending(h) == 0) ||
2774 (h->interrupts_enabled == 0))
2775 && (h->scsi_rejects.ncompletions == 0));
2776 #else
2777 return (((h->access.intr_pending(h) == 0) ||
2778 (h->interrupts_enabled == 0)));
2779 #endif
2782 static irqreturn_t do_cciss_intr(int irq, void *dev_id)
2784 ctlr_info_t *h = dev_id;
2785 CommandList_struct *c;
2786 unsigned long flags;
2787 __u32 a, a1, a2;
2789 if (interrupt_not_for_us(h))
2790 return IRQ_NONE;
2792 /* If there are completed commands in the completion queue,
2793 * we had better do something about it. */
2795 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2796 while (interrupt_pending(h)) {
2797 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
2798 a1 = a;
2799 if ((a & 0x04)) {
2800 a2 = (a >> 3);
2801 if (a2 >= h->nr_cmds) {
2802 printk(KERN_WARNING
2803 "cciss: controller cciss%d failed, stopping.\n",
2804 h->ctlr);
2805 fail_all_cmds(h->ctlr);
2806 return IRQ_HANDLED;
2809 c = h->cmd_pool + a2;
2810 a = c->busaddr;
2812 } else {
2813 a &= ~3;
2814 if ((c = h->cmpQ) == NULL) {
2815 printk(KERN_WARNING
2816 "cciss: Completion of %08x ignored\n",
2817 a1);
2818 continue;
2820 while (c->busaddr != a) {
2821 c = c->next;
2822 if (c == h->cmpQ)
2823 break;
2827 /* If we've found the command, take it off the
2828 * completion Q and free it. */
2830 if (c->busaddr == a) {
2831 removeQ(&h->cmpQ, c);
2832 if (c->cmd_type == CMD_RWREQ) {
2833 complete_command(h, c, 0);
2834 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2835 complete(c->waiting);
2837 # ifdef CONFIG_CISS_SCSI_TAPE
2838 else if (c->cmd_type == CMD_SCSI)
2839 complete_scsi_command(c, 0, a1);
2840 # endif
2841 continue;
2846 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2847 return IRQ_HANDLED;
2851 * We cannot read the structure directly; for portability we must use
2852 * the io functions.
2853 * This is for debug only.
2855 #ifdef CCISS_DEBUG
2856 static void print_cfg_table(CfgTable_struct *tb)
2858 int i;
2859 char temp_name[17];
2861 printk("Controller Configuration information\n");
2862 printk("------------------------------------\n");
2863 for (i = 0; i < 4; i++)
2864 temp_name[i] = readb(&(tb->Signature[i]));
2865 temp_name[4] = '\0';
2866 printk(" Signature = %s\n", temp_name);
2867 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2868 printk(" Transport methods supported = 0x%x\n",
2869 readl(&(tb->TransportSupport)));
2870 printk(" Transport methods active = 0x%x\n",
2871 readl(&(tb->TransportActive)));
2872 printk(" Requested transport Method = 0x%x\n",
2873 readl(&(tb->HostWrite.TransportRequest)));
2874 printk(" Coalesce Interrupt Delay = 0x%x\n",
2875 readl(&(tb->HostWrite.CoalIntDelay)));
2876 printk(" Coalesce Interrupt Count = 0x%x\n",
2877 readl(&(tb->HostWrite.CoalIntCount)));
2878 printk(" Max outstanding commands = %d\n",
2879 readl(&(tb->CmdsOutMax)));
2880 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
2881 for (i = 0; i < 16; i++)
2882 temp_name[i] = readb(&(tb->ServerName[i]));
2883 temp_name[16] = '\0';
2884 printk(" Server Name = %s\n", temp_name);
2885 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
2887 #endif /* CCISS_DEBUG */
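/*
 * Translate a base-address-register offset in PCI config space (e.g.
 * PCI_BASE_ADDRESS_2) into the corresponding pci_resource index, stepping
 * 4 bytes per 32-bit BAR and 8 bytes per 64-bit memory BAR.
 * Returns -1 if no BAR matches the offset.
 */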
2889 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
2891 int i, offset, mem_type, bar_type;
2892 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2893 return 0;
2894 offset = 0;
2895 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2896 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
2897 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2898 offset += 4;
2899 else {
2900 mem_type = pci_resource_flags(pdev, i) &
2901 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2902 switch (mem_type) {
2903 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2904 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2905 offset += 4; /* 32 bit */
2906 break;
2907 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2908 offset += 8;
2909 break;
2910 default: /* reserved in PCI 2.2 */
2911 printk(KERN_WARNING
2912 "Base address is invalid\n");
2913 return -1;
2914 break;
2917 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2918 return i + 1;
2920 return -1;
2923 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
2924 * controllers that are capable. If not, we use IO-APIC mode.
2927 static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
2928 struct pci_dev *pdev, __u32 board_id)
2930 #ifdef CONFIG_PCI_MSI
2931 int err;
2932 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
2933 {0, 2}, {0, 3}
2936 /* Some boards advertise MSI but don't really support it */
2937 if ((board_id == 0x40700E11) ||
2938 (board_id == 0x40800E11) ||
2939 (board_id == 0x40820E11) || (board_id == 0x40830E11))
2940 goto default_int_mode;
2942 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2943 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2944 if (!err) {
2945 c->intr[0] = cciss_msix_entries[0].vector;
2946 c->intr[1] = cciss_msix_entries[1].vector;
2947 c->intr[2] = cciss_msix_entries[2].vector;
2948 c->intr[3] = cciss_msix_entries[3].vector;
2949 c->msix_vector = 1;
2950 return;
2952 if (err > 0) {
2953 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2954 "available\n", err);
2955 goto default_int_mode;
2956 } else {
2957 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2958 err);
2959 goto default_int_mode;
2962 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2963 if (!pci_enable_msi(pdev)) {
2964 c->msi_vector = 1;
2965 } else {
2966 printk(KERN_WARNING "cciss: MSI init failed\n");
2969 default_int_mode:
2970 #endif /* CONFIG_PCI_MSI */
2971 /* if we get here we're going to use the default interrupt mode */
2972 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2973 return;
2976 static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2978 ushort subsystem_vendor_id, subsystem_device_id, command;
2979 __u32 board_id, scratchpad = 0;
2980 __u64 cfg_offset;
2981 __u32 cfg_base_addr;
2982 __u64 cfg_base_addr_index;
2983 int i, err;
2985 /* check to see if controller has been disabled */
2986 /* BEFORE trying to enable it */
2987 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
2988 if (!(command & 0x02)) {
2989 printk(KERN_WARNING
2990 "cciss: controller appears to be disabled\n");
2991 return -ENODEV;
2994 err = pci_enable_device(pdev);
2995 if (err) {
2996 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2997 return err;
3000 err = pci_request_regions(pdev, "cciss");
3001 if (err) {
3002 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
3003 "aborting\n");
3004 return err;
3007 subsystem_vendor_id = pdev->subsystem_vendor;
3008 subsystem_device_id = pdev->subsystem_device;
3009 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
3010 subsystem_vendor_id);
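/*
 * board_id packs the PCI subsystem device ID into the upper 16 bits and
 * the subsystem vendor ID into the lower 16 bits; this value is matched
 * against the products[] table and against quirks such as the P600
 * check (0x3225103C) further down.
 */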
3012 #ifdef CCISS_DEBUG
3013 printk("command = %x\n", command);
3014 printk("irq = %x\n", pdev->irq);
3015 printk("board_id = %x\n", board_id);
3016 #endif /* CCISS_DEBUG */
3018 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
3019 * else we use the IO-APIC interrupt assigned to us by system ROM.
3021 cciss_interrupt_mode(c, pdev, board_id);
3024 /* Memory base addr is the first addr; the second points to the config
3025 * table. */
3028 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
3029 #ifdef CCISS_DEBUG
3030 printk("address 0 = %x\n", c->paddr);
3031 #endif /* CCISS_DEBUG */
3032 c->vaddr = remap_pci_mem(c->paddr, 0x250);
3034 /* Wait for the board to become ready. (PCI hotplug needs this.)
3035 * We poll for up to 120 secs, once per 100ms. */
3036 for (i = 0; i < 1200; i++) {
3037 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
3038 if (scratchpad == CCISS_FIRMWARE_READY)
3039 break;
3040 set_current_state(TASK_INTERRUPTIBLE);
3041 schedule_timeout(HZ / 10); /* wait 100ms */
3043 if (scratchpad != CCISS_FIRMWARE_READY) {
3044 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
3045 err = -ENODEV;
3046 goto err_out_free_res;
3049 /* get the address index number */
3050 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
3051 cfg_base_addr &= (__u32) 0x0000ffff;
3052 #ifdef CCISS_DEBUG
3053 printk("cfg base address = %x\n", cfg_base_addr);
3054 #endif /* CCISS_DEBUG */
3055 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
3056 #ifdef CCISS_DEBUG
3057 printk("cfg base address index = %x\n", cfg_base_addr_index);
3058 #endif /* CCISS_DEBUG */
3059 if (cfg_base_addr_index == -1) {
3060 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
3061 err = -ENODEV;
3062 goto err_out_free_res;
3065 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
3066 #ifdef CCISS_DEBUG
3067 printk("cfg offset = %x\n", cfg_offset);
3068 #endif /* CCISS_DEBUG */
3069 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
3070 cfg_base_addr_index) +
3071 cfg_offset, sizeof(CfgTable_struct));
3072 c->board_id = board_id;
3074 #ifdef CCISS_DEBUG
3075 print_cfg_table(c->cfgtable);
3076 #endif /* CCISS_DEBUG */
3078 for (i = 0; i < ARRAY_SIZE(products); i++) {
3079 if (board_id == products[i].board_id) {
3080 c->product_name = products[i].product_name;
3081 c->access = *(products[i].access);
3082 c->nr_cmds = products[i].nr_cmds;
3083 break;
3086 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
3087 (readb(&c->cfgtable->Signature[1]) != 'I') ||
3088 (readb(&c->cfgtable->Signature[2]) != 'S') ||
3089 (readb(&c->cfgtable->Signature[3]) != 'S')) {
3090 printk(KERN_WARNING "Does not appear to be a valid CISS config table\n");
3091 err = -ENODEV;
3092 goto err_out_free_res;
3094 /* We didn't find the controller in our list. We know the
3095 * signature is valid. If it's an HP device let's try to
3096 * bind to the device and fire it up. Otherwise we bail.
3098 if (i == ARRAY_SIZE(products)) {
3099 if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
3100 c->product_name = products[i-1].product_name;
3101 c->access = *(products[i-1].access);
3102 c->nr_cmds = products[i-1].nr_cmds;
3103 printk(KERN_WARNING "cciss: This is an unknown "
3104 "Smart Array controller.\n"
3105 "cciss: Please update to the latest driver "
3106 "available from www.hp.com.\n");
3107 } else {
3108 printk(KERN_WARNING "cciss: Sorry, I don't know how"
3109 " to access the Smart Array controller %08lx\n"
3110 , (unsigned long)board_id);
3111 err = -ENODEV;
3112 goto err_out_free_res;
3115 #ifdef CONFIG_X86
3117 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3118 __u32 prefetch;
3119 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
3120 prefetch |= 0x100;
3121 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
3123 #endif
3125 /* Disabling DMA prefetch and refetch for the P600.
3126 * An ASIC bug may result in accesses to invalid memory addresses.
3127 * We've disabled prefetch for some time now. Testing with XEN
3128 * kernels revealed a bug in the refetch if dom0 resides on a P600.
3130 if(board_id == 0x3225103C) {
3131 __u32 dma_prefetch;
3132 __u32 dma_refetch;
3133 dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
3134 dma_prefetch |= 0x8000;
3135 writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
3136 pci_read_config_dword(pdev, PCI_COMMAND_PARITY, &dma_refetch);
3137 dma_refetch |= 0x1;
3138 pci_write_config_dword(pdev, PCI_COMMAND_PARITY, dma_refetch);
3141 #ifdef CCISS_DEBUG
3142 printk("Trying to put board into Simple mode\n");
3143 #endif /* CCISS_DEBUG */
3144 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
3145 /* Update the field, and then ring the doorbell */
3146 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
3147 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
3149 /* under certain very rare conditions, this can take a while.
3150 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3151 * as we enter this code.) */
3152 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3153 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3154 break;
3155 /* delay and try again */
3156 set_current_state(TASK_INTERRUPTIBLE);
3157 schedule_timeout(10);
3160 #ifdef CCISS_DEBUG
3161 printk(KERN_DEBUG "I counter got to %d %x\n", i,
3162 readl(c->vaddr + SA5_DOORBELL));
3163 #endif /* CCISS_DEBUG */
3164 #ifdef CCISS_DEBUG
3165 print_cfg_table(c->cfgtable);
3166 #endif /* CCISS_DEBUG */
3168 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3169 printk(KERN_WARNING "cciss: unable to get board into"
3170 " simple mode\n");
3171 err = -ENODEV;
3172 goto err_out_free_res;
3174 return 0;
3176 err_out_free_res:
3178 /* Deliberately omit pci_disable_device(): it does something nasty to
3179 * Smart Array controllers that pci_enable_device does not undo */
3181 pci_release_regions(pdev);
3182 return err;
3186 * Gets information about the local volumes attached to the controller.
3188 static void cciss_getgeometry(int cntl_num)
3190 ReportLunData_struct *ld_buff;
3191 InquiryData_struct *inq_buff;
3192 int return_code;
3193 int i;
3194 int listlength = 0;
3195 __u32 lunid = 0;
3196 unsigned block_size;
3197 sector_t total_size;
3199 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
3200 if (ld_buff == NULL) {
3201 printk(KERN_ERR "cciss: out of memory\n");
3202 return;
3204 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
3205 if (inq_buff == NULL) {
3206 printk(KERN_ERR "cciss: out of memory\n");
3207 kfree(ld_buff);
3208 return;
3210 /* Get the firmware version */
3211 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
3212 sizeof(InquiryData_struct), 0, 0, 0, NULL,
3213 TYPE_CMD);
3214 if (return_code == IO_OK) {
3215 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
3216 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
3217 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
3218 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
3219 } else { /* send command failed */
3221 printk(KERN_WARNING "cciss: unable to determine firmware"
3222 " version of controller\n");
3224 /* Get the number of logical volumes */
3225 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
3226 sizeof(ReportLunData_struct), 0, 0, 0, NULL,
3227 TYPE_CMD);
3229 if (return_code == IO_OK) {
3230 #ifdef CCISS_DEBUG
3231 printk("LUN Data\n--------------------------\n");
3232 #endif /* CCISS_DEBUG */
3234 listlength |=
3235 (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
3236 listlength |=
3237 (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
3238 listlength |=
3239 (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
3240 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
3241 } else { /* reading number of logical volumes failed */
3243 printk(KERN_WARNING "cciss: report logical volume"
3244 " command failed\n");
3245 listlength = 0;
3247 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
3248 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
3249 printk(KERN_ERR
3250 "ciss: only %d number of logical volumes supported\n",
3251 CISS_MAX_LUN);
3252 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3254 #ifdef CCISS_DEBUG
3255 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
3256 ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
3257 ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
3258 hba[cntl_num]->num_luns);
3259 #endif /* CCISS_DEBUG */
3261 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
3262 for (i = 0; i < CISS_MAX_LUN; i++) {
3263 if (i < hba[cntl_num]->num_luns) {
3264 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3265 << 24;
3266 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3267 << 16;
3268 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3269 << 8;
3270 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3272 hba[cntl_num]->drv[i].LunID = lunid;
3274 #ifdef CCISS_DEBUG
3275 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3276 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3277 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3278 hba[cntl_num]->drv[i].LunID);
3279 #endif /* CCISS_DEBUG */
3281 /* testing to see if 16-byte CDBs are already being used */
3282 if(hba[cntl_num]->cciss_read == CCISS_READ_16) {
3283 cciss_read_capacity_16(cntl_num, i, 0,
3284 &total_size, &block_size);
3285 goto geo_inq;
3287 cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);
3289 /* If read_capacity returns all F's the logical is >2TB */
3290 /* so we switch to 16-byte CDBs for all read/write ops */
3291 if(total_size == 0xFFFFFFFFULL) {
3292 cciss_read_capacity_16(cntl_num, i, 0,
3293 &total_size, &block_size);
3294 hba[cntl_num]->cciss_read = CCISS_READ_16;
3295 hba[cntl_num]->cciss_write = CCISS_WRITE_16;
3296 } else {
3297 hba[cntl_num]->cciss_read = CCISS_READ_10;
3298 hba[cntl_num]->cciss_write = CCISS_WRITE_10;
3300 geo_inq:
3301 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3302 block_size, inq_buff,
3303 &hba[cntl_num]->drv[i]);
3304 } else {
3305 /* initialize raid_level to indicate a free space */
3306 hba[cntl_num]->drv[i].raid_level = -1;
3309 kfree(ld_buff);
3310 kfree(inq_buff);
3313 /* Function to find the first free pointer into our hba[] array */
3314 /* Returns -1 if no free entries are left. */
3315 static int alloc_cciss_hba(void)
3317 int i;
3319 for (i = 0; i < MAX_CTLR; i++) {
3320 if (!hba[i]) {
3321 ctlr_info_t *p;
3323 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3324 if (!p)
3325 goto Enomem;
3326 p->gendisk[0] = alloc_disk(1 << NWD_SHIFT);
3327 if (!p->gendisk[0]) {
3328 kfree(p);
3329 goto Enomem;
3331 hba[i] = p;
3332 return i;
3335 printk(KERN_WARNING "cciss: This driver supports a maximum"
3336 " of %d controllers.\n", MAX_CTLR);
3337 return -1;
3338 Enomem:
3339 printk(KERN_ERR "cciss: out of memory.\n");
3340 return -1;
3343 static void free_hba(int i)
3345 ctlr_info_t *p = hba[i];
3346 int n;
3348 hba[i] = NULL;
3349 for (n = 0; n < CISS_MAX_LUN; n++)
3350 put_disk(p->gendisk[n]);
3351 kfree(p);
3355 * This is it. Find all the controllers and register them. I really hate
3356 * stealing all these major device numbers.
3357 * returns the number of block devices registered.
3359 static int __devinit cciss_init_one(struct pci_dev *pdev,
3360 const struct pci_device_id *ent)
3362 int i;
3363 int j = 0;
3364 int rc;
3365 int dac;
3367 i = alloc_cciss_hba();
3368 if (i < 0)
3369 return -1;
3371 hba[i]->busy_initializing = 1;
3373 if (cciss_pci_init(hba[i], pdev) != 0)
3374 goto clean1;
3376 sprintf(hba[i]->devname, "cciss%d", i);
3377 hba[i]->ctlr = i;
3378 hba[i]->pdev = pdev;
3380 /* configure PCI DMA stuff */
3381 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3382 dac = 1;
3383 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3384 dac = 0;
3385 else {
3386 printk(KERN_ERR "cciss: no suitable DMA available\n");
3387 goto clean1;
3391 * register with the major number, or get a dynamic major number
3392 * by passing 0 as argument. This is done for greater than
3393 * 8 controller support.
3395 if (i < MAX_CTLR_ORIG)
3396 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3397 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3398 if (rc == -EBUSY || rc == -EINVAL) {
3399 printk(KERN_ERR
3400 "cciss: Unable to get major number %d for %s "
3401 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3402 goto clean1;
3403 } else {
3404 if (i >= MAX_CTLR_ORIG)
3405 hba[i]->major = rc;
3408 /* make sure the board interrupts are off */
3409 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3410 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3411 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
3412 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3413 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3414 goto clean2;
3417 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3418 hba[i]->devname, pdev->device, pci_name(pdev),
3419 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3421 hba[i]->cmd_pool_bits =
3422 kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG -
3423 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3424 hba[i]->cmd_pool = (CommandList_struct *)
3425 pci_alloc_consistent(hba[i]->pdev,
3426 hba[i]->nr_cmds * sizeof(CommandList_struct),
3427 &(hba[i]->cmd_pool_dhandle));
3428 hba[i]->errinfo_pool = (ErrorInfo_struct *)
3429 pci_alloc_consistent(hba[i]->pdev,
3430 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3431 &(hba[i]->errinfo_pool_dhandle));
3432 if ((hba[i]->cmd_pool_bits == NULL)
3433 || (hba[i]->cmd_pool == NULL)
3434 || (hba[i]->errinfo_pool == NULL)) {
3435 printk(KERN_ERR "cciss: out of memory\n");
3436 goto clean4;
3438 #ifdef CONFIG_CISS_SCSI_TAPE
3439 hba[i]->scsi_rejects.complete =
3440 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3441 (hba[i]->nr_cmds + 5), GFP_KERNEL);
3442 if (hba[i]->scsi_rejects.complete == NULL) {
3443 printk(KERN_ERR "cciss: out of memory\n");
3444 goto clean4;
3446 #endif
3447 spin_lock_init(&hba[i]->lock);
3449 /* Initialize the pdev driver private data.
3450 have it point to hba[i]. */
3451 pci_set_drvdata(pdev, hba[i]);
3452 /* command and error info recs zeroed out before
3453 they are used */
3454 memset(hba[i]->cmd_pool_bits, 0,
3455 ((hba[i]->nr_cmds + BITS_PER_LONG -
3456 1) / BITS_PER_LONG) * sizeof(unsigned long));
3458 #ifdef CCISS_DEBUG
3459 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
3460 #endif /* CCISS_DEBUG */
3462 cciss_getgeometry(i);
3464 cciss_scsi_setup(i);
3466 /* Turn the interrupts on so we can service requests */
3467 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3469 cciss_procinit(i);
3471 hba[i]->cciss_max_sectors = 2048;
3473 hba[i]->busy_initializing = 0;
3475 do {
3476 drive_info_struct *drv = &(hba[i]->drv[j]);
3477 struct gendisk *disk = hba[i]->gendisk[j];
3478 struct request_queue *q;
3480 /* Check if the disk was allocated already */
3481 if (!disk){
3482 hba[i]->gendisk[j] = alloc_disk(1 << NWD_SHIFT);
3483 disk = hba[i]->gendisk[j];
3486 /* Check that the disk was able to be allocated */
3487 if (!disk) {
3488 printk(KERN_ERR "cciss: unable to allocate memory for disk %d\n", j);
3489 goto clean4;
3492 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3493 if (!q) {
3494 printk(KERN_ERR
3495 "cciss: unable to allocate queue for disk %d\n",
3497 goto clean4;
3499 drv->queue = q;
3501 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3503 /* This is a hardware imposed limit. */
3504 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3506 /* This is a limit in the driver and could be eliminated. */
3507 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3509 blk_queue_max_sectors(q, hba[i]->cciss_max_sectors);
3511 blk_queue_softirq_done(q, cciss_softirq_done);
3513 q->queuedata = hba[i];
3514 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3515 disk->major = hba[i]->major;
3516 disk->first_minor = j << NWD_SHIFT;
3517 disk->fops = &cciss_fops;
3518 disk->queue = q;
3519 disk->private_data = drv;
3520 disk->driverfs_dev = &pdev->dev;
3521 /* we must register the controller even if no disks exist */
3522 /* this is for the online array utilities */
3523 if (!drv->heads && j)
3524 continue;
3525 blk_queue_hardsect_size(q, drv->block_size);
3526 set_capacity(disk, drv->nr_blocks);
3527 j++;
3528 } while (j <= hba[i]->highest_lun);
3530 /* Make sure all queue data is written out before */
3531 /* interrupt handler, triggered by add_disk, */
3532 /* is allowed to start them. */
3533 wmb();
3535 for (j = 0; j <= hba[i]->highest_lun; j++)
3536 add_disk(hba[i]->gendisk[j]);
3538 return 1;
3540 clean4:
3541 #ifdef CONFIG_CISS_SCSI_TAPE
3542 kfree(hba[i]->scsi_rejects.complete);
3543 #endif
3544 kfree(hba[i]->cmd_pool_bits);
3545 if (hba[i]->cmd_pool)
3546 pci_free_consistent(hba[i]->pdev,
3547 hba[i]->nr_cmds * sizeof(CommandList_struct),
3548 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3549 if (hba[i]->errinfo_pool)
3550 pci_free_consistent(hba[i]->pdev,
3551 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3552 hba[i]->errinfo_pool,
3553 hba[i]->errinfo_pool_dhandle);
3554 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3555 clean2:
3556 unregister_blkdev(hba[i]->major, hba[i]->devname);
3557 clean1:
3558 hba[i]->busy_initializing = 0;
3559 /* cleanup any queues that may have been initialized */
3560 for (j=0; j <= hba[i]->highest_lun; j++){
3561 drive_info_struct *drv = &(hba[i]->drv[j]);
3562 if (drv->queue)
3563 blk_cleanup_queue(drv->queue);
3566 /* Deliberately omit pci_disable_device(): it does something nasty to
3567 * Smart Array controllers that pci_enable_device does not undo */
3569 pci_release_regions(pdev);
3570 pci_set_drvdata(pdev, NULL);
3571 free_hba(i);
3572 return -1;
3575 static void cciss_shutdown(struct pci_dev *pdev)
3577 ctlr_info_t *tmp_ptr;
3578 int i;
3579 char flush_buf[4];
3580 int return_code;
3582 tmp_ptr = pci_get_drvdata(pdev);
3583 if (tmp_ptr == NULL)
3584 return;
3585 i = tmp_ptr->ctlr;
3586 if (hba[i] == NULL)
3587 return;
3589 /* Turn board interrupts off and send the flush cache command */
3590 /* sendcmd will turn off interrupt, and send the flush...
3591 * To write all data in the battery backed cache to disks */
3592 memset(flush_buf, 0, 4);
3593 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3594 TYPE_CMD);
3595 if (return_code == IO_OK) {
3596 printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
3597 } else {
3598 printk(KERN_WARNING "Error flushing cache on controller %d\n", i);
3600 free_irq(hba[i]->intr[2], hba[i]);
3603 static void __devexit cciss_remove_one(struct pci_dev *pdev)
3605 ctlr_info_t *tmp_ptr;
3606 int i, j;
3608 if (pci_get_drvdata(pdev) == NULL) {
3609 printk(KERN_ERR "cciss: Unable to remove device\n");
3610 return;
3612 tmp_ptr = pci_get_drvdata(pdev);
3613 i = tmp_ptr->ctlr;
3614 if (hba[i] == NULL) {
3615 printk(KERN_ERR "cciss: device appears to "
3616 "already be removed \n");
3617 return;
3620 remove_proc_entry(hba[i]->devname, proc_cciss);
3621 unregister_blkdev(hba[i]->major, hba[i]->devname);
3623 /* remove it from the disk list */
3624 for (j = 0; j < CISS_MAX_LUN; j++) {
3625 struct gendisk *disk = hba[i]->gendisk[j];
3626 if (disk) {
3627 struct request_queue *q = disk->queue;
3629 if (disk->flags & GENHD_FL_UP)
3630 del_gendisk(disk);
3631 if (q)
3632 blk_cleanup_queue(q);
3636 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3638 cciss_shutdown(pdev);
3640 #ifdef CONFIG_PCI_MSI
3641 if (hba[i]->msix_vector)
3642 pci_disable_msix(hba[i]->pdev);
3643 else if (hba[i]->msi_vector)
3644 pci_disable_msi(hba[i]->pdev);
3645 #endif /* CONFIG_PCI_MSI */
3647 iounmap(hba[i]->vaddr);
3649 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
3650 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3651 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3652 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3653 kfree(hba[i]->cmd_pool_bits);
3654 #ifdef CONFIG_CISS_SCSI_TAPE
3655 kfree(hba[i]->scsi_rejects.complete);
3656 #endif
3658 /* Deliberately omit pci_disable_device(): it does something nasty to
3659 * Smart Array controllers that pci_enable_device does not undo */
3661 pci_release_regions(pdev);
3662 pci_set_drvdata(pdev, NULL);
3663 free_hba(i);
3666 static struct pci_driver cciss_pci_driver = {
3667 .name = "cciss",
3668 .probe = cciss_init_one,
3669 .remove = __devexit_p(cciss_remove_one),
3670 .id_table = cciss_pci_device_id, /* id_table */
3671 .shutdown = cciss_shutdown,
3675 * This is it. Register the PCI driver information for the cards we control;
3676 * the OS will call our registered routines when it finds one of our cards.
3678 static int __init cciss_init(void)
3680 printk(KERN_INFO DRIVER_NAME "\n");
3682 /* Register for our PCI devices */
3683 return pci_register_driver(&cciss_pci_driver);
3686 static void __exit cciss_cleanup(void)
3688 int i;
3690 pci_unregister_driver(&cciss_pci_driver);
3691 /* double check that all controller entries have been removed */
3692 for (i = 0; i < MAX_CTLR; i++) {
3693 if (hba[i] != NULL) {
3694 printk(KERN_WARNING "cciss: had to remove"
3695 " controller %d\n", i);
3696 cciss_remove_one(hba[i]->pdev);
3699 remove_proc_entry("driver/cciss", NULL);
3702 static void fail_all_cmds(unsigned long ctlr)
3704 /* If we get here, the board is apparently dead. */
3705 ctlr_info_t *h = hba[ctlr];
3706 CommandList_struct *c;
3707 unsigned long flags;
3709 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3710 h->alive = 0; /* the controller apparently died... */
3712 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3714 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3716 /* move everything off the request queue onto the completed queue */
3717 while ((c = h->reqQ) != NULL) {
3718 removeQ(&(h->reqQ), c);
3719 h->Qdepth--;
3720 addQ(&(h->cmpQ), c);
3723 /* Now, fail everything on the completed queue with a HW error */
3724 while ((c = h->cmpQ) != NULL) {
3725 removeQ(&h->cmpQ, c);
3726 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3727 if (c->cmd_type == CMD_RWREQ) {
3728 complete_command(h, c, 0);
3729 } else if (c->cmd_type == CMD_IOCTL_PEND)
3730 complete(c->waiting);
3731 #ifdef CONFIG_CISS_SCSI_TAPE
3732 else if (c->cmd_type == CMD_SCSI)
3733 complete_scsi_command(c, 0, 0);
3734 #endif
3736 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3737 return;
3740 module_init(cciss_init);
3741 module_exit(cciss_cleanup);