drivers/block/cciss.c
1 /*
2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
23 #include <linux/config.h> /* CONFIG_PROC_FS */
24 #include <linux/module.h>
25 #include <linux/interrupt.h>
26 #include <linux/types.h>
27 #include <linux/pci.h>
28 #include <linux/kernel.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/major.h>
32 #include <linux/fs.h>
33 #include <linux/bio.h>
34 #include <linux/blkpg.h>
35 #include <linux/timer.h>
36 #include <linux/proc_fs.h>
37 #include <linux/init.h>
38 #include <linux/hdreg.h>
39 #include <linux/spinlock.h>
40 #include <linux/compat.h>
41 #include <linux/blktrace_api.h>
42 #include <asm/uaccess.h>
43 #include <asm/io.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/blkdev.h>
47 #include <linux/genhd.h>
48 #include <linux/completion.h>
50 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
51 #define DRIVER_NAME "HP CISS Driver (v 3.6.10)"
52 #define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,10)
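/*
 * The version is packed into a single integer, one byte per component,
 * e.g. CCISS_DRIVER_VERSION(3,6,10) == (3 << 16) | (6 << 8) | 10 == 0x03060A.
 * This packed value is what the CCISS_GETDRIVVER ioctl below returns to
 * user space.
 */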
54 /* Embedded module documentation macros - see modules.h */
55 MODULE_AUTHOR("Hewlett-Packard Company");
56 MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.10");
57 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
58 " SA6i P600 P800 P400 P400i E200 E200i E500");
59 MODULE_LICENSE("GPL");
61 #include "cciss_cmd.h"
62 #include "cciss.h"
63 #include <linux/cciss_ioctl.h>
65 /* define the PCI info for the cards we can control */
66 static const struct pci_device_id cciss_pci_device_id[] = {
67 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
68 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
69 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
70 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
71 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
72 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
73 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
74 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
75 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
76 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
77 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
78 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
79 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
80 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
81 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3233},
86 {0,}
89 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
91 /* board_id = Subsystem Device ID & Vendor ID
92 * product = Marketing Name for the board
93 * access = Address of the struct of function pointers
95 static struct board_type products[] = {
96 {0x40700E11, "Smart Array 5300", &SA5_access},
97 {0x40800E11, "Smart Array 5i", &SA5B_access},
98 {0x40820E11, "Smart Array 532", &SA5B_access},
99 {0x40830E11, "Smart Array 5312", &SA5B_access},
100 {0x409A0E11, "Smart Array 641", &SA5_access},
101 {0x409B0E11, "Smart Array 642", &SA5_access},
102 {0x409C0E11, "Smart Array 6400", &SA5_access},
103 {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
104 {0x40910E11, "Smart Array 6i", &SA5_access},
105 {0x3225103C, "Smart Array P600", &SA5_access},
106 {0x3223103C, "Smart Array P800", &SA5_access},
107 {0x3234103C, "Smart Array P400", &SA5_access},
108 {0x3235103C, "Smart Array P400i", &SA5_access},
109 {0x3211103C, "Smart Array E200i", &SA5_access},
110 {0x3212103C, "Smart Array E200", &SA5_access},
111 {0x3213103C, "Smart Array E200i", &SA5_access},
112 {0x3214103C, "Smart Array E200i", &SA5_access},
113 {0x3215103C, "Smart Array E200i", &SA5_access},
114 {0x3233103C, "Smart Array E500", &SA5_access},
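/*
 * A board_id is formed from the PCI subsystem IDs: subsystem device ID in
 * the high 16 bits, subsystem vendor ID in the low 16 bits.  For example,
 * the {0x0E11, 0x4070} entry in cciss_pci_device_id above yields board_id
 * 0x40700E11, which this table maps to "Smart Array 5300" and SA5_access.
 */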
117 /* How long to wait (in milliseconds) for board to go into simple mode */
118 #define MAX_CONFIG_WAIT 30000
119 #define MAX_IOCTL_CONFIG_WAIT 1000
121 /* define how many times we will try a command because of bus resets */
122 #define MAX_CMD_RETRIES 3
124 #define READ_AHEAD 1024
125 #define NR_CMDS 384 /* #commands that can be outstanding */
126 #define MAX_CTLR 32
128 /* Originally the cciss driver only supported 8 major numbers */
129 #define MAX_CTLR_ORIG 8
131 static ctlr_info_t *hba[MAX_CTLR];
133 static void do_cciss_request(request_queue_t *q);
134 static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs);
135 static int cciss_open(struct inode *inode, struct file *filep);
136 static int cciss_release(struct inode *inode, struct file *filep);
137 static int cciss_ioctl(struct inode *inode, struct file *filep,
138 unsigned int cmd, unsigned long arg);
139 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
141 static int revalidate_allvol(ctlr_info_t *host);
142 static int cciss_revalidate(struct gendisk *disk);
143 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
144 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
145 int clear_all);
147 static void cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
148 int withirq, unsigned int *total_size,
149 unsigned int *block_size);
150 static void cciss_geometry_inquiry(int ctlr, int logvol, int withirq,
151 unsigned int total_size,
152 unsigned int block_size,
153 InquiryData_struct *inq_buff,
154 drive_info_struct *drv);
155 static void cciss_getgeometry(int cntl_num);
156 static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
157 __u32);
158 static void start_io(ctlr_info_t *h);
159 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
160 unsigned int use_unit_num, unsigned int log_unit,
161 __u8 page_code, unsigned char *scsi3addr, int cmd_type);
162 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
163 unsigned int use_unit_num, unsigned int log_unit,
164 __u8 page_code, int cmd_type);
166 static void fail_all_cmds(unsigned long ctlr);
168 #ifdef CONFIG_PROC_FS
169 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
170 int length, int *eof, void *data);
171 static void cciss_procinit(int i);
172 #else
173 static void cciss_procinit(int i)
176 #endif /* CONFIG_PROC_FS */
178 #ifdef CONFIG_COMPAT
179 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
180 #endif
182 static struct block_device_operations cciss_fops = {
183 .owner = THIS_MODULE,
184 .open = cciss_open,
185 .release = cciss_release,
186 .ioctl = cciss_ioctl,
187 .getgeo = cciss_getgeo,
188 #ifdef CONFIG_COMPAT
189 .compat_ioctl = cciss_compat_ioctl,
190 #endif
191 .revalidate_disk = cciss_revalidate,
195 * Enqueuing and dequeuing functions for cmdlists.
197 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
199 if (*Qptr == NULL) {
200 *Qptr = c;
201 c->next = c->prev = c;
202 } else {
203 c->prev = (*Qptr)->prev;
204 c->next = (*Qptr);
205 (*Qptr)->prev->next = c;
206 (*Qptr)->prev = c;
210 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
211 CommandList_struct *c)
213 if (c && c->next != c) {
214 if (*Qptr == c)
215 *Qptr = c->next;
216 c->prev->next = c->next;
217 c->next->prev = c->prev;
218 } else {
219 *Qptr = NULL;
221 return c;
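/*
 * addQ()/removeQ() maintain a circular, doubly linked list with *Qptr
 * pointing at the head.  Callers must hold the per-controller lock, as in
 * the request submission pattern used throughout this file:
 *
 *	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
 *	addQ(&host->reqQ, c);
 *	host->Qdepth++;
 *	start_io(host);
 *	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
 */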
224 #include "cciss_scsi.c" /* For SCSI tape support */
226 #ifdef CONFIG_PROC_FS
229 * Report information about this controller.
231 #define ENG_GIG 1000000000
232 #define ENG_GIG_FACTOR (ENG_GIG/512)
233 #define RAID_UNKNOWN 6
234 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
235 "UNKNOWN"
238 static struct proc_dir_entry *proc_cciss;
240 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
241 int length, int *eof, void *data)
243 off_t pos = 0;
244 off_t len = 0;
245 int size, i, ctlr;
246 ctlr_info_t *h = (ctlr_info_t *) data;
247 drive_info_struct *drv;
248 unsigned long flags;
249 sector_t vol_sz, vol_sz_frac;
251 ctlr = h->ctlr;
253 /* prevent displaying bogus info during configuration
254 * or deconfiguration of a logical volume
256 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
257 if (h->busy_configuring) {
258 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
259 return -EBUSY;
261 h->busy_configuring = 1;
262 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
264 size = sprintf(buffer, "%s: HP %s Controller\n"
265 "Board ID: 0x%08lx\n"
266 "Firmware Version: %c%c%c%c\n"
267 "IRQ: %d\n"
268 "Logical drives: %d\n"
269 "Current Q depth: %d\n"
270 "Current # commands on controller: %d\n"
271 "Max Q depth since init: %d\n"
272 "Max # commands on controller since init: %d\n"
273 "Max SG entries since init: %d\n\n",
274 h->devname,
275 h->product_name,
276 (unsigned long)h->board_id,
277 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
278 h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
279 h->num_luns, h->Qdepth, h->commands_outstanding,
280 h->maxQsinceinit, h->max_outstanding, h->maxSG);
282 pos += size;
283 len += size;
284 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
285 for (i = 0; i <= h->highest_lun; i++) {
287 drv = &h->drv[i];
288 if (drv->heads == 0)
289 continue;
291 vol_sz = drv->nr_blocks;
292 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
293 vol_sz_frac *= 100;
294 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
296 if (drv->raid_level > 5)
297 drv->raid_level = RAID_UNKNOWN;
298 size = sprintf(buffer + len, "cciss/c%dd%d:"
299 "\t%4u.%02uGB\tRAID %s\n",
300 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
301 raid_label[drv->raid_level]);
302 pos += size;
303 len += size;
306 *eof = 1;
307 *start = buffer + offset;
308 len -= offset;
309 if (len > length)
310 len = length;
311 h->busy_configuring = 0;
312 return len;
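/*
 * The resulting proc entry prints the controller summary above followed by
 * one line per logical volume in the format produced by the sprintf() above,
 * e.g. "cciss/c0d0:     136.73GB       RAID 5" (values here are illustrative).
 */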
315 static int
316 cciss_proc_write(struct file *file, const char __user *buffer,
317 unsigned long count, void *data)
319 unsigned char cmd[80];
320 int len;
321 #ifdef CONFIG_CISS_SCSI_TAPE
322 ctlr_info_t *h = (ctlr_info_t *) data;
323 int rc;
324 #endif
326 if (count > sizeof(cmd) - 1)
327 return -EINVAL;
328 if (copy_from_user(cmd, buffer, count))
329 return -EFAULT;
330 cmd[count] = '\0';
331 len = strlen(cmd); // above 3 lines ensure safety
332 if (len && cmd[len - 1] == '\n')
333 cmd[--len] = '\0';
334 # ifdef CONFIG_CISS_SCSI_TAPE
335 if (strcmp("engage scsi", cmd) == 0) {
336 rc = cciss_engage_scsi(h->ctlr);
337 if (rc != 0)
338 return -rc;
339 return count;
341 /* might be nice to have "disengage" too, but it's not
342 safely possible. (only 1 module use count, lock issues.) */
343 # endif
344 return -EINVAL;
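/*
 * With CONFIG_CISS_SCSI_TAPE enabled, SCSI tape support can be engaged from
 * user space by writing to the controller's proc entry, e.g. (assuming the
 * first controller's devname is "cciss0"):
 *
 *	echo "engage scsi" > /proc/driver/cciss/cciss0
 */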
348 * Get us a file in /proc/driver/cciss that says something about each controller.
349 * Create /proc/driver/cciss if it doesn't exist yet.
351 static void __devinit cciss_procinit(int i)
353 struct proc_dir_entry *pde;
355 if (proc_cciss == NULL) {
356 proc_cciss = proc_mkdir("cciss", proc_root_driver);
357 if (!proc_cciss)
358 return;
361 pde = create_proc_read_entry(hba[i]->devname,
362 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
363 proc_cciss, cciss_proc_get_info, hba[i]);
364 if (pde)
pde->write_proc = cciss_proc_write;
366 #endif /* CONFIG_PROC_FS */
369 * For operations that cannot sleep, a command block is allocated at init,
370 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
371 * which ones are free or in use. For operations that can wait for kmalloc
372 * to possibly sleep, this routine can be called with get_from_pool set to 0.
373 * cmd_free() MUST be called with got_from_pool set to 0 if cmd_alloc() was called with get_from_pool set to 0.
375 static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
377 CommandList_struct *c;
378 int i;
379 u64bit temp64;
380 dma_addr_t cmd_dma_handle, err_dma_handle;
382 if (!get_from_pool) {
383 c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
384 sizeof(CommandList_struct), &cmd_dma_handle);
385 if (c == NULL)
386 return NULL;
387 memset(c, 0, sizeof(CommandList_struct));
389 c->cmdindex = -1;
391 c->err_info = (ErrorInfo_struct *)
392 pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
393 &err_dma_handle);
395 if (c->err_info == NULL) {
396 pci_free_consistent(h->pdev,
397 sizeof(CommandList_struct), c, cmd_dma_handle);
398 return NULL;
400 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
401 } else { /* get it out of the controller's pool */
403 do {
404 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
405 if (i == NR_CMDS)
406 return NULL;
407 } while (test_and_set_bit
408 (i & (BITS_PER_LONG - 1),
409 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
410 #ifdef CCISS_DEBUG
411 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
412 #endif
413 c = h->cmd_pool + i;
414 memset(c, 0, sizeof(CommandList_struct));
415 cmd_dma_handle = h->cmd_pool_dhandle
416 + i * sizeof(CommandList_struct);
417 c->err_info = h->errinfo_pool + i;
418 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
419 err_dma_handle = h->errinfo_pool_dhandle
420 + i * sizeof(ErrorInfo_struct);
421 h->nr_allocs++;
423 c->cmdindex = i;
426 c->busaddr = (__u32) cmd_dma_handle;
427 temp64.val = (__u64) err_dma_handle;
428 c->ErrDesc.Addr.lower = temp64.val32.lower;
429 c->ErrDesc.Addr.upper = temp64.val32.upper;
430 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
432 c->ctlr = h->ctlr;
433 return c;
437 * Frees a command block that was previously allocated with cmd_alloc().
439 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
441 int i;
442 u64bit temp64;
444 if (!got_from_pool) {
445 temp64.val32.lower = c->ErrDesc.Addr.lower;
446 temp64.val32.upper = c->ErrDesc.Addr.upper;
447 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
448 c->err_info, (dma_addr_t) temp64.val);
449 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
450 c, (dma_addr_t) c->busaddr);
451 } else {
452 i = c - h->cmd_pool;
453 clear_bit(i & (BITS_PER_LONG - 1),
454 h->cmd_pool_bits + (i / BITS_PER_LONG));
455 h->nr_frees++;
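/*
 * cmd_alloc() and cmd_free() must agree on where the command block came
 * from.  The ioctl paths in this file allocate a fresh DMA-coherent block
 * and free it the same way:
 *
 *	c = cmd_alloc(host, 0);
 *	...
 *	cmd_free(host, c, 0);
 *
 * while the softirq completion path frees pool-allocated commands with
 * cmd_free(h, cmd, 1).
 */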
459 static inline ctlr_info_t *get_host(struct gendisk *disk)
461 return disk->queue->queuedata;
464 static inline drive_info_struct *get_drv(struct gendisk *disk)
466 return disk->private_data;
470 * Open. Make sure the device is really there.
472 static int cciss_open(struct inode *inode, struct file *filep)
474 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
475 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
477 #ifdef CCISS_DEBUG
478 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
479 #endif /* CCISS_DEBUG */
481 if (host->busy_initializing || drv->busy_configuring)
482 return -EBUSY;
484 * Root is allowed to open raw volume zero even if it's not configured
485 * so array config can still work. Root is also allowed to open any
486 * volume that has a LUN ID, so it can issue IOCTL to reread the
487 * disk information. I don't think I really like this
488 * but I'm already using way too many device nodes to claim another one
489 * for "raw controller".
491 if (drv->nr_blocks == 0) {
492 if (iminor(inode) != 0) { /* not node 0? */
493 /* if not node 0, make sure it is partition 0 */
494 if (iminor(inode) & 0x0f) {
495 return -ENXIO;
496 /* if it is, make sure we have a LUN ID */
497 } else if (drv->LunID == 0) {
498 return -ENXIO;
501 if (!capable(CAP_SYS_ADMIN))
502 return -EPERM;
504 drv->usage_count++;
505 host->usage_count++;
506 return 0;
510 * Close. Sync first.
512 static int cciss_release(struct inode *inode, struct file *filep)
514 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
515 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
517 #ifdef CCISS_DEBUG
518 printk(KERN_DEBUG "cciss_release %s\n",
519 inode->i_bdev->bd_disk->disk_name);
520 #endif /* CCISS_DEBUG */
522 drv->usage_count--;
523 host->usage_count--;
524 return 0;
527 #ifdef CONFIG_COMPAT
529 static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
531 int ret;
532 lock_kernel();
533 ret = cciss_ioctl(f->f_dentry->d_inode, f, cmd, arg);
534 unlock_kernel();
535 return ret;
538 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
539 unsigned long arg);
540 static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
541 unsigned long arg);
543 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
545 switch (cmd) {
546 case CCISS_GETPCIINFO:
547 case CCISS_GETINTINFO:
548 case CCISS_SETINTINFO:
549 case CCISS_GETNODENAME:
550 case CCISS_SETNODENAME:
551 case CCISS_GETHEARTBEAT:
552 case CCISS_GETBUSTYPES:
553 case CCISS_GETFIRMVER:
554 case CCISS_GETDRIVVER:
555 case CCISS_REVALIDVOLS:
556 case CCISS_DEREGDISK:
557 case CCISS_REGNEWDISK:
558 case CCISS_REGNEWD:
559 case CCISS_RESCANDISK:
560 case CCISS_GETLUNINFO:
561 return do_ioctl(f, cmd, arg);
563 case CCISS_PASSTHRU32:
564 return cciss_ioctl32_passthru(f, cmd, arg);
565 case CCISS_BIG_PASSTHRU32:
566 return cciss_ioctl32_big_passthru(f, cmd, arg);
568 default:
569 return -ENOIOCTLCMD;
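/*
 * CCISS_PASSTHRU32/CCISS_BIG_PASSTHRU32 cannot simply be forwarded: the
 * 32-bit IOCTL32_Command_struct carries a 32-bit buf pointer, so the
 * helpers below rebuild a native command struct in compat_alloc_user_space(),
 * translate buf with compat_ptr(), and then issue the regular
 * CCISS_PASSTHRU/CCISS_BIG_PASSTHRU ioctl.
 */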
573 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
574 unsigned long arg)
576 IOCTL32_Command_struct __user *arg32 =
577 (IOCTL32_Command_struct __user *) arg;
578 IOCTL_Command_struct arg64;
579 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
580 int err;
581 u32 cp;
583 err = 0;
584 err |=
585 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
586 sizeof(arg64.LUN_info));
587 err |=
588 copy_from_user(&arg64.Request, &arg32->Request,
589 sizeof(arg64.Request));
590 err |=
591 copy_from_user(&arg64.error_info, &arg32->error_info,
592 sizeof(arg64.error_info));
593 err |= get_user(arg64.buf_size, &arg32->buf_size);
594 err |= get_user(cp, &arg32->buf);
595 arg64.buf = compat_ptr(cp);
596 err |= copy_to_user(p, &arg64, sizeof(arg64));
598 if (err)
599 return -EFAULT;
601 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
602 if (err)
603 return err;
604 err |=
605 copy_in_user(&arg32->error_info, &p->error_info,
606 sizeof(arg32->error_info));
607 if (err)
608 return -EFAULT;
609 return err;
612 static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
613 unsigned long arg)
615 BIG_IOCTL32_Command_struct __user *arg32 =
616 (BIG_IOCTL32_Command_struct __user *) arg;
617 BIG_IOCTL_Command_struct arg64;
618 BIG_IOCTL_Command_struct __user *p =
619 compat_alloc_user_space(sizeof(arg64));
620 int err;
621 u32 cp;
623 err = 0;
624 err |=
625 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
626 sizeof(arg64.LUN_info));
627 err |=
628 copy_from_user(&arg64.Request, &arg32->Request,
629 sizeof(arg64.Request));
630 err |=
631 copy_from_user(&arg64.error_info, &arg32->error_info,
632 sizeof(arg64.error_info));
633 err |= get_user(arg64.buf_size, &arg32->buf_size);
634 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
635 err |= get_user(cp, &arg32->buf);
636 arg64.buf = compat_ptr(cp);
637 err |= copy_to_user(p, &arg64, sizeof(arg64));
639 if (err)
640 return -EFAULT;
642 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
643 if (err)
644 return err;
645 err |=
646 copy_in_user(&arg32->error_info, &p->error_info,
647 sizeof(arg32->error_info));
648 if (err)
649 return -EFAULT;
650 return err;
652 #endif
654 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
656 drive_info_struct *drv = get_drv(bdev->bd_disk);
658 if (!drv->cylinders)
659 return -ENXIO;
661 geo->heads = drv->heads;
662 geo->sectors = drv->sectors;
663 geo->cylinders = drv->cylinders;
664 return 0;
668 * ioctl
670 static int cciss_ioctl(struct inode *inode, struct file *filep,
671 unsigned int cmd, unsigned long arg)
673 struct block_device *bdev = inode->i_bdev;
674 struct gendisk *disk = bdev->bd_disk;
675 ctlr_info_t *host = get_host(disk);
676 drive_info_struct *drv = get_drv(disk);
677 int ctlr = host->ctlr;
678 void __user *argp = (void __user *)arg;
680 #ifdef CCISS_DEBUG
681 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
682 #endif /* CCISS_DEBUG */
684 switch (cmd) {
685 case CCISS_GETPCIINFO:
687 cciss_pci_info_struct pciinfo;
689 if (!arg)
690 return -EINVAL;
691 pciinfo.domain = pci_domain_nr(host->pdev->bus);
692 pciinfo.bus = host->pdev->bus->number;
693 pciinfo.dev_fn = host->pdev->devfn;
694 pciinfo.board_id = host->board_id;
695 if (copy_to_user
696 (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
697 return -EFAULT;
698 return 0;
700 case CCISS_GETINTINFO:
702 cciss_coalint_struct intinfo;
703 if (!arg)
704 return -EINVAL;
705 intinfo.delay =
706 readl(&host->cfgtable->HostWrite.CoalIntDelay);
707 intinfo.count =
708 readl(&host->cfgtable->HostWrite.CoalIntCount);
709 if (copy_to_user
710 (argp, &intinfo, sizeof(cciss_coalint_struct)))
711 return -EFAULT;
712 return 0;
714 case CCISS_SETINTINFO:
716 cciss_coalint_struct intinfo;
717 unsigned long flags;
718 int i;
720 if (!arg)
721 return -EINVAL;
722 if (!capable(CAP_SYS_ADMIN))
723 return -EPERM;
724 if (copy_from_user
725 (&intinfo, argp, sizeof(cciss_coalint_struct)))
726 return -EFAULT;
727 if ((intinfo.delay == 0) && (intinfo.count == 0))
729 // printk("cciss_ioctl: delay and count cannot be 0\n");
730 return -EINVAL;
732 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
733 /* Update the field, and then ring the doorbell */
734 writel(intinfo.delay,
735 &(host->cfgtable->HostWrite.CoalIntDelay));
736 writel(intinfo.count,
737 &(host->cfgtable->HostWrite.CoalIntCount));
738 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
740 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
741 if (!(readl(host->vaddr + SA5_DOORBELL)
742 & CFGTBL_ChangeReq))
743 break;
744 /* delay and try again */
745 udelay(1000);
747 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
748 if (i >= MAX_IOCTL_CONFIG_WAIT)
749 return -EAGAIN;
750 return 0;
752 case CCISS_GETNODENAME:
754 NodeName_type NodeName;
755 int i;
757 if (!arg)
758 return -EINVAL;
759 for (i = 0; i < 16; i++)
760 NodeName[i] =
761 readb(&host->cfgtable->ServerName[i]);
762 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
763 return -EFAULT;
764 return 0;
766 case CCISS_SETNODENAME:
768 NodeName_type NodeName;
769 unsigned long flags;
770 int i;
772 if (!arg)
773 return -EINVAL;
774 if (!capable(CAP_SYS_ADMIN))
775 return -EPERM;
777 if (copy_from_user
778 (NodeName, argp, sizeof(NodeName_type)))
779 return -EFAULT;
781 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
783 /* Update the field, and then ring the doorbell */
784 for (i = 0; i < 16; i++)
785 writeb(NodeName[i],
786 &host->cfgtable->ServerName[i]);
788 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
790 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
791 if (!(readl(host->vaddr + SA5_DOORBELL)
792 & CFGTBL_ChangeReq))
793 break;
794 /* delay and try again */
795 udelay(1000);
797 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
798 if (i >= MAX_IOCTL_CONFIG_WAIT)
799 return -EAGAIN;
800 return 0;
803 case CCISS_GETHEARTBEAT:
805 Heartbeat_type heartbeat;
807 if (!arg)
808 return -EINVAL;
809 heartbeat = readl(&host->cfgtable->HeartBeat);
810 if (copy_to_user
811 (argp, &heartbeat, sizeof(Heartbeat_type)))
812 return -EFAULT;
813 return 0;
815 case CCISS_GETBUSTYPES:
817 BusTypes_type BusTypes;
819 if (!arg)
820 return -EINVAL;
821 BusTypes = readl(&host->cfgtable->BusTypes);
822 if (copy_to_user
823 (argp, &BusTypes, sizeof(BusTypes_type)))
824 return -EFAULT;
825 return 0;
827 case CCISS_GETFIRMVER:
829 FirmwareVer_type firmware;
831 if (!arg)
832 return -EINVAL;
833 memcpy(firmware, host->firm_ver, 4);
835 if (copy_to_user
836 (argp, firmware, sizeof(FirmwareVer_type)))
837 return -EFAULT;
838 return 0;
840 case CCISS_GETDRIVVER:
842 DriverVer_type DriverVer = DRIVER_VERSION;
844 if (!arg)
845 return -EINVAL;
847 if (copy_to_user
848 (argp, &DriverVer, sizeof(DriverVer_type)))
849 return -EFAULT;
850 return 0;
853 case CCISS_REVALIDVOLS:
854 if (bdev != bdev->bd_contains || drv != host->drv)
855 return -ENXIO;
856 return revalidate_allvol(host);
858 case CCISS_GETLUNINFO:{
859 LogvolInfo_struct luninfo;
861 luninfo.LunID = drv->LunID;
862 luninfo.num_opens = drv->usage_count;
863 luninfo.num_parts = 0;
864 if (copy_to_user(argp, &luninfo,
865 sizeof(LogvolInfo_struct)))
866 return -EFAULT;
867 return 0;
869 case CCISS_DEREGDISK:
870 return rebuild_lun_table(host, disk);
872 case CCISS_REGNEWD:
873 return rebuild_lun_table(host, NULL);
875 case CCISS_PASSTHRU:
877 IOCTL_Command_struct iocommand;
878 CommandList_struct *c;
879 char *buff = NULL;
880 u64bit temp64;
881 unsigned long flags;
882 DECLARE_COMPLETION(wait);
884 if (!arg)
885 return -EINVAL;
887 if (!capable(CAP_SYS_RAWIO))
888 return -EPERM;
890 if (copy_from_user
891 (&iocommand, argp, sizeof(IOCTL_Command_struct)))
892 return -EFAULT;
893 if ((iocommand.buf_size < 1) &&
894 (iocommand.Request.Type.Direction != XFER_NONE)) {
895 return -EINVAL;
897 #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
898 /* Check kmalloc limits */
899 if (iocommand.buf_size > 128000)
900 return -EINVAL;
901 #endif
902 if (iocommand.buf_size > 0) {
903 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
904 if (buff == NULL)
905 return -ENOMEM;
907 if (iocommand.Request.Type.Direction == XFER_WRITE) {
908 /* Copy the data into the buffer we created */
909 if (copy_from_user
910 (buff, iocommand.buf, iocommand.buf_size)) {
911 kfree(buff);
912 return -EFAULT;
914 } else {
915 memset(buff, 0, iocommand.buf_size);
917 if ((c = cmd_alloc(host, 0)) == NULL) {
918 kfree(buff);
919 return -ENOMEM;
921 // Fill in the command type
922 c->cmd_type = CMD_IOCTL_PEND;
923 // Fill in Command Header
924 c->Header.ReplyQueue = 0; // unused in simple mode
925 if (iocommand.buf_size > 0) // buffer to fill
927 c->Header.SGList = 1;
928 c->Header.SGTotal = 1;
929 } else // no buffers to fill
931 c->Header.SGList = 0;
932 c->Header.SGTotal = 0;
934 c->Header.LUN = iocommand.LUN_info;
935 c->Header.Tag.lower = c->busaddr; // use the kernel address of the cmd block as the tag
937 // Fill in Request block
938 c->Request = iocommand.Request;
940 // Fill in the scatter gather information
941 if (iocommand.buf_size > 0) {
942 temp64.val = pci_map_single(host->pdev, buff,
943 iocommand.buf_size,
944 PCI_DMA_BIDIRECTIONAL);
945 c->SG[0].Addr.lower = temp64.val32.lower;
946 c->SG[0].Addr.upper = temp64.val32.upper;
947 c->SG[0].Len = iocommand.buf_size;
948 c->SG[0].Ext = 0; // we are not chaining
950 c->waiting = &wait;
952 /* Put the request on the tail of the request queue */
953 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
954 addQ(&host->reqQ, c);
955 host->Qdepth++;
956 start_io(host);
957 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
959 wait_for_completion(&wait);
961 /* unlock the buffers from DMA */
962 temp64.val32.lower = c->SG[0].Addr.lower;
963 temp64.val32.upper = c->SG[0].Addr.upper;
964 pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
965 iocommand.buf_size,
966 PCI_DMA_BIDIRECTIONAL);
968 /* Copy the error information out */
969 iocommand.error_info = *(c->err_info);
970 if (copy_to_user
971 (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
972 kfree(buff);
973 cmd_free(host, c, 0);
974 return -EFAULT;
977 if (iocommand.Request.Type.Direction == XFER_READ) {
978 /* Copy the data out of the buffer we created */
979 if (copy_to_user
980 (iocommand.buf, buff, iocommand.buf_size)) {
981 kfree(buff);
982 cmd_free(host, c, 0);
983 return -EFAULT;
986 kfree(buff);
987 cmd_free(host, c, 0);
988 return 0;
990 case CCISS_BIG_PASSTHRU:{
991 BIG_IOCTL_Command_struct *ioc;
992 CommandList_struct *c;
993 unsigned char **buff = NULL;
994 int *buff_size = NULL;
995 u64bit temp64;
996 unsigned long flags;
997 BYTE sg_used = 0;
998 int status = 0;
999 int i;
1000 DECLARE_COMPLETION(wait);
1001 __u32 left;
1002 __u32 sz;
1003 BYTE __user *data_ptr;
1005 if (!arg)
1006 return -EINVAL;
1007 if (!capable(CAP_SYS_RAWIO))
1008 return -EPERM;
1009 ioc = (BIG_IOCTL_Command_struct *)
1010 kmalloc(sizeof(*ioc), GFP_KERNEL);
1011 if (!ioc) {
1012 status = -ENOMEM;
1013 goto cleanup1;
1015 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
1016 status = -EFAULT;
1017 goto cleanup1;
1019 if ((ioc->buf_size < 1) &&
1020 (ioc->Request.Type.Direction != XFER_NONE)) {
1021 status = -EINVAL;
1022 goto cleanup1;
1024 /* Check kmalloc limits using all SGs */
1025 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
1026 status = -EINVAL;
1027 goto cleanup1;
1029 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
1030 status = -EINVAL;
1031 goto cleanup1;
1033 buff =
1034 kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1035 if (!buff) {
1036 status = -ENOMEM;
1037 goto cleanup1;
1039 buff_size = (int *)kmalloc(MAXSGENTRIES * sizeof(int),
1040 GFP_KERNEL);
1041 if (!buff_size) {
1042 status = -ENOMEM;
1043 goto cleanup1;
1045 left = ioc->buf_size;
1046 data_ptr = ioc->buf;
1047 while (left) {
1048 sz = (left >
1049 ioc->malloc_size) ? ioc->
1050 malloc_size : left;
1051 buff_size[sg_used] = sz;
1052 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1053 if (buff[sg_used] == NULL) {
1054 status = -ENOMEM;
1055 goto cleanup1;
1057 if (ioc->Request.Type.Direction == XFER_WRITE) {
1058 if (copy_from_user
1059 (buff[sg_used], data_ptr, sz)) {
1060 status = -EFAULT;
1061 goto cleanup1;
1063 } else {
1064 memset(buff[sg_used], 0, sz);
1066 left -= sz;
1067 data_ptr += sz;
1068 sg_used++;
1070 if ((c = cmd_alloc(host, 0)) == NULL) {
1071 status = -ENOMEM;
1072 goto cleanup1;
1074 c->cmd_type = CMD_IOCTL_PEND;
1075 c->Header.ReplyQueue = 0;
1077 if (ioc->buf_size > 0) {
1078 c->Header.SGList = sg_used;
1079 c->Header.SGTotal = sg_used;
1080 } else {
1081 c->Header.SGList = 0;
1082 c->Header.SGTotal = 0;
1084 c->Header.LUN = ioc->LUN_info;
1085 c->Header.Tag.lower = c->busaddr;
1087 c->Request = ioc->Request;
1088 if (ioc->buf_size > 0) {
1089 int i;
1090 for (i = 0; i < sg_used; i++) {
1091 temp64.val =
1092 pci_map_single(host->pdev, buff[i],
1093 buff_size[i],
1094 PCI_DMA_BIDIRECTIONAL);
1095 c->SG[i].Addr.lower =
1096 temp64.val32.lower;
1097 c->SG[i].Addr.upper =
1098 temp64.val32.upper;
1099 c->SG[i].Len = buff_size[i];
1100 c->SG[i].Ext = 0; /* we are not chaining */
1103 c->waiting = &wait;
1104 /* Put the request on the tail of the request queue */
1105 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1106 addQ(&host->reqQ, c);
1107 host->Qdepth++;
1108 start_io(host);
1109 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1110 wait_for_completion(&wait);
1111 /* unlock the buffers from DMA */
1112 for (i = 0; i < sg_used; i++) {
1113 temp64.val32.lower = c->SG[i].Addr.lower;
1114 temp64.val32.upper = c->SG[i].Addr.upper;
1115 pci_unmap_single(host->pdev,
1116 (dma_addr_t) temp64.val, buff_size[i],
1117 PCI_DMA_BIDIRECTIONAL);
1119 /* Copy the error information out */
1120 ioc->error_info = *(c->err_info);
1121 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1122 cmd_free(host, c, 0);
1123 status = -EFAULT;
1124 goto cleanup1;
1126 if (ioc->Request.Type.Direction == XFER_READ) {
1127 /* Copy the data out of the buffer we created */
1128 BYTE __user *ptr = ioc->buf;
1129 for (i = 0; i < sg_used; i++) {
1130 if (copy_to_user
1131 (ptr, buff[i], buff_size[i])) {
1132 cmd_free(host, c, 0);
1133 status = -EFAULT;
1134 goto cleanup1;
1136 ptr += buff_size[i];
1139 cmd_free(host, c, 0);
1140 status = 0;
1141 cleanup1:
1142 if (buff) {
1143 for (i = 0; i < sg_used; i++)
1144 kfree(buff[i]);
1145 kfree(buff);
1147 kfree(buff_size);
1148 kfree(ioc);
1149 return status;
1151 default:
1152 return -ENOTTY;
1157 * revalidate_allvol is for online array config utilities. After a
1158 * utility reconfigures the drives in the array, it can use this function
1159 * (through an ioctl) to make the driver zap any previous disk structs for
1160 * that controller and get new ones.
1162 * Right now I'm using the getgeometry() function to do this, but this
1163 * function should probably be finer grained and allow you to revalidate one
1164 * particular logical volume (instead of all of them on a particular
1165 * controller).
1167 static int revalidate_allvol(ctlr_info_t *host)
1169 int ctlr = host->ctlr, i;
1170 unsigned long flags;
1172 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1173 if (host->usage_count > 1) {
1174 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1175 printk(KERN_WARNING "cciss: Device busy for volume"
1176 " revalidation (usage=%d)\n", host->usage_count);
1177 return -EBUSY;
1179 host->usage_count++;
1180 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1182 for (i = 0; i < NWD; i++) {
1183 struct gendisk *disk = host->gendisk[i];
1184 if (disk) {
1185 request_queue_t *q = disk->queue;
1187 if (disk->flags & GENHD_FL_UP)
1188 del_gendisk(disk);
1189 if (q)
1190 blk_cleanup_queue(q);
1195 * Set the partition and block size structures for all volumes
1196 * on this controller to zero. We will reread all of this data
1198 memset(host->drv, 0, sizeof(drive_info_struct)
1199 * CISS_MAX_LUN);
1201 * Tell the array controller not to give us any interrupts while
1202 * we check the new geometry. Then turn interrupts back on when
1203 * we're done.
1205 host->access.set_intr_mask(host, CCISS_INTR_OFF);
1206 cciss_getgeometry(ctlr);
1207 host->access.set_intr_mask(host, CCISS_INTR_ON);
1209 /* Loop through each real device */
1210 for (i = 0; i < NWD; i++) {
1211 struct gendisk *disk = host->gendisk[i];
1212 drive_info_struct *drv = &(host->drv[i]);
1213 /* we must register the controller even if no disks exist */
1214 /* this is for the online array utilities */
1215 if (!drv->heads && i)
1216 continue;
1217 blk_queue_hardsect_size(drv->queue, drv->block_size);
1218 set_capacity(disk, drv->nr_blocks);
1219 add_disk(disk);
1221 host->usage_count--;
1222 return 0;
1225 static inline void complete_buffers(struct bio *bio, int status)
1227 while (bio) {
1228 struct bio *xbh = bio->bi_next;
1229 int nr_sectors = bio_sectors(bio);
1231 bio->bi_next = NULL;
1232 blk_finished_io(nr_sectors);
1233 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
1234 bio = xbh;
1238 static void cciss_check_queues(ctlr_info_t *h)
1240 int start_queue = h->next_to_run;
1241 int i;
1243 /* check to see if we have maxed out the number of commands that can
1244 * be placed on the queue. If so then exit. We do this check here
1245 * in case the interrupt we serviced was from an ioctl and did not
1246 * free any new commands.
1248 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
1249 return;
1251 /* We have room on the queue for more commands. Now we need to queue
1252 * them up. We will also keep track of the next queue to run so
1253 * that every queue gets a chance to be started first.
1255 for (i = 0; i < h->highest_lun + 1; i++) {
1256 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1257 /* make sure the disk has been added and the drive is real
1258 * because this can be called from the middle of init_one.
1260 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1261 continue;
1262 blk_start_queue(h->gendisk[curr_queue]->queue);
1264 /* check to see if we have maxed out the number of commands
1265 * that can be placed on the queue.
1267 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
1268 if (curr_queue == start_queue) {
1269 h->next_to_run =
1270 (start_queue + 1) % (h->highest_lun + 1);
1271 break;
1272 } else {
1273 h->next_to_run = curr_queue;
1274 break;
1276 } else {
1277 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
1282 static void cciss_softirq_done(struct request *rq)
1284 CommandList_struct *cmd = rq->completion_data;
1285 ctlr_info_t *h = hba[cmd->ctlr];
1286 unsigned long flags;
1287 u64bit temp64;
1288 int i, ddir;
1290 if (cmd->Request.Type.Direction == XFER_READ)
1291 ddir = PCI_DMA_FROMDEVICE;
1292 else
1293 ddir = PCI_DMA_TODEVICE;
1295 /* command did not need to be retried */
1296 /* unmap the DMA mapping for all the scatter gather elements */
1297 for (i = 0; i < cmd->Header.SGList; i++) {
1298 temp64.val32.lower = cmd->SG[i].Addr.lower;
1299 temp64.val32.upper = cmd->SG[i].Addr.upper;
1300 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1303 complete_buffers(rq->bio, rq->errors);
1305 #ifdef CCISS_DEBUG
1306 printk("Done with %p\n", rq);
1307 #endif /* CCISS_DEBUG */
1309 add_disk_randomness(rq->rq_disk);
1310 spin_lock_irqsave(&h->lock, flags);
1311 end_that_request_last(rq, rq->errors);
1312 cmd_free(h, cmd, 1);
1313 cciss_check_queues(h);
1314 spin_unlock_irqrestore(&h->lock, flags);
1317 /* This function will check the usage_count of the drive to be updated/added.
1318 * If the usage_count is zero then the drive information will be updated and
1319 * the disk will be re-registered with the kernel. If not then it will be
1320 * left alone for the next reboot. The exception to this is disk 0 which
1321 * will always be left registered with the kernel since it is also the
1322 * controller node. Any changes to disk 0 will show up on the next
1323 * reboot.
1325 static void cciss_update_drive_info(int ctlr, int drv_index)
1327 ctlr_info_t *h = hba[ctlr];
1328 struct gendisk *disk;
1329 ReadCapdata_struct *size_buff = NULL;
1330 InquiryData_struct *inq_buff = NULL;
1331 unsigned int block_size;
1332 unsigned int total_size;
1333 unsigned long flags = 0;
1334 int ret = 0;
1336 /* if the disk already exists then deregister it before proceeding */
1337 if (h->drv[drv_index].raid_level != -1) {
1338 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1339 h->drv[drv_index].busy_configuring = 1;
1340 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1341 ret = deregister_disk(h->gendisk[drv_index],
1342 &h->drv[drv_index], 0);
1343 h->drv[drv_index].busy_configuring = 0;
1346 /* If the disk is in use return */
1347 if (ret)
1348 return;
1350 /* Get information about the disk and modify the driver structure */
1351 size_buff = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
1352 if (size_buff == NULL)
1353 goto mem_msg;
1354 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1355 if (inq_buff == NULL)
1356 goto mem_msg;
1358 cciss_read_capacity(ctlr, drv_index, size_buff, 1,
1359 &total_size, &block_size);
1360 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1361 inq_buff, &h->drv[drv_index]);
1363 ++h->num_luns;
1364 disk = h->gendisk[drv_index];
1365 set_capacity(disk, h->drv[drv_index].nr_blocks);
1367 /* if it's the controller it's already added */
1368 if (drv_index) {
1369 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1371 /* Set up queue information */
1372 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1373 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1375 /* This is a hardware imposed limit. */
1376 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1378 /* This is a limit in the driver and could be eliminated. */
1379 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1381 blk_queue_max_sectors(disk->queue, 512);
1383 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1385 disk->queue->queuedata = hba[ctlr];
1387 blk_queue_hardsect_size(disk->queue,
1388 hba[ctlr]->drv[drv_index].block_size);
1390 h->drv[drv_index].queue = disk->queue;
1391 add_disk(disk);
1394 freeret:
1395 kfree(size_buff);
1396 kfree(inq_buff);
1397 return;
1398 mem_msg:
1399 printk(KERN_ERR "cciss: out of memory\n");
1400 goto freeret;
1403 /* This function will find the first index of the controllers drive array
1404 * that has a -1 for the raid_level and will return that index. This is
1405 * where new drives will be added. If the index to be returned is greater
1406 * than the highest_lun index for the controller then highest_lun is set
1407 * to this new index. If there are no available indexes then -1 is returned.
1409 static int cciss_find_free_drive_index(int ctlr)
1411 int i;
1413 for (i = 0; i < CISS_MAX_LUN; i++) {
1414 if (hba[ctlr]->drv[i].raid_level == -1) {
1415 if (i > hba[ctlr]->highest_lun)
1416 hba[ctlr]->highest_lun = i;
1417 return i;
1420 return -1;
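/*
 * A raid_level of -1 marks a free slot in h->drv[]; deregister_disk() below
 * resets raid_level to -1 when a logical drive is removed, which is what
 * lets that slot be reused here without disturbing the ordering of the
 * remaining drives.
 */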
1423 /* This function will add and remove logical drives from the Logical
1424 * drive array of the controller and maintain persistency of ordering
1425 * so that mount points are preserved until the next reboot. This allows
1426 * for the removal of logical drives in the middle of the drive array
1427 * without a re-ordering of those drives.
1428 * INPUT
1429 * h = The controller to perform the operations on
1430 * del_disk = The disk to remove if specified. If the value given
1431 * is NULL then no disk is removed.
1433 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1435 int ctlr = h->ctlr;
1436 int num_luns;
1437 ReportLunData_struct *ld_buff = NULL;
1438 drive_info_struct *drv = NULL;
1439 int return_code;
1440 int listlength = 0;
1441 int i;
1442 int drv_found;
1443 int drv_index = 0;
1444 __u32 lunid = 0;
1445 unsigned long flags;
1447 /* Set busy_configuring flag for this operation */
1448 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1449 if (h->num_luns >= CISS_MAX_LUN) {
1450 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1451 return -EINVAL;
1454 if (h->busy_configuring) {
1455 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1456 return -EBUSY;
1458 h->busy_configuring = 1;
1460 /* if del_disk is NULL then we are being called to add a new disk
1461 * and update the logical drive table. If it is not NULL then
1462 * we will check if the disk is in use or not.
1464 if (del_disk != NULL) {
1465 drv = get_drv(del_disk);
1466 drv->busy_configuring = 1;
1467 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1468 return_code = deregister_disk(del_disk, drv, 1);
1469 drv->busy_configuring = 0;
1470 h->busy_configuring = 0;
1471 return return_code;
1472 } else {
1473 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1474 if (!capable(CAP_SYS_RAWIO))
1475 return -EPERM;
1477 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1478 if (ld_buff == NULL)
1479 goto mem_msg;
1481 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1482 sizeof(ReportLunData_struct), 0,
1483 0, 0, TYPE_CMD);
1485 if (return_code == IO_OK) {
1486 listlength |=
1487 (0xff & (unsigned int)(ld_buff->LUNListLength[0]))
1488 << 24;
1489 listlength |=
1490 (0xff & (unsigned int)(ld_buff->LUNListLength[1]))
1491 << 16;
1492 listlength |=
1493 (0xff & (unsigned int)(ld_buff->LUNListLength[2]))
1494 << 8;
1495 listlength |=
1496 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
1497 } else { /* reading number of logical volumes failed */
1498 printk(KERN_WARNING "cciss: report logical volume"
1499 " command failed\n");
1500 listlength = 0;
1501 goto freeret;
1504 num_luns = listlength / 8; /* 8 bytes per entry */
1505 if (num_luns > CISS_MAX_LUN) {
1506 num_luns = CISS_MAX_LUN;
1507 printk(KERN_WARNING "cciss: more luns configured"
1508 " on controller than can be handled by"
1509 " this driver.\n");
1512 /* Compare controller drive array to drivers drive array.
1513 * Check for updates in the drive information and any new drives
1514 * on the controller.
1516 for (i = 0; i < num_luns; i++) {
1517 int j;
1519 drv_found = 0;
1521 lunid = (0xff &
1522 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1523 lunid |= (0xff &
1524 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1525 lunid |= (0xff &
1526 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1527 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1529 /* Find if the LUN is already in the drive array
1530 * of the controller. If so then update its info
1531 * if not in use. If it does not exist then find
1532 * the first free index and add it.
1534 for (j = 0; j <= h->highest_lun; j++) {
1535 if (h->drv[j].LunID == lunid) {
1536 drv_index = j;
1537 drv_found = 1;
1541 /* check if the drive was found already in the array */
1542 if (!drv_found) {
1543 drv_index = cciss_find_free_drive_index(ctlr);
1544 if (drv_index == -1)
1545 goto freeret;
1548 h->drv[drv_index].LunID = lunid;
1549 cciss_update_drive_info(ctlr, drv_index);
1550 } /* end for */
1551 } /* end else */
1553 freeret:
1554 kfree(ld_buff);
1555 h->busy_configuring = 0;
1556 /* We return -1 here to tell the ACU that we have registered/updated
1557 * all of the drives that we can and to keep it from calling us
1558 * additional times.
1560 return -1;
1561 mem_msg:
1562 printk(KERN_ERR "cciss: out of memory\n");
1563 goto freeret;
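/*
 * The REPORT LOGICAL LUNS reply parsed above carries LUNListLength as four
 * individual bytes (most significant byte first) followed by 8-byte LUN
 * entries, which is why listlength and lunid are assembled byte by byte
 * rather than read as native integers.
 */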
1566 /* This function will deregister the disk and its queue from the
1567 * kernel. It must be called with the controller lock held and the
1568 * drv structure's busy_configuring flag set. Its parameters are:
1570 * disk = This is the disk to be deregistered
1571 * drv = This is the drive_info_struct associated with the disk to be
1572 * deregistered. It contains information about the disk used
1573 * by the driver.
1574 * clear_all = This flag determines whether or not the disk information
1575 * is going to be completely cleared out and the highest_lun
1576 * reset. Sometimes we want to clear out information about
1577 * the disk in preparation for re-adding it. In this case
1578 * the highest_lun should be left unchanged and the LunID
1579 * should not be cleared.
1581 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1582 int clear_all)
1584 ctlr_info_t *h = get_host(disk);
1586 if (!capable(CAP_SYS_RAWIO))
1587 return -EPERM;
1589 /* make sure logical volume is NOT in use */
1590 if (clear_all || (h->gendisk[0] == disk)) {
1591 if (drv->usage_count > 1)
1592 return -EBUSY;
1593 } else if (drv->usage_count > 0)
1594 return -EBUSY;
1596 /* invalidate the devices and deregister the disk. If it is disk
1597 * zero, do not deregister it but just zero out its values. This
1598 * allows us to delete disk zero but keep the controller registered.
1600 if (h->gendisk[0] != disk) {
1601 if (disk) {
1602 request_queue_t *q = disk->queue;
1603 if (disk->flags & GENHD_FL_UP)
1604 del_gendisk(disk);
1605 if (q) {
1606 blk_cleanup_queue(q);
1607 drv->queue = NULL;
1612 --h->num_luns;
1613 /* zero out the disk size info */
1614 drv->nr_blocks = 0;
1615 drv->block_size = 0;
1616 drv->heads = 0;
1617 drv->sectors = 0;
1618 drv->cylinders = 0;
1619 drv->raid_level = -1; /* This can be used as a flag variable to
1620 * indicate that this element of the drive
1621 * array is free.
1624 if (clear_all) {
1625 /* check to see if it was the last disk */
1626 if (drv == h->drv + h->highest_lun) {
1627 /* if so, find the new highest lun */
1628 int i, newhighest = -1;
1629 for (i = 0; i < h->highest_lun; i++) {
1630 /* if the disk has size > 0, it is available */
1631 if (h->drv[i].heads)
1632 newhighest = i;
1634 h->highest_lun = newhighest;
1637 drv->LunID = 0;
1639 return 0;
1642 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
1643 1: address logical volume log_unit,
1644 2: periph device address is scsi3addr */
1645 unsigned int log_unit, __u8 page_code,
1646 unsigned char *scsi3addr, int cmd_type)
1648 ctlr_info_t *h = hba[ctlr];
1649 u64bit buff_dma_handle;
1650 int status = IO_OK;
1652 c->cmd_type = CMD_IOCTL_PEND;
1653 c->Header.ReplyQueue = 0;
1654 if (buff != NULL) {
1655 c->Header.SGList = 1;
1656 c->Header.SGTotal = 1;
1657 } else {
1658 c->Header.SGList = 0;
1659 c->Header.SGTotal = 0;
1661 c->Header.Tag.lower = c->busaddr;
1663 c->Request.Type.Type = cmd_type;
1664 if (cmd_type == TYPE_CMD) {
1665 switch (cmd) {
1666 case CISS_INQUIRY:
1667 /* If the logical unit number is 0 then this is going
1668 to the controller, so it's a physical command:
1669 mode = 0, target = 0. So we have nothing to write.
1670 otherwise, if use_unit_num == 1,
1671 mode = 1(volume set addressing) target = LUNID
1672 otherwise, if use_unit_num == 2,
1673 mode = 0(periph dev addr) target = scsi3addr */
1674 if (use_unit_num == 1) {
1675 c->Header.LUN.LogDev.VolId =
1676 h->drv[log_unit].LunID;
1677 c->Header.LUN.LogDev.Mode = 1;
1678 } else if (use_unit_num == 2) {
1679 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
1681 c->Header.LUN.LogDev.Mode = 0;
1683 /* are we trying to read a vital product page */
1684 if (page_code != 0) {
1685 c->Request.CDB[1] = 0x01;
1686 c->Request.CDB[2] = page_code;
1688 c->Request.CDBLen = 6;
1689 c->Request.Type.Attribute = ATTR_SIMPLE;
1690 c->Request.Type.Direction = XFER_READ;
1691 c->Request.Timeout = 0;
1692 c->Request.CDB[0] = CISS_INQUIRY;
1693 c->Request.CDB[4] = size & 0xFF;
1694 break;
1695 case CISS_REPORT_LOG:
1696 case CISS_REPORT_PHYS:
1697 /* Talking to the controller, so it's a physical command
1698 mode = 00 target = 0. Nothing to write.
1700 c->Request.CDBLen = 12;
1701 c->Request.Type.Attribute = ATTR_SIMPLE;
1702 c->Request.Type.Direction = XFER_READ;
1703 c->Request.Timeout = 0;
1704 c->Request.CDB[0] = cmd;
1705 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1706 c->Request.CDB[7] = (size >> 16) & 0xFF;
1707 c->Request.CDB[8] = (size >> 8) & 0xFF;
1708 c->Request.CDB[9] = size & 0xFF;
1709 break;
1711 case CCISS_READ_CAPACITY:
1712 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1713 c->Header.LUN.LogDev.Mode = 1;
1714 c->Request.CDBLen = 10;
1715 c->Request.Type.Attribute = ATTR_SIMPLE;
1716 c->Request.Type.Direction = XFER_READ;
1717 c->Request.Timeout = 0;
1718 c->Request.CDB[0] = cmd;
1719 break;
1720 case CCISS_CACHE_FLUSH:
1721 c->Request.CDBLen = 12;
1722 c->Request.Type.Attribute = ATTR_SIMPLE;
1723 c->Request.Type.Direction = XFER_WRITE;
1724 c->Request.Timeout = 0;
1725 c->Request.CDB[0] = BMIC_WRITE;
1726 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1727 break;
1728 default:
1729 printk(KERN_WARNING
1730 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1731 return IO_ERROR;
1733 } else if (cmd_type == TYPE_MSG) {
1734 switch (cmd) {
1735 case 0: /* ABORT message */
1736 c->Request.CDBLen = 12;
1737 c->Request.Type.Attribute = ATTR_SIMPLE;
1738 c->Request.Type.Direction = XFER_WRITE;
1739 c->Request.Timeout = 0;
1740 c->Request.CDB[0] = cmd; /* abort */
1741 c->Request.CDB[1] = 0; /* abort a command */
1742 /* buff contains the tag of the command to abort */
1743 memcpy(&c->Request.CDB[4], buff, 8);
1744 break;
1745 case 1: /* RESET message */
1746 c->Request.CDBLen = 12;
1747 c->Request.Type.Attribute = ATTR_SIMPLE;
1748 c->Request.Type.Direction = XFER_WRITE;
1749 c->Request.Timeout = 0;
1750 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1751 c->Request.CDB[0] = cmd; /* reset */
1752 c->Request.CDB[1] = 0x04; /* reset a LUN */
break;
1753 case 3: /* No-Op message */
1754 c->Request.CDBLen = 1;
1755 c->Request.Type.Attribute = ATTR_SIMPLE;
1756 c->Request.Type.Direction = XFER_WRITE;
1757 c->Request.Timeout = 0;
1758 c->Request.CDB[0] = cmd;
1759 break;
1760 default:
1761 printk(KERN_WARNING
1762 "cciss%d: unknown message type %d\n", ctlr, cmd);
1763 return IO_ERROR;
1765 } else {
1766 printk(KERN_WARNING
1767 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1768 return IO_ERROR;
1770 /* Fill in the scatter gather information */
1771 if (size > 0) {
1772 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1773 buff, size,
1774 PCI_DMA_BIDIRECTIONAL);
1775 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1776 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1777 c->SG[0].Len = size;
1778 c->SG[0].Ext = 0; /* we are not chaining */
1780 return status;
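/*
 * fill_cmd() only prepares the CommandList_struct; sendcmd() and
 * sendcmd_withirq() submit it.  A typical firmware command from this file,
 * reading a volume's capacity with interrupts enabled, looks like:
 *
 *	return_code = sendcmd_withirq(CCISS_READ_CAPACITY, ctlr, buf,
 *				      sizeof(*buf), 1, logvol, 0, TYPE_CMD);
 */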
1783 static int sendcmd_withirq(__u8 cmd,
1784 int ctlr,
1785 void *buff,
1786 size_t size,
1787 unsigned int use_unit_num,
1788 unsigned int log_unit, __u8 page_code, int cmd_type)
1790 ctlr_info_t *h = hba[ctlr];
1791 CommandList_struct *c;
1792 u64bit buff_dma_handle;
1793 unsigned long flags;
1794 int return_status;
1795 DECLARE_COMPLETION(wait);
1797 if ((c = cmd_alloc(h, 0)) == NULL)
1798 return -ENOMEM;
1799 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1800 log_unit, page_code, NULL, cmd_type);
1801 if (return_status != IO_OK) {
1802 cmd_free(h, c, 0);
1803 return return_status;
1805 resend_cmd2:
1806 c->waiting = &wait;
1808 /* Put the request on the tail of the queue and send it */
1809 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1810 addQ(&h->reqQ, c);
1811 h->Qdepth++;
1812 start_io(h);
1813 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1815 wait_for_completion(&wait);
1817 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
1818 switch (c->err_info->CommandStatus) {
1819 case CMD_TARGET_STATUS:
1820 printk(KERN_WARNING "cciss: cmd %p has "
1821 " completed with errors\n", c);
1822 if (c->err_info->ScsiStatus) {
1823 printk(KERN_WARNING "cciss: cmd %p "
1824 "has SCSI Status = %x\n",
1825 c, c->err_info->ScsiStatus);
1828 break;
1829 case CMD_DATA_UNDERRUN:
1830 case CMD_DATA_OVERRUN:
1831 /* expected for inquire and report lun commands */
1832 break;
1833 case CMD_INVALID:
1834 printk(KERN_WARNING "cciss: Cmd %p is "
1835 "reported invalid\n", c);
1836 return_status = IO_ERROR;
1837 break;
1838 case CMD_PROTOCOL_ERR:
1839 printk(KERN_WARNING "cciss: cmd %p has "
1840 "protocol error \n", c);
1841 return_status = IO_ERROR;
1842 break;
1843 case CMD_HARDWARE_ERR:
1844 printk(KERN_WARNING "cciss: cmd %p had "
1845 " hardware error\n", c);
1846 return_status = IO_ERROR;
1847 break;
1848 case CMD_CONNECTION_LOST:
1849 printk(KERN_WARNING "cciss: cmd %p had "
1850 "connection lost\n", c);
1851 return_status = IO_ERROR;
1852 break;
1853 case CMD_ABORTED:
1854 printk(KERN_WARNING "cciss: cmd %p was "
1855 "aborted\n", c);
1856 return_status = IO_ERROR;
1857 break;
1858 case CMD_ABORT_FAILED:
1859 printk(KERN_WARNING "cciss: cmd %p reports "
1860 "abort failed\n", c);
1861 return_status = IO_ERROR;
1862 break;
1863 case CMD_UNSOLICITED_ABORT:
1864 printk(KERN_WARNING
1865 "cciss%d: unsolicited abort %p\n", ctlr, c);
1866 if (c->retry_count < MAX_CMD_RETRIES) {
1867 printk(KERN_WARNING
1868 "cciss%d: retrying %p\n", ctlr, c);
1869 c->retry_count++;
1870 /* erase the old error information */
1871 memset(c->err_info, 0,
1872 sizeof(ErrorInfo_struct));
1873 return_status = IO_OK;
1874 INIT_COMPLETION(wait);
1875 goto resend_cmd2;
1877 return_status = IO_ERROR;
1878 break;
1879 default:
1880 printk(KERN_WARNING "cciss: cmd %p returned "
1881 "unknown status %x\n", c,
1882 c->err_info->CommandStatus);
1883 return_status = IO_ERROR;
1886 /* unlock the buffers from DMA */
1887 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1888 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1889 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
1890 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1891 cmd_free(h, c, 0);
1892 return return_status;
1895 static void cciss_geometry_inquiry(int ctlr, int logvol,
1896 int withirq, unsigned int total_size,
1897 unsigned int block_size,
1898 InquiryData_struct *inq_buff,
1899 drive_info_struct *drv)
1901 int return_code;
1902 memset(inq_buff, 0, sizeof(InquiryData_struct));
1903 if (withirq)
1904 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1905 inq_buff, sizeof(*inq_buff), 1,
1906 logvol, 0xC1, TYPE_CMD);
1907 else
1908 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1909 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
1910 TYPE_CMD);
1911 if (return_code == IO_OK) {
1912 if (inq_buff->data_byte[8] == 0xFF) {
1913 printk(KERN_WARNING
1914 "cciss: reading geometry failed, volume "
1915 "does not support reading geometry\n");
1916 drv->block_size = block_size;
1917 drv->nr_blocks = total_size;
1918 drv->heads = 255;
1919 drv->sectors = 32; // Sectors per track
1920 drv->cylinders = total_size / 255 / 32;
1921 } else {
1922 unsigned int t;
1924 drv->block_size = block_size;
1925 drv->nr_blocks = total_size;
1926 drv->heads = inq_buff->data_byte[6];
1927 drv->sectors = inq_buff->data_byte[7];
1928 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1929 drv->cylinders += inq_buff->data_byte[5];
1930 drv->raid_level = inq_buff->data_byte[8];
1931 t = drv->heads * drv->sectors;
1932 if (t > 1) {
1933 drv->cylinders = total_size / t;
1936 } else { /* Get geometry failed */
1937 printk(KERN_WARNING "cciss: reading geometry failed\n");
1939 printk(KERN_INFO " heads= %d, sectors= %d, cylinders= %d\n\n",
1940 drv->heads, drv->sectors, drv->cylinders);
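/*
 * cciss_read_capacity: issue a CCISS_READ_CAPACITY command and convert
 * the big-endian total_size/block_size fields returned by the firmware.
 * On failure the volume is reported as 0 blocks of BLOCK_SIZE bytes.
 * A typical call, mirroring the one in cciss_revalidate() below:
 *
 *	cciss_read_capacity(h->ctlr, logvol, size_buff, 1,
 *			    &total_size, &block_size);
 */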
1943 static void
1944 cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
1945 int withirq, unsigned int *total_size,
1946 unsigned int *block_size)
1948 int return_code;
1949 memset(buf, 0, sizeof(*buf));
1950 if (withirq)
1951 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1952 ctlr, buf, sizeof(*buf), 1,
1953 logvol, 0, TYPE_CMD);
1954 else
1955 return_code = sendcmd(CCISS_READ_CAPACITY,
1956 ctlr, buf, sizeof(*buf), 1, logvol, 0,
1957 NULL, TYPE_CMD);
1958 if (return_code == IO_OK) {
1959 *total_size =
1960 be32_to_cpu(*((__be32 *) & buf->total_size[0])) + 1;
1961 *block_size = be32_to_cpu(*((__be32 *) & buf->block_size[0]));
1962 } else { /* read capacity command failed */
1963 printk(KERN_WARNING "cciss: read capacity failed\n");
1964 *total_size = 0;
1965 *block_size = BLOCK_SIZE;
1967 printk(KERN_INFO " blocks=%u block_size=%u\n",
1968 *total_size, *block_size);
1969 return;
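/*
 * cciss_revalidate: re-read the capacity and geometry of one logical
 * volume and push the new block size and capacity back into the block
 * layer.  Returns 0 on success, 1 if the volume is unknown or memory
 * could not be allocated.
 */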
1972 static int cciss_revalidate(struct gendisk *disk)
1974 ctlr_info_t *h = get_host(disk);
1975 drive_info_struct *drv = get_drv(disk);
1976 int logvol;
1977 int FOUND = 0;
1978 unsigned int block_size;
1979 unsigned int total_size;
1980 ReadCapdata_struct *size_buff = NULL;
1981 InquiryData_struct *inq_buff = NULL;
1983 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
1984 if (h->drv[logvol].LunID == drv->LunID) {
1985 FOUND = 1;
1986 break;
1990 if (!FOUND)
1991 return 1;
1993 size_buff = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
1994 if (size_buff == NULL) {
1995 printk(KERN_WARNING "cciss: out of memory\n");
1996 return 1;
1998 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1999 if (inq_buff == NULL) {
2000 printk(KERN_WARNING "cciss: out of memory\n");
2001 kfree(size_buff);
2002 return 1;
2005 cciss_read_capacity(h->ctlr, logvol, size_buff, 1, &total_size,
2006 &block_size);
2007 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2008 inq_buff, drv);
2010 blk_queue_hardsect_size(drv->queue, drv->block_size);
2011 set_capacity(disk, drv->nr_blocks);
2013 kfree(size_buff);
2014 kfree(inq_buff);
2015 return 0;
2019 * Poll for a command to complete.
2020 * The memory-mapped FIFO is polled for the completion.
2021 * Used only at init time; interrupts from the HBA are disabled.
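 * Returns the raw completion tag read from the FIFO, or 1 (never a
 * valid command address) if nothing completes within 20 seconds.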
2023 static unsigned long pollcomplete(int ctlr)
2025 unsigned long done;
2026 int i;
2028 /* Wait (up to 20 seconds) for a command to complete */
2030 for (i = 20 * HZ; i > 0; i--) {
2031 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2032 if (done == FIFO_EMPTY)
2033 schedule_timeout_uninterruptible(1);
2034 else
2035 return done;
2037 /* Invalid address to tell caller we ran out of time */
2038 return 1;
2041 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2043 /* We get in here if sendcmd() is polling for completions
2044 and gets some command back that it wasn't expecting --
2045 something other than that which it just sent down.
2046 Ordinarily, that shouldn't happen, but it can happen when
2047 the scsi tape stuff gets into error handling mode, and
2048 starts using sendcmd() to try to abort commands and
2049 reset tape drives. In that case, sendcmd may pick up
2050 completions of commands that were sent to logical drives
2051 through the block i/o system, or cciss ioctls completing, etc.
2052 In that case, we need to save those completions for later
2053 processing by the interrupt handler.
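   The saved completions are drained again later: sendcmd() calls
   do_cciss_intr() directly once it is finished, and
   get_next_completion() checks the reject list before reading the
   hardware FIFO.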
2056 #ifdef CONFIG_CISS_SCSI_TAPE
2057 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2059 /* If it's not the scsi tape stuff doing error handling (abort */
2060 /* or reset), then we don't expect anything weird. */
2061 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2062 #endif
2063 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2064 "Invalid command list address returned! (%lx)\n",
2065 ctlr, complete);
2066 /* not much we can do. */
2067 #ifdef CONFIG_CISS_SCSI_TAPE
2068 return 1;
2071 /* We've sent down an abort or reset, but something else
2072 has completed */
2073 if (srl->ncompletions >= (NR_CMDS + 2)) {
2074 /* Uh oh. No room to save it for later... */
2075 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2076 "reject list overflow, command lost!\n", ctlr);
2077 return 1;
2079 /* Save it for later */
2080 srl->complete[srl->ncompletions] = complete;
2081 srl->ncompletions++;
2082 #endif
2083 return 0;
2087 * Send a command to the controller, and wait for it to complete.
2088 * Only used at init time.
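 * sendcmd() polls the controller FIFO (via pollcomplete()) instead of
 * sleeping on an interrupt, so board interrupts are masked while it
 * runs.  A typical caller in this file is the cache flush in
 * cciss_remove_one():
 *
 *	sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL, TYPE_CMD);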
2090 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2091 1: address logical volume log_unit,
2092 2: periph device address is scsi3addr */
2093 unsigned int log_unit,
2094 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2096 CommandList_struct *c;
2097 int i;
2098 unsigned long complete;
2099 ctlr_info_t *info_p = hba[ctlr];
2100 u64bit buff_dma_handle;
2101 int status, done = 0;
2103 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2104 printk(KERN_WARNING "cciss: unable to get memory");
2105 return IO_ERROR;
2107 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2108 log_unit, page_code, scsi3addr, cmd_type);
2109 if (status != IO_OK) {
2110 cmd_free(info_p, c, 1);
2111 return status;
2113 resend_cmd1:
2115 * Disable interrupt
2117 #ifdef CCISS_DEBUG
2118 printk(KERN_DEBUG "cciss: turning intr off\n");
2119 #endif /* CCISS_DEBUG */
2120 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2122 /* Make sure there is room in the command FIFO */
2123 /* Actually it should be completely empty at this time */
2124 /* unless we are in here doing error handling for the scsi */
2125 /* tape side of the driver. */
2126 for (i = 200000; i > 0; i--) {
2127 /* if the fifo isn't full, go ahead */
2128 if (!(info_p->access.fifo_full(info_p))) {
2130 break;
2132 udelay(10);
2133 printk(KERN_WARNING "cciss%d: SendCmd FIFO full,"
2134 " waiting!\n", ctlr);
2137 * Send the cmd
2139 info_p->access.submit_command(info_p, c);
2140 done = 0;
2141 do {
2142 complete = pollcomplete(ctlr);
2144 #ifdef CCISS_DEBUG
2145 printk(KERN_DEBUG "cciss: command completed\n");
2146 #endif /* CCISS_DEBUG */
2148 if (complete == 1) {
2149 printk(KERN_WARNING
2150 "cciss cciss%d: SendCmd Timeout out, "
2151 "No command list address returned!\n", ctlr);
2152 status = IO_ERROR;
2153 done = 1;
2154 break;
2157 /* This will need to change for direct lookup completions */
2158 if ((complete & CISS_ERROR_BIT)
2159 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2160 /* if data overrun or underrun on Report command
2161 ignore it
2163 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2164 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2165 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2166 ((c->err_info->CommandStatus ==
2167 CMD_DATA_OVERRUN) ||
2168 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2169 )) {
2170 complete = c->busaddr;
2171 } else {
2172 if (c->err_info->CommandStatus ==
2173 CMD_UNSOLICITED_ABORT) {
2174 printk(KERN_WARNING "cciss%d: "
2175 "unsolicited abort %p\n",
2176 ctlr, c);
2177 if (c->retry_count < MAX_CMD_RETRIES) {
2178 printk(KERN_WARNING
2179 "cciss%d: retrying %p\n",
2180 ctlr, c);
2181 c->retry_count++;
2182 /* erase the old error */
2183 /* information */
2184 memset(c->err_info, 0,
2185 sizeof
2186 (ErrorInfo_struct));
2187 goto resend_cmd1;
2188 } else {
2189 printk(KERN_WARNING
2190 "cciss%d: retried %p too "
2191 "many times\n", ctlr, c);
2192 status = IO_ERROR;
2193 goto cleanup1;
2195 } else if (c->err_info->CommandStatus ==
2196 CMD_UNABORTABLE) {
2197 printk(KERN_WARNING
2198 "cciss%d: command could not be aborted.\n",
2199 ctlr);
2200 status = IO_ERROR;
2201 goto cleanup1;
2203 printk(KERN_WARNING "cciss%d: sendcmd"
2204 " error %x\n", ctlr,
2205 c->err_info->CommandStatus);
2206 printk(KERN_WARNING "cciss%d: sendcmd"
2207 " offensive info\n"
2208 " size %x\n num %x value %x\n",
2209 ctlr,
2210 c->err_info->MoreErrInfo.Invalid_Cmd.
2211 offense_size,
2212 c->err_info->MoreErrInfo.Invalid_Cmd.
2213 offense_num,
2214 c->err_info->MoreErrInfo.Invalid_Cmd.
2215 offense_value);
2216 status = IO_ERROR;
2217 goto cleanup1;
2220 /* This will need changing for direct lookup completions */
2221 if (complete != c->busaddr) {
2222 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2223 BUG(); /* we are pretty much hosed if we get here. */
2225 continue;
2226 } else
2227 done = 1;
2228 } while (!done);
2230 cleanup1:
2231 /* unlock the data buffer from DMA */
2232 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2233 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2234 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2235 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2236 #ifdef CONFIG_CISS_SCSI_TAPE
2237 /* if we saved some commands for later, process them now. */
2238 if (info_p->scsi_rejects.ncompletions > 0)
2239 do_cciss_intr(0, info_p, NULL);
2240 #endif
2241 cmd_free(info_p, c, 1);
2242 return status;
2246 * Map (physical) PCI mem into (virtual) kernel space
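 * The base address is rounded down to a page boundary for ioremap()
 * and the intra-page offset is added back to the returned pointer.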
2248 static void __iomem *remap_pci_mem(ulong base, ulong size)
2250 ulong page_base = ((ulong) base) & PAGE_MASK;
2251 ulong page_offs = ((ulong) base) - page_base;
2252 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2254 return page_remapped ? (page_remapped + page_offs) : NULL;
2258 * Takes jobs off the request queue and sends them to the hardware, then
2259 * puts them on the completion queue to wait for completion.
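 * Callers are expected to hold the per-controller lock; both
 * resend_cciss_cmd() and do_cciss_request() note this explicitly.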
2261 static void start_io(ctlr_info_t *h)
2263 CommandList_struct *c;
2265 while ((c = h->reqQ) != NULL) {
2266 /* can't do anything if fifo is full */
2267 if ((h->access.fifo_full(h))) {
2268 printk(KERN_WARNING "cciss: fifo full\n");
2269 break;
2272 /* Get the first entry from the Request Q */
2273 removeQ(&(h->reqQ), c);
2274 h->Qdepth--;
2276 /* Tell the controller to execute the command */
2277 h->access.submit_command(h, c);
2279 /* Put job onto the completed Q */
2280 addQ(&(h->cmpQ), c);
2284 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2285 /* Zeros out the error record and then resends the command back */
2286 /* to the controller */
2287 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2289 /* erase the old error information */
2290 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2292 /* add it to software queue and then send it to the controller */
2293 addQ(&(h->reqQ), c);
2294 h->Qdepth++;
2295 if (h->Qdepth > h->maxQsinceinit)
2296 h->maxQsinceinit = h->Qdepth;
2298 start_io(h);
2301 /* checks the status of the job and calls complete buffers to mark all
2302 * buffers for the completed job. Note that this function does not need
2303 * to hold the hba/queue lock.
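 * The result is stored in rq->errors (1 = ok, 0 = error) and the final
 * bio completion is deferred to the block-layer softirq via
 * blk_complete_request().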
2305 static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2306 int timeout)
2308 int status = 1;
2309 int retry_cmd = 0;
2311 if (timeout)
2312 status = 0;
2314 if (cmd->err_info->CommandStatus != 0) { /* an error has occurred */
2315 switch (cmd->err_info->CommandStatus) {
2316 unsigned char sense_key;
2317 case CMD_TARGET_STATUS:
2318 status = 0;
2320 if (cmd->err_info->ScsiStatus == 0x02) {
2321 printk(KERN_WARNING "cciss: cmd %p "
2322 "has CHECK CONDITION "
2323 " byte 2 = 0x%x\n", cmd,
2324 cmd->err_info->SenseInfo[2]
2326 /* check the sense key */
2327 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2328 /* no status or recovered error */
2329 if ((sense_key == 0x0) || (sense_key == 0x1)) {
2330 status = 1;
2332 } else {
2333 printk(KERN_WARNING "cciss: cmd %p "
2334 "has SCSI Status 0x%x\n",
2335 cmd, cmd->err_info->ScsiStatus);
2337 break;
2338 case CMD_DATA_UNDERRUN:
2339 printk(KERN_WARNING "cciss: cmd %p has"
2340 " completed with data underrun "
2341 "reported\n", cmd);
2342 break;
2343 case CMD_DATA_OVERRUN:
2344 printk(KERN_WARNING "cciss: cmd %p has"
2345 " completed with data overrun "
2346 "reported\n", cmd);
2347 break;
2348 case CMD_INVALID:
2349 printk(KERN_WARNING "cciss: cmd %p is "
2350 "reported invalid\n", cmd);
2351 status = 0;
2352 break;
2353 case CMD_PROTOCOL_ERR:
2354 printk(KERN_WARNING "cciss: cmd %p has "
2355 "protocol error \n", cmd);
2356 status = 0;
2357 break;
2358 case CMD_HARDWARE_ERR:
2359 printk(KERN_WARNING "cciss: cmd %p had "
2360 " hardware error\n", cmd);
2361 status = 0;
2362 break;
2363 case CMD_CONNECTION_LOST:
2364 printk(KERN_WARNING "cciss: cmd %p had "
2365 "connection lost\n", cmd);
2366 status = 0;
2367 break;
2368 case CMD_ABORTED:
2369 printk(KERN_WARNING "cciss: cmd %p was "
2370 "aborted\n", cmd);
2371 status = 0;
2372 break;
2373 case CMD_ABORT_FAILED:
2374 printk(KERN_WARNING "cciss: cmd %p reports "
2375 "abort failed\n", cmd);
2376 status = 0;
2377 break;
2378 case CMD_UNSOLICITED_ABORT:
2379 printk(KERN_WARNING "cciss%d: unsolicited "
2380 "abort %p\n", h->ctlr, cmd);
2381 if (cmd->retry_count < MAX_CMD_RETRIES) {
2382 retry_cmd = 1;
2383 printk(KERN_WARNING
2384 "cciss%d: retrying %p\n", h->ctlr, cmd);
2385 cmd->retry_count++;
2386 } else
2387 printk(KERN_WARNING
2388 "cciss%d: %p retried too "
2389 "many times\n", h->ctlr, cmd);
2390 status = 0;
2391 break;
2392 case CMD_TIMEOUT:
2393 printk(KERN_WARNING "cciss: cmd %p timed out\n", cmd);
2394 status = 0;
2395 break;
2396 default:
2397 printk(KERN_WARNING "cciss: cmd %p returned "
2398 "unknown status %x\n", cmd,
2399 cmd->err_info->CommandStatus);
2400 status = 0;
2403 /* We need to return this command */
2404 if (retry_cmd) {
2405 resend_cciss_cmd(h, cmd);
2406 return;
2409 cmd->rq->completion_data = cmd;
2410 cmd->rq->errors = status;
2411 blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
2412 blk_complete_request(cmd->rq);
2416 * Get a request and submit it to the controller.
2418 static void do_cciss_request(request_queue_t *q)
2420 ctlr_info_t *h = q->queuedata;
2421 CommandList_struct *c;
2422 int start_blk, seg;
2423 struct request *creq;
2424 u64bit temp64;
2425 struct scatterlist tmp_sg[MAXSGENTRIES];
2426 drive_info_struct *drv;
2427 int i, dir;
2429 /* We call start_io here in case there is a command waiting on the
2430 * queue that has not been sent.
2432 if (blk_queue_plugged(q))
2433 goto startio;
2435 queue:
2436 creq = elv_next_request(q);
2437 if (!creq)
2438 goto startio;
2440 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2442 if ((c = cmd_alloc(h, 1)) == NULL)
2443 goto full;
2445 blkdev_dequeue_request(creq);
2447 spin_unlock_irq(q->queue_lock);
2449 c->cmd_type = CMD_RWREQ;
2450 c->rq = creq;
2452 /* fill in the request */
2453 drv = creq->rq_disk->private_data;
2454 c->Header.ReplyQueue = 0; // unused in simple mode
2455 /* got command from pool, so use the command block index instead */
2456 /* for direct lookups. */
2457 /* The first 2 bits are reserved for controller error reporting. */
2458 c->Header.Tag.lower = (c->cmdindex << 3);
2459 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
2460 c->Header.LUN.LogDev.VolId = drv->LunID;
2461 c->Header.LUN.LogDev.Mode = 1;
2462 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2463 c->Request.Type.Type = TYPE_CMD; // It is a command.
2464 c->Request.Type.Attribute = ATTR_SIMPLE;
2465 c->Request.Type.Direction =
2466 (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
2467 c->Request.Timeout = 0; // Don't time out
2468 c->Request.CDB[0] =
2469 (rq_data_dir(creq) == READ) ? CCISS_READ : CCISS_WRITE;
2470 start_blk = creq->sector;
2471 #ifdef CCISS_DEBUG
2472 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
2473 (int)creq->nr_sectors);
2474 #endif /* CCISS_DEBUG */
2476 seg = blk_rq_map_sg(q, creq, tmp_sg);
2478 /* get the DMA records for the setup */
2479 if (c->Request.Type.Direction == XFER_READ)
2480 dir = PCI_DMA_FROMDEVICE;
2481 else
2482 dir = PCI_DMA_TODEVICE;
2484 for (i = 0; i < seg; i++) {
2485 c->SG[i].Len = tmp_sg[i].length;
2486 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2487 tmp_sg[i].offset,
2488 tmp_sg[i].length, dir);
2489 c->SG[i].Addr.lower = temp64.val32.lower;
2490 c->SG[i].Addr.upper = temp64.val32.upper;
2491 c->SG[i].Ext = 0; // we are not chaining
2493 /* track how many SG entries we are using */
2494 if (seg > h->maxSG)
2495 h->maxSG = seg;
2497 #ifdef CCISS_DEBUG
2498 printk(KERN_DEBUG "cciss: Submitting %lu sectors in %d segments\n",
2499 creq->nr_sectors, seg);
2500 #endif /* CCISS_DEBUG */
2502 c->Header.SGList = c->Header.SGTotal = seg;
2503 c->Request.CDB[1] = 0;
2504 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2505 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2506 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2507 c->Request.CDB[5] = start_blk & 0xff;
2508 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2509 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2510 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2511 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2513 spin_lock_irq(q->queue_lock);
2515 addQ(&(h->reqQ), c);
2516 h->Qdepth++;
2517 if (h->Qdepth > h->maxQsinceinit)
2518 h->maxQsinceinit = h->Qdepth;
2520 goto queue;
2521 full:
2522 blk_stop_queue(q);
2523 startio:
2524 /* We will already have the driver lock here, so there is no need
2525 * to take it again.
2527 start_io(h);
2530 static inline unsigned long get_next_completion(ctlr_info_t *h)
2532 #ifdef CONFIG_CISS_SCSI_TAPE
2533 /* Any rejects from sendcmd() lying around? Process them first */
2534 if (h->scsi_rejects.ncompletions == 0)
2535 return h->access.command_completed(h);
2536 else {
2537 struct sendcmd_reject_list *srl;
2538 int n;
2539 srl = &h->scsi_rejects;
2540 n = --srl->ncompletions;
2541 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2542 printk("p");
2543 return srl->complete[n];
2545 #else
2546 return h->access.command_completed(h);
2547 #endif
2550 static inline int interrupt_pending(ctlr_info_t *h)
2552 #ifdef CONFIG_CISS_SCSI_TAPE
2553 return (h->access.intr_pending(h)
2554 || (h->scsi_rejects.ncompletions > 0));
2555 #else
2556 return h->access.intr_pending(h);
2557 #endif
2560 static inline long interrupt_not_for_us(ctlr_info_t *h)
2562 #ifdef CONFIG_CISS_SCSI_TAPE
2563 return (((h->access.intr_pending(h) == 0) ||
2564 (h->interrupts_enabled == 0))
2565 && (h->scsi_rejects.ncompletions == 0));
2566 #else
2567 return (((h->access.intr_pending(h) == 0) ||
2568 (h->interrupts_enabled == 0)));
2569 #endif
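/*
 * Completion tags come in two flavours: if bit 2 (0x04) is set, the
 * remaining bits (>> 3) are an index into the command pool ("direct
 * lookup"; see how the tag is built in do_cciss_request()).  Otherwise
 * the tag is the command's bus address and we have to search cmpQ.
 */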
2572 static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2574 ctlr_info_t *h = dev_id;
2575 CommandList_struct *c;
2576 unsigned long flags;
2577 __u32 a, a1, a2;
2579 if (interrupt_not_for_us(h))
2580 return IRQ_NONE;
2582 * If there are completed commands in the completion queue,
2583 * we had better do something about it.
2585 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2586 while (interrupt_pending(h)) {
2587 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
2588 a1 = a;
2589 if ((a & 0x04)) {
2590 a2 = (a >> 3);
2591 if (a2 >= NR_CMDS) {
2592 printk(KERN_WARNING
2593 "cciss: controller cciss%d failed, stopping.\n",
2594 h->ctlr);
2595 fail_all_cmds(h->ctlr);
2596 return IRQ_HANDLED;
2599 c = h->cmd_pool + a2;
2600 a = c->busaddr;
2602 } else {
2603 a &= ~3;
2604 if ((c = h->cmpQ) == NULL) {
2605 printk(KERN_WARNING
2606 "cciss: Completion of %08x ignored\n",
2607 a1);
2608 continue;
2610 while (c->busaddr != a) {
2611 c = c->next;
2612 if (c == h->cmpQ)
2613 break;
2617 * If we've found the command, take it off the
2618 * completion Q and free it
2620 if (c->busaddr == a) {
2621 removeQ(&h->cmpQ, c);
2622 if (c->cmd_type == CMD_RWREQ) {
2623 complete_command(h, c, 0);
2624 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2625 complete(c->waiting);
2627 # ifdef CONFIG_CISS_SCSI_TAPE
2628 else if (c->cmd_type == CMD_SCSI)
2629 complete_scsi_command(c, 0, a1);
2630 # endif
2631 continue;
2636 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2637 return IRQ_HANDLED;
2641 * We cannot read the structure directly; for portability we must use
2642 * the I/O functions.
2643 * This is for debug only.
2645 #ifdef CCISS_DEBUG
2646 static void print_cfg_table(CfgTable_struct *tb)
2648 int i;
2649 char temp_name[17];
2651 printk("Controller Configuration information\n");
2652 printk("------------------------------------\n");
2653 for (i = 0; i < 4; i++)
2654 temp_name[i] = readb(&(tb->Signature[i]));
2655 temp_name[4] = '\0';
2656 printk(" Signature = %s\n", temp_name);
2657 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2658 printk(" Transport methods supported = 0x%x\n",
2659 readl(&(tb->TransportSupport)));
2660 printk(" Transport methods active = 0x%x\n",
2661 readl(&(tb->TransportActive)));
2662 printk(" Requested transport Method = 0x%x\n",
2663 readl(&(tb->HostWrite.TransportRequest)));
2664 printk(" Coalesce Interrupt Delay = 0x%x\n",
2665 readl(&(tb->HostWrite.CoalIntDelay)));
2666 printk(" Coalesce Interrupt Count = 0x%x\n",
2667 readl(&(tb->HostWrite.CoalIntCount)));
2668 printk(" Max outstanding commands = 0x%d\n",
2669 readl(&(tb->CmdsOutMax)));
2670 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
2671 for (i = 0; i < 16; i++)
2672 temp_name[i] = readb(&(tb->ServerName[i]));
2673 temp_name[16] = '\0';
2674 printk(" Server Name = %s\n", temp_name);
2675 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
2677 #endif /* CCISS_DEBUG */
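/*
 * find_PCI_BAR_index: map the config-table BAR offset reported by the
 * controller back to a pci_resource index.  32-bit and below-1MB BARs
 * occupy 4 bytes of config space, 64-bit BARs occupy 8, so we walk the
 * resources accumulating offsets until the requested one is reached.
 * Returns -1 if no BAR matches.
 */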
2679 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
2681 int i, offset, mem_type, bar_type;
2682 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2683 return 0;
2684 offset = 0;
2685 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2686 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
2687 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2688 offset += 4;
2689 else {
2690 mem_type = pci_resource_flags(pdev, i) &
2691 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2692 switch (mem_type) {
2693 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2694 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2695 offset += 4; /* 32 bit */
2696 break;
2697 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2698 offset += 8;
2699 break;
2700 default: /* reserved in PCI 2.2 */
2701 printk(KERN_WARNING
2702 "Base address is invalid\n");
2703 return -1;
2704 break;
2707 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2708 return i + 1;
2710 return -1;
2713 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
2714 * controllers that are capable. If not, we use IO-APIC mode.
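 * The fallback order is MSI-X (four vectors), then a single MSI, then
 * the legacy INTx/IO-APIC irq already assigned in pdev->irq.  The
 * boards on the quirk list below always use the legacy interrupt.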
2717 static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
2718 struct pci_dev *pdev, __u32 board_id)
2720 #ifdef CONFIG_PCI_MSI
2721 int err;
2722 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
2723 {0, 2}, {0, 3}
2726 /* Some boards advertise MSI but don't really support it */
2727 if ((board_id == 0x40700E11) ||
2728 (board_id == 0x40800E11) ||
2729 (board_id == 0x40820E11) || (board_id == 0x40830E11))
2730 goto default_int_mode;
2732 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2733 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2734 if (!err) {
2735 c->intr[0] = cciss_msix_entries[0].vector;
2736 c->intr[1] = cciss_msix_entries[1].vector;
2737 c->intr[2] = cciss_msix_entries[2].vector;
2738 c->intr[3] = cciss_msix_entries[3].vector;
2739 c->msix_vector = 1;
2740 return;
2742 if (err > 0) {
2743 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2744 "available\n", err);
2745 } else {
2746 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2747 err);
2750 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2751 if (!pci_enable_msi(pdev)) {
2752 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2753 c->msi_vector = 1;
2754 return;
2755 } else {
2756 printk(KERN_WARNING "cciss: MSI init failed\n");
2757 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2758 return;
2761 default_int_mode:
2762 #endif /* CONFIG_PCI_MSI */
2763 /* if we get here we're going to use the default interrupt mode */
2764 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2765 return;
2768 static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2770 ushort subsystem_vendor_id, subsystem_device_id, command;
2771 __u32 board_id, scratchpad = 0;
2772 __u64 cfg_offset;
2773 __u32 cfg_base_addr;
2774 __u64 cfg_base_addr_index;
2775 int i, err;
2777 /* check to see if controller has been disabled */
2778 /* BEFORE trying to enable it */
2779 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
2780 if (!(command & 0x02)) {
2781 printk(KERN_WARNING
2782 "cciss: controller appears to be disabled\n");
2783 return -ENODEV;
2786 err = pci_enable_device(pdev);
2787 if (err) {
2788 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2789 return err;
2792 err = pci_request_regions(pdev, "cciss");
2793 if (err) {
2794 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
2795 "aborting\n");
2796 goto err_out_disable_pdev;
2799 subsystem_vendor_id = pdev->subsystem_vendor;
2800 subsystem_device_id = pdev->subsystem_device;
2801 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2802 subsystem_vendor_id);
2804 #ifdef CCISS_DEBUG
2805 printk("command = %x\n", command);
2806 printk("irq = %x\n", pdev->irq);
2807 printk("board_id = %x\n", board_id);
2808 #endif /* CCISS_DEBUG */
2810 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
2811 * else we use the IO-APIC interrupt assigned to us by system ROM.
2813 cciss_interrupt_mode(c, pdev, board_id);
2816 * Memory base addr is the first address; the second points to the config
2817 * table.
2820 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2821 #ifdef CCISS_DEBUG
2822 printk("address 0 = %x\n", c->paddr);
2823 #endif /* CCISS_DEBUG */
2824 c->vaddr = remap_pci_mem(c->paddr, 200);
2826 /* Wait for the board to become ready. (PCI hotplug needs this.)
2827 * We poll for up to 120 secs, once per 100ms. */
2828 for (i = 0; i < 1200; i++) {
2829 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2830 if (scratchpad == CCISS_FIRMWARE_READY)
2831 break;
2832 set_current_state(TASK_INTERRUPTIBLE);
2833 schedule_timeout(HZ / 10); /* wait 100ms */
2835 if (scratchpad != CCISS_FIRMWARE_READY) {
2836 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2837 err = -ENODEV;
2838 goto err_out_free_res;
2841 /* get the address index number */
2842 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2843 cfg_base_addr &= (__u32) 0x0000ffff;
2844 #ifdef CCISS_DEBUG
2845 printk("cfg base address = %x\n", cfg_base_addr);
2846 #endif /* CCISS_DEBUG */
2847 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
2848 #ifdef CCISS_DEBUG
2849 printk("cfg base address index = %x\n", cfg_base_addr_index);
2850 #endif /* CCISS_DEBUG */
2851 if (cfg_base_addr_index == -1) {
2852 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2853 err = -ENODEV;
2854 goto err_out_free_res;
2857 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2858 #ifdef CCISS_DEBUG
2859 printk("cfg offset = %x\n", cfg_offset);
2860 #endif /* CCISS_DEBUG */
2861 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2862 cfg_base_addr_index) +
2863 cfg_offset, sizeof(CfgTable_struct));
2864 c->board_id = board_id;
2866 #ifdef CCISS_DEBUG
2867 print_cfg_table(c->cfgtable);
2868 #endif /* CCISS_DEBUG */
2870 for (i = 0; i < ARRAY_SIZE(products); i++) {
2871 if (board_id == products[i].board_id) {
2872 c->product_name = products[i].product_name;
2873 c->access = *(products[i].access);
2874 break;
2877 if (i == ARRAY_SIZE(products)) {
2878 printk(KERN_WARNING "cciss: Sorry, I don't know how"
2879 " to access the Smart Array controller %08lx\n",
2880 (unsigned long)board_id);
2881 err = -ENODEV;
2882 goto err_out_free_res;
2884 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
2885 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2886 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2887 (readb(&c->cfgtable->Signature[3]) != 'S')) {
2888 printk("Does not appear to be a valid CISS config table\n");
2889 err = -ENODEV;
2890 goto err_out_free_res;
2892 #ifdef CONFIG_X86
2894 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
2895 __u32 prefetch;
2896 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
2897 prefetch |= 0x100;
2898 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
2900 #endif
2902 #ifdef CCISS_DEBUG
2903 printk("Trying to put board into Simple mode\n");
2904 #endif /* CCISS_DEBUG */
2905 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
2906 /* Update the field, and then ring the doorbell */
2907 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
2908 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
2910 /* under certain very rare conditions, this can take a while.
2911 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
2912 * as we enter this code.) */
2913 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
2914 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
2915 break;
2916 /* delay and try again */
2917 set_current_state(TASK_INTERRUPTIBLE);
2918 schedule_timeout(10);
2921 #ifdef CCISS_DEBUG
2922 printk(KERN_DEBUG "I counter got to %d %x\n", i,
2923 readl(c->vaddr + SA5_DOORBELL));
2924 #endif /* CCISS_DEBUG */
2925 #ifdef CCISS_DEBUG
2926 print_cfg_table(c->cfgtable);
2927 #endif /* CCISS_DEBUG */
2929 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
2930 printk(KERN_WARNING "cciss: unable to get board into"
2931 " simple mode\n");
2932 err = -ENODEV;
2933 goto err_out_free_res;
2935 return 0;
2937 err_out_free_res:
2938 pci_release_regions(pdev);
2940 err_out_disable_pdev:
2941 pci_disable_device(pdev);
2942 return err;
2946 * Gets information about the local volumes attached to the controller.
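 * The sequence is a controller INQUIRY for the firmware revision,
 * CISS_REPORT_LOG for the logical-volume list and then, per reported
 * LUN, a read-capacity plus a geometry inquiry.  Everything goes
 * through the polled sendcmd() path because this runs before the
 * board's interrupts are enabled.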
2948 static void cciss_getgeometry(int cntl_num)
2950 ReportLunData_struct *ld_buff;
2951 ReadCapdata_struct *size_buff;
2952 InquiryData_struct *inq_buff;
2953 int return_code;
2954 int i;
2955 int listlength = 0;
2956 __u32 lunid = 0;
2957 int block_size;
2958 int total_size;
2960 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
2961 if (ld_buff == NULL) {
2962 printk(KERN_ERR "cciss: out of memory\n");
2963 return;
2965 size_buff = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
2966 if (size_buff == NULL) {
2967 printk(KERN_ERR "cciss: out of memory\n");
2968 kfree(ld_buff);
2969 return;
2971 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2972 if (inq_buff == NULL) {
2973 printk(KERN_ERR "cciss: out of memory\n");
2974 kfree(ld_buff);
2975 kfree(size_buff);
2976 return;
2978 /* Get the firmware version */
2979 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
2980 sizeof(InquiryData_struct), 0, 0, 0, NULL,
2981 TYPE_CMD);
2982 if (return_code == IO_OK) {
2983 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
2984 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
2985 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
2986 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
2987 } else { /* send command failed */
2989 printk(KERN_WARNING "cciss: unable to determine firmware"
2990 " version of controller\n");
2992 /* Get the number of logical volumes */
2993 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
2994 sizeof(ReportLunData_struct), 0, 0, 0, NULL,
2995 TYPE_CMD);
2997 if (return_code == IO_OK) {
2998 #ifdef CCISS_DEBUG
2999 printk("LUN Data\n--------------------------\n");
3000 #endif /* CCISS_DEBUG */
3002 listlength |=
3003 (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
3004 listlength |=
3005 (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
3006 listlength |=
3007 (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
3008 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
3009 } else { /* reading number of logical volumes failed */
3011 printk(KERN_WARNING "cciss: report logical volume"
3012 " command failed\n");
3013 listlength = 0;
3015 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
3016 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
3017 printk(KERN_ERR
3018 "ciss: only %d number of logical volumes supported\n",
3019 CISS_MAX_LUN);
3020 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3022 #ifdef CCISS_DEBUG
3023 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
3024 ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
3025 ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
3026 hba[cntl_num]->num_luns);
3027 #endif /* CCISS_DEBUG */
3029 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
3030 // for(i=0; i< hba[cntl_num]->num_luns; i++)
3031 for (i = 0; i < CISS_MAX_LUN; i++) {
3032 if (i < hba[cntl_num]->num_luns) {
3033 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3034 << 24;
3035 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3036 << 16;
3037 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3038 << 8;
3039 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3041 hba[cntl_num]->drv[i].LunID = lunid;
3043 #ifdef CCISS_DEBUG
3044 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3045 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3046 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3047 hba[cntl_num]->drv[i].LunID);
3048 #endif /* CCISS_DEBUG */
3049 cciss_read_capacity(cntl_num, i, size_buff, 0,
3050 &total_size, &block_size);
3051 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3052 block_size, inq_buff,
3053 &hba[cntl_num]->drv[i]);
3054 } else {
3055 /* initialize raid_level to indicate a free space */
3056 hba[cntl_num]->drv[i].raid_level = -1;
3059 kfree(ld_buff);
3060 kfree(size_buff);
3061 kfree(inq_buff);
3064 /* Function to find the first free pointer into our hba[] array */
3065 /* Returns -1 if no free entries are left. */
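/* The gendisks for all NWD possible volumes are allocated up front; */
/* if no controller slot is free they are all released again on exit. */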
3066 static int alloc_cciss_hba(void)
3068 struct gendisk *disk[NWD];
3069 int i, n;
3070 for (n = 0; n < NWD; n++) {
3071 disk[n] = alloc_disk(1 << NWD_SHIFT);
3072 if (!disk[n])
3073 goto out;
3076 for (i = 0; i < MAX_CTLR; i++) {
3077 if (!hba[i]) {
3078 ctlr_info_t *p;
3079 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3080 if (!p)
3081 goto Enomem;
3082 for (n = 0; n < NWD; n++)
3083 p->gendisk[n] = disk[n];
3084 hba[i] = p;
3085 return i;
3088 printk(KERN_WARNING "cciss: This driver supports a maximum"
3089 " of %d controllers.\n", MAX_CTLR);
3090 goto out;
3091 Enomem:
3092 printk(KERN_ERR "cciss: out of memory.\n");
3093 out:
3094 while (n--)
3095 put_disk(disk[n]);
3096 return -1;
3099 static void free_hba(int i)
3101 ctlr_info_t *p = hba[i];
3102 int n;
3104 hba[i] = NULL;
3105 for (n = 0; n < NWD; n++)
3106 put_disk(p->gendisk[n]);
3107 kfree(p);
3111 * This is it. Find all the controllers and register them. I really hate
3112 * stealing all these major device numbers.
3113 * Returns 1 on success, -1 on failure.
3115 static int __devinit cciss_init_one(struct pci_dev *pdev,
3116 const struct pci_device_id *ent)
3118 request_queue_t *q;
3119 int i;
3120 int j;
3121 int rc;
3122 int dac;
3124 i = alloc_cciss_hba();
3125 if (i < 0)
3126 return -1;
3128 hba[i]->busy_initializing = 1;
3130 if (cciss_pci_init(hba[i], pdev) != 0)
3131 goto clean1;
3133 sprintf(hba[i]->devname, "cciss%d", i);
3134 hba[i]->ctlr = i;
3135 hba[i]->pdev = pdev;
3137 /* configure PCI DMA stuff */
3138 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3139 dac = 1;
3140 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3141 dac = 0;
3142 else {
3143 printk(KERN_ERR "cciss: no suitable DMA available\n");
3144 goto clean1;
3148 * register with the major number, or get a dynamic major number
3149 * by passing 0 as argument. This is done to support more than
3150 * 8 controllers.
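 * register_blkdev(0, name) hands back a dynamically assigned major,
 * which is what the return value is used for below when
 * i >= MAX_CTLR_ORIG.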
3152 if (i < MAX_CTLR_ORIG)
3153 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3154 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3155 if (rc == -EBUSY || rc == -EINVAL) {
3156 printk(KERN_ERR
3157 "cciss: Unable to get major number %d for %s "
3158 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3159 goto clean1;
3160 } else {
3161 if (i >= MAX_CTLR_ORIG)
3162 hba[i]->major = rc;
3165 /* make sure the board interrupts are off */
3166 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3167 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3168 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
3169 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3170 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3171 goto clean2;
3174 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3175 hba[i]->devname, pdev->device, pci_name(pdev),
3176 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3178 hba[i]->cmd_pool_bits =
3179 kmalloc(((NR_CMDS + BITS_PER_LONG -
3180 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3181 hba[i]->cmd_pool = (CommandList_struct *)
3182 pci_alloc_consistent(hba[i]->pdev,
3183 NR_CMDS * sizeof(CommandList_struct),
3184 &(hba[i]->cmd_pool_dhandle));
3185 hba[i]->errinfo_pool = (ErrorInfo_struct *)
3186 pci_alloc_consistent(hba[i]->pdev,
3187 NR_CMDS * sizeof(ErrorInfo_struct),
3188 &(hba[i]->errinfo_pool_dhandle));
3189 if ((hba[i]->cmd_pool_bits == NULL)
3190 || (hba[i]->cmd_pool == NULL)
3191 || (hba[i]->errinfo_pool == NULL)) {
3192 printk(KERN_ERR "cciss: out of memory");
3193 goto clean4;
3195 #ifdef CONFIG_CISS_SCSI_TAPE
3196 hba[i]->scsi_rejects.complete =
3197 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3198 (NR_CMDS + 5), GFP_KERNEL);
3199 if (hba[i]->scsi_rejects.complete == NULL) {
3200 printk(KERN_ERR "cciss: out of memory");
3201 goto clean4;
3203 #endif
3204 spin_lock_init(&hba[i]->lock);
3206 /* Initialize the pdev driver private data.
3207 have it point to hba[i]. */
3208 pci_set_drvdata(pdev, hba[i]);
3209 /* command and error info recs zeroed out before
3210 they are used */
3211 memset(hba[i]->cmd_pool_bits, 0,
3212 ((NR_CMDS + BITS_PER_LONG -
3213 1) / BITS_PER_LONG) * sizeof(unsigned long));
3215 #ifdef CCISS_DEBUG
3216 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
3217 #endif /* CCISS_DEBUG */
3219 cciss_getgeometry(i);
3221 cciss_scsi_setup(i);
3223 /* Turn the interrupts on so we can service requests */
3224 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3226 cciss_procinit(i);
3227 hba[i]->busy_initializing = 0;
3229 for (j = 0; j < NWD; j++) { /* mfm */
3230 drive_info_struct *drv = &(hba[i]->drv[j]);
3231 struct gendisk *disk = hba[i]->gendisk[j];
3233 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3234 if (!q) {
3235 printk(KERN_ERR
3236 "cciss: unable to allocate queue for disk %d\n",
3238 break;
3240 drv->queue = q;
3242 q->backing_dev_info.ra_pages = READ_AHEAD;
3243 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3245 /* This is a hardware imposed limit. */
3246 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3248 /* This is a limit in the driver and could be eliminated. */
3249 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3251 blk_queue_max_sectors(q, 512);
3253 blk_queue_softirq_done(q, cciss_softirq_done);
3255 q->queuedata = hba[i];
3256 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3257 disk->major = hba[i]->major;
3258 disk->first_minor = j << NWD_SHIFT;
3259 disk->fops = &cciss_fops;
3260 disk->queue = q;
3261 disk->private_data = drv;
3262 disk->driverfs_dev = &pdev->dev;
3263 /* we must register the controller even if no disks exist */
3264 /* this is for the online array utilities */
3265 if (!drv->heads && j)
3266 continue;
3267 blk_queue_hardsect_size(q, drv->block_size);
3268 set_capacity(disk, drv->nr_blocks);
3269 add_disk(disk);
3272 return 1;
3274 clean4:
3275 #ifdef CONFIG_CISS_SCSI_TAPE
3276 kfree(hba[i]->scsi_rejects.complete);
3277 #endif
3278 kfree(hba[i]->cmd_pool_bits);
3279 if (hba[i]->cmd_pool)
3280 pci_free_consistent(hba[i]->pdev,
3281 NR_CMDS * sizeof(CommandList_struct),
3282 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3283 if (hba[i]->errinfo_pool)
3284 pci_free_consistent(hba[i]->pdev,
3285 NR_CMDS * sizeof(ErrorInfo_struct),
3286 hba[i]->errinfo_pool,
3287 hba[i]->errinfo_pool_dhandle);
3288 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3289 clean2:
3290 unregister_blkdev(hba[i]->major, hba[i]->devname);
3291 clean1:
3292 hba[i]->busy_initializing = 0;
3293 free_hba(i);
3294 return -1;
3297 static void __devexit cciss_remove_one(struct pci_dev *pdev)
3299 ctlr_info_t *tmp_ptr;
3300 int i, j;
3301 char flush_buf[4];
3302 int return_code;
3304 if (pci_get_drvdata(pdev) == NULL) {
3305 printk(KERN_ERR "cciss: Unable to remove device\n");
3306 return;
3308 tmp_ptr = pci_get_drvdata(pdev);
3309 i = tmp_ptr->ctlr;
3310 if (hba[i] == NULL) {
3311 printk(KERN_ERR "cciss: device appears to "
3312 "already be removed \n");
3313 return;
3315 /* Turn board interrupts off and send the flush cache command */
3316 /* sendcmd will turn off interrupts and send the flush...
3317 * to write all data in the battery-backed cache to the disks */
3318 memset(flush_buf, 0, 4);
3319 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3320 TYPE_CMD);
3321 if (return_code != IO_OK) {
3322 printk(KERN_WARNING "cciss: Error flushing cache on controller %d\n",
3325 free_irq(hba[i]->intr[2], hba[i]);
3327 #ifdef CONFIG_PCI_MSI
3328 if (hba[i]->msix_vector)
3329 pci_disable_msix(hba[i]->pdev);
3330 else if (hba[i]->msi_vector)
3331 pci_disable_msi(hba[i]->pdev);
3332 #endif /* CONFIG_PCI_MSI */
3334 iounmap(hba[i]->vaddr);
3335 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3336 unregister_blkdev(hba[i]->major, hba[i]->devname);
3337 remove_proc_entry(hba[i]->devname, proc_cciss);
3339 /* remove it from the disk list */
3340 for (j = 0; j < NWD; j++) {
3341 struct gendisk *disk = hba[i]->gendisk[j];
3342 if (disk) {
3343 request_queue_t *q = disk->queue;
3345 if (disk->flags & GENHD_FL_UP)
3346 del_gendisk(disk);
3347 if (q)
3348 blk_cleanup_queue(q);
3352 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3353 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3354 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(ErrorInfo_struct),
3355 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3356 kfree(hba[i]->cmd_pool_bits);
3357 #ifdef CONFIG_CISS_SCSI_TAPE
3358 kfree(hba[i]->scsi_rejects.complete);
3359 #endif
3360 pci_release_regions(pdev);
3361 pci_disable_device(pdev);
3362 pci_set_drvdata(pdev, NULL);
3363 free_hba(i);
3366 static struct pci_driver cciss_pci_driver = {
3367 .name = "cciss",
3368 .probe = cciss_init_one,
3369 .remove = __devexit_p(cciss_remove_one),
3370 .id_table = cciss_pci_device_id, /* id_table */
3374 * This is it. Register the PCI driver information for the cards we control;
3375 * the OS will call our registered routines when it finds one of our cards.
3377 static int __init cciss_init(void)
3379 printk(KERN_INFO DRIVER_NAME "\n");
3381 /* Register for our PCI devices */
3382 return pci_register_driver(&cciss_pci_driver);
3385 static void __exit cciss_cleanup(void)
3387 int i;
3389 pci_unregister_driver(&cciss_pci_driver);
3390 /* double check that all controller entries have been removed */
3391 for (i = 0; i < MAX_CTLR; i++) {
3392 if (hba[i] != NULL) {
3393 printk(KERN_WARNING "cciss: had to remove"
3394 " controller %d\n", i);
3395 cciss_remove_one(hba[i]->pdev);
3398 remove_proc_entry("cciss", proc_root_driver);
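/*
 * fail_all_cmds: called from the interrupt handler when the controller
 * returns an impossible command index.  Everything still on the request
 * and completion queues is failed with CMD_HARDWARE_ERR and the PCI
 * device is disabled.
 */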
3401 static void fail_all_cmds(unsigned long ctlr)
3403 /* If we get here, the board is apparently dead. */
3404 ctlr_info_t *h = hba[ctlr];
3405 CommandList_struct *c;
3406 unsigned long flags;
3408 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3409 h->alive = 0; /* the controller apparently died... */
3411 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3413 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3415 /* move everything off the request queue onto the completed queue */
3416 while ((c = h->reqQ) != NULL) {
3417 removeQ(&(h->reqQ), c);
3418 h->Qdepth--;
3419 addQ(&(h->cmpQ), c);
3422 /* Now, fail everything on the completed queue with a HW error */
3423 while ((c = h->cmpQ) != NULL) {
3424 removeQ(&h->cmpQ, c);
3425 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3426 if (c->cmd_type == CMD_RWREQ) {
3427 complete_command(h, c, 0);
3428 } else if (c->cmd_type == CMD_IOCTL_PEND)
3429 complete(c->waiting);
3430 #ifdef CONFIG_CISS_SCSI_TAPE
3431 else if (c->cmd_type == CMD_SCSI)
3432 complete_scsi_command(c, 0, 0);
3433 #endif
3435 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3436 return;
3439 module_init(cciss_init);
3440 module_exit(cciss_cleanup);