1 /*
2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
23 #include <linux/config.h> /* CONFIG_PROC_FS */
24 #include <linux/module.h>
25 #include <linux/interrupt.h>
26 #include <linux/types.h>
27 #include <linux/pci.h>
28 #include <linux/kernel.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/major.h>
32 #include <linux/fs.h>
33 #include <linux/bio.h>
34 #include <linux/blkpg.h>
35 #include <linux/timer.h>
36 #include <linux/proc_fs.h>
37 #include <linux/init.h>
38 #include <linux/hdreg.h>
39 #include <linux/spinlock.h>
40 #include <linux/compat.h>
41 #include <linux/blktrace_api.h>
42 #include <asm/uaccess.h>
43 #include <asm/io.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/blkdev.h>
47 #include <linux/genhd.h>
48 #include <linux/completion.h>
50 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
51 #define DRIVER_NAME "HP CISS Driver (v 2.6.10)"
52 #define DRIVER_VERSION CCISS_DRIVER_VERSION(2,6,10)
54 /* Embedded module documentation macros - see modules.h */
55 MODULE_AUTHOR("Hewlett-Packard Company");
56 MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.10");
57 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
58 " SA6i P600 P800 P400 P400i E200 E200i");
59 MODULE_LICENSE("GPL");
61 #include "cciss_cmd.h"
62 #include "cciss.h"
63 #include <linux/cciss_ioctl.h>
65 /* define the PCI info for the cards we can control */
66 static const struct pci_device_id cciss_pci_device_id[] = {
67 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS,
68 0x0E11, 0x4070, 0, 0, 0},
69 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
70 0x0E11, 0x4080, 0, 0, 0},
71 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
72 0x0E11, 0x4082, 0, 0, 0},
73 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
74 0x0E11, 0x4083, 0, 0, 0},
75 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
76 0x0E11, 0x409A, 0, 0, 0},
77 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
78 0x0E11, 0x409B, 0, 0, 0},
79 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
80 0x0E11, 0x409C, 0, 0, 0},
81 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
82 0x0E11, 0x409D, 0, 0, 0},
83 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
84 0x0E11, 0x4091, 0, 0, 0},
85 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA,
86 0x103C, 0x3225, 0, 0, 0},
87 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
88 0x103c, 0x3223, 0, 0, 0},
89 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
90 0x103c, 0x3234, 0, 0, 0},
91 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
92 0x103c, 0x3235, 0, 0, 0},
93 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
94 0x103c, 0x3211, 0, 0, 0},
95 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
96 0x103c, 0x3212, 0, 0, 0},
97 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
98 0x103c, 0x3213, 0, 0, 0},
99 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
100 0x103c, 0x3214, 0, 0, 0},
101 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
102 0x103c, 0x3215, 0, 0, 0},
103 {0,}
105 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
107 #define NR_PRODUCTS ARRAY_SIZE(products)
109 /* board_id = Subsystem Device ID & Vendor ID
110 * product = Marketing Name for the board
111 * access = Address of the struct of function pointers
113 static struct board_type products[] = {
114 { 0x40700E11, "Smart Array 5300", &SA5_access },
115 { 0x40800E11, "Smart Array 5i", &SA5B_access},
116 { 0x40820E11, "Smart Array 532", &SA5B_access},
117 { 0x40830E11, "Smart Array 5312", &SA5B_access},
118 { 0x409A0E11, "Smart Array 641", &SA5_access},
119 { 0x409B0E11, "Smart Array 642", &SA5_access},
120 { 0x409C0E11, "Smart Array 6400", &SA5_access},
121 { 0x409D0E11, "Smart Array 6400 EM", &SA5_access},
122 { 0x40910E11, "Smart Array 6i", &SA5_access},
123 { 0x3225103C, "Smart Array P600", &SA5_access},
124 { 0x3223103C, "Smart Array P800", &SA5_access},
125 { 0x3234103C, "Smart Array P400", &SA5_access},
126 { 0x3235103C, "Smart Array P400i", &SA5_access},
127 { 0x3211103C, "Smart Array E200i", &SA5_access},
128 { 0x3212103C, "Smart Array E200", &SA5_access},
129 { 0x3213103C, "Smart Array E200i", &SA5_access},
130 { 0x3214103C, "Smart Array E200i", &SA5_access},
131 { 0x3215103C, "Smart Array E200i", &SA5_access},
134 /* How long to wait (in milliseconds) for board to go into simple mode */
135 #define MAX_CONFIG_WAIT 30000
136 #define MAX_IOCTL_CONFIG_WAIT 1000
138 /*define how many times we will try a command because of bus resets */
139 #define MAX_CMD_RETRIES 3
141 #define READ_AHEAD 1024
142 #define NR_CMDS 384 /* #commands that can be outstanding */
143 #define MAX_CTLR 32
145 /* Originally the cciss driver only supported 8 major numbers */
146 #define MAX_CTLR_ORIG 8
149 static ctlr_info_t *hba[MAX_CTLR];
151 static void do_cciss_request(request_queue_t *q);
152 static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs);
153 static int cciss_open(struct inode *inode, struct file *filep);
154 static int cciss_release(struct inode *inode, struct file *filep);
155 static int cciss_ioctl(struct inode *inode, struct file *filep,
156 unsigned int cmd, unsigned long arg);
157 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
159 static int revalidate_allvol(ctlr_info_t *host);
160 static int cciss_revalidate(struct gendisk *disk);
161 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
162 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv, int clear_all);
164 static void cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
165 int withirq, unsigned int *total_size, unsigned int *block_size);
166 static void cciss_geometry_inquiry(int ctlr, int logvol,
167 int withirq, unsigned int total_size,
168 unsigned int block_size, InquiryData_struct *inq_buff,
169 drive_info_struct *drv);
170 static void cciss_getgeometry(int cntl_num);
171 static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *, __u32);
172 static void start_io( ctlr_info_t *h);
173 static int sendcmd( __u8 cmd, int ctlr, void *buff, size_t size,
174 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
175 unsigned char *scsi3addr, int cmd_type);
176 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
177 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
178 int cmd_type);
180 static void fail_all_cmds(unsigned long ctlr);
182 #ifdef CONFIG_PROC_FS
183 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
184 int length, int *eof, void *data);
185 static void cciss_procinit(int i);
186 #else
187 static void cciss_procinit(int i) {}
188 #endif /* CONFIG_PROC_FS */
190 #ifdef CONFIG_COMPAT
191 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
192 #endif
194 static struct block_device_operations cciss_fops = {
195 .owner = THIS_MODULE,
196 .open = cciss_open,
197 .release = cciss_release,
198 .ioctl = cciss_ioctl,
199 .getgeo = cciss_getgeo,
200 #ifdef CONFIG_COMPAT
201 .compat_ioctl = cciss_compat_ioctl,
202 #endif
203 .revalidate_disk= cciss_revalidate,
207 * Enqueuing and dequeuing functions for cmdlists.
209 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
211 if (*Qptr == NULL) {
212 *Qptr = c;
213 c->next = c->prev = c;
214 } else {
215 c->prev = (*Qptr)->prev;
216 c->next = (*Qptr);
217 (*Qptr)->prev->next = c;
218 (*Qptr)->prev = c;
222 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
223 CommandList_struct *c)
225 if (c && c->next != c) {
226 if (*Qptr == c) *Qptr = c->next;
227 c->prev->next = c->next;
228 c->next->prev = c->prev;
229 } else {
230 *Qptr = NULL;
232 return c;
235 #include "cciss_scsi.c" /* For SCSI tape support */
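/*
 * Illustrative sketch, not part of the original driver: how the circular,
 * doubly linked reqQ maintained by the addQ()/removeQ() helpers above is
 * typically drained.  The queue head pointer always points at the oldest
 * element; removing the head advances it, and removing the last element
 * sets it to NULL.  The "handle the command" step is left as a comment
 * because the real consumer is start_io() further below.
 */
#if 0
static void drain_queue_sketch(ctlr_info_t *h)
{
	CommandList_struct *c;

	while ((c = h->reqQ) != NULL) {
		removeQ(&h->reqQ, c);
		h->Qdepth--;
		/* submit or complete c here */
	}
}
#endif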
237 #ifdef CONFIG_PROC_FS
240 * Report information about this controller.
242 #define ENG_GIG 1000000000
243 #define ENG_GIG_FACTOR (ENG_GIG/512)
244 #define RAID_UNKNOWN 6
245 static const char *raid_label[] = {"0","4","1(1+0)","5","5+1","ADG",
246 "UNKNOWN"};
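/*
 * Sizing note (illustrative, not from the original source): with 512-byte
 * sectors, ENG_GIG_FACTOR is 10^9 / 512 = 1953125, so in the loop below
 *	nr_blocks / ENG_GIG_FACTOR            -> whole decimal gigabytes
 *	(remainder * 100) / ENG_GIG_FACTOR    -> two fractional digits
 * e.g. a 143374744-block volume prints as 73.40GB.
 */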
248 static struct proc_dir_entry *proc_cciss;
250 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
251 int length, int *eof, void *data)
253 off_t pos = 0;
254 off_t len = 0;
255 int size, i, ctlr;
256 ctlr_info_t *h = (ctlr_info_t*)data;
257 drive_info_struct *drv;
258 unsigned long flags;
259 sector_t vol_sz, vol_sz_frac;
261 ctlr = h->ctlr;
263 /* prevent displaying bogus info during configuration
264 * or deconfiguration of a logical volume
266 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
267 if (h->busy_configuring) {
268 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
269 return -EBUSY;
271 h->busy_configuring = 1;
272 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
274 size = sprintf(buffer, "%s: HP %s Controller\n"
275 "Board ID: 0x%08lx\n"
276 "Firmware Version: %c%c%c%c\n"
277 "IRQ: %d\n"
278 "Logical drives: %d\n"
279 "Current Q depth: %d\n"
280 "Current # commands on controller: %d\n"
281 "Max Q depth since init: %d\n"
282 "Max # commands on controller since init: %d\n"
283 "Max SG entries since init: %d\n\n",
284 h->devname,
285 h->product_name,
286 (unsigned long)h->board_id,
287 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], h->firm_ver[3],
288 (unsigned int)h->intr[SIMPLE_MODE_INT],
289 h->num_luns,
290 h->Qdepth, h->commands_outstanding,
291 h->maxQsinceinit, h->max_outstanding, h->maxSG);
293 pos += size; len += size;
294 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
295 for(i=0; i<=h->highest_lun; i++) {
297 drv = &h->drv[i];
298 if (drv->heads == 0)
299 continue;
301 vol_sz = drv->nr_blocks;
302 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
303 vol_sz_frac *= 100;
304 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
306 if (drv->raid_level > 5)
307 drv->raid_level = RAID_UNKNOWN;
308 size = sprintf(buffer+len, "cciss/c%dd%d:"
309 "\t%4u.%02uGB\tRAID %s\n",
310 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
311 raid_label[drv->raid_level]);
312 pos += size; len += size;
315 *eof = 1;
316 *start = buffer+offset;
317 len -= offset;
318 if (len>length)
319 len = length;
320 h->busy_configuring = 0;
321 return len;
324 static int
325 cciss_proc_write(struct file *file, const char __user *buffer,
326 unsigned long count, void *data)
328 unsigned char cmd[80];
329 int len;
330 #ifdef CONFIG_CISS_SCSI_TAPE
331 ctlr_info_t *h = (ctlr_info_t *) data;
332 int rc;
333 #endif
335 if (count > sizeof(cmd)-1) return -EINVAL;
336 if (copy_from_user(cmd, buffer, count)) return -EFAULT;
337 cmd[count] = '\0';
338 len = strlen(cmd); // above 3 lines ensure safety
339 if (len && cmd[len-1] == '\n')
340 cmd[--len] = '\0';
341 # ifdef CONFIG_CISS_SCSI_TAPE
342 if (strcmp("engage scsi", cmd)==0) {
343 rc = cciss_engage_scsi(h->ctlr);
344 if (rc != 0) return -rc;
345 return count;
347 /* might be nice to have "disengage" too, but it's not
348 safely possible. (only 1 module use count, lock issues.) */
349 # endif
350 return -EINVAL;
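/*
 * Usage sketch, not part of the driver: cciss_proc_write() above only
 * understands the string "engage scsi".  A minimal userspace writer,
 * assuming controller 0 and the /proc/driver/cciss/cciss0 path created by
 * cciss_procinit() below:
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/driver/cciss/cciss0", "w");

	if (!f)
		return 1;
	fputs("engage scsi\n", f);	/* trailing newline is stripped by the driver */
	return fclose(f) ? 1 : 0;
}
#endif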
354 * Get us a file in /proc/cciss that says something about each controller.
355 * Create /proc/cciss if it doesn't exist yet.
357 static void __devinit cciss_procinit(int i)
359 struct proc_dir_entry *pde;
361 if (proc_cciss == NULL) {
362 proc_cciss = proc_mkdir("cciss", proc_root_driver);
363 if (!proc_cciss)
364 return;
367 pde = create_proc_read_entry(hba[i]->devname,
368 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
369 proc_cciss, cciss_proc_get_info, hba[i]);
370 pde->write_proc = cciss_proc_write;
372 #endif /* CONFIG_PROC_FS */
375 * For operations that cannot sleep, a command block is allocated at init,
376 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
377 * which ones are free or in use. For operations that can wait for kmalloc
378 * to possibly sleep, this routine can be called with get_from_pool set to 0.
379 * cmd_free() MUST be called with got_from_pool set to 0 if cmd_alloc was.
381 static CommandList_struct * cmd_alloc(ctlr_info_t *h, int get_from_pool)
383 CommandList_struct *c;
384 int i;
385 u64bit temp64;
386 dma_addr_t cmd_dma_handle, err_dma_handle;
388 if (!get_from_pool)
390 c = (CommandList_struct *) pci_alloc_consistent(
391 h->pdev, sizeof(CommandList_struct), &cmd_dma_handle);
392 if(c==NULL)
393 return NULL;
394 memset(c, 0, sizeof(CommandList_struct));
396 c->cmdindex = -1;
398 c->err_info = (ErrorInfo_struct *)pci_alloc_consistent(
399 h->pdev, sizeof(ErrorInfo_struct),
400 &err_dma_handle);
402 if (c->err_info == NULL)
404 pci_free_consistent(h->pdev,
405 sizeof(CommandList_struct), c, cmd_dma_handle);
406 return NULL;
408 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
409 } else /* get it out of the controller's pool */
411 do {
412 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
413 if (i == NR_CMDS)
414 return NULL;
415 } while(test_and_set_bit(i & (BITS_PER_LONG - 1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
416 #ifdef CCISS_DEBUG
417 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
418 #endif
419 c = h->cmd_pool + i;
420 memset(c, 0, sizeof(CommandList_struct));
421 cmd_dma_handle = h->cmd_pool_dhandle
422 + i*sizeof(CommandList_struct);
423 c->err_info = h->errinfo_pool + i;
424 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
425 err_dma_handle = h->errinfo_pool_dhandle
426 + i*sizeof(ErrorInfo_struct);
427 h->nr_allocs++;
429 c->cmdindex = i;
432 c->busaddr = (__u32) cmd_dma_handle;
433 temp64.val = (__u64) err_dma_handle;
434 c->ErrDesc.Addr.lower = temp64.val32.lower;
435 c->ErrDesc.Addr.upper = temp64.val32.upper;
436 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
438 c->ctlr = h->ctlr;
439 return c;
445 * Frees a command block that was previously allocated with cmd_alloc().
447 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
449 int i;
450 u64bit temp64;
452 if( !got_from_pool)
454 temp64.val32.lower = c->ErrDesc.Addr.lower;
455 temp64.val32.upper = c->ErrDesc.Addr.upper;
456 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
457 c->err_info, (dma_addr_t) temp64.val);
458 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
459 c, (dma_addr_t) c->busaddr);
460 } else
462 i = c - h->cmd_pool;
463 clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
464 h->nr_frees++;
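/*
 * Pairing sketch, not part of the original driver: the get_from_pool flag
 * handed to cmd_alloc() must match the got_from_pool flag handed to
 * cmd_free(), otherwise either a pool bit is never cleared or a
 * pci_alloc_consistent() buffer is freed from the wrong place.
 */
#if 0
static int alloc_free_pairing_sketch(ctlr_info_t *h)
{
	CommandList_struct *c;

	c = cmd_alloc(h, 1);	/* 1: take a slot from the static pool */
	if (c == NULL)
		return -1;
	/* ... fill in and submit the command ... */
	cmd_free(h, c, 1);	/* must also say it came from the pool */
	return 0;
}
#endif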
468 static inline ctlr_info_t *get_host(struct gendisk *disk)
470 return disk->queue->queuedata;
473 static inline drive_info_struct *get_drv(struct gendisk *disk)
475 return disk->private_data;
479 * Open. Make sure the device is really there.
481 static int cciss_open(struct inode *inode, struct file *filep)
483 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
484 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
486 #ifdef CCISS_DEBUG
487 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
488 #endif /* CCISS_DEBUG */
490 if (host->busy_initializing || drv->busy_configuring)
491 return -EBUSY;
493 * Root is allowed to open raw volume zero even if it's not configured
494 * so array config can still work. Root is also allowed to open any
495 * volume that has a LUN ID, so it can issue IOCTL to reread the
496 * disk information. I don't think I really like this
497 * but I'm already using way too many device nodes to claim another one
498 * for "raw controller".
500 if (drv->nr_blocks == 0) {
501 if (iminor(inode) != 0) { /* not node 0? */
502 /* if not node 0 make sure it is a partition = 0 */
503 if (iminor(inode) & 0x0f) {
504 return -ENXIO;
505 /* if it is, make sure we have a LUN ID */
506 } else if (drv->LunID == 0) {
507 return -ENXIO;
510 if (!capable(CAP_SYS_ADMIN))
511 return -EPERM;
513 drv->usage_count++;
514 host->usage_count++;
515 return 0;
518 * Close. Sync first.
520 static int cciss_release(struct inode *inode, struct file *filep)
522 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
523 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
525 #ifdef CCISS_DEBUG
526 printk(KERN_DEBUG "cciss_release %s\n", inode->i_bdev->bd_disk->disk_name);
527 #endif /* CCISS_DEBUG */
529 drv->usage_count--;
530 host->usage_count--;
531 return 0;
534 #ifdef CONFIG_COMPAT
536 static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
538 int ret;
539 lock_kernel();
540 ret = cciss_ioctl(f->f_dentry->d_inode, f, cmd, arg);
541 unlock_kernel();
542 return ret;
545 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg);
546 static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd, unsigned long arg);
548 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
550 switch (cmd) {
551 case CCISS_GETPCIINFO:
552 case CCISS_GETINTINFO:
553 case CCISS_SETINTINFO:
554 case CCISS_GETNODENAME:
555 case CCISS_SETNODENAME:
556 case CCISS_GETHEARTBEAT:
557 case CCISS_GETBUSTYPES:
558 case CCISS_GETFIRMVER:
559 case CCISS_GETDRIVVER:
560 case CCISS_REVALIDVOLS:
561 case CCISS_DEREGDISK:
562 case CCISS_REGNEWDISK:
563 case CCISS_REGNEWD:
564 case CCISS_RESCANDISK:
565 case CCISS_GETLUNINFO:
566 return do_ioctl(f, cmd, arg);
568 case CCISS_PASSTHRU32:
569 return cciss_ioctl32_passthru(f, cmd, arg);
570 case CCISS_BIG_PASSTHRU32:
571 return cciss_ioctl32_big_passthru(f, cmd, arg);
573 default:
574 return -ENOIOCTLCMD;
578 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg)
580 IOCTL32_Command_struct __user *arg32 =
581 (IOCTL32_Command_struct __user *) arg;
582 IOCTL_Command_struct arg64;
583 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
584 int err;
585 u32 cp;
587 err = 0;
588 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
589 err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
590 err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
591 err |= get_user(arg64.buf_size, &arg32->buf_size);
592 err |= get_user(cp, &arg32->buf);
593 arg64.buf = compat_ptr(cp);
594 err |= copy_to_user(p, &arg64, sizeof(arg64));
596 if (err)
597 return -EFAULT;
599 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long) p);
600 if (err)
601 return err;
602 err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
603 if (err)
604 return -EFAULT;
605 return err;
608 static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd, unsigned long arg)
610 BIG_IOCTL32_Command_struct __user *arg32 =
611 (BIG_IOCTL32_Command_struct __user *) arg;
612 BIG_IOCTL_Command_struct arg64;
613 BIG_IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
614 int err;
615 u32 cp;
617 err = 0;
618 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
619 err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
620 err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
621 err |= get_user(arg64.buf_size, &arg32->buf_size);
622 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
623 err |= get_user(cp, &arg32->buf);
624 arg64.buf = compat_ptr(cp);
625 err |= copy_to_user(p, &arg64, sizeof(arg64));
627 if (err)
628 return -EFAULT;
630 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long) p);
631 if (err)
632 return err;
633 err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
634 if (err)
635 return -EFAULT;
636 return err;
638 #endif
640 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
642 drive_info_struct *drv = get_drv(bdev->bd_disk);
644 if (!drv->cylinders)
645 return -ENXIO;
647 geo->heads = drv->heads;
648 geo->sectors = drv->sectors;
649 geo->cylinders = drv->cylinders;
650 return 0;
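/*
 * Userspace sketch, not part of the driver: HDIO_GETGEO on a cciss block
 * node reaches cciss_getgeo() above through block_device_operations.
 * The /dev/cciss/c0d0 node name is an assumption about how the device
 * nodes were created on the running system.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	struct hd_geometry geo;
	int fd = open("/dev/cciss/c0d0", O_RDONLY);

	if (fd < 0 || ioctl(fd, HDIO_GETGEO, &geo) < 0)
		return 1;
	printf("heads=%u sectors=%u cylinders=%u\n",
	       (unsigned)geo.heads, (unsigned)geo.sectors,
	       (unsigned)geo.cylinders);
	return 0;
}
#endif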
654 * ioctl
656 static int cciss_ioctl(struct inode *inode, struct file *filep,
657 unsigned int cmd, unsigned long arg)
659 struct block_device *bdev = inode->i_bdev;
660 struct gendisk *disk = bdev->bd_disk;
661 ctlr_info_t *host = get_host(disk);
662 drive_info_struct *drv = get_drv(disk);
663 int ctlr = host->ctlr;
664 void __user *argp = (void __user *)arg;
666 #ifdef CCISS_DEBUG
667 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
668 #endif /* CCISS_DEBUG */
670 switch(cmd) {
671 case CCISS_GETPCIINFO:
673 cciss_pci_info_struct pciinfo;
675 if (!arg) return -EINVAL;
676 pciinfo.domain = pci_domain_nr(host->pdev->bus);
677 pciinfo.bus = host->pdev->bus->number;
678 pciinfo.dev_fn = host->pdev->devfn;
679 pciinfo.board_id = host->board_id;
680 if (copy_to_user(argp, &pciinfo, sizeof( cciss_pci_info_struct )))
681 return -EFAULT;
682 return(0);
684 case CCISS_GETINTINFO:
686 cciss_coalint_struct intinfo;
687 if (!arg) return -EINVAL;
688 intinfo.delay = readl(&host->cfgtable->HostWrite.CoalIntDelay);
689 intinfo.count = readl(&host->cfgtable->HostWrite.CoalIntCount);
690 if (copy_to_user(argp, &intinfo, sizeof( cciss_coalint_struct )))
691 return -EFAULT;
692 return(0);
694 case CCISS_SETINTINFO:
696 cciss_coalint_struct intinfo;
697 unsigned long flags;
698 int i;
700 if (!arg) return -EINVAL;
701 if (!capable(CAP_SYS_ADMIN)) return -EPERM;
702 if (copy_from_user(&intinfo, argp, sizeof( cciss_coalint_struct)))
703 return -EFAULT;
704 if ( (intinfo.delay == 0 ) && (intinfo.count == 0))
707 // printk("cciss_ioctl: delay and count cannot be 0\n");
708 return( -EINVAL);
710 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
711 /* Update the field, and then ring the doorbell */
712 writel( intinfo.delay,
713 &(host->cfgtable->HostWrite.CoalIntDelay));
714 writel( intinfo.count,
715 &(host->cfgtable->HostWrite.CoalIntCount));
716 writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
718 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
719 if (!(readl(host->vaddr + SA5_DOORBELL)
720 & CFGTBL_ChangeReq))
721 break;
722 /* delay and try again */
723 udelay(1000);
725 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
726 if (i >= MAX_IOCTL_CONFIG_WAIT)
727 return -EAGAIN;
728 return(0);
730 case CCISS_GETNODENAME:
732 NodeName_type NodeName;
733 int i;
735 if (!arg) return -EINVAL;
736 for(i=0;i<16;i++)
737 NodeName[i] = readb(&host->cfgtable->ServerName[i]);
738 if (copy_to_user(argp, NodeName, sizeof( NodeName_type)))
739 return -EFAULT;
740 return(0);
742 case CCISS_SETNODENAME:
744 NodeName_type NodeName;
745 unsigned long flags;
746 int i;
748 if (!arg) return -EINVAL;
749 if (!capable(CAP_SYS_ADMIN)) return -EPERM;
751 if (copy_from_user(NodeName, argp, sizeof( NodeName_type)))
752 return -EFAULT;
754 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
756 /* Update the field, and then ring the doorbell */
757 for(i=0;i<16;i++)
758 writeb( NodeName[i], &host->cfgtable->ServerName[i]);
760 writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
762 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
763 if (!(readl(host->vaddr + SA5_DOORBELL)
764 & CFGTBL_ChangeReq))
765 break;
766 /* delay and try again */
767 udelay(1000);
769 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
770 if (i >= MAX_IOCTL_CONFIG_WAIT)
771 return -EAGAIN;
772 return(0);
775 case CCISS_GETHEARTBEAT:
777 Heartbeat_type heartbeat;
779 if (!arg) return -EINVAL;
780 heartbeat = readl(&host->cfgtable->HeartBeat);
781 if (copy_to_user(argp, &heartbeat, sizeof( Heartbeat_type)))
782 return -EFAULT;
783 return(0);
785 case CCISS_GETBUSTYPES:
787 BusTypes_type BusTypes;
789 if (!arg) return -EINVAL;
790 BusTypes = readl(&host->cfgtable->BusTypes);
791 if (copy_to_user(argp, &BusTypes, sizeof( BusTypes_type) ))
792 return -EFAULT;
793 return(0);
795 case CCISS_GETFIRMVER:
797 FirmwareVer_type firmware;
799 if (!arg) return -EINVAL;
800 memcpy(firmware, host->firm_ver, 4);
802 if (copy_to_user(argp, firmware, sizeof( FirmwareVer_type)))
803 return -EFAULT;
804 return(0);
806 case CCISS_GETDRIVVER:
808 DriverVer_type DriverVer = DRIVER_VERSION;
810 if (!arg) return -EINVAL;
812 if (copy_to_user(argp, &DriverVer, sizeof( DriverVer_type) ))
813 return -EFAULT;
814 return(0);
817 case CCISS_REVALIDVOLS:
818 if (bdev != bdev->bd_contains || drv != host->drv)
819 return -ENXIO;
820 return revalidate_allvol(host);
822 case CCISS_GETLUNINFO: {
823 LogvolInfo_struct luninfo;
825 luninfo.LunID = drv->LunID;
826 luninfo.num_opens = drv->usage_count;
827 luninfo.num_parts = 0;
828 if (copy_to_user(argp, &luninfo,
829 sizeof(LogvolInfo_struct)))
830 return -EFAULT;
831 return(0);
833 case CCISS_DEREGDISK:
834 return rebuild_lun_table(host, disk);
836 case CCISS_REGNEWD:
837 return rebuild_lun_table(host, NULL);
839 case CCISS_PASSTHRU:
841 IOCTL_Command_struct iocommand;
842 CommandList_struct *c;
843 char *buff = NULL;
844 u64bit temp64;
845 unsigned long flags;
846 DECLARE_COMPLETION(wait);
848 if (!arg) return -EINVAL;
850 if (!capable(CAP_SYS_RAWIO)) return -EPERM;
852 if (copy_from_user(&iocommand, argp, sizeof( IOCTL_Command_struct) ))
853 return -EFAULT;
854 if((iocommand.buf_size < 1) &&
855 (iocommand.Request.Type.Direction != XFER_NONE))
857 return -EINVAL;
859 #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
860 /* Check kmalloc limits */
861 if(iocommand.buf_size > 128000)
862 return -EINVAL;
863 #endif
864 if(iocommand.buf_size > 0)
866 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
867 if( buff == NULL)
868 return -EFAULT;
870 if (iocommand.Request.Type.Direction == XFER_WRITE)
872 /* Copy the data into the buffer we created */
873 if (copy_from_user(buff, iocommand.buf, iocommand.buf_size))
875 kfree(buff);
876 return -EFAULT;
878 } else {
879 memset(buff, 0, iocommand.buf_size);
881 if ((c = cmd_alloc(host , 0)) == NULL)
883 kfree(buff);
884 return -ENOMEM;
886 // Fill in the command type
887 c->cmd_type = CMD_IOCTL_PEND;
888 // Fill in Command Header
889 c->Header.ReplyQueue = 0; // unused in simple mode
890 if( iocommand.buf_size > 0) // buffer to fill
892 c->Header.SGList = 1;
893 c->Header.SGTotal= 1;
894 } else // no buffers to fill
896 c->Header.SGList = 0;
897 c->Header.SGTotal= 0;
899 c->Header.LUN = iocommand.LUN_info;
900 c->Header.Tag.lower = c->busaddr; // use the kernel address of the cmd block for the tag
902 // Fill in Request block
903 c->Request = iocommand.Request;
905 // Fill in the scatter gather information
906 if (iocommand.buf_size > 0 )
908 temp64.val = pci_map_single( host->pdev, buff,
909 iocommand.buf_size,
910 PCI_DMA_BIDIRECTIONAL);
911 c->SG[0].Addr.lower = temp64.val32.lower;
912 c->SG[0].Addr.upper = temp64.val32.upper;
913 c->SG[0].Len = iocommand.buf_size;
914 c->SG[0].Ext = 0; // we are not chaining
916 c->waiting = &wait;
918 /* Put the request on the tail of the request queue */
919 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
920 addQ(&host->reqQ, c);
921 host->Qdepth++;
922 start_io(host);
923 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
925 wait_for_completion(&wait);
927 /* unlock the buffers from DMA */
928 temp64.val32.lower = c->SG[0].Addr.lower;
929 temp64.val32.upper = c->SG[0].Addr.upper;
930 pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
931 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
933 /* Copy the error information out */
934 iocommand.error_info = *(c->err_info);
935 if ( copy_to_user(argp, &iocommand, sizeof( IOCTL_Command_struct) ) )
937 kfree(buff);
938 cmd_free(host, c, 0);
939 return( -EFAULT);
942 if (iocommand.Request.Type.Direction == XFER_READ)
944 /* Copy the data out of the buffer we created */
945 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size))
947 kfree(buff);
948 cmd_free(host, c, 0);
949 return -EFAULT;
952 kfree(buff);
953 cmd_free(host, c, 0);
954 return(0);
956 case CCISS_BIG_PASSTHRU: {
957 BIG_IOCTL_Command_struct *ioc;
958 CommandList_struct *c;
959 unsigned char **buff = NULL;
960 int *buff_size = NULL;
961 u64bit temp64;
962 unsigned long flags;
963 BYTE sg_used = 0;
964 int status = 0;
965 int i;
966 DECLARE_COMPLETION(wait);
967 __u32 left;
968 __u32 sz;
969 BYTE __user *data_ptr;
971 if (!arg)
972 return -EINVAL;
973 if (!capable(CAP_SYS_RAWIO))
974 return -EPERM;
975 ioc = (BIG_IOCTL_Command_struct *)
976 kmalloc(sizeof(*ioc), GFP_KERNEL);
977 if (!ioc) {
978 status = -ENOMEM;
979 goto cleanup1;
981 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
982 status = -EFAULT;
983 goto cleanup1;
985 if ((ioc->buf_size < 1) &&
986 (ioc->Request.Type.Direction != XFER_NONE)) {
987 status = -EINVAL;
988 goto cleanup1;
990 /* Check kmalloc limits using all SGs */
991 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
992 status = -EINVAL;
993 goto cleanup1;
995 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
996 status = -EINVAL;
997 goto cleanup1;
999 buff = (unsigned char **) kmalloc(MAXSGENTRIES *
1000 sizeof(char *), GFP_KERNEL);
1001 if (!buff) {
1002 status = -ENOMEM;
1003 goto cleanup1;
1005 memset(buff, 0, MAXSGENTRIES);
1006 buff_size = (int *) kmalloc(MAXSGENTRIES * sizeof(int),
1007 GFP_KERNEL);
1008 if (!buff_size) {
1009 status = -ENOMEM;
1010 goto cleanup1;
1012 left = ioc->buf_size;
1013 data_ptr = ioc->buf;
1014 while (left) {
1015 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
1016 buff_size[sg_used] = sz;
1017 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1018 if (buff[sg_used] == NULL) {
1019 status = -ENOMEM;
1020 goto cleanup1;
1022 if (ioc->Request.Type.Direction == XFER_WRITE) {
1023 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
1024 status = -ENOMEM;
1025 goto cleanup1;
1027 } else {
1028 memset(buff[sg_used], 0, sz);
1030 left -= sz;
1031 data_ptr += sz;
1032 sg_used++;
1034 if ((c = cmd_alloc(host , 0)) == NULL) {
1035 status = -ENOMEM;
1036 goto cleanup1;
1038 c->cmd_type = CMD_IOCTL_PEND;
1039 c->Header.ReplyQueue = 0;
1041 if( ioc->buf_size > 0) {
1042 c->Header.SGList = sg_used;
1043 c->Header.SGTotal= sg_used;
1044 } else {
1045 c->Header.SGList = 0;
1046 c->Header.SGTotal= 0;
1048 c->Header.LUN = ioc->LUN_info;
1049 c->Header.Tag.lower = c->busaddr;
1051 c->Request = ioc->Request;
1052 if (ioc->buf_size > 0 ) {
1053 int i;
1054 for(i=0; i<sg_used; i++) {
1055 temp64.val = pci_map_single( host->pdev, buff[i],
1056 buff_size[i],
1057 PCI_DMA_BIDIRECTIONAL);
1058 c->SG[i].Addr.lower = temp64.val32.lower;
1059 c->SG[i].Addr.upper = temp64.val32.upper;
1060 c->SG[i].Len = buff_size[i];
1061 c->SG[i].Ext = 0; /* we are not chaining */
1064 c->waiting = &wait;
1065 /* Put the request on the tail of the request queue */
1066 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1067 addQ(&host->reqQ, c);
1068 host->Qdepth++;
1069 start_io(host);
1070 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1071 wait_for_completion(&wait);
1072 /* unlock the buffers from DMA */
1073 for(i=0; i<sg_used; i++) {
1074 temp64.val32.lower = c->SG[i].Addr.lower;
1075 temp64.val32.upper = c->SG[i].Addr.upper;
1076 pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
1077 buff_size[i], PCI_DMA_BIDIRECTIONAL);
1079 /* Copy the error information out */
1080 ioc->error_info = *(c->err_info);
1081 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1082 cmd_free(host, c, 0);
1083 status = -EFAULT;
1084 goto cleanup1;
1086 if (ioc->Request.Type.Direction == XFER_READ) {
1087 /* Copy the data out of the buffer we created */
1088 BYTE __user *ptr = ioc->buf;
1089 for(i=0; i< sg_used; i++) {
1090 if (copy_to_user(ptr, buff[i], buff_size[i])) {
1091 cmd_free(host, c, 0);
1092 status = -EFAULT;
1093 goto cleanup1;
1095 ptr += buff_size[i];
1098 cmd_free(host, c, 0);
1099 status = 0;
1100 cleanup1:
1101 if (buff) {
1102 for(i=0; i<sg_used; i++)
1103 kfree(buff[i]);
1104 kfree(buff);
1106 kfree(buff_size);
1107 kfree(ioc);
1108 return(status);
1110 default:
1111 return -ENOTTY;
1117 * revalidate_allvol is for online array config utilities. After a
1118 * utility reconfigures the drives in the array, it can use this function
1119 * (through an ioctl) to make the driver zap any previous disk structs for
1120 * that controller and get new ones.
1122 * Right now I'm using the getgeometry() function to do this, but this
1123 * function should probably be finer grained and allow you to revalidate one
1124 * particular logical volume (instead of all of them on a particular
1125 * controller).
1127 static int revalidate_allvol(ctlr_info_t *host)
1129 int ctlr = host->ctlr, i;
1130 unsigned long flags;
1132 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1133 if (host->usage_count > 1) {
1134 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1135 printk(KERN_WARNING "cciss: Device busy for volume"
1136 " revalidation (usage=%d)\n", host->usage_count);
1137 return -EBUSY;
1139 host->usage_count++;
1140 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1142 for(i=0; i< NWD; i++) {
1143 struct gendisk *disk = host->gendisk[i];
1144 if (disk) {
1145 request_queue_t *q = disk->queue;
1147 if (disk->flags & GENHD_FL_UP)
1148 del_gendisk(disk);
1149 if (q)
1150 blk_cleanup_queue(q);
1155 * Set the partition and block size structures for all volumes
1156 * on this controller to zero. We will reread all of this data
1158 memset(host->drv, 0, sizeof(drive_info_struct)
1159 * CISS_MAX_LUN);
1161 * Tell the array controller not to give us any interrupts while
1162 * we check the new geometry. Then turn interrupts back on when
1163 * we're done.
1165 host->access.set_intr_mask(host, CCISS_INTR_OFF);
1166 cciss_getgeometry(ctlr);
1167 host->access.set_intr_mask(host, CCISS_INTR_ON);
1169 /* Loop through each real device */
1170 for (i = 0; i < NWD; i++) {
1171 struct gendisk *disk = host->gendisk[i];
1172 drive_info_struct *drv = &(host->drv[i]);
1173 /* we must register the controller even if no disks exist */
1174 /* this is for the online array utilities */
1175 if (!drv->heads && i)
1176 continue;
1177 blk_queue_hardsect_size(drv->queue, drv->block_size);
1178 set_capacity(disk, drv->nr_blocks);
1179 add_disk(disk);
1181 host->usage_count--;
1182 return 0;
1185 /* This function will check the usage_count of the drive to be updated/added.
1186 * If the usage_count is zero then the drive information will be updated and
1187 * the disk will be re-registered with the kernel. If not then it will be
1188 * left alone for the next reboot. The exception to this is disk 0 which
1189 * will always be left registered with the kernel since it is also the
1190 * controller node. Any changes to disk 0 will show up on the next
1191 * reboot.
1193 static void cciss_update_drive_info(int ctlr, int drv_index)
1195 ctlr_info_t *h = hba[ctlr];
1196 struct gendisk *disk;
1197 ReadCapdata_struct *size_buff = NULL;
1198 InquiryData_struct *inq_buff = NULL;
1199 unsigned int block_size;
1200 unsigned int total_size;
1201 unsigned long flags = 0;
1202 int ret = 0;
1204 /* if the disk already exists then deregister it before proceeding*/
1205 if (h->drv[drv_index].raid_level != -1){
1206 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1207 h->drv[drv_index].busy_configuring = 1;
1208 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1209 ret = deregister_disk(h->gendisk[drv_index],
1210 &h->drv[drv_index], 0);
1211 h->drv[drv_index].busy_configuring = 0;
1214 /* If the disk is in use return */
1215 if (ret)
1216 return;
1219 /* Get information about the disk and modify the driver structure */
1220 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1221 if (size_buff == NULL)
1222 goto mem_msg;
1223 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1224 if (inq_buff == NULL)
1225 goto mem_msg;
1227 cciss_read_capacity(ctlr, drv_index, size_buff, 1,
1228 &total_size, &block_size);
1229 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1230 inq_buff, &h->drv[drv_index]);
1232 ++h->num_luns;
1233 disk = h->gendisk[drv_index];
1234 set_capacity(disk, h->drv[drv_index].nr_blocks);
1237 /* if it's the controller it's already added */
1238 if (drv_index){
1239 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1241 /* Set up queue information */
1242 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1243 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1245 /* This is a hardware imposed limit. */
1246 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1248 /* This is a limit in the driver and could be eliminated. */
1249 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1251 blk_queue_max_sectors(disk->queue, 512);
1253 disk->queue->queuedata = hba[ctlr];
1255 blk_queue_hardsect_size(disk->queue,
1256 hba[ctlr]->drv[drv_index].block_size);
1258 h->drv[drv_index].queue = disk->queue;
1259 add_disk(disk);
1262 freeret:
1263 kfree(size_buff);
1264 kfree(inq_buff);
1265 return;
1266 mem_msg:
1267 printk(KERN_ERR "cciss: out of memory\n");
1268 goto freeret;
1271 /* This function will find the first index of the controller's drive array
1272 * that has a -1 for the raid_level and will return that index. This is
1273 * where new drives will be added. If the index to be returned is greater
1274 * than the highest_lun index for the controller then highest_lun is set
1275 * to this new index. If there are no available indexes then -1 is returned.
1277 static int cciss_find_free_drive_index(int ctlr)
1279 int i;
1281 for (i=0; i < CISS_MAX_LUN; i++){
1282 if (hba[ctlr]->drv[i].raid_level == -1){
1283 if (i > hba[ctlr]->highest_lun)
1284 hba[ctlr]->highest_lun = i;
1285 return i;
1288 return -1;
1291 /* This function will add and remove logical drives from the Logical
1292 * drive array of the controller and maintain persistence of ordering
1293 * so that mount points are preserved until the next reboot. This allows
1294 * for the removal of logical drives in the middle of the drive array
1295 * without a re-ordering of those drives.
1296 * INPUT
1297 * h = The controller to perform the operations on
1298 * del_disk = The disk to remove if specified. If the value given
1299 * is NULL then no disk is removed.
1301 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1303 int ctlr = h->ctlr;
1304 int num_luns;
1305 ReportLunData_struct *ld_buff = NULL;
1306 drive_info_struct *drv = NULL;
1307 int return_code;
1308 int listlength = 0;
1309 int i;
1310 int drv_found;
1311 int drv_index = 0;
1312 __u32 lunid = 0;
1313 unsigned long flags;
1315 /* Set busy_configuring flag for this operation */
1316 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1317 if (h->num_luns >= CISS_MAX_LUN){
1318 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1319 return -EINVAL;
1322 if (h->busy_configuring){
1323 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1324 return -EBUSY;
1326 h->busy_configuring = 1;
1328 /* if del_disk is NULL then we are being called to add a new disk
1329 * and update the logical drive table. If it is not NULL then
1330 * we will check if the disk is in use or not.
1332 if (del_disk != NULL){
1333 drv = get_drv(del_disk);
1334 drv->busy_configuring = 1;
1335 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1336 return_code = deregister_disk(del_disk, drv, 1);
1337 drv->busy_configuring = 0;
1338 h->busy_configuring = 0;
1339 return return_code;
1340 } else {
1341 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1342 if (!capable(CAP_SYS_RAWIO))
1343 return -EPERM;
1345 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1346 if (ld_buff == NULL)
1347 goto mem_msg;
1349 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1350 sizeof(ReportLunData_struct), 0, 0, 0,
1351 TYPE_CMD);
1353 if (return_code == IO_OK){
1354 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
1355 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
1356 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
1357 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
1358 } else{ /* reading number of logical volumes failed */
1359 printk(KERN_WARNING "cciss: report logical volume"
1360 " command failed\n");
1361 listlength = 0;
1362 goto freeret;
1365 num_luns = listlength / 8; /* 8 bytes per entry */
1366 if (num_luns > CISS_MAX_LUN){
1367 num_luns = CISS_MAX_LUN;
1368 printk(KERN_WARNING "cciss: more luns configured"
1369 " on controller than can be handled by"
1370 " this driver.\n");
1373 /* Compare controller drive array to the driver's drive array.
1374 * Check for updates in the drive information and any new drives
1375 * on the controller.
1377 for (i=0; i < num_luns; i++){
1378 int j;
1380 drv_found = 0;
1382 lunid = (0xff &
1383 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1384 lunid |= (0xff &
1385 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1386 lunid |= (0xff &
1387 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1388 lunid |= 0xff &
1389 (unsigned int)(ld_buff->LUN[i][0]);
1391 /* Find if the LUN is already in the drive array
1392 * of the controller. If so, then update its info
1393 * if it is not in use. If it does not exist then find
1394 * the first free index and add it.
1396 for (j=0; j <= h->highest_lun; j++){
1397 if (h->drv[j].LunID == lunid){
1398 drv_index = j;
1399 drv_found = 1;
1403 /* check if the drive was found already in the array */
1404 if (!drv_found){
1405 drv_index = cciss_find_free_drive_index(ctlr);
1406 if (drv_index == -1)
1407 goto freeret;
1410 h->drv[drv_index].LunID = lunid;
1411 cciss_update_drive_info(ctlr, drv_index);
1412 } /* end for */
1413 } /* end else */
1415 freeret:
1416 kfree(ld_buff);
1417 h->busy_configuring = 0;
1418 /* We return -1 here to tell the ACU that we have registered/updated
1419 * all of the drives that we can and to keep it from calling us
1420 * additional times.
1422 return -1;
1423 mem_msg:
1424 printk(KERN_ERR "cciss: out of memory\n");
1425 goto freeret;
1428 /* This function will deregister the disk and its queue from the
1429 * kernel. It must be called with the controller lock held and the
1430 * drv structure's busy_configuring flag set. Its parameters are:
1432 * disk = This is the disk to be deregistered
1433 * drv = This is the drive_info_struct associated with the disk to be
1434 * deregistered. It contains information about the disk used
1435 * by the driver.
1436 * clear_all = This flag determines whether or not the disk information
1437 * is going to be completely cleared out and the highest_lun
1438 * reset. Sometimes we want to clear out information about
1440 * the disk in preparation for re-adding it. In this case
1440 * the highest_lun should be left unchanged and the LunID
1441 * should not be cleared.
1443 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1444 int clear_all)
1446 ctlr_info_t *h = get_host(disk);
1448 if (!capable(CAP_SYS_RAWIO))
1449 return -EPERM;
1451 /* make sure logical volume is NOT in use */
1452 if(clear_all || (h->gendisk[0] == disk)) {
1453 if (drv->usage_count > 1)
1454 return -EBUSY;
1456 else
1457 if( drv->usage_count > 0 )
1458 return -EBUSY;
1460 /* invalidate the devices and deregister the disk. If it is disk
1461 * zero do not deregister it but just zero out its values. This
1462 * allows us to delete disk zero but keep the controller registered.
1464 if (h->gendisk[0] != disk){
1465 if (disk) {
1466 request_queue_t *q = disk->queue;
1467 if (disk->flags & GENHD_FL_UP)
1468 del_gendisk(disk);
1469 if (q) {
1470 blk_cleanup_queue(q);
1471 drv->queue = NULL;
1476 --h->num_luns;
1477 /* zero out the disk size info */
1478 drv->nr_blocks = 0;
1479 drv->block_size = 0;
1480 drv->heads = 0;
1481 drv->sectors = 0;
1482 drv->cylinders = 0;
1483 drv->raid_level = -1; /* This can be used as a flag variable to
1484 * indicate that this element of the drive
1485 * array is free.
1488 if (clear_all){
1489 /* check to see if it was the last disk */
1490 if (drv == h->drv + h->highest_lun) {
1491 /* if so, find the new highest lun */
1492 int i, newhighest =-1;
1493 for(i=0; i<h->highest_lun; i++) {
1494 /* if the disk has size > 0, it is available */
1495 if (h->drv[i].heads)
1496 newhighest = i;
1498 h->highest_lun = newhighest;
1501 drv->LunID = 0;
1503 return(0);
1506 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
1507 size_t size,
1508 unsigned int use_unit_num, /* 0: address the controller,
1509 1: address logical volume log_unit,
1510 2: periph device address is scsi3addr */
1511 unsigned int log_unit, __u8 page_code, unsigned char *scsi3addr,
1512 int cmd_type)
1514 ctlr_info_t *h= hba[ctlr];
1515 u64bit buff_dma_handle;
1516 int status = IO_OK;
1518 c->cmd_type = CMD_IOCTL_PEND;
1519 c->Header.ReplyQueue = 0;
1520 if( buff != NULL) {
1521 c->Header.SGList = 1;
1522 c->Header.SGTotal= 1;
1523 } else {
1524 c->Header.SGList = 0;
1525 c->Header.SGTotal= 0;
1527 c->Header.Tag.lower = c->busaddr;
1529 c->Request.Type.Type = cmd_type;
1530 if (cmd_type == TYPE_CMD) {
1531 switch(cmd) {
1532 case CISS_INQUIRY:
1533 /* If the logical unit number is 0 then this is going
1534 to the controller, so it's a physical command:
1535 mode = 0, target = 0. So we have nothing to write.
1536 Otherwise, if use_unit_num == 1,
1537 mode = 1 (volume set addressing), target = LunID.
1538 Otherwise, if use_unit_num == 2,
1539 mode = 0 (periph dev addr), target = scsi3addr. */
1540 if (use_unit_num == 1) {
1541 c->Header.LUN.LogDev.VolId=
1542 h->drv[log_unit].LunID;
1543 c->Header.LUN.LogDev.Mode = 1;
1544 } else if (use_unit_num == 2) {
1545 memcpy(c->Header.LUN.LunAddrBytes,scsi3addr,8);
1546 c->Header.LUN.LogDev.Mode = 0;
1548 /* are we trying to read a vital product page */
1549 if(page_code != 0) {
1550 c->Request.CDB[1] = 0x01;
1551 c->Request.CDB[2] = page_code;
1553 c->Request.CDBLen = 6;
1554 c->Request.Type.Attribute = ATTR_SIMPLE;
1555 c->Request.Type.Direction = XFER_READ;
1556 c->Request.Timeout = 0;
1557 c->Request.CDB[0] = CISS_INQUIRY;
1558 c->Request.CDB[4] = size & 0xFF;
1559 break;
1560 case CISS_REPORT_LOG:
1561 case CISS_REPORT_PHYS:
1562 /* Talking to the controller, so it's a physical command:
1563 mode = 00, target = 0. Nothing to write.
1565 c->Request.CDBLen = 12;
1566 c->Request.Type.Attribute = ATTR_SIMPLE;
1567 c->Request.Type.Direction = XFER_READ;
1568 c->Request.Timeout = 0;
1569 c->Request.CDB[0] = cmd;
1570 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1571 c->Request.CDB[7] = (size >> 16) & 0xFF;
1572 c->Request.CDB[8] = (size >> 8) & 0xFF;
1573 c->Request.CDB[9] = size & 0xFF;
1574 break;
1576 case CCISS_READ_CAPACITY:
1577 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1578 c->Header.LUN.LogDev.Mode = 1;
1579 c->Request.CDBLen = 10;
1580 c->Request.Type.Attribute = ATTR_SIMPLE;
1581 c->Request.Type.Direction = XFER_READ;
1582 c->Request.Timeout = 0;
1583 c->Request.CDB[0] = cmd;
1584 break;
1585 case CCISS_CACHE_FLUSH:
1586 c->Request.CDBLen = 12;
1587 c->Request.Type.Attribute = ATTR_SIMPLE;
1588 c->Request.Type.Direction = XFER_WRITE;
1589 c->Request.Timeout = 0;
1590 c->Request.CDB[0] = BMIC_WRITE;
1591 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1592 break;
1593 default:
1594 printk(KERN_WARNING
1595 "cciss%d: Unknown Command 0x%02x\n", ctlr, cmd);
1596 return(IO_ERROR);
1598 } else if (cmd_type == TYPE_MSG) {
1599 switch (cmd) {
1600 case 0: /* ABORT message */
1601 c->Request.CDBLen = 12;
1602 c->Request.Type.Attribute = ATTR_SIMPLE;
1603 c->Request.Type.Direction = XFER_WRITE;
1604 c->Request.Timeout = 0;
1605 c->Request.CDB[0] = cmd; /* abort */
1606 c->Request.CDB[1] = 0; /* abort a command */
1607 /* buff contains the tag of the command to abort */
1608 memcpy(&c->Request.CDB[4], buff, 8);
1609 break;
1610 case 1: /* RESET message */
1611 c->Request.CDBLen = 12;
1612 c->Request.Type.Attribute = ATTR_SIMPLE;
1613 c->Request.Type.Direction = XFER_WRITE;
1614 c->Request.Timeout = 0;
1615 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1616 c->Request.CDB[0] = cmd; /* reset */
1617 c->Request.CDB[1] = 0x04; /* reset a LUN */
break; /* do not fall through into the No-Op message setup */
1618 case 3: /* No-Op message */
1619 c->Request.CDBLen = 1;
1620 c->Request.Type.Attribute = ATTR_SIMPLE;
1621 c->Request.Type.Direction = XFER_WRITE;
1622 c->Request.Timeout = 0;
1623 c->Request.CDB[0] = cmd;
1624 break;
1625 default:
1626 printk(KERN_WARNING
1627 "cciss%d: unknown message type %d\n",
1628 ctlr, cmd);
1629 return IO_ERROR;
1631 } else {
1632 printk(KERN_WARNING
1633 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1634 return IO_ERROR;
1636 /* Fill in the scatter gather information */
1637 if (size > 0) {
1638 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1639 buff, size, PCI_DMA_BIDIRECTIONAL);
1640 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1641 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1642 c->SG[0].Len = size;
1643 c->SG[0].Ext = 0; /* we are not chaining */
1645 return status;
1647 static int sendcmd_withirq(__u8 cmd,
1648 int ctlr,
1649 void *buff,
1650 size_t size,
1651 unsigned int use_unit_num,
1652 unsigned int log_unit,
1653 __u8 page_code,
1654 int cmd_type)
1656 ctlr_info_t *h = hba[ctlr];
1657 CommandList_struct *c;
1658 u64bit buff_dma_handle;
1659 unsigned long flags;
1660 int return_status;
1661 DECLARE_COMPLETION(wait);
1663 if ((c = cmd_alloc(h , 0)) == NULL)
1664 return -ENOMEM;
1665 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1666 log_unit, page_code, NULL, cmd_type);
1667 if (return_status != IO_OK) {
1668 cmd_free(h, c, 0);
1669 return return_status;
1671 resend_cmd2:
1672 c->waiting = &wait;
1674 /* Put the request on the tail of the queue and send it */
1675 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1676 addQ(&h->reqQ, c);
1677 h->Qdepth++;
1678 start_io(h);
1679 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1681 wait_for_completion(&wait);
1683 if(c->err_info->CommandStatus != 0)
1684 { /* an error has occurred */
1685 switch(c->err_info->CommandStatus)
1687 case CMD_TARGET_STATUS:
1688 printk(KERN_WARNING "cciss: cmd %p has "
1689 " completed with errors\n", c);
1690 if( c->err_info->ScsiStatus)
1692 printk(KERN_WARNING "cciss: cmd %p "
1693 "has SCSI Status = %x\n",
1695 c->err_info->ScsiStatus);
1698 break;
1699 case CMD_DATA_UNDERRUN:
1700 case CMD_DATA_OVERRUN:
1701 /* expected for inquire and report lun commands */
1702 break;
1703 case CMD_INVALID:
1704 printk(KERN_WARNING "cciss: Cmd %p is "
1705 "reported invalid\n", c);
1706 return_status = IO_ERROR;
1707 break;
1708 case CMD_PROTOCOL_ERR:
1709 printk(KERN_WARNING "cciss: cmd %p has "
1710 "protocol error \n", c);
1711 return_status = IO_ERROR;
1712 break;
1713 case CMD_HARDWARE_ERR:
1714 printk(KERN_WARNING "cciss: cmd %p had "
1715 " hardware error\n", c);
1716 return_status = IO_ERROR;
1717 break;
1718 case CMD_CONNECTION_LOST:
1719 printk(KERN_WARNING "cciss: cmd %p had "
1720 "connection lost\n", c);
1721 return_status = IO_ERROR;
1722 break;
1723 case CMD_ABORTED:
1724 printk(KERN_WARNING "cciss: cmd %p was "
1725 "aborted\n", c);
1726 return_status = IO_ERROR;
1727 break;
1728 case CMD_ABORT_FAILED:
1729 printk(KERN_WARNING "cciss: cmd %p reports "
1730 "abort failed\n", c);
1731 return_status = IO_ERROR;
1732 break;
1733 case CMD_UNSOLICITED_ABORT:
1734 printk(KERN_WARNING
1735 "cciss%d: unsolicited abort %p\n",
1736 ctlr, c);
1737 if (c->retry_count < MAX_CMD_RETRIES) {
1738 printk(KERN_WARNING
1739 "cciss%d: retrying %p\n",
1740 ctlr, c);
1741 c->retry_count++;
1742 /* erase the old error information */
1743 memset(c->err_info, 0,
1744 sizeof(ErrorInfo_struct));
1745 return_status = IO_OK;
1746 INIT_COMPLETION(wait);
1747 goto resend_cmd2;
1749 return_status = IO_ERROR;
1750 break;
1751 default:
1752 printk(KERN_WARNING "cciss: cmd %p returned "
1753 "unknown status %x\n", c,
1754 c->err_info->CommandStatus);
1755 return_status = IO_ERROR;
1758 /* unlock the buffers from DMA */
1759 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1760 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1761 pci_unmap_single( h->pdev, (dma_addr_t) buff_dma_handle.val,
1762 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1763 cmd_free(h, c, 0);
1764 return(return_status);
1767 static void cciss_geometry_inquiry(int ctlr, int logvol,
1768 int withirq, unsigned int total_size,
1769 unsigned int block_size, InquiryData_struct *inq_buff,
1770 drive_info_struct *drv)
1772 int return_code;
1773 memset(inq_buff, 0, sizeof(InquiryData_struct));
1774 if (withirq)
1775 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1776 inq_buff, sizeof(*inq_buff), 1, logvol ,0xC1, TYPE_CMD);
1777 else
1778 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1779 sizeof(*inq_buff), 1, logvol ,0xC1, NULL, TYPE_CMD);
1780 if (return_code == IO_OK) {
1781 if(inq_buff->data_byte[8] == 0xFF) {
1782 printk(KERN_WARNING
1783 "cciss: reading geometry failed, volume "
1784 "does not support reading geometry\n");
1785 drv->block_size = block_size;
1786 drv->nr_blocks = total_size;
1787 drv->heads = 255;
1788 drv->sectors = 32; // Sectors per track
1789 drv->cylinders = total_size / 255 / 32;
1790 } else {
1791 unsigned int t;
1793 drv->block_size = block_size;
1794 drv->nr_blocks = total_size;
1795 drv->heads = inq_buff->data_byte[6];
1796 drv->sectors = inq_buff->data_byte[7];
1797 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1798 drv->cylinders += inq_buff->data_byte[5];
1799 drv->raid_level = inq_buff->data_byte[8];
1800 t = drv->heads * drv->sectors;
1801 if (t > 1) {
1802 drv->cylinders = total_size/t;
1805 } else { /* Get geometry failed */
1806 printk(KERN_WARNING "cciss: reading geometry failed\n");
1808 printk(KERN_INFO " heads= %d, sectors= %d, cylinders= %d\n\n",
1809 drv->heads, drv->sectors, drv->cylinders);
1811 static void
1812 cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
1813 int withirq, unsigned int *total_size, unsigned int *block_size)
1815 int return_code;
1816 memset(buf, 0, sizeof(*buf));
1817 if (withirq)
1818 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1819 ctlr, buf, sizeof(*buf), 1, logvol, 0, TYPE_CMD);
1820 else
1821 return_code = sendcmd(CCISS_READ_CAPACITY,
1822 ctlr, buf, sizeof(*buf), 1, logvol, 0, NULL, TYPE_CMD);
1823 if (return_code == IO_OK) {
1824 *total_size = be32_to_cpu(*((__be32 *) &buf->total_size[0]))+1;
1825 *block_size = be32_to_cpu(*((__be32 *) &buf->block_size[0]));
1826 } else { /* read capacity command failed */
1827 printk(KERN_WARNING "cciss: read capacity failed\n");
1828 *total_size = 0;
1829 *block_size = BLOCK_SIZE;
1831 printk(KERN_INFO " blocks= %u block_size= %d\n",
1832 *total_size, *block_size);
1833 return;
1836 static int cciss_revalidate(struct gendisk *disk)
1838 ctlr_info_t *h = get_host(disk);
1839 drive_info_struct *drv = get_drv(disk);
1840 int logvol;
1841 int FOUND=0;
1842 unsigned int block_size;
1843 unsigned int total_size;
1844 ReadCapdata_struct *size_buff = NULL;
1845 InquiryData_struct *inq_buff = NULL;
1847 for(logvol=0; logvol < CISS_MAX_LUN; logvol++)
1849 if(h->drv[logvol].LunID == drv->LunID) {
1850 FOUND=1;
1851 break;
1855 if (!FOUND) return 1;
1857 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1858 if (size_buff == NULL)
1860 printk(KERN_WARNING "cciss: out of memory\n");
1861 return 1;
1863 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1864 if (inq_buff == NULL)
1866 printk(KERN_WARNING "cciss: out of memory\n");
1867 kfree(size_buff);
1868 return 1;
1871 cciss_read_capacity(h->ctlr, logvol, size_buff, 1, &total_size, &block_size);
1872 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, inq_buff, drv);
1874 blk_queue_hardsect_size(drv->queue, drv->block_size);
1875 set_capacity(disk, drv->nr_blocks);
1877 kfree(size_buff);
1878 kfree(inq_buff);
1879 return 0;
1883 * Wait (by polling) for a command to complete.
1884 * The memory mapped FIFO is polled for the completion.
1885 * Used only at init time, interrupts from the HBA are disabled.
1887 static unsigned long pollcomplete(int ctlr)
1889 unsigned long done;
1890 int i;
1892 /* Wait (up to 20 seconds) for a command to complete */
1894 for (i = 20 * HZ; i > 0; i--) {
1895 done = hba[ctlr]->access.command_completed(hba[ctlr]);
1896 if (done == FIFO_EMPTY)
1897 schedule_timeout_uninterruptible(1);
1898 else
1899 return (done);
1901 /* Invalid address to tell caller we ran out of time */
1902 return 1;
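/*
 * Return-value sketch, not part of the original driver: pollcomplete()
 * hands back the bus-address tag of a completed command, or 1 as a
 * "timed out" sentinel (a real tag is a command-block bus address and can
 * never be 1).  The shape of the check done by sendcmd() below:
 */
#if 0
	unsigned long complete = pollcomplete(ctlr);

	if (complete == 1) {
		/* timed out: no command list address was returned */
	} else if ((complete & ~CISS_ERROR_BIT) == c->busaddr) {
		/* our command completed; CISS_ERROR_BIT flags an error */
	}
#endif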
1905 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
1907 /* We get in here if sendcmd() is polling for completions
1908 and gets some command back that it wasn't expecting --
1909 something other than that which it just sent down.
1910 Ordinarily, that shouldn't happen, but it can happen when
1911 the scsi tape stuff gets into error handling mode, and
1912 starts using sendcmd() to try to abort commands and
1913 reset tape drives. In that case, sendcmd may pick up
1914 completions of commands that were sent to logical drives
1915 through the block i/o system, or cciss ioctls completing, etc.
1916 In that case, we need to save those completions for later
1917 processing by the interrupt handler.
1920 #ifdef CONFIG_CISS_SCSI_TAPE
1921 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
1923 /* If it's not the scsi tape stuff doing error handling (abort */
1924 /* or reset), then we don't expect anything weird. */
1925 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
1926 #endif
1927 printk( KERN_WARNING "cciss%d: SendCmd "
1928 "Invalid command list address returned! (%lx)\n",
1929 ctlr, complete);
1930 /* not much we can do. */
1931 #ifdef CONFIG_CISS_SCSI_TAPE
1932 return 1;
1935 /* We've sent down an abort or reset, but something else
1936 has completed */
1937 if (srl->ncompletions >= (NR_CMDS + 2)) {
1938 /* Uh oh. No room to save it for later... */
1939 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
1940 "reject list overflow, command lost!\n", ctlr);
1941 return 1;
1943 /* Save it for later */
1944 srl->complete[srl->ncompletions] = complete;
1945 srl->ncompletions++;
1946 #endif
1947 return 0;
1951 * Send a command to the controller, and wait for it to complete.
1952 * Only used at init time.
1954 static int sendcmd(
1955 __u8 cmd,
1956 int ctlr,
1957 void *buff,
1958 size_t size,
1959 unsigned int use_unit_num, /* 0: address the controller,
1960 1: address logical volume log_unit,
1961 2: periph device address is scsi3addr */
1962 unsigned int log_unit,
1963 __u8 page_code,
1964 unsigned char *scsi3addr,
1965 int cmd_type)
1967 CommandList_struct *c;
1968 int i;
1969 unsigned long complete;
1970 ctlr_info_t *info_p= hba[ctlr];
1971 u64bit buff_dma_handle;
1972 int status, done = 0;
1974 if ((c = cmd_alloc(info_p, 1)) == NULL) {
1975 printk(KERN_WARNING "cciss: unable to get memory");
1976 return(IO_ERROR);
1978 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1979 log_unit, page_code, scsi3addr, cmd_type);
1980 if (status != IO_OK) {
1981 cmd_free(info_p, c, 1);
1982 return status;
1984 resend_cmd1:
1986 * Disable interrupt
1988 #ifdef CCISS_DEBUG
1989 printk(KERN_DEBUG "cciss: turning intr off\n");
1990 #endif /* CCISS_DEBUG */
1991 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
1993 /* Make sure there is room in the command FIFO */
1994 /* Actually it should be completely empty at this time */
1995 /* unless we are in here doing error handling for the scsi */
1996 /* tape side of the driver. */
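/* Poll up to 200000 times, delaying 10us between tries, for a free
 * slot in the command FIFO, warning on each pass that it is still full. */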
1997 for (i = 200000; i > 0; i--)
1999 /* if fifo isn't full go */
2000 if (!(info_p->access.fifo_full(info_p)))
2003 break;
2005 udelay(10);
2006 printk(KERN_WARNING "cciss%d: SendCmd FIFO full,"
2007 " waiting!\n", ctlr);
2010 * Send the cmd
2012 info_p->access.submit_command(info_p, c);
2013 done = 0;
2014 do {
2015 complete = pollcomplete(ctlr);
2017 #ifdef CCISS_DEBUG
2018 printk(KERN_DEBUG "cciss: command completed\n");
2019 #endif /* CCISS_DEBUG */
2021 if (complete == 1) {
2022 printk( KERN_WARNING
2023 "cciss cciss%d: SendCmd Timeout out, "
2024 "No command list address returned!\n",
2025 ctlr);
2026 status = IO_ERROR;
2027 done = 1;
2028 break;
2031 /* This will need to change for direct lookup completions */
2032 if ( (complete & CISS_ERROR_BIT)
2033 && (complete & ~CISS_ERROR_BIT) == c->busaddr)
2035 /* if data overrun or underrun on a Report command,
2036 ignore it
2038 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2039 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2040 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2041 ((c->err_info->CommandStatus ==
2042 CMD_DATA_OVERRUN) ||
2043 (c->err_info->CommandStatus ==
2044 CMD_DATA_UNDERRUN)
2047 complete = c->busaddr;
2048 } else {
2049 if (c->err_info->CommandStatus ==
2050 CMD_UNSOLICITED_ABORT) {
2051 printk(KERN_WARNING "cciss%d: "
2052 "unsolicited abort %p\n",
2053 ctlr, c);
2054 if (c->retry_count < MAX_CMD_RETRIES) {
2055 printk(KERN_WARNING
2056 "cciss%d: retrying %p\n",
2057 ctlr, c);
2058 c->retry_count++;
2059 /* erase the old error */
2060 /* information */
2061 memset(c->err_info, 0,
2062 sizeof(ErrorInfo_struct));
2063 goto resend_cmd1;
2064 } else {
2065 printk(KERN_WARNING
2066 "cciss%d: retried %p too "
2067 "many times\n", ctlr, c);
2068 status = IO_ERROR;
2069 goto cleanup1;
2071 } else if (c->err_info->CommandStatus == CMD_UNABORTABLE) {
2072 printk(KERN_WARNING "cciss%d: command could not be aborted.\n", ctlr);
2073 status = IO_ERROR;
2074 goto cleanup1;
2076 printk(KERN_WARNING "cciss%d: sendcmd"
2077 " Error %x\n", ctlr,
2078 c->err_info->CommandStatus);
2079 printk(KERN_WARNING "cciss%d: sendcmd"
2080 " offensive info\n"
2081 " size %x\n num %x value %x\n", ctlr,
2082 c->err_info->MoreErrInfo.Invalid_Cmd.offense_size,
2083 c->err_info->MoreErrInfo.Invalid_Cmd.offense_num,
2084 c->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
2085 status = IO_ERROR;
2086 goto cleanup1;
2089 /* This will need changing for direct lookup completions */
2090 if (complete != c->busaddr) {
2091 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2092 BUG(); /* we are pretty much hosed if we get here. */
2094 continue;
2095 } else
2096 done = 1;
2097 } while (!done);
2099 cleanup1:
2100 /* unlock the data buffer from DMA */
2101 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2102 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2103 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2104 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2105 #ifdef CONFIG_CISS_SCSI_TAPE
2106 /* if we saved some commands for later, process them now. */
2107 if (info_p->scsi_rejects.ncompletions > 0)
2108 do_cciss_intr(0, info_p, NULL);
2109 #endif
2110 cmd_free(info_p, c, 1);
2111 return (status);
2114 * Map (physical) PCI mem into (virtual) kernel space
2116 static void __iomem *remap_pci_mem(ulong base, ulong size)
2118 ulong page_base = ((ulong) base) & PAGE_MASK;
2119 ulong page_offs = ((ulong) base) - page_base;
2120 void __iomem *page_remapped = ioremap(page_base, page_offs+size);
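/* ioremap() is given a page-aligned base (rounded down above), so add
 * the offset back into the returned virtual address for callers. */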
2122 return page_remapped ? (page_remapped + page_offs) : NULL;
2126 * Takes jobs off the request queue and sends them to the hardware,
2127 * then puts them on the completion queue to wait for completion.
2129 static void start_io( ctlr_info_t *h)
2131 CommandList_struct *c;
2133 while(( c = h->reqQ) != NULL )
2135 /* can't do anything if fifo is full */
2136 if ((h->access.fifo_full(h))) {
2137 printk(KERN_WARNING "cciss: fifo full\n");
2138 break;
2141 /* Get the first entry from the Request Q */
2142 removeQ(&(h->reqQ), c);
2143 h->Qdepth--;
2146 /* Tell the controller to execute the command */
2146 h->access.submit_command(h, c);
2148 /* Put job onto the completed Q */
2149 addQ (&(h->cmpQ), c);
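/* Complete each bio in the chain of a finished request: report the
 * full byte count, mapping a zero status to -EIO. */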
2153 static inline void complete_buffers(struct bio *bio, int status)
2155 while (bio) {
2156 struct bio *xbh = bio->bi_next;
2157 int nr_sectors = bio_sectors(bio);
2159 bio->bi_next = NULL;
2160 blk_finished_io(len);
2161 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
2162 bio = xbh;
2166 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2167 /* Zeros out the error record and then resends the command back */
2168 /* to the controller */
2169 static inline void resend_cciss_cmd( ctlr_info_t *h, CommandList_struct *c)
2171 /* erase the old error information */
2172 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2174 /* add it to software queue and then send it to the controller */
2175 addQ(&(h->reqQ),c);
2176 h->Qdepth++;
2177 if(h->Qdepth > h->maxQsinceinit)
2178 h->maxQsinceinit = h->Qdepth;
2180 start_io(h);
2183 static void cciss_softirq_done(struct request *rq)
2185 CommandList_struct *cmd = rq->completion_data;
2186 ctlr_info_t *h = hba[cmd->ctlr];
2187 unsigned long flags;
2188 u64bit temp64;
2189 int i, ddir;
2191 if (cmd->Request.Type.Direction == XFER_READ)
2192 ddir = PCI_DMA_FROMDEVICE;
2193 else
2194 ddir = PCI_DMA_TODEVICE;
2196 /* command did not need to be retried */
2197 /* unmap the DMA mapping for all the scatter gather elements */
2198 for(i=0; i<cmd->Header.SGList; i++) {
2199 temp64.val32.lower = cmd->SG[i].Addr.lower;
2200 temp64.val32.upper = cmd->SG[i].Addr.upper;
2201 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
2204 complete_buffers(rq->bio, rq->errors);
2206 #ifdef CCISS_DEBUG
2207 printk("Done with %p\n", rq);
2208 #endif /* CCISS_DEBUG */
2210 spin_lock_irqsave(&h->lock, flags);
2211 end_that_request_last(rq, rq->errors);
2212 cmd_free(h, cmd,1);
2213 spin_unlock_irqrestore(&h->lock, flags);
2216 /* Checks the status of the job and calls complete_buffers() to mark all
2217 * buffers for the completed job. Note that this function does not need
2218 * to hold the hba/queue lock.
2220 static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
2221 int timeout)
2223 int status = 1;
2224 int retry_cmd = 0;
2226 if (timeout)
2227 status = 0;
2229 if(cmd->err_info->CommandStatus != 0)
2230 { /* an error has occurred */
2231 switch(cmd->err_info->CommandStatus)
2233 unsigned char sense_key;
2234 case CMD_TARGET_STATUS:
2235 status = 0;
2237 if( cmd->err_info->ScsiStatus == 0x02)
2239 printk(KERN_WARNING "cciss: cmd %p "
2240 "has CHECK CONDITION "
2241 " byte 2 = 0x%x\n", cmd,
2242 cmd->err_info->SenseInfo[2]
2244 /* check the sense key */
2245 sense_key = 0xf &
2246 cmd->err_info->SenseInfo[2];
2247 /* no status or recovered error */
2248 if((sense_key == 0x0) ||
2249 (sense_key == 0x1))
2251 status = 1;
2253 } else
2255 printk(KERN_WARNING "cciss: cmd %p "
2256 "has SCSI Status 0x%x\n",
2257 cmd, cmd->err_info->ScsiStatus);
2259 break;
2260 case CMD_DATA_UNDERRUN:
2261 printk(KERN_WARNING "cciss: cmd %p has"
2262 " completed with data underrun "
2263 "reported\n", cmd);
2264 break;
2265 case CMD_DATA_OVERRUN:
2266 printk(KERN_WARNING "cciss: cmd %p has"
2267 " completed with data overrun "
2268 "reported\n", cmd);
2269 break;
2270 case CMD_INVALID:
2271 printk(KERN_WARNING "cciss: cmd %p is "
2272 "reported invalid\n", cmd);
2273 status = 0;
2274 break;
2275 case CMD_PROTOCOL_ERR:
2276 printk(KERN_WARNING "cciss: cmd %p has "
2277 "protocol error \n", cmd);
2278 status = 0;
2279 break;
2280 case CMD_HARDWARE_ERR:
2281 printk(KERN_WARNING "cciss: cmd %p had "
2282 " hardware error\n", cmd);
2283 status = 0;
2284 break;
2285 case CMD_CONNECTION_LOST:
2286 printk(KERN_WARNING "cciss: cmd %p had "
2287 "connection lost\n", cmd);
2288 status=0;
2289 break;
2290 case CMD_ABORTED:
2291 printk(KERN_WARNING "cciss: cmd %p was "
2292 "aborted\n", cmd);
2293 status=0;
2294 break;
2295 case CMD_ABORT_FAILED:
2296 printk(KERN_WARNING "cciss: cmd %p reports "
2297 "abort failed\n", cmd);
2298 status=0;
2299 break;
2300 case CMD_UNSOLICITED_ABORT:
2301 printk(KERN_WARNING "cciss%d: unsolicited "
2302 "abort %p\n", h->ctlr, cmd);
2303 if (cmd->retry_count < MAX_CMD_RETRIES) {
2304 retry_cmd=1;
2305 printk(KERN_WARNING
2306 "cciss%d: retrying %p\n",
2307 h->ctlr, cmd);
2308 cmd->retry_count++;
2309 } else
2310 printk(KERN_WARNING
2311 "cciss%d: %p retried too "
2312 "many times\n", h->ctlr, cmd);
2313 status=0;
2314 break;
2315 case CMD_TIMEOUT:
2316 printk(KERN_WARNING "cciss: cmd %p timed out\n",
2317 cmd);
2318 status=0;
2319 break;
2320 default:
2321 printk(KERN_WARNING "cciss: cmd %p returned "
2322 "unknown status %x\n", cmd,
2323 cmd->err_info->CommandStatus);
2324 status=0;
2327 /* We need to return this command */
2328 if(retry_cmd) {
2329 resend_cciss_cmd(h,cmd);
2330 return;
2333 cmd->rq->completion_data = cmd;
2334 cmd->rq->errors = status;
2335 blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
2336 blk_complete_request(cmd->rq);
2340 * Get a request and submit it to the controller.
2342 static void do_cciss_request(request_queue_t *q)
2344 ctlr_info_t *h= q->queuedata;
2345 CommandList_struct *c;
2346 int start_blk, seg;
2347 struct request *creq;
2348 u64bit temp64;
2349 struct scatterlist tmp_sg[MAXSGENTRIES];
2350 drive_info_struct *drv;
2351 int i, dir;
2353 /* We call start_io here in case there is a command waiting on the
2354 * queue that has not been sent.
2356 if (blk_queue_plugged(q))
2357 goto startio;
2359 queue:
2360 creq = elv_next_request(q);
2361 if (!creq)
2362 goto startio;
2364 if (creq->nr_phys_segments > MAXSGENTRIES)
2365 BUG();
2367 if (( c = cmd_alloc(h, 1)) == NULL)
2368 goto full;
2370 blkdev_dequeue_request(creq);
2372 spin_unlock_irq(q->queue_lock);
2374 c->cmd_type = CMD_RWREQ;
2375 c->rq = creq;
2377 /* fill in the request */
2378 drv = creq->rq_disk->private_data;
2379 c->Header.ReplyQueue = 0; // unused in simple mode
2380 /* got command from pool, so use the command block index instead */
2381 /* for direct lookups. */
2382 /* The first 2 bits are reserved for controller error reporting. */
2383 c->Header.Tag.lower = (c->cmdindex << 3);
2384 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
2385 c->Header.LUN.LogDev.VolId= drv->LunID;
2386 c->Header.LUN.LogDev.Mode = 1;
2387 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2388 c->Request.Type.Type = TYPE_CMD; // It is a command.
2389 c->Request.Type.Attribute = ATTR_SIMPLE;
2390 c->Request.Type.Direction =
2391 (rq_data_dir(creq) == READ) ? XFER_READ: XFER_WRITE;
2392 c->Request.Timeout = 0; // Don't time out
2393 c->Request.CDB[0] = (rq_data_dir(creq) == READ) ? CCISS_READ : CCISS_WRITE;
2394 start_blk = creq->sector;
2395 #ifdef CCISS_DEBUG
2396 printk(KERN_DEBUG "cciss: sector=%d nr_sectors=%d\n", (int) creq->sector,
2397 (int) creq->nr_sectors);
2398 #endif /* CCISS_DEBUG */
2400 seg = blk_rq_map_sg(q, creq, tmp_sg);
2402 /* get the DMA records for the setup */
2403 if (c->Request.Type.Direction == XFER_READ)
2404 dir = PCI_DMA_FROMDEVICE;
2405 else
2406 dir = PCI_DMA_TODEVICE;
2408 for (i=0; i<seg; i++)
2410 c->SG[i].Len = tmp_sg[i].length;
2411 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2412 tmp_sg[i].offset, tmp_sg[i].length,
2413 dir);
2414 c->SG[i].Addr.lower = temp64.val32.lower;
2415 c->SG[i].Addr.upper = temp64.val32.upper;
2416 c->SG[i].Ext = 0; // we are not chaining
2418 /* track how many SG entries we are using */
2419 if( seg > h->maxSG)
2420 h->maxSG = seg;
2422 #ifdef CCISS_DEBUG
2423 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n", creq->nr_sectors, seg);
2424 #endif /* CCISS_DEBUG */
2426 c->Header.SGList = c->Header.SGTotal = seg;
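/* Fill out the rest of the 10-byte READ/WRITE CDB: bytes 2-5 carry the
 * starting block, most significant byte first, and bytes 7-8 the
 * sector count. */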
2427 c->Request.CDB[1]= 0;
2428 c->Request.CDB[2]= (start_blk >> 24) & 0xff; //MSB
2429 c->Request.CDB[3]= (start_blk >> 16) & 0xff;
2430 c->Request.CDB[4]= (start_blk >> 8) & 0xff;
2431 c->Request.CDB[5]= start_blk & 0xff;
2432 c->Request.CDB[6]= 0; // (sect >> 24) & 0xff; MSB
2433 c->Request.CDB[7]= (creq->nr_sectors >> 8) & 0xff;
2434 c->Request.CDB[8]= creq->nr_sectors & 0xff;
2435 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2437 spin_lock_irq(q->queue_lock);
2439 addQ(&(h->reqQ),c);
2440 h->Qdepth++;
2441 if(h->Qdepth > h->maxQsinceinit)
2442 h->maxQsinceinit = h->Qdepth;
2444 goto queue;
2445 full:
2446 blk_stop_queue(q);
2447 startio:
2448 /* We will already have the driver lock here, so no need
2449 * to take it again.
2451 start_io(h);
2454 static inline unsigned long get_next_completion(ctlr_info_t *h)
2456 #ifdef CONFIG_CISS_SCSI_TAPE
2457 /* Any rejects from sendcmd() lying around? Process them first */
2458 if (h->scsi_rejects.ncompletions == 0)
2459 return h->access.command_completed(h);
2460 else {
2461 struct sendcmd_reject_list *srl;
2462 int n;
2463 srl = &h->scsi_rejects;
2464 n = --srl->ncompletions;
2465 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2466 printk("p");
2467 return srl->complete[n];
2469 #else
2470 return h->access.command_completed(h);
2471 #endif
2474 static inline int interrupt_pending(ctlr_info_t *h)
2476 #ifdef CONFIG_CISS_SCSI_TAPE
2477 return ( h->access.intr_pending(h)
2478 || (h->scsi_rejects.ncompletions > 0));
2479 #else
2480 return h->access.intr_pending(h);
2481 #endif
2484 static inline long interrupt_not_for_us(ctlr_info_t *h)
2486 #ifdef CONFIG_CISS_SCSI_TAPE
2487 return (((h->access.intr_pending(h) == 0) ||
2488 (h->interrupts_enabled == 0))
2489 && (h->scsi_rejects.ncompletions == 0));
2490 #else
2491 return (((h->access.intr_pending(h) == 0) ||
2492 (h->interrupts_enabled == 0)));
2493 #endif
2496 static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2498 ctlr_info_t *h = dev_id;
2499 CommandList_struct *c;
2500 unsigned long flags;
2501 __u32 a, a1, a2;
2502 int j;
2503 int start_queue = h->next_to_run;
2505 if (interrupt_not_for_us(h))
2506 return IRQ_NONE;
2508 * If there are completed commands in the completion queue,
2509 * we had better do something about it.
2511 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2512 while (interrupt_pending(h)) {
2513 while((a = get_next_completion(h)) != FIFO_EMPTY) {
2514 a1 = a;
2515 if ((a & 0x04)) {
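/* Bit 2 set marks a direct-lookup tag: the upper bits hold the
 * command's index into the pool (see do_cciss_request()). */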
2516 a2 = (a >> 3);
2517 if (a2 >= NR_CMDS) {
2518 printk(KERN_WARNING "cciss: controller cciss%d failed, stopping.\n", h->ctlr);
2519 fail_all_cmds(h->ctlr);
2520 return IRQ_HANDLED;
2523 c = h->cmd_pool + a2;
2524 a = c->busaddr;
2526 } else {
2527 a &= ~3;
2528 if ((c = h->cmpQ) == NULL) {
2529 printk(KERN_WARNING "cciss: Completion of %08x ignored\n", a1);
2530 continue;
2532 while(c->busaddr != a) {
2533 c = c->next;
2534 if (c == h->cmpQ)
2535 break;
2539 * If we've found the command, take it off the
2540 * completion Q and free it
2542 if (c->busaddr == a) {
2543 removeQ(&h->cmpQ, c);
2544 if (c->cmd_type == CMD_RWREQ) {
2545 complete_command(h, c, 0);
2546 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2547 complete(c->waiting);
2549 # ifdef CONFIG_CISS_SCSI_TAPE
2550 else if (c->cmd_type == CMD_SCSI)
2551 complete_scsi_command(c, 0, a1);
2552 # endif
2553 continue;
2558 /* check to see if we have maxed out the number of commands that can
2559 * be placed on the queue. If so then exit. We do this check here
2560 * in case the interrupt we serviced was from an ioctl and did not
2561 * free any new commands.
2563 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2564 goto cleanup;
2566 /* We have room on the queue for more commands. Now we need to queue
2567 * them up. We will also keep track of the next queue to run so
2568 * that every queue gets a chance to be started first.
2570 for (j=0; j < h->highest_lun + 1; j++){
2571 int curr_queue = (start_queue + j) % (h->highest_lun + 1);
2572 /* make sure the disk has been added and the drive is real
2573 * because this can be called from the middle of init_one.
2575 if(!(h->drv[curr_queue].queue) ||
2576 !(h->drv[curr_queue].heads))
2577 continue;
2578 blk_start_queue(h->gendisk[curr_queue]->queue);
2580 /* check to see if we have maxed out the number of commands
2581 * that can be placed on the queue.
2583 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2585 if (curr_queue == start_queue){
2586 h->next_to_run = (start_queue + 1) % (h->highest_lun + 1);
2587 goto cleanup;
2588 } else {
2589 h->next_to_run = curr_queue;
2590 goto cleanup;
2592 } else {
2593 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
2597 cleanup:
2598 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2599 return IRQ_HANDLED;
2602 * We cannot read the structure directly; for portability we must use
2603 * the I/O accessor functions.
2604 * This is for debug only.
2606 #ifdef CCISS_DEBUG
2607 static void print_cfg_table( CfgTable_struct *tb)
2609 int i;
2610 char temp_name[17];
2612 printk("Controller Configuration information\n");
2613 printk("------------------------------------\n");
2614 for(i=0;i<4;i++)
2615 temp_name[i] = readb(&(tb->Signature[i]));
2616 temp_name[4]='\0';
2617 printk(" Signature = %s\n", temp_name);
2618 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2619 printk(" Transport methods supported = 0x%x\n",
2620 readl(&(tb-> TransportSupport)));
2621 printk(" Transport methods active = 0x%x\n",
2622 readl(&(tb->TransportActive)));
2623 printk(" Requested transport Method = 0x%x\n",
2624 readl(&(tb->HostWrite.TransportRequest)));
2625 printk(" Coalesce Interrupt Delay = 0x%x\n",
2626 readl(&(tb->HostWrite.CoalIntDelay)));
2627 printk(" Coalesce Interrupt Count = 0x%x\n",
2628 readl(&(tb->HostWrite.CoalIntCount)));
2629 printk(" Max outstanding commands = %d\n",
2630 readl(&(tb->CmdsOutMax)));
2631 printk(" Bus Types = 0x%x\n", readl(&(tb-> BusTypes)));
2632 for(i=0;i<16;i++)
2633 temp_name[i] = readb(&(tb->ServerName[i]));
2634 temp_name[16] = '\0';
2635 printk(" Server Name = %s\n", temp_name);
2636 printk(" Heartbeat Counter = 0x%x\n\n\n",
2637 readl(&(tb->HeartBeat)));
2639 #endif /* CCISS_DEBUG */
2641 static void release_io_mem(ctlr_info_t *c)
2643 /* if IO mem was not protected do nothing */
2644 if( c->io_mem_addr == 0)
2645 return;
2646 release_region(c->io_mem_addr, c->io_mem_length);
2647 c->io_mem_addr = 0;
2648 c->io_mem_length = 0;
2651 static int find_PCI_BAR_index(struct pci_dev *pdev,
2652 unsigned long pci_bar_addr)
2654 int i, offset, mem_type, bar_type;
2655 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2656 return 0;
2657 offset = 0;
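/* Walk the base address registers, adding 4 bytes of config space for
 * each I/O or 32-bit memory BAR and 8 for each 64-bit memory BAR,
 * until the accumulated offset matches the BAR we were asked about. */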
2658 for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2659 bar_type = pci_resource_flags(pdev, i) &
2660 PCI_BASE_ADDRESS_SPACE;
2661 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2662 offset += 4;
2663 else {
2664 mem_type = pci_resource_flags(pdev, i) &
2665 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2666 switch (mem_type) {
2667 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2668 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2669 offset += 4; /* 32 bit */
2670 break;
2671 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2672 offset += 8;
2673 break;
2674 default: /* reserved in PCI 2.2 */
2675 printk(KERN_WARNING "Base address is invalid\n");
2676 return -1;
2677 break;
2680 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2681 return i+1;
2683 return -1;
2686 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
2687 * controllers that are capable. If not, we use IO-APIC mode.
2690 static void __devinit cciss_interrupt_mode(ctlr_info_t *c, struct pci_dev *pdev, __u32 board_id)
2692 #ifdef CONFIG_PCI_MSI
2693 int err;
2694 struct msix_entry cciss_msix_entries[4] = {{0,0}, {0,1},
2695 {0,2}, {0,3}};
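/* Each msix_entry requests one vector: the second field selects MSI-X
 * table entries 0-3, and the first is filled in with the assigned
 * vector number if pci_enable_msix() succeeds. */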
2697 /* Some boards advertise MSI but don't really support it */
2698 if ((board_id == 0x40700E11) ||
2699 (board_id == 0x40800E11) ||
2700 (board_id == 0x40820E11) ||
2701 (board_id == 0x40830E11))
2702 goto default_int_mode;
2704 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2705 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2706 if (!err) {
2707 c->intr[0] = cciss_msix_entries[0].vector;
2708 c->intr[1] = cciss_msix_entries[1].vector;
2709 c->intr[2] = cciss_msix_entries[2].vector;
2710 c->intr[3] = cciss_msix_entries[3].vector;
2711 c->msix_vector = 1;
2712 return;
2714 if (err > 0) {
2715 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2716 "available\n", err);
2717 } else {
2718 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2719 err);
2722 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2723 if (!pci_enable_msi(pdev)) {
2724 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2725 c->msi_vector = 1;
2726 return;
2727 } else {
2728 printk(KERN_WARNING "cciss: MSI init failed\n");
2729 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2730 return;
2733 #endif /* CONFIG_PCI_MSI */
2734 /* if we get here we're going to use the default interrupt mode */
2735 default_int_mode:
2736 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2737 return;
2740 static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2742 ushort subsystem_vendor_id, subsystem_device_id, command;
2743 __u32 board_id, scratchpad = 0;
2744 __u64 cfg_offset;
2745 __u32 cfg_base_addr;
2746 __u64 cfg_base_addr_index;
2747 int i;
2749 /* check to see if controller has been disabled */
2750 /* BEFORE trying to enable it */
2751 (void) pci_read_config_word(pdev, PCI_COMMAND,&command);
2752 if(!(command & 0x02))
2754 printk(KERN_WARNING "cciss: controller appears to be disabled\n");
2755 return(-1);
2758 if (pci_enable_device(pdev))
2760 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2761 return( -1);
2764 subsystem_vendor_id = pdev->subsystem_vendor;
2765 subsystem_device_id = pdev->subsystem_device;
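/* Pack the subsystem device ID into the upper 16 bits and the vendor
 * ID into the lower 16 bits; this combined board_id is matched against
 * the products[] table further down. */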
2766 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2767 subsystem_vendor_id);
2769 /* search for our IO range so we can protect it */
2770 for(i=0; i<DEVICE_COUNT_RESOURCE; i++)
2772 /* is this an IO range */
2773 if( pci_resource_flags(pdev, i) & 0x01 ) {
2774 c->io_mem_addr = pci_resource_start(pdev, i);
2775 c->io_mem_length = pci_resource_end(pdev, i) -
2776 pci_resource_start(pdev, i) +1;
2777 #ifdef CCISS_DEBUG
2778 printk("IO value found base_addr[%d] %lx %lx\n", i,
2779 c->io_mem_addr, c->io_mem_length);
2780 #endif /* CCISS_DEBUG */
2781 /* register the IO range */
2782 if(!request_region( c->io_mem_addr,
2783 c->io_mem_length, "cciss"))
2785 printk(KERN_WARNING "cciss I/O memory range already in use addr=%lx length=%ld\n",
2786 c->io_mem_addr, c->io_mem_length);
2787 c->io_mem_addr= 0;
2788 c->io_mem_length = 0;
2790 break;
2794 #ifdef CCISS_DEBUG
2795 printk("command = %x\n", command);
2796 printk("irq = %x\n", pdev->irq);
2797 printk("board_id = %x\n", board_id);
2798 #endif /* CCISS_DEBUG */
2800 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
2801 * else we use the IO-APIC interrupt assigned to us by system ROM.
2803 cciss_interrupt_mode(c, pdev, board_id);
2806 * Memory base addr is the first addr; the second points to the config
2807 * table
2810 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2811 #ifdef CCISS_DEBUG
2812 printk("address 0 = %x\n", c->paddr);
2813 #endif /* CCISS_DEBUG */
2814 c->vaddr = remap_pci_mem(c->paddr, 200);
2816 /* Wait for the board to become ready. (PCI hotplug needs this.)
2817 * We poll for up to 120 secs, once per 100ms. */
2818 for (i=0; i < 1200; i++) {
2819 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2820 if (scratchpad == CCISS_FIRMWARE_READY)
2821 break;
2822 set_current_state(TASK_INTERRUPTIBLE);
2823 schedule_timeout(HZ / 10); /* wait 100ms */
2825 if (scratchpad != CCISS_FIRMWARE_READY) {
2826 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2827 return -1;
2830 /* get the address index number */
2831 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2832 cfg_base_addr &= (__u32) 0x0000ffff;
2833 #ifdef CCISS_DEBUG
2834 printk("cfg base address = %x\n", cfg_base_addr);
2835 #endif /* CCISS_DEBUG */
2836 cfg_base_addr_index =
2837 find_PCI_BAR_index(pdev, cfg_base_addr);
2838 #ifdef CCISS_DEBUG
2839 printk("cfg base address index = %x\n", cfg_base_addr_index);
2840 #endif /* CCISS_DEBUG */
2841 if (cfg_base_addr_index == -1) {
2842 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2843 release_io_mem(c);
2844 return -1;
2847 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2848 #ifdef CCISS_DEBUG
2849 printk("cfg offset = %x\n", cfg_offset);
2850 #endif /* CCISS_DEBUG */
2851 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2852 cfg_base_addr_index) + cfg_offset,
2853 sizeof(CfgTable_struct));
2854 c->board_id = board_id;
2856 #ifdef CCISS_DEBUG
2857 print_cfg_table(c->cfgtable);
2858 #endif /* CCISS_DEBUG */
2860 for(i=0; i<NR_PRODUCTS; i++) {
2861 if (board_id == products[i].board_id) {
2862 c->product_name = products[i].product_name;
2863 c->access = *(products[i].access);
2864 break;
2867 if (i == NR_PRODUCTS) {
2868 printk(KERN_WARNING "cciss: Sorry, I don't know how"
2869 " to access the Smart Array controller %08lx\n",
2870 (unsigned long)board_id);
2871 return -1;
2873 if ( (readb(&c->cfgtable->Signature[0]) != 'C') ||
2874 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2875 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2876 (readb(&c->cfgtable->Signature[3]) != 'S') )
2878 printk("Does not appear to be a valid CISS config table\n");
2879 return -1;
2882 #ifdef CONFIG_X86
2884 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
2885 __u32 prefetch;
2886 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
2887 prefetch |= 0x100;
2888 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
2890 #endif
2892 #ifdef CCISS_DEBUG
2893 printk("Trying to put board into Simple mode\n");
2894 #endif /* CCISS_DEBUG */
2895 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
2896 /* Update the field, and then ring the doorbell */
2897 writel( CFGTBL_Trans_Simple,
2898 &(c->cfgtable->HostWrite.TransportRequest));
2899 writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
2901 /* under certain very rare conditions, this can take a while.
2902 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
2903 * as we enter this code.) */
2904 for(i=0;i<MAX_CONFIG_WAIT;i++) {
2905 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
2906 break;
2907 /* delay and try again */
2908 set_current_state(TASK_INTERRUPTIBLE);
2909 schedule_timeout(10);
2912 #ifdef CCISS_DEBUG
2913 printk(KERN_DEBUG "I counter got to %d %x\n", i, readl(c->vaddr + SA5_DOORBELL));
2914 #endif /* CCISS_DEBUG */
2915 #ifdef CCISS_DEBUG
2916 print_cfg_table(c->cfgtable);
2917 #endif /* CCISS_DEBUG */
2919 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
2921 printk(KERN_WARNING "cciss: unable to get board into"
2922 " simple mode\n");
2923 return -1;
2925 return 0;
2930 * Gets information about the local volumes attached to the controller.
2932 static void cciss_getgeometry(int cntl_num)
2934 ReportLunData_struct *ld_buff;
2935 ReadCapdata_struct *size_buff;
2936 InquiryData_struct *inq_buff;
2937 int return_code;
2938 int i;
2939 int listlength = 0;
2940 __u32 lunid = 0;
2941 int block_size;
2942 int total_size;
2944 ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
2945 if (ld_buff == NULL)
2947 printk(KERN_ERR "cciss: out of memory\n");
2948 return;
2950 memset(ld_buff, 0, sizeof(ReportLunData_struct));
2951 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
2952 if (size_buff == NULL)
2954 printk(KERN_ERR "cciss: out of memory\n");
2955 kfree(ld_buff);
2956 return;
2958 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
2959 if (inq_buff == NULL)
2961 printk(KERN_ERR "cciss: out of memory\n");
2962 kfree(ld_buff);
2963 kfree(size_buff);
2964 return;
2966 /* Get the firmware version */
2967 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
2968 sizeof(InquiryData_struct), 0, 0 ,0, NULL, TYPE_CMD);
2969 if (return_code == IO_OK)
2971 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
2972 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
2973 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
2974 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
2975 } else /* send command failed */
2977 printk(KERN_WARNING "cciss: unable to determine firmware"
2978 " version of controller\n");
2980 /* Get the number of logical volumes */
2981 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
2982 sizeof(ReportLunData_struct), 0, 0, 0, NULL, TYPE_CMD);
2984 if( return_code == IO_OK)
2986 #ifdef CCISS_DEBUG
2987 printk("LUN Data\n--------------------------\n");
2988 #endif /* CCISS_DEBUG */
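/* LUNListLength is a big-endian byte array; assemble it into a
 * host-order count of bytes in the returned LUN list. */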
2990 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
2991 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
2992 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
2993 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
2994 } else /* reading number of logical volumes failed */
2996 printk(KERN_WARNING "cciss: report logical volume"
2997 " command failed\n");
2998 listlength = 0;
3000 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
3001 if (hba[cntl_num]->num_luns > CISS_MAX_LUN)
3003 printk(KERN_ERR "cciss: only %d logical volumes supported\n",
3004 CISS_MAX_LUN);
3005 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3007 #ifdef CCISS_DEBUG
3008 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0],
3009 ld_buff->LUNListLength[1], ld_buff->LUNListLength[2],
3010 ld_buff->LUNListLength[3], hba[cntl_num]->num_luns);
3011 #endif /* CCISS_DEBUG */
3013 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns-1;
3014 // for(i=0; i< hba[cntl_num]->num_luns; i++)
3015 for(i=0; i < CISS_MAX_LUN; i++)
3017 if (i < hba[cntl_num]->num_luns){
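/* The first four bytes of each 8-byte LUN entry hold the logical
 * volume ID, least significant byte first; reassemble it here. */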
3018 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3019 << 24;
3020 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3021 << 16;
3022 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3023 << 8;
3024 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3026 hba[cntl_num]->drv[i].LunID = lunid;
3029 #ifdef CCISS_DEBUG
3030 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3031 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3032 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3033 hba[cntl_num]->drv[i].LunID);
3034 #endif /* CCISS_DEBUG */
3035 cciss_read_capacity(cntl_num, i, size_buff, 0,
3036 &total_size, &block_size);
3037 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3038 block_size, inq_buff, &hba[cntl_num]->drv[i]);
3039 } else {
3040 /* initialize raid_level to indicate a free space */
3041 hba[cntl_num]->drv[i].raid_level = -1;
3044 kfree(ld_buff);
3045 kfree(size_buff);
3046 kfree(inq_buff);
3049 /* Function to find the first free pointer into our hba[] array */
3050 /* Returns -1 if no free entries are left. */
3051 static int alloc_cciss_hba(void)
3053 struct gendisk *disk[NWD];
3054 int i, n;
3055 for (n = 0; n < NWD; n++) {
3056 disk[n] = alloc_disk(1 << NWD_SHIFT);
3057 if (!disk[n])
3058 goto out;
3061 for(i=0; i< MAX_CTLR; i++) {
3062 if (!hba[i]) {
3063 ctlr_info_t *p;
3064 p = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3065 if (!p)
3066 goto Enomem;
3067 memset(p, 0, sizeof(ctlr_info_t));
3068 for (n = 0; n < NWD; n++)
3069 p->gendisk[n] = disk[n];
3070 hba[i] = p;
3071 return i;
3074 printk(KERN_WARNING "cciss: This driver supports a maximum"
3075 " of %d controllers.\n", MAX_CTLR);
3076 goto out;
3077 Enomem:
3078 printk(KERN_ERR "cciss: out of memory.\n");
3079 out:
3080 while (n--)
3081 put_disk(disk[n]);
3082 return -1;
3085 static void free_hba(int i)
3087 ctlr_info_t *p = hba[i];
3088 int n;
3090 hba[i] = NULL;
3091 for (n = 0; n < NWD; n++)
3092 put_disk(p->gendisk[n]);
3093 kfree(p);
3097 * This is it. Find all the controllers and register them. I really hate
3098 * stealing all these major device numbers.
3099 * returns the number of block devices registered.
3101 static int __devinit cciss_init_one(struct pci_dev *pdev,
3102 const struct pci_device_id *ent)
3104 request_queue_t *q;
3105 int i;
3106 int j;
3107 int rc;
3109 printk(KERN_DEBUG "cciss: Device 0x%x has been found at"
3110 " bus %d dev %d func %d\n",
3111 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
3112 PCI_FUNC(pdev->devfn));
3113 i = alloc_cciss_hba();
3114 if(i < 0)
3115 return (-1);
3117 hba[i]->busy_initializing = 1;
3119 if (cciss_pci_init(hba[i], pdev) != 0)
3120 goto clean1;
3122 sprintf(hba[i]->devname, "cciss%d", i);
3123 hba[i]->ctlr = i;
3124 hba[i]->pdev = pdev;
3126 /* configure PCI DMA stuff */
3127 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3128 printk("cciss: using DAC cycles\n");
3129 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3130 printk("cciss: not using DAC cycles\n");
3131 else {
3132 printk("cciss: no suitable DMA available\n");
3133 goto clean1;
3137 * register with the major number, or get a dynamic major number
3138 * by passing 0 as argument. This is done for greater than
3139 * 8 controller support.
3141 if (i < MAX_CTLR_ORIG)
3142 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3143 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3144 if(rc == -EBUSY || rc == -EINVAL) {
3145 printk(KERN_ERR
3146 "cciss: Unable to get major number %d for %s "
3147 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3148 goto clean1;
3150 else {
3151 if (i >= MAX_CTLR_ORIG)
3152 hba[i]->major = rc;
3155 /* make sure the board interrupts are off */
3156 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3157 if( request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3158 SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM,
3159 hba[i]->devname, hba[i])) {
3160 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3161 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3162 goto clean2;
3164 hba[i]->cmd_pool_bits = kmalloc(((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long), GFP_KERNEL);
3165 hba[i]->cmd_pool = (CommandList_struct *)pci_alloc_consistent(
3166 hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3167 &(hba[i]->cmd_pool_dhandle));
3168 hba[i]->errinfo_pool = (ErrorInfo_struct *)pci_alloc_consistent(
3169 hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
3170 &(hba[i]->errinfo_pool_dhandle));
3171 if((hba[i]->cmd_pool_bits == NULL)
3172 || (hba[i]->cmd_pool == NULL)
3173 || (hba[i]->errinfo_pool == NULL)) {
3174 printk( KERN_ERR "cciss: out of memory");
3175 goto clean4;
3177 #ifdef CONFIG_CISS_SCSI_TAPE
3178 hba[i]->scsi_rejects.complete =
3179 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3180 (NR_CMDS + 5), GFP_KERNEL);
3181 if (hba[i]->scsi_rejects.complete == NULL) {
3182 printk( KERN_ERR "cciss: out of memory");
3183 goto clean4;
3185 #endif
3186 spin_lock_init(&hba[i]->lock);
3188 /* Initialize the pdev driver private data;
3189 have it point to hba[i]. */
3190 pci_set_drvdata(pdev, hba[i]);
3191 /* command and error info recs zeroed out before
3192 they are used */
3193 memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
3195 #ifdef CCISS_DEBUG
3196 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n",i);
3197 #endif /* CCISS_DEBUG */
3199 cciss_getgeometry(i);
3201 cciss_scsi_setup(i);
3203 /* Turn the interrupts on so we can service requests */
3204 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3206 cciss_procinit(i);
3207 hba[i]->busy_initializing = 0;
3209 for(j=0; j < NWD; j++) { /* mfm */
3210 drive_info_struct *drv = &(hba[i]->drv[j]);
3211 struct gendisk *disk = hba[i]->gendisk[j];
3213 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3214 if (!q) {
3215 printk(KERN_ERR
3216 "cciss: unable to allocate queue for disk %d\n",
3218 break;
3220 drv->queue = q;
3222 q->backing_dev_info.ra_pages = READ_AHEAD;
3223 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3225 /* This is a hardware imposed limit. */
3226 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3228 /* This is a limit in the driver and could be eliminated. */
3229 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3231 blk_queue_max_sectors(q, 512);
3233 blk_queue_softirq_done(q, cciss_softirq_done);
3235 q->queuedata = hba[i];
3236 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3237 sprintf(disk->devfs_name, "cciss/host%d/target%d", i, j);
3238 disk->major = hba[i]->major;
3239 disk->first_minor = j << NWD_SHIFT;
3240 disk->fops = &cciss_fops;
3241 disk->queue = q;
3242 disk->private_data = drv;
3243 /* we must register the controller even if no disks exist */
3244 /* this is for the online array utilities */
3245 if(!drv->heads && j)
3246 continue;
3247 blk_queue_hardsect_size(q, drv->block_size);
3248 set_capacity(disk, drv->nr_blocks);
3249 add_disk(disk);
3252 return(1);
3254 clean4:
3255 #ifdef CONFIG_CISS_SCSI_TAPE
3256 kfree(hba[i]->scsi_rejects.complete);
3257 #endif
3258 kfree(hba[i]->cmd_pool_bits);
3259 if(hba[i]->cmd_pool)
3260 pci_free_consistent(hba[i]->pdev,
3261 NR_CMDS * sizeof(CommandList_struct),
3262 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3263 if(hba[i]->errinfo_pool)
3264 pci_free_consistent(hba[i]->pdev,
3265 NR_CMDS * sizeof( ErrorInfo_struct),
3266 hba[i]->errinfo_pool,
3267 hba[i]->errinfo_pool_dhandle);
3268 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3269 clean2:
3270 unregister_blkdev(hba[i]->major, hba[i]->devname);
3271 clean1:
3272 release_io_mem(hba[i]);
3273 hba[i]->busy_initializing = 0;
3274 free_hba(i);
3275 return(-1);
3278 static void __devexit cciss_remove_one (struct pci_dev *pdev)
3280 ctlr_info_t *tmp_ptr;
3281 int i, j;
3282 char flush_buf[4];
3283 int return_code;
3285 if (pci_get_drvdata(pdev) == NULL)
3287 printk( KERN_ERR "cciss: Unable to remove device\n");
3288 return;
3290 tmp_ptr = pci_get_drvdata(pdev);
3291 i = tmp_ptr->ctlr;
3292 if (hba[i] == NULL)
3294 printk(KERN_ERR "cciss: device appears to "
3295 "already be removed \n");
3296 return;
3298 /* Turn board interrupts off and send the flush cache command */
3299 /* sendcmd will turn off interrupts and send the flush...
3300 * to write all data in the battery-backed cache to disks */
3301 memset(flush_buf, 0, 4);
3302 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3303 TYPE_CMD);
3304 if(return_code != IO_OK)
3306 printk(KERN_WARNING "Error flushing cache on controller %d\n",
3309 free_irq(hba[i]->intr[2], hba[i]);
3311 #ifdef CONFIG_PCI_MSI
3312 if (hba[i]->msix_vector)
3313 pci_disable_msix(hba[i]->pdev);
3314 else if (hba[i]->msi_vector)
3315 pci_disable_msi(hba[i]->pdev);
3316 #endif /* CONFIG_PCI_MSI */
3318 pci_set_drvdata(pdev, NULL);
3319 iounmap(hba[i]->vaddr);
3320 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3321 unregister_blkdev(hba[i]->major, hba[i]->devname);
3322 remove_proc_entry(hba[i]->devname, proc_cciss);
3324 /* remove it from the disk list */
3325 for (j = 0; j < NWD; j++) {
3326 struct gendisk *disk = hba[i]->gendisk[j];
3327 if (disk) {
3328 request_queue_t *q = disk->queue;
3330 if (disk->flags & GENHD_FL_UP)
3331 del_gendisk(disk);
3332 if (q)
3333 blk_cleanup_queue(q);
3337 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3338 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3339 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
3340 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3341 kfree(hba[i]->cmd_pool_bits);
3342 #ifdef CONFIG_CISS_SCSI_TAPE
3343 kfree(hba[i]->scsi_rejects.complete);
3344 #endif
3345 release_io_mem(hba[i]);
3346 free_hba(i);
3349 static struct pci_driver cciss_pci_driver = {
3350 .name = "cciss",
3351 .probe = cciss_init_one,
3352 .remove = __devexit_p(cciss_remove_one),
3353 .id_table = cciss_pci_device_id, /* id_table */
3357 * This is it. Register the PCI driver information for the cards we control;
3358 * the OS will call our registered routines when it finds one of our cards.
3360 static int __init cciss_init(void)
3362 printk(KERN_INFO DRIVER_NAME "\n");
3364 /* Register for our PCI devices */
3365 return pci_register_driver(&cciss_pci_driver);
3368 static void __exit cciss_cleanup(void)
3370 int i;
3372 pci_unregister_driver(&cciss_pci_driver);
3373 /* double check that all controller entries have been removed */
3374 for (i=0; i< MAX_CTLR; i++)
3376 if (hba[i] != NULL)
3378 printk(KERN_WARNING "cciss: had to remove"
3379 " controller %d\n", i);
3380 cciss_remove_one(hba[i]->pdev);
3383 remove_proc_entry("cciss", proc_root_driver);
3386 static void fail_all_cmds(unsigned long ctlr)
3388 /* If we get here, the board is apparently dead. */
3389 ctlr_info_t *h = hba[ctlr];
3390 CommandList_struct *c;
3391 unsigned long flags;
3393 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3394 h->alive = 0; /* the controller apparently died... */
3396 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3398 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3400 /* move everything off the request queue onto the completed queue */
3401 while( (c = h->reqQ) != NULL ) {
3402 removeQ(&(h->reqQ), c);
3403 h->Qdepth--;
3404 addQ (&(h->cmpQ), c);
3407 /* Now, fail everything on the completed queue with a HW error */
3408 while( (c = h->cmpQ) != NULL ) {
3409 removeQ(&h->cmpQ, c);
3410 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3411 if (c->cmd_type == CMD_RWREQ) {
3412 complete_command(h, c, 0);
3413 } else if (c->cmd_type == CMD_IOCTL_PEND)
3414 complete(c->waiting);
3415 #ifdef CONFIG_CISS_SCSI_TAPE
3416 else if (c->cmd_type == CMD_SCSI)
3417 complete_scsi_command(c, 0, 0);
3418 #endif
3420 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3421 return;
3424 module_init(cciss_init);
3425 module_exit(cciss_cleanup);