drivers/block/cciss.c
1 /*
2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
23 #include <linux/config.h> /* CONFIG_PROC_FS */
24 #include <linux/module.h>
25 #include <linux/interrupt.h>
26 #include <linux/types.h>
27 #include <linux/pci.h>
28 #include <linux/kernel.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/major.h>
32 #include <linux/fs.h>
33 #include <linux/bio.h>
34 #include <linux/blkpg.h>
35 #include <linux/timer.h>
36 #include <linux/proc_fs.h>
37 #include <linux/init.h>
38 #include <linux/hdreg.h>
39 #include <linux/spinlock.h>
40 #include <linux/compat.h>
41 #include <asm/uaccess.h>
42 #include <asm/io.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/blkdev.h>
46 #include <linux/genhd.h>
47 #include <linux/completion.h>
49 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
50 #define DRIVER_NAME "HP CISS Driver (v 2.6.10)"
51 #define DRIVER_VERSION CCISS_DRIVER_VERSION(2,6,10)
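/* e.g. DRIVER_VERSION == CCISS_DRIVER_VERSION(2,6,10) packs to 0x02060a:
 * major in bits 23..16, minor in bits 15..8, subminor in bits 7..0 */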
53 /* Embedded module documentation macros - see modules.h */
54 MODULE_AUTHOR("Hewlett-Packard Company");
55 MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.10");
56 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
57 " SA6i P600 P800 P400 P400i E200 E200i");
58 MODULE_LICENSE("GPL");
60 #include "cciss_cmd.h"
61 #include "cciss.h"
62 #include <linux/cciss_ioctl.h>
64 /* define the PCI info for the cards we can control */
65 static const struct pci_device_id cciss_pci_device_id[] = {
66 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS,
67 0x0E11, 0x4070, 0, 0, 0},
68 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
69 0x0E11, 0x4080, 0, 0, 0},
70 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
71 0x0E11, 0x4082, 0, 0, 0},
72 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
73 0x0E11, 0x4083, 0, 0, 0},
74 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
75 0x0E11, 0x409A, 0, 0, 0},
76 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
77 0x0E11, 0x409B, 0, 0, 0},
78 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
79 0x0E11, 0x409C, 0, 0, 0},
80 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
81 0x0E11, 0x409D, 0, 0, 0},
82 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
83 0x0E11, 0x4091, 0, 0, 0},
84 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA,
85 0x103C, 0x3225, 0, 0, 0},
86 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
87 0x103c, 0x3223, 0, 0, 0},
88 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
89 0x103c, 0x3234, 0, 0, 0},
90 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
91 0x103c, 0x3235, 0, 0, 0},
92 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
93 0x103c, 0x3211, 0, 0, 0},
94 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
95 0x103c, 0x3212, 0, 0, 0},
96 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
97 0x103c, 0x3213, 0, 0, 0},
98 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
99 0x103c, 0x3214, 0, 0, 0},
100 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
101 0x103c, 0x3215, 0, 0, 0},
	{0,}
};
104 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
106 #define NR_PRODUCTS ARRAY_SIZE(products)
108 /* board_id = Subsystem Device ID & Vendor ID
109 * product = Marketing Name for the board
110 * access = Address of the struct of function pointers
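 * e.g. board_id 0x3225103C is subsystem device ID 0x3225 with vendor ID
 * 0x103C, the "Smart Array P600" entry below.
 */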
112 static struct board_type products[] = {
113 { 0x40700E11, "Smart Array 5300", &SA5_access },
114 { 0x40800E11, "Smart Array 5i", &SA5B_access},
115 { 0x40820E11, "Smart Array 532", &SA5B_access},
116 { 0x40830E11, "Smart Array 5312", &SA5B_access},
117 { 0x409A0E11, "Smart Array 641", &SA5_access},
118 { 0x409B0E11, "Smart Array 642", &SA5_access},
119 { 0x409C0E11, "Smart Array 6400", &SA5_access},
120 { 0x409D0E11, "Smart Array 6400 EM", &SA5_access},
121 { 0x40910E11, "Smart Array 6i", &SA5_access},
122 { 0x3225103C, "Smart Array P600", &SA5_access},
123 { 0x3223103C, "Smart Array P800", &SA5_access},
124 { 0x3234103C, "Smart Array P400", &SA5_access},
125 { 0x3235103C, "Smart Array P400i", &SA5_access},
126 { 0x3211103C, "Smart Array E200i", &SA5_access},
127 { 0x3212103C, "Smart Array E200", &SA5_access},
128 { 0x3213103C, "Smart Array E200i", &SA5_access},
129 { 0x3214103C, "Smart Array E200i", &SA5_access},
	{ 0x3215103C, "Smart Array E200i", &SA5_access},
};
/* How long to wait (in milliseconds) for the board to go into simple mode */
134 #define MAX_CONFIG_WAIT 30000
135 #define MAX_IOCTL_CONFIG_WAIT 1000
/* define how many times we will try a command because of bus resets */
138 #define MAX_CMD_RETRIES 3
140 #define READ_AHEAD 1024
141 #define NR_CMDS 384 /* #commands that can be outstanding */
142 #define MAX_CTLR 32
/* Originally the cciss driver only supported 8 major numbers */
145 #define MAX_CTLR_ORIG 8
148 static ctlr_info_t *hba[MAX_CTLR];
150 static void do_cciss_request(request_queue_t *q);
151 static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs);
152 static int cciss_open(struct inode *inode, struct file *filep);
153 static int cciss_release(struct inode *inode, struct file *filep);
154 static int cciss_ioctl(struct inode *inode, struct file *filep,
155 unsigned int cmd, unsigned long arg);
156 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
158 static int revalidate_allvol(ctlr_info_t *host);
159 static int cciss_revalidate(struct gendisk *disk);
160 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
161 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv, int clear_all);
163 static void cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
164 int withirq, unsigned int *total_size, unsigned int *block_size);
165 static void cciss_geometry_inquiry(int ctlr, int logvol,
166 int withirq, unsigned int total_size,
167 unsigned int block_size, InquiryData_struct *inq_buff,
168 drive_info_struct *drv);
169 static void cciss_getgeometry(int cntl_num);
170 static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *, __u32);
171 static void start_io( ctlr_info_t *h);
172 static int sendcmd( __u8 cmd, int ctlr, void *buff, size_t size,
173 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
174 unsigned char *scsi3addr, int cmd_type);
175 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
176 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
177 int cmd_type);
179 static void fail_all_cmds(unsigned long ctlr);
181 #ifdef CONFIG_PROC_FS
182 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
183 int length, int *eof, void *data);
184 static void cciss_procinit(int i);
185 #else
186 static void cciss_procinit(int i) {}
187 #endif /* CONFIG_PROC_FS */
189 #ifdef CONFIG_COMPAT
190 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
191 #endif
193 static struct block_device_operations cciss_fops = {
194 .owner = THIS_MODULE,
195 .open = cciss_open,
196 .release = cciss_release,
197 .ioctl = cciss_ioctl,
198 .getgeo = cciss_getgeo,
199 #ifdef CONFIG_COMPAT
200 .compat_ioctl = cciss_compat_ioctl,
201 #endif
	.revalidate_disk = cciss_revalidate,
};
206 * Enqueuing and dequeuing functions for cmdlists.
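 * Commands are kept on a circular doubly-linked list; *Qptr points at the
 * head of the queue, and addQ() links a new command in just ahead of the
 * head, i.e. at the tail of the queue.
 */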
208 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
210 if (*Qptr == NULL) {
211 *Qptr = c;
212 c->next = c->prev = c;
213 } else {
214 c->prev = (*Qptr)->prev;
215 c->next = (*Qptr);
216 (*Qptr)->prev->next = c;
217 (*Qptr)->prev = c;
221 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
222 CommandList_struct *c)
224 if (c && c->next != c) {
225 if (*Qptr == c) *Qptr = c->next;
226 c->prev->next = c->next;
227 c->next->prev = c->prev;
228 } else {
229 *Qptr = NULL;
231 return c;
234 #include "cciss_scsi.c" /* For SCSI tape support */
236 #ifdef CONFIG_PROC_FS
239 * Report information about this controller.
241 #define ENG_GIG 1000000000
242 #define ENG_GIG_FACTOR (ENG_GIG/512)
243 #define RAID_UNKNOWN 6
244 static const char *raid_label[] = {"0","4","1(1+0)","5","5+1","ADG",
245 "UNKNOWN"};
247 static struct proc_dir_entry *proc_cciss;
249 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
250 int length, int *eof, void *data)
252 off_t pos = 0;
253 off_t len = 0;
254 int size, i, ctlr;
255 ctlr_info_t *h = (ctlr_info_t*)data;
256 drive_info_struct *drv;
257 unsigned long flags;
258 sector_t vol_sz, vol_sz_frac;
260 ctlr = h->ctlr;
262 /* prevent displaying bogus info during configuration
263 * or deconfiguration of a logical volume
265 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
266 if (h->busy_configuring) {
267 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
268 return -EBUSY;
270 h->busy_configuring = 1;
271 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
273 size = sprintf(buffer, "%s: HP %s Controller\n"
274 "Board ID: 0x%08lx\n"
275 "Firmware Version: %c%c%c%c\n"
276 "IRQ: %d\n"
277 "Logical drives: %d\n"
278 "Current Q depth: %d\n"
279 "Current # commands on controller: %d\n"
280 "Max Q depth since init: %d\n"
281 "Max # commands on controller since init: %d\n"
282 "Max SG entries since init: %d\n\n",
283 h->devname,
284 h->product_name,
285 (unsigned long)h->board_id,
286 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], h->firm_ver[3],
287 (unsigned int)h->intr[SIMPLE_MODE_INT],
288 h->num_luns,
289 h->Qdepth, h->commands_outstanding,
290 h->maxQsinceinit, h->max_outstanding, h->maxSG);
292 pos += size; len += size;
293 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
294 for(i=0; i<=h->highest_lun; i++) {
296 drv = &h->drv[i];
297 if (drv->heads == 0)
298 continue;
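		/* nr_blocks is a count of 512-byte sectors.  Dividing by
		 * ENG_GIG_FACTOR (10^9 / 512) gives whole gigabytes; the
		 * remainder is rescaled to hundredths of a GB for display. */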
300 vol_sz = drv->nr_blocks;
301 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
302 vol_sz_frac *= 100;
303 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
305 if (drv->raid_level > 5)
306 drv->raid_level = RAID_UNKNOWN;
307 size = sprintf(buffer+len, "cciss/c%dd%d:"
308 "\t%4u.%02uGB\tRAID %s\n",
309 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
310 raid_label[drv->raid_level]);
311 pos += size; len += size;
314 *eof = 1;
315 *start = buffer+offset;
316 len -= offset;
317 if (len>length)
318 len = length;
319 h->busy_configuring = 0;
320 return len;
323 static int
324 cciss_proc_write(struct file *file, const char __user *buffer,
325 unsigned long count, void *data)
327 unsigned char cmd[80];
328 int len;
329 #ifdef CONFIG_CISS_SCSI_TAPE
330 ctlr_info_t *h = (ctlr_info_t *) data;
331 int rc;
332 #endif
334 if (count > sizeof(cmd)-1) return -EINVAL;
335 if (copy_from_user(cmd, buffer, count)) return -EFAULT;
336 cmd[count] = '\0';
337 len = strlen(cmd); // above 3 lines ensure safety
338 if (len && cmd[len-1] == '\n')
339 cmd[--len] = '\0';
340 # ifdef CONFIG_CISS_SCSI_TAPE
341 if (strcmp("engage scsi", cmd)==0) {
342 rc = cciss_engage_scsi(h->ctlr);
343 if (rc != 0) return -rc;
344 return count;
346 /* might be nice to have "disengage" too, but it's not
347 safely possible. (only 1 module use count, lock issues.) */
348 # endif
349 return -EINVAL;
353 * Get us a file in /proc/cciss that says something about each controller.
354 * Create /proc/cciss if it doesn't exist yet.
356 static void __devinit cciss_procinit(int i)
358 struct proc_dir_entry *pde;
360 if (proc_cciss == NULL) {
361 proc_cciss = proc_mkdir("cciss", proc_root_driver);
362 if (!proc_cciss)
363 return;
366 pde = create_proc_read_entry(hba[i]->devname,
367 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
368 proc_cciss, cciss_proc_get_info, hba[i]);
369 pde->write_proc = cciss_proc_write;
371 #endif /* CONFIG_PROC_FS */
/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use. For operations that can wait for kmalloc
 * to possibly sleep, this routine can be called with get_from_pool set to 0.
 * cmd_free() MUST be called with got_from_pool set to 0 if cmd_alloc() was
 * called with get_from_pool set to 0.
 */
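/*
 * In this file the ioctl and sendcmd_withirq() paths pass get_from_pool = 0,
 * while sendcmd() (used only at init time) and the block request path
 * allocate from the pre-allocated pool.
 */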
380 static CommandList_struct * cmd_alloc(ctlr_info_t *h, int get_from_pool)
382 CommandList_struct *c;
383 int i;
384 u64bit temp64;
385 dma_addr_t cmd_dma_handle, err_dma_handle;
387 if (!get_from_pool)
389 c = (CommandList_struct *) pci_alloc_consistent(
390 h->pdev, sizeof(CommandList_struct), &cmd_dma_handle);
391 if(c==NULL)
392 return NULL;
393 memset(c, 0, sizeof(CommandList_struct));
395 c->cmdindex = -1;
397 c->err_info = (ErrorInfo_struct *)pci_alloc_consistent(
398 h->pdev, sizeof(ErrorInfo_struct),
399 &err_dma_handle);
401 if (c->err_info == NULL)
403 pci_free_consistent(h->pdev,
404 sizeof(CommandList_struct), c, cmd_dma_handle);
405 return NULL;
407 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
	} else /* get it out of the controller's pool */
410 do {
411 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
412 if (i == NR_CMDS)
413 return NULL;
414 } while(test_and_set_bit(i & (BITS_PER_LONG - 1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
415 #ifdef CCISS_DEBUG
416 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
417 #endif
418 c = h->cmd_pool + i;
419 memset(c, 0, sizeof(CommandList_struct));
420 cmd_dma_handle = h->cmd_pool_dhandle
421 + i*sizeof(CommandList_struct);
422 c->err_info = h->errinfo_pool + i;
423 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
424 err_dma_handle = h->errinfo_pool_dhandle
425 + i*sizeof(ErrorInfo_struct);
426 h->nr_allocs++;
428 c->cmdindex = i;
431 c->busaddr = (__u32) cmd_dma_handle;
432 temp64.val = (__u64) err_dma_handle;
433 c->ErrDesc.Addr.lower = temp64.val32.lower;
434 c->ErrDesc.Addr.upper = temp64.val32.upper;
435 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
437 c->ctlr = h->ctlr;
438 return c;
444 * Frees a command block that was previously allocated with cmd_alloc().
446 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
448 int i;
449 u64bit temp64;
451 if( !got_from_pool)
453 temp64.val32.lower = c->ErrDesc.Addr.lower;
454 temp64.val32.upper = c->ErrDesc.Addr.upper;
455 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
456 c->err_info, (dma_addr_t) temp64.val);
457 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
458 c, (dma_addr_t) c->busaddr);
459 } else
461 i = c - h->cmd_pool;
462 clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
463 h->nr_frees++;
467 static inline ctlr_info_t *get_host(struct gendisk *disk)
469 return disk->queue->queuedata;
472 static inline drive_info_struct *get_drv(struct gendisk *disk)
474 return disk->private_data;
478 * Open. Make sure the device is really there.
480 static int cciss_open(struct inode *inode, struct file *filep)
482 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
483 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
485 #ifdef CCISS_DEBUG
486 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
487 #endif /* CCISS_DEBUG */
489 if (host->busy_initializing || drv->busy_configuring)
490 return -EBUSY;
492 * Root is allowed to open raw volume zero even if it's not configured
493 * so array config can still work. Root is also allowed to open any
494 * volume that has a LUN ID, so it can issue IOCTL to reread the
495 * disk information. I don't think I really like this
 * but I'm already using way too many device nodes to claim another one
497 * for "raw controller".
499 if (drv->nr_blocks == 0) {
500 if (iminor(inode) != 0) { /* not node 0? */
501 /* if not node 0 make sure it is a partition = 0 */
502 if (iminor(inode) & 0x0f) {
503 return -ENXIO;
504 /* if it is, make sure we have a LUN ID */
505 } else if (drv->LunID == 0) {
506 return -ENXIO;
509 if (!capable(CAP_SYS_ADMIN))
510 return -EPERM;
512 drv->usage_count++;
513 host->usage_count++;
514 return 0;
517 * Close. Sync first.
519 static int cciss_release(struct inode *inode, struct file *filep)
521 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
522 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
524 #ifdef CCISS_DEBUG
525 printk(KERN_DEBUG "cciss_release %s\n", inode->i_bdev->bd_disk->disk_name);
526 #endif /* CCISS_DEBUG */
528 drv->usage_count--;
529 host->usage_count--;
530 return 0;
533 #ifdef CONFIG_COMPAT
535 static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
537 int ret;
538 lock_kernel();
539 ret = cciss_ioctl(f->f_dentry->d_inode, f, cmd, arg);
540 unlock_kernel();
541 return ret;
544 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg);
545 static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd, unsigned long arg);
547 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
549 switch (cmd) {
550 case CCISS_GETPCIINFO:
551 case CCISS_GETINTINFO:
552 case CCISS_SETINTINFO:
553 case CCISS_GETNODENAME:
554 case CCISS_SETNODENAME:
555 case CCISS_GETHEARTBEAT:
556 case CCISS_GETBUSTYPES:
557 case CCISS_GETFIRMVER:
558 case CCISS_GETDRIVVER:
559 case CCISS_REVALIDVOLS:
560 case CCISS_DEREGDISK:
561 case CCISS_REGNEWDISK:
562 case CCISS_REGNEWD:
563 case CCISS_RESCANDISK:
564 case CCISS_GETLUNINFO:
565 return do_ioctl(f, cmd, arg);
567 case CCISS_PASSTHRU32:
568 return cciss_ioctl32_passthru(f, cmd, arg);
569 case CCISS_BIG_PASSTHRU32:
570 return cciss_ioctl32_big_passthru(f, cmd, arg);
572 default:
573 return -ENOIOCTLCMD;
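/*
 * The 32-bit passthru ioctls carry pointers inside their argument structures,
 * so they cannot simply be forwarded.  The 32-bit layout is copied in, the
 * embedded buffer pointer is widened with compat_ptr(), and a native 64-bit
 * structure is rebuilt in user space with compat_alloc_user_space() before
 * the regular ioctl handler is invoked.
 */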
577 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg)
579 IOCTL32_Command_struct __user *arg32 =
580 (IOCTL32_Command_struct __user *) arg;
581 IOCTL_Command_struct arg64;
582 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
583 int err;
584 u32 cp;
586 err = 0;
587 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
588 err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
589 err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
590 err |= get_user(arg64.buf_size, &arg32->buf_size);
591 err |= get_user(cp, &arg32->buf);
592 arg64.buf = compat_ptr(cp);
593 err |= copy_to_user(p, &arg64, sizeof(arg64));
595 if (err)
596 return -EFAULT;
598 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long) p);
599 if (err)
600 return err;
601 err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
602 if (err)
603 return -EFAULT;
604 return err;
607 static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd, unsigned long arg)
609 BIG_IOCTL32_Command_struct __user *arg32 =
610 (BIG_IOCTL32_Command_struct __user *) arg;
611 BIG_IOCTL_Command_struct arg64;
612 BIG_IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
613 int err;
614 u32 cp;
616 err = 0;
617 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
618 err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
619 err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
620 err |= get_user(arg64.buf_size, &arg32->buf_size);
621 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
622 err |= get_user(cp, &arg32->buf);
623 arg64.buf = compat_ptr(cp);
624 err |= copy_to_user(p, &arg64, sizeof(arg64));
626 if (err)
627 return -EFAULT;
629 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long) p);
630 if (err)
631 return err;
632 err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
633 if (err)
634 return -EFAULT;
635 return err;
637 #endif
639 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
641 drive_info_struct *drv = get_drv(bdev->bd_disk);
643 if (!drv->cylinders)
644 return -ENXIO;
646 geo->heads = drv->heads;
647 geo->sectors = drv->sectors;
648 geo->cylinders = drv->cylinders;
649 return 0;
653 * ioctl
655 static int cciss_ioctl(struct inode *inode, struct file *filep,
656 unsigned int cmd, unsigned long arg)
658 struct block_device *bdev = inode->i_bdev;
659 struct gendisk *disk = bdev->bd_disk;
660 ctlr_info_t *host = get_host(disk);
661 drive_info_struct *drv = get_drv(disk);
662 int ctlr = host->ctlr;
663 void __user *argp = (void __user *)arg;
665 #ifdef CCISS_DEBUG
666 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
667 #endif /* CCISS_DEBUG */
669 switch(cmd) {
670 case CCISS_GETPCIINFO:
672 cciss_pci_info_struct pciinfo;
674 if (!arg) return -EINVAL;
675 pciinfo.domain = pci_domain_nr(host->pdev->bus);
676 pciinfo.bus = host->pdev->bus->number;
677 pciinfo.dev_fn = host->pdev->devfn;
678 pciinfo.board_id = host->board_id;
679 if (copy_to_user(argp, &pciinfo, sizeof( cciss_pci_info_struct )))
680 return -EFAULT;
681 return(0);
683 case CCISS_GETINTINFO:
685 cciss_coalint_struct intinfo;
686 if (!arg) return -EINVAL;
687 intinfo.delay = readl(&host->cfgtable->HostWrite.CoalIntDelay);
688 intinfo.count = readl(&host->cfgtable->HostWrite.CoalIntCount);
689 if (copy_to_user(argp, &intinfo, sizeof( cciss_coalint_struct )))
690 return -EFAULT;
691 return(0);
693 case CCISS_SETINTINFO:
695 cciss_coalint_struct intinfo;
696 unsigned long flags;
697 int i;
699 if (!arg) return -EINVAL;
700 if (!capable(CAP_SYS_ADMIN)) return -EPERM;
701 if (copy_from_user(&intinfo, argp, sizeof( cciss_coalint_struct)))
702 return -EFAULT;
703 if ( (intinfo.delay == 0 ) && (intinfo.count == 0))
706 // printk("cciss_ioctl: delay and count cannot be 0\n");
707 return( -EINVAL);
709 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
710 /* Update the field, and then ring the doorbell */
711 writel( intinfo.delay,
712 &(host->cfgtable->HostWrite.CoalIntDelay));
713 writel( intinfo.count,
714 &(host->cfgtable->HostWrite.CoalIntCount));
715 writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
717 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
718 if (!(readl(host->vaddr + SA5_DOORBELL)
719 & CFGTBL_ChangeReq))
720 break;
721 /* delay and try again */
722 udelay(1000);
724 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
725 if (i >= MAX_IOCTL_CONFIG_WAIT)
726 return -EAGAIN;
727 return(0);
729 case CCISS_GETNODENAME:
731 NodeName_type NodeName;
732 int i;
734 if (!arg) return -EINVAL;
735 for(i=0;i<16;i++)
736 NodeName[i] = readb(&host->cfgtable->ServerName[i]);
737 if (copy_to_user(argp, NodeName, sizeof( NodeName_type)))
738 return -EFAULT;
739 return(0);
741 case CCISS_SETNODENAME:
743 NodeName_type NodeName;
744 unsigned long flags;
745 int i;
747 if (!arg) return -EINVAL;
748 if (!capable(CAP_SYS_ADMIN)) return -EPERM;
750 if (copy_from_user(NodeName, argp, sizeof( NodeName_type)))
751 return -EFAULT;
753 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
755 /* Update the field, and then ring the doorbell */
756 for(i=0;i<16;i++)
757 writeb( NodeName[i], &host->cfgtable->ServerName[i]);
759 writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
761 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
762 if (!(readl(host->vaddr + SA5_DOORBELL)
763 & CFGTBL_ChangeReq))
764 break;
765 /* delay and try again */
766 udelay(1000);
768 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
769 if (i >= MAX_IOCTL_CONFIG_WAIT)
770 return -EAGAIN;
771 return(0);
774 case CCISS_GETHEARTBEAT:
776 Heartbeat_type heartbeat;
778 if (!arg) return -EINVAL;
779 heartbeat = readl(&host->cfgtable->HeartBeat);
780 if (copy_to_user(argp, &heartbeat, sizeof( Heartbeat_type)))
781 return -EFAULT;
782 return(0);
784 case CCISS_GETBUSTYPES:
786 BusTypes_type BusTypes;
788 if (!arg) return -EINVAL;
789 BusTypes = readl(&host->cfgtable->BusTypes);
790 if (copy_to_user(argp, &BusTypes, sizeof( BusTypes_type) ))
791 return -EFAULT;
792 return(0);
794 case CCISS_GETFIRMVER:
796 FirmwareVer_type firmware;
798 if (!arg) return -EINVAL;
799 memcpy(firmware, host->firm_ver, 4);
801 if (copy_to_user(argp, firmware, sizeof( FirmwareVer_type)))
802 return -EFAULT;
803 return(0);
805 case CCISS_GETDRIVVER:
807 DriverVer_type DriverVer = DRIVER_VERSION;
809 if (!arg) return -EINVAL;
811 if (copy_to_user(argp, &DriverVer, sizeof( DriverVer_type) ))
812 return -EFAULT;
813 return(0);
816 case CCISS_REVALIDVOLS:
817 if (bdev != bdev->bd_contains || drv != host->drv)
818 return -ENXIO;
819 return revalidate_allvol(host);
821 case CCISS_GETLUNINFO: {
822 LogvolInfo_struct luninfo;
824 luninfo.LunID = drv->LunID;
825 luninfo.num_opens = drv->usage_count;
826 luninfo.num_parts = 0;
827 if (copy_to_user(argp, &luninfo,
828 sizeof(LogvolInfo_struct)))
829 return -EFAULT;
830 return(0);
832 case CCISS_DEREGDISK:
833 return rebuild_lun_table(host, disk);
835 case CCISS_REGNEWD:
836 return rebuild_lun_table(host, NULL);
838 case CCISS_PASSTHRU:
840 IOCTL_Command_struct iocommand;
841 CommandList_struct *c;
842 char *buff = NULL;
843 u64bit temp64;
844 unsigned long flags;
845 DECLARE_COMPLETION(wait);
847 if (!arg) return -EINVAL;
849 if (!capable(CAP_SYS_RAWIO)) return -EPERM;
851 if (copy_from_user(&iocommand, argp, sizeof( IOCTL_Command_struct) ))
852 return -EFAULT;
853 if((iocommand.buf_size < 1) &&
854 (iocommand.Request.Type.Direction != XFER_NONE))
856 return -EINVAL;
858 #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
859 /* Check kmalloc limits */
860 if(iocommand.buf_size > 128000)
861 return -EINVAL;
862 #endif
863 if(iocommand.buf_size > 0)
865 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
866 if( buff == NULL)
			return -ENOMEM;
869 if (iocommand.Request.Type.Direction == XFER_WRITE)
871 /* Copy the data into the buffer we created */
872 if (copy_from_user(buff, iocommand.buf, iocommand.buf_size))
874 kfree(buff);
875 return -EFAULT;
877 } else {
878 memset(buff, 0, iocommand.buf_size);
880 if ((c = cmd_alloc(host , 0)) == NULL)
882 kfree(buff);
883 return -ENOMEM;
885 // Fill in the command type
886 c->cmd_type = CMD_IOCTL_PEND;
887 // Fill in Command Header
888 c->Header.ReplyQueue = 0; // unused in simple mode
889 if( iocommand.buf_size > 0) // buffer to fill
891 c->Header.SGList = 1;
892 c->Header.SGTotal= 1;
893 } else // no buffers to fill
895 c->Header.SGList = 0;
896 c->Header.SGTotal= 0;
898 c->Header.LUN = iocommand.LUN_info;
	c->Header.Tag.lower = c->busaddr;  // use the bus (DMA) address of the cmd block as the tag
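	// the controller hands this tag back in the completion FIFO, which is
	// how a completion is matched up with its command block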
901 // Fill in Request block
902 c->Request = iocommand.Request;
904 // Fill in the scatter gather information
905 if (iocommand.buf_size > 0 )
907 temp64.val = pci_map_single( host->pdev, buff,
908 iocommand.buf_size,
909 PCI_DMA_BIDIRECTIONAL);
910 c->SG[0].Addr.lower = temp64.val32.lower;
911 c->SG[0].Addr.upper = temp64.val32.upper;
912 c->SG[0].Len = iocommand.buf_size;
913 c->SG[0].Ext = 0; // we are not chaining
915 c->waiting = &wait;
917 /* Put the request on the tail of the request queue */
918 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
919 addQ(&host->reqQ, c);
920 host->Qdepth++;
921 start_io(host);
922 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
924 wait_for_completion(&wait);
926 /* unlock the buffers from DMA */
927 temp64.val32.lower = c->SG[0].Addr.lower;
928 temp64.val32.upper = c->SG[0].Addr.upper;
929 pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
930 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
932 /* Copy the error information out */
933 iocommand.error_info = *(c->err_info);
934 if ( copy_to_user(argp, &iocommand, sizeof( IOCTL_Command_struct) ) )
936 kfree(buff);
937 cmd_free(host, c, 0);
938 return( -EFAULT);
941 if (iocommand.Request.Type.Direction == XFER_READ)
943 /* Copy the data out of the buffer we created */
944 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size))
946 kfree(buff);
947 cmd_free(host, c, 0);
948 return -EFAULT;
951 kfree(buff);
952 cmd_free(host, c, 0);
953 return(0);
955 case CCISS_BIG_PASSTHRU: {
956 BIG_IOCTL_Command_struct *ioc;
957 CommandList_struct *c;
958 unsigned char **buff = NULL;
959 int *buff_size = NULL;
960 u64bit temp64;
961 unsigned long flags;
962 BYTE sg_used = 0;
963 int status = 0;
964 int i;
965 DECLARE_COMPLETION(wait);
966 __u32 left;
967 __u32 sz;
968 BYTE __user *data_ptr;
970 if (!arg)
971 return -EINVAL;
972 if (!capable(CAP_SYS_RAWIO))
973 return -EPERM;
974 ioc = (BIG_IOCTL_Command_struct *)
975 kmalloc(sizeof(*ioc), GFP_KERNEL);
976 if (!ioc) {
977 status = -ENOMEM;
978 goto cleanup1;
980 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
981 status = -EFAULT;
982 goto cleanup1;
984 if ((ioc->buf_size < 1) &&
985 (ioc->Request.Type.Direction != XFER_NONE)) {
986 status = -EINVAL;
987 goto cleanup1;
989 /* Check kmalloc limits using all SGs */
990 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
991 status = -EINVAL;
992 goto cleanup1;
994 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
995 status = -EINVAL;
996 goto cleanup1;
998 buff = (unsigned char **) kmalloc(MAXSGENTRIES *
999 sizeof(char *), GFP_KERNEL);
1000 if (!buff) {
1001 status = -ENOMEM;
1002 goto cleanup1;
		memset(buff, 0, MAXSGENTRIES * sizeof(char *));
1005 buff_size = (int *) kmalloc(MAXSGENTRIES * sizeof(int),
1006 GFP_KERNEL);
1007 if (!buff_size) {
1008 status = -ENOMEM;
1009 goto cleanup1;
1011 left = ioc->buf_size;
1012 data_ptr = ioc->buf;
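		/* carve the user buffer into at most MAXSGENTRIES kernel
		 * buffers of up to malloc_size bytes each, one per
		 * scatter-gather entry */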
1013 while (left) {
1014 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
1015 buff_size[sg_used] = sz;
1016 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1017 if (buff[sg_used] == NULL) {
1018 status = -ENOMEM;
1019 goto cleanup1;
1021 if (ioc->Request.Type.Direction == XFER_WRITE) {
1022 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
				status = -EFAULT;
1024 goto cleanup1;
1026 } else {
1027 memset(buff[sg_used], 0, sz);
1029 left -= sz;
1030 data_ptr += sz;
1031 sg_used++;
1033 if ((c = cmd_alloc(host , 0)) == NULL) {
1034 status = -ENOMEM;
1035 goto cleanup1;
1037 c->cmd_type = CMD_IOCTL_PEND;
1038 c->Header.ReplyQueue = 0;
1040 if( ioc->buf_size > 0) {
1041 c->Header.SGList = sg_used;
1042 c->Header.SGTotal= sg_used;
1043 } else {
1044 c->Header.SGList = 0;
1045 c->Header.SGTotal= 0;
1047 c->Header.LUN = ioc->LUN_info;
1048 c->Header.Tag.lower = c->busaddr;
1050 c->Request = ioc->Request;
1051 if (ioc->buf_size > 0 ) {
1052 int i;
1053 for(i=0; i<sg_used; i++) {
1054 temp64.val = pci_map_single( host->pdev, buff[i],
1055 buff_size[i],
1056 PCI_DMA_BIDIRECTIONAL);
1057 c->SG[i].Addr.lower = temp64.val32.lower;
1058 c->SG[i].Addr.upper = temp64.val32.upper;
1059 c->SG[i].Len = buff_size[i];
1060 c->SG[i].Ext = 0; /* we are not chaining */
1063 c->waiting = &wait;
1064 /* Put the request on the tail of the request queue */
1065 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1066 addQ(&host->reqQ, c);
1067 host->Qdepth++;
1068 start_io(host);
1069 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1070 wait_for_completion(&wait);
1071 /* unlock the buffers from DMA */
1072 for(i=0; i<sg_used; i++) {
1073 temp64.val32.lower = c->SG[i].Addr.lower;
1074 temp64.val32.upper = c->SG[i].Addr.upper;
1075 pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
1076 buff_size[i], PCI_DMA_BIDIRECTIONAL);
1078 /* Copy the error information out */
1079 ioc->error_info = *(c->err_info);
1080 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1081 cmd_free(host, c, 0);
1082 status = -EFAULT;
1083 goto cleanup1;
1085 if (ioc->Request.Type.Direction == XFER_READ) {
1086 /* Copy the data out of the buffer we created */
1087 BYTE __user *ptr = ioc->buf;
1088 for(i=0; i< sg_used; i++) {
1089 if (copy_to_user(ptr, buff[i], buff_size[i])) {
1090 cmd_free(host, c, 0);
1091 status = -EFAULT;
1092 goto cleanup1;
1094 ptr += buff_size[i];
1097 cmd_free(host, c, 0);
1098 status = 0;
1099 cleanup1:
1100 if (buff) {
1101 for(i=0; i<sg_used; i++)
1102 kfree(buff[i]);
1103 kfree(buff);
1105 kfree(buff_size);
1106 kfree(ioc);
1107 return(status);
1109 default:
1110 return -ENOTTY;
1116 * revalidate_allvol is for online array config utilities. After a
1117 * utility reconfigures the drives in the array, it can use this function
1118 * (through an ioctl) to make the driver zap any previous disk structs for
1119 * that controller and get new ones.
1121 * Right now I'm using the getgeometry() function to do this, but this
1122 * function should probably be finer grained and allow you to revalidate one
 * particular logical volume (instead of all of them on a particular
1124 * controller).
1126 static int revalidate_allvol(ctlr_info_t *host)
1128 int ctlr = host->ctlr, i;
1129 unsigned long flags;
1131 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1132 if (host->usage_count > 1) {
1133 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1134 printk(KERN_WARNING "cciss: Device busy for volume"
1135 " revalidation (usage=%d)\n", host->usage_count);
1136 return -EBUSY;
1138 host->usage_count++;
1139 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1141 for(i=0; i< NWD; i++) {
1142 struct gendisk *disk = host->gendisk[i];
1143 if (disk) {
1144 request_queue_t *q = disk->queue;
1146 if (disk->flags & GENHD_FL_UP)
1147 del_gendisk(disk);
1148 if (q)
1149 blk_cleanup_queue(q);
1154 * Set the partition and block size structures for all volumes
1155 * on this controller to zero. We will reread all of this data
1157 memset(host->drv, 0, sizeof(drive_info_struct)
1158 * CISS_MAX_LUN);
1160 * Tell the array controller not to give us any interrupts while
1161 * we check the new geometry. Then turn interrupts back on when
1162 * we're done.
1164 host->access.set_intr_mask(host, CCISS_INTR_OFF);
1165 cciss_getgeometry(ctlr);
1166 host->access.set_intr_mask(host, CCISS_INTR_ON);
1168 /* Loop through each real device */
1169 for (i = 0; i < NWD; i++) {
1170 struct gendisk *disk = host->gendisk[i];
1171 drive_info_struct *drv = &(host->drv[i]);
1172 /* we must register the controller even if no disks exist */
1173 /* this is for the online array utilities */
1174 if (!drv->heads && i)
1175 continue;
1176 blk_queue_hardsect_size(drv->queue, drv->block_size);
1177 set_capacity(disk, drv->nr_blocks);
1178 add_disk(disk);
1180 host->usage_count--;
1181 return 0;
1184 static inline void complete_buffers(struct bio *bio, int status)
1186 while (bio) {
1187 struct bio *xbh = bio->bi_next;
1188 int nr_sectors = bio_sectors(bio);
1190 bio->bi_next = NULL;
1191 blk_finished_io(len);
1192 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
1193 bio = xbh;
1198 static void cciss_softirq_done(struct request *rq)
1200 CommandList_struct *cmd = rq->completion_data;
1201 ctlr_info_t *h = hba[cmd->ctlr];
1202 unsigned long flags;
1203 u64bit temp64;
1204 int i, ddir;
1206 if (cmd->Request.Type.Direction == XFER_READ)
1207 ddir = PCI_DMA_FROMDEVICE;
1208 else
1209 ddir = PCI_DMA_TODEVICE;
1211 /* command did not need to be retried */
1212 /* unmap the DMA mapping for all the scatter gather elements */
1213 for(i=0; i<cmd->Header.SGList; i++) {
1214 temp64.val32.lower = cmd->SG[i].Addr.lower;
1215 temp64.val32.upper = cmd->SG[i].Addr.upper;
1216 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1219 complete_buffers(rq->bio, rq->errors);
1221 if (blk_fs_request(rq)) {
1222 const int rw = rq_data_dir(rq);
1224 disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
1227 #ifdef CCISS_DEBUG
1228 printk("Done with %p\n", rq);
1229 #endif /* CCISS_DEBUG */
1231 spin_lock_irqsave(&h->lock, flags);
1232 end_that_request_last(rq, rq->errors);
1233 cmd_free(h, cmd,1);
1234 spin_unlock_irqrestore(&h->lock, flags);
1237 /* This function will check the usage_count of the drive to be updated/added.
1238 * If the usage_count is zero then the drive information will be updated and
1239 * the disk will be re-registered with the kernel. If not then it will be
1240 * left alone for the next reboot. The exception to this is disk 0 which
1241 * will always be left registered with the kernel since it is also the
1242 * controller node. Any changes to disk 0 will show up on the next
1243 * reboot.
1245 static void cciss_update_drive_info(int ctlr, int drv_index)
1247 ctlr_info_t *h = hba[ctlr];
1248 struct gendisk *disk;
1249 ReadCapdata_struct *size_buff = NULL;
1250 InquiryData_struct *inq_buff = NULL;
1251 unsigned int block_size;
1252 unsigned int total_size;
1253 unsigned long flags = 0;
1254 int ret = 0;
1256 /* if the disk already exists then deregister it before proceeding*/
1257 if (h->drv[drv_index].raid_level != -1){
1258 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1259 h->drv[drv_index].busy_configuring = 1;
1260 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1261 ret = deregister_disk(h->gendisk[drv_index],
1262 &h->drv[drv_index], 0);
1263 h->drv[drv_index].busy_configuring = 0;
1266 /* If the disk is in use return */
1267 if (ret)
1268 return;
	/* Get information about the disk and modify the driver structure */
1272 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1273 if (size_buff == NULL)
1274 goto mem_msg;
1275 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1276 if (inq_buff == NULL)
1277 goto mem_msg;
1279 cciss_read_capacity(ctlr, drv_index, size_buff, 1,
1280 &total_size, &block_size);
1281 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1282 inq_buff, &h->drv[drv_index]);
1284 ++h->num_luns;
1285 disk = h->gendisk[drv_index];
1286 set_capacity(disk, h->drv[drv_index].nr_blocks);
1289 /* if it's the controller it's already added */
1290 if (drv_index){
1291 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1293 /* Set up queue information */
1294 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1295 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1297 /* This is a hardware imposed limit. */
1298 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1300 /* This is a limit in the driver and could be eliminated. */
1301 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1303 blk_queue_max_sectors(disk->queue, 512);
1305 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1307 disk->queue->queuedata = hba[ctlr];
1309 blk_queue_hardsect_size(disk->queue,
1310 hba[ctlr]->drv[drv_index].block_size);
1312 h->drv[drv_index].queue = disk->queue;
1313 add_disk(disk);
1316 freeret:
1317 kfree(size_buff);
1318 kfree(inq_buff);
1319 return;
1320 mem_msg:
1321 printk(KERN_ERR "cciss: out of memory\n");
1322 goto freeret;
1325 /* This function will find the first index of the controllers drive array
1326 * that has a -1 for the raid_level and will return that index. This is
1327 * where new drives will be added. If the index to be returned is greater
1328 * than the highest_lun index for the controller then highest_lun is set
1329 * to this new index. If there are no available indexes then -1 is returned.
1331 static int cciss_find_free_drive_index(int ctlr)
1333 int i;
1335 for (i=0; i < CISS_MAX_LUN; i++){
1336 if (hba[ctlr]->drv[i].raid_level == -1){
1337 if (i > hba[ctlr]->highest_lun)
1338 hba[ctlr]->highest_lun = i;
1339 return i;
1342 return -1;
1345 /* This function will add and remove logical drives from the Logical
 * drive array of the controller and maintain persistence of ordering
1347 * so that mount points are preserved until the next reboot. This allows
1348 * for the removal of logical drives in the middle of the drive array
1349 * without a re-ordering of those drives.
1350 * INPUT
1351 * h = The controller to perform the operations on
1352 * del_disk = The disk to remove if specified. If the value given
1353 * is NULL then no disk is removed.
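 * On the add/update path this function deliberately returns -1 once it has
 * registered everything it can, to keep the array config utility from
 * calling it again (see the comment before the final return).
 */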
1355 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1357 int ctlr = h->ctlr;
1358 int num_luns;
1359 ReportLunData_struct *ld_buff = NULL;
1360 drive_info_struct *drv = NULL;
1361 int return_code;
1362 int listlength = 0;
1363 int i;
1364 int drv_found;
1365 int drv_index = 0;
1366 __u32 lunid = 0;
1367 unsigned long flags;
1369 /* Set busy_configuring flag for this operation */
1370 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1371 if (h->num_luns >= CISS_MAX_LUN){
1372 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1373 return -EINVAL;
1376 if (h->busy_configuring){
1377 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1378 return -EBUSY;
1380 h->busy_configuring = 1;
1382 /* if del_disk is NULL then we are being called to add a new disk
1383 * and update the logical drive table. If it is not NULL then
1384 * we will check if the disk is in use or not.
1386 if (del_disk != NULL){
1387 drv = get_drv(del_disk);
1388 drv->busy_configuring = 1;
1389 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1390 return_code = deregister_disk(del_disk, drv, 1);
1391 drv->busy_configuring = 0;
1392 h->busy_configuring = 0;
1393 return return_code;
1394 } else {
1395 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1396 if (!capable(CAP_SYS_RAWIO))
1397 return -EPERM;
1399 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1400 if (ld_buff == NULL)
1401 goto mem_msg;
1403 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1404 sizeof(ReportLunData_struct), 0, 0, 0,
1405 TYPE_CMD);
1407 if (return_code == IO_OK){
1408 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
1409 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
1410 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
1411 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
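		/* LUNListLength is a big-endian byte count (byte 0 is the
		 * most significant), assembled here into host order */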
1412 } else{ /* reading number of logical volumes failed */
1413 printk(KERN_WARNING "cciss: report logical volume"
1414 " command failed\n");
1415 listlength = 0;
1416 goto freeret;
1419 num_luns = listlength / 8; /* 8 bytes per entry */
1420 if (num_luns > CISS_MAX_LUN){
1421 num_luns = CISS_MAX_LUN;
1422 printk(KERN_WARNING "cciss: more luns configured"
1423 " on controller than can be handled by"
1424 " this driver.\n");
1427 /* Compare controller drive array to drivers drive array.
1428 * Check for updates in the drive information and any new drives
1429 * on the controller.
1431 for (i=0; i < num_luns; i++){
1432 int j;
1434 drv_found = 0;
1436 lunid = (0xff &
1437 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1438 lunid |= (0xff &
1439 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1440 lunid |= (0xff &
1441 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1442 lunid |= 0xff &
1443 (unsigned int)(ld_buff->LUN[i][0]);
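		/* the 4-byte LUN ID is stored least-significant byte first
		 * in the report buffer */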
		/* Find out if the LUN is already in the drive array
		 * of the controller.  If so, update its info provided it
		 * is not in use.  If it does not exist, find the first
		 * free index and add it there.
		 */
1450 for (j=0; j <= h->highest_lun; j++){
1451 if (h->drv[j].LunID == lunid){
1452 drv_index = j;
1453 drv_found = 1;
1457 /* check if the drive was found already in the array */
1458 if (!drv_found){
1459 drv_index = cciss_find_free_drive_index(ctlr);
1460 if (drv_index == -1)
1461 goto freeret;
1464 h->drv[drv_index].LunID = lunid;
1465 cciss_update_drive_info(ctlr, drv_index);
1466 } /* end for */
1467 } /* end else */
1469 freeret:
1470 kfree(ld_buff);
1471 h->busy_configuring = 0;
1472 /* We return -1 here to tell the ACU that we have registered/updated
1473 * all of the drives that we can and to keep it from calling us
1474 * additional times.
1476 return -1;
1477 mem_msg:
1478 printk(KERN_ERR "cciss: out of memory\n");
1479 goto freeret;
/* This function will deregister the disk and its queue from the
 * kernel.  It must be called with the controller lock held and the
 * drv structure's busy_configuring flag set.  Its parameters are:
 *
 * disk = This is the disk to be deregistered
 * drv = This is the drive_info_struct associated with the disk to be
 *       deregistered.  It contains information about the disk used
 *       by the driver.
 * clear_all = This flag determines whether or not the disk information
 *       is going to be completely cleared out and the highest_lun
 *       reset.  Sometimes we want to clear out information about
 *       the disk in preparation for re-adding it.  In this case
 *       the highest_lun should be left unchanged and the LunID
 *       should not be cleared.
 */
1497 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1498 int clear_all)
1500 ctlr_info_t *h = get_host(disk);
1502 if (!capable(CAP_SYS_RAWIO))
1503 return -EPERM;
	/* make sure logical volume is NOT in use */
1506 if(clear_all || (h->gendisk[0] == disk)) {
1507 if (drv->usage_count > 1)
1508 return -EBUSY;
1510 else
1511 if( drv->usage_count > 0 )
1512 return -EBUSY;
	/* invalidate the devices and deregister the disk.  If it is disk
	 * zero do not deregister it but just zero out its values.  This
	 * allows us to delete disk zero but keep the controller registered.
	 */
1518 if (h->gendisk[0] != disk){
1519 if (disk) {
1520 request_queue_t *q = disk->queue;
1521 if (disk->flags & GENHD_FL_UP)
1522 del_gendisk(disk);
1523 if (q) {
1524 blk_cleanup_queue(q);
1525 drv->queue = NULL;
1530 --h->num_luns;
1531 /* zero out the disk size info */
1532 drv->nr_blocks = 0;
1533 drv->block_size = 0;
1534 drv->heads = 0;
1535 drv->sectors = 0;
1536 drv->cylinders = 0;
1537 drv->raid_level = -1; /* This can be used as a flag variable to
1538 * indicate that this element of the drive
1539 * array is free.
1542 if (clear_all){
1543 /* check to see if it was the last disk */
1544 if (drv == h->drv + h->highest_lun) {
			/* if so, find the new highest lun */
1546 int i, newhighest =-1;
1547 for(i=0; i<h->highest_lun; i++) {
1548 /* if the disk has size > 0, it is available */
1549 if (h->drv[i].heads)
1550 newhighest = i;
1552 h->highest_lun = newhighest;
1555 drv->LunID = 0;
1557 return(0);
1560 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
1561 size_t size,
1562 unsigned int use_unit_num, /* 0: address the controller,
1563 1: address logical volume log_unit,
1564 2: periph device address is scsi3addr */
1565 unsigned int log_unit, __u8 page_code, unsigned char *scsi3addr,
1566 int cmd_type)
1568 ctlr_info_t *h= hba[ctlr];
1569 u64bit buff_dma_handle;
1570 int status = IO_OK;
1572 c->cmd_type = CMD_IOCTL_PEND;
1573 c->Header.ReplyQueue = 0;
1574 if( buff != NULL) {
1575 c->Header.SGList = 1;
1576 c->Header.SGTotal= 1;
1577 } else {
1578 c->Header.SGList = 0;
1579 c->Header.SGTotal= 0;
1581 c->Header.Tag.lower = c->busaddr;
1583 c->Request.Type.Type = cmd_type;
1584 if (cmd_type == TYPE_CMD) {
1585 switch(cmd) {
1586 case CISS_INQUIRY:
			/* If the logical unit number is 0 then this command
			   is going to the controller, so it's a physical
			   command: mode = 0, target = 0, and we have nothing
			   to write.
			   Otherwise, if use_unit_num == 1,
			   mode = 1 (volume set addressing), target = LUNID.
			   Otherwise, if use_unit_num == 2,
			   mode = 0 (periph dev addr), target = scsi3addr. */
1594 if (use_unit_num == 1) {
1595 c->Header.LUN.LogDev.VolId=
1596 h->drv[log_unit].LunID;
1597 c->Header.LUN.LogDev.Mode = 1;
1598 } else if (use_unit_num == 2) {
1599 memcpy(c->Header.LUN.LunAddrBytes,scsi3addr,8);
1600 c->Header.LUN.LogDev.Mode = 0;
1602 /* are we trying to read a vital product page */
1603 if(page_code != 0) {
1604 c->Request.CDB[1] = 0x01;
1605 c->Request.CDB[2] = page_code;
1607 c->Request.CDBLen = 6;
1608 c->Request.Type.Attribute = ATTR_SIMPLE;
1609 c->Request.Type.Direction = XFER_READ;
1610 c->Request.Timeout = 0;
1611 c->Request.CDB[0] = CISS_INQUIRY;
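			/* 6-byte INQUIRY CDB: allocation length is byte 4 */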
1612 c->Request.CDB[4] = size & 0xFF;
1613 break;
1614 case CISS_REPORT_LOG:
1615 case CISS_REPORT_PHYS:
			/* Talking to the controller, so it's a physical
			   command: mode = 00, target = 0.  Nothing to write.
			 */
1619 c->Request.CDBLen = 12;
1620 c->Request.Type.Attribute = ATTR_SIMPLE;
1621 c->Request.Type.Direction = XFER_READ;
1622 c->Request.Timeout = 0;
1623 c->Request.CDB[0] = cmd;
1624 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1625 c->Request.CDB[7] = (size >> 16) & 0xFF;
1626 c->Request.CDB[8] = (size >> 8) & 0xFF;
1627 c->Request.CDB[9] = size & 0xFF;
1628 break;
1630 case CCISS_READ_CAPACITY:
1631 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1632 c->Header.LUN.LogDev.Mode = 1;
1633 c->Request.CDBLen = 10;
1634 c->Request.Type.Attribute = ATTR_SIMPLE;
1635 c->Request.Type.Direction = XFER_READ;
1636 c->Request.Timeout = 0;
1637 c->Request.CDB[0] = cmd;
1638 break;
1639 case CCISS_CACHE_FLUSH:
1640 c->Request.CDBLen = 12;
1641 c->Request.Type.Attribute = ATTR_SIMPLE;
1642 c->Request.Type.Direction = XFER_WRITE;
1643 c->Request.Timeout = 0;
1644 c->Request.CDB[0] = BMIC_WRITE;
1645 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1646 break;
1647 default:
1648 printk(KERN_WARNING
1649 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1650 return(IO_ERROR);
1652 } else if (cmd_type == TYPE_MSG) {
1653 switch (cmd) {
1654 case 0: /* ABORT message */
1655 c->Request.CDBLen = 12;
1656 c->Request.Type.Attribute = ATTR_SIMPLE;
1657 c->Request.Type.Direction = XFER_WRITE;
1658 c->Request.Timeout = 0;
1659 c->Request.CDB[0] = cmd; /* abort */
1660 c->Request.CDB[1] = 0; /* abort a command */
1661 /* buff contains the tag of the command to abort */
1662 memcpy(&c->Request.CDB[4], buff, 8);
1663 break;
1664 case 1: /* RESET message */
1665 c->Request.CDBLen = 12;
1666 c->Request.Type.Attribute = ATTR_SIMPLE;
1667 c->Request.Type.Direction = XFER_WRITE;
1668 c->Request.Timeout = 0;
1669 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1670 c->Request.CDB[0] = cmd; /* reset */
			c->Request.CDB[1] = 0x04; /* reset a LUN */
			break;
		case 3: /* No-Op message */
1673 c->Request.CDBLen = 1;
1674 c->Request.Type.Attribute = ATTR_SIMPLE;
1675 c->Request.Type.Direction = XFER_WRITE;
1676 c->Request.Timeout = 0;
1677 c->Request.CDB[0] = cmd;
1678 break;
1679 default:
1680 printk(KERN_WARNING
1681 "cciss%d: unknown message type %d\n",
1682 ctlr, cmd);
1683 return IO_ERROR;
1685 } else {
1686 printk(KERN_WARNING
1687 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1688 return IO_ERROR;
1690 /* Fill in the scatter gather information */
1691 if (size > 0) {
1692 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1693 buff, size, PCI_DMA_BIDIRECTIONAL);
1694 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1695 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1696 c->SG[0].Len = size;
1697 c->SG[0].Ext = 0; /* we are not chaining */
1699 return status;
1701 static int sendcmd_withirq(__u8 cmd,
1702 int ctlr,
1703 void *buff,
1704 size_t size,
1705 unsigned int use_unit_num,
1706 unsigned int log_unit,
1707 __u8 page_code,
1708 int cmd_type)
1710 ctlr_info_t *h = hba[ctlr];
1711 CommandList_struct *c;
1712 u64bit buff_dma_handle;
1713 unsigned long flags;
1714 int return_status;
1715 DECLARE_COMPLETION(wait);
1717 if ((c = cmd_alloc(h , 0)) == NULL)
1718 return -ENOMEM;
1719 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1720 log_unit, page_code, NULL, cmd_type);
1721 if (return_status != IO_OK) {
1722 cmd_free(h, c, 0);
1723 return return_status;
1725 resend_cmd2:
1726 c->waiting = &wait;
1728 /* Put the request on the tail of the queue and send it */
1729 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1730 addQ(&h->reqQ, c);
1731 h->Qdepth++;
1732 start_io(h);
1733 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1735 wait_for_completion(&wait);
1737 if(c->err_info->CommandStatus != 0)
1738 { /* an error has occurred */
1739 switch(c->err_info->CommandStatus)
1741 case CMD_TARGET_STATUS:
1742 printk(KERN_WARNING "cciss: cmd %p has "
1743 " completed with errors\n", c);
1744 if( c->err_info->ScsiStatus)
1746 printk(KERN_WARNING "cciss: cmd %p "
1747 "has SCSI Status = %x\n",
1749 c->err_info->ScsiStatus);
1752 break;
1753 case CMD_DATA_UNDERRUN:
1754 case CMD_DATA_OVERRUN:
1755 /* expected for inquire and report lun commands */
1756 break;
1757 case CMD_INVALID:
1758 printk(KERN_WARNING "cciss: Cmd %p is "
1759 "reported invalid\n", c);
1760 return_status = IO_ERROR;
1761 break;
1762 case CMD_PROTOCOL_ERR:
1763 printk(KERN_WARNING "cciss: cmd %p has "
1764 "protocol error \n", c);
1765 return_status = IO_ERROR;
1766 break;
1767 case CMD_HARDWARE_ERR:
1768 printk(KERN_WARNING "cciss: cmd %p had "
1769 " hardware error\n", c);
1770 return_status = IO_ERROR;
1771 break;
1772 case CMD_CONNECTION_LOST:
1773 printk(KERN_WARNING "cciss: cmd %p had "
1774 "connection lost\n", c);
1775 return_status = IO_ERROR;
1776 break;
1777 case CMD_ABORTED:
1778 printk(KERN_WARNING "cciss: cmd %p was "
1779 "aborted\n", c);
1780 return_status = IO_ERROR;
1781 break;
1782 case CMD_ABORT_FAILED:
1783 printk(KERN_WARNING "cciss: cmd %p reports "
1784 "abort failed\n", c);
1785 return_status = IO_ERROR;
1786 break;
1787 case CMD_UNSOLICITED_ABORT:
1788 printk(KERN_WARNING
1789 "cciss%d: unsolicited abort %p\n",
1790 ctlr, c);
1791 if (c->retry_count < MAX_CMD_RETRIES) {
1792 printk(KERN_WARNING
1793 "cciss%d: retrying %p\n",
1794 ctlr, c);
1795 c->retry_count++;
1796 /* erase the old error information */
1797 memset(c->err_info, 0,
1798 sizeof(ErrorInfo_struct));
1799 return_status = IO_OK;
1800 INIT_COMPLETION(wait);
1801 goto resend_cmd2;
1803 return_status = IO_ERROR;
1804 break;
1805 default:
1806 printk(KERN_WARNING "cciss: cmd %p returned "
1807 "unknown status %x\n", c,
1808 c->err_info->CommandStatus);
1809 return_status = IO_ERROR;
1812 /* unlock the buffers from DMA */
1813 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1814 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1815 pci_unmap_single( h->pdev, (dma_addr_t) buff_dma_handle.val,
1816 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1817 cmd_free(h, c, 0);
1818 return(return_status);
1821 static void cciss_geometry_inquiry(int ctlr, int logvol,
1822 int withirq, unsigned int total_size,
1823 unsigned int block_size, InquiryData_struct *inq_buff,
1824 drive_info_struct *drv)
1826 int return_code;
1827 memset(inq_buff, 0, sizeof(InquiryData_struct));
1828 if (withirq)
1829 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1830 inq_buff, sizeof(*inq_buff), 1, logvol ,0xC1, TYPE_CMD);
1831 else
1832 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1833 sizeof(*inq_buff), 1, logvol ,0xC1, NULL, TYPE_CMD);
1834 if (return_code == IO_OK) {
1835 if(inq_buff->data_byte[8] == 0xFF) {
1836 printk(KERN_WARNING
1837 "cciss: reading geometry failed, volume "
1838 "does not support reading geometry\n");
1839 drv->block_size = block_size;
1840 drv->nr_blocks = total_size;
1841 drv->heads = 255;
1842 drv->sectors = 32; // Sectors per track
1843 drv->cylinders = total_size / 255 / 32;
1844 } else {
1845 unsigned int t;
1847 drv->block_size = block_size;
1848 drv->nr_blocks = total_size;
1849 drv->heads = inq_buff->data_byte[6];
1850 drv->sectors = inq_buff->data_byte[7];
1851 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1852 drv->cylinders += inq_buff->data_byte[5];
1853 drv->raid_level = inq_buff->data_byte[8];
1854 t = drv->heads * drv->sectors;
1855 if (t > 1) {
1856 drv->cylinders = total_size/t;
1859 } else { /* Get geometry failed */
1860 printk(KERN_WARNING "cciss: reading geometry failed\n");
1862 printk(KERN_INFO " heads= %d, sectors= %d, cylinders= %d\n\n",
1863 drv->heads, drv->sectors, drv->cylinders);
1865 static void
1866 cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
1867 int withirq, unsigned int *total_size, unsigned int *block_size)
1869 int return_code;
1870 memset(buf, 0, sizeof(*buf));
1871 if (withirq)
1872 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1873 ctlr, buf, sizeof(*buf), 1, logvol, 0, TYPE_CMD);
1874 else
1875 return_code = sendcmd(CCISS_READ_CAPACITY,
1876 ctlr, buf, sizeof(*buf), 1, logvol, 0, NULL, TYPE_CMD);
1877 if (return_code == IO_OK) {
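		/* the controller returns the address of the last block;
		 * add 1 for the total number of blocks */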
1878 *total_size = be32_to_cpu(*((__be32 *) &buf->total_size[0]))+1;
1879 *block_size = be32_to_cpu(*((__be32 *) &buf->block_size[0]));
1880 } else { /* read capacity command failed */
1881 printk(KERN_WARNING "cciss: read capacity failed\n");
1882 *total_size = 0;
1883 *block_size = BLOCK_SIZE;
1885 printk(KERN_INFO " blocks= %u block_size= %d\n",
1886 *total_size, *block_size);
1887 return;
1890 static int cciss_revalidate(struct gendisk *disk)
1892 ctlr_info_t *h = get_host(disk);
1893 drive_info_struct *drv = get_drv(disk);
1894 int logvol;
1895 int FOUND=0;
1896 unsigned int block_size;
1897 unsigned int total_size;
1898 ReadCapdata_struct *size_buff = NULL;
1899 InquiryData_struct *inq_buff = NULL;
1901 for(logvol=0; logvol < CISS_MAX_LUN; logvol++)
1903 if(h->drv[logvol].LunID == drv->LunID) {
1904 FOUND=1;
1905 break;
1909 if (!FOUND) return 1;
1911 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1912 if (size_buff == NULL)
1914 printk(KERN_WARNING "cciss: out of memory\n");
1915 return 1;
1917 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1918 if (inq_buff == NULL)
1920 printk(KERN_WARNING "cciss: out of memory\n");
1921 kfree(size_buff);
1922 return 1;
1925 cciss_read_capacity(h->ctlr, logvol, size_buff, 1, &total_size, &block_size);
1926 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, inq_buff, drv);
1928 blk_queue_hardsect_size(drv->queue, drv->block_size);
1929 set_capacity(disk, drv->nr_blocks);
1931 kfree(size_buff);
1932 kfree(inq_buff);
1933 return 0;
1937 * Poll for a command to complete by watching the memory-mapped
1938 * completion FIFO.
1939 * Used only at init time, while interrupts from the HBA are disabled.
1941 static unsigned long pollcomplete(int ctlr)
1943 unsigned long done;
1944 int i;
1946 /* Wait (up to 20 seconds) for a command to complete */
1948 for (i = 20 * HZ; i > 0; i--) {
1949 done = hba[ctlr]->access.command_completed(hba[ctlr]);
1950 if (done == FIFO_EMPTY)
1951 schedule_timeout_uninterruptible(1);
1952 else
1953 return (done);
1955 /* Invalid address to tell caller we ran out of time */
1956 return 1;
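/*
 * Explanatory note (added commentary, not original source text):
 * pollcomplete() busy-waits for up to 20 seconds, sleeping one tick
 * between reads of the completion FIFO.  FIFO_EMPTY means nothing has
 * completed yet; any other value is the tag of a completed command.  The
 * value 1 serves as the timeout sentinel, per the comment above an
 * address that cannot be a valid command list address.
 */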
1959 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
1961 /* We get in here if sendcmd() is polling for completions
1962 and gets some command back that it wasn't expecting --
1963 something other than that which it just sent down.
1964 Ordinarily, that shouldn't happen, but it can happen when
1965 the scsi tape stuff gets into error handling mode, and
1966 starts using sendcmd() to try to abort commands and
1967 reset tape drives. In that case, sendcmd may pick up
1968 completions of commands that were sent to logical drives
1969 through the block i/o system, or cciss ioctls completing, etc.
1970 In that case, we need to save those completions for later
1971 processing by the interrupt handler.
1974 #ifdef CONFIG_CISS_SCSI_TAPE
1975 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
1977 /* If it's not the scsi tape stuff doing error handling (abort */
1978 /* or reset), then we don't expect anything weird. */
1979 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
1980 #endif
1981 printk( KERN_WARNING "cciss cciss%d: SendCmd "
1982 "Invalid command list address returned! (%lx)\n",
1983 ctlr, complete);
1984 /* not much we can do. */
1985 #ifdef CONFIG_CISS_SCSI_TAPE
1986 return 1;
1989 /* We've sent down an abort or reset, but something else
1990 has completed */
1991 if (srl->ncompletions >= (NR_CMDS + 2)) {
1992 /* Uh oh. No room to save it for later... */
1993 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
1994 "reject list overflow, command lost!\n", ctlr);
1995 return 1;
1997 /* Save it for later */
1998 srl->complete[srl->ncompletions] = complete;
1999 srl->ncompletions++;
2000 #endif
2001 return 0;
2005 * Send a command to the controller, and wait for it to complete.
2006 * Only used at init time.
2008 static int sendcmd(
2009 __u8 cmd,
2010 int ctlr,
2011 void *buff,
2012 size_t size,
2013 unsigned int use_unit_num, /* 0: address the controller,
2014 1: address logical volume log_unit,
2015 2: periph device address is scsi3addr */
2016 unsigned int log_unit,
2017 __u8 page_code,
2018 unsigned char *scsi3addr,
2019 int cmd_type)
2021 CommandList_struct *c;
2022 int i;
2023 unsigned long complete;
2024 ctlr_info_t *info_p= hba[ctlr];
2025 u64bit buff_dma_handle;
2026 int status, done = 0;
2028 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2029 printk(KERN_WARNING "cciss: unable to get memory");
2030 return(IO_ERROR);
2032 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2033 log_unit, page_code, scsi3addr, cmd_type);
2034 if (status != IO_OK) {
2035 cmd_free(info_p, c, 1);
2036 return status;
2038 resend_cmd1:
2040 * Disable interrupt
2042 #ifdef CCISS_DEBUG
2043 printk(KERN_DEBUG "cciss: turning intr off\n");
2044 #endif /* CCISS_DEBUG */
2045 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2047 /* Make sure there is room in the command FIFO */
2048 /* Actually it should be completely empty at this time */
2049 /* unless we are in here doing error handling for the scsi */
2050 /* tape side of the driver. */
2051 for (i = 200000; i > 0; i--)
2053 /* if fifo isn't full go */
2054 if (!(info_p->access.fifo_full(info_p)))
2057 break;
2059 udelay(10);
2060 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2061 " waiting!\n", ctlr);
2064 * Send the cmd
2066 info_p->access.submit_command(info_p, c);
2067 done = 0;
2068 do {
2069 complete = pollcomplete(ctlr);
2071 #ifdef CCISS_DEBUG
2072 printk(KERN_DEBUG "cciss: command completed\n");
2073 #endif /* CCISS_DEBUG */
2075 if (complete == 1) {
2076 printk( KERN_WARNING
2077 "cciss cciss%d: SendCmd Timeout out, "
2078 "No command list address returned!\n",
2079 ctlr);
2080 status = IO_ERROR;
2081 done = 1;
2082 break;
2085 /* This will need to change for direct lookup completions */
2086 if ( (complete & CISS_ERROR_BIT)
2087 && (complete & ~CISS_ERROR_BIT) == c->busaddr)
2089 /* if data overrun or underrun on a Report command,
2090 ignore it
2092 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2093 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2094 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2095 ((c->err_info->CommandStatus ==
2096 CMD_DATA_OVERRUN) ||
2097 (c->err_info->CommandStatus ==
2098 CMD_DATA_UNDERRUN)
2101 complete = c->busaddr;
2102 } else {
2103 if (c->err_info->CommandStatus ==
2104 CMD_UNSOLICITED_ABORT) {
2105 printk(KERN_WARNING "cciss%d: "
2106 "unsolicited abort %p\n",
2107 ctlr, c);
2108 if (c->retry_count < MAX_CMD_RETRIES) {
2109 printk(KERN_WARNING
2110 "cciss%d: retrying %p\n",
2111 ctlr, c);
2112 c->retry_count++;
2113 /* erase the old error */
2114 /* information */
2115 memset(c->err_info, 0,
2116 sizeof(ErrorInfo_struct));
2117 goto resend_cmd1;
2118 } else {
2119 printk(KERN_WARNING
2120 "cciss%d: retried %p too "
2121 "many times\n", ctlr, c);
2122 status = IO_ERROR;
2123 goto cleanup1;
2125 } else if (c->err_info->CommandStatus == CMD_UNABORTABLE) {
2126 printk(KERN_WARNING "cciss%d: command could not be aborted.\n", ctlr);
2127 status = IO_ERROR;
2128 goto cleanup1;
2130 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2131 " Error %x \n", ctlr,
2132 c->err_info->CommandStatus);
2133 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2134 " offensive info\n"
2135 " size %x\n num %x value %x\n", ctlr,
2136 c->err_info->MoreErrInfo.Invalid_Cmd.offense_size,
2137 c->err_info->MoreErrInfo.Invalid_Cmd.offense_num,
2138 c->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
2139 status = IO_ERROR;
2140 goto cleanup1;
2143 /* This will need changing for direct lookup completions */
2144 if (complete != c->busaddr) {
2145 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2146 BUG(); /* we are pretty much hosed if we get here. */
2148 continue;
2149 } else
2150 done = 1;
2151 } while (!done);
2153 cleanup1:
2154 /* unlock the data buffer from DMA */
2155 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2156 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2157 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2158 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2159 #ifdef CONFIG_CISS_SCSI_TAPE
2160 /* if we saved some commands for later, process them now. */
2161 if (info_p->scsi_rejects.ncompletions > 0)
2162 do_cciss_intr(0, info_p, NULL);
2163 #endif
2164 cmd_free(info_p, c, 1);
2165 return (status);
2168 * Map (physical) PCI mem into (virtual) kernel space
2170 static void __iomem *remap_pci_mem(ulong base, ulong size)
2172 ulong page_base = ((ulong) base) & PAGE_MASK;
2173 ulong page_offs = ((ulong) base) - page_base;
2174 void __iomem *page_remapped = ioremap(page_base, page_offs+size);
2176 return page_remapped ? (page_remapped + page_offs) : NULL;
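/*
 * Explanatory note (added commentary, not original source text): ioremap()
 * expects a page-aligned physical address, so the base is rounded down to
 * a page boundary, page_offs + size bytes are mapped, and the offset is
 * added back so the caller receives a pointer to the exact register block
 * it asked for.
 */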
2180 * Takes jobs off the request queue and sends them to the hardware,
2181 * then puts them on the completion queue to wait for completion.
2183 static void start_io( ctlr_info_t *h)
2185 CommandList_struct *c;
2187 while(( c = h->reqQ) != NULL )
2189 /* can't do anything if fifo is full */
2190 if ((h->access.fifo_full(h))) {
2191 printk(KERN_WARNING "cciss: fifo full\n");
2192 break;
2195 /* Get the first entry from the Request Q */
2196 removeQ(&(h->reqQ), c);
2197 h->Qdepth--;
2199 /* Tell the controller to execute the command */
2200 h->access.submit_command(h, c);
2202 /* Put job onto the completed Q */
2203 addQ (&(h->cmpQ), c);
2206 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2207 /* Zeros out the error record and then resends the command */
2208 /* to the controller. */
2209 static inline void resend_cciss_cmd( ctlr_info_t *h, CommandList_struct *c)
2211 /* erase the old error information */
2212 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2214 /* add it to software queue and then send it to the controller */
2215 addQ(&(h->reqQ),c);
2216 h->Qdepth++;
2217 if(h->Qdepth > h->maxQsinceinit)
2218 h->maxQsinceinit = h->Qdepth;
2220 start_io(h);
2223 /* Checks the status of the job and marks all of its buffers complete.
2224 * Note that this function does not need
2225 * to hold the hba/queue lock.
2227 static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
2228 int timeout)
2230 int status = 1;
2231 int retry_cmd = 0;
2233 if (timeout)
2234 status = 0;
2236 if(cmd->err_info->CommandStatus != 0)
2237 { /* an error has occurred */
2238 switch(cmd->err_info->CommandStatus)
2240 unsigned char sense_key;
2241 case CMD_TARGET_STATUS:
2242 status = 0;
2244 if( cmd->err_info->ScsiStatus == 0x02)
2246 printk(KERN_WARNING "cciss: cmd %p "
2247 "has CHECK CONDITION "
2248 " byte 2 = 0x%x\n", cmd,
2249 cmd->err_info->SenseInfo[2]
2251 /* check the sense key */
2252 sense_key = 0xf &
2253 cmd->err_info->SenseInfo[2];
2254 /* no status or recovered error */
2255 if((sense_key == 0x0) ||
2256 (sense_key == 0x1))
2258 status = 1;
2260 } else
2262 printk(KERN_WARNING "cciss: cmd %p "
2263 "has SCSI Status 0x%x\n",
2264 cmd, cmd->err_info->ScsiStatus);
2266 break;
2267 case CMD_DATA_UNDERRUN:
2268 printk(KERN_WARNING "cciss: cmd %p has"
2269 " completed with data underrun "
2270 "reported\n", cmd);
2271 break;
2272 case CMD_DATA_OVERRUN:
2273 printk(KERN_WARNING "cciss: cmd %p has"
2274 " completed with data overrun "
2275 "reported\n", cmd);
2276 break;
2277 case CMD_INVALID:
2278 printk(KERN_WARNING "cciss: cmd %p is "
2279 "reported invalid\n", cmd);
2280 status = 0;
2281 break;
2282 case CMD_PROTOCOL_ERR:
2283 printk(KERN_WARNING "cciss: cmd %p has "
2284 "protocol error \n", cmd);
2285 status = 0;
2286 break;
2287 case CMD_HARDWARE_ERR:
2288 printk(KERN_WARNING "cciss: cmd %p had "
2289 " hardware error\n", cmd);
2290 status = 0;
2291 break;
2292 case CMD_CONNECTION_LOST:
2293 printk(KERN_WARNING "cciss: cmd %p had "
2294 "connection lost\n", cmd);
2295 status=0;
2296 break;
2297 case CMD_ABORTED:
2298 printk(KERN_WARNING "cciss: cmd %p was "
2299 "aborted\n", cmd);
2300 status=0;
2301 break;
2302 case CMD_ABORT_FAILED:
2303 printk(KERN_WARNING "cciss: cmd %p reports "
2304 "abort failed\n", cmd);
2305 status=0;
2306 break;
2307 case CMD_UNSOLICITED_ABORT:
2308 printk(KERN_WARNING "cciss%d: unsolicited "
2309 "abort %p\n", h->ctlr, cmd);
2310 if (cmd->retry_count < MAX_CMD_RETRIES) {
2311 retry_cmd=1;
2312 printk(KERN_WARNING
2313 "cciss%d: retrying %p\n",
2314 h->ctlr, cmd);
2315 cmd->retry_count++;
2316 } else
2317 printk(KERN_WARNING
2318 "cciss%d: %p retried too "
2319 "many times\n", h->ctlr, cmd);
2320 status=0;
2321 break;
2322 case CMD_TIMEOUT:
2323 printk(KERN_WARNING "cciss: cmd %p timedout\n",
2324 cmd);
2325 status=0;
2326 break;
2327 default:
2328 printk(KERN_WARNING "cciss: cmd %p returned "
2329 "unknown status %x\n", cmd,
2330 cmd->err_info->CommandStatus);
2331 status=0;
2334 /* We need to return this command */
2335 if(retry_cmd) {
2336 resend_cciss_cmd(h,cmd);
2337 return;
2340 cmd->rq->completion_data = cmd;
2341 cmd->rq->errors = status;
2342 blk_complete_request(cmd->rq);
2346 * Get a request and submit it to the controller.
2348 static void do_cciss_request(request_queue_t *q)
2350 ctlr_info_t *h= q->queuedata;
2351 CommandList_struct *c;
2352 int start_blk, seg;
2353 struct request *creq;
2354 u64bit temp64;
2355 struct scatterlist tmp_sg[MAXSGENTRIES];
2356 drive_info_struct *drv;
2357 int i, dir;
2359 /* We call start_io here in case there is a command waiting on the
2360 * queue that has not been sent.
2362 if (blk_queue_plugged(q))
2363 goto startio;
2365 queue:
2366 creq = elv_next_request(q);
2367 if (!creq)
2368 goto startio;
2370 if (creq->nr_phys_segments > MAXSGENTRIES)
2371 BUG();
2373 if (( c = cmd_alloc(h, 1)) == NULL)
2374 goto full;
2376 blkdev_dequeue_request(creq);
2378 spin_unlock_irq(q->queue_lock);
2380 c->cmd_type = CMD_RWREQ;
2381 c->rq = creq;
2383 /* fill in the request */
2384 drv = creq->rq_disk->private_data;
2385 c->Header.ReplyQueue = 0; // unused in simple mode
2386 /* got command from pool, so use the command block index instead */
2387 /* for direct lookups. */
2388 /* The first 2 bits are reserved for controller error reporting. */
2389 c->Header.Tag.lower = (c->cmdindex << 3);
2390 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
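/* Explanatory note (added commentary, illustrative): with cmdindex 5 this
 * yields Tag.lower = (5 << 3) | 0x04 = 0x2c.  The interrupt handler tests
 * bit 2 to recognise an index-style tag and recovers the pool index with
 * (tag >> 3); see do_cciss_intr(). */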
2391 c->Header.LUN.LogDev.VolId= drv->LunID;
2392 c->Header.LUN.LogDev.Mode = 1;
2393 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2394 c->Request.Type.Type = TYPE_CMD; // It is a command.
2395 c->Request.Type.Attribute = ATTR_SIMPLE;
2396 c->Request.Type.Direction =
2397 (rq_data_dir(creq) == READ) ? XFER_READ: XFER_WRITE;
2398 c->Request.Timeout = 0; // Don't time out
2399 c->Request.CDB[0] = (rq_data_dir(creq) == READ) ? CCISS_READ : CCISS_WRITE;
2400 start_blk = creq->sector;
2401 #ifdef CCISS_DEBUG
2402 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",(int) creq->sector,
2403 (int) creq->nr_sectors);
2404 #endif /* CCISS_DEBUG */
2406 seg = blk_rq_map_sg(q, creq, tmp_sg);
2408 /* get the DMA records for the setup */
2409 if (c->Request.Type.Direction == XFER_READ)
2410 dir = PCI_DMA_FROMDEVICE;
2411 else
2412 dir = PCI_DMA_TODEVICE;
2414 for (i=0; i<seg; i++)
2416 c->SG[i].Len = tmp_sg[i].length;
2417 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2418 tmp_sg[i].offset, tmp_sg[i].length,
2419 dir);
2420 c->SG[i].Addr.lower = temp64.val32.lower;
2421 c->SG[i].Addr.upper = temp64.val32.upper;
2422 c->SG[i].Ext = 0; // we are not chaining
2424 /* track how many SG entries we are using */
2425 if( seg > h->maxSG)
2426 h->maxSG = seg;
2428 #ifdef CCISS_DEBUG
2429 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n", creq->nr_sectors, seg);
2430 #endif /* CCISS_DEBUG */
2432 c->Header.SGList = c->Header.SGTotal = seg;
2433 c->Request.CDB[1]= 0;
2434 c->Request.CDB[2]= (start_blk >> 24) & 0xff; //MSB
2435 c->Request.CDB[3]= (start_blk >> 16) & 0xff;
2436 c->Request.CDB[4]= (start_blk >> 8) & 0xff;
2437 c->Request.CDB[5]= start_blk & 0xff;
2438 c->Request.CDB[6]= 0; // (sect >> 24) & 0xff; MSB
2439 c->Request.CDB[7]= (creq->nr_sectors >> 8) & 0xff;
2440 c->Request.CDB[8]= creq->nr_sectors & 0xff;
2441 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
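/* Explanatory note (added commentary, not original source text): the CDB
 * built above follows the 10-byte READ(10)/WRITE(10) layout: bytes 2-5
 * hold the starting block address in big-endian order and bytes 7-8 hold
 * the 16-bit sector count, which comfortably covers the 512-sector limit
 * set on the queue at init time. */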
2443 spin_lock_irq(q->queue_lock);
2445 addQ(&(h->reqQ),c);
2446 h->Qdepth++;
2447 if(h->Qdepth > h->maxQsinceinit)
2448 h->maxQsinceinit = h->Qdepth;
2450 goto queue;
2451 full:
2452 blk_stop_queue(q);
2453 startio:
2454 /* We will already have the driver lock here, so there is no need
2455 * to take it again.
2457 start_io(h);
2460 static inline unsigned long get_next_completion(ctlr_info_t *h)
2462 #ifdef CONFIG_CISS_SCSI_TAPE
2463 /* Any rejects from sendcmd() lying around? Process them first */
2464 if (h->scsi_rejects.ncompletions == 0)
2465 return h->access.command_completed(h);
2466 else {
2467 struct sendcmd_reject_list *srl;
2468 int n;
2469 srl = &h->scsi_rejects;
2470 n = --srl->ncompletions;
2471 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2472 printk("p");
2473 return srl->complete[n];
2475 #else
2476 return h->access.command_completed(h);
2477 #endif
2480 static inline int interrupt_pending(ctlr_info_t *h)
2482 #ifdef CONFIG_CISS_SCSI_TAPE
2483 return ( h->access.intr_pending(h)
2484 || (h->scsi_rejects.ncompletions > 0));
2485 #else
2486 return h->access.intr_pending(h);
2487 #endif
2490 static inline long interrupt_not_for_us(ctlr_info_t *h)
2492 #ifdef CONFIG_CISS_SCSI_TAPE
2493 return (((h->access.intr_pending(h) == 0) ||
2494 (h->interrupts_enabled == 0))
2495 && (h->scsi_rejects.ncompletions == 0));
2496 #else
2497 return (((h->access.intr_pending(h) == 0) ||
2498 (h->interrupts_enabled == 0)));
2499 #endif
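/*
 * Explanatory note (added commentary, not original source text): with
 * CONFIG_CISS_SCSI_TAPE enabled, completions that sendcmd() picked up on
 * behalf of other commands sit in scsi_rejects, so the helpers above treat
 * a non-empty reject list as pending work and get_next_completion() drains
 * it before reading the hardware FIFO.
 */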
2502 static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2504 ctlr_info_t *h = dev_id;
2505 CommandList_struct *c;
2506 unsigned long flags;
2507 __u32 a, a1, a2;
2508 int j;
2509 int start_queue = h->next_to_run;
2511 if (interrupt_not_for_us(h))
2512 return IRQ_NONE;
2514 * If there are completed commands in the completion queue,
2515 * we had better do something about it.
2517 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2518 while (interrupt_pending(h)) {
2519 while((a = get_next_completion(h)) != FIFO_EMPTY) {
2520 a1 = a;
2521 if ((a & 0x04)) {
2522 a2 = (a >> 3);
2523 if (a2 >= NR_CMDS) {
2524 printk(KERN_WARNING "cciss: controller cciss%d failed, stopping.\n", h->ctlr);
2525 fail_all_cmds(h->ctlr);
2526 return IRQ_HANDLED;
2529 c = h->cmd_pool + a2;
2530 a = c->busaddr;
2532 } else {
2533 a &= ~3;
2534 if ((c = h->cmpQ) == NULL) {
2535 printk(KERN_WARNING "cciss: Completion of %08x ignored\n", a1);
2536 continue;
2538 while(c->busaddr != a) {
2539 c = c->next;
2540 if (c == h->cmpQ)
2541 break;
2545 * If we've found the command, take it off the
2546 * completion Q and free it
2548 if (c->busaddr == a) {
2549 removeQ(&h->cmpQ, c);
2550 if (c->cmd_type == CMD_RWREQ) {
2551 complete_command(h, c, 0);
2552 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2553 complete(c->waiting);
2555 # ifdef CONFIG_CISS_SCSI_TAPE
2556 else if (c->cmd_type == CMD_SCSI)
2557 complete_scsi_command(c, 0, a1);
2558 # endif
2559 continue;
2564 /* check to see if we have maxed out the number of commands that can
2565 * be placed on the queue. If so then exit. We do this check here
2566 * in case the interrupt we serviced was from an ioctl and did not
2567 * free any new commands.
2569 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2570 goto cleanup;
2572 /* We have room on the queue for more commands. Now we need to queue
2573 * them up. We will also keep track of the next queue to run so
2574 * that every queue gets a chance to be started first.
2576 for (j=0; j < h->highest_lun + 1; j++){
2577 int curr_queue = (start_queue + j) % (h->highest_lun + 1);
2578 /* make sure the disk has been added and the drive is real
2579 * because this can be called from the middle of init_one.
2581 if(!(h->drv[curr_queue].queue) ||
2582 !(h->drv[curr_queue].heads))
2583 continue;
2584 blk_start_queue(h->gendisk[curr_queue]->queue);
2586 /* check to see if we have maxed out the number of commands
2587 * that can be placed on the queue.
2589 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2591 if (curr_queue == start_queue){
2592 h->next_to_run = (start_queue + 1) % (h->highest_lun + 1);
2593 goto cleanup;
2594 } else {
2595 h->next_to_run = curr_queue;
2596 goto cleanup;
2598 } else {
2599 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
2603 cleanup:
2604 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2605 return IRQ_HANDLED;
2608 * We cannot read the structure directly; for portability we must use
2609 * the io functions.
2610 * This is for debug only.
2612 #ifdef CCISS_DEBUG
2613 static void print_cfg_table( CfgTable_struct *tb)
2615 int i;
2616 char temp_name[17];
2618 printk("Controller Configuration information\n");
2619 printk("------------------------------------\n");
2620 for(i=0;i<4;i++)
2621 temp_name[i] = readb(&(tb->Signature[i]));
2622 temp_name[4]='\0';
2623 printk(" Signature = %s\n", temp_name);
2624 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2625 printk(" Transport methods supported = 0x%x\n",
2626 readl(&(tb-> TransportSupport)));
2627 printk(" Transport methods active = 0x%x\n",
2628 readl(&(tb->TransportActive)));
2629 printk(" Requested transport Method = 0x%x\n",
2630 readl(&(tb->HostWrite.TransportRequest)));
2631 printk(" Coalese Interrupt Delay = 0x%x\n",
2632 readl(&(tb->HostWrite.CoalIntDelay)));
2633 printk(" Coalese Interrupt Count = 0x%x\n",
2634 readl(&(tb->HostWrite.CoalIntCount)));
2635 printk(" Max outstanding commands = 0x%d\n",
2636 readl(&(tb->CmdsOutMax)));
2637 printk(" Bus Types = 0x%x\n", readl(&(tb-> BusTypes)));
2638 for(i=0;i<16;i++)
2639 temp_name[i] = readb(&(tb->ServerName[i]));
2640 temp_name[16] = '\0';
2641 printk(" Server Name = %s\n", temp_name);
2642 printk(" Heartbeat Counter = 0x%x\n\n\n",
2643 readl(&(tb->HeartBeat)));
2645 #endif /* CCISS_DEBUG */
2647 static void release_io_mem(ctlr_info_t *c)
2649 /* if IO mem was not protected do nothing */
2650 if( c->io_mem_addr == 0)
2651 return;
2652 release_region(c->io_mem_addr, c->io_mem_length);
2653 c->io_mem_addr = 0;
2654 c->io_mem_length = 0;
2657 static int find_PCI_BAR_index(struct pci_dev *pdev,
2658 unsigned long pci_bar_addr)
2660 int i, offset, mem_type, bar_type;
2661 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2662 return 0;
2663 offset = 0;
2664 for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2665 bar_type = pci_resource_flags(pdev, i) &
2666 PCI_BASE_ADDRESS_SPACE;
2667 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2668 offset += 4;
2669 else {
2670 mem_type = pci_resource_flags(pdev, i) &
2671 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2672 switch (mem_type) {
2673 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2674 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2675 offset += 4; /* 32 bit */
2676 break;
2677 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2678 offset += 8;
2679 break;
2680 default: /* reserved in PCI 2.2 */
2681 printk(KERN_WARNING "Base address is invalid\n");
2682 return -1;
2683 break;
2686 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2687 return i+1;
2689 return -1;
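/*
 * Explanatory note (added commentary, not original source text): the loop
 * walks the device's resources, adding 4 bytes of config-space offset for
 * an I/O or 32-bit memory BAR and 8 bytes for a 64-bit memory BAR, until
 * the accumulated offset equals pci_bar_addr - PCI_BASE_ADDRESS_0; the
 * index of the next resource is then returned, or -1 on failure.
 */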
2692 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
2693 * controllers that are capable. If not, we use IO-APIC mode.
2696 static void __devinit cciss_interrupt_mode(ctlr_info_t *c, struct pci_dev *pdev, __u32 board_id)
2698 #ifdef CONFIG_PCI_MSI
2699 int err;
2700 struct msix_entry cciss_msix_entries[4] = {{0,0}, {0,1},
2701 {0,2}, {0,3}};
2703 /* Some boards advertise MSI but don't really support it */
2704 if ((board_id == 0x40700E11) ||
2705 (board_id == 0x40800E11) ||
2706 (board_id == 0x40820E11) ||
2707 (board_id == 0x40830E11))
2708 goto default_int_mode;
2710 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2711 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2712 if (!err) {
2713 c->intr[0] = cciss_msix_entries[0].vector;
2714 c->intr[1] = cciss_msix_entries[1].vector;
2715 c->intr[2] = cciss_msix_entries[2].vector;
2716 c->intr[3] = cciss_msix_entries[3].vector;
2717 c->msix_vector = 1;
2718 return;
2720 if (err > 0) {
2721 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2722 "available\n", err);
2723 } else {
2724 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2725 err);
2728 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2729 if (!pci_enable_msi(pdev)) {
2730 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2731 c->msi_vector = 1;
2732 return;
2733 } else {
2734 printk(KERN_WARNING "cciss: MSI init failed\n");
2735 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2736 return;
2739 #endif /* CONFIG_PCI_MSI */
2740 /* if we get here we're going to use the default interrupt mode */
2741 default_int_mode:
2742 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2743 return;
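/*
 * Explanatory note (added commentary, not original source text): the
 * fallback order above is MSI-X (four vectors requested), then single
 * message MSI, then the legacy IO-APIC/INTx interrupt in pdev->irq;
 * whichever path succeeds leaves the vector used for simple mode in
 * intr[SIMPLE_MODE_INT].
 */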
2746 static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2748 ushort subsystem_vendor_id, subsystem_device_id, command;
2749 __u32 board_id, scratchpad = 0;
2750 __u64 cfg_offset;
2751 __u32 cfg_base_addr;
2752 __u64 cfg_base_addr_index;
2753 int i;
2755 /* check to see if controller has been disabled */
2756 /* BEFORE trying to enable it */
2757 (void) pci_read_config_word(pdev, PCI_COMMAND,&command);
2758 if(!(command & 0x02))
2760 printk(KERN_WARNING "cciss: controller appears to be disabled\n");
2761 return(-1);
2764 if (pci_enable_device(pdev))
2766 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2767 return( -1);
2770 subsystem_vendor_id = pdev->subsystem_vendor;
2771 subsystem_device_id = pdev->subsystem_device;
2772 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2773 subsystem_vendor_id);
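/* Explanatory note (added commentary, not original source text): board_id
 * packs the PCI subsystem device id into the high 16 bits and the
 * subsystem vendor id into the low 16 bits, e.g. device 0x4070 with
 * vendor 0x0E11 gives 0x40700E11, the value matched against products[]
 * and the MSI quirk list in cciss_interrupt_mode(). */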
2775 /* search for our IO range so we can protect it */
2776 for(i=0; i<DEVICE_COUNT_RESOURCE; i++)
2778 /* is this an IO range */
2779 if( pci_resource_flags(pdev, i) & 0x01 ) {
2780 c->io_mem_addr = pci_resource_start(pdev, i);
2781 c->io_mem_length = pci_resource_end(pdev, i) -
2782 pci_resource_start(pdev, i) +1;
2783 #ifdef CCISS_DEBUG
2784 printk("IO value found base_addr[%d] %lx %lx\n", i,
2785 c->io_mem_addr, c->io_mem_length);
2786 #endif /* CCISS_DEBUG */
2787 /* register the IO range */
2788 if(!request_region( c->io_mem_addr,
2789 c->io_mem_length, "cciss"))
2791 printk(KERN_WARNING "cciss I/O memory range already in use addr=%lx length=%ld\n",
2792 c->io_mem_addr, c->io_mem_length);
2793 c->io_mem_addr= 0;
2794 c->io_mem_length = 0;
2796 break;
2800 #ifdef CCISS_DEBUG
2801 printk("command = %x\n", command);
2802 printk("irq = %x\n", pdev->irq);
2803 printk("board_id = %x\n", board_id);
2804 #endif /* CCISS_DEBUG */
2806 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
2807 * else we use the IO-APIC interrupt assigned to us by system ROM.
2809 cciss_interrupt_mode(c, pdev, board_id);
2812 * Memory base addr is the first addr; the second points to the config
2813 * table
2816 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2817 #ifdef CCISS_DEBUG
2818 printk("address 0 = %x\n", c->paddr);
2819 #endif /* CCISS_DEBUG */
2820 c->vaddr = remap_pci_mem(c->paddr, 200);
2822 /* Wait for the board to become ready. (PCI hotplug needs this.)
2823 * We poll for up to 120 secs, once per 100ms. */
2824 for (i=0; i < 1200; i++) {
2825 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2826 if (scratchpad == CCISS_FIRMWARE_READY)
2827 break;
2828 set_current_state(TASK_INTERRUPTIBLE);
2829 schedule_timeout(HZ / 10); /* wait 100ms */
2831 if (scratchpad != CCISS_FIRMWARE_READY) {
2832 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2833 return -1;
2836 /* get the address index number */
2837 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2838 cfg_base_addr &= (__u32) 0x0000ffff;
2839 #ifdef CCISS_DEBUG
2840 printk("cfg base address = %x\n", cfg_base_addr);
2841 #endif /* CCISS_DEBUG */
2842 cfg_base_addr_index =
2843 find_PCI_BAR_index(pdev, cfg_base_addr);
2844 #ifdef CCISS_DEBUG
2845 printk("cfg base address index = %x\n", cfg_base_addr_index);
2846 #endif /* CCISS_DEBUG */
2847 if (cfg_base_addr_index == -1) {
2848 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2849 release_io_mem(c);
2850 return -1;
2853 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2854 #ifdef CCISS_DEBUG
2855 printk("cfg offset = %x\n", cfg_offset);
2856 #endif /* CCISS_DEBUG */
2857 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2858 cfg_base_addr_index) + cfg_offset,
2859 sizeof(CfgTable_struct));
2860 c->board_id = board_id;
2862 #ifdef CCISS_DEBUG
2863 print_cfg_table(c->cfgtable);
2864 #endif /* CCISS_DEBUG */
2866 for(i=0; i<NR_PRODUCTS; i++) {
2867 if (board_id == products[i].board_id) {
2868 c->product_name = products[i].product_name;
2869 c->access = *(products[i].access);
2870 break;
2873 if (i == NR_PRODUCTS) {
2874 printk(KERN_WARNING "cciss: Sorry, I don't know how"
2875 " to access the Smart Array controller %08lx\n",
2876 (unsigned long)board_id);
2877 return -1;
2879 if ( (readb(&c->cfgtable->Signature[0]) != 'C') ||
2880 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2881 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2882 (readb(&c->cfgtable->Signature[3]) != 'S') )
2884 printk("Does not appear to be a valid CISS config table\n");
2885 return -1;
2888 #ifdef CONFIG_X86
2890 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
2891 __u32 prefetch;
2892 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
2893 prefetch |= 0x100;
2894 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
2896 #endif
2898 #ifdef CCISS_DEBUG
2899 printk("Trying to put board into Simple mode\n");
2900 #endif /* CCISS_DEBUG */
2901 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
2902 /* Update the field, and then ring the doorbell */
2903 writel( CFGTBL_Trans_Simple,
2904 &(c->cfgtable->HostWrite.TransportRequest));
2905 writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
2907 /* under certain very rare conditions, this can take a while.
2908 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
2909 * as we enter this code.) */
2910 for(i=0;i<MAX_CONFIG_WAIT;i++) {
2911 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
2912 break;
2913 /* delay and try again */
2914 set_current_state(TASK_INTERRUPTIBLE);
2915 schedule_timeout(10);
2918 #ifdef CCISS_DEBUG
2919 printk(KERN_DEBUG "I counter got to %d %x\n", i, readl(c->vaddr + SA5_DOORBELL));
2920 #endif /* CCISS_DEBUG */
2921 #ifdef CCISS_DEBUG
2922 print_cfg_table(c->cfgtable);
2923 #endif /* CCISS_DEBUG */
2925 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
2927 printk(KERN_WARNING "cciss: unable to get board into"
2928 " simple mode\n");
2929 return -1;
2931 return 0;
2936 * Gets information about the local volumes attached to the controller.
2938 static void cciss_getgeometry(int cntl_num)
2940 ReportLunData_struct *ld_buff;
2941 ReadCapdata_struct *size_buff;
2942 InquiryData_struct *inq_buff;
2943 int return_code;
2944 int i;
2945 int listlength = 0;
2946 __u32 lunid = 0;
2947 int block_size;
2948 int total_size;
2950 ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
2951 if (ld_buff == NULL)
2953 printk(KERN_ERR "cciss: out of memory\n");
2954 return;
2956 memset(ld_buff, 0, sizeof(ReportLunData_struct));
2957 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
2958 if (size_buff == NULL)
2960 printk(KERN_ERR "cciss: out of memory\n");
2961 kfree(ld_buff);
2962 return;
2964 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
2965 if (inq_buff == NULL)
2967 printk(KERN_ERR "cciss: out of memory\n");
2968 kfree(ld_buff);
2969 kfree(size_buff);
2970 return;
2972 /* Get the firmware version */
2973 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
2974 sizeof(InquiryData_struct), 0, 0 ,0, NULL, TYPE_CMD);
2975 if (return_code == IO_OK)
2977 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
2978 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
2979 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
2980 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
2981 } else /* send command failed */
2983 printk(KERN_WARNING "cciss: unable to determine firmware"
2984 " version of controller\n");
2986 /* Get the number of logical volumes */
2987 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
2988 sizeof(ReportLunData_struct), 0, 0, 0, NULL, TYPE_CMD);
2990 if( return_code == IO_OK)
2992 #ifdef CCISS_DEBUG
2993 printk("LUN Data\n--------------------------\n");
2994 #endif /* CCISS_DEBUG */
2996 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
2997 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
2998 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
2999 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
3000 } else /* reading number of logical volumes failed */
3002 printk(KERN_WARNING "cciss: report logical volume"
3003 " command failed\n");
3004 listlength = 0;
3006 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
3007 if (hba[cntl_num]->num_luns > CISS_MAX_LUN)
3009 printk(KERN_ERR "ciss: only %d number of logical volumes supported\n",
3010 CISS_MAX_LUN);
3011 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3013 #ifdef CCISS_DEBUG
3014 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0],
3015 ld_buff->LUNListLength[1], ld_buff->LUNListLength[2],
3016 ld_buff->LUNListLength[3], hba[cntl_num]->num_luns);
3017 #endif /* CCISS_DEBUG */
3019 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns-1;
3020 // for(i=0; i< hba[cntl_num]->num_luns; i++)
3021 for(i=0; i < CISS_MAX_LUN; i++)
3023 if (i < hba[cntl_num]->num_luns){
3024 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3025 << 24;
3026 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3027 << 16;
3028 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3029 << 8;
3030 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3032 hba[cntl_num]->drv[i].LunID = lunid;
3035 #ifdef CCISS_DEBUG
3036 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3037 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3038 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3039 hba[cntl_num]->drv[i].LunID);
3040 #endif /* CCISS_DEBUG */
3041 cciss_read_capacity(cntl_num, i, size_buff, 0,
3042 &total_size, &block_size);
3043 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3044 block_size, inq_buff, &hba[cntl_num]->drv[i]);
3045 } else {
3046 /* initialize raid_level to indicate free space */
3047 hba[cntl_num]->drv[i].raid_level = -1;
3050 kfree(ld_buff);
3051 kfree(size_buff);
3052 kfree(inq_buff);
3055 /* Function to find the first free pointer into our hba[] array */
3056 /* Returns -1 if no free entries are left. */
3057 static int alloc_cciss_hba(void)
3059 struct gendisk *disk[NWD];
3060 int i, n;
3061 for (n = 0; n < NWD; n++) {
3062 disk[n] = alloc_disk(1 << NWD_SHIFT);
3063 if (!disk[n])
3064 goto out;
3067 for(i=0; i< MAX_CTLR; i++) {
3068 if (!hba[i]) {
3069 ctlr_info_t *p;
3070 p = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3071 if (!p)
3072 goto Enomem;
3073 memset(p, 0, sizeof(ctlr_info_t));
3074 for (n = 0; n < NWD; n++)
3075 p->gendisk[n] = disk[n];
3076 hba[i] = p;
3077 return i;
3080 printk(KERN_WARNING "cciss: This driver supports a maximum"
3081 " of %d controllers.\n", MAX_CTLR);
3082 goto out;
3083 Enomem:
3084 printk(KERN_ERR "cciss: out of memory.\n");
3085 out:
3086 while (n--)
3087 put_disk(disk[n]);
3088 return -1;
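/*
 * Explanatory note (added commentary, not original source text):
 * alloc_cciss_hba() pre-allocates one gendisk per possible logical drive
 * (NWD of them), each with 1 << NWD_SHIFT minors to leave room for
 * partitions, before claiming the first free hba[] slot; on any failure
 * the already-allocated disks are released again.
 */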
3091 static void free_hba(int i)
3093 ctlr_info_t *p = hba[i];
3094 int n;
3096 hba[i] = NULL;
3097 for (n = 0; n < NWD; n++)
3098 put_disk(p->gendisk[n]);
3099 kfree(p);
3103 * This is it. Find all the controllers and register them. I really hate
3104 * stealing all these major device numbers.
3105 * returns the number of block devices registered.
3107 static int __devinit cciss_init_one(struct pci_dev *pdev,
3108 const struct pci_device_id *ent)
3110 request_queue_t *q;
3111 int i;
3112 int j;
3113 int rc;
3115 printk(KERN_DEBUG "cciss: Device 0x%x has been found at"
3116 " bus %d dev %d func %d\n",
3117 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
3118 PCI_FUNC(pdev->devfn));
3119 i = alloc_cciss_hba();
3120 if(i < 0)
3121 return (-1);
3123 hba[i]->busy_initializing = 1;
3125 if (cciss_pci_init(hba[i], pdev) != 0)
3126 goto clean1;
3128 sprintf(hba[i]->devname, "cciss%d", i);
3129 hba[i]->ctlr = i;
3130 hba[i]->pdev = pdev;
3132 /* configure PCI DMA stuff */
3133 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3134 printk("cciss: using DAC cycles\n");
3135 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3136 printk("cciss: not using DAC cycles\n");
3137 else {
3138 printk("cciss: no suitable DMA available\n");
3139 goto clean1;
3143 * register with the major number, or get a dynamic major number
3144 * by passing 0 as argument. This is done for greater than
3145 * 8 controller support.
3147 if (i < MAX_CTLR_ORIG)
3148 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3149 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3150 if(rc == -EBUSY || rc == -EINVAL) {
3151 printk(KERN_ERR
3152 "cciss: Unable to get major number %d for %s "
3153 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3154 goto clean1;
3156 else {
3157 if (i >= MAX_CTLR_ORIG)
3158 hba[i]->major = rc;
3161 /* make sure the board interrupts are off */
3162 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3163 if( request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3164 SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM,
3165 hba[i]->devname, hba[i])) {
3166 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3167 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3168 goto clean2;
3170 hba[i]->cmd_pool_bits = kmalloc(((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long), GFP_KERNEL);
3171 hba[i]->cmd_pool = (CommandList_struct *)pci_alloc_consistent(
3172 hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3173 &(hba[i]->cmd_pool_dhandle));
3174 hba[i]->errinfo_pool = (ErrorInfo_struct *)pci_alloc_consistent(
3175 hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
3176 &(hba[i]->errinfo_pool_dhandle));
3177 if((hba[i]->cmd_pool_bits == NULL)
3178 || (hba[i]->cmd_pool == NULL)
3179 || (hba[i]->errinfo_pool == NULL)) {
3180 printk( KERN_ERR "cciss: out of memory");
3181 goto clean4;
3183 #ifdef CONFIG_CISS_SCSI_TAPE
3184 hba[i]->scsi_rejects.complete =
3185 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3186 (NR_CMDS + 5), GFP_KERNEL);
3187 if (hba[i]->scsi_rejects.complete == NULL) {
3188 printk( KERN_ERR "cciss: out of memory");
3189 goto clean4;
3191 #endif
3192 spin_lock_init(&hba[i]->lock);
3194 /* Initialize the pdev driver private data.
3195 have it point to hba[i]. */
3196 pci_set_drvdata(pdev, hba[i]);
3197 /* command and error info recs zeroed out before
3198 they are used */
3199 memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
3201 #ifdef CCISS_DEBUG
3202 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n",i);
3203 #endif /* CCISS_DEBUG */
3205 cciss_getgeometry(i);
3207 cciss_scsi_setup(i);
3209 /* Turn the interrupts on so we can service requests */
3210 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3212 cciss_procinit(i);
3213 hba[i]->busy_initializing = 0;
3215 for(j=0; j < NWD; j++) { /* mfm */
3216 drive_info_struct *drv = &(hba[i]->drv[j]);
3217 struct gendisk *disk = hba[i]->gendisk[j];
3219 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3220 if (!q) {
3221 printk(KERN_ERR
3222 "cciss: unable to allocate queue for disk %d\n",
3224 break;
3226 drv->queue = q;
3228 q->backing_dev_info.ra_pages = READ_AHEAD;
3229 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3231 /* This is a hardware imposed limit. */
3232 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3234 /* This is a limit in the driver and could be eliminated. */
3235 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3237 blk_queue_max_sectors(q, 512);
3239 blk_queue_softirq_done(q, cciss_softirq_done);
3241 q->queuedata = hba[i];
3242 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3243 sprintf(disk->devfs_name, "cciss/host%d/target%d", i, j);
3244 disk->major = hba[i]->major;
3245 disk->first_minor = j << NWD_SHIFT;
3246 disk->fops = &cciss_fops;
3247 disk->queue = q;
3248 disk->private_data = drv;
3249 /* we must register the controller even if no disks exist */
3250 /* this is for the online array utilities */
3251 if(!drv->heads && j)
3252 continue;
3253 blk_queue_hardsect_size(q, drv->block_size);
3254 set_capacity(disk, drv->nr_blocks);
3255 add_disk(disk);
3258 return(1);
3260 clean4:
3261 #ifdef CONFIG_CISS_SCSI_TAPE
3262 if(hba[i]->scsi_rejects.complete)
3263 kfree(hba[i]->scsi_rejects.complete);
3264 #endif
3265 kfree(hba[i]->cmd_pool_bits);
3266 if(hba[i]->cmd_pool)
3267 pci_free_consistent(hba[i]->pdev,
3268 NR_CMDS * sizeof(CommandList_struct),
3269 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3270 if(hba[i]->errinfo_pool)
3271 pci_free_consistent(hba[i]->pdev,
3272 NR_CMDS * sizeof( ErrorInfo_struct),
3273 hba[i]->errinfo_pool,
3274 hba[i]->errinfo_pool_dhandle);
3275 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3276 clean2:
3277 unregister_blkdev(hba[i]->major, hba[i]->devname);
3278 clean1:
3279 release_io_mem(hba[i]);
3280 hba[i]->busy_initializing = 0;
3281 free_hba(i);
3282 return(-1);
3285 static void __devexit cciss_remove_one (struct pci_dev *pdev)
3287 ctlr_info_t *tmp_ptr;
3288 int i, j;
3289 char flush_buf[4];
3290 int return_code;
3292 if (pci_get_drvdata(pdev) == NULL)
3294 printk( KERN_ERR "cciss: Unable to remove device \n");
3295 return;
3297 tmp_ptr = pci_get_drvdata(pdev);
3298 i = tmp_ptr->ctlr;
3299 if (hba[i] == NULL)
3301 printk(KERN_ERR "cciss: device appears to "
3302 "already be removed \n");
3303 return;
3305 /* Turn board interrupts off and send the flush cache command */
3306 /* sendcmd will turn off interrupts and send the flush cache command
3307 * to write all data in the battery-backed cache to disk. */
3308 memset(flush_buf, 0, 4);
3309 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3310 TYPE_CMD);
3311 if(return_code != IO_OK)
3313 printk(KERN_WARNING "Error Flushing cache on controller %d\n",
3316 free_irq(hba[i]->intr[2], hba[i]);
3318 #ifdef CONFIG_PCI_MSI
3319 if (hba[i]->msix_vector)
3320 pci_disable_msix(hba[i]->pdev);
3321 else if (hba[i]->msi_vector)
3322 pci_disable_msi(hba[i]->pdev);
3323 #endif /* CONFIG_PCI_MSI */
3325 pci_set_drvdata(pdev, NULL);
3326 iounmap(hba[i]->vaddr);
3327 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3328 unregister_blkdev(hba[i]->major, hba[i]->devname);
3329 remove_proc_entry(hba[i]->devname, proc_cciss);
3331 /* remove it from the disk list */
3332 for (j = 0; j < NWD; j++) {
3333 struct gendisk *disk = hba[i]->gendisk[j];
3334 if (disk) {
3335 request_queue_t *q = disk->queue;
3337 if (disk->flags & GENHD_FL_UP)
3338 del_gendisk(disk);
3339 if (q)
3340 blk_cleanup_queue(q);
3344 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3345 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3346 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
3347 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3348 kfree(hba[i]->cmd_pool_bits);
3349 #ifdef CONFIG_CISS_SCSI_TAPE
3350 kfree(hba[i]->scsi_rejects.complete);
3351 #endif
3352 release_io_mem(hba[i]);
3353 free_hba(i);
3356 static struct pci_driver cciss_pci_driver = {
3357 .name = "cciss",
3358 .probe = cciss_init_one,
3359 .remove = __devexit_p(cciss_remove_one),
3360 .id_table = cciss_pci_device_id, /* id_table */
3364 * This is it. Register the PCI driver information for the cards we control;
3365 * the OS will call our registered routines when it finds one of our cards.
3367 static int __init cciss_init(void)
3369 printk(KERN_INFO DRIVER_NAME "\n");
3371 /* Register for our PCI devices */
3372 return pci_register_driver(&cciss_pci_driver);
3375 static void __exit cciss_cleanup(void)
3377 int i;
3379 pci_unregister_driver(&cciss_pci_driver);
3380 /* double check that all controller entries have been removed */
3381 for (i=0; i< MAX_CTLR; i++)
3383 if (hba[i] != NULL)
3385 printk(KERN_WARNING "cciss: had to remove"
3386 " controller %d\n", i);
3387 cciss_remove_one(hba[i]->pdev);
3390 remove_proc_entry("cciss", proc_root_driver);
3393 static void fail_all_cmds(unsigned long ctlr)
3395 /* If we get here, the board is apparently dead. */
3396 ctlr_info_t *h = hba[ctlr];
3397 CommandList_struct *c;
3398 unsigned long flags;
3400 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3401 h->alive = 0; /* the controller apparently died... */
3403 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3405 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3407 /* move everything off the request queue onto the completed queue */
3408 while( (c = h->reqQ) != NULL ) {
3409 removeQ(&(h->reqQ), c);
3410 h->Qdepth--;
3411 addQ (&(h->cmpQ), c);
3414 /* Now, fail everything on the completed queue with a HW error */
3415 while( (c = h->cmpQ) != NULL ) {
3416 removeQ(&h->cmpQ, c);
3417 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3418 if (c->cmd_type == CMD_RWREQ) {
3419 complete_command(h, c, 0);
3420 } else if (c->cmd_type == CMD_IOCTL_PEND)
3421 complete(c->waiting);
3422 #ifdef CONFIG_CISS_SCSI_TAPE
3423 else if (c->cmd_type == CMD_SCSI)
3424 complete_scsi_command(c, 0, 0);
3425 #endif
3427 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3428 return;
3431 module_init(cciss_init);
3432 module_exit(cciss_cleanup);