[PATCH] cciss: increase number of commands on controller
drivers/block/cciss.c
1 /*
2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/major.h>
31 #include <linux/fs.h>
32 #include <linux/bio.h>
33 #include <linux/blkpg.h>
34 #include <linux/timer.h>
35 #include <linux/proc_fs.h>
36 #include <linux/init.h>
37 #include <linux/hdreg.h>
38 #include <linux/spinlock.h>
39 #include <linux/compat.h>
40 #include <linux/blktrace_api.h>
41 #include <asm/uaccess.h>
42 #include <asm/io.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/blkdev.h>
46 #include <linux/genhd.h>
47 #include <linux/completion.h>
49 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
50 #define DRIVER_NAME "HP CISS Driver (v 3.6.14)"
51 #define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,14)
53 /* Embedded module documentation macros - see modules.h */
54 MODULE_AUTHOR("Hewlett-Packard Company");
55 MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.14");
56 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
57 " SA6i P600 P800 P400 P400i E200 E200i E500");
58 MODULE_VERSION("3.6.14");
59 MODULE_LICENSE("GPL");
61 #include "cciss_cmd.h"
62 #include "cciss.h"
63 #include <linux/cciss_ioctl.h>
65 /* define the PCI info for the cards we can control */
66 static const struct pci_device_id cciss_pci_device_id[] = {
67 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
68 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
69 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
70 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
71 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
72 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
73 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
74 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
75 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
76 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
77 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
78 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
79 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
80 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
81 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3233},
86 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
87 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
88 {0,}
91 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
93 /* board_id = Subsystem Device ID & Vendor ID
94 * product = Marketing Name for the board
95 * access = Address of the struct of function pointers
96 * nr_cmds = Number of commands supported by controller
98 static struct board_type products[] = {
99 {0x40700E11, "Smart Array 5300", &SA5_access, 512},
100 {0x40800E11, "Smart Array 5i", &SA5B_access, 512},
101 {0x40820E11, "Smart Array 532", &SA5B_access, 512},
102 {0x40830E11, "Smart Array 5312", &SA5B_access, 512},
103 {0x409A0E11, "Smart Array 641", &SA5_access, 512},
104 {0x409B0E11, "Smart Array 642", &SA5_access, 512},
105 {0x409C0E11, "Smart Array 6400", &SA5_access, 512},
106 {0x409D0E11, "Smart Array 6400 EM", &SA5_access, 512},
107 {0x40910E11, "Smart Array 6i", &SA5_access, 512},
108 {0x3225103C, "Smart Array P600", &SA5_access, 512},
109 {0x3223103C, "Smart Array P800", &SA5_access, 512},
110 {0x3234103C, "Smart Array P400", &SA5_access, 512},
111 {0x3235103C, "Smart Array P400i", &SA5_access, 512},
112 {0x3211103C, "Smart Array E200i", &SA5_access, 120},
113 {0x3212103C, "Smart Array E200", &SA5_access, 120},
114 {0x3213103C, "Smart Array E200i", &SA5_access, 120},
115 {0x3214103C, "Smart Array E200i", &SA5_access, 120},
116 {0x3215103C, "Smart Array E200i", &SA5_access, 120},
117 {0x3233103C, "Smart Array E500", &SA5_access, 512},
118 {0xFFFF103C, "Unknown Smart Array", &SA5_access, 120},
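/*
 * The nr_cmds column above bounds the per-controller command pool scanned by
 * cmd_alloc()/cmd_free() below (see h->nr_cmds).  Most boards advertise 512
 * outstanding commands; the E200-family boards and the catch-all "Unknown
 * Smart Array" entry are limited to 120.
 */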
121 /* How long to wait (in milliseconds) for board to go into simple mode */
122 #define MAX_CONFIG_WAIT 30000
123 #define MAX_IOCTL_CONFIG_WAIT 1000
125 /* define how many times we will try a command because of bus resets */
126 #define MAX_CMD_RETRIES 3
128 #define READ_AHEAD 1024
129 #define MAX_CTLR 32
131 /* Originally the cciss driver only supported 8 major numbers */
132 #define MAX_CTLR_ORIG 8
134 static ctlr_info_t *hba[MAX_CTLR];
136 static void do_cciss_request(request_queue_t *q);
137 static irqreturn_t do_cciss_intr(int irq, void *dev_id);
138 static int cciss_open(struct inode *inode, struct file *filep);
139 static int cciss_release(struct inode *inode, struct file *filep);
140 static int cciss_ioctl(struct inode *inode, struct file *filep,
141 unsigned int cmd, unsigned long arg);
142 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
144 static int revalidate_allvol(ctlr_info_t *host);
145 static int cciss_revalidate(struct gendisk *disk);
146 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
147 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
148 int clear_all);
150 static void cciss_read_capacity(int ctlr, int logvol, int withirq,
151 sector_t *total_size, unsigned int *block_size);
152 static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
153 sector_t *total_size, unsigned int *block_size);
154 static void cciss_geometry_inquiry(int ctlr, int logvol,
155 int withirq, sector_t total_size,
156 unsigned int block_size, InquiryData_struct *inq_buff,
157 drive_info_struct *drv);
158 static void cciss_getgeometry(int cntl_num);
159 static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
160 __u32);
161 static void start_io(ctlr_info_t *h);
162 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
163 unsigned int use_unit_num, unsigned int log_unit,
164 __u8 page_code, unsigned char *scsi3addr, int cmd_type);
165 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
166 unsigned int use_unit_num, unsigned int log_unit,
167 __u8 page_code, int cmd_type);
169 static void fail_all_cmds(unsigned long ctlr);
171 #ifdef CONFIG_PROC_FS
172 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
173 int length, int *eof, void *data);
174 static void cciss_procinit(int i);
175 #else
176 static void cciss_procinit(int i)
179 #endif /* CONFIG_PROC_FS */
181 #ifdef CONFIG_COMPAT
182 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
183 #endif
185 static struct block_device_operations cciss_fops = {
186 .owner = THIS_MODULE,
187 .open = cciss_open,
188 .release = cciss_release,
189 .ioctl = cciss_ioctl,
190 .getgeo = cciss_getgeo,
191 #ifdef CONFIG_COMPAT
192 .compat_ioctl = cciss_compat_ioctl,
193 #endif
194 .revalidate_disk = cciss_revalidate,
198 * Enqueuing and dequeuing functions for cmdlists.
200 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
202 if (*Qptr == NULL) {
203 *Qptr = c;
204 c->next = c->prev = c;
205 } else {
206 c->prev = (*Qptr)->prev;
207 c->next = (*Qptr);
208 (*Qptr)->prev->next = c;
209 (*Qptr)->prev = c;
213 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
214 CommandList_struct *c)
216 if (c && c->next != c) {
217 if (*Qptr == c)
218 *Qptr = c->next;
219 c->prev->next = c->next;
220 c->next->prev = c->prev;
221 } else {
222 *Qptr = NULL;
224 return c;
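/*
 * Illustrative sketch only -- this hypothetical helper is not part of the
 * driver.  It shows how the submission paths below (the passthru ioctls and
 * sendcmd_withirq()) queue a command: reqQ and Qdepth are protected by
 * CCISS_LOCK(), and start_io() hands queued commands to the controller.
 */
static inline void cciss_queue_command_example(ctlr_info_t *h,
					       CommandList_struct *c)
{
	unsigned long flags;

	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
	addQ(&h->reqQ, c);	/* link into the circular doubly-linked reqQ */
	h->Qdepth++;
	start_io(h);		/* push queued commands to the controller */
	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
}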
227 #include "cciss_scsi.c" /* For SCSI tape support */
229 #ifdef CONFIG_PROC_FS
232 * Report information about this controller.
234 #define ENG_GIG 1000000000
235 #define ENG_GIG_FACTOR (ENG_GIG/512)
236 #define RAID_UNKNOWN 6
237 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
238 "UNKNOWN"
241 static struct proc_dir_entry *proc_cciss;
243 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
244 int length, int *eof, void *data)
246 off_t pos = 0;
247 off_t len = 0;
248 int size, i, ctlr;
249 ctlr_info_t *h = (ctlr_info_t *) data;
250 drive_info_struct *drv;
251 unsigned long flags;
252 sector_t vol_sz, vol_sz_frac;
254 ctlr = h->ctlr;
256 /* prevent displaying bogus info during configuration
257 * or deconfiguration of a logical volume
259 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
260 if (h->busy_configuring) {
261 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
262 return -EBUSY;
264 h->busy_configuring = 1;
265 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
267 size = sprintf(buffer, "%s: HP %s Controller\n"
268 "Board ID: 0x%08lx\n"
269 "Firmware Version: %c%c%c%c\n"
270 "IRQ: %d\n"
271 "Logical drives: %d\n"
272 "Current Q depth: %d\n"
273 "Current # commands on controller: %d\n"
274 "Max Q depth since init: %d\n"
275 "Max # commands on controller since init: %d\n"
276 "Max SG entries since init: %d\n\n",
277 h->devname,
278 h->product_name,
279 (unsigned long)h->board_id,
280 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
281 h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
282 h->num_luns, h->Qdepth, h->commands_outstanding,
283 h->maxQsinceinit, h->max_outstanding, h->maxSG);
285 pos += size;
286 len += size;
287 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
288 for (i = 0; i <= h->highest_lun; i++) {
290 drv = &h->drv[i];
291 if (drv->heads == 0)
292 continue;
294 vol_sz = drv->nr_blocks;
295 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
296 vol_sz_frac *= 100;
297 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
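/*
 * ENG_GIG_FACTOR is the number of 512-byte sectors in a decimal gigabyte,
 * so vol_sz is the volume size in GB and vol_sz_frac the remainder scaled
 * to hundredths of a GB for the %4u.%02u format below.
 */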
299 if (drv->raid_level > 5)
300 drv->raid_level = RAID_UNKNOWN;
301 size = sprintf(buffer + len, "cciss/c%dd%d:"
302 "\t%4u.%02uGB\tRAID %s\n",
303 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
304 raid_label[drv->raid_level]);
305 pos += size;
306 len += size;
309 *eof = 1;
310 *start = buffer + offset;
311 len -= offset;
312 if (len > length)
313 len = length;
314 h->busy_configuring = 0;
315 return len;
318 static int
319 cciss_proc_write(struct file *file, const char __user *buffer,
320 unsigned long count, void *data)
322 unsigned char cmd[80];
323 int len;
324 #ifdef CONFIG_CISS_SCSI_TAPE
325 ctlr_info_t *h = (ctlr_info_t *) data;
326 int rc;
327 #endif
329 if (count > sizeof(cmd) - 1)
330 return -EINVAL;
331 if (copy_from_user(cmd, buffer, count))
332 return -EFAULT;
333 cmd[count] = '\0';
334 len = strlen(cmd); // above 3 lines ensure safety
335 if (len && cmd[len - 1] == '\n')
336 cmd[--len] = '\0';
337 # ifdef CONFIG_CISS_SCSI_TAPE
338 if (strcmp("engage scsi", cmd) == 0) {
339 rc = cciss_engage_scsi(h->ctlr);
340 if (rc != 0)
341 return -rc;
342 return count;
344 /* might be nice to have "disengage" too, but it's not
345 safely possible. (only 1 module use count, lock issues.) */
346 # endif
347 return -EINVAL;
351 * Get us a file in /proc/cciss that says something about each controller.
352 * Create /proc/cciss if it doesn't exist yet.
354 static void __devinit cciss_procinit(int i)
356 struct proc_dir_entry *pde;
358 if (proc_cciss == NULL) {
359 proc_cciss = proc_mkdir("cciss", proc_root_driver);
360 if (!proc_cciss)
361 return;
364 pde = create_proc_read_entry(hba[i]->devname,
365 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
366 proc_cciss, cciss_proc_get_info, hba[i]);
367 pde->write_proc = cciss_proc_write;
369 #endif /* CONFIG_PROC_FS */
372 * For operations that cannot sleep, a command block is allocated at init,
373 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
374 * which ones are free or in use. For operations that can wait for kmalloc
375 * to possibly sleep, this routine can be called with get_from_pool set to 0.
376 * cmd_free() MUST be called with got_from_pool set to 0 if cmd_alloc() was.
378 static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
380 CommandList_struct *c;
381 int i;
382 u64bit temp64;
383 dma_addr_t cmd_dma_handle, err_dma_handle;
385 if (!get_from_pool) {
386 c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
387 sizeof(CommandList_struct), &cmd_dma_handle);
388 if (c == NULL)
389 return NULL;
390 memset(c, 0, sizeof(CommandList_struct));
392 c->cmdindex = -1;
394 c->err_info = (ErrorInfo_struct *)
395 pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
396 &err_dma_handle);
398 if (c->err_info == NULL) {
399 pci_free_consistent(h->pdev,
400 sizeof(CommandList_struct), c, cmd_dma_handle);
401 return NULL;
403 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
404 } else { /* get it out of the controller's pool */
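/*
 * Scan the command-pool bitmap for a free slot.  find_first_zero_bit()
 * is not atomic with respect to other allocators, so the slot is claimed
 * with test_and_set_bit() and the scan is retried if somebody beat us to it.
 */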
406 do {
407 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
408 if (i == h->nr_cmds)
409 return NULL;
410 } while (test_and_set_bit
411 (i & (BITS_PER_LONG - 1),
412 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
413 #ifdef CCISS_DEBUG
414 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
415 #endif
416 c = h->cmd_pool + i;
417 memset(c, 0, sizeof(CommandList_struct));
418 cmd_dma_handle = h->cmd_pool_dhandle
419 + i * sizeof(CommandList_struct);
420 c->err_info = h->errinfo_pool + i;
421 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
422 err_dma_handle = h->errinfo_pool_dhandle
423 + i * sizeof(ErrorInfo_struct);
424 h->nr_allocs++;
426 c->cmdindex = i;
429 c->busaddr = (__u32) cmd_dma_handle;
430 temp64.val = (__u64) err_dma_handle;
431 c->ErrDesc.Addr.lower = temp64.val32.lower;
432 c->ErrDesc.Addr.upper = temp64.val32.upper;
433 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
435 c->ctlr = h->ctlr;
436 return c;
440 * Frees a command block that was previously allocated with cmd_alloc().
442 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
444 int i;
445 u64bit temp64;
447 if (!got_from_pool) {
448 temp64.val32.lower = c->ErrDesc.Addr.lower;
449 temp64.val32.upper = c->ErrDesc.Addr.upper;
450 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
451 c->err_info, (dma_addr_t) temp64.val);
452 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
453 c, (dma_addr_t) c->busaddr);
454 } else {
455 i = c - h->cmd_pool;
456 clear_bit(i & (BITS_PER_LONG - 1),
457 h->cmd_pool_bits + (i / BITS_PER_LONG));
458 h->nr_frees++;
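/*
 * Illustrative sketch only -- this hypothetical helper is not part of the
 * driver.  It shows the typical pairing of cmd_alloc() and cmd_free(): the
 * get_from_pool/got_from_pool arguments must match, since pool commands are
 * returned via the bitmap while non-pool commands go back to the PCI
 * consistent-DMA allocator.
 */
static inline int cciss_cmd_pool_example(ctlr_info_t *h)
{
	CommandList_struct *c = cmd_alloc(h, 1);	/* from the bitmap pool */

	if (c == NULL)
		return -EBUSY;	/* pool exhausted (all h->nr_cmds in use) */
	/* ... fill in c->Request and submit the command ... */
	cmd_free(h, c, 1);	/* must match get_from_pool == 1 above */
	return 0;
}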
462 static inline ctlr_info_t *get_host(struct gendisk *disk)
464 return disk->queue->queuedata;
467 static inline drive_info_struct *get_drv(struct gendisk *disk)
469 return disk->private_data;
473 * Open. Make sure the device is really there.
475 static int cciss_open(struct inode *inode, struct file *filep)
477 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
478 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
480 #ifdef CCISS_DEBUG
481 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
482 #endif /* CCISS_DEBUG */
484 if (host->busy_initializing || drv->busy_configuring)
485 return -EBUSY;
487 * Root is allowed to open raw volume zero even if it's not configured
488 * so array config can still work. Root is also allowed to open any
489 * volume that has a LUN ID, so it can issue an IOCTL to reread the
490 * disk information. I don't think I really like this
491 * but I'm already using way too many device nodes to claim another one
492 * for "raw controller".
494 if (drv->nr_blocks == 0) {
495 if (iminor(inode) != 0) { /* not node 0? */
496 /* if not node 0 make sure it is a partition = 0 */
497 if (iminor(inode) & 0x0f) {
498 return -ENXIO;
499 /* if it is, make sure we have a LUN ID */
500 } else if (drv->LunID == 0) {
501 return -ENXIO;
504 if (!capable(CAP_SYS_ADMIN))
505 return -EPERM;
507 drv->usage_count++;
508 host->usage_count++;
509 return 0;
513 * Close. Sync first.
515 static int cciss_release(struct inode *inode, struct file *filep)
517 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
518 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
520 #ifdef CCISS_DEBUG
521 printk(KERN_DEBUG "cciss_release %s\n",
522 inode->i_bdev->bd_disk->disk_name);
523 #endif /* CCISS_DEBUG */
525 drv->usage_count--;
526 host->usage_count--;
527 return 0;
530 #ifdef CONFIG_COMPAT
532 static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
534 int ret;
535 lock_kernel();
536 ret = cciss_ioctl(f->f_dentry->d_inode, f, cmd, arg);
537 unlock_kernel();
538 return ret;
541 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
542 unsigned long arg);
543 static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
544 unsigned long arg);
546 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
548 switch (cmd) {
549 case CCISS_GETPCIINFO:
550 case CCISS_GETINTINFO:
551 case CCISS_SETINTINFO:
552 case CCISS_GETNODENAME:
553 case CCISS_SETNODENAME:
554 case CCISS_GETHEARTBEAT:
555 case CCISS_GETBUSTYPES:
556 case CCISS_GETFIRMVER:
557 case CCISS_GETDRIVVER:
558 case CCISS_REVALIDVOLS:
559 case CCISS_DEREGDISK:
560 case CCISS_REGNEWDISK:
561 case CCISS_REGNEWD:
562 case CCISS_RESCANDISK:
563 case CCISS_GETLUNINFO:
564 return do_ioctl(f, cmd, arg);
566 case CCISS_PASSTHRU32:
567 return cciss_ioctl32_passthru(f, cmd, arg);
568 case CCISS_BIG_PASSTHRU32:
569 return cciss_ioctl32_big_passthru(f, cmd, arg);
571 default:
572 return -ENOIOCTLCMD;
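/*
 * The two passthru thunks below convert a 32-bit user ioctl structure to the
 * native layout: the fields are copied into a scratch area obtained from
 * compat_alloc_user_space(), the 32-bit buffer pointer is widened with
 * compat_ptr(), the native ioctl is invoked on the converted structure, and
 * the error_info written by the controller is copied back into the caller's
 * 32-bit structure.
 */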
576 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
577 unsigned long arg)
579 IOCTL32_Command_struct __user *arg32 =
580 (IOCTL32_Command_struct __user *) arg;
581 IOCTL_Command_struct arg64;
582 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
583 int err;
584 u32 cp;
586 err = 0;
587 err |=
588 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
589 sizeof(arg64.LUN_info));
590 err |=
591 copy_from_user(&arg64.Request, &arg32->Request,
592 sizeof(arg64.Request));
593 err |=
594 copy_from_user(&arg64.error_info, &arg32->error_info,
595 sizeof(arg64.error_info));
596 err |= get_user(arg64.buf_size, &arg32->buf_size);
597 err |= get_user(cp, &arg32->buf);
598 arg64.buf = compat_ptr(cp);
599 err |= copy_to_user(p, &arg64, sizeof(arg64));
601 if (err)
602 return -EFAULT;
604 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
605 if (err)
606 return err;
607 err |=
608 copy_in_user(&arg32->error_info, &p->error_info,
609 sizeof(arg32->error_info));
610 if (err)
611 return -EFAULT;
612 return err;
615 static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
616 unsigned long arg)
618 BIG_IOCTL32_Command_struct __user *arg32 =
619 (BIG_IOCTL32_Command_struct __user *) arg;
620 BIG_IOCTL_Command_struct arg64;
621 BIG_IOCTL_Command_struct __user *p =
622 compat_alloc_user_space(sizeof(arg64));
623 int err;
624 u32 cp;
626 err = 0;
627 err |=
628 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
629 sizeof(arg64.LUN_info));
630 err |=
631 copy_from_user(&arg64.Request, &arg32->Request,
632 sizeof(arg64.Request));
633 err |=
634 copy_from_user(&arg64.error_info, &arg32->error_info,
635 sizeof(arg64.error_info));
636 err |= get_user(arg64.buf_size, &arg32->buf_size);
637 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
638 err |= get_user(cp, &arg32->buf);
639 arg64.buf = compat_ptr(cp);
640 err |= copy_to_user(p, &arg64, sizeof(arg64));
642 if (err)
643 return -EFAULT;
645 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
646 if (err)
647 return err;
648 err |=
649 copy_in_user(&arg32->error_info, &p->error_info,
650 sizeof(arg32->error_info));
651 if (err)
652 return -EFAULT;
653 return err;
655 #endif
657 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
659 drive_info_struct *drv = get_drv(bdev->bd_disk);
661 if (!drv->cylinders)
662 return -ENXIO;
664 geo->heads = drv->heads;
665 geo->sectors = drv->sectors;
666 geo->cylinders = drv->cylinders;
667 return 0;
671 * ioctl
673 static int cciss_ioctl(struct inode *inode, struct file *filep,
674 unsigned int cmd, unsigned long arg)
676 struct block_device *bdev = inode->i_bdev;
677 struct gendisk *disk = bdev->bd_disk;
678 ctlr_info_t *host = get_host(disk);
679 drive_info_struct *drv = get_drv(disk);
680 int ctlr = host->ctlr;
681 void __user *argp = (void __user *)arg;
683 #ifdef CCISS_DEBUG
684 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
685 #endif /* CCISS_DEBUG */
687 switch (cmd) {
688 case CCISS_GETPCIINFO:
690 cciss_pci_info_struct pciinfo;
692 if (!arg)
693 return -EINVAL;
694 pciinfo.domain = pci_domain_nr(host->pdev->bus);
695 pciinfo.bus = host->pdev->bus->number;
696 pciinfo.dev_fn = host->pdev->devfn;
697 pciinfo.board_id = host->board_id;
698 if (copy_to_user
699 (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
700 return -EFAULT;
701 return 0;
703 case CCISS_GETINTINFO:
705 cciss_coalint_struct intinfo;
706 if (!arg)
707 return -EINVAL;
708 intinfo.delay =
709 readl(&host->cfgtable->HostWrite.CoalIntDelay);
710 intinfo.count =
711 readl(&host->cfgtable->HostWrite.CoalIntCount);
712 if (copy_to_user
713 (argp, &intinfo, sizeof(cciss_coalint_struct)))
714 return -EFAULT;
715 return 0;
717 case CCISS_SETINTINFO:
719 cciss_coalint_struct intinfo;
720 unsigned long flags;
721 int i;
723 if (!arg)
724 return -EINVAL;
725 if (!capable(CAP_SYS_ADMIN))
726 return -EPERM;
727 if (copy_from_user
728 (&intinfo, argp, sizeof(cciss_coalint_struct)))
729 return -EFAULT;
730 if ((intinfo.delay == 0) && (intinfo.count == 0))
732 // printk("cciss_ioctl: delay and count cannot be 0\n");
733 return -EINVAL;
735 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
736 /* Update the field, and then ring the doorbell */
737 writel(intinfo.delay,
738 &(host->cfgtable->HostWrite.CoalIntDelay));
739 writel(intinfo.count,
740 &(host->cfgtable->HostWrite.CoalIntCount));
741 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
743 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
744 if (!(readl(host->vaddr + SA5_DOORBELL)
745 & CFGTBL_ChangeReq))
746 break;
747 /* delay and try again */
748 udelay(1000);
750 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
751 if (i >= MAX_IOCTL_CONFIG_WAIT)
752 return -EAGAIN;
753 return 0;
755 case CCISS_GETNODENAME:
757 NodeName_type NodeName;
758 int i;
760 if (!arg)
761 return -EINVAL;
762 for (i = 0; i < 16; i++)
763 NodeName[i] =
764 readb(&host->cfgtable->ServerName[i]);
765 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
766 return -EFAULT;
767 return 0;
769 case CCISS_SETNODENAME:
771 NodeName_type NodeName;
772 unsigned long flags;
773 int i;
775 if (!arg)
776 return -EINVAL;
777 if (!capable(CAP_SYS_ADMIN))
778 return -EPERM;
780 if (copy_from_user
781 (NodeName, argp, sizeof(NodeName_type)))
782 return -EFAULT;
784 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
786 /* Update the field, and then ring the doorbell */
787 for (i = 0; i < 16; i++)
788 writeb(NodeName[i],
789 &host->cfgtable->ServerName[i]);
791 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
793 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
794 if (!(readl(host->vaddr + SA5_DOORBELL)
795 & CFGTBL_ChangeReq))
796 break;
797 /* delay and try again */
798 udelay(1000);
800 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
801 if (i >= MAX_IOCTL_CONFIG_WAIT)
802 return -EAGAIN;
803 return 0;
806 case CCISS_GETHEARTBEAT:
808 Heartbeat_type heartbeat;
810 if (!arg)
811 return -EINVAL;
812 heartbeat = readl(&host->cfgtable->HeartBeat);
813 if (copy_to_user
814 (argp, &heartbeat, sizeof(Heartbeat_type)))
815 return -EFAULT;
816 return 0;
818 case CCISS_GETBUSTYPES:
820 BusTypes_type BusTypes;
822 if (!arg)
823 return -EINVAL;
824 BusTypes = readl(&host->cfgtable->BusTypes);
825 if (copy_to_user
826 (argp, &BusTypes, sizeof(BusTypes_type)))
827 return -EFAULT;
828 return 0;
830 case CCISS_GETFIRMVER:
832 FirmwareVer_type firmware;
834 if (!arg)
835 return -EINVAL;
836 memcpy(firmware, host->firm_ver, 4);
838 if (copy_to_user
839 (argp, firmware, sizeof(FirmwareVer_type)))
840 return -EFAULT;
841 return 0;
843 case CCISS_GETDRIVVER:
845 DriverVer_type DriverVer = DRIVER_VERSION;
847 if (!arg)
848 return -EINVAL;
850 if (copy_to_user
851 (argp, &DriverVer, sizeof(DriverVer_type)))
852 return -EFAULT;
853 return 0;
856 case CCISS_REVALIDVOLS:
857 if (bdev != bdev->bd_contains || drv != host->drv)
858 return -ENXIO;
859 return revalidate_allvol(host);
861 case CCISS_GETLUNINFO:{
862 LogvolInfo_struct luninfo;
864 luninfo.LunID = drv->LunID;
865 luninfo.num_opens = drv->usage_count;
866 luninfo.num_parts = 0;
867 if (copy_to_user(argp, &luninfo,
868 sizeof(LogvolInfo_struct)))
869 return -EFAULT;
870 return 0;
872 case CCISS_DEREGDISK:
873 return rebuild_lun_table(host, disk);
875 case CCISS_REGNEWD:
876 return rebuild_lun_table(host, NULL);
878 case CCISS_PASSTHRU:
880 IOCTL_Command_struct iocommand;
881 CommandList_struct *c;
882 char *buff = NULL;
883 u64bit temp64;
884 unsigned long flags;
885 DECLARE_COMPLETION_ONSTACK(wait);
887 if (!arg)
888 return -EINVAL;
890 if (!capable(CAP_SYS_RAWIO))
891 return -EPERM;
893 if (copy_from_user
894 (&iocommand, argp, sizeof(IOCTL_Command_struct)))
895 return -EFAULT;
896 if ((iocommand.buf_size < 1) &&
897 (iocommand.Request.Type.Direction != XFER_NONE)) {
898 return -EINVAL;
900 #if 0 /* 'buf_size' member is 16 bits, and always smaller than kmalloc limit */
901 /* Check kmalloc limits */
902 if (iocommand.buf_size > 128000)
903 return -EINVAL;
904 #endif
905 if (iocommand.buf_size > 0) {
906 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
907 if (buff == NULL)
908 return -EFAULT;
910 if (iocommand.Request.Type.Direction == XFER_WRITE) {
911 /* Copy the data into the buffer we created */
912 if (copy_from_user
913 (buff, iocommand.buf, iocommand.buf_size)) {
914 kfree(buff);
915 return -EFAULT;
917 } else {
918 memset(buff, 0, iocommand.buf_size);
920 if ((c = cmd_alloc(host, 0)) == NULL) {
921 kfree(buff);
922 return -ENOMEM;
924 // Fill in the command type
925 c->cmd_type = CMD_IOCTL_PEND;
926 // Fill in Command Header
927 c->Header.ReplyQueue = 0; // unused in simple mode
928 if (iocommand.buf_size > 0) // buffer to fill
930 c->Header.SGList = 1;
931 c->Header.SGTotal = 1;
932 } else // no buffers to fill
934 c->Header.SGList = 0;
935 c->Header.SGTotal = 0;
937 c->Header.LUN = iocommand.LUN_info;
938 c->Header.Tag.lower = c->busaddr; // use the kernel address of the cmd block as the tag
940 // Fill in Request block
941 c->Request = iocommand.Request;
943 // Fill in the scatter gather information
944 if (iocommand.buf_size > 0) {
945 temp64.val = pci_map_single(host->pdev, buff,
946 iocommand.buf_size,
947 PCI_DMA_BIDIRECTIONAL);
948 c->SG[0].Addr.lower = temp64.val32.lower;
949 c->SG[0].Addr.upper = temp64.val32.upper;
950 c->SG[0].Len = iocommand.buf_size;
951 c->SG[0].Ext = 0; // we are not chaining
953 c->waiting = &wait;
955 /* Put the request on the tail of the request queue */
956 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
957 addQ(&host->reqQ, c);
958 host->Qdepth++;
959 start_io(host);
960 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
962 wait_for_completion(&wait);
964 /* unlock the buffers from DMA */
965 temp64.val32.lower = c->SG[0].Addr.lower;
966 temp64.val32.upper = c->SG[0].Addr.upper;
967 pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
968 iocommand.buf_size,
969 PCI_DMA_BIDIRECTIONAL);
971 /* Copy the error information out */
972 iocommand.error_info = *(c->err_info);
973 if (copy_to_user
974 (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
975 kfree(buff);
976 cmd_free(host, c, 0);
977 return -EFAULT;
980 if (iocommand.Request.Type.Direction == XFER_READ) {
981 /* Copy the data out of the buffer we created */
982 if (copy_to_user
983 (iocommand.buf, buff, iocommand.buf_size)) {
984 kfree(buff);
985 cmd_free(host, c, 0);
986 return -EFAULT;
989 kfree(buff);
990 cmd_free(host, c, 0);
991 return 0;
993 case CCISS_BIG_PASSTHRU:{
994 BIG_IOCTL_Command_struct *ioc;
995 CommandList_struct *c;
996 unsigned char **buff = NULL;
997 int *buff_size = NULL;
998 u64bit temp64;
999 unsigned long flags;
1000 BYTE sg_used = 0;
1001 int status = 0;
1002 int i;
1003 DECLARE_COMPLETION_ONSTACK(wait);
1004 __u32 left;
1005 __u32 sz;
1006 BYTE __user *data_ptr;
1008 if (!arg)
1009 return -EINVAL;
1010 if (!capable(CAP_SYS_RAWIO))
1011 return -EPERM;
1012 ioc = (BIG_IOCTL_Command_struct *)
1013 kmalloc(sizeof(*ioc), GFP_KERNEL);
1014 if (!ioc) {
1015 status = -ENOMEM;
1016 goto cleanup1;
1018 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
1019 status = -EFAULT;
1020 goto cleanup1;
1022 if ((ioc->buf_size < 1) &&
1023 (ioc->Request.Type.Direction != XFER_NONE)) {
1024 status = -EINVAL;
1025 goto cleanup1;
1027 /* Check kmalloc limits using all SGs */
1028 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
1029 status = -EINVAL;
1030 goto cleanup1;
1032 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
1033 status = -EINVAL;
1034 goto cleanup1;
1036 buff =
1037 kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1038 if (!buff) {
1039 status = -ENOMEM;
1040 goto cleanup1;
1042 buff_size = (int *)kmalloc(MAXSGENTRIES * sizeof(int),
1043 GFP_KERNEL);
1044 if (!buff_size) {
1045 status = -ENOMEM;
1046 goto cleanup1;
1048 left = ioc->buf_size;
1049 data_ptr = ioc->buf;
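/*
 * Split the user buffer into at most MAXSGENTRIES kernel buffers of up to
 * ioc->malloc_size bytes each; one scatter-gather entry is built per chunk
 * further down.
 */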
1050 while (left) {
1051 sz = (left >
1052 ioc->malloc_size) ? ioc->
1053 malloc_size : left;
1054 buff_size[sg_used] = sz;
1055 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1056 if (buff[sg_used] == NULL) {
1057 status = -ENOMEM;
1058 goto cleanup1;
1060 if (ioc->Request.Type.Direction == XFER_WRITE) {
1061 if (copy_from_user
1062 (buff[sg_used], data_ptr, sz)) {
1063 status = -ENOMEM;
1064 goto cleanup1;
1066 } else {
1067 memset(buff[sg_used], 0, sz);
1069 left -= sz;
1070 data_ptr += sz;
1071 sg_used++;
1073 if ((c = cmd_alloc(host, 0)) == NULL) {
1074 status = -ENOMEM;
1075 goto cleanup1;
1077 c->cmd_type = CMD_IOCTL_PEND;
1078 c->Header.ReplyQueue = 0;
1080 if (ioc->buf_size > 0) {
1081 c->Header.SGList = sg_used;
1082 c->Header.SGTotal = sg_used;
1083 } else {
1084 c->Header.SGList = 0;
1085 c->Header.SGTotal = 0;
1087 c->Header.LUN = ioc->LUN_info;
1088 c->Header.Tag.lower = c->busaddr;
1090 c->Request = ioc->Request;
1091 if (ioc->buf_size > 0) {
1092 int i;
1093 for (i = 0; i < sg_used; i++) {
1094 temp64.val =
1095 pci_map_single(host->pdev, buff[i],
1096 buff_size[i],
1097 PCI_DMA_BIDIRECTIONAL);
1098 c->SG[i].Addr.lower =
1099 temp64.val32.lower;
1100 c->SG[i].Addr.upper =
1101 temp64.val32.upper;
1102 c->SG[i].Len = buff_size[i];
1103 c->SG[i].Ext = 0; /* we are not chaining */
1106 c->waiting = &wait;
1107 /* Put the request on the tail of the request queue */
1108 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1109 addQ(&host->reqQ, c);
1110 host->Qdepth++;
1111 start_io(host);
1112 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1113 wait_for_completion(&wait);
1114 /* unlock the buffers from DMA */
1115 for (i = 0; i < sg_used; i++) {
1116 temp64.val32.lower = c->SG[i].Addr.lower;
1117 temp64.val32.upper = c->SG[i].Addr.upper;
1118 pci_unmap_single(host->pdev,
1119 (dma_addr_t) temp64.val, buff_size[i],
1120 PCI_DMA_BIDIRECTIONAL);
1122 /* Copy the error information out */
1123 ioc->error_info = *(c->err_info);
1124 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1125 cmd_free(host, c, 0);
1126 status = -EFAULT;
1127 goto cleanup1;
1129 if (ioc->Request.Type.Direction == XFER_READ) {
1130 /* Copy the data out of the buffer we created */
1131 BYTE __user *ptr = ioc->buf;
1132 for (i = 0; i < sg_used; i++) {
1133 if (copy_to_user
1134 (ptr, buff[i], buff_size[i])) {
1135 cmd_free(host, c, 0);
1136 status = -EFAULT;
1137 goto cleanup1;
1139 ptr += buff_size[i];
1142 cmd_free(host, c, 0);
1143 status = 0;
1144 cleanup1:
1145 if (buff) {
1146 for (i = 0; i < sg_used; i++)
1147 kfree(buff[i]);
1148 kfree(buff);
1150 kfree(buff_size);
1151 kfree(ioc);
1152 return status;
1154 default:
1155 return -ENOTTY;
1160 * revalidate_allvol is for online array config utilities. After a
1161 * utility reconfigures the drives in the array, it can use this function
1162 * (through an ioctl) to make the driver zap any previous disk structs for
1163 * that controller and get new ones.
1165 * Right now I'm using the getgeometry() function to do this, but this
1166 * function should probably be finer grained and allow you to revalidate one
1167 * particular logical volume (instead of all of them on a particular
1168 * controller).
1170 static int revalidate_allvol(ctlr_info_t *host)
1172 int ctlr = host->ctlr, i;
1173 unsigned long flags;
1175 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1176 if (host->usage_count > 1) {
1177 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1178 printk(KERN_WARNING "cciss: Device busy for volume"
1179 " revalidation (usage=%d)\n", host->usage_count);
1180 return -EBUSY;
1182 host->usage_count++;
1183 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1185 for (i = 0; i < NWD; i++) {
1186 struct gendisk *disk = host->gendisk[i];
1187 if (disk) {
1188 request_queue_t *q = disk->queue;
1190 if (disk->flags & GENHD_FL_UP)
1191 del_gendisk(disk);
1192 if (q)
1193 blk_cleanup_queue(q);
1198 * Set the partition and block size structures for all volumes
1199 * on this controller to zero. We will reread all of this data
1201 memset(host->drv, 0, sizeof(drive_info_struct)
1202 * CISS_MAX_LUN);
1204 * Tell the array controller not to give us any interrupts while
1205 * we check the new geometry. Then turn interrupts back on when
1206 * we're done.
1208 host->access.set_intr_mask(host, CCISS_INTR_OFF);
1209 cciss_getgeometry(ctlr);
1210 host->access.set_intr_mask(host, CCISS_INTR_ON);
1212 /* Loop through each real device */
1213 for (i = 0; i < NWD; i++) {
1214 struct gendisk *disk = host->gendisk[i];
1215 drive_info_struct *drv = &(host->drv[i]);
1216 /* we must register the controller even if no disks exist */
1217 /* this is for the online array utilities */
1218 if (!drv->heads && i)
1219 continue;
1220 blk_queue_hardsect_size(drv->queue, drv->block_size);
1221 set_capacity(disk, drv->nr_blocks);
1222 add_disk(disk);
1224 host->usage_count--;
1225 return 0;
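/*
 * complete_buffers() walks the bio chain of a finished request and signals
 * completion for each bio, passing -EIO when the request failed.
 */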
1228 static inline void complete_buffers(struct bio *bio, int status)
1230 while (bio) {
1231 struct bio *xbh = bio->bi_next;
1232 int nr_sectors = bio_sectors(bio);
1234 bio->bi_next = NULL;
1235 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
1236 bio = xbh;
1240 static void cciss_check_queues(ctlr_info_t *h)
1242 int start_queue = h->next_to_run;
1243 int i;
1245 /* check to see if we have maxed out the number of commands that can
1246 * be placed on the queue. If so then exit. We do this check here
1247 * in case the interrupt we serviced was from an ioctl and did not
1248 * free any new commands.
1250 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
1251 return;
1253 /* We have room on the queue for more commands. Now we need to queue
1254 * them up. We will also keep track of the next queue to run so
1255 * that every queue gets a chance to be started first.
1257 for (i = 0; i < h->highest_lun + 1; i++) {
1258 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1259 /* make sure the disk has been added and the drive is real
1260 * because this can be called from the middle of init_one.
1262 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1263 continue;
1264 blk_start_queue(h->gendisk[curr_queue]->queue);
1266 /* check to see if we have maxed out the number of commands
1267 * that can be placed on the queue.
1269 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
1270 if (curr_queue == start_queue) {
1271 h->next_to_run =
1272 (start_queue + 1) % (h->highest_lun + 1);
1273 break;
1274 } else {
1275 h->next_to_run = curr_queue;
1276 break;
1278 } else {
1279 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
1284 static void cciss_softirq_done(struct request *rq)
1286 CommandList_struct *cmd = rq->completion_data;
1287 ctlr_info_t *h = hba[cmd->ctlr];
1288 unsigned long flags;
1289 u64bit temp64;
1290 int i, ddir;
1292 if (cmd->Request.Type.Direction == XFER_READ)
1293 ddir = PCI_DMA_FROMDEVICE;
1294 else
1295 ddir = PCI_DMA_TODEVICE;
1297 /* command did not need to be retried */
1298 /* unmap the DMA mapping for all the scatter gather elements */
1299 for (i = 0; i < cmd->Header.SGList; i++) {
1300 temp64.val32.lower = cmd->SG[i].Addr.lower;
1301 temp64.val32.upper = cmd->SG[i].Addr.upper;
1302 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1305 complete_buffers(rq->bio, rq->errors);
1307 if (blk_fs_request(rq)) {
1308 const int rw = rq_data_dir(rq);
1310 disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
1313 #ifdef CCISS_DEBUG
1314 printk("Done with %p\n", rq);
1315 #endif /* CCISS_DEBUG */
1317 add_disk_randomness(rq->rq_disk);
1318 spin_lock_irqsave(&h->lock, flags);
1319 end_that_request_last(rq, rq->errors);
1320 cmd_free(h, cmd, 1);
1321 cciss_check_queues(h);
1322 spin_unlock_irqrestore(&h->lock, flags);
1325 /* This function will check the usage_count of the drive to be updated/added.
1326 * If the usage_count is zero then the drive information will be updated and
1327 * the disk will be re-registered with the kernel. If not then it will be
1328 * left alone for the next reboot. The exception to this is disk 0 which
1329 * will always be left registered with the kernel since it is also the
1330 * controller node. Any changes to disk 0 will show up on the next
1331 * reboot.
1333 static void cciss_update_drive_info(int ctlr, int drv_index)
1335 ctlr_info_t *h = hba[ctlr];
1336 struct gendisk *disk;
1337 InquiryData_struct *inq_buff = NULL;
1338 unsigned int block_size;
1339 sector_t total_size;
1340 unsigned long flags = 0;
1341 int ret = 0;
1343 /* if the disk already exists then deregister it before proceeding */
1344 if (h->drv[drv_index].raid_level != -1) {
1345 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1346 h->drv[drv_index].busy_configuring = 1;
1347 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1348 ret = deregister_disk(h->gendisk[drv_index],
1349 &h->drv[drv_index], 0);
1350 h->drv[drv_index].busy_configuring = 0;
1353 /* If the disk is in use return */
1354 if (ret)
1355 return;
1357 /* Get information about the disk and modify the driver structure */
1358 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1359 if (inq_buff == NULL)
1360 goto mem_msg;
1362 cciss_read_capacity(ctlr, drv_index, 1,
1363 &total_size, &block_size);
1365 /* total size = last LBA + 1 */
1366 /* FFFFFFFF + 1 = 0, cannot have a logical volume of size 0 */
1367 /* so we assume this volume must be >2TB in size */
1368 if (total_size == (__u32) 0) {
1369 cciss_read_capacity_16(ctlr, drv_index, 1,
1370 &total_size, &block_size);
1371 h->cciss_read = CCISS_READ_16;
1372 h->cciss_write = CCISS_WRITE_16;
1373 } else {
1374 h->cciss_read = CCISS_READ_10;
1375 h->cciss_write = CCISS_WRITE_10;
1377 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1378 inq_buff, &h->drv[drv_index]);
1380 ++h->num_luns;
1381 disk = h->gendisk[drv_index];
1382 set_capacity(disk, h->drv[drv_index].nr_blocks);
1384 /* if it's the controller it's already added */
1385 if (drv_index) {
1386 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1388 /* Set up queue information */
1389 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1390 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1392 /* This is a hardware imposed limit. */
1393 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1395 /* This is a limit in the driver and could be eliminated. */
1396 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1398 blk_queue_max_sectors(disk->queue, 512);
1400 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1402 disk->queue->queuedata = hba[ctlr];
1404 blk_queue_hardsect_size(disk->queue,
1405 hba[ctlr]->drv[drv_index].block_size);
1407 h->drv[drv_index].queue = disk->queue;
1408 add_disk(disk);
1411 freeret:
1412 kfree(inq_buff);
1413 return;
1414 mem_msg:
1415 printk(KERN_ERR "cciss: out of memory\n");
1416 goto freeret;
1419 /* This function will find the first index of the controller's drive array
1420 * that has a -1 for the raid_level and will return that index. This is
1421 * where new drives will be added. If the index to be returned is greater
1422 * than the highest_lun index for the controller then highest_lun is set
1423 * to this new index. If there are no available indexes then -1 is returned.
1425 static int cciss_find_free_drive_index(int ctlr)
1427 int i;
1429 for (i = 0; i < CISS_MAX_LUN; i++) {
1430 if (hba[ctlr]->drv[i].raid_level == -1) {
1431 if (i > hba[ctlr]->highest_lun)
1432 hba[ctlr]->highest_lun = i;
1433 return i;
1436 return -1;
1439 /* This function will add and remove logical drives from the Logical
1440 * drive array of the controller and maintain a persistent ordering
1441 * so that mount points are preserved until the next reboot. This allows
1442 * for the removal of logical drives in the middle of the drive array
1443 * without a re-ordering of those drives.
1444 * INPUT
1445 * h = The controller to perform the operations on
1446 * del_disk = The disk to remove if specified. If the value given
1447 * is NULL then no disk is removed.
1449 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1451 int ctlr = h->ctlr;
1452 int num_luns;
1453 ReportLunData_struct *ld_buff = NULL;
1454 drive_info_struct *drv = NULL;
1455 int return_code;
1456 int listlength = 0;
1457 int i;
1458 int drv_found;
1459 int drv_index = 0;
1460 __u32 lunid = 0;
1461 unsigned long flags;
1463 /* Set busy_configuring flag for this operation */
1464 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1465 if (h->num_luns >= CISS_MAX_LUN) {
1466 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1467 return -EINVAL;
1470 if (h->busy_configuring) {
1471 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1472 return -EBUSY;
1474 h->busy_configuring = 1;
1476 /* if del_disk is NULL then we are being called to add a new disk
1477 * and update the logical drive table. If it is not NULL then
1478 * we will check if the disk is in use or not.
1480 if (del_disk != NULL) {
1481 drv = get_drv(del_disk);
1482 drv->busy_configuring = 1;
1483 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1484 return_code = deregister_disk(del_disk, drv, 1);
1485 drv->busy_configuring = 0;
1486 h->busy_configuring = 0;
1487 return return_code;
1488 } else {
1489 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1490 if (!capable(CAP_SYS_RAWIO))
1491 return -EPERM;
1493 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1494 if (ld_buff == NULL)
1495 goto mem_msg;
1497 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1498 sizeof(ReportLunData_struct), 0,
1499 0, 0, TYPE_CMD);
1501 if (return_code == IO_OK) {
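/*
 * LUNListLength comes back from the controller as a big-endian byte array;
 * assemble it into a host-order byte count (8 bytes per logical-drive entry).
 */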
1502 listlength |=
1503 (0xff & (unsigned int)(ld_buff->LUNListLength[0]))
1504 << 24;
1505 listlength |=
1506 (0xff & (unsigned int)(ld_buff->LUNListLength[1]))
1507 << 16;
1508 listlength |=
1509 (0xff & (unsigned int)(ld_buff->LUNListLength[2]))
1510 << 8;
1511 listlength |=
1512 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
1513 } else { /* reading number of logical volumes failed */
1514 printk(KERN_WARNING "cciss: report logical volume"
1515 " command failed\n");
1516 listlength = 0;
1517 goto freeret;
1520 num_luns = listlength / 8; /* 8 bytes per entry */
1521 if (num_luns > CISS_MAX_LUN) {
1522 num_luns = CISS_MAX_LUN;
1523 printk(KERN_WARNING "cciss: more luns configured"
1524 " on controller than can be handled by"
1525 " this driver.\n");
1528 /* Compare the controller's drive array to the driver's drive array.
1529 * Check for updates in the drive information and any new drives
1530 * on the controller.
1532 for (i = 0; i < num_luns; i++) {
1533 int j;
1535 drv_found = 0;
1537 lunid = (0xff &
1538 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1539 lunid |= (0xff &
1540 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1541 lunid |= (0xff &
1542 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1543 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1545 /* Find if the LUN is already in the drive array
1546 * of the controller. If so, update its info
1547 * if it is not in use. If it does not exist, find
1548 * the first free index and add it.
1550 for (j = 0; j <= h->highest_lun; j++) {
1551 if (h->drv[j].LunID == lunid) {
1552 drv_index = j;
1553 drv_found = 1;
1557 /* check if the drive was found already in the array */
1558 if (!drv_found) {
1559 drv_index = cciss_find_free_drive_index(ctlr);
1560 if (drv_index == -1)
1561 goto freeret;
1564 h->drv[drv_index].LunID = lunid;
1565 cciss_update_drive_info(ctlr, drv_index);
1566 } /* end for */
1567 } /* end else */
1569 freeret:
1570 kfree(ld_buff);
1571 h->busy_configuring = 0;
1572 /* We return -1 here to tell the ACU that we have registered/updated
1573 * all of the drives that we can and to keep it from calling us
1574 * additional times.
1576 return -1;
1577 mem_msg:
1578 printk(KERN_ERR "cciss: out of memory\n");
1579 goto freeret;
1582 /* This function will deregister the disk and its queue from the
1583 * kernel. It must be called with the controller lock held and the
1584 * drv structure's busy_configuring flag set. Its parameters are:
1586 * disk = This is the disk to be deregistered
1587 * drv = This is the drive_info_struct associated with the disk to be
1588 * deregistered. It contains information about the disk used
1589 * by the driver.
1590 * clear_all = This flag determines whether or not the disk information
1591 * is going to be completely cleared out and the highest_lun
1592 * reset. Sometimes we want to clear out information about
1593 * the disk in preparation for re-adding it. In this case
1594 * the highest_lun should be left unchanged and the LunID
1595 * should not be cleared.
1597 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1598 int clear_all)
1600 ctlr_info_t *h = get_host(disk);
1602 if (!capable(CAP_SYS_RAWIO))
1603 return -EPERM;
1605 /* make sure logical volume is NOT in use */
1606 if (clear_all || (h->gendisk[0] == disk)) {
1607 if (drv->usage_count > 1)
1608 return -EBUSY;
1609 } else if (drv->usage_count > 0)
1610 return -EBUSY;
1612 /* invalidate the devices and deregister the disk. If it is disk
1613 * zero, do not deregister it but just zero out its values. This
1614 * allows us to delete disk zero but keep the controller registered.
1616 if (h->gendisk[0] != disk) {
1617 if (disk) {
1618 request_queue_t *q = disk->queue;
1619 if (disk->flags & GENHD_FL_UP)
1620 del_gendisk(disk);
1621 if (q) {
1622 blk_cleanup_queue(q);
1623 drv->queue = NULL;
1628 --h->num_luns;
1629 /* zero out the disk size info */
1630 drv->nr_blocks = 0;
1631 drv->block_size = 0;
1632 drv->heads = 0;
1633 drv->sectors = 0;
1634 drv->cylinders = 0;
1635 drv->raid_level = -1; /* This can be used as a flag variable to
1636 * indicate that this element of the drive
1637 * array is free.
1640 if (clear_all) {
1641 /* check to see if it was the last disk */
1642 if (drv == h->drv + h->highest_lun) {
1643 /* if so, find the new highest lun */
1644 int i, newhighest = -1;
1645 for (i = 0; i < h->highest_lun; i++) {
1646 /* if the disk has size > 0, it is available */
1647 if (h->drv[i].heads)
1648 newhighest = i;
1650 h->highest_lun = newhighest;
1653 drv->LunID = 0;
1655 return 0;
1658 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
1659 1: address logical volume log_unit,
1660 2: periph device address is scsi3addr */
1661 unsigned int log_unit, __u8 page_code,
1662 unsigned char *scsi3addr, int cmd_type)
1664 ctlr_info_t *h = hba[ctlr];
1665 u64bit buff_dma_handle;
1666 int status = IO_OK;
1668 c->cmd_type = CMD_IOCTL_PEND;
1669 c->Header.ReplyQueue = 0;
1670 if (buff != NULL) {
1671 c->Header.SGList = 1;
1672 c->Header.SGTotal = 1;
1673 } else {
1674 c->Header.SGList = 0;
1675 c->Header.SGTotal = 0;
1677 c->Header.Tag.lower = c->busaddr;
1679 c->Request.Type.Type = cmd_type;
1680 if (cmd_type == TYPE_CMD) {
1681 switch (cmd) {
1682 case CISS_INQUIRY:
1683 /* If the logical unit number is 0 then this is going
1684 to the controller, so it's a physical command:
1685 mode = 0, target = 0, and we have nothing to write.
1686 Otherwise, if use_unit_num == 1:
1687 mode = 1 (volume set addressing), target = LUNID.
1688 Otherwise, if use_unit_num == 2:
1689 mode = 0 (peripheral device addressing), target = scsi3addr. */
1690 if (use_unit_num == 1) {
1691 c->Header.LUN.LogDev.VolId =
1692 h->drv[log_unit].LunID;
1693 c->Header.LUN.LogDev.Mode = 1;
1694 } else if (use_unit_num == 2) {
1695 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
1697 c->Header.LUN.LogDev.Mode = 0;
1699 /* are we trying to read a vital product page */
1700 if (page_code != 0) {
1701 c->Request.CDB[1] = 0x01;
1702 c->Request.CDB[2] = page_code;
1704 c->Request.CDBLen = 6;
1705 c->Request.Type.Attribute = ATTR_SIMPLE;
1706 c->Request.Type.Direction = XFER_READ;
1707 c->Request.Timeout = 0;
1708 c->Request.CDB[0] = CISS_INQUIRY;
1709 c->Request.CDB[4] = size & 0xFF;
1710 break;
1711 case CISS_REPORT_LOG:
1712 case CISS_REPORT_PHYS:
1713 /* Talking to the controller, so it's a physical command:
1714 mode = 00, target = 0. Nothing to write.
1716 c->Request.CDBLen = 12;
1717 c->Request.Type.Attribute = ATTR_SIMPLE;
1718 c->Request.Type.Direction = XFER_READ;
1719 c->Request.Timeout = 0;
1720 c->Request.CDB[0] = cmd;
1721 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1722 c->Request.CDB[7] = (size >> 16) & 0xFF;
1723 c->Request.CDB[8] = (size >> 8) & 0xFF;
1724 c->Request.CDB[9] = size & 0xFF;
1725 break;
1727 case CCISS_READ_CAPACITY:
1728 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1729 c->Header.LUN.LogDev.Mode = 1;
1730 c->Request.CDBLen = 10;
1731 c->Request.Type.Attribute = ATTR_SIMPLE;
1732 c->Request.Type.Direction = XFER_READ;
1733 c->Request.Timeout = 0;
1734 c->Request.CDB[0] = cmd;
1735 break;
1736 case CCISS_READ_CAPACITY_16:
1737 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1738 c->Header.LUN.LogDev.Mode = 1;
1739 c->Request.CDBLen = 16;
1740 c->Request.Type.Attribute = ATTR_SIMPLE;
1741 c->Request.Type.Direction = XFER_READ;
1742 c->Request.Timeout = 0;
1743 c->Request.CDB[0] = cmd;
1744 c->Request.CDB[1] = 0x10;
1745 c->Request.CDB[10] = (size >> 24) & 0xFF;
1746 c->Request.CDB[11] = (size >> 16) & 0xFF;
1747 c->Request.CDB[12] = (size >> 8) & 0xFF;
1748 c->Request.CDB[13] = size & 0xFF;
1749 c->Request.Timeout = 0;
1750 c->Request.CDB[0] = cmd;
1751 break;
1752 case CCISS_CACHE_FLUSH:
1753 c->Request.CDBLen = 12;
1754 c->Request.Type.Attribute = ATTR_SIMPLE;
1755 c->Request.Type.Direction = XFER_WRITE;
1756 c->Request.Timeout = 0;
1757 c->Request.CDB[0] = BMIC_WRITE;
1758 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1759 break;
1760 default:
1761 printk(KERN_WARNING
1762 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1763 return IO_ERROR;
1765 } else if (cmd_type == TYPE_MSG) {
1766 switch (cmd) {
1767 case 0: /* ABORT message */
1768 c->Request.CDBLen = 12;
1769 c->Request.Type.Attribute = ATTR_SIMPLE;
1770 c->Request.Type.Direction = XFER_WRITE;
1771 c->Request.Timeout = 0;
1772 c->Request.CDB[0] = cmd; /* abort */
1773 c->Request.CDB[1] = 0; /* abort a command */
1774 /* buff contains the tag of the command to abort */
1775 memcpy(&c->Request.CDB[4], buff, 8);
1776 break;
1777 case 1: /* RESET message */
1778 c->Request.CDBLen = 12;
1779 c->Request.Type.Attribute = ATTR_SIMPLE;
1780 c->Request.Type.Direction = XFER_WRITE;
1781 c->Request.Timeout = 0;
1782 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1783 c->Request.CDB[0] = cmd; /* reset */
1784 c->Request.CDB[1] = 0x04; /* reset a LUN */
1785 break;
1786 case 3: /* No-Op message */
1787 c->Request.CDBLen = 1;
1788 c->Request.Type.Attribute = ATTR_SIMPLE;
1789 c->Request.Type.Direction = XFER_WRITE;
1790 c->Request.Timeout = 0;
1791 c->Request.CDB[0] = cmd;
1792 break;
1793 default:
1794 printk(KERN_WARNING
1795 "cciss%d: unknown message type %d\n", ctlr, cmd);
1796 return IO_ERROR;
1798 } else {
1799 printk(KERN_WARNING
1800 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1801 return IO_ERROR;
1803 /* Fill in the scatter gather information */
1804 if (size > 0) {
1805 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1806 buff, size,
1807 PCI_DMA_BIDIRECTIONAL);
1808 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1809 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1810 c->SG[0].Len = size;
1811 c->SG[0].Ext = 0; /* we are not chaining */
1813 return status;
1816 static int sendcmd_withirq(__u8 cmd,
1817 int ctlr,
1818 void *buff,
1819 size_t size,
1820 unsigned int use_unit_num,
1821 unsigned int log_unit, __u8 page_code, int cmd_type)
1823 ctlr_info_t *h = hba[ctlr];
1824 CommandList_struct *c;
1825 u64bit buff_dma_handle;
1826 unsigned long flags;
1827 int return_status;
1828 DECLARE_COMPLETION_ONSTACK(wait);
1830 if ((c = cmd_alloc(h, 0)) == NULL)
1831 return -ENOMEM;
1832 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1833 log_unit, page_code, NULL, cmd_type);
1834 if (return_status != IO_OK) {
1835 cmd_free(h, c, 0);
1836 return return_status;
1838 resend_cmd2:
1839 c->waiting = &wait;
1841 /* Put the request on the tail of the queue and send it */
1842 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1843 addQ(&h->reqQ, c);
1844 h->Qdepth++;
1845 start_io(h);
1846 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1848 wait_for_completion(&wait);
1850 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
1851 switch (c->err_info->CommandStatus) {
1852 case CMD_TARGET_STATUS:
1853 printk(KERN_WARNING "cciss: cmd %p has "
1854 " completed with errors\n", c);
1855 if (c->err_info->ScsiStatus) {
1856 printk(KERN_WARNING "cciss: cmd %p "
1857 "has SCSI Status = %x\n",
1858 c, c->err_info->ScsiStatus);
1861 break;
1862 case CMD_DATA_UNDERRUN:
1863 case CMD_DATA_OVERRUN:
1864 /* expected for inquire and report lun commands */
1865 break;
1866 case CMD_INVALID:
1867 printk(KERN_WARNING "cciss: Cmd %p is "
1868 "reported invalid\n", c);
1869 return_status = IO_ERROR;
1870 break;
1871 case CMD_PROTOCOL_ERR:
1872 printk(KERN_WARNING "cciss: cmd %p has "
1873 "protocol error \n", c);
1874 return_status = IO_ERROR;
1875 break;
1876 case CMD_HARDWARE_ERR:
1877 printk(KERN_WARNING "cciss: cmd %p had "
1878 " hardware error\n", c);
1879 return_status = IO_ERROR;
1880 break;
1881 case CMD_CONNECTION_LOST:
1882 printk(KERN_WARNING "cciss: cmd %p had "
1883 "connection lost\n", c);
1884 return_status = IO_ERROR;
1885 break;
1886 case CMD_ABORTED:
1887 printk(KERN_WARNING "cciss: cmd %p was "
1888 "aborted\n", c);
1889 return_status = IO_ERROR;
1890 break;
1891 case CMD_ABORT_FAILED:
1892 printk(KERN_WARNING "cciss: cmd %p reports "
1893 "abort failed\n", c);
1894 return_status = IO_ERROR;
1895 break;
1896 case CMD_UNSOLICITED_ABORT:
1897 printk(KERN_WARNING
1898 "cciss%d: unsolicited abort %p\n", ctlr, c);
1899 if (c->retry_count < MAX_CMD_RETRIES) {
1900 printk(KERN_WARNING
1901 "cciss%d: retrying %p\n", ctlr, c);
1902 c->retry_count++;
1903 /* erase the old error information */
1904 memset(c->err_info, 0,
1905 sizeof(ErrorInfo_struct));
1906 return_status = IO_OK;
1907 INIT_COMPLETION(wait);
1908 goto resend_cmd2;
1910 return_status = IO_ERROR;
1911 break;
1912 default:
1913 printk(KERN_WARNING "cciss: cmd %p returned "
1914 "unknown status %x\n", c,
1915 c->err_info->CommandStatus);
1916 return_status = IO_ERROR;
1919 /* unlock the buffers from DMA */
1920 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1921 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1922 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
1923 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1924 cmd_free(h, c, 0);
1925 return return_status;
1928 static void cciss_geometry_inquiry(int ctlr, int logvol,
1929 int withirq, sector_t total_size,
1930 unsigned int block_size,
1931 InquiryData_struct *inq_buff,
1932 drive_info_struct *drv)
1934 int return_code;
1935 unsigned long t;
1937 memset(inq_buff, 0, sizeof(InquiryData_struct));
1938 if (withirq)
1939 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1940 inq_buff, sizeof(*inq_buff), 1,
1941 logvol, 0xC1, TYPE_CMD);
1942 else
1943 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1944 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
1945 TYPE_CMD);
1946 if (return_code == IO_OK) {
1947 if (inq_buff->data_byte[8] == 0xFF) {
1948 printk(KERN_WARNING
1949 "cciss: reading geometry failed, volume "
1950 "does not support reading geometry\n");
1951 drv->heads = 255;
1952 drv->sectors = 32; // Sectors per track
1953 } else {
1954 drv->heads = inq_buff->data_byte[6];
1955 drv->sectors = inq_buff->data_byte[7];
1956 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1957 drv->cylinders += inq_buff->data_byte[5];
1958 drv->raid_level = inq_buff->data_byte[8];
1960 drv->block_size = block_size;
1961 drv->nr_blocks = total_size;
1962 t = drv->heads * drv->sectors;
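/* Note: cylinders is the block count divided by (heads * sectors), rounded
 * up; e.g. with the 255-head, 32-sector fallback geometry a 1,000,000-block
 * volume gives t = 8160 and cylinders = 123 (122 * 8160 = 995520, remainder
 * 4480).  sector_div() divides total_size in place and returns the
 * remainder. */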
1963 if (t > 1) {
1964 unsigned rem = sector_div(total_size, t);
1965 if (rem)
1966 total_size++;
1967 drv->cylinders = total_size;
1969 } else { /* Get geometry failed */
1970 printk(KERN_WARNING "cciss: reading geometry failed\n");
1972 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
1973 drv->heads, drv->sectors, drv->cylinders);
1976 static void
1977 cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
1978 unsigned int *block_size)
1980 ReadCapdata_struct *buf;
1981 int return_code;
1982 buf = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
1983 if (buf == NULL) {
1984 printk(KERN_WARNING "cciss: out of memory\n");
1985 return;
1987 memset(buf, 0, sizeof(ReadCapdata_struct));
1988 if (withirq)
1989 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1990 ctlr, buf, sizeof(ReadCapdata_struct),
1991 1, logvol, 0, TYPE_CMD);
1992 else
1993 return_code = sendcmd(CCISS_READ_CAPACITY,
1994 ctlr, buf, sizeof(ReadCapdata_struct),
1995 1, logvol, 0, NULL, TYPE_CMD);
1996 if (return_code == IO_OK) {
1997 *total_size = be32_to_cpu(*(__u32 *) buf->total_size)+1;
1998 *block_size = be32_to_cpu(*(__u32 *) buf->block_size);
1999 } else { /* read capacity command failed */
2000 printk(KERN_WARNING "cciss: read capacity failed\n");
2001 *total_size = 0;
2002 *block_size = BLOCK_SIZE;
2004 if (*total_size != (__u32) 0)
2005 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2006 (unsigned long long)*total_size, *block_size);
2007 kfree(buf);
2008 return;
2011 static void
2012 cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
2014 ReadCapdata_struct_16 *buf;
2015 int return_code;
2016 buf = kmalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2017 if (buf == NULL) {
2018 printk(KERN_WARNING "cciss: out of memory\n");
2019 return;
2021 memset(buf, 0, sizeof(ReadCapdata_struct_16));
2022 if (withirq) {
2023 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2024 ctlr, buf, sizeof(ReadCapdata_struct_16),
2025 1, logvol, 0, TYPE_CMD);
2027 else {
2028 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2029 ctlr, buf, sizeof(ReadCapdata_struct_16),
2030 1, logvol, 0, NULL, TYPE_CMD);
2032 if (return_code == IO_OK) {
2033 *total_size = be64_to_cpu(*(__u64 *) buf->total_size)+1;
2034 *block_size = be32_to_cpu(*(__u32 *) buf->block_size);
2035 } else { /* read capacity command failed */
2036 printk(KERN_WARNING "cciss: read capacity failed\n");
2037 *total_size = 0;
2038 *block_size = BLOCK_SIZE;
2040 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2041 (unsigned long long)*total_size, *block_size);
2042 kfree(buf);
2043 return;
2046 static int cciss_revalidate(struct gendisk *disk)
2048 ctlr_info_t *h = get_host(disk);
2049 drive_info_struct *drv = get_drv(disk);
2050 int logvol;
2051 int FOUND = 0;
2052 unsigned int block_size;
2053 sector_t total_size;
2054 InquiryData_struct *inq_buff = NULL;
2056 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2057 if (h->drv[logvol].LunID == drv->LunID) {
2058 FOUND = 1;
2059 break;
2063 if (!FOUND)
2064 return 1;
2066 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2067 if (inq_buff == NULL) {
2068 printk(KERN_WARNING "cciss: out of memory\n");
2069 return 1;
2071 if (h->cciss_read == CCISS_READ_10) {
2072 cciss_read_capacity(h->ctlr, logvol, 1,
2073 &total_size, &block_size);
2074 } else {
2075 cciss_read_capacity_16(h->ctlr, logvol, 1,
2076 &total_size, &block_size);
2078 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2079 inq_buff, drv);
2081 blk_queue_hardsect_size(drv->queue, drv->block_size);
2082 set_capacity(disk, drv->nr_blocks);
2084 kfree(inq_buff);
2085 return 0;
2089 * Wait polling for a command to complete.
2090 * The memory mapped FIFO is polled for the completion.
2091 * Used only at init time, interrupts from the HBA are disabled.
2093 static unsigned long pollcomplete(int ctlr)
2095 unsigned long done;
2096 int i;
2098 /* Wait (up to 20 seconds) for a command to complete */
2100 for (i = 20 * HZ; i > 0; i--) {
2101 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2102 if (done == FIFO_EMPTY)
2103 schedule_timeout_uninterruptible(1);
2104 else
2105 return done;
2107 /* Invalid address to tell caller we ran out of time */
2108 return 1;
2111 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2113 /* We get in here if sendcmd() is polling for completions
2114 and gets some command back that it wasn't expecting --
2115 something other than that which it just sent down.
2116 Ordinarily, that shouldn't happen, but it can happen when
2117 the scsi tape stuff gets into error handling mode, and
2118 starts using sendcmd() to try to abort commands and
2119 reset tape drives. In that case, sendcmd may pick up
2120 completions of commands that were sent to logical drives
2121 through the block i/o system, or cciss ioctls completing, etc.
2122 In that case, we need to save those completions for later
2123 processing by the interrupt handler.
2126 #ifdef CONFIG_CISS_SCSI_TAPE
2127 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2129 /* If it's not the scsi tape stuff doing error handling, (abort */
2130 /* or reset) then we don't expect anything weird. */
2131 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2132 #endif
2133 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2134 "Invalid command list address returned! (%lx)\n",
2135 ctlr, complete);
2136 /* not much we can do. */
2137 #ifdef CONFIG_CISS_SCSI_TAPE
2138 return 1;
2141 /* We've sent down an abort or reset, but something else
2142 has completed */
2143 if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
2144 /* Uh oh. No room to save it for later... */
2145 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2146 "reject list overflow, command lost!\n", ctlr);
2147 return 1;
2149 /* Save it for later */
2150 srl->complete[srl->ncompletions] = complete;
2151 srl->ncompletions++;
2152 #endif
2153 return 0;
2157 * Send a command to the controller, and wait for it to complete.
2158 * Only used at init time.
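 * Note: this routine polls the completion FIFO (see pollcomplete()) with the
 * board's interrupts masked, which is why it can be used before request_irq()
 * has been done and from the SCSI tape error-handling path; once the driver
 * is up, sendcmd_withirq() is the sleeping, interrupt-driven counterpart.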
2160 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2161 1: address logical volume log_unit,
2162 2: periph device address is scsi3addr */
2163 unsigned int log_unit,
2164 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2166 CommandList_struct *c;
2167 int i;
2168 unsigned long complete;
2169 ctlr_info_t *info_p = hba[ctlr];
2170 u64bit buff_dma_handle;
2171 int status, done = 0;
2173 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2174 printk(KERN_WARNING "cciss: unable to get memory");
2175 return IO_ERROR;
2177 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2178 log_unit, page_code, scsi3addr, cmd_type);
2179 if (status != IO_OK) {
2180 cmd_free(info_p, c, 1);
2181 return status;
2183 resend_cmd1:
2185 * Disable interrupt
2187 #ifdef CCISS_DEBUG
2188 printk(KERN_DEBUG "cciss: turning intr off\n");
2189 #endif /* CCISS_DEBUG */
2190 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2192 /* Make sure there is room in the command FIFO */
2193 /* Actually it should be completely empty at this time */
2194 /* unless we are in here doing error handling for the scsi */
2195 /* tape side of the driver. */
2196 for (i = 200000; i > 0; i--) {
2197 /* if fifo isn't full go */
2198 if (!(info_p->access.fifo_full(info_p))) {
2200 break;
2202 udelay(10);
2203 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2204 " waiting!\n", ctlr);
2207 * Send the cmd
2209 info_p->access.submit_command(info_p, c);
2210 done = 0;
2211 do {
2212 complete = pollcomplete(ctlr);
2214 #ifdef CCISS_DEBUG
2215 printk(KERN_DEBUG "cciss: command completed\n");
2216 #endif /* CCISS_DEBUG */
2218 if (complete == 1) {
2219 printk(KERN_WARNING
2220 "cciss cciss%d: SendCmd timed out, "
2221 "No command list address returned!\n", ctlr);
2222 status = IO_ERROR;
2223 done = 1;
2224 break;
2227 /* This will need to change for direct lookup completions */
2228 if ((complete & CISS_ERROR_BIT)
2229 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2230 /* if data overrun or underrun on a Report command,
2231 ignore it
2233 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2234 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2235 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2236 ((c->err_info->CommandStatus ==
2237 CMD_DATA_OVERRUN) ||
2238 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2239 )) {
2240 complete = c->busaddr;
2241 } else {
2242 if (c->err_info->CommandStatus ==
2243 CMD_UNSOLICITED_ABORT) {
2244 printk(KERN_WARNING "cciss%d: "
2245 "unsolicited abort %p\n",
2246 ctlr, c);
2247 if (c->retry_count < MAX_CMD_RETRIES) {
2248 printk(KERN_WARNING
2249 "cciss%d: retrying %p\n",
2250 ctlr, c);
2251 c->retry_count++;
2252 /* erase the old error */
2253 /* information */
2254 memset(c->err_info, 0,
2255 sizeof
2256 (ErrorInfo_struct));
2257 goto resend_cmd1;
2258 } else {
2259 printk(KERN_WARNING
2260 "cciss%d: retried %p too "
2261 "many times\n", ctlr, c);
2262 status = IO_ERROR;
2263 goto cleanup1;
2265 } else if (c->err_info->CommandStatus ==
2266 CMD_UNABORTABLE) {
2267 printk(KERN_WARNING
2268 "cciss%d: command could not be aborted.\n",
2269 ctlr);
2270 status = IO_ERROR;
2271 goto cleanup1;
2273 printk(KERN_WARNING "cciss cciss%d: sendcmd"
2274 " Error %x\n", ctlr,
2275 c->err_info->CommandStatus);
2276 printk(KERN_WARNING "cciss cciss%d: sendcmd"
2277 " offensive info\n"
2278 " size %x\n num %x value %x\n",
2279 ctlr,
2280 c->err_info->MoreErrInfo.Invalid_Cmd.
2281 offense_size,
2282 c->err_info->MoreErrInfo.Invalid_Cmd.
2283 offense_num,
2284 c->err_info->MoreErrInfo.Invalid_Cmd.
2285 offense_value);
2286 status = IO_ERROR;
2287 goto cleanup1;
2290 /* This will need changing for direct lookup completions */
2291 if (complete != c->busaddr) {
2292 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2293 BUG(); /* we are pretty much hosed if we get here. */
2295 continue;
2296 } else
2297 done = 1;
2298 } while (!done);
2300 cleanup1:
2301 /* unlock the data buffer from DMA */
2302 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2303 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2304 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2305 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2306 #ifdef CONFIG_CISS_SCSI_TAPE
2307 /* if we saved some commands for later, process them now. */
2308 if (info_p->scsi_rejects.ncompletions > 0)
2309 do_cciss_intr(0, info_p);
2310 #endif
2311 cmd_free(info_p, c, 1);
2312 return status;
2316 * Map (physical) PCI mem into (virtual) kernel space
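 * Note: the mapping is done from the containing page boundary (ioremap maps
 * whole pages) and the offset within the page is then added back onto the
 * returned virtual address.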
2318 static void __iomem *remap_pci_mem(ulong base, ulong size)
2320 ulong page_base = ((ulong) base) & PAGE_MASK;
2321 ulong page_offs = ((ulong) base) - page_base;
2322 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2324 return page_remapped ? (page_remapped + page_offs) : NULL;
2328 * Takes jobs off the request queue and sends them to the hardware, then puts
2329 * them on the completion queue to wait for completion.
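 * Note: a command therefore sits on h->reqQ until the hardware FIFO accepts
 * it, and on h->cmpQ from then until do_cciss_intr() matches its completion
 * tag and takes it back off.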
2331 static void start_io(ctlr_info_t *h)
2333 CommandList_struct *c;
2335 while ((c = h->reqQ) != NULL) {
2336 /* can't do anything if fifo is full */
2337 if ((h->access.fifo_full(h))) {
2338 printk(KERN_WARNING "cciss: fifo full\n");
2339 break;
2342 /* Get the first entry from the Request Q */
2343 removeQ(&(h->reqQ), c);
2344 h->Qdepth--;
2346 /* Tell the controller execute command */
2347 h->access.submit_command(h, c);
2349 /* Put job onto the completed Q */
2350 addQ(&(h->cmpQ), c);
2354 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2355 /* Zeros out the error record and then resends the command back */
2356 /* to the controller */
2357 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2359 /* erase the old error information */
2360 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2362 /* add it to software queue and then send it to the controller */
2363 addQ(&(h->reqQ), c);
2364 h->Qdepth++;
2365 if (h->Qdepth > h->maxQsinceinit)
2366 h->maxQsinceinit = h->Qdepth;
2368 start_io(h);
2371 /* checks the status of the job and calls complete buffers to mark all
2372 * buffers for the completed job. Note that this function does not need
2373 * to hold the hba/queue lock.
2375 static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2376 int timeout)
2378 int status = 1;
2379 int retry_cmd = 0;
2381 if (timeout)
2382 status = 0;
2384 if (cmd->err_info->CommandStatus != 0) { /* an error has occurred */
2385 switch (cmd->err_info->CommandStatus) {
2386 unsigned char sense_key;
2387 case CMD_TARGET_STATUS:
2388 status = 0;
2390 if (cmd->err_info->ScsiStatus == 0x02) {
2391 printk(KERN_WARNING "cciss: cmd %p "
2392 "has CHECK CONDITION "
2393 " byte 2 = 0x%x\n", cmd,
2394 cmd->err_info->SenseInfo[2]
2396 /* check the sense key */
2397 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2398 /* no status or recovered error */
2399 if ((sense_key == 0x0) || (sense_key == 0x1)) {
2400 status = 1;
2402 } else {
2403 printk(KERN_WARNING "cciss: cmd %p "
2404 "has SCSI Status 0x%x\n",
2405 cmd, cmd->err_info->ScsiStatus);
2407 break;
2408 case CMD_DATA_UNDERRUN:
2409 printk(KERN_WARNING "cciss: cmd %p has"
2410 " completed with data underrun "
2411 "reported\n", cmd);
2412 break;
2413 case CMD_DATA_OVERRUN:
2414 printk(KERN_WARNING "cciss: cmd %p has"
2415 " completed with data overrun "
2416 "reported\n", cmd);
2417 break;
2418 case CMD_INVALID:
2419 printk(KERN_WARNING "cciss: cmd %p is "
2420 "reported invalid\n", cmd);
2421 status = 0;
2422 break;
2423 case CMD_PROTOCOL_ERR:
2424 printk(KERN_WARNING "cciss: cmd %p has "
2425 "protocol error \n", cmd);
2426 status = 0;
2427 break;
2428 case CMD_HARDWARE_ERR:
2429 printk(KERN_WARNING "cciss: cmd %p had "
2430 " hardware error\n", cmd);
2431 status = 0;
2432 break;
2433 case CMD_CONNECTION_LOST:
2434 printk(KERN_WARNING "cciss: cmd %p had "
2435 "connection lost\n", cmd);
2436 status = 0;
2437 break;
2438 case CMD_ABORTED:
2439 printk(KERN_WARNING "cciss: cmd %p was "
2440 "aborted\n", cmd);
2441 status = 0;
2442 break;
2443 case CMD_ABORT_FAILED:
2444 printk(KERN_WARNING "cciss: cmd %p reports "
2445 "abort failed\n", cmd);
2446 status = 0;
2447 break;
2448 case CMD_UNSOLICITED_ABORT:
2449 printk(KERN_WARNING "cciss%d: unsolicited "
2450 "abort %p\n", h->ctlr, cmd);
2451 if (cmd->retry_count < MAX_CMD_RETRIES) {
2452 retry_cmd = 1;
2453 printk(KERN_WARNING
2454 "cciss%d: retrying %p\n", h->ctlr, cmd);
2455 cmd->retry_count++;
2456 } else
2457 printk(KERN_WARNING
2458 "cciss%d: %p retried too "
2459 "many times\n", h->ctlr, cmd);
2460 status = 0;
2461 break;
2462 case CMD_TIMEOUT:
2463 printk(KERN_WARNING "cciss: cmd %p timed out\n", cmd);
2464 status = 0;
2465 break;
2466 default:
2467 printk(KERN_WARNING "cciss: cmd %p returned "
2468 "unknown status %x\n", cmd,
2469 cmd->err_info->CommandStatus);
2470 status = 0;
2473 /* We need to return this command */
2474 if (retry_cmd) {
2475 resend_cciss_cmd(h, cmd);
2476 return;
2479 cmd->rq->completion_data = cmd;
2480 cmd->rq->errors = status;
2481 blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
2482 blk_complete_request(cmd->rq);
2486 * Get a request and submit it to the controller.
2488 static void do_cciss_request(request_queue_t *q)
2490 ctlr_info_t *h = q->queuedata;
2491 CommandList_struct *c;
2492 sector_t start_blk;
2493 int seg;
2494 struct request *creq;
2495 u64bit temp64;
2496 struct scatterlist tmp_sg[MAXSGENTRIES];
2497 drive_info_struct *drv;
2498 int i, dir;
2500 /* We call start_io here in case there is a command waiting on the
2501 * queue that has not been sent.
2503 if (blk_queue_plugged(q))
2504 goto startio;
2506 queue:
2507 creq = elv_next_request(q);
2508 if (!creq)
2509 goto startio;
2511 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2513 if ((c = cmd_alloc(h, 1)) == NULL)
2514 goto full;
2516 blkdev_dequeue_request(creq);
2518 spin_unlock_irq(q->queue_lock);
2520 c->cmd_type = CMD_RWREQ;
2521 c->rq = creq;
2523 /* fill in the request */
2524 drv = creq->rq_disk->private_data;
2525 c->Header.ReplyQueue = 0; // unused in simple mode
2526 /* got command from pool, so use the command block index instead */
2527 /* for direct lookups. */
2528 /* The first 2 bits are reserved for controller error reporting. */
2529 c->Header.Tag.lower = (c->cmdindex << 3);
2530 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
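/* Note: with cmdindex 9, for example, the tag becomes (9 << 3) | 0x04 = 0x4c;
 * do_cciss_intr() tests bit 2 to recognise a direct-lookup tag and recovers
 * the pool index with (tag >> 3). */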
2531 c->Header.LUN.LogDev.VolId = drv->LunID;
2532 c->Header.LUN.LogDev.Mode = 1;
2533 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2534 c->Request.Type.Type = TYPE_CMD; // It is a command.
2535 c->Request.Type.Attribute = ATTR_SIMPLE;
2536 c->Request.Type.Direction =
2537 (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
2538 c->Request.Timeout = 0; // Don't time out
2539 c->Request.CDB[0] =
2540 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2541 start_blk = creq->sector;
2542 #ifdef CCISS_DEBUG
2543 printk(KERN_DEBUG "cciss: sector=%d nr_sectors=%d\n", (int)creq->sector,
2544 (int)creq->nr_sectors);
2545 #endif /* CCISS_DEBUG */
2547 seg = blk_rq_map_sg(q, creq, tmp_sg);
2549 /* get the DMA records for the setup */
2550 if (c->Request.Type.Direction == XFER_READ)
2551 dir = PCI_DMA_FROMDEVICE;
2552 else
2553 dir = PCI_DMA_TODEVICE;
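/* Note: each scatter/gather element gets the DMA-mapped bus address of one
 * segment, split into 32-bit lower/upper halves, plus its length; Ext = 0
 * selects the plain (non-chained) SG entry format. */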
2555 for (i = 0; i < seg; i++) {
2556 c->SG[i].Len = tmp_sg[i].length;
2557 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2558 tmp_sg[i].offset,
2559 tmp_sg[i].length, dir);
2560 c->SG[i].Addr.lower = temp64.val32.lower;
2561 c->SG[i].Addr.upper = temp64.val32.upper;
2562 c->SG[i].Ext = 0; // we are not chaining
2564 /* track how many SG entries we are using */
2565 if (seg > h->maxSG)
2566 h->maxSG = seg;
2568 #ifdef CCISS_DEBUG
2569 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
2570 (int)creq->nr_sectors, seg);
2571 #endif /* CCISS_DEBUG */
2573 c->Header.SGList = c->Header.SGTotal = seg;
2574 if(h->cciss_read == CCISS_READ_10) {
2575 c->Request.CDB[1] = 0;
2576 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2577 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2578 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2579 c->Request.CDB[5] = start_blk & 0xff;
2580 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2581 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2582 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2583 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2584 } else {
2585 c->Request.CDBLen = 16;
2586 c->Request.CDB[1] = 0;
2587 c->Request.CDB[2] = (start_blk >> 56) & 0xff; //MSB
2588 c->Request.CDB[3] = (start_blk >> 48) & 0xff;
2589 c->Request.CDB[4] = (start_blk >> 40) & 0xff;
2590 c->Request.CDB[5] = (start_blk >> 32) & 0xff;
2591 c->Request.CDB[6] = (start_blk >> 24) & 0xff;
2592 c->Request.CDB[7] = (start_blk >> 16) & 0xff;
2593 c->Request.CDB[8] = (start_blk >> 8) & 0xff;
2594 c->Request.CDB[9] = start_blk & 0xff;
2595 c->Request.CDB[10] = (creq->nr_sectors >> 24) & 0xff;
2596 c->Request.CDB[11] = (creq->nr_sectors >> 16) & 0xff;
2597 c->Request.CDB[12] = (creq->nr_sectors >> 8) & 0xff;
2598 c->Request.CDB[13] = creq->nr_sectors & 0xff;
2599 c->Request.CDB[14] = c->Request.CDB[15] = 0;
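/* Note: both branches above pack the CDB by hand in big-endian (SCSI) byte
 * order: READ/WRITE(10) carries a 32-bit starting LBA and a 16-bit block
 * count, while READ/WRITE(16) carries a 64-bit LBA and a 32-bit count, which
 * is why the 16-byte form is used for volumes beyond 32-bit addressing. */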
2602 spin_lock_irq(q->queue_lock);
2604 addQ(&(h->reqQ), c);
2605 h->Qdepth++;
2606 if (h->Qdepth > h->maxQsinceinit)
2607 h->maxQsinceinit = h->Qdepth;
2609 goto queue;
2610 full:
2611 blk_stop_queue(q);
2612 startio:
2613 /* We will already have the driver lock here, so there is no need
2614 * to lock it.
2616 start_io(h);
2619 static inline unsigned long get_next_completion(ctlr_info_t *h)
2621 #ifdef CONFIG_CISS_SCSI_TAPE
2622 /* Any rejects from sendcmd() lying around? Process them first */
2623 if (h->scsi_rejects.ncompletions == 0)
2624 return h->access.command_completed(h);
2625 else {
2626 struct sendcmd_reject_list *srl;
2627 int n;
2628 srl = &h->scsi_rejects;
2629 n = --srl->ncompletions;
2630 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2631 printk("p");
2632 return srl->complete[n];
2634 #else
2635 return h->access.command_completed(h);
2636 #endif
2639 static inline int interrupt_pending(ctlr_info_t *h)
2641 #ifdef CONFIG_CISS_SCSI_TAPE
2642 return (h->access.intr_pending(h)
2643 || (h->scsi_rejects.ncompletions > 0));
2644 #else
2645 return h->access.intr_pending(h);
2646 #endif
2649 static inline long interrupt_not_for_us(ctlr_info_t *h)
2651 #ifdef CONFIG_CISS_SCSI_TAPE
2652 return (((h->access.intr_pending(h) == 0) ||
2653 (h->interrupts_enabled == 0))
2654 && (h->scsi_rejects.ncompletions == 0));
2655 #else
2656 return (((h->access.intr_pending(h) == 0) ||
2657 (h->interrupts_enabled == 0)));
2658 #endif
2661 static irqreturn_t do_cciss_intr(int irq, void *dev_id)
2663 ctlr_info_t *h = dev_id;
2664 CommandList_struct *c;
2665 unsigned long flags;
2666 __u32 a, a1, a2;
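/* Note: the completion loop below handles two tag formats: direct-lookup tags
 * (bit 2 set) carry the command pool index in bits 3 and up, while old-style
 * tags carry the command's bus address and are matched against h->cmpQ. */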
2668 if (interrupt_not_for_us(h))
2669 return IRQ_NONE;
2671 * If there are completed commands in the completion queue,
2672 * we had better do something about it.
2674 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2675 while (interrupt_pending(h)) {
2676 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
2677 a1 = a;
2678 if ((a & 0x04)) {
2679 a2 = (a >> 3);
2680 if (a2 >= h->nr_cmds) {
2681 printk(KERN_WARNING
2682 "cciss: controller cciss%d failed, stopping.\n",
2683 h->ctlr);
2684 fail_all_cmds(h->ctlr);
2685 return IRQ_HANDLED;
2688 c = h->cmd_pool + a2;
2689 a = c->busaddr;
2691 } else {
2692 a &= ~3;
2693 if ((c = h->cmpQ) == NULL) {
2694 printk(KERN_WARNING
2695 "cciss: Completion of %08x ignored\n",
2696 a1);
2697 continue;
2699 while (c->busaddr != a) {
2700 c = c->next;
2701 if (c == h->cmpQ)
2702 break;
2706 * If we've found the command, take it off the
2707 * completion Q and free it
2709 if (c->busaddr == a) {
2710 removeQ(&h->cmpQ, c);
2711 if (c->cmd_type == CMD_RWREQ) {
2712 complete_command(h, c, 0);
2713 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2714 complete(c->waiting);
2716 # ifdef CONFIG_CISS_SCSI_TAPE
2717 else if (c->cmd_type == CMD_SCSI)
2718 complete_scsi_command(c, 0, a1);
2719 # endif
2720 continue;
2725 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2726 return IRQ_HANDLED;
2730 * We cannot read the structure directly; for portability we must use
2731 * the I/O accessor functions.
2732 * This is for debug only.
2734 #ifdef CCISS_DEBUG
2735 static void print_cfg_table(CfgTable_struct *tb)
2737 int i;
2738 char temp_name[17];
2740 printk("Controller Configuration information\n");
2741 printk("------------------------------------\n");
2742 for (i = 0; i < 4; i++)
2743 temp_name[i] = readb(&(tb->Signature[i]));
2744 temp_name[4] = '\0';
2745 printk(" Signature = %s\n", temp_name);
2746 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2747 printk(" Transport methods supported = 0x%x\n",
2748 readl(&(tb->TransportSupport)));
2749 printk(" Transport methods active = 0x%x\n",
2750 readl(&(tb->TransportActive)));
2751 printk(" Requested transport Method = 0x%x\n",
2752 readl(&(tb->HostWrite.TransportRequest)));
2753 printk(" Coalesce Interrupt Delay = 0x%x\n",
2754 readl(&(tb->HostWrite.CoalIntDelay)));
2755 printk(" Coalesce Interrupt Count = 0x%x\n",
2756 readl(&(tb->HostWrite.CoalIntCount)));
2757 printk(" Max outstanding commands = 0x%x\n",
2758 readl(&(tb->CmdsOutMax)));
2759 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
2760 for (i = 0; i < 16; i++)
2761 temp_name[i] = readb(&(tb->ServerName[i]));
2762 temp_name[16] = '\0';
2763 printk(" Server Name = %s\n", temp_name);
2764 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
2766 #endif /* CCISS_DEBUG */
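/*
 * Note on the helper below: it translates a BAR offset in PCI config space
 * (PCI_BASE_ADDRESS_0 + n) into a pci resource index by walking the BARs,
 * counting 4 bytes for I/O and 32-bit memory BARs and 8 bytes for 64-bit
 * memory BARs, which occupy two consecutive base address registers.
 */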
2768 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
2770 int i, offset, mem_type, bar_type;
2771 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2772 return 0;
2773 offset = 0;
2774 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2775 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
2776 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2777 offset += 4;
2778 else {
2779 mem_type = pci_resource_flags(pdev, i) &
2780 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2781 switch (mem_type) {
2782 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2783 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2784 offset += 4; /* 32 bit */
2785 break;
2786 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2787 offset += 8;
2788 break;
2789 default: /* reserved in PCI 2.2 */
2790 printk(KERN_WARNING
2791 "Base address is invalid\n");
2792 return -1;
2793 break;
2796 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2797 return i + 1;
2799 return -1;
2802 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
2803 * controllers that are capable. If not, we use IO-APIC mode.
2806 static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
2807 struct pci_dev *pdev, __u32 board_id)
2809 #ifdef CONFIG_PCI_MSI
2810 int err;
2811 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
2812 {0, 2}, {0, 3}
2815 /* Some boards advertise MSI but don't really support it */
2816 if ((board_id == 0x40700E11) ||
2817 (board_id == 0x40800E11) ||
2818 (board_id == 0x40820E11) || (board_id == 0x40830E11))
2819 goto default_int_mode;
2821 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2822 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2823 if (!err) {
2824 c->intr[0] = cciss_msix_entries[0].vector;
2825 c->intr[1] = cciss_msix_entries[1].vector;
2826 c->intr[2] = cciss_msix_entries[2].vector;
2827 c->intr[3] = cciss_msix_entries[3].vector;
2828 c->msix_vector = 1;
2829 return;
2831 if (err > 0) {
2832 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2833 "available\n", err);
2834 } else {
2835 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2836 err);
2839 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2840 if (!pci_enable_msi(pdev)) {
2841 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2842 c->msi_vector = 1;
2843 return;
2844 } else {
2845 printk(KERN_WARNING "cciss: MSI init failed\n");
2846 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2847 return;
2850 default_int_mode:
2851 #endif /* CONFIG_PCI_MSI */
2852 /* if we get here we're going to use the default interrupt mode */
2853 c->intr[SIMPLE_MODE_INT] = pdev->irq;
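/* Note: whichever mode was chosen, cciss_init_one() requests only the vector
 * stored in intr[SIMPLE_MODE_INT]; the MSI-X path above stashes all four
 * vectors, but only one is used while the board runs in simple mode. */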
2854 return;
2857 static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2859 ushort subsystem_vendor_id, subsystem_device_id, command;
2860 __u32 board_id, scratchpad = 0;
2861 __u64 cfg_offset;
2862 __u32 cfg_base_addr;
2863 __u64 cfg_base_addr_index;
2864 int i, err;
2866 /* check to see if controller has been disabled */
2867 /* BEFORE trying to enable it */
2868 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
2869 if (!(command & 0x02)) {
2870 printk(KERN_WARNING
2871 "cciss: controller appears to be disabled\n");
2872 return -ENODEV;
2875 err = pci_enable_device(pdev);
2876 if (err) {
2877 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2878 return err;
2881 err = pci_request_regions(pdev, "cciss");
2882 if (err) {
2883 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
2884 "aborting\n");
2885 goto err_out_disable_pdev;
2888 subsystem_vendor_id = pdev->subsystem_vendor;
2889 subsystem_device_id = pdev->subsystem_device;
2890 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2891 subsystem_vendor_id);
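/* Note: e.g. subsystem vendor 0x0E11 with subsystem device 0x4070 gives
 * board_id 0x40700E11, one of the IDs special-cased in cciss_interrupt_mode(). */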
2893 #ifdef CCISS_DEBUG
2894 printk("command = %x\n", command);
2895 printk("irq = %x\n", pdev->irq);
2896 printk("board_id = %x\n", board_id);
2897 #endif /* CCISS_DEBUG */
2899 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
2900 * else we use the IO-APIC interrupt assigned to us by system ROM.
2902 cciss_interrupt_mode(c, pdev, board_id);
2905 * Memory base addr is the first addr; the second points to the config
2906 * table.
2909 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2910 #ifdef CCISS_DEBUG
2911 printk("address 0 = %x\n", c->paddr);
2912 #endif /* CCISS_DEBUG */
2913 c->vaddr = remap_pci_mem(c->paddr, 200);
2915 /* Wait for the board to become ready. (PCI hotplug needs this.)
2916 * We poll for up to 120 secs, once per 100ms. */
2917 for (i = 0; i < 1200; i++) {
2918 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2919 if (scratchpad == CCISS_FIRMWARE_READY)
2920 break;
2921 set_current_state(TASK_INTERRUPTIBLE);
2922 schedule_timeout(HZ / 10); /* wait 100ms */
2924 if (scratchpad != CCISS_FIRMWARE_READY) {
2925 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2926 err = -ENODEV;
2927 goto err_out_free_res;
2930 /* get the address index number */
2931 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2932 cfg_base_addr &= (__u32) 0x0000ffff;
2933 #ifdef CCISS_DEBUG
2934 printk("cfg base address = %x\n", cfg_base_addr);
2935 #endif /* CCISS_DEBUG */
2936 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
2937 #ifdef CCISS_DEBUG
2938 printk("cfg base address index = %x\n", cfg_base_addr_index);
2939 #endif /* CCISS_DEBUG */
2940 if (cfg_base_addr_index == -1) {
2941 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2942 err = -ENODEV;
2943 goto err_out_free_res;
2946 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2947 #ifdef CCISS_DEBUG
2948 printk("cfg offset = %x\n", cfg_offset);
2949 #endif /* CCISS_DEBUG */
2950 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2951 cfg_base_addr_index) +
2952 cfg_offset, sizeof(CfgTable_struct));
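/* Note: the controller publishes where its config table lives: the value at
 * SA5_CTCFG_OFFSET is the config-space address of the BAR holding it
 * (translated into a resource index by find_PCI_BAR_index()) and the value at
 * SA5_CTMEM_OFFSET is the byte offset within that BAR, so the table gets its
 * own ioremap separate from the register window mapped above. */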
2953 c->board_id = board_id;
2955 #ifdef CCISS_DEBUG
2956 print_cfg_table(c->cfgtable);
2957 #endif /* CCISS_DEBUG */
2959 for (i = 0; i < ARRAY_SIZE(products); i++) {
2960 if (board_id == products[i].board_id) {
2961 c->product_name = products[i].product_name;
2962 c->access = *(products[i].access);
2963 c->nr_cmds = products[i].nr_cmds;
2964 break;
2967 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
2968 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2969 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2970 (readb(&c->cfgtable->Signature[3]) != 'S')) {
2971 printk(KERN_WARNING "cciss: Does not appear to be a valid CISS config table\n");
2972 err = -ENODEV;
2973 goto err_out_free_res;
2975 /* We didn't find the controller in our list. We know the
2976 * signature is valid. If it's an HP device let's try to
2977 * bind to the device and fire it up. Otherwise we bail.
2979 if (i == ARRAY_SIZE(products)) {
2980 if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
2981 c->product_name = products[i-1].product_name;
2982 c->access = *(products[i-1].access);
2983 c->nr_cmds = products[i-1].nr_cmds;
2984 printk(KERN_WARNING "cciss: This is an unknown "
2985 "Smart Array controller.\n"
2986 "cciss: Please update to the latest driver "
2987 "available from www.hp.com.\n");
2988 } else {
2989 printk(KERN_WARNING "cciss: Sorry, I don't know how"
2990 " to access the Smart Array controller %08lx\n"
2991 , (unsigned long)board_id);
2992 err = -ENODEV;
2993 goto err_out_free_res;
2996 #ifdef CONFIG_X86
2998 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
2999 __u32 prefetch;
3000 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
3001 prefetch |= 0x100;
3002 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
3004 #endif
3006 #ifdef CCISS_DEBUG
3007 printk("Trying to put board into Simple mode\n");
3008 #endif /* CCISS_DEBUG */
3009 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
3010 /* Update the field, and then ring the doorbell */
3011 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
3012 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
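/* Note: this is the CISS transport handshake: write the requested method into
 * HostWrite.TransportRequest, ring the doorbell with CFGTBL_ChangeReq, then
 * wait below for the controller to clear that doorbell bit and advertise the
 * method in TransportActive. */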
3014 /* under certain very rare conditions, this can take a while.
3015 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3016 * as we enter this code.) */
3017 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3018 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3019 break;
3020 /* delay and try again */
3021 set_current_state(TASK_INTERRUPTIBLE);
3022 schedule_timeout(10);
3025 #ifdef CCISS_DEBUG
3026 printk(KERN_DEBUG "I counter got to %d %x\n", i,
3027 readl(c->vaddr + SA5_DOORBELL));
3028 #endif /* CCISS_DEBUG */
3029 #ifdef CCISS_DEBUG
3030 print_cfg_table(c->cfgtable);
3031 #endif /* CCISS_DEBUG */
3033 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3034 printk(KERN_WARNING "cciss: unable to get board into"
3035 " simple mode\n");
3036 err = -ENODEV;
3037 goto err_out_free_res;
3039 return 0;
3041 err_out_free_res:
3042 pci_release_regions(pdev);
3044 err_out_disable_pdev:
3045 pci_disable_device(pdev);
3046 return err;
3050 * Gets information about the local volumes attached to the controller.
3052 static void cciss_getgeometry(int cntl_num)
3054 ReportLunData_struct *ld_buff;
3055 InquiryData_struct *inq_buff;
3056 int return_code;
3057 int i;
3058 int listlength = 0;
3059 __u32 lunid = 0;
3060 int block_size;
3061 sector_t total_size;
3063 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
3064 if (ld_buff == NULL) {
3065 printk(KERN_ERR "cciss: out of memory\n");
3066 return;
3068 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
3069 if (inq_buff == NULL) {
3070 printk(KERN_ERR "cciss: out of memory\n");
3071 kfree(ld_buff);
3072 return;
3074 /* Get the firmware version */
3075 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
3076 sizeof(InquiryData_struct), 0, 0, 0, NULL,
3077 TYPE_CMD);
3078 if (return_code == IO_OK) {
3079 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
3080 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
3081 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
3082 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
3083 } else { /* send command failed */
3085 printk(KERN_WARNING "cciss: unable to determine firmware"
3086 " version of controller\n");
3088 /* Get the number of logical volumes */
3089 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
3090 sizeof(ReportLunData_struct), 0, 0, 0, NULL,
3091 TYPE_CMD);
3093 if (return_code == IO_OK) {
3094 #ifdef CCISS_DEBUG
3095 printk("LUN Data\n--------------------------\n");
3096 #endif /* CCISS_DEBUG */
3098 listlength |=
3099 (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
3100 listlength |=
3101 (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
3102 listlength |=
3103 (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
3104 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
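/* Note: the shifts above are an open-coded big-endian decode (effectively
 * be32_to_cpu()) of the 4-byte LUN list length, which the controller reports
 * in bytes, hence the divide by 8 below to get the number of entries. */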
3105 } else { /* reading number of logical volumes failed */
3107 printk(KERN_WARNING "cciss: report logical volume"
3108 " command failed\n");
3109 listlength = 0;
3111 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
3112 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
3113 printk(KERN_ERR
3114 "cciss: only %d logical volumes are supported\n",
3115 CISS_MAX_LUN);
3116 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3118 #ifdef CCISS_DEBUG
3119 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
3120 ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
3121 ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
3122 hba[cntl_num]->num_luns);
3123 #endif /* CCISS_DEBUG */
3125 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
3126 for (i = 0; i < CISS_MAX_LUN; i++) {
3127 if (i < hba[cntl_num]->num_luns) {
3128 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3129 << 24;
3130 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3131 << 16;
3132 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3133 << 8;
3134 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3136 hba[cntl_num]->drv[i].LunID = lunid;
3138 #ifdef CCISS_DEBUG
3139 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3140 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3141 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3142 hba[cntl_num]->drv[i].LunID);
3143 #endif /* CCISS_DEBUG */
3145 /* testing to see if 16-byte CDBs are already being used */
3146 if(hba[cntl_num]->cciss_read == CCISS_READ_16) {
3147 cciss_read_capacity_16(cntl_num, i, 0,
3148 &total_size, &block_size);
3149 goto geo_inq;
3151 cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);
3153 /* total_size = last LBA + 1 */
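/* Note: the 10-byte READ CAPACITY reply can only express a 32-bit last LBA;
 * a larger volume is reported as an all-ones last LBA, so the last-LBA-plus-1
 * value computed by cciss_read_capacity() wraps to 0.  That is the cue to
 * re-read the capacity with the 16-byte variant and switch the driver to
 * 16-byte READ/WRITE CDBs. */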
3154 if(total_size == (__u32) 0) {
3155 cciss_read_capacity_16(cntl_num, i, 0,
3156 &total_size, &block_size);
3157 hba[cntl_num]->cciss_read = CCISS_READ_16;
3158 hba[cntl_num]->cciss_write = CCISS_WRITE_16;
3159 } else {
3160 hba[cntl_num]->cciss_read = CCISS_READ_10;
3161 hba[cntl_num]->cciss_write = CCISS_WRITE_10;
3163 geo_inq:
3164 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3165 block_size, inq_buff,
3166 &hba[cntl_num]->drv[i]);
3167 } else {
3168 /* initialize raid_level to indicate a free space */
3169 hba[cntl_num]->drv[i].raid_level = -1;
3172 kfree(ld_buff);
3173 kfree(inq_buff);
3176 /* Function to find the first free slot in our hba[] array */
3177 /* Returns -1 if no free entries are left. */
3178 static int alloc_cciss_hba(void)
3180 struct gendisk *disk[NWD];
3181 int i, n;
3182 for (n = 0; n < NWD; n++) {
3183 disk[n] = alloc_disk(1 << NWD_SHIFT);
3184 if (!disk[n])
3185 goto out;
3188 for (i = 0; i < MAX_CTLR; i++) {
3189 if (!hba[i]) {
3190 ctlr_info_t *p;
3191 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3192 if (!p)
3193 goto Enomem;
3194 for (n = 0; n < NWD; n++)
3195 p->gendisk[n] = disk[n];
3196 hba[i] = p;
3197 return i;
3200 printk(KERN_WARNING "cciss: This driver supports a maximum"
3201 " of %d controllers.\n", MAX_CTLR);
3202 goto out;
3203 Enomem:
3204 printk(KERN_ERR "cciss: out of memory.\n");
3205 out:
3206 while (n--)
3207 put_disk(disk[n]);
3208 return -1;
3211 static void free_hba(int i)
3213 ctlr_info_t *p = hba[i];
3214 int n;
3216 hba[i] = NULL;
3217 for (n = 0; n < NWD; n++)
3218 put_disk(p->gendisk[n]);
3219 kfree(p);
3223 * This is it. Find all the controllers and register them. I really hate
3224 * stealing all these major device numbers.
3225 * returns the number of block devices registered.
3227 static int __devinit cciss_init_one(struct pci_dev *pdev,
3228 const struct pci_device_id *ent)
3230 request_queue_t *q;
3231 int i;
3232 int j;
3233 int rc;
3234 int dac;
3236 i = alloc_cciss_hba();
3237 if (i < 0)
3238 return -1;
3240 hba[i]->busy_initializing = 1;
3242 if (cciss_pci_init(hba[i], pdev) != 0)
3243 goto clean1;
3245 sprintf(hba[i]->devname, "cciss%d", i);
3246 hba[i]->ctlr = i;
3247 hba[i]->pdev = pdev;
3249 /* configure PCI DMA stuff */
3250 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3251 dac = 1;
3252 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3253 dac = 0;
3254 else {
3255 printk(KERN_ERR "cciss: no suitable DMA available\n");
3256 goto clean1;
3260 * register with the major number, or get a dynamic major number
3261 * by passing 0 as the argument. This is done to support more
3262 * than 8 controllers.
3264 if (i < MAX_CTLR_ORIG)
3265 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3266 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3267 if (rc == -EBUSY || rc == -EINVAL) {
3268 printk(KERN_ERR
3269 "cciss: Unable to get major number %d for %s "
3270 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3271 goto clean1;
3272 } else {
3273 if (i >= MAX_CTLR_ORIG)
3274 hba[i]->major = rc;
3277 /* make sure the board interrupts are off */
3278 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3279 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3280 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
3281 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3282 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3283 goto clean2;
3286 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3287 hba[i]->devname, pdev->device, pci_name(pdev),
3288 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
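/* Note: the allocations below provide one bit per command slot (rounded up to
 * whole unsigned longs) plus nr_cmds command blocks and matching error records
 * in DMA-coherent memory; the bitmap is what cmd_alloc()/cmd_free() use to
 * hand out and reclaim entries of cmd_pool. */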
3290 hba[i]->cmd_pool_bits =
3291 kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG -
3292 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3293 hba[i]->cmd_pool = (CommandList_struct *)
3294 pci_alloc_consistent(hba[i]->pdev,
3295 hba[i]->nr_cmds * sizeof(CommandList_struct),
3296 &(hba[i]->cmd_pool_dhandle));
3297 hba[i]->errinfo_pool = (ErrorInfo_struct *)
3298 pci_alloc_consistent(hba[i]->pdev,
3299 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3300 &(hba[i]->errinfo_pool_dhandle));
3301 if ((hba[i]->cmd_pool_bits == NULL)
3302 || (hba[i]->cmd_pool == NULL)
3303 || (hba[i]->errinfo_pool == NULL)) {
3304 printk(KERN_ERR "cciss: out of memory");
3305 goto clean4;
3307 #ifdef CONFIG_CISS_SCSI_TAPE
3308 hba[i]->scsi_rejects.complete =
3309 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3310 (hba[i]->nr_cmds + 5), GFP_KERNEL);
3311 if (hba[i]->scsi_rejects.complete == NULL) {
3312 printk(KERN_ERR "cciss: out of memory");
3313 goto clean4;
3315 #endif
3316 spin_lock_init(&hba[i]->lock);
3318 /* Initialize the pdev driver private data,
3319 having it point to hba[i]. */
3320 pci_set_drvdata(pdev, hba[i]);
3321 /* command and error info recs zeroed out before
3322 they are used */
3323 memset(hba[i]->cmd_pool_bits, 0,
3324 ((hba[i]->nr_cmds + BITS_PER_LONG -
3325 1) / BITS_PER_LONG) * sizeof(unsigned long));
3327 #ifdef CCISS_DEBUG
3328 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
3329 #endif /* CCISS_DEBUG */
3331 cciss_getgeometry(i);
3333 cciss_scsi_setup(i);
3335 /* Turn the interrupts on so we can service requests */
3336 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3338 cciss_procinit(i);
3339 hba[i]->busy_initializing = 0;
3341 for (j = 0; j < NWD; j++) { /* mfm */
3342 drive_info_struct *drv = &(hba[i]->drv[j]);
3343 struct gendisk *disk = hba[i]->gendisk[j];
3345 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3346 if (!q) {
3347 printk(KERN_ERR
3348 "cciss: unable to allocate queue for disk %d\n", j);
3350 break;
3352 drv->queue = q;
3354 q->backing_dev_info.ra_pages = READ_AHEAD;
3355 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3357 /* This is a hardware imposed limit. */
3358 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3360 /* This is a limit in the driver and could be eliminated. */
3361 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3363 blk_queue_max_sectors(q, 512);
3365 blk_queue_softirq_done(q, cciss_softirq_done);
3367 q->queuedata = hba[i];
3368 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3369 disk->major = hba[i]->major;
3370 disk->first_minor = j << NWD_SHIFT;
3371 disk->fops = &cciss_fops;
3372 disk->queue = q;
3373 disk->private_data = drv;
3374 disk->driverfs_dev = &pdev->dev;
3375 /* we must register the controller even if no disks exist */
3376 /* this is for the online array utilities */
3377 if (!drv->heads && j)
3378 continue;
3379 blk_queue_hardsect_size(q, drv->block_size);
3380 set_capacity(disk, drv->nr_blocks);
3381 add_disk(disk);
3384 return 1;
3386 clean4:
3387 #ifdef CONFIG_CISS_SCSI_TAPE
3388 kfree(hba[i]->scsi_rejects.complete);
3389 #endif
3390 kfree(hba[i]->cmd_pool_bits);
3391 if (hba[i]->cmd_pool)
3392 pci_free_consistent(hba[i]->pdev,
3393 hba[i]->nr_cmds * sizeof(CommandList_struct),
3394 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3395 if (hba[i]->errinfo_pool)
3396 pci_free_consistent(hba[i]->pdev,
3397 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3398 hba[i]->errinfo_pool,
3399 hba[i]->errinfo_pool_dhandle);
3400 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3401 clean2:
3402 unregister_blkdev(hba[i]->major, hba[i]->devname);
3403 clean1:
3404 hba[i]->busy_initializing = 0;
3405 free_hba(i);
3406 return -1;
3409 static void __devexit cciss_remove_one(struct pci_dev *pdev)
3411 ctlr_info_t *tmp_ptr;
3412 int i, j;
3413 char flush_buf[4];
3414 int return_code;
3416 if (pci_get_drvdata(pdev) == NULL) {
3417 printk(KERN_ERR "cciss: Unable to remove device \n");
3418 return;
3420 tmp_ptr = pci_get_drvdata(pdev);
3421 i = tmp_ptr->ctlr;
3422 if (hba[i] == NULL) {
3423 printk(KERN_ERR "cciss: device appears to "
3424 "already be removed \n");
3425 return;
3427 /* Turn board interrupts off and send the flush cache command */
3428 /* sendcmd will turn off interrupts and send the flush cache command
3429 * to write all data in the battery-backed cache out to the disks */
3430 memset(flush_buf, 0, 4);
3431 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3432 TYPE_CMD);
3433 if (return_code != IO_OK) {
3434 printk(KERN_WARNING "Error Flushing cache on controller %d\n", i);
3437 free_irq(hba[i]->intr[2], hba[i]);
3439 #ifdef CONFIG_PCI_MSI
3440 if (hba[i]->msix_vector)
3441 pci_disable_msix(hba[i]->pdev);
3442 else if (hba[i]->msi_vector)
3443 pci_disable_msi(hba[i]->pdev);
3444 #endif /* CONFIG_PCI_MSI */
3446 iounmap(hba[i]->vaddr);
3447 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3448 unregister_blkdev(hba[i]->major, hba[i]->devname);
3449 remove_proc_entry(hba[i]->devname, proc_cciss);
3451 /* remove it from the disk list */
3452 for (j = 0; j < NWD; j++) {
3453 struct gendisk *disk = hba[i]->gendisk[j];
3454 if (disk) {
3455 request_queue_t *q = disk->queue;
3457 if (disk->flags & GENHD_FL_UP)
3458 del_gendisk(disk);
3459 if (q)
3460 blk_cleanup_queue(q);
3464 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
3465 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3466 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3467 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3468 kfree(hba[i]->cmd_pool_bits);
3469 #ifdef CONFIG_CISS_SCSI_TAPE
3470 kfree(hba[i]->scsi_rejects.complete);
3471 #endif
3472 pci_release_regions(pdev);
3473 pci_disable_device(pdev);
3474 pci_set_drvdata(pdev, NULL);
3475 free_hba(i);
3478 static struct pci_driver cciss_pci_driver = {
3479 .name = "cciss",
3480 .probe = cciss_init_one,
3481 .remove = __devexit_p(cciss_remove_one),
3482 .id_table = cciss_pci_device_id, /* id_table */
3486 * This is it. Register the PCI driver information for the cards we control
3487 * the OS will call our registered routines when it finds one of our cards.
3489 static int __init cciss_init(void)
3491 printk(KERN_INFO DRIVER_NAME "\n");
3493 /* Register for our PCI devices */
3494 return pci_register_driver(&cciss_pci_driver);
3497 static void __exit cciss_cleanup(void)
3499 int i;
3501 pci_unregister_driver(&cciss_pci_driver);
3502 /* double check that all controller entries have been removed */
3503 for (i = 0; i < MAX_CTLR; i++) {
3504 if (hba[i] != NULL) {
3505 printk(KERN_WARNING "cciss: had to remove"
3506 " controller %d\n", i);
3507 cciss_remove_one(hba[i]->pdev);
3510 remove_proc_entry("cciss", proc_root_driver);
3513 static void fail_all_cmds(unsigned long ctlr)
3515 /* If we get here, the board is apparently dead. */
3516 ctlr_info_t *h = hba[ctlr];
3517 CommandList_struct *c;
3518 unsigned long flags;
3520 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3521 h->alive = 0; /* the controller apparently died... */
3523 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3525 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3527 /* move everything off the request queue onto the completed queue */
3528 while ((c = h->reqQ) != NULL) {
3529 removeQ(&(h->reqQ), c);
3530 h->Qdepth--;
3531 addQ(&(h->cmpQ), c);
3534 /* Now, fail everything on the completed queue with a HW error */
3535 while ((c = h->cmpQ) != NULL) {
3536 removeQ(&h->cmpQ, c);
3537 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3538 if (c->cmd_type == CMD_RWREQ) {
3539 complete_command(h, c, 0);
3540 } else if (c->cmd_type == CMD_IOCTL_PEND)
3541 complete(c->waiting);
3542 #ifdef CONFIG_CISS_SCSI_TAPE
3543 else if (c->cmd_type == CMD_SCSI)
3544 complete_scsi_command(c, 0, 0);
3545 #endif
3547 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3548 return;
3551 module_init(cciss_init);
3552 module_exit(cciss_cleanup);