/*
 *    Disk Array driver for HP SA 5xxx and 6xxx Controllers
 *    Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/blkpg.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/hdreg.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <asm/uaccess.h>
#include <asm/io.h>

#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/completion.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <scsi/scsi_ioctl.h>
#include <linux/cdrom.h>
#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
#define DRIVER_NAME "HP CISS Driver (v 3.6.14)"
#define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,14)

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.14");
MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
			" SA6i P600 P800 P400 P400i E200 E200i E500");
MODULE_VERSION("3.6.14");
MODULE_LICENSE("GPL");

#include "cciss_cmd.h"
#include "cciss.h"
#include <linux/cciss_ioctl.h>
/* define the PCI info for the cards we can control */
static const struct pci_device_id cciss_pci_device_id[] = {
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 *  nr_cmds = Number of commands supported by controller
 */
static struct board_type products[] = {
	{0x40700E11, "Smart Array 5300", &SA5_access, 512},
	{0x40800E11, "Smart Array 5i", &SA5B_access, 512},
	{0x40820E11, "Smart Array 532", &SA5B_access, 512},
	{0x40830E11, "Smart Array 5312", &SA5B_access, 512},
	{0x409A0E11, "Smart Array 641", &SA5_access, 512},
	{0x409B0E11, "Smart Array 642", &SA5_access, 512},
	{0x409C0E11, "Smart Array 6400", &SA5_access, 512},
	{0x409D0E11, "Smart Array 6400 EM", &SA5_access, 512},
	{0x40910E11, "Smart Array 6i", &SA5_access, 512},
	{0x3225103C, "Smart Array P600", &SA5_access, 512},
	{0x3223103C, "Smart Array P800", &SA5_access, 512},
	{0x3234103C, "Smart Array P400", &SA5_access, 512},
	{0x3235103C, "Smart Array P400i", &SA5_access, 512},
	{0x3211103C, "Smart Array E200i", &SA5_access, 120},
	{0x3212103C, "Smart Array E200", &SA5_access, 120},
	{0x3213103C, "Smart Array E200i", &SA5_access, 120},
	{0x3214103C, "Smart Array E200i", &SA5_access, 120},
	{0x3215103C, "Smart Array E200i", &SA5_access, 120},
	{0x3237103C, "Smart Array E500", &SA5_access, 512},
	{0x323D103C, "Smart Array P700m", &SA5_access, 512},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access, 120},
};
/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/*define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

#define READ_AHEAD 1024
#define MAX_CTLR 32

/* Originally cciss driver only supports 8 major numbers */
#define MAX_CTLR_ORIG 8

static ctlr_info_t *hba[MAX_CTLR];
static void do_cciss_request(struct request_queue *q);
static irqreturn_t do_cciss_intr(int irq, void *dev_id);
static int cciss_open(struct inode *inode, struct file *filep);
static int cciss_release(struct inode *inode, struct file *filep);
static int cciss_ioctl(struct inode *inode, struct file *filep,
		       unsigned int cmd, unsigned long arg);
static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);

static int cciss_revalidate(struct gendisk *disk);
static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
			   int clear_all);

static void cciss_read_capacity(int ctlr, int logvol, int withirq,
				sector_t *total_size, unsigned int *block_size);
static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
				   sector_t *total_size, unsigned int *block_size);
static void cciss_geometry_inquiry(int ctlr, int logvol,
				   int withirq, sector_t total_size,
				   unsigned int block_size, InquiryData_struct *inq_buff,
				   drive_info_struct *drv);
static void cciss_getgeometry(int cntl_num);
static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
					   __u32);
static void start_io(ctlr_info_t *h);
static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
		   unsigned int use_unit_num, unsigned int log_unit,
		   __u8 page_code, unsigned char *scsi3addr, int cmd_type);
static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
			   unsigned int use_unit_num, unsigned int log_unit,
			   __u8 page_code, int cmd_type);

static void fail_all_cmds(unsigned long ctlr);

#ifdef CONFIG_PROC_FS
static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
			       int length, int *eof, void *data);
static void cciss_procinit(int i);
#else
static void cciss_procinit(int i)
{
}
#endif				/* CONFIG_PROC_FS */

#ifdef CONFIG_COMPAT
static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
#endif
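/* Block device methods used by every cciss logical drive node. */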
static struct block_device_operations cciss_fops = {
	.owner = THIS_MODULE,
	.open = cciss_open,
	.release = cciss_release,
	.ioctl = cciss_ioctl,
	.getgeo = cciss_getgeo,
#ifdef CONFIG_COMPAT
	.compat_ioctl = cciss_compat_ioctl,
#endif
	.revalidate_disk = cciss_revalidate,
};
/*
 * Enqueuing and dequeuing functions for cmdlists.
 */
static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
{
	if (*Qptr == NULL) {
		*Qptr = c;
		c->next = c->prev = c;
	} else {
		c->prev = (*Qptr)->prev;
		c->next = (*Qptr);
		(*Qptr)->prev->next = c;
		(*Qptr)->prev = c;
	}
}

static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
					  CommandList_struct *c)
{
	if (c && c->next != c) {
		if (*Qptr == c)
			*Qptr = c->next;
		c->prev->next = c->next;
		c->next->prev = c->prev;
	} else {
		*Qptr = NULL;
	}
	return c;
}

#include "cciss_scsi.c"		/* For SCSI tape support */
#define RAID_UNKNOWN 6

#ifdef CONFIG_PROC_FS

/*
 * Report information about this controller.
 */
#define ENG_GIG 1000000000
#define ENG_GIG_FACTOR (ENG_GIG/512)
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"UNKNOWN"
};

static struct proc_dir_entry *proc_cciss;
static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
			       int length, int *eof, void *data)
{
	off_t pos = 0;
	off_t len = 0;
	int size, i, ctlr;
	ctlr_info_t *h = (ctlr_info_t *) data;
	drive_info_struct *drv;
	unsigned long flags;
	sector_t vol_sz, vol_sz_frac;

	ctlr = h->ctlr;

	/* prevent displaying bogus info during configuration
	 * or deconfiguration of a logical volume
	 */
	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
	if (h->busy_configuring) {
		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
		return -EBUSY;
	}
	h->busy_configuring = 1;
	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);

	size = sprintf(buffer, "%s: HP %s Controller\n"
		       "Board ID: 0x%08lx\n"
		       "Firmware Version: %c%c%c%c\n"
		       "IRQ: %d\n"
		       "Logical drives: %d\n"
		       "Max sectors: %d\n"
		       "Current Q depth: %d\n"
		       "Current # commands on controller: %d\n"
		       "Max Q depth since init: %d\n"
		       "Max # commands on controller since init: %d\n"
		       "Max SG entries since init: %d\n\n",
		       h->devname,
		       h->product_name,
		       (unsigned long)h->board_id,
		       h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
		       h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
		       h->num_luns,
		       h->cciss_max_sectors,
		       h->Qdepth, h->commands_outstanding,
		       h->maxQsinceinit, h->max_outstanding, h->maxSG);

	pos += size;
	len += size;
	cciss_proc_tape_report(ctlr, buffer, &pos, &len);
	for (i = 0; i <= h->highest_lun; i++) {

		drv = &h->drv[i];
		if (drv->heads == 0)
			continue;

		vol_sz = drv->nr_blocks;
		vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
		vol_sz_frac *= 100;
		sector_div(vol_sz_frac, ENG_GIG_FACTOR);

		if (drv->raid_level > 5)
			drv->raid_level = RAID_UNKNOWN;
		size = sprintf(buffer + len, "cciss/c%dd%d:"
			       "\t%4u.%02uGB\tRAID %s\n",
			       ctlr, i, (int)vol_sz, (int)vol_sz_frac,
			       raid_label[drv->raid_level]);
		pos += size;
		len += size;
	}

	*eof = 1;
	*start = buffer + offset;
	len -= offset;
	if (len > length)
		len = length;
	h->busy_configuring = 0;
	return len;
}
static int
cciss_proc_write(struct file *file, const char __user *buffer,
		 unsigned long count, void *data)
{
	unsigned char cmd[80];
	int len;
#ifdef CONFIG_CISS_SCSI_TAPE
	ctlr_info_t *h = (ctlr_info_t *) data;
	int rc;
#endif

	if (count > sizeof(cmd) - 1)
		return -EINVAL;
	if (copy_from_user(cmd, buffer, count))
		return -EFAULT;
	cmd[count] = '\0';
	len = strlen(cmd);	// above 3 lines ensure safety
	if (len && cmd[len - 1] == '\n')
		cmd[--len] = '\0';
#	ifdef CONFIG_CISS_SCSI_TAPE
	if (strcmp("engage scsi", cmd) == 0) {
		rc = cciss_engage_scsi(h->ctlr);
		if (rc != 0)
			return -rc;
		return count;
	}
	/* might be nice to have "disengage" too, but it's not
	   safely possible. (only 1 module use count, lock issues.) */
#	endif
	return -EINVAL;
}
/*
 * Get us a file in /proc/cciss that says something about each controller.
 * Create /proc/cciss if it doesn't exist yet.
 */
static void __devinit cciss_procinit(int i)
{
	struct proc_dir_entry *pde;

	if (proc_cciss == NULL) {
		proc_cciss = proc_mkdir("cciss", proc_root_driver);
		if (!proc_cciss)
			return;
	}

	pde = create_proc_read_entry(hba[i]->devname,
				     S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
				     proc_cciss, cciss_proc_get_info, hba[i]);
	pde->write_proc = cciss_proc_write;
}
#endif				/* CONFIG_PROC_FS */
/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use.  For operations that can wait for kmalloc
 * to possibly sleep, this routine can be called with get_from_pool set to 0.
 * cmd_free() MUST be called with got_from_pool set to 0 if cmd_alloc was.
 */
static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
{
	CommandList_struct *c;
	int i;
	u64bit temp64;
	dma_addr_t cmd_dma_handle, err_dma_handle;

	if (!get_from_pool) {
		c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
			sizeof(CommandList_struct), &cmd_dma_handle);
		if (c == NULL)
			return NULL;
		memset(c, 0, sizeof(CommandList_struct));

		c->cmdindex = -1;

		c->err_info = (ErrorInfo_struct *)
		    pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
					 &err_dma_handle);

		if (c->err_info == NULL) {
			pci_free_consistent(h->pdev,
				sizeof(CommandList_struct), c, cmd_dma_handle);
			return NULL;
		}
		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
	} else {		/* get it out of the controllers pool */

		do {
			i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
			if (i == h->nr_cmds)
				return NULL;
		} while (test_and_set_bit
			 (i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
#ifdef CCISS_DEBUG
		printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
#endif
		c = h->cmd_pool + i;
		memset(c, 0, sizeof(CommandList_struct));
		cmd_dma_handle = h->cmd_pool_dhandle
		    + i * sizeof(CommandList_struct);
		c->err_info = h->errinfo_pool + i;
		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
		err_dma_handle = h->errinfo_pool_dhandle
		    + i * sizeof(ErrorInfo_struct);
		h->nr_allocs++;

		c->cmdindex = i;
	}

	c->busaddr = (__u32) cmd_dma_handle;
	temp64.val = (__u64) err_dma_handle;
	c->ErrDesc.Addr.lower = temp64.val32.lower;
	c->ErrDesc.Addr.upper = temp64.val32.upper;
	c->ErrDesc.Len = sizeof(ErrorInfo_struct);

	c->ctlr = h->ctlr;
	return c;
}
/*
 * Frees a command block that was previously allocated with cmd_alloc().
 */
static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
{
	int i;
	u64bit temp64;

	if (!got_from_pool) {
		temp64.val32.lower = c->ErrDesc.Addr.lower;
		temp64.val32.upper = c->ErrDesc.Addr.upper;
		pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
				    c->err_info, (dma_addr_t) temp64.val);
		pci_free_consistent(h->pdev, sizeof(CommandList_struct),
				    c, (dma_addr_t) c->busaddr);
	} else {
		i = c - h->cmd_pool;
		clear_bit(i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG));
		h->nr_frees++;
	}
}
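/* Map a gendisk back to its controller and logical-drive state. */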
static inline ctlr_info_t *get_host(struct gendisk *disk)
{
	return disk->queue->queuedata;
}

static inline drive_info_struct *get_drv(struct gendisk *disk)
{
	return disk->private_data;
}
/*
 * Open.  Make sure the device is really there.
 */
static int cciss_open(struct inode *inode, struct file *filep)
{
	ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
	drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);

#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
#endif				/* CCISS_DEBUG */

	if (host->busy_initializing || drv->busy_configuring)
		return -EBUSY;
	/*
	 * Root is allowed to open raw volume zero even if it's not configured
	 * so array config can still work. Root is also allowed to open any
	 * volume that has a LUN ID, so it can issue IOCTL to reread the
	 * disk information.  I don't think I really like this
	 * but I'm already using way too many device nodes to claim another one
	 * for "raw controller".
	 */
	if (drv->heads == 0) {
		if (iminor(inode) != 0) {	/* not node 0? */
			/* if not node 0 make sure it is a partition = 0 */
			if (iminor(inode) & 0x0f) {
				return -ENXIO;
				/* if it is, make sure we have a LUN ID */
			} else if (drv->LunID == 0) {
				return -ENXIO;
			}
		}
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}
	drv->usage_count++;
	host->usage_count++;
	return 0;
}
/*
 * Close.  Sync first.
 */
static int cciss_release(struct inode *inode, struct file *filep)
{
	ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
	drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);

#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "cciss_release %s\n",
	       inode->i_bdev->bd_disk->disk_name);
#endif				/* CCISS_DEBUG */

	drv->usage_count--;
	host->usage_count--;
	return 0;
}
#ifdef CONFIG_COMPAT

static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
{
	int ret;
	lock_kernel();
	ret = cciss_ioctl(f->f_path.dentry->d_inode, f, cmd, arg);
	unlock_kernel();
	return ret;
}

static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
				  unsigned long arg);
static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
				      unsigned long arg);

static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return do_ioctl(f, cmd, arg);

	case CCISS_PASSTHRU32:
		return cciss_ioctl32_passthru(f, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return cciss_ioctl32_big_passthru(f, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}

static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
				  unsigned long arg)
{
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	err = 0;
	err |=
	    copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |=
	    copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |=
	    copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
	if (err)
		return err;
	err |=
	    copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}

static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
				      unsigned long arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	err = 0;
	err |=
	    copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |=
	    copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |=
	    copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
	if (err)
		return err;
	err |=
	    copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
#endif
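/* Report the logical geometry (heads/sectors/cylinders) kept in drive_info_struct. */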
static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	drive_info_struct *drv = get_drv(bdev->bd_disk);

	if (!drv->cylinders)
		return -ENXIO;

	geo->heads = drv->heads;
	geo->sectors = drv->sectors;
	geo->cylinders = drv->cylinders;
	return 0;
}
680 * ioctl
682 static int cciss_ioctl(struct inode *inode, struct file *filep,
683 unsigned int cmd, unsigned long arg)
685 struct block_device *bdev = inode->i_bdev;
686 struct gendisk *disk = bdev->bd_disk;
687 ctlr_info_t *host = get_host(disk);
688 drive_info_struct *drv = get_drv(disk);
689 int ctlr = host->ctlr;
690 void __user *argp = (void __user *)arg;
692 #ifdef CCISS_DEBUG
693 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
694 #endif /* CCISS_DEBUG */
696 switch (cmd) {
697 case CCISS_GETPCIINFO:
699 cciss_pci_info_struct pciinfo;
701 if (!arg)
702 return -EINVAL;
703 pciinfo.domain = pci_domain_nr(host->pdev->bus);
704 pciinfo.bus = host->pdev->bus->number;
705 pciinfo.dev_fn = host->pdev->devfn;
706 pciinfo.board_id = host->board_id;
707 if (copy_to_user
708 (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
709 return -EFAULT;
710 return 0;
712 case CCISS_GETINTINFO:
714 cciss_coalint_struct intinfo;
715 if (!arg)
716 return -EINVAL;
717 intinfo.delay =
718 readl(&host->cfgtable->HostWrite.CoalIntDelay);
719 intinfo.count =
720 readl(&host->cfgtable->HostWrite.CoalIntCount);
721 if (copy_to_user
722 (argp, &intinfo, sizeof(cciss_coalint_struct)))
723 return -EFAULT;
724 return 0;
726 case CCISS_SETINTINFO:
728 cciss_coalint_struct intinfo;
729 unsigned long flags;
730 int i;
732 if (!arg)
733 return -EINVAL;
734 if (!capable(CAP_SYS_ADMIN))
735 return -EPERM;
736 if (copy_from_user
737 (&intinfo, argp, sizeof(cciss_coalint_struct)))
738 return -EFAULT;
739 if ((intinfo.delay == 0) && (intinfo.count == 0))
741 // printk("cciss_ioctl: delay and count cannot be 0\n");
742 return -EINVAL;
744 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
745 /* Update the field, and then ring the doorbell */
746 writel(intinfo.delay,
747 &(host->cfgtable->HostWrite.CoalIntDelay));
748 writel(intinfo.count,
749 &(host->cfgtable->HostWrite.CoalIntCount));
750 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
752 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
753 if (!(readl(host->vaddr + SA5_DOORBELL)
754 & CFGTBL_ChangeReq))
755 break;
756 /* delay and try again */
757 udelay(1000);
759 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
760 if (i >= MAX_IOCTL_CONFIG_WAIT)
761 return -EAGAIN;
762 return 0;
764 case CCISS_GETNODENAME:
766 NodeName_type NodeName;
767 int i;
769 if (!arg)
770 return -EINVAL;
771 for (i = 0; i < 16; i++)
772 NodeName[i] =
773 readb(&host->cfgtable->ServerName[i]);
774 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
775 return -EFAULT;
776 return 0;
778 case CCISS_SETNODENAME:
780 NodeName_type NodeName;
781 unsigned long flags;
782 int i;
784 if (!arg)
785 return -EINVAL;
786 if (!capable(CAP_SYS_ADMIN))
787 return -EPERM;
789 if (copy_from_user
790 (NodeName, argp, sizeof(NodeName_type)))
791 return -EFAULT;
793 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
795 /* Update the field, and then ring the doorbell */
796 for (i = 0; i < 16; i++)
797 writeb(NodeName[i],
798 &host->cfgtable->ServerName[i]);
800 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
802 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
803 if (!(readl(host->vaddr + SA5_DOORBELL)
804 & CFGTBL_ChangeReq))
805 break;
806 /* delay and try again */
807 udelay(1000);
809 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
810 if (i >= MAX_IOCTL_CONFIG_WAIT)
811 return -EAGAIN;
812 return 0;
815 case CCISS_GETHEARTBEAT:
817 Heartbeat_type heartbeat;
819 if (!arg)
820 return -EINVAL;
821 heartbeat = readl(&host->cfgtable->HeartBeat);
822 if (copy_to_user
823 (argp, &heartbeat, sizeof(Heartbeat_type)))
824 return -EFAULT;
825 return 0;
827 case CCISS_GETBUSTYPES:
829 BusTypes_type BusTypes;
831 if (!arg)
832 return -EINVAL;
833 BusTypes = readl(&host->cfgtable->BusTypes);
834 if (copy_to_user
835 (argp, &BusTypes, sizeof(BusTypes_type)))
836 return -EFAULT;
837 return 0;
839 case CCISS_GETFIRMVER:
841 FirmwareVer_type firmware;
843 if (!arg)
844 return -EINVAL;
845 memcpy(firmware, host->firm_ver, 4);
847 if (copy_to_user
848 (argp, firmware, sizeof(FirmwareVer_type)))
849 return -EFAULT;
850 return 0;
852 case CCISS_GETDRIVVER:
854 DriverVer_type DriverVer = DRIVER_VERSION;
856 if (!arg)
857 return -EINVAL;
859 if (copy_to_user
860 (argp, &DriverVer, sizeof(DriverVer_type)))
861 return -EFAULT;
862 return 0;
865 case CCISS_REVALIDVOLS:
866 return rebuild_lun_table(host, NULL);
868 case CCISS_GETLUNINFO:{
869 LogvolInfo_struct luninfo;
871 luninfo.LunID = drv->LunID;
872 luninfo.num_opens = drv->usage_count;
873 luninfo.num_parts = 0;
874 if (copy_to_user(argp, &luninfo,
875 sizeof(LogvolInfo_struct)))
876 return -EFAULT;
877 return 0;
879 case CCISS_DEREGDISK:
880 return rebuild_lun_table(host, disk);
882 case CCISS_REGNEWD:
883 return rebuild_lun_table(host, NULL);
885 case CCISS_PASSTHRU:
887 IOCTL_Command_struct iocommand;
888 CommandList_struct *c;
889 char *buff = NULL;
890 u64bit temp64;
891 unsigned long flags;
892 DECLARE_COMPLETION_ONSTACK(wait);
894 if (!arg)
895 return -EINVAL;
897 if (!capable(CAP_SYS_RAWIO))
898 return -EPERM;
900 if (copy_from_user
901 (&iocommand, argp, sizeof(IOCTL_Command_struct)))
902 return -EFAULT;
903 if ((iocommand.buf_size < 1) &&
904 (iocommand.Request.Type.Direction != XFER_NONE)) {
905 return -EINVAL;
907 #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
908 /* Check kmalloc limits */
909 if (iocommand.buf_size > 128000)
910 return -EINVAL;
911 #endif
912 if (iocommand.buf_size > 0) {
913 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
914 if (buff == NULL)
915 return -EFAULT;
917 if (iocommand.Request.Type.Direction == XFER_WRITE) {
918 /* Copy the data into the buffer we created */
919 if (copy_from_user
920 (buff, iocommand.buf, iocommand.buf_size)) {
921 kfree(buff);
922 return -EFAULT;
924 } else {
925 memset(buff, 0, iocommand.buf_size);
927 if ((c = cmd_alloc(host, 0)) == NULL) {
928 kfree(buff);
929 return -ENOMEM;
931 // Fill in the command type
932 c->cmd_type = CMD_IOCTL_PEND;
933 // Fill in Command Header
934 c->Header.ReplyQueue = 0; // unused in simple mode
935 if (iocommand.buf_size > 0) // buffer to fill
937 c->Header.SGList = 1;
938 c->Header.SGTotal = 1;
939 } else // no buffers to fill
941 c->Header.SGList = 0;
942 c->Header.SGTotal = 0;
944 c->Header.LUN = iocommand.LUN_info;
945 c->Header.Tag.lower = c->busaddr; // use the kernel address the cmd block for tag
947 // Fill in Request block
948 c->Request = iocommand.Request;
950 // Fill in the scatter gather information
951 if (iocommand.buf_size > 0) {
952 temp64.val = pci_map_single(host->pdev, buff,
953 iocommand.buf_size,
954 PCI_DMA_BIDIRECTIONAL);
955 c->SG[0].Addr.lower = temp64.val32.lower;
956 c->SG[0].Addr.upper = temp64.val32.upper;
957 c->SG[0].Len = iocommand.buf_size;
958 c->SG[0].Ext = 0; // we are not chaining
960 c->waiting = &wait;
962 /* Put the request on the tail of the request queue */
963 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
964 addQ(&host->reqQ, c);
965 host->Qdepth++;
966 start_io(host);
967 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
969 wait_for_completion(&wait);
971 /* unlock the buffers from DMA */
972 temp64.val32.lower = c->SG[0].Addr.lower;
973 temp64.val32.upper = c->SG[0].Addr.upper;
974 pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
975 iocommand.buf_size,
976 PCI_DMA_BIDIRECTIONAL);
978 /* Copy the error information out */
979 iocommand.error_info = *(c->err_info);
980 if (copy_to_user
981 (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
982 kfree(buff);
983 cmd_free(host, c, 0);
984 return -EFAULT;
987 if (iocommand.Request.Type.Direction == XFER_READ) {
988 /* Copy the data out of the buffer we created */
989 if (copy_to_user
990 (iocommand.buf, buff, iocommand.buf_size)) {
991 kfree(buff);
992 cmd_free(host, c, 0);
993 return -EFAULT;
996 kfree(buff);
997 cmd_free(host, c, 0);
998 return 0;
1000 case CCISS_BIG_PASSTHRU:{
1001 BIG_IOCTL_Command_struct *ioc;
1002 CommandList_struct *c;
1003 unsigned char **buff = NULL;
1004 int *buff_size = NULL;
1005 u64bit temp64;
1006 unsigned long flags;
1007 BYTE sg_used = 0;
1008 int status = 0;
1009 int i;
1010 DECLARE_COMPLETION_ONSTACK(wait);
1011 __u32 left;
1012 __u32 sz;
1013 BYTE __user *data_ptr;
1015 if (!arg)
1016 return -EINVAL;
1017 if (!capable(CAP_SYS_RAWIO))
1018 return -EPERM;
1019 ioc = (BIG_IOCTL_Command_struct *)
1020 kmalloc(sizeof(*ioc), GFP_KERNEL);
1021 if (!ioc) {
1022 status = -ENOMEM;
1023 goto cleanup1;
1025 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
1026 status = -EFAULT;
1027 goto cleanup1;
1029 if ((ioc->buf_size < 1) &&
1030 (ioc->Request.Type.Direction != XFER_NONE)) {
1031 status = -EINVAL;
1032 goto cleanup1;
1034 /* Check kmalloc limits using all SGs */
1035 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
1036 status = -EINVAL;
1037 goto cleanup1;
1039 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
1040 status = -EINVAL;
1041 goto cleanup1;
1043 buff =
1044 kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1045 if (!buff) {
1046 status = -ENOMEM;
1047 goto cleanup1;
1049 buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
1050 GFP_KERNEL);
1051 if (!buff_size) {
1052 status = -ENOMEM;
1053 goto cleanup1;
1055 left = ioc->buf_size;
1056 data_ptr = ioc->buf;
1057 while (left) {
1058 sz = (left >
1059 ioc->malloc_size) ? ioc->
1060 malloc_size : left;
1061 buff_size[sg_used] = sz;
1062 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1063 if (buff[sg_used] == NULL) {
1064 status = -ENOMEM;
1065 goto cleanup1;
1067 if (ioc->Request.Type.Direction == XFER_WRITE) {
1068 if (copy_from_user
1069 (buff[sg_used], data_ptr, sz)) {
1070 status = -ENOMEM;
1071 goto cleanup1;
1073 } else {
1074 memset(buff[sg_used], 0, sz);
1076 left -= sz;
1077 data_ptr += sz;
1078 sg_used++;
1080 if ((c = cmd_alloc(host, 0)) == NULL) {
1081 status = -ENOMEM;
1082 goto cleanup1;
1084 c->cmd_type = CMD_IOCTL_PEND;
1085 c->Header.ReplyQueue = 0;
1087 if (ioc->buf_size > 0) {
1088 c->Header.SGList = sg_used;
1089 c->Header.SGTotal = sg_used;
1090 } else {
1091 c->Header.SGList = 0;
1092 c->Header.SGTotal = 0;
1094 c->Header.LUN = ioc->LUN_info;
1095 c->Header.Tag.lower = c->busaddr;
1097 c->Request = ioc->Request;
1098 if (ioc->buf_size > 0) {
1099 int i;
1100 for (i = 0; i < sg_used; i++) {
1101 temp64.val =
1102 pci_map_single(host->pdev, buff[i],
1103 buff_size[i],
1104 PCI_DMA_BIDIRECTIONAL);
1105 c->SG[i].Addr.lower =
1106 temp64.val32.lower;
1107 c->SG[i].Addr.upper =
1108 temp64.val32.upper;
1109 c->SG[i].Len = buff_size[i];
1110 c->SG[i].Ext = 0; /* we are not chaining */
1113 c->waiting = &wait;
1114 /* Put the request on the tail of the request queue */
1115 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1116 addQ(&host->reqQ, c);
1117 host->Qdepth++;
1118 start_io(host);
1119 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1120 wait_for_completion(&wait);
1121 /* unlock the buffers from DMA */
1122 for (i = 0; i < sg_used; i++) {
1123 temp64.val32.lower = c->SG[i].Addr.lower;
1124 temp64.val32.upper = c->SG[i].Addr.upper;
1125 pci_unmap_single(host->pdev,
1126 (dma_addr_t) temp64.val, buff_size[i],
1127 PCI_DMA_BIDIRECTIONAL);
1129 /* Copy the error information out */
1130 ioc->error_info = *(c->err_info);
1131 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1132 cmd_free(host, c, 0);
1133 status = -EFAULT;
1134 goto cleanup1;
1136 if (ioc->Request.Type.Direction == XFER_READ) {
1137 /* Copy the data out of the buffer we created */
1138 BYTE __user *ptr = ioc->buf;
1139 for (i = 0; i < sg_used; i++) {
1140 if (copy_to_user
1141 (ptr, buff[i], buff_size[i])) {
1142 cmd_free(host, c, 0);
1143 status = -EFAULT;
1144 goto cleanup1;
1146 ptr += buff_size[i];
1149 cmd_free(host, c, 0);
1150 status = 0;
1151 cleanup1:
1152 if (buff) {
1153 for (i = 0; i < sg_used; i++)
1154 kfree(buff[i]);
1155 kfree(buff);
1157 kfree(buff_size);
1158 kfree(ioc);
1159 return status;
1162 /* scsi_cmd_ioctl handles these, below, though some are not */
1163 /* very meaningful for cciss. SG_IO is the main one people want. */
1165 case SG_GET_VERSION_NUM:
1166 case SG_SET_TIMEOUT:
1167 case SG_GET_TIMEOUT:
1168 case SG_GET_RESERVED_SIZE:
1169 case SG_SET_RESERVED_SIZE:
1170 case SG_EMULATED_HOST:
1171 case SG_IO:
1172 case SCSI_IOCTL_SEND_COMMAND:
1173 return scsi_cmd_ioctl(filep, disk->queue, disk, cmd, argp);
1175 /* scsi_cmd_ioctl would normally handle these, below, but */
1176 /* they aren't a good fit for cciss, as CD-ROMs are */
1177 /* not supported, and we don't have any bus/target/lun */
1178 /* which we present to the kernel. */
1180 case CDROM_SEND_PACKET:
1181 case CDROMCLOSETRAY:
1182 case CDROMEJECT:
1183 case SCSI_IOCTL_GET_IDLUN:
1184 case SCSI_IOCTL_GET_BUS_NUMBER:
1185 default:
1186 return -ENOTTY;
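/* Complete every bio chained to a finished request, propagating any error. */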
1190 static inline void complete_buffers(struct bio *bio, int status)
1192 while (bio) {
1193 struct bio *xbh = bio->bi_next;
1195 bio->bi_next = NULL;
1196 bio_endio(bio, status ? 0 : -EIO);
1197 bio = xbh;
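/* Restart the per-drive request queues, round-robin, while command slots remain free. */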
1201 static void cciss_check_queues(ctlr_info_t *h)
1203 int start_queue = h->next_to_run;
1204 int i;
1206 /* check to see if we have maxed out the number of commands that can
1207 * be placed on the queue. If so then exit. We do this check here
1208 * in case the interrupt we serviced was from an ioctl and did not
1209 * free any new commands.
1211 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
1212 return;
1214 /* We have room on the queue for more commands. Now we need to queue
1215 * them up. We will also keep track of the next queue to run so
1216 * that every queue gets a chance to be started first.
1218 for (i = 0; i < h->highest_lun + 1; i++) {
1219 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1220 /* make sure the disk has been added and the drive is real
1221 * because this can be called from the middle of init_one.
1223 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1224 continue;
1225 blk_start_queue(h->gendisk[curr_queue]->queue);
1227 /* check to see if we have maxed out the number of commands
1228 * that can be placed on the queue.
1230 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
1231 if (curr_queue == start_queue) {
1232 h->next_to_run =
1233 (start_queue + 1) % (h->highest_lun + 1);
1234 break;
1235 } else {
1236 h->next_to_run = curr_queue;
1237 break;
1239 } else {
1240 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
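/* Softirq half of command completion: unmap the scatter-gather list, end the request and free the command. */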
1245 static void cciss_softirq_done(struct request *rq)
1247 CommandList_struct *cmd = rq->completion_data;
1248 ctlr_info_t *h = hba[cmd->ctlr];
1249 unsigned long flags;
1250 u64bit temp64;
1251 int i, ddir;
1253 if (cmd->Request.Type.Direction == XFER_READ)
1254 ddir = PCI_DMA_FROMDEVICE;
1255 else
1256 ddir = PCI_DMA_TODEVICE;
1258 /* command did not need to be retried */
1259 /* unmap the DMA mapping for all the scatter gather elements */
1260 for (i = 0; i < cmd->Header.SGList; i++) {
1261 temp64.val32.lower = cmd->SG[i].Addr.lower;
1262 temp64.val32.upper = cmd->SG[i].Addr.upper;
1263 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1266 complete_buffers(rq->bio, (rq->errors == 0));
1268 if (blk_fs_request(rq)) {
1269 const int rw = rq_data_dir(rq);
1271 disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
1274 #ifdef CCISS_DEBUG
1275 printk("Done with %p\n", rq);
1276 #endif /* CCISS_DEBUG */
1278 add_disk_randomness(rq->rq_disk);
1279 spin_lock_irqsave(&h->lock, flags);
1280 end_that_request_last(rq, (rq->errors == 0));
1281 cmd_free(h, cmd, 1);
1282 cciss_check_queues(h);
1283 spin_unlock_irqrestore(&h->lock, flags);
1286 /* This function will check the usage_count of the drive to be updated/added.
1287 * If the usage_count is zero then the drive information will be updated and
1288 * the disk will be re-registered with the kernel. If not then it will be
1289 * left alone for the next reboot. The exception to this is disk 0 which
1290 * will always be left registered with the kernel since it is also the
1291 * controller node. Any changes to disk 0 will show up on the next
1292 * reboot.
1294 static void cciss_update_drive_info(int ctlr, int drv_index)
1296 ctlr_info_t *h = hba[ctlr];
1297 struct gendisk *disk;
1298 InquiryData_struct *inq_buff = NULL;
1299 unsigned int block_size;
1300 sector_t total_size;
1301 unsigned long flags = 0;
1302 int ret = 0;
1304 /* if the disk already exists then deregister it before proceeding */
1305 if (h->drv[drv_index].raid_level != -1) {
1306 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1307 h->drv[drv_index].busy_configuring = 1;
1308 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1309 ret = deregister_disk(h->gendisk[drv_index],
1310 &h->drv[drv_index], 0);
1311 h->drv[drv_index].busy_configuring = 0;
1314 /* If the disk is in use return */
1315 if (ret)
1316 return;
1318 /* Get information about the disk and modify the driver structure */
1319 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1320 if (inq_buff == NULL)
1321 goto mem_msg;
1323 /* testing to see if 16-byte CDBs are already being used */
1324 if (h->cciss_read == CCISS_READ_16) {
1325 cciss_read_capacity_16(h->ctlr, drv_index, 1,
1326 &total_size, &block_size);
1327 goto geo_inq;
1330 cciss_read_capacity(ctlr, drv_index, 1,
1331 &total_size, &block_size);
1333 /* if read_capacity returns all F's this volume is >2TB in size */
1334 /* so we switch to 16-byte CDB's for all read/write ops */
1335 if (total_size == 0xFFFFFFFFULL) {
1336 cciss_read_capacity_16(ctlr, drv_index, 1,
1337 &total_size, &block_size);
1338 h->cciss_read = CCISS_READ_16;
1339 h->cciss_write = CCISS_WRITE_16;
1340 } else {
1341 h->cciss_read = CCISS_READ_10;
1342 h->cciss_write = CCISS_WRITE_10;
1344 geo_inq:
1345 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1346 inq_buff, &h->drv[drv_index]);
1348 ++h->num_luns;
1349 disk = h->gendisk[drv_index];
1350 set_capacity(disk, h->drv[drv_index].nr_blocks);
1352 /* if it's the controller it's already added */
1353 if (drv_index) {
1354 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1355 sprintf(disk->disk_name, "cciss/c%dd%d", ctlr, drv_index);
1356 disk->major = h->major;
1357 disk->first_minor = drv_index << NWD_SHIFT;
1358 disk->fops = &cciss_fops;
1359 disk->private_data = &h->drv[drv_index];
1361 /* Set up queue information */
1362 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1363 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1365 /* This is a hardware imposed limit. */
1366 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1368 /* This is a limit in the driver and could be eliminated. */
1369 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1371 blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
1373 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1375 disk->queue->queuedata = hba[ctlr];
1377 blk_queue_hardsect_size(disk->queue,
1378 hba[ctlr]->drv[drv_index].block_size);
1380 h->drv[drv_index].queue = disk->queue;
1381 add_disk(disk);
1384 freeret:
1385 kfree(inq_buff);
1386 return;
1387 mem_msg:
1388 printk(KERN_ERR "cciss: out of memory\n");
1389 goto freeret;
1392 /* This function will find the first index of the controllers drive array
1393 * that has a -1 for the raid_level and will return that index. This is
1394 * where new drives will be added. If the index to be returned is greater
1395 * than the highest_lun index for the controller then highest_lun is set
1396 * to this new index. If there are no available indexes then -1 is returned.
1398 static int cciss_find_free_drive_index(int ctlr)
1400 int i;
1402 for (i = 0; i < CISS_MAX_LUN; i++) {
1403 if (hba[ctlr]->drv[i].raid_level == -1) {
1404 if (i > hba[ctlr]->highest_lun)
1405 hba[ctlr]->highest_lun = i;
1406 return i;
1409 return -1;
1412 /* This function will add and remove logical drives from the Logical
1413 * drive array of the controller and maintain persistency of ordering
1414 * so that mount points are preserved until the next reboot. This allows
1415 * for the removal of logical drives in the middle of the drive array
1416 * without a re-ordering of those drives.
1417 * INPUT
1418 * h = The controller to perform the operations on
1419 * del_disk = The disk to remove if specified. If the value given
1420 * is NULL then no disk is removed.
1422 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1424 int ctlr = h->ctlr;
1425 int num_luns;
1426 ReportLunData_struct *ld_buff = NULL;
1427 drive_info_struct *drv = NULL;
1428 int return_code;
1429 int listlength = 0;
1430 int i;
1431 int drv_found;
1432 int drv_index = 0;
1433 __u32 lunid = 0;
1434 unsigned long flags;
1436 /* Set busy_configuring flag for this operation */
1437 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1438 if (h->busy_configuring) {
1439 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1440 return -EBUSY;
1442 h->busy_configuring = 1;
1444 /* if del_disk is NULL then we are being called to add a new disk
1445 * and update the logical drive table. If it is not NULL then
1446 * we will check if the disk is in use or not.
1448 if (del_disk != NULL) {
1449 drv = get_drv(del_disk);
1450 drv->busy_configuring = 1;
1451 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1452 return_code = deregister_disk(del_disk, drv, 1);
1453 drv->busy_configuring = 0;
1454 h->busy_configuring = 0;
1455 return return_code;
1456 } else {
1457 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1458 if (!capable(CAP_SYS_RAWIO))
1459 return -EPERM;
1461 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1462 if (ld_buff == NULL)
1463 goto mem_msg;
1465 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1466 sizeof(ReportLunData_struct), 0,
1467 0, 0, TYPE_CMD);
1469 if (return_code == IO_OK) {
1470 listlength =
1471 be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
1472 } else { /* reading number of logical volumes failed */
1473 printk(KERN_WARNING "cciss: report logical volume"
1474 " command failed\n");
1475 listlength = 0;
1476 goto freeret;
1479 num_luns = listlength / 8; /* 8 bytes per entry */
1480 if (num_luns > CISS_MAX_LUN) {
1481 num_luns = CISS_MAX_LUN;
1482 printk(KERN_WARNING "cciss: more luns configured"
1483 " on controller than can be handled by"
1484 " this driver.\n");
1487 /* Compare controller drive array to drivers drive array.
1488 * Check for updates in the drive information and any new drives
1489 * on the controller.
1491 for (i = 0; i < num_luns; i++) {
1492 int j;
1494 drv_found = 0;
1496 lunid = (0xff &
1497 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1498 lunid |= (0xff &
1499 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1500 lunid |= (0xff &
1501 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1502 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1504 /* Find if the LUN is already in the drive array
1505 * of the controller. If so then update its info
1506 * if not is use. If it does not exist then find
1507 * the first free index and add it.
1509 for (j = 0; j <= h->highest_lun; j++) {
1510 if (h->drv[j].LunID == lunid) {
1511 drv_index = j;
1512 drv_found = 1;
1516 /* check if the drive was found already in the array */
1517 if (!drv_found) {
1518 drv_index = cciss_find_free_drive_index(ctlr);
1519 if (drv_index == -1)
1520 goto freeret;
1522 /*Check if the gendisk needs to be allocated */
1523 if (!h->gendisk[drv_index]){
1524 h->gendisk[drv_index] = alloc_disk(1 << NWD_SHIFT);
1525 if (!h->gendisk[drv_index]){
1526 printk(KERN_ERR "cciss: could not allocate new disk %d\n", drv_index);
1527 goto mem_msg;
1531 h->drv[drv_index].LunID = lunid;
1532 cciss_update_drive_info(ctlr, drv_index);
1533 } /* end for */
1534 } /* end else */
1536 freeret:
1537 kfree(ld_buff);
1538 h->busy_configuring = 0;
1539 /* We return -1 here to tell the ACU that we have registered/updated
1540 * all of the drives that we can and to keep it from calling us
1541 * additional times.
1543 return -1;
1544 mem_msg:
1545 printk(KERN_ERR "cciss: out of memory\n");
1546 goto freeret;
1549 /* This function will deregister the disk and it's queue from the
1550 * kernel. It must be called with the controller lock held and the
1551 * drv structures busy_configuring flag set. It's parameters are:
1553 * disk = This is the disk to be deregistered
1554 * drv = This is the drive_info_struct associated with the disk to be
1555 * deregistered. It contains information about the disk used
1556 * by the driver.
1557 * clear_all = This flag determines whether or not the disk information
1558 * is going to be completely cleared out and the highest_lun
1559 * reset. Sometimes we want to clear out information about
1560 * the disk in preparation for re-adding it. In this case
1561 * the highest_lun should be left unchanged and the LunID
1562 * should not be cleared.
1564 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1565 int clear_all)
1567 int i;
1568 ctlr_info_t *h = get_host(disk);
1570 if (!capable(CAP_SYS_RAWIO))
1571 return -EPERM;
1573 /* make sure logical volume is NOT is use */
1574 if (clear_all || (h->gendisk[0] == disk)) {
1575 if (drv->usage_count > 1)
1576 return -EBUSY;
1577 } else if (drv->usage_count > 0)
1578 return -EBUSY;
1580 /* invalidate the devices and deregister the disk. If it is disk
1581 * zero do not deregister it but just zero out it's values. This
1582 * allows us to delete disk zero but keep the controller registered.
1584 if (h->gendisk[0] != disk) {
1585 if (disk) {
1586 struct request_queue *q = disk->queue;
1587 if (disk->flags & GENHD_FL_UP)
1588 del_gendisk(disk);
1589 if (q) {
1590 blk_cleanup_queue(q);
1591 /* Set drv->queue to NULL so that we do not try
1592 * to call blk_start_queue on this queue in the
1593 * interrupt handler
1595 drv->queue = NULL;
1597 /* If clear_all is set then we are deleting the logical
1598 * drive, not just refreshing its info. For drives
1599 * other than disk 0 we will call put_disk. We do not
1600 * do this for disk 0 as we need it to be able to
1601 * configure the controller.
1603 if (clear_all){
1604 /* This isn't pretty, but we need to find the
1605 * disk in our array and NULL our the pointer.
1606 * This is so that we will call alloc_disk if
1607 * this index is used again later.
1609 for (i=0; i < CISS_MAX_LUN; i++){
1610 if(h->gendisk[i] == disk){
1611 h->gendisk[i] = NULL;
1612 break;
1615 put_disk(disk);
1618 } else {
1619 set_capacity(disk, 0);
1622 --h->num_luns;
1623 /* zero out the disk size info */
1624 drv->nr_blocks = 0;
1625 drv->block_size = 0;
1626 drv->heads = 0;
1627 drv->sectors = 0;
1628 drv->cylinders = 0;
1629 drv->raid_level = -1; /* This can be used as a flag variable to
1630 * indicate that this element of the drive
1631 * array is free.
1634 if (clear_all) {
1635 /* check to see if it was the last disk */
1636 if (drv == h->drv + h->highest_lun) {
1637 /* if so, find the new hightest lun */
1638 int i, newhighest = -1;
1639 for (i = 0; i < h->highest_lun; i++) {
1640 /* if the disk has size > 0, it is available */
1641 if (h->drv[i].heads)
1642 newhighest = i;
1644 h->highest_lun = newhighest;
1647 drv->LunID = 0;
1649 return 0;
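/* Build a controller command (TYPE_CMD) or message (TYPE_MSG) in *c and set up its single scatter-gather entry. */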
1652 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
1653 1: address logical volume log_unit,
1654 2: periph device address is scsi3addr */
1655 unsigned int log_unit, __u8 page_code,
1656 unsigned char *scsi3addr, int cmd_type)
1658 ctlr_info_t *h = hba[ctlr];
1659 u64bit buff_dma_handle;
1660 int status = IO_OK;
1662 c->cmd_type = CMD_IOCTL_PEND;
1663 c->Header.ReplyQueue = 0;
1664 if (buff != NULL) {
1665 c->Header.SGList = 1;
1666 c->Header.SGTotal = 1;
1667 } else {
1668 c->Header.SGList = 0;
1669 c->Header.SGTotal = 0;
1671 c->Header.Tag.lower = c->busaddr;
1673 c->Request.Type.Type = cmd_type;
1674 if (cmd_type == TYPE_CMD) {
1675 switch (cmd) {
1676 case CISS_INQUIRY:
1677 /* If the logical unit number is 0 then, this is going
1678 to controller so It's a physical command
1679 mode = 0 target = 0. So we have nothing to write.
1680 otherwise, if use_unit_num == 1,
1681 mode = 1(volume set addressing) target = LUNID
1682 otherwise, if use_unit_num == 2,
1683 mode = 0(periph dev addr) target = scsi3addr */
1684 if (use_unit_num == 1) {
1685 c->Header.LUN.LogDev.VolId =
1686 h->drv[log_unit].LunID;
1687 c->Header.LUN.LogDev.Mode = 1;
1688 } else if (use_unit_num == 2) {
1689 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
1691 c->Header.LUN.LogDev.Mode = 0;
1693 /* are we trying to read a vital product page */
1694 if (page_code != 0) {
1695 c->Request.CDB[1] = 0x01;
1696 c->Request.CDB[2] = page_code;
1698 c->Request.CDBLen = 6;
1699 c->Request.Type.Attribute = ATTR_SIMPLE;
1700 c->Request.Type.Direction = XFER_READ;
1701 c->Request.Timeout = 0;
1702 c->Request.CDB[0] = CISS_INQUIRY;
1703 c->Request.CDB[4] = size & 0xFF;
1704 break;
1705 case CISS_REPORT_LOG:
1706 case CISS_REPORT_PHYS:
1707 /* Talking to controller so It's a physical command
1708 mode = 00 target = 0. Nothing to write.
1710 c->Request.CDBLen = 12;
1711 c->Request.Type.Attribute = ATTR_SIMPLE;
1712 c->Request.Type.Direction = XFER_READ;
1713 c->Request.Timeout = 0;
1714 c->Request.CDB[0] = cmd;
1715 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1716 c->Request.CDB[7] = (size >> 16) & 0xFF;
1717 c->Request.CDB[8] = (size >> 8) & 0xFF;
1718 c->Request.CDB[9] = size & 0xFF;
1719 break;
1721 case CCISS_READ_CAPACITY:
1722 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1723 c->Header.LUN.LogDev.Mode = 1;
1724 c->Request.CDBLen = 10;
1725 c->Request.Type.Attribute = ATTR_SIMPLE;
1726 c->Request.Type.Direction = XFER_READ;
1727 c->Request.Timeout = 0;
1728 c->Request.CDB[0] = cmd;
1729 break;
1730 case CCISS_READ_CAPACITY_16:
1731 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1732 c->Header.LUN.LogDev.Mode = 1;
1733 c->Request.CDBLen = 16;
1734 c->Request.Type.Attribute = ATTR_SIMPLE;
1735 c->Request.Type.Direction = XFER_READ;
1736 c->Request.Timeout = 0;
1737 c->Request.CDB[0] = cmd;
1738 c->Request.CDB[1] = 0x10;
1739 c->Request.CDB[10] = (size >> 24) & 0xFF;
1740 c->Request.CDB[11] = (size >> 16) & 0xFF;
1741 c->Request.CDB[12] = (size >> 8) & 0xFF;
1742 c->Request.CDB[13] = size & 0xFF;
1743 c->Request.Timeout = 0;
1744 c->Request.CDB[0] = cmd;
1745 break;
1746 case CCISS_CACHE_FLUSH:
1747 c->Request.CDBLen = 12;
1748 c->Request.Type.Attribute = ATTR_SIMPLE;
1749 c->Request.Type.Direction = XFER_WRITE;
1750 c->Request.Timeout = 0;
1751 c->Request.CDB[0] = BMIC_WRITE;
1752 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1753 break;
1754 default:
1755 printk(KERN_WARNING
1756 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1757 return IO_ERROR;
1759 } else if (cmd_type == TYPE_MSG) {
1760 switch (cmd) {
1761 case 0: /* ABORT message */
1762 c->Request.CDBLen = 12;
1763 c->Request.Type.Attribute = ATTR_SIMPLE;
1764 c->Request.Type.Direction = XFER_WRITE;
1765 c->Request.Timeout = 0;
1766 c->Request.CDB[0] = cmd; /* abort */
1767 c->Request.CDB[1] = 0; /* abort a command */
1768 /* buff contains the tag of the command to abort */
1769 memcpy(&c->Request.CDB[4], buff, 8);
1770 break;
1771 case 1: /* RESET message */
1772 c->Request.CDBLen = 12;
1773 c->Request.Type.Attribute = ATTR_SIMPLE;
1774 c->Request.Type.Direction = XFER_WRITE;
1775 c->Request.Timeout = 0;
1776 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1777 c->Request.CDB[0] = cmd; /* reset */
1778 c->Request.CDB[1] = 0x04; /* reset a LUN */
1779 break;
1780 case 3: /* No-Op message */
1781 c->Request.CDBLen = 1;
1782 c->Request.Type.Attribute = ATTR_SIMPLE;
1783 c->Request.Type.Direction = XFER_WRITE;
1784 c->Request.Timeout = 0;
1785 c->Request.CDB[0] = cmd;
1786 break;
1787 default:
1788 printk(KERN_WARNING
1789 "cciss%d: unknown message type %d\n", ctlr, cmd);
1790 return IO_ERROR;
1792 } else {
1793 printk(KERN_WARNING
1794 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1795 return IO_ERROR;
1797 /* Fill in the scatter gather information */
1798 if (size > 0) {
1799 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1800 buff, size,
1801 PCI_DMA_BIDIRECTIONAL);
1802 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1803 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1804 c->SG[0].Len = size;
1805 c->SG[0].Ext = 0; /* we are not chaining */
1807 return status;
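/* Issue a command and sleep on a completion until the interrupt handler finishes it; unsolicited aborts are retried up to MAX_CMD_RETRIES times. */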
1810 static int sendcmd_withirq(__u8 cmd,
1811 int ctlr,
1812 void *buff,
1813 size_t size,
1814 unsigned int use_unit_num,
1815 unsigned int log_unit, __u8 page_code, int cmd_type)
1817 ctlr_info_t *h = hba[ctlr];
1818 CommandList_struct *c;
1819 u64bit buff_dma_handle;
1820 unsigned long flags;
1821 int return_status;
1822 DECLARE_COMPLETION_ONSTACK(wait);
1824 if ((c = cmd_alloc(h, 0)) == NULL)
1825 return -ENOMEM;
1826 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1827 log_unit, page_code, NULL, cmd_type);
1828 if (return_status != IO_OK) {
1829 cmd_free(h, c, 0);
1830 return return_status;
1832 resend_cmd2:
1833 c->waiting = &wait;
1835 /* Put the request on the tail of the queue and send it */
1836 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1837 addQ(&h->reqQ, c);
1838 h->Qdepth++;
1839 start_io(h);
1840 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1842 wait_for_completion(&wait);
1844 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
1845 switch (c->err_info->CommandStatus) {
1846 case CMD_TARGET_STATUS:
1847 printk(KERN_WARNING "cciss: cmd %p has "
1848 " completed with errors\n", c);
1849 if (c->err_info->ScsiStatus) {
1850 printk(KERN_WARNING "cciss: cmd %p "
1851 "has SCSI Status = %x\n",
1852 c, c->err_info->ScsiStatus);
1855 break;
1856 case CMD_DATA_UNDERRUN:
1857 case CMD_DATA_OVERRUN:
1858 /* expected for inquire and report lun commands */
1859 break;
1860 case CMD_INVALID:
1861 printk(KERN_WARNING "cciss: Cmd %p is "
1862 "reported invalid\n", c);
1863 return_status = IO_ERROR;
1864 break;
1865 case CMD_PROTOCOL_ERR:
1866 printk(KERN_WARNING "cciss: cmd %p has "
1867 "protocol error \n", c);
1868 return_status = IO_ERROR;
1869 break;
1870 case CMD_HARDWARE_ERR:
1871 printk(KERN_WARNING "cciss: cmd %p had "
1872 " hardware error\n", c);
1873 return_status = IO_ERROR;
1874 break;
1875 case CMD_CONNECTION_LOST:
1876 printk(KERN_WARNING "cciss: cmd %p had "
1877 "connection lost\n", c);
1878 return_status = IO_ERROR;
1879 break;
1880 case CMD_ABORTED:
1881 printk(KERN_WARNING "cciss: cmd %p was "
1882 "aborted\n", c);
1883 return_status = IO_ERROR;
1884 break;
1885 case CMD_ABORT_FAILED:
1886 printk(KERN_WARNING "cciss: cmd %p reports "
1887 "abort failed\n", c);
1888 return_status = IO_ERROR;
1889 break;
1890 case CMD_UNSOLICITED_ABORT:
1891 printk(KERN_WARNING
1892 "cciss%d: unsolicited abort %p\n", ctlr, c);
1893 if (c->retry_count < MAX_CMD_RETRIES) {
1894 printk(KERN_WARNING
1895 "cciss%d: retrying %p\n", ctlr, c);
1896 c->retry_count++;
1897 /* erase the old error information */
1898 memset(c->err_info, 0,
1899 sizeof(ErrorInfo_struct));
1900 return_status = IO_OK;
1901 INIT_COMPLETION(wait);
1902 goto resend_cmd2;
1904 return_status = IO_ERROR;
1905 break;
1906 default:
1907 printk(KERN_WARNING "cciss: cmd %p returned "
1908 "unknown status %x\n", c,
1909 c->err_info->CommandStatus);
1910 return_status = IO_ERROR;
1913 /* unlock the buffers from DMA */
1914 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1915 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1916 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
1917 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1918 cmd_free(h, c, 0);
1919 return return_status;
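/* Read BMIC inquiry page 0xC1 to get a volume's geometry and RAID level; fall back to 255 heads x 32 sectors if the volume doesn't report one. */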
1922 static void cciss_geometry_inquiry(int ctlr, int logvol,
1923 int withirq, sector_t total_size,
1924 unsigned int block_size,
1925 InquiryData_struct *inq_buff,
1926 drive_info_struct *drv)
1928 int return_code;
1929 unsigned long t;
1931 memset(inq_buff, 0, sizeof(InquiryData_struct));
1932 if (withirq)
1933 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1934 inq_buff, sizeof(*inq_buff), 1,
1935 logvol, 0xC1, TYPE_CMD);
1936 else
1937 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1938 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
1939 TYPE_CMD);
1940 if (return_code == IO_OK) {
1941 if (inq_buff->data_byte[8] == 0xFF) {
1942 printk(KERN_WARNING
1943 "cciss: reading geometry failed, volume "
1944 "does not support reading geometry\n");
1945 drv->heads = 255;
1946 drv->sectors = 32; // Sectors per track
1947 drv->cylinders = total_size + 1;
1948 drv->raid_level = RAID_UNKNOWN;
1949 } else {
1950 drv->heads = inq_buff->data_byte[6];
1951 drv->sectors = inq_buff->data_byte[7];
1952 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1953 drv->cylinders += inq_buff->data_byte[5];
1954 drv->raid_level = inq_buff->data_byte[8];
1956 drv->block_size = block_size;
1957 drv->nr_blocks = total_size + 1;
1958 t = drv->heads * drv->sectors;
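/* cylinders = total blocks / (heads * sectors per track), rounded up;
 * e.g. with 255 heads and 32 sectors that is one cylinder per 8160 blocks. */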
1959 if (t > 1) {
1960 sector_t real_size = total_size + 1;
1961 unsigned long rem = sector_div(real_size, t);
1962 if (rem)
1963 real_size++;
1964 drv->cylinders = real_size;
1966 } else { /* Get geometry failed */
1967 printk(KERN_WARNING "cciss: reading geometry failed\n");
1969 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
1970 drv->heads, drv->sectors, drv->cylinders);
1973 static void
1974 cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
1975 unsigned int *block_size)
1977 ReadCapdata_struct *buf;
1978 int return_code;
1980 buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
1981 if (!buf) {
1982 printk(KERN_WARNING "cciss: out of memory\n");
1983 return;
1986 if (withirq)
1987 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1988 ctlr, buf, sizeof(ReadCapdata_struct),
1989 1, logvol, 0, TYPE_CMD);
1990 else
1991 return_code = sendcmd(CCISS_READ_CAPACITY,
1992 ctlr, buf, sizeof(ReadCapdata_struct),
1993 1, logvol, 0, NULL, TYPE_CMD);
1994 if (return_code == IO_OK) {
1995 *total_size = be32_to_cpu(*(__be32 *) buf->total_size);
1996 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
1997 } else { /* read capacity command failed */
1998 printk(KERN_WARNING "cciss: read capacity failed\n");
1999 *total_size = 0;
2000 *block_size = BLOCK_SIZE;
2002 if (*total_size != 0)
2003 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2004 (unsigned long long)*total_size+1, *block_size);
2005 kfree(buf);
2008 static void
2009 cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
2011 ReadCapdata_struct_16 *buf;
2012 int return_code;
2014 buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2015 if (!buf) {
2016 printk(KERN_WARNING "cciss: out of memory\n");
2017 return;
2020 if (withirq) {
2021 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2022 ctlr, buf, sizeof(ReadCapdata_struct_16),
2023 1, logvol, 0, TYPE_CMD);
2025 else {
2026 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2027 ctlr, buf, sizeof(ReadCapdata_struct_16),
2028 1, logvol, 0, NULL, TYPE_CMD);
2030 if (return_code == IO_OK) {
2031 *total_size = be64_to_cpu(*(__be64 *) buf->total_size);
2032 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2033 } else { /* read capacity command failed */
2034 printk(KERN_WARNING "cciss: read capacity failed\n");
2035 *total_size = 0;
2036 *block_size = BLOCK_SIZE;
2038 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2039 (unsigned long long)*total_size+1, *block_size);
2040 kfree(buf);
2043 static int cciss_revalidate(struct gendisk *disk)
2045 ctlr_info_t *h = get_host(disk);
2046 drive_info_struct *drv = get_drv(disk);
2047 int logvol;
2048 int FOUND = 0;
2049 unsigned int block_size;
2050 sector_t total_size;
2051 InquiryData_struct *inq_buff = NULL;
2053 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2054 if (h->drv[logvol].LunID == drv->LunID) {
2055 FOUND = 1;
2056 break;
2060 if (!FOUND)
2061 return 1;
2063 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2064 if (inq_buff == NULL) {
2065 printk(KERN_WARNING "cciss: out of memory\n");
2066 return 1;
2068 if (h->cciss_read == CCISS_READ_10) {
2069 cciss_read_capacity(h->ctlr, logvol, 1,
2070 &total_size, &block_size);
2071 } else {
2072 cciss_read_capacity_16(h->ctlr, logvol, 1,
2073 &total_size, &block_size);
2075 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2076 inq_buff, drv);
2078 blk_queue_hardsect_size(drv->queue, drv->block_size);
2079 set_capacity(disk, drv->nr_blocks);
2081 kfree(inq_buff);
2082 return 0;
2086 * Poll for a command to complete.
2087 * The memory-mapped FIFO is polled for the completion.
2088 * Used only at init time, when interrupts from the HBA are disabled.
2090 static unsigned long pollcomplete(int ctlr)
2092 unsigned long done;
2093 int i;
2095 /* Wait (up to 20 seconds) for a command to complete */
2097 for (i = 20 * HZ; i > 0; i--) {
2098 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2099 if (done == FIFO_EMPTY)
2100 schedule_timeout_uninterruptible(1);
2101 else
2102 return done;
2104 /* Invalid address to tell caller we ran out of time */
2105 return 1;
2108 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2110 /* We get in here if sendcmd() is polling for completions
2111 and gets some command back that it wasn't expecting --
2112 something other than that which it just sent down.
2113 Ordinarily, that shouldn't happen, but it can happen when
2114 the scsi tape stuff gets into error handling mode, and
2115 starts using sendcmd() to try to abort commands and
2116 reset tape drives. In that case, sendcmd may pick up
2117 completions of commands that were sent to logical drives
2118 through the block i/o system, or cciss ioctls completing, etc.
2119 In that case, we need to save those completions for later
2120 processing by the interrupt handler.
2123 #ifdef CONFIG_CISS_SCSI_TAPE
2124 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2126 /* If it's not the scsi tape stuff doing error handling, (abort */
2127 /* or reset) then we don't expect anything weird. */
2128 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2129 #endif
2130 printk(KERN_WARNING "cciss%d: SendCmd "
2131 "Invalid command list address returned! (%lx)\n",
2132 ctlr, complete);
2133 /* not much we can do. */
2134 #ifdef CONFIG_CISS_SCSI_TAPE
2135 return 1;
2138 /* We've sent down an abort or reset, but something else
2139 has completed */
2140 if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
2141 /* Uh oh. No room to save it for later... */
2142 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2143 "reject list overflow, command lost!\n", ctlr);
2144 return 1;
2146 /* Save it for later */
2147 srl->complete[srl->ncompletions] = complete;
2148 srl->ncompletions++;
2149 #endif
2150 return 0;
2154 * Send a command to the controller, and wait for it to complete.
2155 * Only used at init time.
2157 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2158 1: address logical volume log_unit,
2159 2: periph device address is scsi3addr */
2160 unsigned int log_unit,
2161 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2163 CommandList_struct *c;
2164 int i;
2165 unsigned long complete;
2166 ctlr_info_t *info_p = hba[ctlr];
2167 u64bit buff_dma_handle;
2168 int status, done = 0;
2170 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2171 printk(KERN_WARNING "cciss: unable to get memory\n");
2172 return IO_ERROR;
2174 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2175 log_unit, page_code, scsi3addr, cmd_type);
2176 if (status != IO_OK) {
2177 cmd_free(info_p, c, 1);
2178 return status;
2180 resend_cmd1:
2182 * Disable interrupt
2184 #ifdef CCISS_DEBUG
2185 printk(KERN_DEBUG "cciss: turning intr off\n");
2186 #endif /* CCISS_DEBUG */
2187 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2189 /* Make sure there is room in the command FIFO */
2190 /* Actually it should be completely empty at this time */
2191 /* unless we are in here doing error handling for the scsi */
2192 /* tape side of the driver. */
2193 for (i = 200000; i > 0; i--) {
2194 /* if fifo isn't full go */
2195 if (!(info_p->access.fifo_full(info_p))) {
2197 break;
2199 udelay(10);
2200 printk(KERN_WARNING "cciss%d: SendCmd FIFO full,"
2201 " waiting!\n", ctlr);
2204 * Send the cmd
2206 info_p->access.submit_command(info_p, c);
2207 done = 0;
2208 do {
2209 complete = pollcomplete(ctlr);
2211 #ifdef CCISS_DEBUG
2212 printk(KERN_DEBUG "cciss: command completed\n");
2213 #endif /* CCISS_DEBUG */
2215 if (complete == 1) {
2216 printk(KERN_WARNING
2217 "cciss%d: SendCmd timed out, "
2218 "No command list address returned!\n", ctlr);
2219 status = IO_ERROR;
2220 done = 1;
2221 break;
2224 /* This will need to change for direct lookup completions */
2225 if ((complete & CISS_ERROR_BIT)
2226 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2227 /* if data overrun or underrun on Report command
2228 ignore it
2230 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2231 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2232 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2233 ((c->err_info->CommandStatus ==
2234 CMD_DATA_OVERRUN) ||
2235 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2236 )) {
2237 complete = c->busaddr;
2238 } else {
2239 if (c->err_info->CommandStatus ==
2240 CMD_UNSOLICITED_ABORT) {
2241 printk(KERN_WARNING "cciss%d: "
2242 "unsolicited abort %p\n",
2243 ctlr, c);
2244 if (c->retry_count < MAX_CMD_RETRIES) {
2245 printk(KERN_WARNING
2246 "cciss%d: retrying %p\n",
2247 ctlr, c);
2248 c->retry_count++;
2249 /* erase the old error */
2250 /* information */
2251 memset(c->err_info, 0,
2252 sizeof
2253 (ErrorInfo_struct));
2254 goto resend_cmd1;
2255 } else {
2256 printk(KERN_WARNING
2257 "cciss%d: retried %p too "
2258 "many times\n", ctlr, c);
2259 status = IO_ERROR;
2260 goto cleanup1;
2262 } else if (c->err_info->CommandStatus ==
2263 CMD_UNABORTABLE) {
2264 printk(KERN_WARNING
2265 "cciss%d: command could not be aborted.\n",
2266 ctlr);
2267 status = IO_ERROR;
2268 goto cleanup1;
2270 printk(KERN_WARNING "cciss%d: sendcmd"
2271 " Error %x\n", ctlr,
2272 c->err_info->CommandStatus);
2273 printk(KERN_WARNING "cciss%d: sendcmd"
2274 " offensive info\n"
2275 " size %x\n num %x value %x\n",
2276 ctlr,
2277 c->err_info->MoreErrInfo.Invalid_Cmd.
2278 offense_size,
2279 c->err_info->MoreErrInfo.Invalid_Cmd.
2280 offense_num,
2281 c->err_info->MoreErrInfo.Invalid_Cmd.
2282 offense_value);
2283 status = IO_ERROR;
2284 goto cleanup1;
2287 /* This will need changing for direct lookup completions */
2288 if (complete != c->busaddr) {
2289 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2290 BUG(); /* we are pretty much hosed if we get here. */
2292 continue;
2293 } else
2294 done = 1;
2295 } while (!done);
2297 cleanup1:
2298 /* unlock the data buffer from DMA */
2299 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2300 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2301 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2302 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2303 #ifdef CONFIG_CISS_SCSI_TAPE
2304 /* if we saved some commands for later, process them now. */
2305 if (info_p->scsi_rejects.ncompletions > 0)
2306 do_cciss_intr(0, info_p);
2307 #endif
2308 cmd_free(info_p, c, 1);
2309 return status;
2313 * Map (physical) PCI mem into (virtual) kernel space
2315 static void __iomem *remap_pci_mem(ulong base, ulong size)
2317 ulong page_base = ((ulong) base) & PAGE_MASK;
2318 ulong page_offs = ((ulong) base) - page_base;
2319 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2321 return page_remapped ? (page_remapped + page_offs) : NULL;
2325 * Takes jobs off the request Q and sends them to the hardware, then puts
2326 * them on the completion Q to wait for completion.
2328 static void start_io(ctlr_info_t *h)
2330 CommandList_struct *c;
2332 while ((c = h->reqQ) != NULL) {
2333 /* can't do anything if fifo is full */
2334 if ((h->access.fifo_full(h))) {
2335 printk(KERN_WARNING "cciss: fifo full\n");
2336 break;
2339 /* Get the first entry from the Request Q */
2340 removeQ(&(h->reqQ), c);
2341 h->Qdepth--;
2343 /* Tell the controller to execute the command */
2344 h->access.submit_command(h, c);
2346 /* Put job onto the completed Q */
2347 addQ(&(h->cmpQ), c);
2351 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2352 /* Zeros out the error record and then resends the command back */
2353 /* to the controller */
2354 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2356 /* erase the old error information */
2357 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2359 /* add it to software queue and then send it to the controller */
2360 addQ(&(h->reqQ), c);
2361 h->Qdepth++;
2362 if (h->Qdepth > h->maxQsinceinit)
2363 h->maxQsinceinit = h->Qdepth;
2365 start_io(h);
2368 static inline int evaluate_target_status(CommandList_struct *cmd)
2370 unsigned char sense_key;
2371 int error_count = 1;
2373 if (cmd->err_info->ScsiStatus != 0x02) { /* not check condition? */
2374 if (!blk_pc_request(cmd->rq))
2375 printk(KERN_WARNING "cciss: cmd %p "
2376 "has SCSI Status 0x%x\n",
2377 cmd, cmd->err_info->ScsiStatus);
2378 return error_count;
2381 /* check the sense key */
2382 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2383 /* no status or recovered error */
2384 if ((sense_key == 0x0) || (sense_key == 0x1))
2385 error_count = 0;
2387 if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
2388 if (error_count != 0)
2389 printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
2390 " sense key = 0x%x\n", cmd, sense_key);
2391 return error_count;
2394 /* SG_IO or similar, copy sense data back */
2395 if (cmd->rq->sense) {
2396 if (cmd->rq->sense_len > cmd->err_info->SenseLen)
2397 cmd->rq->sense_len = cmd->err_info->SenseLen;
2398 memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
2399 cmd->rq->sense_len);
2400 } else
2401 cmd->rq->sense_len = 0;
2403 return error_count;
2406 /* checks the status of the job and calls complete buffers to mark all
2407 * buffers for the completed job. Note that this function does not need
2408 * to hold the hba/queue lock.
2410 static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2411 int timeout)
2413 int retry_cmd = 0;
2414 struct request *rq = cmd->rq;
2416 rq->errors = 0;
2418 if (timeout)
2419 rq->errors = 1;
2421 if (cmd->err_info->CommandStatus == 0) /* no error has occurred */
2422 goto after_error_processing;
2424 switch (cmd->err_info->CommandStatus) {
2425 case CMD_TARGET_STATUS:
2426 rq->errors = evaluate_target_status(cmd);
2427 break;
2428 case CMD_DATA_UNDERRUN:
2429 if (blk_fs_request(cmd->rq)) {
2430 printk(KERN_WARNING "cciss: cmd %p has"
2431 " completed with data underrun "
2432 "reported\n", cmd);
2433 cmd->rq->data_len = cmd->err_info->ResidualCnt;
2435 break;
2436 case CMD_DATA_OVERRUN:
2437 if (blk_fs_request(cmd->rq))
2438 printk(KERN_WARNING "cciss: cmd %p has"
2439 " completed with data overrun "
2440 "reported\n", cmd);
2441 break;
2442 case CMD_INVALID:
2443 printk(KERN_WARNING "cciss: cmd %p is "
2444 "reported invalid\n", cmd);
2445 rq->errors = 1;
2446 break;
2447 case CMD_PROTOCOL_ERR:
2448 printk(KERN_WARNING "cciss: cmd %p has "
2449 "protocol error\n", cmd);
2450 rq->errors = 1;
2451 break;
2452 case CMD_HARDWARE_ERR:
2453 printk(KERN_WARNING "cciss: cmd %p had "
2454 "a hardware error\n", cmd);
2455 rq->errors = 1;
2456 break;
2457 case CMD_CONNECTION_LOST:
2458 printk(KERN_WARNING "cciss: cmd %p had "
2459 "connection lost\n", cmd);
2460 rq->errors = 1;
2461 break;
2462 case CMD_ABORTED:
2463 printk(KERN_WARNING "cciss: cmd %p was "
2464 "aborted\n", cmd);
2465 rq->errors = 1;
2466 break;
2467 case CMD_ABORT_FAILED:
2468 printk(KERN_WARNING "cciss: cmd %p reports "
2469 "abort failed\n", cmd);
2470 rq->errors = 1;
2471 break;
2472 case CMD_UNSOLICITED_ABORT:
2473 printk(KERN_WARNING "cciss%d: unsolicited "
2474 "abort %p\n", h->ctlr, cmd);
2475 if (cmd->retry_count < MAX_CMD_RETRIES) {
2476 retry_cmd = 1;
2477 printk(KERN_WARNING
2478 "cciss%d: retrying %p\n", h->ctlr, cmd);
2479 cmd->retry_count++;
2480 } else
2481 printk(KERN_WARNING
2482 "cciss%d: %p retried too "
2483 "many times\n", h->ctlr, cmd);
2484 rq->errors = 1;
2485 break;
2486 case CMD_TIMEOUT:
2487 printk(KERN_WARNING "cciss: cmd %p timed out\n", cmd);
2488 rq->errors = 1;
2489 break;
2490 default:
2491 printk(KERN_WARNING "cciss: cmd %p returned "
2492 "unknown status %x\n", cmd,
2493 cmd->err_info->CommandStatus);
2494 rq->errors = 1;
2497 after_error_processing:
2499 /* We need to return this command */
2500 if (retry_cmd) {
2501 resend_cciss_cmd(h, cmd);
2502 return;
2504 cmd->rq->data_len = 0;
2505 cmd->rq->completion_data = cmd;
2506 blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
2507 blk_complete_request(cmd->rq);
2511 * Get a request and submit it to the controller.
2513 static void do_cciss_request(struct request_queue *q)
2515 ctlr_info_t *h = q->queuedata;
2516 CommandList_struct *c;
2517 sector_t start_blk;
2518 int seg;
2519 struct request *creq;
2520 u64bit temp64;
2521 struct scatterlist tmp_sg[MAXSGENTRIES];
2522 drive_info_struct *drv;
2523 int i, dir;
2525 /* We call start_io here in case there is a command waiting on the
2526 * queue that has not been sent.
2528 if (blk_queue_plugged(q))
2529 goto startio;
2531 queue:
2532 creq = elv_next_request(q);
2533 if (!creq)
2534 goto startio;
2536 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2538 if ((c = cmd_alloc(h, 1)) == NULL)
2539 goto full;
2541 blkdev_dequeue_request(creq);
2543 spin_unlock_irq(q->queue_lock);
2545 c->cmd_type = CMD_RWREQ;
2546 c->rq = creq;
2548 /* fill in the request */
2549 drv = creq->rq_disk->private_data;
2550 c->Header.ReplyQueue = 0; // unused in simple mode
2551 /* got command from pool, so use the command block index instead */
2552 /* for direct lookups. */
2553 /* The first 2 bits are reserved for controller error reporting. */
2554 c->Header.Tag.lower = (c->cmdindex << 3);
2555 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
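/* do_cciss_intr() keys off this 0x04 flag and recovers the command by
 * pool index (tag >> 3) instead of searching the completion queue. */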
2556 c->Header.LUN.LogDev.VolId = drv->LunID;
2557 c->Header.LUN.LogDev.Mode = 1;
2558 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2559 c->Request.Type.Type = TYPE_CMD; // It is a command.
2560 c->Request.Type.Attribute = ATTR_SIMPLE;
2561 c->Request.Type.Direction =
2562 (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
2563 c->Request.Timeout = 0; // Don't time out
2564 c->Request.CDB[0] =
2565 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2566 start_blk = creq->sector;
2567 #ifdef CCISS_DEBUG
2568 printk(KERN_DEBUG "cciss: sector=%d nr_sectors=%d\n", (int)creq->sector,
2569 (int)creq->nr_sectors);
2570 #endif /* CCISS_DEBUG */
2572 seg = blk_rq_map_sg(q, creq, tmp_sg);
2574 /* get the DMA records for the setup */
2575 if (c->Request.Type.Direction == XFER_READ)
2576 dir = PCI_DMA_FROMDEVICE;
2577 else
2578 dir = PCI_DMA_TODEVICE;
2580 for (i = 0; i < seg; i++) {
2581 c->SG[i].Len = tmp_sg[i].length;
2582 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2583 tmp_sg[i].offset,
2584 tmp_sg[i].length, dir);
2585 c->SG[i].Addr.lower = temp64.val32.lower;
2586 c->SG[i].Addr.upper = temp64.val32.upper;
2587 c->SG[i].Ext = 0; // we are not chaining
2589 /* track how many SG entries we are using */
2590 if (seg > h->maxSG)
2591 h->maxSG = seg;
2593 #ifdef CCISS_DEBUG
2594 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
2595 creq->nr_sectors, seg);
2596 #endif /* CCISS_DEBUG */
2598 c->Header.SGList = c->Header.SGTotal = seg;
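/* Build the CDB: a 10-byte CDB with a 32-bit LBA when the controller is
 * still on READ/WRITE(10), or a 16-byte CDB with a 64-bit LBA once it has
 * been switched to 16-byte ops for >2TB logical drives. */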
2599 if (likely(blk_fs_request(creq))) {
2600 if(h->cciss_read == CCISS_READ_10) {
2601 c->Request.CDB[1] = 0;
2602 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2603 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2604 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2605 c->Request.CDB[5] = start_blk & 0xff;
2606 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2607 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2608 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2609 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2610 } else {
2611 c->Request.CDBLen = 16;
2612 c->Request.CDB[1]= 0;
2613 c->Request.CDB[2]= (start_blk >> 56) & 0xff; //MSB
2614 c->Request.CDB[3]= (start_blk >> 48) & 0xff;
2615 c->Request.CDB[4]= (start_blk >> 40) & 0xff;
2616 c->Request.CDB[5]= (start_blk >> 32) & 0xff;
2617 c->Request.CDB[6]= (start_blk >> 24) & 0xff;
2618 c->Request.CDB[7]= (start_blk >> 16) & 0xff;
2619 c->Request.CDB[8]= (start_blk >> 8) & 0xff;
2620 c->Request.CDB[9]= start_blk & 0xff;
2621 c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
2622 c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
2623 c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
2624 c->Request.CDB[13]= creq->nr_sectors & 0xff;
2625 c->Request.CDB[14] = c->Request.CDB[15] = 0;
2627 } else if (blk_pc_request(creq)) {
2628 c->Request.CDBLen = creq->cmd_len;
2629 memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
2630 } else {
2631 printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type);
2632 BUG();
2635 spin_lock_irq(q->queue_lock);
2637 addQ(&(h->reqQ), c);
2638 h->Qdepth++;
2639 if (h->Qdepth > h->maxQsinceinit)
2640 h->maxQsinceinit = h->Qdepth;
2642 goto queue;
2643 full:
2644 blk_stop_queue(q);
2645 startio:
2646 /* We will already have the driver lock here so no need
2647 * to lock it.
2649 start_io(h);
2652 static inline unsigned long get_next_completion(ctlr_info_t *h)
2654 #ifdef CONFIG_CISS_SCSI_TAPE
2655 /* Any rejects from sendcmd() lying around? Process them first */
2656 if (h->scsi_rejects.ncompletions == 0)
2657 return h->access.command_completed(h);
2658 else {
2659 struct sendcmd_reject_list *srl;
2660 int n;
2661 srl = &h->scsi_rejects;
2662 n = --srl->ncompletions;
2663 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2664 printk("p");
2665 return srl->complete[n];
2667 #else
2668 return h->access.command_completed(h);
2669 #endif
2672 static inline int interrupt_pending(ctlr_info_t *h)
2674 #ifdef CONFIG_CISS_SCSI_TAPE
2675 return (h->access.intr_pending(h)
2676 || (h->scsi_rejects.ncompletions > 0));
2677 #else
2678 return h->access.intr_pending(h);
2679 #endif
2682 static inline long interrupt_not_for_us(ctlr_info_t *h)
2684 #ifdef CONFIG_CISS_SCSI_TAPE
2685 return (((h->access.intr_pending(h) == 0) ||
2686 (h->interrupts_enabled == 0))
2687 && (h->scsi_rejects.ncompletions == 0));
2688 #else
2689 return (((h->access.intr_pending(h) == 0) ||
2690 (h->interrupts_enabled == 0)));
2691 #endif
2694 static irqreturn_t do_cciss_intr(int irq, void *dev_id)
2696 ctlr_info_t *h = dev_id;
2697 CommandList_struct *c;
2698 unsigned long flags;
2699 __u32 a, a1, a2;
2701 if (interrupt_not_for_us(h))
2702 return IRQ_NONE;
2704 * If there are completed commands in the completion queue,
2705 * we had better do something about it.
2707 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2708 while (interrupt_pending(h)) {
2709 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
2710 a1 = a;
2711 if ((a & 0x04)) {
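/* Bit 0x04 marks a direct-lookup tag set up in do_cciss_request();
 * the command pool index lives in bits 3 and up. */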
2712 a2 = (a >> 3);
2713 if (a2 >= h->nr_cmds) {
2714 printk(KERN_WARNING
2715 "cciss: controller cciss%d failed, stopping.\n",
2716 h->ctlr);
2717 fail_all_cmds(h->ctlr);
2718 return IRQ_HANDLED;
2721 c = h->cmd_pool + a2;
2722 a = c->busaddr;
2724 } else {
2725 a &= ~3;
2726 if ((c = h->cmpQ) == NULL) {
2727 printk(KERN_WARNING
2728 "cciss: Completion of %08x ignored\n",
2729 a1);
2730 continue;
2732 while (c->busaddr != a) {
2733 c = c->next;
2734 if (c == h->cmpQ)
2735 break;
2739 * If we've found the command, take it off the
2740 * completion Q and free it
2742 if (c->busaddr == a) {
2743 removeQ(&h->cmpQ, c);
2744 if (c->cmd_type == CMD_RWREQ) {
2745 complete_command(h, c, 0);
2746 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2747 complete(c->waiting);
2749 # ifdef CONFIG_CISS_SCSI_TAPE
2750 else if (c->cmd_type == CMD_SCSI)
2751 complete_scsi_command(c, 0, a1);
2752 # endif
2753 continue;
2758 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2759 return IRQ_HANDLED;
2763 * We cannot read the structure directly; for portability we must use
2764 * the io functions.
2765 * This is for debug only.
2767 #ifdef CCISS_DEBUG
2768 static void print_cfg_table(CfgTable_struct *tb)
2770 int i;
2771 char temp_name[17];
2773 printk("Controller Configuration information\n");
2774 printk("------------------------------------\n");
2775 for (i = 0; i < 4; i++)
2776 temp_name[i] = readb(&(tb->Signature[i]));
2777 temp_name[4] = '\0';
2778 printk(" Signature = %s\n", temp_name);
2779 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2780 printk(" Transport methods supported = 0x%x\n",
2781 readl(&(tb->TransportSupport)));
2782 printk(" Transport methods active = 0x%x\n",
2783 readl(&(tb->TransportActive)));
2784 printk(" Requested transport Method = 0x%x\n",
2785 readl(&(tb->HostWrite.TransportRequest)));
2786 printk(" Coalesce Interrupt Delay = 0x%x\n",
2787 readl(&(tb->HostWrite.CoalIntDelay)));
2788 printk(" Coalesce Interrupt Count = 0x%x\n",
2789 readl(&(tb->HostWrite.CoalIntCount)));
2790 printk(" Max outstanding commands = %d\n",
2791 readl(&(tb->CmdsOutMax)));
2792 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
2793 for (i = 0; i < 16; i++)
2794 temp_name[i] = readb(&(tb->ServerName[i]));
2795 temp_name[16] = '\0';
2796 printk(" Server Name = %s\n", temp_name);
2797 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
2799 #endif /* CCISS_DEBUG */
2801 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
2803 int i, offset, mem_type, bar_type;
2804 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2805 return 0;
2806 offset = 0;
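/* Walk the BARs, advancing the config-space offset by 4 bytes for I/O
 * and 32-bit memory BARs and by 8 bytes for 64-bit memory BARs, until
 * the running offset matches the requested base address register. */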
2807 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2808 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
2809 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2810 offset += 4;
2811 else {
2812 mem_type = pci_resource_flags(pdev, i) &
2813 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2814 switch (mem_type) {
2815 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2816 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2817 offset += 4; /* 32 bit */
2818 break;
2819 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2820 offset += 8;
2821 break;
2822 default: /* reserved in PCI 2.2 */
2823 printk(KERN_WARNING
2824 "Base address is invalid\n");
2825 return -1;
2826 break;
2829 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2830 return i + 1;
2832 return -1;
2835 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
2836 * controllers that are capable. If not, we use IO-APIC mode.
2839 static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
2840 struct pci_dev *pdev, __u32 board_id)
2842 #ifdef CONFIG_PCI_MSI
2843 int err;
2844 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
2845 {0, 2}, {0, 3}
2848 /* Some boards advertise MSI but don't really support it */
2849 if ((board_id == 0x40700E11) ||
2850 (board_id == 0x40800E11) ||
2851 (board_id == 0x40820E11) || (board_id == 0x40830E11))
2852 goto default_int_mode;
2854 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2855 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2856 if (!err) {
2857 c->intr[0] = cciss_msix_entries[0].vector;
2858 c->intr[1] = cciss_msix_entries[1].vector;
2859 c->intr[2] = cciss_msix_entries[2].vector;
2860 c->intr[3] = cciss_msix_entries[3].vector;
2861 c->msix_vector = 1;
2862 return;
2864 if (err > 0) {
2865 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2866 "available\n", err);
2867 goto default_int_mode;
2868 } else {
2869 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2870 err);
2871 goto default_int_mode;
2874 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2875 if (!pci_enable_msi(pdev)) {
2876 c->msi_vector = 1;
2877 } else {
2878 printk(KERN_WARNING "cciss: MSI init failed\n");
2881 default_int_mode:
2882 #endif /* CONFIG_PCI_MSI */
2883 /* if we get here we're going to use the default interrupt mode */
2884 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2885 return;
2888 static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2890 ushort subsystem_vendor_id, subsystem_device_id, command;
2891 __u32 board_id, scratchpad = 0;
2892 __u64 cfg_offset;
2893 __u32 cfg_base_addr;
2894 __u64 cfg_base_addr_index;
2895 int i, err;
2897 /* check to see if controller has been disabled */
2898 /* BEFORE trying to enable it */
2899 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
2900 if (!(command & 0x02)) {
2901 printk(KERN_WARNING
2902 "cciss: controller appears to be disabled\n");
2903 return -ENODEV;
2906 err = pci_enable_device(pdev);
2907 if (err) {
2908 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2909 return err;
2912 err = pci_request_regions(pdev, "cciss");
2913 if (err) {
2914 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
2915 "aborting\n");
2916 return err;
2919 subsystem_vendor_id = pdev->subsystem_vendor;
2920 subsystem_device_id = pdev->subsystem_device;
2921 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2922 subsystem_vendor_id);
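/* board_id: subsystem device ID in the upper 16 bits, subsystem vendor
 * ID in the lower 16 bits; the products[] table below is keyed on this. */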
2924 #ifdef CCISS_DEBUG
2925 printk("command = %x\n", command);
2926 printk("irq = %x\n", pdev->irq);
2927 printk("board_id = %x\n", board_id);
2928 #endif /* CCISS_DEBUG */
2930 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
2931 * else we use the IO-APIC interrupt assigned to us by system ROM.
2933 cciss_interrupt_mode(c, pdev, board_id);
2936 * Memory base addr is the first addr; the second points to the config
2937 * table.
2940 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2941 #ifdef CCISS_DEBUG
2942 printk("address 0 = %x\n", c->paddr);
2943 #endif /* CCISS_DEBUG */
2944 c->vaddr = remap_pci_mem(c->paddr, 0x250);
2946 /* Wait for the board to become ready. (PCI hotplug needs this.)
2947 * We poll for up to 120 secs, once per 100ms. */
2948 for (i = 0; i < 1200; i++) {
2949 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2950 if (scratchpad == CCISS_FIRMWARE_READY)
2951 break;
2952 set_current_state(TASK_INTERRUPTIBLE);
2953 schedule_timeout(HZ / 10); /* wait 100ms */
2955 if (scratchpad != CCISS_FIRMWARE_READY) {
2956 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2957 err = -ENODEV;
2958 goto err_out_free_res;
2961 /* get the address index number */
2962 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2963 cfg_base_addr &= (__u32) 0x0000ffff;
2964 #ifdef CCISS_DEBUG
2965 printk("cfg base address = %x\n", cfg_base_addr);
2966 #endif /* CCISS_DEBUG */
2967 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
2968 #ifdef CCISS_DEBUG
2969 printk("cfg base address index = %x\n", cfg_base_addr_index);
2970 #endif /* CCISS_DEBUG */
2971 if (cfg_base_addr_index == -1) {
2972 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2973 err = -ENODEV;
2974 goto err_out_free_res;
2977 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2978 #ifdef CCISS_DEBUG
2979 printk("cfg offset = %x\n", cfg_offset);
2980 #endif /* CCISS_DEBUG */
2981 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2982 cfg_base_addr_index) +
2983 cfg_offset, sizeof(CfgTable_struct));
2984 c->board_id = board_id;
2986 #ifdef CCISS_DEBUG
2987 print_cfg_table(c->cfgtable);
2988 #endif /* CCISS_DEBUG */
2990 for (i = 0; i < ARRAY_SIZE(products); i++) {
2991 if (board_id == products[i].board_id) {
2992 c->product_name = products[i].product_name;
2993 c->access = *(products[i].access);
2994 c->nr_cmds = products[i].nr_cmds;
2995 break;
2998 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
2999 (readb(&c->cfgtable->Signature[1]) != 'I') ||
3000 (readb(&c->cfgtable->Signature[2]) != 'S') ||
3001 (readb(&c->cfgtable->Signature[3]) != 'S')) {
3002 printk(KERN_WARNING "cciss: Does not appear to be a valid CISS config table\n");
3003 err = -ENODEV;
3004 goto err_out_free_res;
3006 /* We didn't find the controller in our list. We know the
3007 * signature is valid. If it's an HP device let's try to
3008 * bind to the device and fire it up. Otherwise we bail.
3010 if (i == ARRAY_SIZE(products)) {
3011 if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
3012 c->product_name = products[i-1].product_name;
3013 c->access = *(products[i-1].access);
3014 c->nr_cmds = products[i-1].nr_cmds;
3015 printk(KERN_WARNING "cciss: This is an unknown "
3016 "Smart Array controller.\n"
3017 "cciss: Please update to the latest driver "
3018 "available from www.hp.com.\n");
3019 } else {
3020 printk(KERN_WARNING "cciss: Sorry, I don't know how"
3021 " to access the Smart Array controller %08lx\n"
3022 , (unsigned long)board_id);
3023 err = -ENODEV;
3024 goto err_out_free_res;
3027 #ifdef CONFIG_X86
3029 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3030 __u32 prefetch;
3031 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
3032 prefetch |= 0x100;
3033 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
3035 #endif
3037 /* Disabling DMA prefetch for the P600.
3038 * An ASIC bug may result in a prefetch beyond
3039 * physical memory.
3041 if(board_id == 0x3225103C) {
3042 __u32 dma_prefetch;
3043 dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
3044 dma_prefetch |= 0x8000;
3045 writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
3048 #ifdef CCISS_DEBUG
3049 printk("Trying to put board into Simple mode\n");
3050 #endif /* CCISS_DEBUG */
3051 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
3052 /* Update the field, and then ring the doorbell */
3053 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
3054 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
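/* The controller clears CFGTBL_ChangeReq in the doorbell register once the
 * transport change has been accepted; the loop below polls for that before
 * verifying that simple mode is actually active. */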
3056 /* under certain very rare conditions, this can take a while.
3057 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3058 * as we enter this code.) */
3059 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3060 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3061 break;
3062 /* delay and try again */
3063 set_current_state(TASK_INTERRUPTIBLE);
3064 schedule_timeout(10);
3067 #ifdef CCISS_DEBUG
3068 printk(KERN_DEBUG "I counter got to %d %x\n", i,
3069 readl(c->vaddr + SA5_DOORBELL));
3070 #endif /* CCISS_DEBUG */
3071 #ifdef CCISS_DEBUG
3072 print_cfg_table(c->cfgtable);
3073 #endif /* CCISS_DEBUG */
3075 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3076 printk(KERN_WARNING "cciss: unable to get board into"
3077 " simple mode\n");
3078 err = -ENODEV;
3079 goto err_out_free_res;
3081 return 0;
3083 err_out_free_res:
3085 * Deliberately omit pci_disable_device(): it does something nasty to
3086 * Smart Array controllers that pci_enable_device does not undo
3088 pci_release_regions(pdev);
3089 return err;
3093 * Gets information about the local volumes attached to the controller.
3095 static void cciss_getgeometry(int cntl_num)
3097 ReportLunData_struct *ld_buff;
3098 InquiryData_struct *inq_buff;
3099 int return_code;
3100 int i;
3101 int listlength = 0;
3102 __u32 lunid = 0;
3103 unsigned block_size;
3104 sector_t total_size;
3106 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
3107 if (ld_buff == NULL) {
3108 printk(KERN_ERR "cciss: out of memory\n");
3109 return;
3111 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
3112 if (inq_buff == NULL) {
3113 printk(KERN_ERR "cciss: out of memory\n");
3114 kfree(ld_buff);
3115 return;
3117 /* Get the firmware version */
3118 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
3119 sizeof(InquiryData_struct), 0, 0, 0, NULL,
3120 TYPE_CMD);
3121 if (return_code == IO_OK) {
3122 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
3123 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
3124 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
3125 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
3126 } else { /* send command failed */
3128 printk(KERN_WARNING "cciss: unable to determine firmware"
3129 " version of controller\n");
3131 /* Get the number of logical volumes */
3132 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
3133 sizeof(ReportLunData_struct), 0, 0, 0, NULL,
3134 TYPE_CMD);
3136 if (return_code == IO_OK) {
3137 #ifdef CCISS_DEBUG
3138 printk("LUN Data\n--------------------------\n");
3139 #endif /* CCISS_DEBUG */
3141 listlength |=
3142 (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
3143 listlength |=
3144 (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
3145 listlength |=
3146 (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
3147 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
3148 } else { /* reading number of logical volumes failed */
3150 printk(KERN_WARNING "cciss: report logical volume"
3151 " command failed\n");
3152 listlength = 0;
3154 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
3155 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
3156 printk(KERN_ERR
3157 "cciss: only %d logical volumes are supported\n",
3158 CISS_MAX_LUN);
3159 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3161 #ifdef CCISS_DEBUG
3162 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
3163 ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
3164 ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
3165 hba[cntl_num]->num_luns);
3166 #endif /* CCISS_DEBUG */
3168 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
3169 for (i = 0; i < CISS_MAX_LUN; i++) {
3170 if (i < hba[cntl_num]->num_luns) {
3171 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3172 << 24;
3173 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3174 << 16;
3175 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3176 << 8;
3177 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3179 hba[cntl_num]->drv[i].LunID = lunid;
3181 #ifdef CCISS_DEBUG
3182 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3183 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3184 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3185 hba[cntl_num]->drv[i].LunID);
3186 #endif /* CCISS_DEBUG */
3188 /* testing to see if 16-byte CDBs are already being used */
3189 if(hba[cntl_num]->cciss_read == CCISS_READ_16) {
3190 cciss_read_capacity_16(cntl_num, i, 0,
3191 &total_size, &block_size);
3192 goto geo_inq;
3194 cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);
3196 /* If read_capacity returns all F's, the logical drive is >2TB */
3197 /* so we switch to 16-byte CDBs for all read/write ops */
3198 if(total_size == 0xFFFFFFFFULL) {
3199 cciss_read_capacity_16(cntl_num, i, 0,
3200 &total_size, &block_size);
3201 hba[cntl_num]->cciss_read = CCISS_READ_16;
3202 hba[cntl_num]->cciss_write = CCISS_WRITE_16;
3203 } else {
3204 hba[cntl_num]->cciss_read = CCISS_READ_10;
3205 hba[cntl_num]->cciss_write = CCISS_WRITE_10;
3207 geo_inq:
3208 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3209 block_size, inq_buff,
3210 &hba[cntl_num]->drv[i]);
3211 } else {
3212 /* initialize raid_level to indicate a free space */
3213 hba[cntl_num]->drv[i].raid_level = -1;
3216 kfree(ld_buff);
3217 kfree(inq_buff);
3220 /* Function to find the first free pointer into our hba[] array */
3221 /* Returns -1 if no free entries are left. */
3222 static int alloc_cciss_hba(void)
3224 int i;
3226 for (i = 0; i < MAX_CTLR; i++) {
3227 if (!hba[i]) {
3228 ctlr_info_t *p;
3230 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3231 if (!p)
3232 goto Enomem;
3233 p->gendisk[0] = alloc_disk(1 << NWD_SHIFT);
3234 if (!p->gendisk[0]) {
3235 kfree(p);
3236 goto Enomem;
3238 hba[i] = p;
3239 return i;
3242 printk(KERN_WARNING "cciss: This driver supports a maximum"
3243 " of %d controllers.\n", MAX_CTLR);
3244 return -1;
3245 Enomem:
3246 printk(KERN_ERR "cciss: out of memory.\n");
3247 return -1;
3250 static void free_hba(int i)
3252 ctlr_info_t *p = hba[i];
3253 int n;
3255 hba[i] = NULL;
3256 for (n = 0; n < CISS_MAX_LUN; n++)
3257 put_disk(p->gendisk[n]);
3258 kfree(p);
3262 * This is it. Find all the controllers and register them. I really hate
3263 * stealing all these major device numbers.
3264 * Returns the number of block devices registered.
3266 static int __devinit cciss_init_one(struct pci_dev *pdev,
3267 const struct pci_device_id *ent)
3269 int i;
3270 int j = 0;
3271 int rc;
3272 int dac;
3274 i = alloc_cciss_hba();
3275 if (i < 0)
3276 return -1;
3278 hba[i]->busy_initializing = 1;
3280 if (cciss_pci_init(hba[i], pdev) != 0)
3281 goto clean1;
3283 sprintf(hba[i]->devname, "cciss%d", i);
3284 hba[i]->ctlr = i;
3285 hba[i]->pdev = pdev;
3287 /* configure PCI DMA stuff */
3288 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3289 dac = 1;
3290 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3291 dac = 0;
3292 else {
3293 printk(KERN_ERR "cciss: no suitable DMA available\n");
3294 goto clean1;
3298 * register with the major number, or get a dynamic major number
3299 * by passing 0 as argument. This is done to support
3300 * more than 8 controllers.
3302 if (i < MAX_CTLR_ORIG)
3303 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3304 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3305 if (rc == -EBUSY || rc == -EINVAL) {
3306 printk(KERN_ERR
3307 "cciss: Unable to get major number %d for %s "
3308 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3309 goto clean1;
3310 } else {
3311 if (i >= MAX_CTLR_ORIG)
3312 hba[i]->major = rc;
3315 /* make sure the board interrupts are off */
3316 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3317 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3318 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
3319 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3320 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3321 goto clean2;
3324 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3325 hba[i]->devname, pdev->device, pci_name(pdev),
3326 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3328 hba[i]->cmd_pool_bits =
3329 kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG -
3330 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3331 hba[i]->cmd_pool = (CommandList_struct *)
3332 pci_alloc_consistent(hba[i]->pdev,
3333 hba[i]->nr_cmds * sizeof(CommandList_struct),
3334 &(hba[i]->cmd_pool_dhandle));
3335 hba[i]->errinfo_pool = (ErrorInfo_struct *)
3336 pci_alloc_consistent(hba[i]->pdev,
3337 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3338 &(hba[i]->errinfo_pool_dhandle));
3339 if ((hba[i]->cmd_pool_bits == NULL)
3340 || (hba[i]->cmd_pool == NULL)
3341 || (hba[i]->errinfo_pool == NULL)) {
3342 printk(KERN_ERR "cciss: out of memory\n");
3343 goto clean4;
3345 #ifdef CONFIG_CISS_SCSI_TAPE
3346 hba[i]->scsi_rejects.complete =
3347 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3348 (hba[i]->nr_cmds + 5), GFP_KERNEL);
3349 if (hba[i]->scsi_rejects.complete == NULL) {
3350 printk(KERN_ERR "cciss: out of memory\n");
3351 goto clean4;
3353 #endif
3354 spin_lock_init(&hba[i]->lock);
3356 /* Initialize the pdev driver private data.
3357 Have it point to hba[i]. */
3358 pci_set_drvdata(pdev, hba[i]);
3359 /* command and error info recs zeroed out before
3360 they are used */
3361 memset(hba[i]->cmd_pool_bits, 0,
3362 ((hba[i]->nr_cmds + BITS_PER_LONG -
3363 1) / BITS_PER_LONG) * sizeof(unsigned long));
3365 #ifdef CCISS_DEBUG
3366 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
3367 #endif /* CCISS_DEBUG */
3369 cciss_getgeometry(i);
3371 cciss_scsi_setup(i);
3373 /* Turn the interrupts on so we can service requests */
3374 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3376 cciss_procinit(i);
3378 hba[i]->cciss_max_sectors = 2048;
3380 hba[i]->busy_initializing = 0;
3382 do {
3383 drive_info_struct *drv = &(hba[i]->drv[j]);
3384 struct gendisk *disk = hba[i]->gendisk[j];
3385 struct request_queue *q;
3387 /* Check if the disk was allocated already */
3388 if (!disk){
3389 hba[i]->gendisk[j] = alloc_disk(1 << NWD_SHIFT);
3390 disk = hba[i]->gendisk[j];
3393 /* Check that the disk was able to be allocated */
3394 if (!disk) {
3395 printk(KERN_ERR "cciss: unable to allocate memory for disk %d\n", j);
3396 goto clean4;
3399 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3400 if (!q) {
3401 printk(KERN_ERR
3402 "cciss: unable to allocate queue for disk %d\n",
3404 goto clean4;
3406 drv->queue = q;
3408 q->backing_dev_info.ra_pages = READ_AHEAD;
3409 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3411 /* This is a hardware imposed limit. */
3412 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3414 /* This is a limit in the driver and could be eliminated. */
3415 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3417 blk_queue_max_sectors(q, hba[i]->cciss_max_sectors);
3419 blk_queue_softirq_done(q, cciss_softirq_done);
3421 q->queuedata = hba[i];
3422 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3423 disk->major = hba[i]->major;
3424 disk->first_minor = j << NWD_SHIFT;
3425 disk->fops = &cciss_fops;
3426 disk->queue = q;
3427 disk->private_data = drv;
3428 disk->driverfs_dev = &pdev->dev;
3429 /* we must register the controller even if no disks exist */
3430 /* this is for the online array utilities */
3431 if (!drv->heads && j)
3432 continue;
3433 blk_queue_hardsect_size(q, drv->block_size);
3434 set_capacity(disk, drv->nr_blocks);
3435 add_disk(disk);
3436 j++;
3437 } while (j <= hba[i]->highest_lun);
3439 return 1;
3441 clean4:
3442 #ifdef CONFIG_CISS_SCSI_TAPE
3443 kfree(hba[i]->scsi_rejects.complete);
3444 #endif
3445 kfree(hba[i]->cmd_pool_bits);
3446 if (hba[i]->cmd_pool)
3447 pci_free_consistent(hba[i]->pdev,
3448 hba[i]->nr_cmds * sizeof(CommandList_struct),
3449 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3450 if (hba[i]->errinfo_pool)
3451 pci_free_consistent(hba[i]->pdev,
3452 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3453 hba[i]->errinfo_pool,
3454 hba[i]->errinfo_pool_dhandle);
3455 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3456 clean2:
3457 unregister_blkdev(hba[i]->major, hba[i]->devname);
3458 clean1:
3459 hba[i]->busy_initializing = 0;
3460 /* cleanup any queues that may have been initialized */
3461 for (j=0; j <= hba[i]->highest_lun; j++){
3462 drive_info_struct *drv = &(hba[i]->drv[j]);
3463 if (drv->queue)
3464 blk_cleanup_queue(drv->queue);
3467 * Deliberately omit pci_disable_device(): it does something nasty to
3468 * Smart Array controllers that pci_enable_device does not undo
3470 pci_release_regions(pdev);
3471 pci_set_drvdata(pdev, NULL);
3472 free_hba(i);
3473 return -1;
3476 static void cciss_shutdown(struct pci_dev *pdev)
3478 ctlr_info_t *tmp_ptr;
3479 int i;
3480 char flush_buf[4];
3481 int return_code;
3483 tmp_ptr = pci_get_drvdata(pdev);
3484 if (tmp_ptr == NULL)
3485 return;
3486 i = tmp_ptr->ctlr;
3487 if (hba[i] == NULL)
3488 return;
3490 /* Turn board interrupts off and send the flush cache command */
3491 /* sendcmd will turn off interrupts, and send the flush...
3492 * to write all data in the battery-backed cache to the disks */
3493 memset(flush_buf, 0, 4);
3494 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3495 TYPE_CMD);
3496 if (return_code == IO_OK) {
3497 printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
3498 } else {
3499 printk(KERN_WARNING "Error flushing cache on controller %d\n", i);
3501 free_irq(hba[i]->intr[2], hba[i]);
3504 static void __devexit cciss_remove_one(struct pci_dev *pdev)
3506 ctlr_info_t *tmp_ptr;
3507 int i, j;
3509 if (pci_get_drvdata(pdev) == NULL) {
3510 printk(KERN_ERR "cciss: Unable to remove device\n");
3511 return;
3513 tmp_ptr = pci_get_drvdata(pdev);
3514 i = tmp_ptr->ctlr;
3515 if (hba[i] == NULL) {
3516 printk(KERN_ERR "cciss: device appears to "
3517 "have already been removed\n");
3518 return;
3521 remove_proc_entry(hba[i]->devname, proc_cciss);
3522 unregister_blkdev(hba[i]->major, hba[i]->devname);
3524 /* remove it from the disk list */
3525 for (j = 0; j < CISS_MAX_LUN; j++) {
3526 struct gendisk *disk = hba[i]->gendisk[j];
3527 if (disk) {
3528 struct request_queue *q = disk->queue;
3530 if (disk->flags & GENHD_FL_UP)
3531 del_gendisk(disk);
3532 if (q)
3533 blk_cleanup_queue(q);
3537 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3539 cciss_shutdown(pdev);
3541 #ifdef CONFIG_PCI_MSI
3542 if (hba[i]->msix_vector)
3543 pci_disable_msix(hba[i]->pdev);
3544 else if (hba[i]->msi_vector)
3545 pci_disable_msi(hba[i]->pdev);
3546 #endif /* CONFIG_PCI_MSI */
3548 iounmap(hba[i]->vaddr);
3550 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
3551 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3552 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3553 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3554 kfree(hba[i]->cmd_pool_bits);
3555 #ifdef CONFIG_CISS_SCSI_TAPE
3556 kfree(hba[i]->scsi_rejects.complete);
3557 #endif
3559 * Deliberately omit pci_disable_device(): it does something nasty to
3560 * Smart Array controllers that pci_enable_device does not undo
3562 pci_release_regions(pdev);
3563 pci_set_drvdata(pdev, NULL);
3564 free_hba(i);
3567 static struct pci_driver cciss_pci_driver = {
3568 .name = "cciss",
3569 .probe = cciss_init_one,
3570 .remove = __devexit_p(cciss_remove_one),
3571 .id_table = cciss_pci_device_id, /* id_table */
3572 .shutdown = cciss_shutdown,
3576 * This is it. Register the PCI driver information for the cards we control;
3577 * the OS will call our registered routines when it finds one of our cards.
3579 static int __init cciss_init(void)
3581 printk(KERN_INFO DRIVER_NAME "\n");
3583 /* Register for our PCI devices */
3584 return pci_register_driver(&cciss_pci_driver);
3587 static void __exit cciss_cleanup(void)
3589 int i;
3591 pci_unregister_driver(&cciss_pci_driver);
3592 /* double check that all controller entries have been removed */
3593 for (i = 0; i < MAX_CTLR; i++) {
3594 if (hba[i] != NULL) {
3595 printk(KERN_WARNING "cciss: had to remove"
3596 " controller %d\n", i);
3597 cciss_remove_one(hba[i]->pdev);
3600 remove_proc_entry("cciss", proc_root_driver);
3603 static void fail_all_cmds(unsigned long ctlr)
3605 /* If we get here, the board is apparently dead. */
3606 ctlr_info_t *h = hba[ctlr];
3607 CommandList_struct *c;
3608 unsigned long flags;
3610 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3611 h->alive = 0; /* the controller apparently died... */
3613 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3615 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3617 /* move everything off the request queue onto the completed queue */
3618 while ((c = h->reqQ) != NULL) {
3619 removeQ(&(h->reqQ), c);
3620 h->Qdepth--;
3621 addQ(&(h->cmpQ), c);
3624 /* Now, fail everything on the completed queue with a HW error */
3625 while ((c = h->cmpQ) != NULL) {
3626 removeQ(&h->cmpQ, c);
3627 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3628 if (c->cmd_type == CMD_RWREQ) {
3629 complete_command(h, c, 0);
3630 } else if (c->cmd_type == CMD_IOCTL_PEND)
3631 complete(c->waiting);
3632 #ifdef CONFIG_CISS_SCSI_TAPE
3633 else if (c->cmd_type == CMD_SCSI)
3634 complete_scsi_command(c, 0, 0);
3635 #endif
3637 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3638 return;
3641 module_init(cciss_init);
3642 module_exit(cciss_cleanup);