/*
 * Linux MegaRAID device driver
 *
 * Copyright (c) 2003-2004  LSI Logic Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * FILE         : megaraid_mbox.c
 * Version      : v2.20.4 (September 27 2004)
 *
 * Authors:
 *      Atul Mukker             <Atul.Mukker@lsil.com>
 *      Sreenivas Bagalkote     <Sreenivas.Bagalkote@lsil.com>
 *      Manoj Jose              <Manoj.Jose@lsil.com>
 *
 * List of supported controllers
 *
 * OEM  Product Name                    VID     DID     SSVID   SSID
 * ---  ------------                    ---     ---     ----    ----
 * Dell PERC3/QC                        101E    1960    1028    0471
 * Dell PERC3/DC                        101E    1960    1028    0493
 * Dell PERC3/SC                        101E    1960    1028    0475
 * Dell PERC3/Di                        1028    1960    1028    0123
 * Dell PERC4/SC                        1000    1960    1028    0520
 * Dell PERC4/DC                        1000    1960    1028    0518
 * Dell PERC4/QC                        1000    0407    1028    0531
 * Dell PERC4/Di                        1028    000F    1028    014A
 * Dell PERC 4e/Si                      1028    0013    1028    016c
 * Dell PERC 4e/Di                      1028    0013    1028    016d
 * Dell PERC 4e/Di                      1028    0013    1028    016e
 * Dell PERC 4e/Di                      1028    0013    1028    016f
 * Dell PERC 4e/Di                      1028    0013    1028    0170
 * Dell PERC 4e/DC                      1000    0408    1028    0002
 * Dell PERC 4e/SC                      1000    0408    1028    0001
 *
 * LSI MegaRAID SCSI 320-0              1000    1960    1000    A520
 * LSI MegaRAID SCSI 320-1              1000    1960    1000    0520
 * LSI MegaRAID SCSI 320-2              1000    1960    1000    0518
 * LSI MegaRAID SCSI 320-0X             1000    0407    1000    0530
 * LSI MegaRAID SCSI 320-2X             1000    0407    1000    0532
 * LSI MegaRAID SCSI 320-4X             1000    0407    1000    0531
 * LSI MegaRAID SCSI 320-1E             1000    0408    1000    0001
 * LSI MegaRAID SCSI 320-2E             1000    0408    1000    0002
 * LSI MegaRAID SATA 150-4              1000    1960    1000    4523
 * LSI MegaRAID SATA 150-6              1000    1960    1000    0523
 * LSI MegaRAID SATA 300-4X             1000    0409    1000    3004
 * LSI MegaRAID SATA 300-8X             1000    0409    1000    3008
 *
 * INTEL RAID Controller SRCU42X        1000    0407    8086    0532
 * INTEL RAID Controller SRCS16         1000    1960    8086    0523
 * INTEL RAID Controller SRCU42E        1000    0408    8086    0002
 * INTEL RAID Controller SRCZCRX        1000    0407    8086    0530
 * INTEL RAID Controller SRCS28X        1000    0409    8086    3008
 * INTEL RAID Controller SROMBU42E      1000    0408    8086    3431
 * INTEL RAID Controller SROMBU42E      1000    0408    8086    3499
 * INTEL RAID Controller SRCU51L        1000    1960    8086    0520
 *
 * FSC MegaRAID PCI Express ROMB        1000    0408    1734    1065
 *
 * ACER MegaRAID ROMB-2E                1000    0408    1025    004D
 *
 * For history of changes, see Documentation/ChangeLog.megaraid
 */

#include "megaraid_mbox.h"

static int megaraid_init(void);
static void megaraid_exit(void);

static int megaraid_probe_one(struct pci_dev*, const struct pci_device_id *);
static void megaraid_detach_one(struct pci_dev *);
static void megaraid_mbox_shutdown(struct device *);

static int megaraid_io_attach(adapter_t *);
static void megaraid_io_detach(adapter_t *);

static int megaraid_init_mbox(adapter_t *);
static void megaraid_fini_mbox(adapter_t *);

static int megaraid_alloc_cmd_packets(adapter_t *);
static void megaraid_free_cmd_packets(adapter_t *);

static int megaraid_mbox_setup_dma_pools(adapter_t *);
static void megaraid_mbox_teardown_dma_pools(adapter_t *);

static int megaraid_abort_handler(struct scsi_cmnd *);
static int megaraid_reset_handler(struct scsi_cmnd *);

static int mbox_post_sync_cmd(adapter_t *, uint8_t []);
static int mbox_post_sync_cmd_fast(adapter_t *, uint8_t []);
static int megaraid_busywait_mbox(mraid_device_t *);
static int megaraid_mbox_product_info(adapter_t *);
static int megaraid_mbox_extended_cdb(adapter_t *);
static int megaraid_mbox_support_ha(adapter_t *, uint16_t *);
static int megaraid_mbox_support_random_del(adapter_t *);
static int megaraid_mbox_get_max_sg(adapter_t *);
static void megaraid_mbox_enum_raid_scsi(adapter_t *);
static void megaraid_mbox_flush_cache(adapter_t *);

static void megaraid_mbox_display_scb(adapter_t *, scb_t *);
static void megaraid_mbox_setup_device_map(adapter_t *);

static int megaraid_queue_command(struct scsi_cmnd *,
                void (*)(struct scsi_cmnd *));
static scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *, int *);
static void megaraid_mbox_runpendq(adapter_t *, scb_t *);
static void megaraid_mbox_prepare_pthru(adapter_t *, scb_t *,
                struct scsi_cmnd *);
static void megaraid_mbox_prepare_epthru(adapter_t *, scb_t *,
                struct scsi_cmnd *);

static irqreturn_t megaraid_isr(int, void *, struct pt_regs *);

static void megaraid_mbox_dpc(unsigned long);

static int megaraid_cmm_register(adapter_t *);
static int megaraid_cmm_unregister(adapter_t *);
static int megaraid_mbox_mm_handler(unsigned long, uioc_t *, uint32_t);
static int megaraid_mbox_mm_command(adapter_t *, uioc_t *);
static void megaraid_mbox_mm_done(adapter_t *, scb_t *);
static int gather_hbainfo(adapter_t *, mraid_hba_info_t *);
static int wait_till_fw_empty(adapter_t *);

MODULE_AUTHOR("LSI Logic Corporation");
MODULE_DESCRIPTION("LSI Logic MegaRAID Mailbox Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGARAID_VERSION);

/*
 * ### module parameters for the driver ###
 */

/*
 * Set to enable the driver to expose unconfigured disks to the kernel
 */
static int megaraid_expose_unconf_disks = 0;
module_param_named(unconf_disks, megaraid_expose_unconf_disks, int, 0);
MODULE_PARM_DESC(unconf_disks,
        "Set to expose unconfigured disks to kernel (default=0)");

/*
 * driver wait time if the adapter's mailbox is busy
 */
static unsigned int max_mbox_busy_wait = MBOX_BUSY_WAIT;
module_param_named(busy_wait, max_mbox_busy_wait, int, 0);
MODULE_PARM_DESC(busy_wait,
        "Max wait for mailbox in microseconds if busy (default=10)");

/*
 * number of sectors per IO command
 */
static unsigned int megaraid_max_sectors = MBOX_MAX_SECTORS;
module_param_named(max_sectors, megaraid_max_sectors, int, 0);
MODULE_PARM_DESC(max_sectors,
        "Maximum number of sectors per IO command (default=128)");

/*
 * number of commands per logical unit
 */
static unsigned int megaraid_cmd_per_lun = MBOX_DEF_CMD_PER_LUN;
module_param_named(cmd_per_lun, megaraid_cmd_per_lun, int, 0);
MODULE_PARM_DESC(cmd_per_lun,
        "Maximum number of commands per logical unit (default=64)");

/*
 * Fast driver load option: skip scanning for physical devices during load.
 * As a result, non-disk devices are skipped at driver load time. They can
 * be added later through /proc/scsi/scsi.
 */
static unsigned int megaraid_fast_load = 0;
module_param_named(fast_load, megaraid_fast_load, int, 0);
MODULE_PARM_DESC(fast_load,
        "Faster loading of the driver, skips physical devices! (default=0)");

/*
 * mraid_debug level - threshold for the amount of information displayed by
 * the driver. This level can be changed through module parameters, ioctl or
 * the sysfs/proc interface. By default, print the announcement messages only.
 */
int mraid_debug_level = CL_ANN;
module_param_named(debug_level, mraid_debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)");
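
/*
 * Example (illustrative only; assumes the module is built and installed as
 * "megaraid_mbox"): overriding some of the parameters above at load time:
 *
 *      modprobe megaraid_mbox fast_load=1 cmd_per_lun=32 debug_level=1
 */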

/*
 * ### global data ###
 */
static uint8_t megaraid_mbox_version[8] =
        { 0x02, 0x20, 0x04, 0x00, 9, 27, 20, 4 };


/*
 * PCI table for all supported controllers.
 * Each entry is { vendor id, device id, subsystem vendor id, subsystem id }.
 */
static struct pci_device_id pci_id_table_g[] =  {
        {PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_PERC4_DI_DISCOVERY,
                PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC4_DI_DISCOVERY},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_PERC4_SC,
                PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC4_SC},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_PERC4_DC,
                PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC4_DC},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_PERC4_QC,
                PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC4_QC},
        {PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_PERC4_DI_EVERGLADES,
                PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC4_DI_EVERGLADES},
        {PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_PERC4E_SI_BIGBEND,
                PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC4E_SI_BIGBEND},
        {PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_PERC4E_DI_KOBUK,
                PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC4E_DI_KOBUK},
        {PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_PERC4E_DI_CORVETTE,
                PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC4E_DI_CORVETTE},
        {PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_PERC4E_DI_EXPEDITION,
                PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC4E_DI_EXPEDITION},
        {PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_PERC4E_DI_GUADALUPE,
                PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC4E_DI_GUADALUPE},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_PERC4E_DC_320_2E,
                PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC4E_DC_320_2E},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_PERC4E_SC_320_1E,
                PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC4E_SC_320_1E},
        {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID3,
                PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC3_QC},
        {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID3,
                PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC3_DC},
        {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID3,
                PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC3_SC},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_MEGARAID_SCSI_320_0,
                PCI_VENDOR_ID_LSI_LOGIC, PCI_SUBSYS_ID_MEGARAID_SCSI_320_0},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_MEGARAID_SCSI_320_1,
                PCI_VENDOR_ID_LSI_LOGIC, PCI_SUBSYS_ID_MEGARAID_SCSI_320_1},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_MEGARAID_SCSI_320_2,
                PCI_VENDOR_ID_LSI_LOGIC, PCI_SUBSYS_ID_MEGARAID_SCSI_320_2},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_MEGARAID_SCSI_320_0x,
                PCI_VENDOR_ID_LSI_LOGIC, PCI_SUBSYS_ID_MEGARAID_SCSI_320_0x},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_MEGARAID_SCSI_320_2x,
                PCI_VENDOR_ID_LSI_LOGIC, PCI_SUBSYS_ID_MEGARAID_SCSI_320_2x},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_MEGARAID_SCSI_320_4x,
                PCI_VENDOR_ID_LSI_LOGIC, PCI_SUBSYS_ID_MEGARAID_SCSI_320_4x},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_MEGARAID_SCSI_320_1E,
                PCI_VENDOR_ID_LSI_LOGIC, PCI_SUBSYS_ID_MEGARAID_SCSI_320_1E},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_MEGARAID_SCSI_320_2E,
                PCI_VENDOR_ID_LSI_LOGIC, PCI_SUBSYS_ID_MEGARAID_SCSI_320_2E},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_MEGARAID_I4_133_RAID,
                PCI_VENDOR_ID_LSI_LOGIC, PCI_SUBSYS_ID_MEGARAID_I4_133_RAID},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_MEGARAID_SATA_150_4,
                PCI_VENDOR_ID_LSI_LOGIC, PCI_SUBSYS_ID_MEGARAID_SATA_150_4},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_MEGARAID_SATA_150_6,
                PCI_VENDOR_ID_LSI_LOGIC, PCI_SUBSYS_ID_MEGARAID_SATA_150_6},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_MEGARAID_SATA_300_4x,
                PCI_VENDOR_ID_LSI_LOGIC, PCI_SUBSYS_ID_MEGARAID_SATA_300_4x},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_MEGARAID_SATA_300_8x,
                PCI_VENDOR_ID_LSI_LOGIC, PCI_SUBSYS_ID_MEGARAID_SATA_300_8x},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_INTEL_RAID_SRCU42X,
                PCI_VENDOR_ID_INTEL, PCI_SUBSYS_ID_INTEL_RAID_SRCU42X},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_INTEL_RAID_SRCS16,
                PCI_VENDOR_ID_INTEL, PCI_SUBSYS_ID_INTEL_RAID_SRCS16},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_INTEL_RAID_SRCU42E,
                PCI_VENDOR_ID_INTEL, PCI_SUBSYS_ID_INTEL_RAID_SRCU42E},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_INTEL_RAID_SRCZCRX,
                PCI_VENDOR_ID_INTEL, PCI_SUBSYS_ID_INTEL_RAID_SRCZCRX},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_INTEL_RAID_SRCS28X,
                PCI_VENDOR_ID_INTEL, PCI_SUBSYS_ID_INTEL_RAID_SRCS28X},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_INTEL_RAID_SROMBU42E_ALIEF,
                PCI_VENDOR_ID_INTEL, PCI_SUBSYS_ID_INTEL_RAID_SROMBU42E_ALIEF},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_INTEL_RAID_SROMBU42E_HARWICH,
                PCI_VENDOR_ID_INTEL, PCI_SUBSYS_ID_INTEL_RAID_SROMBU42E_HARWICH},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_INTEL_RAID_SRCU41L_LAKE_SHETEK,
                PCI_VENDOR_ID_INTEL, PCI_SUBSYS_ID_INTEL_RAID_SRCU41L_LAKE_SHETEK},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_FSC_MEGARAID_PCI_EXPRESS_ROMB,
                PCI_SUBSYS_ID_FSC, PCI_SUBSYS_ID_FSC_MEGARAID_PCI_EXPRESS_ROMB},
        {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_MEGARAID_ACER_ROMB_2E,
                PCI_VENDOR_ID_AI, PCI_SUBSYS_ID_MEGARAID_ACER_ROMB_2E},
        {0}     /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, pci_id_table_g);
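
/*
 * Example (illustrative): the "LSI MegaRAID SCSI 320-2  1000 1960 1000 0518"
 * row in the controller list at the top of this file corresponds to the
 * {PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_MEGARAID_SCSI_320_2,
 *  PCI_VENDOR_ID_LSI_LOGIC, PCI_SUBSYS_ID_MEGARAID_SCSI_320_2} entry above,
 * i.e. the columns are vendor, device, subsystem vendor and subsystem ID.
 */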

static struct pci_driver megaraid_pci_driver_g = {
        .name           = "megaraid",
        .id_table       = pci_id_table_g,
        .probe          = megaraid_probe_one,
        .remove         = __devexit_p(megaraid_detach_one),
        .driver         = {
                .shutdown       = megaraid_mbox_shutdown,
        }
};

/*
 * Scsi host template for megaraid unified driver
 */
static struct scsi_host_template megaraid_template_g = {
        .module                         = THIS_MODULE,
        .name                           = "LSI Logic MegaRAID driver",
        .proc_name                      = "megaraid",
        .queuecommand                   = megaraid_queue_command,
        .eh_abort_handler               = megaraid_abort_handler,
        .eh_device_reset_handler        = megaraid_reset_handler,
        .eh_bus_reset_handler           = megaraid_reset_handler,
        .eh_host_reset_handler          = megaraid_reset_handler,
        .use_clustering                 = ENABLE_CLUSTERING,
};
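
/*
 * NOTE: per-adapter limits (can_queue, sg_tablesize, max_sectors,
 * cmd_per_lun, max_id and friends) are not part of this template; they are
 * filled into the Scsi_Host by megaraid_io_attach() after the firmware has
 * been queried in megaraid_init_mbox().
 */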

/**
 * megaraid_init - module load hook
 *
 * We register ourselves as a hotplug-enabled module and let the PCI
 * subsystem discover our adapters.
 */
static int __init
megaraid_init(void)
{
        int     rval;

        // Announce the driver version
        con_log(CL_ANN, (KERN_INFO "megaraid: %s %s\n", MEGARAID_VERSION,
                MEGARAID_EXT_VERSION));

        // check validity of module parameters
        if (megaraid_cmd_per_lun > MBOX_MAX_SCSI_CMDS) {

                con_log(CL_ANN, (KERN_WARNING
                        "megaraid mailbox: max commands per lun reset to %d\n",
                        MBOX_MAX_SCSI_CMDS));

                megaraid_cmd_per_lun = MBOX_MAX_SCSI_CMDS;
        }

        // register as a PCI hot-plug driver module
        if ((rval = pci_module_init(&megaraid_pci_driver_g))) {
                con_log(CL_ANN, (KERN_WARNING
                        "megaraid: could not register hotplug support.\n"));
        }

        return rval;
}

/**
 * megaraid_exit - driver unload entry point
 *
 * We simply unwrap the megaraid_init routine here.
 */
static void __exit
megaraid_exit(void)
{
        con_log(CL_DLEVEL1, (KERN_NOTICE "megaraid: unloading framework\n"));

        // unregister as PCI hotplug driver
        pci_unregister_driver(&megaraid_pci_driver_g);

        return;
}

/**
 * megaraid_probe_one - PCI hotplug entry point
 * @param pdev  : handle to this controller's PCI configuration space
 * @param id    : pci device id of the class of controllers
 *
 * This routine should be called whenever a new adapter is detected by the
 * PCI hotplug subsystem.
 */
528 static int __devinit
529 megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
531 adapter_t *adapter;
534 // detected a new controller
535 con_log(CL_ANN, (KERN_INFO
536 "megaraid: probe new device %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
537 pdev->vendor, pdev->device, pdev->subsystem_vendor,
538 pdev->subsystem_device));
540 con_log(CL_ANN, ("bus %d:slot %d:func %d\n", pdev->bus->number,
541 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)));
543 if (pci_enable_device(pdev)) {
544 con_log(CL_ANN, (KERN_WARNING
545 "megaraid: pci_enable_device failed\n"));
547 return -ENODEV;
550 // Enable bus-mastering on this controller
551 pci_set_master(pdev);
553 // Allocate the per driver initialization structure
554 adapter = kmalloc(sizeof(adapter_t), GFP_KERNEL);
556 if (adapter == NULL) {
557 con_log(CL_ANN, (KERN_WARNING
558 "megaraid: out of memory, %s %d.\n", __FUNCTION__, __LINE__));
560 goto out_probe_one;
562 memset(adapter, 0, sizeof(adapter_t));
565 // set up PCI related soft state and other pre-known parameters
566 adapter->unique_id = pdev->bus->number << 8 | pdev->devfn;
567 adapter->irq = pdev->irq;
568 adapter->pdev = pdev;
570 atomic_set(&adapter->being_detached, 0);
572 // Setup the default DMA mask. This would be changed later on
573 // depending on hardware capabilities
574 if (pci_set_dma_mask(adapter->pdev, 0xFFFFFFFF) != 0) {
576 con_log(CL_ANN, (KERN_WARNING
577 "megaraid: pci_set_dma_mask failed:%d\n", __LINE__));
579 goto out_free_adapter;
583 // Initialize the synchronization lock for kernel and LLD
584 spin_lock_init(&adapter->lock);
585 adapter->host_lock = &adapter->lock;
588 // Initialize the command queues: the list of free SCBs and the list
589 // of pending SCBs.
590 INIT_LIST_HEAD(&adapter->kscb_pool);
591 spin_lock_init(SCSI_FREE_LIST_LOCK(adapter));
593 INIT_LIST_HEAD(&adapter->pend_list);
594 spin_lock_init(PENDING_LIST_LOCK(adapter));
596 INIT_LIST_HEAD(&adapter->completed_list);
597 spin_lock_init(COMPLETED_LIST_LOCK(adapter));
600 // Start the mailbox based controller
601 if (megaraid_init_mbox(adapter) != 0) {
602 con_log(CL_ANN, (KERN_WARNING
603 "megaraid: maibox adapter did not initialize\n"));
605 goto out_free_adapter;
608 // Register with LSI Common Management Module
609 if (megaraid_cmm_register(adapter) != 0) {
611 con_log(CL_ANN, (KERN_WARNING
612 "megaraid: could not register with management module\n"));
614 goto out_fini_mbox;
617 // setup adapter handle in PCI soft state
618 pci_set_drvdata(pdev, adapter);
620 // attach with scsi mid-layer
621 if (megaraid_io_attach(adapter) != 0) {
623 con_log(CL_ANN, (KERN_WARNING "megaraid: io attach failed\n"));
625 goto out_cmm_unreg;
628 return 0;
630 out_cmm_unreg:
631 pci_set_drvdata(pdev, NULL);
632 megaraid_cmm_unregister(adapter);
633 out_fini_mbox:
634 megaraid_fini_mbox(adapter);
635 out_free_adapter:
636 kfree(adapter);
637 out_probe_one:
638 pci_disable_device(pdev);
640 return -ENODEV;

/**
 * megaraid_detach_one - release the framework resources and call LLD release
 * routine
 * @param pdev  : handle for our PCI configuration space
 *
 * This routine is called during driver unload. We free all the allocated
 * resources and call the corresponding LLD so that it can also release all
 * its resources.
 *
 * This routine is also called from the PCI hotplug system.
 */
655 static void
656 megaraid_detach_one(struct pci_dev *pdev)
658 adapter_t *adapter;
659 struct Scsi_Host *host;
662 // Start a rollback on this adapter
663 adapter = pci_get_drvdata(pdev);
665 if (!adapter) {
666 con_log(CL_ANN, (KERN_CRIT
667 "megaraid: Invalid detach on %#4.04x:%#4.04x:%#4.04x:%#4.04x\n",
668 pdev->vendor, pdev->device, pdev->subsystem_vendor,
669 pdev->subsystem_device));
671 return;
673 else {
674 con_log(CL_ANN, (KERN_NOTICE
675 "megaraid: detaching device %#4.04x:%#4.04x:%#4.04x:%#4.04x\n",
676 pdev->vendor, pdev->device, pdev->subsystem_vendor,
677 pdev->subsystem_device));
681 host = adapter->host;
683 // do not allow any more requests from the management module for this
684 // adapter.
685 // FIXME: How do we account for the request which might still be
686 // pending with us?
687 atomic_set(&adapter->being_detached, 1);
689 // detach from the IO sub-system
690 megaraid_io_detach(adapter);
692 // reset the device state in the PCI structure. We check this
693 // condition when we enter here. If the device state is NULL,
694 // that would mean the device has already been removed
695 pci_set_drvdata(pdev, NULL);
697 // Unregister from common management module
699 // FIXME: this must return success or failure for conditions if there
700 // is a command pending with LLD or not.
701 megaraid_cmm_unregister(adapter);
703 // finalize the mailbox based controller and release all resources
704 megaraid_fini_mbox(adapter);
706 kfree(adapter);
708 scsi_host_put(host);
710 pci_disable_device(pdev);
712 return;

/**
 * megaraid_mbox_shutdown - PCI shutdown hook for megaraid HBA
 * @param device        : generic driver model device
 *
 * Shutdown notification: flush the adapter's cache.
 */
static void
megaraid_mbox_shutdown(struct device *device)
{
        adapter_t               *adapter = pci_get_drvdata(to_pci_dev(device));
        static int              counter;

        if (!adapter) {
                con_log(CL_ANN, (KERN_WARNING
                        "megaraid: null device in shutdown\n"));
                return;
        }

        // flush caches now
        con_log(CL_ANN, (KERN_INFO "megaraid: flushing adapter %d...",
                counter++));

        megaraid_mbox_flush_cache(adapter);

        con_log(CL_ANN, ("done\n"));
}
745 * megaraid_io_attach - attach a device with the IO subsystem
746 * @param adapter : controller's soft state
748 * Attach this device with the IO subsystem
750 static int
751 megaraid_io_attach(adapter_t *adapter)
753 struct Scsi_Host *host;
755 // Initialize SCSI Host structure
756 host = scsi_host_alloc(&megaraid_template_g, 8);
757 if (!host) {
758 con_log(CL_ANN, (KERN_WARNING
759 "megaraid mbox: scsi_register failed\n"));
761 return -1;
764 SCSIHOST2ADAP(host) = (caddr_t)adapter;
765 adapter->host = host;
767 // export the parameters required by the mid-layer
768 scsi_assign_lock(host, adapter->host_lock);
769 scsi_set_device(host, &adapter->pdev->dev);
771 host->irq = adapter->irq;
772 host->unique_id = adapter->unique_id;
773 host->can_queue = adapter->max_cmds;
774 host->this_id = adapter->init_id;
775 host->sg_tablesize = adapter->sglen;
776 host->max_sectors = adapter->max_sectors;
777 host->cmd_per_lun = adapter->cmd_per_lun;
778 host->max_channel = adapter->max_channel;
779 host->max_id = adapter->max_target;
780 host->max_lun = adapter->max_lun;
783 // notify mid-layer about the new controller
784 if (scsi_add_host(host, &adapter->pdev->dev)) {
786 con_log(CL_ANN, (KERN_WARNING
787 "megaraid mbox: scsi_add_host failed\n"));
789 scsi_host_put(host);
791 return -1;
794 scsi_scan_host(host);
796 return 0;
801 * megaraid_io_detach - detach a device from the IO subsystem
802 * @param adapter : controller's soft state
804 * Detach this device from the IO subsystem
806 static void
807 megaraid_io_detach(adapter_t *adapter)
809 struct Scsi_Host *host;
811 con_log(CL_DLEVEL1, (KERN_INFO "megaraid: io detach\n"));
813 host = adapter->host;
815 scsi_remove_host(host);
817 return;

/*
 * START: Mailbox Low Level Driver
 *
 * This section is specific to the single-mailbox based controllers.
 */
828 * megaraid_init_mbox - initialize controller
829 * @param adapter - our soft state
831 * . Allocate 16-byte aligned mailbox memory for firmware handshake
832 * . Allocate controller's memory resources
833 * . Find out all initialization data
834 * . Allocate memory required for all the commands
835 * . Use internal library of FW routines, build up complete soft state
837 static int __init
838 megaraid_init_mbox(adapter_t *adapter)
840 struct pci_dev *pdev;
841 mraid_device_t *raid_dev;
842 int i;
845 adapter->ito = MBOX_TIMEOUT;
846 pdev = adapter->pdev;
849 * Allocate and initialize the init data structure for mailbox
850 * controllers
852 raid_dev = kmalloc(sizeof(mraid_device_t), GFP_KERNEL);
853 if (raid_dev == NULL) return -1;
855 memset(raid_dev, 0, sizeof(mraid_device_t));
858 * Attach the adapter soft state to raid device soft state
860 adapter->raid_device = (caddr_t)raid_dev;
861 raid_dev->fast_load = megaraid_fast_load;
864 // our baseport
865 raid_dev->baseport = pci_resource_start(pdev, 0);
867 if (pci_request_regions(pdev, "MegaRAID: LSI Logic Corporation") != 0) {
869 con_log(CL_ANN, (KERN_WARNING
870 "megaraid: mem region busy\n"));
872 goto out_free_raid_dev;
875 raid_dev->baseaddr = (unsigned long)
876 ioremap_nocache(raid_dev->baseport, 128);
878 if (!raid_dev->baseaddr) {
880 con_log(CL_ANN, (KERN_WARNING
881 "megaraid: could not map hba memory\n") );
883 goto out_release_regions;
887 // Setup the rest of the soft state using the library of FW routines
890 // request IRQ and register the interrupt service routine
891 if (request_irq(adapter->irq, megaraid_isr, SA_SHIRQ, "megaraid",
892 adapter)) {
894 con_log(CL_ANN, (KERN_WARNING
895 "megaraid: Couldn't register IRQ %d!\n", adapter->irq));
897 goto out_iounmap;
901 // initialize the mutual exclusion lock for the mailbox
902 spin_lock_init(&raid_dev->mailbox_lock);
904 // allocate memory required for commands
905 if (megaraid_alloc_cmd_packets(adapter) != 0) {
906 goto out_free_irq;
909 // Product info
910 if (megaraid_mbox_product_info(adapter) != 0) {
911 goto out_alloc_cmds;
914 // Do we support extended CDBs
915 adapter->max_cdb_sz = 10;
916 if (megaraid_mbox_extended_cdb(adapter) == 0) {
917 adapter->max_cdb_sz = 16;
921 * Do we support cluster environment, if we do, what is the initiator
922 * id.
923 * NOTE: In a non-cluster aware firmware environment, the LLD should
924 * return 7 as initiator id.
926 adapter->ha = 0;
927 adapter->init_id = -1;
928 if (megaraid_mbox_support_ha(adapter, &adapter->init_id) == 0) {
929 adapter->ha = 1;
933 * Prepare the device ids array to have the mapping between the kernel
934 * device address and megaraid device address.
935 * We export the physical devices on their actual addresses. The
936 * logical drives are exported on a virtual SCSI channel
938 megaraid_mbox_setup_device_map(adapter);
940 // If the firmware supports random deletion, update the device id map
941 if (megaraid_mbox_support_random_del(adapter)) {

                // Shift the logical drive numbers in the device_ids array.
                // One slot in device_ids is reserved for the initiator's
                // target id, which is why the loop below uses "<=".
946 for (i = 0; i <= MAX_LOGICAL_DRIVES_40LD; i++) {
947 adapter->device_ids[adapter->max_channel][i] += 0x80;
949 adapter->device_ids[adapter->max_channel][adapter->init_id] =
950 0xFF;
954 * find out the maximum number of scatter-gather elements supported by
955 * this firmware
957 adapter->sglen = megaraid_mbox_get_max_sg(adapter);
959 // enumerate RAID and SCSI channels so that all devices on SCSI
960 // channels can later be exported, including disk devices
961 megaraid_mbox_enum_raid_scsi(adapter);
964 * Other parameters required by upper layer
966 * maximum number of sectors per IO command
968 adapter->max_sectors = megaraid_max_sectors;
971 * number of queued commands per LUN.
973 adapter->cmd_per_lun = megaraid_cmd_per_lun;

        // Set the DMA mask to 64-bit. All supported controllers are capable
        // of DMA in this range
977 if (pci_set_dma_mask(adapter->pdev, 0xFFFFFFFFFFFFFFFFULL) != 0) {
979 con_log(CL_ANN, (KERN_WARNING
980 "megaraid: could not set DMA mask for 64-bit.\n"));
982 goto out_alloc_cmds;
985 // setup tasklet for DPC
986 tasklet_init(&adapter->dpc_h, megaraid_mbox_dpc,
987 (unsigned long)adapter);
989 con_log(CL_DLEVEL1, (KERN_INFO
990 "megaraid mbox hba successfully initialized\n"));
992 return 0;
994 out_alloc_cmds:
995 megaraid_free_cmd_packets(adapter);
996 out_free_irq:
997 free_irq(adapter->irq, adapter);
998 out_iounmap:
999 iounmap((caddr_t)raid_dev->baseaddr);
1000 out_release_regions:
1001 pci_release_regions(pdev);
1002 out_free_raid_dev:
1003 kfree(raid_dev);
1005 return -1;
1010 * megaraid_fini_mbox - undo controller initialization
1011 * @param adapter : our soft state
1013 static void
1014 megaraid_fini_mbox(adapter_t *adapter)
1016 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
1018 // flush all caches
1019 megaraid_mbox_flush_cache(adapter);
1021 tasklet_kill(&adapter->dpc_h);
1023 megaraid_free_cmd_packets(adapter);
1025 free_irq(adapter->irq, adapter);
1027 iounmap((caddr_t)raid_dev->baseaddr);
1029 pci_release_regions(adapter->pdev);
1031 kfree(raid_dev);
1033 return;

/**
 * megaraid_alloc_cmd_packets - allocate shared mailbox
 * @param adapter       : soft state of the raid controller
 *
 * Allocate and align the shared mailbox. This mailbox is used to issue
 * all the commands. For IO based controllers, the mailbox is also registered
 * with the FW. Allocate memory for all commands as well.
 * This is our big allocator.
 */
1046 static int
1047 megaraid_alloc_cmd_packets(adapter_t *adapter)
1049 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
1050 struct pci_dev *pdev;
1051 unsigned long align;
1052 scb_t *scb;
1053 mbox_ccb_t *ccb;
1054 struct mraid_pci_blk *epthru_pci_blk;
1055 struct mraid_pci_blk *sg_pci_blk;
1056 struct mraid_pci_blk *mbox_pci_blk;
1057 int i;
1059 pdev = adapter->pdev;
1062 * Setup the mailbox
1063 * Allocate the common 16-byte aligned memory for the handshake
1064 * mailbox.
1066 raid_dev->una_mbox64 = pci_alloc_consistent(adapter->pdev,
1067 sizeof(mbox64_t), &raid_dev->una_mbox64_dma);
1069 if (!raid_dev->una_mbox64) {
1070 con_log(CL_ANN, (KERN_WARNING
1071 "megaraid: out of memory, %s %d\n", __FUNCTION__,
1072 __LINE__));
1073 return -1;
1075 memset(raid_dev->una_mbox64, 0, sizeof(mbox64_t));
1078 * Align the mailbox at 16-byte boundary
1080 raid_dev->mbox = &raid_dev->una_mbox64->mbox32;
1082 raid_dev->mbox = (mbox_t *)((((unsigned long)raid_dev->mbox) + 15) &
1083 (~0UL ^ 0xFUL));
1085 raid_dev->mbox64 = (mbox64_t *)(((unsigned long)raid_dev->mbox) - 8);
1087 align = ((void *)raid_dev->mbox -
1088 ((void *)&raid_dev->una_mbox64->mbox32));
1090 raid_dev->mbox_dma = (unsigned long)raid_dev->una_mbox64_dma + 8 +
1091 align;
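
        /*
         * Layout note: mbox64_t prefixes the legacy 32-bit mailbox with an
         * 8-byte, 64-bit extension. The code above rounds the embedded
         * 32-bit mailbox up to the next 16-byte boundary, points mbox64 at
         * the 8 bytes preceding it, and derives the bus address (mbox_dma)
         * of the aligned mailbox from the same offset.
         */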
1093 // Allocate memory for commands issued internally
1094 adapter->ibuf = pci_alloc_consistent(pdev, MBOX_IBUF_SIZE,
1095 &adapter->ibuf_dma_h);
1096 if (!adapter->ibuf) {
1098 con_log(CL_ANN, (KERN_WARNING
1099 "megaraid: out of memory, %s %d\n", __FUNCTION__,
1100 __LINE__));
1102 goto out_free_common_mbox;
1104 memset(adapter->ibuf, 0, MBOX_IBUF_SIZE);
1106 // Allocate memory for our SCSI Command Blocks and their associated
1107 // memory
1110 * Allocate memory for the base list of scb. Later allocate memory for
1111 * CCBs and embedded components of each CCB and point the pointers in
1112 * scb to the allocated components
1113 * NOTE: The code to allocate SCB will be duplicated in all the LLD
1114 * since the calling routine does not yet know the number of available
1115 * commands.
1117 adapter->kscb_list = kmalloc(sizeof(scb_t) * MBOX_MAX_SCSI_CMDS,
1118 GFP_KERNEL);
1120 if (adapter->kscb_list == NULL) {
1121 con_log(CL_ANN, (KERN_WARNING
1122 "megaraid: out of memory, %s %d\n", __FUNCTION__,
1123 __LINE__));
1124 goto out_free_ibuf;
1126 memset(adapter->kscb_list, 0, sizeof(scb_t) * MBOX_MAX_SCSI_CMDS);
1128 // memory allocation for our command packets
1129 if (megaraid_mbox_setup_dma_pools(adapter) != 0) {
1130 con_log(CL_ANN, (KERN_WARNING
1131 "megaraid: out of memory, %s %d\n", __FUNCTION__,
1132 __LINE__));
1133 goto out_free_scb_list;
1136 // Adjust the scb pointers and link in the free pool
1137 epthru_pci_blk = raid_dev->epthru_pool;
1138 sg_pci_blk = raid_dev->sg_pool;
1139 mbox_pci_blk = raid_dev->mbox_pool;
1141 for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
1142 scb = adapter->kscb_list + i;
1143 ccb = raid_dev->ccb_list + i;
1145 ccb->mbox = (mbox_t *)(mbox_pci_blk[i].vaddr + 16);
1146 ccb->raw_mbox = (uint8_t *)ccb->mbox;
1147 ccb->mbox64 = (mbox64_t *)(mbox_pci_blk[i].vaddr + 8);
1148 ccb->mbox_dma_h = (unsigned long)mbox_pci_blk[i].dma_addr + 16;
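
                /*
                 * Layout note: each block from the mailbox pool is
                 * sizeof(mbox64_t) + 16 bytes, allocated 16-byte aligned.
                 * The 32-bit mailbox is placed at vaddr + 16 (keeping it
                 * 16-byte aligned) with the 64-bit wrapper starting 8 bytes
                 * before it, mirroring the shared handshake mailbox above.
                 */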
1150 // make sure the mailbox is aligned properly
1151 if (ccb->mbox_dma_h & 0x0F) {
1152 con_log(CL_ANN, (KERN_CRIT
1153 "megaraid mbox: not aligned on 16-bytes\n"));
1155 goto out_teardown_dma_pools;
1158 ccb->epthru = (mraid_epassthru_t *)
1159 epthru_pci_blk[i].vaddr;
1160 ccb->epthru_dma_h = epthru_pci_blk[i].dma_addr;
1161 ccb->pthru = (mraid_passthru_t *)ccb->epthru;
1162 ccb->pthru_dma_h = ccb->epthru_dma_h;
1165 ccb->sgl64 = (mbox_sgl64 *)sg_pci_blk[i].vaddr;
1166 ccb->sgl_dma_h = sg_pci_blk[i].dma_addr;
1167 ccb->sgl32 = (mbox_sgl32 *)ccb->sgl64;
1169 scb->ccb = (caddr_t)ccb;
1170 scb->gp = 0;
1172 scb->sno = i; // command index
1174 scb->scp = NULL;
1175 scb->state = SCB_FREE;
1176 scb->dma_direction = PCI_DMA_NONE;
1177 scb->dma_type = MRAID_DMA_NONE;
1178 scb->dev_channel = -1;
1179 scb->dev_target = -1;
1181 // put scb in the free pool
1182 list_add_tail(&scb->list, &adapter->kscb_pool);
1185 return 0;
1187 out_teardown_dma_pools:
1188 megaraid_mbox_teardown_dma_pools(adapter);
1189 out_free_scb_list:
1190 kfree(adapter->kscb_list);
1191 out_free_ibuf:
1192 pci_free_consistent(pdev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,
1193 adapter->ibuf_dma_h);
1194 out_free_common_mbox:
1195 pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
1196 (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
1198 return -1;
1203 * megaraid_free_cmd_packets - free memory
1204 * @param adapter : soft state of the raid controller
1206 * Release memory resources allocated for commands
1208 static void
1209 megaraid_free_cmd_packets(adapter_t *adapter)
1211 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
1213 megaraid_mbox_teardown_dma_pools(adapter);
1215 kfree(adapter->kscb_list);
1217 pci_free_consistent(adapter->pdev, MBOX_IBUF_SIZE,
1218 (void *)adapter->ibuf, adapter->ibuf_dma_h);
1220 pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
1221 (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
1222 return;
1227 * megaraid_mbox_setup_dma_pools - setup dma pool for command packets
1228 * @param adapter : HBA soft state
1230 * setup the dma pools for mailbox, passthru and extended passthru structures,
1231 * and scatter-gather lists
1233 static int
1234 megaraid_mbox_setup_dma_pools(adapter_t *adapter)
1236 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
1237 struct mraid_pci_blk *epthru_pci_blk;
1238 struct mraid_pci_blk *sg_pci_blk;
1239 struct mraid_pci_blk *mbox_pci_blk;
1240 int i;
1244 // Allocate memory for 16-bytes aligned mailboxes
1245 raid_dev->mbox_pool_handle = pci_pool_create("megaraid mbox pool",
1246 adapter->pdev,
1247 sizeof(mbox64_t) + 16,
1248 16, 0);
1250 if (raid_dev->mbox_pool_handle == NULL) {
1251 goto fail_setup_dma_pool;
1254 mbox_pci_blk = raid_dev->mbox_pool;
1255 for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
1256 mbox_pci_blk[i].vaddr = pci_pool_alloc(
1257 raid_dev->mbox_pool_handle,
1258 GFP_KERNEL,
1259 &mbox_pci_blk[i].dma_addr);
1260 if (!mbox_pci_blk[i].vaddr) {
1261 goto fail_setup_dma_pool;

        /*
         * Allocate memory for each embedded passthru structure pointer.
         * Request a 128-byte aligned structure for each passthru command
         * structure.
         * Since passthru and extended passthru commands are mutually
         * exclusive, they share a common memory pool. Passthru structures
         * piggyback on the memory allocated for extended passthru, since
         * passthru is the smaller of the two.
         */
1273 raid_dev->epthru_pool_handle = pci_pool_create("megaraid mbox pthru",
1274 adapter->pdev, sizeof(mraid_epassthru_t), 128, 0);
1276 if (raid_dev->epthru_pool_handle == NULL) {
1277 goto fail_setup_dma_pool;
1280 epthru_pci_blk = raid_dev->epthru_pool;
1281 for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
1282 epthru_pci_blk[i].vaddr = pci_pool_alloc(
1283 raid_dev->epthru_pool_handle,
1284 GFP_KERNEL,
1285 &epthru_pci_blk[i].dma_addr);
1286 if (!epthru_pci_blk[i].vaddr) {
1287 goto fail_setup_dma_pool;
1292 // Allocate memory for each scatter-gather list. Request for 512 bytes
1293 // alignment for each sg list
1294 raid_dev->sg_pool_handle = pci_pool_create("megaraid mbox sg",
1295 adapter->pdev,
1296 sizeof(mbox_sgl64) * MBOX_MAX_SG_SIZE,
1297 512, 0);
1299 if (raid_dev->sg_pool_handle == NULL) {
1300 goto fail_setup_dma_pool;
1303 sg_pci_blk = raid_dev->sg_pool;
1304 for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
1305 sg_pci_blk[i].vaddr = pci_pool_alloc(
1306 raid_dev->sg_pool_handle,
1307 GFP_KERNEL,
1308 &sg_pci_blk[i].dma_addr);
1309 if (!sg_pci_blk[i].vaddr) {
1310 goto fail_setup_dma_pool;
1314 return 0;
1316 fail_setup_dma_pool:
1317 megaraid_mbox_teardown_dma_pools(adapter);
1318 return -1;
1323 * megaraid_mbox_teardown_dma_pools - teardown dma pools for command packets
1324 * @param adapter : HBA soft state
1326 * teardown the dma pool for mailbox, passthru and extended passthru
1327 * structures, and scatter-gather lists
1329 static void
1330 megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
1332 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
1333 struct mraid_pci_blk *epthru_pci_blk;
1334 struct mraid_pci_blk *sg_pci_blk;
1335 struct mraid_pci_blk *mbox_pci_blk;
1336 int i;
1339 sg_pci_blk = raid_dev->sg_pool;
1340 for (i = 0; i < MBOX_MAX_SCSI_CMDS && sg_pci_blk[i].vaddr; i++) {
1341 pci_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
1342 sg_pci_blk[i].dma_addr);
1344 if (raid_dev->sg_pool_handle)
1345 pci_pool_destroy(raid_dev->sg_pool_handle);
1348 epthru_pci_blk = raid_dev->epthru_pool;
1349 for (i = 0; i < MBOX_MAX_SCSI_CMDS && epthru_pci_blk[i].vaddr; i++) {
1350 pci_pool_free(raid_dev->epthru_pool_handle,
1351 epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr);
1353 if (raid_dev->epthru_pool_handle)
1354 pci_pool_destroy(raid_dev->epthru_pool_handle);
1357 mbox_pci_blk = raid_dev->mbox_pool;
1358 for (i = 0; i < MBOX_MAX_SCSI_CMDS && mbox_pci_blk[i].vaddr; i++) {
1359 pci_pool_free(raid_dev->mbox_pool_handle,
1360 mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr);
1362 if (raid_dev->mbox_pool_handle)
1363 pci_pool_destroy(raid_dev->mbox_pool_handle);
1365 return;
1370 * megaraid_alloc_scb - detach and return a scb from the free list
1371 * @adapter : controller's soft state
1373 * return the scb from the head of the free list. NULL if there are none
1374 * available
1376 static inline scb_t *
1377 megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp)
1379 struct list_head *head = &adapter->kscb_pool;
1380 scb_t *scb = NULL;
1381 unsigned long flags;
1383 // detach scb from free pool
1384 spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
1386 if (list_empty(head)) {
1387 spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
1388 return NULL;
1391 scb = list_entry(head->next, scb_t, list);
1392 list_del_init(&scb->list);
1394 spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
1396 scb->state = SCB_ACTIVE;
1397 scb->scp = scp;
1398 scb->dma_type = MRAID_DMA_NONE;
1400 return scb;

/**
 * megaraid_dealloc_scb - return the scb to the free pool
 * @adapter     : controller's soft state
 * @scb         : scb to be freed
 *
 * Return the scb back to the free list of scbs. The caller must 'flush' the
 * SCB before calling us, e.g., by performing pci_unmap and/or pci_sync.
 * NOTE NOTE: Make sure the scb is not on any list before calling this
 * routine.
 */
static inline void
megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb)
{
        unsigned long           flags;

        // put scb in the free pool
        scb->state      = SCB_FREE;
        scb->scp        = NULL;
        spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);

        list_add(&scb->list, &adapter->kscb_pool);

        spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);

        return;
}
1433 * megaraid_mbox_mksgl - make the scatter-gather list
1434 * @adapter - controller's soft state
1435 * @scb - scsi control block
1437 * prepare the scatter-gather list
1439 static inline int
1440 megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
1442 struct scatterlist *sgl;
1443 mbox_ccb_t *ccb;
1444 struct page *page;
1445 unsigned long offset;
1446 struct scsi_cmnd *scp;
1447 int sgcnt;
1448 int i;
1451 scp = scb->scp;
1452 ccb = (mbox_ccb_t *)scb->ccb;
1454 // no mapping required if no data to be transferred
1455 if (!scp->request_buffer || !scp->request_bufflen)
1456 return 0;
1458 if (!scp->use_sg) { /* scatter-gather list not used */
1460 page = virt_to_page(scp->request_buffer);
1462 offset = ((unsigned long)scp->request_buffer & ~PAGE_MASK);
1464 ccb->buf_dma_h = pci_map_page(adapter->pdev, page, offset,
1465 scp->request_bufflen,
1466 scb->dma_direction);
1467 scb->dma_type = MRAID_DMA_WBUF;
1470 * We need to handle special 64-bit commands that need a
1471 * minimum of 1 SG
1473 sgcnt = 1;
1474 ccb->sgl64[0].address = ccb->buf_dma_h;
1475 ccb->sgl64[0].length = scp->request_bufflen;
1477 return sgcnt;
1480 sgl = (struct scatterlist *)scp->request_buffer;
1482 // The number of sg elements returned must not exceed our limit
1483 sgcnt = pci_map_sg(adapter->pdev, sgl, scp->use_sg,
1484 scb->dma_direction);
1486 if (sgcnt > adapter->sglen) {
1487 con_log(CL_ANN, (KERN_CRIT
1488 "megaraid critical: too many sg elements:%d\n",
1489 sgcnt));
1490 BUG();
1493 scb->dma_type = MRAID_DMA_WSG;
1495 for (i = 0; i < sgcnt; i++, sgl++) {
1496 ccb->sgl64[i].address = sg_dma_address(sgl);
1497 ccb->sgl64[i].length = sg_dma_len(sgl);
1500 // Return count of SG nodes
1501 return sgcnt;

/**
 * mbox_post_cmd - issue a mailbox command
 * @adapter     - controller's soft state
 * @scb         - command to be issued
 *
 * Post the command to the controller if the mailbox is available.
 */
1512 static inline int
1513 mbox_post_cmd(adapter_t *adapter, scb_t *scb)
1515 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
1516 mbox64_t *mbox64;
1517 mbox_t *mbox;
1518 mbox_ccb_t *ccb;
1519 unsigned long flags;
1520 unsigned int i = 0;
1523 ccb = (mbox_ccb_t *)scb->ccb;
1524 mbox = raid_dev->mbox;
1525 mbox64 = raid_dev->mbox64;
1528 * Check for busy mailbox. If it is, return failure - the caller
1529 * should retry later.
1531 spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags);
1533 if (unlikely(mbox->busy)) {
1534 do {
1535 udelay(1);
1536 i++;
1537 rmb();
1538 } while(mbox->busy && (i < max_mbox_busy_wait));
1540 if (mbox->busy) {
1542 spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
1544 return -1;
1549 // Copy this command's mailbox data into "adapter's" mailbox
1550 memcpy((caddr_t)mbox64, (caddr_t)ccb->mbox64, 22);
1551 mbox->cmdid = scb->sno;
1553 adapter->outstanding_cmds++;
1555 if (scb->dma_direction == PCI_DMA_TODEVICE) {
1556 if (!scb->scp->use_sg) { // sg list not used
1557 pci_dma_sync_single(adapter->pdev, ccb->buf_dma_h,
1558 scb->scp->request_bufflen,
1559 PCI_DMA_TODEVICE);
1561 else {
1562 pci_dma_sync_sg(adapter->pdev, scb->scp->request_buffer,
1563 scb->scp->use_sg, PCI_DMA_TODEVICE);
1567 mbox->busy = 1; // Set busy
1568 mbox->poll = 0;
1569 mbox->ack = 0;
1570 wmb();
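
        /*
         * Note (interpretation): the doorbell write below hands the bus
         * address of the mailbox to the firmware; the low bit set in the
         * value signals that a new command has been posted.
         */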
1572 WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
1574 spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
1576 return 0;
1581 * megaraid_queue_command - generic queue entry point for all LLDs
1582 * @scp : pointer to the scsi command to be executed
1583 * @done : callback routine to be called after the cmd has be completed
1585 * Queue entry point for mailbox based controllers.
1587 static int
1588 megaraid_queue_command(struct scsi_cmnd *scp, void (* done)(struct scsi_cmnd *))
1590 adapter_t *adapter;
1591 scb_t *scb;
1592 int if_busy;
1594 adapter = SCP2ADAPTER(scp);
1595 scp->scsi_done = done;
1596 scp->result = 0;
1598 ASSERT(spin_is_locked(adapter->host_lock));
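
        /*
         * The 2.6-era mid-layer calls queuecommand() with the host lock
         * held (asserted above). Drop it while the command is built and
         * posted, and re-acquire it before completing or returning.
         */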
1600 spin_unlock(adapter->host_lock);

        /*
         * Allocate and build a SCB request.
         * The if_busy flag will be set if megaraid_mbox_build_cmd() could
         * not allocate an scb. We return a non-zero status in that case.
         * NOTE: scb can be NULL even though certain commands completed
         * successfully, e.g., MODE_SENSE and TEST_UNIT_READY; we return 0
         * in that case, and do the callback right away.
         */
1610 if_busy = 0;
1611 scb = megaraid_mbox_build_cmd(adapter, scp, &if_busy);
1613 if (scb) {
1614 megaraid_mbox_runpendq(adapter, scb);
1617 spin_lock(adapter->host_lock);
1619 if (!scb) { // command already completed
1620 done(scp);
1623 return if_busy;

/**
 * megaraid_mbox_build_cmd - transform the mid-layer scsi command to megaraid
 * firmware lingua
 * @adapter     - controller's soft state
 * @scp         - mid-layer scsi command pointer
 * @busy        - set if request could not be completed because of lack of
 *                resources
 *
 * Convert the command issued by the mid-layer to the format understood by
 * the megaraid firmware. We also complete certain commands without sending
 * them to the firmware.
 */
1638 static scb_t *
1639 megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
1641 mraid_device_t *rdev = ADAP2RAIDDEV(adapter);
1642 int channel;
1643 int target;
1644 int islogical;
1645 mbox_ccb_t *ccb;
1646 mraid_passthru_t *pthru;
1647 mbox64_t *mbox64;
1648 mbox_t *mbox;
1649 scb_t *scb;
1650 char skip[] = "skipping";
1651 char scan[] = "scanning";
1652 char *ss;
1656 * Get the appropriate device map for the device this command is
1657 * intended for
1659 MRAID_GET_DEVICE_MAP(adapter, scp, channel, target, islogical);
1662 * Logical drive commands
1664 if (islogical) {
1665 switch (scp->cmnd[0]) {
1666 case TEST_UNIT_READY:
1668 * Do we support clustering and is the support enabled
1669 * If no, return success always
1671 if (!adapter->ha) {
1672 scp->result = (DID_OK << 16);
1673 return NULL;
1676 if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1677 scp->result = (DID_ERROR << 16);
1678 *busy = 1;
1679 return NULL;
1682 scb->dma_direction = scp->sc_data_direction;
1683 scb->dev_channel = 0xFF;
1684 scb->dev_target = target;
1685 ccb = (mbox_ccb_t *)scb->ccb;
1688 * The command id will be provided by the command
1689 * issuance routine
1691 ccb->raw_mbox[0] = CLUSTER_CMD;
1692 ccb->raw_mbox[2] = RESERVATION_STATUS;
1693 ccb->raw_mbox[3] = target;
1695 return scb;
1697 case MODE_SENSE:
1698 if (scp->use_sg) {
1699 struct scatterlist *sgl;
1700 caddr_t vaddr;
1702 sgl = (struct scatterlist *)scp->request_buffer;
1703 if (sgl->page) {
1704 vaddr = (caddr_t)
1705 (page_address((&sgl[0])->page)
1706 + (&sgl[0])->offset);
1708 memset(vaddr, 0, scp->cmnd[4]);
1710 else {
1711 con_log(CL_ANN, (KERN_WARNING
1712 "megaraid mailbox: invalid sg:%d\n",
1713 __LINE__));
1716 else {
1717 memset(scp->request_buffer, 0, scp->cmnd[4]);
1719 scp->result = (DID_OK << 16);
1720 return NULL;
1722 case INQUIRY:
1724 * Display the channel scan for logical drives
1725 * Do not display scan for a channel if already done.
1727 if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) {
1729 con_log(CL_ANN, (KERN_INFO
1730 "scsi[%d]: scanning scsi channel %d",
1731 adapter->host->host_no,
1732 SCP2CHANNEL(scp)));
1734 con_log(CL_ANN, (
1735 " [virtual] for logical drives\n"));
1737 rdev->last_disp |= (1L << SCP2CHANNEL(scp));
1740 /* Fall through */
1742 case READ_CAPACITY:
1744 * Do not allow LUN > 0 for logical drives and
1745 * requests for more than 40 logical drives
1747 if (SCP2LUN(scp)) {
1748 scp->result = (DID_BAD_TARGET << 16);
1749 return NULL;
1751 if ((target % 0x80) >= MAX_LOGICAL_DRIVES_40LD) {
1752 scp->result = (DID_BAD_TARGET << 16);
1753 return NULL;
1757 /* Allocate a SCB and initialize passthru */
1758 if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1759 scp->result = (DID_ERROR << 16);
1760 *busy = 1;
1761 return NULL;
1764 ccb = (mbox_ccb_t *)scb->ccb;
1765 scb->dev_channel = 0xFF;
1766 scb->dev_target = target;
1767 pthru = ccb->pthru;
1768 mbox = ccb->mbox;
1769 mbox64 = ccb->mbox64;
1771 pthru->timeout = 0;
1772 pthru->ars = 1;
1773 pthru->reqsenselen = 14;
1774 pthru->islogical = 1;
1775 pthru->logdrv = target;
1776 pthru->cdblen = scp->cmd_len;
1777 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1779 mbox->cmd = MBOXCMD_PASSTHRU64;
1780 scb->dma_direction = scp->sc_data_direction;
1782 pthru->dataxferlen = scp->request_bufflen;
1783 pthru->dataxferaddr = ccb->sgl_dma_h;
1784 pthru->numsge = megaraid_mbox_mksgl(adapter,
1785 scb);
1787 mbox->xferaddr = 0xFFFFFFFF;
1788 mbox64->xferaddr_lo = (uint32_t )ccb->pthru_dma_h;
1789 mbox64->xferaddr_hi = 0;
1791 return scb;
1793 case READ_6:
1794 case WRITE_6:
1795 case READ_10:
1796 case WRITE_10:
1797 case READ_12:
1798 case WRITE_12:
1801 * Allocate a SCB and initialize mailbox
1803 if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1804 scp->result = (DID_ERROR << 16);
1805 *busy = 1;
1806 return NULL;
1808 ccb = (mbox_ccb_t *)scb->ccb;
1809 scb->dev_channel = 0xFF;
1810 scb->dev_target = target;
1811 mbox = ccb->mbox;
1812 mbox64 = ccb->mbox64;
1813 mbox->logdrv = target;
1816 * A little HACK: 2nd bit is zero for all scsi read
1817 * commands and is set for all scsi write commands
1819 mbox->cmd = (scp->cmnd[0] & 0x02) ? MBOXCMD_LWRITE64:
1820 MBOXCMD_LREAD64 ;
1823 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1825 if (scp->cmd_len == 6) {
1826 mbox->numsectors = (uint32_t)scp->cmnd[4];
1827 mbox->lba =
1828 ((uint32_t)scp->cmnd[1] << 16) |
1829 ((uint32_t)scp->cmnd[2] << 8) |
1830 (uint32_t)scp->cmnd[3];
1832 mbox->lba &= 0x1FFFFF;
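                                /*
                                 * A 6-byte READ/WRITE CDB carries only a
                                 * 21-bit LBA (the high bits of byte 1 were
                                 * historically the LUN field), hence the
                                 * 0x1FFFFF mask above.
                                 */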
1836 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1838 else if (scp->cmd_len == 10) {
1839 mbox->numsectors =
1840 (uint32_t)scp->cmnd[8] |
1841 ((uint32_t)scp->cmnd[7] << 8);
1842 mbox->lba =
1843 ((uint32_t)scp->cmnd[2] << 24) |
1844 ((uint32_t)scp->cmnd[3] << 16) |
1845 ((uint32_t)scp->cmnd[4] << 8) |
1846 (uint32_t)scp->cmnd[5];
1850 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1852 else if (scp->cmd_len == 12) {
1853 mbox->lba =
1854 ((uint32_t)scp->cmnd[2] << 24) |
1855 ((uint32_t)scp->cmnd[3] << 16) |
1856 ((uint32_t)scp->cmnd[4] << 8) |
1857 (uint32_t)scp->cmnd[5];
1859 mbox->numsectors =
1860 ((uint32_t)scp->cmnd[6] << 24) |
1861 ((uint32_t)scp->cmnd[7] << 16) |
1862 ((uint32_t)scp->cmnd[8] << 8) |
1863 (uint32_t)scp->cmnd[9];
1865 else {
1866 con_log(CL_ANN, (KERN_WARNING
1867 "megaraid: unsupported CDB length\n"));
1869 megaraid_dealloc_scb(adapter, scb);
1871 scp->result = (DID_ERROR << 16);
1872 return NULL;
1875 scb->dma_direction = scp->sc_data_direction;
1877 // Calculate Scatter-Gather info
1878 mbox64->xferaddr_lo = (uint32_t )ccb->sgl_dma_h;
1879 mbox->numsge = megaraid_mbox_mksgl(adapter,
1880 scb);
1881 mbox->xferaddr = 0xFFFFFFFF;
1882 mbox64->xferaddr_hi = 0;
1884 return scb;
1886 case RESERVE:
1887 case RELEASE:
1889 * Do we support clustering and is the support enabled
1891 if (!adapter->ha) {
1892 scp->result = (DID_BAD_TARGET << 16);
1893 return NULL;
1897 * Allocate a SCB and initialize mailbox
1899 if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1900 scp->result = (DID_ERROR << 16);
1901 *busy = 1;
1902 return NULL;
1905 ccb = (mbox_ccb_t *)scb->ccb;
1906 scb->dev_channel = 0xFF;
1907 scb->dev_target = target;
1908 ccb->raw_mbox[0] = CLUSTER_CMD;
1909 ccb->raw_mbox[2] = (scp->cmnd[0] == RESERVE) ?
1910 RESERVE_LD : RELEASE_LD;
1912 ccb->raw_mbox[3] = target;
1913 scb->dma_direction = scp->sc_data_direction;
1915 return scb;
1917 default:
1918 scp->result = (DID_BAD_TARGET << 16);
1919 return NULL;
1922 else { // Passthru device commands
1924 // Do not allow access to target id > 15 or LUN > 7
1925 if (target > 15 || SCP2LUN(scp) > 7) {
1926 scp->result = (DID_BAD_TARGET << 16);
1927 return NULL;
1930 // if fast load option was set and scan for last device is
1931 // over, reset the fast_load flag so that during a possible
1932 // next scan, devices can be made available
1933 if (rdev->fast_load && (target == 15) &&
1934 (SCP2CHANNEL(scp) == adapter->max_channel -1)) {
1936 con_log(CL_ANN, (KERN_INFO
1937 "megaraid[%d]: physical device scan re-enabled\n",
1938 adapter->host->host_no));
1939 rdev->fast_load = 0;
1943 * Display the channel scan for physical devices
1945 if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) {
1947 ss = rdev->fast_load ? skip : scan;
1949 con_log(CL_ANN, (KERN_INFO
1950 "scsi[%d]: %s scsi channel %d [Phy %d]",
1951 adapter->host->host_no, ss, SCP2CHANNEL(scp),
1952 channel));
1954 con_log(CL_ANN, (
1955 " for non-raid devices\n"));
1957 rdev->last_disp |= (1L << SCP2CHANNEL(scp));
1960 // disable channel sweep if fast load option given
1961 if (rdev->fast_load) {
1962 scp->result = (DID_BAD_TARGET << 16);
1963 return NULL;
1966 // Allocate a SCB and initialize passthru
1967 if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1968 scp->result = (DID_ERROR << 16);
1969 *busy = 1;
1970 return NULL;
1973 ccb = (mbox_ccb_t *)scb->ccb;
1974 scb->dev_channel = channel;
1975 scb->dev_target = target;
1976 scb->dma_direction = scp->sc_data_direction;
1977 mbox = ccb->mbox;
1978 mbox64 = ccb->mbox64;
1980 // Does this firmware support extended CDBs
1981 if (adapter->max_cdb_sz == 16) {
1982 mbox->cmd = MBOXCMD_EXTPTHRU;
1984 megaraid_mbox_prepare_epthru(adapter, scb, scp);
1986 mbox64->xferaddr_lo = (uint32_t)ccb->epthru_dma_h;
1987 mbox64->xferaddr_hi = 0;
1988 mbox->xferaddr = 0xFFFFFFFF;
1990 else {
1991 mbox->cmd = MBOXCMD_PASSTHRU64;
1993 megaraid_mbox_prepare_pthru(adapter, scb, scp);
1995 mbox64->xferaddr_lo = (uint32_t)ccb->pthru_dma_h;
1996 mbox64->xferaddr_hi = 0;
1997 mbox->xferaddr = 0xFFFFFFFF;
1999 return scb;
2002 // NOT REACHED

/**
 * megaraid_mbox_runpendq - execute commands queued in the pending queue
 * @adapter     : controller's soft state
 * @scb_q       : SCB to be queued in the pending list
 *
 * Scan the pending list for commands which are not yet issued and try to
 * post them to the controller. The SCB can be a null pointer, which would
 * indicate that there is no SCB to be queued; just try to execute the ones
 * already in the pending list.
 *
 * NOTE: We do not actually traverse the pending list. The SCBs are plucked
 * out from the head of the pending list. If one is successfully issued, the
 * next SCB is at the head.
 */
2019 static void
2020 megaraid_mbox_runpendq(adapter_t *adapter, scb_t *scb_q)
2022 scb_t *scb;
2023 unsigned long flags;
2025 spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
2027 if (scb_q) {
2028 scb_q->state = SCB_PENDQ;
2029 list_add_tail(&scb_q->list, &adapter->pend_list);
        // if the adapter is not in quiescent mode, post the commands to FW
2033 if (adapter->quiescent) {
2034 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
2035 return;
2038 while (!list_empty(&adapter->pend_list)) {
2040 ASSERT(spin_is_locked(PENDING_LIST_LOCK(adapter)));
2042 scb = list_entry(adapter->pend_list.next, scb_t, list);
2044 // remove the scb from the pending list and try to
2045 // issue. If we are unable to issue it, put back in
2046 // the pending list and return
2048 list_del_init(&scb->list);
2050 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
2052 // if mailbox was busy, return SCB back to pending
2053 // list. Make sure to add at the head, since that's
2054 // where it would have been removed from
2056 scb->state = SCB_ISSUED;
2058 if (mbox_post_cmd(adapter, scb) != 0) {
2060 spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
2062 scb->state = SCB_PENDQ;
2064 list_add(&scb->list, &adapter->pend_list);
2066 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter),
2067 flags);
2069 return;
2072 spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
2075 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
2078 return;
2083 * megaraid_mbox_prepare_pthru - prepare a command for physical devices
2084 * @adapter - pointer to controller's soft state
2085 * @scb - scsi control block
2086 * @scp - scsi command from the mid-layer
2088 * prepare a command for the scsi physical devices
2090 static void
2091 megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb,
2092 struct scsi_cmnd *scp)
2094 mbox_ccb_t *ccb;
2095 mraid_passthru_t *pthru;
2096 uint8_t channel;
2097 uint8_t target;
2099 ccb = (mbox_ccb_t *)scb->ccb;
2100 pthru = ccb->pthru;
2101 channel = scb->dev_channel;
2102 target = scb->dev_target;
2104 pthru->timeout = 1; // 0=6sec, 1=60sec, 2=10min, 3=3hrs
2105 pthru->ars = 1;
2106 pthru->islogical = 0;
2107 pthru->channel = 0;
2108 pthru->target = (channel << 4) | target;
2109 pthru->logdrv = SCP2LUN(scp);
2110 pthru->reqsenselen = 14;
2111 pthru->cdblen = scp->cmd_len;
2113 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
2115 if (scp->request_bufflen) {
2116 pthru->dataxferlen = scp->request_bufflen;
2117 pthru->dataxferaddr = ccb->sgl_dma_h;
2118 pthru->numsge = megaraid_mbox_mksgl(adapter, scb);
2120 else {
2121 pthru->dataxferaddr = 0;
2122 pthru->dataxferlen = 0;
2123 pthru->numsge = 0;
2125 return;

/**
 * megaraid_mbox_prepare_epthru - prepare a command for physical devices
 * @adapter     - pointer to controller's soft state
 * @scb         - scsi control block
 * @scp         - scsi command from the mid-layer
 *
 * Prepare a command for the scsi physical devices. This routine prepares
 * commands for devices which can take extended CDBs (>10 bytes).
 */
2138 static void
2139 megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb,
2140 struct scsi_cmnd *scp)
2142 mbox_ccb_t *ccb;
2143 mraid_epassthru_t *epthru;
2144 uint8_t channel;
2145 uint8_t target;
2147 ccb = (mbox_ccb_t *)scb->ccb;
2148 epthru = ccb->epthru;
2149 channel = scb->dev_channel;
2150 target = scb->dev_target;
2152 epthru->timeout = 1; // 0=6sec, 1=60sec, 2=10min, 3=3hrs
2153 epthru->ars = 1;
2154 epthru->islogical = 0;
2155 epthru->channel = 0;
2156 epthru->target = (channel << 4) | target;
2157 epthru->logdrv = SCP2LUN(scp);
2158 epthru->reqsenselen = 14;
2159 epthru->cdblen = scp->cmd_len;
2161 memcpy(epthru->cdb, scp->cmnd, scp->cmd_len);
2163 if (scp->request_bufflen) {
2164 epthru->dataxferlen = scp->request_bufflen;
2165 epthru->dataxferaddr = ccb->sgl_dma_h;
2166 epthru->numsge = megaraid_mbox_mksgl(adapter, scb);
2168 else {
2169 epthru->dataxferaddr = 0;
2170 epthru->dataxferlen = 0;
2171 epthru->numsge = 0;
2173 return;

/**
 * megaraid_ack_sequence - interrupt ack sequence for memory mapped HBAs
 * @adapter     - controller's soft state
 *
 * Interrupt acknowledgement sequence for memory mapped HBAs. Find the
 * completed commands and put them on the completed list for later processing.
 *
 * Returns: 1 if the interrupt is valid, 0 otherwise
 */
2186 static inline int
2187 megaraid_ack_sequence(adapter_t *adapter)
2189 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
2190 mbox_t *mbox;
2191 scb_t *scb;
2192 uint8_t nstatus;
2193 uint8_t completed[MBOX_MAX_FIRMWARE_STATUS];
2194 struct list_head clist;
2195 int handled;
2196 uint32_t dword;
2197 unsigned long flags;
2198 int i, j;
2201 mbox = raid_dev->mbox;
2203 // move the SCBs from the firmware completed array to our local list
2204 INIT_LIST_HEAD(&clist);
2206 // loop while the F/W has more commands for us to complete
2207 handled = 0;
2208 spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags);
2209 do {
2211 * Check if a valid interrupt is pending. If found, force the
2212 * interrupt line low.
2214 dword = RDOUTDOOR(raid_dev);
2215 if (dword != 0x10001234) break;
2217 handled = 1;
2219 WROUTDOOR(raid_dev, 0x10001234);
2221 nstatus = 0;
2222 // wait for valid numstatus to post
2223 for (i = 0; i < 0xFFFFF; i++) {
2224 if (mbox->numstatus != 0xFF) {
2225 nstatus = mbox->numstatus;
2226 break;
2228 rmb();
2230 mbox->numstatus = 0xFF;
2232 adapter->outstanding_cmds -= nstatus;
2234 for (i = 0; i < nstatus; i++) {
2236 // wait for valid command index to post
2237 for (j = 0; j < 0xFFFFF; j++) {
2238 if (mbox->completed[i] != 0xFF) break;
2239 rmb();
2241 completed[i] = mbox->completed[i];
2242 mbox->completed[i] = 0xFF;
2244 if (completed[i] == 0xFF) {
2245 con_log(CL_ANN, (KERN_CRIT
2246 "megaraid: command posting timed out\n"));
2248 BUG();
2249 continue;
2252 // Get SCB associated with this command id
2253 if (completed[i] >= MBOX_MAX_SCSI_CMDS) {
2254 // a cmm command
2255 scb = adapter->uscb_list + (completed[i] -
2256 MBOX_MAX_SCSI_CMDS);
2258 else {
2259 // an os command
2260 scb = adapter->kscb_list + completed[i];
2263 scb->status = mbox->status;
2264 list_add_tail(&scb->list, &clist);
2267 // Acknowledge interrupt
2268 WRINDOOR(raid_dev, 0x02);
2270 } while(1);
2272 spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
2275 // put the completed commands in the completed list. DPC would
2276 // complete these commands later
2277 spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
2279 list_splice(&clist, &adapter->completed_list);
2281 spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
2284 // schedule the DPC if there is some work for it
2285 if (handled)
2286 tasklet_schedule(&adapter->dpc_h);
2288 return handled;
2293 * megaraid_isr - isr for memory-mapped mailbox based controllers
2294 * @irq - irq
2295 * @devp - pointer to our soft state
2296 * @regs - unused
2298 * Interrupt service routine for memory-mapped mailbox controllers.
2300 static irqreturn_t
2301 megaraid_isr(int irq, void *devp, struct pt_regs *regs)
2303 adapter_t *adapter = devp;
2304 int handled;
2306 handled = megaraid_ack_sequence(adapter);
2308 /* Loop through any pending requests */
2309 if (!adapter->quiescent) {
2310 megaraid_mbox_runpendq(adapter, NULL);
2313 return IRQ_RETVAL(handled);
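/*
 * For illustration only: a sketch of how an ISR with this 2.6.9-era
 * signature is typically hooked up. The SA_SHIRQ flag, the "megaraid" name
 * and the error handling below are assumptions, not code copied from this
 * driver (which registers its interrupt during adapter initialization).
 */
#include <linux/errno.h>
#include <linux/interrupt.h>

static int example_hook_isr(adapter_t *adapter)
{
	/* shared PCI line; the handler receives (irq, dev_id, regs) on 2.6.9 */
	if (request_irq(adapter->pdev->irq, megaraid_isr, SA_SHIRQ,
			"megaraid", adapter) != 0) {
		return -ENODEV;
	}
	return 0;
}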
2318 * megaraid_mbox_sync_scb - sync kernel buffers
2319 * @adapter : controller's soft state
2320 * @scb : pointer to the resource packet
2322 * DMA sync if required.
2324 static inline void
2325 megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb)
2327 mbox_ccb_t *ccb;
2329 ccb = (mbox_ccb_t *)scb->ccb;
2331 switch (scb->dma_type) {
2333 case MRAID_DMA_WBUF:
2334 if (scb->dma_direction == PCI_DMA_FROMDEVICE) {
2335 pci_dma_sync_single(adapter->pdev,
2336 ccb->buf_dma_h,
2337 scb->scp->request_bufflen,
2338 PCI_DMA_FROMDEVICE);
2341 pci_unmap_page(adapter->pdev, ccb->buf_dma_h,
2342 scb->scp->request_bufflen, scb->dma_direction);
2344 break;
2346 case MRAID_DMA_WSG:
2347 if (scb->dma_direction == PCI_DMA_FROMDEVICE) {
2348 pci_dma_sync_sg(adapter->pdev,
2349 scb->scp->request_buffer,
2350 scb->scp->use_sg, PCI_DMA_FROMDEVICE);
2353 pci_unmap_sg(adapter->pdev, scb->scp->request_buffer,
2354 scb->scp->use_sg, scb->dma_direction);
2356 break;
2358 default:
2359 break;
2362 return;
2367 * megaraid_mbox_dpc - the tasklet to complete the commands from completed list
2368 * @devp : pointer to HBA soft state
2370 * Pick up the commands from the completed list and send them back to the owners.
2371 * This is a reentrant function and does not assume any locks are held while
2372 * it is being called.
2374 static void
2375 megaraid_mbox_dpc(unsigned long devp)
2377 adapter_t *adapter = (adapter_t *)devp;
2378 mraid_device_t *raid_dev;
2379 struct list_head clist;
2380 struct scatterlist *sgl;
2381 scb_t *scb;
2382 scb_t *tmp;
2383 struct scsi_cmnd *scp;
2384 mraid_passthru_t *pthru;
2385 mraid_epassthru_t *epthru;
2386 mbox_ccb_t *ccb;
2387 int islogical;
2388 int pdev_index;
2389 int pdev_state;
2390 mbox_t *mbox;
2391 unsigned long flags;
2392 uint8_t c;
2393 int status;
2396 if (!adapter) return;
2398 raid_dev = ADAP2RAIDDEV(adapter);
2400 // move the SCBs from the completed list to our local list
2401 INIT_LIST_HEAD(&clist);
2403 spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
2405 list_splice_init(&adapter->completed_list, &clist);
2407 spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
2410 list_for_each_entry_safe(scb, tmp, &clist, list) {
2412 status = scb->status;
2413 scp = scb->scp;
2414 ccb = (mbox_ccb_t *)scb->ccb;
2415 pthru = ccb->pthru;
2416 epthru = ccb->epthru;
2417 mbox = ccb->mbox;
2419 // Make sure f/w has completed a valid command
2420 if (scb->state != SCB_ISSUED) {
2421 con_log(CL_ANN, (KERN_CRIT
2422 "megaraid critical err: invalid command %d:%d:%p\n",
2423 scb->sno, scb->state, scp));
2424 BUG();
2425 continue; // Must never happen!
2428 // check for the management command and complete it right away
2429 if (scb->sno >= MBOX_MAX_SCSI_CMDS) {
2430 scb->state = SCB_FREE;
2431 scb->status = status;
2433 // remove from local clist
2434 list_del_init(&scb->list);
2436 megaraid_mbox_mm_done(adapter, scb);
2438 continue;
2441 // Was an abort issued for this command earlier?
2442 if (scb->state & SCB_ABORT) {
2443 con_log(CL_ANN, (KERN_NOTICE
2444 "megaraid: aborted cmd %lx[%x] completed\n",
2445 scp->serial_number, scb->sno));
2449 * If the inquiry came from a disk drive which is not part of
2450 * any RAID array, expose it to the kernel. For this to be
2451 * enabled, user must set the "megaraid_expose_unconf_disks"
2452 * flag to 1 by specifying it on module parameter list.
2453 * This would enable data migration off drives from other
2454 * configurations.
2456 islogical = MRAID_IS_LOGICAL(adapter, scp);
2457 if (scp->cmnd[0] == INQUIRY && status == 0 && islogical == 0
2458 && IS_RAID_CH(raid_dev, scb->dev_channel)) {
2460 if (scp->use_sg) {
2461 sgl = (struct scatterlist *)
2462 scp->request_buffer;
2464 if (sgl->page) {
2465 c = *(unsigned char *)
2466 (page_address((&sgl[0])->page) +
2467 (&sgl[0])->offset);
2469 else {
2470 con_log(CL_ANN, (KERN_WARNING
2471 "megaraid mailbox: invalid sg:%d\n",
2472 __LINE__));
2473 c = 0;
2476 else {
2477 c = *(uint8_t *)scp->request_buffer;
2480 if ((c & 0x1F ) == TYPE_DISK) {
2481 pdev_index = (scb->dev_channel * 16) +
2482 scb->dev_target;
2483 pdev_state =
2484 raid_dev->pdrv_state[pdev_index] & 0x0F;
2486 if (pdev_state == PDRV_ONLINE ||
2487 pdev_state == PDRV_FAILED ||
2488 pdev_state == PDRV_RBLD ||
2489 pdev_state == PDRV_HOTSPARE ||
2490 megaraid_expose_unconf_disks == 0) {
2492 status = 0xF0;
2497 // Convert MegaRAID status to Linux error code
2498 switch (status) {
2500 case 0x00:
2502 scp->result = (DID_OK << 16);
2503 break;
2505 case 0x02:
2507 /* set sense_buffer and result fields */
2508 if (mbox->cmd == MBOXCMD_PASSTHRU ||
2509 mbox->cmd == MBOXCMD_PASSTHRU64) {
2511 memcpy(scp->sense_buffer, pthru->reqsensearea,
2512 14);
2514 scp->result = DRIVER_SENSE << 24 |
2515 DID_OK << 16 | CHECK_CONDITION << 1;
2517 else {
2518 if (mbox->cmd == MBOXCMD_EXTPTHRU) {
2520 memcpy(scp->sense_buffer,
2521 epthru->reqsensearea, 14);
2523 scp->result = DRIVER_SENSE << 24 |
2524 DID_OK << 16 |
2525 CHECK_CONDITION << 1;
2526 } else {
2527 scp->sense_buffer[0] = 0x70;
2528 scp->sense_buffer[2] = ABORTED_COMMAND;
2529 scp->result = CHECK_CONDITION << 1;
2532 break;
2534 case 0x08:
2536 scp->result = DID_BUS_BUSY << 16 | status;
2537 break;
2539 default:
2542 * If TEST_UNIT_READY fails, we know RESERVATION_STATUS
2543 * failed
2545 if (scp->cmnd[0] == TEST_UNIT_READY) {
2546 scp->result = DID_ERROR << 16 |
2547 RESERVATION_CONFLICT << 1;
2549 else
2551 * Error code returned is 1 if Reserve or Release
2552 * failed or the input parameter is invalid
2554 if (status == 1 && (scp->cmnd[0] == RESERVE ||
2555 scp->cmnd[0] == RELEASE)) {
2557 scp->result = DID_ERROR << 16 |
2558 RESERVATION_CONFLICT << 1;
2560 else {
2561 scp->result = DID_BAD_TARGET << 16 | status;
2565 // print a debug message for all failed commands
2566 if (status) {
2567 megaraid_mbox_display_scb(adapter, scb);
2570 // Free our internal resources and call the mid-layer callback
2571 // routine
2572 megaraid_mbox_sync_scb(adapter, scb);
2574 // remove from local clist
2575 list_del_init(&scb->list);
2577 // put back in free list
2578 megaraid_dealloc_scb(adapter, scb);
2580 // send the scsi packet back to kernel
2581 spin_lock(adapter->host_lock);
2582 scp->scsi_done(scp);
2583 spin_unlock(adapter->host_lock);
2586 return;
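/*
 * A standalone sketch of the scp->result layout produced by the switch
 * above: driver byte in bits 31-24, host byte in bits 23-16, message byte
 * in bits 15-8 and SCSI status in bits 7-0. The constants are written out
 * with their assumed 2.6-era values instead of pulling in the SCSI headers.
 */
#include <assert.h>
#include <stdint.h>

#define EX_DID_OK		0x00	/* mirrors DID_OK: host byte, no error */
#define EX_DRIVER_SENSE		0x08	/* mirrors DRIVER_SENSE: sense data valid */
#define EX_CHECK_CONDITION	0x01	/* mirrors CHECK_CONDITION; SAM value is code << 1 */

static uint32_t example_result(uint8_t drv, uint8_t host, uint8_t status_code)
{
	return ((uint32_t)drv << 24) | ((uint32_t)host << 16) |
	       ((uint32_t)status_code << 1);
}

int main(void)
{
	/* the CHECK CONDITION path in the dpc above yields 0x08000002 */
	assert(example_result(EX_DRIVER_SENSE, EX_DID_OK,
			      EX_CHECK_CONDITION) == 0x08000002);
	return 0;
}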
2591 * megaraid_abort_handler - abort the scsi command
2592 * @scp : command to be aborted
2594 * Abort a previous SCSI request. Only commands on the pending list can be
2595 * aborted. All the commands issued to the F/W must complete.
2597 static int
2598 megaraid_abort_handler(struct scsi_cmnd *scp)
2600 adapter_t *adapter;
2601 mraid_device_t *raid_dev;
2602 scb_t *scb;
2603 scb_t *tmp;
2604 int found;
2605 unsigned long flags;
2606 int i;
2609 adapter = SCP2ADAPTER(scp);
2610 raid_dev = ADAP2RAIDDEV(adapter);
2612 ASSERT(spin_is_locked(adapter->host_lock));
2614 con_log(CL_ANN, (KERN_WARNING
2615 "megaraid: aborting-%ld cmd=%x <c=%d t=%d l=%d>\n",
2616 scp->serial_number, scp->cmnd[0], SCP2CHANNEL(scp),
2617 SCP2TARGET(scp), SCP2LUN(scp)));
2619 // If FW has stopped responding, simply return failure
2620 if (raid_dev->hw_error) {
2621 con_log(CL_ANN, (KERN_NOTICE
2622 "megaraid: hw error, not aborting\n"));
2623 return FAILED;
2626 // There might be a race here, where the command was completed by the
2627 // firmware and now it is on the completed list. Before we could
2628 // complete the command to the kernel in dpc, the abort came.
2629 // Find out if this is the case to avoid the race.
2630 scb = NULL;
2631 spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
2632 list_for_each_entry_safe(scb, tmp, &adapter->completed_list, list) {
2634 if (scb->scp == scp) { // Found command
2636 list_del_init(&scb->list); // from completed list
2638 con_log(CL_ANN, (KERN_WARNING
2639 "megaraid: %ld:%d[%d:%d], abort from completed list\n",
2640 scp->serial_number, scb->sno,
2641 scb->dev_channel, scb->dev_target));
2643 scp->result = (DID_ABORT << 16);
2644 scp->scsi_done(scp);
2646 megaraid_dealloc_scb(adapter, scb);
2648 spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter),
2649 flags);
2651 return SUCCESS;
2654 spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
2657 // Find out if this command is still on the pending list. If it is and
2658 // was never issued, abort and return success. If the command is owned
2659 // by the firmware, we must wait for the firmware to complete it.
2660 spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
2661 list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
2663 if (scb->scp == scp) { // Found command
2665 list_del_init(&scb->list); // from pending list
2667 ASSERT(!(scb->state & SCB_ISSUED));
2669 con_log(CL_ANN, (KERN_WARNING
2670 "megaraid abort: %ld[%d:%d], driver owner\n",
2671 scp->serial_number, scb->dev_channel,
2672 scb->dev_target));
2674 scp->result = (DID_ABORT << 16);
2675 scp->scsi_done(scp);
2677 megaraid_dealloc_scb(adapter, scb);
2679 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter),
2680 flags);
2682 return SUCCESS;
2685 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
2688 // Check whether we even own this command; if we do, it is currently
2689 // owned by the firmware. The only way to locate the FW scb is to
2690 // traverse the list of all SCBs, since the driver does not
2691 // maintain these SCBs on any list
2692 found = 0;
2693 for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
2694 scb = adapter->kscb_list + i;
2696 if (scb->scp == scp) {
2698 found = 1;
2700 if (!(scb->state & SCB_ISSUED)) {
2701 con_log(CL_ANN, (KERN_WARNING
2702 "megaraid abort: %ld%d[%d:%d], invalid state\n",
2703 scp->serial_number, scb->sno, scb->dev_channel,
2704 scb->dev_target));
2705 BUG();
2707 else {
2708 con_log(CL_ANN, (KERN_WARNING
2709 "megaraid abort: %ld:%d[%d:%d], fw owner\n",
2710 scp->serial_number, scb->sno, scb->dev_channel,
2711 scb->dev_target));
2716 if (!found) {
2717 con_log(CL_ANN, (KERN_WARNING
2718 "megaraid abort: scsi cmd:%ld, do now own\n",
2719 scp->serial_number));
2721 // FIXME: Should there be a callback for this command?
2722 return SUCCESS;
2725 // We cannot actually abort a command owned by firmware, return
2726 // failure and wait for reset. In host reset handler, we will find out
2727 // if the HBA is still live
2728 return FAILED;
2733 * megaraid_reset_handler - device reset handler for mailbox based driver
2734 * @scp : reference command
2736 * Reset handler for the mailbox based controller. First try to find out if
2737 * the FW is still live, in which case the outstanding commands counter must go
2738 * down to 0. If that happens, also issue the reservation reset command to
2739 * relinquish (possible) reservations on the logical drives connected to this
2740 * host
2742 static int
2743 megaraid_reset_handler(struct scsi_cmnd *scp)
2745 adapter_t *adapter;
2746 scb_t *scb;
2747 scb_t *tmp;
2748 mraid_device_t *raid_dev;
2749 unsigned long flags;
2750 uint8_t raw_mbox[sizeof(mbox_t)];
2751 int rval;
2752 int recovery_window;
2753 int recovering;
2754 int i;
2756 adapter = SCP2ADAPTER(scp);
2757 raid_dev = ADAP2RAIDDEV(adapter);
2759 ASSERT(spin_is_locked(adapter->host_lock));
2761 con_log(CL_ANN, (KERN_WARNING "megaraid: resetting the host...\n"));
2763 // return failure if adapter is not responding
2764 if (raid_dev->hw_error) {
2765 con_log(CL_ANN, (KERN_NOTICE
2766 "megaraid: hw error, cannot reset\n"));
2767 return FAILED;
2771 // Under exceptional conditions, FW can take up to 3 minutes to
2772 // complete command processing. Wait an additional 2 minutes for the
2773 // pending commands counter to go down to 0. If it doesn't, let the
2774 // controller be marked offline
2775 // Also, reset all the commands currently owned by the driver
2776 spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
2777 list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
2779 list_del_init(&scb->list); // from pending list
2781 con_log(CL_ANN, (KERN_WARNING
2782 "megaraid: %ld:%d[%d:%d], reset from pending list\n",
2783 scp->serial_number, scb->sno,
2784 scb->dev_channel, scb->dev_target));
2786 scp->result = (DID_RESET << 16);
2787 scp->scsi_done(scp);
2789 megaraid_dealloc_scb(adapter, scb);
2791 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
2793 if (adapter->outstanding_cmds) {
2794 con_log(CL_ANN, (KERN_NOTICE
2795 "megaraid: %d outstanding commands. Max wait %d sec\n",
2796 adapter->outstanding_cmds, MBOX_RESET_WAIT));
2799 spin_unlock(adapter->host_lock);
2801 recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
2803 recovering = adapter->outstanding_cmds;
2805 for (i = 0; i < recovery_window && adapter->outstanding_cmds; i++) {
2807 megaraid_ack_sequence(adapter);
2809 // print a message once every 5 seconds only
2810 if (!(i % 5)) {
2811 con_log(CL_ANN, (
2812 "megaraid mbox: Wait for %d commands to complete:%d\n",
2813 adapter->outstanding_cmds,
2814 MBOX_RESET_WAIT - i));
2817 // bail out if no recovery happened in the reset time
2818 if ((i == MBOX_RESET_WAIT) &&
2819 (recovering == adapter->outstanding_cmds)) {
2820 break;
2823 msleep(1000);
2826 spin_lock(adapter->host_lock);
2828 // If still outstanding commands, bail out
2829 if (adapter->outstanding_cmds) {
2830 con_log(CL_ANN, (KERN_WARNING
2831 "megaraid mbox: critical hardware error!\n"));
2833 raid_dev->hw_error = 1;
2835 return FAILED;
2837 else {
2838 con_log(CL_ANN, (KERN_NOTICE
2839 "megaraid mbox: reset sequence completed sucessfully\n"));
2843 // If the controller supports clustering, reset reservations
2844 if (!adapter->ha) return SUCCESS;
2846 // clear reservations if any
2847 raw_mbox[0] = CLUSTER_CMD;
2848 raw_mbox[2] = RESET_RESERVATIONS;
2850 rval = SUCCESS;
2851 if (mbox_post_sync_cmd_fast(adapter, raw_mbox) == 0) {
2852 con_log(CL_ANN,
2853 (KERN_INFO "megaraid: reservation reset\n"));
2855 else {
2856 rval = FAILED;
2857 con_log(CL_ANN, (KERN_WARNING
2858 "megaraid: reservation reset failed\n"));
2861 return rval;
2866 * START: internal commands library
2868 * This section of the driver has the common routines used by the driver
2869 * also has all the FW routines
2873 * mbox_post_sync_cmd() - blocking command to the mailbox based controllers
2874 * @adapter - controller's soft state
2875 * @raw_mbox - the mailbox
2877 * Issue a scb in synchronous and non-interrupt mode for mailbox based
2878 * controllers
2880 static int
2881 mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[])
2883 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
2884 mbox64_t *mbox64;
2885 mbox_t *mbox;
2886 uint8_t status;
2887 int i;
2890 mbox64 = raid_dev->mbox64;
2891 mbox = raid_dev->mbox;
2894 * Wait until mailbox is free
2896 if (megaraid_busywait_mbox(raid_dev) != 0)
2897 goto blocked_mailbox;
2900 * Copy mailbox data into host structure
2902 memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16);
2903 mbox->cmdid = 0xFE;
2904 mbox->busy = 1;
2905 mbox->poll = 0;
2906 mbox->ack = 0;
2907 mbox->numstatus = 0xFF;
2908 mbox->status = 0xFF;
2910 wmb();
2911 WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
2913 // wait for maximum 1 second for status to post. If the status is not
2914 // available within 1 second, assume FW is initializing and wait
2915 // for an extended amount of time
2916 if (mbox->numstatus == 0xFF) { // status not yet available
2917 udelay(25);
2919 for (i = 0; mbox->numstatus == 0xFF && i < 1000; i++) {
2920 rmb();
2921 msleep(1);
2925 if (i == 1000) {
2926 con_log(CL_ANN, (KERN_NOTICE
2927 "megaraid mailbox: wait for FW to boot "));
2929 for (i = 0; (mbox->numstatus == 0xFF) &&
2930 (i < MBOX_RESET_WAIT); i++) {
2931 rmb();
2932 con_log(CL_ANN, ("\b\b\b\b\b[%03d]",
2933 MBOX_RESET_WAIT - i));
2934 msleep(1000);
2937 if (i == MBOX_RESET_WAIT) {
2939 con_log(CL_ANN, (
2940 "\nmegaraid mailbox: status not available\n"));
2942 return -1;
2944 con_log(CL_ANN, ("\b\b\b\b\b[ok] \n"));
2948 // wait for maximum 1 second for poll semaphore
2949 if (mbox->poll != 0x77) {
2950 udelay(25);
2952 for (i = 0; (mbox->poll != 0x77) && (i < 1000); i++) {
2953 rmb();
2954 msleep(1);
2957 if (i == 1000) {
2958 con_log(CL_ANN, (KERN_WARNING
2959 "megaraid mailbox: could not get poll semaphore\n"));
2960 return -1;
2964 WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2);
2965 wmb();
2967 // wait for maximum 1 second for acknowledgement
2968 if (RDINDOOR(raid_dev) & 0x2) {
2969 udelay(25);
2971 for (i = 0; (RDINDOOR(raid_dev) & 0x2) && (i < 1000); i++) {
2972 rmb();
2973 msleep(1);
2976 if (i == 1000) {
2977 con_log(CL_ANN, (KERN_WARNING
2978 "megaraid mailbox: could not acknowledge\n"));
2979 return -1;
2982 mbox->poll = 0;
2983 mbox->ack = 0x77;
2985 status = mbox->status;
2987 // invalidate the completed command id array. After command
2988 // completion, firmware would write the valid id.
2989 mbox->numstatus = 0xFF;
2990 mbox->status = 0xFF;
2991 for (i = 0; i < MBOX_MAX_FIRMWARE_STATUS; i++) {
2992 mbox->completed[i] = 0xFF;
2995 return status;
2997 blocked_mailbox:
2999 con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n") );
3000 return -1;
3005 * mbox_post_sync_cmd_fast - blocking command to the mailbox based controllers
3006 * @adapter - controller's soft state
3007 * @raw_mbox - the mailbox
3009 * Issue a scb in synchronous and non-interrupt mode for mailbox based
3010 * controllers. This is a faster version of the synchronous command and
3011 * therefore can be called in interrupt-context as well
3013 static int
3014 mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[])
3016 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
3017 mbox_t *mbox;
3018 long i;
3021 mbox = raid_dev->mbox;
3023 // return immediately if the mailbox is busy
3024 if (mbox->busy) return -1;
3026 // Copy mailbox data into host structure
3027 memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 14);
3028 mbox->cmdid = 0xFE;
3029 mbox->busy = 1;
3030 mbox->poll = 0;
3031 mbox->ack = 0;
3032 mbox->numstatus = 0xFF;
3033 mbox->status = 0xFF;
3035 wmb();
3036 WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
3038 for (i = 0; i < 0xFFFFF; i++) {
3039 if (mbox->numstatus != 0xFF) break;
3042 if (i == 0xFFFFF) {
3043 // We may need to re-calibrate the counter
3044 con_log(CL_ANN, (KERN_CRIT
3045 "megaraid: fast sync command timed out\n"));
3048 WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2);
3049 wmb();
3051 return mbox->status;
3056 * megaraid_busywait_mbox() - Wait until the controller's mailbox is available
3057 * @raid_dev - RAID device (HBA) soft state
3059 * wait until the controller's mailbox is available to accept more commands.
3060 * wait for at most 1 second
3062 static int
3063 megaraid_busywait_mbox(mraid_device_t *raid_dev)
3065 mbox_t *mbox = raid_dev->mbox;
3066 int i = 0;
3068 if (mbox->busy) {
3069 udelay(25);
3070 for (i = 0; mbox->busy && i < 1000; i++)
3071 msleep(1);
3074 if (i < 1000) return 0;
3075 else return -1;
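/*
 * Rough arithmetic for the bound above: one udelay(25) plus at most 1000
 * calls to msleep(1). msleep() rounds up to whole timer ticks, so each call
 * sleeps at least 1/HZ; with HZ=1000 the loop is bounded by roughly one to
 * two seconds, while with HZ=100 it can stretch to tens of seconds. The
 * "at most 1 second" wording is therefore an approximation.
 */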
3080 * megaraid_mbox_product_info - some static information about the controller
3081 * @adapter - our soft state
3083 * issue commands to the controller to grab some parameters required by our
3084 * caller.
3086 static int
3087 megaraid_mbox_product_info(adapter_t *adapter)
3089 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
3090 mbox_t *mbox;
3091 uint8_t raw_mbox[sizeof(mbox_t)];
3092 mraid_pinfo_t *pinfo;
3093 dma_addr_t pinfo_dma_h;
3094 mraid_inquiry3_t *mraid_inq3;
3095 int i;
3098 memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
3099 mbox = (mbox_t *)raw_mbox;
3102 * Issue an ENQUIRY3 command to find out certain adapter parameters,
3103 * e.g., max channels, max commands etc.
3105 pinfo = pci_alloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
3106 &pinfo_dma_h);
3108 if (pinfo == NULL) {
3109 con_log(CL_ANN, (KERN_WARNING
3110 "megaraid: out of memory, %s %d\n", __FUNCTION__,
3111 __LINE__));
3113 return -1;
3115 memset(pinfo, 0, sizeof(mraid_pinfo_t));
3117 mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
3118 memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
3120 raw_mbox[0] = FC_NEW_CONFIG;
3121 raw_mbox[2] = NC_SUBOP_ENQUIRY3;
3122 raw_mbox[3] = ENQ3_GET_SOLICITED_FULL;
3124 // Issue the command
3125 if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3127 con_log(CL_ANN, (KERN_WARNING "megaraid: Inquiry3 failed\n"));
3129 pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
3130 pinfo, pinfo_dma_h);
3132 return -1;
3136 * Collect information about state of each physical drive
3137 * attached to the controller. We will expose all the disks
3138 * which are not part of RAID
3140 mraid_inq3 = (mraid_inquiry3_t *)adapter->ibuf;
3141 for (i = 0; i < MBOX_MAX_PHYSICAL_DRIVES; i++) {
3142 raid_dev->pdrv_state[i] = mraid_inq3->pdrv_state[i];
3146 * Get product info for information like number of channels,
3147 * maximum commands supported.
3149 memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
3150 mbox->xferaddr = (uint32_t)pinfo_dma_h;
3152 raw_mbox[0] = FC_NEW_CONFIG;
3153 raw_mbox[2] = NC_SUBOP_PRODUCT_INFO;
3155 if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3157 con_log(CL_ANN, (KERN_WARNING
3158 "megaraid: product info failed\n"));
3160 pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
3161 pinfo, pinfo_dma_h);
3163 return -1;
3167 * Set up some parameters for the host, as required by our caller
3169 adapter->max_channel = pinfo->nchannels;
3172 * we will export all the logical drives on a single channel.
3173 * Add 1 since inquiries do not come for the initiator ID
3175 adapter->max_target = MAX_LOGICAL_DRIVES_40LD + 1;
3176 adapter->max_lun = 8; // up to 8 LUNs for non-disk devices
3179 * These are the maximum outstanding commands for the scsi-layer
3181 adapter->max_cmds = MBOX_MAX_SCSI_CMDS;
3183 memset(adapter->fw_version, 0, VERSION_SIZE);
3184 memset(adapter->bios_version, 0, VERSION_SIZE);
3186 memcpy(adapter->fw_version, pinfo->fw_version, 4);
3187 adapter->fw_version[4] = 0;
3189 memcpy(adapter->bios_version, pinfo->bios_version, 4);
3190 adapter->bios_version[4] = 0;
3192 con_log(CL_ANN, (KERN_NOTICE
3193 "megaraid: fw version:[%s] bios version:[%s]\n",
3194 adapter->fw_version, adapter->bios_version));
3196 pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), pinfo,
3197 pinfo_dma_h);
3199 return 0;
3205 * megaraid_mbox_extended_cdb - check for support for extended CDBs
3206 * @adapter - soft state for the controller
3208 * this routine checks whether the controller in question supports extended
3209 * ( > 10 bytes ) CDBs
3211 static int
3212 megaraid_mbox_extended_cdb(adapter_t *adapter)
3214 mbox_t *mbox;
3215 uint8_t raw_mbox[sizeof(mbox_t)];
3216 int rval;
3218 mbox = (mbox_t *)raw_mbox;
3220 memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
3221 mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
3223 memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
3225 raw_mbox[0] = MAIN_MISC_OPCODE;
3226 raw_mbox[2] = SUPPORT_EXT_CDB;
3229 * Issue the command
3231 rval = 0;
3232 if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3233 rval = -1;
3236 return rval;
3241 * megaraid_mbox_support_ha - Do we support clustering
3242 * @adapter - soft state for the controller
3243 * @init_id - ID of the initiator
3245 * Determine if the firmware supports clustering and the ID of the initiator.
3247 static int
3248 megaraid_mbox_support_ha(adapter_t *adapter, uint16_t *init_id)
3250 mbox_t *mbox;
3251 uint8_t raw_mbox[sizeof(mbox_t)];
3252 int rval;
3255 mbox = (mbox_t *)raw_mbox;
3257 memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
3259 mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
3261 memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
3263 raw_mbox[0] = GET_TARGET_ID;
3265 // Issue the command
3266 *init_id = 7;
3267 rval = -1;
3268 if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
3270 *init_id = *(uint8_t *)adapter->ibuf;
3272 con_log(CL_ANN, (KERN_INFO
3273 "megaraid: cluster firmware, initiator ID: %d\n",
3274 *init_id));
3276 rval = 0;
3279 return rval;
3284 * megaraid_mbox_support_random_del - Do we support random deletion
3285 * @adapter - soft state for the controller
3287 * Determine if the firmware supports random deletion
3288 * Return: 1 if the operation is supported, 0 otherwise
3290 static int
3291 megaraid_mbox_support_random_del(adapter_t *adapter)
3293 mbox_t *mbox;
3294 uint8_t raw_mbox[sizeof(mbox_t)];
3295 int rval;
3298 mbox = (mbox_t *)raw_mbox;
3300 memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3302 raw_mbox[0] = FC_DEL_LOGDRV;
3303 raw_mbox[0] = OP_SUP_DEL_LOGDRV;
3305 // Issue the command
3306 rval = 0;
3307 if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
3309 con_log(CL_DLEVEL1, ("megaraid: supports random deletion\n"));
3311 rval = 1;
3314 return rval;
3319 * megaraid_mbox_get_max_sg - maximum sg elements supported by the firmware
3320 * @adapter - soft state for the controller
3322 * Find out the maximum number of scatter-gather elements supported by the
3323 * firmware
3325 static int
3326 megaraid_mbox_get_max_sg(adapter_t *adapter)
3328 mbox_t *mbox;
3329 uint8_t raw_mbox[sizeof(mbox_t)];
3330 int nsg;
3333 mbox = (mbox_t *)raw_mbox;
3335 memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3337 mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
3339 memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
3341 raw_mbox[0] = MAIN_MISC_OPCODE;
3342 raw_mbox[2] = GET_MAX_SG_SUPPORT;
3344 // Issue the command
3345 if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
3346 nsg = *(uint8_t *)adapter->ibuf;
3348 else {
3349 nsg = MBOX_DEFAULT_SG_SIZE;
3352 if (nsg > MBOX_MAX_SG_SIZE) nsg = MBOX_MAX_SG_SIZE;
3354 return nsg;
3359 * megaraid_mbox_enum_raid_scsi - enumerate the RAID and SCSI channels
3360 * @adapter - soft state for the controller
3362 * Enumerate the RAID and SCSI channels for ROMB platforms so that channels
3363 * can be exported as regular SCSI channels
3365 static void
3366 megaraid_mbox_enum_raid_scsi(adapter_t *adapter)
3368 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
3369 mbox_t *mbox;
3370 uint8_t raw_mbox[sizeof(mbox_t)];
3373 mbox = (mbox_t *)raw_mbox;
3375 memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3377 mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
3379 memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
3381 raw_mbox[0] = CHNL_CLASS;
3382 raw_mbox[2] = GET_CHNL_CLASS;
3384 // Issue the command. If the command fails, all channels are RAID
3385 // channels
3386 raid_dev->channel_class = 0xFF;
3387 if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
3388 raid_dev->channel_class = *(uint8_t *)adapter->ibuf;
3391 return;
3396 * megaraid_mbox_flush_cache - flush adapter and disks cache
3397 * @param adapter : soft state for the controller
3399 * Flush adapter cache followed by disks cache
3401 static void
3402 megaraid_mbox_flush_cache(adapter_t *adapter)
3404 mbox_t *mbox;
3405 uint8_t raw_mbox[sizeof(mbox_t)];
3408 mbox = (mbox_t *)raw_mbox;
3410 memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3412 raw_mbox[0] = FLUSH_ADAPTER;
3414 if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3415 con_log(CL_ANN, ("megaraid: flush adapter failed\n"));
3418 raw_mbox[0] = FLUSH_SYSTEM;
3420 if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3421 con_log(CL_ANN, ("megaraid: flush disks cache failed\n"));
3424 return;
3429 * megaraid_mbox_display_scb - display SCB information, mostly debug purposes
3430 * @param adapter : controller's soft state
3431 * @param scb : SCB to be displayed
3432 * @param level : debug level for console print
3434 * Display information about the given SCB iff the current debug level is
3435 * verbose
3437 static void
3438 megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb)
3440 mbox_ccb_t *ccb;
3441 struct scsi_cmnd *scp;
3442 mbox_t *mbox;
3443 int level;
3444 int i;
3447 ccb = (mbox_ccb_t *)scb->ccb;
3448 scp = scb->scp;
3449 mbox = ccb->mbox;
3451 level = CL_DLEVEL3;
3453 con_log(level, (KERN_NOTICE
3454 "megaraid mailbox: status:%#x cmd:%#x id:%#x ", scb->status,
3455 mbox->cmd, scb->sno));
3457 con_log(level, ("sec:%#x lba:%#x addr:%#x ld:%d sg:%d\n",
3458 mbox->numsectors, mbox->lba, mbox->xferaddr, mbox->logdrv,
3459 mbox->numsge));
3461 if (!scp) return;
3463 con_log(level, (KERN_NOTICE "scsi cmnd: "));
3465 for (i = 0; i < scp->cmd_len; i++) {
3466 con_log(level, ("%#2.02x ", scp->cmnd[i]));
3469 con_log(level, ("\n"));
3471 return;
3476 * megaraid_mbox_setup_device_map - manage device ids
3477 * @adapter : Driver's soft state
3479 * Manage the device ids to have an appropriate mapping between the kernel
3480 * scsi addresses and megaraid scsi and logical drive addresses. We export
3481 * scsi devices on their actual addresses, whereas the logical drives are
3482 * exported on a virtual scsi channel.
3484 static void
3485 megaraid_mbox_setup_device_map(adapter_t *adapter)
3487 uint8_t c;
3488 uint8_t t;
3491 * First fill the values on the logical drive channel
3493 for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++)
3494 adapter->device_ids[adapter->max_channel][t] =
3495 (t < adapter->init_id) ? t : t - 1;
3497 adapter->device_ids[adapter->max_channel][adapter->init_id] = 0xFF;
3500 * Fill the values on the physical devices channels
3502 for (c = 0; c < adapter->max_channel; c++)
3503 for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++)
3504 adapter->device_ids[c][t] = (c << 8) | t;
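/*
 * A standalone sketch (not part of the driver) of the virtual-channel map
 * built by the first loop above, worked through for a hypothetical
 * initiator ID of 7.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t ld_for_target(uint8_t t, uint8_t init_id)
{
	if (t == init_id)
		return 0xFF;			/* our own ID: no device */
	return (t < init_id) ? t : (uint8_t)(t - 1);
}

int main(void)
{
	uint8_t t;

	/* targets 0-6 map to LD 0-6, target 7 is skipped, 8 maps to LD 7 ... */
	for (t = 0; t < 10; t++)
		printf("virtual target %u -> logical drive %#x\n",
		       t, ld_for_target(t, 7));
	return 0;
}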
3509 * END: internal commands library
3513 * START: Interface for the common management module
3515 * This is the module which interfaces with the common management module to
3516 * provide support for ioctl and sysfs
3520 * megaraid_cmm_register - register with the management module
3521 * @param adapter : HBA soft state
3523 * Register with the management module, which allows applications to issue
3524 * ioctl calls to the drivers. This interface is used by the management module
3525 * to setup sysfs support as well.
3527 static int
3528 megaraid_cmm_register(adapter_t *adapter)
3530 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
3531 mraid_mmadp_t adp;
3532 scb_t *scb;
3533 mbox_ccb_t *ccb;
3534 int rval;
3535 int i;
3537 // Allocate memory for the base list of scb for management module.
3538 adapter->uscb_list = kmalloc(sizeof(scb_t) * MBOX_MAX_USER_CMDS,
3539 GFP_KERNEL);
3541 if (adapter->uscb_list == NULL) {
3542 con_log(CL_ANN, (KERN_WARNING
3543 "megaraid: out of memory, %s %d\n", __FUNCTION__,
3544 __LINE__));
3545 return -1;
3547 memset(adapter->uscb_list, 0, sizeof(scb_t) * MBOX_MAX_USER_CMDS);
3550 // Initialize the synchronization parameters for resources for
3551 // commands for management module
3552 INIT_LIST_HEAD(&adapter->uscb_pool);
3554 spin_lock_init(USER_FREE_LIST_LOCK(adapter));
3558 // link all the packets. Note: for commands coming from the common
3559 // management module, the CCB mailbox physical addresses are already
3560 // set up by it. We just need a placeholder for them in our local command
3561 // control blocks
3562 for (i = 0; i < MBOX_MAX_USER_CMDS; i++) {
3564 scb = adapter->uscb_list + i;
3565 ccb = raid_dev->uccb_list + i;
3567 scb->ccb = (caddr_t)ccb;
3568 ccb->mbox64 = raid_dev->umbox64 + i;
3569 ccb->mbox = &ccb->mbox64->mbox32;
3570 ccb->raw_mbox = (uint8_t *)ccb->mbox;
3572 scb->gp = 0;
3574 // COMMAND ID 0 - (MBOX_MAX_SCSI_CMDS-1) ARE RESERVED FOR
3575 // COMMANDS COMING FROM IO SUBSYSTEM (MID-LAYER)
3576 scb->sno = i + MBOX_MAX_SCSI_CMDS;
3578 scb->scp = NULL;
3579 scb->state = SCB_FREE;
3580 scb->dma_direction = PCI_DMA_NONE;
3581 scb->dma_type = MRAID_DMA_NONE;
3582 scb->dev_channel = -1;
3583 scb->dev_target = -1;
3585 // put scb in the free pool
3586 list_add_tail(&scb->list, &adapter->uscb_pool);
3589 adp.unique_id = adapter->unique_id;
3590 adp.drvr_type = DRVRTYPE_MBOX;
3591 adp.drvr_data = (unsigned long)adapter;
3592 adp.pdev = adapter->pdev;
3593 adp.issue_uioc = megaraid_mbox_mm_handler;
3594 adp.timeout = 30;
3595 adp.max_kioc = MBOX_MAX_USER_CMDS;
3597 if ((rval = mraid_mm_register_adp(&adp)) != 0) {
3599 con_log(CL_ANN, (KERN_WARNING
3600 "megaraid mbox: did not register with CMM\n"));
3602 kfree(adapter->uscb_list);
3605 return rval;
3610 * megaraid_cmm_unregister - un-register with the management module
3611 * @param adapter : HBA soft state
3613 * Un-register with the management module.
3614 * FIXME: mgmt module must return failure for unregister if it has pending
3615 * commands in LLD
3617 static int
3618 megaraid_cmm_unregister(adapter_t *adapter)
3620 kfree(adapter->uscb_list);
3621 mraid_mm_unregister_adp(adapter->unique_id);
3622 return 0;
3627 * megaraid_mbox_mm_handler - interface for CMM to issue commands to LLD
3628 * @param drvr_data : LLD specific data
3629 * @param kioc : CMM interface packet
3630 * @param action : command action
3632 * This routine is invoked whenever the Common Management Module (CMM) has a
3633 * command for us. The 'action' parameter specifies if this is a new command
3634 * or otherwise.
3636 static int
3637 megaraid_mbox_mm_handler(unsigned long drvr_data, uioc_t *kioc, uint32_t action)
3639 adapter_t *adapter;
3641 if (action != IOCTL_ISSUE) {
3642 con_log(CL_ANN, (KERN_WARNING
3643 "megaraid: unsupported management action:%#2x\n",
3644 action));
3645 return (-ENOTSUPP);
3648 adapter = (adapter_t *)drvr_data;
3650 // make sure this adapter is not being detached right now.
3651 if (atomic_read(&adapter->being_detached)) {
3652 con_log(CL_ANN, (KERN_WARNING
3653 "megaraid: reject management request, detaching\n"));
3654 return (-ENODEV);
3657 switch (kioc->opcode) {
3659 case GET_ADAP_INFO:
3661 kioc->status = gather_hbainfo(adapter, (mraid_hba_info_t *)
3662 (unsigned long)kioc->buf_vaddr);
3664 kioc->done(kioc);
3666 return kioc->status;
3668 case MBOX_CMD:
3670 return megaraid_mbox_mm_command(adapter, kioc);
3672 default:
3673 kioc->status = (-EINVAL);
3674 kioc->done(kioc);
3675 return (-EINVAL);
3678 return 0; // not reached
3682 * megaraid_mbox_mm_command - issues commands routed through CMM
3683 * @param adapter : HBA soft state
3684 * @param kioc : management command packet
3686 * Issues commands, which are routed through the management module.
3688 static int
3689 megaraid_mbox_mm_command(adapter_t *adapter, uioc_t *kioc)
3691 struct list_head *head = &adapter->uscb_pool;
3692 mbox64_t *mbox64;
3693 uint8_t *raw_mbox;
3694 scb_t *scb;
3695 mbox_ccb_t *ccb;
3696 unsigned long flags;
3698 // detach one scb from free pool
3699 spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags);
3701 if (list_empty(head)) { // should never happen because of CMM
3703 con_log(CL_ANN, (KERN_WARNING
3704 "megaraid mbox: bug in cmm handler, lost resources\n"));
3706 spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
3708 return (-EINVAL);
3711 scb = list_entry(head->next, scb_t, list);
3712 list_del_init(&scb->list);
3714 spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
3716 scb->state = SCB_ACTIVE;
3717 scb->dma_type = MRAID_DMA_NONE;
3719 ccb = (mbox_ccb_t *)scb->ccb;
3720 mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
3721 raw_mbox = (uint8_t *)&mbox64->mbox32;
3723 memcpy(ccb->mbox64, mbox64, sizeof(mbox64_t));
3725 scb->gp = (unsigned long)kioc;
3728 * If it is a logdrv random delete operation, we have to wait till
3729 * there are no outstanding cmds at the fw and then issue it directly
3731 if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) {
3733 if (wait_till_fw_empty(adapter)) {
3734 con_log(CL_ANN, (KERN_NOTICE
3735 "megaraid mbox: LD delete, timed out\n"));
3737 kioc->status = -ETIME;
3739 scb->status = -1;
3741 megaraid_mbox_mm_done(adapter, scb);
3743 return (-ETIME);
3746 INIT_LIST_HEAD(&scb->list);
3748 scb->state = SCB_ISSUED;
3749 if (mbox_post_cmd(adapter, scb) != 0) {
3751 con_log(CL_ANN, (KERN_NOTICE
3752 "megaraid mbox: LD delete, mailbox busy\n"));
3754 kioc->status = -EBUSY;
3756 scb->status = -1;
3758 megaraid_mbox_mm_done(adapter, scb);
3760 return (-EBUSY);
3763 return 0;
3766 // put the command on the pending list and execute
3767 megaraid_mbox_runpendq(adapter, scb);
3769 return 0;
3773 static int
3774 wait_till_fw_empty(adapter_t *adapter)
3776 unsigned long flags = 0;
3777 int i;
3781 * Set the quiescent flag to stop issuing cmds to FW.
3783 spin_lock_irqsave(adapter->host_lock, flags);
3784 adapter->quiescent++;
3785 spin_unlock_irqrestore(adapter->host_lock, flags);
3788 * Wait till there are no more cmds outstanding at FW. Try for at most
3789 * 60 seconds
3791 for (i = 0; i < 60 && adapter->outstanding_cmds; i++) {
3792 con_log(CL_DLEVEL1, (KERN_INFO
3793 "megaraid: FW has %d pending commands\n",
3794 adapter->outstanding_cmds));
3796 msleep(1000);
3799 return adapter->outstanding_cmds;
3804 * megaraid_mbox_mm_done - callback for CMM commands
3805 * @adapter : HBA soft state
3806 * @scb : completed command
3808 * Callback routine for internal commands originating from the management
3809 * module.
3811 static void
3812 megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb)
3814 uioc_t *kioc;
3815 mbox64_t *mbox64;
3816 uint8_t *raw_mbox;
3817 unsigned long flags;
3819 kioc = (uioc_t *)scb->gp;
3820 kioc->status = 0;
3821 mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
3822 mbox64->mbox32.status = scb->status;
3823 raw_mbox = (uint8_t *)&mbox64->mbox32;
3826 // put scb in the free pool
3827 scb->state = SCB_FREE;
3828 scb->scp = NULL;
3830 spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags);
3832 list_add(&scb->list, &adapter->uscb_pool);
3834 spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
3836 // if a delete logical drive operation succeeded, restart the
3837 // controller
3838 if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) {
3840 adapter->quiescent--;
3842 megaraid_mbox_runpendq(adapter, NULL);
3845 kioc->done(kioc);
3847 return;
3852 * gather_hbainfo - HBA characteristics for the applications
3853 * @param adapter : HBA soft state
3854 * @param hinfo : pointer to the caller's host info structure
3856 static int
3857 gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo)
3859 uint8_t dmajor;
3861 dmajor = megaraid_mbox_version[0];
3863 hinfo->pci_vendor_id = adapter->pdev->vendor;
3864 hinfo->pci_device_id = adapter->pdev->device;
3865 hinfo->subsys_vendor_id = adapter->pdev->subsystem_vendor;
3866 hinfo->subsys_device_id = adapter->pdev->subsystem_device;
3868 hinfo->pci_bus = adapter->pdev->bus->number;
3869 hinfo->pci_dev_fn = adapter->pdev->devfn;
3870 hinfo->pci_slot = PCI_SLOT(adapter->pdev->devfn);
3871 hinfo->irq = adapter->host->irq;
3872 hinfo->baseport = ADAP2RAIDDEV(adapter)->baseport;
3874 hinfo->unique_id = (hinfo->pci_bus << 8) | adapter->pdev->devfn;
3875 hinfo->host_no = adapter->host->host_no;
3877 return 0;
3881 * END: Interface for the common management module
3886 * END: Mailbox Low Level Driver
3888 module_init(megaraid_init);
3889 module_exit(megaraid_exit);
3891 /* vim: set ts=8 sw=8 tw=78 ai si: */