/*
 * Copyright (c) 2003-2006 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/NoticeExplan
 */
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/pci.h>
24 #include <linux/delay.h>
25 #include <linux/hdreg.h>
26 #include <linux/init.h>
27 #include <linux/kernel.h>
28 #include <linux/timer.h>
30 #include <linux/ioport.h>
31 #include <linux/blkdev.h>
32 #include <linux/ioc4.h>
35 #include <linux/ide.h>
37 /* IOC4 Specific Definitions */
38 #define IOC4_CMD_OFFSET 0x100
39 #define IOC4_CTRL_OFFSET 0x120
40 #define IOC4_DMA_OFFSET 0x140
41 #define IOC4_INTR_OFFSET 0x0
43 #define IOC4_TIMING 0x00
44 #define IOC4_DMA_PTR_L 0x01
45 #define IOC4_DMA_PTR_H 0x02
46 #define IOC4_DMA_ADDR_L 0x03
47 #define IOC4_DMA_ADDR_H 0x04
48 #define IOC4_BC_DEV 0x05
49 #define IOC4_BC_MEM 0x06
50 #define IOC4_DMA_CTRL 0x07
51 #define IOC4_DMA_END_ADDR 0x08
53 /* Bits in the IOC4 Control/Status Register */
54 #define IOC4_S_DMA_START 0x01
55 #define IOC4_S_DMA_STOP 0x02
56 #define IOC4_S_DMA_DIR 0x04
57 #define IOC4_S_DMA_ACTIVE 0x08
58 #define IOC4_S_DMA_ERROR 0x10
59 #define IOC4_ATA_MEMERR 0x02
61 /* Read/Write Directions */
62 #define IOC4_DMA_WRITE 0x04
63 #define IOC4_DMA_READ 0x00
65 /* Interrupt Register Offsets */
66 #define IOC4_INTR_REG 0x03
67 #define IOC4_INTR_SET 0x05
68 #define IOC4_INTR_CLEAR 0x07
70 #define IOC4_IDE_CACHELINE_SIZE 128
71 #define IOC4_CMD_CTL_BLK_SIZE 0x20
72 #define IOC4_SUPPORTED_FIRMWARE_REV 46
86 /* Each Physical Region Descriptor Entry size is 16 bytes (2 * 64 bits) */
87 /* IOC4 has only 1 IDE channel */
88 #define IOC4_PRD_BYTES 16
89 #define IOC4_PRD_ENTRIES (PAGE_SIZE /(4*IOC4_PRD_BYTES))
93 sgiioc4_init_hwif_ports(hw_regs_t
* hw
, unsigned long data_port
,
94 unsigned long ctrl_port
, unsigned long irq_port
)
96 unsigned long reg
= data_port
;
99 /* Registers are word (32 bit) aligned */
100 for (i
= IDE_DATA_OFFSET
; i
<= IDE_STATUS_OFFSET
; i
++)
101 hw
->io_ports
[i
] = reg
+ i
* 4;
104 hw
->io_ports
[IDE_CONTROL_OFFSET
] = ctrl_port
;
107 hw
->io_ports
[IDE_IRQ_OFFSET
] = irq_port
;
111 sgiioc4_maskproc(ide_drive_t
* drive
, int mask
)
113 ide_hwif_t
*hwif
= HWIF(drive
);
114 hwif
->OUTB(mask
? (drive
->ctl
| 2) : (drive
->ctl
& ~2),
120 sgiioc4_checkirq(ide_hwif_t
* hwif
)
123 hwif
->INL(hwif
->io_ports
[IDE_IRQ_OFFSET
] + IOC4_INTR_REG
* 4);
133 sgiioc4_clearirq(ide_drive_t
* drive
)
136 ide_hwif_t
*hwif
= HWIF(drive
);
137 unsigned long other_ir
=
138 hwif
->io_ports
[IDE_IRQ_OFFSET
] + (IOC4_INTR_REG
<< 2);
140 /* Code to check for PCI error conditions */
141 intr_reg
= hwif
->INL(other_ir
);
142 if (intr_reg
& 0x03) { /* Valid IOC4-IDE interrupt */
144 * Using hwif->INB to read the IDE_STATUS_REG has a side effect
145 * of clearing the interrupt. The first read should clear it
146 * if it is set. The second read should return a "clear" status
147 * if it got cleared. If not, then spin for a bit trying to
150 u8 stat
= hwif
->INB(IDE_STATUS_REG
);
152 stat
= hwif
->INB(IDE_STATUS_REG
);
153 while ((stat
& 0x80) && (count
++ < 100)) {
155 stat
= hwif
->INB(IDE_STATUS_REG
);
158 if (intr_reg
& 0x02) {
159 /* Error when transferring DMA data on PCI bus */
160 u32 pci_err_addr_low
, pci_err_addr_high
,
164 hwif
->INL(hwif
->io_ports
[IDE_IRQ_OFFSET
]);
166 hwif
->INL(hwif
->io_ports
[IDE_IRQ_OFFSET
] + 4);
167 pci_read_config_dword(hwif
->pci_dev
, PCI_COMMAND
,
170 "%s(%s) : PCI Bus Error when doing DMA:"
171 " status-cmd reg is 0x%x\n",
172 __FUNCTION__
, drive
->name
, pci_stat_cmd_reg
);
174 "%s(%s) : PCI Error Address is 0x%x%x\n",
175 __FUNCTION__
, drive
->name
,
176 pci_err_addr_high
, pci_err_addr_low
);
177 /* Clear the PCI Error indicator */
178 pci_write_config_dword(hwif
->pci_dev
, PCI_COMMAND
,
182 /* Clear the Interrupt, Error bits on the IOC4 */
183 hwif
->OUTL(0x03, other_ir
);
185 intr_reg
= hwif
->INL(other_ir
);
191 static void sgiioc4_ide_dma_start(ide_drive_t
* drive
)
193 ide_hwif_t
*hwif
= HWIF(drive
);
194 unsigned int reg
= hwif
->INL(hwif
->dma_base
+ IOC4_DMA_CTRL
* 4);
195 unsigned int temp_reg
= reg
| IOC4_S_DMA_START
;
197 hwif
->OUTL(temp_reg
, hwif
->dma_base
+ IOC4_DMA_CTRL
* 4);
201 sgiioc4_ide_dma_stop(ide_hwif_t
*hwif
, u64 dma_base
)
207 ioc4_dma
= hwif
->INL(dma_base
+ IOC4_DMA_CTRL
* 4);
208 while ((ioc4_dma
& IOC4_S_DMA_STOP
) && (count
++ < 200)) {
210 ioc4_dma
= hwif
->INL(dma_base
+ IOC4_DMA_CTRL
* 4);
215 /* Stops the IOC4 DMA Engine */
217 sgiioc4_ide_dma_end(ide_drive_t
* drive
)
219 u32 ioc4_dma
, bc_dev
, bc_mem
, num
, valid
= 0, cnt
= 0;
220 ide_hwif_t
*hwif
= HWIF(drive
);
221 u64 dma_base
= hwif
->dma_base
;
223 unsigned long *ending_dma
= (unsigned long *) hwif
->dma_base2
;
225 hwif
->OUTL(IOC4_S_DMA_STOP
, dma_base
+ IOC4_DMA_CTRL
* 4);
227 ioc4_dma
= sgiioc4_ide_dma_stop(hwif
, dma_base
);
229 if (ioc4_dma
& IOC4_S_DMA_STOP
) {
231 "%s(%s): IOC4 DMA STOP bit is still 1 :"
232 "ioc4_dma_reg 0x%x\n",
233 __FUNCTION__
, drive
->name
, ioc4_dma
);
238 * The IOC4 will DMA 1's to the ending dma area to indicate that
239 * previous data DMA is complete. This is necessary because of relaxed
240 * ordering between register reads and DMA writes on the Altix.
242 while ((cnt
++ < 200) && (!valid
)) {
243 for (num
= 0; num
< 16; num
++) {
244 if (ending_dma
[num
]) {
252 printk(KERN_ERR
"%s(%s) : DMA incomplete\n", __FUNCTION__
,
257 bc_dev
= hwif
->INL(dma_base
+ IOC4_BC_DEV
* 4);
258 bc_mem
= hwif
->INL(dma_base
+ IOC4_BC_MEM
* 4);
260 if ((bc_dev
& 0x01FF) || (bc_mem
& 0x1FF)) {
261 if (bc_dev
> bc_mem
+ 8) {
263 "%s(%s): WARNING!! byte_count_dev %d "
264 "!= byte_count_mem %d\n",
265 __FUNCTION__
, drive
->name
, bc_dev
, bc_mem
);
269 drive
->waiting_for_dma
= 0;
270 ide_destroy_dmatable(drive
);
276 sgiioc4_ide_dma_check(ide_drive_t
* drive
)
278 if (ide_config_drive_speed(drive
, XFER_MW_DMA_2
) != 0) {
280 "Couldnot set %s in Multimode-2 DMA mode | "
281 "Drive %s using PIO instead\n",
282 drive
->name
, drive
->name
);
283 drive
->using_dma
= 0;
285 drive
->using_dma
= 1;
291 sgiioc4_ide_dma_on(ide_drive_t
* drive
)
293 drive
->using_dma
= 1;
295 return HWIF(drive
)->ide_dma_host_on(drive
);
299 sgiioc4_ide_dma_off_quietly(ide_drive_t
* drive
)
301 drive
->using_dma
= 0;
303 return HWIF(drive
)->ide_dma_host_off(drive
);
306 /* returns 1 if dma irq issued, 0 otherwise */
308 sgiioc4_ide_dma_test_irq(ide_drive_t
* drive
)
310 return sgiioc4_checkirq(HWIF(drive
));
314 sgiioc4_ide_dma_host_on(ide_drive_t
* drive
)
316 if (drive
->using_dma
)
323 sgiioc4_ide_dma_host_off(ide_drive_t
* drive
)
325 sgiioc4_clearirq(drive
);
331 sgiioc4_ide_dma_lostirq(ide_drive_t
* drive
)
333 HWIF(drive
)->resetproc(drive
);
335 return __ide_dma_lostirq(drive
);
339 sgiioc4_resetproc(ide_drive_t
* drive
)
341 sgiioc4_ide_dma_end(drive
);
342 sgiioc4_clearirq(drive
);
346 sgiioc4_INB(unsigned long port
)
348 u8 reg
= (u8
) inb(port
);
350 if ((port
& 0xFFF) == 0x11C) { /* Status register of IOC4 */
351 if (reg
& 0x51) { /* Not busy...check for interrupt */
352 unsigned long other_ir
= port
- 0x110;
353 unsigned int intr_reg
= (u32
) inl(other_ir
);
355 /* Clear the Interrupt, Error bits on the IOC4 */
356 if (intr_reg
& 0x03) {
357 outl(0x03, other_ir
);
358 intr_reg
= (u32
) inl(other_ir
);
366 /* Creates a dma map for the scatter-gather list entries */
367 static void __devinit
368 ide_dma_sgiioc4(ide_hwif_t
* hwif
, unsigned long dma_base
)
370 int num_ports
= sizeof (ioc4_dma_regs_t
);
372 printk(KERN_INFO
"%s: BM-DMA at 0x%04lx-0x%04lx\n", hwif
->name
,
373 dma_base
, dma_base
+ num_ports
- 1);
375 if (!request_region(dma_base
, num_ports
, hwif
->name
)) {
377 "%s(%s) -- ERROR, Addresses 0x%p to 0x%p "
379 __FUNCTION__
, hwif
->name
, (void *) dma_base
,
380 (void *) dma_base
+ num_ports
- 1);
381 goto dma_alloc_failure
;
384 hwif
->dma_base
= dma_base
;
385 hwif
->dmatable_cpu
= pci_alloc_consistent(hwif
->pci_dev
,
386 IOC4_PRD_ENTRIES
* IOC4_PRD_BYTES
,
387 &hwif
->dmatable_dma
);
389 if (!hwif
->dmatable_cpu
)
390 goto dma_alloc_failure
;
392 hwif
->sg_max_nents
= IOC4_PRD_ENTRIES
;
394 hwif
->dma_base2
= (unsigned long)
395 pci_alloc_consistent(hwif
->pci_dev
,
396 IOC4_IDE_CACHELINE_SIZE
,
397 (dma_addr_t
*) &(hwif
->dma_status
));
399 if (!hwif
->dma_base2
)
400 goto dma_base2alloc_failure
;
404 dma_base2alloc_failure
:
405 pci_free_consistent(hwif
->pci_dev
,
406 IOC4_PRD_ENTRIES
* IOC4_PRD_BYTES
,
407 hwif
->dmatable_cpu
, hwif
->dmatable_dma
);
409 "%s() -- Error! Unable to allocate DMA Maps for drive %s\n",
410 __FUNCTION__
, hwif
->name
);
412 "Changing from DMA to PIO mode for Drive %s\n", hwif
->name
);
415 /* Disable DMA because we couldnot allocate any DMA maps */
420 /* Initializes the IOC4 DMA Engine */
422 sgiioc4_configure_for_dma(int dma_direction
, ide_drive_t
* drive
)
425 ide_hwif_t
*hwif
= HWIF(drive
);
426 u64 dma_base
= hwif
->dma_base
;
427 u32 dma_addr
, ending_dma_addr
;
429 ioc4_dma
= hwif
->INL(dma_base
+ IOC4_DMA_CTRL
* 4);
431 if (ioc4_dma
& IOC4_S_DMA_ACTIVE
) {
433 "%s(%s):Warning!! DMA from previous transfer was still active\n",
434 __FUNCTION__
, drive
->name
);
435 hwif
->OUTL(IOC4_S_DMA_STOP
, dma_base
+ IOC4_DMA_CTRL
* 4);
436 ioc4_dma
= sgiioc4_ide_dma_stop(hwif
, dma_base
);
438 if (ioc4_dma
& IOC4_S_DMA_STOP
)
440 "%s(%s) : IOC4 Dma STOP bit is still 1\n",
441 __FUNCTION__
, drive
->name
);
444 ioc4_dma
= hwif
->INL(dma_base
+ IOC4_DMA_CTRL
* 4);
445 if (ioc4_dma
& IOC4_S_DMA_ERROR
) {
447 "%s(%s) : Warning!! - DMA Error during Previous"
448 " transfer | status 0x%x\n",
449 __FUNCTION__
, drive
->name
, ioc4_dma
);
450 hwif
->OUTL(IOC4_S_DMA_STOP
, dma_base
+ IOC4_DMA_CTRL
* 4);
451 ioc4_dma
= sgiioc4_ide_dma_stop(hwif
, dma_base
);
453 if (ioc4_dma
& IOC4_S_DMA_STOP
)
455 "%s(%s) : IOC4 DMA STOP bit is still 1\n",
456 __FUNCTION__
, drive
->name
);
459 /* Address of the Scatter Gather List */
460 dma_addr
= cpu_to_le32(hwif
->dmatable_dma
);
461 hwif
->OUTL(dma_addr
, dma_base
+ IOC4_DMA_PTR_L
* 4);
463 /* Address of the Ending DMA */
464 memset((unsigned int *) hwif
->dma_base2
, 0, IOC4_IDE_CACHELINE_SIZE
);
465 ending_dma_addr
= cpu_to_le32(hwif
->dma_status
);
466 hwif
->OUTL(ending_dma_addr
, dma_base
+ IOC4_DMA_END_ADDR
* 4);
468 hwif
->OUTL(dma_direction
, dma_base
+ IOC4_DMA_CTRL
* 4);
469 drive
->waiting_for_dma
= 1;
472 /* IOC4 Scatter Gather list Format */
473 /* 128 Bit entries to support 64 bit addresses in the future */
474 /* The Scatter Gather list Entry should be in the BIG-ENDIAN Format */
475 /* --------------------------------------------------------------------- */
476 /* | Upper 32 bits - Zero | Lower 32 bits- address | */
477 /* --------------------------------------------------------------------- */
478 /* | Upper 32 bits - Zero |EOL| 15 unused | 16 Bit Length| */
479 /* --------------------------------------------------------------------- */
480 /* Creates the scatter gather list, DMA Table */
482 sgiioc4_build_dma_table(ide_drive_t
* drive
, struct request
*rq
, int ddir
)
484 ide_hwif_t
*hwif
= HWIF(drive
);
485 unsigned int *table
= hwif
->dmatable_cpu
;
486 unsigned int count
= 0, i
= 1;
487 struct scatterlist
*sg
;
489 hwif
->sg_nents
= i
= ide_build_sglist(drive
, rq
);
492 return 0; /* sglist of length Zero */
495 while (i
&& sg_dma_len(sg
)) {
498 cur_addr
= sg_dma_address(sg
);
499 cur_len
= sg_dma_len(sg
);
502 if (count
++ >= IOC4_PRD_ENTRIES
) {
504 "%s: DMA table too small\n",
506 goto use_pio_instead
;
509 0x10000 - (cur_addr
& 0xffff);
511 if (bcount
> cur_len
)
514 /* put the addr, length in
515 * the IOC4 dma-table format */
518 *table
= cpu_to_be32(cur_addr
);
523 *table
= cpu_to_be32(bcount
);
537 *table
|= cpu_to_be32(0x80000000);
542 pci_unmap_sg(hwif
->pci_dev
, hwif
->sg_table
, hwif
->sg_nents
,
543 hwif
->sg_dma_direction
);
545 return 0; /* revert to PIO for this request */
548 static int sgiioc4_ide_dma_setup(ide_drive_t
*drive
)
550 struct request
*rq
= HWGROUP(drive
)->rq
;
551 unsigned int count
= 0;
555 ddir
= PCI_DMA_TODEVICE
;
557 ddir
= PCI_DMA_FROMDEVICE
;
559 if (!(count
= sgiioc4_build_dma_table(drive
, rq
, ddir
))) {
560 /* try PIO instead of DMA */
561 ide_map_sg(drive
, rq
);
566 /* Writes TO the IOC4 FROM Main Memory */
567 ddir
= IOC4_DMA_READ
;
569 /* Writes FROM the IOC4 TO Main Memory */
570 ddir
= IOC4_DMA_WRITE
;
572 sgiioc4_configure_for_dma(ddir
, drive
);
577 static void __devinit
578 ide_init_sgiioc4(ide_hwif_t
* hwif
)
583 hwif
->ultra_mask
= 0x0; /* Disable Ultra DMA */
584 hwif
->mwdma_mask
= 0x2; /* Multimode-2 DMA */
585 hwif
->swdma_mask
= 0x2;
586 hwif
->tuneproc
= NULL
; /* Sets timing for PIO mode */
587 hwif
->speedproc
= NULL
; /* Sets timing for DMA &/or PIO modes */
588 hwif
->selectproc
= NULL
;/* Use the default routine to select drive */
589 hwif
->reset_poll
= NULL
;/* No HBA specific reset_poll needed */
590 hwif
->pre_reset
= NULL
; /* No HBA specific pre_set needed */
591 hwif
->resetproc
= &sgiioc4_resetproc
;/* Reset DMA engine,
593 hwif
->intrproc
= NULL
; /* Enable or Disable interrupt from drive */
594 hwif
->maskproc
= &sgiioc4_maskproc
; /* Mask on/off NIEN register */
595 hwif
->quirkproc
= NULL
;
596 hwif
->busproc
= NULL
;
598 hwif
->dma_setup
= &sgiioc4_ide_dma_setup
;
599 hwif
->dma_start
= &sgiioc4_ide_dma_start
;
600 hwif
->ide_dma_end
= &sgiioc4_ide_dma_end
;
601 hwif
->ide_dma_check
= &sgiioc4_ide_dma_check
;
602 hwif
->ide_dma_on
= &sgiioc4_ide_dma_on
;
603 hwif
->ide_dma_off_quietly
= &sgiioc4_ide_dma_off_quietly
;
604 hwif
->ide_dma_test_irq
= &sgiioc4_ide_dma_test_irq
;
605 hwif
->ide_dma_host_on
= &sgiioc4_ide_dma_host_on
;
606 hwif
->ide_dma_host_off
= &sgiioc4_ide_dma_host_off
;
607 hwif
->ide_dma_lostirq
= &sgiioc4_ide_dma_lostirq
;
608 hwif
->ide_dma_timeout
= &__ide_dma_timeout
;
609 hwif
->INB
= &sgiioc4_INB
;
613 sgiioc4_ide_setup_pci_device(struct pci_dev
*dev
, ide_pci_device_t
* d
)
615 unsigned long base
, ctl
, dma_base
, irqport
;
620 * Find an empty HWIF; if none available, return -ENOMEM.
622 for (h
= 0; h
< MAX_HWIFS
; ++h
) {
623 hwif
= &ide_hwifs
[h
];
624 if (hwif
->chipset
== ide_unknown
)
627 if (h
== MAX_HWIFS
) {
628 printk(KERN_ERR
"%s: too many IDE interfaces, no room in table\n", d
->name
);
632 /* Get the CmdBlk and CtrlBlk Base Registers */
633 base
= pci_resource_start(dev
, 0) + IOC4_CMD_OFFSET
;
634 ctl
= pci_resource_start(dev
, 0) + IOC4_CTRL_OFFSET
;
635 irqport
= pci_resource_start(dev
, 0) + IOC4_INTR_OFFSET
;
636 dma_base
= pci_resource_start(dev
, 0) + IOC4_DMA_OFFSET
;
638 if (!request_region(base
, IOC4_CMD_CTL_BLK_SIZE
, hwif
->name
)) {
640 "%s : %s -- ERROR, Port Addresses "
641 "0x%p to 0x%p ALREADY in use\n",
642 __FUNCTION__
, hwif
->name
, (void *) base
,
643 (void *) base
+ IOC4_CMD_CTL_BLK_SIZE
);
647 if (hwif
->io_ports
[IDE_DATA_OFFSET
] != base
) {
648 /* Initialize the IO registers */
649 sgiioc4_init_hwif_ports(&hwif
->hw
, base
, ctl
, irqport
);
650 memcpy(hwif
->io_ports
, hwif
->hw
.io_ports
,
651 sizeof (hwif
->io_ports
));
652 hwif
->noprobe
= !hwif
->io_ports
[IDE_DATA_OFFSET
];
655 hwif
->irq
= dev
->irq
;
656 hwif
->chipset
= ide_pci
;
658 hwif
->channel
= 0; /* Single Channel chip */
659 hwif
->cds
= (struct ide_pci_device_s
*) d
;
660 hwif
->gendev
.parent
= &dev
->dev
;/* setup proper ancestral information */
662 /* Initializing chipset IRQ Registers */
663 hwif
->OUTL(0x03, irqport
+ IOC4_INTR_SET
* 4);
665 ide_init_sgiioc4(hwif
);
668 ide_dma_sgiioc4(hwif
, dma_base
);
670 printk(KERN_INFO
"%s: %s Bus-Master DMA disabled\n",
671 hwif
->name
, d
->name
);
673 if (probe_hwif_init(hwif
))
676 /* Create /proc/ide entries */
677 create_proc_ide_interfaces();
682 static unsigned int __devinit
683 pci_init_sgiioc4(struct pci_dev
*dev
, ide_pci_device_t
* d
)
685 unsigned int class_rev
;
688 pci_read_config_dword(dev
, PCI_CLASS_REVISION
, &class_rev
);
690 printk(KERN_INFO
"%s: IDE controller at PCI slot %s, revision %d\n",
691 d
->name
, pci_name(dev
), class_rev
);
692 if (class_rev
< IOC4_SUPPORTED_FIRMWARE_REV
) {
693 printk(KERN_ERR
"Skipping %s IDE controller in slot %s: "
694 "firmware is obsolete - please upgrade to revision"
695 "46 or higher\n", d
->name
, pci_name(dev
));
699 ret
= sgiioc4_ide_setup_pci_device(dev
, d
);
704 static ide_pci_device_t sgiioc4_chipsets
[] __devinitdata
= {
708 .init_hwif
= ide_init_sgiioc4
,
709 .init_dma
= ide_dma_sgiioc4
,
712 /* SGI IOC4 doesn't have enablebits. */
713 .bootable
= ON_BOARD
,
718 ioc4_ide_attach_one(struct ioc4_driver_data
*idd
)
720 return pci_init_sgiioc4(idd
->idd_pdev
,
721 &sgiioc4_chipsets
[idd
->idd_pci_id
->driver_data
]);
724 static struct ioc4_submodule ioc4_ide_submodule
= {
725 .is_name
= "IOC4_ide",
726 .is_owner
= THIS_MODULE
,
727 .is_probe
= ioc4_ide_attach_one
,
728 /* .is_remove = ioc4_ide_remove_one, */
734 return ioc4_register_submodule(&ioc4_ide_submodule
);
737 static void __devexit
740 ioc4_unregister_submodule(&ioc4_ide_submodule
);
743 module_init(ioc4_ide_init
);
744 module_exit(ioc4_ide_exit
);
746 MODULE_AUTHOR("Aniket Malatpure - Silicon Graphics Inc. (SGI)");
747 MODULE_DESCRIPTION("IDE PCI driver module for SGI IOC4 Base-IO Card");
748 MODULE_LICENSE("GPL");