/*
 * Support for IDE interfaces on Celleb platform
 *
 * (C) Copyright 2006 TOSHIBA CORPORATION
 *
 * This code is based on drivers/ide/pci/siimage.c:
 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2003 Red Hat
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/init.h>

#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA	0x01b4

#define SCC_PATA_NAME		"scc IDE"

#define TDVHSEL_MASTER		0x00000001
#define TDVHSEL_SLAVE		0x00000004

#define MODE_JCUSFEN		0x00000080

#define CCKCTRL_ATARESET	0x00040000
#define CCKCTRL_BUFCNT		0x00020000
#define CCKCTRL_CRST		0x00010000
#define CCKCTRL_OCLKEN		0x00000100
#define CCKCTRL_ATACLKOEN	0x00000002
#define CCKCTRL_LCLKEN		0x00000001

#define QCHCD_IOS_SS		0x00000001

#define QCHSD_STPDIAG		0x00020000

#define INTMASK_MSK		0xD1000012
#define INTSTS_SERROR		0x80000000
#define INTSTS_PRERR		0x40000000
#define INTSTS_RERR		0x10000000
#define INTSTS_ICERR		0x01000000
#define INTSTS_BMSINT		0x00000010
#define INTSTS_BMHE		0x00000008
#define INTSTS_IOIRQS		0x00000004
#define INTSTS_INTRQ		0x00000002
#define INTSTS_ACTEINT		0x00000001

#define ECMODE_VALUE		0x01

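/*
 * Per-controller bookkeeping: the remapped CTRL and BMID (DMA) window
 * base addresses plus the ide_host handle, one slot per possible hwif.
 */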
static struct scc_ports {
	unsigned long ctl, dma;
	struct ide_host *host;	/* for removing port from system */
} scc_ports[MAX_HWIFS];

/* PIO transfer mode table */
static unsigned long JCHSTtbl[2][7] = {
	{0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00},	/* 100MHz */
	{0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00}	/* 133MHz */
};

static unsigned long JCHHTtbl[2][7] = {
	{0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00},	/* 100MHz */
	{0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00}	/* 133MHz */
};

static unsigned long JCHCTtbl[2][7] = {
	{0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00},	/* 100MHz */
	{0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00}	/* 133MHz */
};

/* DMA transfer mode table */
static unsigned long JCHDCTxtbl[2][7] = {
	{0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00},	/* 100MHz */
	{0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00}	/* 133MHz */
};

static unsigned long JCSTWTxtbl[2][7] = {
	{0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00},	/* 100MHz */
	{0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02}	/* 133MHz */
};

static unsigned long JCTSStbl[2][7] = {
	{0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00},	/* 100MHz */
	{0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05}	/* 133MHz */
};

static unsigned long JCENVTtbl[2][7] = {
	{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00},	/* 100MHz */
	{0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02}	/* 133MHz */
};

/* JCACTSELS/JCACTSELM */
static unsigned long JCACTSELtbl[2][7] = {
	{0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00},	/* 100MHz */
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}	/* 133MHz */
};

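/*
 * Each table above is indexed as tbl[clock][mode]: row 0 holds the 100MHz
 * values, row 1 the 133MHz values, and the column is the PIO or UDMA mode
 * number.  scc_set_pio_mode()/scc_set_dma_mode() pick the row from the
 * CCKCTRL_ATACLKOEN bit and write the entries into the timing registers.
 */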
static u8 scc_ide_inb(unsigned long port)
{
	u32 data = in_be32((void*)port);
	return (u8)data;
}

static void scc_exec_command(ide_hwif_t *hwif, u8 cmd)
{
	out_be32((void *)hwif->io_ports.command_addr, cmd);
	eieio();
	in_be32((void *)(hwif->dma_base + 0x01c));
	eieio();
}

static u8 scc_read_status(ide_hwif_t *hwif)
{
	return (u8)in_be32((void *)hwif->io_ports.status_addr);
}

static u8 scc_read_altstatus(ide_hwif_t *hwif)
{
	return (u8)in_be32((void *)hwif->io_ports.ctl_addr);
}

static u8 scc_read_sff_dma_status(ide_hwif_t *hwif)
{
	return (u8)in_be32((void *)(hwif->dma_base + 4));
}

static void scc_set_irq(ide_hwif_t *hwif, int on)
{
	u8 ctl = ATA_DEVCTL_OBS;

	if (on == 4) { /* hack for SRST */
		ctl |= 4;
		on &= ~4;
	}

	ctl |= on ? 0 : 2;

	out_be32((void *)hwif->io_ports.ctl_addr, ctl);
	eieio();
	in_be32((void *)(hwif->dma_base + 0x01c));
	eieio();
}

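/*
 * The data port is only reachable through the 32-bit big-endian MMIO
 * window, so the string-I/O helpers below emulate ins{w,l}/outs{w,l} by
 * transferring one or two 16-bit words per access and converting each
 * word from/to the little-endian ordering of the ATA data register.
 */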
static void scc_ide_insw(unsigned long port, void *addr, u32 count)
{
	u16 *ptr = (u16 *)addr;
	while (count--) {
		*ptr++ = le16_to_cpu(in_be32((void*)port));
	}
}

static void scc_ide_insl(unsigned long port, void *addr, u32 count)
{
	u16 *ptr = (u16 *)addr;
	while (count--) {
		*ptr++ = le16_to_cpu(in_be32((void*)port));
		*ptr++ = le16_to_cpu(in_be32((void*)port));
	}
}

static void scc_ide_outb(u8 addr, unsigned long port)
{
	out_be32((void*)port, addr);
}

static void
scc_ide_outsw(unsigned long port, void *addr, u32 count)
{
	u16 *ptr = (u16 *)addr;
	while (count--) {
		out_be32((void*)port, cpu_to_le16(*ptr++));
	}
}

static void
scc_ide_outsl(unsigned long port, void *addr, u32 count)
{
	u16 *ptr = (u16 *)addr;
	while (count--) {
		out_be32((void*)port, cpu_to_le16(*ptr++));
		out_be32((void*)port, cpu_to_le16(*ptr++));
	}
}

/**
 *	scc_set_pio_mode	-	set host controller for PIO mode
 *	@drive: drive
 *	@pio: PIO mode number
 *
 *	Load the timing settings for this device mode into the
 *	controller.
 */
static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct scc_ports *ports = ide_get_hwifdata(hwif);
	unsigned long ctl_base = ports->ctl;
	unsigned long cckctrl_port = ctl_base + 0xff0;
	unsigned long piosht_port = ctl_base + 0x000;
	unsigned long pioct_port = ctl_base + 0x004;
	unsigned long reg;
	int offset;

	reg = in_be32((void __iomem *)cckctrl_port);
	if (reg & CCKCTRL_ATACLKOEN) {
		offset = 1;	/* 133MHz */
	} else {
		offset = 0;	/* 100MHz */
	}
	reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
	out_be32((void __iomem *)piosht_port, reg);
	reg = JCHCTtbl[offset][pio];
	out_be32((void __iomem *)pioct_port, reg);
}

/**
 *	scc_set_dma_mode	-	set host controller for DMA mode
 *	@drive: drive
 *	@speed: DMA mode
 *
 *	Load the timing settings for this device mode into the
 *	controller.
 */
static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct scc_ports *ports = ide_get_hwifdata(hwif);
	unsigned long ctl_base = ports->ctl;
	unsigned long cckctrl_port = ctl_base + 0xff0;
	unsigned long mdmact_port = ctl_base + 0x008;
	unsigned long mcrcst_port = ctl_base + 0x00c;
	unsigned long sdmact_port = ctl_base + 0x010;
	unsigned long scrcst_port = ctl_base + 0x014;
	unsigned long udenvt_port = ctl_base + 0x018;
	unsigned long tdvhsel_port = ctl_base + 0x020;
	int is_slave = (&hwif->drives[1] == drive);
	int offset, idx;
	unsigned long reg;
	unsigned long jcactsel;

	reg = in_be32((void __iomem *)cckctrl_port);
	if (reg & CCKCTRL_ATACLKOEN) {
		offset = 1;	/* 133MHz */
	} else {
		offset = 0;	/* 100MHz */
	}

	idx = speed - XFER_UDMA_0;

	jcactsel = JCACTSELtbl[offset][idx];
	if (is_slave) {
		out_be32((void __iomem *)sdmact_port, JCHDCTxtbl[offset][idx]);
		out_be32((void __iomem *)scrcst_port, JCSTWTxtbl[offset][idx]);
		jcactsel = jcactsel << 2;
		out_be32((void __iomem *)tdvhsel_port,
			 (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_SLAVE) | jcactsel);
	} else {
		out_be32((void __iomem *)mdmact_port, JCHDCTxtbl[offset][idx]);
		out_be32((void __iomem *)mcrcst_port, JCSTWTxtbl[offset][idx]);
		out_be32((void __iomem *)tdvhsel_port,
			 (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_MASTER) | jcactsel);
	}
	reg = JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx];
	out_be32((void __iomem *)udenvt_port, reg);
}

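/*
 * scc_dma_host_set() mirrors the standard BMDMA "DMA capable" bits:
 * bit 5 (master) or bit 6 (slave) of the DMA status register is set or
 * cleared for the selected unit.
 */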
static void scc_dma_host_set(ide_drive_t *drive, int on)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 unit = drive->dn & 1;
	u8 dma_stat = scc_ide_inb(hwif->dma_base + 4);

	if (on)
		dma_stat |= (1 << (5 + unit));
	else
		dma_stat &= ~(1 << (5 + unit));

	scc_ide_outb(dma_stat, hwif->dma_base + 4);
}

/**
 *	scc_ide_dma_setup	-	begin a DMA phase
 *	@drive: target device
 *
 *	Build an IDE DMA PRD (IDE speak for scatter gather table)
 *	and then set up the DMA transfer registers.
 *
 *	Returns 0 on success. If a PIO fallback is required then 1
 *	is returned.
 */
static int scc_dma_setup(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	unsigned int reading;
	u8 dma_stat;

	if (rq_data_dir(rq))
		reading = 0;
	else
		reading = 1 << 3;

	/* fall back to pio! */
	if (!ide_build_dmatable(drive, rq)) {
		ide_map_sg(drive, rq);
		return 1;
	}

	/* PRD table */
	out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma);

	/* specify r/w */
	out_be32((void __iomem *)hwif->dma_base, reading);

	/* read DMA status for INTR & ERROR flags */
	dma_stat = in_be32((void __iomem *)(hwif->dma_base + 4));

	/* clear INTR & ERROR flags */
	out_be32((void __iomem *)(hwif->dma_base + 4), dma_stat | 6);
	drive->waiting_for_dma = 1;

	return 0;
}

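/* Kick off the transfer programmed by scc_dma_setup() by setting the
 * start/stop bit in the DMA command register. */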
static void scc_dma_start(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_cmd = scc_ide_inb(hwif->dma_base);

	/* start DMA */
	scc_ide_outb(dma_cmd | 1, hwif->dma_base);
}

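/*
 * Common BMDMA-style completion: stop the engine, clear the INTR and
 * ERROR bits, tear down the PRD mappings and report a non-zero value
 * unless the status shows exactly "interrupt, no error, not active".
 */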
static int __scc_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_stat, dma_cmd;

	drive->waiting_for_dma = 0;
	/* get DMA command mode */
	dma_cmd = scc_ide_inb(hwif->dma_base);
	/* stop DMA */
	scc_ide_outb(dma_cmd & ~1, hwif->dma_base);
	/* get DMA status */
	dma_stat = scc_ide_inb(hwif->dma_base + 4);
	/* clear the INTR & ERROR bits */
	scc_ide_outb(dma_stat | 6, hwif->dma_base + 4);
	/* purge DMA mappings */
	ide_destroy_dmatable(drive);
	/* verify good DMA status */
	return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
}

/**
 *	scc_dma_end	-	Stop DMA
 *	@drive: drive
 *
 *	Check and clear INT Status register.
 *	Then call __scc_dma_end().
 */
static int scc_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	void __iomem *dma_base = (void __iomem *)hwif->dma_base;
	unsigned long intsts_port = hwif->dma_base + 0x014;
	u32 reg;
	int dma_stat, data_loss = 0;
	static int retry = 0;

	/* errata A308 workaround: Step5 (check data loss) */
	/* We don't check non ide_disk because it is limited to UDMA4 */
	if (!(in_be32((void __iomem *)hwif->io_ports.ctl_addr)
	      & ERR_STAT) &&
	    drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) {
		reg = in_be32((void __iomem *)intsts_port);
		if (!(reg & INTSTS_ACTEINT)) {
			printk(KERN_WARNING "%s: operation failed (transfer data loss)\n",
			       drive->name);
			data_loss = 1;
			if (retry++) {
				struct request *rq = HWGROUP(drive)->rq;
				int unit;
				/* ERROR_RESET and drive->crc_count are needed
				 * to reduce DMA transfer mode in retry process.
				 */
				if (rq)
					rq->errors |= ERROR_RESET;
				for (unit = 0; unit < MAX_DRIVES; unit++) {
					ide_drive_t *drive = &hwif->drives[unit];
					drive->crc_count++;
				}
			}
		}
	}

	while (1) {
		reg = in_be32((void __iomem *)intsts_port);

		if (reg & INTSTS_SERROR) {
			printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT);

			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
			continue;
		}

		if (reg & INTSTS_PRERR) {
			u32 maea0, maec0;
			unsigned long ctl_base = hwif->config_data;

			maea0 = in_be32((void __iomem *)(ctl_base + 0xF50));
			maec0 = in_be32((void __iomem *)(ctl_base + 0xF54));

			printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", SCC_PATA_NAME, maea0, maec0);

			out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT);

			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
			continue;
		}

		if (reg & INTSTS_RERR) {
			printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT);

			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
			continue;
		}

		if (reg & INTSTS_ICERR) {
			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);

			printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT);
			continue;
		}

		if (reg & INTSTS_BMSINT) {
			printk(KERN_WARNING "%s: Internal Bus Error\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_BMSINT);
			continue;
		}

		if (reg & INTSTS_BMHE) {
			out_be32((void __iomem *)intsts_port, INTSTS_BMHE);
			continue;
		}

		if (reg & INTSTS_ACTEINT) {
			out_be32((void __iomem *)intsts_port, INTSTS_ACTEINT);
			continue;
		}

		if (reg & INTSTS_IOIRQS) {
			out_be32((void __iomem *)intsts_port, INTSTS_IOIRQS);
			continue;
		}
		break;
	}

	dma_stat = __scc_dma_end(drive);
	if (data_loss)
		dma_stat |= 2; /* emulate DMA error (to retry command) */

	return dma_stat;
}

/* returns 1 if dma irq issued, 0 otherwise */
static int scc_dma_test_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014);

	/* SCC errata A252,A308 workaround: Step4 */
	if ((in_be32((void __iomem *)hwif->io_ports.ctl_addr)
	     & ERR_STAT) &&
	    (int_stat & INTSTS_INTRQ))
		return 1;

	/* SCC errata A308 workaround: Step5 (polling IOIRQS) */
	if (int_stat & INTSTS_IOIRQS)
		return 1;

	return 0;
}

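/*
 * Errata A308 also means non-disk (e.g. ATAPI) devices must not run
 * above UDMA4, so the filter below trims the advertised UDMA mask for
 * anything that is not an ide_disk.
 */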
static u8 scc_udma_filter(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 mask = hwif->ultra_mask;

	/* errata A308 workaround: limit non ide_disk drive to UDMA4 */
	if ((drive->media != ide_disk) && (mask & 0xE0)) {
		printk(KERN_INFO "%s: limit %s to UDMA4\n",
		       SCC_PATA_NAME, drive->name);
		mask &= ATA_UDMA4;
	}

	return mask;
}

/**
 *	setup_mmio_scc	-	map CTRL/BMID region
 *	@dev: PCI device we are configuring
 *	@name: device name
 *
 */
static int setup_mmio_scc (struct pci_dev *dev, const char *name)
{
	void __iomem *ctl_addr;
	void __iomem *dma_addr;
	int i, ret;

	for (i = 0; i < MAX_HWIFS; i++) {
		if (scc_ports[i].ctl == 0)
			break;
	}
	if (i >= MAX_HWIFS)
		return -ENOMEM;

	ret = pci_request_selected_regions(dev, (1 << 2) - 1, name);
	if (ret < 0) {
		printk(KERN_ERR "%s: can't reserve resources\n", name);
		return ret;
	}

	ctl_addr = pci_ioremap_bar(dev, 0);
	if (!ctl_addr)
		goto fail_0;

	dma_addr = pci_ioremap_bar(dev, 1);
	if (!dma_addr)
		goto fail_1;

	pci_set_master(dev);
	scc_ports[i].ctl = (unsigned long)ctl_addr;
	scc_ports[i].dma = (unsigned long)dma_addr;
	pci_set_drvdata(dev, (void *) &scc_ports[i]);

	return 1;

 fail_1:
	iounmap(ctl_addr);
 fail_0:
	return -ENOMEM;
}

/*
 * The task-file registers are exposed through the BMID window: register
 * i lives at dma_base + 0x20 + i * 4, which is what the io_ports_array
 * setup below encodes before handing the interface to ide_host_add().
 */
static int scc_ide_setup_pci_device(struct pci_dev *dev,
				    const struct ide_port_info *d)
{
	struct scc_ports *ports = pci_get_drvdata(dev);
	struct ide_host *host;
	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
	int i, rc;

	memset(&hw, 0, sizeof(hw));
	for (i = 0; i <= 8; i++)
		hw.io_ports_array[i] = ports->dma + 0x20 + i * 4;
	hw.irq = dev->irq;
	hw.dev = &dev->dev;
	hw.chipset = ide_pci;

	rc = ide_host_add(d, hws, &host);
	if (rc)
		return rc;

	ports->host = host;

	return 0;
}

/**
 *	init_setup_scc	-	set up an SCC PATA Controller
 *	@dev: PCI device
 *	@d: IDE port info
 *
 *	Perform the initial set up for this device.
 */
static int __devinit init_setup_scc(struct pci_dev *dev,
				    const struct ide_port_info *d)
{
	unsigned long ctl_base;
	unsigned long dma_base;
	unsigned long cckctrl_port;
	unsigned long intmask_port;
	unsigned long mode_port;
	unsigned long ecmode_port;
	u32 reg = 0;
	struct scc_ports *ports;
	int rc;

	rc = pci_enable_device(dev);
	if (rc)
		goto end;

	rc = setup_mmio_scc(dev, d->name);
	if (rc < 0)
		goto end;

	ports = pci_get_drvdata(dev);
	ctl_base = ports->ctl;
	dma_base = ports->dma;
	cckctrl_port = ctl_base + 0xff0;
	intmask_port = dma_base + 0x010;
	mode_port = ctl_base + 0x024;
	ecmode_port = ctl_base + 0xf00;

	/* controller initialization */
	out_be32((void*)cckctrl_port, reg);
	reg |= CCKCTRL_ATACLKOEN;
	out_be32((void*)cckctrl_port, reg);
	reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN;
	out_be32((void*)cckctrl_port, reg);
	reg |= CCKCTRL_CRST;
	out_be32((void*)cckctrl_port, reg);

	for (;;) {
		reg = in_be32((void*)cckctrl_port);
		if (reg & CCKCTRL_CRST)
			break;
		udelay(5000);
	}

	reg |= CCKCTRL_ATARESET;
	out_be32((void*)cckctrl_port, reg);

	out_be32((void*)ecmode_port, ECMODE_VALUE);
	out_be32((void*)mode_port, MODE_JCUSFEN);
	out_be32((void*)intmask_port, INTMASK_MSK);

	rc = scc_ide_setup_pci_device(dev, d);

 end:
	return rc;
}

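/*
 * scc_tf_load()/scc_tf_read() follow the generic IDE taskfile helpers
 * but route every register access through the 32-bit big-endian MMIO
 * accessors above instead of port I/O.
 */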
static void scc_tf_load(ide_drive_t *drive, ide_task_t *task)
{
	struct ide_io_ports *io_ports = &drive->hwif->io_ports;
	struct ide_taskfile *tf = &task->tf;
	u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;

	if (task->tf_flags & IDE_TFLAG_FLAGGED)
		HIHI = 0xFF;

	if (task->tf_flags & IDE_TFLAG_OUT_DATA)
		out_be32((void *)io_ports->data_addr,
			 (tf->hob_data << 8) | tf->data);

	if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
		scc_ide_outb(tf->hob_feature, io_ports->feature_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
		scc_ide_outb(tf->hob_nsect, io_ports->nsect_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
		scc_ide_outb(tf->hob_lbal, io_ports->lbal_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
		scc_ide_outb(tf->hob_lbam, io_ports->lbam_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
		scc_ide_outb(tf->hob_lbah, io_ports->lbah_addr);

	if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
		scc_ide_outb(tf->feature, io_ports->feature_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
		scc_ide_outb(tf->nsect, io_ports->nsect_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
		scc_ide_outb(tf->lbal, io_ports->lbal_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
		scc_ide_outb(tf->lbam, io_ports->lbam_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
		scc_ide_outb(tf->lbah, io_ports->lbah_addr);

	if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
		scc_ide_outb((tf->device & HIHI) | drive->select,
			     io_ports->device_addr);
}

static void scc_tf_read(ide_drive_t *drive, ide_task_t *task)
{
	struct ide_io_ports *io_ports = &drive->hwif->io_ports;
	struct ide_taskfile *tf = &task->tf;

	if (task->tf_flags & IDE_TFLAG_IN_DATA) {
		u16 data = (u16)in_be32((void *)io_ports->data_addr);

		tf->data = data & 0xff;
		tf->hob_data = (data >> 8) & 0xff;
	}

	/* be sure we're looking at the low order bits */
	scc_ide_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);

	if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
		tf->feature = scc_ide_inb(io_ports->feature_addr);
	if (task->tf_flags & IDE_TFLAG_IN_NSECT)
		tf->nsect = scc_ide_inb(io_ports->nsect_addr);
	if (task->tf_flags & IDE_TFLAG_IN_LBAL)
		tf->lbal = scc_ide_inb(io_ports->lbal_addr);
	if (task->tf_flags & IDE_TFLAG_IN_LBAM)
		tf->lbam = scc_ide_inb(io_ports->lbam_addr);
	if (task->tf_flags & IDE_TFLAG_IN_LBAH)
		tf->lbah = scc_ide_inb(io_ports->lbah_addr);
	if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
		tf->device = scc_ide_inb(io_ports->device_addr);

	if (task->tf_flags & IDE_TFLAG_LBA48) {
		scc_ide_outb(ATA_DEVCTL_OBS | 0x80, io_ports->ctl_addr);

		if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
			tf->hob_feature = scc_ide_inb(io_ports->feature_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
			tf->hob_nsect = scc_ide_inb(io_ports->nsect_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
			tf->hob_lbal = scc_ide_inb(io_ports->lbal_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
			tf->hob_lbam = scc_ide_inb(io_ports->lbam_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
			tf->hob_lbah = scc_ide_inb(io_ports->lbah_addr);
	}
}

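/*
 * PIO data transfer wrappers: 32-bit transfers when the drive allows
 * io_32bit (with a trailing 16-bit access for a leftover half word),
 * otherwise plain 16-bit transfers.
 */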
static void scc_input_data(ide_drive_t *drive, struct request *rq,
			   void *buf, unsigned int len)
{
	unsigned long data_addr = drive->hwif->io_ports.data_addr;

	len++;

	if (drive->io_32bit) {
		scc_ide_insl(data_addr, buf, len / 4);

		if ((len & 3) >= 2)
			scc_ide_insw(data_addr, (u8 *)buf + (len & ~3), 1);
	} else
		scc_ide_insw(data_addr, buf, len / 2);
}

static void scc_output_data(ide_drive_t *drive, struct request *rq,
			    void *buf, unsigned int len)
{
	unsigned long data_addr = drive->hwif->io_ports.data_addr;

	len++;

	if (drive->io_32bit) {
		scc_ide_outsl(data_addr, buf, len / 4);

		if ((len & 3) >= 2)
			scc_ide_outsw(data_addr, (u8 *)buf + (len & ~3), 1);
	} else
		scc_ide_outsw(data_addr, buf, len / 2);
}

/**
 *	init_mmio_iops_scc	-	set up the iops for MMIO
 *	@hwif: interface to set up
 *
 */
static void __devinit init_mmio_iops_scc(ide_hwif_t *hwif)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	struct scc_ports *ports = pci_get_drvdata(dev);
	unsigned long dma_base = ports->dma;

	ide_set_hwifdata(hwif, ports);

	hwif->dma_base = dma_base;
	hwif->config_data = ports->ctl;
}

/**
 *	init_iops_scc	-	set up iops
 *	@hwif: interface to set up
 *
 *	Do the basic setup for the SCC hardware interface
 *	and then do the MMIO setup.
 */
static void __devinit init_iops_scc(ide_hwif_t *hwif)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);

	hwif->hwif_data = NULL;
	if (pci_get_drvdata(dev) == NULL)
		return;
	init_mmio_iops_scc(hwif);
}

static int __devinit scc_init_dma(ide_hwif_t *hwif,
				  const struct ide_port_info *d)
{
	return ide_allocate_dma_engine(hwif);
}

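/* Cable detection is hard-coded: the port always reports an 80-wire cable. */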
static u8 scc_cable_detect(ide_hwif_t *hwif)
{
	return ATA_CBL_PATA80;
}

/**
 *	init_hwif_scc	-	set up hwif
 *	@hwif: interface to set up
 *
 *	We do the basic set up of the interface structure. The SCC
 *	requires several custom handlers so we override the default
 *	ide DMA handlers appropriately.
 */
static void __devinit init_hwif_scc(ide_hwif_t *hwif)
{
	/* PTERADD */
	out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);

	if (in_be32((void __iomem *)(hwif->config_data + 0xff0)) & CCKCTRL_ATACLKOEN)
		hwif->ultra_mask = ATA_UDMA6;	/* 133MHz */
	else
		hwif->ultra_mask = ATA_UDMA5;	/* 100MHz */
}

static const struct ide_tp_ops scc_tp_ops = {
	.exec_command		= scc_exec_command,
	.read_status		= scc_read_status,
	.read_altstatus		= scc_read_altstatus,
	.read_sff_dma_status	= scc_read_sff_dma_status,

	.set_irq		= scc_set_irq,

	.tf_load		= scc_tf_load,
	.tf_read		= scc_tf_read,

	.input_data		= scc_input_data,
	.output_data		= scc_output_data,
};

static const struct ide_port_ops scc_port_ops = {
	.set_pio_mode		= scc_set_pio_mode,
	.set_dma_mode		= scc_set_dma_mode,
	.udma_filter		= scc_udma_filter,
	.cable_detect		= scc_cable_detect,
};

static const struct ide_dma_ops scc_dma_ops = {
	.dma_host_set		= scc_dma_host_set,
	.dma_setup		= scc_dma_setup,
	.dma_exec_cmd		= ide_dma_exec_cmd,
	.dma_start		= scc_dma_start,
	.dma_end		= scc_dma_end,
	.dma_test_irq		= scc_dma_test_irq,
	.dma_lost_irq		= ide_dma_lost_irq,
	.dma_timeout		= ide_dma_timeout,
};

#define DECLARE_SCC_DEV(name_str)		\
  {						\
	.name		= name_str,		\
	.init_iops	= init_iops_scc,	\
	.init_dma	= scc_init_dma,		\
	.init_hwif	= init_hwif_scc,	\
	.tp_ops		= &scc_tp_ops,		\
	.port_ops	= &scc_port_ops,	\
	.dma_ops	= &scc_dma_ops,		\
	.host_flags	= IDE_HFLAG_SINGLE,	\
	.pio_mask	= ATA_PIO4,		\
  }

static const struct ide_port_info scc_chipsets[] __devinitdata = {
	/* 0 */ DECLARE_SCC_DEV("sccIDE"),
};

/**
 *	scc_init_one	-	pci layer discovery entry
 *	@dev: PCI device
 *	@id: ident table entry
 *
 *	Called by the PCI code when it finds an SCC PATA controller.
 *	We then use the IDE PCI generic helper to do most of the work.
 */
static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	return init_setup_scc(dev, &scc_chipsets[id->driver_data]);
}

/**
 *	scc_remove	-	pci layer remove entry
 *	@dev: PCI device
 *
 *	Called by the PCI code when it removes an SCC PATA controller.
 */
static void __devexit scc_remove(struct pci_dev *dev)
{
	struct scc_ports *ports = pci_get_drvdata(dev);
	struct ide_host *host = ports->host;

	ide_host_remove(host);

	iounmap((void*)ports->dma);
	iounmap((void*)ports->ctl);
	pci_release_selected_regions(dev, (1 << 2) - 1);
	memset(ports, 0, sizeof(*ports));
}

static const struct pci_device_id scc_pci_tbl[] = {
	{ PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0 },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, scc_pci_tbl);

static struct pci_driver scc_pci_driver = {
	.name		= "SCC IDE",
	.id_table	= scc_pci_tbl,
	.probe		= scc_init_one,
	.remove		= __devexit_p(scc_remove),
};

static int scc_ide_init(void)
{
	return ide_pci_register_driver(&scc_pci_driver);
}

module_init(scc_ide_init);

static void scc_ide_exit(void)
{
	ide_pci_unregister_driver(&scc_pci_driver);
}

module_exit(scc_ide_exit);

MODULE_DESCRIPTION("PCI driver module for Toshiba SCC IDE");
MODULE_LICENSE("GPL");