/*
 * Support for IDE interfaces on Celleb platform
 *
 * (C) Copyright 2006 TOSHIBA CORPORATION
 *
 * This code is based on drivers/ide/pci/siimage.c:
 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2003 Red Hat
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/init.h>
#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA	0x01b4

#define SCC_PATA_NAME	"scc IDE"

#define TDVHSEL_MASTER		0x00000001
#define TDVHSEL_SLAVE		0x00000004

#define MODE_JCUSFEN		0x00000080

#define CCKCTRL_ATARESET	0x00040000
#define CCKCTRL_BUFCNT		0x00020000
#define CCKCTRL_CRST		0x00010000
#define CCKCTRL_OCLKEN		0x00000100
#define CCKCTRL_ATACLKOEN	0x00000002
#define CCKCTRL_LCLKEN		0x00000001

#define QCHCD_IOS_SS		0x00000001

#define QCHSD_STPDIAG		0x00020000

#define INTMASK_MSK		0xD1000012
#define INTSTS_SERROR		0x80000000
#define INTSTS_PRERR		0x40000000
#define INTSTS_RERR		0x10000000
#define INTSTS_ICERR		0x01000000
#define INTSTS_BMSINT		0x00000010
#define INTSTS_BMHE		0x00000008
#define INTSTS_IOIRQS		0x00000004
#define INTSTS_INTRQ		0x00000002
#define INTSTS_ACTEINT		0x00000001

#define ECMODE_VALUE		0x01
static struct scc_ports {
	unsigned long ctl, dma;
	struct ide_host *host;	/* for removing port from system */
} scc_ports[MAX_HWIFS];
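/*
 * The timing tables below are indexed as tbl[clock][mode]: row 0 holds the
 * values used with a 100MHz ATA clock and row 1 the 133MHz values, while
 * the column is the PIO mode number or (speed - XFER_UDMA_0) for UDMA.
 */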
/* PIO transfer mode table */
static unsigned long JCHSTtbl[2][7] = {
	{0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00},	/* 100MHz */
	{0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00}	/* 133MHz */
};

static unsigned long JCHHTtbl[2][7] = {
	{0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00},	/* 100MHz */
	{0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00}	/* 133MHz */
};

static unsigned long JCHCTtbl[2][7] = {
	{0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00},	/* 100MHz */
	{0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00}	/* 133MHz */
};

/* DMA transfer mode table */
static unsigned long JCHDCTxtbl[2][7] = {
	{0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00},	/* 100MHz */
	{0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00}	/* 133MHz */
};

static unsigned long JCSTWTxtbl[2][7] = {
	{0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00},	/* 100MHz */
	{0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02}	/* 133MHz */
};

static unsigned long JCTSStbl[2][7] = {
	{0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00},	/* 100MHz */
	{0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05}	/* 133MHz */
};

static unsigned long JCENVTtbl[2][7] = {
	{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00},	/* 100MHz */
	{0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02}	/* 133MHz */
};

/* JCACTSELS/JCACTSELM */
static unsigned long JCACTSELtbl[2][7] = {
	{0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00},	/* 100MHz */
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}	/* 133MHz */
};
static u8 scc_ide_inb(unsigned long port)
{
	u32 data = in_be32((void*)port);
	return (u8)data;
}
static void scc_exec_command(ide_hwif_t *hwif, u8 cmd)
{
	out_be32((void *)hwif->io_ports.command_addr, cmd);
	eieio();
	in_be32((void *)(hwif->dma_base + 0x01c));
	eieio();
}
static u8 scc_read_status(ide_hwif_t *hwif)
{
	return (u8)in_be32((void *)hwif->io_ports.status_addr);
}
static u8 scc_read_altstatus(ide_hwif_t *hwif)
{
	return (u8)in_be32((void *)hwif->io_ports.ctl_addr);
}
static u8 scc_dma_sff_read_status(ide_hwif_t *hwif)
{
	return (u8)in_be32((void *)(hwif->dma_base + 4));
}
static void scc_set_irq(ide_hwif_t *hwif, int on)
{
	u8 ctl = ATA_DEVCTL_OBS;

	if (on == 4) { /* hack for SRST */
		ctl |= 4;
		on &= ~4;
	}

	ctl |= on ? 0 : 2;

	out_be32((void *)hwif->io_ports.ctl_addr, ctl);
	eieio();
	in_be32((void *)(hwif->dma_base + 0x01c));
	eieio();
}
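/*
 * PIO data helpers: each in_be32()/out_be32() below moves a single 16-bit
 * ATA data word through the 32-bit MMIO data register, with
 * le16_to_cpu()/cpu_to_le16() handling the byte order on this big-endian
 * platform.
 */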
static void scc_ide_insw(unsigned long port, void *addr, u32 count)
{
	u16 *ptr = (u16 *)addr;
	while (count--) {
		*ptr++ = le16_to_cpu(in_be32((void*)port));
	}
}
static void scc_ide_insl(unsigned long port, void *addr, u32 count)
{
	u16 *ptr = (u16 *)addr;
	while (count--) {
		*ptr++ = le16_to_cpu(in_be32((void*)port));
		*ptr++ = le16_to_cpu(in_be32((void*)port));
	}
}
static void scc_ide_outb(u8 addr, unsigned long port)
{
	out_be32((void*)port, addr);
}
static void scc_ide_outsw(unsigned long port, void *addr, u32 count)
{
	u16 *ptr = (u16 *)addr;
	while (count--) {
		out_be32((void*)port, cpu_to_le16(*ptr++));
	}
}
static void scc_ide_outsl(unsigned long port, void *addr, u32 count)
{
	u16 *ptr = (u16 *)addr;
	while (count--) {
		out_be32((void*)port, cpu_to_le16(*ptr++));
		out_be32((void*)port, cpu_to_le16(*ptr++));
	}
}
/**
 *	scc_set_pio_mode	-	set host controller for PIO mode
 *	@drive: drive
 *	@pio: PIO mode number
 *
 *	Load the timing settings for this device mode into the
 *	controller.
 */
static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scc_ports *ports = ide_get_hwifdata(hwif);
	unsigned long ctl_base = ports->ctl;
	unsigned long cckctrl_port = ctl_base + 0xff0;
	unsigned long piosht_port = ctl_base + 0x000;
	unsigned long pioct_port = ctl_base + 0x004;
	unsigned long reg;
	int offset;

	reg = in_be32((void __iomem *)cckctrl_port);
	if (reg & CCKCTRL_ATACLKOEN) {
		offset = 1;	/* 133MHz */
	} else {
		offset = 0;	/* 100MHz */
	}
	reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
	out_be32((void __iomem *)piosht_port, reg);
	reg = JCHCTtbl[offset][pio];
	out_be32((void __iomem *)pioct_port, reg);
}
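/*
 * UDMA timing is programmed through the CTL block: MDMACT/MCRCST for the
 * master device or SDMACT/SCRCST for the slave take the JCHDCTx/JCSTWTx
 * values, UDENVT takes JCTSS/JCENVT, and the per-device JCACTSEL bit is
 * merged into TDVHSEL (shifted into the slave position for a slave device).
 */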
/**
 *	scc_set_dma_mode	-	set host controller for DMA mode
 *	@drive: drive
 *	@speed: DMA mode
 *
 *	Load the timing settings for this device mode into the
 *	controller.
 */
static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scc_ports *ports = ide_get_hwifdata(hwif);
	unsigned long ctl_base = ports->ctl;
	unsigned long cckctrl_port = ctl_base + 0xff0;
	unsigned long mdmact_port = ctl_base + 0x008;
	unsigned long mcrcst_port = ctl_base + 0x00c;
	unsigned long sdmact_port = ctl_base + 0x010;
	unsigned long scrcst_port = ctl_base + 0x014;
	unsigned long udenvt_port = ctl_base + 0x018;
	unsigned long tdvhsel_port = ctl_base + 0x020;
	int is_slave = drive->dn & 1;
	int offset, idx;
	unsigned long reg;
	unsigned long jcactsel;

	reg = in_be32((void __iomem *)cckctrl_port);
	if (reg & CCKCTRL_ATACLKOEN) {
		offset = 1;	/* 133MHz */
	} else {
		offset = 0;	/* 100MHz */
	}

	idx = speed - XFER_UDMA_0;

	jcactsel = JCACTSELtbl[offset][idx];
	if (is_slave) {
		out_be32((void __iomem *)sdmact_port, JCHDCTxtbl[offset][idx]);
		out_be32((void __iomem *)scrcst_port, JCSTWTxtbl[offset][idx]);
		jcactsel = jcactsel << 2;
		out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_SLAVE) | jcactsel);
	} else {
		out_be32((void __iomem *)mdmact_port, JCHDCTxtbl[offset][idx]);
		out_be32((void __iomem *)mcrcst_port, JCSTWTxtbl[offset][idx]);
		out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_MASTER) | jcactsel);
	}
	reg = JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx];
	out_be32((void __iomem *)udenvt_port, reg);
}
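/*
 * scc_dma_host_set() toggles the "drive DMA capable" bit in the BMDMA
 * status register at dma_base + 4: bit 5 for the master (unit 0) and
 * bit 6 for the slave (unit 1).
 */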
static void scc_dma_host_set(ide_drive_t *drive, int on)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 unit = drive->dn & 1;
	u8 dma_stat = scc_dma_sff_read_status(hwif);

	if (on)
		dma_stat |= (1 << (5 + unit));
	else
		dma_stat &= ~(1 << (5 + unit));

	scc_ide_outb(dma_stat, hwif->dma_base + 4);
}
/**
 *	scc_dma_setup	-	begin a DMA phase
 *	@drive: target device
 *
 *	Build an IDE DMA PRD (IDE speak for scatter gather table)
 *	and then set up the DMA transfer registers.
 *
 *	Returns 0 on success. If a PIO fallback is required then 1
 *	is returned.
 */
static int scc_dma_setup(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->rq;
	unsigned int reading;
	u8 dma_stat;

	if (rq_data_dir(rq))
		reading = 0;
	else
		reading = 1 << 3;

	/* fall back to pio! */
	if (!ide_build_dmatable(drive, rq)) {
		ide_map_sg(drive, rq);
		return 1;
	}

	/* PRD table */
	out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma);

	/* specify r/w */
	out_be32((void __iomem *)hwif->dma_base, reading);

	/* read DMA status for INTR & ERROR flags */
	dma_stat = scc_dma_sff_read_status(hwif);

	/* clear INTR & ERROR flags */
	out_be32((void __iomem *)(hwif->dma_base + 4), dma_stat | 6);

	drive->waiting_for_dma = 1;

	return 0;
}
static void scc_dma_start(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_cmd = scc_ide_inb(hwif->dma_base);

	/* start DMA */
	scc_ide_outb(dma_cmd | 1, hwif->dma_base);
}
static int __scc_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_stat, dma_cmd;

	drive->waiting_for_dma = 0;
	/* get DMA command mode */
	dma_cmd = scc_ide_inb(hwif->dma_base);
	/* stop DMA */
	scc_ide_outb(dma_cmd & ~1, hwif->dma_base);
	/* get DMA status */
	dma_stat = scc_dma_sff_read_status(hwif);
	/* clear the INTR & ERROR bits */
	scc_ide_outb(dma_stat | 6, hwif->dma_base + 4);
	/* purge DMA mappings */
	ide_destroy_dmatable(drive);
	/* verify good DMA status */
	return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
}
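/*
 * scc_dma_end() below drains the SCC INT status register until no event
 * bit remains set; on SERROR/PRERR/RERR/ICERR it also clears QCHCD_IOS_SS
 * in the DMA command register before re-reading the status.
 */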
/**
 *	scc_dma_end	-	Stop DMA
 *	@drive: IDE drive
 *
 *	Check and clear INT Status register.
 *	Then call __scc_dma_end().
 */
static int scc_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	void __iomem *dma_base = (void __iomem *)hwif->dma_base;
	unsigned long intsts_port = hwif->dma_base + 0x014;
	u32 reg;
	int dma_stat, data_loss = 0;
	static int retry = 0;

	/* errata A308 workaround: Step5 (check data loss) */
	/* We don't check non ide_disk because it is limited to UDMA4 */
	if (!(in_be32((void __iomem *)hwif->io_ports.ctl_addr)
	      & ATA_ERR) &&
	    drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) {
		reg = in_be32((void __iomem *)intsts_port);
		if (!(reg & INTSTS_ACTEINT)) {
			printk(KERN_WARNING "%s: operation failed (transfer data loss)\n",
			       drive->name);
			data_loss = 1;
			if (retry++) {
				struct request *rq = hwif->rq;
				ide_drive_t *drive;
				int i;

				/* ERROR_RESET and drive->crc_count are needed
				 * to reduce DMA transfer mode in retry process.
				 */
				if (rq)
					rq->errors |= ERROR_RESET;

				ide_port_for_each_dev(i, drive, hwif)
					drive->crc_count++;
			}
		}
	}

	while (1) {
		reg = in_be32((void __iomem *)intsts_port);

		if (reg & INTSTS_SERROR) {
			printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT);

			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
			continue;
		}

		if (reg & INTSTS_PRERR) {
			u32 maea0, maec0;
			unsigned long ctl_base = hwif->config_data;

			maea0 = in_be32((void __iomem *)(ctl_base + 0xF50));
			maec0 = in_be32((void __iomem *)(ctl_base + 0xF54));

			printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", SCC_PATA_NAME, maea0, maec0);

			out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT);

			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
			continue;
		}

		if (reg & INTSTS_RERR) {
			printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT);

			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
			continue;
		}

		if (reg & INTSTS_ICERR) {
			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);

			printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT);
			continue;
		}

		if (reg & INTSTS_BMSINT) {
			printk(KERN_WARNING "%s: Internal Bus Error\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_BMSINT);

			ide_do_reset(drive);
			continue;
		}

		if (reg & INTSTS_BMHE) {
			out_be32((void __iomem *)intsts_port, INTSTS_BMHE);
			continue;
		}

		if (reg & INTSTS_ACTEINT) {
			out_be32((void __iomem *)intsts_port, INTSTS_ACTEINT);
			continue;
		}

		if (reg & INTSTS_IOIRQS) {
			out_be32((void __iomem *)intsts_port, INTSTS_IOIRQS);
			continue;
		}
		break;
	}

	dma_stat = __scc_dma_end(drive);
	if (data_loss)
		dma_stat |= 2;	/* emulate DMA error (to retry command) */

	return dma_stat;
}
/* returns 1 if dma irq issued, 0 otherwise */
static int scc_dma_test_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014);

	/* SCC errata A252,A308 workaround: Step4 */
	if ((in_be32((void __iomem *)hwif->io_ports.ctl_addr)
	     & ATA_ERR) &&
	    (int_stat & INTSTS_INTRQ))
		return 1;

	/* SCC errata A308 workaround: Step5 (polling IOIRQS) */
	if (int_stat & INTSTS_IOIRQS)
		return 1;

	return 0;
}
static u8 scc_udma_filter(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 mask = hwif->ultra_mask;

	/* errata A308 workaround: limit non ide_disk drive to UDMA4 */
	if ((drive->media != ide_disk) && (mask & 0xE0)) {
		printk(KERN_INFO "%s: limit %s to UDMA4\n",
		       SCC_PATA_NAME, drive->name);
		mask &= ATA_UDMA4;
	}

	return mask;
}
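/*
 * BAR0 exposes the CTRL register block and BAR1 the BMID (bus master DMA)
 * block; the (1 << 2) - 1 mask passed to pci_request_selected_regions()
 * reserves exactly those two BARs.
 */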
/**
 *	setup_mmio_scc	-	map CTRL/BMID region
 *	@dev: PCI device we are configuring
 *	@name: device name
 *
 */
static int setup_mmio_scc (struct pci_dev *dev, const char *name)
{
	void __iomem *ctl_addr;
	void __iomem *dma_addr;
	int i, ret;

	for (i = 0; i < MAX_HWIFS; i++) {
		if (scc_ports[i].ctl == 0)
			break;
	}
	if (i >= MAX_HWIFS)
		return -ENOMEM;

	ret = pci_request_selected_regions(dev, (1 << 2) - 1, name);
	if (ret < 0) {
		printk(KERN_ERR "%s: can't reserve resources\n", name);
		return ret;
	}

	ctl_addr = pci_ioremap_bar(dev, 0);
	if (!ctl_addr)
		goto fail_0;

	dma_addr = pci_ioremap_bar(dev, 1);
	if (!dma_addr)
		goto fail_1;

	pci_set_master(dev);
	scc_ports[i].ctl = (unsigned long)ctl_addr;
	scc_ports[i].dma = (unsigned long)dma_addr;
	pci_set_drvdata(dev, (void *) &scc_ports[i]);

	return 1;

 fail_1:
	iounmap(ctl_addr);
 fail_0:
	return -ENOMEM;
}
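/*
 * The ATA task file registers live in the BMID region as 32-bit registers
 * starting at offset 0x20 and spaced 4 bytes apart, which is why
 * io_ports_array[] below is filled with ports->dma + 0x20 + i * 4.
 */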
static int scc_ide_setup_pci_device(struct pci_dev *dev,
				    const struct ide_port_info *d)
{
	struct scc_ports *ports = pci_get_drvdata(dev);
	struct ide_host *host;
	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
	int i, rc;

	memset(&hw, 0, sizeof(hw));
	for (i = 0; i <= 8; i++)
		hw.io_ports_array[i] = ports->dma + 0x20 + i * 4;
	hw.irq = dev->irq;
	hw.dev = &dev->dev;
	hw.chipset = ide_pci;

	rc = ide_host_add(d, hws, &host);
	if (rc)
		return rc;

	ports->host = host;

	return 0;
}
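/*
 * Controller bring-up order in init_setup_scc(): enable the ATA clock
 * output and the local/output clocks in CCKCTRL, wait for CRST to read
 * back as set, release ATARESET, then program ECMODE, MODE (JCUSFEN) and
 * the interrupt mask before registering the port with the IDE core.
 */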
/**
 *	init_setup_scc	-	set up an SCC PATA Controller
 *	@dev: PCI device
 *	@d: IDE port info
 *
 *	Perform the initial set up for this device.
 */
static int __devinit init_setup_scc(struct pci_dev *dev,
				    const struct ide_port_info *d)
{
	unsigned long ctl_base;
	unsigned long dma_base;
	unsigned long cckctrl_port;
	unsigned long intmask_port;
	unsigned long mode_port;
	unsigned long ecmode_port;
	u32 reg = 0;
	struct scc_ports *ports;
	int rc;

	rc = pci_enable_device(dev);
	if (rc)
		goto end;

	rc = setup_mmio_scc(dev, d->name);
	if (rc < 0)
		goto end;

	ports = pci_get_drvdata(dev);
	ctl_base = ports->ctl;
	dma_base = ports->dma;
	cckctrl_port = ctl_base + 0xff0;
	intmask_port = dma_base + 0x010;
	mode_port = ctl_base + 0x024;
	ecmode_port = ctl_base + 0xf00;

	/* controller initialization */
	out_be32((void*)cckctrl_port, reg);
	reg |= CCKCTRL_ATACLKOEN;
	out_be32((void*)cckctrl_port, reg);
	reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN;
	out_be32((void*)cckctrl_port, reg);
	reg |= CCKCTRL_CRST;
	out_be32((void*)cckctrl_port, reg);

	for (;;) {
		reg = in_be32((void*)cckctrl_port);
		if (reg & CCKCTRL_CRST)
			break;
		udelay(5000);
	}

	reg |= CCKCTRL_ATARESET;
	out_be32((void*)cckctrl_port, reg);

	out_be32((void*)ecmode_port, ECMODE_VALUE);
	out_be32((void*)mode_port, MODE_JCUSFEN);
	out_be32((void*)intmask_port, INTMASK_MSK);

	rc = scc_ide_setup_pci_device(dev, d);

 end:
	return rc;
}
static void scc_tf_load(ide_drive_t *drive, ide_task_t *task)
{
	struct ide_io_ports *io_ports = &drive->hwif->io_ports;
	struct ide_taskfile *tf = &task->tf;
	u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;

	if (task->tf_flags & IDE_TFLAG_FLAGGED)
		HIHI = 0xFF;

	if (task->tf_flags & IDE_TFLAG_OUT_DATA)
		out_be32((void *)io_ports->data_addr,
			 (tf->hob_data << 8) | tf->data);

	if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
		scc_ide_outb(tf->hob_feature, io_ports->feature_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
		scc_ide_outb(tf->hob_nsect, io_ports->nsect_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
		scc_ide_outb(tf->hob_lbal, io_ports->lbal_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
		scc_ide_outb(tf->hob_lbam, io_ports->lbam_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
		scc_ide_outb(tf->hob_lbah, io_ports->lbah_addr);

	if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
		scc_ide_outb(tf->feature, io_ports->feature_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
		scc_ide_outb(tf->nsect, io_ports->nsect_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
		scc_ide_outb(tf->lbal, io_ports->lbal_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
		scc_ide_outb(tf->lbam, io_ports->lbam_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
		scc_ide_outb(tf->lbah, io_ports->lbah_addr);

	if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
		scc_ide_outb((tf->device & HIHI) | drive->select,
			     io_ports->device_addr);
}
static void scc_tf_read(ide_drive_t *drive, ide_task_t *task)
{
	struct ide_io_ports *io_ports = &drive->hwif->io_ports;
	struct ide_taskfile *tf = &task->tf;

	if (task->tf_flags & IDE_TFLAG_IN_DATA) {
		u16 data = (u16)in_be32((void *)io_ports->data_addr);

		tf->data = data & 0xff;
		tf->hob_data = (data >> 8) & 0xff;
	}

	/* be sure we're looking at the low order bits */
	scc_ide_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);

	if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
		tf->feature = scc_ide_inb(io_ports->feature_addr);
	if (task->tf_flags & IDE_TFLAG_IN_NSECT)
		tf->nsect = scc_ide_inb(io_ports->nsect_addr);
	if (task->tf_flags & IDE_TFLAG_IN_LBAL)
		tf->lbal = scc_ide_inb(io_ports->lbal_addr);
	if (task->tf_flags & IDE_TFLAG_IN_LBAM)
		tf->lbam = scc_ide_inb(io_ports->lbam_addr);
	if (task->tf_flags & IDE_TFLAG_IN_LBAH)
		tf->lbah = scc_ide_inb(io_ports->lbah_addr);
	if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
		tf->device = scc_ide_inb(io_ports->device_addr);

	if (task->tf_flags & IDE_TFLAG_LBA48) {
		scc_ide_outb(ATA_DEVCTL_OBS | 0x80, io_ports->ctl_addr);

		if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
			tf->hob_feature = scc_ide_inb(io_ports->feature_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
			tf->hob_nsect = scc_ide_inb(io_ports->nsect_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
			tf->hob_lbal = scc_ide_inb(io_ports->lbal_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
			tf->hob_lbam = scc_ide_inb(io_ports->lbam_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
			tf->hob_lbah = scc_ide_inb(io_ports->lbah_addr);
	}
}
static void scc_input_data(ide_drive_t *drive, struct request *rq,
			   void *buf, unsigned int len)
{
	unsigned long data_addr = drive->hwif->io_ports.data_addr;

	len++;

	if (drive->io_32bit) {
		scc_ide_insl(data_addr, buf, len / 4);

		if ((len & 3) >= 2)
			scc_ide_insw(data_addr, (u8 *)buf + (len & ~3), 1);
	} else
		scc_ide_insw(data_addr, buf, len / 2);
}
static void scc_output_data(ide_drive_t *drive, struct request *rq,
			    void *buf, unsigned int len)
{
	unsigned long data_addr = drive->hwif->io_ports.data_addr;

	len++;

	if (drive->io_32bit) {
		scc_ide_outsl(data_addr, buf, len / 4);

		if ((len & 3) >= 2)
			scc_ide_outsw(data_addr, (u8 *)buf + (len & ~3), 1);
	} else
		scc_ide_outsw(data_addr, buf, len / 2);
}
/**
 *	init_mmio_iops_scc	-	set up the iops for MMIO
 *	@hwif: interface to set up
 *
 */
static void __devinit init_mmio_iops_scc(ide_hwif_t *hwif)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	struct scc_ports *ports = pci_get_drvdata(dev);
	unsigned long dma_base = ports->dma;

	ide_set_hwifdata(hwif, ports);

	hwif->dma_base = dma_base;
	hwif->config_data = ports->ctl;
}
/**
 *	init_iops_scc	-	set up iops
 *	@hwif: interface to set up
 *
 *	Do the basic setup for the SCC hardware interface
 *	and then do the MMIO setup.
 */
static void __devinit init_iops_scc(ide_hwif_t *hwif)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);

	hwif->hwif_data = NULL;
	if (pci_get_drvdata(dev) == NULL)
		return;
	init_mmio_iops_scc(hwif);
}
static int __devinit scc_init_dma(ide_hwif_t *hwif,
				  const struct ide_port_info *d)
{
	return ide_allocate_dma_engine(hwif);
}
static u8 scc_cable_detect(ide_hwif_t *hwif)
{
	return ATA_CBL_PATA80;
}
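/*
 * The UDMA ceiling advertised to the IDE core follows the detected ATA
 * clock: CCKCTRL_ATACLKOEN set means the part runs at 133MHz (UDMA6),
 * otherwise 100MHz (UDMA5), matching the timing tables above.
 */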
/**
 *	init_hwif_scc	-	set up hwif
 *	@hwif: interface to set up
 *
 *	We do the basic set up of the interface structure. The SCC
 *	requires several custom handlers so we override the default
 *	ide DMA handlers appropriately.
 */
static void __devinit init_hwif_scc(ide_hwif_t *hwif)
{
	/* PTERADD */
	out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);

	if (in_be32((void __iomem *)(hwif->config_data + 0xff0)) & CCKCTRL_ATACLKOEN)
		hwif->ultra_mask = ATA_UDMA6;	/* 133MHz */
	else
		hwif->ultra_mask = ATA_UDMA5;	/* 100MHz */
}
static const struct ide_tp_ops scc_tp_ops = {
	.exec_command		= scc_exec_command,
	.read_status		= scc_read_status,
	.read_altstatus		= scc_read_altstatus,

	.set_irq		= scc_set_irq,

	.tf_load		= scc_tf_load,
	.tf_read		= scc_tf_read,

	.input_data		= scc_input_data,
	.output_data		= scc_output_data,
};
static const struct ide_port_ops scc_port_ops = {
	.set_pio_mode		= scc_set_pio_mode,
	.set_dma_mode		= scc_set_dma_mode,
	.udma_filter		= scc_udma_filter,
	.cable_detect		= scc_cable_detect,
};
static const struct ide_dma_ops scc_dma_ops = {
	.dma_host_set		= scc_dma_host_set,
	.dma_setup		= scc_dma_setup,
	.dma_exec_cmd		= ide_dma_exec_cmd,
	.dma_start		= scc_dma_start,
	.dma_end		= scc_dma_end,
	.dma_test_irq		= scc_dma_test_irq,
	.dma_lost_irq		= ide_dma_lost_irq,
	.dma_timeout		= ide_dma_timeout,
	.dma_sff_read_status	= scc_dma_sff_read_status,
};
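/*
 * Generic SFF helpers (ide_dma_exec_cmd, ide_dma_lost_irq, ide_dma_timeout)
 * are used where the SCC needs no special handling; everything else is
 * overridden with the scc_* implementations above.
 */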
#define DECLARE_SCC_DEV(name_str)			\
	{						\
		.name		= name_str,		\
		.init_iops	= init_iops_scc,	\
		.init_dma	= scc_init_dma,		\
		.init_hwif	= init_hwif_scc,	\
		.tp_ops		= &scc_tp_ops,		\
		.port_ops	= &scc_port_ops,	\
		.dma_ops	= &scc_dma_ops,		\
		.host_flags	= IDE_HFLAG_SINGLE,	\
		.pio_mask	= ATA_PIO4,		\
	}
static const struct ide_port_info scc_chipsets[] __devinitdata = {
	/* 0 */ DECLARE_SCC_DEV("sccIDE"),
};
/**
 *	scc_init_one	-	pci layer discovery entry
 *	@dev: PCI device
 *	@id: ident table entry
 *
 *	Called by the PCI code when it finds an SCC PATA controller.
 *	We then use the IDE PCI generic helper to do most of the work.
 */
static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	return init_setup_scc(dev, &scc_chipsets[id->driver_data]);
}
/**
 *	scc_remove	-	pci layer remove entry
 *	@dev: PCI device
 *
 *	Called by the PCI code when it removes an SCC PATA controller.
 */
static void __devexit scc_remove(struct pci_dev *dev)
{
	struct scc_ports *ports = pci_get_drvdata(dev);
	struct ide_host *host = ports->host;

	ide_host_remove(host);

	iounmap((void*)ports->dma);
	iounmap((void*)ports->ctl);
	pci_release_selected_regions(dev, (1 << 2) - 1);
	memset(ports, 0, sizeof(*ports));
}
static const struct pci_device_id scc_pci_tbl[] = {
	{ PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0 },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, scc_pci_tbl);
static struct pci_driver scc_pci_driver = {
	.name		= "SCC IDE",
	.id_table	= scc_pci_tbl,
	.probe		= scc_init_one,
	.remove		= __devexit_p(scc_remove),
};
static int scc_ide_init(void)
{
	return ide_pci_register_driver(&scc_pci_driver);
}

module_init(scc_ide_init);
static void scc_ide_exit(void)
{
	ide_pci_unregister_driver(&scc_pci_driver);
}

module_exit(scc_ide_exit);
MODULE_DESCRIPTION("PCI driver module for Toshiba SCC IDE");
MODULE_LICENSE("GPL");