/**** vi:set ts=8 sts=8 sw=8:************************************************
 *
 * Copyright (C) 2002 Marcin Dalecki <martin@dalecki.de>
 *
 * Based on previous work by:
 *
 * Copyright (c) 1999-2000 Andre Hedrick <andre@linux-ide.org>
 * Copyright (c) 1995-1998 Mark Lord
 *
 * May be copied or modified under the terms of the GNU General Public License.
 *
 * These are the generic BM DMA (bus-master DMA) support functions for
 * PCI bus based systems.
 */
#include <linux/config.h>
#define __NO_VERSION__
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/ide.h>

#include <asm/io.h>
#include <asm/irq.h>
#define DEFAULT_BMIBA	0xe800	/* in case BIOS did not init it */
#define DEFAULT_BMCRBA	0xcc00	/* VIA's default value */
#define DEFAULT_BMALIBA	0xd400	/* ALI's default value */
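
/*
 * Each channel owns an 8 byte bus-master register block: the command
 * register at dma_base + 0 (bit 0 starts/stops the engine, bit 3 selects
 * the transfer direction), the status register at dma_base + 2 (bit 0
 * engine active, bit 1 DMA error, bit 2 interrupt pending, bits 5/6 the
 * per-drive "DMA capable" latches) and the physical PRD table address at
 * dma_base + 4.
 */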
/*
 * This is the handler for disk read/write DMA interrupts.
 */
ide_startstop_t ide_dma_intr(struct ata_device *drive, struct request *rq)
{
	u8 dma_stat;

	dma_stat = udma_stop(drive);

	if (ata_status(drive, DRIVE_READY, drive->bad_wstat | DRQ_STAT)) {
		if (!dma_stat) {
			ata_end_request(drive, rq, 1, rq->nr_sectors);

			return ATA_OP_FINISHED;
		}
		printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n",
				drive->name, dma_stat);
	}

	return ata_error(drive, rq, __FUNCTION__);
}
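
/*
 * Build a scatter/gather list out of the request about to be transferred
 * and record the DMA mapping direction for the channel.
 */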
/*
 * FIXME: taskfiles should be a map of pages, not a long virt address... /jens
 * FIXME: I agree with Jens --mdcki!
 */
static int build_sglist(struct ata_device *drive, struct request *rq)
{
	struct ata_channel *ch = drive->channel;
	struct scatterlist *sg = ch->sg_table;
	int nents = 0;

	if ((rq->flags & REQ_SPECIAL) && (drive->type == ATA_DISK)) {
		struct ata_taskfile *args = rq->special;
#if 1
		unsigned char *virt_addr = rq->buffer;
		int sector_count = rq->nr_sectors;
#else
		nents = blk_rq_map_sg(&drive->queue, rq, ch->sg_table);

		if (nents > rq->nr_segments)
			printk("ide-dma: received %d segments, build %d\n",
					rq->nr_segments, nents);
#endif

		if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
			ch->sg_dma_direction = PCI_DMA_TODEVICE;
		else
			ch->sg_dma_direction = PCI_DMA_FROMDEVICE;

		/*
		 * FIXME: This depends upon a hard coded page size!
		 */
		if (sector_count > 128) {
			memset(&sg[nents], 0, sizeof(*sg));

			sg[nents].page = virt_to_page(virt_addr);
			sg[nents].offset = (unsigned long) virt_addr & ~PAGE_MASK;
			sg[nents].length = 128 * SECTOR_SIZE;
			nents++;
			virt_addr = virt_addr + (128 * SECTOR_SIZE);
			sector_count -= 128;
		}

		memset(&sg[nents], 0, sizeof(*sg));
		sg[nents].page = virt_to_page(virt_addr);
		sg[nents].offset = (unsigned long) virt_addr & ~PAGE_MASK;
		sg[nents].length = sector_count * SECTOR_SIZE;
		nents++;
	} else {
		nents = blk_rq_map_sg(&drive->queue, rq, ch->sg_table);

		if (rq->q && nents > rq->nr_phys_segments)
			printk("ide-dma: received %d phys segments, build %d\n",
					rq->nr_phys_segments, nents);

		if (rq_data_dir(rq) == READ)
			ch->sg_dma_direction = PCI_DMA_FROMDEVICE;
		else
			ch->sg_dma_direction = PCI_DMA_TODEVICE;
	}

	return pci_map_sg(ch->pci_dev, sg, nents, ch->sg_dma_direction);
}
/*
 * DMA status bits: 1 dma-ing, 2 error, 4 intr.
 */
static ide_startstop_t dma_timer_expiry(struct ata_device *drive,
		struct request *rq, unsigned long *wait)
{
	/* FIXME: What's that? */
	u8 dma_stat = inb(drive->channel->dma_base + 2);

	printk("%s: dma_timer_expiry: dma status == 0x%02x\n",
			drive->name, dma_stat);

	drive->expiry = NULL;	/* one free ride for now */

	if (dma_stat & 2) {	/* ERROR */
		ata_status(drive, 0, 0);

		return ata_error(drive, rq, __FUNCTION__);
	}
	if (dma_stat & 1) {	/* DMAing */
		*wait = WAIT_CMD;

		return ATA_OP_CONTINUES;
	}

	return ATA_OP_FINISHED;
}
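
/*
 * Program the bus-master registers for a transfer without actually
 * starting the engine: load the PRD table address, set the direction
 * bit (bit 3 of the command register: set for reads, i.e. writes to
 * memory) and clear any stale INTR/ERROR status bits. Returns nonzero
 * if no DMA table could be built, so that the caller can fall back to
 * PIO.
 */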
int ata_start_dma(struct ata_device *drive, struct request *rq)
{
	struct ata_channel *ch = drive->channel;
	unsigned long dma_base = ch->dma_base;
	unsigned int reading = 0;

	if (rq_data_dir(rq) == READ)
		reading = 1 << 3;

	/* try PIO instead of DMA */
	if (!udma_new_table(drive, rq))
		return 1;

	outl(ch->dmatable_dma, dma_base + 4);	/* PRD table */
	outb(reading, dma_base);		/* specify r/w */
	outb(inb(dma_base + 2) | 6, dma_base + 2); /* clear INTR & ERROR flags */

	return 0;
}
/* generic udma_setup() function for drivers having ->speedproc/tuneproc */
int udma_generic_setup(struct ata_device *drive, int map)
{
	struct hd_driveid *id = drive->id;
	struct ata_channel *ch = drive->channel;
	int mode;
	int on = 0;

	if (!id || (drive->type != ATA_DISK && ch->no_atapi_autodma))
		return 0;

	if ((map & XFER_UDMA_80W) && !eighty_ninty_three(drive))
		map &= ~XFER_UDMA_80W;

	if ((id->capability & 1) && ch->autodma && ch->speedproc) {

		/* Consult the list of known "bad" devices. */
		if (udma_black_list(drive))
			goto set_dma;

		mode = ata_timing_mode(drive, map);

		/* Device is UltraDMA capable. */
		if (mode & XFER_UDMA) {
			if ((on = !ch->speedproc(drive, mode)))
				goto set_dma;

			printk(KERN_WARNING "%s: UDMA auto-tune failed.\n",
					drive->name);

			map &= ~XFER_UDMA_ALL;
			mode = ata_timing_mode(drive, map);
		}

		/* Device is regular DMA capable. */
		if (mode & (XFER_SWDMA | XFER_MWDMA)) {
			if ((on = !ch->speedproc(drive, mode)))
				goto set_dma;

			printk(KERN_WARNING "%s: DMA auto-tune failed.\n",
					drive->name);
		}

		/* FIXME: this seems non-functional --bkz */
		/* Consult the list of known "good" devices. */
		if (udma_white_list(drive)) {

			if (id->eide_dma_time > 150)
				goto set_dma;

			printk(KERN_INFO "%s: device is on DMA whitelist.\n",
					drive->name);
			on = 1;
		}
	}

set_dma:
	if (!on && ch->tuneproc)
		ch->tuneproc(drive, 255);

	udma_enable(drive, on, !on);

	return 0;
}
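
/*
 * The checks below rely on the IDENTIFY DEVICE layout: word 88
 * (id->dma_ultra) holds the supported UDMA modes in the low byte and the
 * currently enabled mode in the high byte, so expressions like
 * (id->dma_ultra & (id->dma_ultra >> 8) & 7) test whether one of the
 * modes 0-2 is both supported and enabled. Words 62/63 (id->dma_1word,
 * id->dma_mword) use the same supported/enabled split for single- and
 * multi-word DMA, hence the 0x404 mode-2 test.
 */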
/*
 * Configure a device for DMA operation.
 */
int udma_pci_setup(struct ata_device *drive, int map)
{
	int config_allows_dma = 1;
	struct hd_driveid *id = drive->id;
	struct ata_channel *ch = drive->channel;

#ifdef CONFIG_IDEDMA_ONLYDISK
	if (drive->type != ATA_DISK)
		config_allows_dma = 0;
#endif

	if (id && (id->capability & 1) && ch->autodma && config_allows_dma) {
		/* Consult the list of known "bad" drives */
		if (udma_black_list(drive)) {
			udma_enable(drive, 0, 1);

			return 0;
		}

		/* Enable DMA on any drive that has UltraDMA (mode 6/7/?) enabled */
		if ((id->field_valid & 4) && (eighty_ninty_three(drive)))
			if ((id->dma_ultra & (id->dma_ultra >> 14) & 2)) {
				udma_enable(drive, 1, 1);

				return 0;
			}

		/* Enable DMA on any drive that has UltraDMA (mode 3/4/5) enabled */
		if ((id->field_valid & 4) && (eighty_ninty_three(drive)))
			if ((id->dma_ultra & (id->dma_ultra >> 11) & 7)) {
				udma_enable(drive, 1, 1);

				return 0;
			}

		/* Enable DMA on any drive that has UltraDMA (mode 0/1/2) enabled */
		if (id->field_valid & 4)	/* UltraDMA */
			if ((id->dma_ultra & (id->dma_ultra >> 8) & 7)) {
				udma_enable(drive, 1, 1);

				return 0;
			}

		/* Enable DMA on any drive that has mode2 DMA (multi or single) enabled */
		if (id->field_valid & 2)	/* regular DMA */
			if ((id->dma_mword & 0x404) == 0x404 ||
			    (id->dma_1word & 0x404) == 0x404) {
				udma_enable(drive, 1, 1);

				return 0;
			}

		/* Consult the list of known "good" drives */
		if (udma_white_list(drive)) {
			udma_enable(drive, 1, 1);

			return 0;
		}
	}

	udma_enable(drive, 0, 0);

	return 0;
}
/*
 * Needed for allowing full modular support of ide-driver.
 */
void ide_release_dma(struct ata_channel *ch)
{
	if (!ch->dma_base)
		return;

	if (ch->dmatable_cpu) {
		pci_free_consistent(ch->pci_dev,
				PRD_ENTRIES * PRD_BYTES,
				ch->dmatable_cpu,
				ch->dmatable_dma);
		ch->dmatable_cpu = NULL;
	}
	if (ch->sg_table) {
		kfree(ch->sg_table);
		ch->sg_table = NULL;
	}
	if ((ch->dma_extra) && (ch->unit == 0))
		release_region((ch->dma_base + 16), ch->dma_extra);
	release_region(ch->dma_base, 8);
	ch->dma_base = 0;
}
/****************************************************************************
 * PCI specific UDMA channel method implementations.
 */

/*
 * This is the generic part of the DMA setup used by the host chipset drivers
 * in the corresponding DMA setup method.
 *
 * FIXME: there are some places where this gets used directly for "error
 * recovery" in the ATAPI drivers. This was just plain wrong before, in
 * particular not portable, and only got uncovered now.
 */
void udma_pci_enable(struct ata_device *drive, int on, int verbose)
{
	struct ata_channel *ch = drive->channel;
	int unit;
	u64 addr;

	/* Fall back to the default implementation. */
	unit = (drive->select.b.unit & 0x01);
	addr = BLK_BOUNCE_HIGH;

	if (!on) {
		if (verbose)
			printk("%s: DMA disabled\n", drive->name);

		outb(inb(ch->dma_base + 2) & ~(1 << (5 + unit)), ch->dma_base + 2);
#ifdef CONFIG_BLK_DEV_IDE_TCQ
		udma_tcq_enable(drive, 0);
#endif
	}

	/* toggle bounce buffers */

	if (on && drive->type == ATA_DISK && drive->channel->highmem) {
		if (!PCI_DMA_BUS_IS_PHYS)
			addr = BLK_BOUNCE_ANY;
		else
			addr = drive->channel->pci_dev->dma_mask;
	}

	blk_queue_bounce_limit(&drive->queue, addr);

	drive->using_dma = on;

	if (on) {
		outb(inb(ch->dma_base + 2) | (1 << (5 + unit)), ch->dma_base + 2);
#ifdef CONFIG_BLK_DEV_IDE_TCQ_DEFAULT
		udma_tcq_enable(drive, 1);
#endif
	}
}
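
/*
 * A PRD table entry consists of two little-endian 32-bit words: the
 * physical base address of the memory region and a byte count in the
 * lower 16 bits of the second word (0 meaning 64 KB). Bit 31 of the
 * second word marks the end of the table.
 */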
/*
 * This prepares a dma request. Returns the number of mapped PRD entries,
 * or 0 if the request could not be mapped. May also be invoked from
 * trm290.c.
 */
int udma_new_table(struct ata_device *drive, struct request *rq)
{
	struct ata_channel *ch = drive->channel;
	unsigned int *table = ch->dmatable_cpu;
	int i;
	struct scatterlist *sg;

	ch->sg_nents = i = build_sglist(drive, rq);
	if (!i)
		return 0;

	BUG_ON(i > PRD_ENTRIES);

	sg = ch->sg_table;
	while (i--) {
		u32 cur_addr = sg_dma_address(sg);
		u32 cur_len = sg_dma_len(sg) & 0xffff;

		/* Delete this test after linux ~2.5.35, as we care
		   about performance in this loop. */
		BUG_ON(cur_len > ch->max_segment_size);

		*table++ = cpu_to_le32(cur_addr);
		*table++ = cpu_to_le32(cur_len);

		sg++;
	}

#ifdef CONFIG_BLK_DEV_TRM290
	if (ch->chipset == ide_trm290)
		*--table |= cpu_to_le32(0x80000000);
#endif

	return ch->sg_nents;
}
/*
 * Teardown mappings after DMA has completed.
 */
void udma_destroy_table(struct ata_channel *ch)
{
	pci_unmap_sg(ch->pci_dev, ch->sg_table, ch->sg_nents, ch->sg_dma_direction);
}
/*
 * Prepare the channel for a DMA transfer. Please note that only the broken
 * Pacific Digital host chip needs the request to be passed there to decide
 * about addressing modes.
 */
void udma_pci_start(struct ata_device *drive, struct request *rq)
{
	struct ata_channel *ch = drive->channel;
	unsigned long dma_base = ch->dma_base;

	/* Note that this is done *after* the cmd has been issued to the drive,
	 * as per the BM-IDE spec. The Promise Ultra33 doesn't work correctly
	 * when we do this part before issuing the drive cmd.
	 */
	outb(inb(dma_base) | 1, dma_base);	/* start DMA */
}
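
/*
 * Stop the engine and check how the transfer ended. A clean completion
 * leaves only the interrupt bit set in the status register, i.e.
 * (dma_stat & 7) == 4; anything else (engine still active or the error
 * bit set) is reported back to the caller.
 */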
int udma_pci_stop(struct ata_device *drive)
{
	struct ata_channel *ch = drive->channel;
	unsigned long dma_base = ch->dma_base;
	u8 dma_stat;

	outb(inb(dma_base) & ~1, dma_base);	/* stop DMA */
	dma_stat = inb(dma_base + 2);		/* get DMA status */
	outb(dma_stat | 6, dma_base + 2);	/* clear the INTR & ERROR bits */
	udma_destroy_table(ch);			/* purge DMA mappings */

	return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0; /* verify good DMA status */
}
/*
 * FIXME: This should be attached to a channel as we can see now!
 */
int udma_pci_irq_status(struct ata_device *drive)
{
	struct ata_channel *ch = drive->channel;
	u8 dma_stat;

	/* get DMA status */
	dma_stat = inb(ch->dma_base + 2);

	return (dma_stat & 4) == 4;	/* return 1 if INTR asserted */
}
void udma_pci_timeout(struct ata_device *drive)
{
	printk(KERN_ERR "%s: UDMA timeout!\n", drive->name);
}
void udma_pci_irq_lost(struct ata_device *drive)
{
}
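
/*
 * A host chipset driver typically calls ata_init_dma() from its channel
 * init code and then overrides whatever methods it implements itself.
 * Roughly (a sketch only -- my_chip_udma_setup is a hypothetical chipset
 * hook, the actual init path is chipset specific):
 *
 *	unsigned long dma_base = pci_resource_start(dev, 4) + 8 * ch->unit;
 *
 *	ata_init_dma(ch, dma_base);
 *	ch->udma_setup = my_chip_udma_setup;
 */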
/*
 * This can be called for a dynamically installed interface. Don't __init it.
 */
void ata_init_dma(struct ata_channel *ch, unsigned long dma_base)
{
	if (!request_region(dma_base, 8, ch->name)) {
		printk(KERN_ERR "ATA: ERROR: BM DMA ports already in use!\n");

		return;
	}

	printk(KERN_INFO "    %s: BM-DMA at 0x%04lx-0x%04lx",
			ch->name, dma_base, dma_base + 7);
	ch->dma_base = dma_base;
	ch->dmatable_cpu = pci_alloc_consistent(ch->pci_dev,
			PRD_ENTRIES * PRD_BYTES,
			&ch->dmatable_dma);
	if (ch->dmatable_cpu == NULL)
		goto dma_alloc_failure;

	ch->sg_table = kmalloc(sizeof(struct scatterlist) * PRD_ENTRIES,
			GFP_KERNEL);
	if (ch->sg_table == NULL) {
		pci_free_consistent(ch->pci_dev, PRD_ENTRIES * PRD_BYTES,
				ch->dmatable_cpu, ch->dmatable_dma);
		goto dma_alloc_failure;
	}

	/*
	 * We could just assign them, and then leave it up to the chipset
	 * specific code to override these after they've called this function.
	 */
	if (!ch->udma_setup)
		ch->udma_setup = udma_pci_setup;
	if (!ch->udma_enable)
		ch->udma_enable = udma_pci_enable;
	if (!ch->udma_start)
		ch->udma_start = udma_pci_start;
	if (!ch->udma_stop)
		ch->udma_stop = udma_pci_stop;
	if (!ch->udma_init)
		ch->udma_init = udma_pci_init;
	if (!ch->udma_irq_status)
		ch->udma_irq_status = udma_pci_irq_status;
	if (!ch->udma_timeout)
		ch->udma_timeout = udma_pci_timeout;
	if (!ch->udma_irq_lost)
		ch->udma_irq_lost = udma_pci_irq_lost;

	if (ch->chipset != ide_trm290) {
		u8 dma_stat = inb(dma_base + 2);
		printk(", BIOS settings: %s:%s, %s:%s",
				ch->drives[0].name, (dma_stat & 0x20) ? "DMA" : "pio",
				ch->drives[1].name, (dma_stat & 0x40) ? "DMA" : "pio");
	}
	printk("\n");

	return;

dma_alloc_failure:
	printk(" -- ERROR, UNABLE TO ALLOCATE DMA TABLES\n");
}
/*
 * This is the default read write function.
 *
 * It's exported only for host chips which use it for fallback or (too) late
 * capability checking.
 */
int udma_pci_init(struct ata_device *drive, struct request *rq)
{
	u8 cmd;

	if (ata_start_dma(drive, rq))
		return ATA_OP_FINISHED;

	/* No DMA transfers on ATAPI devices. */
	if (drive->type != ATA_DISK)
		return ATA_OP_CONTINUES;

	if (rq_data_dir(rq) == READ)
		cmd = 1;
	else
		cmd = 0;

	ata_set_handler(drive, ide_dma_intr, WAIT_CMD, dma_timer_expiry);
	if (drive->addressing)
		outb(cmd ? WIN_READDMA_EXT : WIN_WRITEDMA_EXT, IDE_COMMAND_REG);
	else
		outb(cmd ? WIN_READDMA : WIN_WRITEDMA, IDE_COMMAND_REG);

	udma_start(drive, rq);

	return ATA_OP_CONTINUES;
}
EXPORT_SYMBOL(ide_dma_intr);
EXPORT_SYMBOL(udma_pci_enable);
EXPORT_SYMBOL(udma_pci_start);
EXPORT_SYMBOL(udma_pci_stop);
EXPORT_SYMBOL(udma_pci_init);
EXPORT_SYMBOL(udma_pci_irq_status);
EXPORT_SYMBOL(udma_pci_timeout);
EXPORT_SYMBOL(udma_pci_irq_lost);