drivers/ide/ide-tcq.c
/*
 * Copyright (C) 2001, 2002 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 * Support for the DMA queued protocol, which enables ATA disk drives to
 * use tagged command queueing.
 */
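/*
 * Protocol sketch (editor's summary of the ATA-5 queued feature set):
 * READ/WRITE DMA QUEUED commands carry a 5-bit tag in bits 7:3 of the
 * sector count register. The drive either starts the transfer at once
 * or releases the bus (REL set in the interrupt reason register); a
 * released command is resumed later, when the drive raises SRV in the
 * status register and the host issues a SERVICE command for that tag.
 */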
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ide.h>

#include <asm/io.h>
#include <asm/delay.h>

/*
 * warning: it will be _very_ verbose if defined
 */
#undef IDE_TCQ_DEBUG

#ifdef IDE_TCQ_DEBUG
#define TCQ_PRINTK printk
#else
#define TCQ_PRINTK(x...)
#endif
/*
 * use nIEN or not
 */
#undef IDE_TCQ_NIEN

/*
 * we are leaving the SERVICE interrupt alone, IBM drives have it
 * on by default and it can't be turned off. Doesn't matter, this
 * is the sane config.
 */
#undef IDE_TCQ_FIDDLE_SI
/*
 * bad drive blacklist, for drives that report tcq capability but don't
 * work reliably with the default config. initially from freebsd table.
 */
struct ide_tcq_blacklist {
	char *model;
	char works;
	unsigned int max_sectors;
};
static struct ide_tcq_blacklist ide_tcq_blacklist[] = {
	{
		.model = "IBM-DTTA",
		.works = 1,
		.max_sectors = 128,
	},
	{
		.model = "IBM-DJNA",
		.works = 0,
	},
	{
		.model = "WDC AC",
		.works = 0,
	},
	{
		.model = NULL,
	},
};
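/*
 * entries match on a model-string prefix (see ide_find_drive_blacklist()
 * below), so e.g. a hypothetical "WDC AC26400B" id string would be
 * caught by the "WDC AC" entry above
 */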
ide_startstop_t ide_dmaq_intr(ide_drive_t *drive);
ide_startstop_t ide_service(ide_drive_t *drive);

static struct ide_tcq_blacklist *ide_find_drive_blacklist(ide_drive_t *drive)
{
	struct ide_tcq_blacklist *itb;
	int i = 0;

	do {
		itb = &ide_tcq_blacklist[i];

		if (!itb->model)
			break;

		if (!strncmp(drive->id->model, itb->model, strlen(itb->model)))
			return itb;

		i++;
	} while (1);

	return NULL;
}
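/*
 * nIEN is bit 1 (0x02) of the device control register; setting it masks
 * the drive's INTRQ line while a queued command is being issued. This
 * helper is compiled out unless IDE_TCQ_NIEN is defined above.
 */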
static inline void drive_ctl_nien(ide_drive_t *drive, int set)
{
#ifdef IDE_TCQ_NIEN
	if (IDE_CONTROL_REG) {
		int mask = set ? 0x02 : 0x00;

		/* no local hwif in scope here, go through the HWIF() accessor */
		HWIF(drive)->OUTB(drive->ctl | mask, IDE_CONTROL_REG);
	}
#endif
}
static ide_startstop_t ide_tcq_nop_handler(ide_drive_t *drive)
{
	ide_task_t *args = HWGROUP(drive)->rq->special;
	ide_hwif_t *hwif = HWIF(drive);
	int auto_poll_check = 0;
	u8 stat, err;

	if (args->tfRegister[IDE_FEATURE_OFFSET] & 0x01)
		auto_poll_check = 1;

	local_irq_enable();

	stat = hwif->INB(IDE_STATUS_REG);
	err = hwif->INB(IDE_ERROR_REG);
	ide_end_drive_cmd(drive, stat, err);

	/*
	 * do taskfile and check ABRT bit -- intelligent adapters will not
	 * pass NOP with sub-code 0x01 to device, so the command will not
	 * fail there
	 */
	if (auto_poll_check) {
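		/*
		 * FEATURE and ERROR share taskfile offset 1, and
		 * ide_end_drive_cmd() refills args->tfRegister[] from the
		 * drive, so this reads the ABRT bit of the error register
		 */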
		if (!(args->tfRegister[IDE_FEATURE_OFFSET] & ABRT_ERR)) {
			HWIF(drive)->auto_poll = 1;
			printk("%s: NOP Auto-poll enabled\n", HWIF(drive)->name);
		}
	}

	kfree(args);
	return ide_stopped;
}
/*
 * if we encounter _any_ error doing I/O to one of the tags, we must
 * invalidate the pending queue. clear the software busy queue and requeue
 * on the request queue for restart. issue a WIN_NOP to clear hardware queue
 */
static void ide_tcq_invalidate_queue(ide_drive_t *drive)
{
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	request_queue_t *q = &drive->queue;
	struct request *rq;
	unsigned long flags;

	printk("%s: invalidating tag queue (%d commands)\n", drive->name, ata_pending_commands(drive));

	/*
	 * first kill timer and block queue
	 */
	spin_lock_irqsave(&ide_lock, flags);

	del_timer(&hwgroup->timer);

	if (HWIF(drive)->dma)
		HWIF(drive)->ide_dma_end(drive);

	blk_queue_invalidate_tags(q);

	drive->using_tcq = 0;
	drive->queue_depth = 1;
	hwgroup->busy = 0;
	hwgroup->handler = NULL;

	spin_unlock_irqrestore(&ide_lock, flags);

	/*
	 * now kill hardware queue with a NOP
	 */
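	/*
	 * per the ATA queued feature set, issuing any command other than
	 * READ/WRITE DMA QUEUED or SERVICE while tags are outstanding
	 * makes the drive abort all queued commands, so the NOP below
	 * should empty the hardware queue as a side effect
	 */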
	rq = &hwgroup->wrq;
	ide_init_drive_cmd(rq);
	rq->buffer = hwgroup->cmd_buf;
	memset(rq->buffer, 0, sizeof(hwgroup->cmd_buf));
	rq->buffer[0] = WIN_NOP;
	ide_do_drive_cmd(drive, rq, ide_preempt);
}
void ide_tcq_intr_timeout(unsigned long data)
{
	ide_drive_t *drive = (ide_drive_t *) data;
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	ide_hwif_t *hwif = HWIF(drive);
	unsigned long flags;

	printk(KERN_ERR "ide_tcq_intr_timeout: timeout waiting for %s interrupt\n", hwgroup->rq ? "completion" : "service");

	spin_lock_irqsave(&ide_lock, flags);

	if (!hwgroup->busy)
		printk(KERN_ERR "ide_tcq_intr_timeout: hwgroup not busy\n");
	if (hwgroup->handler == NULL)
		printk(KERN_ERR "ide_tcq_intr_timeout: missing isr!\n");

	hwgroup->busy = 1;
	spin_unlock_irqrestore(&ide_lock, flags);

	/*
	 * if pending commands, try service before giving up
	 */
	if (ata_pending_commands(drive)) {
		u8 stat = hwif->INB(IDE_STATUS_REG);

		if ((stat & SRV_STAT) && (ide_service(drive) == ide_started))
			return;
	}

	if (drive)
		ide_tcq_invalidate_queue(drive);
}
void __ide_tcq_set_intr(ide_hwgroup_t *hwgroup, ide_handler_t *handler)
{
	/*
	 * always just bump the timer for now, the timeout handling will
	 * have to be changed to be per-command
	 */
	hwgroup->timer.function = ide_tcq_intr_timeout;
	hwgroup->timer.data = (unsigned long) hwgroup->drive;
	mod_timer(&hwgroup->timer, jiffies + 5 * HZ);

	hwgroup->handler = handler;
}

void ide_tcq_set_intr(ide_hwgroup_t *hwgroup, ide_handler_t *handler)
{
	unsigned long flags;

	spin_lock_irqsave(&ide_lock, flags);
	__ide_tcq_set_intr(hwgroup, handler);
	spin_unlock_irqrestore(&ide_lock, flags);
}
/*
 * wait 400ns, then poll for busy_mask to clear from alt status
 */
#define IDE_TCQ_WAIT	(10000)
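/*
 * with udelay(10) per iteration this bounds the poll loop below at
 * roughly 10000 * 10us = 100ms before giving up
 */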
int ide_tcq_wait_altstat(ide_drive_t *drive, byte *stat, byte busy_mask)
{
	ide_hwif_t *hwif = HWIF(drive);
	int i = 0;

	udelay(1);

	do {
		*stat = hwif->INB(IDE_ALTSTATUS_REG);

		if (!(*stat & busy_mask))
			break;

		if (unlikely(i++ > IDE_TCQ_WAIT))
			return 1;

		udelay(10);
	} while (1);

	return 0;
}
/*
 * issue SERVICE command to drive -- drive must have been selected first,
 * and it must have reported a need for service (status has SRV_STAT set)
 *
 * Also, nIEN must be set as not to need protection against ide_dmaq_intr
 */
ide_startstop_t ide_service(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned long flags;
	struct request *rq;
	byte feat, stat;
	int tag;

	TCQ_PRINTK("%s: started service\n", drive->name);

	/*
	 * could be called with IDE_DMA in-progress from invalidate
	 * handler, refuse to do anything
	 */
	if (hwif->dma)
		return ide_stopped;

	/*
	 * need to select the right drive first...
	 */
	if (drive != HWGROUP(drive)->drive) {
		SELECT_DRIVE(drive);
		udelay(10);
	}

	drive_ctl_nien(drive, 1);

	/*
	 * send SERVICE, wait 400ns, wait for BUSY_STAT to clear
	 */
	hwif->OUTB(WIN_QUEUED_SERVICE, IDE_COMMAND_REG);

	if (ide_tcq_wait_altstat(drive, &stat, BUSY_STAT)) {
		printk(KERN_ERR "ide_service: BUSY clear took too long\n");
		ide_dump_status(drive, "ide_service", stat);
		ide_tcq_invalidate_queue(drive);
		return ide_stopped;
	}

	drive_ctl_nien(drive, 0);

	/*
	 * FIXME, invalidate queue
	 */
	if (stat & ERR_STAT) {
		ide_dump_status(drive, "ide_service", stat);
		ide_tcq_invalidate_queue(drive);
		return ide_stopped;
	}

	/*
	 * should not happen, a buggy device could introduce loop
	 */
	feat = hwif->INB(IDE_NSECTOR_REG);
	if (feat & REL) {
		HWGROUP(drive)->rq = NULL;
		printk(KERN_ERR "%s: release in service\n", drive->name);
		return ide_stopped;
	}
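	/*
	 * after SERVICE, the drive returns the tag of the command that is
	 * ready for data transfer in bits 7:3 of the sector count
	 * (interrupt reason) register, hence the shift below
	 */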
	tag = feat >> 3;

	TCQ_PRINTK("ide_service: stat %x, feat %x\n", stat, feat);

	spin_lock_irqsave(&ide_lock, flags);

	if ((rq = blk_queue_find_tag(&drive->queue, tag))) {
		HWGROUP(drive)->rq = rq;

		/*
		 * we'll start a dma read or write, device will trigger
		 * interrupt to indicate end of transfer, release is not
		 * allowed
		 */
		TCQ_PRINTK("ide_service: starting command, stat=%x\n", stat);
		spin_unlock_irqrestore(&ide_lock, flags);
		return HWIF(drive)->ide_dma_queued_start(drive);
	}

	printk(KERN_ERR "ide_service: missing request for tag %d\n", tag);
	spin_unlock_irqrestore(&ide_lock, flags);
	return ide_stopped;
}
ide_startstop_t ide_check_service(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	byte stat;

	TCQ_PRINTK("%s: ide_check_service\n", drive->name);

	if (!ata_pending_commands(drive))
		return ide_stopped;

	stat = hwif->INB(IDE_STATUS_REG);
	if (stat & SRV_STAT)
		return ide_service(drive);

	/*
	 * we have pending commands, wait for interrupt
	 */
	TCQ_PRINTK("%s: wait for service interrupt\n", drive->name);
	ide_tcq_set_intr(HWGROUP(drive), ide_dmaq_intr);
	return ide_started;
}
ide_startstop_t ide_dmaq_complete(ide_drive_t *drive, struct request *rq, byte stat)
{
	byte dma_stat;

	/*
	 * transfer was in progress, stop DMA engine
	 */
	dma_stat = HWIF(drive)->ide_dma_end(drive);

	/*
	 * must be end of I/O, check status and complete as necessary
	 */
	if (unlikely(!OK_STAT(stat, READY_STAT, drive->bad_wstat | DRQ_STAT))) {
		printk(KERN_ERR "ide_dmaq_intr: %s: error status %x\n", drive->name, stat);
		ide_dump_status(drive, "ide_dmaq_complete", stat);
		ide_tcq_invalidate_queue(drive);
		return ide_stopped;
	}

	if (dma_stat)
		printk(KERN_WARNING "%s: bad DMA status (dma_stat=%x)\n", drive->name, dma_stat);

	TCQ_PRINTK("ide_dmaq_complete: ending %p, tag %d\n", rq, rq->tag);
	ide_end_request(drive, 1, rq->nr_sectors);

	/*
	 * we completed this command, check if we can service a new command
	 */
	return ide_check_service(drive);
}
/*
 * intr handler for queued dma operations. this can be entered for two
 * reasons:
 *
 * 1) device has completed dma transfer
 * 2) service request to start a command
 *
 * if the drive has an active tag, we first complete that request before
 * processing any pending SERVICE.
 */
ide_startstop_t ide_dmaq_intr(ide_drive_t *drive)
{
	struct request *rq = HWGROUP(drive)->rq;
	ide_hwif_t *hwif = HWIF(drive);
	byte stat = hwif->INB(IDE_STATUS_REG);

	TCQ_PRINTK("ide_dmaq_intr: stat=%x\n", stat);

	/*
	 * if a command completion interrupt is pending, do that first and
	 * check service afterwards
	 */
	if (rq) {
		TCQ_PRINTK("ide_dmaq_intr: completion\n");
		return ide_dmaq_complete(drive, rq, stat);
	}

	/*
	 * service interrupt
	 */
	if (stat & SRV_STAT) {
		TCQ_PRINTK("ide_dmaq_intr: SERV (stat=%x)\n", stat);
		return ide_service(drive);
	}

	printk("ide_dmaq_intr: stat=%x, not expected\n", stat);
	return ide_check_service(drive);
}
/*
 * check if the ata adapter this drive is attached to supports the
 * NOP auto-poll for multiple tcq enabled drives on one channel
 */
static int ide_tcq_check_autopoll(ide_drive_t *drive)
{
	ide_task_t *args;
	int i, drives;

	/*
	 * only need to probe if both drives on a channel support tcq
	 */
	for (i = 0, drives = 0; i < MAX_DRIVES; i++)
		if (HWIF(drive)->drives[i].present &&
		    HWIF(drive)->drives[i].media == ide_disk)
			drives++;

	if (drives <= 1)
		return 0;

	/*
	 * what a mess...
	 */
	args = kmalloc(sizeof(*args), GFP_ATOMIC);
	if (!args)
		return 1;

	memset(args, 0, sizeof(*args));

	args->tfRegister[IDE_FEATURE_OFFSET] = 0x01;
	args->tfRegister[IDE_COMMAND_OFFSET] = WIN_NOP;
	args->command_type = ide_cmd_type_parser(args);
	args->handler = ide_tcq_nop_handler;
	return ide_raw_taskfile(drive, args, NULL);
}
/*
 * configure the drive for tcq
 */
static int ide_tcq_configure(ide_drive_t *drive)
{
	int tcq_mask = 1 << 1 | 1 << 14;
	int tcq_bits = tcq_mask | 1 << 15;
	ide_task_t *args;

	/*
	 * bit 14 and 1 must be set in word 83 of the device id to indicate
	 * support for dma queued protocol, and bit 15 must be cleared
	 */
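	/*
	 * bits 15:14 reading 01b also mark word 83 itself as valid, so
	 * e.g. 0x4002 passes the check below, while 0x0002 (invalid id
	 * data) and 0xc002 (bit 15 set) both fail it
	 */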
	if ((drive->id->command_set_2 & tcq_bits) ^ tcq_mask)
		return -EIO;

	args = kmalloc(sizeof(*args), GFP_ATOMIC);
	if (!args)
		return -ENOMEM;

	memset(args, 0, sizeof(ide_task_t));

	args->tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
	args->tfRegister[IDE_FEATURE_OFFSET] = SETFEATURES_EN_WCACHE;
	args->command_type = ide_cmd_type_parser(args);

	if (ide_raw_taskfile(drive, args, NULL)) {
		printk(KERN_WARNING "%s: failed to enable write cache\n", drive->name);
		goto err;
	}

	/*
	 * disable RELease interrupt, it's quicker to poll this after
	 * having sent the command opcode
	 */
	memset(args, 0, sizeof(ide_task_t));
	args->tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
	args->tfRegister[IDE_FEATURE_OFFSET] = SETFEATURES_DIS_RI;
	args->command_type = ide_cmd_type_parser(args);

	if (ide_raw_taskfile(drive, args, NULL)) {
		printk(KERN_ERR "%s: failed to disable release interrupt\n", drive->name);
		goto err;
	}

#ifdef IDE_TCQ_FIDDLE_SI
	/*
	 * enable SERVICE interrupt
	 */
	memset(args, 0, sizeof(ide_task_t));
	args->tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
	args->tfRegister[IDE_FEATURE_OFFSET] = SETFEATURES_EN_SI;
	args->command_type = ide_cmd_type_parser(args);

	if (ide_raw_taskfile(drive, args, NULL)) {
		printk(KERN_ERR "%s: failed to enable service interrupt\n", drive->name);
		goto err;
	}
#endif

	kfree(args);
	return 0;
err:
	kfree(args);
	return -EIO;
}
/*
 * for now assume that command list is always as big as we need and don't
 * attempt to shrink it on tcq disable
 */
static int ide_enable_queued(ide_drive_t *drive, int on)
{
	struct ide_tcq_blacklist *itb;
	int depth = drive->using_tcq ? drive->queue_depth : 0;

	/*
	 * disable or adjust queue depth
	 */
	if (!on) {
		if (drive->using_tcq)
			printk(KERN_INFO "%s: TCQ disabled\n", drive->name);

		drive->using_tcq = 0;
		return 0;
	}

	if (ide_tcq_configure(drive)) {
		drive->using_tcq = 0;
		return 1;
	}

	/*
	 * some drives need limited transfer size in tcq
	 */
	itb = ide_find_drive_blacklist(drive);
	if (itb && itb->max_sectors) {
		if (itb->max_sectors > HWIF(drive)->rqsize)
			itb->max_sectors = HWIF(drive)->rqsize;

		blk_queue_max_sectors(&drive->queue, itb->max_sectors);
	}

	/*
	 * enable block tagging
	 */
	if (!blk_queue_tagged(&drive->queue))
		blk_queue_init_tags(&drive->queue, IDE_MAX_TAG);

	/*
	 * check auto-poll support
	 */
	ide_tcq_check_autopoll(drive);

	if (depth != drive->queue_depth)
		printk(KERN_INFO "%s: tagged command queueing enabled, command queue depth %d\n", drive->name, drive->queue_depth);

	drive->using_tcq = 1;
	return 0;
}
int ide_tcq_wait_dataphase(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	byte stat;
	int i;

	do {
		stat = hwif->INB(IDE_STATUS_REG);
		if (!(stat & BUSY_STAT))
			break;

		udelay(10);
	} while (1);

	if (OK_STAT(stat, READY_STAT | DRQ_STAT, drive->bad_wstat))
		return 0;

	i = 0;
	udelay(1);
	do {
		stat = hwif->INB(IDE_STATUS_REG);

		if (OK_STAT(stat, READY_STAT | DRQ_STAT, drive->bad_wstat))
			break;

		++i;
		if (unlikely(i >= IDE_TCQ_WAIT))
			return 1;

		udelay(10);
	} while (1);

	return 0;
}
static int ide_tcq_check_blacklist(ide_drive_t *drive)
{
	struct ide_tcq_blacklist *itb = ide_find_drive_blacklist(drive);

	if (!itb)
		return 0;

	return !itb->works;
}
int __ide_dma_queued_on(ide_drive_t *drive)
{
	if (!drive->using_dma)
		return 1;
	if (HWIF(drive)->chipset == ide_pdc4030)
		return 1;
	if (ide_tcq_check_blacklist(drive)) {
		printk(KERN_WARNING "%s: tcq forbidden by blacklist\n",
					drive->name);
		return 1;
	}
	if (drive->next != drive) {
		printk(KERN_WARNING "%s: only one drive on a channel supported"
					" for tcq\n", drive->name);
		return 1;
	}
	if (ata_pending_commands(drive)) {
		printk(KERN_WARNING "ide-tcq: can't toggle tcq feature on "
					"busy drive\n");
		return 1;
	}

	return ide_enable_queued(drive, 1);
}

int __ide_dma_queued_off(ide_drive_t *drive)
{
	if (ata_pending_commands(drive)) {
		printk("ide-tcq: can't toggle tcq feature on busy drive\n");
		return 1;
	}

	return ide_enable_queued(drive, 0);
}
static ide_startstop_t ide_dma_queued_rw(ide_drive_t *drive, u8 command)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned long flags;
	byte stat, feat;

	TCQ_PRINTK("%s: starting tag\n", drive->name);

	/*
	 * set nIEN, tag start operation will enable again when
	 * it is safe
	 */
	drive_ctl_nien(drive, 1);

	TCQ_PRINTK("%s: sending cmd=%x\n", drive->name, command);
	hwif->OUTB(command, IDE_COMMAND_REG);

	if (ide_tcq_wait_altstat(drive, &stat, BUSY_STAT)) {
		printk("%s: alt stat timeout\n", drive->name);
		goto err;
	}

	drive_ctl_nien(drive, 0);

	if (stat & ERR_STAT)
		goto err;

	/*
	 * bus not released, start dma
	 */
	feat = hwif->INB(IDE_NSECTOR_REG);
	if (!(feat & REL)) {
		TCQ_PRINTK("IMMED in queued_start, feat=%x\n", feat);
		return hwif->ide_dma_queued_start(drive);
	}

	/*
	 * drive released the bus, clear active request and check for service
	 */
	spin_lock_irqsave(&ide_lock, flags);
	HWGROUP(drive)->rq = NULL;
	__ide_tcq_set_intr(HWGROUP(drive), ide_dmaq_intr);
	spin_unlock_irqrestore(&ide_lock, flags);

	TCQ_PRINTK("REL in queued_start\n");

	stat = hwif->INB(IDE_STATUS_REG);
	if (stat & SRV_STAT)
		return ide_service(drive);
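	/*
	 * ide_released tells the upper layer that the drive accepted the
	 * tag and released the bus, so further commands may be queued
	 * before this one completes
	 */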
	return ide_released;
err:
	ide_dump_status(drive, "rw_queued", stat);
	ide_tcq_invalidate_queue(drive);
	return ide_stopped;
}
ide_startstop_t __ide_dma_queued_read(ide_drive_t *drive)
{
	u8 command = WIN_READDMA_QUEUED;

	if (drive->addressing == 1)
		command = WIN_READDMA_QUEUED_EXT;

	return ide_dma_queued_rw(drive, command);
}

ide_startstop_t __ide_dma_queued_write(ide_drive_t *drive)
{
	u8 command = WIN_WRITEDMA_QUEUED;

	if (drive->addressing == 1)
		command = WIN_WRITEDMA_QUEUED_EXT;

	return ide_dma_queued_rw(drive, command);
}
ide_startstop_t __ide_dma_queued_start(ide_drive_t *drive)
{
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	struct request *rq = hwgroup->rq;
	ide_hwif_t *hwif = HWIF(drive);
	unsigned int reading = 0;

	TCQ_PRINTK("ide_dma: setting up queued tag=%d\n", rq->tag);

	if (!hwgroup->busy)
		printk(KERN_ERR "queued_rw: hwgroup not busy\n");

	if (ide_tcq_wait_dataphase(drive)) {
		printk(KERN_WARNING "timeout waiting for data phase\n");
		return ide_stopped;
	}

	if (rq_data_dir(rq) == READ)
		reading = 1 << 3;
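	/*
	 * 1 << 3 (0x08) is the read/write direction bit of the busmaster
	 * command register, in the form ide_start_dma() expects it
	 */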
	if (ide_start_dma(hwif, drive, reading))
		return ide_stopped;

	ide_tcq_set_intr(hwgroup, ide_dmaq_intr);

	if (!hwif->ide_dma_begin(drive))
		return ide_started;

	return ide_stopped;
}