More meth updates.
[linux-2.6/linux-mips.git] / drivers / ide / tcq.c
blobf6ce02e194e2de3fa926dffa8f2062eccb1441e7
/*
 * Copyright (C) 2001, 2002 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Support for the DMA queued protocol, which enables ATA disk drives to
 * use tagged command queueing.
 */
22 #include <linux/config.h>
23 #include <linux/module.h>
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/init.h>
27 #include <linux/interrupt.h>
28 #include <linux/hdreg.h>
29 #include <linux/ide.h>
31 #include <asm/delay.h>
/*
 * warning: it will be _very_ verbose if defined
 */
36 #undef IDE_TCQ_DEBUG
38 #ifdef IDE_TCQ_DEBUG
39 #define TCQ_PRINTK printk
40 #else
41 #define TCQ_PRINTK(x...)
42 #endif
/*
 * use nIEN or not
 */
47 #undef IDE_TCQ_NIEN
/*
 * We are leaving the SERVICE interrupt alone, IBM drives have it
 * on per default and it can't be turned off. Doesn't matter, this
 * is the sane config.
 */
54 #undef IDE_TCQ_FIDDLE_SI
56 static ide_startstop_t ide_dmaq_intr(struct ata_device *drive, struct request *rq);
57 static ide_startstop_t service(struct ata_device *drive, struct request *rq);
59 static ide_startstop_t tcq_nop_handler(struct ata_device *drive, struct request *rq)
61 unsigned long flags;
62 struct ata_taskfile *args = rq->special;
63 struct ata_channel *ch = drive->channel;
65 local_irq_enable();
67 spin_lock_irqsave(ch->lock, flags);
69 blkdev_dequeue_request(rq);
70 drive->rq = NULL;
71 end_that_request_last(rq);
73 spin_unlock_irqrestore(ch->lock, flags);
75 kfree(args);
77 return ATA_OP_FINISHED;
81 * If we encounter _any_ error doing I/O to one of the tags, we must
82 * invalidate the pending queue. Clear the software busy queue and requeue
83 * on the request queue for restart. Issue a WIN_NOP to clear hardware queue.
85 static void tcq_invalidate_queue(struct ata_device *drive)
87 struct ata_channel *ch = drive->channel;
88 request_queue_t *q = &drive->queue;
89 struct ata_taskfile *ar;
90 struct request *rq;
91 unsigned long flags;
93 printk(KERN_INFO "ATA: %s: invalidating pending queue (%d)\n", drive->name, ata_pending_commands(drive));
/* serialize against the interrupt handler for the whole teardown */
95 spin_lock_irqsave(ch->lock, flags);
/* per-command timeout no longer applies once the queue is being torn down */
97 del_timer(&ch->timer);
/* stop any DMA transfer that is still in flight */
99 if (test_bit(IDE_DMA, ch->active))
100 udma_stop(drive);
/* give all outstanding tagged requests back to the request queue */
102 blk_queue_invalidate_tags(q);
/* fall back to single-command operation until TCQ is re-enabled */
104 drive->using_tcq = 0;
105 drive->queue_depth = 1;
106 clear_bit(IDE_BUSY, ch->active);
107 clear_bit(IDE_DMA, ch->active);
108 ch->handler = NULL;
111 * Do some internal stuff -- we really need this command to be
112 * executed before any new commands are started. issue a NOP
113 * to clear internal queue on drive.
/* GFP_ATOMIC: we hold ch->lock with interrupts disabled here */
115 ar = kmalloc(sizeof(*ar), GFP_ATOMIC);
116 if (!ar) {
117 printk(KERN_ERR "ATA: %s: failed to issue NOP\n", drive->name);
118 goto out;
/* either direction will do -- we only need a request to carry the NOP */
121 rq = __blk_get_request(&drive->queue, READ);
122 if (!rq)
123 rq = __blk_get_request(&drive->queue, WRITE);
126 * blk_queue_invalidate_tags() just added back at least one command
127 * to the free list, so there _must_ be at least one free.
129 BUG_ON(!rq);
131 /* WIN_NOP is a special request so set it's flags ?? */
132 rq->flags = REQ_SPECIAL;
133 rq->special = ar;
134 ar->cmd = WIN_NOP;
/* tcq_nop_handler() ends the request and frees 'ar' on completion */
135 ar->XXX_handler = tcq_nop_handler;
136 ar->command_type = IDE_DRIVE_TASK_NO_DATA;
/* queue the NOP; insertion-mode arguments per _elv_add_request() */
138 _elv_add_request(q, rq, 0, 0);
140 out:
141 #ifdef IDE_TCQ_NIEN
142 ata_irq_enable(drive, 1);
143 #endif
146 * start doing stuff again
/* NOTE(review): request_fn runs with ch->lock still held -- presumably the queue-lock convention of this IDE layer; confirm */
148 q->request_fn(q);
149 spin_unlock_irqrestore(ch->lock, flags);
150 printk(KERN_DEBUG "ATA: tcq_invalidate_queue: done\n");
153 static void ata_tcq_irq_timeout(unsigned long data)
155 struct ata_device *drive = (struct ata_device *) data;
156 struct ata_channel *ch = drive->channel;
157 unsigned long flags;
159 printk(KERN_ERR "ATA: %s: timeout waiting for interrupt...\n", __FUNCTION__);
161 spin_lock_irqsave(ch->lock, flags);
163 if (test_and_set_bit(IDE_BUSY, ch->active))
164 printk(KERN_ERR "ATA: %s: IRQ handler not busy\n", __FUNCTION__);
165 if (!ch->handler)
166 printk(KERN_ERR "ATA: %s: missing ISR!\n", __FUNCTION__);
168 spin_unlock_irqrestore(ch->lock, flags);
171 * if pending commands, try service before giving up
173 if (ata_pending_commands(drive) && !ata_status(drive, 0, SERVICE_STAT))
174 if (service(drive, drive->rq) == ATA_OP_CONTINUES)
175 return;
177 if (drive)
178 tcq_invalidate_queue(drive);
181 static void __set_irq(struct ata_channel *ch, ata_handler_t *handler)
184 * always just bump the timer for now, the timeout handling will
185 * have to be changed to be per-command
187 * FIXME: Jens - this is broken it will interfere with
188 * the normal timer function on serialized drives!
191 ch->timer.function = ata_tcq_irq_timeout;
192 ch->timer.data = (unsigned long) ch->drive;
193 mod_timer(&ch->timer, jiffies + 5 * HZ);
194 ch->handler = handler;
197 static void set_irq(struct ata_device *drive, ata_handler_t *handler)
199 struct ata_channel *ch = drive->channel;
200 unsigned long flags;
202 spin_lock_irqsave(ch->lock, flags);
203 __set_irq(ch, handler);
204 spin_unlock_irqrestore(ch->lock, flags);
208 * wait 400ns, then poll for busy_mask to clear from alt status
210 #define IDE_TCQ_WAIT (10000)
211 static int wait_altstat(struct ata_device *drive, u8 *stat, u8 busy_mask)
213 int i = 0;
215 udelay(1);
217 while ((*stat = GET_ALTSTAT()) & busy_mask) {
218 if (unlikely(i++ > IDE_TCQ_WAIT))
219 return 1;
221 udelay(10);
224 return 0;
227 static ide_startstop_t udma_tcq_start(struct ata_device *drive, struct request *rq);
230 * issue SERVICE command to drive -- drive must have been selected first,
231 * and it must have reported a need for service (status has SERVICE_STAT set)
233 * Also, nIEN must be set as not to need protection against ide_dmaq_intr
235 static ide_startstop_t service(struct ata_device *drive, struct request *rq)
237 struct ata_channel *ch = drive->channel;
238 unsigned long flags;
239 u8 feat, stat;
240 int tag;
242 TCQ_PRINTK("%s: started service\n", drive->name);
245 * Could be called with IDE_DMA in-progress from invalidate
246 * handler, refuse to do anything.
248 if (test_bit(IDE_DMA, drive->channel->active))
249 return ATA_OP_FINISHED;
252 * need to select the right drive first...
254 if (drive != drive->channel->drive)
255 ata_select(drive, 10);
257 #ifdef IDE_TCQ_NIEN
/* mask the drive interrupt while the SERVICE handshake is in progress */
258 ata_irq_enable(drive, 0);
259 #endif
261 * send SERVICE, wait 400ns, wait for BUSY_STAT to clear
263 OUT_BYTE(WIN_QUEUED_SERVICE, IDE_COMMAND_REG);
/* drive never came out of BUSY -- give up on the whole queue */
265 if (wait_altstat(drive, &stat, BUSY_STAT)) {
266 ata_dump(drive, rq, "BUSY clear took too long");
267 tcq_invalidate_queue(drive);
269 return ATA_OP_FINISHED;
272 #ifdef IDE_TCQ_NIEN
273 ata_irq_enable(drive, 1);
274 #endif
277 * FIXME, invalidate queue
279 if (stat & ERR_STAT) {
280 ata_dump(drive, rq, "ERR condition");
281 tcq_invalidate_queue(drive);
283 return ATA_OP_FINISHED;
287 * should not happen, a buggy device could introduce loop
289 if ((feat = GET_FEAT()) & NSEC_REL) {
290 drive->rq = NULL;
291 printk("%s: release in service\n", drive->name);
292 return ATA_OP_FINISHED;
/* the tag the drive wants serviced is in feature-register bits 7:3 */
295 tag = feat >> 3;
297 TCQ_PRINTK("%s: stat %x, feat %x\n", __FUNCTION__, stat, feat);
299 spin_lock_irqsave(ch->lock, flags);
/* map the hardware tag back to its block-layer request */
301 rq = blk_queue_find_tag(&drive->queue, tag);
302 if (!rq) {
303 printk(KERN_ERR"%s: missing request for tag %d\n", __FUNCTION__, tag);
304 spin_unlock_irqrestore(ch->lock, flags);
305 return ATA_OP_FINISHED;
/* make this the active request for the completion interrupt */
308 drive->rq = rq;
310 spin_unlock_irqrestore(ch->lock, flags);
312 * we'll start a dma read or write, device will trigger
313 * interrupt to indicate end of transfer, release is not allowed
315 TCQ_PRINTK("%s: starting command %x\n", __FUNCTION__, stat);
317 return udma_tcq_start(drive, rq);
320 static ide_startstop_t check_service(struct ata_device *drive, struct request *rq)
322 TCQ_PRINTK("%s: %s\n", drive->name, __FUNCTION__);
324 if (!ata_pending_commands(drive))
325 return ATA_OP_FINISHED;
327 if (!ata_status(drive, 0, SERVICE_STAT))
328 return service(drive, rq);
331 * we have pending commands, wait for interrupt
333 set_irq(drive, ide_dmaq_intr);
335 return ATA_OP_CONTINUES;
338 static ide_startstop_t dmaq_complete(struct ata_device *drive, struct request *rq)
340 u8 dma_stat;
343 * transfer was in progress, stop DMA engine
345 dma_stat = udma_stop(drive);
348 * must be end of I/O, check status and complete as necessary
350 if (!ata_status(drive, READY_STAT, drive->bad_wstat | DRQ_STAT)) {
351 ata_dump(drive, rq, __FUNCTION__);
352 tcq_invalidate_queue(drive);
354 return ATA_OP_FINISHED;
357 if (dma_stat)
358 printk("%s: bad DMA status (dma_stat=%x)\n", drive->name, dma_stat);
360 TCQ_PRINTK("%s: ending %p, tag %d\n", __FUNCTION__, rq, rq->tag);
362 ata_end_request(drive, rq, !dma_stat, rq->nr_sectors);
365 * we completed this command, check if we can service a new command
367 return check_service(drive, rq);
371 * Interrupt handler for queued dma operations. this can be entered for two
372 * reasons:
374 * 1) device has completed dma transfer
375 * 2) service request to start a command
377 * if the drive has an active tag, we first complete that request before
378 * processing any pending SERVICE.
380 static ide_startstop_t ide_dmaq_intr(struct ata_device *drive, struct request *rq)
382 int ok;
384 ok = !ata_status(drive, 0, SERVICE_STAT);
385 TCQ_PRINTK("%s: stat=%x\n", __FUNCTION__, drive->status);
388 * If a command completion interrupt is pending, do that first and
389 * check service afterwards.
391 if (rq)
392 return dmaq_complete(drive, rq);
395 * service interrupt
397 if (ok) {
398 TCQ_PRINTK("%s: SERV (stat=%x)\n", __FUNCTION__, drive->status);
399 return service(drive, rq);
402 printk("%s: stat=%x, not expected\n", __FUNCTION__, drive->status);
404 return check_service(drive, rq);
408 * Check if the ata adapter this drive is attached to supports the
409 * NOP auto-poll for multiple tcq enabled drives on one channel.
411 static int check_autopoll(struct ata_device *drive)
413 struct ata_channel *ch = drive->channel;
414 struct ata_taskfile args;
415 int drives = 0, i;
418 * only need to probe if both drives on a channel support tcq
420 for (i = 0; i < MAX_DRIVES; i++)
421 if (drive->channel->drives[i].present &&drive->type == ATA_DISK)
422 drives++;
424 if (drives <= 1)
425 return 0;
428 * do taskfile and check ABRT bit -- intelligent adapters will not
429 * pass NOP with sub-code 0x01 to device, so the command will not
430 * fail there
432 memset(&args, 0, sizeof(args));
433 args.taskfile.feature = 0x01;
434 args.cmd = WIN_NOP;
435 ide_raw_taskfile(drive, &args, NULL);
436 if (args.taskfile.feature & ABRT_ERR)
437 return 1;
439 ch->auto_poll = 1;
440 printk("%s: NOP Auto-poll enabled\n", ch->name);
441 return 0;
445 * configure the drive for tcq
447 static int configure_tcq(struct ata_device *drive)
449 int tcq_mask = 1 << 1 | 1 << 14;
450 int tcq_bits = tcq_mask | 1 << 15;
451 struct ata_taskfile args;
454 * bit 14 and 1 must be set in word 83 of the device id to indicate
455 * support for dma queued protocol, and bit 15 must be cleared
457 if ((drive->id->command_set_2 & tcq_bits) ^ tcq_mask)
458 return -EIO;
460 memset(&args, 0, sizeof(args));
461 args.taskfile.feature = SETFEATURES_EN_WCACHE;
462 args.cmd = WIN_SETFEATURES;
463 if (ide_raw_taskfile(drive, &args, NULL)) {
464 printk("%s: failed to enable write cache\n", drive->name);
465 return 1;
469 * disable RELease interrupt, it's quicker to poll this after
470 * having sent the command opcode
472 memset(&args, 0, sizeof(args));
473 args.taskfile.feature = SETFEATURES_DIS_RI;
474 args.cmd = WIN_SETFEATURES;
475 if (ide_raw_taskfile(drive, &args, NULL)) {
476 printk("%s: disabling release interrupt fail\n", drive->name);
477 return 1;
480 #ifdef IDE_TCQ_FIDDLE_SI
482 * enable SERVICE interrupt
484 memset(&args, 0, sizeof(args));
485 args.taskfile.feature = SETFEATURES_EN_SI;
486 args.cmd = WIN_SETFEATURES;
487 if (ide_raw_taskfile(drive, &args, NULL)) {
488 printk("%s: enabling service interrupt fail\n", drive->name);
489 return 1;
491 #endif
493 return 0;
496 static int tcq_wait_dataphase(struct ata_device *drive)
498 int i;
500 while (!ata_status(drive, 0, BUSY_STAT))
501 udelay(10);
503 if (ata_status(drive, READY_STAT | DRQ_STAT, drive->bad_wstat))
504 return 0;
506 i = 0;
507 udelay(1);
508 while (!ata_status(drive, READY_STAT | DRQ_STAT, drive->bad_wstat)) {
509 ++i;
510 if (i > IDE_TCQ_WAIT)
511 return 1;
513 udelay(10);
516 return 0;
519 /****************************************************************************
520 * UDMA transfer handling functions.
524 * Invoked from a SERVICE interrupt, command etc already known. Just need to
525 * start the dma engine for this tag.
527 static ide_startstop_t udma_tcq_start(struct ata_device *drive, struct request *rq)
529 struct ata_channel *ch = drive->channel;
531 TCQ_PRINTK("%s: setting up queued %d\n", __FUNCTION__, rq->tag);
532 if (!test_bit(IDE_BUSY, ch->active))
533 printk("queued_rw: IDE_BUSY not set\n");
535 if (tcq_wait_dataphase(drive))
536 return ATA_OP_FINISHED;
538 if (ata_start_dma(drive, rq))
539 return ATA_OP_FINISHED;
541 __set_irq(ch, ide_dmaq_intr);
542 udma_start(drive, rq);
544 return ATA_OP_CONTINUES;
548 * Start a queued command from scratch.
/*
 * Issue the queued command taskfile for rq->tag. Depending on the
 * feature register the drive either releases the bus (then we wait for
 * a SERVICE interrupt) or keeps it and we start DMA immediately.
 */
550 ide_startstop_t udma_tcq_init(struct ata_device *drive, struct request *rq)
552 u8 stat;
553 u8 feat;
555 struct ata_taskfile *args = rq->special;
557 TCQ_PRINTK("%s: start tag %d\n", drive->name, rq->tag);
560 * set nIEN, tag start operation will enable again when
561 * it is safe
563 #ifdef IDE_TCQ_NIEN
564 ata_irq_enable(drive, 0);
565 #endif
567 OUT_BYTE(args->cmd, IDE_COMMAND_REG);
/* drive stayed BUSY too long -- tear down the pending queue */
569 if (wait_altstat(drive, &stat, BUSY_STAT)) {
570 ata_dump(drive, rq, "queued start");
571 tcq_invalidate_queue(drive);
572 return ATA_OP_FINISHED;
575 #ifdef IDE_TCQ_NIEN
576 ata_irq_enable(drive, 1);
577 #endif
/* command rejected outright; note the queue is NOT invalidated here */
579 if (stat & ERR_STAT) {
580 ata_dump(drive, rq, "tcq_start");
581 return ATA_OP_FINISHED;
585 * drive released the bus, clear active tag and
586 * check for service
588 if ((feat = GET_FEAT()) & NSEC_REL) {
589 drive->immed_rel++;
590 drive->rq = NULL;
591 set_irq(drive, ide_dmaq_intr);
593 TCQ_PRINTK("REL in queued_start\n");
/* drive may already be requesting service for another tag */
595 if (!ata_status(drive, 0, SERVICE_STAT))
596 return service(drive, rq);
598 return ATA_OP_RELEASED;
/* no bus release: the drive wants the data phase right away */
601 TCQ_PRINTK("IMMED in queued_start\n");
602 drive->immed_comp++;
604 return udma_tcq_start(drive, rq);
608 * For now assume that command list is always as big as we need and don't
609 * attempt to shrink it on tcq disable.
611 int udma_tcq_enable(struct ata_device *drive, int on)
613 int depth = drive->using_tcq ? drive->queue_depth : 0;
616 * disable or adjust queue depth
618 if (!on) {
619 if (drive->using_tcq)
620 printk("%s: TCQ disabled\n", drive->name);
621 drive->using_tcq = 0;
622 return 0;
625 if (configure_tcq(drive)) {
626 drive->using_tcq = 0;
627 return 1;
631 * enable block tagging
633 if (!blk_queue_tagged(&drive->queue))
634 blk_queue_init_tags(&drive->queue, IDE_MAX_TAG);
637 * check auto-poll support
639 check_autopoll(drive);
641 if (depth != drive->queue_depth)
642 printk("%s: tagged command queueing enabled, command queue depth %d\n", drive->name, drive->queue_depth);
644 drive->using_tcq = 1;
645 return 0;
648 /* FIXME: This should go away! */
649 EXPORT_SYMBOL(udma_tcq_enable);