/*
 *	Basic PIO and command management functionality.
 *
 * This code was split off from ide.c. See ide.c for history and original
 * copyrights.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/hdreg.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>
static int __ide_end_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, unsigned int nr_bytes, int dequeue)
{
	int ret = 1;
	int error = 0;

	if (uptodate <= 0)
		error = uptodate ? uptodate : -EIO;

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && error)
		nr_bytes = rq->hard_nr_sectors << 9;

	if (!blk_fs_request(rq) && error && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
	    drive->retry_pio <= 3) {
		drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
		ide_dma_on(drive);
	}

	if (!blk_end_request(rq, error, nr_bytes))
		ret = 0;

	if (ret == 0 && dequeue)
		drive->hwif->rq = NULL;

	return ret;
}
/**
 *	ide_end_request		-	complete an IDE I/O
 *	@drive: IDE device for the I/O
 *	@uptodate: status of the request: 1 for success, 0 or negative for error
 *	@nr_sectors: number of sectors completed
 *
 *	This is our end_request wrapper function. We complete the I/O,
 *	update the random number input and dequeue the request, which if
 *	it was tagged may be out of order.
 */

int ide_end_request(ide_drive_t *drive, int uptodate, int nr_sectors)
{
	unsigned int nr_bytes = nr_sectors << 9;
	struct request *rq = drive->hwif->rq;

	if (!nr_bytes) {
		if (blk_pc_request(rq))
			nr_bytes = rq->data_len;
		else
			nr_bytes = rq->hard_cur_sectors << 9;
	}

	return __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
}
EXPORT_SYMBOL(ide_end_request);
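
/*
 * Illustrative sketch, not part of this file: a subdriver's data-phase
 * handler typically retires completed sectors through ide_end_request()
 * and uses the return value to decide whether the request is finished.
 * The handler below is hypothetical.
 */
#if 0
static ide_startstop_t example_pio_intr(ide_drive_t *drive)
{
	/* retire the sector just transferred: uptodate=1, nr_sectors=1 */
	if (ide_end_request(drive, 1, 1) == 0)
		return ide_stopped;	/* request fully completed */

	/* more sectors pending: re-arm ourselves for the next interrupt */
	ide_set_handler(drive, example_pio_intr, WAIT_WORSTCASE, NULL);
	return ide_started;
}
#endif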
/**
 *	ide_end_dequeued_request	-	complete an IDE I/O
 *	@drive: IDE device for the I/O
 *	@rq: request being completed
 *	@uptodate: status of the request: 1 for success, 0 or negative for error
 *	@nr_sectors: number of sectors completed
 *
 *	Complete an I/O that is no longer on the request queue. This
 *	typically occurs when we pull the request and issue a REQUEST_SENSE.
 *	We must still finish the old request but we must not tamper with the
 *	queue in the meantime.
 *
 *	NOTE: This path does not handle barrier, but barrier is not supported
 *	on ide-cd anyway.
 */

int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, int nr_sectors)
{
	BUG_ON(!blk_rq_started(rq));

	return __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
}
EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
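
/*
 * Illustrative sketch, not part of this file: a request that has been
 * pulled off the queue (e.g. while a REQUEST_SENSE is outstanding) is
 * finished with ide_end_dequeued_request() so the queue itself is left
 * untouched.  The helper and its failed_rq argument are hypothetical.
 */
#if 0
static void example_finish_failed_rq(ide_drive_t *drive,
				     struct request *failed_rq)
{
	/* fail all sectors of the dequeued request: uptodate=0 */
	ide_end_dequeued_request(drive, failed_rq, 0,
				 failed_rq->hard_nr_sectors);
}
#endif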
/*
 * Read the completed command's taskfile back from the device and stash
 * the result; for taskfile requests the whole command is copied back to
 * the issuer through rq->special.  Dynamically allocated commands are
 * freed here as well.
 */
void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
{
	struct ide_taskfile *tf = &cmd->tf;
	struct request *rq = cmd->rq;

	tf->error = err;
	tf->status = stat;

	drive->hwif->tp_ops->tf_read(drive, cmd);

	if (rq && rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
		memcpy(rq->special, cmd, sizeof(*cmd));

	if (cmd->tf_flags & IDE_TFLAG_DYN)
		kfree(cmd);
}

/* Finish the request currently owned by the port, in one go. */
void ide_complete_rq(ide_drive_t *drive, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->rq;

	hwif->rq = NULL;

	rq->errors = err;

	if (unlikely(blk_end_request(rq, (rq->errors ? -EIO : 0),
				     blk_rq_bytes(rq))))
		BUG();
}
EXPORT_SYMBOL(ide_complete_rq);
/*
 * Fail the current request.  Driver-originated ide-floppy requests and
 * all ide-tape requests carry a driver-level error code.
 */
void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	u8 drv_req = blk_special_request(rq) && rq->rq_disk;
	u8 media = drive->media;

	drive->failed_pc = NULL;

	if ((media == ide_floppy && drv_req) || media == ide_tape)
		rq->errors = IDE_DRV_ERROR_GENERAL;

	if ((media == ide_floppy || media == ide_tape) && drv_req)
		ide_complete_rq(drive, 0);
	else
		ide_end_request(drive, 0, 0);
}
static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->sect;
	tf->lbal    = drive->sect;
	tf->lbam    = drive->cyl;
	tf->lbah    = drive->cyl >> 8;
	tf->device  = (drive->head - 1) | drive->select;
	tf->command = ATA_CMD_INIT_DEV_PARAMS;
}

static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->sect;
	tf->command = ATA_CMD_RESTORE;
}

static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->mult_req;
	tf->command = ATA_CMD_SET_MULTI;
}
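
/*
 * Worked example (illustrative, for a hypothetical drive with 63
 * sectors/track, 16 heads and 16383 cylinders): ide_tf_set_specify_cmd()
 * produces nsect = lbal = 63, lbam = 0xff and lbah = 0x3f (the 8-bit
 * registers take 16383 = 0x3fff a byte at a time), and device =
 * (16 - 1) | drive->select -- the classic CHS encoding expected by
 * ATA_CMD_INIT_DEV_PARAMS.
 */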
static ide_startstop_t ide_disk_special(ide_drive_t *drive)
{
	special_t *s = &drive->special;
	struct ide_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.data_phase = TASKFILE_NO_DATA;

	if (s->b.set_geometry) {
		s->b.set_geometry = 0;
		ide_tf_set_specify_cmd(drive, &cmd.tf);
	} else if (s->b.recalibrate) {
		s->b.recalibrate = 0;
		ide_tf_set_restore_cmd(drive, &cmd.tf);
	} else if (s->b.set_multmode) {
		s->b.set_multmode = 0;
		ide_tf_set_setmult_cmd(drive, &cmd.tf);
	} else if (s->all) {
		int special = s->all;
		s->all = 0;
		printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special);
		return ide_stopped;
	}

	cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
		       IDE_TFLAG_CUSTOM_HANDLER;

	do_rw_taskfile(drive, &cmd);

	return ide_started;
}
/**
 *	do_special		-	issue some special commands
 *	@drive: drive the command is for
 *
 *	do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
 *	ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
 *
 *	It used to do much more, but has been scaled back.
 */

static ide_startstop_t do_special(ide_drive_t *drive)
{
	special_t *s = &drive->special;

#ifdef DEBUG
	printk("%s: do_special: 0x%02x\n", drive->name, s->all);
#endif
	if (drive->media == ide_disk)
		return ide_disk_special(drive);

	s->all = 0;
	drive->mult_req = 0;

	return ide_stopped;
}
void ide_map_sg(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_cmd *cmd = &hwif->cmd;
	struct scatterlist *sg = hwif->sg_table;

	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
		cmd->sg_nents = 1;
	} else if (!rq->bio) {
		sg_init_one(sg, rq->data, rq->data_len);
		cmd->sg_nents = 1;
	} else
		cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
}
EXPORT_SYMBOL_GPL(ide_map_sg);
void ide_init_sg_cmd(struct ide_cmd *cmd, int nsect)
{
	cmd->nsect = cmd->nleft = nsect;
	cmd->cursg_ofs = 0;
	cmd->cursg = NULL;
}
EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
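
/*
 * Illustrative sketch, not part of this file: ide_init_sg_cmd() and
 * ide_map_sg() are used back to back when preparing a data-phase command,
 * exactly as execute_drive_cmd() does below.  The helper is hypothetical.
 */
#if 0
static void example_prep_data_cmd(ide_drive_t *drive, struct request *rq,
				  struct ide_cmd *cmd)
{
	/* record how many sectors the command will move ... */
	ide_init_sg_cmd(cmd, rq->nr_sectors);
	/* ... then build the scatter/gather table covering them */
	ide_map_sg(drive, rq);
}
#endif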
/**
 *	execute_drive_cmd	-	issue special drive command
 *	@drive: the drive to issue the command on
 *	@rq: the request structure holding the command
 *
 *	execute_drive_cmd() issues a special drive command, usually
 *	initiated by ioctl() from the external hdparm program. The
 *	command can be a drive command, drive task or taskfile
 *	operation. Weirdly you can call it with NULL to wait for
 *	all commands to finish. Don't do this as that is due to change.
 */

static ide_startstop_t execute_drive_cmd(ide_drive_t *drive,
					 struct request *rq)
{
	struct ide_cmd *cmd = rq->special;

	if (cmd) {
		switch (cmd->data_phase) {
		case TASKFILE_MULTI_OUT:
		case TASKFILE_OUT:
		case TASKFILE_MULTI_IN:
		case TASKFILE_IN:
			ide_init_sg_cmd(cmd, rq->nr_sectors);
			ide_map_sg(drive, rq);
		default:
			break;
		}

		return do_rw_taskfile(drive, cmd);
	}

	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	ide_complete_rq(drive, 0);

	return ide_stopped;
}
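
/*
 * Illustrative sketch, not part of this file: taskfile requests of the
 * kind handled above are normally built by the ioctl path, roughly as
 * below.  The helper is hypothetical; see ide_raw_taskfile() in
 * ide-taskfile.c for the real submission code.
 */
#if 0
static int example_submit_taskfile(ide_drive_t *drive, struct ide_cmd *cmd)
{
	struct request *rq;
	int error;

	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
	rq->special = cmd;	/* picked up by execute_drive_cmd() */

	error = blk_execute_rq(drive->queue, NULL, rq, 0);
	blk_put_request(rq);

	return error;
}
#endif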
static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{
	u8 cmd = rq->cmd[0];

	switch (cmd) {
	case REQ_PARK_HEADS:
	case REQ_UNPARK_HEADS:
		return ide_do_park_unpark(drive, rq);
	case REQ_DEVSET_EXEC:
		return ide_do_devset(drive, rq);
	case REQ_DRIVE_RESET:
		return ide_do_reset(drive);
	default:
		blk_dump_rq_flags(rq, "ide_special_rq - bad request");
		ide_end_request(drive, 0, 0);
		return ide_stopped;
	}
}
/**
 *	start_request	-	start of I/O and command issuing for IDE
 *
 *	start_request() initiates handling of a new I/O request. It
 *	accepts commands and I/O (read/write) requests.
 *
 *	FIXME: this function needs a rename
 */

static ide_startstop_t start_request(ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

	BUG_ON(!blk_rq_started(rq));

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
		drive->hwif->name, (unsigned long) rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		rq->cmd_flags |= REQ_FAILED;
		goto kill_rq;
	}

	if (blk_pm_request(rq))
		ide_check_pm_state(drive, rq);

	SELECT_DRIVE(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat,
			  ATA_BUSY | ATA_DRQ, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
		return startstop;
	}
	if (!drive->special.all) {
		struct ide_driver *drv;

		/*
		 * We reset the drive so we need to issue a SETFEATURES.
		 * Do it _after_ do_special() restored device parameters.
		 */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
			return execute_drive_cmd(drive, rq);
		else if (blk_pm_request(rq)) {
			struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
				drive->name, pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == IDE_PM_COMPLETED)
				ide_complete_pm_rq(drive, rq);
			return startstop;
		} else if (!rq->rq_disk && blk_special_request(rq))
			/*
			 * TODO: Once all ULDs have been modified to
			 * check for specific op codes rather than
			 * blindly accepting any special request, the
			 * check for ->rq_disk above may be replaced
			 * by a more suitable mechanism or even
			 * dropped entirely.
			 */
			return ide_special_rq(drive, rq);

		drv = *(struct ide_driver **)rq->rq_disk->private_data;

		return drv->do_request(drive, rq, rq->sector);
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);

	return ide_stopped;
}
/**
 *	ide_stall_queue		-	pause an IDE device
 *	@drive: drive to stall
 *	@timeout: time to stall for (jiffies)
 *
 *	ide_stall_queue() can be used by a drive to give excess bandwidth back
 *	to the port by sleeping for timeout jiffies.
 */

void ide_stall_queue(ide_drive_t *drive, unsigned long timeout)
{
	if (timeout > WAIT_WORSTCASE)
		timeout = WAIT_WORSTCASE;
	drive->sleep = timeout + jiffies;
	drive->dev_flags |= IDE_DFLAG_SLEEPING;
}
EXPORT_SYMBOL(ide_stall_queue);
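
/*
 * Illustrative sketch, not part of this file: a media driver that knows
 * its hardware will be busy for a while can yield the port instead of
 * polling.  The half-second figure is arbitrary; anything above
 * WAIT_WORSTCASE is clamped by ide_stall_queue() itself.
 */
#if 0
static void example_yield_port(ide_drive_t *drive)
{
	ide_stall_queue(drive, HZ / 2);	/* sleep for up to half a second */
}
#endif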
/*
 * Port/host locking helpers: a nonzero return from a lock function means
 * somebody else already holds the port (or host) busy.
 */
static inline int ide_lock_port(ide_hwif_t *hwif)
{
	if (hwif->busy)
		return 1;

	hwif->busy = 1;

	return 0;
}

static inline void ide_unlock_port(ide_hwif_t *hwif)
{
	hwif->busy = 0;
}

static inline int ide_lock_host(struct ide_host *host, ide_hwif_t *hwif)
{
	int rc = 0;

	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		rc = test_and_set_bit_lock(IDE_HOST_BUSY, &host->host_busy);
		if (rc == 0) {
			if (host->get_lock)
				host->get_lock(ide_intr, hwif);
		}
	}
	return rc;
}

static inline void ide_unlock_host(struct ide_host *host)
{
	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (host->release_lock)
			host->release_lock();
		clear_bit_unlock(IDE_HOST_BUSY, &host->host_busy);
	}
}
/*
 * Issue a new request to a device.
 */
void do_ide_request(struct request_queue *q)
{
	ide_drive_t	*drive = q->queuedata;
	ide_hwif_t	*hwif = drive->hwif;
	struct ide_host *host = hwif->host;
	struct request	*rq = NULL;
	ide_startstop_t	startstop;

	/*
	 * drive is doing pre-flush, ordered write, post-flush sequence. even
	 * though that is 3 requests, it must be seen as a single transaction.
	 * we must not preempt this drive until that is complete
	 */
	if (blk_queue_flushing(q))
		/*
		 * small race where queue could get replugged during
		 * the 3-request flush cycle, just yank the plug since
		 * we want it to finish asap
		 */
		blk_remove_plug(q);

	spin_unlock_irq(q->queue_lock);

	if (ide_lock_host(host, hwif))
		goto plug_device_2;

	spin_lock_irq(&hwif->lock);

	if (!ide_lock_port(hwif)) {
		ide_hwif_t *prev_port;
repeat:
		prev_port = hwif->host->cur_port;
		hwif->rq = NULL;

		if (drive->dev_flags & IDE_DFLAG_SLEEPING) {
			if (time_before(drive->sleep, jiffies)) {
				ide_unlock_port(hwif);
				goto plug_device;
			}
		}

		if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
		    hwif != prev_port) {
			/*
			 * set nIEN for previous port, drives in the
			 * quirk_list may not like intr setups/cleanups
			 */
			if (prev_port && prev_port->cur_dev->quirk_list == 0)
				prev_port->tp_ops->set_irq(prev_port, 0);

			hwif->host->cur_port = hwif;
		}
		hwif->cur_dev = drive;
		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);

		spin_unlock_irq(&hwif->lock);
		spin_lock_irq(q->queue_lock);
		/*
		 * we know that the queue isn't empty, but this can happen
		 * if the q->prep_rq_fn() decides to kill a request
		 */
		rq = elv_next_request(drive->queue);
		spin_unlock_irq(q->queue_lock);
		spin_lock_irq(&hwif->lock);

		if (!rq) {
			ide_unlock_port(hwif);
			goto out;
		}

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed. This is very important as
		 * blk_stop_queue() doesn't prevent the elv_next_request()
		 * above to return us whatever is in the queue. Since we call
		 * ide_do_request() ourselves, we end up taking requests while
		 * the queue is blocked...
		 *
		 * We let requests forced at head of queue with ide-preempt
		 * though. I hope that doesn't happen too much, hopefully not
		 * unless the subdriver triggers such a thing in its own PM
		 * state machine.
		 */
		if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
		    blk_pm_request(rq) == 0 &&
		    (rq->cmd_flags & REQ_PREEMPT) == 0) {
			/* there should be no pending command at this point */
			ide_unlock_port(hwif);
			goto plug_device;
		}

		hwif->rq = rq;

		spin_unlock_irq(&hwif->lock);
		startstop = start_request(drive, rq);
		spin_lock_irq(&hwif->lock);

		if (startstop == ide_stopped)
			goto repeat;
	} else
		goto plug_device;
out:
	spin_unlock_irq(&hwif->lock);
	if (rq == NULL)
		ide_unlock_host(host);
	spin_lock_irq(q->queue_lock);
	return;

plug_device:
	spin_unlock_irq(&hwif->lock);
	ide_unlock_host(host);
plug_device_2:
	spin_lock_irq(q->queue_lock);

	if (!elv_queue_empty(q))
		blk_plug_device(q);
}
static void ide_plug_device(ide_drive_t *drive)
{
	struct request_queue *q = drive->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (!elv_queue_empty(q))
		blk_plug_device(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
static int drive_is_ready(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 stat = 0;

	if (drive->waiting_for_dma)
		return hwif->dma_ops->dma_test_irq(drive);

	if (hwif->io_ports.ctl_addr &&
	    (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0)
		stat = hwif->tp_ops->read_altstatus(hwif);
	else
		/* Note: this may clear a pending IRQ!! */
		stat = hwif->tp_ops->read_status(hwif);

	if (stat & ATA_BUSY)
		/* drive busy: definitely not interrupting */
		return 0;

	/* drive ready: *might* be interrupting */
	return 1;
}
/**
 *	ide_timer_expiry	-	handle lack of an IDE interrupt
 *	@data: timer callback magic (hwif)
 *
 *	An IDE command has timed out before the expected drive return
 *	occurred. At this point we attempt to clean up the current
 *	mess. If the current handler includes an expiry handler then
 *	we invoke the expiry handler, and providing it is happy the
 *	work is done. If that fails we apply generic recovery rules
 *	invoking the handler and checking the drive DMA status. We
 *	have an excessively incestuous relationship with the DMA
 *	logic that wants cleaning up.
 */

void ide_timer_expiry(unsigned long data)
{
	ide_hwif_t	*hwif = (ide_hwif_t *)data;
	ide_drive_t	*uninitialized_var(drive);
	ide_handler_t	*handler;
	unsigned long	flags;
	unsigned long	wait = -1;
	int		plug_device = 0;

	spin_lock_irqsave(&hwif->lock, flags);

	handler = hwif->handler;

	if (handler == NULL || hwif->req_gen != hwif->req_gen_timer) {
		/*
		 * Either a marginal timeout occurred
		 * (got the interrupt just as timer expired),
		 * or we were "sleeping" to give other devices a chance.
		 * Either way, we don't really want to complain about anything.
		 */
	} else {
		ide_expiry_t *expiry = hwif->expiry;
		ide_startstop_t startstop = ide_stopped;

		drive = hwif->cur_dev;

		if (expiry) {
			wait = expiry(drive);
			if (wait > 0) { /* continue */
				/* reset timer */
				hwif->timer.expires = jiffies + wait;
				hwif->req_gen_timer = hwif->req_gen;
				add_timer(&hwif->timer);
				spin_unlock_irqrestore(&hwif->lock, flags);
				return;
			}
		}
		hwif->handler = NULL;
		/*
		 * We need to simulate a real interrupt when invoking
		 * the handler() function, which means we need to
		 * globally mask the specific IRQ:
		 */
		spin_unlock(&hwif->lock);
		/* disable_irq_nosync ?? */
		disable_irq(hwif->irq);
		/* local CPU only, as if we were handling an interrupt */
		local_irq_disable();
		if (hwif->polling) {
			startstop = handler(drive);
		} else if (drive_is_ready(drive)) {
			if (drive->waiting_for_dma)
				hwif->dma_ops->dma_lost_irq(drive);
			if (hwif->ack_intr)
				hwif->ack_intr(hwif);
			printk(KERN_WARNING "%s: lost interrupt\n",
				drive->name);
			startstop = handler(drive);
		} else {
			if (drive->waiting_for_dma)
				startstop = ide_dma_timeout_retry(drive, wait);
			else
				startstop = ide_error(drive, "irq timeout",
					hwif->tp_ops->read_status(hwif));
		}
		spin_lock_irq(&hwif->lock);
		enable_irq(hwif->irq);
		if (startstop == ide_stopped) {
			ide_unlock_port(hwif);
			plug_device = 1;
		}
	}
	spin_unlock_irqrestore(&hwif->lock, flags);

	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_plug_device(drive);
	}
}
/**
 *	unexpected_intr		-	handle an unexpected IDE interrupt
 *	@irq: interrupt line
 *	@hwif: port being processed
 *
 *	There's nothing really useful we can do with an unexpected interrupt,
 *	other than reading the status register (to clear it), and logging it.
 *	There should be no way that an irq can happen before we're ready for it,
 *	so we needn't worry much about losing an "important" interrupt here.
 *
 *	On laptops (and "green" PCs), an unexpected interrupt occurs whenever
 *	the drive enters "idle", "standby", or "sleep" mode, so if the status
 *	looks "good", we just ignore the interrupt completely.
 *
 *	This routine assumes __cli() is in effect when called.
 *
 *	If an unexpected interrupt happens on irq15 while we are handling irq14
 *	and if the two interfaces are "serialized" (CMD640), then it looks like
 *	we could screw up by interfering with a new request being set up for
 *	irq15.
 *
 *	In reality, this is a non-issue. The new command is not sent unless
 *	the drive is ready to accept one, in which case we know the drive is
 *	not trying to interrupt us. And ide_set_handler() is always invoked
 *	before completing the issuance of any new drive command, so we will not
 *	be accidentally invoked as a result of any valid command completion
 *	interrupt.
 */

static void unexpected_intr(int irq, ide_hwif_t *hwif)
{
	u8 stat = hwif->tp_ops->read_status(hwif);

	if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
		/* Try to not flood the console with msgs */
		static unsigned long last_msgtime, count;
		++count;

		if (time_after(jiffies, last_msgtime + HZ)) {
			last_msgtime = jiffies;
			printk(KERN_ERR "%s: unexpected interrupt, "
				"status=0x%02x, count=%ld\n",
				hwif->name, stat, count);
		}
	}
}
/**
 *	ide_intr	-	default IDE interrupt handler
 *	@irq: interrupt number
 *	@dev_id: hwif
 *	@regs: unused weirdness from the kernel irq layer
 *
 *	This is the default IRQ handler for the IDE layer. You should
 *	not need to override it. If you do be aware it is subtle in
 *	places.
 *
 *	hwif is the interface in the group currently performing
 *	a command. hwif->cur_dev is the drive and hwif->handler is
 *	the IRQ handler to call. As we issue a command the handlers
 *	step through multiple states, reassigning the handler to the
 *	next step in the process. Unlike a smart SCSI controller IDE
 *	expects the main processor to sequence the various transfer
 *	stages. We also manage a poll timer to catch up with most
 *	timeout situations. There are still a few where the handlers
 *	don't ever decide to give up.
 *
 *	The handler eventually returns ide_stopped to indicate the
 *	request completed. At this point we issue the next request
 *	on the port and the process begins again.
 */

irqreturn_t ide_intr(int irq, void *dev_id)
{
	ide_hwif_t *hwif = (ide_hwif_t *)dev_id;
	struct ide_host *host = hwif->host;
	ide_drive_t *uninitialized_var(drive);
	ide_handler_t *handler;
	unsigned long flags;
	ide_startstop_t startstop;
	irqreturn_t irq_ret = IRQ_NONE;
	int plug_device = 0;

	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (hwif != host->cur_port)
			goto out_early;
	}

	spin_lock_irqsave(&hwif->lock, flags);

	if (hwif->ack_intr && hwif->ack_intr(hwif) == 0)
		goto out;

	handler = hwif->handler;

	if (handler == NULL || hwif->polling) {
		/*
		 * Not expecting an interrupt from this drive.
		 * That means this could be:
		 *	(1) an interrupt from another PCI device
		 *	sharing the same PCI INT# as us.
		 * or	(2) a drive just entered sleep or standby mode,
		 *	and is interrupting to let us know.
		 * or	(3) a spurious interrupt of unknown origin.
		 *
		 * For PCI, we cannot tell the difference,
		 * so in that case we just ignore it and hope it goes away.
		 */
		if ((host->irq_flags & IRQF_SHARED) == 0) {
			/*
			 * Probably not a shared PCI interrupt,
			 * so we can safely try to do something about it:
			 */
			unexpected_intr(irq, hwif);
		} else {
			/*
			 * Whack the status register, just in case
			 * we have a leftover pending IRQ.
			 */
			(void)hwif->tp_ops->read_status(hwif);
		}
		goto out;
	}

	drive = hwif->cur_dev;

	if (!drive_is_ready(drive))
		/*
		 * This happens regularly when we share a PCI IRQ with
		 * another device. Unfortunately, it can also happen
		 * with some buggy drives that trigger the IRQ before
		 * their status register is up to date. Hopefully we have
		 * enough advance overhead that the latter isn't a problem.
		 */
		goto out;

	hwif->handler = NULL;
	hwif->req_gen++;
	del_timer(&hwif->timer);
	spin_unlock(&hwif->lock);

	if (hwif->port_ops && hwif->port_ops->clear_irq)
		hwif->port_ops->clear_irq(drive);

	if (drive->dev_flags & IDE_DFLAG_UNMASK)
		local_irq_enable_in_hardirq();

	/* service this interrupt, may set handler for next interrupt */
	startstop = handler(drive);

	spin_lock_irq(&hwif->lock);
	/*
	 * Note that handler() may have set things up for another
	 * interrupt to occur soon, but it cannot happen until
	 * we exit from this routine, because it will be the
	 * same irq as is currently being serviced here, and Linux
	 * won't allow another of the same (on any CPU) until we return.
	 */
	if (startstop == ide_stopped) {
		BUG_ON(hwif->handler);
		ide_unlock_port(hwif);
		plug_device = 1;
	}
	irq_ret = IRQ_HANDLED;
out:
	spin_unlock_irqrestore(&hwif->lock, flags);
out_early:
	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_plug_device(drive);
	}

	return irq_ret;
}
EXPORT_SYMBOL_GPL(ide_intr);
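
/*
 * Illustrative sketch, not part of this file: a host driver installs
 * ide_intr() as its IRQ handler, with the hwif as the cookie that comes
 * back in @dev_id.  The flags shown are hypothetical; the real hookup
 * lives in the port probing code.
 */
#if 0
static int example_request_port_irq(ide_hwif_t *hwif)
{
	return request_irq(hwif->irq, ide_intr, IRQF_SHARED,
			   hwif->name, hwif);
}
#endif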
/*
 * Pad a PIO transfer by moving @len dummy bytes (zeroes on output,
 * discarded on input) in chunks of up to four bytes.
 */
void ide_pad_transfer(ide_drive_t *drive, int write, int len)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 buf[4] = { 0 };

	while (len > 0) {
		if (write)
			hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
		else
			hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
		len -= 4;
	}
}
EXPORT_SYMBOL_GPL(ide_pad_transfer);