/*
 *  Copyright (C) 2000-2002	   Michael Cornwell <cornwell@acm.org>
 *  Copyright (C) 2000-2002	   Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2001-2002	   Klaus Smolin
 *					IBM Storage Technology Division
 *  Copyright (C) 2003-2004, 2007  Bartlomiej Zolnierkiewicz
 *
 *  The big the bad and the ugly.
 */
11 #include <linux/types.h>
12 #include <linux/string.h>
13 #include <linux/kernel.h>
14 #include <linux/sched.h>
15 #include <linux/interrupt.h>
16 #include <linux/errno.h>
17 #include <linux/slab.h>
18 #include <linux/delay.h>
19 #include <linux/hdreg.h>
20 #include <linux/ide.h>
21 #include <linux/scatterlist.h>
23 #include <asm/uaccess.h>
26 void ide_tf_dump(const char *s
, struct ide_taskfile
*tf
)
29 printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
30 "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
31 s
, tf
->feature
, tf
->nsect
, tf
->lbal
,
32 tf
->lbam
, tf
->lbah
, tf
->device
, tf
->command
);
33 printk("%s: hob: nsect 0x%02x lbal 0x%02x "
34 "lbam 0x%02x lbah 0x%02x\n",
35 s
, tf
->hob_nsect
, tf
->hob_lbal
,
36 tf
->hob_lbam
, tf
->hob_lbah
);
40 int taskfile_lib_get_identify (ide_drive_t
*drive
, u8
*buf
)
44 memset(&args
, 0, sizeof(ide_task_t
));
46 if (drive
->media
== ide_disk
)
47 args
.tf
.command
= ATA_CMD_ID_ATA
;
49 args
.tf
.command
= ATA_CMD_ID_ATAPI
;
50 args
.tf_flags
= IDE_TFLAG_TF
| IDE_TFLAG_DEVICE
;
51 args
.data_phase
= TASKFILE_IN
;
52 return ide_raw_taskfile(drive
, &args
, buf
, 1);
/* Forward declarations for the ide_handler_t interrupt callbacks below. */
static ide_startstop_t task_no_data_intr(ide_drive_t *);
static ide_startstop_t set_geometry_intr(ide_drive_t *);
static ide_startstop_t recal_intr(ide_drive_t *);
static ide_startstop_t set_multmode_intr(ide_drive_t *);
static ide_startstop_t pre_task_out_intr(ide_drive_t *, struct request *);
static ide_startstop_t task_in_intr(ide_drive_t *);
62 ide_startstop_t
do_rw_taskfile (ide_drive_t
*drive
, ide_task_t
*task
)
64 ide_hwif_t
*hwif
= HWIF(drive
);
65 struct ide_taskfile
*tf
= &task
->tf
;
66 ide_handler_t
*handler
= NULL
;
67 const struct ide_tp_ops
*tp_ops
= hwif
->tp_ops
;
68 const struct ide_dma_ops
*dma_ops
= hwif
->dma_ops
;
70 if (task
->data_phase
== TASKFILE_MULTI_IN
||
71 task
->data_phase
== TASKFILE_MULTI_OUT
) {
72 if (!drive
->mult_count
) {
73 printk(KERN_ERR
"%s: multimode not set!\n",
79 if (task
->tf_flags
& IDE_TFLAG_FLAGGED
)
80 task
->tf_flags
|= IDE_TFLAG_FLAGGED_SET_IN_FLAGS
;
82 if ((task
->tf_flags
& IDE_TFLAG_DMA_PIO_FALLBACK
) == 0) {
83 ide_tf_dump(drive
->name
, tf
);
84 tp_ops
->set_irq(hwif
, 1);
85 SELECT_MASK(drive
, 0);
86 tp_ops
->tf_load(drive
, task
);
89 switch (task
->data_phase
) {
90 case TASKFILE_MULTI_OUT
:
92 tp_ops
->exec_command(hwif
, tf
->command
);
93 ndelay(400); /* FIXME */
94 return pre_task_out_intr(drive
, task
->rq
);
95 case TASKFILE_MULTI_IN
:
97 handler
= task_in_intr
;
99 case TASKFILE_NO_DATA
:
101 handler
= task_no_data_intr
;
102 if (task
->tf_flags
& IDE_TFLAG_CUSTOM_HANDLER
) {
103 switch (tf
->command
) {
104 case ATA_CMD_INIT_DEV_PARAMS
:
105 handler
= set_geometry_intr
;
107 case ATA_CMD_RESTORE
:
108 handler
= recal_intr
;
110 case ATA_CMD_SET_MULTI
:
111 handler
= set_multmode_intr
;
115 ide_execute_command(drive
, tf
->command
, handler
,
116 WAIT_WORSTCASE
, NULL
);
119 if (drive
->using_dma
== 0 || dma_ops
->dma_setup(drive
))
121 dma_ops
->dma_exec_cmd(drive
, tf
->command
);
122 dma_ops
->dma_start(drive
);
126 EXPORT_SYMBOL_GPL(do_rw_taskfile
);
129 * set_multmode_intr() is invoked on completion of a ATA_CMD_SET_MULTI cmd.
131 static ide_startstop_t
set_multmode_intr(ide_drive_t
*drive
)
133 ide_hwif_t
*hwif
= drive
->hwif
;
136 local_irq_enable_in_hardirq();
137 stat
= hwif
->tp_ops
->read_status(hwif
);
139 if (OK_STAT(stat
, ATA_DRDY
, BAD_STAT
))
140 drive
->mult_count
= drive
->mult_req
;
142 drive
->mult_req
= drive
->mult_count
= 0;
143 drive
->special
.b
.recalibrate
= 1;
144 (void) ide_dump_status(drive
, "set_multmode", stat
);
150 * set_geometry_intr() is invoked on completion of a ATA_CMD_INIT_DEV_PARAMS cmd.
152 static ide_startstop_t
set_geometry_intr(ide_drive_t
*drive
)
154 ide_hwif_t
*hwif
= drive
->hwif
;
158 local_irq_enable_in_hardirq();
161 stat
= hwif
->tp_ops
->read_status(hwif
);
162 if ((stat
& ATA_BUSY
) == 0 || retries
-- == 0)
167 if (OK_STAT(stat
, ATA_DRDY
, BAD_STAT
))
170 if (stat
& (ATA_ERR
| ATA_DRQ
))
171 return ide_error(drive
, "set_geometry_intr", stat
);
173 ide_set_handler(drive
, &set_geometry_intr
, WAIT_WORSTCASE
, NULL
);
178 * recal_intr() is invoked on completion of a ATA_CMD_RESTORE (recalibrate) cmd.
180 static ide_startstop_t
recal_intr(ide_drive_t
*drive
)
182 ide_hwif_t
*hwif
= drive
->hwif
;
185 local_irq_enable_in_hardirq();
186 stat
= hwif
->tp_ops
->read_status(hwif
);
188 if (!OK_STAT(stat
, ATA_DRDY
, BAD_STAT
))
189 return ide_error(drive
, "recal_intr", stat
);
194 * Handler for commands without a data phase
196 static ide_startstop_t
task_no_data_intr(ide_drive_t
*drive
)
198 ide_hwif_t
*hwif
= drive
->hwif
;
199 ide_task_t
*args
= hwif
->hwgroup
->rq
->special
;
202 local_irq_enable_in_hardirq();
203 stat
= hwif
->tp_ops
->read_status(hwif
);
205 if (!OK_STAT(stat
, ATA_DRDY
, BAD_STAT
))
206 return ide_error(drive
, "task_no_data_intr", stat
);
207 /* calls ide_end_drive_cmd */
210 ide_end_drive_cmd(drive
, stat
, ide_read_error(drive
));
215 static u8
wait_drive_not_busy(ide_drive_t
*drive
)
217 ide_hwif_t
*hwif
= drive
->hwif
;
222 * Last sector was transfered, wait until device is ready. This can
223 * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
225 for (retries
= 0; retries
< 1000; retries
++) {
226 stat
= hwif
->tp_ops
->read_status(hwif
);
235 printk(KERN_ERR
"%s: drive still BUSY!\n", drive
->name
);
240 static void ide_pio_sector(ide_drive_t
*drive
, struct request
*rq
,
243 ide_hwif_t
*hwif
= drive
->hwif
;
244 struct scatterlist
*sg
= hwif
->sg_table
;
245 struct scatterlist
*cursg
= hwif
->cursg
;
247 #ifdef CONFIG_HIGHMEM
259 page
= sg_page(cursg
);
260 offset
= cursg
->offset
+ hwif
->cursg_ofs
* SECTOR_SIZE
;
262 /* get the current page and offset */
263 page
= nth_page(page
, (offset
>> PAGE_SHIFT
));
266 #ifdef CONFIG_HIGHMEM
267 local_irq_save(flags
);
269 buf
= kmap_atomic(page
, KM_BIO_SRC_IRQ
) + offset
;
274 if ((hwif
->cursg_ofs
* SECTOR_SIZE
) == cursg
->length
) {
275 hwif
->cursg
= sg_next(hwif
->cursg
);
279 /* do the actual data transfer */
281 hwif
->tp_ops
->output_data(drive
, rq
, buf
, SECTOR_SIZE
);
283 hwif
->tp_ops
->input_data(drive
, rq
, buf
, SECTOR_SIZE
);
285 kunmap_atomic(buf
, KM_BIO_SRC_IRQ
);
286 #ifdef CONFIG_HIGHMEM
287 local_irq_restore(flags
);
291 static void ide_pio_multi(ide_drive_t
*drive
, struct request
*rq
,
296 nsect
= min_t(unsigned int, drive
->hwif
->nleft
, drive
->mult_count
);
298 ide_pio_sector(drive
, rq
, write
);
301 static void ide_pio_datablock(ide_drive_t
*drive
, struct request
*rq
,
304 u8 saved_io_32bit
= drive
->io_32bit
;
306 if (rq
->bio
) /* fs request */
309 if (rq
->cmd_type
== REQ_TYPE_ATA_TASKFILE
) {
310 ide_task_t
*task
= rq
->special
;
312 if (task
->tf_flags
& IDE_TFLAG_IO_16BIT
)
316 touch_softlockup_watchdog();
318 switch (drive
->hwif
->data_phase
) {
319 case TASKFILE_MULTI_IN
:
320 case TASKFILE_MULTI_OUT
:
321 ide_pio_multi(drive
, rq
, write
);
324 ide_pio_sector(drive
, rq
, write
);
328 drive
->io_32bit
= saved_io_32bit
;
331 static ide_startstop_t
task_error(ide_drive_t
*drive
, struct request
*rq
,
332 const char *s
, u8 stat
)
335 ide_hwif_t
*hwif
= drive
->hwif
;
336 int sectors
= hwif
->nsect
- hwif
->nleft
;
338 switch (hwif
->data_phase
) {
346 case TASKFILE_MULTI_IN
:
350 case TASKFILE_MULTI_OUT
:
351 sectors
-= drive
->mult_count
;
359 drv
= *(ide_driver_t
**)rq
->rq_disk
->private_data
;
360 drv
->end_request(drive
, 1, sectors
);
363 return ide_error(drive
, s
, stat
);
366 void task_end_request(ide_drive_t
*drive
, struct request
*rq
, u8 stat
)
368 if (rq
->cmd_type
== REQ_TYPE_ATA_TASKFILE
) {
369 u8 err
= ide_read_error(drive
);
371 ide_end_drive_cmd(drive
, stat
, err
);
378 drv
= *(ide_driver_t
**)rq
->rq_disk
->private_data
;;
379 drv
->end_request(drive
, 1, rq
->nr_sectors
);
381 ide_end_request(drive
, 1, rq
->nr_sectors
);
385 * We got an interrupt on a task_in case, but no errors and no DRQ.
387 * It might be a spurious irq (shared irq), but it might be a
388 * command that had no output.
390 static ide_startstop_t
task_in_unexpected(ide_drive_t
*drive
, struct request
*rq
, u8 stat
)
392 /* Command all done? */
393 if (OK_STAT(stat
, ATA_DRDY
, ATA_BUSY
)) {
394 task_end_request(drive
, rq
, stat
);
398 /* Assume it was a spurious irq */
399 ide_set_handler(drive
, &task_in_intr
, WAIT_WORSTCASE
, NULL
);
404 * Handler for command with PIO data-in phase (Read/Read Multiple).
406 static ide_startstop_t
task_in_intr(ide_drive_t
*drive
)
408 ide_hwif_t
*hwif
= drive
->hwif
;
409 struct request
*rq
= hwif
->hwgroup
->rq
;
410 u8 stat
= hwif
->tp_ops
->read_status(hwif
);
414 return task_error(drive
, rq
, __func__
, stat
);
416 /* Didn't want any data? Odd. */
417 if ((stat
& ATA_DRQ
) == 0)
418 return task_in_unexpected(drive
, rq
, stat
);
420 ide_pio_datablock(drive
, rq
, 0);
422 /* Are we done? Check status and finish transfer. */
424 stat
= wait_drive_not_busy(drive
);
425 if (!OK_STAT(stat
, 0, BAD_STAT
))
426 return task_error(drive
, rq
, __func__
, stat
);
427 task_end_request(drive
, rq
, stat
);
431 /* Still data left to transfer. */
432 ide_set_handler(drive
, &task_in_intr
, WAIT_WORSTCASE
, NULL
);
438 * Handler for command with PIO data-out phase (Write/Write Multiple).
440 static ide_startstop_t
task_out_intr (ide_drive_t
*drive
)
442 ide_hwif_t
*hwif
= drive
->hwif
;
443 struct request
*rq
= HWGROUP(drive
)->rq
;
444 u8 stat
= hwif
->tp_ops
->read_status(hwif
);
446 if (!OK_STAT(stat
, DRIVE_READY
, drive
->bad_wstat
))
447 return task_error(drive
, rq
, __func__
, stat
);
449 /* Deal with unexpected ATA data phase. */
450 if (((stat
& ATA_DRQ
) == 0) ^ !hwif
->nleft
)
451 return task_error(drive
, rq
, __func__
, stat
);
454 task_end_request(drive
, rq
, stat
);
458 /* Still data left to transfer. */
459 ide_pio_datablock(drive
, rq
, 1);
460 ide_set_handler(drive
, &task_out_intr
, WAIT_WORSTCASE
, NULL
);
465 static ide_startstop_t
pre_task_out_intr(ide_drive_t
*drive
, struct request
*rq
)
467 ide_startstop_t startstop
;
469 if (ide_wait_stat(&startstop
, drive
, ATA_DRQ
,
470 drive
->bad_wstat
, WAIT_DRQ
)) {
471 printk(KERN_ERR
"%s: no DRQ after issuing %sWRITE%s\n",
473 drive
->hwif
->data_phase
? "MULT" : "",
474 drive
->addressing
? "_EXT" : "");
481 ide_set_handler(drive
, &task_out_intr
, WAIT_WORSTCASE
, NULL
);
482 ide_pio_datablock(drive
, rq
, 1);
487 int ide_raw_taskfile(ide_drive_t
*drive
, ide_task_t
*task
, u8
*buf
, u16 nsect
)
492 rq
= blk_get_request(drive
->queue
, READ
, __GFP_WAIT
);
493 rq
->cmd_type
= REQ_TYPE_ATA_TASKFILE
;
497 * (ks) We transfer currently only whole sectors.
498 * This is suffient for now. But, it would be great,
499 * if we would find a solution to transfer any size.
500 * To support special commands like READ LONG.
502 rq
->hard_nr_sectors
= rq
->nr_sectors
= nsect
;
503 rq
->hard_cur_sectors
= rq
->current_nr_sectors
= nsect
;
505 if (task
->tf_flags
& IDE_TFLAG_WRITE
)
506 rq
->cmd_flags
|= REQ_RW
;
511 error
= blk_execute_rq(drive
->queue
, NULL
, rq
, 0);
517 EXPORT_SYMBOL(ide_raw_taskfile
);
519 int ide_no_data_taskfile(ide_drive_t
*drive
, ide_task_t
*task
)
521 task
->data_phase
= TASKFILE_NO_DATA
;
523 return ide_raw_taskfile(drive
, task
, NULL
, 0);
525 EXPORT_SYMBOL_GPL(ide_no_data_taskfile
);
#ifdef CONFIG_IDE_TASK_IOCTL
/*
 * HDIO_DRIVE_TASKFILE ioctl: copy an ide_task_request_t (plus optional
 * out/in data buffers) from userspace, translate its flag words into
 * IDE_TFLAG_* bits, run the command, and copy the results back.
 *
 * The user buffer layout is: request header, then taskout bytes, then
 * taskin bytes.  All error paths funnel through the "abort" cleanup.
 *
 * NOTE(review): dropped error paths / goto labels reconstructed from the
 * upstream 2.6.27 implementation — verify against kernel history.
 */
int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	ide_task_request_t	*req_task;
	ide_task_t		args;
	u8 *outbuf		= NULL;
	u8 *inbuf		= NULL;
	u8 *data_buf		= NULL;
	int err			= 0;
	int tasksize		= sizeof(struct ide_task_request_s);
	unsigned int taskin	= 0;
	unsigned int taskout	= 0;
	u16 nsect		= 0;
	char __user *buf = (char __user *)arg;

//	printk("IDE Taskfile ...\n");

	req_task = kzalloc(tasksize, GFP_KERNEL);
	if (req_task == NULL) return -ENOMEM;
	if (copy_from_user(req_task, buf, tasksize)) {
		kfree(req_task);
		return -EFAULT;
	}

	taskout = req_task->out_size;
	taskin  = req_task->in_size;

	if (taskin > 65536 || taskout > 65536) {
		err = -EINVAL;
		goto abort;
	}

	if (taskout) {
		int outtotal = tasksize;
		outbuf = kzalloc(taskout, GFP_KERNEL);
		if (outbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(outbuf, buf + outtotal, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}

	if (taskin) {
		int intotal = tasksize + taskout;
		inbuf = kzalloc(taskin, GFP_KERNEL);
		if (inbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(inbuf, buf + intotal, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}

	memset(&args, 0, sizeof(ide_task_t));

	memcpy(&args.tf_array[0], req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2);
	memcpy(&args.tf_array[6], req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);

	args.data_phase = req_task->data_phase;

	args.tf_flags = IDE_TFLAG_IO_16BIT | IDE_TFLAG_DEVICE |
			IDE_TFLAG_IN_TF;
	if (drive->addressing == 1)
		args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_IN_HOB);

	if (req_task->out_flags.all) {
		args.tf_flags |= IDE_TFLAG_FLAGGED;

		if (req_task->out_flags.b.data)
			args.tf_flags |= IDE_TFLAG_OUT_DATA;

		if (req_task->out_flags.b.nsector_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_NSECT;
		if (req_task->out_flags.b.sector_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAL;
		if (req_task->out_flags.b.lcyl_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAM;
		if (req_task->out_flags.b.hcyl_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAH;

		if (req_task->out_flags.b.error_feature)
			args.tf_flags |= IDE_TFLAG_OUT_FEATURE;
		if (req_task->out_flags.b.nsector)
			args.tf_flags |= IDE_TFLAG_OUT_NSECT;
		if (req_task->out_flags.b.sector)
			args.tf_flags |= IDE_TFLAG_OUT_LBAL;
		if (req_task->out_flags.b.lcyl)
			args.tf_flags |= IDE_TFLAG_OUT_LBAM;
		if (req_task->out_flags.b.hcyl)
			args.tf_flags |= IDE_TFLAG_OUT_LBAH;
	} else {
		args.tf_flags |= IDE_TFLAG_OUT_TF;
		if (args.tf_flags & IDE_TFLAG_LBA48)
			args.tf_flags |= IDE_TFLAG_OUT_HOB;
	}

	if (req_task->in_flags.b.data)
		args.tf_flags |= IDE_TFLAG_IN_DATA;

	switch(req_task->data_phase) {
	case TASKFILE_MULTI_OUT:
		if (!drive->mult_count) {
			/* (hs): give up if multcount is not set */
			printk(KERN_ERR "%s: %s Multimode Write " \
					"multcount is not set\n",
					drive->name, __func__);
			err = -EPERM;
			goto abort;
		}
		/* fall through */
	case TASKFILE_OUT:
	case TASKFILE_OUT_DMAQ:
	case TASKFILE_OUT_DMA:
		nsect = taskout / SECTOR_SIZE;
		data_buf = outbuf;
		break;
	case TASKFILE_MULTI_IN:
		if (!drive->mult_count) {
			/* (hs): give up if multcount is not set */
			printk(KERN_ERR "%s: %s Multimode Read failure " \
					"multcount is not set\n",
					drive->name, __func__);
			err = -EPERM;
			goto abort;
		}
		/* fall through */
	case TASKFILE_IN:
	case TASKFILE_IN_DMAQ:
	case TASKFILE_IN_DMA:
		nsect = taskin / SECTOR_SIZE;
		data_buf = inbuf;
		break;
	case TASKFILE_NO_DATA:
		break;
	default:
		err = -EFAULT;
		goto abort;
	}

	if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA)
		nsect = 0;
	else if (!nsect) {
		nsect = (args.tf.hob_nsect << 8) | args.tf.nsect;

		if (!nsect) {
			printk(KERN_ERR "%s: in/out command without data\n",
					drive->name);
			err = -EFAULT;
			goto abort;
		}
	}

	if (req_task->req_cmd == IDE_DRIVE_TASK_RAW_WRITE)
		args.tf_flags |= IDE_TFLAG_WRITE;

	err = ide_raw_taskfile(drive, &args, data_buf, nsect);

	memcpy(req_task->hob_ports, &args.tf_array[0], HDIO_DRIVE_HOB_HDR_SIZE - 2);
	memcpy(req_task->io_ports, &args.tf_array[6], HDIO_DRIVE_TASK_HDR_SIZE);

	if ((args.tf_flags & IDE_TFLAG_FLAGGED_SET_IN_FLAGS) &&
	    req_task->in_flags.all == 0) {
		req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
		if (drive->addressing == 1)
			req_task->in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
	}

	if (copy_to_user(buf, req_task, tasksize)) {
		err = -EFAULT;
		goto abort;
	}
	if (taskout) {
		int outtotal = tasksize;
		if (copy_to_user(buf + outtotal, outbuf, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}
	if (taskin) {
		int intotal = tasksize + taskout;
		if (copy_to_user(buf + intotal, inbuf, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}
abort:
	kfree(req_task);
	kfree(outbuf);
	kfree(inbuf);

//	printk("IDE Taskfile ioctl ended. rc = %i\n", err);

	return err;
}
#endif