/*
 * Copyright (C) 2000-2002	Michael Cornwell <cornwell@acm.org>
 * Copyright (C) 2000-2002	Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2001-2002	Klaus Smolin
 *				IBM Storage Technology Division
 * Copyright (C) 2003-2004, 2007 Bartlomiej Zolnierkiewicz
 *
 * The big the bad and the ugly.
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>

#include <asm/uaccess.h>
void ide_tf_dump(const char *s, struct ide_taskfile *tf)
{
#ifdef DEBUG
	printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
		"lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
		s, tf->feature, tf->nsect, tf->lbal,
		tf->lbam, tf->lbah, tf->device, tf->command);
	printk("%s: hob: nsect 0x%02x lbal 0x%02x "
		"lbam 0x%02x lbah 0x%02x\n",
		s, tf->hob_nsect, tf->hob_lbal,
		tf->hob_lbam, tf->hob_lbah);
#endif
}
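
/*
 * Read the drive's 512-byte identify data (IDENTIFY DEVICE for disks,
 * IDENTIFY PACKET DEVICE for ATAPI) into @buf via a one-sector PIO-in
 * taskfile.
 */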
int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
{
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.tf.nsect = 0x01;
	if (drive->media == ide_disk)
		args.tf.command = ATA_CMD_ID_ATA;
	else
		args.tf.command = ATA_CMD_ID_ATAPI;
	args.tf_flags	= IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
	args.data_phase	= TASKFILE_IN;
	return ide_raw_taskfile(drive, &args, buf, 1);
}
static ide_startstop_t task_no_data_intr(ide_drive_t *);
static ide_startstop_t pre_task_out_intr(ide_drive_t *, struct request *);
static ide_startstop_t task_in_intr(ide_drive_t *);
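
/*
 * Issue the taskfile described by @task: program the taskfile registers
 * (unless this is a DMA -> PIO fallback replay), then dispatch on the data
 * phase - PIO-out commands are started directly, PIO-in and no-data commands
 * install the matching interrupt handler, and anything else is handed to the
 * DMA engine.
 */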
ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct ide_taskfile *tf = &task->tf;
	ide_handler_t *handler = NULL;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	const struct ide_dma_ops *dma_ops = hwif->dma_ops;

	if (task->data_phase == TASKFILE_MULTI_IN ||
	    task->data_phase == TASKFILE_MULTI_OUT) {
		if (!drive->mult_count) {
			printk(KERN_ERR "%s: multimode not set!\n",
					drive->name);
			return ide_stopped;
		}
	}

	if (task->tf_flags & IDE_TFLAG_FLAGGED)
		task->tf_flags |= IDE_TFLAG_FLAGGED_SET_IN_FLAGS;

	memcpy(&hwif->task, task, sizeof(*task));

	if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
		ide_tf_dump(drive->name, tf);
		tp_ops->set_irq(hwif, 1);
		SELECT_MASK(drive, 0);
		tp_ops->tf_load(drive, task);
	}

	switch (task->data_phase) {
	case TASKFILE_MULTI_OUT:
	case TASKFILE_OUT:
		tp_ops->exec_command(hwif, tf->command);
		ndelay(400);	/* FIXME */
		return pre_task_out_intr(drive, task->rq);
	case TASKFILE_MULTI_IN:
	case TASKFILE_IN:
		handler = task_in_intr;
		/* fall-through */
	case TASKFILE_NO_DATA:
		if (handler == NULL)
			handler = task_no_data_intr;
		ide_execute_command(drive, tf->command, handler,
				    WAIT_WORSTCASE, NULL);
		return ide_started;
	default:
		if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0 ||
		    dma_ops->dma_setup(drive))
			return ide_stopped;
		dma_ops->dma_exec_cmd(drive, tf->command);
		dma_ops->dma_start(drive);
		return ide_started;
	}
}
EXPORT_SYMBOL_GPL(do_rw_taskfile);
/*
 * Handler for commands without a data phase
 */
static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	ide_task_t *task = &hwif->task;
	struct ide_taskfile *tf = &task->tf;
	int custom = (task->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) ? 1 : 0;
	int retries = (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) ? 5 : 1;
	u8 stat;

	local_irq_enable_in_hardirq();

	while (1) {
		stat = hwif->tp_ops->read_status(hwif);
		if ((stat & ATA_BUSY) == 0 || retries-- == 0)
			break;
		udelay(10);
	}

	if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
		if (custom && tf->command == ATA_CMD_SET_MULTI) {
			drive->mult_req = drive->mult_count = 0;
			drive->special.b.recalibrate = 1;
			(void)ide_dump_status(drive, __func__, stat);
			return ide_stopped;
		} else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) {
			if ((stat & (ATA_ERR | ATA_DRQ)) == 0) {
				ide_set_handler(drive, &task_no_data_intr,
						WAIT_WORSTCASE, NULL);
				return ide_started;
			}
		}
		return ide_error(drive, "task_no_data_intr", stat);
		/* calls ide_end_drive_cmd */
	}

	if (!custom)
		ide_end_drive_cmd(drive, stat, ide_read_error(drive));
	else if (tf->command == ATA_CMD_IDLEIMMEDIATE) {
		hwif->tp_ops->tf_read(drive, task);
		if (tf->lbal != 0xc4) {
			printk(KERN_ERR "%s: head unload failed!\n",
			       drive->name);
			ide_tf_dump(drive->name, tf);
		} else
			drive->dev_flags |= IDE_DFLAG_PARKED;
		ide_end_drive_cmd(drive, stat, ide_read_error(drive));
	} else if (tf->command == ATA_CMD_SET_MULTI)
		drive->mult_count = drive->mult_req;

	return ide_stopped;
}
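
/*
 * Poll the status register until BSY clears (or roughly 10 ms have passed)
 * and return the last status value read.
 */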
static u8 wait_drive_not_busy(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	int retries;
	u8 stat;

	/*
	 * Last sector was transferred, wait until device is ready. This can
	 * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
	 */
	for (retries = 0; retries < 1000; retries++) {
		stat = hwif->tp_ops->read_status(hwif);

		if (stat & ATA_BUSY)
			udelay(10);
		else
			break;
	}

	if (stat & ATA_BUSY)
		printk(KERN_ERR "%s: drive still BUSY!\n", drive->name);

	return stat;
}
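
/*
 * Transfer a single sector of PIO data to or from the page backing the
 * current scatterlist segment, advancing the per-hwif scatterlist cursor.
 */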
static void ide_pio_sector(ide_drive_t *drive, struct request *rq,
			   unsigned int write)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;
	struct scatterlist *cursg = hwif->cursg;
	struct page *page;
#ifdef CONFIG_HIGHMEM
	unsigned long flags;
#endif
	unsigned int offset;
	u8 *buf;

	if (!cursg) {
		cursg = sg;
		hwif->cursg = sg;
	}

	page = sg_page(cursg);
	offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

#ifdef CONFIG_HIGHMEM
	local_irq_save(flags);
#endif
	buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;

	hwif->nleft--;
	hwif->cursg_ofs++;

	if ((hwif->cursg_ofs * SECTOR_SIZE) == cursg->length) {
		hwif->cursg = sg_next(hwif->cursg);
		hwif->cursg_ofs = 0;
	}

	/* do the actual data transfer */
	if (write)
		hwif->tp_ops->output_data(drive, rq, buf, SECTOR_SIZE);
	else
		hwif->tp_ops->input_data(drive, rq, buf, SECTOR_SIZE);

	kunmap_atomic(buf, KM_BIO_SRC_IRQ);
#ifdef CONFIG_HIGHMEM
	local_irq_restore(flags);
#endif
}
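
/*
 * Transfer up to mult_count sectors in one go for Read/Write Multiple.
 */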
static void ide_pio_multi(ide_drive_t *drive, struct request *rq,
			  unsigned int write)
{
	unsigned int nsect;

	nsect = min_t(unsigned int, drive->hwif->nleft, drive->mult_count);
	while (nsect--)
		ide_pio_sector(drive, rq, write);
}
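
/*
 * Transfer the next block of a PIO command: drop to 16-bit I/O when the
 * taskfile asks for it, then hand off to the single-sector or multi-sector
 * helper depending on the current data phase.
 */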
static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
			      unsigned int write)
{
	u8 saved_io_32bit = drive->io_32bit;

	if (rq->bio)	/* fs request */
		rq->errors = 0;

	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *task = rq->special;

		if (task->tf_flags & IDE_TFLAG_IO_16BIT)
			drive->io_32bit = 0;
	}

	touch_softlockup_watchdog();

	switch (drive->hwif->data_phase) {
	case TASKFILE_MULTI_IN:
	case TASKFILE_MULTI_OUT:
		ide_pio_multi(drive, rq, write);
		break;
	default:
		ide_pio_sector(drive, rq, write);
		break;
	}

	drive->io_32bit = saved_io_32bit;
}
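
/*
 * PIO error path: complete the sectors that made it across intact, then
 * fail the rest of the request through ide_error().
 */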
static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
				  const char *s, u8 stat)
{
	if (rq->bio) {
		ide_hwif_t *hwif = drive->hwif;
		int sectors = hwif->nsect - hwif->nleft;

		switch (hwif->data_phase) {
		case TASKFILE_IN:
			if (hwif->nleft)
				break;
			/* fall through */
		case TASKFILE_OUT:
			sectors--;
			break;
		case TASKFILE_MULTI_IN:
			if (hwif->nleft)
				break;
			/* fall through */
		case TASKFILE_MULTI_OUT:
			sectors -= drive->mult_count;
		default:
			break;
		}

		if (sectors > 0) {
			ide_driver_t *drv;

			drv = *(ide_driver_t **)rq->rq_disk->private_data;
			drv->end_request(drive, 1, sectors);
		}
	}
	return ide_error(drive, s, stat);
}
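
/*
 * Complete a finished request: taskfile requests are ended through
 * ide_end_drive_cmd(), fs requests through the owning driver's
 * end_request() or plain ide_end_request().
 */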
void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
{
	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		u8 err = ide_read_error(drive);

		ide_end_drive_cmd(drive, stat, err);
		return;
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		drv->end_request(drive, 1, rq->nr_sectors);
	} else
		ide_end_request(drive, 1, rq->nr_sectors);
}
/*
 * We got an interrupt on a task_in case, but no errors and no DRQ.
 *
 * It might be a spurious irq (shared irq), but it might be a
 * command that had no output.
 */
static ide_startstop_t task_in_unexpected(ide_drive_t *drive, struct request *rq, u8 stat)
{
	/* Command all done? */
	if (OK_STAT(stat, ATA_DRDY, ATA_BUSY)) {
		task_end_request(drive, rq, stat);
		return ide_stopped;
	}

	/* Assume it was a spurious irq */
	ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
	return ide_started;
}
/*
 * Handler for command with PIO data-in phase (Read/Read Multiple).
 */
static ide_startstop_t task_in_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->hwgroup->rq;
	u8 stat = hwif->tp_ops->read_status(hwif);

	/* Error? */
	if (stat & ATA_ERR)
		return task_error(drive, rq, __func__, stat);

	/* Didn't want any data? Odd. */
	if ((stat & ATA_DRQ) == 0)
		return task_in_unexpected(drive, rq, stat);

	ide_pio_datablock(drive, rq, 0);

	/* Are we done? Check status and finish transfer. */
	if (!hwif->nleft) {
		stat = wait_drive_not_busy(drive);
		if (!OK_STAT(stat, 0, BAD_STAT))
			return task_error(drive, rq, __func__, stat);
		task_end_request(drive, rq, stat);
		return ide_stopped;
	}

	/* Still data left to transfer. */
	ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);

	return ide_started;
}
/*
 * Handler for command with PIO data-out phase (Write/Write Multiple).
 */
static ide_startstop_t task_out_intr (ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	u8 stat = hwif->tp_ops->read_status(hwif);

	if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
		return task_error(drive, rq, __func__, stat);

	/* Deal with unexpected ATA data phase. */
	if (((stat & ATA_DRQ) == 0) ^ !hwif->nleft)
		return task_error(drive, rq, __func__, stat);

	if (!hwif->nleft) {
		task_end_request(drive, rq, stat);
		return ide_stopped;
	}

	/* Still data left to transfer. */
	ide_pio_datablock(drive, rq, 1);
	ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);

	return ide_started;
}
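
/*
 * Start a PIO-out command: wait for DRQ after the command was issued, arm
 * task_out_intr and push the first data block to the drive.
 */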
static ide_startstop_t pre_task_out_intr(ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

	if (ide_wait_stat(&startstop, drive, ATA_DRQ,
			  drive->bad_wstat, WAIT_DRQ)) {
		printk(KERN_ERR "%s: no DRQ after issuing %sWRITE%s\n",
				drive->name, drive->hwif->data_phase ? "MULT" : "",
				(drive->dev_flags & IDE_DFLAG_LBA48) ? "_EXT" : "");
		return startstop;
	}

	if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
		local_irq_disable();

	ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);
	ide_pio_datablock(drive, rq, 1);

	return ide_started;
}
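
/*
 * Wrap @task in a REQ_TYPE_ATA_TASKFILE request, attach @buf for @nsect
 * sectors of PIO data and execute it synchronously on the drive's queue.
 */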
int ide_raw_taskfile(ide_drive_t *drive, ide_task_t *task, u8 *buf, u16 nsect)
{
	struct request *rq;
	int error;

	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
	rq->buffer = buf;

	/*
	 * (ks) We transfer currently only whole sectors.
	 * This is sufficient for now.  But, it would be great,
	 * if we would find a solution to transfer any size.
	 * To support special commands like READ LONG.
	 */
	rq->hard_nr_sectors = rq->nr_sectors = nsect;
	rq->hard_cur_sectors = rq->current_nr_sectors = nsect;

	if (task->tf_flags & IDE_TFLAG_WRITE)
		rq->cmd_flags |= REQ_RW;

	rq->special = task;
	task->rq = rq;

	error = blk_execute_rq(drive->queue, NULL, rq, 0);
	blk_put_request(rq);

	return error;
}
EXPORT_SYMBOL(ide_raw_taskfile);
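
/*
 * Convenience wrapper for taskfiles that have no data phase.
 */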
int ide_no_data_taskfile(ide_drive_t *drive, ide_task_t *task)
{
	task->data_phase = TASKFILE_NO_DATA;

	return ide_raw_taskfile(drive, task, NULL, 0);
}
EXPORT_SYMBOL_GPL(ide_no_data_taskfile);
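
/*
 * HDIO_DRIVE_TASKFILE ioctl handler: copy the user's request header and data
 * buffers in, translate the in/out flags into IDE_TFLAG_* bits, execute the
 * taskfile and copy the resulting registers and data back to user space.
 */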
#ifdef CONFIG_IDE_TASK_IOCTL
int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	ide_task_request_t	*req_task;
	ide_task_t		args;
	u8 *outbuf		= NULL;
	u8 *inbuf		= NULL;
	u8 *data_buf		= NULL;
	int err			= 0;
	int tasksize		= sizeof(struct ide_task_request_s);
	unsigned int taskin	= 0;
	unsigned int taskout	= 0;
	u16 nsect		= 0;
	char __user *buf = (char __user *)arg;

//	printk("IDE Taskfile ...\n");

	req_task = kzalloc(tasksize, GFP_KERNEL);
	if (req_task == NULL) return -ENOMEM;
	if (copy_from_user(req_task, buf, tasksize)) {
		kfree(req_task);
		return -EFAULT;
	}

	taskout = req_task->out_size;
	taskin  = req_task->in_size;

	if (taskin > 65536 || taskout > 65536) {
		err = -EINVAL;
		goto abort;
	}

	if (taskout) {
		int outtotal = tasksize;
		outbuf = kzalloc(taskout, GFP_KERNEL);
		if (outbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(outbuf, buf + outtotal, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}

	if (taskin) {
		int intotal = tasksize + taskout;
		inbuf = kzalloc(taskin, GFP_KERNEL);
		if (inbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(inbuf, buf + intotal, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}
	memset(&args, 0, sizeof(ide_task_t));

	memcpy(&args.tf_array[0], req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2);
	memcpy(&args.tf_array[6], req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);

	args.data_phase = req_task->data_phase;

	args.tf_flags = IDE_TFLAG_IO_16BIT | IDE_TFLAG_DEVICE |
			IDE_TFLAG_IN_TF;
	if (drive->dev_flags & IDE_DFLAG_LBA48)
		args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_IN_HOB);
	if (req_task->out_flags.all) {
		args.tf_flags |= IDE_TFLAG_FLAGGED;

		if (req_task->out_flags.b.data)
			args.tf_flags |= IDE_TFLAG_OUT_DATA;

		if (req_task->out_flags.b.nsector_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_NSECT;
		if (req_task->out_flags.b.sector_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAL;
		if (req_task->out_flags.b.lcyl_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAM;
		if (req_task->out_flags.b.hcyl_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAH;

		if (req_task->out_flags.b.error_feature)
			args.tf_flags |= IDE_TFLAG_OUT_FEATURE;
		if (req_task->out_flags.b.nsector)
			args.tf_flags |= IDE_TFLAG_OUT_NSECT;
		if (req_task->out_flags.b.sector)
			args.tf_flags |= IDE_TFLAG_OUT_LBAL;
		if (req_task->out_flags.b.lcyl)
			args.tf_flags |= IDE_TFLAG_OUT_LBAM;
		if (req_task->out_flags.b.hcyl)
			args.tf_flags |= IDE_TFLAG_OUT_LBAH;
	} else {
		args.tf_flags |= IDE_TFLAG_OUT_TF;
		if (args.tf_flags & IDE_TFLAG_LBA48)
			args.tf_flags |= IDE_TFLAG_OUT_HOB;
	}

	if (req_task->in_flags.b.data)
		args.tf_flags |= IDE_TFLAG_IN_DATA;
	switch(req_task->data_phase) {
	case TASKFILE_MULTI_OUT:
		if (!drive->mult_count) {
			/* (hs): give up if multcount is not set */
			printk(KERN_ERR "%s: %s Multimode Write " \
					"multcount is not set\n",
					drive->name, __func__);
			err = -EPERM;
			goto abort;
		}
		/* fall through */
	case TASKFILE_OUT:
		/* fall through */
	case TASKFILE_OUT_DMAQ:
	case TASKFILE_OUT_DMA:
		nsect = taskout / SECTOR_SIZE;
		data_buf = outbuf;
		break;
	case TASKFILE_MULTI_IN:
		if (!drive->mult_count) {
			/* (hs): give up if multcount is not set */
			printk(KERN_ERR "%s: %s Multimode Read failure " \
					"multcount is not set\n",
					drive->name, __func__);
			err = -EPERM;
			goto abort;
		}
		/* fall through */
	case TASKFILE_IN:
		/* fall through */
	case TASKFILE_IN_DMAQ:
	case TASKFILE_IN_DMA:
		nsect = taskin / SECTOR_SIZE;
		data_buf = inbuf;
		break;
	case TASKFILE_NO_DATA:
		break;
	default:
		err = -EFAULT;
		goto abort;
	}

	if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA)
		nsect = 0;
	else if (!nsect) {
		nsect = (args.tf.hob_nsect << 8) | args.tf.nsect;

		if (!nsect) {
			printk(KERN_ERR "%s: in/out command without data\n",
					drive->name);
			err = -EFAULT;
			goto abort;
		}
	}
	if (req_task->req_cmd == IDE_DRIVE_TASK_RAW_WRITE)
		args.tf_flags |= IDE_TFLAG_WRITE;

	err = ide_raw_taskfile(drive, &args, data_buf, nsect);

	memcpy(req_task->hob_ports, &args.tf_array[0], HDIO_DRIVE_HOB_HDR_SIZE - 2);
	memcpy(req_task->io_ports, &args.tf_array[6], HDIO_DRIVE_TASK_HDR_SIZE);
	if ((args.tf_flags & IDE_TFLAG_FLAGGED_SET_IN_FLAGS) &&
	    req_task->in_flags.all == 0) {
		req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
		if (drive->dev_flags & IDE_DFLAG_LBA48)
			req_task->in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
	}

	if (copy_to_user(buf, req_task, tasksize)) {
		err = -EFAULT;
		goto abort;
	}
	if (taskout) {
		int outtotal = tasksize;
		if (copy_to_user(buf + outtotal, outbuf, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}
	if (taskin) {
		int intotal = tasksize + taskout;
		if (copy_to_user(buf + intotal, inbuf, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}
abort:
	kfree(req_task);
	kfree(outbuf);
	kfree(inbuf);

//	printk("IDE Taskfile ioctl ended. rc = %i\n", err);

	return err;
}
#endif