/*
 *  linux/drivers/ide/ide-taskfile.c	Version 0.38	March 05, 2003
 *
 *  Copyright (C) 2000-2002	Michael Cornwell <cornwell@acm.org>
 *  Copyright (C) 2000-2002	Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2001-2002	Klaus Smolin
 *					IBM Storage Technology Division
 *  Copyright (C) 2003-2004	Bartlomiej Zolnierkiewicz
 *
 *  The big, the bad and the ugly.
 *
 *  Problems to be fixed because of BH interface or the lack thereof.
 *
 *  Fill me in stupid !!!
 *
 *  HOST:
 *	General refers to the Controller and Driver "pair".
 *  DATA HANDLER:
 *	Under the context of Linux it generally refers to an interrupt handler.
 *	However, it correctly describes the 'HOST'.
 *  DATA BLOCK:
 *	The amount of data needed to be transferred as predefined in the
 *	setup of the device.
 *  STORAGE ATOMIC:
 *	The 'DATA BLOCK' associated to the 'DATA HANDLER', and can be as
 *	small as a single sector or as large as the entire command block
 *	request.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>
static void ata_bswap_data (void *buffer, int wcount)
{
	u16 *p = buffer;

	while (wcount--) {
		*p = *p << 8 | *p >> 8; p++;
		*p = *p << 8 | *p >> 8; p++;
	}
}
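/*
 * Illustrative note (not in the original source): wcount counts 32-bit
 * words, so each pass of the loop above swaps the bytes of the two 16-bit
 * halves of one word; a half-word fetched as 0x3412 is stored as 0x1234.
 * The helper below is a hypothetical sketch of the per-word operation.
 */
static inline u16 ata_example_swab16(u16 w)
{
	return w << 8 | w >> 8;	/* same expression the loop applies to each 16-bit word */
}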
static void taskfile_input_data(ide_drive_t *drive, void *buffer, u32 wcount)
{
	HWIF(drive)->ata_input_data(drive, buffer, wcount);
	if (drive->bswap)
		ata_bswap_data(buffer, wcount);
}
static void taskfile_output_data(ide_drive_t *drive, void *buffer, u32 wcount)
{
	if (drive->bswap) {
		ata_bswap_data(buffer, wcount);
		HWIF(drive)->ata_output_data(drive, buffer, wcount);
		ata_bswap_data(buffer, wcount);
	} else {
		HWIF(drive)->ata_output_data(drive, buffer, wcount);
	}
}
int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
{
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.tfRegister[IDE_NSECTOR_OFFSET] = 0x01;
	if (drive->media == ide_disk)
		args.tfRegister[IDE_COMMAND_OFFSET] = WIN_IDENTIFY;
	else
		args.tfRegister[IDE_COMMAND_OFFSET] = WIN_PIDENTIFY;
	args.command_type = IDE_DRIVE_TASK_IN;
	args.data_phase   = TASKFILE_IN;
	args.handler      = &task_in_intr;
	return ide_raw_taskfile(drive, &args, buf);
}
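/*
 * Usage sketch (illustrative, not part of the original driver): a caller
 * supplies a 512-byte buffer and gets the raw IDENTIFY (or ATAPI IDENTIFY
 * PACKET) page back through the PIO-in path set up above.  The function
 * name below is hypothetical.
 */
#if 0
static int example_fetch_identify(ide_drive_t *drive)
{
	u8 buf[SECTOR_WORDS * 4];	/* 128 32-bit words == 512 bytes */

	if (taskfile_lib_get_identify(drive, buf))
		return -EIO;
	/* buf[] now holds the 256 identify words, word 0 at buf[0..1] */
	return 0;
}
#endif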
ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif	= HWIF(drive);
	task_struct_t *taskfile	= (task_struct_t *) task->tfRegister;
	hob_struct_t *hobfile	= (hob_struct_t *) task->hobRegister;
	u8 HIHI			= (drive->addressing == 1) ? 0xE0 : 0xEF;

	/* ALL Command Block Executions SHALL clear nIEN, unless otherwise */
	if (IDE_CONTROL_REG) {
		/* clear nIEN */
		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
	}
	SELECT_MASK(drive, 0);

	if (drive->addressing == 1) {
		hwif->OUTB(hobfile->feature, IDE_FEATURE_REG);
		hwif->OUTB(hobfile->sector_count, IDE_NSECTOR_REG);
		hwif->OUTB(hobfile->sector_number, IDE_SECTOR_REG);
		hwif->OUTB(hobfile->low_cylinder, IDE_LCYL_REG);
		hwif->OUTB(hobfile->high_cylinder, IDE_HCYL_REG);
	}

	hwif->OUTB(taskfile->feature, IDE_FEATURE_REG);
	hwif->OUTB(taskfile->sector_count, IDE_NSECTOR_REG);
	hwif->OUTB(taskfile->sector_number, IDE_SECTOR_REG);
	hwif->OUTB(taskfile->low_cylinder, IDE_LCYL_REG);
	hwif->OUTB(taskfile->high_cylinder, IDE_HCYL_REG);

	hwif->OUTB((taskfile->device_head & HIHI) | drive->select.all, IDE_SELECT_REG);

	if (task->handler != NULL) {
		if (task->prehandler != NULL) {
			hwif->OUTBSYNC(drive, taskfile->command, IDE_COMMAND_REG);
			ndelay(400);	/* FIXME */
			return task->prehandler(drive, task->rq);
		}
		ide_execute_command(drive, taskfile->command, task->handler, WAIT_WORSTCASE, NULL);
		return ide_started;
	}

	if (!drive->using_dma)
		return ide_stopped;

	switch (taskfile->command) {
		case WIN_WRITEDMA_ONCE:
		case WIN_WRITEDMA:
		case WIN_WRITEDMA_EXT:
		case WIN_READDMA_ONCE:
		case WIN_READDMA:
		case WIN_READDMA_EXT:
		case WIN_IDENTIFY_DMA:
			if (!hwif->dma_setup(drive)) {
				hwif->dma_exec_cmd(drive, taskfile->command);
				hwif->dma_start(drive);
				return ide_started;
			}
			break;
		default:
			if (task->handler == NULL)
				return ide_stopped;
	}

	return ide_stopped;
}
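/*
 * Worked example (illustrative note, not in the original source): both HIHI
 * masks clear bit 4 of device_head, the drive-select bit, which is then
 * re-supplied from drive->select.all.  With 28-bit/CHS addressing (0xEF)
 * bits 3-0 are kept, so a device_head of 0x4A stays 0x4A and preserves LBA
 * bits 27-24; with LBA48 (0xE0) the low nibble is cleared as well
 * (0x4A & 0xE0 == 0x40), since the high LBA bits travel through the hob
 * registers written just above.
 */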
/*
 * set_multmode_intr() is invoked on completion of a WIN_SETMULT cmd.
 */
ide_startstop_t set_multmode_intr (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 stat;

	if (OK_STAT(stat = hwif->INB(IDE_STATUS_REG),READY_STAT,BAD_STAT)) {
		drive->mult_count = drive->mult_req;
	} else {
		drive->mult_req = drive->mult_count = 0;
		drive->special.b.recalibrate = 1;
		(void) ide_dump_status(drive, "set_multmode", stat);
	}
	return ide_stopped;
}
/*
 * set_geometry_intr() is invoked on completion of a WIN_SPECIFY cmd.
 */
ide_startstop_t set_geometry_intr (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	int retries = 5;
	u8 stat;

	while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
		udelay(10);

	if (OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_stopped;

	if (stat & (ERR_STAT|DRQ_STAT))
		return ide_error(drive, "set_geometry_intr", stat);

	if (HWGROUP(drive)->handler != NULL)
		BUG();
	ide_set_handler(drive, &set_geometry_intr, WAIT_WORSTCASE, NULL);
	return ide_started;
}
/*
 * recal_intr() is invoked on completion of a WIN_RESTORE (recalibrate) cmd.
 */
ide_startstop_t recal_intr (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 stat;

	if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG), READY_STAT, BAD_STAT))
		return ide_error(drive, "recal_intr", stat);
	return ide_stopped;
}
/*
 * Handler for commands without a data phase
 */
ide_startstop_t task_no_data_intr (ide_drive_t *drive)
{
	ide_task_t *args	= HWGROUP(drive)->rq->special;
	ide_hwif_t *hwif	= HWIF(drive);
	u8 stat;

	local_irq_enable();
	if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG),READY_STAT,BAD_STAT)) {
		return ide_error(drive, "task_no_data_intr", stat);
		/* calls ide_end_drive_cmd */
	}

	if (args)
		ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));

	return ide_stopped;
}

EXPORT_SYMBOL(task_no_data_intr);
static u8 wait_drive_not_busy(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	int retries = 100;
	u8 stat;

	/*
	 * Last sector was transferred, wait until drive is ready.
	 * This can take up to 10 usec, but we will wait max 1 ms
	 * (drive_cmd_intr() waits that long).
	 */
	while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
		udelay(10);

	if (!retries)
		printk(KERN_ERR "%s: drive still BUSY!\n", drive->name);

	return stat;
}
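/*
 * Added note: with 100 retries of udelay(10) the loop above busy-waits for
 * at most 100 * 10 us = 1 ms, matching the 1 ms bound quoted in the comment;
 * the typical case (~10 us) completes within the first pass.
 */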
static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;
	struct page *page;
#ifdef CONFIG_HIGHMEM
	unsigned long flags;
#endif
	unsigned int offset;
	u8 *buf;

	page = sg[hwif->cursg].page;
	offset = sg[hwif->cursg].offset + hwif->cursg_ofs * SECTOR_SIZE;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

#ifdef CONFIG_HIGHMEM
	local_irq_save(flags);
#endif
	buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;

	hwif->nleft--;
	hwif->cursg_ofs++;

	if ((hwif->cursg_ofs * SECTOR_SIZE) == sg[hwif->cursg].length) {
		hwif->cursg++;
		hwif->cursg_ofs = 0;
	}

	/* do the actual data transfer */
	if (write)
		taskfile_output_data(drive, buf, SECTOR_WORDS);
	else
		taskfile_input_data(drive, buf, SECTOR_WORDS);

	kunmap_atomic(buf, KM_BIO_SRC_IRQ);
#ifdef CONFIG_HIGHMEM
	local_irq_restore(flags);
#endif
}
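/*
 * Worked example (added note): with SECTOR_SIZE 512 and 4 KiB pages, a
 * segment starting at in-page offset 1024 with cursg_ofs == 7 gives
 * offset = 1024 + 7 * 512 = 4608; nth_page() then advances one page
 * (4608 >> PAGE_SHIFT == 1) and offset %= PAGE_SIZE leaves 512, so the
 * kmap_atomic() mapping lands on the right sector even when a segment
 * crosses a page boundary.
 */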
static void ide_pio_multi(ide_drive_t *drive, unsigned int write)
{
	unsigned int nsect;

	nsect = min_t(unsigned int, drive->hwif->nleft, drive->mult_count);
	while (nsect--)
		ide_pio_sector(drive, write);
}
static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
			      unsigned int write)
{
	if (rq->bio)	/* fs request */
		rq->errors = 0;

	touch_softlockup_watchdog();

	switch (drive->hwif->data_phase) {
	case TASKFILE_MULTI_IN:
	case TASKFILE_MULTI_OUT:
		ide_pio_multi(drive, write);
		break;
	default:
		ide_pio_sector(drive, write);
		break;
	}
}
static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
				  const char *s, u8 stat)
{
	if (rq->bio) {
		ide_hwif_t *hwif = drive->hwif;
		int sectors = hwif->nsect - hwif->nleft;

		switch (hwif->data_phase) {
		case TASKFILE_IN:
			if (hwif->nleft)
				break;
			/* fall through */
		case TASKFILE_OUT:
			sectors--;
			break;
		case TASKFILE_MULTI_IN:
			if (hwif->nleft)
				break;
			/* fall through */
		case TASKFILE_MULTI_OUT:
			sectors -= drive->mult_count;
		default:
			break;
		}

		if (sectors > 0) {
			ide_driver_t *drv;

			drv = *(ide_driver_t **)rq->rq_disk->private_data;
			drv->end_request(drive, 1, sectors);
		}
	}
	return ide_error(drive, s, stat);
}
static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
{
	if (rq->flags & REQ_DRIVE_TASKFILE) {
		ide_task_t *task = rq->special;

		if (task->tf_out_flags.all) {
			u8 err = drive->hwif->INB(IDE_ERROR_REG);
			ide_end_drive_cmd(drive, stat, err);
			return;
		}
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		drv->end_request(drive, 1, rq->hard_nr_sectors);
	} else
		ide_end_request(drive, 1, rq->hard_nr_sectors);
}
/*
 * Handler for command with PIO data-in phase (Read/Read Multiple).
 */
ide_startstop_t task_in_intr (ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	u8 stat = hwif->INB(IDE_STATUS_REG);

	/* new way for dealing with premature shared PCI interrupts */
	if (!OK_STAT(stat, DATA_READY, BAD_R_STAT)) {
		if (stat & (ERR_STAT | DRQ_STAT))
			return task_error(drive, rq, __FUNCTION__, stat);
		/* No data yet, so wait for another IRQ. */
		ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
		return ide_started;
	}

	ide_pio_datablock(drive, rq, 0);

	/* If it was the last datablock check status and finish transfer. */
	if (!hwif->nleft) {
		stat = wait_drive_not_busy(drive);
		if (!OK_STAT(stat, 0, BAD_R_STAT))
			return task_error(drive, rq, __FUNCTION__, stat);
		task_end_request(drive, rq, stat);
		return ide_stopped;
	}

	/* Still data left to transfer. */
	ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);

	return ide_started;
}
EXPORT_SYMBOL(task_in_intr);
/*
 * Handler for command with PIO data-out phase (Write/Write Multiple).
 */
static ide_startstop_t task_out_intr (ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	u8 stat = hwif->INB(IDE_STATUS_REG);

	if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
		return task_error(drive, rq, __FUNCTION__, stat);

	/* Deal with unexpected ATA data phase. */
	if (((stat & DRQ_STAT) == 0) ^ !hwif->nleft)
		return task_error(drive, rq, __FUNCTION__, stat);

	if (!hwif->nleft) {
		task_end_request(drive, rq, stat);
		return ide_stopped;
	}

	/* Still data left to transfer. */
	ide_pio_datablock(drive, rq, 1);
	ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);

	return ide_started;
}
ide_startstop_t pre_task_out_intr (ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

	if (ide_wait_stat(&startstop, drive, DATA_READY,
			  drive->bad_wstat, WAIT_DRQ)) {
		printk(KERN_ERR "%s: no DRQ after issuing %sWRITE%s\n",
				drive->name,
				drive->hwif->data_phase ? "MULT" : "",
				drive->addressing ? "_EXT" : "");
		return startstop;
	}

	if (!drive->unmask)
		local_irq_disable();

	ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);
	ide_pio_datablock(drive, rq, 1);

	return ide_started;
}
EXPORT_SYMBOL(pre_task_out_intr);
static int ide_diag_taskfile(ide_drive_t *drive, ide_task_t *args, unsigned long data_size, u8 *buf)
{
	struct request rq;

	memset(&rq, 0, sizeof(rq));
	rq.flags = REQ_DRIVE_TASKFILE;
	rq.buffer = buf;

	/*
	 * (ks) We transfer currently only whole sectors.
	 * This is sufficient for now, but it would be great if we could
	 * find a solution to transfer any size, to support special
	 * commands like READ LONG.
	 */
	if (args->command_type != IDE_DRIVE_TASK_NO_DATA) {
		if (data_size == 0)
			rq.nr_sectors = (args->hobRegister[IDE_NSECTOR_OFFSET] << 8) | args->tfRegister[IDE_NSECTOR_OFFSET];
		else
			rq.nr_sectors = data_size / SECTOR_SIZE;

		if (!rq.nr_sectors) {
			printk(KERN_ERR "%s: in/out command without data\n",
					drive->name);
			return -EFAULT;
		}

		rq.hard_nr_sectors = rq.nr_sectors;
		rq.hard_cur_sectors = rq.current_nr_sectors = rq.nr_sectors;

		if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
			rq.flags |= REQ_RW;
	}

	rq.special = args;
	args->rq = &rq;
	return ide_do_drive_cmd(drive, &rq, ide_wait);
}
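/*
 * Worked example (added note): when no explicit data_size is given, the
 * sector count is taken from the taskfile LBA48-style:
 *	nr_sectors = (hob_nsect << 8) | nsect
 * e.g. hobRegister[IDE_NSECTOR_OFFSET] = 0x01 and
 * tfRegister[IDE_NSECTOR_OFFSET] = 0x00 yields 0x100 = 256 sectors,
 * i.e. 256 * 512 = 128 KiB, transferred in whole sectors only.
 */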
int ide_raw_taskfile (ide_drive_t *drive, ide_task_t *args, u8 *buf)
{
	return ide_diag_taskfile(drive, args, 0, buf);
}

EXPORT_SYMBOL(ide_raw_taskfile);
int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	ide_task_request_t	*req_task;
	ide_task_t		args;
	u8 *outbuf		= NULL;
	u8 *inbuf		= NULL;
	task_ioreg_t *argsptr	= args.tfRegister;
	task_ioreg_t *hobsptr	= args.hobRegister;
	int err			= 0;
	int tasksize		= sizeof(struct ide_task_request_s);
	int taskin		= 0;
	int taskout		= 0;
	u8 io_32bit		= drive->io_32bit;
	char __user *buf	= (char __user *)arg;

//	printk("IDE Taskfile ...\n");

	req_task = kzalloc(tasksize, GFP_KERNEL);
	if (req_task == NULL) return -ENOMEM;
	if (copy_from_user(req_task, buf, tasksize)) {
		kfree(req_task);
		return -EFAULT;
	}

	taskout = (int) req_task->out_size;
	taskin  = (int) req_task->in_size;

	if (taskout) {
		int outtotal = tasksize;
		outbuf = kzalloc(taskout, GFP_KERNEL);
		if (outbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(outbuf, buf + outtotal, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}

	if (taskin) {
		int intotal = tasksize + taskout;
		inbuf = kzalloc(taskin, GFP_KERNEL);
		if (inbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(inbuf, buf + intotal, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}

	memset(&args, 0, sizeof(ide_task_t));
	memcpy(argsptr, req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);
	memcpy(hobsptr, req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE);

	args.tf_in_flags  = req_task->in_flags;
	args.tf_out_flags = req_task->out_flags;
	args.data_phase   = req_task->data_phase;
	args.command_type = req_task->req_cmd;

	drive->io_32bit = 0;
	switch(req_task->data_phase) {
		case TASKFILE_OUT_DMAQ:
		case TASKFILE_OUT_DMA:
			err = ide_diag_taskfile(drive, &args, taskout, outbuf);
			break;
		case TASKFILE_IN_DMAQ:
		case TASKFILE_IN_DMA:
			err = ide_diag_taskfile(drive, &args, taskin, inbuf);
			break;
		case TASKFILE_MULTI_OUT:
			if (!drive->mult_count) {
				/* (hs): give up if multcount is not set */
				printk(KERN_ERR "%s: %s Multimode Write " \
					"multcount is not set\n",
					drive->name, __FUNCTION__);
				err = -EPERM;
				goto abort;
			}
			/* fall through */
		case TASKFILE_OUT:
			args.prehandler = &pre_task_out_intr;
			args.handler = &task_out_intr;
			err = ide_diag_taskfile(drive, &args, taskout, outbuf);
			break;
		case TASKFILE_MULTI_IN:
			if (!drive->mult_count) {
				/* (hs): give up if multcount is not set */
				printk(KERN_ERR "%s: %s Multimode Read failure " \
					"multcount is not set\n",
					drive->name, __FUNCTION__);
				err = -EPERM;
				goto abort;
			}
			/* fall through */
		case TASKFILE_IN:
			args.handler = &task_in_intr;
			err = ide_diag_taskfile(drive, &args, taskin, inbuf);
			break;
		case TASKFILE_NO_DATA:
			args.handler = &task_no_data_intr;
			err = ide_diag_taskfile(drive, &args, 0, NULL);
			break;
		default:
			err = -EFAULT;
			goto abort;
	}

	memcpy(req_task->io_ports, &(args.tfRegister), HDIO_DRIVE_TASK_HDR_SIZE);
	memcpy(req_task->hob_ports, &(args.hobRegister), HDIO_DRIVE_HOB_HDR_SIZE);
	req_task->in_flags  = args.tf_in_flags;
	req_task->out_flags = args.tf_out_flags;

	if (copy_to_user(buf, req_task, tasksize)) {
		err = -EFAULT;
		goto abort;
	}
	if (taskout) {
		int outtotal = tasksize;
		if (copy_to_user(buf + outtotal, outbuf, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}
	if (taskin) {
		int intotal = tasksize + taskout;
		if (copy_to_user(buf + intotal, inbuf, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}
abort:
	kfree(req_task);
	kfree(outbuf);
	kfree(inbuf);

//	printk("IDE Taskfile ioctl ended. rc = %i\n", err);

	drive->io_32bit = io_32bit;

	return err;
}
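/*
 * Userspace-side sketch (illustrative only, would not build in this file):
 * the HDIO_DRIVE_TASKFILE argument is one flat buffer, laid out exactly as
 * the copy_{from,to}_user() offsets above imply:
 *
 *	[ ide_task_request_t header | out_size write data | in_size read data ]
 */
#if 0
	ide_task_request_t *req;
	unsigned char *data;

	req = calloc(1, sizeof(*req) + 512);	/* header + one sector of read data */
	req->data_phase = TASKFILE_IN;
	req->req_cmd    = IDE_DRIVE_TASK_IN;
	req->in_size    = 512;			/* read data follows the header */
	/* fill req->io_ports[] with the taskfile registers to send ... */
	if (ioctl(fd, HDIO_DRIVE_TASKFILE, req) == 0)
		data = (unsigned char *)req + sizeof(*req);	/* returned sector */
#endif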
int ide_wait_cmd (ide_drive_t *drive, u8 cmd, u8 nsect, u8 feature, u8 sectors, u8 *buf)
{
	struct request rq;
	u8 buffer[4];

	if (!buf)
		buf = buffer;
	memset(buf, 0, 4 + SECTOR_WORDS * 4 * sectors);
	ide_init_drive_cmd(&rq);
	rq.buffer = buf;
	*buf++ = cmd;
	*buf++ = nsect;
	*buf++ = feature;
	*buf++ = sectors;
	return ide_do_drive_cmd(drive, &rq, ide_wait);
}
/*
 * FIXME : this needs to map into a taskfile. <andre@linux-ide.org>
 */
int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	int err = 0;
	u8 args[4], *argbuf = args;
	u8 xfer_rate = 0;
	int argsize = 4;
	ide_task_t tfargs;

	if (NULL == (void *) arg) {
		struct request rq;
		ide_init_drive_cmd(&rq);
		return ide_do_drive_cmd(drive, &rq, ide_wait);
	}

	if (copy_from_user(args, (void __user *)arg, 4))
		return -EFAULT;

	memset(&tfargs, 0, sizeof(ide_task_t));
	tfargs.tfRegister[IDE_FEATURE_OFFSET] = args[2];
	tfargs.tfRegister[IDE_NSECTOR_OFFSET] = args[3];
	tfargs.tfRegister[IDE_SECTOR_OFFSET]  = args[1];
	tfargs.tfRegister[IDE_LCYL_OFFSET]    = 0x00;
	tfargs.tfRegister[IDE_HCYL_OFFSET]    = 0x00;
	tfargs.tfRegister[IDE_SELECT_OFFSET]  = 0x00;
	tfargs.tfRegister[IDE_COMMAND_OFFSET] = args[0];

	if (args[3]) {
		argsize = 4 + (SECTOR_WORDS * 4 * args[3]);
		argbuf = kzalloc(argsize, GFP_KERNEL);
		if (argbuf == NULL)
			return -ENOMEM;
	}
	if (set_transfer(drive, &tfargs)) {
		xfer_rate = args[1];
		if (ide_ata66_check(drive, &tfargs))
			goto abort;
	}

	err = ide_wait_cmd(drive, args[0], args[1], args[2], args[3], argbuf);

	if (!err && xfer_rate) {
		/* active-retuning-calls future */
		ide_set_xfer_rate(drive, xfer_rate);
		ide_driveid_update(drive);
	}
abort:
	if (copy_to_user((void __user *)arg, argbuf, argsize))
		err = -EFAULT;
	if (argsize > 4)
		kfree(argbuf);
	return err;
}
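/*
 * Userspace-side sketch (illustrative only): as the register setup above
 * shows, the HDIO_DRIVE_CMD buffer starts with four bytes -- command,
 * sector-number parameter, feature, sector count -- and the drive's data
 * (args[3] sectors) is copied back right after them.  For example, pulling
 * the IDENTIFY page:
 */
#if 0
	unsigned char args[4 + 512] = { WIN_IDENTIFY, 0, 0, 1 };

	if (ioctl(fd, HDIO_DRIVE_CMD, args) == 0)
		/* args[4..515] now holds the 512-byte IDENTIFY data */;
#endif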
static int ide_wait_cmd_task(ide_drive_t *drive, u8 *buf)
{
	struct request rq;

	ide_init_drive_cmd(&rq);
	rq.flags = REQ_DRIVE_TASK;
	rq.buffer = buf;
	return ide_do_drive_cmd(drive, &rq, ide_wait);
}

/*
 * FIXME : this needs to map into a taskfile. <andre@linux-ide.org>
 */
int ide_task_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	void __user *p = (void __user *)arg;
	int err = 0;
	u8 args[7], *argbuf = args;
	int argsize = 7;

	if (copy_from_user(args, p, 7))
		return -EFAULT;
	err = ide_wait_cmd_task(drive, argbuf);
	if (copy_to_user(p, argbuf, argsize))
		err = -EFAULT;
	return err;
}
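/*
 * Added note (hedged): the seven bytes shuttled here are interpreted by the
 * REQ_DRIVE_TASK execution path outside this file; they are commonly
 * documented as command, feature, nsector, sector, lcyl, hcyl and select,
 * and are overwritten with the completion registers on return.  A
 * userspace-side sketch, assuming that layout:
 */
#if 0
	unsigned char task[7] = { WIN_SMART, SMART_ENABLE, 0, 0,
				  SMART_LCYL_PASS, SMART_HCYL_PASS, 0 };

	ioctl(fd, HDIO_DRIVE_TASK, task);	/* task[] now holds status/error etc. */
#endif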
/*
 * NOTICE: These are additions from IBM to provide a discrete interface
 * for selective taskregister access operations.  Nice JOB Klaus!!!
 * Glad to be able to work and co-develop this with you and IBM.
 */
ide_startstop_t flagged_taskfile (ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif	= HWIF(drive);
	task_struct_t *taskfile	= (task_struct_t *) task->tfRegister;
	hob_struct_t *hobfile	= (hob_struct_t *) task->hobRegister;

	if (task->data_phase == TASKFILE_MULTI_IN ||
	    task->data_phase == TASKFILE_MULTI_OUT) {
		if (!drive->mult_count) {
			printk(KERN_ERR "%s: multimode not set!\n", drive->name);
			return ide_stopped;
		}
	}

	/*
	 * (ks) Check taskfile in flags.
	 * If set, then execute as it is defined.
	 * If not set, then define default settings.
	 * The default values are:
	 *	read all taskfile registers (except data)
	 *	read the hob registers (sector, nsector, lcyl, hcyl)
	 */
	if (task->tf_in_flags.all == 0) {
		task->tf_in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
		if (drive->addressing == 1)
			task->tf_in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
	}

	/* ALL Command Block Executions SHALL clear nIEN, unless otherwise */
	if (IDE_CONTROL_REG)
		/* clear nIEN */
		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
	SELECT_MASK(drive, 0);

	if (task->tf_out_flags.b.data) {
		u16 data = taskfile->data + (hobfile->data << 8);
		hwif->OUTW(data, IDE_DATA_REG);
	}

	/* (ks) send hob registers first */
	if (task->tf_out_flags.b.nsector_hob)
		hwif->OUTB(hobfile->sector_count, IDE_NSECTOR_REG);
	if (task->tf_out_flags.b.sector_hob)
		hwif->OUTB(hobfile->sector_number, IDE_SECTOR_REG);
	if (task->tf_out_flags.b.lcyl_hob)
		hwif->OUTB(hobfile->low_cylinder, IDE_LCYL_REG);
	if (task->tf_out_flags.b.hcyl_hob)
		hwif->OUTB(hobfile->high_cylinder, IDE_HCYL_REG);

	/* (ks) Send now the standard registers */
	if (task->tf_out_flags.b.error_feature)
		hwif->OUTB(taskfile->feature, IDE_FEATURE_REG);
	/* refers to number of sectors to transfer */
	if (task->tf_out_flags.b.nsector)
		hwif->OUTB(taskfile->sector_count, IDE_NSECTOR_REG);
	/* refers to sector offset or start sector */
	if (task->tf_out_flags.b.sector)
		hwif->OUTB(taskfile->sector_number, IDE_SECTOR_REG);
	if (task->tf_out_flags.b.lcyl)
		hwif->OUTB(taskfile->low_cylinder, IDE_LCYL_REG);
	if (task->tf_out_flags.b.hcyl)
		hwif->OUTB(taskfile->high_cylinder, IDE_HCYL_REG);

	/*
	 * (ks) In the flagged taskfile approach, we will use all specified
	 * registers and the register value will not be changed, except the
	 * select bit (master/slave) in the drive_head register.  We must make
	 * sure that the desired drive is selected.
	 */
	hwif->OUTB(taskfile->device_head | drive->select.all, IDE_SELECT_REG);
	switch(task->data_phase) {

		case TASKFILE_OUT_DMAQ:
		case TASKFILE_OUT_DMA:
		case TASKFILE_IN_DMAQ:
		case TASKFILE_IN_DMA:
			hwif->dma_setup(drive);
			hwif->dma_exec_cmd(drive, taskfile->command);
			hwif->dma_start(drive);
			return ide_started;

		default:
			if (task->handler == NULL)
				return ide_stopped;

			/* Issue the command */
			if (task->prehandler) {
				hwif->OUTBSYNC(drive, taskfile->command, IDE_COMMAND_REG);
				ndelay(400);	/* FIXME */
				return task->prehandler(drive, task->rq);
			}
			ide_execute_command(drive, taskfile->command, task->handler, WAIT_WORSTCASE, NULL);
			return ide_started;
	}
}
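/*
 * Illustrative sketch (not part of the original file), assuming the request
 * execution path dispatches an ide_task_t whose tf_out_flags.all is non-zero
 * to flagged_taskfile(): only the registers whose out-flags are set get
 * written, everything else on the drive is left untouched.  The helper name
 * below is hypothetical.
 */
#if 0
static int example_flagged_nop(ide_drive_t *drive)
{
	ide_task_t args;

	memset(&args, 0, sizeof(args));
	args.tfRegister[IDE_COMMAND_OFFSET] = WIN_NOP;
	args.tf_out_flags.b.nsector = 1;	/* write only the nsector register */
	args.command_type = IDE_DRIVE_TASK_NO_DATA;
	args.data_phase   = TASKFILE_NO_DATA;
	args.handler      = &task_no_data_intr;
	return ide_raw_taskfile(drive, &args, NULL);
}
#endif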