/*
 *  linux/drivers/ide/ide-taskfile.c	Version 0.38	March 05, 2003
 *
 *  Copyright (C) 2000-2002	Michael Cornwell <cornwell@acm.org>
 *  Copyright (C) 2000-2002	Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2001-2002	Klaus Smolin
 *					IBM Storage Technology Division
 *  Copyright (C) 2003-2004	Bartlomiej Zolnierkiewicz
 *
 *  The big, the bad and the ugly.
 *
 *  Problems to be fixed because of BH interface or the lack thereof.
 *
 *  Fill me in stupid !!!
 *
 *  HOST:
 *	General refers to the Controller and Driver "pair".
 *  DATA HANDLER:
 *	Under the context of Linux it generally refers to an interrupt handler.
 *	However, it correctly describes the 'HOST'.
 *  DATA BLOCK:
 *	The amount of data needed to be transferred as predefined in the
 *	setup of the device.
 *  STORAGE ATOMIC:
 *	The 'DATA BLOCK' associated to the 'DATA HANDLER', and can be as
 *	small as a single sector or as large as the entire command block
 *	request.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>

static void ata_bswap_data (void *buffer, int wcount)
{
	u16 *p = buffer;

	while (wcount--) {
		*p = *p << 8 | *p >> 8; p++;
		*p = *p << 8 | *p >> 8; p++;
	}
}

static void taskfile_input_data(ide_drive_t *drive, void *buffer, u32 wcount)
{
	HWIF(drive)->ata_input_data(drive, buffer, wcount);
	if (drive->bswap)
		ata_bswap_data(buffer, wcount);
}

static void taskfile_output_data(ide_drive_t *drive, void *buffer, u32 wcount)
{
	if (drive->bswap) {
		ata_bswap_data(buffer, wcount);
		HWIF(drive)->ata_output_data(drive, buffer, wcount);
		ata_bswap_data(buffer, wcount);
	} else {
		HWIF(drive)->ata_output_data(drive, buffer, wcount);
	}
}

int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
{
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.tfRegister[IDE_NSECTOR_OFFSET] = 0x01;
	if (drive->media == ide_disk)
		args.tfRegister[IDE_COMMAND_OFFSET] = WIN_IDENTIFY;
	else
		args.tfRegister[IDE_COMMAND_OFFSET] = WIN_PIDENTIFY;
	args.command_type = IDE_DRIVE_TASK_IN;
	args.data_phase   = TASKFILE_IN;
	args.handler      = &task_in_intr;
	return ide_raw_taskfile(drive, &args, buf);
}

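/*
 * do_rw_taskfile() writes the (optional HOB and) standard taskfile
 * registers and issues the command.  When the task supplies a handler,
 * completion is driven by that interrupt handler (optionally via a
 * prehandler for the PIO-out data phase); otherwise, for DMA commands,
 * the DMA engine is set up and started here.
 */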
ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif	= HWIF(drive);
	task_struct_t *taskfile	= (task_struct_t *) task->tfRegister;
	hob_struct_t *hobfile	= (hob_struct_t *) task->hobRegister;
	u8 HIHI			= (drive->addressing == 1) ? 0xE0 : 0xEF;

	/* ALL Command Block Executions SHALL clear nIEN, unless otherwise */
	if (IDE_CONTROL_REG) {
		/* clear nIEN */
		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
	}
	SELECT_MASK(drive, 0);

	if (drive->addressing == 1) {
		hwif->OUTB(hobfile->feature, IDE_FEATURE_REG);
		hwif->OUTB(hobfile->sector_count, IDE_NSECTOR_REG);
		hwif->OUTB(hobfile->sector_number, IDE_SECTOR_REG);
		hwif->OUTB(hobfile->low_cylinder, IDE_LCYL_REG);
		hwif->OUTB(hobfile->high_cylinder, IDE_HCYL_REG);
	}

	hwif->OUTB(taskfile->feature, IDE_FEATURE_REG);
	hwif->OUTB(taskfile->sector_count, IDE_NSECTOR_REG);
	hwif->OUTB(taskfile->sector_number, IDE_SECTOR_REG);
	hwif->OUTB(taskfile->low_cylinder, IDE_LCYL_REG);
	hwif->OUTB(taskfile->high_cylinder, IDE_HCYL_REG);

	hwif->OUTB((taskfile->device_head & HIHI) | drive->select.all, IDE_SELECT_REG);

	if (task->handler != NULL) {
		if (task->prehandler != NULL) {
			hwif->OUTBSYNC(drive, taskfile->command, IDE_COMMAND_REG);
			ndelay(400);	/* FIXME */
			return task->prehandler(drive, task->rq);
		}
		ide_execute_command(drive, taskfile->command, task->handler, WAIT_WORSTCASE, NULL);
		return ide_started;
	}

	if (!drive->using_dma)
		return ide_stopped;

	switch (taskfile->command) {
		case WIN_WRITEDMA_ONCE:
		case WIN_WRITEDMA:
		case WIN_WRITEDMA_EXT:
		case WIN_READDMA_ONCE:
		case WIN_READDMA:
		case WIN_READDMA_EXT:
		case WIN_IDENTIFY_DMA:
			if (!hwif->dma_setup(drive)) {
				hwif->dma_exec_cmd(drive, taskfile->command);
				hwif->dma_start(drive);
				return ide_started;
			}
			break;
		default:
			if (task->handler == NULL)
				return ide_stopped;
	}

	return ide_stopped;
}

/*
 * set_multmode_intr() is invoked on completion of a WIN_SETMULT cmd.
 */
ide_startstop_t set_multmode_intr (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 stat;

	if (OK_STAT(stat = hwif->INB(IDE_STATUS_REG),READY_STAT,BAD_STAT)) {
		drive->mult_count = drive->mult_req;
	} else {
		drive->mult_req = drive->mult_count = 0;
		drive->special.b.recalibrate = 1;
		(void) ide_dump_status(drive, "set_multmode", stat);
	}
	return ide_stopped;
}

/*
 * set_geometry_intr() is invoked on completion of a WIN_SPECIFY cmd.
 */
ide_startstop_t set_geometry_intr (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	int retries = 5;
	u8 stat;

	while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
		udelay(10);

	if (OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_stopped;

	if (stat & (ERR_STAT|DRQ_STAT))
		return ide_error(drive, "set_geometry_intr", stat);

	if (HWGROUP(drive)->handler != NULL)
		BUG();
	ide_set_handler(drive, &set_geometry_intr, WAIT_WORSTCASE, NULL);
	return ide_started;
}

/*
 * recal_intr() is invoked on completion of a WIN_RESTORE (recalibrate) cmd.
 */
ide_startstop_t recal_intr (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 stat;

	if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG), READY_STAT, BAD_STAT))
		return ide_error(drive, "recal_intr", stat);
	return ide_stopped;
}

/*
 * Handler for commands without a data phase
 */
ide_startstop_t task_no_data_intr (ide_drive_t *drive)
{
	ide_task_t *args	= HWGROUP(drive)->rq->special;
	ide_hwif_t *hwif	= HWIF(drive);
	u8 stat;

	local_irq_enable();
	if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG),READY_STAT,BAD_STAT)) {
		return ide_error(drive, "task_no_data_intr", stat);
		/* calls ide_end_drive_cmd */
	}

	if (args)
		ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));

	return ide_stopped;
}

EXPORT_SYMBOL(task_no_data_intr);

static u8 wait_drive_not_busy(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	int retries = 100;
	u8 stat;

	/*
	 * Last sector was transferred, wait until drive is ready.
	 * This can take up to 10 usec, but we will wait max 1 ms
	 * (drive_cmd_intr() waits that long).
	 */
	while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
		udelay(10);

	if (!retries)
		printk(KERN_ERR "%s: drive still BUSY!\n", drive->name);

	return stat;
}

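/*
 * ide_pio_sector() transfers a single sector by PIO, walking the
 * scatterlist in hwif->sg_table (cursg/cursg_ofs track the current
 * entry and the sector offset within it).  The page is mapped with
 * kmap_atomic() so the transfer can also be done from interrupt
 * context on highmem pages.
 */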
static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;
	struct page *page;
#ifdef CONFIG_HIGHMEM
	unsigned long flags;
#endif
	unsigned int offset;
	u8 *buf;

	page = sg[hwif->cursg].page;
	offset = sg[hwif->cursg].offset + hwif->cursg_ofs * SECTOR_SIZE;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

#ifdef CONFIG_HIGHMEM
	local_irq_save(flags);
#endif
	buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;

	hwif->nleft--;
	hwif->cursg_ofs++;

	if ((hwif->cursg_ofs * SECTOR_SIZE) == sg[hwif->cursg].length) {
		hwif->cursg++;
		hwif->cursg_ofs = 0;
	}

	/* do the actual data transfer */
	if (write)
		taskfile_output_data(drive, buf, SECTOR_WORDS);
	else
		taskfile_input_data(drive, buf, SECTOR_WORDS);

	kunmap_atomic(buf, KM_BIO_SRC_IRQ);
#ifdef CONFIG_HIGHMEM
	local_irq_restore(flags);
#endif
}

static void ide_pio_multi(ide_drive_t *drive, unsigned int write)
{
	unsigned int nsect;

	nsect = min_t(unsigned int, drive->hwif->nleft, drive->mult_count);
	while (nsect--)
		ide_pio_sector(drive, write);
}

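/*
 * ide_pio_datablock() moves one "data block": a single sector, or up
 * to mult_count sectors for the Read/Write Multiple data phases.
 */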
static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
			      unsigned int write)
{
	if (rq->bio)	/* fs request */
		rq->errors = 0;

	touch_softlockup_watchdog();

	switch (drive->hwif->data_phase) {
	case TASKFILE_MULTI_IN:
	case TASKFILE_MULTI_OUT:
		ide_pio_multi(drive, write);
		break;
	default:
		ide_pio_sector(drive, write);
		break;
	}
}

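/*
 * task_error() - for fs requests, first complete the sectors that were
 * transferred successfully (adjusted for the current data phase), then
 * let ide_error() handle the failure.
 */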
static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
				  const char *s, u8 stat)
{
	if (rq->bio) {
		ide_hwif_t *hwif = drive->hwif;
		int sectors = hwif->nsect - hwif->nleft;

		switch (hwif->data_phase) {
		case TASKFILE_IN:
			if (hwif->nleft)
				break;
			/* fall through */
		case TASKFILE_OUT:
			sectors--;
			break;
		case TASKFILE_MULTI_IN:
			if (hwif->nleft)
				break;
			/* fall through */
		case TASKFILE_MULTI_OUT:
			sectors -= drive->mult_count;
		default:
			break;
		}

		if (sectors > 0) {
			ide_driver_t *drv;

			drv = *(ide_driver_t **)rq->rq_disk->private_data;
			drv->end_request(drive, 1, sectors);
		}
	}
	return ide_error(drive, s, stat);
}

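/*
 * task_end_request() - complete the request.  Flagged taskfile requests
 * that want the taskfile registers read back are finished through
 * ide_end_drive_cmd() instead of ide_end_request().
 */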
static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
{
	if (rq->flags & REQ_DRIVE_TASKFILE) {
		ide_task_t *task = rq->special;

		if (task->tf_out_flags.all) {
			u8 err = drive->hwif->INB(IDE_ERROR_REG);
			ide_end_drive_cmd(drive, stat, err);
			return;
		}
	}

	ide_end_request(drive, 1, rq->hard_nr_sectors);
}

/*
 * Handler for command with PIO data-in phase (Read/Read Multiple).
 */
ide_startstop_t task_in_intr (ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	u8 stat = hwif->INB(IDE_STATUS_REG);

	/* new way for dealing with premature shared PCI interrupts */
	if (!OK_STAT(stat, DATA_READY, BAD_R_STAT)) {
		if (stat & (ERR_STAT | DRQ_STAT))
			return task_error(drive, rq, __FUNCTION__, stat);
		/* No data yet, so wait for another IRQ. */
		ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
		return ide_started;
	}

	ide_pio_datablock(drive, rq, 0);

	/* If it was the last datablock check status and finish transfer. */
	if (!hwif->nleft) {
		stat = wait_drive_not_busy(drive);
		if (!OK_STAT(stat, 0, BAD_R_STAT))
			return task_error(drive, rq, __FUNCTION__, stat);
		task_end_request(drive, rq, stat);
		return ide_stopped;
	}

	/* Still data left to transfer. */
	ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);

	return ide_started;
}

EXPORT_SYMBOL(task_in_intr);

/*
 * Handler for command with PIO data-out phase (Write/Write Multiple).
 */
static ide_startstop_t task_out_intr (ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	u8 stat = hwif->INB(IDE_STATUS_REG);

	if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
		return task_error(drive, rq, __FUNCTION__, stat);

	/* Deal with unexpected ATA data phase. */
	if (((stat & DRQ_STAT) == 0) ^ !hwif->nleft)
		return task_error(drive, rq, __FUNCTION__, stat);

	if (!hwif->nleft) {
		task_end_request(drive, rq, stat);
		return ide_stopped;
	}

	/* Still data left to transfer. */
	ide_pio_datablock(drive, rq, 1);
	ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);

	return ide_started;
}

ide_startstop_t pre_task_out_intr (ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

	if (ide_wait_stat(&startstop, drive, DATA_READY,
			  drive->bad_wstat, WAIT_DRQ)) {
		printk(KERN_ERR "%s: no DRQ after issuing %sWRITE%s\n",
				drive->name,
				drive->hwif->data_phase ? "MULT" : "",
				drive->addressing ? "_EXT" : "");
		return startstop;
	}

	if (!drive->unmask)
		local_irq_disable();

	ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);
	ide_pio_datablock(drive, rq, 1);

	return ide_started;
}

EXPORT_SYMBOL(pre_task_out_intr);

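/*
 * ide_diag_taskfile() wraps a prepared ide_task_t in a REQ_DRIVE_TASKFILE
 * request and executes it synchronously via ide_do_drive_cmd().  Only
 * whole-sector transfers are supported; the sector count comes from
 * data_size or, if that is zero, from the nsector taskfile registers.
 */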
static int ide_diag_taskfile(ide_drive_t *drive, ide_task_t *args, unsigned long data_size, u8 *buf)
{
	struct request rq;

	memset(&rq, 0, sizeof(rq));
	rq.flags = REQ_DRIVE_TASKFILE;
	rq.buffer = buf;

	/*
	 * (ks) We transfer currently only whole sectors.
	 * This is sufficient for now.  But, it would be great,
	 * if we would find a solution to transfer any size.
	 * To support special commands like READ LONG.
	 */
	if (args->command_type != IDE_DRIVE_TASK_NO_DATA) {
		if (data_size == 0)
			rq.nr_sectors = (args->hobRegister[IDE_NSECTOR_OFFSET] << 8) | args->tfRegister[IDE_NSECTOR_OFFSET];
		else
			rq.nr_sectors = data_size / SECTOR_SIZE;

		if (!rq.nr_sectors) {
			printk(KERN_ERR "%s: in/out command without data\n",
					drive->name);
			return -EFAULT;
		}

		rq.hard_nr_sectors = rq.nr_sectors;
		rq.hard_cur_sectors = rq.current_nr_sectors = rq.nr_sectors;

		if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
			rq.flags |= REQ_RW;
	}

	rq.special = args;
	args->rq = &rq;
	return ide_do_drive_cmd(drive, &rq, ide_wait);
}

int ide_raw_taskfile (ide_drive_t *drive, ide_task_t *args, u8 *buf)
{
	return ide_diag_taskfile(drive, args, 0, buf);
}

EXPORT_SYMBOL(ide_raw_taskfile);

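/*
 * Typical in-kernel use of ide_raw_taskfile() mirrors
 * taskfile_lib_get_identify() above.  As an illustrative sketch (the
 * command opcode here is only an example), a non-data command could be
 * issued as:
 *
 *	ide_task_t args;
 *
 *	memset(&args, 0, sizeof(ide_task_t));
 *	args.tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE;
 *	args.command_type = IDE_DRIVE_TASK_NO_DATA;
 *	args.handler      = &task_no_data_intr;
 *	(void) ide_raw_taskfile(drive, &args, NULL);
 *
 * ide_taskfile_ioctl() below builds the same kind of ide_task_t from a
 * userspace ide_task_request_t (HDIO_DRIVE_TASKFILE); the write and
 * read data buffers follow the request structure in the user buffer.
 */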
int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	ide_task_request_t	*req_task;
	ide_task_t		args;
	u8 *outbuf		= NULL;
	u8 *inbuf		= NULL;
	task_ioreg_t *argsptr	= args.tfRegister;
	task_ioreg_t *hobsptr	= args.hobRegister;
	int err			= 0;
	int tasksize		= sizeof(struct ide_task_request_s);
	int taskin		= 0;
	int taskout		= 0;
	u8 io_32bit		= drive->io_32bit;
	char __user *buf = (char __user *)arg;

//	printk("IDE Taskfile ...\n");

	req_task = kzalloc(tasksize, GFP_KERNEL);
	if (req_task == NULL) return -ENOMEM;
	if (copy_from_user(req_task, buf, tasksize)) {
		kfree(req_task);
		return -EFAULT;
	}

	taskout = (int) req_task->out_size;
	taskin  = (int) req_task->in_size;

	if (taskout) {
		int outtotal = tasksize;
		outbuf = kzalloc(taskout, GFP_KERNEL);
		if (outbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(outbuf, buf + outtotal, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}

	if (taskin) {
		int intotal = tasksize + taskout;
		inbuf = kzalloc(taskin, GFP_KERNEL);
		if (inbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(inbuf, buf + intotal, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}

	memset(&args, 0, sizeof(ide_task_t));
	memcpy(argsptr, req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);
	memcpy(hobsptr, req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE);

	args.tf_in_flags  = req_task->in_flags;
	args.tf_out_flags = req_task->out_flags;
	args.data_phase   = req_task->data_phase;
	args.command_type = req_task->req_cmd;

	drive->io_32bit = 0;
	switch(req_task->data_phase) {
		case TASKFILE_OUT_DMAQ:
		case TASKFILE_OUT_DMA:
			err = ide_diag_taskfile(drive, &args, taskout, outbuf);
			break;
		case TASKFILE_IN_DMAQ:
		case TASKFILE_IN_DMA:
			err = ide_diag_taskfile(drive, &args, taskin, inbuf);
			break;
		case TASKFILE_MULTI_OUT:
			if (!drive->mult_count) {
				/* (hs): give up if multcount is not set */
				printk(KERN_ERR "%s: %s Multimode Write " \
					"multcount is not set\n",
					drive->name, __FUNCTION__);
				err = -EPERM;
				goto abort;
			}
			/* fall through */
		case TASKFILE_OUT:
			args.prehandler = &pre_task_out_intr;
			args.handler = &task_out_intr;
			err = ide_diag_taskfile(drive, &args, taskout, outbuf);
			break;
		case TASKFILE_MULTI_IN:
			if (!drive->mult_count) {
				/* (hs): give up if multcount is not set */
				printk(KERN_ERR "%s: %s Multimode Read failure " \
					"multcount is not set\n",
					drive->name, __FUNCTION__);
				err = -EPERM;
				goto abort;
			}
			/* fall through */
		case TASKFILE_IN:
			args.handler = &task_in_intr;
			err = ide_diag_taskfile(drive, &args, taskin, inbuf);
			break;
		case TASKFILE_NO_DATA:
			args.handler = &task_no_data_intr;
			err = ide_diag_taskfile(drive, &args, 0, NULL);
			break;
		default:
			err = -EFAULT;
			goto abort;
	}

	memcpy(req_task->io_ports, &(args.tfRegister), HDIO_DRIVE_TASK_HDR_SIZE);
	memcpy(req_task->hob_ports, &(args.hobRegister), HDIO_DRIVE_HOB_HDR_SIZE);
	req_task->in_flags  = args.tf_in_flags;
	req_task->out_flags = args.tf_out_flags;

	if (copy_to_user(buf, req_task, tasksize)) {
		err = -EFAULT;
		goto abort;
	}
	if (taskout) {
		int outtotal = tasksize;
		if (copy_to_user(buf + outtotal, outbuf, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}
	if (taskin) {
		int intotal = tasksize + taskout;
		if (copy_to_user(buf + intotal, inbuf, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}
abort:
	kfree(req_task);
	kfree(outbuf);
	kfree(inbuf);

//	printk("IDE Taskfile ioctl ended. rc = %i\n", err);

	drive->io_32bit = io_32bit;

	return err;
}

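/*
 * ide_wait_cmd() packs cmd/nsect/feature/sectors into buf and issues a
 * synchronous legacy drive-command request; the drive_cmd completion
 * path fills buf with the results, which is why buf is cleared for
 * 4 + SECTOR_WORDS * 4 * sectors bytes below.
 */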
int ide_wait_cmd (ide_drive_t *drive, u8 cmd, u8 nsect, u8 feature, u8 sectors, u8 *buf)
{
	struct request rq;
	u8 buffer[4];

	if (!buf)
		buf = buffer;
	memset(buf, 0, 4 + SECTOR_WORDS * 4 * sectors);
	ide_init_drive_cmd(&rq);
	rq.buffer = buf;
	*buf++ = cmd;
	*buf++ = nsect;
	*buf++ = feature;
	*buf++ = sectors;
	return ide_do_drive_cmd(drive, &rq, ide_wait);
}

/*
 * FIXME : this needs to map into a taskfile. <andre@linux-ide.org>
 */
int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	int err = 0;
	u8 args[4], *argbuf = args;
	u8 xfer_rate = 0;
	int argsize = 4;
	ide_task_t tfargs;

	if (NULL == (void *) arg) {
		struct request rq;
		ide_init_drive_cmd(&rq);
		return ide_do_drive_cmd(drive, &rq, ide_wait);
	}

	if (copy_from_user(args, (void __user *)arg, 4))
		return -EFAULT;

	memset(&tfargs, 0, sizeof(ide_task_t));
	tfargs.tfRegister[IDE_FEATURE_OFFSET] = args[2];
	tfargs.tfRegister[IDE_NSECTOR_OFFSET] = args[3];
	tfargs.tfRegister[IDE_SECTOR_OFFSET]  = args[1];
	tfargs.tfRegister[IDE_LCYL_OFFSET]    = 0x00;
	tfargs.tfRegister[IDE_HCYL_OFFSET]    = 0x00;
	tfargs.tfRegister[IDE_SELECT_OFFSET]  = 0x00;
	tfargs.tfRegister[IDE_COMMAND_OFFSET] = args[0];

	if (args[3]) {
		argsize = 4 + (SECTOR_WORDS * 4 * args[3]);
		argbuf = kzalloc(argsize, GFP_KERNEL);
		if (argbuf == NULL)
			return -ENOMEM;
	}
	if (set_transfer(drive, &tfargs)) {
		xfer_rate = args[1];
		if (ide_ata66_check(drive, &tfargs))
			goto abort;
	}

	err = ide_wait_cmd(drive, args[0], args[1], args[2], args[3], argbuf);

	if (!err && xfer_rate) {
		/* active-retuning-calls future */
		ide_set_xfer_rate(drive, xfer_rate);
		ide_driveid_update(drive);
	}
abort:
	if (copy_to_user((void __user *)arg, argbuf, argsize))
		err = -EFAULT;
	if (argsize > 4)
		kfree(argbuf);
	return err;
}

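/*
 * ide_wait_cmd_task() issues a synchronous REQ_DRIVE_TASK request; buf
 * holds the seven taskfile register values used by HDIO_DRIVE_TASK.
 */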
static int ide_wait_cmd_task(ide_drive_t *drive, u8 *buf)
{
	struct request rq;

	ide_init_drive_cmd(&rq);
	rq.flags = REQ_DRIVE_TASK;
	rq.buffer = buf;
	return ide_do_drive_cmd(drive, &rq, ide_wait);
}

/*
 * FIXME : this needs to map into a taskfile. <andre@linux-ide.org>
 */
int ide_task_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	void __user *p = (void __user *)arg;
	int err = 0;
	u8 args[7], *argbuf = args;
	int argsize = 7;

	if (copy_from_user(args, p, 7))
		return -EFAULT;
	err = ide_wait_cmd_task(drive, argbuf);
	if (copy_to_user(p, argbuf, argsize))
		err = -EFAULT;
	return err;
}

/*
 * NOTICE: These are additions from IBM to provide a discrete interface
 * for selective taskregister access operations.  Nice JOB Klaus!!!
 * Glad to be able to work and co-develop this with you and IBM.
 */
ide_startstop_t flagged_taskfile (ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif	= HWIF(drive);
	task_struct_t *taskfile	= (task_struct_t *) task->tfRegister;
	hob_struct_t *hobfile	= (hob_struct_t *) task->hobRegister;

	if (task->data_phase == TASKFILE_MULTI_IN ||
	    task->data_phase == TASKFILE_MULTI_OUT) {
		if (!drive->mult_count) {
			printk(KERN_ERR "%s: multimode not set!\n", drive->name);
			return ide_stopped;
		}
	}

	/*
	 * (ks) Check taskfile in flags.
	 * If set, then execute as it is defined.
	 * If not set, then define default settings.
	 * The default values are:
	 *	read all taskfile registers (except data)
	 *	read the hob registers (sector, nsector, lcyl, hcyl)
	 */
	if (task->tf_in_flags.all == 0) {
		task->tf_in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
		if (drive->addressing == 1)
			task->tf_in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
	}

	/* ALL Command Block Executions SHALL clear nIEN, unless otherwise */
	if (IDE_CONTROL_REG)
		/* clear nIEN */
		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
	SELECT_MASK(drive, 0);

	if (task->tf_out_flags.b.data) {
		u16 data = taskfile->data + (hobfile->data << 8);
		hwif->OUTW(data, IDE_DATA_REG);
	}

	/* (ks) send hob registers first */
	if (task->tf_out_flags.b.nsector_hob)
		hwif->OUTB(hobfile->sector_count, IDE_NSECTOR_REG);
	if (task->tf_out_flags.b.sector_hob)
		hwif->OUTB(hobfile->sector_number, IDE_SECTOR_REG);
	if (task->tf_out_flags.b.lcyl_hob)
		hwif->OUTB(hobfile->low_cylinder, IDE_LCYL_REG);
	if (task->tf_out_flags.b.hcyl_hob)
		hwif->OUTB(hobfile->high_cylinder, IDE_HCYL_REG);

	/* (ks) Send now the standard registers */
	if (task->tf_out_flags.b.error_feature)
		hwif->OUTB(taskfile->feature, IDE_FEATURE_REG);
	/* refers to number of sectors to transfer */
	if (task->tf_out_flags.b.nsector)
		hwif->OUTB(taskfile->sector_count, IDE_NSECTOR_REG);
	/* refers to sector offset or start sector */
	if (task->tf_out_flags.b.sector)
		hwif->OUTB(taskfile->sector_number, IDE_SECTOR_REG);
	if (task->tf_out_flags.b.lcyl)
		hwif->OUTB(taskfile->low_cylinder, IDE_LCYL_REG);
	if (task->tf_out_flags.b.hcyl)
		hwif->OUTB(taskfile->high_cylinder, IDE_HCYL_REG);

	/*
	 * (ks) In the flagged taskfile approach, we will use all specified
	 * registers and the register value will not be changed, except the
	 * select bit (master/slave) in the drive_head register.  We must make
	 * sure that the desired drive is selected.
	 */
	hwif->OUTB(taskfile->device_head | drive->select.all, IDE_SELECT_REG);
	switch(task->data_phase) {

		case TASKFILE_OUT_DMAQ:
		case TASKFILE_OUT_DMA:
		case TASKFILE_IN_DMAQ:
		case TASKFILE_IN_DMA:
			hwif->dma_setup(drive);
			hwif->dma_exec_cmd(drive, taskfile->command);
			hwif->dma_start(drive);
			break;

		default:
			if (task->handler == NULL)
				return ide_stopped;

			/* Issue the command */
			if (task->prehandler) {
				hwif->OUTBSYNC(drive, taskfile->command, IDE_COMMAND_REG);
				ndelay(400);	/* FIXME */
				return task->prehandler(drive, task->rq);
			}
			ide_execute_command(drive, taskfile->command, task->handler, WAIT_WORSTCASE, NULL);
	}

	return ide_started;
}