/*
 *  Copyright (C) 2000-2002	Michael Cornwell <cornwell@acm.org>
 *  Copyright (C) 2000-2002	Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2001-2002	Klaus Smolin
 *				IBM Storage Technology Division
 *  Copyright (C) 2003-2004, 2007  Bartlomiej Zolnierkiewicz
 *
 *  The big the bad and the ugly.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/bitops.h>
#include <linux/scatterlist.h>

#include <asm/byteorder.h>
#include <asm/uaccess.h>
void ide_tf_dump(const char *s, struct ide_taskfile *tf)
{
	printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
		"lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
		s, tf->feature, tf->nsect, tf->lbal,
		tf->lbam, tf->lbah, tf->device, tf->command);
	printk("%s: hob: nsect 0x%02x lbal 0x%02x "
		"lbam 0x%02x lbah 0x%02x\n",
		s, tf->hob_nsect, tf->hob_lbal,
		tf->hob_lbam, tf->hob_lbah);
}
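
/*
 * taskfile_lib_get_identify() builds a taskfile that issues WIN_IDENTIFY
 * (or WIN_PIDENTIFY for non-disk, i.e. ATAPI, devices) and reads a single
 * sector of identify data into @buf.
 */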
int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
{
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	if (drive->media == ide_disk)
		args.tf.command = WIN_IDENTIFY;
	else
		args.tf.command = WIN_PIDENTIFY;
	args.tf_flags	= IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
	args.data_phase	= TASKFILE_IN;
	return ide_raw_taskfile(drive, &args, buf, 1);
}
static int inline task_dma_ok(ide_task_t *task)
{
	if (blk_fs_request(task->rq) || (task->tf_flags & IDE_TFLAG_FLAGGED))
		return 1;

	switch (task->tf.command) {
	case WIN_WRITEDMA_ONCE:
	case WIN_WRITEDMA_EXT:
	case WIN_READDMA_ONCE:
	case WIN_IDENTIFY_DMA:
		return 1;
	}

	return 0;
}
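
/*
 * Per-data-phase interrupt handlers; do_rw_taskfile() installs the one
 * matching the command it is about to issue.
 */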
static ide_startstop_t task_no_data_intr(ide_drive_t *);
static ide_startstop_t set_geometry_intr(ide_drive_t *);
static ide_startstop_t recal_intr(ide_drive_t *);
static ide_startstop_t set_multmode_intr(ide_drive_t *);
static ide_startstop_t pre_task_out_intr(ide_drive_t *, struct request *);
static ide_startstop_t task_in_intr(ide_drive_t *);
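
/*
 * do_rw_taskfile() loads the taskfile registers and starts the command:
 * PIO-out phases are kicked off synchronously via pre_task_out_intr(),
 * PIO-in and no-data phases arm an interrupt handler, and anything else
 * is handed to the DMA engine (stopping the request if DMA is unavailable
 * or fails to set up).
 */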
ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct ide_taskfile *tf = &task->tf;
	ide_handler_t *handler = NULL;
	const struct ide_dma_ops *dma_ops = hwif->dma_ops;

	if (task->data_phase == TASKFILE_MULTI_IN ||
	    task->data_phase == TASKFILE_MULTI_OUT) {
		if (!drive->mult_count) {
			printk(KERN_ERR "%s: multimode not set!\n",
					drive->name);
			return ide_stopped;
		}
	}

	if (task->tf_flags & IDE_TFLAG_FLAGGED)
		task->tf_flags |= IDE_TFLAG_FLAGGED_SET_IN_FLAGS;

	if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
		ide_tf_dump(drive->name, tf);
		ide_set_irq(drive, 1);
		SELECT_MASK(drive, 0);
		hwif->tf_load(drive, task);
	}

	switch (task->data_phase) {
	case TASKFILE_MULTI_OUT:
		hwif->OUTBSYNC(hwif, tf->command, hwif->io_ports.command_addr);
		ndelay(400);	/* FIXME */
		return pre_task_out_intr(drive, task->rq);
	case TASKFILE_MULTI_IN:
		handler = task_in_intr;
		/* fall-through */
	case TASKFILE_NO_DATA:
		if (handler == NULL)
			handler = task_no_data_intr;
		/* WIN_{SPECIFY,RESTORE,SETMULT} use custom handlers */
		if (task->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) {
			switch (tf->command) {
			case WIN_SPECIFY: handler = set_geometry_intr;	break;
			case WIN_RESTORE: handler = recal_intr;		break;
			case WIN_SETMULT: handler = set_multmode_intr;	break;
			}
		}
		ide_execute_command(drive, tf->command, handler,
				    WAIT_WORSTCASE, NULL);
		return ide_started;
	default:
		if (task_dma_ok(task) == 0 || drive->using_dma == 0 ||
		    dma_ops->dma_setup(drive))
			return ide_stopped;
		dma_ops->dma_exec_cmd(drive, tf->command);
		dma_ops->dma_start(drive);
		return ide_started;
	}
}
EXPORT_SYMBOL_GPL(do_rw_taskfile);
/*
 * set_multmode_intr() is invoked on completion of a WIN_SETMULT cmd.
 */
static ide_startstop_t set_multmode_intr(ide_drive_t *drive)
{
	u8 stat = ide_read_status(drive);

	if (OK_STAT(stat, READY_STAT, BAD_STAT))
		drive->mult_count = drive->mult_req;
	else {
		drive->mult_req = drive->mult_count = 0;
		drive->special.b.recalibrate = 1;
		(void) ide_dump_status(drive, "set_multmode", stat);
	}
	return ide_stopped;
}
/*
 * set_geometry_intr() is invoked on completion of a WIN_SPECIFY cmd.
 */
static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
{
	int retries = 5;
	u8 stat;

	while (((stat = ide_read_status(drive)) & BUSY_STAT) && retries--)
		udelay(10);

	if (OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_stopped;

	if (stat & (ERR_STAT|DRQ_STAT))
		return ide_error(drive, "set_geometry_intr", stat);

	BUG_ON(HWGROUP(drive)->handler != NULL);
	ide_set_handler(drive, &set_geometry_intr, WAIT_WORSTCASE, NULL);
	return ide_started;
}
/*
 * recal_intr() is invoked on completion of a WIN_RESTORE (recalibrate) cmd.
 */
static ide_startstop_t recal_intr(ide_drive_t *drive)
{
	u8 stat = ide_read_status(drive);

	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_error(drive, "recal_intr", stat);
	return ide_stopped;
}
/*
 * Handler for commands without a data phase
 */
static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
{
	ide_task_t *args = HWGROUP(drive)->rq->special;
	u8 stat;

	local_irq_enable_in_hardirq();
	stat = ide_read_status(drive);

	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_error(drive, "task_no_data_intr", stat);
		/* calls ide_end_drive_cmd */

	if (args)
		ide_end_drive_cmd(drive, stat, ide_read_error(drive));

	return ide_stopped;
}
static u8 wait_drive_not_busy(ide_drive_t *drive)
{
	int retries;
	u8 stat;

	/*
	 * Last sector was transferred, wait until device is ready.  This can
	 * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
	 */
	for (retries = 0; retries < 1000; retries++) {
		stat = ide_read_status(drive);

		if (stat & BUSY_STAT)
			udelay(10);
		else
			break;
	}

	if (stat & BUSY_STAT)
		printk(KERN_ERR "%s: drive still BUSY!\n", drive->name);

	return stat;
}
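
/*
 * ide_pio_sector() transfers one sector between the drive and the current
 * scatterlist entry, walking hwif->cursg/cursg_ofs across the sg table and
 * kmapping the page (with IRQs saved on highmem) around the actual I/O.
 */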
static void ide_pio_sector(ide_drive_t *drive, struct request *rq,
			   unsigned int write)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;
	struct scatterlist *cursg = hwif->cursg;
	struct page *page;
#ifdef CONFIG_HIGHMEM
	unsigned long flags;
#endif
	unsigned int offset;
	u8 *buf;

	if (!cursg)
		cursg = hwif->cursg = sg;

	page = sg_page(cursg);
	offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

#ifdef CONFIG_HIGHMEM
	local_irq_save(flags);
#endif
	buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;

	hwif->nleft--;
	hwif->cursg_ofs++;

	if ((hwif->cursg_ofs * SECTOR_SIZE) == cursg->length) {
		hwif->cursg = sg_next(hwif->cursg);
		hwif->cursg_ofs = 0;
	}

	/* do the actual data transfer */
	if (write)
		hwif->output_data(drive, rq, buf, SECTOR_SIZE);
	else
		hwif->input_data(drive, rq, buf, SECTOR_SIZE);

	kunmap_atomic(buf, KM_BIO_SRC_IRQ);
#ifdef CONFIG_HIGHMEM
	local_irq_restore(flags);
#endif
}
static void ide_pio_multi(ide_drive_t *drive, struct request *rq,
			  unsigned int write)
{
	unsigned int nsect;

	nsect = min_t(unsigned int, drive->hwif->nleft, drive->mult_count);
	while (nsect--)
		ide_pio_sector(drive, rq, write);
}
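
/*
 * ide_pio_datablock() picks single-sector vs. multi-sector PIO from the
 * current data phase, temporarily dropping 32-bit I/O when the taskfile
 * asked for 16-bit transfers.
 */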
static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
			      unsigned int write)
{
	u8 saved_io_32bit = drive->io_32bit;

	if (rq->bio)	/* fs request */
		rq->errors = 0;

	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *task = rq->special;

		if (task->tf_flags & IDE_TFLAG_IO_16BIT)
			drive->io_32bit = 0;
	}

	touch_softlockup_watchdog();

	switch (drive->hwif->data_phase) {
	case TASKFILE_MULTI_IN:
	case TASKFILE_MULTI_OUT:
		ide_pio_multi(drive, rq, write);
		break;
	default:
		ide_pio_sector(drive, rq, write);
		break;
	}

	drive->io_32bit = saved_io_32bit;
}
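
/*
 * On a PIO error, task_error() works out how many sectors actually made it
 * to or from the media (discounting a partial multi-sector block) so that
 * much of the request can be completed before the rest is failed.
 */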
static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
				  const char *s, u8 stat)
{
	if (rq->bio) {
		ide_hwif_t *hwif = drive->hwif;
		int sectors = hwif->nsect - hwif->nleft;

		switch (hwif->data_phase) {
		case TASKFILE_MULTI_IN:
			if (hwif->nleft)
				break;
			/* fall-through */
		case TASKFILE_MULTI_OUT:
			sectors -= drive->mult_count;
		default:
			break;
		}

		if (sectors > 0) {
			ide_driver_t *drv;

			drv = *(ide_driver_t **)rq->rq_disk->private_data;
			drv->end_request(drive, 1, sectors);
		}
	}
	return ide_error(drive, s, stat);
}
void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
{
	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		u8 err = ide_read_error(drive);

		ide_end_drive_cmd(drive, stat, err);
		return;
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		drv->end_request(drive, 1, rq->nr_sectors);
	} else
		ide_end_request(drive, 1, rq->nr_sectors);
}
/*
 * We got an interrupt on a task_in case, but no errors and no DRQ.
 *
 * It might be a spurious irq (shared irq), but it might be a
 * command that had no output.
 */
static ide_startstop_t task_in_unexpected(ide_drive_t *drive, struct request *rq, u8 stat)
{
	/* Command all done? */
	if (OK_STAT(stat, READY_STAT, BUSY_STAT)) {
		task_end_request(drive, rq, stat);
		return ide_stopped;
	}

	/* Assume it was a spurious irq */
	ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
	return ide_started;
}
/*
 * Handler for command with PIO data-in phase (Read/Read Multiple).
 */
static ide_startstop_t task_in_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	u8 stat = ide_read_status(drive);

	/* Error? */
	if (stat & ERR_STAT)
		return task_error(drive, rq, __func__, stat);

	/* Didn't want any data? Odd. */
	if (!(stat & DRQ_STAT))
		return task_in_unexpected(drive, rq, stat);

	ide_pio_datablock(drive, rq, 0);

	/* Are we done? Check status and finish transfer. */
	if (!hwif->nleft) {
		stat = wait_drive_not_busy(drive);
		if (!OK_STAT(stat, 0, BAD_STAT))
			return task_error(drive, rq, __func__, stat);
		task_end_request(drive, rq, stat);
		return ide_stopped;
	}

	/* Still data left to transfer. */
	ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);

	return ide_started;
}
/*
 * Handler for command with PIO data-out phase (Write/Write Multiple).
 */
static ide_startstop_t task_out_intr (ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	u8 stat = ide_read_status(drive);

	if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
		return task_error(drive, rq, __func__, stat);

	/* Deal with unexpected ATA data phase. */
	if (((stat & DRQ_STAT) == 0) ^ !hwif->nleft)
		return task_error(drive, rq, __func__, stat);

	if (!hwif->nleft) {
		task_end_request(drive, rq, stat);
		return ide_stopped;
	}

	/* Still data left to transfer. */
	ide_pio_datablock(drive, rq, 1);
	ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);

	return ide_started;
}
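
/*
 * Before the first data block of a PIO write can be pushed, the drive has
 * to raise DRQ; pre_task_out_intr() polls for that, then primes the first
 * block and arms task_out_intr() for the rest.
 */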
static ide_startstop_t pre_task_out_intr(ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

	if (ide_wait_stat(&startstop, drive, DRQ_STAT,
			  drive->bad_wstat, WAIT_DRQ)) {
		printk(KERN_ERR "%s: no DRQ after issuing %sWRITE%s\n",
				drive->name,
				drive->hwif->data_phase ? "MULT" : "",
				drive->addressing ? "_EXT" : "");
		return startstop;
	}

	ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);
	ide_pio_datablock(drive, rq, 1);

	return ide_started;
}
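
/*
 * ide_raw_taskfile() is the synchronous library entry point: the caller
 * fills an ide_task_t, the request is queued and executed, and the
 * completed taskfile registers come back in the same struct.  See
 * taskfile_lib_get_identify() above for a typical user.
 */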
int ide_raw_taskfile(ide_drive_t *drive, ide_task_t *task, u8 *buf, u16 nsect)
{
	struct request *rq;
	int error;

	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
	rq->buffer = buf;

	/*
	 * (ks) Currently we transfer only whole sectors.  This is sufficient
	 * for now, but it would be great to find a solution that allows
	 * transfers of any size, to support special commands like READ LONG.
	 */
	rq->hard_nr_sectors = rq->nr_sectors = nsect;
	rq->hard_cur_sectors = rq->current_nr_sectors = nsect;

	if (task->tf_flags & IDE_TFLAG_WRITE)
		rq->cmd_flags |= REQ_RW;

	rq->special = task;
	task->rq = rq;

	error = blk_execute_rq(drive->queue, NULL, rq, 0);
	blk_put_request(rq);

	return error;
}
EXPORT_SYMBOL(ide_raw_taskfile);
int ide_no_data_taskfile(ide_drive_t *drive, ide_task_t *task)
{
	task->data_phase = TASKFILE_NO_DATA;

	return ide_raw_taskfile(drive, task, NULL, 0);
}
EXPORT_SYMBOL_GPL(ide_no_data_taskfile);
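
/*
 * ide_taskfile_ioctl() implements HDIO_DRIVE_TASKFILE.  The user buffer is
 * laid out as an ide_task_request_t header immediately followed by out_size
 * bytes of write data and then in_size bytes of read data, which is why the
 * copies below use tasksize and tasksize + taskout as offsets.
 */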
#ifdef CONFIG_IDE_TASK_IOCTL
int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	ide_task_request_t	*req_task;
	ide_task_t		args;
	u8 *outbuf		= NULL;
	u8 *inbuf		= NULL;
	u8 *data_buf		= NULL;
	int err			= 0;
	int tasksize		= sizeof(struct ide_task_request_s);
	unsigned int taskin	= 0;
	unsigned int taskout	= 0;
	u16 nsect		= 0;
	char __user *buf = (char __user *)arg;

//	printk("IDE Taskfile ...\n");

	req_task = kzalloc(tasksize, GFP_KERNEL);
	if (req_task == NULL) return -ENOMEM;
	if (copy_from_user(req_task, buf, tasksize)) {
		kfree(req_task);
		return -EFAULT;
	}

	taskout = req_task->out_size;
	taskin  = req_task->in_size;

	if (taskin > 65536 || taskout > 65536) {
		err = -EINVAL;
		goto abort;
	}

	if (taskout) {
		int outtotal = tasksize;
		outbuf = kzalloc(taskout, GFP_KERNEL);
		if (outbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(outbuf, buf + outtotal, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}

	if (taskin) {
		int intotal = tasksize + taskout;
		inbuf = kzalloc(taskin, GFP_KERNEL);
		if (inbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(inbuf, buf + intotal, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}

	memset(&args, 0, sizeof(ide_task_t));

	memcpy(&args.tf_array[0], req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2);
	memcpy(&args.tf_array[6], req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);

	args.data_phase = req_task->data_phase;

	args.tf_flags = IDE_TFLAG_IO_16BIT | IDE_TFLAG_DEVICE |
			IDE_TFLAG_IN_TF;
	if (drive->addressing == 1)
		args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_IN_HOB);

	if (req_task->out_flags.all) {
		args.tf_flags |= IDE_TFLAG_FLAGGED;

		if (req_task->out_flags.b.data)
			args.tf_flags |= IDE_TFLAG_OUT_DATA;

		if (req_task->out_flags.b.nsector_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_NSECT;
		if (req_task->out_flags.b.sector_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAL;
		if (req_task->out_flags.b.lcyl_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAM;
		if (req_task->out_flags.b.hcyl_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAH;

		if (req_task->out_flags.b.error_feature)
			args.tf_flags |= IDE_TFLAG_OUT_FEATURE;
		if (req_task->out_flags.b.nsector)
			args.tf_flags |= IDE_TFLAG_OUT_NSECT;
		if (req_task->out_flags.b.sector)
			args.tf_flags |= IDE_TFLAG_OUT_LBAL;
		if (req_task->out_flags.b.lcyl)
			args.tf_flags |= IDE_TFLAG_OUT_LBAM;
		if (req_task->out_flags.b.hcyl)
			args.tf_flags |= IDE_TFLAG_OUT_LBAH;
	} else {
		args.tf_flags |= IDE_TFLAG_OUT_TF;
		if (args.tf_flags & IDE_TFLAG_LBA48)
			args.tf_flags |= IDE_TFLAG_OUT_HOB;
	}

	if (req_task->in_flags.b.data)
		args.tf_flags |= IDE_TFLAG_IN_DATA;

	switch(req_task->data_phase) {
	case TASKFILE_MULTI_OUT:
		if (!drive->mult_count) {
			/* (hs): give up if multcount is not set */
			printk(KERN_ERR "%s: %s Multimode Write "
					"multcount is not set\n",
					drive->name, __func__);
			err = -EPERM;
			goto abort;
		}
		/* fall through */
	case TASKFILE_OUT_DMAQ:
	case TASKFILE_OUT_DMA:
		nsect = taskout / SECTOR_SIZE;
		data_buf = outbuf;
		break;
	case TASKFILE_MULTI_IN:
		if (!drive->mult_count) {
			/* (hs): give up if multcount is not set */
			printk(KERN_ERR "%s: %s Multimode Read failure "
					"multcount is not set\n",
					drive->name, __func__);
			err = -EPERM;
			goto abort;
		}
		/* fall through */
	case TASKFILE_IN_DMAQ:
	case TASKFILE_IN_DMA:
		nsect = taskin / SECTOR_SIZE;
		data_buf = inbuf;
		break;
	case TASKFILE_NO_DATA:
		break;
	default:
		err = -EFAULT;
		goto abort;
	}

	if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA)
		nsect = 0;
	else if (!nsect) {
		nsect = (args.tf.hob_nsect << 8) | args.tf.nsect;

		if (!nsect) {
			printk(KERN_ERR "%s: in/out command without data\n",
					drive->name);
			err = -EFAULT;
			goto abort;
		}
	}

	if (req_task->req_cmd == IDE_DRIVE_TASK_RAW_WRITE)
		args.tf_flags |= IDE_TFLAG_WRITE;

	err = ide_raw_taskfile(drive, &args, data_buf, nsect);

	memcpy(req_task->hob_ports, &args.tf_array[0], HDIO_DRIVE_HOB_HDR_SIZE - 2);
	memcpy(req_task->io_ports, &args.tf_array[6], HDIO_DRIVE_TASK_HDR_SIZE);

	if ((args.tf_flags & IDE_TFLAG_FLAGGED_SET_IN_FLAGS) &&
	    req_task->in_flags.all == 0) {
		req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
		if (drive->addressing == 1)
			req_task->in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
	}

	if (copy_to_user(buf, req_task, tasksize)) {
		err = -EFAULT;
		goto abort;
	}
	if (taskout) {
		int outtotal = tasksize;
		if (copy_to_user(buf + outtotal, outbuf, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}
	if (taskin) {
		int intotal = tasksize + taskout;
		if (copy_to_user(buf + intotal, inbuf, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}
abort:
	kfree(req_task);
	kfree(outbuf);
	kfree(inbuf);

//	printk("IDE Taskfile ioctl ended. rc = %i\n", err);

	return err;
}
#endif
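
/*
 * ide_cmd_ioctl() implements HDIO_DRIVE_CMD: userspace passes four bytes
 * (command, a command-specific register value, feature, sector count); on
 * return the same four bytes carry back status, error and nsect, optionally
 * followed at arg + 4 by the sectors of data the command produced.
 */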
int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	u8 *buf = NULL;
	int bufsize = 0, err = 0;
	u8 args[4], xfer_rate = 0;
	ide_task_t tfargs;
	struct ide_taskfile *tf = &tfargs.tf;
	struct hd_driveid *id = drive->id;

	if (NULL == (void *) arg) {
		struct request *rq;

		rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
		rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
		err = blk_execute_rq(drive->queue, NULL, rq, 0);
		blk_put_request(rq);

		return err;
	}

	if (copy_from_user(args, (void __user *)arg, 4))
		return -EFAULT;

	memset(&tfargs, 0, sizeof(ide_task_t));
	tf->feature = args[2];
	if (args[0] == WIN_SMART) {
		tfargs.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_IN_NSECT;
	} else {
		tf->nsect = args[1];
		tfargs.tf_flags = IDE_TFLAG_OUT_FEATURE |
				  IDE_TFLAG_OUT_NSECT | IDE_TFLAG_IN_NSECT;
	}
	tf->command = args[0];
	tfargs.data_phase = args[3] ? TASKFILE_IN : TASKFILE_NO_DATA;

	if (args[3]) {
		tfargs.tf_flags |= IDE_TFLAG_IO_16BIT;
		bufsize = SECTOR_WORDS * 4 * args[3];
		buf = kzalloc(bufsize, GFP_KERNEL);
		if (buf == NULL)
			return -ENOMEM;
	}

	if (tf->command == WIN_SETFEATURES &&
	    tf->feature == SETFEATURES_XFER &&
	    tf->nsect >= XFER_SW_DMA_0 &&
	    (id->dma_ultra || id->dma_mword || id->dma_1word)) {
		xfer_rate = args[1];
		if (tf->nsect > XFER_UDMA_2 && !eighty_ninty_three(drive)) {
			printk(KERN_WARNING "%s: UDMA speeds >UDMA33 cannot "
					    "be set\n", drive->name);
			err = -EIO;
			goto abort;
		}
	}

	err = ide_raw_taskfile(drive, &tfargs, buf, args[3]);

	args[0] = tf->status;
	args[1] = tf->error;
	args[2] = tf->nsect;

	if (!err && xfer_rate) {
		/* active-retuning-calls future */
		ide_set_xfer_rate(drive, xfer_rate);
		ide_driveid_update(drive);
	}
abort:
	if (copy_to_user((void __user *)arg, &args, 4))
		err = -EFAULT;
	if (buf) {
		if (copy_to_user((void __user *)(arg + 4), buf, bufsize))
			err = -EFAULT;
		kfree(buf);
	}
	return err;
}
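
/*
 * ide_task_ioctl() implements HDIO_DRIVE_TASK: seven bytes from userspace
 * (command plus six taskfile registers) are loaded into tf_array[7..12],
 * the command is issued with no data phase, and the updated registers are
 * copied back to userspace.
 */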
int ide_task_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	void __user *p = (void __user *)arg;
	int err = 0;
	u8 args[7];
	ide_task_t task;

	if (copy_from_user(args, p, 7))
		return -EFAULT;

	memset(&task, 0, sizeof(task));
	memcpy(&task.tf_array[7], &args[1], 6);
	task.tf.command = args[0];
	task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;

	err = ide_no_data_taskfile(drive, &task);

	args[0] = task.tf.command;
	memcpy(&args[1], &task.tf_array[7], 6);

	if (copy_to_user(p, args, 7))
		err = -EFAULT;

	return err;
}