/*
 *  linux/drivers/ide/ide-taskfile.c	Version 0.38	March 05, 2003
 *
 *  Copyright (C) 2000-2002	Michael Cornwell <cornwell@acm.org>
 *  Copyright (C) 2000-2002	Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2001-2002	Klaus Smolin
 *				IBM Storage Technology Division
 *  Copyright (C) 2003-2004	Bartlomiej Zolnierkiewicz
 *
 *  The big the bad and the ugly.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/bitops.h>
#include <linux/scatterlist.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>
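
/*
 * ide_tf_load() writes out the taskfile registers selected by
 * task->tf_flags, hob (high order byte) registers first, then the
 * classic taskfile, then the device register.
 */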
void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_taskfile *tf = &task->tf;
	u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;

	/* flagged taskfiles write the device register unmasked */
	if (task->tf_flags & IDE_TFLAG_FLAGGED)
		HIHI = 0xFF;

#ifdef DEBUG
	printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
		"lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
		drive->name, tf->feature, tf->nsect, tf->lbal,
		tf->lbam, tf->lbah, tf->device, tf->command);
	printk("%s: hob: nsect 0x%02x lbal 0x%02x "
		"lbam 0x%02x lbah 0x%02x\n",
		drive->name, tf->hob_nsect, tf->hob_lbal,
		tf->hob_lbam, tf->hob_lbah);
#endif

	ide_set_irq(drive, 1);

	if ((task->tf_flags & IDE_TFLAG_NO_SELECT_MASK) == 0)
		SELECT_MASK(drive, 0);

	if (task->tf_flags & IDE_TFLAG_OUT_DATA)
		hwif->OUTW((tf->hob_data << 8) | tf->data, IDE_DATA_REG);

	if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
		hwif->OUTB(tf->hob_feature, IDE_FEATURE_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
		hwif->OUTB(tf->hob_nsect, IDE_NSECTOR_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
		hwif->OUTB(tf->hob_lbal, IDE_SECTOR_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
		hwif->OUTB(tf->hob_lbam, IDE_LCYL_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
		hwif->OUTB(tf->hob_lbah, IDE_HCYL_REG);

	if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
		hwif->OUTB(tf->feature, IDE_FEATURE_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
		hwif->OUTB(tf->nsect, IDE_NSECTOR_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
		hwif->OUTB(tf->lbal, IDE_SECTOR_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
		hwif->OUTB(tf->lbam, IDE_LCYL_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
		hwif->OUTB(tf->lbah, IDE_HCYL_REG);

	if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
		hwif->OUTB((tf->device & HIHI) | drive->select.all,
			   IDE_SELECT_REG);
}

int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
{
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.tf.nsect = 0x01;
	if (drive->media == ide_disk)
		args.tf.command = WIN_IDENTIFY;
	else
		args.tf.command = WIN_PIDENTIFY;
	args.tf_flags	= IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
	args.data_phase	= TASKFILE_IN;
	return ide_raw_taskfile(drive, &args, buf, 1);
}
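
/*
 * Illustrative use (a sketch, not code from this file): callers such as
 * ide_driveid_update() pass a SECTOR_SIZE buffer and treat a zero return
 * as "buf now holds the raw IDENTIFY data":
 *
 *	u8 *id = kmalloc(SECTOR_SIZE, GFP_ATOMIC);
 *
 *	if (id && taskfile_lib_get_identify(drive, id) == 0)
 *		parse_identify(id);	(parse_identify is hypothetical)
 *	kfree(id);
 */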

static inline int task_dma_ok(ide_task_t *task)
{
	if (blk_fs_request(task->rq) || (task->tf_flags & IDE_TFLAG_FLAGGED))
		return 1;

	switch (task->tf.command) {
	case WIN_WRITEDMA_ONCE:
	case WIN_WRITEDMA:
	case WIN_WRITEDMA_EXT:
	case WIN_READDMA_ONCE:
	case WIN_READDMA:
	case WIN_READDMA_EXT:
	case WIN_IDENTIFY_DMA:
		return 1;
	}

	return 0;
}

static ide_startstop_t task_no_data_intr(ide_drive_t *);
static ide_startstop_t set_geometry_intr(ide_drive_t *);
static ide_startstop_t recal_intr(ide_drive_t *);
static ide_startstop_t set_multmode_intr(ide_drive_t *);
static ide_startstop_t pre_task_out_intr(ide_drive_t *, struct request *);
static ide_startstop_t task_in_intr(ide_drive_t *);
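
/*
 * do_rw_taskfile() dispatches one taskfile: PIO-out phases issue the
 * command and wait for DRQ in pre_task_out_intr(); PIO-in and no-data
 * phases install an IRQ handler via ide_execute_command(); any other
 * phase is attempted as DMA and stopped if task_dma_ok() or the DMA
 * setup refuses it.  Callers are expected to have set task->rq and
 * task->data_phase first (see ide_raw_taskfile() below).
 */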

ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct ide_taskfile *tf = &task->tf;
	ide_handler_t *handler = NULL;

	if (task->data_phase == TASKFILE_MULTI_IN ||
	    task->data_phase == TASKFILE_MULTI_OUT) {
		if (!drive->mult_count) {
			printk(KERN_ERR "%s: multimode not set!\n",
					drive->name);
			return ide_stopped;
		}
	}

	if (task->tf_flags & IDE_TFLAG_FLAGGED)
		task->tf_flags |= IDE_TFLAG_FLAGGED_SET_IN_FLAGS;

	if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0)
		ide_tf_load(drive, task);

	switch (task->data_phase) {
	case TASKFILE_MULTI_OUT:
	case TASKFILE_OUT:
		hwif->OUTBSYNC(drive, tf->command, IDE_COMMAND_REG);
		ndelay(400);	/* FIXME */
		return pre_task_out_intr(drive, task->rq);
	case TASKFILE_MULTI_IN:
	case TASKFILE_IN:
		handler = task_in_intr;
		/* fall-through */
	case TASKFILE_NO_DATA:
		if (handler == NULL)
			handler = task_no_data_intr;
		/* WIN_{SPECIFY,RESTORE,SETMULT} use custom handlers */
		if (task->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) {
			switch (tf->command) {
			case WIN_SPECIFY: handler = set_geometry_intr;	break;
			case WIN_RESTORE: handler = recal_intr;		break;
			case WIN_SETMULT: handler = set_multmode_intr;	break;
			}
		}
		ide_execute_command(drive, tf->command, handler,
				    WAIT_WORSTCASE, NULL);
		return ide_started;
	default:
		if (task_dma_ok(task) == 0 || drive->using_dma == 0 ||
		    hwif->dma_setup(drive))
			return ide_stopped;
		hwif->dma_exec_cmd(drive, tf->command);
		hwif->dma_start(drive);
		return ide_started;
	}
}
EXPORT_SYMBOL_GPL(do_rw_taskfile);

/*
 * set_multmode_intr() is invoked on completion of a WIN_SETMULT cmd.
 */
static ide_startstop_t set_multmode_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 stat;

	if (OK_STAT(stat = hwif->INB(IDE_STATUS_REG), READY_STAT, BAD_STAT)) {
		drive->mult_count = drive->mult_req;
	} else {
		drive->mult_req = drive->mult_count = 0;
		drive->special.b.recalibrate = 1;
		(void) ide_dump_status(drive, "set_multmode", stat);
	}
	return ide_stopped;
}

/*
 * set_geometry_intr() is invoked on completion of a WIN_SPECIFY cmd.
 */
static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	int retries = 5;
	u8 stat;

	while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
		udelay(10);

	if (OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_stopped;

	if (stat & (ERR_STAT|DRQ_STAT))
		return ide_error(drive, "set_geometry_intr", stat);

	BUG_ON(HWGROUP(drive)->handler != NULL);
	ide_set_handler(drive, &set_geometry_intr, WAIT_WORSTCASE, NULL);
	return ide_started;
}

/*
 * recal_intr() is invoked on completion of a WIN_RESTORE (recalibrate) cmd.
 */
static ide_startstop_t recal_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 stat;

	if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG), READY_STAT, BAD_STAT))
		return ide_error(drive, "recal_intr", stat);
	return ide_stopped;
}

/*
 * Handler for commands without a data phase
 */
static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
{
	ide_task_t *args = HWGROUP(drive)->rq->special;
	ide_hwif_t *hwif = HWIF(drive);
	u8 stat;

	local_irq_enable_in_hardirq();
	if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG), READY_STAT, BAD_STAT)) {
		return ide_error(drive, "task_no_data_intr", stat);
		/* calls ide_end_drive_cmd */
	}

	if (args)
		ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));

	return ide_stopped;
}

u8 wait_drive_not_busy(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	int retries;
	u8 stat;

	/*
	 * Last sector was transferred, wait until drive is ready.
	 * This can take up to 10 usec, but we will wait max 1 ms.
	 */
	for (retries = 0; retries < 100; retries++) {
		if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT)
			udelay(10);
		else
			break;
	}

	if (stat & BUSY_STAT)
		printk(KERN_ERR "%s: drive still BUSY!\n", drive->name);

	return stat;
}

static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;
	struct scatterlist *cursg = hwif->cursg;
	struct page *page;
#ifdef CONFIG_HIGHMEM
	unsigned long flags;
#endif
	unsigned int offset;
	u8 *buf;

	if (!cursg) {
		cursg = sg;
		hwif->cursg = sg;
	}

	page = sg_page(cursg);
	offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

#ifdef CONFIG_HIGHMEM
	local_irq_save(flags);
#endif
	buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;

	hwif->nleft--;
	hwif->cursg_ofs++;

	if ((hwif->cursg_ofs * SECTOR_SIZE) == cursg->length) {
		hwif->cursg = sg_next(hwif->cursg);
		hwif->cursg_ofs = 0;
	}

	/* do the actual data transfer */
	if (write)
		hwif->ata_output_data(drive, buf, SECTOR_WORDS);
	else
		hwif->ata_input_data(drive, buf, SECTOR_WORDS);

	kunmap_atomic(buf, KM_BIO_SRC_IRQ);
#ifdef CONFIG_HIGHMEM
	local_irq_restore(flags);
#endif
}

static void ide_pio_multi(ide_drive_t *drive, unsigned int write)
{
	unsigned int nsect;

	nsect = min_t(unsigned int, drive->hwif->nleft, drive->mult_count);
	while (nsect--)
		ide_pio_sector(drive, write);
}

static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
			      unsigned int write)
{
	u8 saved_io_32bit = drive->io_32bit;

	if (rq->bio)	/* fs request */
		rq->errors = 0;

	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *task = rq->special;

		if (task->tf_flags & IDE_TFLAG_IO_16BIT)
			drive->io_32bit = 0;
	}

	touch_softlockup_watchdog();

	switch (drive->hwif->data_phase) {
	case TASKFILE_MULTI_IN:
	case TASKFILE_MULTI_OUT:
		ide_pio_multi(drive, write);
		break;
	default:
		ide_pio_sector(drive, write);
		break;
	}

	drive->io_32bit = saved_io_32bit;
}

static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
				  const char *s, u8 stat)
{
	if (rq->bio) {
		ide_hwif_t *hwif = drive->hwif;
		int sectors = hwif->nsect - hwif->nleft;

		switch (hwif->data_phase) {
		case TASKFILE_IN:
			if (hwif->nleft)
				break;
			/* fall through */
		case TASKFILE_OUT:
			sectors--;
			break;
		case TASKFILE_MULTI_IN:
			if (hwif->nleft)
				break;
			/* fall through */
		case TASKFILE_MULTI_OUT:
			sectors -= drive->mult_count;
		default:
			break;
		}

		if (sectors > 0) {
			ide_driver_t *drv;

			drv = *(ide_driver_t **)rq->rq_disk->private_data;
			drv->end_request(drive, 1, sectors);
		}
	}
	return ide_error(drive, s, stat);
}

void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
{
	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		u8 err = drive->hwif->INB(IDE_ERROR_REG);

		ide_end_drive_cmd(drive, stat, err);
		return;
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		drv->end_request(drive, 1, rq->nr_sectors);
	} else
		ide_end_request(drive, 1, rq->nr_sectors);
}

/*
 * Handler for command with PIO data-in phase (Read/Read Multiple).
 */
static ide_startstop_t task_in_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	u8 stat = hwif->INB(IDE_STATUS_REG);

	/* new way for dealing with premature shared PCI interrupts */
	if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) {
		if (stat & (ERR_STAT | DRQ_STAT))
			return task_error(drive, rq, __FUNCTION__, stat);
		/* No data yet, so wait for another IRQ. */
		ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
		return ide_started;
	}

	ide_pio_datablock(drive, rq, 0);

	/* If it was the last datablock check status and finish transfer. */
	if (!hwif->nleft) {
		stat = wait_drive_not_busy(drive);
		if (!OK_STAT(stat, 0, BAD_STAT))
			return task_error(drive, rq, __FUNCTION__, stat);
		task_end_request(drive, rq, stat);
		return ide_stopped;
	}

	/* Still data left to transfer. */
	ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);

	return ide_started;
}

/*
 * Handler for command with PIO data-out phase (Write/Write Multiple).
 */
static ide_startstop_t task_out_intr (ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	u8 stat = hwif->INB(IDE_STATUS_REG);

	if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
		return task_error(drive, rq, __FUNCTION__, stat);

	/* Deal with unexpected ATA data phase. */
	if (((stat & DRQ_STAT) == 0) ^ !hwif->nleft)
		return task_error(drive, rq, __FUNCTION__, stat);

	if (!hwif->nleft) {
		task_end_request(drive, rq, stat);
		return ide_stopped;
	}

	/* Still data left to transfer. */
	ide_pio_datablock(drive, rq, 1);
	ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);

	return ide_started;
}

static ide_startstop_t pre_task_out_intr(ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

	if (ide_wait_stat(&startstop, drive, DRQ_STAT,
			  drive->bad_wstat, WAIT_DRQ)) {
		printk(KERN_ERR "%s: no DRQ after issuing %sWRITE%s\n",
				drive->name,
				drive->hwif->data_phase ? "MULT" : "",
				drive->addressing ? "_EXT" : "");
		return startstop;
	}

	if (!drive->unmask)
		local_irq_disable();

	ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);
	ide_pio_datablock(drive, rq, 1);

	return ide_started;
}

int ide_raw_taskfile(ide_drive_t *drive, ide_task_t *task, u8 *buf, u16 nsect)
{
	struct request rq;

	memset(&rq, 0, sizeof(rq));
	rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
	rq.buffer = buf;

	/*
	 * (ks) We currently transfer whole sectors only.
	 * This is sufficient for now.  But it would be great
	 * if we found a way to transfer any size, to support
	 * special commands like READ LONG.
	 */
	rq.hard_nr_sectors = rq.nr_sectors = nsect;
	rq.hard_cur_sectors = rq.current_nr_sectors = nsect;

	if (task->tf_flags & IDE_TFLAG_WRITE)
		rq.cmd_flags |= REQ_RW;

	rq.special = task;
	task->rq = &rq;

	return ide_do_drive_cmd(drive, &rq, ide_wait);
}

EXPORT_SYMBOL(ide_raw_taskfile);
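
/*
 * Illustrative sketch (not code from this file): reading one sector of
 * SMART data via ide_raw_taskfile(), much as the HDIO_DRIVE_CMD ioctl
 * below ends up doing for WIN_SMART:
 *
 *	ide_task_t args;
 *	u8 buf[SECTOR_SIZE];
 *
 *	memset(&args, 0, sizeof(args));
 *	args.tf.feature = SMART_READ_VALUES;
 *	args.tf.nsect   = 0x01;
 *	args.tf.lbam    = SMART_LCYL_PASS;
 *	args.tf.lbah    = SMART_HCYL_PASS;
 *	args.tf.command = WIN_SMART;
 *	args.tf_flags   = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
 *	args.data_phase = TASKFILE_IN;
 *	err = ide_raw_taskfile(drive, &args, buf, 1);
 */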

int ide_no_data_taskfile(ide_drive_t *drive, ide_task_t *task)
{
	task->data_phase = TASKFILE_NO_DATA;

	return ide_raw_taskfile(drive, task, NULL, 0);
}
EXPORT_SYMBOL_GPL(ide_no_data_taskfile);
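
/*
 * Illustrative sketch (not code from this file): a no-data command such
 * as a cache flush reduces to filling in the command byte and flags:
 *
 *	ide_task_t task;
 *
 *	memset(&task, 0, sizeof(task));
 *	task.tf.command = WIN_FLUSH_CACHE;
 *	task.tf_flags   = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
 *	err = ide_no_data_taskfile(drive, &task);
 */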

#ifdef CONFIG_IDE_TASK_IOCTL
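/*
 * HDIO_DRIVE_TASKFILE: userspace hands in (and receives back) one flat
 * buffer, laid out as
 *
 *	[ struct ide_task_request_s ][ out_size data bytes ][ in_size data bytes ]
 *
 * with out_size and in_size taken from the header, as parsed below.
 */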
int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	ide_task_request_t	*req_task;
	ide_task_t		args;
	u8 *outbuf		= NULL;
	u8 *inbuf		= NULL;
	u8 *data_buf		= NULL;
	int err			= 0;
	int tasksize		= sizeof(struct ide_task_request_s);
	unsigned int taskin	= 0;
	unsigned int taskout	= 0;
	u16 nsect		= 0;
	char __user *buf = (char __user *)arg;

//	printk("IDE Taskfile ...\n");

	req_task = kzalloc(tasksize, GFP_KERNEL);
	if (req_task == NULL)
		return -ENOMEM;
	if (copy_from_user(req_task, buf, tasksize)) {
		kfree(req_task);
		return -EFAULT;
	}

	taskout = req_task->out_size;
	taskin  = req_task->in_size;

	if (taskin > 65536 || taskout > 65536) {
		err = -EINVAL;
		goto abort;
	}

	if (taskout) {
		int outtotal = tasksize;
		outbuf = kzalloc(taskout, GFP_KERNEL);
		if (outbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(outbuf, buf + outtotal, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}

	if (taskin) {
		int intotal = tasksize + taskout;
		inbuf = kzalloc(taskin, GFP_KERNEL);
		if (inbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(inbuf, buf + intotal, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}

	memset(&args, 0, sizeof(ide_task_t));

	memcpy(&args.tf_array[0], req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2);
	memcpy(&args.tf_array[6], req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);

	args.data_phase = req_task->data_phase;

	args.tf_flags = IDE_TFLAG_IO_16BIT | IDE_TFLAG_DEVICE |
			IDE_TFLAG_IN_TF;
	if (drive->addressing == 1)
		args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_IN_HOB);

	if (req_task->out_flags.all) {
		args.tf_flags |= IDE_TFLAG_FLAGGED;

		if (req_task->out_flags.b.data)
			args.tf_flags |= IDE_TFLAG_OUT_DATA;

		if (req_task->out_flags.b.nsector_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_NSECT;
		if (req_task->out_flags.b.sector_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAL;
		if (req_task->out_flags.b.lcyl_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAM;
		if (req_task->out_flags.b.hcyl_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAH;

		if (req_task->out_flags.b.error_feature)
			args.tf_flags |= IDE_TFLAG_OUT_FEATURE;
		if (req_task->out_flags.b.nsector)
			args.tf_flags |= IDE_TFLAG_OUT_NSECT;
		if (req_task->out_flags.b.sector)
			args.tf_flags |= IDE_TFLAG_OUT_LBAL;
		if (req_task->out_flags.b.lcyl)
			args.tf_flags |= IDE_TFLAG_OUT_LBAM;
		if (req_task->out_flags.b.hcyl)
			args.tf_flags |= IDE_TFLAG_OUT_LBAH;
	} else {
		args.tf_flags |= IDE_TFLAG_OUT_TF;
		if (args.tf_flags & IDE_TFLAG_LBA48)
			args.tf_flags |= IDE_TFLAG_OUT_HOB;
	}

	if (req_task->in_flags.b.data)
		args.tf_flags |= IDE_TFLAG_IN_DATA;

	switch(req_task->data_phase) {
	case TASKFILE_MULTI_OUT:
		if (!drive->mult_count) {
			/* (hs): give up if multcount is not set */
			printk(KERN_ERR "%s: %s Multimode Write "
				"multcount is not set\n",
				drive->name, __FUNCTION__);
			err = -EPERM;
			goto abort;
		}
		/* fall through */
	case TASKFILE_OUT:
	case TASKFILE_OUT_DMAQ:
	case TASKFILE_OUT_DMA:
		nsect = taskout / SECTOR_SIZE;
		data_buf = outbuf;
		break;
	case TASKFILE_MULTI_IN:
		if (!drive->mult_count) {
			/* (hs): give up if multcount is not set */
			printk(KERN_ERR "%s: %s Multimode Read failure "
				"multcount is not set\n",
				drive->name, __FUNCTION__);
			err = -EPERM;
			goto abort;
		}
		/* fall through */
	case TASKFILE_IN:
	case TASKFILE_IN_DMAQ:
	case TASKFILE_IN_DMA:
		nsect = taskin / SECTOR_SIZE;
		data_buf = inbuf;
		break;
	case TASKFILE_NO_DATA:
		break;
	default:
		err = -EFAULT;
		goto abort;
	}

	if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA)
		nsect = 0;
	else if (!nsect) {
		nsect = (args.tf.hob_nsect << 8) | args.tf.nsect;

		if (!nsect) {
			printk(KERN_ERR "%s: in/out command without data\n",
					drive->name);
			err = -EFAULT;
			goto abort;
		}
	}

	if (req_task->req_cmd == IDE_DRIVE_TASK_RAW_WRITE)
		args.tf_flags |= IDE_TFLAG_WRITE;

	err = ide_raw_taskfile(drive, &args, data_buf, nsect);

	memcpy(req_task->hob_ports, &args.tf_array[0], HDIO_DRIVE_HOB_HDR_SIZE - 2);
	memcpy(req_task->io_ports, &args.tf_array[6], HDIO_DRIVE_TASK_HDR_SIZE);

	if ((args.tf_flags & IDE_TFLAG_FLAGGED_SET_IN_FLAGS) &&
	    req_task->in_flags.all == 0) {
		req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
		if (drive->addressing == 1)
			req_task->in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
	}

	if (copy_to_user(buf, req_task, tasksize)) {
		err = -EFAULT;
		goto abort;
	}
	if (taskout) {
		int outtotal = tasksize;
		if (copy_to_user(buf + outtotal, outbuf, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}
	if (taskin) {
		int intotal = tasksize + taskout;
		if (copy_to_user(buf + intotal, inbuf, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}
abort:
	kfree(req_task);
	kfree(outbuf);
	kfree(inbuf);

//	printk("IDE Taskfile ioctl ended. rc = %i\n", err);

	return err;
}
#endif

int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	u8 *buf = NULL;
	int bufsize = 0, err = 0;
	u8 args[4], xfer_rate = 0;
	ide_task_t tfargs;
	struct ide_taskfile *tf = &tfargs.tf;

	if (NULL == (void *) arg) {
		struct request rq;

		ide_init_drive_cmd(&rq);
		rq.cmd_type = REQ_TYPE_ATA_TASKFILE;

		return ide_do_drive_cmd(drive, &rq, ide_wait);
	}

	if (copy_from_user(args, (void __user *)arg, 4))
		return -EFAULT;

	memset(&tfargs, 0, sizeof(ide_task_t));
	tf->feature = args[2];
	if (args[0] == WIN_SMART) {
		tf->nsect = args[3];
		tf->lbal  = args[1];
		tf->lbam  = 0x4f;
		tf->lbah  = 0xc2;
		tfargs.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_IN_NSECT;
	} else {
		tf->nsect = args[1];
		tfargs.tf_flags = IDE_TFLAG_OUT_FEATURE |
				  IDE_TFLAG_OUT_NSECT | IDE_TFLAG_IN_NSECT;
	}
	tf->command = args[0];
	tfargs.data_phase = args[3] ? TASKFILE_IN : TASKFILE_NO_DATA;

	if (args[3]) {
		tfargs.tf_flags |= IDE_TFLAG_IO_16BIT;
		bufsize = SECTOR_WORDS * 4 * args[3];
		buf = kzalloc(bufsize, GFP_KERNEL);
		if (buf == NULL)
			return -ENOMEM;
	}

	if (set_transfer(drive, &tfargs)) {
		xfer_rate = args[1];
		if (ide_ata66_check(drive, &tfargs))
			goto abort;
	}

	err = ide_raw_taskfile(drive, &tfargs, buf, args[3]);

	args[0] = tf->status;
	args[1] = tf->error;
	args[2] = tf->nsect;

	if (!err && xfer_rate) {
		/* active-retuning-calls future */
		ide_set_xfer_rate(drive, xfer_rate);
		ide_driveid_update(drive);
	}
abort:
	if (copy_to_user((void __user *)arg, &args, 4))
		err = -EFAULT;
	if (buf) {
		if (copy_to_user((void __user *)(arg + 4), buf, bufsize))
			err = -EFAULT;
		kfree(buf);
	}
	return err;
}

int ide_task_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	void __user *p = (void __user *)arg;
	int err = 0;
	u8 args[7];
	ide_task_t task;

	if (copy_from_user(args, p, 7))
		return -EFAULT;

	memset(&task, 0, sizeof(task));
	/* args[1..6] map onto tf.feature..tf.device (tf_array[7..12]) */
	memcpy(&task.tf_array[7], &args[1], 6);
	task.tf.command = args[0];
	task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;

	err = ide_no_data_taskfile(drive, &task);

	args[0] = task.tf.command;
	memcpy(&args[1], &task.tf_array[7], 6);

	if (copy_to_user(p, args, 7))
		err = -EFAULT;

	return err;
}
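
/*
 * Userspace view (illustrative sketch, not code from this file):
 * HDIO_DRIVE_TASK passes seven bytes, command + feature/nsect/lbal/
 * lbam/lbah/select, and reads status and the registers back in place:
 *
 *	u8 args[7] = { WIN_IDLEIMMEDIATE, 0, 0, 0, 0, 0, 0 };
 *
 *	if (ioctl(fd, HDIO_DRIVE_TASK, args))
 *		perror("HDIO_DRIVE_TASK");
 */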