ide: pass command instead of request to ide_pio_datablock()
drivers/ide/ide-taskfile.c
/*
 *  Copyright (C) 2000-2002     Michael Cornwell <cornwell@acm.org>
 *  Copyright (C) 2000-2002     Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2001-2002     Klaus Smolin
 *                              IBM Storage Technology Division
 *  Copyright (C) 2003-2004, 2007  Bartlomiej Zolnierkiewicz
 *
 *  The big the bad and the ugly.
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>

#include <asm/uaccess.h>
#include <asm/io.h>
void ide_tf_dump(const char *s, struct ide_taskfile *tf)
{
#ifdef DEBUG
        printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
                "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
                s, tf->feature, tf->nsect, tf->lbal,
                tf->lbam, tf->lbah, tf->device, tf->command);
        printk("%s: hob: nsect 0x%02x lbal 0x%02x "
                "lbam 0x%02x lbah 0x%02x\n",
                s, tf->hob_nsect, tf->hob_lbal,
                tf->hob_lbam, tf->hob_lbah);
#endif
}
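
/*
 * Issue an IDENTIFY (DEVICE or PACKET DEVICE, depending on the media
 * type) command through the taskfile interface and read the identify
 * data into @buf, which the caller must size for one full sector.
 */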
int taskfile_lib_get_identify(ide_drive_t *drive, u8 *buf)
{
        struct ide_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.tf.nsect = 0x01;
        if (drive->media == ide_disk)
                cmd.tf.command = ATA_CMD_ID_ATA;
        else
                cmd.tf.command = ATA_CMD_ID_ATAPI;
        cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
        cmd.data_phase = TASKFILE_IN;

        return ide_raw_taskfile(drive, &cmd, buf, 1);
}

static ide_startstop_t task_no_data_intr(ide_drive_t *);
static ide_startstop_t pre_task_out_intr(ide_drive_t *, struct ide_cmd *);
static ide_startstop_t task_in_intr(ide_drive_t *);
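
/*
 * Issue a taskfile command to the drive.  Unless this is the PIO
 * fallback path of a failed DMA setup, the taskfile registers are
 * loaded first; the command is then started according to its data
 * phase: PIO-out commands hand over to pre_task_out_intr(), PIO-in
 * and no-data commands install the matching IRQ handler, and any
 * other phase is treated as DMA and kicked off via the dma_ops.
 */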
ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd)
{
        ide_hwif_t *hwif = drive->hwif;
        struct ide_cmd *cmd = &hwif->cmd;
        struct ide_taskfile *tf = &cmd->tf;
        ide_handler_t *handler = NULL;
        const struct ide_tp_ops *tp_ops = hwif->tp_ops;
        const struct ide_dma_ops *dma_ops = hwif->dma_ops;

        if (orig_cmd->data_phase == TASKFILE_MULTI_IN ||
            orig_cmd->data_phase == TASKFILE_MULTI_OUT) {
                if (!drive->mult_count) {
                        printk(KERN_ERR "%s: multimode not set!\n",
                                        drive->name);
                        return ide_stopped;
                }
        }

        if (orig_cmd->ftf_flags & IDE_FTFLAG_FLAGGED)
                orig_cmd->ftf_flags |= IDE_FTFLAG_SET_IN_FLAGS;

        memcpy(cmd, orig_cmd, sizeof(*cmd));

        if ((cmd->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
                ide_tf_dump(drive->name, tf);
                tp_ops->set_irq(hwif, 1);
                SELECT_MASK(drive, 0);
                tp_ops->tf_load(drive, cmd);
        }

        switch (cmd->data_phase) {
        case TASKFILE_MULTI_OUT:
        case TASKFILE_OUT:
                tp_ops->exec_command(hwif, tf->command);
                ndelay(400);    /* FIXME */
                return pre_task_out_intr(drive, cmd);
        case TASKFILE_MULTI_IN:
        case TASKFILE_IN:
                handler = task_in_intr;
                /* fall-through */
        case TASKFILE_NO_DATA:
                if (handler == NULL)
                        handler = task_no_data_intr;
                ide_execute_command(drive, tf->command, handler,
                                    WAIT_WORSTCASE, NULL);
                return ide_started;
        default:
                if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0 ||
                    ide_build_sglist(drive, hwif->rq) == 0 ||
                    dma_ops->dma_setup(drive))
                        return ide_stopped;
                dma_ops->dma_exec_cmd(drive, tf->command);
                dma_ops->dma_start(drive);
                return ide_started;
        }
}
EXPORT_SYMBOL_GPL(do_rw_taskfile);

/*
 * Handler for commands without a data phase
 */
static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        struct ide_cmd *cmd = &hwif->cmd;
        struct ide_taskfile *tf = &cmd->tf;
        int custom = (cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) ? 1 : 0;
        int retries = (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) ? 5 : 1;
        u8 stat;

        local_irq_enable_in_hardirq();

        while (1) {
                stat = hwif->tp_ops->read_status(hwif);
                if ((stat & ATA_BUSY) == 0 || retries-- == 0)
                        break;
                udelay(10);
        }

        if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
                if (custom && tf->command == ATA_CMD_SET_MULTI) {
                        drive->mult_req = drive->mult_count = 0;
                        drive->special.b.recalibrate = 1;
                        (void)ide_dump_status(drive, __func__, stat);
                        return ide_stopped;
                } else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) {
                        if ((stat & (ATA_ERR | ATA_DRQ)) == 0) {
                                ide_set_handler(drive, &task_no_data_intr,
                                                WAIT_WORSTCASE, NULL);
                                return ide_started;
                        }
                }
                return ide_error(drive, "task_no_data_intr", stat);
        }

        if (custom && tf->command == ATA_CMD_IDLEIMMEDIATE) {
                hwif->tp_ops->tf_read(drive, cmd);
                if (tf->lbal != 0xc4) {
                        printk(KERN_ERR "%s: head unload failed!\n",
                               drive->name);
                        ide_tf_dump(drive->name, tf);
                } else
                        drive->dev_flags |= IDE_DFLAG_PARKED;
        } else if (custom && tf->command == ATA_CMD_SET_MULTI)
                drive->mult_count = drive->mult_req;

        if (custom == 0 || tf->command == ATA_CMD_IDLEIMMEDIATE) {
                struct request *rq = hwif->rq;
                u8 err = ide_read_error(drive);

                if (blk_pm_request(rq))
                        ide_complete_pm_rq(drive, rq);
                else {
                        if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
                                ide_complete_cmd(drive, cmd, stat, err);
                        ide_complete_rq(drive, err);
                }
        }

        return ide_stopped;
}

static u8 wait_drive_not_busy(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        int retries;
        u8 stat;

        /*
         * Last sector was transferred, wait until device is ready.  This can
         * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
         */
        for (retries = 0; retries < 1000; retries++) {
                stat = hwif->tp_ops->read_status(hwif);

                if (stat & ATA_BUSY)
                        udelay(10);
                else
                        break;
        }

        if (stat & ATA_BUSY)
                printk(KERN_ERR "%s: drive still BUSY!\n", drive->name);

        return stat;
}
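
/*
 * Transfer a single sector to or from the drive in PIO mode.  The
 * scatterlist cursor in the hwif (cursg/cursg_ofs) tracks how far we
 * are into the request; the page backing the current sector is mapped
 * with kmap_atomic() just for the duration of the transfer, with IRQs
 * disabled around the mapping on highmem configurations.
 */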
static void ide_pio_sector(ide_drive_t *drive, struct ide_cmd *cmd,
                           unsigned int write)
{
        ide_hwif_t *hwif = drive->hwif;
        struct scatterlist *sg = hwif->sg_table;
        struct scatterlist *cursg = hwif->cursg;
        struct page *page;
#ifdef CONFIG_HIGHMEM
        unsigned long flags;
#endif
        unsigned int offset;
        u8 *buf;

        cursg = hwif->cursg;
        if (!cursg) {
                cursg = sg;
                hwif->cursg = sg;
        }

        page = sg_page(cursg);
        offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;

        /* get the current page and offset */
        page = nth_page(page, (offset >> PAGE_SHIFT));
        offset %= PAGE_SIZE;

#ifdef CONFIG_HIGHMEM
        local_irq_save(flags);
#endif
        buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;

        hwif->nleft--;
        hwif->cursg_ofs++;

        if ((hwif->cursg_ofs * SECTOR_SIZE) == cursg->length) {
                hwif->cursg = sg_next(hwif->cursg);
                hwif->cursg_ofs = 0;
        }

        /* do the actual data transfer */
        if (write)
                hwif->tp_ops->output_data(drive, cmd, buf, SECTOR_SIZE);
        else
                hwif->tp_ops->input_data(drive, cmd, buf, SECTOR_SIZE);

        kunmap_atomic(buf, KM_BIO_SRC_IRQ);
#ifdef CONFIG_HIGHMEM
        local_irq_restore(flags);
#endif
}
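
/*
 * Transfer one multi-sector block: up to mult_count sectors, capped
 * by whatever is left of the request, one sector at a time.
 */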
static void ide_pio_multi(ide_drive_t *drive, struct ide_cmd *cmd,
                          unsigned int write)
{
        unsigned int nsect;

        nsect = min_t(unsigned int, drive->hwif->nleft, drive->mult_count);
        while (nsect--)
                ide_pio_sector(drive, cmd, write);
}
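
/*
 * Transfer one block of data for the given command, choosing between
 * single and multi-sector PIO based on the data phase.  32-bit I/O is
 * temporarily disabled for commands flagged as 16-bit only.
 */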
static void ide_pio_datablock(ide_drive_t *drive, struct ide_cmd *cmd,
                              unsigned int write)
{
        u8 saved_io_32bit = drive->io_32bit;

        if (cmd->tf_flags & IDE_TFLAG_FS)
                cmd->rq->errors = 0;

        if (cmd->tf_flags & IDE_TFLAG_IO_16BIT)
                drive->io_32bit = 0;

        touch_softlockup_watchdog();

        switch (cmd->data_phase) {
        case TASKFILE_MULTI_IN:
        case TASKFILE_MULTI_OUT:
                ide_pio_multi(drive, cmd, write);
                break;
        default:
                ide_pio_sector(drive, cmd, write);
                break;
        }

        drive->io_32bit = saved_io_32bit;
}
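
/*
 * A PIO command failed: work out how many sectors made it to or from
 * the media before the failing block, partially complete a filesystem
 * request for those sectors, and let ide_error() handle the rest.
 */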
static ide_startstop_t task_error(ide_drive_t *drive, struct ide_cmd *cmd,
                                  const char *s, u8 stat)
{
        if (cmd->tf_flags & IDE_TFLAG_FS) {
                ide_hwif_t *hwif = drive->hwif;
                int sectors = hwif->nsect - hwif->nleft;

                switch (cmd->data_phase) {
                case TASKFILE_IN:
                        if (hwif->nleft)
                                break;
                        /* fall through */
                case TASKFILE_OUT:
                        sectors--;
                        break;
                case TASKFILE_MULTI_IN:
                        if (hwif->nleft)
                                break;
                        /* fall through */
                case TASKFILE_MULTI_OUT:
                        sectors -= drive->mult_count;
                default:
                        break;
                }

                if (sectors > 0)
                        ide_end_request(drive, 1, sectors);
        }
        return ide_error(drive, s, stat);
}
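
/*
 * Complete a finished taskfile command: for non-filesystem requests
 * the final status and error are latched into the command before the
 * request is completed; filesystem requests are simply ended for
 * their full sector count.
 */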
void ide_finish_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat)
{
        if ((cmd->tf_flags & IDE_TFLAG_FS) == 0) {
                u8 err = ide_read_error(drive);

                ide_complete_cmd(drive, cmd, stat, err);
                ide_complete_rq(drive, err);
                return;
        }

        ide_end_request(drive, 1, cmd->rq->nr_sectors);
}

/*
 * We got an interrupt on a task_in case, but no errors and no DRQ.
 *
 * It might be a spurious irq (shared irq), but it might be a
 * command that had no output.
 */
static ide_startstop_t task_in_unexpected(ide_drive_t *drive,
                                          struct ide_cmd *cmd, u8 stat)
{
        /* Command all done? */
        if (OK_STAT(stat, ATA_DRDY, ATA_BUSY)) {
                ide_finish_cmd(drive, cmd, stat);
                return ide_stopped;
        }

        /* Assume it was a spurious irq */
        ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
        return ide_started;
}

/*
 * Handler for command with PIO data-in phase (Read/Read Multiple).
 */
static ide_startstop_t task_in_intr(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        struct ide_cmd *cmd = &drive->hwif->cmd;
        u8 stat = hwif->tp_ops->read_status(hwif);

        /* Error? */
        if (stat & ATA_ERR)
                return task_error(drive, cmd, __func__, stat);

        /* Didn't want any data? Odd. */
        if ((stat & ATA_DRQ) == 0)
                return task_in_unexpected(drive, cmd, stat);

        ide_pio_datablock(drive, cmd, 0);

        /* Are we done? Check status and finish transfer. */
        if (!hwif->nleft) {
                stat = wait_drive_not_busy(drive);
                if (!OK_STAT(stat, 0, BAD_STAT))
                        return task_error(drive, cmd, __func__, stat);
                ide_finish_cmd(drive, cmd, stat);
                return ide_stopped;
        }

        /* Still data left to transfer. */
        ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);

        return ide_started;
}

/*
 * Handler for command with PIO data-out phase (Write/Write Multiple).
 */
static ide_startstop_t task_out_intr(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        struct ide_cmd *cmd = &drive->hwif->cmd;
        u8 stat = hwif->tp_ops->read_status(hwif);

        if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
                return task_error(drive, cmd, __func__, stat);

        /* Deal with unexpected ATA data phase. */
        if (((stat & ATA_DRQ) == 0) ^ !hwif->nleft)
                return task_error(drive, cmd, __func__, stat);

        if (!hwif->nleft) {
                ide_finish_cmd(drive, cmd, stat);
                return ide_stopped;
        }

        /* Still data left to transfer. */
        ide_pio_datablock(drive, cmd, 1);
        ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);

        return ide_started;
}
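
/*
 * Called by do_rw_taskfile() right after a PIO-out command has been
 * issued: wait for the drive to raise DRQ, install task_out_intr() as
 * the IRQ handler and push the first data block to the drive.
 */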
static ide_startstop_t pre_task_out_intr(ide_drive_t *drive,
                                         struct ide_cmd *cmd)
{
        ide_startstop_t startstop;

        if (ide_wait_stat(&startstop, drive, ATA_DRQ,
                          drive->bad_wstat, WAIT_DRQ)) {
                printk(KERN_ERR "%s: no DRQ after issuing %sWRITE%s\n",
                        drive->name,
                        cmd->data_phase == TASKFILE_MULTI_OUT ? "MULT" : "",
                        (drive->dev_flags & IDE_DFLAG_LBA48) ? "_EXT" : "");
                return startstop;
        }

        if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
                local_irq_disable();

        ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);
        ide_pio_datablock(drive, cmd, 1);

        return ide_started;
}
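
/*
 * Execute a taskfile command synchronously: build a block layer
 * request of type REQ_TYPE_ATA_TASKFILE around @cmd and an optional
 * data buffer of @nsect sectors, run it through blk_execute_rq() and
 * return the resulting error code.
 */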
int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
                     u16 nsect)
{
        struct request *rq;
        int error;

        rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
        rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
        rq->buffer = buf;

        /*
         * (ks) Currently we transfer only whole sectors.  This is
         * sufficient for now, but it would be great to find a way to
         * transfer arbitrary sizes, to support special commands like
         * READ LONG.
         */
        rq->hard_nr_sectors = rq->nr_sectors = nsect;
        rq->hard_cur_sectors = rq->current_nr_sectors = nsect;

        if (cmd->tf_flags & IDE_TFLAG_WRITE)
                rq->cmd_flags |= REQ_RW;

        rq->special = cmd;
        cmd->rq = rq;

        error = blk_execute_rq(drive->queue, NULL, rq, 0);
        blk_put_request(rq);

        return error;
}
EXPORT_SYMBOL(ide_raw_taskfile);

int ide_no_data_taskfile(ide_drive_t *drive, struct ide_cmd *cmd)
{
        cmd->data_phase = TASKFILE_NO_DATA;

        return ide_raw_taskfile(drive, cmd, NULL, 0);
}
EXPORT_SYMBOL_GPL(ide_no_data_taskfile);
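
/*
 * ioctl path (HDIO_DRIVE_TASKFILE): copy an ide_task_request_t plus
 * optional write and read data buffers in from user space, translate
 * it into a struct ide_cmd, execute it via ide_raw_taskfile() and
 * copy the updated taskfile and any input data back out.
 */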
#ifdef CONFIG_IDE_TASK_IOCTL
int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg)
{
        ide_task_request_t *req_task;
        struct ide_cmd cmd;
        u8 *outbuf = NULL;
        u8 *inbuf = NULL;
        u8 *data_buf = NULL;
        int err = 0;
        int tasksize = sizeof(struct ide_task_request_s);
        unsigned int taskin = 0;
        unsigned int taskout = 0;
        u16 nsect = 0;
        char __user *buf = (char __user *)arg;

//      printk("IDE Taskfile ...\n");

        req_task = kzalloc(tasksize, GFP_KERNEL);
        if (req_task == NULL)
                return -ENOMEM;
        if (copy_from_user(req_task, buf, tasksize)) {
                kfree(req_task);
                return -EFAULT;
        }

        taskout = req_task->out_size;
        taskin  = req_task->in_size;

        if (taskin > 65536 || taskout > 65536) {
                err = -EINVAL;
                goto abort;
        }

        if (taskout) {
                int outtotal = tasksize;
                outbuf = kzalloc(taskout, GFP_KERNEL);
                if (outbuf == NULL) {
                        err = -ENOMEM;
                        goto abort;
                }
                if (copy_from_user(outbuf, buf + outtotal, taskout)) {
                        err = -EFAULT;
                        goto abort;
                }
        }

        if (taskin) {
                int intotal = tasksize + taskout;
                inbuf = kzalloc(taskin, GFP_KERNEL);
                if (inbuf == NULL) {
                        err = -ENOMEM;
                        goto abort;
                }
                if (copy_from_user(inbuf, buf + intotal, taskin)) {
                        err = -EFAULT;
                        goto abort;
                }
        }
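
        /*
         * Translate the user-supplied request into a struct ide_cmd:
         * copy the HOB and standard taskfile registers into tf_array
         * and derive the tf/ftf flags from out_flags and in_flags.
         */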
        memset(&cmd, 0, sizeof(cmd));

        memcpy(&cmd.tf_array[0], req_task->hob_ports,
               HDIO_DRIVE_HOB_HDR_SIZE - 2);
        memcpy(&cmd.tf_array[6], req_task->io_ports,
               HDIO_DRIVE_TASK_HDR_SIZE);

        cmd.data_phase = req_task->data_phase;
        cmd.tf_flags = IDE_TFLAG_IO_16BIT | IDE_TFLAG_DEVICE |
                       IDE_TFLAG_IN_TF;

        if (drive->dev_flags & IDE_DFLAG_LBA48)
                cmd.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_IN_HOB);

        if (req_task->out_flags.all) {
                cmd.ftf_flags |= IDE_FTFLAG_FLAGGED;

                if (req_task->out_flags.b.data)
                        cmd.ftf_flags |= IDE_FTFLAG_OUT_DATA;

                if (req_task->out_flags.b.nsector_hob)
                        cmd.tf_flags |= IDE_TFLAG_OUT_HOB_NSECT;
                if (req_task->out_flags.b.sector_hob)
                        cmd.tf_flags |= IDE_TFLAG_OUT_HOB_LBAL;
                if (req_task->out_flags.b.lcyl_hob)
                        cmd.tf_flags |= IDE_TFLAG_OUT_HOB_LBAM;
                if (req_task->out_flags.b.hcyl_hob)
                        cmd.tf_flags |= IDE_TFLAG_OUT_HOB_LBAH;

                if (req_task->out_flags.b.error_feature)
                        cmd.tf_flags |= IDE_TFLAG_OUT_FEATURE;
                if (req_task->out_flags.b.nsector)
                        cmd.tf_flags |= IDE_TFLAG_OUT_NSECT;
                if (req_task->out_flags.b.sector)
                        cmd.tf_flags |= IDE_TFLAG_OUT_LBAL;
                if (req_task->out_flags.b.lcyl)
                        cmd.tf_flags |= IDE_TFLAG_OUT_LBAM;
                if (req_task->out_flags.b.hcyl)
                        cmd.tf_flags |= IDE_TFLAG_OUT_LBAH;
        } else {
                cmd.tf_flags |= IDE_TFLAG_OUT_TF;
                if (cmd.tf_flags & IDE_TFLAG_LBA48)
                        cmd.tf_flags |= IDE_TFLAG_OUT_HOB;
        }

        if (req_task->in_flags.b.data)
                cmd.ftf_flags |= IDE_FTFLAG_IN_DATA;

        switch (req_task->data_phase) {
        case TASKFILE_MULTI_OUT:
                if (!drive->mult_count) {
                        /* (hs): give up if multcount is not set */
                        printk(KERN_ERR "%s: %s Multimode Write "
                                "multcount is not set\n",
                                drive->name, __func__);
                        err = -EPERM;
                        goto abort;
                }
                /* fall through */
        case TASKFILE_OUT:
                /* fall through */
        case TASKFILE_OUT_DMAQ:
        case TASKFILE_OUT_DMA:
                nsect = taskout / SECTOR_SIZE;
                data_buf = outbuf;
                break;
        case TASKFILE_MULTI_IN:
                if (!drive->mult_count) {
                        /* (hs): give up if multcount is not set */
                        printk(KERN_ERR "%s: %s Multimode Read failure "
                                "multcount is not set\n",
                                drive->name, __func__);
                        err = -EPERM;
                        goto abort;
                }
                /* fall through */
        case TASKFILE_IN:
                /* fall through */
        case TASKFILE_IN_DMAQ:
        case TASKFILE_IN_DMA:
                nsect = taskin / SECTOR_SIZE;
                data_buf = inbuf;
                break;
        case TASKFILE_NO_DATA:
                break;
        default:
                err = -EFAULT;
                goto abort;
        }
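
        /*
         * If the buffer size did not determine a sector count, fall back
         * to the count encoded in the taskfile (hob_nsect/nsect); a data
         * command that still ends up with a zero count is rejected.
         */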
        if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA)
                nsect = 0;
        else if (!nsect) {
                nsect = (cmd.tf.hob_nsect << 8) | cmd.tf.nsect;

                if (!nsect) {
                        printk(KERN_ERR "%s: in/out command without data\n",
                                        drive->name);
                        err = -EFAULT;
                        goto abort;
                }
        }

        if (req_task->req_cmd == IDE_DRIVE_TASK_RAW_WRITE)
                cmd.tf_flags |= IDE_TFLAG_WRITE;

        err = ide_raw_taskfile(drive, &cmd, data_buf, nsect);

        memcpy(req_task->hob_ports, &cmd.tf_array[0],
               HDIO_DRIVE_HOB_HDR_SIZE - 2);
        memcpy(req_task->io_ports, &cmd.tf_array[6],
               HDIO_DRIVE_TASK_HDR_SIZE);

        if ((cmd.ftf_flags & IDE_FTFLAG_SET_IN_FLAGS) &&
            req_task->in_flags.all == 0) {
                req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
                if (drive->dev_flags & IDE_DFLAG_LBA48)
                        req_task->in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
        }

        if (copy_to_user(buf, req_task, tasksize)) {
                err = -EFAULT;
                goto abort;
        }
        if (taskout) {
                int outtotal = tasksize;
                if (copy_to_user(buf + outtotal, outbuf, taskout)) {
                        err = -EFAULT;
                        goto abort;
                }
        }
        if (taskin) {
                int intotal = tasksize + taskout;
                if (copy_to_user(buf + intotal, inbuf, taskin)) {
                        err = -EFAULT;
                        goto abort;
                }
        }
abort:
        kfree(req_task);
        kfree(outbuf);
        kfree(inbuf);

//      printk("IDE Taskfile ioctl ended. rc = %i\n", err);

        return err;
}
#endif