kernel: Remove some old major numbers.
[dragonfly.git] / sys / dev / disk / ata / ata-disk.c
blob e87898d55bafbc4a58429f317d12e9c099ca9b73
/*-
 * Copyright (c) 1998,1999,2000,2001,2002 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/ata/ata-disk.c,v 1.60.2.24 2003/01/30 07:19:59 sos Exp $
 */
#include "opt_ata.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ata.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/disk.h>
#include <sys/devicestat.h>
#include <sys/cons.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/rman.h>
#include <sys/proc.h>
#include <sys/buf2.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/md_var.h>
#include <machine/clock.h>

#include "ata-all.h"
#include "ata-disk.h"
#include "ata-raid.h"
/* device structures */
static d_open_t adopen;
static d_close_t adclose;
static d_strategy_t adstrategy;
static d_dump_t addump;

static struct dev_ops ad_ops = {
    { "ad", 0, D_DISK },
    .d_open = adopen,
    .d_close = adclose,
    .d_read = physread,
    .d_write = physwrite,
    .d_strategy = adstrategy,
    .d_dump = addump,
};
/* prototypes */
static void ad_requeue(struct ata_channel *, struct ad_request *);
static void ad_invalidatequeue(struct ad_softc *, struct ad_request *);
static int ad_tagsupported(struct ad_softc *);
static void ad_timeout(struct ad_request *);
static void ad_free(struct ad_request *);
static int ad_version(u_int16_t);

/* misc defines */
#define AD_MAX_RETRIES 3

/* internal vars */
static u_int32_t adp_lun_map = 0;
static int ata_dma = 1;
static int ata_wc = 1;
static int ata_tags = 0;
TUNABLE_INT("hw.ata.ata_dma", &ata_dma);
TUNABLE_INT("hw.ata.wc", &ata_wc);
TUNABLE_INT("hw.ata.tags", &ata_tags);
static MALLOC_DEFINE(M_AD, "AD driver", "ATA disk driver");

/* sysctl vars */
SYSCTL_DECL(_hw_ata);
SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RD, &ata_dma, 0,
           "ATA disk DMA mode control");
SYSCTL_INT(_hw_ata, OID_AUTO, wc, CTLFLAG_RD, &ata_wc, 0,
           "ATA disk write caching");
SYSCTL_INT(_hw_ata, OID_AUTO, tags, CTLFLAG_RD, &ata_tags, 0,
           "ATA disk tagged queuing support");
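
/*
 * Usage note (illustrative, not part of the original source): the three
 * knobs above are boot-time tunables mirrored read-only through sysctl, so
 * they are normally set from the loader rather than at runtime, e.g. in
 * /boot/loader.conf:
 *
 *     hw.ata.ata_dma="1"    # use DMA when drive and controller support it
 *     hw.ata.wc="1"         # enable the drive's write cache
 *     hw.ata.tags="1"       # try tagged queueing on known-good drives
 *
 * Because the sysctls are CTLFLAG_RD, they only report the values that were
 * in effect at boot.
 */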
void
ad_attach(struct ata_device *atadev, int alreadylocked)
{
    struct ad_softc *adp;
    struct disk_info info;
    cdev_t dev;

    adp = kmalloc(sizeof(struct ad_softc), M_AD, M_WAITOK | M_ZERO);

    KKASSERT(atadev->channel->req_mpipe.max_count != 0);

    adp->device = atadev;
#ifdef ATA_STATIC_ID
    adp->lun = (device_get_unit(atadev->channel->dev)<<1)+ATA_DEV(atadev->unit);
#else
    adp->lun = ata_get_lun(&adp_lun_map);
#endif
    ata_set_name(atadev, "ad", adp->lun);
    adp->heads = atadev->param->heads;
    adp->sectors = atadev->param->sectors;
    adp->total_secs = atadev->param->cylinders * adp->heads * adp->sectors;
    bioq_init(&adp->bio_queue);

    /* does this device need oldstyle CHS addressing */
    if (!ad_version(atadev->param->version_major) ||
        !(atadev->param->atavalid & ATA_FLAG_54_58) || !atadev->param->lba_size)
        adp->flags |= AD_F_CHS_USED;

    /* use the 28bit LBA size if valid */
    if (atadev->param->cylinders == 16383 &&
        adp->total_secs < atadev->param->lba_size)
        adp->total_secs = atadev->param->lba_size;

    /* use the 48bit LBA size if valid */
    if (atadev->param->support.address48 &&
        atadev->param->lba_size48 > 268435455)
        adp->total_secs = atadev->param->lba_size48;
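
    /*
     * Worked example (for illustration only): the CHS product above tops out
     * at 16383 * 16 * 63 = 16514064 sectors (~8 GB), the 28-bit LBA field
     * covers up to 2^28 - 1 = 268435455 sectors (~137 GB), and anything
     * larger must come from the 48-bit lba_size48 value.
     */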

    if (!alreadylocked)
        ATA_SLEEPLOCK_CH(atadev->channel, ATA_CONTROL);
    /* use multiple sectors/interrupt if device supports it */
    adp->transfersize = DEV_BSIZE;
    if (ad_version(atadev->param->version_major)) {
        int secsperint = max(1, min(atadev->param->sectors_intr, 16));

        if (!ata_command(atadev, ATA_C_SET_MULTI, 0, secsperint,
                         0, ATA_WAIT_INTR) && !ata_wait(atadev, 0))
            adp->transfersize *= secsperint;
    }

    /* enable read caching if not default on device */
    if (ata_command(atadev, ATA_C_SETFEATURES,
                    0, 0, ATA_C_F_ENAB_RCACHE, ATA_WAIT_INTR))
        ata_prtdev(atadev, "enabling readahead cache failed\n");

    /* enable write caching if allowed and not default on device */
    if (ata_wc || (ata_tags && ad_tagsupported(adp))) {
        if (ata_command(atadev, ATA_C_SETFEATURES,
                        0, 0, ATA_C_F_ENAB_WCACHE, ATA_WAIT_INTR))
            ata_prtdev(atadev, "enabling write cache failed\n");
    }
    else {
        if (ata_command(atadev, ATA_C_SETFEATURES,
                        0, 0, ATA_C_F_DIS_WCACHE, ATA_WAIT_INTR))
            ata_prtdev(atadev, "disabling write cache failed\n");
    }

    /* use DMA if allowed and if drive/controller supports it */
    if (ata_dma)
        ata_dmainit(atadev, ata_pmode(atadev->param),
                    ata_wmode(atadev->param), ata_umode(atadev->param));
    else
        ata_dmainit(atadev, ata_pmode(atadev->param), -1, -1);

    /* use tagged queueing if allowed and supported */
    if (ata_tags && ad_tagsupported(adp)) {
        adp->num_tags = atadev->param->queuelen;
        adp->flags |= AD_F_TAG_ENABLED;
        adp->device->channel->flags |= ATA_QUEUED;
        if (ata_command(atadev, ATA_C_SETFEATURES,
                        0, 0, ATA_C_F_DIS_RELIRQ, ATA_WAIT_INTR))
            ata_prtdev(atadev, "disabling release interrupt failed\n");
        if (ata_command(atadev, ATA_C_SETFEATURES,
                        0, 0, ATA_C_F_DIS_SRVIRQ, ATA_WAIT_INTR))
            ata_prtdev(atadev, "disabling service interrupt failed\n");
    }

    ATA_UNLOCK_CH(atadev->channel);

    devstat_add_entry(&adp->stats, "ad", adp->lun, DEV_BSIZE,
                      DEVSTAT_NO_ORDERED_TAGS,
                      DEVSTAT_TYPE_DIRECT | DEVSTAT_TYPE_IF_IDE,
                      DEVSTAT_PRIORITY_DISK);

    dev = disk_create(adp->lun, &adp->disk, &ad_ops);
    dev->si_drv1 = adp;
    dev->si_iosize_max = 256 * DEV_BSIZE;
    adp->dev = dev;

    /* construct the disk_info */
    bzero(&info, sizeof(info));
    info.d_media_blksize = DEV_BSIZE;
    info.d_media_blocks = adp->total_secs;
    info.d_nheads = adp->heads;
    info.d_secpertrack = adp->sectors;
    info.d_ncylinders = adp->total_secs /
                        (info.d_nheads * info.d_secpertrack);
    info.d_secpercyl = info.d_secpertrack * info.d_nheads;
    disk_setdiskinfo(&adp->disk, &info);

    atadev->driver = adp;
    atadev->flags = 0;

    /* if this disk belongs to an ATA RAID don't print the probe */
    if (ata_raiddisk_attach(adp))
        adp->flags |= AD_F_RAID_SUBDISK;
    else {
        if (atadev->driver) {
            ad_print(adp);
            ata_enclosure_print(atadev);
        }
    }
}
void
ad_detach(struct ata_device *atadev, int flush) /* get rid of flush XXX SOS */
{
    struct ad_softc *adp = atadev->driver;
    struct ad_request *request;
    struct bio *bio;
    struct buf *bp;

    atadev->flags |= ATA_D_DETACHING;
    ata_prtdev(atadev, "removed from configuration\n");
    ad_invalidatequeue(adp, NULL);
    TAILQ_FOREACH(request, &atadev->channel->ata_queue, chain) {
        if (request->softc != adp)
            continue;
        TAILQ_REMOVE(&atadev->channel->ata_queue, request, chain);
        request->bio->bio_buf->b_error = ENXIO;
        request->bio->bio_buf->b_flags |= B_ERROR;
        biodone(request->bio);
        ad_free(request);
    }
    ata_dmafree(atadev);
    while ((bio = bioq_first(&adp->bio_queue))) {
        bioq_remove(&adp->bio_queue, bio);
        bp = bio->bio_buf;
        bp->b_error = ENXIO;
        bp->b_flags |= B_ERROR;
        biodone(bio);
    }
    disk_invalidate(&adp->disk);
    devstat_remove_entry(&adp->stats);
    disk_destroy(&adp->disk);
    if (flush) {
        if (ata_command(atadev, ATA_C_FLUSHCACHE, 0, 0, 0, ATA_WAIT_READY))
            ata_prtdev(atadev, "flushing cache on detach failed\n");
    }
    if (adp->flags & AD_F_RAID_SUBDISK)
        ata_raiddisk_detach(adp);
    ata_free_name(atadev);
    ata_free_lun(&adp_lun_map, adp->lun);
    atadev->driver = NULL;
    atadev->flags = 0;
    kfree(adp, M_AD);
}
static int
adopen(struct dev_open_args *ap)
{
    struct ad_softc *adp = ap->a_head.a_dev->si_drv1;

    if (adp->flags & AD_F_RAID_SUBDISK)
        return EBUSY;
    return 0;
}
static int
adclose(struct dev_close_args *ap)
{
    struct ad_softc *adp = ap->a_head.a_dev->si_drv1;

    crit_enter(); /* interlock non-atomic channel lock */
    ATA_SLEEPLOCK_CH(adp->device->channel, ATA_CONTROL);
    if (ata_command(adp->device, ATA_C_FLUSHCACHE, 0, 0, 0, ATA_WAIT_READY))
        ata_prtdev(adp->device, "flushing cache on close failed\n");
    ATA_UNLOCK_CH(adp->device->channel);
    crit_exit();
    return 0;
}
/*
 * note: always use the passed device rather than bp->b_dev, as the bp
 * may have been translated through several layers.
 */
static int
adstrategy(struct dev_strategy_args *ap)
{
    cdev_t dev = ap->a_head.a_dev;
    struct bio *bio = ap->a_bio;
    struct buf *bp = bio->bio_buf;
    struct ad_softc *adp = dev->si_drv1;

    if (adp->device->flags & ATA_D_DETACHING) {
        bp->b_error = ENXIO;
        bp->b_flags |= B_ERROR;
        biodone(bio);
        return(0);
    }
    bio->bio_driver_info = dev;
    crit_enter();
    bioqdisksort(&adp->bio_queue, bio);
    crit_exit();
    ata_start(adp->device->channel);
    return(0);
}
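
/*
 * Crash dump entry point.  Illustrative note (not from the original source):
 * the drive is forced back to PIO mode and ad_transfer() is driven
 * synchronously in a polling loop, since the interrupt-driven completion
 * path cannot be relied upon while the system is dumping.
 */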
static int
addump(struct dev_dump_args *ap)
{
    cdev_t dev = ap->a_head.a_dev;
    struct ad_softc *adp = dev->si_drv1;
    struct ad_request request;

    if (!adp)
        return ENXIO;

    /* force PIO mode for dumps */
    adp->device->mode = ATA_PIO;
    ata_reinit(adp->device->channel);

    /* set up request */
    bzero(&request, sizeof(struct ad_request));
    request.softc = adp;
    request.blockaddr = ap->a_offset / DEV_BSIZE;
    request.bytecount = ap->a_length;
    request.data = ap->a_virtual;
    callout_init(&request.callout);
    while (request.bytecount > 0) {
        ad_transfer(&request);
        if (request.flags & ADR_F_ERROR)
            return EIO;
        request.donecount += request.currentsize;
        request.bytecount -= request.currentsize;
        DELAY(20);
    }

    if (ata_wait(adp->device, ATA_S_READY | ATA_S_DSC) < 0)
        ata_prtdev(adp->device, "timeout waiting for final ready\n");

    return 0;
}
/*
 * Critical section is held when this function is called
 * by ata_start().
 */
void
ad_start(struct ata_device *atadev)
{
    struct ad_softc *adp = atadev->driver;
    struct bio *bio = bioq_first(&adp->bio_queue);
    struct buf *bp;
    struct ad_request *request;
    int tag = 0;

    if (bio == NULL)
        return;
    bp = bio->bio_buf;

    /* if tagged queueing enabled get next free tag */
    if (adp->flags & AD_F_TAG_ENABLED) {
        while (tag <= adp->num_tags && adp->tags[tag])
            tag++;
        if (tag > adp->num_tags)
            return;
    }
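
    /*
     * Note (summarizing the logic above): if every tag slot is currently in
     * use we simply return; once an outstanding request completes and frees
     * its tag, ad_start() runs again and picks this bio up.
     */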
    /*
     * Allocate a request. The allocation can only fail if the pipeline
     * is full, in which case the request will be picked up later when
     * ad_start() is called after another request completes.
     */
    request = mpipe_alloc_nowait(&atadev->channel->req_mpipe);
    if (request == NULL) {
        ata_prtdev(atadev, "pipeline full allocating request in ad_start\n");
        return;
    }

    KASSERT((bio->bio_offset & DEV_BMASK) == 0,
            ("bio_offset not on sector boundary %08llx", bio->bio_offset));

    /* setup request */
    request->softc = adp;
    request->bio = bio;
    request->blockaddr = (u_int64_t)(bio->bio_offset >> DEV_BSHIFT);
    request->bytecount = bp->b_bcount;
    request->data = bp->b_data;
    request->tag = tag;
    callout_init(&request->callout);
    if (bp->b_cmd == BUF_CMD_READ)
        request->flags |= ADR_F_READ;
    if (adp->device->mode >= ATA_DMA) {
        if (ata_dmaalloc(atadev, M_NOWAIT) != 0) {
            mpipe_free(&atadev->channel->req_mpipe, request);
            ata_prtdev(atadev, "pipeline full allocated dmabuf in ad_start\n");
            /* do not revert to PIO, wait for ad_start after I/O completion */
            return;
        }
    }

    /* insert in tag array */
    adp->tags[tag] = request;

    /* remove from drive queue */
    bioq_remove(&adp->bio_queue, bio);

    /* link onto controller queue */
    TAILQ_INSERT_TAIL(&atadev->channel->ata_queue, request, chain);
}
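
/*
 * Illustrative summary (not from the original source): a request that gets
 * retried is pushed back onto the head of the channel queue and reissued
 * from the start of the transfer, which is why any partial progress
 * (donecount) is folded back into bytecount before the retry.
 */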
void
ad_requeue(struct ata_channel *chan, struct ad_request *req)
{
    if (req->donecount) {
        ata_printf(chan, -1,
                   "WARNING: resetting donecount %u for retry\n",
                   req->donecount);
        req->bytecount += req->donecount;
        req->donecount = 0;
    }
    TAILQ_INSERT_HEAD(&chan->ata_queue, req, chain);
}
int
ad_transfer(struct ad_request *request)
{
    struct ad_softc *adp;
    u_int64_t lba;
    u_int32_t count, max_count;
    u_int8_t cmd;
    int flags = ATA_IMMEDIATE;

    /* get request params */
    adp = request->softc;

    /* calculate transfer details */
    lba = request->blockaddr + (request->donecount / DEV_BSIZE);

    if (request->donecount == 0) {

        /* start timeout for this transfer */
        if (dumping) {
            callout_stop(&request->callout);
        } else {
            callout_reset(&request->callout, 10 * hz,
                          (void *)ad_timeout, request);
        }

        /* setup transfer parameters */
        count = howmany(request->bytecount, DEV_BSIZE);
        max_count = adp->device->param->support.address48 ? 65536 : 256;
        if (count > max_count) {
            ata_prtdev(adp->device,
                       "count %d size transfers not supported\n", count);
            count = max_count;
        }

        if (adp->flags & AD_F_CHS_USED) {
            int sector = (lba % adp->sectors) + 1;
            int cylinder = lba / (adp->sectors * adp->heads);
            int head = (lba % (adp->sectors * adp->heads)) / adp->sectors;

            lba = (sector&0xff) | ((cylinder&0xffff)<<8) | ((head&0xf)<<24);
            adp->device->flags |= ATA_D_USE_CHS;
        }
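
        /*
         * Worked example of the CHS packing above (illustration only): with
         * 63 sectors/track and 16 heads, LBA 4000 becomes sector 32, head 15,
         * cylinder 3, packed into the "lba" word passed to ata_command() as
         * (32 & 0xff) | (3 << 8) | (15 << 24).
         */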
        /* setup first transfer length */
        request->currentsize = min(request->bytecount, adp->transfersize);

        devstat_start_transaction(&adp->stats);

        /* does this drive & transfer work with DMA ? */
        request->flags &= ~ADR_F_DMA_USED;
        if (adp->device->mode >= ATA_DMA &&
            !ata_dmasetup(adp->device, request->data, request->bytecount)) {
            request->flags |= ADR_F_DMA_USED;
            request->currentsize = request->bytecount;

            /* do we have tags enabled ? */
            if (adp->flags & AD_F_TAG_ENABLED) {
                cmd = (request->flags & ADR_F_READ) ?
                    ATA_C_READ_DMA_QUEUED : ATA_C_WRITE_DMA_QUEUED;

                if (ata_command(adp->device, cmd, lba,
                                request->tag << 3, count, flags)) {
                    ata_prtdev(adp->device, "error executing command");
                    goto transfer_failed;
                }
                if (ata_wait(adp->device, ATA_S_READY)) {
                    ata_prtdev(adp->device, "timeout waiting for READY\n");
                    goto transfer_failed;
                }
                adp->outstanding++;

                /* if ATA bus RELEASE check for SERVICE */
                if (adp->flags & AD_F_TAG_ENABLED &&
                    ATA_INB(adp->device->channel->r_io, ATA_IREASON) &
                    ATA_I_RELEASE)
                    return ad_service(adp, 1);
            }
            else {
                cmd = (request->flags & ADR_F_READ) ?
                    ATA_C_READ_DMA : ATA_C_WRITE_DMA;

                if (ata_command(adp->device, cmd, lba, count, 0, flags)) {
                    ata_prtdev(adp->device, "error executing command");
                    goto transfer_failed;
                }
#if 0
                /*
                 * wait for data transfer phase
                 *
                 * well, this should be here according to specs, but older
                 * Promise controllers don't like it, they lock up!
                 */
                if (ata_wait(adp->device, ATA_S_READY | ATA_S_DRQ)) {
                    ata_prtdev(adp->device, "timeout waiting for data phase\n");
                    goto transfer_failed;
                }
#endif
            }

            /* start transfer, return and wait for interrupt */
            ata_dmastart(adp->device, request->data, request->bytecount,
                         request->flags & ADR_F_READ);
            return ATA_OP_CONTINUES;
        }
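
        /*
         * Note: for the DMA cases above the function returns immediately;
         * completion, error handling and any retry happen in ad_interrupt()
         * once the controller raises its interrupt.
         */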
        /* does this drive support multi sector transfers ? */
        if (request->currentsize > DEV_BSIZE)
            cmd = request->flags&ADR_F_READ ? ATA_C_READ_MUL : ATA_C_WRITE_MUL;

        /* just plain old single sector transfer */
        else
            cmd = request->flags&ADR_F_READ ? ATA_C_READ : ATA_C_WRITE;

        if (ata_command(adp->device, cmd, lba, count, 0, flags)) {
            ata_prtdev(adp->device, "error executing command");
            goto transfer_failed;
        }
    }

    /* calculate this transfer length */
    request->currentsize = min(request->bytecount, adp->transfersize);

    /* if this is a PIO read operation, return and wait for interrupt */
    if (request->flags & ADR_F_READ)
        return ATA_OP_CONTINUES;

    /* ready to write PIO data ? */
    if (ata_wait(adp->device, (ATA_S_READY | ATA_S_DSC | ATA_S_DRQ)) < 0) {
        ata_prtdev(adp->device, "timeout waiting for DRQ");
        goto transfer_failed;
    }

    /* output the data */
    if (adp->device->channel->flags & ATA_USE_16BIT)
        ATA_OUTSW(adp->device->channel->r_io, ATA_DATA,
                  (void *)((uintptr_t)request->data + request->donecount),
                  request->currentsize / sizeof(int16_t));
    else
        ATA_OUTSL(adp->device->channel->r_io, ATA_DATA,
                  (void *)((uintptr_t)request->data + request->donecount),
                  request->currentsize / sizeof(int32_t));
    return ATA_OP_CONTINUES;

transfer_failed:
    callout_stop(&request->callout);
    ad_invalidatequeue(adp, request);
    kprintf(" - resetting\n");

    /* if retries still permit, reinject this request */
    if (request->retries++ < AD_MAX_RETRIES)
        ad_requeue(adp->device->channel, request);
    else {
        /* retries all used up, return error */
        request->bio->bio_buf->b_error = EIO;
        request->bio->bio_buf->b_flags |= B_ERROR;
        request->bio->bio_buf->b_resid = request->bytecount;
        devstat_end_transaction_buf(&adp->stats, request->bio->bio_buf);
        biodone(request->bio);
        ad_free(request);
    }
    ata_reinit(adp->device->channel);
    return ATA_OP_CONTINUES;
}
int
ad_interrupt(struct ad_request *request)
{
    struct ad_softc *adp = request->softc;
    int dma_stat = 0;
    cdev_t dev;

    /* finish DMA transfer */
    if (request->flags & ADR_F_DMA_USED)
        dma_stat = ata_dmadone(adp->device);

    dev = request->bio->bio_driver_info;
    /* do we have a corrected soft error ? */
    if (adp->device->channel->status & ATA_S_CORR)
        diskerr(request->bio, dev,
                "soft error (ECC corrected)", LOG_PRINTF,
                request->donecount);

    /* did any real errors happen ? */
    if ((adp->device->channel->status & ATA_S_ERROR) ||
        (request->flags & ADR_F_DMA_USED && dma_stat & ATA_BMSTAT_ERROR)) {
        adp->device->channel->error =
            ATA_INB(adp->device->channel->r_io, ATA_ERROR);
        diskerr(request->bio, dev,
                (adp->device->channel->error & ATA_E_ICRC) ?
                "UDMA ICRC error" : "hard error", LOG_PRINTF,
                request->donecount);

        /* if this is a UDMA CRC error, reinject request */
        if (request->flags & ADR_F_DMA_USED &&
            adp->device->channel->error & ATA_E_ICRC) {
            callout_stop(&request->callout);
            ad_invalidatequeue(adp, request);

            if (request->retries++ < AD_MAX_RETRIES)
                kprintf(" retrying\n");
            else {
                ata_dmainit(adp->device, ata_pmode(adp->device->param), -1, -1);
                kprintf(" falling back to PIO mode\n");
            }
            ad_requeue(adp->device->channel, request);
            return ATA_OP_FINISHED;
        }

        /* if using DMA, try once again in PIO mode */
        if (request->flags & ADR_F_DMA_USED) {
            callout_stop(&request->callout);
            ad_invalidatequeue(adp, request);
            ata_dmainit(adp->device, ata_pmode(adp->device->param), -1, -1);
            request->flags |= ADR_F_FORCE_PIO;
            kprintf(" trying PIO mode\n");
            ad_requeue(adp->device->channel, request);
            return ATA_OP_FINISHED;
        }

        request->flags |= ADR_F_ERROR;
        kprintf(" status=%02x error=%02x\n",
                adp->device->channel->status, adp->device->channel->error);
    }

    /* if we arrived here with forced PIO mode, DMA doesn't work right */
    if (request->flags & ADR_F_FORCE_PIO && !(request->flags & ADR_F_ERROR))
        ata_prtdev(adp->device, "DMA problem fallback to PIO mode\n");

    /* if this was a PIO read operation, get the data */
    if (!(request->flags & ADR_F_DMA_USED) &&
        (request->flags & (ADR_F_READ | ADR_F_ERROR)) == ADR_F_READ) {

        /* ready to receive data? */
        if ((adp->device->channel->status & ATA_S_READY) == 0)
            ata_prtdev(adp->device, "read interrupt arrived early");

        if (ata_wait(adp->device, (ATA_S_READY | ATA_S_DSC | ATA_S_DRQ)) != 0) {
            ata_prtdev(adp->device, "read error detected (too) late");
            request->flags |= ADR_F_ERROR;
        }
        else {
            /* data ready, read in */
            if (adp->device->channel->flags & ATA_USE_16BIT)
                ATA_INSW(adp->device->channel->r_io, ATA_DATA,
                         (void*)((uintptr_t)request->data + request->donecount),
                         request->currentsize / sizeof(int16_t));
            else
                ATA_INSL(adp->device->channel->r_io, ATA_DATA,
                         (void*)((uintptr_t)request->data + request->donecount),
                         request->currentsize / sizeof(int32_t));
        }
    }

    /* finish up transfer */
    if (request->flags & ADR_F_ERROR) {
        request->bio->bio_buf->b_error = EIO;
        request->bio->bio_buf->b_flags |= B_ERROR;
    }
    else {
        request->bytecount -= request->currentsize;
        request->donecount += request->currentsize;
        if (request->bytecount > 0) {
            ad_transfer(request);
            return ATA_OP_CONTINUES;
        }
    }

    /* disarm timeout for this transfer */
    callout_stop(&request->callout);

    request->bio->bio_buf->b_resid = request->bytecount;

    devstat_end_transaction_buf(&adp->stats, request->bio->bio_buf);
    biodone(request->bio);
    ad_free(request);
    adp->outstanding--;

    /* check for SERVICE (tagged operations only) */
    return ad_service(adp, 1);
}
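
/*
 * Tagged-queueing SERVICE handling.  Rough flow as implemented below: when a
 * queued command was released on the bus, the drive later asserts SERVICE in
 * its status register; the driver then issues ATA_C_SERVICE, reads the tag
 * back from the sector count register (value >> 3), looks the original
 * request up in the tag array and restarts the DMA engine for it.
 */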
int
ad_service(struct ad_softc *adp, int change)
{
    /* do we have to check the other device on this channel ? */
    if (adp->device->channel->flags & ATA_QUEUED && change) {
        int device = adp->device->unit;

        if (adp->device->unit == ATA_MASTER) {
            if ((adp->device->channel->devices & ATA_ATA_SLAVE) &&
                (adp->device->channel->device[SLAVE].driver) &&
                ((struct ad_softc *) (adp->device->channel->
                    device[SLAVE].driver))->flags & AD_F_TAG_ENABLED)
                device = ATA_SLAVE;
        }
        else {
            if ((adp->device->channel->devices & ATA_ATA_MASTER) &&
                (adp->device->channel->device[MASTER].driver) &&
                ((struct ad_softc *) (adp->device->channel->
                    device[MASTER].driver))->flags & AD_F_TAG_ENABLED)
                device = ATA_MASTER;
        }
        if (device != adp->device->unit &&
            ((struct ad_softc *)
             (adp->device->channel->
              device[ATA_DEV(device)].driver))->outstanding > 0) {
            ATA_OUTB(adp->device->channel->r_io, ATA_DRIVE, ATA_D_IBM | device);
            adp = adp->device->channel->device[ATA_DEV(device)].driver;
            DELAY(10);
        }
    }
    adp->device->channel->status =
        ATA_INB(adp->device->channel->r_altio, ATA_ALTSTAT);

    /* do we have a SERVICE request from the drive ? */
    if (adp->flags & AD_F_TAG_ENABLED &&
        adp->outstanding > 0 &&
        adp->device->channel->status & ATA_S_SERVICE) {
        struct ad_request *request;
        int tag;

        /* check for error */
        if (adp->device->channel->status & ATA_S_ERROR) {
            ata_prtdev(adp->device, "Oops! controller says s=0x%02x e=0x%02x\n",
                       adp->device->channel->status,
                       adp->device->channel->error);
            ad_invalidatequeue(adp, NULL);
            return ATA_OP_FINISHED;
        }

        /* issue SERVICE cmd */
        if (ata_command(adp->device, ATA_C_SERVICE, 0, 0, 0, ATA_IMMEDIATE)) {
            ata_prtdev(adp->device, "problem executing SERVICE cmd\n");
            ad_invalidatequeue(adp, NULL);
            return ATA_OP_FINISHED;
        }

        /* setup the transfer environment when ready */
        if (ata_wait(adp->device, ATA_S_READY)) {
            ata_prtdev(adp->device, "SERVICE timeout tag=%d s=%02x e=%02x\n",
                       ATA_INB(adp->device->channel->r_io, ATA_COUNT) >> 3,
                       adp->device->channel->status,
                       adp->device->channel->error);
            ad_invalidatequeue(adp, NULL);
            return ATA_OP_FINISHED;
        }
        tag = ATA_INB(adp->device->channel->r_io, ATA_COUNT) >> 3;
        if (!(request = adp->tags[tag])) {
            ata_prtdev(adp->device, "no request for tag=%d\n", tag);
            ad_invalidatequeue(adp, NULL);
            return ATA_OP_FINISHED;
        }
        ATA_FORCELOCK_CH(adp->device->channel, ATA_ACTIVE_ATA);
        adp->device->channel->running = request;
        request->serv++;

        /* start DMA transfer when ready */
        if (ata_wait(adp->device, ATA_S_READY | ATA_S_DRQ)) {
            ata_prtdev(adp->device, "timeout starting DMA s=%02x e=%02x\n",
                       adp->device->channel->status,
                       adp->device->channel->error);
            ad_invalidatequeue(adp, NULL);
            return ATA_OP_FINISHED;
        }
        ata_dmastart(adp->device, request->data, request->bytecount,
                     request->flags & ADR_F_READ);
        return ATA_OP_CONTINUES;
    }
    return ATA_OP_FINISHED;
}
static void
ad_free(struct ad_request *request)
{
    crit_enter();
    ata_dmafree(request->softc->device);
    request->softc->tags[request->tag] = NULL;
    mpipe_free(&request->softc->device->channel->req_mpipe, request);
    crit_exit();
}
static void
ad_invalidatequeue(struct ad_softc *adp, struct ad_request *request)
{
    /* if tags used invalidate all other tagged transfers */
    if (adp->flags & AD_F_TAG_ENABLED) {
        struct ad_request *tmpreq;
        int tag;

        ata_prtdev(adp->device, "invalidating queued requests\n");
        for (tag = 0; tag <= adp->num_tags; tag++) {
            tmpreq = adp->tags[tag];
            adp->tags[tag] = NULL;
            if (tmpreq == request || tmpreq == NULL)
                continue;
            /* stop the requeued request's own timeout before reinjecting it */
            callout_stop(&tmpreq->callout);
            ad_requeue(adp->device->channel, tmpreq);
        }
        if (ata_command(adp->device, ATA_C_NOP,
                        0, 0, ATA_C_F_FLUSHQUEUE, ATA_WAIT_READY))
            ata_prtdev(adp->device, "flush queue failed\n");
        adp->outstanding = 0;
    }
}
static int
ad_tagsupported(struct ad_softc *adp)
{
    const char *good[] = {"IBM-DPTA", "IBM-DTLA", NULL};
    int i = 0;

    switch (adp->device->channel->chiptype) {
    case 0x4d33105a: /* Promise controllers before TX2 don't work with tagged queueing */
    case 0x4d38105a:
    case 0x0d30105a:
    case 0x4d30105a:
        return 0;
    }

    /* check that drive does DMA, has tags enabled, and is one we know works */
    if (adp->device->mode >= ATA_DMA && adp->device->param->support.queued &&
        adp->device->param->enabled.queued) {
        while (good[i] != NULL) {
            if (!strncmp(adp->device->param->model, good[i], strlen(good[i])))
                return 1;
            i++;
        }
        /*
         * check IBM's new obscure way of naming drives:
         * we want "IC" (IBM CORP) and "AT" or "AV" (ATA interface)
         * but don't care about the other info (size, capacity etc)
         */
        if (!strncmp(adp->device->param->model, "IC", 2) &&
            (!strncmp(adp->device->param->model + 8, "AT", 2) ||
             !strncmp(adp->device->param->model + 8, "AV", 2)))
            return 1;
    }
    return 0;
}
static void
ad_timeout(struct ad_request *request)
{
    struct ad_softc *adp = request->softc;

    adp->device->channel->running = NULL;
    ata_prtdev(adp->device, "%s command timeout tag=%d serv=%d - resetting\n",
               (request->flags & ADR_F_READ) ? "READ" : "WRITE",
               request->tag, request->serv);

    if (request->flags & ADR_F_DMA_USED) {
        ata_dmadone(adp->device);
        ad_invalidatequeue(adp, request);
        if (request->retries == AD_MAX_RETRIES) {
            ata_dmainit(adp->device, ata_pmode(adp->device->param), -1, -1);
            ata_prtdev(adp->device, "trying fallback to PIO mode\n");
            request->retries = 0;
        }
    }

    /* if retries still permit, reinject this request */
    if (request->retries++ < AD_MAX_RETRIES) {
        ad_requeue(adp->device->channel, request);
    }
    else {
        /* retries all used up, return error */
        request->bio->bio_buf->b_error = EIO;
        request->bio->bio_buf->b_flags |= B_ERROR;
        devstat_end_transaction_buf(&adp->stats, request->bio->bio_buf);
        biodone(request->bio);
        ad_free(request);
    }
    ata_reinit(adp->device->channel);
}
void
ad_reinit(struct ata_device *atadev)
{
    struct ad_softc *adp = atadev->driver;

    /* reinit disk parameters */
    ad_invalidatequeue(atadev->driver, NULL);
    ata_command(atadev, ATA_C_SET_MULTI, 0,
                adp->transfersize / DEV_BSIZE, 0, ATA_WAIT_READY);
    if (adp->device->mode >= ATA_DMA)
        ata_dmainit(atadev, ata_pmode(adp->device->param),
                    ata_wmode(adp->device->param),
                    ata_umode(adp->device->param));
    else
        ata_dmainit(atadev, ata_pmode(adp->device->param), -1, -1);
}
void
ad_print(struct ad_softc *adp)
{
    if (bootverbose) {
        ata_prtdev(adp->device, "<%.40s/%.8s> ATA-%d disk at ata%d-%s\n",
                   adp->device->param->model, adp->device->param->revision,
                   ad_version(adp->device->param->version_major),
                   device_get_unit(adp->device->channel->dev),
                   (adp->device->unit == ATA_MASTER) ? "master" : "slave");

        ata_prtdev(adp->device,
                   "%lluMB (%llu sectors), %llu C, %u H, %u S, %u B\n",
                   (unsigned long long)(adp->total_secs /
                                        ((1024L*1024L)/DEV_BSIZE)),
                   (unsigned long long) adp->total_secs,
                   (unsigned long long) (adp->total_secs /
                                         (adp->heads * adp->sectors)),
                   adp->heads, adp->sectors, DEV_BSIZE);

        ata_prtdev(adp->device, "%d secs/int, %d depth queue, %s%s\n",
                   adp->transfersize / DEV_BSIZE, adp->num_tags + 1,
                   (adp->flags & AD_F_TAG_ENABLED) ? "tagged " : "",
                   ata_mode2str(adp->device->mode));

        ata_prtdev(adp->device, "piomode=%d dmamode=%d udmamode=%d cblid=%d\n",
                   ata_pmode(adp->device->param), ata_wmode(adp->device->param),
                   ata_umode(adp->device->param),
                   adp->device->param->hwres_cblid);
    }
    else
        ata_prtdev(adp->device, "%lluMB <%.40s> [%lld/%d/%d] at ata%d-%s %s%s\n",
                   (unsigned long long)(adp->total_secs /
                                        ((1024L * 1024L) / DEV_BSIZE)),
                   adp->device->param->model,
                   (unsigned long long)(adp->total_secs /
                                        (adp->heads*adp->sectors)),
                   adp->heads, adp->sectors,
                   device_get_unit(adp->device->channel->dev),
                   (adp->device->unit == ATA_MASTER) ? "master" : "slave",
                   (adp->flags & AD_F_TAG_ENABLED) ? "tagged " : "",
                   ata_mode2str(adp->device->mode));
}
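
/*
 * The version_major word from IDENTIFY is a bit mask of supported ATA major
 * revisions; the highest bit set is reported as the device's ATA level.
 * Example (illustrative): 0x001e has bits 1-4 set, so ad_version() returns 4
 * and the probe message above shows "ATA-4".  A value of 0x0000 or 0xffff
 * means the field was not reported, and 0 is returned.
 */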
static int
ad_version(u_int16_t version)
{
    int bit;

    if (version == 0xffff)
        return 0;
    for (bit = 15; bit >= 0; bit--)
        if (version & (1<<bit))
            return bit;
    return 0;
}