/* sys/dev/disk/ata/ata-disk.c - ATA disk driver (DragonFly BSD) */
1 /*-
2 * Copyright (c) 1998,1999,2000,2001,2002 Søren Schmidt <sos@FreeBSD.org>
3 * All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * $FreeBSD: src/sys/dev/ata/ata-disk.c,v 1.60.2.24 2003/01/30 07:19:59 sos Exp $
29 * $DragonFly: src/sys/dev/disk/ata/ata-disk.c,v 1.36 2007/05/19 02:39:02 dillon Exp $
32 #include "opt_ata.h"
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/ata.h>
36 #include <sys/kernel.h>
37 #include <sys/malloc.h>
38 #include <sys/buf.h>
39 #include <sys/bus.h>
40 #include <sys/conf.h>
41 #include <sys/disk.h>
42 #include <sys/devicestat.h>
43 #include <sys/cons.h>
44 #include <sys/sysctl.h>
45 #include <sys/syslog.h>
46 #include <sys/rman.h>
47 #include <sys/proc.h>
48 #include <sys/buf2.h>
49 #include <sys/thread2.h>
51 #include <vm/vm.h>
52 #include <vm/pmap.h>
54 #include <machine/md_var.h>
55 #include <machine/clock.h>
57 #include "ata-all.h"
58 #include "ata-disk.h"
59 #include "ata-raid.h"
/* device structures */
static d_open_t adopen;
static d_close_t adclose;
static d_strategy_t adstrategy;
static d_dump_t addump;

/*
 * cdev switch for the "ad" disk devices.  physread/physwrite implement
 * the raw-device read/write paths on top of adstrategy.
 */
static struct dev_ops ad_ops = {
    { "ad", 116, D_DISK },		/* name, major, disk-class device */
    .d_open =	 adopen,
    .d_close =	 adclose,
    .d_read =	 physread,
    .d_write =	 physwrite,
    .d_strategy = adstrategy,
    .d_dump =	 addump,
};
/* prototypes */
static void ad_requeue(struct ata_channel *, struct ad_request *);
static void ad_invalidatequeue(struct ad_softc *, struct ad_request *);
static int ad_tagsupported(struct ad_softc *);
static void ad_timeout(struct ad_request *);
static void ad_free(struct ad_request *);
static int ad_version(u_int16_t);

/* misc defines */
#define AD_MAX_RETRIES	3		/* attempts before a request fails hard */

/* internal vars */
static u_int32_t adp_lun_map = 0;	/* bitmap of allocated "ad" unit numbers */
static int ata_dma = 1;			/* use DMA when drive/controller allow it */
static int ata_wc = 1;			/* enable drive write caching */
static int ata_tags = 0;		/* enable tagged queueing (off by default) */
TUNABLE_INT("hw.ata.ata_dma", &ata_dma);
TUNABLE_INT("hw.ata.wc", &ata_wc);
TUNABLE_INT("hw.ata.tags", &ata_tags);
static MALLOC_DEFINE(M_AD, "AD driver", "ATA disk driver");

/* sysctl vars - read-only mirrors of the boot-time tunables above */
SYSCTL_DECL(_hw_ata);
SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RD, &ata_dma, 0,
	   "ATA disk DMA mode control");
SYSCTL_INT(_hw_ata, OID_AUTO, wc, CTLFLAG_RD, &ata_wc, 0,
	   "ATA disk write caching");
SYSCTL_INT(_hw_ata, OID_AUTO, tags, CTLFLAG_RD, &ata_tags, 0,
	   "ATA disk tagged queuing support");
/*
 * Attach an ATA disk device: size the media (CHS / 28-bit LBA / 48-bit
 * LBA), negotiate multi-sector transfers, caching, DMA mode and tagged
 * queueing with the drive, then register it as disk "ad<lun>".
 *
 * 'alreadylocked' is non-zero when the caller already holds the channel
 * lock; the channel is unlocked before returning in either case.
 */
void
ad_attach(struct ata_device *atadev, int alreadylocked)
{
    struct ad_softc *adp;
    struct disk_info info;
    cdev_t dev;

    adp = kmalloc(sizeof(struct ad_softc), M_AD, M_WAITOK | M_ZERO);

    KKASSERT(atadev->channel->req_mpipe.max_count != 0);

    adp->device = atadev;
#ifdef ATA_STATIC_ID
    /* fixed unit numbering: two units per channel, by master/slave */
    adp->lun = (device_get_unit(atadev->channel->dev)<<1)+ATA_DEV(atadev->unit);
#else
    adp->lun = ata_get_lun(&adp_lun_map);
#endif
    ata_set_name(atadev, "ad", adp->lun);
    adp->heads = atadev->param->heads;
    adp->sectors = atadev->param->sectors;
    adp->total_secs = atadev->param->cylinders * adp->heads * adp->sectors;
    bioq_init(&adp->bio_queue);

    /* does this device need oldstyle CHS addressing */
    if (!ad_version(atadev->param->version_major) ||
	!(atadev->param->atavalid & ATA_FLAG_54_58) || !atadev->param->lba_size)
	adp->flags |= AD_F_CHS_USED;

    /*
     * use the 28bit LBA size if valid; 16383 cylinders is the
     * conventional cap drives report when the real geometry is larger
     */
    if (atadev->param->cylinders == 16383 &&
	adp->total_secs < atadev->param->lba_size)
	adp->total_secs = atadev->param->lba_size;

    /* use the 48bit LBA size if valid (> 268435455 == 2^28-1 sectors) */
    if (atadev->param->support.address48 &&
	atadev->param->lba_size48 > 268435455)
	adp->total_secs = atadev->param->lba_size48;

    if (!alreadylocked)
	ATA_SLEEPLOCK_CH(atadev->channel, ATA_CONTROL);

    /* use multiple sectors/interrupt if device supports it */
    adp->transfersize = DEV_BSIZE;
    if (ad_version(atadev->param->version_major)) {
	int secsperint = max(1, min(atadev->param->sectors_intr, 16));

	if (!ata_command(atadev, ATA_C_SET_MULTI, 0, secsperint,
			 0, ATA_WAIT_INTR) && !ata_wait(atadev, 0))
	    adp->transfersize *= secsperint;
    }

    /* enable read caching if not default on device */
    if (ata_command(atadev, ATA_C_SETFEATURES,
		    0, 0, ATA_C_F_ENAB_RCACHE, ATA_WAIT_INTR))
	ata_prtdev(atadev, "enabling readahead cache failed\n");

    /* enable write caching if allowed and not default on device */
    if (ata_wc || (ata_tags && ad_tagsupported(adp))) {
	if (ata_command(atadev, ATA_C_SETFEATURES,
			0, 0, ATA_C_F_ENAB_WCACHE, ATA_WAIT_INTR))
	    ata_prtdev(atadev, "enabling write cache failed\n");
    }
    else {
	if (ata_command(atadev, ATA_C_SETFEATURES,
			0, 0, ATA_C_F_DIS_WCACHE, ATA_WAIT_INTR))
	    ata_prtdev(atadev, "disabling write cache failed\n");
    }

    /* use DMA if allowed and if drive/controller supports it */
    if (ata_dma)
	ata_dmainit(atadev, ata_pmode(atadev->param),
		    ata_wmode(atadev->param), ata_umode(atadev->param));
    else
	ata_dmainit(atadev, ata_pmode(atadev->param), -1, -1);

    /* use tagged queueing if allowed and supported */
    if (ata_tags && ad_tagsupported(adp)) {
	adp->num_tags = atadev->param->queuelen;
	adp->flags |= AD_F_TAG_ENABLED;
	adp->device->channel->flags |= ATA_QUEUED;
	/* we drive the SERVICE protocol ourselves, disable the interrupts */
	if (ata_command(atadev, ATA_C_SETFEATURES,
			0, 0, ATA_C_F_DIS_RELIRQ, ATA_WAIT_INTR))
	    ata_prtdev(atadev, "disabling release interrupt failed\n");
	if (ata_command(atadev, ATA_C_SETFEATURES,
			0, 0, ATA_C_F_DIS_SRVIRQ, ATA_WAIT_INTR))
	    ata_prtdev(atadev, "disabling service interrupt failed\n");
    }

    ATA_UNLOCK_CH(atadev->channel);

    devstat_add_entry(&adp->stats, "ad", adp->lun, DEV_BSIZE,
		      DEVSTAT_NO_ORDERED_TAGS,
		      DEVSTAT_TYPE_DIRECT | DEVSTAT_TYPE_IF_IDE,
		      DEVSTAT_PRIORITY_DISK);

    dev = disk_create(adp->lun, &adp->disk, &ad_ops);
    dev->si_drv1 = adp;
    dev->si_iosize_max = 256 * DEV_BSIZE;	/* max 128KB per I/O */
    adp->dev = dev;

    /* construct the disk_info */
    bzero(&info, sizeof(info));
    info.d_media_blksize = DEV_BSIZE;
    info.d_media_blocks = adp->total_secs;
    info.d_nheads = adp->heads;
    info.d_secpertrack = adp->sectors;
    info.d_ncylinders = adp->total_secs /
			(info.d_nheads * info.d_secpertrack);
    info.d_secpercyl = info.d_secpertrack * info.d_nheads;
    disk_setdiskinfo(&adp->disk, &info);

    atadev->driver = adp;
    atadev->flags = 0;

    /* if this disk belongs to an ATA RAID dont print the probe */
    if (ata_raiddisk_attach(adp))
	adp->flags |= AD_F_RAID_SUBDISK;
    else {
	if (atadev->driver) {
	    ad_print(adp);
	    ata_enclosure_print(atadev);
	}
    }
}
231 void
232 ad_detach(struct ata_device *atadev, int flush) /* get rid of flush XXX SOS */
234 struct ad_softc *adp = atadev->driver;
235 struct ad_request *request;
236 struct bio *bio;
237 struct buf *bp;
239 atadev->flags |= ATA_D_DETACHING;
240 ata_prtdev(atadev, "removed from configuration\n");
241 ad_invalidatequeue(adp, NULL);
242 TAILQ_FOREACH(request, &atadev->channel->ata_queue, chain) {
243 if (request->softc != adp)
244 continue;
245 TAILQ_REMOVE(&atadev->channel->ata_queue, request, chain);
246 request->bio->bio_buf->b_error = ENXIO;
247 request->bio->bio_buf->b_flags |= B_ERROR;
248 biodone(request->bio);
249 ad_free(request);
251 ata_dmafree(atadev);
252 while ((bio = bioq_first(&adp->bio_queue))) {
253 bioq_remove(&adp->bio_queue, bio);
254 bp = bio->bio_buf;
255 bp->b_error = ENXIO;
256 bp->b_flags |= B_ERROR;
257 biodone(bio);
259 disk_invalidate(&adp->disk);
260 devstat_remove_entry(&adp->stats);
261 disk_destroy(&adp->disk);
262 if (flush) {
263 if (ata_command(atadev, ATA_C_FLUSHCACHE, 0, 0, 0, ATA_WAIT_READY))
264 ata_prtdev(atadev, "flushing cache on detach failed\n");
266 if (adp->flags & AD_F_RAID_SUBDISK)
267 ata_raiddisk_detach(adp);
268 ata_free_name(atadev);
269 ata_free_lun(&adp_lun_map, adp->lun);
270 atadev->driver = NULL;
271 atadev->flags = 0;
272 kfree(adp, M_AD);
275 static int
276 adopen(struct dev_open_args *ap)
278 struct ad_softc *adp = ap->a_head.a_dev->si_drv1;
280 if (adp->flags & AD_F_RAID_SUBDISK)
281 return EBUSY;
282 return 0;
/*
 * Close the disk device: issue FLUSH CACHE so the drive's write cache
 * is committed to media before the device is considered quiesced.
 */
static int
adclose(struct dev_close_args *ap)
{
    struct ad_softc *adp = ap->a_head.a_dev->si_drv1;

    crit_enter(); /* interlock non-atomic channel lock */
    ATA_SLEEPLOCK_CH(adp->device->channel, ATA_CONTROL);
    if (ata_command(adp->device, ATA_C_FLUSHCACHE, 0, 0, 0, ATA_WAIT_READY))
	ata_prtdev(adp->device, "flushing cache on close failed\n");
    ATA_UNLOCK_CH(adp->device->channel);
    crit_exit();
    return 0;
}
/*
 * Queue a bio for this disk and kick the channel state machine.
 *
 * note: always use the passed device rather then bp->b_dev, as the bp
 * may have been translated through several layers.
 */
static int
adstrategy(struct dev_strategy_args *ap)
{
    cdev_t dev = ap->a_head.a_dev;
    struct bio *bio = ap->a_bio;
    struct buf *bp = bio->bio_buf;
    struct ad_softc *adp = dev->si_drv1;

    /* refuse new I/O while the device is being detached */
    if (adp->device->flags & ATA_D_DETACHING) {
	bp->b_error = ENXIO;
	bp->b_flags |= B_ERROR;
	biodone(bio);
	return(0);
    }
    bio->bio_driver_info = dev;
    crit_enter();
    bioqdisksort(&adp->bio_queue, bio);	/* elevator-sort into drive queue */
    crit_exit();
    ata_start(adp->device->channel);
    return(0);
}
/*
 * Kernel crash-dump entry point.  Forces the drive into PIO mode and
 * writes physical memory MAXDUMPPGS pages at a time using polled
 * transfers - no interrupts are available while dumping.
 */
static int
addump(struct dev_dump_args *ap)
{
    cdev_t dev = ap->a_head.a_dev;
    struct ad_softc *adp = dev->si_drv1;
    struct ad_request request;
    vm_paddr_t addr = 0;
    long blkcnt;
    int dumppages = MAXDUMPPGS;
    int i;

    if (!adp)
	return ENXIO;

    /* force PIO mode for dumps */
    adp->device->mode = ATA_PIO;
    ata_reinit(adp->device->channel);

    blkcnt = howmany(PAGE_SIZE, ap->a_secsize);

    while (ap->a_count > 0) {
	caddr_t va = NULL;
	DELAY(1000);

	/* clamp the last batch to what remains */
	if ((ap->a_count / blkcnt) < dumppages)
	    dumppages = ap->a_count / blkcnt;

	/* map the next batch of physical pages; holes map page 0 */
	for (i = 0; i < dumppages; ++i) {
	    vm_paddr_t a = addr + (i * PAGE_SIZE);
	    if (is_physical_memory(a))
		va = pmap_kenter_temporary(trunc_page(a), i);
	    else
		va = pmap_kenter_temporary(trunc_page(0), i);
	}

	/*
	 * NOTE(review): request.data is the va returned by the LAST
	 * pmap_kenter_temporary() call - this relies on the temporary
	 * mapping window semantics; confirm against the pmap code.
	 */
	bzero(&request, sizeof(struct ad_request));
	request.softc = adp;
	request.blockaddr = ap->a_blkno;
	request.bytecount = PAGE_SIZE * dumppages;
	request.data = va;
	callout_init(&request.callout);

	/* poll the whole batch out through ad_transfer() */
	while (request.bytecount > 0) {
	    ad_transfer(&request);
	    if (request.flags & ADR_F_ERROR)
		return EIO;
	    request.donecount += request.currentsize;
	    request.bytecount -= request.currentsize;
	    DELAY(20);
	}

	if (dumpstatus(addr, (off_t)ap->a_count * DEV_BSIZE) < 0)
	    return EINTR;

	ap->a_blkno += blkcnt * dumppages;
	ap->a_count -= blkcnt * dumppages;
	addr += PAGE_SIZE * dumppages;
    }

    if (ata_wait(adp->device, ATA_S_READY | ATA_S_DSC) < 0)
	ata_prtdev(adp->device, "timeout waiting for final ready\n");
    return 0;
}
/*
 * Pull the next bio off the drive queue, wrap it in an ad_request and
 * link it onto the channel queue for execution.
 *
 * Critical section is held when this function is called
 * by ata_start().
 */
void
ad_start(struct ata_device *atadev)
{
    struct ad_softc *adp = atadev->driver;
    struct bio *bio = bioq_first(&adp->bio_queue);
    struct buf *bp;
    struct ad_request *request;
    int tag = 0;

    if (bio == NULL)
	return;
    bp = bio->bio_buf;

    /* if tagged queueing enabled get next free tag */
    if (adp->flags & AD_F_TAG_ENABLED) {
	while (tag <= adp->num_tags && adp->tags[tag])
	    tag++;
	if (tag > adp->num_tags )
	    return;	/* all tags busy, retried on next completion */
    }

    /*
     * Allocate a request. The allocation can only fail if the pipeline
     * is full, in which case the request will be picked up later when
     * ad_start() is called after another request completes.
     */
    request = mpipe_alloc_nowait(&atadev->channel->req_mpipe);
    if (request == NULL) {
	ata_prtdev(atadev, "pipeline full allocating request in ad_start\n");
	return;
    }

    KASSERT((bio->bio_offset & DEV_BMASK) == 0,
	    ("bio_offset not on sector boundary %08llx", bio->bio_offset));

    /* setup request */
    request->softc = adp;
    request->bio = bio;
    request->blockaddr = (u_int64_t)(bio->bio_offset >> DEV_BSHIFT);
    request->bytecount = bp->b_bcount;
    request->data = bp->b_data;
    request->tag = tag;
    callout_init(&request->callout);
    if (bp->b_cmd == BUF_CMD_READ)
	request->flags |= ADR_F_READ;
    if (adp->device->mode >= ATA_DMA) {
	if (ata_dmaalloc(atadev, M_NOWAIT) != 0) {
	    mpipe_free(&atadev->channel->req_mpipe, request);
	    ata_prtdev(atadev, "pipeline full allocated dmabuf in ad_start\n");
	    /* do not revert to PIO, wait for ad_start after I/O completion */
	    return;
	}
    }

    /* insert in tag array */
    adp->tags[tag] = request;

    /* remove from drive queue */
    bioq_remove(&adp->bio_queue, bio);

    /* link onto controller queue */
    TAILQ_INSERT_TAIL(&atadev->channel->ata_queue, request, chain);
}
457 void
458 ad_requeue(struct ata_channel *chan, struct ad_request *req)
460 if (req->donecount) {
461 ata_printf(chan, -1,
462 "WARNING: resetting donecount %u for retry\n",
463 req->donecount);
464 req->bytecount += req->donecount;
465 req->donecount = 0;
467 TAILQ_INSERT_HEAD(&chan->ata_queue, req, chain);
/*
 * Program the drive for the next chunk of 'request' and start it.
 * Used both from the interrupt-driven path and polled from addump().
 * On the first call for a request (donecount == 0) the command itself
 * is issued (DMA, tagged DMA, multi-sector or single-sector PIO); on
 * subsequent calls only the next PIO chunk is pushed out.  Returns an
 * ATA_OP_* code (ATA_OP_CONTINUES, or whatever ad_service() returns
 * when a tagged command released the bus).
 */
int
ad_transfer(struct ad_request *request)
{
    struct ad_softc *adp;
    u_int64_t lba;
    u_int32_t count, max_count;
    u_int8_t cmd;
    int flags = ATA_IMMEDIATE;

    /* get request params */
    adp = request->softc;

    /* calculate transfer details */
    lba = request->blockaddr + (request->donecount / DEV_BSIZE);

    if (request->donecount == 0) {

	/* start timeout for this transfer */
	if (dumping) {
	    callout_stop(&request->callout);	/* no timeouts while dumping */
	} else {
	    callout_reset(&request->callout, 10 * hz,
			  (void *)ad_timeout, request);
	}

	/* setup transfer parameters */
	count = howmany(request->bytecount, DEV_BSIZE);
	/* 28-bit commands have an 8-bit sector count, 48-bit a 16-bit one */
	max_count = adp->device->param->support.address48 ? 65536 : 256;
	if (count > max_count) {
	    ata_prtdev(adp->device,
		       "count %d size transfers not supported\n", count);
	    count = max_count;
	}

	if (adp->flags & AD_F_CHS_USED) {
	    int sector = (lba % adp->sectors) + 1;
	    int cylinder = lba / (adp->sectors * adp->heads);
	    int head = (lba % (adp->sectors * adp->heads)) / adp->sectors;

	    /* pack C/H/S into the "lba" word ata_command() expects */
	    lba = (sector&0xff) | ((cylinder&0xffff)<<8) | ((head&0xf)<<24);
	    adp->device->flags |= ATA_D_USE_CHS;
	}

	/* setup first transfer length */
	request->currentsize = min(request->bytecount, adp->transfersize);

	devstat_start_transaction(&adp->stats);

	/* does this drive & transfer work with DMA ? */
	request->flags &= ~ADR_F_DMA_USED;
	if (adp->device->mode >= ATA_DMA &&
	    !ata_dmasetup(adp->device, request->data, request->bytecount)) {
	    request->flags |= ADR_F_DMA_USED;
	    /* DMA moves the whole request in one go */
	    request->currentsize = request->bytecount;

	    /* do we have tags enabled ? */
	    if (adp->flags & AD_F_TAG_ENABLED) {
		cmd = (request->flags & ADR_F_READ) ?
		    ATA_C_READ_DMA_QUEUED : ATA_C_WRITE_DMA_QUEUED;

		/* tagged commands carry the tag in the count register <<3 */
		if (ata_command(adp->device, cmd, lba,
				request->tag << 3, count, flags)) {
		    ata_prtdev(adp->device, "error executing command");
		    goto transfer_failed;
		}
		if (ata_wait(adp->device, ATA_S_READY)) {
		    ata_prtdev(adp->device, "timeout waiting for READY\n");
		    goto transfer_failed;
		}
		adp->outstanding++;

		/* if ATA bus RELEASE check for SERVICE */
		if (adp->flags & AD_F_TAG_ENABLED &&
		    ATA_INB(adp->device->channel->r_io, ATA_IREASON) &
		    ATA_I_RELEASE)
		    return ad_service(adp, 1);
	    }
	    else {
		cmd = (request->flags & ADR_F_READ) ?
		    ATA_C_READ_DMA : ATA_C_WRITE_DMA;

		if (ata_command(adp->device, cmd, lba, count, 0, flags)) {
		    ata_prtdev(adp->device, "error executing command");
		    goto transfer_failed;
		}
#if 0
		/*
		 * wait for data transfer phase
		 *
		 * well this should be here acording to specs, but older
		 * promise controllers doesn't like it, they lockup!
		 */
		if (ata_wait(adp->device, ATA_S_READY | ATA_S_DRQ)) {
		    ata_prtdev(adp->device, "timeout waiting for data phase\n");
		    goto transfer_failed;
		}
#endif
	    }

	    /* start transfer, return and wait for interrupt */
	    ata_dmastart(adp->device, request->data, request->bytecount,
			 request->flags & ADR_F_READ);
	    return ATA_OP_CONTINUES;
	}

	/* does this drive support multi sector transfers ? */
	if (request->currentsize > DEV_BSIZE)
	    cmd = request->flags&ADR_F_READ ? ATA_C_READ_MUL : ATA_C_WRITE_MUL;

	/* just plain old single sector transfer */
	else
	    cmd = request->flags&ADR_F_READ ? ATA_C_READ : ATA_C_WRITE;

	if (ata_command(adp->device, cmd, lba, count, 0, flags)){
	    ata_prtdev(adp->device, "error executing command");
	    goto transfer_failed;
	}
    }

    /* calculate this transfer length */
    request->currentsize = min(request->bytecount, adp->transfersize);

    /* if this is a PIO read operation, return and wait for interrupt */
    if (request->flags & ADR_F_READ)
	return ATA_OP_CONTINUES;

    /* ready to write PIO data ? */
    if (ata_wait(adp->device, (ATA_S_READY | ATA_S_DSC | ATA_S_DRQ)) < 0) {
	ata_prtdev(adp->device, "timeout waiting for DRQ");
	goto transfer_failed;
    }

    /* output the data */
    if (adp->device->channel->flags & ATA_USE_16BIT)
	ATA_OUTSW(adp->device->channel->r_io, ATA_DATA,
		  (void *)((uintptr_t)request->data + request->donecount),
		  request->currentsize / sizeof(int16_t));
    else
	ATA_OUTSL(adp->device->channel->r_io, ATA_DATA,
		  (void *)((uintptr_t)request->data + request->donecount),
		  request->currentsize / sizeof(int32_t));
    return ATA_OP_CONTINUES;

transfer_failed:
    callout_stop(&request->callout);
    ad_invalidatequeue(adp, request);
    kprintf(" - resetting\n");

    /* if retries still permit, reinject this request */
    if (request->retries++ < AD_MAX_RETRIES)
	ad_requeue(adp->device->channel, request);
    else {
	/* retries all used up, return error */
	request->bio->bio_buf->b_error = EIO;
	request->bio->bio_buf->b_flags |= B_ERROR;
	request->bio->bio_buf->b_resid = request->bytecount;
	devstat_end_transaction_buf(&adp->stats, request->bio->bio_buf);
	biodone(request->bio);
	ad_free(request);
    }
    ata_reinit(adp->device->channel);
    return ATA_OP_CONTINUES;
}
/*
 * Completion handler for a request, called from the channel interrupt.
 * Finishes DMA, classifies errors (soft/ICRC/hard), retries or degrades
 * to PIO as appropriate, reads in PIO data, and either continues a
 * multi-chunk transfer or completes the bio.  Returns an ATA_OP_* code.
 */
int
ad_interrupt(struct ad_request *request)
{
    struct ad_softc *adp = request->softc;
    int dma_stat = 0;
    cdev_t dev;

    /* finish DMA transfer */
    if (request->flags & ADR_F_DMA_USED)
	dma_stat = ata_dmadone(adp->device);

    dev = request->bio->bio_driver_info;

    /* do we have a corrected soft error ? */
    if (adp->device->channel->status & ATA_S_CORR)
	diskerr(request->bio, dev,
		"soft error (ECC corrected)", LOG_PRINTF,
		request->donecount);

    /* did any real errors happen ? */
    if ((adp->device->channel->status & ATA_S_ERROR) ||
	(request->flags & ADR_F_DMA_USED && dma_stat & ATA_BMSTAT_ERROR)) {
	adp->device->channel->error =
	    ATA_INB(adp->device->channel->r_io, ATA_ERROR);
	diskerr(request->bio, dev,
		(adp->device->channel->error & ATA_E_ICRC) ?
		"UDMA ICRC error" : "hard error", LOG_PRINTF,
		request->donecount);

	/* if this is a UDMA CRC error, reinject request */
	if (request->flags & ADR_F_DMA_USED &&
	    adp->device->channel->error & ATA_E_ICRC) {
	    callout_stop(&request->callout);
	    ad_invalidatequeue(adp, request);

	    if (request->retries++ < AD_MAX_RETRIES)
		kprintf(" retrying\n");
	    else {
		/* too many CRC errors, drop down to PIO for good */
		ata_dmainit(adp->device, ata_pmode(adp->device->param), -1, -1);
		kprintf(" falling back to PIO mode\n");
	    }
	    ad_requeue(adp->device->channel, request);
	    return ATA_OP_FINISHED;
	}

	/* if using DMA, try once again in PIO mode */
	if (request->flags & ADR_F_DMA_USED) {
	    callout_stop(&request->callout);
	    ad_invalidatequeue(adp, request);
	    ata_dmainit(adp->device, ata_pmode(adp->device->param), -1, -1);
	    request->flags |= ADR_F_FORCE_PIO;
	    kprintf(" trying PIO mode\n");
	    ad_requeue(adp->device->channel, request);
	    return ATA_OP_FINISHED;
	}

	request->flags |= ADR_F_ERROR;
	kprintf(" status=%02x error=%02x\n",
		adp->device->channel->status, adp->device->channel->error);
    }

    /* if we arrived here with forced PIO mode, DMA doesn't work right */
    if (request->flags & ADR_F_FORCE_PIO && !(request->flags & ADR_F_ERROR))
	ata_prtdev(adp->device, "DMA problem fallback to PIO mode\n");

    /* if this was a PIO read operation, get the data */
    if (!(request->flags & ADR_F_DMA_USED) &&
	(request->flags & (ADR_F_READ | ADR_F_ERROR)) == ADR_F_READ) {

	/* ready to receive data? */
	if ((adp->device->channel->status & ATA_S_READY) == 0)
	    ata_prtdev(adp->device, "read interrupt arrived early");

	if (ata_wait(adp->device, (ATA_S_READY | ATA_S_DSC | ATA_S_DRQ)) != 0) {
	    ata_prtdev(adp->device, "read error detected (too) late");
	    request->flags |= ADR_F_ERROR;
	}
	else {
	    /* data ready, read in */
	    if (adp->device->channel->flags & ATA_USE_16BIT)
		ATA_INSW(adp->device->channel->r_io, ATA_DATA,
			 (void*)((uintptr_t)request->data + request->donecount),
			 request->currentsize / sizeof(int16_t));
	    else
		ATA_INSL(adp->device->channel->r_io, ATA_DATA,
			 (void*)((uintptr_t)request->data + request->donecount),
			 request->currentsize / sizeof(int32_t));
	}
    }

    /* finish up transfer */
    if (request->flags & ADR_F_ERROR) {
	request->bio->bio_buf->b_error = EIO;
	request->bio->bio_buf->b_flags |= B_ERROR;
    }
    else {
	request->bytecount -= request->currentsize;
	request->donecount += request->currentsize;
	/* more chunks to go - start the next one and keep running */
	if (request->bytecount > 0) {
	    ad_transfer(request);
	    return ATA_OP_CONTINUES;
	}
    }

    /* disarm timeout for this transfer */
    callout_stop(&request->callout);

    request->bio->bio_buf->b_resid = request->bytecount;

    devstat_end_transaction_buf(&adp->stats, request->bio->bio_buf);
    biodone(request->bio);
    ad_free(request);
    adp->outstanding--;

    /* check for SERVICE (tagged operations only) */
    return ad_service(adp, 1);
}
/*
 * Tagged-queueing SERVICE handling.  When 'change' is set and both
 * drives on the channel run tagged commands, the other drive with
 * outstanding requests may be selected first.  If the selected drive
 * asserts SERVICE, the released tagged request is reconnected (via the
 * SERVICE command and the tag read back from the count register) and
 * its DMA transfer restarted.  Returns an ATA_OP_* code.
 */
int
ad_service(struct ad_softc *adp, int change)
{
    /* do we have to check the other device on this channel ? */
    if (adp->device->channel->flags & ATA_QUEUED && change) {
	int device = adp->device->unit;

	if (adp->device->unit == ATA_MASTER) {
	    if ((adp->device->channel->devices & ATA_ATA_SLAVE) &&
		(adp->device->channel->device[SLAVE].driver) &&
		((struct ad_softc *) (adp->device->channel->
				      device[SLAVE].driver))->flags & AD_F_TAG_ENABLED)
		device = ATA_SLAVE;
	}
	else {
	    if ((adp->device->channel->devices & ATA_ATA_MASTER) &&
		(adp->device->channel->device[MASTER].driver) &&
		((struct ad_softc *) (adp->device->channel->
				      device[MASTER].driver))->flags & AD_F_TAG_ENABLED)
		device = ATA_MASTER;
	}
	/* switch selection to the other drive if it has work outstanding */
	if (device != adp->device->unit &&
	    ((struct ad_softc *)
	     (adp->device->channel->
	      device[ATA_DEV(device)].driver))->outstanding > 0) {
	    ATA_OUTB(adp->device->channel->r_io, ATA_DRIVE, ATA_D_IBM | device);
	    adp = adp->device->channel->device[ATA_DEV(device)].driver;
	    DELAY(10);
	}
    }
    /* read via the alternate status port to avoid clearing the interrupt */
    adp->device->channel->status =
	ATA_INB(adp->device->channel->r_altio, ATA_ALTSTAT);

    /* do we have a SERVICE request from the drive ? */
    if (adp->flags & AD_F_TAG_ENABLED &&
	adp->outstanding > 0 &&
	adp->device->channel->status & ATA_S_SERVICE) {
	struct ad_request *request;
	int tag;

	/* check for error */
	if (adp->device->channel->status & ATA_S_ERROR) {
	    ata_prtdev(adp->device, "Oops! controller says s=0x%02x e=0x%02x\n",
		       adp->device->channel->status,
		       adp->device->channel->error);
	    ad_invalidatequeue(adp, NULL);
	    return ATA_OP_FINISHED;
	}

	/* issue SERVICE cmd */
	if (ata_command(adp->device, ATA_C_SERVICE, 0, 0, 0, ATA_IMMEDIATE)) {
	    ata_prtdev(adp->device, "problem executing SERVICE cmd\n");
	    ad_invalidatequeue(adp, NULL);
	    return ATA_OP_FINISHED;
	}

	/* setup the transfer environment when ready */
	if (ata_wait(adp->device, ATA_S_READY)) {
	    ata_prtdev(adp->device, "SERVICE timeout tag=%d s=%02x e=%02x\n",
		       ATA_INB(adp->device->channel->r_io, ATA_COUNT) >> 3,
		       adp->device->channel->status,
		       adp->device->channel->error);
	    ad_invalidatequeue(adp, NULL);
	    return ATA_OP_FINISHED;
	}
	/* the drive reports the reconnecting tag in the count register */
	tag = ATA_INB(adp->device->channel->r_io, ATA_COUNT) >> 3;
	if (!(request = adp->tags[tag])) {
	    ata_prtdev(adp->device, "no request for tag=%d\n", tag);
	    ad_invalidatequeue(adp, NULL);
	    return ATA_OP_FINISHED;
	}
	ATA_FORCELOCK_CH(adp->device->channel, ATA_ACTIVE_ATA);
	adp->device->channel->running = request;
	request->serv++;

	/* start DMA transfer when ready */
	if (ata_wait(adp->device, ATA_S_READY | ATA_S_DRQ)) {
	    ata_prtdev(adp->device, "timeout starting DMA s=%02x e=%02x\n",
		       adp->device->channel->status,
		       adp->device->channel->error);
	    ad_invalidatequeue(adp, NULL);
	    return ATA_OP_FINISHED;
	}
	ata_dmastart(adp->device, request->data, request->bytecount,
		     request->flags & ADR_F_READ);
	return ATA_OP_CONTINUES;
    }
    return ATA_OP_FINISHED;
}
/*
 * Release a completed/aborted request: free its DMA resources, clear
 * its slot in the tag array and return it to the channel's request
 * pipeline.  Runs in a critical section to interlock with the channel.
 */
static void
ad_free(struct ad_request *request)
{
    crit_enter();
    ata_dmafree(request->softc->device);
    request->softc->tags[request->tag] = NULL;
    mpipe_free(&request->softc->device->channel->req_mpipe, request);
    crit_exit();
}
851 static void
852 ad_invalidatequeue(struct ad_softc *adp, struct ad_request *request)
854 /* if tags used invalidate all other tagged transfers */
855 if (adp->flags & AD_F_TAG_ENABLED) {
856 struct ad_request *tmpreq;
857 int tag;
859 ata_prtdev(adp->device, "invalidating queued requests\n");
860 for (tag = 0; tag <= adp->num_tags; tag++) {
861 tmpreq = adp->tags[tag];
862 adp->tags[tag] = NULL;
863 if (tmpreq == request || tmpreq == NULL)
864 continue;
865 callout_stop(&request->callout);
866 ad_requeue(adp->device->channel, tmpreq);
868 if (ata_command(adp->device, ATA_C_NOP,
869 0, 0, ATA_C_F_FLUSHQUEUE, ATA_WAIT_READY))
870 ata_prtdev(adp->device, "flush queue failed\n");
871 adp->outstanding = 0;
875 static int
876 ad_tagsupported(struct ad_softc *adp)
878 const char *good[] = {"IBM-DPTA", "IBM-DTLA", NULL};
879 int i = 0;
881 switch (adp->device->channel->chiptype) {
882 case 0x4d33105a: /* Promises before TX2 doesn't work with tagged queuing */
883 case 0x4d38105a:
884 case 0x0d30105a:
885 case 0x4d30105a:
886 return 0;
889 /* check that drive does DMA, has tags enabled, and is one we know works */
890 if (adp->device->mode >= ATA_DMA && adp->device->param->support.queued &&
891 adp->device->param->enabled.queued) {
892 while (good[i] != NULL) {
893 if (!strncmp(adp->device->param->model, good[i], strlen(good[i])))
894 return 1;
895 i++;
898 * check IBM's new obscure way of naming drives
899 * we want "IC" (IBM CORP) and "AT" or "AV" (ATA interface)
900 * but doesn't care about the other info (size, capacity etc)
902 if (!strncmp(adp->device->param->model, "IC", 2) &&
903 (!strncmp(adp->device->param->model + 8, "AT", 2) ||
904 !strncmp(adp->device->param->model + 8, "AV", 2)))
905 return 1;
907 return 0;
/*
 * Callout handler fired when a request has not completed within its
 * 10 second window.  Tears down any DMA state, degrades to PIO after
 * repeated DMA timeouts, then retries the request or fails it, and
 * finally resets the channel.
 */
static void
ad_timeout(struct ad_request *request)
{
    struct ad_softc *adp = request->softc;

    adp->device->channel->running = NULL;
    ata_prtdev(adp->device, "%s command timeout tag=%d serv=%d - resetting\n",
	       (request->flags & ADR_F_READ) ? "READ" : "WRITE",
	       request->tag, request->serv);

    if (request->flags & ADR_F_DMA_USED) {
	ata_dmadone(adp->device);
	ad_invalidatequeue(adp, request);
	if (request->retries == AD_MAX_RETRIES) {
	    ata_dmainit(adp->device, ata_pmode(adp->device->param), -1, -1);
	    ata_prtdev(adp->device, "trying fallback to PIO mode\n");
	    request->retries = 0;	/* restart retry counting in PIO mode */
	}
    }

    /* if retries still permit, reinject this request */
    if (request->retries++ < AD_MAX_RETRIES) {
	ad_requeue(adp->device->channel, request);
    }
    else {
	/* retries all used up, return error */
	request->bio->bio_buf->b_error = EIO;
	request->bio->bio_buf->b_flags |= B_ERROR;
	devstat_end_transaction_buf(&adp->stats, request->bio->bio_buf);
	biodone(request->bio);
	ad_free(request);
    }
    ata_reinit(adp->device->channel);
}
/*
 * Re-establish the drive's operating parameters (multi-sector count
 * and DMA/PIO transfer mode) after a channel reset, requeueing any
 * tagged requests that were in flight.
 */
void
ad_reinit(struct ata_device *atadev)
{
    struct ad_softc *adp = atadev->driver;

    /* reinit disk parameters */
    ad_invalidatequeue(atadev->driver, NULL);
    ata_command(atadev, ATA_C_SET_MULTI, 0,
		adp->transfersize / DEV_BSIZE, 0, ATA_WAIT_READY);
    if (adp->device->mode >= ATA_DMA)
	ata_dmainit(atadev, ata_pmode(adp->device->param),
		    ata_wmode(adp->device->param),
		    ata_umode(adp->device->param));
    else
	ata_dmainit(atadev, ata_pmode(adp->device->param), -1, -1);
}
/*
 * Announce the disk at probe time: a multi-line detailed report under
 * bootverbose, otherwise a single summary line.
 */
void
ad_print(struct ad_softc *adp)
{
    if (bootverbose) {
	ata_prtdev(adp->device, "<%.40s/%.8s> ATA-%d disk at ata%d-%s\n",
		   adp->device->param->model, adp->device->param->revision,
		   ad_version(adp->device->param->version_major),
		   device_get_unit(adp->device->channel->dev),
		   (adp->device->unit == ATA_MASTER) ? "master" : "slave");

	ata_prtdev(adp->device,
		   "%lluMB (%llu sectors), %llu C, %u H, %u S, %u B\n",
		   (unsigned long long)(adp->total_secs /
					((1024L*1024L)/DEV_BSIZE)),
		   (unsigned long long) adp->total_secs,
		   (unsigned long long) (adp->total_secs /
					 (adp->heads * adp->sectors)),
		   adp->heads, adp->sectors, DEV_BSIZE);

	ata_prtdev(adp->device, "%d secs/int, %d depth queue, %s%s\n",
		   adp->transfersize / DEV_BSIZE, adp->num_tags + 1,
		   (adp->flags & AD_F_TAG_ENABLED) ? "tagged " : "",
		   ata_mode2str(adp->device->mode));

	ata_prtdev(adp->device, "piomode=%d dmamode=%d udmamode=%d cblid=%d\n",
		   ata_pmode(adp->device->param), ata_wmode(adp->device->param),
		   ata_umode(adp->device->param),
		   adp->device->param->hwres_cblid);

    }
    else
	ata_prtdev(adp->device,"%lluMB <%.40s> [%lld/%d/%d] at ata%d-%s %s%s\n",
		   (unsigned long long)(adp->total_secs /
					((1024L * 1024L) / DEV_BSIZE)),
		   adp->device->param->model,
		   (unsigned long long)(adp->total_secs /
					(adp->heads*adp->sectors)),
		   adp->heads, adp->sectors,
		   device_get_unit(adp->device->channel->dev),
		   (adp->device->unit == ATA_MASTER) ? "master" : "slave",
		   (adp->flags & AD_F_TAG_ENABLED) ? "tagged " : "",
		   ata_mode2str(adp->device->mode));
}
/*
 * Map the ATA IDENTIFY major-version word to the highest supported
 * ATA revision number (the index of the most significant set bit).
 * 0xffff means the word is unimplemented; report 0 in that case.
 */
static int
ad_version(u_int16_t version)
{
    int highest = 0;
    int bit;

    if (version == 0xffff)
	return 0;
    for (bit = 0; bit < 16; bit++) {
	if (version & (1 << bit))
	    highest = bit;
    }
    return highest;
}