Use tsleep in the low-level delay functions called when resetting

/*-
 * Copyright (c) 1998 - 2006 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/ata/ata-all.c,v 1.279 2007/02/23 16:25:08 jhb Exp $
 * $DragonFly: src/sys/dev/disk/nata/ata-all.c,v 1.14 2008/03/24 06:41:56 dillon Exp $
 */

#include "opt_ata.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/conf.h>
#include <sys/ctype.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/lock.h>           /* for {get,rel}_mplock() */
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/nata.h>
#include <sys/objcache.h>
#include <sys/queue.h>
#include <sys/spinlock2.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include "ata-all.h"
#include "ata_if.h"

/* device structure */
static d_ioctl_t ata_ioctl;
static struct dev_ops ata_ops = {
    { "ata", 159, 0 },
    .d_open =  nullopen,
    .d_close = nullclose,
    .d_ioctl = ata_ioctl,
};

/* prototypes */
static void ata_boot_attach(void);
static device_t ata_add_child(device_t, struct ata_device *, int);
static int ata_getparam(struct ata_device *, int);
static void bswap(int8_t *, int);
static void btrim(int8_t *, int);
static void bpack(int8_t *, int8_t *, int);

/* global vars */
MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer");
int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL;
devclass_t ata_devclass;
struct objcache *ata_request_cache;
struct objcache *ata_composite_cache;
struct objcache_malloc_args ata_request_malloc_args = {
    sizeof(struct ata_request), M_ATA };
struct objcache_malloc_args ata_composite_malloc_args = {
    sizeof(struct ata_composite), M_ATA };
int ata_wc = 1;

/* local vars */
static int ata_dma = 1;
static int atapi_dma = 1;

/* sysctl vars */
SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters");
TUNABLE_INT("hw.ata.ata_dma", &ata_dma);
SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RW, &ata_dma, 0,
           "ATA disk DMA mode control");
TUNABLE_INT("hw.ata.atapi_dma", &atapi_dma);
SYSCTL_INT(_hw_ata, OID_AUTO, atapi_dma, CTLFLAG_RW, &atapi_dma, 0,
           "ATAPI device DMA mode control");
TUNABLE_INT("hw.ata.wc", &ata_wc);
SYSCTL_INT(_hw_ata, OID_AUTO, ata_wc, CTLFLAG_RW, &ata_wc, 0,
           "ATA disk write caching");

/*
 * newbus device interface related functions
 */
int
ata_probe(device_t dev)
{
    return 0;
}

int
ata_attach(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    int error, rid;

    /* check that we have a virgin channel to attach */
    if (ch->r_irq)
        return EEXIST;

    /* initialize the softc basics */
    ch->dev = dev;
    ch->state = ATA_IDLE;
    spin_init(&ch->state_mtx);
    spin_init(&ch->queue_mtx);
    TAILQ_INIT(&ch->ata_queue);

    /* reset the controller HW, the channel and device(s) */
    while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
        tsleep(&error, 0, "ataatch", 1);
    ATA_RESET(dev);
    ATA_LOCKING(dev, ATA_LF_UNLOCK);

    /* setup interrupt delivery */
    rid = ATA_IRQ_RID;
    ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
                                       RF_SHAREABLE | RF_ACTIVE);
    if (!ch->r_irq) {
        device_printf(dev, "unable to allocate interrupt\n");
        return ENXIO;
    }
    if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS,
                                (driver_intr_t *)ata_interrupt, ch, &ch->ih,
                                NULL))) {
        device_printf(dev, "unable to setup interrupt\n");
        return error;
    }

    /* probe and attach devices on this channel unless we are in early boot */
    ata_identify(dev);
    return 0;
}
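
/*
 * Detach a channel: stall the request queue, detach and delete all child
 * devices, then tear down the interrupt and release the channel's resources.
 */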

int
ata_detach(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    device_t *children;
    int nchildren, i;

    /* check that we have a valid channel to detach */
    if (!ch->r_irq)
        return ENXIO;

    /* grab the channel lock so no new requests get launched */
    spin_lock_wr(&ch->state_mtx);
    ch->state |= ATA_STALL_QUEUE;
    spin_unlock_wr(&ch->state_mtx);

    /* detach & delete all children */
    if (!device_get_children(dev, &children, &nchildren)) {
        for (i = 0; i < nchildren; i++)
            if (children[i])
                device_delete_child(dev, children[i]);
        kfree(children, M_TEMP);
    }

    /* release resources */
    bus_teardown_intr(dev, ch->r_irq, ch->ih);
    bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
    ch->r_irq = NULL;
    spin_uninit(&ch->state_mtx);
    spin_uninit(&ch->queue_mtx);
    return 0;
}
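
/*
 * Reinitialize a channel after an error or a resume: lock the channel, cancel
 * any running request, reset the hardware, reinit the children (deleting any
 * that no longer respond) and, if still valid, requeue the interrupted
 * request before releasing the channel again.
 */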

int
ata_reinit(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    struct ata_request *request;
    device_t *children;
    int nchildren, i;

    /* check that we have a valid channel to reinit */
    if (!ch || !ch->r_irq)
        return ENXIO;

    if (bootverbose)
        device_printf(dev, "reiniting channel ..\n");

    /* poll for locking the channel */
    while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
        tsleep(&dev, 0, "atarini", 1);

    /* catch any eventual request in ch->running */
    spin_lock_wr(&ch->state_mtx);
    if ((request = ch->running))
        callout_stop(&request->callout);
    ch->running = NULL;

    /* unconditionally grab the channel lock */
    ch->state |= ATA_STALL_QUEUE;
    spin_unlock_wr(&ch->state_mtx);

    /* reset the controller HW, the channel and device(s) */
    ATA_RESET(dev);

    /* reinit the children and delete any that fail */
    if (!device_get_children(dev, &children, &nchildren)) {
        get_mplock();
        for (i = 0; i < nchildren; i++) {
            /* did any children go missing? */
            if (children[i] && device_is_attached(children[i]) &&
                ATA_REINIT(children[i])) {
                /*
                 * If we had a running request and its device matches
                 * this child we need to inform the request that the
                 * device is gone.
                 */
                if (request && request->dev == children[i]) {
                    request->result = ENXIO;
                    device_printf(request->dev, "FAILURE - device detached\n");

                    /* if not timed out, finish the request here */
                    if (!(request->flags & ATA_R_TIMEOUT))
                        ata_finish(request);
                    request = NULL;
                }
                device_delete_child(dev, children[i]);
            }
        }
        kfree(children, M_TEMP);
        rel_mplock();
    }

    /* if we still have a good request put it on the queue again */
    if (request && !(request->flags & ATA_R_TIMEOUT)) {
        device_printf(request->dev,
                      "WARNING - %s requeued due to channel reset",
                      ata_cmd2str(request));
        if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
            kprintf(" LBA=%ju", request->u.ata.lba);
        kprintf("\n");
        request->flags |= ATA_R_REQUEUE;
        ata_queue_request(request);
    }

    /* we're done, release the channel for new work */
    spin_lock_wr(&ch->state_mtx);
    ch->state = ATA_IDLE;
    spin_unlock_wr(&ch->state_mtx);
    ATA_LOCKING(dev, ATA_LF_UNLOCK);

    if (bootverbose)
        device_printf(dev, "reinit done ..\n");

    /* kick off requests on the queue */
    ata_start(dev);
    return 0;
}

int
ata_suspend(device_t dev)
{
    struct ata_channel *ch;

    /* check for valid device */
    if (!dev || !(ch = device_get_softc(dev)))
        return ENXIO;

    /* wait for the channel to be IDLE or detached before suspending */
    while (ch->r_irq) {
        spin_lock_wr(&ch->state_mtx);
        if (ch->state == ATA_IDLE) {
            ch->state = ATA_ACTIVE;
            spin_unlock_wr(&ch->state_mtx);
            break;
        }
        spin_unlock_wr(&ch->state_mtx);
        tsleep(ch, 0, "atasusp", hz/10);
    }
    ATA_LOCKING(dev, ATA_LF_UNLOCK);
    return 0;
}

int
ata_resume(device_t dev)
{
    struct ata_channel *ch;
    int error;

    /* check for valid device */
    if (!dev || !(ch = device_get_softc(dev)))
        return ENXIO;

    /* reinit the devices, we don't know what mode/state they are in */
    error = ata_reinit(dev);

    /* kick off requests on the queue */
    ata_start(dev);
    return error;
}
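
/*
 * Channel interrupt handler.  Returns 1 when a request was completed here,
 * 0 when the interrupt was not ours or the request needs further interrupts.
 */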

int
ata_interrupt(void *data)
{
    struct ata_channel *ch = (struct ata_channel *)data;
    struct ata_request *request;

    spin_lock_wr(&ch->state_mtx);
    do {
        /*
         * Ignore the interrupt if it's not for us.  This may also have the
         * side effect of processing events unrelated to I/O requests.
         */
        if (ch->hw.status && !ch->hw.status(ch->dev))
            break;

        /*
         * Check if we have a running request, and make sure it has been
         * completely queued.  Otherwise the channel status may indicate
         * not-busy when, in fact, the command had not yet been issued.
         */
        if ((request = ch->running) == NULL)
            break;
        if ((request->flags & ATA_R_HWCMDQUEUED) == 0) {
            kprintf("ata_interrupt: early interrupt\n");
            break;
        }

        ATA_DEBUG_RQ(request, "interrupt");

        /* safety check for the right state */
        if (ch->state == ATA_IDLE) {
            device_printf(request->dev, "interrupt on idle channel ignored\n");
            break;
        }

        /*
         * We have the HW locks, so end the transaction for this request
         * if it finishes immediately, otherwise wait for the next interrupt.
         */
        if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) {
            ch->running = NULL;
            if (ch->state == ATA_ACTIVE)
                ch->state = ATA_IDLE;
            spin_unlock_wr(&ch->state_mtx);
            ATA_LOCKING(ch->dev, ATA_LF_UNLOCK);
            ata_finish(request);
            return 1;
        }
    } while (0);
    spin_unlock_wr(&ch->state_mtx);
    return 0;
}

/*
 * device related interfaces
 */
static int
ata_ioctl(struct dev_ioctl_args *ap)
{
    device_t device, *children;
    struct ata_ioc_devices *devices = (struct ata_ioc_devices *)ap->a_data;
    int *value = (int *)ap->a_data;
    int i, nchildren, error = ENOTTY;

    switch (ap->a_cmd) {
    case IOCATAGMAXCHANNEL:
        *value = devclass_get_maxunit(ata_devclass);
        error = 0;
        break;

    case IOCATAREINIT:
        if (*value > devclass_get_maxunit(ata_devclass) ||
            !(device = devclass_get_device(ata_devclass, *value)))
            return ENXIO;
        error = ata_reinit(device);
        ata_start(device);
        break;

    case IOCATAATTACH:
        if (*value > devclass_get_maxunit(ata_devclass) ||
            !(device = devclass_get_device(ata_devclass, *value)))
            return ENXIO;
        /* XXX SOS should enable channel HW on controller */
        error = ata_attach(device);
        break;

    case IOCATADETACH:
        if (*value > devclass_get_maxunit(ata_devclass) ||
            !(device = devclass_get_device(ata_devclass, *value)))
            return ENXIO;
        error = ata_detach(device);
        /* XXX SOS should disable channel HW on controller */
        break;

    case IOCATADEVICES:
        if (devices->channel > devclass_get_maxunit(ata_devclass) ||
            !(device = devclass_get_device(ata_devclass, devices->channel)))
            return ENXIO;
        bzero(devices->name[0], 32);
        bzero(&devices->params[0], sizeof(struct ata_params));
        bzero(devices->name[1], 32);
        bzero(&devices->params[1], sizeof(struct ata_params));
        if (!device_get_children(device, &children, &nchildren)) {
            for (i = 0; i < nchildren; i++) {
                if (children[i] && device_is_attached(children[i])) {
                    struct ata_device *atadev = device_get_softc(children[i]);

                    if (atadev->unit == ATA_MASTER) {
                        strncpy(devices->name[0],
                                device_get_nameunit(children[i]), 32);
                        bcopy(&atadev->param, &devices->params[0],
                              sizeof(struct ata_params));
                    }
                    if (atadev->unit == ATA_SLAVE) {
                        strncpy(devices->name[1],
                                device_get_nameunit(children[i]), 32);
                        bcopy(&atadev->param, &devices->params[1],
                              sizeof(struct ata_params));
                    }
                }
            }
            kfree(children, M_TEMP);
            error = 0;
        }
        else
            error = ENODEV;
        break;

    default:
        if (ata_raid_ioctl_func)
            error = ata_raid_ioctl_func(ap->a_cmd, ap->a_data);
    }
    return error;
}
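
/*
 * Per-device ioctl entry point.  IOCATAREQUEST builds an ata_request from the
 * user-supplied ata_ioc_request, runs it and copies data and status back out;
 * the remaining commands get/set identify data and the transfer mode.
 */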

int
ata_device_ioctl(device_t dev, u_long cmd, caddr_t data)
{
    struct ata_device *atadev = device_get_softc(dev);
    struct ata_ioc_request *ioc_request = (struct ata_ioc_request *)data;
    struct ata_params *params = (struct ata_params *)data;
    int *mode = (int *)data;
    struct ata_request *request;
    caddr_t buf;
    int error;

    switch (cmd) {
    case IOCATAREQUEST:
        if (!(buf = kmalloc(ioc_request->count, M_ATA, M_WAITOK | M_NULLOK))) {
            return ENOMEM;
        }
        if (!(request = ata_alloc_request())) {
            kfree(buf, M_ATA);
            return ENOMEM;
        }
        if (ioc_request->flags & ATA_CMD_WRITE) {
            error = copyin(ioc_request->data, buf, ioc_request->count);
            if (error) {
                kfree(buf, M_ATA);
                ata_free_request(request);
                return error;
            }
        }
        request->dev = dev;
        if (ioc_request->flags & ATA_CMD_ATAPI) {
            request->flags = ATA_R_ATAPI;
            bcopy(ioc_request->u.atapi.ccb, request->u.atapi.ccb, 16);
        }
        else {
            request->u.ata.command = ioc_request->u.ata.command;
            request->u.ata.feature = ioc_request->u.ata.feature;
            request->u.ata.lba = ioc_request->u.ata.lba;
            request->u.ata.count = ioc_request->u.ata.count;
        }
        request->timeout = ioc_request->timeout;
        request->data = buf;
        request->bytecount = ioc_request->count;
        request->transfersize = request->bytecount;
        if (ioc_request->flags & ATA_CMD_CONTROL)
            request->flags |= ATA_R_CONTROL;
        if (ioc_request->flags & ATA_CMD_READ)
            request->flags |= ATA_R_READ;
        if (ioc_request->flags & ATA_CMD_WRITE)
            request->flags |= ATA_R_WRITE;
        ata_queue_request(request);
        if (request->flags & ATA_R_ATAPI) {
            bcopy(&request->u.atapi.sense, &ioc_request->u.atapi.sense,
                  sizeof(struct atapi_sense));
        }
        else {
            ioc_request->u.ata.command = request->u.ata.command;
            ioc_request->u.ata.feature = request->u.ata.feature;
            ioc_request->u.ata.lba = request->u.ata.lba;
            ioc_request->u.ata.count = request->u.ata.count;
        }
        ioc_request->error = request->result;
        if (ioc_request->flags & ATA_CMD_READ)
            error = copyout(buf, ioc_request->data, ioc_request->count);
        else
            error = 0;
        kfree(buf, M_ATA);
        ata_free_request(request);
        return error;

    case IOCATAGPARM:
        ata_getparam(atadev, 0);
        bcopy(&atadev->param, params, sizeof(struct ata_params));
        return 0;

    case IOCATASMODE:
        atadev->mode = *mode;
        ATA_SETMODE(device_get_parent(dev), dev);
        return 0;

    case IOCATAGMODE:
        *mode = atadev->mode;
        return 0;
    default:
        return ENOTTY;
    }
}

static void
ata_boot_attach(void)
{
    struct ata_channel *ch;
    int ctlr;

    get_mplock();

    /* kick off probe and attach on all channels */
    for (ctlr = 0; ctlr < devclass_get_maxunit(ata_devclass); ctlr++) {
        if ((ch = devclass_get_softc(ata_devclass, ctlr))) {
            ata_identify(ch->dev);
        }
    }
    rel_mplock();
}

/*
 * misc support functions
 */
static device_t
ata_add_child(device_t parent, struct ata_device *atadev, int unit)
{
    device_t child;

    if ((child = device_add_child(parent, NULL, unit))) {
        device_set_softc(child, atadev);
        device_quiet(child);
        atadev->dev = child;
        atadev->max_iosize = DEV_BSIZE;
        atadev->mode = ATA_PIO_MAX;
    }
    return child;
}
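
/*
 * Issue an ATA/ATAPI IDENTIFY to the device and normalize the returned
 * parameter page (endianness fixup and string cleanup).  When 'init' is set,
 * also set the device description and pick an initial transfer mode.
 */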

static int
ata_getparam(struct ata_device *atadev, int init)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
    struct ata_request *request;
    u_int8_t command = 0;
    int error = ENOMEM, retries = 2;

    if (ch->devices &
        (atadev->unit == ATA_MASTER ? ATA_ATA_MASTER : ATA_ATA_SLAVE))
        command = ATA_ATA_IDENTIFY;
    if (ch->devices &
        (atadev->unit == ATA_MASTER ? ATA_ATAPI_MASTER : ATA_ATAPI_SLAVE))
        command = ATA_ATAPI_IDENTIFY;
    if (!command)
        return ENXIO;

    while (retries-- > 0 && error) {
        if (!(request = ata_alloc_request()))
            break;
        request->dev = atadev->dev;
        request->timeout = 1;
        request->retries = 0;
        request->u.ata.command = command;
        request->flags = (ATA_R_READ|ATA_R_AT_HEAD|ATA_R_DIRECT|ATA_R_QUIET);
        request->data = (void *)&atadev->param;
        request->bytecount = sizeof(struct ata_params);
        request->donecount = 0;
        request->transfersize = DEV_BSIZE;
        ata_queue_request(request);
        error = request->result;
        ata_free_request(request);
    }

    if (!error && (isprint(atadev->param.model[0]) ||
                   isprint(atadev->param.model[1]))) {
        struct ata_params *atacap = &atadev->param;
        char buffer[64];
        int16_t *ptr;

        for (ptr = (int16_t *)atacap;
             ptr < (int16_t *)atacap + sizeof(struct ata_params)/2; ptr++) {
            *ptr = le16toh(*ptr);
        }
        if (!(!strncmp(atacap->model, "FX", 2) ||
              !strncmp(atacap->model, "NEC", 3) ||
              !strncmp(atacap->model, "Pioneer", 7) ||
              !strncmp(atacap->model, "SHARP", 5))) {
            bswap(atacap->model, sizeof(atacap->model));
            bswap(atacap->revision, sizeof(atacap->revision));
            bswap(atacap->serial, sizeof(atacap->serial));
        }
        btrim(atacap->model, sizeof(atacap->model));
        bpack(atacap->model, atacap->model, sizeof(atacap->model));
        btrim(atacap->revision, sizeof(atacap->revision));
        bpack(atacap->revision, atacap->revision, sizeof(atacap->revision));
        btrim(atacap->serial, sizeof(atacap->serial));
        bpack(atacap->serial, atacap->serial, sizeof(atacap->serial));

        if (bootverbose)
            kprintf("ata%d-%s: pio=%s wdma=%s udma=%s cable=%s wire\n",
                    device_get_unit(ch->dev),
                    atadev->unit == ATA_MASTER ? "master" : "slave",
                    ata_mode2str(ata_pmode(atacap)),
                    ata_mode2str(ata_wmode(atacap)),
                    ata_mode2str(ata_umode(atacap)),
                    (atacap->hwres & ATA_CABLE_ID) ? "80":"40");

        if (init) {
            ksprintf(buffer, "%.40s/%.8s", atacap->model, atacap->revision);
            device_set_desc_copy(atadev->dev, buffer);
            if ((atadev->param.config & ATA_PROTO_ATAPI) &&
                (atadev->param.config != ATA_CFA_MAGIC1) &&
                (atadev->param.config != ATA_CFA_MAGIC2)) {
                if (atapi_dma && ch->dma &&
                    (atadev->param.config & ATA_DRQ_MASK) != ATA_DRQ_INTR &&
                    ata_umode(&atadev->param) >= ATA_UDMA2)
                    atadev->mode = ATA_DMA_MAX;
            }
            else {
                if (ata_dma && ch->dma &&
                    (ata_umode(&atadev->param) > 0 ||
                     ata_wmode(&atadev->param) > 0))
                    atadev->mode = ATA_DMA_MAX;
            }
        }
    }
    else {
        if (!error)
            error = ENXIO;
    }
    return error;
}

int
ata_identify(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    struct ata_device *master = NULL, *slave = NULL;
    device_t master_child = NULL, slave_child = NULL;
    int master_unit = -1, slave_unit = -1;

    if (ch->devices & (ATA_ATA_MASTER | ATA_ATAPI_MASTER)) {
        if (!(master = kmalloc(sizeof(struct ata_device),
                               M_ATA, M_INTWAIT | M_ZERO))) {
            device_printf(dev, "out of memory\n");
            return ENOMEM;
        }
        master->unit = ATA_MASTER;
    }
    if (ch->devices & (ATA_ATA_SLAVE | ATA_ATAPI_SLAVE)) {
        if (!(slave = kmalloc(sizeof(struct ata_device),
                              M_ATA, M_INTWAIT | M_ZERO))) {
            kfree(master, M_ATA);
            device_printf(dev, "out of memory\n");
            return ENOMEM;
        }
        slave->unit = ATA_SLAVE;
    }

#ifdef ATA_STATIC_ID
    if (ch->devices & ATA_ATA_MASTER)
        master_unit = (device_get_unit(dev) << 1);
#endif
    if (master && !(master_child = ata_add_child(dev, master, master_unit))) {
        kfree(master, M_ATA);
        master = NULL;
    }
#ifdef ATA_STATIC_ID
    if (ch->devices & ATA_ATA_SLAVE)
        slave_unit = (device_get_unit(dev) << 1) + 1;
#endif
    if (slave && !(slave_child = ata_add_child(dev, slave, slave_unit))) {
        kfree(slave, M_ATA);
        slave = NULL;
    }

    if (slave && ata_getparam(slave, 1)) {
        device_delete_child(dev, slave_child);
        kfree(slave, M_ATA);
    }
    if (master && ata_getparam(master, 1)) {
        device_delete_child(dev, master_child);
        kfree(master, M_ATA);
    }

    bus_generic_probe(dev);
    bus_generic_attach(dev);
    return 0;
}

void
ata_default_registers(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    /* fill in the defaults from what's set up already */
    ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res;
    ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset;
    ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res;
    ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset;
    ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res;
    ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset;
    ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res;
    ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset;
}
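
/*
 * Promote a request to the 48-bit command set when the transfer reaches
 * beyond the 28-bit LBA limit (or is more than 256 sectors) and the device
 * advertises ADDRESS48 support.  Controllers flagged ATA_NO_48BIT_DMA fall
 * back to the PIO variants of the command.
 */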

void
ata_modify_if_48bit(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(request->dev));
    struct ata_device *atadev = device_get_softc(request->dev);

    atadev->flags &= ~ATA_D_48BIT_ACTIVE;

    if ((request->u.ata.lba + request->u.ata.count >= ATA_MAX_28BIT_LBA ||
         request->u.ata.count > 256) &&
        atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {

        /* translate command into 48bit version */
        switch (request->u.ata.command) {
        case ATA_READ:
            request->u.ata.command = ATA_READ48;
            break;
        case ATA_READ_MUL:
            request->u.ata.command = ATA_READ_MUL48;
            break;
        case ATA_READ_DMA:
            if (ch->flags & ATA_NO_48BIT_DMA) {
                if (request->transfersize > DEV_BSIZE)
                    request->u.ata.command = ATA_READ_MUL48;
                else
                    request->u.ata.command = ATA_READ48;
                request->flags &= ~ATA_R_DMA;
            }
            else
                request->u.ata.command = ATA_READ_DMA48;
            break;
        case ATA_READ_DMA_QUEUED:
            if (ch->flags & ATA_NO_48BIT_DMA) {
                if (request->transfersize > DEV_BSIZE)
                    request->u.ata.command = ATA_READ_MUL48;
                else
                    request->u.ata.command = ATA_READ48;
                request->flags &= ~ATA_R_DMA;
            }
            else
                request->u.ata.command = ATA_READ_DMA_QUEUED48;
            break;
        case ATA_WRITE:
            request->u.ata.command = ATA_WRITE48;
            break;
        case ATA_WRITE_MUL:
            request->u.ata.command = ATA_WRITE_MUL48;
            break;
        case ATA_WRITE_DMA:
            if (ch->flags & ATA_NO_48BIT_DMA) {
                if (request->transfersize > DEV_BSIZE)
                    request->u.ata.command = ATA_WRITE_MUL48;
                else
                    request->u.ata.command = ATA_WRITE48;
                request->flags &= ~ATA_R_DMA;
            }
            else
                request->u.ata.command = ATA_WRITE_DMA48;
            break;
        case ATA_WRITE_DMA_QUEUED:
            if (ch->flags & ATA_NO_48BIT_DMA) {
                if (request->transfersize > DEV_BSIZE)
                    request->u.ata.command = ATA_WRITE_MUL48;
                else
                    request->u.ata.command = ATA_WRITE48;
                request->flags &= ~ATA_R_DMA;
            }
            else
                request->u.ata.command = ATA_WRITE_DMA_QUEUED48;
            break;
        case ATA_FLUSHCACHE:
            request->u.ata.command = ATA_FLUSHCACHE48;
            break;
        case ATA_READ_NATIVE_MAX_ADDDRESS:
            request->u.ata.command = ATA_READ_NATIVE_MAX_ADDDRESS48;
            break;
        case ATA_SET_MAX_ADDRESS:
            request->u.ata.command = ATA_SET_MAX_ADDRESS48;
            break;
        default:
            return;
        }
        atadev->flags |= ATA_D_48BIT_ACTIVE;
    }
}

void
ata_udelay(int interval)
{
    /*
     * Use tsleep now.  Theoretically calls to this function are only made
     * in non-time-critical code paths, i.e. the ata reset code.
     */
#if 0
    if (interval < (1000000/hz))
        DELAY(interval);
    else
#endif
        tsleep(&interval, 0, "ataslp", 1 + interval / (1000000 / hz));
}

char *
ata_mode2str(int mode)
{
    switch (mode) {
    case -1: return "UNSUPPORTED";
    case ATA_PIO0: return "PIO0";
    case ATA_PIO1: return "PIO1";
    case ATA_PIO2: return "PIO2";
    case ATA_PIO3: return "PIO3";
    case ATA_PIO4: return "PIO4";
    case ATA_WDMA0: return "WDMA0";
    case ATA_WDMA1: return "WDMA1";
    case ATA_WDMA2: return "WDMA2";
    case ATA_UDMA0: return "UDMA16";
    case ATA_UDMA1: return "UDMA25";
    case ATA_UDMA2: return "UDMA33";
    case ATA_UDMA3: return "UDMA40";
    case ATA_UDMA4: return "UDMA66";
    case ATA_UDMA5: return "UDMA100";
    case ATA_UDMA6: return "UDMA133";
    case ATA_SA150: return "SATA150";
    case ATA_SA300: return "SATA300";
    case ATA_USB: return "USB";
    case ATA_USB1: return "USB1";
    case ATA_USB2: return "USB2";
    default:
        if (mode & ATA_DMA_MASK)
            return "BIOSDMA";
        else
            return "BIOSPIO";
    }
}
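
/*
 * Decode the transfer mode words of an IDENTIFY page.  ata_wmode() and
 * ata_umode() return -1 when no usable mode is advertised; ata_pmode()
 * falls back to ATA_PIO0.
 */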

int
ata_pmode(struct ata_params *ap)
{
    if (ap->atavalid & ATA_FLAG_64_70) {
        if (ap->apiomodes & 0x02)
            return ATA_PIO4;
        if (ap->apiomodes & 0x01)
            return ATA_PIO3;
    }
    if (ap->mwdmamodes & 0x04)
        return ATA_PIO4;
    if (ap->mwdmamodes & 0x02)
        return ATA_PIO3;
    if (ap->mwdmamodes & 0x01)
        return ATA_PIO2;
    if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x200)
        return ATA_PIO2;
    if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x100)
        return ATA_PIO1;
    if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x000)
        return ATA_PIO0;
    return ATA_PIO0;
}

int
ata_wmode(struct ata_params *ap)
{
    if (ap->mwdmamodes & 0x04)
        return ATA_WDMA2;
    if (ap->mwdmamodes & 0x02)
        return ATA_WDMA1;
    if (ap->mwdmamodes & 0x01)
        return ATA_WDMA0;
    return -1;
}

int
ata_umode(struct ata_params *ap)
{
    if (ap->atavalid & ATA_FLAG_88) {
        if (ap->udmamodes & 0x40)
            return ATA_UDMA6;
        if (ap->udmamodes & 0x20)
            return ATA_UDMA5;
        if (ap->udmamodes & 0x10)
            return ATA_UDMA4;
        if (ap->udmamodes & 0x08)
            return ATA_UDMA3;
        if (ap->udmamodes & 0x04)
            return ATA_UDMA2;
        if (ap->udmamodes & 0x02)
            return ATA_UDMA1;
        if (ap->udmamodes & 0x01)
            return ATA_UDMA0;
    }
    return -1;
}
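
/*
 * Clamp a requested transfer mode to what the device advertises in its
 * IDENTIFY data, optionally bounded by 'maxmode'.
 */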

int
ata_limit_mode(device_t dev, int mode, int maxmode)
{
    struct ata_device *atadev = device_get_softc(dev);

    if (maxmode && mode > maxmode)
        mode = maxmode;

    if (mode >= ATA_UDMA0 && ata_umode(&atadev->param) > 0)
        return min(mode, ata_umode(&atadev->param));

    if (mode >= ATA_WDMA0 && ata_wmode(&atadev->param) > 0)
        return min(mode, ata_wmode(&atadev->param));

    if (mode > ata_pmode(&atadev->param))
        return min(mode, ata_pmode(&atadev->param));

    return mode;
}
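
/*
 * Helpers for cleaning up IDENTIFY strings, which arrive as 16-bit words
 * with the characters of each pair swapped on little-endian hosts.  bswap()
 * undoes the swap (e.g. "aSsmnu g" would become "Samsung "), btrim() turns
 * NULs and underscores into spaces and strips trailing blanks, and bpack()
 * collapses runs of spaces.
 */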

static void
bswap(int8_t *buf, int len)
{
    u_int16_t *ptr = (u_int16_t*)(buf + len);

    while (--ptr >= (u_int16_t*)buf)
        *ptr = ntohs(*ptr);
}

static void
btrim(int8_t *buf, int len)
{
    int8_t *ptr;

    for (ptr = buf; ptr < buf+len; ++ptr)
        if (!*ptr || *ptr == '_')
            *ptr = ' ';
    for (ptr = buf + len - 1; ptr >= buf && *ptr == ' '; --ptr)
        *ptr = 0;
}

static void
bpack(int8_t *src, int8_t *dst, int len)
{
    int i, j, blank;

    for (i = j = blank = 0 ; i < len; i++) {
        if (blank && src[i] == ' ') continue;
        if (blank && src[i] != ' ') {
            dst[j++] = src[i];
            blank = 0;
            continue;
        }
        if (src[i] == ' ') {
            blank = 1;
            if (i == 0)
                continue;
        }
        dst[j++] = src[i];
    }
    if (j < len)
        dst[j] = 0x00;
}

/*
 * module handling
 */
static int
ata_module_event_handler(module_t mod, int what, void *arg)
{
    /* static because we need the reference at destruction time */
    static cdev_t atacdev;

    switch (what) {
    case MOD_LOAD:
        /* register controlling device */
        dev_ops_add(&ata_ops, 0, 0);
        atacdev = make_dev(&ata_ops, 0, UID_ROOT, GID_OPERATOR, 0600, "ata");
        reference_dev(atacdev);
        return 0;

    case MOD_UNLOAD:
        /* deregister controlling device */
        destroy_dev(atacdev);
        dev_ops_remove(&ata_ops, 0, 0);
        return 0;

    default:
        return EOPNOTSUPP;
    }
}

static moduledata_t ata_moduledata = { "ata", ata_module_event_handler, NULL };
DECLARE_MODULE(ata, ata_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(ata, 1);

/*
 * Construct a completely zeroed ata_request.  On objcache_put(), an
 * ata_request object is also zeroed, so objcache_get() is guaranteed to give
 * completely zeroed objects without spending too much time.
 */
static boolean_t
ata_request_cache_ctor(void *obj, void *private, int ocflags)
{
    struct ata_request *arp = obj;

    bzero(arp, sizeof(struct ata_request));
    return(TRUE);
}

/*
 * Construct a completely zeroed ata_composite.  On objcache_put(), an
 * ata_composite object is also zeroed, so objcache_get() is guaranteed to
 * give completely zeroed objects without spending too much time.
 */
static boolean_t
ata_composite_cache_ctor(void *obj, void *private, int ocflags)
{
    struct ata_composite *acp = obj;

    bzero(acp, sizeof(struct ata_composite));
    return(TRUE);
}

static void
ata_init(void)
{
    ata_request_cache = objcache_create("ata_request", 0, 0,
                                        ata_request_cache_ctor, NULL, NULL,
                                        objcache_malloc_alloc,
                                        objcache_malloc_free,
                                        &ata_request_malloc_args);
    ata_composite_cache = objcache_create("ata_composite", 0, 0,
                                          ata_composite_cache_ctor, NULL, NULL,
                                          objcache_malloc_alloc,
                                          objcache_malloc_free,
                                          &ata_composite_malloc_args);
}
SYSINIT(ata_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_init, NULL);

static void
ata_uninit(void)
{
    objcache_destroy(ata_composite_cache);
    objcache_destroy(ata_request_cache);
}
SYSUNINIT(ata_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_uninit, NULL);