hw/ide/macio.c
/*
 * QEMU IDE Emulation: MacIO support.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Openedhand Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/ppc/mac.h"
#include "hw/ppc/mac_dbdma.h"
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"

#include "hw/ide/internal.h"

/* debug MACIO */
// #define DEBUG_MACIO

#ifdef DEBUG_MACIO
static const int debug_macio = 1;
#else
static const int debug_macio = 0;
#endif

#define MACIO_DPRINTF(fmt, ...) do { \
        if (debug_macio) { \
            printf(fmt , ## __VA_ARGS__); \
        } \
    } while (0)


/***********************************************************/
/* MacIO based PowerPC IDE */

#define MACIO_PAGE_SIZE 4096

/*
 * Unaligned DMA read/write access functions required for OS X/Darwin which
 * don't perform DMA transactions on sector boundaries. These functions are
 * modelled on bdrv_co_preadv()/bdrv_co_pwritev() and so should be easy to
 * remove if the unaligned block APIs are ever exposed.
 */

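/*
 * Both helpers widen the request to BDRV_SECTOR_SIZE alignment and route the
 * sub-sector head/tail bytes through the scratch buffers io->head_remainder
 * and io->tail_remainder: reads simply discard those extra bytes, while
 * writes pre-read the partial sectors so the surrounding data is preserved
 * across the read-modify-write.
 */
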
static void pmac_dma_read(BlockBackend *blk,
                          int64_t offset, unsigned int bytes,
                          void (*cb)(void *opaque, int ret), void *opaque)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    dma_addr_t dma_addr;
    int64_t sector_num;
    int nsector;
    uint64_t align = BDRV_SECTOR_SIZE;
    size_t head_bytes, tail_bytes;

    qemu_iovec_destroy(&io->iov);
    qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1);

    sector_num = (offset >> 9);
    nsector = (io->len >> 9);

    MACIO_DPRINTF("--- DMA read transfer (0x%" HWADDR_PRIx ",0x%x): "
                  "sector_num: %" PRId64 ", nsector: %d\n", io->addr, io->len,
                  sector_num, nsector);

    dma_addr = io->addr;
    io->dir = DMA_DIRECTION_FROM_DEVICE;
    io->dma_len = io->len;
    io->dma_mem = dma_memory_map(&address_space_memory, dma_addr, &io->dma_len,
                                 io->dir);

    if (offset & (align - 1)) {
        head_bytes = offset & (align - 1);

        MACIO_DPRINTF("--- DMA unaligned head: sector %" PRId64 ", "
                      "discarding %zu bytes\n", sector_num, head_bytes);

        qemu_iovec_add(&io->iov, &io->head_remainder, head_bytes);

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    qemu_iovec_add(&io->iov, io->dma_mem, io->len);

    if ((offset + bytes) & (align - 1)) {
        tail_bytes = (offset + bytes) & (align - 1);

        MACIO_DPRINTF("--- DMA unaligned tail: sector %" PRId64 ", "
                      "discarding bytes %zu\n", sector_num, tail_bytes);

        qemu_iovec_add(&io->iov, &io->tail_remainder, align - tail_bytes);
        bytes = ROUND_UP(bytes, align);
    }

    s->io_buffer_size -= io->len;
    s->io_buffer_index += io->len;

    io->len = 0;

    MACIO_DPRINTF("--- Block read transfer - sector_num: %" PRIx64 " "
                  "nsector: %x\n", (offset >> 9), (bytes >> 9));

    s->bus->dma->aiocb = blk_aio_preadv(blk, offset, &io->iov, 0, cb, io);
}

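/*
 * As above, but for writes the unaligned head/tail sectors must first be
 * read back with blk_pread() so that the bytes the guest is not writing can
 * be merged into the aligned write-out.
 */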
static void pmac_dma_write(BlockBackend *blk,
                           int64_t offset, int bytes,
                           void (*cb)(void *opaque, int ret), void *opaque)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    dma_addr_t dma_addr;
    int64_t sector_num;
    int nsector;
    uint64_t align = BDRV_SECTOR_SIZE;
    size_t head_bytes, tail_bytes;
    bool unaligned_head = false, unaligned_tail = false;

    qemu_iovec_destroy(&io->iov);
    qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1);

    sector_num = (offset >> 9);
    nsector = (io->len >> 9);

    MACIO_DPRINTF("--- DMA write transfer (0x%" HWADDR_PRIx ",0x%x): "
                  "sector_num: %" PRId64 ", nsector: %d\n", io->addr, io->len,
                  sector_num, nsector);

    dma_addr = io->addr;
    io->dir = DMA_DIRECTION_TO_DEVICE;
    io->dma_len = io->len;
    io->dma_mem = dma_memory_map(&address_space_memory, dma_addr, &io->dma_len,
                                 io->dir);

    if (offset & (align - 1)) {
        head_bytes = offset & (align - 1);
        sector_num = ((offset & ~(align - 1)) >> 9);

        MACIO_DPRINTF("--- DMA unaligned head: pre-reading head sector %"
                      PRId64 "\n", sector_num);

        blk_pread(s->blk, (sector_num << 9), &io->head_remainder, align);

        qemu_iovec_add(&io->iov, &io->head_remainder, head_bytes);
        qemu_iovec_add(&io->iov, io->dma_mem, io->len);

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);

        unaligned_head = true;
    }

    if ((offset + bytes) & (align - 1)) {
        tail_bytes = (offset + bytes) & (align - 1);
        sector_num = (((offset + bytes) & ~(align - 1)) >> 9);

        MACIO_DPRINTF("--- DMA unaligned tail: pre-reading tail sector %"
                      PRId64 "\n", sector_num);

        blk_pread(s->blk, (sector_num << 9), &io->tail_remainder, align);

        if (!unaligned_head) {
            qemu_iovec_add(&io->iov, io->dma_mem, io->len);
        }

        /* Keep the pre-read bytes that follow the guest data in the last
         * sector, starting at byte offset tail_bytes into the scratch
         * buffer */
        qemu_iovec_add(&io->iov, &io->tail_remainder[tail_bytes],
                       align - tail_bytes);

        bytes = ROUND_UP(bytes, align);

        unaligned_tail = true;
    }

    if (!unaligned_head && !unaligned_tail) {
        qemu_iovec_add(&io->iov, io->dma_mem, io->len);
    }

    s->io_buffer_size -= io->len;
    s->io_buffer_index += io->len;

    io->len = 0;

    MACIO_DPRINTF("--- Block write transfer - sector_num: %" PRIx64 " "
                  "nsector: %x\n", (offset >> 9), (bytes >> 9));

    s->bus->dma->aiocb = blk_aio_pwritev(blk, offset, &io->iov, 0, cb, io);
}

static void pmac_dma_trim(BlockBackend *blk,
                          int64_t offset, int bytes,
                          void (*cb)(void *opaque, int ret), void *opaque)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    dma_addr_t dma_addr;

    qemu_iovec_destroy(&io->iov);
    qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1);

    dma_addr = io->addr;
    io->dir = DMA_DIRECTION_TO_DEVICE;
    io->dma_len = io->len;
    io->dma_mem = dma_memory_map(&address_space_memory, dma_addr, &io->dma_len,
                                 io->dir);

    qemu_iovec_add(&io->iov, io->dma_mem, io->len);
    s->io_buffer_size -= io->len;
    s->io_buffer_index += io->len;
    io->len = 0;

    s->bus->dma->aiocb = ide_issue_trim(offset, &io->iov, cb, io, blk);
}

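/*
 * The two transfer callbacks below drive the whole DMA: each completed block
 * operation re-enters its callback, which either queues the next
 * pmac_dma_read()/pmac_dma_write() chunk or, once io->len and the IDE buffer
 * are exhausted, unmaps the guest memory, finalises block accounting and
 * signals the DBDMA engine via io->dma_end().
 */
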
static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    int64_t offset;

    MACIO_DPRINTF("pmac_ide_atapi_transfer_cb\n");

    if (ret < 0) {
        MACIO_DPRINTF("DMA error: %d\n", ret);
        ide_atapi_io_error(s, ret);
        goto done;
    }

    if (!m->dma_active) {
        MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
                      s->nsector, io->len, s->status);
        /* data not ready yet, wait for the channel to get restarted */
        io->processing = false;
        return;
    }

    if (s->io_buffer_size <= 0) {
        MACIO_DPRINTF("End of IDE transfer\n");
        ide_atapi_cmd_ok(s);
        m->dma_active = false;
        goto done;
    }

    if (io->len == 0) {
        MACIO_DPRINTF("End of DMA transfer\n");
        goto done;
    }

    if (s->lba == -1) {
        /* Non-block ATAPI transfer - just copy to RAM */
        s->io_buffer_size = MIN(s->io_buffer_size, io->len);
        dma_memory_write(&address_space_memory, io->addr, s->io_buffer,
                         s->io_buffer_size);
        io->len = 0;
        ide_atapi_cmd_ok(s);
        m->dma_active = false;
        goto done;
    }

    /* Calculate current offset */
    offset = ((int64_t)s->lba << 11) + s->io_buffer_index;

    pmac_dma_read(s->blk, offset, io->len, pmac_ide_atapi_transfer_cb, io);
    return;

done:
    dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len,
                     io->dir, io->dma_len);

    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->blk), &s->acct);
    } else {
        block_acct_done(blk_get_stats(s->blk), &s->acct);
    }

    ide_set_inactive(s, false);
    io->dma_end(opaque);
}

static void pmac_ide_transfer_cb(void *opaque, int ret)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    int64_t offset;

    MACIO_DPRINTF("pmac_ide_transfer_cb\n");

    if (ret < 0) {
        MACIO_DPRINTF("DMA error: %d\n", ret);
        ide_dma_error(s);
        goto done;
    }

    if (!m->dma_active) {
        MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
                      s->nsector, io->len, s->status);
        /* data not ready yet, wait for the channel to get restarted */
        io->processing = false;
        return;
    }

    if (s->io_buffer_size <= 0) {
        MACIO_DPRINTF("End of IDE transfer\n");
        s->status = READY_STAT | SEEK_STAT;
        ide_set_irq(s->bus);
        m->dma_active = false;
        goto done;
    }

    if (io->len == 0) {
        MACIO_DPRINTF("End of DMA transfer\n");
        goto done;
    }

    /* Calculate current offset */
    offset = (ide_get_sector(s) << 9) + s->io_buffer_index;

    switch (s->dma_cmd) {
    case IDE_DMA_READ:
        pmac_dma_read(s->blk, offset, io->len, pmac_ide_transfer_cb, io);
        break;
    case IDE_DMA_WRITE:
        pmac_dma_write(s->blk, offset, io->len, pmac_ide_transfer_cb, io);
        break;
    case IDE_DMA_TRIM:
        pmac_dma_trim(s->blk, offset, io->len, pmac_ide_transfer_cb, io);
        break;
    default:
        abort();
    }

    return;

done:
    dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len,
                     io->dir, io->dma_len);

    if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
        if (ret < 0) {
            block_acct_failed(blk_get_stats(s->blk), &s->acct);
        } else {
            block_acct_done(blk_get_stats(s->blk), &s->acct);
        }
    }

    ide_set_inactive(s, false);
    io->dma_end(opaque);
}

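/*
 * Entry point called by the DBDMA engine for a run of descriptors: start the
 * block accounting for the request and enter the callback loop above with
 * ret == 0.
 */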
static void pmac_ide_transfer(DBDMA_io *io)
{
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);

    MACIO_DPRINTF("\n");

    if (s->drive_kind == IDE_CD) {
        block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
                         BLOCK_ACCT_READ);

        pmac_ide_atapi_transfer_cb(io, 0);
        return;
    }

    switch (s->dma_cmd) {
    case IDE_DMA_READ:
        block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
                         BLOCK_ACCT_READ);
        break;
    case IDE_DMA_WRITE:
        block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
                         BLOCK_ACCT_WRITE);
        break;
    default:
        break;
    }

    pmac_ide_transfer_cb(io, 0);
}

static void pmac_ide_flush(DBDMA_io *io)
{
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);

    if (s->bus->dma->aiocb) {
        blk_drain(s->blk);
    }
}

/* PowerMac IDE memory IO */

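/*
 * The IDE registers are spaced 16 bytes apart in the MacIO region, so
 * (addr & 0xFFF) >> 4 yields the register index: 0 is the data port,
 * 1..7 are the task file registers, and 8/22 reach the device control /
 * alternate status register.  16- and 32-bit data port accesses are
 * byte-swapped (bswap16/bswap32) on their way to and from the IDE core.
 */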
static void pmac_ide_writeb (void *opaque,
                             hwaddr addr, uint32_t val)
{
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    switch (addr) {
    case 1 ... 7:
        ide_ioport_write(&d->bus, addr, val);
        break;
    case 8:
    case 22:
        ide_cmd_write(&d->bus, 0, val);
        break;
    default:
        break;
    }
}

static uint32_t pmac_ide_readb (void *opaque, hwaddr addr)
{
    uint8_t retval;
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    switch (addr) {
    case 1 ... 7:
        retval = ide_ioport_read(&d->bus, addr);
        break;
    case 8:
    case 22:
        retval = ide_status_read(&d->bus, 0);
        break;
    default:
        retval = 0xFF;
        break;
    }
    return retval;
}

static void pmac_ide_writew (void *opaque,
                             hwaddr addr, uint32_t val)
{
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    val = bswap16(val);
    if (addr == 0) {
        ide_data_writew(&d->bus, 0, val);
    }
}

static uint32_t pmac_ide_readw (void *opaque, hwaddr addr)
{
    uint16_t retval;
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    if (addr == 0) {
        retval = ide_data_readw(&d->bus, 0);
    } else {
        retval = 0xFFFF;
    }
    retval = bswap16(retval);
    return retval;
}

static void pmac_ide_writel (void *opaque,
                             hwaddr addr, uint32_t val)
{
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    val = bswap32(val);
    if (addr == 0) {
        ide_data_writel(&d->bus, 0, val);
    }
}

static uint32_t pmac_ide_readl (void *opaque, hwaddr addr)
{
    uint32_t retval;
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    if (addr == 0) {
        retval = ide_data_readl(&d->bus, 0);
    } else {
        retval = 0xFFFFFFFF;
    }
    retval = bswap32(retval);
    return retval;
}

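/*
 * The legacy .old_mmio dispatch table selects a handler by access size:
 * index 0 for 8-bit, 1 for 16-bit and 2 for 32-bit accesses.
 */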
static const MemoryRegionOps pmac_ide_ops = {
    .old_mmio = {
        .write = {
            pmac_ide_writeb,
            pmac_ide_writew,
            pmac_ide_writel,
        },
        .read = {
            pmac_ide_readb,
            pmac_ide_readw,
            pmac_ide_readl,
        },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const VMStateDescription vmstate_pmac = {
    .name = "ide",
    .version_id = 4,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_IDE_BUS(bus, MACIOIDEState),
        VMSTATE_IDE_DRIVES(bus.ifs, MACIOIDEState),
        VMSTATE_BOOL(dma_active, MACIOIDEState),
        VMSTATE_END_OF_LIST()
    }
};

static void macio_ide_reset(DeviceState *dev)
{
    MACIOIDEState *d = MACIO_IDE(dev);

    ide_bus_reset(&d->bus);
}

static int ide_nop_int(IDEDMA *dma, int x)
{
    return 0;
}

static int32_t ide_nop_int32(IDEDMA *dma, int32_t l)
{
    return 0;
}

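/*
 * IDEDMA hooks for the generic IDE core: prepare_buf and rw_buf are nops
 * here because the actual data movement is driven by the DBDMA engine
 * through pmac_ide_transfer().  start_dma only records the expected
 * transfer size, marks the channel active and kicks DBDMA.
 */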
static void ide_dbdma_start(IDEDMA *dma, IDEState *s,
                            BlockCompletionFunc *cb)
{
    MACIOIDEState *m = container_of(dma, MACIOIDEState, dma);

    s->io_buffer_index = 0;
    if (s->drive_kind == IDE_CD) {
        s->io_buffer_size = s->packet_transfer_size;
    } else {
        s->io_buffer_size = s->nsector * BDRV_SECTOR_SIZE;
    }

    MACIO_DPRINTF("\n\n------------ IDE transfer\n");
    MACIO_DPRINTF("buffer_size: %x buffer_index: %x\n",
                  s->io_buffer_size, s->io_buffer_index);
    MACIO_DPRINTF("lba: %x size: %x\n", s->lba, s->io_buffer_size);
    MACIO_DPRINTF("-------------------------\n");

    m->dma_active = true;
    DBDMA_kick(m->dbdma);
}

static const IDEDMAOps dbdma_ops = {
    .start_dma = ide_dbdma_start,
    .prepare_buf = ide_nop_int32,
    .rw_buf = ide_nop_int,
};

static void macio_ide_realizefn(DeviceState *dev, Error **errp)
{
    MACIOIDEState *s = MACIO_IDE(dev);

    ide_init2(&s->bus, s->irq);

    /* Register DMA callbacks */
    s->dma.ops = &dbdma_ops;
    s->bus.dma = &s->dma;
}

static void macio_ide_initfn(Object *obj)
{
    SysBusDevice *d = SYS_BUS_DEVICE(obj);
    MACIOIDEState *s = MACIO_IDE(obj);

    ide_bus_new(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2);
    memory_region_init_io(&s->mem, obj, &pmac_ide_ops, s, "pmac-ide", 0x1000);
    sysbus_init_mmio(d, &s->mem);
    sysbus_init_irq(d, &s->irq);
    sysbus_init_irq(d, &s->dma_irq);
}

static void macio_ide_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->realize = macio_ide_realizefn;
    dc->reset = macio_ide_reset;
    dc->vmsd = &vmstate_pmac;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo macio_ide_type_info = {
    .name = TYPE_MACIO_IDE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(MACIOIDEState),
    .instance_init = macio_ide_initfn,
    .class_init = macio_ide_class_init,
};

static void macio_ide_register_types(void)
{
    type_register_static(&macio_ide_type_info);
}

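/*
 * Typical wiring (illustrative sketch, not taken from this file): the macio
 * device and board code create the MACIO_IDE instance and then call roughly
 *
 *     macio_ide_register_dma(ide, dbdma, channel);
 *     macio_ide_init_drives(ide, hd_table);
 *
 * so that the DBDMA engine routes the channel's descriptors to
 * pmac_ide_transfer()/pmac_ide_flush() and the two IDE slots get their
 * backing drives.
 */
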
/* hd_table must contain 2 block drivers */
void macio_ide_init_drives(MACIOIDEState *s, DriveInfo **hd_table)
{
    int i;

    for (i = 0; i < 2; i++) {
        if (hd_table[i]) {
            ide_create_drive(&s->bus, i, hd_table[i]);
        }
    }
}

void macio_ide_register_dma(MACIOIDEState *s, void *dbdma, int channel)
{
    s->dbdma = dbdma;
    DBDMA_register_channel(dbdma, channel, s->dma_irq,
                           pmac_ide_transfer, pmac_ide_flush, s);
}

type_init(macio_ide_register_types)