qemu.git / hw/ide/macio.c

/*
 * QEMU IDE Emulation: MacIO support.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Openedhand Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "hw/hw.h"
#include "hw/ppc/mac.h"
#include "hw/ppc/mac_dbdma.h"
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"

#include <hw/ide/internal.h>

/* debug MACIO */
// #define DEBUG_MACIO

#ifdef DEBUG_MACIO
static const int debug_macio = 1;
#else
static const int debug_macio = 0;
#endif

#define MACIO_DPRINTF(fmt, ...) do { \
        if (debug_macio) { \
            printf(fmt , ## __VA_ARGS__); \
        } \
    } while (0)


/***********************************************************/
/* MacIO based PowerPC IDE */

#define MACIO_PAGE_SIZE 4096
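
/*
 * DBDMA completion callback for ATAPI (CD-ROM) DMA.  The DBDMA engine hands
 * us a DBDMA_io descriptor; each invocation accounts for the bytes already
 * transferred, copies out any sub-sector remainder cached in io->remainder,
 * pre-reads an unaligned tail into that cache if needed, and then queues the
 * remaining sector-aligned bulk transfer through the scatter/gather DMA
 * helpers, re-invoking itself until the packet transfer is complete.
 */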
static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    int unaligned;

    if (ret < 0) {
        m->aiocb = NULL;
        qemu_sglist_destroy(&s->sg);
        ide_atapi_io_error(s, ret);
        io->remainder_len = 0;
        goto done;
    }

    if (!m->dma_active) {
        MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
                      s->nsector, io->len, s->status);
        /* data not ready yet, wait for the channel to get restarted */
        io->processing = false;
        return;
    }

    MACIO_DPRINTF("io_buffer_size = %#x\n", s->io_buffer_size);

    if (s->io_buffer_size > 0) {
        m->aiocb = NULL;
        qemu_sglist_destroy(&s->sg);

        s->packet_transfer_size -= s->io_buffer_size;

        s->io_buffer_index += s->io_buffer_size;
        s->lba += s->io_buffer_index >> 11;
        s->io_buffer_index &= 0x7ff;
    }

    s->io_buffer_size = MIN(io->len, s->packet_transfer_size);

    MACIO_DPRINTF("remainder: %d io->len: %d size: %d\n", io->remainder_len,
                  io->len, s->packet_transfer_size);
    if (io->remainder_len && io->len) {
        /* guest wants the rest of its previous transfer */
        int remainder_len = MIN(io->remainder_len, io->len);

        MACIO_DPRINTF("copying remainder %d bytes\n", remainder_len);

        cpu_physical_memory_write(io->addr, io->remainder + 0x200 -
                                  remainder_len, remainder_len);

        io->addr += remainder_len;
        io->len -= remainder_len;
        s->io_buffer_size = remainder_len;
        io->remainder_len -= remainder_len;
        /* treat remainder as individual transfer, start again */
        qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
                         &address_space_memory);
        pmac_ide_atapi_transfer_cb(opaque, 0);
        return;
    }

    if (!s->packet_transfer_size) {
        MACIO_DPRINTF("end of transfer\n");
        ide_atapi_cmd_ok(s);
        m->dma_active = false;
    }

    if (io->len == 0) {
        MACIO_DPRINTF("end of DMA\n");
        goto done;
    }

    /* launch next transfer */

    /* handle unaligned accesses first, get them over with and only do the
       remaining bulk transfer using our async DMA helpers */
    unaligned = io->len & 0x1ff;
    if (unaligned) {
        int sector_num = (s->lba << 2) + (s->io_buffer_index >> 9);
        int nsector = io->len >> 9;

        MACIO_DPRINTF("precopying unaligned %d bytes to %#" HWADDR_PRIx "\n",
                      unaligned, io->addr + io->len - unaligned);

        blk_read(s->blk, sector_num + nsector, io->remainder, 1);
        cpu_physical_memory_write(io->addr + io->len - unaligned,
                                  io->remainder, unaligned);

        io->len -= unaligned;
    }

    MACIO_DPRINTF("io->len = %#x\n", io->len);

    qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
                     &address_space_memory);
    qemu_sglist_add(&s->sg, io->addr, io->len);
    io->addr += s->io_buffer_size;
    io->remainder_len = MIN(s->packet_transfer_size - s->io_buffer_size,
                            (0x200 - unaligned) & 0x1ff);
    MACIO_DPRINTF("set remainder to: %d\n", io->remainder_len);

    /* We would read no data from the block layer, thus not get a callback.
       Just fake completion manually. */
    if (!io->len) {
        pmac_ide_atapi_transfer_cb(opaque, 0);
        return;
    }

    io->len = 0;

    MACIO_DPRINTF("sector_num=%d size=%d, cmd_cmd=%d\n",
                  (s->lba << 2) + (s->io_buffer_index >> 9),
                  s->packet_transfer_size, s->dma_cmd);

    m->aiocb = dma_blk_read(s->blk, &s->sg,
                            (int64_t)(s->lba << 2) + (s->io_buffer_index >> 9),
                            pmac_ide_atapi_transfer_cb, io);
    return;

done:
    MACIO_DPRINTF("done DMA\n");
    block_acct_done(blk_get_stats(s->blk), &s->acct);
    io->dma_end(opaque);
}
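
/*
 * DBDMA completion callback for hard-disk DMA (read, write and TRIM).
 * io->requests counts the asynchronous block requests still in flight so the
 * tail of the transfer is only processed once.  As in the ATAPI path,
 * sub-sector head/tail fragments are staged in io->remainder while the
 * aligned middle of the request goes through the scatter/gather DMA helpers.
 */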
static void pmac_ide_transfer_cb(void *opaque, int ret)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    int n = 0;
    int64_t sector_num;
    int unaligned;

    if (ret < 0) {
        MACIO_DPRINTF("DMA error\n");
        m->aiocb = NULL;
        qemu_sglist_destroy(&s->sg);
        ide_dma_error(s);
        io->remainder_len = 0;
        goto done;
    }

    if (--io->requests) {
        /* More requests still in flight */
        return;
    }

    if (!m->dma_active) {
        MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
                      s->nsector, io->len, s->status);
        /* data not ready yet, wait for the channel to get restarted */
        io->processing = false;
        return;
    }

    sector_num = ide_get_sector(s);
    MACIO_DPRINTF("io_buffer_size = %#x\n", s->io_buffer_size);
    if (s->io_buffer_size > 0) {
        m->aiocb = NULL;
        qemu_sglist_destroy(&s->sg);
        n = (s->io_buffer_size + 0x1ff) >> 9;
        sector_num += n;
        ide_set_sector(s, sector_num);
        s->nsector -= n;
    }

    if (io->finish_remain_read) {
        /* Finish a stale read from the last iteration */
        io->finish_remain_read = false;
        cpu_physical_memory_write(io->finish_addr, io->remainder,
                                  io->finish_len);
    }

    MACIO_DPRINTF("remainder: %d io->len: %d nsector: %d "
                  "sector_num: %" PRId64 "\n",
                  io->remainder_len, io->len, s->nsector, sector_num);
    if (io->remainder_len && io->len) {
        /* guest wants the rest of its previous transfer */
        int remainder_len = MIN(io->remainder_len, io->len);
        uint8_t *p = &io->remainder[0x200 - remainder_len];

        MACIO_DPRINTF("copying remainder %d bytes at %#" HWADDR_PRIx "\n",
                      remainder_len, io->addr);

        switch (s->dma_cmd) {
        case IDE_DMA_READ:
            cpu_physical_memory_write(io->addr, p, remainder_len);
            break;
        case IDE_DMA_WRITE:
            cpu_physical_memory_read(io->addr, p, remainder_len);
            break;
        case IDE_DMA_TRIM:
            break;
        }

        io->addr += remainder_len;
        io->len -= remainder_len;
        io->remainder_len -= remainder_len;

        if (s->dma_cmd == IDE_DMA_WRITE && !io->remainder_len) {
            io->requests++;
            qemu_iovec_reset(&io->iov);
            qemu_iovec_add(&io->iov, io->remainder, 0x200);

            m->aiocb = blk_aio_writev(s->blk, sector_num - 1, &io->iov, 1,
                                      pmac_ide_transfer_cb, io);
        }
    }

    if (s->nsector == 0 && !io->remainder_len) {
        MACIO_DPRINTF("end of transfer\n");
        s->status = READY_STAT | SEEK_STAT;
        ide_set_irq(s->bus);
        m->dma_active = false;
    }

    if (io->len == 0) {
        MACIO_DPRINTF("end of DMA\n");
        goto done;
    }

    /* launch next transfer */

    s->io_buffer_index = 0;
    s->io_buffer_size = MIN(io->len, s->nsector * 512);

    /* handle unaligned accesses first, get them over with and only do the
       remaining bulk transfer using our async DMA helpers */
    unaligned = io->len & 0x1ff;
    if (unaligned) {
        int nsector = io->len >> 9;

        MACIO_DPRINTF("precopying unaligned %d bytes to %#" HWADDR_PRIx "\n",
                      unaligned, io->addr + io->len - unaligned);

        switch (s->dma_cmd) {
        case IDE_DMA_READ:
            io->requests++;
            io->finish_addr = io->addr + io->len - unaligned;
            io->finish_len = unaligned;
            io->finish_remain_read = true;
            qemu_iovec_reset(&io->iov);
            qemu_iovec_add(&io->iov, io->remainder, 0x200);

            m->aiocb = blk_aio_readv(s->blk, sector_num + nsector, &io->iov, 1,
                                     pmac_ide_transfer_cb, io);
            break;
        case IDE_DMA_WRITE:
            /* cache the contents in our io struct */
            cpu_physical_memory_read(io->addr + io->len - unaligned,
                                     io->remainder + io->remainder_len,
                                     unaligned);
            break;
        case IDE_DMA_TRIM:
            break;
        }
    }

    MACIO_DPRINTF("io->len = %#x\n", io->len);

    qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
                     &address_space_memory);
    qemu_sglist_add(&s->sg, io->addr, io->len);
    io->addr += io->len + unaligned;
    io->remainder_len = (0x200 - unaligned) & 0x1ff;
    MACIO_DPRINTF("set remainder to: %d\n", io->remainder_len);

    /* Only subsector reads happening */
    if (!io->len) {
        if (!io->requests) {
            io->requests++;
            pmac_ide_transfer_cb(opaque, ret);
        }
        return;
    }

    io->len = 0;

    MACIO_DPRINTF("sector_num=%" PRId64 " n=%d, nsector=%d, cmd_cmd=%d\n",
                  sector_num, n, s->nsector, s->dma_cmd);

    switch (s->dma_cmd) {
    case IDE_DMA_READ:
        m->aiocb = dma_blk_read(s->blk, &s->sg, sector_num,
                                pmac_ide_transfer_cb, io);
        break;
    case IDE_DMA_WRITE:
        m->aiocb = dma_blk_write(s->blk, &s->sg, sector_num,
                                 pmac_ide_transfer_cb, io);
        break;
    case IDE_DMA_TRIM:
        m->aiocb = dma_blk_io(s->blk, &s->sg, sector_num,
                              ide_issue_trim, pmac_ide_transfer_cb, io,
                              DMA_DIRECTION_TO_DEVICE);
        break;
    }

    io->requests++;
    return;

done:
    if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
        block_acct_done(blk_get_stats(s->blk), &s->acct);
    }
    io->dma_end(io);
}
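
/*
 * Transfer entry point registered with the DBDMA engine: start block
 * accounting, complete non-block ATAPI transfers directly from the IDE
 * buffer, and otherwise hand off to the matching completion callback to
 * drive the DMA transfer.
 */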
static void pmac_ide_transfer(DBDMA_io *io)
{
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);

    MACIO_DPRINTF("\n");

    s->io_buffer_size = 0;
    if (s->drive_kind == IDE_CD) {

        /* Handle non-block ATAPI DMA transfers */
        if (s->lba == -1) {
            s->io_buffer_size = MIN(io->len, s->packet_transfer_size);
            block_acct_start(blk_get_stats(s->blk), &s->acct, s->io_buffer_size,
                             BLOCK_ACCT_READ);
            MACIO_DPRINTF("non-block ATAPI DMA transfer size: %d\n",
                          s->io_buffer_size);

            /* Copy ATAPI buffer directly to RAM and finish */
            cpu_physical_memory_write(io->addr, s->io_buffer,
                                      s->io_buffer_size);
            ide_atapi_cmd_ok(s);
            m->dma_active = false;

            MACIO_DPRINTF("end of non-block ATAPI DMA transfer\n");
            block_acct_done(blk_get_stats(s->blk), &s->acct);
            io->dma_end(io);
            return;
        }

        block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
                         BLOCK_ACCT_READ);
        pmac_ide_atapi_transfer_cb(io, 0);
        return;
    }

    switch (s->dma_cmd) {
    case IDE_DMA_READ:
        block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
                         BLOCK_ACCT_READ);
        break;
    case IDE_DMA_WRITE:
        block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
                         BLOCK_ACCT_WRITE);
        break;
    default:
        break;
    }

    io->requests++;
    pmac_ide_transfer_cb(io, 0);
}
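
/*
 * DBDMA flush callback: if an asynchronous request is still pending, drain
 * the block layer so it has completed before the channel is flushed.
 */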
static void pmac_ide_flush(DBDMA_io *io)
{
    MACIOIDEState *m = io->opaque;

    if (m->aiocb) {
        blk_drain_all();
    }
}

/* PowerMac IDE memory IO */
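/*
 * The IDE registers are exposed through a 0x1000-byte MMIO window; bits 4..11
 * of the offset select the register ((addr & 0xFFF) >> 4).  Offset 0 is the
 * data port (16/32-bit accesses are byte-swapped on the way through),
 * offsets 1..7 are the task-file registers, and offsets 8 and 22 reach the
 * device control / alternate status register.
 */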
static void pmac_ide_writeb (void *opaque,
                             hwaddr addr, uint32_t val)
{
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    switch (addr) {
    case 1 ... 7:
        ide_ioport_write(&d->bus, addr, val);
        break;
    case 8:
    case 22:
        ide_cmd_write(&d->bus, 0, val);
        break;
    default:
        break;
    }
}

static uint32_t pmac_ide_readb (void *opaque, hwaddr addr)
{
    uint8_t retval;
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    switch (addr) {
    case 1 ... 7:
        retval = ide_ioport_read(&d->bus, addr);
        break;
    case 8:
    case 22:
        retval = ide_status_read(&d->bus, 0);
        break;
    default:
        retval = 0xFF;
        break;
    }
    return retval;
}

static void pmac_ide_writew (void *opaque,
                             hwaddr addr, uint32_t val)
{
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    val = bswap16(val);
    if (addr == 0) {
        ide_data_writew(&d->bus, 0, val);
    }
}

static uint32_t pmac_ide_readw (void *opaque, hwaddr addr)
{
    uint16_t retval;
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    if (addr == 0) {
        retval = ide_data_readw(&d->bus, 0);
    } else {
        retval = 0xFFFF;
    }
    retval = bswap16(retval);
    return retval;
}

static void pmac_ide_writel (void *opaque,
                             hwaddr addr, uint32_t val)
{
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    val = bswap32(val);
    if (addr == 0) {
        ide_data_writel(&d->bus, 0, val);
    }
}

static uint32_t pmac_ide_readl (void *opaque, hwaddr addr)
{
    uint32_t retval;
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    if (addr == 0) {
        retval = ide_data_readl(&d->bus, 0);
    } else {
        retval = 0xFFFFFFFF;
    }
    retval = bswap32(retval);
    return retval;
}

static const MemoryRegionOps pmac_ide_ops = {
    .old_mmio = {
        .write = {
            pmac_ide_writeb,
            pmac_ide_writew,
            pmac_ide_writel,
        },
        .read = {
            pmac_ide_readb,
            pmac_ide_readw,
            pmac_ide_readl,
        },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const VMStateDescription vmstate_pmac = {
    .name = "ide",
    .version_id = 3,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_IDE_BUS(bus, MACIOIDEState),
        VMSTATE_IDE_DRIVES(bus.ifs, MACIOIDEState),
        VMSTATE_END_OF_LIST()
    }
};

static void macio_ide_reset(DeviceState *dev)
{
    MACIOIDEState *d = MACIO_IDE(dev);

    ide_bus_reset(&d->bus);
}

static int ide_nop_int(IDEDMA *dma, int x)
{
    return 0;
}

static int32_t ide_nop_int32(IDEDMA *dma, int x)
{
    return 0;
}

static void ide_nop_restart(void *opaque, int x, RunState y)
{
}

static void ide_dbdma_start(IDEDMA *dma, IDEState *s,
                            BlockCompletionFunc *cb)
{
    MACIOIDEState *m = container_of(dma, MACIOIDEState, dma);

    MACIO_DPRINTF("\n");
    m->dma_active = true;
    DBDMA_kick(m->dbdma);
}
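
/*
 * The DBDMA engine drives the actual data movement, so only start_dma does
 * real work (marking the channel active and kicking the DBDMA controller);
 * the remaining IDEDMA hooks are no-ops.
 */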
static const IDEDMAOps dbdma_ops = {
    .start_dma      = ide_dbdma_start,
    .prepare_buf    = ide_nop_int32,
    .rw_buf         = ide_nop_int,
    .set_unit       = ide_nop_int,
    .restart_cb     = ide_nop_restart,
};

static void macio_ide_realizefn(DeviceState *dev, Error **errp)
{
    MACIOIDEState *s = MACIO_IDE(dev);

    ide_init2(&s->bus, s->irq);

    /* Register DMA callbacks */
    s->dma.ops = &dbdma_ops;
    s->bus.dma = &s->dma;
}

static void macio_ide_initfn(Object *obj)
{
    SysBusDevice *d = SYS_BUS_DEVICE(obj);
    MACIOIDEState *s = MACIO_IDE(obj);

    ide_bus_new(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2);
    memory_region_init_io(&s->mem, obj, &pmac_ide_ops, s, "pmac-ide", 0x1000);
    sysbus_init_mmio(d, &s->mem);
    sysbus_init_irq(d, &s->irq);
    sysbus_init_irq(d, &s->dma_irq);
}

static void macio_ide_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->realize = macio_ide_realizefn;
    dc->reset = macio_ide_reset;
    dc->vmsd = &vmstate_pmac;
}

static const TypeInfo macio_ide_type_info = {
    .name = TYPE_MACIO_IDE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(MACIOIDEState),
    .instance_init = macio_ide_initfn,
    .class_init = macio_ide_class_init,
};

static void macio_ide_register_types(void)
{
    type_register_static(&macio_ide_type_info);
}

/* hd_table must contain 2 block drivers */
void macio_ide_init_drives(MACIOIDEState *s, DriveInfo **hd_table)
{
    int i;

    for (i = 0; i < 2; i++) {
        if (hd_table[i]) {
            ide_create_drive(&s->bus, i, hd_table[i]);
        }
    }
}

void macio_ide_register_dma(MACIOIDEState *s, void *dbdma, int channel)
{
    s->dbdma = dbdma;
    DBDMA_register_channel(dbdma, channel, s->dma_irq,
                           pmac_ide_transfer, pmac_ide_flush, s);
}

type_init(macio_ide_register_types)