ide: simplify reset callbacks
[qemu/ar7.git] / hw / ide / macio.c
blobca39e3f9b6ea5dca4dcb08e70487e81864aa336c
1 /*
2 * QEMU IDE Emulation: MacIO support.
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2006 Openedhand Ltd.
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
25 #include "hw/hw.h"
26 #include "hw/ppc/mac.h"
27 #include "hw/ppc/mac_dbdma.h"
28 #include "block/block.h"
29 #include "sysemu/dma.h"
31 #include <hw/ide/internal.h>
/* debug MACIO */
// #define DEBUG_MACIO

#ifdef DEBUG_MACIO
static const int debug_macio = 1;
#else
static const int debug_macio = 0;
#endif

/* Compile-time-gated debug printf: the `if (debug_macio)` lets the
 * compiler type-check the format string even when debugging is off,
 * while still eliminating the call as dead code. */
#define MACIO_DPRINTF(fmt, ...) do { \
        if (debug_macio) { \
            printf(fmt , ## __VA_ARGS__); \
        } \
    } while (0)


/***********************************************************/
/* MacIO based PowerPC IDE */

/* DBDMA scatter/gather entries are sized in host pages of this size */
#define MACIO_PAGE_SIZE 4096
54 static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
56 DBDMA_io *io = opaque;
57 MACIOIDEState *m = io->opaque;
58 IDEState *s = idebus_active_if(&m->bus);
59 int unaligned;
61 if (ret < 0) {
62 m->aiocb = NULL;
63 qemu_sglist_destroy(&s->sg);
64 ide_atapi_io_error(s, ret);
65 io->remainder_len = 0;
66 goto done;
69 if (!m->dma_active) {
70 MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
71 s->nsector, io->len, s->status);
72 /* data not ready yet, wait for the channel to get restarted */
73 io->processing = false;
74 return;
77 MACIO_DPRINTF("io_buffer_size = %#x\n", s->io_buffer_size);
79 if (s->io_buffer_size > 0) {
80 m->aiocb = NULL;
81 qemu_sglist_destroy(&s->sg);
83 s->packet_transfer_size -= s->io_buffer_size;
85 s->io_buffer_index += s->io_buffer_size;
86 s->lba += s->io_buffer_index >> 11;
87 s->io_buffer_index &= 0x7ff;
90 s->io_buffer_size = MIN(io->len, s->packet_transfer_size);
92 MACIO_DPRINTF("remainder: %d io->len: %d size: %d\n", io->remainder_len,
93 io->len, s->packet_transfer_size);
94 if (io->remainder_len && io->len) {
95 /* guest wants the rest of its previous transfer */
96 int remainder_len = MIN(io->remainder_len, io->len);
98 MACIO_DPRINTF("copying remainder %d bytes\n", remainder_len);
100 cpu_physical_memory_write(io->addr, io->remainder + 0x200 -
101 remainder_len, remainder_len);
103 io->addr += remainder_len;
104 io->len -= remainder_len;
105 s->io_buffer_size = remainder_len;
106 io->remainder_len -= remainder_len;
107 /* treat remainder as individual transfer, start again */
108 qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
109 &address_space_memory);
110 pmac_ide_atapi_transfer_cb(opaque, 0);
111 return;
114 if (!s->packet_transfer_size) {
115 MACIO_DPRINTF("end of transfer\n");
116 ide_atapi_cmd_ok(s);
117 m->dma_active = false;
120 if (io->len == 0) {
121 MACIO_DPRINTF("end of DMA\n");
122 goto done;
125 /* launch next transfer */
127 /* handle unaligned accesses first, get them over with and only do the
128 remaining bulk transfer using our async DMA helpers */
129 unaligned = io->len & 0x1ff;
130 if (unaligned) {
131 int sector_num = (s->lba << 2) + (s->io_buffer_index >> 9);
132 int nsector = io->len >> 9;
134 MACIO_DPRINTF("precopying unaligned %d bytes to %#" HWADDR_PRIx "\n",
135 unaligned, io->addr + io->len - unaligned);
137 bdrv_read(s->bs, sector_num + nsector, io->remainder, 1);
138 cpu_physical_memory_write(io->addr + io->len - unaligned,
139 io->remainder, unaligned);
141 io->len -= unaligned;
144 MACIO_DPRINTF("io->len = %#x\n", io->len);
146 qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
147 &address_space_memory);
148 qemu_sglist_add(&s->sg, io->addr, io->len);
149 io->addr += s->io_buffer_size;
150 io->remainder_len = MIN(s->packet_transfer_size - s->io_buffer_size,
151 (0x200 - unaligned) & 0x1ff);
152 MACIO_DPRINTF("set remainder to: %d\n", io->remainder_len);
154 /* We would read no data from the block layer, thus not get a callback.
155 Just fake completion manually. */
156 if (!io->len) {
157 pmac_ide_atapi_transfer_cb(opaque, 0);
158 return;
161 io->len = 0;
163 MACIO_DPRINTF("sector_num=%d size=%d, cmd_cmd=%d\n",
164 (s->lba << 2) + (s->io_buffer_index >> 9),
165 s->packet_transfer_size, s->dma_cmd);
167 m->aiocb = dma_bdrv_read(s->bs, &s->sg,
168 (int64_t)(s->lba << 2) + (s->io_buffer_index >> 9),
169 pmac_ide_atapi_transfer_cb, io);
170 return;
172 done:
173 MACIO_DPRINTF("done DMA\n");
174 bdrv_acct_done(s->bs, &s->acct);
175 io->dma_end(opaque);
178 static void pmac_ide_transfer_cb(void *opaque, int ret)
180 DBDMA_io *io = opaque;
181 MACIOIDEState *m = io->opaque;
182 IDEState *s = idebus_active_if(&m->bus);
183 int n = 0;
184 int64_t sector_num;
185 int unaligned;
187 if (ret < 0) {
188 MACIO_DPRINTF("DMA error\n");
189 m->aiocb = NULL;
190 qemu_sglist_destroy(&s->sg);
191 ide_dma_error(s);
192 io->remainder_len = 0;
193 goto done;
196 if (--io->requests) {
197 /* More requests still in flight */
198 return;
201 if (!m->dma_active) {
202 MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
203 s->nsector, io->len, s->status);
204 /* data not ready yet, wait for the channel to get restarted */
205 io->processing = false;
206 return;
209 sector_num = ide_get_sector(s);
210 MACIO_DPRINTF("io_buffer_size = %#x\n", s->io_buffer_size);
211 if (s->io_buffer_size > 0) {
212 m->aiocb = NULL;
213 qemu_sglist_destroy(&s->sg);
214 n = (s->io_buffer_size + 0x1ff) >> 9;
215 sector_num += n;
216 ide_set_sector(s, sector_num);
217 s->nsector -= n;
220 if (io->finish_remain_read) {
221 /* Finish a stale read from the last iteration */
222 io->finish_remain_read = false;
223 cpu_physical_memory_write(io->finish_addr, io->remainder,
224 io->finish_len);
227 MACIO_DPRINTF("remainder: %d io->len: %d nsector: %d "
228 "sector_num: %" PRId64 "\n",
229 io->remainder_len, io->len, s->nsector, sector_num);
230 if (io->remainder_len && io->len) {
231 /* guest wants the rest of its previous transfer */
232 int remainder_len = MIN(io->remainder_len, io->len);
233 uint8_t *p = &io->remainder[0x200 - remainder_len];
235 MACIO_DPRINTF("copying remainder %d bytes at %#" HWADDR_PRIx "\n",
236 remainder_len, io->addr);
238 switch (s->dma_cmd) {
239 case IDE_DMA_READ:
240 cpu_physical_memory_write(io->addr, p, remainder_len);
241 break;
242 case IDE_DMA_WRITE:
243 cpu_physical_memory_read(io->addr, p, remainder_len);
244 break;
245 case IDE_DMA_TRIM:
246 break;
248 io->addr += remainder_len;
249 io->len -= remainder_len;
250 io->remainder_len -= remainder_len;
252 if (s->dma_cmd == IDE_DMA_WRITE && !io->remainder_len) {
253 io->requests++;
254 qemu_iovec_reset(&io->iov);
255 qemu_iovec_add(&io->iov, io->remainder, 0x200);
257 m->aiocb = bdrv_aio_writev(s->bs, sector_num - 1, &io->iov, 1,
258 pmac_ide_transfer_cb, io);
262 if (s->nsector == 0 && !io->remainder_len) {
263 MACIO_DPRINTF("end of transfer\n");
264 s->status = READY_STAT | SEEK_STAT;
265 ide_set_irq(s->bus);
266 m->dma_active = false;
269 if (io->len == 0) {
270 MACIO_DPRINTF("end of DMA\n");
271 goto done;
274 /* launch next transfer */
276 s->io_buffer_index = 0;
277 s->io_buffer_size = MIN(io->len, s->nsector * 512);
279 /* handle unaligned accesses first, get them over with and only do the
280 remaining bulk transfer using our async DMA helpers */
281 unaligned = io->len & 0x1ff;
282 if (unaligned) {
283 int nsector = io->len >> 9;
285 MACIO_DPRINTF("precopying unaligned %d bytes to %#" HWADDR_PRIx "\n",
286 unaligned, io->addr + io->len - unaligned);
288 switch (s->dma_cmd) {
289 case IDE_DMA_READ:
290 io->requests++;
291 io->finish_addr = io->addr + io->len - unaligned;
292 io->finish_len = unaligned;
293 io->finish_remain_read = true;
294 qemu_iovec_reset(&io->iov);
295 qemu_iovec_add(&io->iov, io->remainder, 0x200);
297 m->aiocb = bdrv_aio_readv(s->bs, sector_num + nsector, &io->iov, 1,
298 pmac_ide_transfer_cb, io);
299 break;
300 case IDE_DMA_WRITE:
301 /* cache the contents in our io struct */
302 cpu_physical_memory_read(io->addr + io->len - unaligned,
303 io->remainder + io->remainder_len,
304 unaligned);
305 break;
306 case IDE_DMA_TRIM:
307 break;
311 MACIO_DPRINTF("io->len = %#x\n", io->len);
313 qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
314 &address_space_memory);
315 qemu_sglist_add(&s->sg, io->addr, io->len);
316 io->addr += io->len + unaligned;
317 io->remainder_len = (0x200 - unaligned) & 0x1ff;
318 MACIO_DPRINTF("set remainder to: %d\n", io->remainder_len);
320 /* Only subsector reads happening */
321 if (!io->len) {
322 if (!io->requests) {
323 io->requests++;
324 pmac_ide_transfer_cb(opaque, ret);
326 return;
329 io->len = 0;
331 MACIO_DPRINTF("sector_num=%" PRId64 " n=%d, nsector=%d, cmd_cmd=%d\n",
332 sector_num, n, s->nsector, s->dma_cmd);
334 switch (s->dma_cmd) {
335 case IDE_DMA_READ:
336 m->aiocb = dma_bdrv_read(s->bs, &s->sg, sector_num,
337 pmac_ide_transfer_cb, io);
338 break;
339 case IDE_DMA_WRITE:
340 m->aiocb = dma_bdrv_write(s->bs, &s->sg, sector_num,
341 pmac_ide_transfer_cb, io);
342 break;
343 case IDE_DMA_TRIM:
344 m->aiocb = dma_bdrv_io(s->bs, &s->sg, sector_num,
345 ide_issue_trim, pmac_ide_transfer_cb, io,
346 DMA_DIRECTION_TO_DEVICE);
347 break;
350 io->requests++;
351 return;
353 done:
354 if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
355 bdrv_acct_done(s->bs, &s->acct);
357 io->dma_end(io);
360 static void pmac_ide_transfer(DBDMA_io *io)
362 MACIOIDEState *m = io->opaque;
363 IDEState *s = idebus_active_if(&m->bus);
365 MACIO_DPRINTF("\n");
367 s->io_buffer_size = 0;
368 if (s->drive_kind == IDE_CD) {
370 /* Handle non-block ATAPI DMA transfers */
371 if (s->lba == -1) {
372 s->io_buffer_size = MIN(io->len, s->packet_transfer_size);
373 bdrv_acct_start(s->bs, &s->acct, s->io_buffer_size,
374 BDRV_ACCT_READ);
375 MACIO_DPRINTF("non-block ATAPI DMA transfer size: %d\n",
376 s->io_buffer_size);
378 /* Copy ATAPI buffer directly to RAM and finish */
379 cpu_physical_memory_write(io->addr, s->io_buffer,
380 s->io_buffer_size);
381 ide_atapi_cmd_ok(s);
382 m->dma_active = false;
384 MACIO_DPRINTF("end of non-block ATAPI DMA transfer\n");
385 bdrv_acct_done(s->bs, &s->acct);
386 io->dma_end(io);
387 return;
390 bdrv_acct_start(s->bs, &s->acct, io->len, BDRV_ACCT_READ);
391 pmac_ide_atapi_transfer_cb(io, 0);
392 return;
395 switch (s->dma_cmd) {
396 case IDE_DMA_READ:
397 bdrv_acct_start(s->bs, &s->acct, io->len, BDRV_ACCT_READ);
398 break;
399 case IDE_DMA_WRITE:
400 bdrv_acct_start(s->bs, &s->acct, io->len, BDRV_ACCT_WRITE);
401 break;
402 default:
403 break;
406 io->requests++;
407 pmac_ide_transfer_cb(io, 0);
410 static void pmac_ide_flush(DBDMA_io *io)
412 MACIOIDEState *m = io->opaque;
414 if (m->aiocb) {
415 bdrv_drain_all();
419 /* PowerMac IDE memory IO */
420 static void pmac_ide_writeb (void *opaque,
421 hwaddr addr, uint32_t val)
423 MACIOIDEState *d = opaque;
425 addr = (addr & 0xFFF) >> 4;
426 switch (addr) {
427 case 1 ... 7:
428 ide_ioport_write(&d->bus, addr, val);
429 break;
430 case 8:
431 case 22:
432 ide_cmd_write(&d->bus, 0, val);
433 break;
434 default:
435 break;
439 static uint32_t pmac_ide_readb (void *opaque,hwaddr addr)
441 uint8_t retval;
442 MACIOIDEState *d = opaque;
444 addr = (addr & 0xFFF) >> 4;
445 switch (addr) {
446 case 1 ... 7:
447 retval = ide_ioport_read(&d->bus, addr);
448 break;
449 case 8:
450 case 22:
451 retval = ide_status_read(&d->bus, 0);
452 break;
453 default:
454 retval = 0xFF;
455 break;
457 return retval;
460 static void pmac_ide_writew (void *opaque,
461 hwaddr addr, uint32_t val)
463 MACIOIDEState *d = opaque;
465 addr = (addr & 0xFFF) >> 4;
466 val = bswap16(val);
467 if (addr == 0) {
468 ide_data_writew(&d->bus, 0, val);
472 static uint32_t pmac_ide_readw (void *opaque,hwaddr addr)
474 uint16_t retval;
475 MACIOIDEState *d = opaque;
477 addr = (addr & 0xFFF) >> 4;
478 if (addr == 0) {
479 retval = ide_data_readw(&d->bus, 0);
480 } else {
481 retval = 0xFFFF;
483 retval = bswap16(retval);
484 return retval;
487 static void pmac_ide_writel (void *opaque,
488 hwaddr addr, uint32_t val)
490 MACIOIDEState *d = opaque;
492 addr = (addr & 0xFFF) >> 4;
493 val = bswap32(val);
494 if (addr == 0) {
495 ide_data_writel(&d->bus, 0, val);
499 static uint32_t pmac_ide_readl (void *opaque,hwaddr addr)
501 uint32_t retval;
502 MACIOIDEState *d = opaque;
504 addr = (addr & 0xFFF) >> 4;
505 if (addr == 0) {
506 retval = ide_data_readl(&d->bus, 0);
507 } else {
508 retval = 0xFFFFFFFF;
510 retval = bswap32(retval);
511 return retval;
514 static const MemoryRegionOps pmac_ide_ops = {
515 .old_mmio = {
516 .write = {
517 pmac_ide_writeb,
518 pmac_ide_writew,
519 pmac_ide_writel,
521 .read = {
522 pmac_ide_readb,
523 pmac_ide_readw,
524 pmac_ide_readl,
527 .endianness = DEVICE_NATIVE_ENDIAN,
530 static const VMStateDescription vmstate_pmac = {
531 .name = "ide",
532 .version_id = 3,
533 .minimum_version_id = 0,
534 .fields = (VMStateField[]) {
535 VMSTATE_IDE_BUS(bus, MACIOIDEState),
536 VMSTATE_IDE_DRIVES(bus.ifs, MACIOIDEState),
537 VMSTATE_END_OF_LIST()
541 static void macio_ide_reset(DeviceState *dev)
543 MACIOIDEState *d = MACIO_IDE(dev);
545 ide_bus_reset(&d->bus);
548 static int ide_nop(IDEDMA *dma)
550 return 0;
553 static int ide_nop_int(IDEDMA *dma, int x)
555 return 0;
558 static void ide_nop_restart(void *opaque, int x, RunState y)
562 static void ide_dbdma_start(IDEDMA *dma, IDEState *s,
563 BlockDriverCompletionFunc *cb)
565 MACIOIDEState *m = container_of(dma, MACIOIDEState, dma);
567 MACIO_DPRINTF("\n");
568 m->dma_active = true;
569 DBDMA_kick(m->dbdma);
572 static const IDEDMAOps dbdma_ops = {
573 .start_dma = ide_dbdma_start,
574 .start_transfer = ide_nop,
575 .prepare_buf = ide_nop_int,
576 .rw_buf = ide_nop_int,
577 .set_unit = ide_nop_int,
578 .add_status = ide_nop_int,
579 .set_inactive = ide_nop,
580 .restart_cb = ide_nop_restart,
583 static void macio_ide_realizefn(DeviceState *dev, Error **errp)
585 MACIOIDEState *s = MACIO_IDE(dev);
587 ide_init2(&s->bus, s->irq);
589 /* Register DMA callbacks */
590 s->dma.ops = &dbdma_ops;
591 s->bus.dma = &s->dma;
594 static void macio_ide_initfn(Object *obj)
596 SysBusDevice *d = SYS_BUS_DEVICE(obj);
597 MACIOIDEState *s = MACIO_IDE(obj);
599 ide_bus_new(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2);
600 memory_region_init_io(&s->mem, obj, &pmac_ide_ops, s, "pmac-ide", 0x1000);
601 sysbus_init_mmio(d, &s->mem);
602 sysbus_init_irq(d, &s->irq);
603 sysbus_init_irq(d, &s->dma_irq);
606 static void macio_ide_class_init(ObjectClass *oc, void *data)
608 DeviceClass *dc = DEVICE_CLASS(oc);
610 dc->realize = macio_ide_realizefn;
611 dc->reset = macio_ide_reset;
612 dc->vmsd = &vmstate_pmac;
615 static const TypeInfo macio_ide_type_info = {
616 .name = TYPE_MACIO_IDE,
617 .parent = TYPE_SYS_BUS_DEVICE,
618 .instance_size = sizeof(MACIOIDEState),
619 .instance_init = macio_ide_initfn,
620 .class_init = macio_ide_class_init,
623 static void macio_ide_register_types(void)
625 type_register_static(&macio_ide_type_info);
628 /* hd_table must contain 2 block drivers */
629 void macio_ide_init_drives(MACIOIDEState *s, DriveInfo **hd_table)
631 int i;
633 for (i = 0; i < 2; i++) {
634 if (hd_table[i]) {
635 ide_create_drive(&s->bus, i, hd_table[i]);
640 void macio_ide_register_dma(MACIOIDEState *s, void *dbdma, int channel)
642 s->dbdma = dbdma;
643 DBDMA_register_channel(dbdma, channel, s->dma_irq,
644 pmac_ide_transfer, pmac_ide_flush, s);
647 type_init(macio_ide_register_types)