2 * QEMU IDE Emulation: MacIO support.
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2006 Openedhand Ltd.
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 #include "hw/ppc/mac.h"
27 #include "hw/ppc/mac_dbdma.h"
28 #include "block/block.h"
29 #include "sysemu/dma.h"
31 #include <hw/ide/internal.h>
// #define DEBUG_MACIO

/*
 * Keep the debug flag as a real (const) variable rather than compiling the
 * printf calls out entirely: the format strings stay type-checked in both
 * configurations and the compiler removes the dead branch when it is 0.
 */
#ifdef DEBUG_MACIO
static const int debug_macio = 1;
#else
static const int debug_macio = 0;
#endif

#define MACIO_DPRINTF(fmt, ...) do { \
        if (debug_macio) { \
            printf(fmt , ## __VA_ARGS__); \
        } \
    } while (0)
49 /***********************************************************/
50 /* MacIO based PowerPC IDE */
52 #define MACIO_PAGE_SIZE 4096
54 static void pmac_ide_atapi_transfer_cb(void *opaque
, int ret
)
56 DBDMA_io
*io
= opaque
;
57 MACIOIDEState
*m
= io
->opaque
;
58 IDEState
*s
= idebus_active_if(&m
->bus
);
63 qemu_sglist_destroy(&s
->sg
);
64 ide_atapi_io_error(s
, ret
);
65 io
->remainder_len
= 0;
70 MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
71 s
->nsector
, io
->len
, s
->status
);
72 /* data not ready yet, wait for the channel to get restarted */
73 io
->processing
= false;
77 MACIO_DPRINTF("io_buffer_size = %#x\n", s
->io_buffer_size
);
79 if (s
->io_buffer_size
> 0) {
81 qemu_sglist_destroy(&s
->sg
);
83 s
->packet_transfer_size
-= s
->io_buffer_size
;
85 s
->io_buffer_index
+= s
->io_buffer_size
;
86 s
->lba
+= s
->io_buffer_index
>> 11;
87 s
->io_buffer_index
&= 0x7ff;
90 s
->io_buffer_size
= MIN(io
->len
, s
->packet_transfer_size
);
92 MACIO_DPRINTF("remainder: %d io->len: %d size: %d\n", io
->remainder_len
,
93 io
->len
, s
->packet_transfer_size
);
94 if (io
->remainder_len
&& io
->len
) {
95 /* guest wants the rest of its previous transfer */
96 int remainder_len
= MIN(io
->remainder_len
, io
->len
);
98 MACIO_DPRINTF("copying remainder %d bytes\n", remainder_len
);
100 cpu_physical_memory_write(io
->addr
, io
->remainder
+ 0x200 -
101 remainder_len
, remainder_len
);
103 io
->addr
+= remainder_len
;
104 io
->len
-= remainder_len
;
105 s
->io_buffer_size
= remainder_len
;
106 io
->remainder_len
-= remainder_len
;
107 /* treat remainder as individual transfer, start again */
108 qemu_sglist_init(&s
->sg
, DEVICE(m
), io
->len
/ MACIO_PAGE_SIZE
+ 1,
109 &address_space_memory
);
110 pmac_ide_atapi_transfer_cb(opaque
, 0);
114 if (!s
->packet_transfer_size
) {
115 MACIO_DPRINTF("end of transfer\n");
117 m
->dma_active
= false;
121 MACIO_DPRINTF("end of DMA\n");
125 /* launch next transfer */
127 /* handle unaligned accesses first, get them over with and only do the
128 remaining bulk transfer using our async DMA helpers */
129 unaligned
= io
->len
& 0x1ff;
131 int sector_num
= (s
->lba
<< 2) + (s
->io_buffer_index
>> 9);
132 int nsector
= io
->len
>> 9;
134 MACIO_DPRINTF("precopying unaligned %d bytes to %#" HWADDR_PRIx
"\n",
135 unaligned
, io
->addr
+ io
->len
- unaligned
);
137 bdrv_read(s
->bs
, sector_num
+ nsector
, io
->remainder
, 1);
138 cpu_physical_memory_write(io
->addr
+ io
->len
- unaligned
,
139 io
->remainder
, unaligned
);
141 io
->len
-= unaligned
;
144 MACIO_DPRINTF("io->len = %#x\n", io
->len
);
146 qemu_sglist_init(&s
->sg
, DEVICE(m
), io
->len
/ MACIO_PAGE_SIZE
+ 1,
147 &address_space_memory
);
148 qemu_sglist_add(&s
->sg
, io
->addr
, io
->len
);
149 io
->addr
+= s
->io_buffer_size
;
150 io
->remainder_len
= MIN(s
->packet_transfer_size
- s
->io_buffer_size
,
151 (0x200 - unaligned
) & 0x1ff);
152 MACIO_DPRINTF("set remainder to: %d\n", io
->remainder_len
);
154 /* We would read no data from the block layer, thus not get a callback.
155 Just fake completion manually. */
157 pmac_ide_atapi_transfer_cb(opaque
, 0);
163 MACIO_DPRINTF("sector_num=%d size=%d, cmd_cmd=%d\n",
164 (s
->lba
<< 2) + (s
->io_buffer_index
>> 9),
165 s
->packet_transfer_size
, s
->dma_cmd
);
167 m
->aiocb
= dma_bdrv_read(s
->bs
, &s
->sg
,
168 (int64_t)(s
->lba
<< 2) + (s
->io_buffer_index
>> 9),
169 pmac_ide_atapi_transfer_cb
, io
);
173 MACIO_DPRINTF("done DMA\n");
174 block_acct_done(bdrv_get_stats(s
->bs
), &s
->acct
);
178 static void pmac_ide_transfer_cb(void *opaque
, int ret
)
180 DBDMA_io
*io
= opaque
;
181 MACIOIDEState
*m
= io
->opaque
;
182 IDEState
*s
= idebus_active_if(&m
->bus
);
188 MACIO_DPRINTF("DMA error\n");
190 qemu_sglist_destroy(&s
->sg
);
192 io
->remainder_len
= 0;
196 if (--io
->requests
) {
197 /* More requests still in flight */
201 if (!m
->dma_active
) {
202 MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
203 s
->nsector
, io
->len
, s
->status
);
204 /* data not ready yet, wait for the channel to get restarted */
205 io
->processing
= false;
209 sector_num
= ide_get_sector(s
);
210 MACIO_DPRINTF("io_buffer_size = %#x\n", s
->io_buffer_size
);
211 if (s
->io_buffer_size
> 0) {
213 qemu_sglist_destroy(&s
->sg
);
214 n
= (s
->io_buffer_size
+ 0x1ff) >> 9;
216 ide_set_sector(s
, sector_num
);
220 if (io
->finish_remain_read
) {
221 /* Finish a stale read from the last iteration */
222 io
->finish_remain_read
= false;
223 cpu_physical_memory_write(io
->finish_addr
, io
->remainder
,
227 MACIO_DPRINTF("remainder: %d io->len: %d nsector: %d "
228 "sector_num: %" PRId64
"\n",
229 io
->remainder_len
, io
->len
, s
->nsector
, sector_num
);
230 if (io
->remainder_len
&& io
->len
) {
231 /* guest wants the rest of its previous transfer */
232 int remainder_len
= MIN(io
->remainder_len
, io
->len
);
233 uint8_t *p
= &io
->remainder
[0x200 - remainder_len
];
235 MACIO_DPRINTF("copying remainder %d bytes at %#" HWADDR_PRIx
"\n",
236 remainder_len
, io
->addr
);
238 switch (s
->dma_cmd
) {
240 cpu_physical_memory_write(io
->addr
, p
, remainder_len
);
243 cpu_physical_memory_read(io
->addr
, p
, remainder_len
);
248 io
->addr
+= remainder_len
;
249 io
->len
-= remainder_len
;
250 io
->remainder_len
-= remainder_len
;
252 if (s
->dma_cmd
== IDE_DMA_WRITE
&& !io
->remainder_len
) {
254 qemu_iovec_reset(&io
->iov
);
255 qemu_iovec_add(&io
->iov
, io
->remainder
, 0x200);
257 m
->aiocb
= bdrv_aio_writev(s
->bs
, sector_num
- 1, &io
->iov
, 1,
258 pmac_ide_transfer_cb
, io
);
262 if (s
->nsector
== 0 && !io
->remainder_len
) {
263 MACIO_DPRINTF("end of transfer\n");
264 s
->status
= READY_STAT
| SEEK_STAT
;
266 m
->dma_active
= false;
270 MACIO_DPRINTF("end of DMA\n");
274 /* launch next transfer */
276 s
->io_buffer_index
= 0;
277 s
->io_buffer_size
= MIN(io
->len
, s
->nsector
* 512);
279 /* handle unaligned accesses first, get them over with and only do the
280 remaining bulk transfer using our async DMA helpers */
281 unaligned
= io
->len
& 0x1ff;
283 int nsector
= io
->len
>> 9;
285 MACIO_DPRINTF("precopying unaligned %d bytes to %#" HWADDR_PRIx
"\n",
286 unaligned
, io
->addr
+ io
->len
- unaligned
);
288 switch (s
->dma_cmd
) {
291 io
->finish_addr
= io
->addr
+ io
->len
- unaligned
;
292 io
->finish_len
= unaligned
;
293 io
->finish_remain_read
= true;
294 qemu_iovec_reset(&io
->iov
);
295 qemu_iovec_add(&io
->iov
, io
->remainder
, 0x200);
297 m
->aiocb
= bdrv_aio_readv(s
->bs
, sector_num
+ nsector
, &io
->iov
, 1,
298 pmac_ide_transfer_cb
, io
);
301 /* cache the contents in our io struct */
302 cpu_physical_memory_read(io
->addr
+ io
->len
- unaligned
,
303 io
->remainder
+ io
->remainder_len
,
311 MACIO_DPRINTF("io->len = %#x\n", io
->len
);
313 qemu_sglist_init(&s
->sg
, DEVICE(m
), io
->len
/ MACIO_PAGE_SIZE
+ 1,
314 &address_space_memory
);
315 qemu_sglist_add(&s
->sg
, io
->addr
, io
->len
);
316 io
->addr
+= io
->len
+ unaligned
;
317 io
->remainder_len
= (0x200 - unaligned
) & 0x1ff;
318 MACIO_DPRINTF("set remainder to: %d\n", io
->remainder_len
);
320 /* Only subsector reads happening */
324 pmac_ide_transfer_cb(opaque
, ret
);
331 MACIO_DPRINTF("sector_num=%" PRId64
" n=%d, nsector=%d, cmd_cmd=%d\n",
332 sector_num
, n
, s
->nsector
, s
->dma_cmd
);
334 switch (s
->dma_cmd
) {
336 m
->aiocb
= dma_bdrv_read(s
->bs
, &s
->sg
, sector_num
,
337 pmac_ide_transfer_cb
, io
);
340 m
->aiocb
= dma_bdrv_write(s
->bs
, &s
->sg
, sector_num
,
341 pmac_ide_transfer_cb
, io
);
344 m
->aiocb
= dma_bdrv_io(s
->bs
, &s
->sg
, sector_num
,
345 ide_issue_trim
, pmac_ide_transfer_cb
, io
,
346 DMA_DIRECTION_TO_DEVICE
);
354 if (s
->dma_cmd
== IDE_DMA_READ
|| s
->dma_cmd
== IDE_DMA_WRITE
) {
355 block_acct_done(bdrv_get_stats(s
->bs
), &s
->acct
);
360 static void pmac_ide_transfer(DBDMA_io
*io
)
362 MACIOIDEState
*m
= io
->opaque
;
363 IDEState
*s
= idebus_active_if(&m
->bus
);
367 s
->io_buffer_size
= 0;
368 if (s
->drive_kind
== IDE_CD
) {
370 /* Handle non-block ATAPI DMA transfers */
372 s
->io_buffer_size
= MIN(io
->len
, s
->packet_transfer_size
);
373 block_acct_start(bdrv_get_stats(s
->bs
), &s
->acct
, s
->io_buffer_size
,
375 MACIO_DPRINTF("non-block ATAPI DMA transfer size: %d\n",
378 /* Copy ATAPI buffer directly to RAM and finish */
379 cpu_physical_memory_write(io
->addr
, s
->io_buffer
,
382 m
->dma_active
= false;
384 MACIO_DPRINTF("end of non-block ATAPI DMA transfer\n");
385 block_acct_done(bdrv_get_stats(s
->bs
), &s
->acct
);
390 block_acct_start(bdrv_get_stats(s
->bs
), &s
->acct
, io
->len
,
392 pmac_ide_atapi_transfer_cb(io
, 0);
396 switch (s
->dma_cmd
) {
398 block_acct_start(bdrv_get_stats(s
->bs
), &s
->acct
, io
->len
,
402 block_acct_start(bdrv_get_stats(s
->bs
), &s
->acct
, io
->len
,
410 pmac_ide_transfer_cb(io
, 0);
413 static void pmac_ide_flush(DBDMA_io
*io
)
415 MACIOIDEState
*m
= io
->opaque
;
422 /* PowerMac IDE memory IO */
423 static void pmac_ide_writeb (void *opaque
,
424 hwaddr addr
, uint32_t val
)
426 MACIOIDEState
*d
= opaque
;
428 addr
= (addr
& 0xFFF) >> 4;
431 ide_ioport_write(&d
->bus
, addr
, val
);
435 ide_cmd_write(&d
->bus
, 0, val
);
442 static uint32_t pmac_ide_readb (void *opaque
,hwaddr addr
)
445 MACIOIDEState
*d
= opaque
;
447 addr
= (addr
& 0xFFF) >> 4;
450 retval
= ide_ioport_read(&d
->bus
, addr
);
454 retval
= ide_status_read(&d
->bus
, 0);
463 static void pmac_ide_writew (void *opaque
,
464 hwaddr addr
, uint32_t val
)
466 MACIOIDEState
*d
= opaque
;
468 addr
= (addr
& 0xFFF) >> 4;
471 ide_data_writew(&d
->bus
, 0, val
);
475 static uint32_t pmac_ide_readw (void *opaque
,hwaddr addr
)
478 MACIOIDEState
*d
= opaque
;
480 addr
= (addr
& 0xFFF) >> 4;
482 retval
= ide_data_readw(&d
->bus
, 0);
486 retval
= bswap16(retval
);
490 static void pmac_ide_writel (void *opaque
,
491 hwaddr addr
, uint32_t val
)
493 MACIOIDEState
*d
= opaque
;
495 addr
= (addr
& 0xFFF) >> 4;
498 ide_data_writel(&d
->bus
, 0, val
);
502 static uint32_t pmac_ide_readl (void *opaque
,hwaddr addr
)
505 MACIOIDEState
*d
= opaque
;
507 addr
= (addr
& 0xFFF) >> 4;
509 retval
= ide_data_readl(&d
->bus
, 0);
513 retval
= bswap32(retval
);
517 static const MemoryRegionOps pmac_ide_ops
= {
530 .endianness
= DEVICE_NATIVE_ENDIAN
,
533 static const VMStateDescription vmstate_pmac
= {
536 .minimum_version_id
= 0,
537 .fields
= (VMStateField
[]) {
538 VMSTATE_IDE_BUS(bus
, MACIOIDEState
),
539 VMSTATE_IDE_DRIVES(bus
.ifs
, MACIOIDEState
),
540 VMSTATE_END_OF_LIST()
544 static void macio_ide_reset(DeviceState
*dev
)
546 MACIOIDEState
*d
= MACIO_IDE(dev
);
548 ide_bus_reset(&d
->bus
);
551 static int ide_nop_int(IDEDMA
*dma
, int x
)
556 static void ide_nop_restart(void *opaque
, int x
, RunState y
)
560 static void ide_dbdma_start(IDEDMA
*dma
, IDEState
*s
,
561 BlockDriverCompletionFunc
*cb
)
563 MACIOIDEState
*m
= container_of(dma
, MACIOIDEState
, dma
);
566 m
->dma_active
= true;
567 DBDMA_kick(m
->dbdma
);
570 static const IDEDMAOps dbdma_ops
= {
571 .start_dma
= ide_dbdma_start
,
572 .prepare_buf
= ide_nop_int
,
573 .rw_buf
= ide_nop_int
,
574 .set_unit
= ide_nop_int
,
575 .restart_cb
= ide_nop_restart
,
578 static void macio_ide_realizefn(DeviceState
*dev
, Error
**errp
)
580 MACIOIDEState
*s
= MACIO_IDE(dev
);
582 ide_init2(&s
->bus
, s
->irq
);
584 /* Register DMA callbacks */
585 s
->dma
.ops
= &dbdma_ops
;
586 s
->bus
.dma
= &s
->dma
;
589 static void macio_ide_initfn(Object
*obj
)
591 SysBusDevice
*d
= SYS_BUS_DEVICE(obj
);
592 MACIOIDEState
*s
= MACIO_IDE(obj
);
594 ide_bus_new(&s
->bus
, sizeof(s
->bus
), DEVICE(obj
), 0, 2);
595 memory_region_init_io(&s
->mem
, obj
, &pmac_ide_ops
, s
, "pmac-ide", 0x1000);
596 sysbus_init_mmio(d
, &s
->mem
);
597 sysbus_init_irq(d
, &s
->irq
);
598 sysbus_init_irq(d
, &s
->dma_irq
);
601 static void macio_ide_class_init(ObjectClass
*oc
, void *data
)
603 DeviceClass
*dc
= DEVICE_CLASS(oc
);
605 dc
->realize
= macio_ide_realizefn
;
606 dc
->reset
= macio_ide_reset
;
607 dc
->vmsd
= &vmstate_pmac
;
610 static const TypeInfo macio_ide_type_info
= {
611 .name
= TYPE_MACIO_IDE
,
612 .parent
= TYPE_SYS_BUS_DEVICE
,
613 .instance_size
= sizeof(MACIOIDEState
),
614 .instance_init
= macio_ide_initfn
,
615 .class_init
= macio_ide_class_init
,
618 static void macio_ide_register_types(void)
620 type_register_static(&macio_ide_type_info
);
623 /* hd_table must contain 2 block drivers */
624 void macio_ide_init_drives(MACIOIDEState
*s
, DriveInfo
**hd_table
)
628 for (i
= 0; i
< 2; i
++) {
630 ide_create_drive(&s
->bus
, i
, hd_table
[i
]);
635 void macio_ide_register_dma(MACIOIDEState
*s
, void *dbdma
, int channel
)
638 DBDMA_register_channel(dbdma
, channel
, s
->dma_irq
,
639 pmac_ide_transfer
, pmac_ide_flush
, s
);
/* Run macio_ide_register_types() at QEMU module-load time */
type_init(macio_ide_register_types
)