 * Copyright (c) 2010 qiaochong@loongson.cn
 * Copyright (c) 2010 Roland Elek <elek.roland@gmail.com>
 * Copyright (c) 2010 Sebastian Herbszt <herbszt@gmx.de>
 * Copyright (c) 2010 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
#include <hw/pci/msi.h>
#include <hw/i386/pc.h>
#include <hw/pci/pci.h>
#include <hw/sysbus.h>

#include "monitor/monitor.h"
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"

#include <hw/ide/pci.h>
#include <hw/ide/ahci.h>
#define DPRINTF(port, fmt, ...) \
    fprintf(stderr, "ahci: %s: [%d] ", __func__, port); \
    fprintf(stderr, fmt, ## __VA_ARGS__); \
static void check_cmd(AHCIState *s, int port);
static int handle_cmd(AHCIState *s, int port, int slot);
static void ahci_reset_port(AHCIState *s, int port);
static void ahci_write_fis_d2h(AHCIDevice *ad, uint8_t *cmd_fis);
static void ahci_init_d2h(AHCIDevice *ad);
static int32_t ahci_dma_prepare_buf(IDEDMA *dma, int is_write);
static void ahci_commit_buf(IDEDMA *dma, uint32_t tx_bytes);
static bool ahci_map_clb_address(AHCIDevice *ad);
static bool ahci_map_fis_address(AHCIDevice *ad);
static void ahci_unmap_clb_address(AHCIDevice *ad);
static void ahci_unmap_fis_address(AHCIDevice *ad);
static uint32_t ahci_port_read(AHCIState *s, int port, int offset)

    pr = &s->dev[port].port_regs;

    case PORT_LST_ADDR_HI:
        val = pr->lst_addr_hi;

    case PORT_FIS_ADDR_HI:
        val = pr->fis_addr_hi;

        if (s->dev[port].port.ifs[0].blk) {
            val = SATA_SCR_SSTATUS_DET_DEV_PRESENT_PHY_UP |
                  SATA_SCR_SSTATUS_SPD_GEN1 | SATA_SCR_SSTATUS_IPM_ACTIVE;
        } else {
            val = SATA_SCR_SSTATUS_DET_NODEV;
        }

        pr->scr_act &= ~s->dev[port].finished;
        s->dev[port].finished = 0;

    DPRINTF(port, "offset: 0x%x val: 0x%x\n", offset, val);
static void ahci_irq_raise(AHCIState *s, AHCIDevice *dev)

    AHCIPCIState *d = container_of(s, AHCIPCIState, ahci);
    PCIDevice *pci_dev =
        (PCIDevice *)object_dynamic_cast(OBJECT(d), TYPE_PCI_DEVICE);

    DPRINTF(0, "raise irq\n");

    if (pci_dev && msi_enabled(pci_dev)) {
        msi_notify(pci_dev, 0);
    } else {
        qemu_irq_raise(s->irq);
    }
static void ahci_irq_lower(AHCIState *s, AHCIDevice *dev)

    AHCIPCIState *d = container_of(s, AHCIPCIState, ahci);
    PCIDevice *pci_dev =
        (PCIDevice *)object_dynamic_cast(OBJECT(d), TYPE_PCI_DEVICE);

    DPRINTF(0, "lower irq\n");

    if (!pci_dev || !msi_enabled(pci_dev)) {
        qemu_irq_lower(s->irq);
    }
static void ahci_check_irq(AHCIState *s)

    DPRINTF(-1, "check irq %#x\n", s->control_regs.irqstatus);

    s->control_regs.irqstatus = 0;
    for (i = 0; i < s->ports; i++) {
        AHCIPortRegs *pr = &s->dev[i].port_regs;
        if (pr->irq_stat & pr->irq_mask) {
            s->control_regs.irqstatus |= (1 << i);
        }
    }

    if (s->control_regs.irqstatus &&
        (s->control_regs.ghc & HOST_CTL_IRQ_EN)) {
        ahci_irq_raise(s, NULL);
    } else {
        ahci_irq_lower(s, NULL);
    }
static void ahci_trigger_irq(AHCIState *s, AHCIDevice *d,
                             int irq_type)

    DPRINTF(d->port_no, "trigger irq %#x -> %x\n",
            irq_type, d->port_regs.irq_mask & irq_type);

    d->port_regs.irq_stat |= irq_type;
static void map_page(AddressSpace *as, uint8_t **ptr, uint64_t addr,
                     uint32_t wanted)

        dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);

    *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE);

        dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);
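
/*
 * Usage sketch for map_page(): any previous mapping in *ptr is unmapped
 * first, then the guest-physical range at 'addr' is mapped for
 * device-to-guest DMA. The callers below (ahci_map_fis_address and
 * ahci_map_clb_address) treat a NULL *ptr after the call as failure.
 */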
static void ahci_port_write(AHCIState *s, int port, int offset, uint32_t val)

    AHCIPortRegs *pr = &s->dev[port].port_regs;

    DPRINTF(port, "offset: 0x%x val: 0x%x\n", offset, val);

    case PORT_LST_ADDR_HI:
        pr->lst_addr_hi = val;

    case PORT_FIS_ADDR_HI:
        pr->fis_addr_hi = val;

        pr->irq_stat &= ~val;

        pr->irq_mask = val & 0xfdc000ff;

        /* Block any Read-only fields from being set;
         * including LIST_ON and FIS_ON. */
        pr->cmd = (pr->cmd & PORT_CMD_RO_MASK) | (val & ~PORT_CMD_RO_MASK);

        if (pr->cmd & PORT_CMD_START) {
            if (ahci_map_clb_address(&s->dev[port])) {
                pr->cmd |= PORT_CMD_LIST_ON;
            } else {
                error_report("AHCI: Failed to start DMA engine: "
                             "bad command list buffer address");
            }
        } else if (pr->cmd & PORT_CMD_LIST_ON) {
            ahci_unmap_clb_address(&s->dev[port]);
            pr->cmd = pr->cmd & ~(PORT_CMD_LIST_ON);
        }

        if (pr->cmd & PORT_CMD_FIS_RX) {
            if (ahci_map_fis_address(&s->dev[port])) {
                pr->cmd |= PORT_CMD_FIS_ON;
            } else {
                error_report("AHCI: Failed to start FIS receive engine: "
                             "bad FIS receive buffer address");
            }
        } else if (pr->cmd & PORT_CMD_FIS_ON) {
            ahci_unmap_fis_address(&s->dev[port]);
            pr->cmd = pr->cmd & ~(PORT_CMD_FIS_ON);
        }

        /* XXX usually the FIS would be pending on the bus here and
           issuing deferred until the OS enables FIS receival.
           Instead, we only submit it once - which works in most
           cases, but is a hack. */
        if ((pr->cmd & PORT_CMD_FIS_ON) &&
            !s->dev[port].init_d2h_sent) {
            ahci_init_d2h(&s->dev[port]);
            s->dev[port].init_d2h_sent = true;
        }

        if (((pr->scr_ctl & AHCI_SCR_SCTL_DET) == 1) &&
            ((val & AHCI_SCR_SCTL_DET) == 0)) {
            ahci_reset_port(s, port);
        }

        pr->cmd_issue |= val;
static uint64_t ahci_mem_read(void *opaque, hwaddr addr,
                              unsigned size)

    AHCIState *s = opaque;

    if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) {

            val = s->control_regs.cap;

            val = s->control_regs.ghc;

            val = s->control_regs.irqstatus;

        case HOST_PORTS_IMPL:
            val = s->control_regs.impl;

            val = s->control_regs.version;

        DPRINTF(-1, "(addr 0x%08X), val 0x%08X\n", (unsigned) addr, val);
    } else if ((addr >= AHCI_PORT_REGS_START_ADDR) &&
               (addr < (AHCI_PORT_REGS_START_ADDR +
                (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) {
        val = ahci_port_read(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7,
                             addr & AHCI_PORT_ADDR_OFFSET_MASK);
    }
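
/*
 * Address decode example (a sketch, assuming AHCI_PORT_REGS_START_ADDR is
 * 0x100 and each port's register block spans 0x80 bytes, as in the AHCI
 * spec): an access at 0x1A8 yields (0x1A8 - 0x100) >> 7 == 1 for the port
 * number and 0x1A8 & AHCI_PORT_ADDR_OFFSET_MASK == 0x28 for the per-port
 * register offset passed to ahci_port_read()/ahci_port_write().
 */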
static void ahci_mem_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)

    AHCIState *s = opaque;

    /* Only aligned writes are allowed on AHCI */

        fprintf(stderr, "ahci: Mis-aligned write to addr 0x"
                TARGET_FMT_plx "\n", addr);

    if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) {
        DPRINTF(-1, "(addr 0x%08X), val 0x%08"PRIX64"\n", (unsigned) addr, val);

        case HOST_CAP: /* R/WO, RO */
            /* FIXME handle R/WO */

        case HOST_CTL: /* R/W */
            if (val & HOST_CTL_RESET) {
                DPRINTF(-1, "HBA Reset\n");

                s->control_regs.ghc = (val & 0x3) | HOST_CTL_AHCI_EN;

        case HOST_IRQ_STAT: /* R/WC, RO */
            s->control_regs.irqstatus &= ~val;

        case HOST_PORTS_IMPL: /* R/WO, RO */
            /* FIXME handle R/WO */

        case HOST_VERSION: /* RO */
            /* FIXME report write? */

            DPRINTF(-1, "write to unknown register 0x%x\n", (unsigned)addr);

    } else if ((addr >= AHCI_PORT_REGS_START_ADDR) &&
               (addr < (AHCI_PORT_REGS_START_ADDR +
                (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) {
        ahci_port_write(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7,
                        addr & AHCI_PORT_ADDR_OFFSET_MASK, val);
    }
static const MemoryRegionOps ahci_mem_ops = {
    .read = ahci_mem_read,
    .write = ahci_mem_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static uint64_t ahci_idp_read(void *opaque, hwaddr addr,
                              unsigned size)

    AHCIState *s = opaque;

    if (addr == s->idp_offset) {

    } else if (addr == s->idp_offset + 4) {
        /* data register - do memory read at location selected by index */
        return ahci_mem_read(opaque, s->idp_index, size);
    }
static void ahci_idp_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)

    AHCIState *s = opaque;

    if (addr == s->idp_offset) {
        /* index register - mask off reserved bits */
        s->idp_index = (uint32_t)val & ((AHCI_MEM_BAR_SIZE - 1) & ~3);
    } else if (addr == s->idp_offset + 4) {
        /* data register - do memory write at location selected by index */
        ahci_mem_write(opaque, s->idp_index, val, size);
    }
static const MemoryRegionOps ahci_idp_ops = {
    .read = ahci_idp_read,
    .write = ahci_idp_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
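
/*
 * The pair of handlers above implements Index-Data Pair style access:
 * software writes a dword-aligned register offset to the index register at
 * idp_offset (the mask in ahci_idp_write keeps it aligned and inside the
 * BAR), then reads or writes the data register at idp_offset + 4 to reach
 * that MMIO register. For example, writing 0x118 to the index and then
 * reading the data register returns the same value an MMIO read of offset
 * 0x118 would.
 */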
static void ahci_reg_init(AHCIState *s)

    s->control_regs.cap = (s->ports - 1) |
                          (AHCI_NUM_COMMAND_SLOTS << 8) |
                          (AHCI_SUPPORTED_SPEED_GEN1 << AHCI_SUPPORTED_SPEED) |
                          HOST_CAP_NCQ | HOST_CAP_AHCI;

    s->control_regs.impl = (1 << s->ports) - 1;

    s->control_regs.version = AHCI_VERSION_1_0;

    for (i = 0; i < s->ports; i++) {
        s->dev[i].port_state = STATE_RUN;
    }
static void check_cmd(AHCIState *s, int port)

    AHCIPortRegs *pr = &s->dev[port].port_regs;

    if ((pr->cmd & PORT_CMD_START) && pr->cmd_issue) {
        for (slot = 0; (slot < 32) && pr->cmd_issue; slot++) {
            if ((pr->cmd_issue & (1U << slot)) &&
                !handle_cmd(s, port, slot)) {
                pr->cmd_issue &= ~(1U << slot);
            }
        }
    }
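
/*
 * In the loop above, a zero return from handle_cmd() means the slot was
 * handled to completion, so its bit is cleared from cmd_issue (PxCI). A
 * nonzero return leaves the bit set; for commands that go asynchronous,
 * ahci_check_cmd_bh() below clears the busy slot's bit once the device is
 * no longer busy and then re-runs check_cmd().
 */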
static void ahci_check_cmd_bh(void *opaque)

    AHCIDevice *ad = opaque;

    qemu_bh_delete(ad->check_bh);

    if ((ad->busy_slot != -1) &&
        !(ad->port.ifs[0].status & (BUSY_STAT|DRQ_STAT))) {
        ad->port_regs.cmd_issue &= ~(1 << ad->busy_slot);

    }

    check_cmd(ad->hba, ad->port_no);
static void ahci_init_d2h(AHCIDevice *ad)

    uint8_t init_fis[20];
    IDEState *ide_state = &ad->port.ifs[0];

    memset(init_fis, 0, sizeof(init_fis));

    if (ide_state->drive_kind == IDE_CD) {
        init_fis[5] = ide_state->lcyl;
        init_fis[6] = ide_state->hcyl;
    }

    ahci_write_fis_d2h(ad, init_fis);
static void ahci_reset_port(AHCIState *s, int port)

    AHCIDevice *d = &s->dev[port];
    AHCIPortRegs *pr = &d->port_regs;
    IDEState *ide_state = &d->port.ifs[0];

    DPRINTF(port, "reset port\n");

    ide_bus_reset(&d->port);
    ide_state->ncq_queues = AHCI_MAX_CMDS;

    pr->sig = 0xFFFFFFFF;

    d->init_d2h_sent = false;

    ide_state = &s->dev[port].port.ifs[0];
    if (!ide_state->blk) {

    /* reset ncq queue */
    for (i = 0; i < AHCI_MAX_CMDS; i++) {
        NCQTransferState *ncq_tfs = &s->dev[port].ncq_tfs[i];
        if (!ncq_tfs->used) {

        if (ncq_tfs->aiocb) {
            blk_aio_cancel(ncq_tfs->aiocb);
            ncq_tfs->aiocb = NULL;
        }

        /* Maybe we just finished the request thanks to blk_aio_cancel() */
        if (!ncq_tfs->used) {

        qemu_sglist_destroy(&ncq_tfs->sglist);

    s->dev[port].port_state = STATE_RUN;
    if (!ide_state->blk) {

        ide_state->status = SEEK_STAT | WRERR_STAT;
    } else if (ide_state->drive_kind == IDE_CD) {
        pr->sig = SATA_SIGNATURE_CDROM;
        ide_state->lcyl = 0x14;
        ide_state->hcyl = 0xeb;
        DPRINTF(port, "set lcyl = %d\n", ide_state->lcyl);
        ide_state->status = SEEK_STAT | WRERR_STAT | READY_STAT;
    } else {
        pr->sig = SATA_SIGNATURE_DISK;
        ide_state->status = SEEK_STAT | WRERR_STAT;
    }

    ide_state->error = 1;
static void debug_print_fis(uint8_t *fis, int cmd_len)

    fprintf(stderr, "fis:");
    for (i = 0; i < cmd_len; i++) {
        if ((i & 0xf) == 0) {
            fprintf(stderr, "\n%02x:", i);
        }
        fprintf(stderr, "%02x ", fis[i]);
    }
    fprintf(stderr, "\n");
static bool ahci_map_fis_address(AHCIDevice *ad)

    AHCIPortRegs *pr = &ad->port_regs;
    map_page(ad->hba->as, &ad->res_fis,
             ((uint64_t)pr->fis_addr_hi << 32) | pr->fis_addr, 256);
    return ad->res_fis != NULL;
static void ahci_unmap_fis_address(AHCIDevice *ad)

    dma_memory_unmap(ad->hba->as, ad->res_fis, 256,
                     DMA_DIRECTION_FROM_DEVICE, 256);
static bool ahci_map_clb_address(AHCIDevice *ad)

    AHCIPortRegs *pr = &ad->port_regs;

    map_page(ad->hba->as, &ad->lst,
             ((uint64_t)pr->lst_addr_hi << 32) | pr->lst_addr, 1024);
    return ad->lst != NULL;
static void ahci_unmap_clb_address(AHCIDevice *ad)

    dma_memory_unmap(ad->hba->as, ad->lst, 1024,
                     DMA_DIRECTION_FROM_DEVICE, 1024);
static void ahci_write_fis_sdb(AHCIState *s, int port, uint32_t finished)

    AHCIDevice *ad = &s->dev[port];
    AHCIPortRegs *pr = &ad->port_regs;

    if (!s->dev[port].res_fis ||
        !(pr->cmd & PORT_CMD_FIS_RX)) {

    sdb_fis = (SDBFIS *)&ad->res_fis[RES_FIS_SDBFIS];
    ide_state = &ad->port.ifs[0];

    sdb_fis->type = SATA_FIS_TYPE_SDB;
    /* Interrupt pending & Notification bit */
    sdb_fis->flags = (ad->hba->control_regs.irqstatus ? (1 << 6) : 0);
    sdb_fis->status = ide_state->status & 0x77;
    sdb_fis->error = ide_state->error;
    /* update SAct field in SDB_FIS */
    s->dev[port].finished |= finished;
    sdb_fis->payload = cpu_to_le32(ad->finished);

    /* Update shadow registers (except BSY 0x80 and DRQ 0x08) */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
                 (ad->port.ifs[0].status & 0x77) |

    ahci_trigger_irq(s, ad, PORT_IRQ_SDB_FIS);
static void ahci_write_fis_pio(AHCIDevice *ad, uint16_t len)

    AHCIPortRegs *pr = &ad->port_regs;
    uint8_t *pio_fis, *cmd_fis;

    dma_addr_t cmd_len = 0x80;
    IDEState *s = &ad->port.ifs[0];

    if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) {

    tbl_addr = le64_to_cpu(ad->cur_cmd->tbl_addr);
    cmd_fis = dma_memory_map(ad->hba->as, tbl_addr, &cmd_len,
                             DMA_DIRECTION_TO_DEVICE);

    if (cmd_fis == NULL) {
        DPRINTF(ad->port_no, "dma_memory_map failed in ahci_write_fis_pio");
        ahci_trigger_irq(ad->hba, ad, PORT_IRQ_HBUS_ERR);

    if (cmd_len != 0x80) {
        DPRINTF(ad->port_no,
                "dma_memory_map mapped too few bytes in ahci_write_fis_pio");
        dma_memory_unmap(ad->hba->as, cmd_fis, cmd_len,
                         DMA_DIRECTION_TO_DEVICE, cmd_len);
        ahci_trigger_irq(ad->hba, ad, PORT_IRQ_HBUS_ERR);

    pio_fis = &ad->res_fis[RES_FIS_PSFIS];

    pio_fis[0] = SATA_FIS_TYPE_PIO_SETUP;
    pio_fis[1] = (ad->hba->control_regs.irqstatus ? (1 << 6) : 0);
    pio_fis[2] = s->status;
    pio_fis[3] = s->error;

    pio_fis[4] = s->sector;
    pio_fis[5] = s->lcyl;
    pio_fis[6] = s->hcyl;
    pio_fis[7] = s->select;
    pio_fis[8] = s->hob_sector;
    pio_fis[9] = s->hob_lcyl;
    pio_fis[10] = s->hob_hcyl;

    pio_fis[12] = cmd_fis[12];
    pio_fis[13] = cmd_fis[13];

    pio_fis[15] = s->status;
    pio_fis[16] = len & 255;
    pio_fis[17] = len >> 8;

    /* Update shadow registers: */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
                 ad->port.ifs[0].status;

    if (pio_fis[2] & ERR_STAT) {
        ahci_trigger_irq(ad->hba, ad, PORT_IRQ_TF_ERR);
    }

    ahci_trigger_irq(ad->hba, ad, PORT_IRQ_PIOS_FIS);

    dma_memory_unmap(ad->hba->as, cmd_fis, cmd_len,
                     DMA_DIRECTION_TO_DEVICE, cmd_len);
static void ahci_write_fis_d2h(AHCIDevice *ad, uint8_t *cmd_fis)

    AHCIPortRegs *pr = &ad->port_regs;

    dma_addr_t cmd_len = 0x80;

    IDEState *s = &ad->port.ifs[0];

    if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) {

        uint64_t tbl_addr = le64_to_cpu(ad->cur_cmd->tbl_addr);
        cmd_fis = dma_memory_map(ad->hba->as, tbl_addr, &cmd_len,
                                 DMA_DIRECTION_TO_DEVICE);

    d2h_fis = &ad->res_fis[RES_FIS_RFIS];

    d2h_fis[0] = SATA_FIS_TYPE_REGISTER_D2H;
    d2h_fis[1] = (ad->hba->control_regs.irqstatus ? (1 << 6) : 0);
    d2h_fis[2] = s->status;
    d2h_fis[3] = s->error;

    d2h_fis[4] = s->sector;
    d2h_fis[5] = s->lcyl;
    d2h_fis[6] = s->hcyl;
    d2h_fis[7] = s->select;
    d2h_fis[8] = s->hob_sector;
    d2h_fis[9] = s->hob_lcyl;
    d2h_fis[10] = s->hob_hcyl;

    d2h_fis[12] = cmd_fis[12];
    d2h_fis[13] = cmd_fis[13];
    for (i = 14; i < 20; i++) {
        d2h_fis[i] = 0;
    }

    /* Update shadow registers: */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
                 ad->port.ifs[0].status;

    if (d2h_fis[2] & ERR_STAT) {
        ahci_trigger_irq(ad->hba, ad, PORT_IRQ_TF_ERR);
    }

    ahci_trigger_irq(ad->hba, ad, PORT_IRQ_D2H_REG_FIS);

    dma_memory_unmap(ad->hba->as, cmd_fis, cmd_len,
                     DMA_DIRECTION_TO_DEVICE, cmd_len);
static int prdt_tbl_entry_size(const AHCI_SG *tbl)
{
    return (le32_to_cpu(tbl->flags_size) & AHCI_PRDT_SIZE_MASK) + 1;
}
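
/*
 * The byte count in flags_size is zero-based, hence the "+ 1" above:
 * e.g. a stored value of 0x1FF (assuming AHCI_PRDT_SIZE_MASK selects the
 * low byte-count bits) describes a 0x200-byte region, i.e. one 512-byte
 * sector.
 */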
static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist,

    AHCICmdHdr *cmd = ad->cur_cmd;
    uint32_t opts = le32_to_cpu(cmd->opts);
    uint64_t prdt_addr = le64_to_cpu(cmd->tbl_addr) + 0x80;
    int sglist_alloc_hint = opts >> AHCI_CMD_HDR_PRDT_LEN;
    dma_addr_t prdt_len = (sglist_alloc_hint * sizeof(AHCI_SG));
    dma_addr_t real_prdt_len = prdt_len;

    int64_t off_pos = -1;

    IDEBus *bus = &ad->port;
    BusState *qbus = BUS(bus);

    /*
     * Note: AHCI PRDT can describe up to 256GiB. SATA/ATA only support
     * transactions of up to 32MiB as of ATA8-ACS3 rev 1b, assuming a
     * 512 byte sector size. We limit the PRDT in this implementation to
     * a reasonably large 2GiB, which can accommodate the maximum transfer
     * request for sector sizes up to 32K.
     */

    if (!sglist_alloc_hint) {
        DPRINTF(ad->port_no, "no sg list given by guest: 0x%08x\n", opts);

    if (!(prdt = dma_memory_map(ad->hba->as, prdt_addr, &prdt_len,
                                DMA_DIRECTION_TO_DEVICE))) {
        DPRINTF(ad->port_no, "map failed\n");

    if (prdt_len < real_prdt_len) {
        DPRINTF(ad->port_no, "mapped less than expected\n");

    /* Get entries in the PRDT, init a qemu sglist accordingly */
    if (sglist_alloc_hint > 0) {
        AHCI_SG *tbl = (AHCI_SG *)prdt;

        for (i = 0; i < sglist_alloc_hint; i++) {
            /* flags_size is zero-based */
            tbl_entry_size = prdt_tbl_entry_size(&tbl[i]);
            if (offset <= (sum + tbl_entry_size)) {

                off_pos = offset - sum;

            sum += tbl_entry_size;

        if ((off_idx == -1) || (off_pos < 0) || (off_pos > tbl_entry_size)) {
            DPRINTF(ad->port_no, "%s: Incorrect offset! "
                    "off_idx: %d, off_pos: %"PRId64"\n",
                    __func__, off_idx, off_pos);

        qemu_sglist_init(sglist, qbus->parent, (sglist_alloc_hint - off_idx),

        qemu_sglist_add(sglist, le64_to_cpu(tbl[off_idx].addr) + off_pos,
                        prdt_tbl_entry_size(&tbl[off_idx]) - off_pos);

        for (i = off_idx + 1; i < sglist_alloc_hint; i++) {
            /* flags_size is zero-based */
            qemu_sglist_add(sglist, le64_to_cpu(tbl[i].addr),
                            prdt_tbl_entry_size(&tbl[i]));
            if (sglist->size > INT32_MAX) {
                error_report("AHCI Physical Region Descriptor Table describes "
                             "more than 2 GiB.\n");
                qemu_sglist_destroy(sglist);

    dma_memory_unmap(ad->hba->as, prdt, prdt_len,
                     DMA_DIRECTION_TO_DEVICE, prdt_len);
static void ncq_cb(void *opaque, int ret)

    NCQTransferState *ncq_tfs = (NCQTransferState *)opaque;
    IDEState *ide_state = &ncq_tfs->drive->port.ifs[0];

    if (ret == -ECANCELED) {

    /* Clear bit for this tag in SActive */
    ncq_tfs->drive->port_regs.scr_act &= ~(1 << ncq_tfs->tag);

        ide_state->error = ABRT_ERR;
        ide_state->status = READY_STAT | ERR_STAT;
        ncq_tfs->drive->port_regs.scr_err |= (1 << ncq_tfs->tag);

        ide_state->status = READY_STAT | SEEK_STAT;

    ahci_write_fis_sdb(ncq_tfs->drive->hba, ncq_tfs->drive->port_no,
                       (1 << ncq_tfs->tag));

    DPRINTF(ncq_tfs->drive->port_no, "NCQ transfer tag %d finished\n",

    block_acct_done(blk_get_stats(ncq_tfs->drive->port.ifs[0].blk),

    qemu_sglist_destroy(&ncq_tfs->sglist);
static int is_ncq(uint8_t ata_cmd)

    /* Based on SATA 3.2 section 13.6.3.2 */

    case READ_FPDMA_QUEUED:
    case WRITE_FPDMA_QUEUED:

    case RECEIVE_FPDMA_QUEUED:
    case SEND_FPDMA_QUEUED:
static void process_ncq_command(AHCIState *s, int port, uint8_t *cmd_fis,
                                int slot)

    NCQFrame *ncq_fis = (NCQFrame *)cmd_fis;
    uint8_t tag = ncq_fis->tag >> 3;
    NCQTransferState *ncq_tfs = &s->dev[port].ncq_tfs[tag];

        /* error - already in use */
        fprintf(stderr, "%s: tag %d already used\n", __FUNCTION__, tag);

    ncq_tfs->drive = &s->dev[port];
    ncq_tfs->slot = slot;
    ncq_tfs->lba = ((uint64_t)ncq_fis->lba5 << 40) |
                   ((uint64_t)ncq_fis->lba4 << 32) |
                   ((uint64_t)ncq_fis->lba3 << 24) |
                   ((uint64_t)ncq_fis->lba2 << 16) |
                   ((uint64_t)ncq_fis->lba1 << 8) |
                   (uint64_t)ncq_fis->lba0;

    /* Note: We calculate the sector count, but don't currently rely on it.
     * The total size of the DMA buffer tells us the transfer size instead. */
    ncq_tfs->sector_count = ((uint16_t)ncq_fis->sector_count_high << 8) |
                            ncq_fis->sector_count_low;

    DPRINTF(port, "NCQ transfer LBA from %"PRId64" to %"PRId64", "
            "drive max %"PRId64"\n",
            ncq_tfs->lba, ncq_tfs->lba + ncq_tfs->sector_count - 2,
            s->dev[port].port.ifs[0].nb_sectors - 1);

    ahci_populate_sglist(&s->dev[port], &ncq_tfs->sglist, 0);

    switch (ncq_fis->command) {
    case READ_FPDMA_QUEUED:
        DPRINTF(port, "NCQ reading %d sectors from LBA %"PRId64", "

                ncq_tfs->sector_count - 1, ncq_tfs->lba, ncq_tfs->tag);

        DPRINTF(port, "tag %d aio read %"PRId64"\n",
                ncq_tfs->tag, ncq_tfs->lba);

        dma_acct_start(ncq_tfs->drive->port.ifs[0].blk, &ncq_tfs->acct,
                       &ncq_tfs->sglist, BLOCK_ACCT_READ);
        ncq_tfs->aiocb = dma_blk_read(ncq_tfs->drive->port.ifs[0].blk,
                                      &ncq_tfs->sglist, ncq_tfs->lba,

    case WRITE_FPDMA_QUEUED:
        DPRINTF(port, "NCQ writing %d sectors to LBA %"PRId64", tag %d\n",
                ncq_tfs->sector_count - 1, ncq_tfs->lba, ncq_tfs->tag);

        DPRINTF(port, "tag %d aio write %"PRId64"\n",
                ncq_tfs->tag, ncq_tfs->lba);

        dma_acct_start(ncq_tfs->drive->port.ifs[0].blk, &ncq_tfs->acct,
                       &ncq_tfs->sglist, BLOCK_ACCT_WRITE);
        ncq_tfs->aiocb = dma_blk_write(ncq_tfs->drive->port.ifs[0].blk,
                                       &ncq_tfs->sglist, ncq_tfs->lba,

        if (is_ncq(cmd_fis[2])) {

                "error: unsupported NCQ command (0x%02x) received\n",

                "error: tried to process non-NCQ command as NCQ\n");

        qemu_sglist_destroy(&ncq_tfs->sglist);
static void handle_reg_h2d_fis(AHCIState *s, int port,
                               int slot, uint8_t *cmd_fis)

    IDEState *ide_state = &s->dev[port].port.ifs[0];
    AHCICmdHdr *cmd = s->dev[port].cur_cmd;
    uint32_t opts = le32_to_cpu(cmd->opts);

    if (cmd_fis[1] & 0x0F) {
        DPRINTF(port, "Port Multiplier not supported."
                " cmd_fis[0]=%02x cmd_fis[1]=%02x cmd_fis[2]=%02x\n",
                cmd_fis[0], cmd_fis[1], cmd_fis[2]);

    if (cmd_fis[1] & 0x70) {
        DPRINTF(port, "Reserved flags set in H2D Register FIS."
                " cmd_fis[0]=%02x cmd_fis[1]=%02x cmd_fis[2]=%02x\n",
                cmd_fis[0], cmd_fis[1], cmd_fis[2]);

    if (!(cmd_fis[1] & SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER)) {
        switch (s->dev[port].port_state) {

            if (cmd_fis[15] & ATA_SRST) {
                s->dev[port].port_state = STATE_RESET;

            if (!(cmd_fis[15] & ATA_SRST)) {
                ahci_reset_port(s, port);

    /* Check for NCQ command */
    if (is_ncq(cmd_fis[2])) {
        process_ncq_command(s, port, cmd_fis, slot);

    /* Decompose the FIS:
     * AHCI does not interpret FIS packets, it only forwards them.
     * SATA 1.0 describes how to decode LBA28 and CHS FIS packets.
     * Later specifications, e.g, SATA 3.2, describe LBA48 FIS packets.
     *
     * ATA4 describes sector number for LBA28/CHS commands.
     * ATA6 describes sector number for LBA48 commands.
     * ATA8 deprecates CHS fully, describing only LBA28/48.
     *
     * We dutifully convert the FIS into IDE registers, and allow the
     * core layer to interpret them as needed. */
    ide_state->feature = cmd_fis[3];
    ide_state->sector = cmd_fis[4];      /* LBA 7:0 */
    ide_state->lcyl = cmd_fis[5];        /* LBA 15:8 */
    ide_state->hcyl = cmd_fis[6];        /* LBA 23:16 */
    ide_state->select = cmd_fis[7];      /* LBA 27:24 (LBA28) */
    ide_state->hob_sector = cmd_fis[8];  /* LBA 31:24 */
    ide_state->hob_lcyl = cmd_fis[9];    /* LBA 39:32 */
    ide_state->hob_hcyl = cmd_fis[10];   /* LBA 47:40 */
    ide_state->hob_feature = cmd_fis[11];
    ide_state->nsector = (int64_t)((cmd_fis[13] << 8) | cmd_fis[12]);
    /* 14, 16, 17, 18, 19: Reserved (SATA 1.0) */
    /* 15: Only valid when UPDATE_COMMAND not set. */

    /* Copy the ACMD field (ATAPI packet, if any) from the AHCI command
     * table to ide_state->io_buffer */
    if (opts & AHCI_CMD_ATAPI) {
        memcpy(ide_state->io_buffer, &cmd_fis[AHCI_COMMAND_TABLE_ACMD], 0x10);
        debug_print_fis(ide_state->io_buffer, 0x10);
        s->dev[port].done_atapi_packet = false;
        /* XXX send PIO setup FIS */
    }

    ide_state->error = 0;

    /* Reset transferred byte counter */

    /* We're ready to process the command in FIS byte 2. */
    ide_exec_cmd(&s->dev[port].port, cmd_fis[2]);
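
/*
 * With the shadow registers loaded as above, an LBA48 address reads back as
 * (hob_hcyl << 40) | (hob_lcyl << 32) | (hob_sector << 24) |
 * (hcyl << 16) | (lcyl << 8) | sector, matching the bit ranges noted on
 * each assignment; the IDE core reassembles the address the same way.
 */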
static int handle_cmd(AHCIState *s, int port, int slot)

    IDEState *ide_state;

    if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) {
        /* Engine currently busy, try again later */
        DPRINTF(port, "engine busy\n");

    if (!s->dev[port].lst) {
        DPRINTF(port, "error: lst not given but cmd handled");

    cmd = &((AHCICmdHdr *)s->dev[port].lst)[slot];
    /* remember current slot handle for later */
    s->dev[port].cur_cmd = cmd;

    /* The device we are working for */
    ide_state = &s->dev[port].port.ifs[0];
    if (!ide_state->blk) {
        DPRINTF(port, "error: guest accessed unused port");

    tbl_addr = le64_to_cpu(cmd->tbl_addr);

    cmd_fis = dma_memory_map(s->as, tbl_addr, &cmd_len,
                             DMA_DIRECTION_FROM_DEVICE);

        DPRINTF(port, "error: guest passed us an invalid cmd fis\n");

    } else if (cmd_len != 0x80) {
        ahci_trigger_irq(s, &s->dev[port], PORT_IRQ_HBUS_ERR);
        DPRINTF(port, "error: dma_memory_map failed: "
                "(len(%02"PRIx64") != 0x80)\n",

    debug_print_fis(cmd_fis, 0x80);

    switch (cmd_fis[0]) {
    case SATA_FIS_TYPE_REGISTER_H2D:
        handle_reg_h2d_fis(s, port, slot, cmd_fis);

        DPRINTF(port, "unknown command cmd_fis[0]=%02x cmd_fis[1]=%02x "
                "cmd_fis[2]=%02x\n", cmd_fis[0], cmd_fis[1],

    dma_memory_unmap(s->as, cmd_fis, cmd_len, DMA_DIRECTION_FROM_DEVICE,

    if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) {
        /* async command, complete later */
        s->dev[port].busy_slot = slot;

    /* done handling the command */
/* DMA dev <-> ram */
static void ahci_start_transfer(IDEDMA *dma)

    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];
    uint32_t size = (uint32_t)(s->data_end - s->data_ptr);
    /* write == ram -> device */
    uint32_t opts = le32_to_cpu(ad->cur_cmd->opts);
    int is_write = opts & AHCI_CMD_WRITE;
    int is_atapi = opts & AHCI_CMD_ATAPI;

    if (is_atapi && !ad->done_atapi_packet) {
        /* already prepopulated iobuffer */
        ad->done_atapi_packet = true;

    if (ahci_dma_prepare_buf(dma, is_write)) {

    DPRINTF(ad->port_no, "%sing %d bytes on %s w/%s sglist\n",
            is_write ? "writ" : "read", size, is_atapi ? "atapi" : "ata",
            has_sglist ? "" : "o");

    if (has_sglist && size) {

            dma_buf_write(s->data_ptr, size, &s->sg);

            dma_buf_read(s->data_ptr, size, &s->sg);

    /* declare that we processed everything */
    s->data_ptr = s->data_end;

    /* Update number of transferred bytes, destroy sglist */
    ahci_commit_buf(dma, size);

    s->end_transfer_func(s);

    if (!(s->status & DRQ_STAT)) {
        /* done with PIO send/receive */
        ahci_write_fis_pio(ad, le32_to_cpu(ad->cur_cmd->status));
    }
static void ahci_start_dma(IDEDMA *dma, IDEState *s,
                           BlockCompletionFunc *dma_cb)

    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    DPRINTF(ad->port_no, "\n");
    s->io_buffer_offset = 0;
static void ahci_restart_dma(IDEDMA *dma)
{
    /* Nothing to do, ahci_start_dma already resets s->io_buffer_offset. */
}
/*
 * Called in DMA R/W chains to read the PRDT, utilizing ahci_populate_sglist.
 * Not currently invoked by PIO R/W chains,
 * which invoke ahci_populate_sglist via ahci_start_transfer.
 */
static int32_t ahci_dma_prepare_buf(IDEDMA *dma, int is_write)

    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];

    if (ahci_populate_sglist(ad, &s->sg, s->io_buffer_offset) == -1) {
        DPRINTF(ad->port_no, "ahci_dma_prepare_buf failed.\n");

    s->io_buffer_size = s->sg.size;

    DPRINTF(ad->port_no, "len=%#x\n", s->io_buffer_size);
    return s->io_buffer_size;
/*
 * Destroys the scatter-gather list,
 * and updates the command header with a bytes-read value.
 * called explicitly via ahci_dma_rw_buf (ATAPI DMA),
 * and ahci_start_transfer (PIO R/W),
 * and called via callback from ide_dma_cb for DMA R/W paths.
 */
static void ahci_commit_buf(IDEDMA *dma, uint32_t tx_bytes)

    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];

    tx_bytes += le32_to_cpu(ad->cur_cmd->status);
    ad->cur_cmd->status = cpu_to_le32(tx_bytes);

    qemu_sglist_destroy(&s->sg);
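
/*
 * Worked example for the accumulation above: after two commits of 0x200
 * bytes each, the little-endian value stored back into the command header
 * reads 0x400. The AHCICmdHdr field is simply named 'status'; it appears
 * to correspond to the command header's PRD Byte Count that the guest
 * reads back.
 */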
static int ahci_dma_rw_buf(IDEDMA *dma, int is_write)

    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];
    uint8_t *p = s->io_buffer + s->io_buffer_index;
    int l = s->io_buffer_size - s->io_buffer_index;

    if (ahci_populate_sglist(ad, &s->sg, s->io_buffer_offset)) {

        dma_buf_read(p, l, &s->sg);

        dma_buf_write(p, l, &s->sg);

    /* free sglist, update byte count */
    ahci_commit_buf(dma, l);

    s->io_buffer_index += l;
    s->io_buffer_offset += l;

    DPRINTF(ad->port_no, "len=%#x\n", l);
static void ahci_cmd_done(IDEDMA *dma)

    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);

    DPRINTF(ad->port_no, "cmd done\n");

    /* update d2h status */
    ahci_write_fis_d2h(ad, NULL);

    if (!ad->check_bh) {
        /* maybe we still have something to process, check later */
        ad->check_bh = qemu_bh_new(ahci_check_cmd_bh, ad);
        qemu_bh_schedule(ad->check_bh);
    }
static void ahci_irq_set(void *opaque, int n, int level)
{
}
static const IDEDMAOps ahci_dma_ops = {
    .start_dma = ahci_start_dma,
    .restart_dma = ahci_restart_dma,
    .start_transfer = ahci_start_transfer,
    .prepare_buf = ahci_dma_prepare_buf,
    .commit_buf = ahci_commit_buf,
    .rw_buf = ahci_dma_rw_buf,
    .cmd_done = ahci_cmd_done,
};
void ahci_init(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports)

    s->dev = g_new0(AHCIDevice, ports);

    /* XXX BAR size should be 1k, but that breaks, so bump it to 4k for now */
    memory_region_init_io(&s->mem, OBJECT(qdev), &ahci_mem_ops, s,
                          "ahci", AHCI_MEM_BAR_SIZE);
    memory_region_init_io(&s->idp, OBJECT(qdev), &ahci_idp_ops, s,

    irqs = qemu_allocate_irqs(ahci_irq_set, s, s->ports);

    for (i = 0; i < s->ports; i++) {
        AHCIDevice *ad = &s->dev[i];

        ide_bus_new(&ad->port, sizeof(ad->port), qdev, i, 1);
        ide_init2(&ad->port, irqs[i]);

        ad->port.dma = &ad->dma;
        ad->port.dma->ops = &ahci_dma_ops;
        ide_register_restart_cb(&ad->port);
    }
void ahci_uninit(AHCIState *s)
void ahci_reset(AHCIState *s)

    s->control_regs.irqstatus = 0;
    /*
     * The implementation of this bit is dependent upon the value of the
     * CAP.SAM bit. If CAP.SAM is '0', then GHC.AE shall be read-write and
     * shall have a reset value of '0'. If CAP.SAM is '1', then AE shall be
     * read-only and shall have a reset value of '1'.
     *
     * We set HOST_CAP_AHCI so we must enable AHCI at reset.
     */
    s->control_regs.ghc = HOST_CTL_AHCI_EN;

    for (i = 0; i < s->ports; i++) {
        pr = &s->dev[i].port_regs;

        pr->cmd = PORT_CMD_SPIN_UP | PORT_CMD_POWER_ON;
        ahci_reset_port(s, i);
    }
static const VMStateDescription vmstate_ahci_device = {
    .name = "ahci port",
    .fields = (VMStateField[]) {
        VMSTATE_IDE_BUS(port, AHCIDevice),
        VMSTATE_IDE_DRIVE(port.ifs[0], AHCIDevice),
        VMSTATE_UINT32(port_state, AHCIDevice),
        VMSTATE_UINT32(finished, AHCIDevice),
        VMSTATE_UINT32(port_regs.lst_addr, AHCIDevice),
        VMSTATE_UINT32(port_regs.lst_addr_hi, AHCIDevice),
        VMSTATE_UINT32(port_regs.fis_addr, AHCIDevice),
        VMSTATE_UINT32(port_regs.fis_addr_hi, AHCIDevice),
        VMSTATE_UINT32(port_regs.irq_stat, AHCIDevice),
        VMSTATE_UINT32(port_regs.irq_mask, AHCIDevice),
        VMSTATE_UINT32(port_regs.cmd, AHCIDevice),
        VMSTATE_UINT32(port_regs.tfdata, AHCIDevice),
        VMSTATE_UINT32(port_regs.sig, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_stat, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_ctl, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_err, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_act, AHCIDevice),
        VMSTATE_UINT32(port_regs.cmd_issue, AHCIDevice),
        VMSTATE_BOOL(done_atapi_packet, AHCIDevice),
        VMSTATE_INT32(busy_slot, AHCIDevice),
        VMSTATE_BOOL(init_d2h_sent, AHCIDevice),
        VMSTATE_END_OF_LIST()
    },
};
static int ahci_state_post_load(void *opaque, int version_id)

    struct AHCIDevice *ad;
    AHCIState *s = opaque;

    for (i = 0; i < s->ports; i++) {

        ahci_map_clb_address(ad);
        ahci_map_fis_address(ad);
        /*
         * If an error is present, ad->busy_slot will be valid and not -1.
         * In this case, an operation is waiting to resume and will re-check
         * for additional AHCI commands to execute upon completion.
         *
         * In the case where no error was present, busy_slot will be -1,
         * and we should check to see if there are additional commands waiting.
         */
        if (ad->busy_slot == -1) {

            /* We are in the middle of a command, and may need to access
             * the command header in guest memory again. */
            if (ad->busy_slot < 0 || ad->busy_slot >= AHCI_MAX_CMDS) {

            ad->cur_cmd = &((AHCICmdHdr *)ad->lst)[ad->busy_slot];
const VMStateDescription vmstate_ahci = {
    .post_load = ahci_state_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(dev, AHCIState, ports,
                                            vmstate_ahci_device, AHCIDevice),
        VMSTATE_UINT32(control_regs.cap, AHCIState),
        VMSTATE_UINT32(control_regs.ghc, AHCIState),
        VMSTATE_UINT32(control_regs.irqstatus, AHCIState),
        VMSTATE_UINT32(control_regs.impl, AHCIState),
        VMSTATE_UINT32(control_regs.version, AHCIState),
        VMSTATE_UINT32(idp_index, AHCIState),
        VMSTATE_INT32_EQUAL(ports, AHCIState),
        VMSTATE_END_OF_LIST()
    },
};
#define TYPE_SYSBUS_AHCI "sysbus-ahci"
#define SYSBUS_AHCI(obj) OBJECT_CHECK(SysbusAHCIState, (obj), TYPE_SYSBUS_AHCI)

typedef struct SysbusAHCIState {

    SysBusDevice parent_obj;

static const VMStateDescription vmstate_sysbus_ahci = {
    .name = "sysbus-ahci",
    .fields = (VMStateField[]) {
        VMSTATE_AHCI(ahci, SysbusAHCIState),
        VMSTATE_END_OF_LIST()
    },
};

static void sysbus_ahci_reset(DeviceState *dev)

    SysbusAHCIState *s = SYSBUS_AHCI(dev);

    ahci_reset(&s->ahci);

static void sysbus_ahci_realize(DeviceState *dev, Error **errp)

    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysbusAHCIState *s = SYSBUS_AHCI(dev);

    ahci_init(&s->ahci, dev, &address_space_memory, s->num_ports);

    sysbus_init_mmio(sbd, &s->ahci.mem);
    sysbus_init_irq(sbd, &s->ahci.irq);

static Property sysbus_ahci_properties[] = {
    DEFINE_PROP_UINT32("num-ports", SysbusAHCIState, num_ports, 1),
    DEFINE_PROP_END_OF_LIST(),
};

static void sysbus_ahci_class_init(ObjectClass *klass, void *data)

    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_ahci_realize;
    dc->vmsd = &vmstate_sysbus_ahci;
    dc->props = sysbus_ahci_properties;
    dc->reset = sysbus_ahci_reset;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);

static const TypeInfo sysbus_ahci_info = {
    .name = TYPE_SYSBUS_AHCI,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SysbusAHCIState),
    .class_init = sysbus_ahci_class_init,
};

static void sysbus_ahci_register_types(void)

    type_register_static(&sysbus_ahci_info);

type_init(sysbus_ahci_register_types)

void ahci_ide_create_devs(PCIDevice *dev, DriveInfo **hd)

    AHCIPCIState *d = ICH_AHCI(dev);
    AHCIState *ahci = &d->ahci;

    for (i = 0; i < ahci->ports; i++) {
        if (hd[i] == NULL) {

        ide_create_drive(&ahci->dev[i].port, 0, hd[i]);