 * Copyright (c) 2010 qiaochong@loongson.cn
 * Copyright (c) 2010 Roland Elek <elek.roland@gmail.com>
 * Copyright (c) 2010 Sebastian Herbszt <herbszt@gmx.de>
 * Copyright (c) 2010 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.

#include <hw/pci/msi.h>
#include <hw/i386/pc.h>
#include <hw/pci/pci.h>
#include <hw/sysbus.h>

#include "monitor/monitor.h"
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"

#include <hw/ide/pci.h>
#include <hw/ide/ahci.h>

#define DPRINTF(port, fmt, ...) \
do { \
    fprintf(stderr, "ahci: %s: [%d] ", __func__, port); \
    fprintf(stderr, fmt, ## __VA_ARGS__); \
} while (0)

static void check_cmd(AHCIState *s, int port);
static int handle_cmd(AHCIState *s, int port, int slot);
static void ahci_reset_port(AHCIState *s, int port);
static void ahci_write_fis_d2h(AHCIDevice *ad, uint8_t *cmd_fis);
static void ahci_init_d2h(AHCIDevice *ad);
static int32_t ahci_dma_prepare_buf(IDEDMA *dma, int is_write);
static void ahci_commit_buf(IDEDMA *dma, uint32_t tx_bytes);

static uint32_t ahci_port_read(AHCIState *s, int port, int offset)
{
    pr = &s->dev[port].port_regs;

    case PORT_LST_ADDR_HI:
        val = pr->lst_addr_hi;
    case PORT_FIS_ADDR_HI:
        val = pr->fis_addr_hi;

        if (s->dev[port].port.ifs[0].blk) {
            val = SATA_SCR_SSTATUS_DET_DEV_PRESENT_PHY_UP |
                  SATA_SCR_SSTATUS_SPD_GEN1 | SATA_SCR_SSTATUS_IPM_ACTIVE;
        } else {
            val = SATA_SCR_SSTATUS_DET_NODEV;
        }

        pr->scr_act &= ~s->dev[port].finished;
        s->dev[port].finished = 0;

    DPRINTF(port, "offset: 0x%x val: 0x%x\n", offset, val);
    return val;
}

static void ahci_irq_raise(AHCIState *s, AHCIDevice *dev)
{
    AHCIPCIState *d = container_of(s, AHCIPCIState, ahci);
    PCIDevice *pci_dev =
        (PCIDevice *) object_dynamic_cast(OBJECT(d), TYPE_PCI_DEVICE);

    DPRINTF(0, "raise irq\n");

    if (pci_dev && msi_enabled(pci_dev)) {
        msi_notify(pci_dev, 0);
    } else {
        qemu_irq_raise(s->irq);
    }
}

static void ahci_irq_lower(AHCIState *s, AHCIDevice *dev)
{
    AHCIPCIState *d = container_of(s, AHCIPCIState, ahci);
    PCIDevice *pci_dev =
        (PCIDevice *) object_dynamic_cast(OBJECT(d), TYPE_PCI_DEVICE);

    DPRINTF(0, "lower irq\n");

    if (!pci_dev || !msi_enabled(pci_dev)) {
        qemu_irq_lower(s->irq);
    }
}

static void ahci_check_irq(AHCIState *s)
{
    DPRINTF(-1, "check irq %#x\n", s->control_regs.irqstatus);

    s->control_regs.irqstatus = 0;
    for (i = 0; i < s->ports; i++) {
        AHCIPortRegs *pr = &s->dev[i].port_regs;
        if (pr->irq_stat & pr->irq_mask) {
            s->control_regs.irqstatus |= (1 << i);
        }
    }

    if (s->control_regs.irqstatus &&
        (s->control_regs.ghc & HOST_CTL_IRQ_EN)) {
        ahci_irq_raise(s, NULL);
    } else {
        ahci_irq_lower(s, NULL);
    }
}

static void ahci_trigger_irq(AHCIState *s, AHCIDevice *d,
                             int irq_type)
{
    DPRINTF(d->port_no, "trigger irq %#x -> %x\n",
            irq_type, d->port_regs.irq_mask & irq_type);

    d->port_regs.irq_stat |= irq_type;
    ahci_check_irq(s);
}
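
/*
 * Helper used by the port address registers (and by post_load below): it
 * releases any guest-memory mapping previously held in *ptr and maps the
 * region at 'addr' for device access.
 */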

static void map_page(AddressSpace *as, uint8_t **ptr, uint64_t addr,
                     uint32_t wanted)
{
        dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);

    *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE);
        dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);
}

static void ahci_port_write(AHCIState *s, int port, int offset, uint32_t val)
{
    AHCIPortRegs *pr = &s->dev[port].port_regs;

    DPRINTF(port, "offset: 0x%x val: 0x%x\n", offset, val);

        map_page(s->as, &s->dev[port].lst,
                 ((uint64_t)pr->lst_addr_hi << 32) | pr->lst_addr, 1024);
        s->dev[port].cur_cmd = NULL;
    case PORT_LST_ADDR_HI:
        pr->lst_addr_hi = val;
        map_page(s->as, &s->dev[port].lst,
                 ((uint64_t)pr->lst_addr_hi << 32) | pr->lst_addr, 1024);
        s->dev[port].cur_cmd = NULL;

        map_page(s->as, &s->dev[port].res_fis,
                 ((uint64_t)pr->fis_addr_hi << 32) | pr->fis_addr, 256);
    case PORT_FIS_ADDR_HI:
        pr->fis_addr_hi = val;
        map_page(s->as, &s->dev[port].res_fis,
                 ((uint64_t)pr->fis_addr_hi << 32) | pr->fis_addr, 256);

        pr->irq_stat &= ~val;

        pr->irq_mask = val & 0xfdc000ff;

        pr->cmd = val & ~(PORT_CMD_LIST_ON | PORT_CMD_FIS_ON);

        if (pr->cmd & PORT_CMD_START) {
            pr->cmd |= PORT_CMD_LIST_ON;
        }

        if (pr->cmd & PORT_CMD_FIS_RX) {
            pr->cmd |= PORT_CMD_FIS_ON;
        }

        /* XXX usually the FIS would be pending on the bus here and
           issuing deferred until the OS enables FIS receival.
           Instead, we only submit it once - which works in most
           cases, but is a hack. */
        if ((pr->cmd & PORT_CMD_FIS_ON) &&
            !s->dev[port].init_d2h_sent) {
            ahci_init_d2h(&s->dev[port]);
            s->dev[port].init_d2h_sent = true;
        }

        if (((pr->scr_ctl & AHCI_SCR_SCTL_DET) == 1) &&
            ((val & AHCI_SCR_SCTL_DET) == 0)) {
            ahci_reset_port(s, port);
        }

        pr->cmd_issue |= val;
}

static uint64_t ahci_mem_read(void *opaque, hwaddr addr,
                              unsigned size)
{
    AHCIState *s = opaque;

    if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) {
            val = s->control_regs.cap;
            val = s->control_regs.ghc;
            val = s->control_regs.irqstatus;
        case HOST_PORTS_IMPL:
            val = s->control_regs.impl;
            val = s->control_regs.version;
        DPRINTF(-1, "(addr 0x%08X), val 0x%08X\n", (unsigned) addr, val);
    } else if ((addr >= AHCI_PORT_REGS_START_ADDR) &&
               (addr < (AHCI_PORT_REGS_START_ADDR +
                (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) {
        val = ahci_port_read(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7,
                             addr & AHCI_PORT_ADDR_OFFSET_MASK);
    }

    return val;
}

static void ahci_mem_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    AHCIState *s = opaque;

    /* Only aligned writes are allowed on AHCI */
    if (addr & 3) {
        fprintf(stderr, "ahci: Mis-aligned write to addr 0x"
                TARGET_FMT_plx "\n", addr);
        return;
    }

    if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) {
        DPRINTF(-1, "(addr 0x%08X), val 0x%08"PRIX64"\n", (unsigned) addr, val);

        switch (addr) {
        case HOST_CAP: /* R/WO, RO */
            /* FIXME handle R/WO */
            break;
        case HOST_CTL: /* R/W */
            if (val & HOST_CTL_RESET) {
                DPRINTF(-1, "HBA Reset\n");
            } else {
                s->control_regs.ghc = (val & 0x3) | HOST_CTL_AHCI_EN;
            }
            break;
        case HOST_IRQ_STAT: /* R/WC, RO */
            s->control_regs.irqstatus &= ~val;
            break;
        case HOST_PORTS_IMPL: /* R/WO, RO */
            /* FIXME handle R/WO */
            break;
        case HOST_VERSION: /* RO */
            /* FIXME report write? */
            break;
        default:
            DPRINTF(-1, "write to unknown register 0x%x\n", (unsigned)addr);
        }
    } else if ((addr >= AHCI_PORT_REGS_START_ADDR) &&
               (addr < (AHCI_PORT_REGS_START_ADDR +
                (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) {
        ahci_port_write(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7,
                        addr & AHCI_PORT_ADDR_OFFSET_MASK, val);
    }
}

static const MemoryRegionOps ahci_mem_ops = {
    .read = ahci_mem_read,
    .write = ahci_mem_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
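
/*
 * Index/Data Pair registers: a write to the index register selects a byte
 * offset within the AHCI register space; accesses to the following data
 * register are then forwarded to ahci_mem_read()/ahci_mem_write() at the
 * selected offset.
 */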

static uint64_t ahci_idp_read(void *opaque, hwaddr addr,
                              unsigned size)
{
    AHCIState *s = opaque;

    if (addr == s->idp_offset) {
        /* index register */
        return s->idp_index;
    } else if (addr == s->idp_offset + 4) {
        /* data register - do memory read at location selected by index */
        return ahci_mem_read(opaque, s->idp_index, size);
    }
}

static void ahci_idp_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    AHCIState *s = opaque;

    if (addr == s->idp_offset) {
        /* index register - mask off reserved bits */
        s->idp_index = (uint32_t)val & ((AHCI_MEM_BAR_SIZE - 1) & ~3);
    } else if (addr == s->idp_offset + 4) {
        /* data register - do memory write at location selected by index */
        ahci_mem_write(opaque, s->idp_index, val, size);
    }
}

static const MemoryRegionOps ahci_idp_ops = {
    .read = ahci_idp_read,
    .write = ahci_idp_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void ahci_reg_init(AHCIState *s)
{
    s->control_regs.cap = (s->ports - 1) |
                          (AHCI_NUM_COMMAND_SLOTS << 8) |
                          (AHCI_SUPPORTED_SPEED_GEN1 << AHCI_SUPPORTED_SPEED) |
                          HOST_CAP_NCQ | HOST_CAP_AHCI;

    s->control_regs.impl = (1 << s->ports) - 1;

    s->control_regs.version = AHCI_VERSION_1_0;

    for (i = 0; i < s->ports; i++) {
        s->dev[i].port_state = STATE_RUN;
    }
}

static void check_cmd(AHCIState *s, int port)
{
    AHCIPortRegs *pr = &s->dev[port].port_regs;

    if ((pr->cmd & PORT_CMD_START) && pr->cmd_issue) {
        for (slot = 0; (slot < 32) && pr->cmd_issue; slot++) {
            if ((pr->cmd_issue & (1U << slot)) &&
                !handle_cmd(s, port, slot)) {
                pr->cmd_issue &= ~(1U << slot);
            }
        }
    }
}
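
/*
 * Bottom half scheduled from ahci_cmd_done(): once the device has dropped
 * BSY/DRQ, retire the slot that was in flight from the command issue
 * register and scan it again for further queued commands.
 */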

static void ahci_check_cmd_bh(void *opaque)
{
    AHCIDevice *ad = opaque;

    qemu_bh_delete(ad->check_bh);

    if ((ad->busy_slot != -1) &&
        !(ad->port.ifs[0].status & (BUSY_STAT|DRQ_STAT))) {
        ad->port_regs.cmd_issue &= ~(1 << ad->busy_slot);
    }

    check_cmd(ad->hba, ad->port_no);
}

static void ahci_init_d2h(AHCIDevice *ad)
{
    uint8_t init_fis[20];
    IDEState *ide_state = &ad->port.ifs[0];

    memset(init_fis, 0, sizeof(init_fis));

    if (ide_state->drive_kind == IDE_CD) {
        init_fis[5] = ide_state->lcyl;
        init_fis[6] = ide_state->hcyl;
    }

    ahci_write_fis_d2h(ad, init_fis);
}

static void ahci_reset_port(AHCIState *s, int port)
{
    AHCIDevice *d = &s->dev[port];
    AHCIPortRegs *pr = &d->port_regs;
    IDEState *ide_state = &d->port.ifs[0];

    DPRINTF(port, "reset port\n");

    ide_bus_reset(&d->port);
    ide_state->ncq_queues = AHCI_MAX_CMDS;

    pr->sig = 0xFFFFFFFF;
    d->init_d2h_sent = false;

    ide_state = &s->dev[port].port.ifs[0];
    if (!ide_state->blk) {
        return;
    }

    /* reset ncq queue */
    for (i = 0; i < AHCI_MAX_CMDS; i++) {
        NCQTransferState *ncq_tfs = &s->dev[port].ncq_tfs[i];
        if (!ncq_tfs->used) {
            continue;
        }

        if (ncq_tfs->aiocb) {
            blk_aio_cancel(ncq_tfs->aiocb);
            ncq_tfs->aiocb = NULL;
        }

        /* Maybe we just finished the request thanks to blk_aio_cancel() */
        if (!ncq_tfs->used) {
            continue;
        }

        qemu_sglist_destroy(&ncq_tfs->sglist);
    }

    s->dev[port].port_state = STATE_RUN;
    if (!ide_state->blk) {
        ide_state->status = SEEK_STAT | WRERR_STAT;
    } else if (ide_state->drive_kind == IDE_CD) {
        pr->sig = SATA_SIGNATURE_CDROM;
        ide_state->lcyl = 0x14;
        ide_state->hcyl = 0xeb;
        DPRINTF(port, "set lcyl = %d\n", ide_state->lcyl);
        ide_state->status = SEEK_STAT | WRERR_STAT | READY_STAT;
    } else {
        pr->sig = SATA_SIGNATURE_DISK;
        ide_state->status = SEEK_STAT | WRERR_STAT;
    }

    ide_state->error = 1;
}

static void debug_print_fis(uint8_t *fis, int cmd_len)
{
    fprintf(stderr, "fis:");
    for (i = 0; i < cmd_len; i++) {
        if ((i & 0xf) == 0) {
            fprintf(stderr, "\n%02x:", i);
        }
        fprintf(stderr, "%02x ", fis[i]);
    }
    fprintf(stderr, "\n");
}

static void ahci_write_fis_sdb(AHCIState *s, int port, uint32_t finished)
{
    AHCIDevice *ad = &s->dev[port];
    AHCIPortRegs *pr = &ad->port_regs;

    if (!s->dev[port].res_fis ||
        !(pr->cmd & PORT_CMD_FIS_RX)) {
        return;
    }

    sdb_fis = (SDBFIS *)&ad->res_fis[RES_FIS_SDBFIS];
    ide_state = &ad->port.ifs[0];

    sdb_fis->type = SATA_FIS_TYPE_SDB;
    /* Interrupt pending & Notification bit */
    sdb_fis->flags = (ad->hba->control_regs.irqstatus ? (1 << 6) : 0);
    sdb_fis->status = ide_state->status & 0x77;
    sdb_fis->error = ide_state->error;
    /* update SAct field in SDB_FIS */
    s->dev[port].finished |= finished;
    sdb_fis->payload = cpu_to_le32(ad->finished);

    /* Update shadow registers (except BSY 0x80 and DRQ 0x08) */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
                 (ad->port.ifs[0].status & 0x77) |
                 (pr->tfdata & 0x88);

    ahci_trigger_irq(s, ad, PORT_IRQ_SDB_FIS);
}

static void ahci_write_fis_pio(AHCIDevice *ad, uint16_t len)
{
    AHCIPortRegs *pr = &ad->port_regs;
    uint8_t *pio_fis, *cmd_fis;
    dma_addr_t cmd_len = 0x80;
    IDEState *s = &ad->port.ifs[0];

    if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) {
        return;
    }

    tbl_addr = le64_to_cpu(ad->cur_cmd->tbl_addr);
    cmd_fis = dma_memory_map(ad->hba->as, tbl_addr, &cmd_len,
                             DMA_DIRECTION_TO_DEVICE);

    if (cmd_fis == NULL) {
        DPRINTF(ad->port_no, "dma_memory_map failed in ahci_write_fis_pio");
        ahci_trigger_irq(ad->hba, ad, PORT_IRQ_HBUS_ERR);
        return;
    }

    if (cmd_len != 0x80) {
        DPRINTF(ad->port_no,
                "dma_memory_map mapped too few bytes in ahci_write_fis_pio");
        dma_memory_unmap(ad->hba->as, cmd_fis, cmd_len,
                         DMA_DIRECTION_TO_DEVICE, cmd_len);
        ahci_trigger_irq(ad->hba, ad, PORT_IRQ_HBUS_ERR);
        return;
    }

    pio_fis = &ad->res_fis[RES_FIS_PSFIS];

    pio_fis[0] = SATA_FIS_TYPE_PIO_SETUP;
    pio_fis[1] = (ad->hba->control_regs.irqstatus ? (1 << 6) : 0);
    pio_fis[2] = s->status;
    pio_fis[3] = s->error;

    pio_fis[4] = s->sector;
    pio_fis[5] = s->lcyl;
    pio_fis[6] = s->hcyl;
    pio_fis[7] = s->select;
    pio_fis[8] = s->hob_sector;
    pio_fis[9] = s->hob_lcyl;
    pio_fis[10] = s->hob_hcyl;
    pio_fis[12] = cmd_fis[12];
    pio_fis[13] = cmd_fis[13];
    pio_fis[15] = s->status;
    pio_fis[16] = len & 255;
    pio_fis[17] = len >> 8;

    /* Update shadow registers: */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
                 ad->port.ifs[0].status;

    if (pio_fis[2] & ERR_STAT) {
        ahci_trigger_irq(ad->hba, ad, PORT_IRQ_TF_ERR);
    }

    ahci_trigger_irq(ad->hba, ad, PORT_IRQ_PIOS_FIS);

    dma_memory_unmap(ad->hba->as, cmd_fis, cmd_len,
                     DMA_DIRECTION_TO_DEVICE, cmd_len);
}

static void ahci_write_fis_d2h(AHCIDevice *ad, uint8_t *cmd_fis)
{
    AHCIPortRegs *pr = &ad->port_regs;
    dma_addr_t cmd_len = 0x80;
    IDEState *s = &ad->port.ifs[0];

    if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) {
        return;
    }

    if (!cmd_fis) {
        uint64_t tbl_addr = le64_to_cpu(ad->cur_cmd->tbl_addr);
        cmd_fis = dma_memory_map(ad->hba->as, tbl_addr, &cmd_len,
                                 DMA_DIRECTION_TO_DEVICE);
    }

    d2h_fis = &ad->res_fis[RES_FIS_RFIS];

    d2h_fis[0] = SATA_FIS_TYPE_REGISTER_D2H;
    d2h_fis[1] = (ad->hba->control_regs.irqstatus ? (1 << 6) : 0);
    d2h_fis[2] = s->status;
    d2h_fis[3] = s->error;

    d2h_fis[4] = s->sector;
    d2h_fis[5] = s->lcyl;
    d2h_fis[6] = s->hcyl;
    d2h_fis[7] = s->select;
    d2h_fis[8] = s->hob_sector;
    d2h_fis[9] = s->hob_lcyl;
    d2h_fis[10] = s->hob_hcyl;
    d2h_fis[12] = cmd_fis[12];
    d2h_fis[13] = cmd_fis[13];
    for (i = 14; i < 20; i++) {
        d2h_fis[i] = 0;
    }

    /* Update shadow registers: */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
                 ad->port.ifs[0].status;

    if (d2h_fis[2] & ERR_STAT) {
        ahci_trigger_irq(ad->hba, ad, PORT_IRQ_TF_ERR);
    }

    ahci_trigger_irq(ad->hba, ad, PORT_IRQ_D2H_REG_FIS);

    dma_memory_unmap(ad->hba->as, cmd_fis, cmd_len,
                     DMA_DIRECTION_TO_DEVICE, cmd_len);
}

static int prdt_tbl_entry_size(const AHCI_SG *tbl)
{
    return (le32_to_cpu(tbl->flags_size) & AHCI_PRDT_SIZE_MASK) + 1;
}
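
/*
 * Build a QEMUSGList from the guest's Physical Region Descriptor Table.
 * The PRDT lives right after the 0x80-byte command FIS in the command
 * table; 'offset' skips the part of the transfer that has already been
 * completed.
 */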

static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist,
                                int64_t offset)
{
    AHCICmdHdr *cmd = ad->cur_cmd;
    uint32_t opts = le32_to_cpu(cmd->opts);
    uint64_t prdt_addr = le64_to_cpu(cmd->tbl_addr) + 0x80;
    int sglist_alloc_hint = opts >> AHCI_CMD_HDR_PRDT_LEN;
    dma_addr_t prdt_len = (sglist_alloc_hint * sizeof(AHCI_SG));
    dma_addr_t real_prdt_len = prdt_len;
    int64_t off_pos = -1;
    IDEBus *bus = &ad->port;
    BusState *qbus = BUS(bus);

    /*
     * Note: AHCI PRDT can describe up to 256GiB. SATA/ATA only support
     * transactions of up to 32MiB as of ATA8-ACS3 rev 1b, assuming a
     * 512 byte sector size. We limit the PRDT in this implementation to
     * a reasonably large 2GiB, which can accommodate the maximum transfer
     * request for sector sizes up to 32K.
     */

    if (!sglist_alloc_hint) {
        DPRINTF(ad->port_no, "no sg list given by guest: 0x%08x\n", opts);
        return -1;
    }

    if (!(prdt = dma_memory_map(ad->hba->as, prdt_addr, &prdt_len,
                                DMA_DIRECTION_TO_DEVICE))) {
        DPRINTF(ad->port_no, "map failed\n");
        return -1;
    }

    if (prdt_len < real_prdt_len) {
        DPRINTF(ad->port_no, "mapped less than expected\n");
    }

    /* Get entries in the PRDT, init a qemu sglist accordingly */
    if (sglist_alloc_hint > 0) {
        AHCI_SG *tbl = (AHCI_SG *)prdt;

        for (i = 0; i < sglist_alloc_hint; i++) {
            /* flags_size is zero-based */
            tbl_entry_size = prdt_tbl_entry_size(&tbl[i]);
            if (offset <= (sum + tbl_entry_size)) {
                off_pos = offset - sum;
            }
            sum += tbl_entry_size;
        }
        if ((off_idx == -1) || (off_pos < 0) || (off_pos > tbl_entry_size)) {
            DPRINTF(ad->port_no, "%s: Incorrect offset! "
                    "off_idx: %d, off_pos: %"PRId64"\n",
                    __func__, off_idx, off_pos);
        }

        qemu_sglist_init(sglist, qbus->parent, (sglist_alloc_hint - off_idx),
                         ad->hba->as);
        qemu_sglist_add(sglist, le64_to_cpu(tbl[off_idx].addr) + off_pos,
                        prdt_tbl_entry_size(&tbl[off_idx]) - off_pos);

        for (i = off_idx + 1; i < sglist_alloc_hint; i++) {
            /* flags_size is zero-based */
            qemu_sglist_add(sglist, le64_to_cpu(tbl[i].addr),
                            prdt_tbl_entry_size(&tbl[i]));
            if (sglist->size > INT32_MAX) {
                error_report("AHCI Physical Region Descriptor Table describes "
                             "more than 2 GiB.");
                qemu_sglist_destroy(sglist);
            }
        }
    }

    dma_memory_unmap(ad->hba->as, prdt, prdt_len,
                     DMA_DIRECTION_TO_DEVICE, prdt_len);
}

static void ncq_cb(void *opaque, int ret)
{
    NCQTransferState *ncq_tfs = (NCQTransferState *)opaque;
    IDEState *ide_state = &ncq_tfs->drive->port.ifs[0];

    if (ret == -ECANCELED) {
        return;
    }
    /* Clear bit for this tag in SActive */
    ncq_tfs->drive->port_regs.scr_act &= ~(1 << ncq_tfs->tag);

    if (ret < 0) {
        ide_state->error = ABRT_ERR;
        ide_state->status = READY_STAT | ERR_STAT;
        ncq_tfs->drive->port_regs.scr_err |= (1 << ncq_tfs->tag);
    } else {
        ide_state->status = READY_STAT | SEEK_STAT;
    }

    ahci_write_fis_sdb(ncq_tfs->drive->hba, ncq_tfs->drive->port_no,
                       (1 << ncq_tfs->tag));

    DPRINTF(ncq_tfs->drive->port_no, "NCQ transfer tag %d finished\n",
            ncq_tfs->tag);

    block_acct_done(blk_get_stats(ncq_tfs->drive->port.ifs[0].blk),
                    &ncq_tfs->acct);
    qemu_sglist_destroy(&ncq_tfs->sglist);
}

static int is_ncq(uint8_t ata_cmd)
{
    /* Based on SATA 3.2 section 13.6.3.2 */
    switch (ata_cmd) {
    case READ_FPDMA_QUEUED:
    case WRITE_FPDMA_QUEUED:
    case RECEIVE_FPDMA_QUEUED:
    case SEND_FPDMA_QUEUED:
        return 1;
    default:
        return 0;
    }
}
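
/*
 * Set up an NCQTransferState for a READ/WRITE FPDMA QUEUED command: the
 * tag comes from bits 7:3 of the FIS tag field, the LBA and sector count
 * are assembled from the individual FIS bytes, and the I/O is submitted
 * asynchronously with ncq_cb as the completion callback.
 */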

static void process_ncq_command(AHCIState *s, int port, uint8_t *cmd_fis,
                                int slot)
{
    NCQFrame *ncq_fis = (NCQFrame *)cmd_fis;
    uint8_t tag = ncq_fis->tag >> 3;
    NCQTransferState *ncq_tfs = &s->dev[port].ncq_tfs[tag];

    if (ncq_tfs->used) {
        /* error - already in use */
        fprintf(stderr, "%s: tag %d already used\n", __func__, tag);
        return;
    }

    ncq_tfs->drive = &s->dev[port];
    ncq_tfs->slot = slot;
    ncq_tfs->lba = ((uint64_t)ncq_fis->lba5 << 40) |
                   ((uint64_t)ncq_fis->lba4 << 32) |
                   ((uint64_t)ncq_fis->lba3 << 24) |
                   ((uint64_t)ncq_fis->lba2 << 16) |
                   ((uint64_t)ncq_fis->lba1 << 8) |
                   (uint64_t)ncq_fis->lba0;

    /* Note: We calculate the sector count, but don't currently rely on it.
     * The total size of the DMA buffer tells us the transfer size instead. */
    ncq_tfs->sector_count = ((uint16_t)ncq_fis->sector_count_high << 8) |
                            ncq_fis->sector_count_low;

    DPRINTF(port, "NCQ transfer LBA from %"PRId64" to %"PRId64", "
            "drive max %"PRId64"\n",
            ncq_tfs->lba, ncq_tfs->lba + ncq_tfs->sector_count - 2,
            s->dev[port].port.ifs[0].nb_sectors - 1);

    ahci_populate_sglist(&s->dev[port], &ncq_tfs->sglist, 0);

    switch (ncq_fis->command) {
    case READ_FPDMA_QUEUED:
        DPRINTF(port, "NCQ reading %d sectors from LBA %"PRId64", "
                "tag %d\n",
                ncq_tfs->sector_count - 1, ncq_tfs->lba, ncq_tfs->tag);

        DPRINTF(port, "tag %d aio read %"PRId64"\n",
                ncq_tfs->tag, ncq_tfs->lba);

        dma_acct_start(ncq_tfs->drive->port.ifs[0].blk, &ncq_tfs->acct,
                       &ncq_tfs->sglist, BLOCK_ACCT_READ);
        ncq_tfs->aiocb = dma_blk_read(ncq_tfs->drive->port.ifs[0].blk,
                                      &ncq_tfs->sglist, ncq_tfs->lba,
                                      ncq_cb, ncq_tfs);
        break;
    case WRITE_FPDMA_QUEUED:
        DPRINTF(port, "NCQ writing %d sectors to LBA %"PRId64", tag %d\n",
                ncq_tfs->sector_count - 1, ncq_tfs->lba, ncq_tfs->tag);

        DPRINTF(port, "tag %d aio write %"PRId64"\n",
                ncq_tfs->tag, ncq_tfs->lba);

        dma_acct_start(ncq_tfs->drive->port.ifs[0].blk, &ncq_tfs->acct,
                       &ncq_tfs->sglist, BLOCK_ACCT_WRITE);
        ncq_tfs->aiocb = dma_blk_write(ncq_tfs->drive->port.ifs[0].blk,
                                       &ncq_tfs->sglist, ncq_tfs->lba,
                                       ncq_cb, ncq_tfs);
        break;
    default:
        if (is_ncq(cmd_fis[2])) {
            DPRINTF(port,
                    "error: unsupported NCQ command (0x%02x) received\n",
                    cmd_fis[2]);
        } else {
            DPRINTF(port,
                    "error: tried to process non-NCQ command as NCQ\n");
        }
        qemu_sglist_destroy(&ncq_tfs->sglist);
    }
}

static void handle_reg_h2d_fis(AHCIState *s, int port,
                               int slot, uint8_t *cmd_fis)
{
    IDEState *ide_state = &s->dev[port].port.ifs[0];
    AHCICmdHdr *cmd = s->dev[port].cur_cmd;
    uint32_t opts = le32_to_cpu(cmd->opts);

    if (cmd_fis[1] & 0x0F) {
        DPRINTF(port, "Port Multiplier not supported."
                " cmd_fis[0]=%02x cmd_fis[1]=%02x cmd_fis[2]=%02x\n",
                cmd_fis[0], cmd_fis[1], cmd_fis[2]);
        return;
    }

    if (cmd_fis[1] & 0x70) {
        DPRINTF(port, "Reserved flags set in H2D Register FIS."
                " cmd_fis[0]=%02x cmd_fis[1]=%02x cmd_fis[2]=%02x\n",
                cmd_fis[0], cmd_fis[1], cmd_fis[2]);
        return;
    }

    if (!(cmd_fis[1] & SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER)) {
        switch (s->dev[port].port_state) {
        case STATE_RUN:
            if (cmd_fis[15] & ATA_SRST) {
                s->dev[port].port_state = STATE_RESET;
            }
            break;
        case STATE_RESET:
            if (!(cmd_fis[15] & ATA_SRST)) {
                ahci_reset_port(s, port);
            }
            break;
        }
        return;
    }

    /* Check for NCQ command */
    if (is_ncq(cmd_fis[2])) {
        process_ncq_command(s, port, cmd_fis, slot);
        return;
    }

    /* Decompose the FIS:
     * AHCI does not interpret FIS packets, it only forwards them.
     * SATA 1.0 describes how to decode LBA28 and CHS FIS packets.
     * Later specifications, e.g., SATA 3.2, describe LBA48 FIS packets.
     *
     * ATA4 describes sector number for LBA28/CHS commands.
     * ATA6 describes sector number for LBA48 commands.
     * ATA8 deprecates CHS fully, describing only LBA28/48.
     *
     * We dutifully convert the FIS into IDE registers, and allow the
     * core layer to interpret them as needed. */
    ide_state->feature = cmd_fis[3];
    ide_state->sector = cmd_fis[4];      /* LBA 7:0 */
    ide_state->lcyl = cmd_fis[5];        /* LBA 15:8 */
    ide_state->hcyl = cmd_fis[6];        /* LBA 23:16 */
    ide_state->select = cmd_fis[7];      /* LBA 27:24 (LBA28) */
    ide_state->hob_sector = cmd_fis[8];  /* LBA 31:24 */
    ide_state->hob_lcyl = cmd_fis[9];    /* LBA 39:32 */
    ide_state->hob_hcyl = cmd_fis[10];   /* LBA 47:40 */
    ide_state->hob_feature = cmd_fis[11];
    ide_state->nsector = (int64_t)((cmd_fis[13] << 8) | cmd_fis[12]);
    /* 14, 16, 17, 18, 19: Reserved (SATA 1.0) */
    /* 15: Only valid when UPDATE_COMMAND not set. */

    /* Copy the ACMD field (ATAPI packet, if any) from the AHCI command
     * table to ide_state->io_buffer */
    if (opts & AHCI_CMD_ATAPI) {
        memcpy(ide_state->io_buffer, &cmd_fis[AHCI_COMMAND_TABLE_ACMD], 0x10);
        debug_print_fis(ide_state->io_buffer, 0x10);
        s->dev[port].done_atapi_packet = false;
        /* XXX send PIO setup FIS */
    }

    ide_state->error = 0;

    /* Reset transferred byte counter */
    cmd->status = 0;

    /* We're ready to process the command in FIS byte 2. */
    ide_exec_cmd(&s->dev[port].port, cmd_fis[2]);
}

static int handle_cmd(AHCIState *s, int port, int slot)
{
    IDEState *ide_state;

    if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) {
        /* Engine currently busy, try again later */
        DPRINTF(port, "engine busy\n");
        return -1;
    }

    if (!s->dev[port].lst) {
        DPRINTF(port, "error: lst not given but cmd handled");
        return -1;
    }
    cmd = &((AHCICmdHdr *)s->dev[port].lst)[slot];
    /* remember current slot handle for later */
    s->dev[port].cur_cmd = cmd;

    /* The device we are working for */
    ide_state = &s->dev[port].port.ifs[0];
    if (!ide_state->blk) {
        DPRINTF(port, "error: guest accessed unused port");
    }

    tbl_addr = le64_to_cpu(cmd->tbl_addr);
    cmd_fis = dma_memory_map(s->as, tbl_addr, &cmd_len,
                             DMA_DIRECTION_FROM_DEVICE);
    if (!cmd_fis) {
        DPRINTF(port, "error: guest passed us an invalid cmd fis\n");
    } else if (cmd_len != 0x80) {
        ahci_trigger_irq(s, &s->dev[port], PORT_IRQ_HBUS_ERR);
        DPRINTF(port, "error: dma_memory_map failed: "
                "(len(%02"PRIx64") != 0x80)\n",
                cmd_len);
    }

    debug_print_fis(cmd_fis, 0x80);

    switch (cmd_fis[0]) {
    case SATA_FIS_TYPE_REGISTER_H2D:
        handle_reg_h2d_fis(s, port, slot, cmd_fis);
        break;
    default:
        DPRINTF(port, "unknown command cmd_fis[0]=%02x cmd_fis[1]=%02x "
                "cmd_fis[2]=%02x\n", cmd_fis[0], cmd_fis[1],
                cmd_fis[2]);
        break;
    }

    dma_memory_unmap(s->as, cmd_fis, cmd_len, DMA_DIRECTION_FROM_DEVICE,
                     cmd_len);

    if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) {
        /* async command, complete later */
        s->dev[port].busy_slot = slot;
        return -1;
    }

    /* done handling the command */
    return 0;
}

/* DMA dev <-> ram */
static void ahci_start_transfer(IDEDMA *dma)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];
    uint32_t size = (uint32_t)(s->data_end - s->data_ptr);
    /* write == ram -> device */
    uint32_t opts = le32_to_cpu(ad->cur_cmd->opts);
    int is_write = opts & AHCI_CMD_WRITE;
    int is_atapi = opts & AHCI_CMD_ATAPI;

    if (is_atapi && !ad->done_atapi_packet) {
        /* already prepopulated iobuffer */
        ad->done_atapi_packet = true;
    }

    if (ahci_dma_prepare_buf(dma, is_write)) {
        has_sglist = 1;
    }

    DPRINTF(ad->port_no, "%sing %d bytes on %s w/%s sglist\n",
            is_write ? "writ" : "read", size, is_atapi ? "atapi" : "ata",
            has_sglist ? "" : "o");

    if (has_sglist && size) {
        if (is_write) {
            dma_buf_write(s->data_ptr, size, &s->sg);
        } else {
            dma_buf_read(s->data_ptr, size, &s->sg);
        }
    }

    /* declare that we processed everything */
    s->data_ptr = s->data_end;

    /* Update number of transferred bytes, destroy sglist */
    ahci_commit_buf(dma, size);

    s->end_transfer_func(s);

    if (!(s->status & DRQ_STAT)) {
        /* done with PIO send/receive */
        ahci_write_fis_pio(ad, le32_to_cpu(ad->cur_cmd->status));
    }
}

static void ahci_start_dma(IDEDMA *dma, IDEState *s,
                           BlockCompletionFunc *dma_cb)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);

    DPRINTF(ad->port_no, "\n");
    s->io_buffer_offset = 0;
    dma_cb(s, 0);
}

static void ahci_restart_dma(IDEDMA *dma)
{
    /* Nothing to do, ahci_start_dma already resets s->io_buffer_offset. */
}

/**
 * Called in DMA R/W chains to read the PRDT, utilizing ahci_populate_sglist.
 * Not currently invoked by PIO R/W chains,
 * which invoke ahci_populate_sglist via ahci_start_transfer.
 */
static int32_t ahci_dma_prepare_buf(IDEDMA *dma, int is_write)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];

    if (ahci_populate_sglist(ad, &s->sg, s->io_buffer_offset) == -1) {
        DPRINTF(ad->port_no, "ahci_dma_prepare_buf failed.\n");
        return -1;
    }
    s->io_buffer_size = s->sg.size;

    DPRINTF(ad->port_no, "len=%#x\n", s->io_buffer_size);
    return s->io_buffer_size;
}

/**
 * Destroys the scatter-gather list,
 * and updates the command header with a bytes-read value.
 * Called explicitly via ahci_dma_rw_buf (ATAPI DMA),
 * and ahci_start_transfer (PIO R/W),
 * and called via callback from ide_dma_cb for DMA R/W paths.
 */
static void ahci_commit_buf(IDEDMA *dma, uint32_t tx_bytes)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];

    tx_bytes += le32_to_cpu(ad->cur_cmd->status);
    ad->cur_cmd->status = cpu_to_le32(tx_bytes);

    qemu_sglist_destroy(&s->sg);
}

static int ahci_dma_rw_buf(IDEDMA *dma, int is_write)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];
    uint8_t *p = s->io_buffer + s->io_buffer_index;
    int l = s->io_buffer_size - s->io_buffer_index;

    if (ahci_populate_sglist(ad, &s->sg, s->io_buffer_offset)) {
        return 0;
    }

    if (is_write) {
        dma_buf_read(p, l, &s->sg);
    } else {
        dma_buf_write(p, l, &s->sg);
    }

    /* free sglist, update byte count */
    ahci_commit_buf(dma, l);

    s->io_buffer_index += l;
    s->io_buffer_offset += l;

    DPRINTF(ad->port_no, "len=%#x\n", l);

    return 1;
}

static void ahci_cmd_done(IDEDMA *dma)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);

    DPRINTF(ad->port_no, "cmd done\n");

    /* update d2h status */
    ahci_write_fis_d2h(ad, NULL);

    if (!ad->check_bh) {
        /* maybe we still have something to process, check later */
        ad->check_bh = qemu_bh_new(ahci_check_cmd_bh, ad);
        qemu_bh_schedule(ad->check_bh);
    }
}

static void ahci_irq_set(void *opaque, int n, int level)
{
}

static const IDEDMAOps ahci_dma_ops = {
    .start_dma = ahci_start_dma,
    .restart_dma = ahci_restart_dma,
    .start_transfer = ahci_start_transfer,
    .prepare_buf = ahci_dma_prepare_buf,
    .commit_buf = ahci_commit_buf,
    .rw_buf = ahci_dma_rw_buf,
    .cmd_done = ahci_cmd_done,
};

void ahci_init(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports)
{
    s->dev = g_new0(AHCIDevice, ports);

    /* XXX BAR size should be 1k, but that breaks, so bump it to 4k for now */
    memory_region_init_io(&s->mem, OBJECT(qdev), &ahci_mem_ops, s,
                          "ahci", AHCI_MEM_BAR_SIZE);
    memory_region_init_io(&s->idp, OBJECT(qdev), &ahci_idp_ops, s,
                          "ahci-idp", 32);

    irqs = qemu_allocate_irqs(ahci_irq_set, s, s->ports);

    for (i = 0; i < s->ports; i++) {
        AHCIDevice *ad = &s->dev[i];

        ide_bus_new(&ad->port, sizeof(ad->port), qdev, i, 1);
        ide_init2(&ad->port, irqs[i]);

        ad->port.dma = &ad->dma;
        ad->port.dma->ops = &ahci_dma_ops;
        ide_register_restart_cb(&ad->port);
    }
}

void ahci_uninit(AHCIState *s)
{
    g_free(s->dev);
}

void ahci_reset(AHCIState *s)
{
    s->control_regs.irqstatus = 0;
    /*
     * The implementation of this bit is dependent upon the value of the
     * CAP.SAM bit. If CAP.SAM is '0', then GHC.AE shall be read-write and
     * shall have a reset value of '0'. If CAP.SAM is '1', then AE shall be
     * read-only and shall have a reset value of '1'.
     *
     * We set HOST_CAP_AHCI so we must enable AHCI at reset.
     */
    s->control_regs.ghc = HOST_CTL_AHCI_EN;

    for (i = 0; i < s->ports; i++) {
        pr = &s->dev[i].port_regs;

        pr->cmd = PORT_CMD_SPIN_UP | PORT_CMD_POWER_ON;
        ahci_reset_port(s, i);
    }
}

static const VMStateDescription vmstate_ahci_device = {
    .name = "ahci port",
    .fields = (VMStateField[]) {
        VMSTATE_IDE_BUS(port, AHCIDevice),
        VMSTATE_IDE_DRIVE(port.ifs[0], AHCIDevice),
        VMSTATE_UINT32(port_state, AHCIDevice),
        VMSTATE_UINT32(finished, AHCIDevice),
        VMSTATE_UINT32(port_regs.lst_addr, AHCIDevice),
        VMSTATE_UINT32(port_regs.lst_addr_hi, AHCIDevice),
        VMSTATE_UINT32(port_regs.fis_addr, AHCIDevice),
        VMSTATE_UINT32(port_regs.fis_addr_hi, AHCIDevice),
        VMSTATE_UINT32(port_regs.irq_stat, AHCIDevice),
        VMSTATE_UINT32(port_regs.irq_mask, AHCIDevice),
        VMSTATE_UINT32(port_regs.cmd, AHCIDevice),
        VMSTATE_UINT32(port_regs.tfdata, AHCIDevice),
        VMSTATE_UINT32(port_regs.sig, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_stat, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_ctl, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_err, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_act, AHCIDevice),
        VMSTATE_UINT32(port_regs.cmd_issue, AHCIDevice),
        VMSTATE_BOOL(done_atapi_packet, AHCIDevice),
        VMSTATE_INT32(busy_slot, AHCIDevice),
        VMSTATE_BOOL(init_d2h_sent, AHCIDevice),
        VMSTATE_END_OF_LIST()
    },
};

static int ahci_state_post_load(void *opaque, int version_id)
{
    struct AHCIDevice *ad;
    AHCIState *s = opaque;

    for (i = 0; i < s->ports; i++) {
        ad = &s->dev[i];
        AHCIPortRegs *pr = &ad->port_regs;

        map_page(s->as, &ad->lst,
                 ((uint64_t)pr->lst_addr_hi << 32) | pr->lst_addr, 1024);
        map_page(s->as, &ad->res_fis,
                 ((uint64_t)pr->fis_addr_hi << 32) | pr->fis_addr, 256);

        /*
         * If an error is present, ad->busy_slot will be valid and not -1.
         * In this case, an operation is waiting to resume and will re-check
         * for additional AHCI commands to execute upon completion.
         *
         * In the case where no error was present, busy_slot will be -1,
         * and we should check to see if there are additional commands waiting.
         */
        if (ad->busy_slot == -1) {
            check_cmd(s, i);
        } else {
            /* We are in the middle of a command, and may need to access
             * the command header in guest memory again. */
            if (ad->busy_slot < 0 || ad->busy_slot >= AHCI_MAX_CMDS) {
                return -1;
            }
            ad->cur_cmd = &((AHCICmdHdr *)ad->lst)[ad->busy_slot];
        }
    }

    return 0;
}

const VMStateDescription vmstate_ahci = {
    .post_load = ahci_state_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(dev, AHCIState, ports,
                                            vmstate_ahci_device, AHCIDevice),
        VMSTATE_UINT32(control_regs.cap, AHCIState),
        VMSTATE_UINT32(control_regs.ghc, AHCIState),
        VMSTATE_UINT32(control_regs.irqstatus, AHCIState),
        VMSTATE_UINT32(control_regs.impl, AHCIState),
        VMSTATE_UINT32(control_regs.version, AHCIState),
        VMSTATE_UINT32(idp_index, AHCIState),
        VMSTATE_INT32_EQUAL(ports, AHCIState),
        VMSTATE_END_OF_LIST()
    },
};
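
/*
 * Sysbus-attached flavour of the AHCI controller: a thin wrapper that embeds
 * AHCIState, exposes its MMIO region and IRQ through sysbus, and makes the
 * number of ports configurable via the "num-ports" property.
 */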

#define TYPE_SYSBUS_AHCI "sysbus-ahci"
#define SYSBUS_AHCI(obj) OBJECT_CHECK(SysbusAHCIState, (obj), TYPE_SYSBUS_AHCI)

typedef struct SysbusAHCIState {
    SysBusDevice parent_obj;

    AHCIState ahci;
    uint32_t num_ports;
} SysbusAHCIState;

static const VMStateDescription vmstate_sysbus_ahci = {
    .name = "sysbus-ahci",
    .unmigratable = 1, /* Still buggy under I/O load */
    .fields = (VMStateField[]) {
        VMSTATE_AHCI(ahci, SysbusAHCIState),
        VMSTATE_END_OF_LIST()
    },
};

static void sysbus_ahci_reset(DeviceState *dev)
{
    SysbusAHCIState *s = SYSBUS_AHCI(dev);

    ahci_reset(&s->ahci);
}

static void sysbus_ahci_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysbusAHCIState *s = SYSBUS_AHCI(dev);

    ahci_init(&s->ahci, dev, &address_space_memory, s->num_ports);

    sysbus_init_mmio(sbd, &s->ahci.mem);
    sysbus_init_irq(sbd, &s->ahci.irq);
}

static Property sysbus_ahci_properties[] = {
    DEFINE_PROP_UINT32("num-ports", SysbusAHCIState, num_ports, 1),
    DEFINE_PROP_END_OF_LIST(),
};

static void sysbus_ahci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_ahci_realize;
    dc->vmsd = &vmstate_sysbus_ahci;
    dc->props = sysbus_ahci_properties;
    dc->reset = sysbus_ahci_reset;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo sysbus_ahci_info = {
    .name          = TYPE_SYSBUS_AHCI,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SysbusAHCIState),
    .class_init    = sysbus_ahci_class_init,
};

static void sysbus_ahci_register_types(void)
{
    type_register_static(&sysbus_ahci_info);
}

type_init(sysbus_ahci_register_types)

void ahci_ide_create_devs(PCIDevice *dev, DriveInfo **hd)
{
    AHCIPCIState *d = ICH_AHCI(dev);
    AHCIState *ahci = &d->ahci;

    for (i = 0; i < ahci->ports; i++) {
        if (hd[i] == NULL) {
            continue;
        }
        ide_create_drive(&ahci->dev[i].port, 0, hd[i]);
    }
}