 * Copyright (c) 2010 qiaochong@loongson.cn
 * Copyright (c) 2010 Roland Elek <elek.roland@gmail.com>
 * Copyright (c) 2010 Sebastian Herbszt <herbszt@gmx.de>
 * Copyright (c) 2010 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <hw/pci/msi.h>
#include <hw/i386/pc.h>
#include <hw/pci/pci.h>
#include <hw/sysbus.h>

#include "qemu/error-report.h"
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"

#include <hw/ide/pci.h>
#include <hw/ide/ahci.h>

#define DPRINTF(port, fmt, ...) \
do { fprintf(stderr, "ahci: %s: [%d] ", __func__, port); \
     fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)

static void check_cmd(AHCIState *s, int port);
static int handle_cmd(AHCIState *s, int port, int slot);
static void ahci_reset_port(AHCIState *s, int port);
static void ahci_write_fis_d2h(AHCIDevice *ad, uint8_t *cmd_fis);
static void ahci_init_d2h(AHCIDevice *ad);
static int32_t ahci_dma_prepare_buf(IDEDMA *dma, int32_t limit);
static void ahci_commit_buf(IDEDMA *dma, uint32_t tx_bytes);
static bool ahci_map_clb_address(AHCIDevice *ad);
static bool ahci_map_fis_address(AHCIDevice *ad);
static void ahci_unmap_clb_address(AHCIDevice *ad);
static void ahci_unmap_fis_address(AHCIDevice *ad);
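
/* Read one 32-bit register from the given port's register block. */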
static uint32_t ahci_port_read(AHCIState *s, int port, int offset)
{
    uint32_t val = 0;
    AHCIPortRegs *pr = &s->dev[port].port_regs;

    switch (offset) {
    case PORT_LST_ADDR_HI:
        val = pr->lst_addr_hi;
        break;
    case PORT_FIS_ADDR_HI:
        val = pr->fis_addr_hi;
        break;
    case PORT_SCR_STAT:
        if (s->dev[port].port.ifs[0].blk) {
            val = SATA_SCR_SSTATUS_DET_DEV_PRESENT_PHY_UP |
                  SATA_SCR_SSTATUS_SPD_GEN1 | SATA_SCR_SSTATUS_IPM_ACTIVE;
        } else {
            val = SATA_SCR_SSTATUS_DET_NODEV;
        }
        break;
    case PORT_SCR_ACT:
        pr->scr_act &= ~s->dev[port].finished;
        s->dev[port].finished = 0;
        val = pr->scr_act;
        break;
    }

    DPRINTF(port, "offset: 0x%x val: 0x%x\n", offset, val);
    return val;
}
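
/* Raise the HBA interrupt: via MSI when it is enabled on the PCI device,
 * otherwise via the legacy IRQ line. */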
static void ahci_irq_raise(AHCIState *s, AHCIDevice *dev)
{
    AHCIPCIState *d = container_of(s, AHCIPCIState, ahci);
    PCIDevice *pci_dev =
        (PCIDevice *)object_dynamic_cast(OBJECT(d), TYPE_PCI_DEVICE);

    DPRINTF(0, "raise irq\n");

    if (pci_dev && msi_enabled(pci_dev)) {
        msi_notify(pci_dev, 0);
    } else {
        qemu_irq_raise(s->irq);
    }
}

static void ahci_irq_lower(AHCIState *s, AHCIDevice *dev)
{
    AHCIPCIState *d = container_of(s, AHCIPCIState, ahci);
    PCIDevice *pci_dev =
        (PCIDevice *)object_dynamic_cast(OBJECT(d), TYPE_PCI_DEVICE);

    DPRINTF(0, "lower irq\n");

    if (!pci_dev || !msi_enabled(pci_dev)) {
        qemu_irq_lower(s->irq);
    }
}
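
/* Recompute the HBA-level interrupt status from each port's irq_stat and
 * irq_mask, then raise or lower the interrupt accordingly. */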
static void ahci_check_irq(AHCIState *s)
{
    int i;

    DPRINTF(-1, "check irq %#x\n", s->control_regs.irqstatus);

    s->control_regs.irqstatus = 0;
    for (i = 0; i < s->ports; i++) {
        AHCIPortRegs *pr = &s->dev[i].port_regs;
        if (pr->irq_stat & pr->irq_mask) {
            s->control_regs.irqstatus |= (1 << i);
        }
    }

    if (s->control_regs.irqstatus &&
        (s->control_regs.ghc & HOST_CTL_IRQ_EN)) {
        ahci_irq_raise(s, NULL);
    } else {
        ahci_irq_lower(s, NULL);
    }
}
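
/* Latch an interrupt cause bit in the port's irq_stat and re-evaluate the
 * HBA interrupt state. */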
static void ahci_trigger_irq(AHCIState *s, AHCIDevice *d,
                             int irq_type)
{
    DPRINTF(d->port_no, "trigger irq %#x -> %x\n",
            irq_type, d->port_regs.irq_mask & irq_type);

    d->port_regs.irq_stat |= irq_type;
    ahci_check_irq(s);
}
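
/* (Re)map a guest buffer for device access; any mapping already held in
 * *ptr is dropped first. */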
static void map_page(AddressSpace *as, uint8_t **ptr, uint64_t addr,
                     uint32_t wanted)
{
    hwaddr len = wanted;

    if (*ptr) {
        dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);
    }

    *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE);
    if (len < wanted) {
        dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);
        *ptr = NULL;
    }
}

/**
 * Check the cmd register to see if we should start or stop
 * the DMA or FIS RX engines.
 *
 * @ad: Device to engage.
 * @allow_stop: Allow device to transition from started to stopped?
 *   'no' is useful for migration post_load, which does not expect a transition.
 *
 * @return 0 on success, -1 on error.
 */
static int ahci_cond_start_engines(AHCIDevice *ad, bool allow_stop)
{
    AHCIPortRegs *pr = &ad->port_regs;

    if (pr->cmd & PORT_CMD_START) {
        if (ahci_map_clb_address(ad)) {
            pr->cmd |= PORT_CMD_LIST_ON;
        } else {
            error_report("AHCI: Failed to start DMA engine: "
                         "bad command list buffer address");
            return -1;
        }
    } else if (pr->cmd & PORT_CMD_LIST_ON) {
        if (allow_stop) {
            ahci_unmap_clb_address(ad);
            pr->cmd = pr->cmd & ~(PORT_CMD_LIST_ON);
        } else {
            error_report("AHCI: DMA engine should be off, "
                         "but appears to still be running");
            return -1;
        }
    }

    if (pr->cmd & PORT_CMD_FIS_RX) {
        if (ahci_map_fis_address(ad)) {
            pr->cmd |= PORT_CMD_FIS_ON;
        } else {
            error_report("AHCI: Failed to start FIS receive engine: "
                         "bad FIS receive buffer address");
            return -1;
        }
    } else if (pr->cmd & PORT_CMD_FIS_ON) {
        if (allow_stop) {
            ahci_unmap_fis_address(ad);
            pr->cmd = pr->cmd & ~(PORT_CMD_FIS_ON);
        } else {
            error_report("AHCI: FIS receive engine should be off, "
                         "but appears to still be running");
            return -1;
        }
    }

    return 0;
}
static void ahci_port_write(AHCIState *s, int port, int offset, uint32_t val)
{
    AHCIPortRegs *pr = &s->dev[port].port_regs;

    DPRINTF(port, "offset: 0x%x val: 0x%x\n", offset, val);
    switch (offset) {
    case PORT_LST_ADDR_HI:
        pr->lst_addr_hi = val;
        break;
    case PORT_FIS_ADDR_HI:
        pr->fis_addr_hi = val;
        break;
    case PORT_IRQ_STAT:
        pr->irq_stat &= ~val;
        ahci_check_irq(s);
        break;
    case PORT_IRQ_MASK:
        pr->irq_mask = val & 0xfdc000ff;
        ahci_check_irq(s);
        break;
    case PORT_CMD:
        /* Block any Read-only fields from being set;
         * including LIST_ON and FIS_ON. */
        pr->cmd = (pr->cmd & PORT_CMD_RO_MASK) | (val & ~PORT_CMD_RO_MASK);

        /* Check FIS RX and CLB engines, allow transition to false: */
        ahci_cond_start_engines(&s->dev[port], true);

        /* XXX usually the FIS would be pending on the bus here and
           issuing deferred until the OS enables FIS receival.
           Instead, we only submit it once - which works in most
           cases, but is a hack. */
        if ((pr->cmd & PORT_CMD_FIS_ON) &&
            !s->dev[port].init_d2h_sent) {
            ahci_init_d2h(&s->dev[port]);
            s->dev[port].init_d2h_sent = true;
        }

        check_cmd(s, port);
        break;
    case PORT_SCR_CTL:
        if (((pr->scr_ctl & AHCI_SCR_SCTL_DET) == 1) &&
            ((val & AHCI_SCR_SCTL_DET) == 0)) {
            ahci_reset_port(s, port);
        }
        pr->scr_ctl = val;
        break;
    case PORT_CMD_ISSUE:
        pr->cmd_issue |= val;
        check_cmd(s, port);
        break;
    default:
        break;
    }
}

static uint64_t ahci_mem_read_32(void *opaque, hwaddr addr)
{
    AHCIState *s = opaque;
    uint32_t val = 0;

    if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) {
        switch (addr) {
        case HOST_CAP:
            val = s->control_regs.cap;
            break;
        case HOST_CTL:
            val = s->control_regs.ghc;
            break;
        case HOST_IRQ_STAT:
            val = s->control_regs.irqstatus;
            break;
        case HOST_PORTS_IMPL:
            val = s->control_regs.impl;
            break;
        case HOST_VERSION:
            val = s->control_regs.version;
            break;
        }

        DPRINTF(-1, "(addr 0x%08X), val 0x%08X\n", (unsigned) addr, val);
    } else if ((addr >= AHCI_PORT_REGS_START_ADDR) &&
               (addr < (AHCI_PORT_REGS_START_ADDR +
                (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) {
        val = ahci_port_read(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7,
                             addr & AHCI_PORT_ADDR_OFFSET_MASK);
    }

    return val;
}

/**
 * AHCI 1.3 section 3 ("HBA Memory Registers")
 * Support unaligned 8/16/32 bit reads, and 64 bit aligned reads.
 * Caller is responsible for masking unwanted higher order bytes.
 */
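/*
 * Example: an 8-bit read at offset 0x02 reads the aligned dword at 0x00
 * (ofst = 2) and shifts it right by 16 bits; the caller then masks the
 * result down to the requested width.
 */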
static uint64_t ahci_mem_read(void *opaque, hwaddr addr, unsigned size)
{
    hwaddr aligned = addr & ~0x3;
    int ofst = addr - aligned;
    uint64_t lo = ahci_mem_read_32(opaque, aligned);
    uint64_t hi;

    /* if < 8 byte read does not cross 4 byte boundary */
    if (ofst + size <= 4) {
        return lo >> (ofst * 8);
    }
    g_assert_cmpint(size, >, 1);

    /* If the 64bit read is unaligned, we will produce undefined
     * results. AHCI does not support unaligned 64bit reads. */
    hi = ahci_mem_read_32(opaque, aligned + 4);
    return (hi << 32 | lo) >> (ofst * 8);
}

static void ahci_mem_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    AHCIState *s = opaque;

    /* Only aligned writes are allowed on AHCI */
    if (addr & 3) {
        fprintf(stderr, "ahci: Mis-aligned write to addr 0x"
                TARGET_FMT_plx "\n", addr);
        return;
    }

    if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) {
        DPRINTF(-1, "(addr 0x%08X), val 0x%08"PRIX64"\n", (unsigned) addr, val);

        switch (addr) {
        case HOST_CAP: /* R/WO, RO */
            /* FIXME handle R/WO */
            break;
        case HOST_CTL: /* R/W */
            if (val & HOST_CTL_RESET) {
                DPRINTF(-1, "HBA Reset\n");
                ahci_reset(s);
            } else {
                s->control_regs.ghc = (val & 0x3) | HOST_CTL_AHCI_EN;
                ahci_check_irq(s);
            }
            break;
        case HOST_IRQ_STAT: /* R/WC, RO */
            s->control_regs.irqstatus &= ~val;
            ahci_check_irq(s);
            break;
        case HOST_PORTS_IMPL: /* R/WO, RO */
            /* FIXME handle R/WO */
            break;
        case HOST_VERSION: /* RO */
            /* FIXME report write? */
            break;
        default:
            DPRINTF(-1, "write to unknown register 0x%x\n", (unsigned)addr);
        }
    } else if ((addr >= AHCI_PORT_REGS_START_ADDR) &&
               (addr < (AHCI_PORT_REGS_START_ADDR +
                (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) {
        ahci_port_write(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7,
                        addr & AHCI_PORT_ADDR_OFFSET_MASK, val);
    }
}

static const MemoryRegionOps ahci_mem_ops = {
    .read = ahci_mem_read,
    .write = ahci_mem_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static uint64_t ahci_idp_read(void *opaque, hwaddr addr,
                              unsigned size)
{
    AHCIState *s = opaque;

    if (addr == s->idp_offset) {
        /* index register */
        return s->idp_index;
    } else if (addr == s->idp_offset + 4) {
        /* data register - do memory read at location selected by index */
        return ahci_mem_read(opaque, s->idp_index, size);
    } else {
        return 0;
    }
}

static void ahci_idp_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    AHCIState *s = opaque;

    if (addr == s->idp_offset) {
        /* index register - mask off reserved bits */
        s->idp_index = (uint32_t)val & ((AHCI_MEM_BAR_SIZE - 1) & ~3);
    } else if (addr == s->idp_offset + 4) {
        /* data register - do memory write at location selected by index */
        ahci_mem_write(opaque, s->idp_index, val, size);
    }
}

static const MemoryRegionOps ahci_idp_ops = {
    .read = ahci_idp_read,
    .write = ahci_idp_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void ahci_reg_init(AHCIState *s)
{
    int i;

    s->control_regs.cap = (s->ports - 1) |
                          (AHCI_NUM_COMMAND_SLOTS << 8) |
                          (AHCI_SUPPORTED_SPEED_GEN1 << AHCI_SUPPORTED_SPEED) |
                          HOST_CAP_NCQ | HOST_CAP_AHCI;

    s->control_regs.impl = (1 << s->ports) - 1;

    s->control_regs.version = AHCI_VERSION_1_0;

    for (i = 0; i < s->ports; i++) {
        s->dev[i].port_state = STATE_RUN;
    }
}
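
/* Walk the port's 32 command slots and hand each issued command to
 * handle_cmd(); a slot's cmd_issue bit is cleared once it has been handled. */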
static void check_cmd(AHCIState *s, int port)
{
    AHCIPortRegs *pr = &s->dev[port].port_regs;
    uint8_t slot;

    if ((pr->cmd & PORT_CMD_START) && pr->cmd_issue) {
        for (slot = 0; (slot < 32) && pr->cmd_issue; slot++) {
            if ((pr->cmd_issue & (1U << slot)) &&
                !handle_cmd(s, port, slot)) {
                pr->cmd_issue &= ~(1U << slot);
            }
        }
    }
}

static void ahci_check_cmd_bh(void *opaque)
{
    AHCIDevice *ad = opaque;

    qemu_bh_delete(ad->check_bh);
    ad->check_bh = NULL;

    if ((ad->busy_slot != -1) &&
        !(ad->port.ifs[0].status & (BUSY_STAT|DRQ_STAT))) {
        /* no longer busy */
        ad->port_regs.cmd_issue &= ~(1 << ad->busy_slot);
        ad->busy_slot = -1;
    }

    check_cmd(ad->hba, ad->port_no);
}
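
/* Send the initial D2H Register FIS carrying the drive signature. */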
static void ahci_init_d2h(AHCIDevice *ad)
{
    uint8_t init_fis[20];
    IDEState *ide_state = &ad->port.ifs[0];

    memset(init_fis, 0, sizeof(init_fis));

    if (ide_state->drive_kind == IDE_CD) {
        init_fis[5] = ide_state->lcyl;
        init_fis[6] = ide_state->hcyl;
    }

    ahci_write_fis_d2h(ad, init_fis);
}
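
/* Reset a single port: reset its IDE bus, cancel any in-flight NCQ requests
 * and restore the signature and status registers of the attached drive. */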
static void ahci_reset_port(AHCIState *s, int port)
{
    AHCIDevice *d = &s->dev[port];
    AHCIPortRegs *pr = &d->port_regs;
    IDEState *ide_state = &d->port.ifs[0];
    int i;

    DPRINTF(port, "reset port\n");

    ide_bus_reset(&d->port);
    ide_state->ncq_queues = AHCI_MAX_CMDS;

    pr->sig = 0xFFFFFFFF;
    d->init_d2h_sent = false;

    ide_state = &s->dev[port].port.ifs[0];
    if (!ide_state->blk) {
        return;
    }

    /* reset ncq queue */
    for (i = 0; i < AHCI_MAX_CMDS; i++) {
        NCQTransferState *ncq_tfs = &s->dev[port].ncq_tfs[i];
        if (!ncq_tfs->used) {
            continue;
        }

        if (ncq_tfs->aiocb) {
            blk_aio_cancel(ncq_tfs->aiocb);
            ncq_tfs->aiocb = NULL;
        }

        /* Maybe we just finished the request thanks to blk_aio_cancel() */
        if (!ncq_tfs->used) {
            continue;
        }

        qemu_sglist_destroy(&ncq_tfs->sglist);
    }

    s->dev[port].port_state = STATE_RUN;
    if (!ide_state->blk) {
        ide_state->status = SEEK_STAT | WRERR_STAT;
    } else if (ide_state->drive_kind == IDE_CD) {
        pr->sig = SATA_SIGNATURE_CDROM;
        ide_state->lcyl = 0x14;
        ide_state->hcyl = 0xeb;
        DPRINTF(port, "set lcyl = %d\n", ide_state->lcyl);
        ide_state->status = SEEK_STAT | WRERR_STAT | READY_STAT;
    } else {
        pr->sig = SATA_SIGNATURE_DISK;
        ide_state->status = SEEK_STAT | WRERR_STAT;
    }

    ide_state->error = 1;
}

static void debug_print_fis(uint8_t *fis, int cmd_len)
{
    int i;

    fprintf(stderr, "fis:");
    for (i = 0; i < cmd_len; i++) {
        if ((i & 0xf) == 0) {
            fprintf(stderr, "\n%02x:", i);
        }
        fprintf(stderr, "%02x ", fis[i]);
    }
    fprintf(stderr, "\n");
}

static bool ahci_map_fis_address(AHCIDevice *ad)
{
    AHCIPortRegs *pr = &ad->port_regs;
    map_page(ad->hba->as, &ad->res_fis,
             ((uint64_t)pr->fis_addr_hi << 32) | pr->fis_addr, 256);
    return ad->res_fis != NULL;
}

static void ahci_unmap_fis_address(AHCIDevice *ad)
{
    dma_memory_unmap(ad->hba->as, ad->res_fis, 256,
                     DMA_DIRECTION_FROM_DEVICE, 256);
    ad->res_fis = NULL;
}

static bool ahci_map_clb_address(AHCIDevice *ad)
{
    AHCIPortRegs *pr = &ad->port_regs;
    map_page(ad->hba->as, &ad->lst,
             ((uint64_t)pr->lst_addr_hi << 32) | pr->lst_addr, 1024);
    return ad->lst != NULL;
}

static void ahci_unmap_clb_address(AHCIDevice *ad)
{
    dma_memory_unmap(ad->hba->as, ad->lst, 1024,
                     DMA_DIRECTION_FROM_DEVICE, 1024);
    ad->lst = NULL;
}
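
/* Post a Set Device Bits FIS to the FIS receive area; this signals NCQ
 * command completion, with 'finished' giving the completed SActive bits. */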
static void ahci_write_fis_sdb(AHCIState *s, int port, uint32_t finished)
{
    AHCIDevice *ad = &s->dev[port];
    AHCIPortRegs *pr = &ad->port_regs;
    IDEState *ide_state;
    SDBFIS *sdb_fis;

    if (!s->dev[port].res_fis ||
        !(pr->cmd & PORT_CMD_FIS_RX)) {
        return;
    }

    sdb_fis = (SDBFIS *)&ad->res_fis[RES_FIS_SDBFIS];
    ide_state = &ad->port.ifs[0];

    sdb_fis->type = SATA_FIS_TYPE_SDB;
    /* Interrupt pending & Notification bit */
    sdb_fis->flags = (ad->hba->control_regs.irqstatus ? (1 << 6) : 0);
    sdb_fis->status = ide_state->status & 0x77;
    sdb_fis->error = ide_state->error;
    /* update SAct field in SDB_FIS */
    s->dev[port].finished |= finished;
    sdb_fis->payload = cpu_to_le32(ad->finished);

    /* Update shadow registers (except BSY 0x80 and DRQ 0x08) */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
                 (ad->port.ifs[0].status & 0x77) |
                 (pr->tfdata & 0x88);

    ahci_trigger_irq(s, ad, PORT_IRQ_SDB_FIS);
}
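
/* Post a PIO Setup FIS describing the current PIO transfer of 'len' bytes. */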
static void ahci_write_fis_pio(AHCIDevice *ad, uint16_t len)
{
    AHCIPortRegs *pr = &ad->port_regs;
    uint8_t *pio_fis, *cmd_fis;
    uint64_t tbl_addr;
    dma_addr_t cmd_len = 0x80;
    IDEState *s = &ad->port.ifs[0];

    if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) {
        return;
    }

    /* map cmd_fis */
    tbl_addr = le64_to_cpu(ad->cur_cmd->tbl_addr);
    cmd_fis = dma_memory_map(ad->hba->as, tbl_addr, &cmd_len,
                             DMA_DIRECTION_TO_DEVICE);

    if (cmd_fis == NULL) {
        DPRINTF(ad->port_no, "dma_memory_map failed in ahci_write_fis_pio");
        ahci_trigger_irq(ad->hba, ad, PORT_IRQ_HBUS_ERR);
        return;
    }

    if (cmd_len != 0x80) {
        DPRINTF(ad->port_no,
                "dma_memory_map mapped too few bytes in ahci_write_fis_pio");
        dma_memory_unmap(ad->hba->as, cmd_fis, cmd_len,
                         DMA_DIRECTION_TO_DEVICE, cmd_len);
        ahci_trigger_irq(ad->hba, ad, PORT_IRQ_HBUS_ERR);
        return;
    }

    pio_fis = &ad->res_fis[RES_FIS_PSFIS];

    pio_fis[0] = SATA_FIS_TYPE_PIO_SETUP;
    pio_fis[1] = (ad->hba->control_regs.irqstatus ? (1 << 6) : 0);
    pio_fis[2] = s->status;
    pio_fis[3] = s->error;

    pio_fis[4] = s->sector;
    pio_fis[5] = s->lcyl;
    pio_fis[6] = s->hcyl;
    pio_fis[7] = s->select;
    pio_fis[8] = s->hob_sector;
    pio_fis[9] = s->hob_lcyl;
    pio_fis[10] = s->hob_hcyl;
    pio_fis[12] = cmd_fis[12];
    pio_fis[13] = cmd_fis[13];
    pio_fis[15] = s->status;
    pio_fis[16] = len & 255;
    pio_fis[17] = len >> 8;

    /* Update shadow registers: */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
                 ad->port.ifs[0].status;

    if (pio_fis[2] & ERR_STAT) {
        ahci_trigger_irq(ad->hba, ad, PORT_IRQ_TF_ERR);
    }

    ahci_trigger_irq(ad->hba, ad, PORT_IRQ_PIOS_FIS);

    dma_memory_unmap(ad->hba->as, cmd_fis, cmd_len,
                     DMA_DIRECTION_TO_DEVICE, cmd_len);
}
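
/* Post a D2H Register FIS reflecting the current task-file state. If
 * cmd_fis is NULL the command table is mapped to recover the original
 * command FIS bytes. */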
static void ahci_write_fis_d2h(AHCIDevice *ad, uint8_t *cmd_fis)
{
    AHCIPortRegs *pr = &ad->port_regs;
    uint8_t *d2h_fis;
    int i;
    dma_addr_t cmd_len = 0x80;
    int cmd_mapped = 0;
    IDEState *s = &ad->port.ifs[0];

    if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) {
        return;
    }

    if (!cmd_fis) {
        /* map cmd_fis */
        uint64_t tbl_addr = le64_to_cpu(ad->cur_cmd->tbl_addr);
        cmd_fis = dma_memory_map(ad->hba->as, tbl_addr, &cmd_len,
                                 DMA_DIRECTION_TO_DEVICE);
        cmd_mapped = 1;
    }

    d2h_fis = &ad->res_fis[RES_FIS_RFIS];

    d2h_fis[0] = SATA_FIS_TYPE_REGISTER_D2H;
    d2h_fis[1] = (ad->hba->control_regs.irqstatus ? (1 << 6) : 0);
    d2h_fis[2] = s->status;
    d2h_fis[3] = s->error;

    d2h_fis[4] = s->sector;
    d2h_fis[5] = s->lcyl;
    d2h_fis[6] = s->hcyl;
    d2h_fis[7] = s->select;
    d2h_fis[8] = s->hob_sector;
    d2h_fis[9] = s->hob_lcyl;
    d2h_fis[10] = s->hob_hcyl;
    d2h_fis[12] = cmd_fis[12];
    d2h_fis[13] = cmd_fis[13];
    for (i = 14; i < 20; i++) {
        d2h_fis[i] = 0;
    }

    /* Update shadow registers: */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
                 ad->port.ifs[0].status;

    if (d2h_fis[2] & ERR_STAT) {
        ahci_trigger_irq(ad->hba, ad, PORT_IRQ_TF_ERR);
    }

    ahci_trigger_irq(ad->hba, ad, PORT_IRQ_D2H_REG_FIS);

    if (cmd_mapped) {
        dma_memory_unmap(ad->hba->as, cmd_fis, cmd_len,
                         DMA_DIRECTION_TO_DEVICE, cmd_len);
    }
}

static int prdt_tbl_entry_size(const AHCI_SG *tbl)
{
    /* flags_size is zero-based */
    return (le32_to_cpu(tbl->flags_size) & AHCI_PRDT_SIZE_MASK) + 1;
}
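
/* Build a QEMUSGList from the command's PRDT, starting 'offset' bytes into
 * the region it describes and covering at most 'limit' bytes. */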
static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist,
                                int64_t limit, int32_t offset)
{
    AHCICmdHdr *cmd = ad->cur_cmd;
    uint16_t opts = le16_to_cpu(cmd->opts);
    uint16_t prdtl = le16_to_cpu(cmd->prdtl);
    uint64_t cfis_addr = le64_to_cpu(cmd->tbl_addr);
    uint64_t prdt_addr = cfis_addr + 0x80;
    dma_addr_t prdt_len = (prdtl * sizeof(AHCI_SG));
    dma_addr_t real_prdt_len = prdt_len;
    uint8_t *prdt;
    int i;
    int r = 0;
    uint64_t sum = 0;
    int off_idx = -1;
    int64_t off_pos = -1;
    int tbl_entry_size;
    IDEBus *bus = &ad->port;
    BusState *qbus = BUS(bus);

    /*
     * Note: AHCI PRDT can describe up to 256GiB. SATA/ATA only support
     * transactions of up to 32MiB as of ATA8-ACS3 rev 1b, assuming a
     * 512 byte sector size. We limit the PRDT in this implementation to
     * a reasonably large 2GiB, which can accommodate the maximum transfer
     * request for sector sizes up to 32K.
     */

    if (!prdtl) {
        DPRINTF(ad->port_no, "no sg list given by guest: 0x%08x\n", opts);
        return -1;
    }

    /* map PRDT */
    if (!(prdt = dma_memory_map(ad->hba->as, prdt_addr, &prdt_len,
                                DMA_DIRECTION_TO_DEVICE))) {
        DPRINTF(ad->port_no, "map failed\n");
        return -1;
    }

    if (prdt_len < real_prdt_len) {
        DPRINTF(ad->port_no, "mapped less than expected\n");
        r = -1;
        goto out;
    }

    /* Get entries in the PRDT, init a qemu sglist accordingly */
    if (prdtl > 0) {
        AHCI_SG *tbl = (AHCI_SG *)prdt;

        for (i = 0; i < prdtl; i++) {
            tbl_entry_size = prdt_tbl_entry_size(&tbl[i]);
            if (offset < (sum + tbl_entry_size)) {
                off_idx = i;
                off_pos = offset - sum;
                break;
            }
            sum += tbl_entry_size;
        }
        if ((off_idx == -1) || (off_pos < 0) || (off_pos > tbl_entry_size)) {
            DPRINTF(ad->port_no, "%s: Incorrect offset! "
                    "off_idx: %d, off_pos: %"PRId64"\n",
                    __func__, off_idx, off_pos);
            r = -1;
            goto out;
        }

        qemu_sglist_init(sglist, qbus->parent, (prdtl - off_idx),
                         ad->hba->as);
        qemu_sglist_add(sglist, le64_to_cpu(tbl[off_idx].addr) + off_pos,
                        MIN(prdt_tbl_entry_size(&tbl[off_idx]) - off_pos,
                            limit));

        for (i = off_idx + 1; i < prdtl && sglist->size < limit; i++) {
            qemu_sglist_add(sglist, le64_to_cpu(tbl[i].addr),
                            MIN(prdt_tbl_entry_size(&tbl[i]),
                                limit - sglist->size));
            if (sglist->size > INT32_MAX) {
                error_report("AHCI Physical Region Descriptor Table describes "
                             "more than 2 GiB.");
                qemu_sglist_destroy(sglist);
                r = -1;
                goto out;
            }
        }
    }

out:
    dma_memory_unmap(ad->hba->as, prdt, prdt_len,
                     DMA_DIRECTION_TO_DEVICE, prdt_len);
    return r;
}

static void ncq_err(NCQTransferState *ncq_tfs)
{
    IDEState *ide_state = &ncq_tfs->drive->port.ifs[0];

    ide_state->error = ABRT_ERR;
    ide_state->status = READY_STAT | ERR_STAT;
    ncq_tfs->drive->port_regs.scr_err |= (1 << ncq_tfs->tag);
}

static void ncq_cb(void *opaque, int ret)
{
    NCQTransferState *ncq_tfs = (NCQTransferState *)opaque;
    IDEState *ide_state = &ncq_tfs->drive->port.ifs[0];

    if (ret == -ECANCELED) {
        return;
    }
    /* Clear bit for this tag in SActive */
    ncq_tfs->drive->port_regs.scr_act &= ~(1 << ncq_tfs->tag);

    if (ret < 0) {
        ncq_err(ncq_tfs);
    } else {
        ide_state->status = READY_STAT | SEEK_STAT;
    }

    ahci_write_fis_sdb(ncq_tfs->drive->hba, ncq_tfs->drive->port_no,
                       (1 << ncq_tfs->tag));

    DPRINTF(ncq_tfs->drive->port_no, "NCQ transfer tag %d finished\n",
            ncq_tfs->tag);

    block_acct_done(blk_get_stats(ncq_tfs->drive->port.ifs[0].blk),
                    &ncq_tfs->acct);
    qemu_sglist_destroy(&ncq_tfs->sglist);
    ncq_tfs->used = 0;
}

static int is_ncq(uint8_t ata_cmd)
{
    /* Based on SATA 3.2 section 13.6.3.2 */
    switch (ata_cmd) {
    case READ_FPDMA_QUEUED:
    case WRITE_FPDMA_QUEUED:
    case RECEIVE_FPDMA_QUEUED:
    case SEND_FPDMA_QUEUED:
        return 1;
    default:
        return 0;
    }
}
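
/* Parse an NCQ (FPDMA) command FIS and start the corresponding asynchronous
 * read or write, tracked by the NCQ transfer state selected by its tag. */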
static void process_ncq_command(AHCIState *s, int port, uint8_t *cmd_fis,
                                int slot)
{
    AHCIDevice *ad = &s->dev[port];
    IDEState *ide_state = &ad->port.ifs[0];
    NCQFrame *ncq_fis = (NCQFrame *)cmd_fis;
    uint8_t tag = ncq_fis->tag >> 3;
    NCQTransferState *ncq_tfs = &ad->ncq_tfs[tag];
    size_t size;

    if (ncq_tfs->used) {
        /* error - already in use */
        fprintf(stderr, "%s: tag %d already used\n", __func__, tag);
        return;
    }

    ncq_tfs->slot = slot;
    ncq_tfs->lba = ((uint64_t)ncq_fis->lba5 << 40) |
                   ((uint64_t)ncq_fis->lba4 << 32) |
                   ((uint64_t)ncq_fis->lba3 << 24) |
                   ((uint64_t)ncq_fis->lba2 << 16) |
                   ((uint64_t)ncq_fis->lba1 << 8) |
                   (uint64_t)ncq_fis->lba0;

    /* Sanity-check the NCQ packet */
    if (tag != slot) {
        DPRINTF(port, "Warn: NCQ slot (%d) did not match the given tag (%d)\n",
                slot, tag);
    }

    if (ncq_fis->aux0 || ncq_fis->aux1 || ncq_fis->aux2 || ncq_fis->aux3) {
        DPRINTF(port, "Warn: Attempt to use NCQ auxiliary fields.\n");
    }
    if (ncq_fis->prio || ncq_fis->icc) {
        DPRINTF(port, "Warn: Unsupported attempt to use PRIO/ICC fields\n");
    }
    if (ncq_fis->fua & NCQ_FIS_FUA_MASK) {
        DPRINTF(port, "Warn: Unsupported attempt to use Force Unit Access\n");
    }
    if (ncq_fis->tag & NCQ_FIS_RARC_MASK) {
        DPRINTF(port, "Warn: Unsupported attempt to use Rebuild Assist\n");
    }

    ncq_tfs->sector_count = ((uint16_t)ncq_fis->sector_count_high << 8) |
                            ncq_fis->sector_count_low;
    size = ncq_tfs->sector_count * 512;
    ahci_populate_sglist(ad, &ncq_tfs->sglist, size, 0);

    if (ncq_tfs->sglist.size < size) {
        error_report("ahci: PRDT length for NCQ command (0x%zx) "
                     "is smaller than the requested size (0x%zx)",
                     ncq_tfs->sglist.size, size);
        qemu_sglist_destroy(&ncq_tfs->sglist);
        ahci_trigger_irq(ad->hba, ad, PORT_IRQ_OVERFLOW);
        return;
    } else if (ncq_tfs->sglist.size != size) {
        DPRINTF(port, "Warn: PRDTL (0x%zx)"
                " does not match requested size (0x%zx)",
                ncq_tfs->sglist.size, size);
    }

    DPRINTF(port, "NCQ transfer LBA from %"PRId64" to %"PRId64", "
            "drive max %"PRId64"\n",
            ncq_tfs->lba, ncq_tfs->lba + ncq_tfs->sector_count - 1,
            ide_state->nb_sectors - 1);

    switch (ncq_fis->command) {
    case READ_FPDMA_QUEUED:
        DPRINTF(port, "NCQ reading %d sectors from LBA %"PRId64", "
                "tag %d\n",
                ncq_tfs->sector_count, ncq_tfs->lba, ncq_tfs->tag);

        DPRINTF(port, "tag %d aio read %"PRId64"\n",
                ncq_tfs->tag, ncq_tfs->lba);

        dma_acct_start(ide_state->blk, &ncq_tfs->acct,
                       &ncq_tfs->sglist, BLOCK_ACCT_READ);
        ncq_tfs->aiocb = dma_blk_read(ide_state->blk,
                                      &ncq_tfs->sglist, ncq_tfs->lba,
                                      ncq_cb, ncq_tfs);
        break;
    case WRITE_FPDMA_QUEUED:
        DPRINTF(port, "NCQ writing %d sectors to LBA %"PRId64", tag %d\n",
                ncq_tfs->sector_count, ncq_tfs->lba, ncq_tfs->tag);

        DPRINTF(port, "tag %d aio write %"PRId64"\n",
                ncq_tfs->tag, ncq_tfs->lba);

        dma_acct_start(ide_state->blk, &ncq_tfs->acct,
                       &ncq_tfs->sglist, BLOCK_ACCT_WRITE);
        ncq_tfs->aiocb = dma_blk_write(ide_state->blk,
                                       &ncq_tfs->sglist, ncq_tfs->lba,
                                       ncq_cb, ncq_tfs);
        break;
    default:
        if (is_ncq(cmd_fis[2])) {
            DPRINTF(port,
                    "error: unsupported NCQ command (0x%02x) received\n",
                    cmd_fis[2]);
        } else {
            DPRINTF(port,
                    "error: tried to process non-NCQ command as NCQ\n");
        }
        qemu_sglist_destroy(&ncq_tfs->sglist);
    }
}
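
/* Handle a Register H2D FIS: either a device-control update (SRST handling)
 * or a command, which is decomposed into the IDE task-file registers and
 * passed to the core IDE layer for execution. */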
static void handle_reg_h2d_fis(AHCIState *s, int port,
                               int slot, uint8_t *cmd_fis)
{
    IDEState *ide_state = &s->dev[port].port.ifs[0];
    AHCICmdHdr *cmd = s->dev[port].cur_cmd;
    uint16_t opts = le16_to_cpu(cmd->opts);

    if (cmd_fis[1] & 0x0F) {
        DPRINTF(port, "Port Multiplier not supported."
                " cmd_fis[0]=%02x cmd_fis[1]=%02x cmd_fis[2]=%02x\n",
                cmd_fis[0], cmd_fis[1], cmd_fis[2]);
        return;
    }

    if (cmd_fis[1] & 0x70) {
        DPRINTF(port, "Reserved flags set in H2D Register FIS."
                " cmd_fis[0]=%02x cmd_fis[1]=%02x cmd_fis[2]=%02x\n",
                cmd_fis[0], cmd_fis[1], cmd_fis[2]);
        return;
    }

    if (!(cmd_fis[1] & SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER)) {
        switch (s->dev[port].port_state) {
        case STATE_RUN:
            if (cmd_fis[15] & ATA_SRST) {
                s->dev[port].port_state = STATE_RESET;
            }
            break;
        case STATE_RESET:
            if (!(cmd_fis[15] & ATA_SRST)) {
                ahci_reset_port(s, port);
            }
            break;
        }
        return;
    }

    /* Check for NCQ command */
    if (is_ncq(cmd_fis[2])) {
        process_ncq_command(s, port, cmd_fis, slot);
        return;
    }

    /* Decompose the FIS:
     * AHCI does not interpret FIS packets, it only forwards them.
     * SATA 1.0 describes how to decode LBA28 and CHS FIS packets.
     * Later specifications, e.g., SATA 3.2, describe LBA48 FIS packets.
     *
     * ATA4 describes sector number for LBA28/CHS commands.
     * ATA6 describes sector number for LBA48 commands.
     * ATA8 deprecates CHS fully, describing only LBA28/48.
     *
     * We dutifully convert the FIS into IDE registers, and allow the
     * core layer to interpret them as needed. */
    ide_state->feature = cmd_fis[3];
    ide_state->sector = cmd_fis[4];      /* LBA 7:0 */
    ide_state->lcyl = cmd_fis[5];        /* LBA 15:8 */
    ide_state->hcyl = cmd_fis[6];        /* LBA 23:16 */
    ide_state->select = cmd_fis[7];      /* LBA 27:24 (LBA28) */
    ide_state->hob_sector = cmd_fis[8];  /* LBA 31:24 */
    ide_state->hob_lcyl = cmd_fis[9];    /* LBA 39:32 */
    ide_state->hob_hcyl = cmd_fis[10];   /* LBA 47:40 */
    ide_state->hob_feature = cmd_fis[11];
    ide_state->nsector = (int64_t)((cmd_fis[13] << 8) | cmd_fis[12]);
    /* 14, 16, 17, 18, 19: Reserved (SATA 1.0) */
    /* 15: Only valid when UPDATE_COMMAND not set. */

    /* Copy the ACMD field (ATAPI packet, if any) from the AHCI command
     * table to ide_state->io_buffer */
    if (opts & AHCI_CMD_ATAPI) {
        memcpy(ide_state->io_buffer, &cmd_fis[AHCI_COMMAND_TABLE_ACMD], 0x10);
        debug_print_fis(ide_state->io_buffer, 0x10);
        s->dev[port].done_atapi_packet = false;
        /* XXX send PIO setup FIS */
    }

    ide_state->error = 0;

    /* Reset transferred byte counter */
    cmd->status = 0;

    /* We're ready to process the command in FIS byte 2. */
    ide_exec_cmd(&s->dev[port].port, cmd_fis[2]);
}
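
/* Process one command slot: map its command table, validate the FIS and
 * dispatch it. Returns 0 once the command has been handled, or -1 if the
 * engine is busy or the command could not be processed yet. */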
static int handle_cmd(AHCIState *s, int port, int slot)
{
    IDEState *ide_state;
    uint64_t tbl_addr;
    AHCICmdHdr *cmd;
    uint8_t *cmd_fis;
    dma_addr_t cmd_len;

    if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) {
        /* Engine currently busy, try again later */
        DPRINTF(port, "engine busy\n");
        return -1;
    }

    if (!s->dev[port].lst) {
        DPRINTF(port, "error: lst not given but cmd handled");
        return -1;
    }
    cmd = &((AHCICmdHdr *)s->dev[port].lst)[slot];
    /* remember current slot handle for later */
    s->dev[port].cur_cmd = cmd;

    /* The device we are working for */
    ide_state = &s->dev[port].port.ifs[0];
    if (!ide_state->blk) {
        DPRINTF(port, "error: guest accessed unused port");
        return -1;
    }

    tbl_addr = le64_to_cpu(cmd->tbl_addr);
    cmd_len = 0x80;
    cmd_fis = dma_memory_map(s->as, tbl_addr, &cmd_len,
                             DMA_DIRECTION_FROM_DEVICE);
    if (!cmd_fis) {
        DPRINTF(port, "error: guest passed us an invalid cmd fis\n");
        return -1;
    } else if (cmd_len != 0x80) {
        ahci_trigger_irq(s, &s->dev[port], PORT_IRQ_HBUS_ERR);
        DPRINTF(port, "error: dma_memory_map failed: "
                "(len(%02"PRIx64") != 0x80)\n",
                cmd_len);
        goto out;
    }
    debug_print_fis(cmd_fis, 0x80);

    switch (cmd_fis[0]) {
    case SATA_FIS_TYPE_REGISTER_H2D:
        handle_reg_h2d_fis(s, port, slot, cmd_fis);
        break;
    default:
        DPRINTF(port, "unknown command cmd_fis[0]=%02x cmd_fis[1]=%02x "
                      "cmd_fis[2]=%02x\n", cmd_fis[0], cmd_fis[1],
                      cmd_fis[2]);
        break;
    }

out:
    dma_memory_unmap(s->as, cmd_fis, cmd_len, DMA_DIRECTION_FROM_DEVICE,
                     cmd_len);

    if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) {
        /* async command, complete later */
        s->dev[port].busy_slot = slot;
        return -1;
    }

    /* done handling the command */
    return 0;
}

/* DMA dev <-> ram */
static void ahci_start_transfer(IDEDMA *dma)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];
    uint32_t size = (uint32_t)(s->data_end - s->data_ptr);
    /* write == ram -> device */
    uint16_t opts = le16_to_cpu(ad->cur_cmd->opts);
    int is_write = opts & AHCI_CMD_WRITE;
    int is_atapi = opts & AHCI_CMD_ATAPI;
    int has_sglist = 0;

    if (is_atapi && !ad->done_atapi_packet) {
        /* already prepopulated iobuffer */
        ad->done_atapi_packet = true;
        size = 0;
        goto out;
    }

    if (ahci_dma_prepare_buf(dma, size)) {
        has_sglist = 1;
    }

    DPRINTF(ad->port_no, "%sing %d bytes on %s w/%s sglist\n",
            is_write ? "writ" : "read", size, is_atapi ? "atapi" : "ata",
            has_sglist ? "" : "o");

    if (has_sglist && size) {
        if (is_write) {
            dma_buf_write(s->data_ptr, size, &s->sg);
        } else {
            dma_buf_read(s->data_ptr, size, &s->sg);
        }
    }

out:
    /* declare that we processed everything */
    s->data_ptr = s->data_end;

    /* Update number of transferred bytes, destroy sglist */
    ahci_commit_buf(dma, size);

    s->end_transfer_func(s);

    if (!(s->status & DRQ_STAT)) {
        /* done with PIO send/receive */
        ahci_write_fis_pio(ad, le32_to_cpu(ad->cur_cmd->status));
    }
}

static void ahci_start_dma(IDEDMA *dma, IDEState *s,
                           BlockCompletionFunc *dma_cb)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);

    DPRINTF(ad->port_no, "\n");
    s->io_buffer_offset = 0;
    dma_cb(s, 0);
}

static void ahci_restart_dma(IDEDMA *dma)
{
    /* Nothing to do, ahci_start_dma already resets s->io_buffer_offset. */
}

/**
 * Called in DMA R/W chains to read the PRDT, utilizing ahci_populate_sglist.
 * Not currently invoked by PIO R/W chains,
 * which invoke ahci_populate_sglist via ahci_start_transfer.
 */
static int32_t ahci_dma_prepare_buf(IDEDMA *dma, int32_t limit)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];

    if (ahci_populate_sglist(ad, &s->sg, limit, s->io_buffer_offset) == -1) {
        DPRINTF(ad->port_no, "ahci_dma_prepare_buf failed.\n");
        return -1;
    }
    s->io_buffer_size = s->sg.size;

    DPRINTF(ad->port_no, "len=%#x\n", s->io_buffer_size);
    return s->io_buffer_size;
}

/**
 * Destroys the scatter-gather list,
 * and updates the command header with a bytes-read value.
 * called explicitly via ahci_dma_rw_buf (ATAPI DMA),
 * and ahci_start_transfer (PIO R/W),
 * and called via callback from ide_dma_cb for DMA R/W paths.
 */
static void ahci_commit_buf(IDEDMA *dma, uint32_t tx_bytes)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];

    tx_bytes += le32_to_cpu(ad->cur_cmd->status);
    ad->cur_cmd->status = cpu_to_le32(tx_bytes);

    qemu_sglist_destroy(&s->sg);
}

static int ahci_dma_rw_buf(IDEDMA *dma, int is_write)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];
    uint8_t *p = s->io_buffer + s->io_buffer_index;
    int l = s->io_buffer_size - s->io_buffer_index;

    if (ahci_populate_sglist(ad, &s->sg, l, s->io_buffer_offset)) {
        return 0;
    }

    if (is_write) {
        dma_buf_read(p, l, &s->sg);
    } else {
        dma_buf_write(p, l, &s->sg);
    }

    /* free sglist, update byte count */
    ahci_commit_buf(dma, l);

    s->io_buffer_index += l;
    s->io_buffer_offset += l;

    DPRINTF(ad->port_no, "len=%#x\n", l);

    return 1;
}

static void ahci_cmd_done(IDEDMA *dma)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);

    DPRINTF(ad->port_no, "cmd done\n");

    /* update d2h status */
    ahci_write_fis_d2h(ad, NULL);

    if (!ad->check_bh) {
        /* maybe we still have something to process, check later */
        ad->check_bh = qemu_bh_new(ahci_check_cmd_bh, ad);
        qemu_bh_schedule(ad->check_bh);
    }
}

static void ahci_irq_set(void *opaque, int n, int level)
{
}

static const IDEDMAOps ahci_dma_ops = {
    .start_dma = ahci_start_dma,
    .restart_dma = ahci_restart_dma,
    .start_transfer = ahci_start_transfer,
    .prepare_buf = ahci_dma_prepare_buf,
    .commit_buf = ahci_commit_buf,
    .rw_buf = ahci_dma_rw_buf,
    .cmd_done = ahci_cmd_done,
};
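
/* Create and wire up the AHCI state: allocate the per-port devices,
 * register the MMIO regions and attach one IDE bus per port. */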
void ahci_init(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports)
{
    qemu_irq *irqs;
    int i;

    s->as = as;
    s->ports = ports;
    s->dev = g_new0(AHCIDevice, ports);
    ahci_reg_init(s);
    /* XXX BAR size should be 1k, but that breaks, so bump it to 4k for now */
    memory_region_init_io(&s->mem, OBJECT(qdev), &ahci_mem_ops, s,
                          "ahci", AHCI_MEM_BAR_SIZE);
    memory_region_init_io(&s->idp, OBJECT(qdev), &ahci_idp_ops, s,
                          "ahci-idp", 32);

    irqs = qemu_allocate_irqs(ahci_irq_set, s, s->ports);

    for (i = 0; i < s->ports; i++) {
        AHCIDevice *ad = &s->dev[i];

        ide_bus_new(&ad->port, sizeof(ad->port), qdev, i, 1);
        ide_init2(&ad->port, irqs[i]);

        ad->hba = s;
        ad->port_no = i;
        ad->port.dma = &ad->dma;
        ad->port.dma->ops = &ahci_dma_ops;
        ide_register_restart_cb(&ad->port);
    }
}
*s
)

void ahci_reset(AHCIState *s)
{
    AHCIPortRegs *pr;
    int i;

    s->control_regs.irqstatus = 0;
    /*
     * The implementation of this bit is dependent upon the value of the
     * CAP.SAM bit. If CAP.SAM is '0', then GHC.AE shall be read-write and
     * shall have a reset value of '0'. If CAP.SAM is '1', then AE shall be
     * read-only and shall have a reset value of '1'.
     *
     * We set HOST_CAP_AHCI so we must enable AHCI at reset.
     */
    s->control_regs.ghc = HOST_CTL_AHCI_EN;

    for (i = 0; i < s->ports; i++) {
        pr = &s->dev[i].port_regs;
        pr->cmd = PORT_CMD_SPIN_UP | PORT_CMD_POWER_ON;
        ahci_reset_port(s, i);
    }
}

static const VMStateDescription vmstate_ahci_device = {
    .name = "ahci port",
    .fields = (VMStateField[]) {
        VMSTATE_IDE_BUS(port, AHCIDevice),
        VMSTATE_IDE_DRIVE(port.ifs[0], AHCIDevice),
        VMSTATE_UINT32(port_state, AHCIDevice),
        VMSTATE_UINT32(finished, AHCIDevice),
        VMSTATE_UINT32(port_regs.lst_addr, AHCIDevice),
        VMSTATE_UINT32(port_regs.lst_addr_hi, AHCIDevice),
        VMSTATE_UINT32(port_regs.fis_addr, AHCIDevice),
        VMSTATE_UINT32(port_regs.fis_addr_hi, AHCIDevice),
        VMSTATE_UINT32(port_regs.irq_stat, AHCIDevice),
        VMSTATE_UINT32(port_regs.irq_mask, AHCIDevice),
        VMSTATE_UINT32(port_regs.cmd, AHCIDevice),
        VMSTATE_UINT32(port_regs.tfdata, AHCIDevice),
        VMSTATE_UINT32(port_regs.sig, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_stat, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_ctl, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_err, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_act, AHCIDevice),
        VMSTATE_UINT32(port_regs.cmd_issue, AHCIDevice),
        VMSTATE_BOOL(done_atapi_packet, AHCIDevice),
        VMSTATE_INT32(busy_slot, AHCIDevice),
        VMSTATE_BOOL(init_d2h_sent, AHCIDevice),
        VMSTATE_END_OF_LIST()
    },
};

static int ahci_state_post_load(void *opaque, int version_id)
{
    int i;
    struct AHCIDevice *ad;
    AHCIState *s = opaque;

    for (i = 0; i < s->ports; i++) {
        ad = &s->dev[i];

        /* Only remap the CLB address if appropriate; disallowing a state
         * transition from 'on' to 'off', it should be consistent here. */
        if (ahci_cond_start_engines(ad, false) != 0) {
            return -1;
        }

        /*
         * If an error is present, ad->busy_slot will be valid and not -1.
         * In this case, an operation is waiting to resume and will re-check
         * for additional AHCI commands to execute upon completion.
         *
         * In the case where no error was present, busy_slot will be -1,
         * and we should check to see if there are additional commands waiting.
         */
        if (ad->busy_slot == -1) {
            check_cmd(s, i);
        } else {
            /* We are in the middle of a command, and may need to access
             * the command header in guest memory again. */
            if (ad->busy_slot < 0 || ad->busy_slot >= AHCI_MAX_CMDS) {
                return -1;
            }
            ad->cur_cmd = &((AHCICmdHdr *)ad->lst)[ad->busy_slot];
        }
    }

    return 0;
}

const VMStateDescription vmstate_ahci = {
    .name = "ahci",
    .post_load = ahci_state_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(dev, AHCIState, ports,
                                            vmstate_ahci_device, AHCIDevice),
        VMSTATE_UINT32(control_regs.cap, AHCIState),
        VMSTATE_UINT32(control_regs.ghc, AHCIState),
        VMSTATE_UINT32(control_regs.irqstatus, AHCIState),
        VMSTATE_UINT32(control_regs.impl, AHCIState),
        VMSTATE_UINT32(control_regs.version, AHCIState),
        VMSTATE_UINT32(idp_index, AHCIState),
        VMSTATE_INT32_EQUAL(ports, AHCIState),
        VMSTATE_END_OF_LIST()
    },
};

#define TYPE_SYSBUS_AHCI "sysbus-ahci"
#define SYSBUS_AHCI(obj) OBJECT_CHECK(SysbusAHCIState, (obj), TYPE_SYSBUS_AHCI)

typedef struct SysbusAHCIState {
    SysBusDevice parent_obj;

    AHCIState ahci;
    uint32_t num_ports;
} SysbusAHCIState;

static const VMStateDescription vmstate_sysbus_ahci = {
    .name = "sysbus-ahci",
    .fields = (VMStateField[]) {
        VMSTATE_AHCI(ahci, SysbusAHCIState),
        VMSTATE_END_OF_LIST()
    },
};

static void sysbus_ahci_reset(DeviceState *dev)
{
    SysbusAHCIState *s = SYSBUS_AHCI(dev);

    ahci_reset(&s->ahci);
}

static void sysbus_ahci_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysbusAHCIState *s = SYSBUS_AHCI(dev);

    ahci_init(&s->ahci, dev, &address_space_memory, s->num_ports);

    sysbus_init_mmio(sbd, &s->ahci.mem);
    sysbus_init_irq(sbd, &s->ahci.irq);
}

static Property sysbus_ahci_properties[] = {
    DEFINE_PROP_UINT32("num-ports", SysbusAHCIState, num_ports, 1),
    DEFINE_PROP_END_OF_LIST(),
};

static void sysbus_ahci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_ahci_realize;
    dc->vmsd = &vmstate_sysbus_ahci;
    dc->props = sysbus_ahci_properties;
    dc->reset = sysbus_ahci_reset;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo sysbus_ahci_info = {
    .name          = TYPE_SYSBUS_AHCI,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SysbusAHCIState),
    .class_init    = sysbus_ahci_class_init,
};

static void sysbus_ahci_register_types(void)
{
    type_register_static(&sysbus_ahci_info);
}

type_init(sysbus_ahci_register_types)

void ahci_ide_create_devs(PCIDevice *dev, DriveInfo **hd)
{
    AHCIPCIState *d = ICH_AHCI(dev);
    AHCIState *ahci = &d->ahci;
    int i;

    for (i = 0; i < ahci->ports; i++) {
        if (hd[i] == NULL) {
            continue;
        }
        ide_create_drive(&ahci->dev[i].port, 0, hd[i]);
    }
}