 * Copyright (c) 2010 qiaochong@loongson.cn
 * Copyright (c) 2010 Roland Elek <elek.roland@gmail.com>
 * Copyright (c) 2010 Sebastian Herbszt <herbszt@gmx.de>
 * Copyright (c) 2010 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
#include "qemu/osdep.h"
#include <hw/pci/msi.h>
#include <hw/i386/pc.h>
#include <hw/pci/pci.h>

#include "qemu/error-report.h"
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"

#include <hw/ide/pci.h>
#include <hw/ide/ahci.h>
#define DPRINTF(port, fmt, ...) \
        fprintf(stderr, "ahci: %s: [%d] ", __func__, port); \
        fprintf(stderr, fmt, ## __VA_ARGS__); \
static void check_cmd(AHCIState *s, int port);
static int handle_cmd(AHCIState *s, int port, uint8_t slot);
static void ahci_reset_port(AHCIState *s, int port);
static bool ahci_write_fis_d2h(AHCIDevice *ad);
static void ahci_init_d2h(AHCIDevice *ad);
static int32_t ahci_dma_prepare_buf(IDEDMA *dma, int32_t limit);
static bool ahci_map_clb_address(AHCIDevice *ad);
static bool ahci_map_fis_address(AHCIDevice *ad);
static void ahci_unmap_clb_address(AHCIDevice *ad);
static void ahci_unmap_fis_address(AHCIDevice *ad);
static uint32_t ahci_port_read(AHCIState *s, int port, int offset)
    pr = &s->dev[port].port_regs;
    case PORT_LST_ADDR_HI:
        val = pr->lst_addr_hi;
    case PORT_FIS_ADDR_HI:
        val = pr->fis_addr_hi;
        if (s->dev[port].port.ifs[0].blk) {
            val = SATA_SCR_SSTATUS_DET_DEV_PRESENT_PHY_UP |
                  SATA_SCR_SSTATUS_SPD_GEN1 | SATA_SCR_SSTATUS_IPM_ACTIVE;
            val = SATA_SCR_SSTATUS_DET_NODEV;
    DPRINTF(port, "offset: 0x%x val: 0x%x\n", offset, val);
static void ahci_irq_raise(AHCIState *s, AHCIDevice *dev)
    DeviceState *dev_state = s->container;
    PCIDevice *pci_dev = (PCIDevice *) object_dynamic_cast(OBJECT(dev_state),

    DPRINTF(0, "raise irq\n");

    if (pci_dev && msi_enabled(pci_dev)) {
        msi_notify(pci_dev, 0);
        qemu_irq_raise(s->irq);
static void ahci_irq_lower(AHCIState *s, AHCIDevice *dev)
    DeviceState *dev_state = s->container;
    PCIDevice *pci_dev = (PCIDevice *) object_dynamic_cast(OBJECT(dev_state),

    DPRINTF(0, "lower irq\n");

    if (!pci_dev || !msi_enabled(pci_dev)) {
        qemu_irq_lower(s->irq);
static void ahci_check_irq(AHCIState *s)
    DPRINTF(-1, "check irq %#x\n", s->control_regs.irqstatus);

    s->control_regs.irqstatus = 0;
    for (i = 0; i < s->ports; i++) {
        AHCIPortRegs *pr = &s->dev[i].port_regs;
        if (pr->irq_stat & pr->irq_mask) {
            s->control_regs.irqstatus |= (1 << i);

    if (s->control_regs.irqstatus &&
        (s->control_regs.ghc & HOST_CTL_IRQ_EN)) {
        ahci_irq_raise(s, NULL);
        ahci_irq_lower(s, NULL);
static void ahci_trigger_irq(AHCIState *s, AHCIDevice *d,
    DPRINTF(d->port_no, "trigger irq %#x -> %x\n",
            irq_type, d->port_regs.irq_mask & irq_type);

    d->port_regs.irq_stat |= irq_type;
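
/* PxIS is updated above; ahci_check_irq() then folds each port's masked
 * status into the global IS register and raises the INTx/MSI line only when
 * PxIE and GHC.IE permit it. */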
static void map_page(AddressSpace *as, uint8_t **ptr, uint64_t addr,
        dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);

    *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE);
        dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);
 * Check the cmd register to see if we should start or stop
 * the DMA or FIS RX engines.
 *
 * @ad: Device to dis/engage.
 *
 * @return 0 on success, -1 on error.
 */
static int ahci_cond_start_engines(AHCIDevice *ad)
    AHCIPortRegs *pr = &ad->port_regs;
    bool cmd_start = pr->cmd & PORT_CMD_START;
    bool cmd_on    = pr->cmd & PORT_CMD_LIST_ON;
    bool fis_start = pr->cmd & PORT_CMD_FIS_RX;
    bool fis_on    = pr->cmd & PORT_CMD_FIS_ON;

    if (cmd_start && !cmd_on) {
        if (!ahci_map_clb_address(ad)) {
            pr->cmd &= ~PORT_CMD_START;
            error_report("AHCI: Failed to start DMA engine: "
                         "bad command list buffer address");
    } else if (!cmd_start && cmd_on) {
        ahci_unmap_clb_address(ad);

    if (fis_start && !fis_on) {
        if (!ahci_map_fis_address(ad)) {
            pr->cmd &= ~PORT_CMD_FIS_RX;
            error_report("AHCI: Failed to start FIS receive engine: "
                         "bad FIS receive buffer address");
    } else if (!fis_start && fis_on) {
        ahci_unmap_fis_address(ad);
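
/* ahci_cond_start_engines() is invoked from ahci_port_write() when the guest
 * toggles PxCMD.ST or PxCMD.FRE, and again from ahci_state_post_load() to
 * re-map the command list and received-FIS buffers after migration. */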
static void ahci_port_write(AHCIState *s, int port, int offset, uint32_t val)
    AHCIPortRegs *pr = &s->dev[port].port_regs;

    DPRINTF(port, "offset: 0x%x val: 0x%x\n", offset, val);
    case PORT_LST_ADDR_HI:
        pr->lst_addr_hi = val;
    case PORT_FIS_ADDR_HI:
        pr->fis_addr_hi = val;
        pr->irq_stat &= ~val;
        pr->irq_mask = val & 0xfdc000ff;
        /* Block any read-only fields from being set,
         * including LIST_ON and FIS_ON.
         * The spec requires the ICC bits to be set to zero after the ICC
         * change is done.  We don't support ICC state changes, therefore
         * always force the ICC bits to zero.
         */
        pr->cmd = (pr->cmd & PORT_CMD_RO_MASK) |
                  (val & ~(PORT_CMD_RO_MASK | PORT_CMD_ICC_MASK));

        /* Check FIS RX and CLB engines */
        ahci_cond_start_engines(&s->dev[port]);

        /* XXX usually the FIS would be pending on the bus here and
           issuing deferred until the OS enables FIS receival.
           Instead, we only submit it once - which works in most
           cases, but is a hack. */
        if ((pr->cmd & PORT_CMD_FIS_ON) &&
            !s->dev[port].init_d2h_sent) {
            ahci_init_d2h(&s->dev[port]);
        if (((pr->scr_ctl & AHCI_SCR_SCTL_DET) == 1) &&
            ((val & AHCI_SCR_SCTL_DET) == 0)) {
            ahci_reset_port(s, port);
        pr->cmd_issue |= val;
static uint64_t ahci_mem_read_32(void *opaque, hwaddr addr)
    AHCIState *s = opaque;

    if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) {
            val = s->control_regs.cap;
            val = s->control_regs.ghc;
            val = s->control_regs.irqstatus;
        case HOST_PORTS_IMPL:
            val = s->control_regs.impl;
            val = s->control_regs.version;
        DPRINTF(-1, "(addr 0x%08X), val 0x%08X\n", (unsigned) addr, val);
    } else if ((addr >= AHCI_PORT_REGS_START_ADDR) &&
               (addr < (AHCI_PORT_REGS_START_ADDR +
                        (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) {
        val = ahci_port_read(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7,
                             addr & AHCI_PORT_ADDR_OFFSET_MASK);
/*
 * AHCI 1.3 section 3 ("HBA Memory Registers")
 * Support unaligned 8/16/32 bit reads, and 64 bit aligned reads.
 * Caller is responsible for masking unwanted higher order bytes.
 */
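/* For example, a 16-bit read at offset 0x25 is satisfied by fetching the
 * 32-bit register at 0x24 and shifting the result right by one byte. */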
static uint64_t ahci_mem_read(void *opaque, hwaddr addr, unsigned size)
    hwaddr aligned = addr & ~0x3;
    int ofst = addr - aligned;
    uint64_t lo = ahci_mem_read_32(opaque, aligned);

    /* if < 8 byte read does not cross 4 byte boundary */
    if (ofst + size <= 4) {
        val = lo >> (ofst * 8);
        g_assert_cmpint(size, >, 1);

        /* If the 64bit read is unaligned, we will produce undefined
         * results. AHCI does not support unaligned 64bit reads. */
        hi = ahci_mem_read_32(opaque, aligned + 4);
        val = (hi << 32 | lo) >> (ofst * 8);

    DPRINTF(-1, "addr=0x%" HWADDR_PRIx " val=0x%" PRIx64 ", size=%d\n",
static void ahci_mem_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
    AHCIState *s = opaque;

    DPRINTF(-1, "addr=0x%" HWADDR_PRIx " val=0x%" PRIx64 ", size=%d\n",

    /* Only aligned writes are allowed on AHCI */
        fprintf(stderr, "ahci: Mis-aligned write to addr 0x"
                TARGET_FMT_plx "\n", addr);

    if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) {
        DPRINTF(-1, "(addr 0x%08X), val 0x%08"PRIX64"\n", (unsigned) addr, val);
        case HOST_CAP: /* R/WO, RO */
            /* FIXME handle R/WO */
        case HOST_CTL: /* R/W */
            if (val & HOST_CTL_RESET) {
                DPRINTF(-1, "HBA Reset\n");
                s->control_regs.ghc = (val & 0x3) | HOST_CTL_AHCI_EN;
        case HOST_IRQ_STAT: /* R/WC, RO */
            s->control_regs.irqstatus &= ~val;
        case HOST_PORTS_IMPL: /* R/WO, RO */
            /* FIXME handle R/WO */
        case HOST_VERSION: /* RO */
            /* FIXME report write? */
            DPRINTF(-1, "write to unknown register 0x%x\n", (unsigned)addr);
    } else if ((addr >= AHCI_PORT_REGS_START_ADDR) &&
               (addr < (AHCI_PORT_REGS_START_ADDR +
                        (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) {
        ahci_port_write(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7,
                        addr & AHCI_PORT_ADDR_OFFSET_MASK, val);
static const MemoryRegionOps ahci_mem_ops = {
    .read = ahci_mem_read,
    .write = ahci_mem_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
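
/* The "index-data pair" (IDP) registers below expose the same register file
 * through a two-register window: the guest latches a byte offset into the
 * index register and then accesses the data register, which is forwarded to
 * ahci_mem_read()/ahci_mem_write() at the latched offset. */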
static uint64_t ahci_idp_read(void *opaque, hwaddr addr,
    AHCIState *s = opaque;

    if (addr == s->idp_offset) {
    } else if (addr == s->idp_offset + 4) {
        /* data register - do memory read at location selected by index */
        return ahci_mem_read(opaque, s->idp_index, size);
static void ahci_idp_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
    AHCIState *s = opaque;

    if (addr == s->idp_offset) {
        /* index register - mask off reserved bits */
        s->idp_index = (uint32_t)val & ((AHCI_MEM_BAR_SIZE - 1) & ~3);
    } else if (addr == s->idp_offset + 4) {
        /* data register - do memory write at location selected by index */
        ahci_mem_write(opaque, s->idp_index, val, size);
static const MemoryRegionOps ahci_idp_ops = {
    .read = ahci_idp_read,
    .write = ahci_idp_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
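
/* Reminder on the CAP layout used below: bits 4:0 hold "number of ports - 1"
 * and bits 12:8 hold "number of command slots - 1", which is why s->ports - 1
 * and AHCI_NUM_COMMAND_SLOTS are programmed directly; PI then marks every
 * port as implemented. */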
static void ahci_reg_init(AHCIState *s)
    s->control_regs.cap = (s->ports - 1) |
                          (AHCI_NUM_COMMAND_SLOTS << 8) |
                          (AHCI_SUPPORTED_SPEED_GEN1 << AHCI_SUPPORTED_SPEED) |
                          HOST_CAP_NCQ | HOST_CAP_AHCI;

    s->control_regs.impl = (1 << s->ports) - 1;

    s->control_regs.version = AHCI_VERSION_1_0;

    for (i = 0; i < s->ports; i++) {
        s->dev[i].port_state = STATE_RUN;
static void check_cmd(AHCIState *s, int port)
    AHCIPortRegs *pr = &s->dev[port].port_regs;

    if ((pr->cmd & PORT_CMD_START) && pr->cmd_issue) {
        for (slot = 0; (slot < 32) && pr->cmd_issue; slot++) {
            if ((pr->cmd_issue & (1U << slot)) &&
                !handle_cmd(s, port, slot)) {
                pr->cmd_issue &= ~(1U << slot);
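
/* A zero return from handle_cmd() means the slot completed synchronously, so
 * its bit is cleared from PxCI above; slots that stay busy keep their bit set
 * until ahci_check_cmd_bh() revisits them. */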
static void ahci_check_cmd_bh(void *opaque)
    AHCIDevice *ad = opaque;

    qemu_bh_delete(ad->check_bh);

    if ((ad->busy_slot != -1) &&
        !(ad->port.ifs[0].status & (BUSY_STAT|DRQ_STAT))) {
        ad->port_regs.cmd_issue &= ~(1 << ad->busy_slot);

    check_cmd(ad->hba, ad->port_no);
static void ahci_init_d2h(AHCIDevice *ad)
    IDEState *ide_state = &ad->port.ifs[0];
    AHCIPortRegs *pr = &ad->port_regs;

    if (ad->init_d2h_sent) {

    if (ahci_write_fis_d2h(ad)) {
        ad->init_d2h_sent = true;
        /* We're emulating receiving the first Reg D2H FIS from the device;
         * Update the SIG register, but otherwise proceed as normal. */
        pr->sig = ((uint32_t)ide_state->hcyl << 24) |
                  (ide_state->lcyl << 16) |
                  (ide_state->sector << 8) |
                  (ide_state->nsector & 0xFF);
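
/* PxSIG thus carries LBA High:LBA Mid:LBA Low:Sector Count in its four bytes,
 * which is how a disk reports SATA_SIGNATURE_DISK (0x00000101) and an ATAPI
 * device reports SATA_SIGNATURE_CDROM (0xEB140101). */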
static void ahci_set_signature(AHCIDevice *ad, uint32_t sig)
    IDEState *s = &ad->port.ifs[0];
    s->hcyl = sig >> 24 & 0xFF;
    s->lcyl = sig >> 16 & 0xFF;
    s->sector = sig >> 8 & 0xFF;
    s->nsector = sig & 0xFF;

    DPRINTF(ad->port_no, "set hcyl:lcyl:sect:nsect = 0x%08x\n", sig);
static void ahci_reset_port(AHCIState *s, int port)
    AHCIDevice *d = &s->dev[port];
    AHCIPortRegs *pr = &d->port_regs;
    IDEState *ide_state = &d->port.ifs[0];

    DPRINTF(port, "reset port\n");

    ide_bus_reset(&d->port);
    ide_state->ncq_queues = AHCI_MAX_CMDS;

    pr->sig = 0xFFFFFFFF;

    d->init_d2h_sent = false;

    ide_state = &s->dev[port].port.ifs[0];
    if (!ide_state->blk) {

    /* reset ncq queue */
    for (i = 0; i < AHCI_MAX_CMDS; i++) {
        NCQTransferState *ncq_tfs = &s->dev[port].ncq_tfs[i];
        ncq_tfs->halt = false;
        if (!ncq_tfs->used) {

        if (ncq_tfs->aiocb) {
            blk_aio_cancel(ncq_tfs->aiocb);
            ncq_tfs->aiocb = NULL;

        /* Maybe we just finished the request thanks to blk_aio_cancel() */
        if (!ncq_tfs->used) {

        qemu_sglist_destroy(&ncq_tfs->sglist);

    s->dev[port].port_state = STATE_RUN;
    if (ide_state->drive_kind == IDE_CD) {
        ahci_set_signature(d, SATA_SIGNATURE_CDROM);
        ide_state->status = SEEK_STAT | WRERR_STAT | READY_STAT;
        ahci_set_signature(d, SATA_SIGNATURE_DISK);
        ide_state->status = SEEK_STAT | WRERR_STAT;
    ide_state->error = 1;
static void debug_print_fis(uint8_t *fis, int cmd_len)
    fprintf(stderr, "fis:");
    for (i = 0; i < cmd_len; i++) {
        if ((i & 0xf) == 0) {
            fprintf(stderr, "\n%02x:", i);
        fprintf(stderr, "%02x ", fis[i]);
    fprintf(stderr, "\n");
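
/* The 256-byte received-FIS area mapped below is laid out per the AHCI spec:
 * the PIO setup, D2H register and set-device-bits FISes live at fixed offsets
 * (RES_FIS_PSFIS, RES_FIS_RFIS, RES_FIS_SDBFIS), which is where the
 * ahci_write_fis_* helpers place them. */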
static bool ahci_map_fis_address(AHCIDevice *ad)
    AHCIPortRegs *pr = &ad->port_regs;
    map_page(ad->hba->as, &ad->res_fis,
             ((uint64_t)pr->fis_addr_hi << 32) | pr->fis_addr, 256);
    if (ad->res_fis != NULL) {
        pr->cmd |= PORT_CMD_FIS_ON;
    pr->cmd &= ~PORT_CMD_FIS_ON;
static void ahci_unmap_fis_address(AHCIDevice *ad)
    if (ad->res_fis == NULL) {
        DPRINTF(ad->port_no, "Attempt to unmap NULL FIS address\n");
    ad->port_regs.cmd &= ~PORT_CMD_FIS_ON;
    dma_memory_unmap(ad->hba->as, ad->res_fis, 256,
                     DMA_DIRECTION_FROM_DEVICE, 256);
static bool ahci_map_clb_address(AHCIDevice *ad)
    AHCIPortRegs *pr = &ad->port_regs;
    map_page(ad->hba->as, &ad->lst,
             ((uint64_t)pr->lst_addr_hi << 32) | pr->lst_addr, 1024);
    if (ad->lst != NULL) {
        pr->cmd |= PORT_CMD_LIST_ON;
    pr->cmd &= ~PORT_CMD_LIST_ON;
static void ahci_unmap_clb_address(AHCIDevice *ad)
    if (ad->lst == NULL) {
        DPRINTF(ad->port_no, "Attempt to unmap NULL CLB address\n");
    ad->port_regs.cmd &= ~PORT_CMD_LIST_ON;
    dma_memory_unmap(ad->hba->as, ad->lst, 1024,
                     DMA_DIRECTION_FROM_DEVICE, 1024);
static void ahci_write_fis_sdb(AHCIState *s, NCQTransferState *ncq_tfs)
    AHCIDevice *ad = ncq_tfs->drive;
    AHCIPortRegs *pr = &ad->port_regs;
        !(pr->cmd & PORT_CMD_FIS_RX)) {

    sdb_fis = (SDBFIS *)&ad->res_fis[RES_FIS_SDBFIS];
    ide_state = &ad->port.ifs[0];

    sdb_fis->type = SATA_FIS_TYPE_SDB;
    /* Interrupt pending & Notification bit */
    sdb_fis->flags = 0x40; /* Interrupt bit, always 1 for NCQ */
    sdb_fis->status = ide_state->status & 0x77;
    sdb_fis->error = ide_state->error;
    /* update SAct field in SDB_FIS */
    sdb_fis->payload = cpu_to_le32(ad->finished);

    /* Update shadow registers (except BSY 0x80 and DRQ 0x08) */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
                 (ad->port.ifs[0].status & 0x77) |
    pr->scr_act &= ~ad->finished;

    /* Trigger IRQ if interrupt bit is set (which currently, it always is) */
    if (sdb_fis->flags & 0x40) {
        ahci_trigger_irq(s, ad, PORT_IRQ_SDB_FIS);
static void ahci_write_fis_pio(AHCIDevice *ad, uint16_t len)
    AHCIPortRegs *pr = &ad->port_regs;
    IDEState *s = &ad->port.ifs[0];

    if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) {

    pio_fis = &ad->res_fis[RES_FIS_PSFIS];

    pio_fis[0] = SATA_FIS_TYPE_PIO_SETUP;
    pio_fis[1] = (ad->hba->control_regs.irqstatus ? (1 << 6) : 0);
    pio_fis[2] = s->status;
    pio_fis[3] = s->error;

    pio_fis[4] = s->sector;
    pio_fis[5] = s->lcyl;
    pio_fis[6] = s->hcyl;
    pio_fis[7] = s->select;
    pio_fis[8] = s->hob_sector;
    pio_fis[9] = s->hob_lcyl;
    pio_fis[10] = s->hob_hcyl;
    pio_fis[12] = s->nsector & 0xFF;
    pio_fis[13] = (s->nsector >> 8) & 0xFF;
    pio_fis[15] = s->status;
    pio_fis[16] = len & 255;
    pio_fis[17] = len >> 8;

    /* Update shadow registers: */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
                 ad->port.ifs[0].status;

    if (pio_fis[2] & ERR_STAT) {
        ahci_trigger_irq(ad->hba, ad, PORT_IRQ_TF_ERR);

    ahci_trigger_irq(ad->hba, ad, PORT_IRQ_PIOS_FIS);
static bool ahci_write_fis_d2h(AHCIDevice *ad)
    AHCIPortRegs *pr = &ad->port_regs;
    IDEState *s = &ad->port.ifs[0];

    if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) {

    d2h_fis = &ad->res_fis[RES_FIS_RFIS];

    d2h_fis[0] = SATA_FIS_TYPE_REGISTER_D2H;
    d2h_fis[1] = (ad->hba->control_regs.irqstatus ? (1 << 6) : 0);
    d2h_fis[2] = s->status;
    d2h_fis[3] = s->error;

    d2h_fis[4] = s->sector;
    d2h_fis[5] = s->lcyl;
    d2h_fis[6] = s->hcyl;
    d2h_fis[7] = s->select;
    d2h_fis[8] = s->hob_sector;
    d2h_fis[9] = s->hob_lcyl;
    d2h_fis[10] = s->hob_hcyl;
    d2h_fis[12] = s->nsector & 0xFF;
    d2h_fis[13] = (s->nsector >> 8) & 0xFF;
    for (i = 14; i < 20; i++) {

    /* Update shadow registers: */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
                 ad->port.ifs[0].status;

    if (d2h_fis[2] & ERR_STAT) {
        ahci_trigger_irq(ad->hba, ad, PORT_IRQ_TF_ERR);

    ahci_trigger_irq(ad->hba, ad, PORT_IRQ_D2H_REG_FIS);
static int prdt_tbl_entry_size(const AHCI_SG *tbl)
    /* flags_size is zero-based */
    return (le32_to_cpu(tbl->flags_size) & AHCI_PRDT_SIZE_MASK) + 1;
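
/* For example, a stored flags_size byte count of 0x1ff describes a 512-byte
 * region once the +1 adjustment above is applied. */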
/**
 * Fetch entries in a guest-provided PRDT and convert it into a QEMU SGlist.
 * @ad: The AHCIDevice for whom we are building the SGList.
 * @sglist: The SGList target to add PRD entries to.
 * @cmd: The AHCI Command Header that describes where the PRDT is.
 * @limit: The remaining size of the S/ATA transaction, in bytes.
 * @offset: The number of bytes already transferred, in bytes.
 *
 * The AHCI PRDT can describe up to 256GiB. S/ATA only supports transactions
 * of up to 32MiB as of ATA8-ACS3 rev 1b, assuming a 512 byte sector size. We
 * stop building the sglist from the PRDT as soon as we hit @limit bytes,
 * which is <= INT32_MAX/2GiB.
 */
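
/* When a transfer resumes partway through a command (the DMA and PIO paths
 * pass s->io_buffer_offset as @offset), the PRDT space that was already
 * consumed is skipped and the sglist is built from the remainder, still
 * capped at @limit. */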
static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist,
                                AHCICmdHdr *cmd, int64_t limit, uint64_t offset)
    uint16_t opts = le16_to_cpu(cmd->opts);
    uint16_t prdtl = le16_to_cpu(cmd->prdtl);
    uint64_t cfis_addr = le64_to_cpu(cmd->tbl_addr);
    uint64_t prdt_addr = cfis_addr + 0x80;
    dma_addr_t prdt_len = (prdtl * sizeof(AHCI_SG));
    dma_addr_t real_prdt_len = prdt_len;
    int64_t off_pos = -1;
    IDEBus *bus = &ad->port;
    BusState *qbus = BUS(bus);

        DPRINTF(ad->port_no, "no sg list given by guest: 0x%08x\n", opts);

    if (!(prdt = dma_memory_map(ad->hba->as, prdt_addr, &prdt_len,
                                DMA_DIRECTION_TO_DEVICE))) {
        DPRINTF(ad->port_no, "map failed\n");

    if (prdt_len < real_prdt_len) {
        DPRINTF(ad->port_no, "mapped less than expected\n");

    /* Get entries in the PRDT, init a qemu sglist accordingly */
        AHCI_SG *tbl = (AHCI_SG *)prdt;

        for (i = 0; i < prdtl; i++) {
            tbl_entry_size = prdt_tbl_entry_size(&tbl[i]);
            if (offset < (sum + tbl_entry_size)) {
                off_pos = offset - sum;
            sum += tbl_entry_size;

        if ((off_idx == -1) || (off_pos < 0) || (off_pos > tbl_entry_size)) {
            DPRINTF(ad->port_no, "%s: Incorrect offset! "
                    "off_idx: %d, off_pos: %"PRId64"\n",
                    __func__, off_idx, off_pos);

        qemu_sglist_init(sglist, qbus->parent, (prdtl - off_idx),
        qemu_sglist_add(sglist, le64_to_cpu(tbl[off_idx].addr) + off_pos,
                        MIN(prdt_tbl_entry_size(&tbl[off_idx]) - off_pos,

        for (i = off_idx + 1; i < prdtl && sglist->size < limit; i++) {
            qemu_sglist_add(sglist, le64_to_cpu(tbl[i].addr),
                            MIN(prdt_tbl_entry_size(&tbl[i]),
                                limit - sglist->size));

    dma_memory_unmap(ad->hba->as, prdt, prdt_len,
                     DMA_DIRECTION_TO_DEVICE, prdt_len);

static void ncq_err(NCQTransferState *ncq_tfs)
    IDEState *ide_state = &ncq_tfs->drive->port.ifs[0];

    ide_state->error = ABRT_ERR;
    ide_state->status = READY_STAT | ERR_STAT;
    ncq_tfs->drive->port_regs.scr_err |= (1 << ncq_tfs->tag);

static void ncq_finish(NCQTransferState *ncq_tfs)
    /* If we didn't error out, set our finished bit. Errored commands
     * do not get a bit set for the SDB FIS ACT register, nor do they
     * clear the outstanding bit in scr_act (PxSACT). */
    if (!(ncq_tfs->drive->port_regs.scr_err & (1 << ncq_tfs->tag))) {
        ncq_tfs->drive->finished |= (1 << ncq_tfs->tag);

    ahci_write_fis_sdb(ncq_tfs->drive->hba, ncq_tfs);

    DPRINTF(ncq_tfs->drive->port_no, "NCQ transfer tag %d finished\n",

    block_acct_done(blk_get_stats(ncq_tfs->drive->port.ifs[0].blk),

    qemu_sglist_destroy(&ncq_tfs->sglist);

static void ncq_cb(void *opaque, int ret)
    NCQTransferState *ncq_tfs = (NCQTransferState *)opaque;
    IDEState *ide_state = &ncq_tfs->drive->port.ifs[0];

    if (ret == -ECANCELED) {
        bool is_read = ncq_tfs->cmd == READ_FPDMA_QUEUED;
        BlockErrorAction action = blk_get_error_action(ide_state->blk,
        if (action == BLOCK_ERROR_ACTION_STOP) {
            ncq_tfs->halt = true;
            ide_state->bus->error_status = IDE_RETRY_HBA;
        } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        blk_error_action(ide_state->blk, action, is_read, -ret);
        ide_state->status = READY_STAT | SEEK_STAT;

    if (!ncq_tfs->halt) {

static int is_ncq(uint8_t ata_cmd)
    /* Based on SATA 3.2 section 13.6.3.2 */
    case READ_FPDMA_QUEUED:
    case WRITE_FPDMA_QUEUED:
    case RECEIVE_FPDMA_QUEUED:
    case SEND_FPDMA_QUEUED:

static void execute_ncq_command(NCQTransferState *ncq_tfs)
    AHCIDevice *ad = ncq_tfs->drive;
    IDEState *ide_state = &ad->port.ifs[0];
    int port = ad->port_no;

    g_assert(is_ncq(ncq_tfs->cmd));
    ncq_tfs->halt = false;

    switch (ncq_tfs->cmd) {
    case READ_FPDMA_QUEUED:
        DPRINTF(port, "NCQ reading %d sectors from LBA %"PRId64", tag %d\n",
                ncq_tfs->sector_count, ncq_tfs->lba, ncq_tfs->tag);

        DPRINTF(port, "tag %d aio read %"PRId64"\n",
                ncq_tfs->tag, ncq_tfs->lba);

        dma_acct_start(ide_state->blk, &ncq_tfs->acct,
                       &ncq_tfs->sglist, BLOCK_ACCT_READ);
        ncq_tfs->aiocb = dma_blk_read(ide_state->blk, &ncq_tfs->sglist,
                                      ncq_tfs->lba << BDRV_SECTOR_BITS,
    case WRITE_FPDMA_QUEUED:
        DPRINTF(port, "NCQ writing %d sectors to LBA %"PRId64", tag %d\n",
                ncq_tfs->sector_count, ncq_tfs->lba, ncq_tfs->tag);

        DPRINTF(port, "tag %d aio write %"PRId64"\n",
                ncq_tfs->tag, ncq_tfs->lba);

        dma_acct_start(ide_state->blk, &ncq_tfs->acct,
                       &ncq_tfs->sglist, BLOCK_ACCT_WRITE);
        ncq_tfs->aiocb = dma_blk_write(ide_state->blk, &ncq_tfs->sglist,
                                       ncq_tfs->lba << BDRV_SECTOR_BITS,
        DPRINTF(port, "error: unsupported NCQ command (0x%02x) received\n",
        qemu_sglist_destroy(&ncq_tfs->sglist);

static void process_ncq_command(AHCIState *s, int port, uint8_t *cmd_fis,
    AHCIDevice *ad = &s->dev[port];
    IDEState *ide_state = &ad->port.ifs[0];
    NCQFrame *ncq_fis = (NCQFrame *)cmd_fis;
    uint8_t tag = ncq_fis->tag >> 3;
    NCQTransferState *ncq_tfs = &ad->ncq_tfs[tag];

    g_assert(is_ncq(ncq_fis->command));
    if (ncq_tfs->used) {
        /* error - already in use */
        fprintf(stderr, "%s: tag %d already used\n", __func__, tag);

    ncq_tfs->drive = ad;
    ncq_tfs->slot = slot;
    ncq_tfs->cmdh = &((AHCICmdHdr *)ad->lst)[slot];
    ncq_tfs->cmd = ncq_fis->command;
    ncq_tfs->lba = ((uint64_t)ncq_fis->lba5 << 40) |
                   ((uint64_t)ncq_fis->lba4 << 32) |
                   ((uint64_t)ncq_fis->lba3 << 24) |
                   ((uint64_t)ncq_fis->lba2 << 16) |
                   ((uint64_t)ncq_fis->lba1 << 8) |
                   (uint64_t)ncq_fis->lba0;

    /* Sanity-check the NCQ packet */
        DPRINTF(port, "Warn: NCQ slot (%d) did not match the given tag (%d)\n",

    if (ncq_fis->aux0 || ncq_fis->aux1 || ncq_fis->aux2 || ncq_fis->aux3) {
        DPRINTF(port, "Warn: Attempt to use NCQ auxiliary fields.\n");
    if (ncq_fis->prio || ncq_fis->icc) {
        DPRINTF(port, "Warn: Unsupported attempt to use PRIO/ICC fields\n");
    if (ncq_fis->fua & NCQ_FIS_FUA_MASK) {
        DPRINTF(port, "Warn: Unsupported attempt to use Force Unit Access\n");
    if (ncq_fis->tag & NCQ_FIS_RARC_MASK) {
        DPRINTF(port, "Warn: Unsupported attempt to use Rebuild Assist\n");

    ncq_tfs->sector_count = ((ncq_fis->sector_count_high << 8) |
                             ncq_fis->sector_count_low);
    if (!ncq_tfs->sector_count) {
        ncq_tfs->sector_count = 0x10000;
    size = ncq_tfs->sector_count * 512;
    ahci_populate_sglist(ad, &ncq_tfs->sglist, ncq_tfs->cmdh, size, 0);

    if (ncq_tfs->sglist.size < size) {
        error_report("ahci: PRDT length for NCQ command (0x%zx) "
                     "is smaller than the requested size (0x%zx)",
                     ncq_tfs->sglist.size, size);
        qemu_sglist_destroy(&ncq_tfs->sglist);
        ahci_trigger_irq(ad->hba, ad, PORT_IRQ_OVERFLOW);
    } else if (ncq_tfs->sglist.size != size) {
        DPRINTF(port, "Warn: PRDTL (0x%zx)"
                " does not match requested size (0x%zx)",
                ncq_tfs->sglist.size, size);

    DPRINTF(port, "NCQ transfer LBA from %"PRId64" to %"PRId64", "
            "drive max %"PRId64"\n",
            ncq_tfs->lba, ncq_tfs->lba + ncq_tfs->sector_count - 1,
            ide_state->nb_sectors - 1);

    execute_ncq_command(ncq_tfs);

static AHCICmdHdr *get_cmd_header(AHCIState *s, uint8_t port, uint8_t slot)
    if (port >= s->ports || slot >= AHCI_MAX_CMDS) {

    return s->dev[port].lst ? &((AHCICmdHdr *)s->dev[port].lst)[slot] : NULL;

static void handle_reg_h2d_fis(AHCIState *s, int port,
                               uint8_t slot, uint8_t *cmd_fis)
    IDEState *ide_state = &s->dev[port].port.ifs[0];
    AHCICmdHdr *cmd = get_cmd_header(s, port, slot);
    uint16_t opts = le16_to_cpu(cmd->opts);

    if (cmd_fis[1] & 0x0F) {
        DPRINTF(port, "Port Multiplier not supported."
                " cmd_fis[0]=%02x cmd_fis[1]=%02x cmd_fis[2]=%02x\n",
                cmd_fis[0], cmd_fis[1], cmd_fis[2]);

    if (cmd_fis[1] & 0x70) {
        DPRINTF(port, "Reserved flags set in H2D Register FIS."
                " cmd_fis[0]=%02x cmd_fis[1]=%02x cmd_fis[2]=%02x\n",
                cmd_fis[0], cmd_fis[1], cmd_fis[2]);

    if (!(cmd_fis[1] & SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER)) {
        switch (s->dev[port].port_state) {
            if (cmd_fis[15] & ATA_SRST) {
                s->dev[port].port_state = STATE_RESET;
            if (!(cmd_fis[15] & ATA_SRST)) {
                ahci_reset_port(s, port);

    /* Check for NCQ command */
    if (is_ncq(cmd_fis[2])) {
        process_ncq_command(s, port, cmd_fis, slot);

    /* Decompose the FIS:
     * AHCI does not interpret FIS packets, it only forwards them.
     * SATA 1.0 describes how to decode LBA28 and CHS FIS packets.
     * Later specifications, e.g, SATA 3.2, describe LBA48 FIS packets.
     *
     * ATA4 describes sector number for LBA28/CHS commands.
     * ATA6 describes sector number for LBA48 commands.
     * ATA8 deprecates CHS fully, describing only LBA28/48.
     *
     * We dutifully convert the FIS into IDE registers, and allow the
     * core layer to interpret them as needed. */
    ide_state->feature = cmd_fis[3];
    ide_state->sector = cmd_fis[4];      /* LBA 7:0 */
    ide_state->lcyl = cmd_fis[5];        /* LBA 15:8 */
    ide_state->hcyl = cmd_fis[6];        /* LBA 23:16 */
    ide_state->select = cmd_fis[7];      /* LBA 27:24 (LBA28) */
    ide_state->hob_sector = cmd_fis[8];  /* LBA 31:24 */
    ide_state->hob_lcyl = cmd_fis[9];    /* LBA 39:32 */
    ide_state->hob_hcyl = cmd_fis[10];   /* LBA 47:40 */
    ide_state->hob_feature = cmd_fis[11];
    ide_state->nsector = (int64_t)((cmd_fis[13] << 8) | cmd_fis[12]);
    /* 14, 16, 17, 18, 19: Reserved (SATA 1.0) */
    /* 15: Only valid when UPDATE_COMMAND not set. */

    /* Copy the ACMD field (ATAPI packet, if any) from the AHCI command
     * table to ide_state->io_buffer */
    if (opts & AHCI_CMD_ATAPI) {
        memcpy(ide_state->io_buffer, &cmd_fis[AHCI_COMMAND_TABLE_ACMD], 0x10);
        debug_print_fis(ide_state->io_buffer, 0x10);
        s->dev[port].done_atapi_packet = false;
        /* XXX send PIO setup FIS */

    ide_state->error = 0;

    /* Reset transferred byte counter */

    /* We're ready to process the command in FIS byte 2. */
    ide_exec_cmd(&s->dev[port].port, cmd_fis[2]);

static int handle_cmd(AHCIState *s, int port, uint8_t slot)
    IDEState *ide_state;

    if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) {
        /* Engine currently busy, try again later */
        DPRINTF(port, "engine busy\n");

    if (!s->dev[port].lst) {
        DPRINTF(port, "error: lst not given but cmd handled");
    cmd = get_cmd_header(s, port, slot);
    /* remember current slot handle for later */
    s->dev[port].cur_cmd = cmd;

    /* The device we are working for */
    ide_state = &s->dev[port].port.ifs[0];
    if (!ide_state->blk) {
        DPRINTF(port, "error: guest accessed unused port");

    tbl_addr = le64_to_cpu(cmd->tbl_addr);
    cmd_fis = dma_memory_map(s->as, tbl_addr, &cmd_len,
                             DMA_DIRECTION_FROM_DEVICE);
        DPRINTF(port, "error: guest passed us an invalid cmd fis\n");
    } else if (cmd_len != 0x80) {
        ahci_trigger_irq(s, &s->dev[port], PORT_IRQ_HBUS_ERR);
        DPRINTF(port, "error: dma_memory_map failed: "
                "(len(%02"PRIx64") != 0x80)\n",

    debug_print_fis(cmd_fis, 0x80);

    switch (cmd_fis[0]) {
    case SATA_FIS_TYPE_REGISTER_H2D:
        handle_reg_h2d_fis(s, port, slot, cmd_fis);
        DPRINTF(port, "unknown command cmd_fis[0]=%02x cmd_fis[1]=%02x "
                "cmd_fis[2]=%02x\n", cmd_fis[0], cmd_fis[1],

    dma_memory_unmap(s->as, cmd_fis, cmd_len, DMA_DIRECTION_FROM_DEVICE,

    if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) {
        /* async command, complete later */
        s->dev[port].busy_slot = slot;

    /* done handling the command */

/* DMA dev <-> ram */
static void ahci_start_transfer(IDEDMA *dma)
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];
    uint32_t size = (uint32_t)(s->data_end - s->data_ptr);
    /* write == ram -> device */
    uint16_t opts = le16_to_cpu(ad->cur_cmd->opts);
    int is_write = opts & AHCI_CMD_WRITE;
    int is_atapi = opts & AHCI_CMD_ATAPI;

    if (is_atapi && !ad->done_atapi_packet) {
        /* already prepopulated iobuffer */
        ad->done_atapi_packet = true;

    if (ahci_dma_prepare_buf(dma, size)) {

    DPRINTF(ad->port_no, "%sing %d bytes on %s w/%s sglist\n",
            is_write ? "writ" : "read", size, is_atapi ? "atapi" : "ata",
            has_sglist ? "" : "o");

    if (has_sglist && size) {
            dma_buf_write(s->data_ptr, size, &s->sg);
            dma_buf_read(s->data_ptr, size, &s->sg);

    /* declare that we processed everything */
    s->data_ptr = s->data_end;

    /* Update number of transferred bytes, destroy sglist */
    dma_buf_commit(s, size);

    s->end_transfer_func(s);

    if (!(s->status & DRQ_STAT)) {
        /* done with PIO send/receive */
        ahci_write_fis_pio(ad, le32_to_cpu(ad->cur_cmd->status));

static void ahci_start_dma(IDEDMA *dma, IDEState *s,
                           BlockCompletionFunc *dma_cb)
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    DPRINTF(ad->port_no, "\n");
    s->io_buffer_offset = 0;

static void ahci_restart_dma(IDEDMA *dma)
    /* Nothing to do, ahci_start_dma already resets s->io_buffer_offset. */

/*
 * IDE/PIO restarts are handled by the core layer, but NCQ commands
 * need an extra kick from the AHCI HBA.
 */
static void ahci_restart(IDEDMA *dma)
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);

    for (i = 0; i < AHCI_MAX_CMDS; i++) {
        NCQTransferState *ncq_tfs = &ad->ncq_tfs[i];
        if (ncq_tfs->halt) {
            execute_ncq_command(ncq_tfs);

/*
 * Called in DMA and PIO R/W chains to read the PRDT.
 * Not shared with NCQ pathways.
 */
static int32_t ahci_dma_prepare_buf(IDEDMA *dma, int32_t limit)
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];

    if (ahci_populate_sglist(ad, &s->sg, ad->cur_cmd,
                             limit, s->io_buffer_offset) == -1) {
        DPRINTF(ad->port_no, "ahci_dma_prepare_buf failed.\n");

    s->io_buffer_size = s->sg.size;

    DPRINTF(ad->port_no, "len=%#x\n", s->io_buffer_size);
    return s->io_buffer_size;

/*
 * Updates the command header with a bytes-read value.
 * Called via dma_buf_commit, for both DMA and PIO paths.
 * sglist destruction is handled within dma_buf_commit.
 */
static void ahci_commit_buf(IDEDMA *dma, uint32_t tx_bytes)
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);

    tx_bytes += le32_to_cpu(ad->cur_cmd->status);
    ad->cur_cmd->status = cpu_to_le32(tx_bytes);

static int ahci_dma_rw_buf(IDEDMA *dma, int is_write)
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];
    uint8_t *p = s->io_buffer + s->io_buffer_index;
    int l = s->io_buffer_size - s->io_buffer_index;

    if (ahci_populate_sglist(ad, &s->sg, ad->cur_cmd, l, s->io_buffer_offset)) {

        dma_buf_read(p, l, &s->sg);
        dma_buf_write(p, l, &s->sg);

    /* free sglist, update byte count */
    dma_buf_commit(s, l);

    s->io_buffer_index += l;

    DPRINTF(ad->port_no, "len=%#x\n", l);

static void ahci_cmd_done(IDEDMA *dma)
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);

    DPRINTF(ad->port_no, "cmd done\n");

    /* update d2h status */
    ahci_write_fis_d2h(ad);

    if (!ad->check_bh) {
        /* maybe we still have something to process, check later */
        ad->check_bh = qemu_bh_new(ahci_check_cmd_bh, ad);
        qemu_bh_schedule(ad->check_bh);

static void ahci_irq_set(void *opaque, int n, int level)

static const IDEDMAOps ahci_dma_ops = {
    .start_dma = ahci_start_dma,
    .restart = ahci_restart,
    .restart_dma = ahci_restart_dma,
    .start_transfer = ahci_start_transfer,
    .prepare_buf = ahci_dma_prepare_buf,
    .commit_buf = ahci_commit_buf,
    .rw_buf = ahci_dma_rw_buf,
    .cmd_done = ahci_cmd_done,

void ahci_init(AHCIState *s, DeviceState *qdev)
    s->container = qdev;
    /* XXX BAR size should be 1k, but that breaks, so bump it to 4k for now */
    memory_region_init_io(&s->mem, OBJECT(qdev), &ahci_mem_ops, s,
                          "ahci", AHCI_MEM_BAR_SIZE);
    memory_region_init_io(&s->idp, OBJECT(qdev), &ahci_idp_ops, s,

void ahci_realize(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports)
    s->dev = g_new0(AHCIDevice, ports);
    irqs = qemu_allocate_irqs(ahci_irq_set, s, s->ports);
    for (i = 0; i < s->ports; i++) {
        AHCIDevice *ad = &s->dev[i];

        ide_bus_new(&ad->port, sizeof(ad->port), qdev, i, 1);
        ide_init2(&ad->port, irqs[i]);

        ad->port.dma = &ad->dma;
        ad->port.dma->ops = &ahci_dma_ops;
        ide_register_restart_cb(&ad->port);

void ahci_uninit(AHCIState *s)

void ahci_reset(AHCIState *s)
    s->control_regs.irqstatus = 0;
    /*
     * The implementation of this bit is dependent upon the value of the
     * CAP.SAM bit. If CAP.SAM is '0', then GHC.AE shall be read-write and
     * shall have a reset value of '0'. If CAP.SAM is '1', then AE shall be
     * read-only and shall have a reset value of '1'.
     *
     * We set HOST_CAP_AHCI so we must enable AHCI at reset.
     */
    s->control_regs.ghc = HOST_CTL_AHCI_EN;

    for (i = 0; i < s->ports; i++) {
        pr = &s->dev[i].port_regs;
        pr->cmd = PORT_CMD_SPIN_UP | PORT_CMD_POWER_ON;
        ahci_reset_port(s, i);

static const VMStateDescription vmstate_ncq_tfs = {
    .name = "ncq state",
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(sector_count, NCQTransferState),
        VMSTATE_UINT64(lba, NCQTransferState),
        VMSTATE_UINT8(tag, NCQTransferState),
        VMSTATE_UINT8(cmd, NCQTransferState),
        VMSTATE_UINT8(slot, NCQTransferState),
        VMSTATE_BOOL(used, NCQTransferState),
        VMSTATE_BOOL(halt, NCQTransferState),
        VMSTATE_END_OF_LIST()

static const VMStateDescription vmstate_ahci_device = {
    .name = "ahci port",
    .fields = (VMStateField[]) {
        VMSTATE_IDE_BUS(port, AHCIDevice),
        VMSTATE_IDE_DRIVE(port.ifs[0], AHCIDevice),
        VMSTATE_UINT32(port_state, AHCIDevice),
        VMSTATE_UINT32(finished, AHCIDevice),
        VMSTATE_UINT32(port_regs.lst_addr, AHCIDevice),
        VMSTATE_UINT32(port_regs.lst_addr_hi, AHCIDevice),
        VMSTATE_UINT32(port_regs.fis_addr, AHCIDevice),
        VMSTATE_UINT32(port_regs.fis_addr_hi, AHCIDevice),
        VMSTATE_UINT32(port_regs.irq_stat, AHCIDevice),
        VMSTATE_UINT32(port_regs.irq_mask, AHCIDevice),
        VMSTATE_UINT32(port_regs.cmd, AHCIDevice),
        VMSTATE_UINT32(port_regs.tfdata, AHCIDevice),
        VMSTATE_UINT32(port_regs.sig, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_stat, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_ctl, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_err, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_act, AHCIDevice),
        VMSTATE_UINT32(port_regs.cmd_issue, AHCIDevice),
        VMSTATE_BOOL(done_atapi_packet, AHCIDevice),
        VMSTATE_INT32(busy_slot, AHCIDevice),
        VMSTATE_BOOL(init_d2h_sent, AHCIDevice),
        VMSTATE_STRUCT_ARRAY(ncq_tfs, AHCIDevice, AHCI_MAX_CMDS,
                             1, vmstate_ncq_tfs, NCQTransferState),
        VMSTATE_END_OF_LIST()

static int ahci_state_post_load(void *opaque, int version_id)
    struct AHCIDevice *ad;
    NCQTransferState *ncq_tfs;
    AHCIState *s = opaque;

    for (i = 0; i < s->ports; i++) {
        pr = &ad->port_regs;

        if (!(pr->cmd & PORT_CMD_START) && (pr->cmd & PORT_CMD_LIST_ON)) {
            error_report("AHCI: DMA engine should be off, but status bit "
                         "indicates it is still running.");
        if (!(pr->cmd & PORT_CMD_FIS_RX) && (pr->cmd & PORT_CMD_FIS_ON)) {
            error_report("AHCI: FIS RX engine should be off, but status bit "
                         "indicates it is still running.");

        /* After a migrate, the DMA/FIS engines are "off" and
         * need to be conditionally restarted */
        pr->cmd &= ~(PORT_CMD_LIST_ON | PORT_CMD_FIS_ON);
        if (ahci_cond_start_engines(ad) != 0) {

        for (j = 0; j < AHCI_MAX_CMDS; j++) {
            ncq_tfs = &ad->ncq_tfs[j];
            ncq_tfs->drive = ad;

            if (ncq_tfs->used != ncq_tfs->halt) {
            if (!ncq_tfs->halt) {
            if (!is_ncq(ncq_tfs->cmd)) {
            if (ncq_tfs->slot != ncq_tfs->tag) {
            /* If ncq_tfs->halt is justly set, the engine should be engaged,
             * and the command list buffer should be mapped. */
            ncq_tfs->cmdh = get_cmd_header(s, i, ncq_tfs->slot);
            if (!ncq_tfs->cmdh) {
            ahci_populate_sglist(ncq_tfs->drive, &ncq_tfs->sglist,
                                 ncq_tfs->cmdh, ncq_tfs->sector_count * 512,
            if (ncq_tfs->sector_count != ncq_tfs->sglist.size >> 9) {

        /*
         * If an error is present, ad->busy_slot will be valid and not -1.
         * In this case, an operation is waiting to resume and will re-check
         * for additional AHCI commands to execute upon completion.
         *
         * In the case where no error was present, busy_slot will be -1,
         * and we should check to see if there are additional commands waiting.
         */
        if (ad->busy_slot == -1) {
            /* We are in the middle of a command, and may need to access
             * the command header in guest memory again. */
            if (ad->busy_slot < 0 || ad->busy_slot >= AHCI_MAX_CMDS) {
            ad->cur_cmd = get_cmd_header(s, i, ad->busy_slot);

const VMStateDescription vmstate_ahci = {
    .post_load = ahci_state_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(dev, AHCIState, ports,
                                            vmstate_ahci_device, AHCIDevice),
        VMSTATE_UINT32(control_regs.cap, AHCIState),
        VMSTATE_UINT32(control_regs.ghc, AHCIState),
        VMSTATE_UINT32(control_regs.irqstatus, AHCIState),
        VMSTATE_UINT32(control_regs.impl, AHCIState),
        VMSTATE_UINT32(control_regs.version, AHCIState),
        VMSTATE_UINT32(idp_index, AHCIState),
        VMSTATE_INT32_EQUAL(ports, AHCIState),
        VMSTATE_END_OF_LIST()

static const VMStateDescription vmstate_sysbus_ahci = {
    .name = "sysbus-ahci",
    .fields = (VMStateField[]) {
        VMSTATE_AHCI(ahci, SysbusAHCIState),
        VMSTATE_END_OF_LIST()

static void sysbus_ahci_reset(DeviceState *dev)
    SysbusAHCIState *s = SYSBUS_AHCI(dev);

    ahci_reset(&s->ahci);

static void sysbus_ahci_init(Object *obj)
    SysbusAHCIState *s = SYSBUS_AHCI(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    ahci_init(&s->ahci, DEVICE(obj));

    sysbus_init_mmio(sbd, &s->ahci.mem);
    sysbus_init_irq(sbd, &s->ahci.irq);

static void sysbus_ahci_realize(DeviceState *dev, Error **errp)
    SysbusAHCIState *s = SYSBUS_AHCI(dev);

    ahci_realize(&s->ahci, dev, &address_space_memory, s->num_ports);

static Property sysbus_ahci_properties[] = {
    DEFINE_PROP_UINT32("num-ports", SysbusAHCIState, num_ports, 1),
    DEFINE_PROP_END_OF_LIST(),

static void sysbus_ahci_class_init(ObjectClass *klass, void *data)
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_ahci_realize;
    dc->vmsd = &vmstate_sysbus_ahci;
    dc->props = sysbus_ahci_properties;
    dc->reset = sysbus_ahci_reset;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);

static const TypeInfo sysbus_ahci_info = {
    .name          = TYPE_SYSBUS_AHCI,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SysbusAHCIState),
    .instance_init = sysbus_ahci_init,
    .class_init    = sysbus_ahci_class_init,

#define ALLWINNER_AHCI_BISTAFR    ((0xa0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_BISTCR     ((0xa4 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_BISTFCTR   ((0xa8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_BISTSR     ((0xac - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_BISTDECR   ((0xb0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_DIAGNR0    ((0xb4 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_DIAGNR1    ((0xb8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_OOBR       ((0xbc - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_PHYCS0R    ((0xc0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_PHYCS1R    ((0xc4 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_PHYCS2R    ((0xc8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_TIMER1MS   ((0xe0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_GPARAM1R   ((0xe8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_GPARAM2R   ((0xec - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_PPARAMR    ((0xf0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_TESTR      ((0xf4 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_VERSIONR   ((0xf8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_IDR        ((0xfc - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_RWCR       ((0xfc - ALLWINNER_AHCI_MMIO_OFF) / 4)

static uint64_t allwinner_ahci_mem_read(void *opaque, hwaddr addr,
    AllwinnerAHCIState *a = opaque;
    uint64_t val = a->regs[addr / 4];

    case ALLWINNER_AHCI_PHYCS0R:
    case ALLWINNER_AHCI_PHYCS2R:
        val &= ~(0x1 << 24);

    DPRINTF(-1, "addr=0x%" HWADDR_PRIx " val=0x%" PRIx64 ", size=%d\n",

static void allwinner_ahci_mem_write(void *opaque, hwaddr addr,
                                     uint64_t val, unsigned size)
    AllwinnerAHCIState *a = opaque;

    DPRINTF(-1, "addr=0x%" HWADDR_PRIx " val=0x%" PRIx64 ", size=%d\n",
    a->regs[addr / 4] = val;

static const MemoryRegionOps allwinner_ahci_mem_ops = {
    .read = allwinner_ahci_mem_read,
    .write = allwinner_ahci_mem_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .endianness = DEVICE_LITTLE_ENDIAN,

static void allwinner_ahci_init(Object *obj)
    SysbusAHCIState *s = SYSBUS_AHCI(obj);
    AllwinnerAHCIState *a = ALLWINNER_AHCI(obj);

    memory_region_init_io(&a->mmio, OBJECT(obj), &allwinner_ahci_mem_ops, a,
                          "allwinner-ahci", ALLWINNER_AHCI_MMIO_SIZE);
    memory_region_add_subregion(&s->ahci.mem, ALLWINNER_AHCI_MMIO_OFF,

static const VMStateDescription vmstate_allwinner_ahci = {
    .name = "allwinner-ahci",
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, AllwinnerAHCIState,
                             ALLWINNER_AHCI_MMIO_SIZE / 4),
        VMSTATE_END_OF_LIST()

static void allwinner_ahci_class_init(ObjectClass *klass, void *data)
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_allwinner_ahci;

static const TypeInfo allwinner_ahci_info = {
    .name          = TYPE_ALLWINNER_AHCI,
    .parent        = TYPE_SYSBUS_AHCI,
    .instance_size = sizeof(AllwinnerAHCIState),
    .instance_init = allwinner_ahci_init,
    .class_init    = allwinner_ahci_class_init,

static void sysbus_ahci_register_types(void)
    type_register_static(&sysbus_ahci_info);
    type_register_static(&allwinner_ahci_info);

type_init(sysbus_ahci_register_types)
void ahci_ide_create_devs(PCIDevice *dev, DriveInfo **hd)
    AHCIPCIState *d = ICH_AHCI(dev);
    AHCIState *ahci = &d->ahci;

    for (i = 0; i < ahci->ports; i++) {
        if (hd[i] == NULL) {
        ide_create_drive(&ahci->dev[i].port, 0, hd[i]);