/*
 * QEMU AHCI Emulation
 *
 * Copyright (c) 2010 qiaochong@loongson.cn
 * Copyright (c) 2010 Roland Elek <elek.roland@gmail.com>
 * Copyright (c) 2010 Sebastian Herbszt <herbszt@gmx.de>
 * Copyright (c) 2010 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/pci/msi.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
#include "hw/ide/internal.h"
#include "hw/ide/pci.h"
#include "ahci_internal.h"

#include "trace.h"
static void check_cmd(AHCIState *s, int port);
static int handle_cmd(AHCIState *s, int port, uint8_t slot);
static void ahci_reset_port(AHCIState *s, int port);
static bool ahci_write_fis_d2h(AHCIDevice *ad);
static void ahci_init_d2h(AHCIDevice *ad);
static int32_t ahci_dma_prepare_buf(const IDEDMA *dma, int32_t limit);
static bool ahci_map_clb_address(AHCIDevice *ad);
static bool ahci_map_fis_address(AHCIDevice *ad);
static void ahci_unmap_clb_address(AHCIDevice *ad);
static void ahci_unmap_fis_address(AHCIDevice *ad);
static const char *AHCIHostReg_lookup[AHCI_HOST_REG__COUNT] = {
    [AHCI_HOST_REG_CAP]        = "CAP",
    [AHCI_HOST_REG_CTL]        = "GHC",
    [AHCI_HOST_REG_IRQ_STAT]   = "IS",
    [AHCI_HOST_REG_PORTS_IMPL] = "PI",
    [AHCI_HOST_REG_VERSION]    = "VS",
    [AHCI_HOST_REG_CCC_CTL]    = "CCC_CTL",
    [AHCI_HOST_REG_CCC_PORTS]  = "CCC_PORTS",
    [AHCI_HOST_REG_EM_LOC]     = "EM_LOC",
    [AHCI_HOST_REG_EM_CTL]     = "EM_CTL",
    [AHCI_HOST_REG_CAP2]       = "CAP2",
    [AHCI_HOST_REG_BOHC]       = "BOHC",
};
static const char *AHCIPortReg_lookup[AHCI_PORT_REG__COUNT] = {
    [AHCI_PORT_REG_LST_ADDR]    = "PxCLB",
    [AHCI_PORT_REG_LST_ADDR_HI] = "PxCLBU",
    [AHCI_PORT_REG_FIS_ADDR]    = "PxFB",
    [AHCI_PORT_REG_FIS_ADDR_HI] = "PxFBU",
    [AHCI_PORT_REG_IRQ_STAT]    = "PxIS",
    [AHCI_PORT_REG_IRQ_MASK]    = "PxIE",
    [AHCI_PORT_REG_CMD]         = "PxCMD",
    [7]                         = "Reserved",
    [AHCI_PORT_REG_TFDATA]      = "PxTFD",
    [AHCI_PORT_REG_SIG]         = "PxSIG",
    [AHCI_PORT_REG_SCR_STAT]    = "PxSSTS",
    [AHCI_PORT_REG_SCR_CTL]     = "PxSCTL",
    [AHCI_PORT_REG_SCR_ERR]     = "PxSERR",
    [AHCI_PORT_REG_SCR_ACT]     = "PxSACT",
    [AHCI_PORT_REG_CMD_ISSUE]   = "PxCI",
    [AHCI_PORT_REG_SCR_NOTIF]   = "PxSNTF",
    [AHCI_PORT_REG_FIS_CTL]     = "PxFBS",
    [AHCI_PORT_REG_DEV_SLEEP]   = "PxDEVSLP",
    [18 ... 27]                 = "Reserved",
    [AHCI_PORT_REG_VENDOR_1 ...
     AHCI_PORT_REG_VENDOR_4]    = "PxVS",
};
static const char *AHCIPortIRQ_lookup[AHCI_PORT_IRQ__COUNT] = {
    [AHCI_PORT_IRQ_BIT_DHRS] = "DHRS",
    [AHCI_PORT_IRQ_BIT_PSS]  = "PSS",
    [AHCI_PORT_IRQ_BIT_DSS]  = "DSS",
    [AHCI_PORT_IRQ_BIT_SDBS] = "SDBS",
    [AHCI_PORT_IRQ_BIT_UFS]  = "UFS",
    [AHCI_PORT_IRQ_BIT_DPS]  = "DPS",
    [AHCI_PORT_IRQ_BIT_PCS]  = "PCS",
    [AHCI_PORT_IRQ_BIT_DMPS] = "DMPS",
    [8 ... 21]               = "RESERVED",
    [AHCI_PORT_IRQ_BIT_PRCS] = "PRCS",
    [AHCI_PORT_IRQ_BIT_IPMS] = "IPMS",
    [AHCI_PORT_IRQ_BIT_OFS]  = "OFS",
    [25]                     = "RESERVED",
    [AHCI_PORT_IRQ_BIT_INFS] = "INFS",
    [AHCI_PORT_IRQ_BIT_IFS]  = "IFS",
    [AHCI_PORT_IRQ_BIT_HBDS] = "HBDS",
    [AHCI_PORT_IRQ_BIT_HBFS] = "HBFS",
    [AHCI_PORT_IRQ_BIT_TFES] = "TFES",
    [AHCI_PORT_IRQ_BIT_CPDS] = "CPDS"
};
static uint32_t ahci_port_read(AHCIState *s, int port, int offset)
{
    uint32_t val;
    AHCIPortRegs *pr = &s->dev[port].port_regs;
    enum AHCIPortReg regnum = offset / sizeof(uint32_t);
    assert(regnum < (AHCI_PORT_ADDR_OFFSET_LEN / sizeof(uint32_t)));

    switch (regnum) {
    case AHCI_PORT_REG_LST_ADDR:
        val = pr->lst_addr;
        break;
    case AHCI_PORT_REG_LST_ADDR_HI:
        val = pr->lst_addr_hi;
        break;
    case AHCI_PORT_REG_FIS_ADDR:
        val = pr->fis_addr;
        break;
    case AHCI_PORT_REG_FIS_ADDR_HI:
        val = pr->fis_addr_hi;
        break;
    case AHCI_PORT_REG_IRQ_STAT:
        val = pr->irq_stat;
        break;
    case AHCI_PORT_REG_IRQ_MASK:
        val = pr->irq_mask;
        break;
    case AHCI_PORT_REG_CMD:
        val = pr->cmd;
        break;
    case AHCI_PORT_REG_TFDATA:
        val = pr->tfdata;
        break;
    case AHCI_PORT_REG_SIG:
        val = pr->sig;
        break;
    case AHCI_PORT_REG_SCR_STAT:
        if (s->dev[port].port.ifs[0].blk) {
            val = SATA_SCR_SSTATUS_DET_DEV_PRESENT_PHY_UP |
                  SATA_SCR_SSTATUS_SPD_GEN1 | SATA_SCR_SSTATUS_IPM_ACTIVE;
        } else {
            val = SATA_SCR_SSTATUS_DET_NODEV;
        }
        break;
    case AHCI_PORT_REG_SCR_CTL:
        val = pr->scr_ctl;
        break;
    case AHCI_PORT_REG_SCR_ERR:
        val = pr->scr_err;
        break;
    case AHCI_PORT_REG_SCR_ACT:
        val = pr->scr_act;
        break;
    case AHCI_PORT_REG_CMD_ISSUE:
        val = pr->cmd_issue;
        break;
    default:
        trace_ahci_port_read_default(s, port, AHCIPortReg_lookup[regnum],
                                     offset);
        val = 0;
    }

    trace_ahci_port_read(s, port, AHCIPortReg_lookup[regnum], offset, val);
    return val;
}
static void ahci_irq_raise(AHCIState *s)
{
    DeviceState *dev_state = s->container;
    PCIDevice *pci_dev = (PCIDevice *) object_dynamic_cast(OBJECT(dev_state),
                                                           TYPE_PCI_DEVICE);

    trace_ahci_irq_raise(s);

    if (pci_dev && msi_enabled(pci_dev)) {
        msi_notify(pci_dev, 0);
    } else {
        qemu_irq_raise(s->irq);
    }
}

static void ahci_irq_lower(AHCIState *s)
{
    DeviceState *dev_state = s->container;
    PCIDevice *pci_dev = (PCIDevice *) object_dynamic_cast(OBJECT(dev_state),
                                                           TYPE_PCI_DEVICE);

    trace_ahci_irq_lower(s);

    if (!pci_dev || !msi_enabled(pci_dev)) {
        qemu_irq_lower(s->irq);
    }
}
static void ahci_check_irq(AHCIState *s)
{
    int i;
    uint32_t old_irq = s->control_regs.irqstatus;

    s->control_regs.irqstatus = 0;
    for (i = 0; i < s->ports; i++) {
        AHCIPortRegs *pr = &s->dev[i].port_regs;
        if (pr->irq_stat & pr->irq_mask) {
            s->control_regs.irqstatus |= (1 << i);
        }
    }
    trace_ahci_check_irq(s, old_irq, s->control_regs.irqstatus);
    if (s->control_regs.irqstatus &&
        (s->control_regs.ghc & HOST_CTL_IRQ_EN)) {
        ahci_irq_raise(s);
    } else {
        ahci_irq_lower(s);
    }
}
static void ahci_trigger_irq(AHCIState *s, AHCIDevice *d,
                             enum AHCIPortIRQ irqbit)
{
    g_assert((unsigned)irqbit < 32);
    uint32_t irq = 1U << irqbit;
    uint32_t irqstat = d->port_regs.irq_stat | irq;

    trace_ahci_trigger_irq(s, d->port_no,
                           AHCIPortIRQ_lookup[irqbit], irq,
                           d->port_regs.irq_stat, irqstat,
                           irqstat & d->port_regs.irq_mask);

    d->port_regs.irq_stat = irqstat;
    ahci_check_irq(s);
}
static void map_page(AddressSpace *as, uint8_t **ptr, uint64_t addr,
                     uint32_t wanted)
{
    hwaddr len = wanted;

    if (*ptr) {
        dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);
    }

    *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE);
    if (len < wanted && *ptr) {
        dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);
        *ptr = NULL;
    }
}
/**
 * Check the cmd register to see if we should start or stop
 * the DMA or FIS RX engines.
 *
 * @ad: Device to dis/engage.
 *
 * @return 0 on success, -1 on error.
 */
static int ahci_cond_start_engines(AHCIDevice *ad)
{
    AHCIPortRegs *pr = &ad->port_regs;
    bool cmd_start = pr->cmd & PORT_CMD_START;
    bool cmd_on    = pr->cmd & PORT_CMD_LIST_ON;
    bool fis_start = pr->cmd & PORT_CMD_FIS_RX;
    bool fis_on    = pr->cmd & PORT_CMD_FIS_ON;

    if (cmd_start && !cmd_on) {
        if (!ahci_map_clb_address(ad)) {
            pr->cmd &= ~PORT_CMD_START;
            error_report("AHCI: Failed to start DMA engine: "
                         "bad command list buffer address");
            return -1;
        }
    } else if (!cmd_start && cmd_on) {
        ahci_unmap_clb_address(ad);
    }

    if (fis_start && !fis_on) {
        if (!ahci_map_fis_address(ad)) {
            pr->cmd &= ~PORT_CMD_FIS_RX;
            error_report("AHCI: Failed to start FIS receive engine: "
                         "bad FIS receive buffer address");
            return -1;
        }
    } else if (!fis_start && fis_on) {
        ahci_unmap_fis_address(ad);
    }

    return 0;
}
static void ahci_port_write(AHCIState *s, int port, int offset, uint32_t val)
{
    AHCIPortRegs *pr = &s->dev[port].port_regs;
    enum AHCIPortReg regnum = offset / sizeof(uint32_t);
    assert(regnum < (AHCI_PORT_ADDR_OFFSET_LEN / sizeof(uint32_t)));
    trace_ahci_port_write(s, port, AHCIPortReg_lookup[regnum], offset, val);

    switch (regnum) {
    case AHCI_PORT_REG_LST_ADDR:
        pr->lst_addr = val;
        break;
    case AHCI_PORT_REG_LST_ADDR_HI:
        pr->lst_addr_hi = val;
        break;
    case AHCI_PORT_REG_FIS_ADDR:
        pr->fis_addr = val;
        break;
    case AHCI_PORT_REG_FIS_ADDR_HI:
        pr->fis_addr_hi = val;
        break;
    case AHCI_PORT_REG_IRQ_STAT:
        pr->irq_stat &= ~val;
        ahci_check_irq(s);
        break;
    case AHCI_PORT_REG_IRQ_MASK:
        pr->irq_mask = val & 0xfdc000ff;
        ahci_check_irq(s);
        break;
    case AHCI_PORT_REG_CMD:
        /* Block any Read-only fields from being set;
         * including LIST_ON and FIS_ON.
         * The spec requires to set ICC bits to zero after the ICC change
         * is done. We don't support ICC state changes, therefore always
         * force the ICC bits to zero.
         */
        pr->cmd = (pr->cmd & PORT_CMD_RO_MASK) |
                  (val & ~(PORT_CMD_RO_MASK | PORT_CMD_ICC_MASK));

        /* Check FIS RX and CLB engines */
        ahci_cond_start_engines(&s->dev[port]);

        /* XXX usually the FIS would be pending on the bus here and
           issuing deferred until the OS enables FIS receival.
           Instead, we only submit it once - which works in most
           cases, but is a hack. */
        if ((pr->cmd & PORT_CMD_FIS_ON) &&
            !s->dev[port].init_d2h_sent) {
            ahci_init_d2h(&s->dev[port]);
        }

        check_cmd(s, port);
        break;
    case AHCI_PORT_REG_TFDATA:
    case AHCI_PORT_REG_SIG:
    case AHCI_PORT_REG_SCR_STAT:
        /* Read Only */
        break;
    case AHCI_PORT_REG_SCR_CTL:
        if (((pr->scr_ctl & AHCI_SCR_SCTL_DET) == 1) &&
            ((val & AHCI_SCR_SCTL_DET) == 0)) {
            ahci_reset_port(s, port);
        }
        pr->scr_ctl = val;
        break;
    case AHCI_PORT_REG_SCR_ERR:
        pr->scr_err &= ~val;
        break;
    case AHCI_PORT_REG_SCR_ACT:
        /* RW1 */
        pr->scr_act |= val;
        break;
    case AHCI_PORT_REG_CMD_ISSUE:
        pr->cmd_issue |= val;
        check_cmd(s, port);
        break;
    default:
        trace_ahci_port_write_unimpl(s, port, AHCIPortReg_lookup[regnum],
                                     offset, val);
        qemu_log_mask(LOG_UNIMP, "Attempted write to unimplemented register: "
                      "AHCI port %d register %s, offset 0x%x: 0x%"PRIx32,
                      port, AHCIPortReg_lookup[regnum], offset, val);
        break;
    }
}
static uint64_t ahci_mem_read_32(void *opaque, hwaddr addr)
{
    AHCIState *s = opaque;
    uint32_t val = 0;

    if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) {
        enum AHCIHostReg regnum = addr / 4;
        assert(regnum < AHCI_HOST_REG__COUNT);

        switch (regnum) {
        case AHCI_HOST_REG_CAP:
            val = s->control_regs.cap;
            break;
        case AHCI_HOST_REG_CTL:
            val = s->control_regs.ghc;
            break;
        case AHCI_HOST_REG_IRQ_STAT:
            val = s->control_regs.irqstatus;
            break;
        case AHCI_HOST_REG_PORTS_IMPL:
            val = s->control_regs.impl;
            break;
        case AHCI_HOST_REG_VERSION:
            val = s->control_regs.version;
            break;
        default:
            trace_ahci_mem_read_32_host_default(s, AHCIHostReg_lookup[regnum],
                                                addr);
        }
        trace_ahci_mem_read_32_host(s, AHCIHostReg_lookup[regnum], addr, val);
    } else if ((addr >= AHCI_PORT_REGS_START_ADDR) &&
               (addr < (AHCI_PORT_REGS_START_ADDR +
                        (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) {
        val = ahci_port_read(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7,
                             addr & AHCI_PORT_ADDR_OFFSET_MASK);
    } else {
        trace_ahci_mem_read_32_default(s, addr, val);
    }

    trace_ahci_mem_read_32(s, addr, val);
    return val;
}
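/*
 * Worked example of the port decode above (illustrative values): a read at
 * addr 0x188 falls in the per-port window, so the port index is
 * (0x188 - AHCI_PORT_REGS_START_ADDR) >> 7 == 1, because each port owns a
 * 0x80-byte register block, and the offset within that block is
 * 0x188 & AHCI_PORT_ADDR_OFFSET_MASK == 0x08, i.e. PxFB of port 1.
 */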
/**
 * AHCI 1.3 section 3 ("HBA Memory Registers")
 * Support unaligned 8/16/32 bit reads, and 64 bit aligned reads.
 * Caller is responsible for masking unwanted higher order bytes.
 */
static uint64_t ahci_mem_read(void *opaque, hwaddr addr, unsigned size)
{
    hwaddr aligned = addr & ~0x3;
    int ofst = addr - aligned;
    uint64_t lo = ahci_mem_read_32(opaque, aligned);
    uint64_t hi;
    uint64_t val;

    /* if < 8 byte read does not cross 4 byte boundary */
    if (ofst + size <= 4) {
        val = lo >> (ofst * 8);
    } else {
        g_assert(size > 1);

        /* If the 64bit read is unaligned, we will produce undefined
         * results. AHCI does not support unaligned 64bit reads. */
        hi = ahci_mem_read_32(opaque, aligned + 4);
        val = (hi << 32 | lo) >> (ofst * 8);
    }

    trace_ahci_mem_read(opaque, size, addr, val);
    return val;
}
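/*
 * Example of the split path above (illustrative only): a 2-byte read at
 * addr 0x7 has aligned == 0x4 and ofst == 3, so ofst + size == 5 > 4 and
 * both 32-bit halves are fetched; (hi << 32 | lo) >> 24 places byte 7 in
 * the low byte of the result and byte 8 directly above it.
 */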
static void ahci_mem_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    AHCIState *s = opaque;

    trace_ahci_mem_write(s, size, addr, val);

    /* Only aligned reads are allowed on AHCI */
    if (addr & 3) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ahci: Mis-aligned write to addr 0x%03" HWADDR_PRIX "\n",
                      addr);
        return;
    }

    if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) {
        enum AHCIHostReg regnum = addr / 4;
        assert(regnum < AHCI_HOST_REG__COUNT);

        switch (regnum) {
        case AHCI_HOST_REG_CAP: /* R/WO, RO */
            /* FIXME handle R/WO */
            break;
        case AHCI_HOST_REG_CTL: /* R/W */
            if (val & HOST_CTL_RESET) {
                ahci_reset(s);
            } else {
                s->control_regs.ghc = (val & 0x3) | HOST_CTL_AHCI_EN;
                ahci_check_irq(s);
            }
            break;
        case AHCI_HOST_REG_IRQ_STAT: /* R/WC, RO */
            s->control_regs.irqstatus &= ~val;
            ahci_check_irq(s);
            break;
        case AHCI_HOST_REG_PORTS_IMPL: /* R/WO, RO */
            /* FIXME handle R/WO */
            break;
        case AHCI_HOST_REG_VERSION: /* RO */
            /* FIXME report write? */
            break;
        default:
            qemu_log_mask(LOG_UNIMP,
                          "Attempted write to unimplemented register: "
                          "AHCI host register %s, "
                          "offset 0x%"PRIx64": 0x%"PRIx64,
                          AHCIHostReg_lookup[regnum], addr, val);
            trace_ahci_mem_write_host_unimpl(s, size,
                                             AHCIHostReg_lookup[regnum], addr);
        }
        trace_ahci_mem_write_host(s, size, AHCIHostReg_lookup[regnum],
                                  addr, val);
    } else if ((addr >= AHCI_PORT_REGS_START_ADDR) &&
               (addr < (AHCI_PORT_REGS_START_ADDR +
                        (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) {
        ahci_port_write(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7,
                        addr & AHCI_PORT_ADDR_OFFSET_MASK, val);
    } else {
        qemu_log_mask(LOG_UNIMP, "Attempted write to unimplemented register: "
                      "AHCI global register at offset 0x%"PRIx64": 0x%"PRIx64,
                      addr, val);
        trace_ahci_mem_write_unimpl(s, size, addr, val);
    }
}
static const MemoryRegionOps ahci_mem_ops = {
    .read = ahci_mem_read,
    .write = ahci_mem_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static uint64_t ahci_idp_read(void *opaque, hwaddr addr,
                              unsigned size)
{
    AHCIState *s = opaque;

    if (addr == s->idp_offset) {
        /* index register */
        return s->idp_index;
    } else if (addr == s->idp_offset + 4) {
        /* data register - do memory read at location selected by index */
        return ahci_mem_read(opaque, s->idp_index, size);
    } else {
        return 0;
    }
}

static void ahci_idp_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    AHCIState *s = opaque;

    if (addr == s->idp_offset) {
        /* index register - mask off reserved bits */
        s->idp_index = (uint32_t)val & ((AHCI_MEM_BAR_SIZE - 1) & ~3);
    } else if (addr == s->idp_offset + 4) {
        /* data register - do memory write at location selected by index */
        ahci_mem_write(opaque, s->idp_index, val, size);
    }
}
static const MemoryRegionOps ahci_idp_ops = {
    .read = ahci_idp_read,
    .write = ahci_idp_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
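/*
 * Summary of the index/data pair handled above: a guest first writes a
 * register offset to the index register at idp_offset, then accesses the
 * data register at idp_offset + 4, which is forwarded to
 * ahci_mem_read()/ahci_mem_write() at the latched offset. This gives
 * legacy I/O space a window into the AHCI MMIO registers.
 */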
static void ahci_reg_init(AHCIState *s)
{
    int i;

    s->control_regs.cap = (s->ports - 1) |
                          (AHCI_NUM_COMMAND_SLOTS << 8) |
                          (AHCI_SUPPORTED_SPEED_GEN1 << AHCI_SUPPORTED_SPEED) |
                          HOST_CAP_NCQ | HOST_CAP_AHCI | HOST_CAP_64;

    s->control_regs.impl = (1 << s->ports) - 1;

    s->control_regs.version = AHCI_VERSION_1_0;

    for (i = 0; i < s->ports; i++) {
        s->dev[i].port_state = STATE_RUN;
    }
}
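/*
 * CAP layout produced above (illustrative): bits 4:0 hold the zero-based
 * port count (s->ports - 1), bits 12:8 the zero-based command slot count
 * (AHCI_NUM_COMMAND_SLOTS), the supported-speed field advertises Gen1,
 * and HOST_CAP_NCQ / HOST_CAP_AHCI / HOST_CAP_64 advertise NCQ support,
 * AHCI-only operation and 64-bit addressing.
 */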
static void check_cmd(AHCIState *s, int port)
{
    AHCIPortRegs *pr = &s->dev[port].port_regs;
    uint8_t slot;

    if ((pr->cmd & PORT_CMD_START) && pr->cmd_issue) {
        for (slot = 0; (slot < 32) && pr->cmd_issue; slot++) {
            if ((pr->cmd_issue & (1U << slot)) &&
                !handle_cmd(s, port, slot)) {
                pr->cmd_issue &= ~(1U << slot);
            }
        }
    }
}
static void ahci_check_cmd_bh(void *opaque)
{
    AHCIDevice *ad = opaque;

    qemu_bh_delete(ad->check_bh);
    ad->check_bh = NULL;

    check_cmd(ad->hba, ad->port_no);
}
static void ahci_init_d2h(AHCIDevice *ad)
{
    IDEState *ide_state = &ad->port.ifs[0];
    AHCIPortRegs *pr = &ad->port_regs;

    if (ad->init_d2h_sent) {
        return;
    }

    if (ahci_write_fis_d2h(ad)) {
        ad->init_d2h_sent = true;
        /* We're emulating receiving the first Reg H2D Fis from the device;
         * Update the SIG register, but otherwise proceed as normal. */
        pr->sig = ((uint32_t)ide_state->hcyl << 24) |
                  (ide_state->lcyl << 16) |
                  (ide_state->sector << 8) |
                  (ide_state->nsector & 0xFF);
    }
}
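/*
 * The SIG value assembled above mirrors ahci_set_signature() below: for
 * example a disk signature of 0x00000101 unpacks to hcyl=0, lcyl=0,
 * sector=1, nsector=1, while the ATAPI signature 0xEB140101 yields
 * hcyl=0xEB and lcyl=0x14.  (Example values taken from the
 * SATA_SIGNATURE_DISK / SATA_SIGNATURE_CDROM definitions.)
 */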
static void ahci_set_signature(AHCIDevice *ad, uint32_t sig)
{
    IDEState *s = &ad->port.ifs[0];
    s->hcyl = sig >> 24 & 0xFF;
    s->lcyl = sig >> 16 & 0xFF;
    s->sector = sig >> 8 & 0xFF;
    s->nsector = sig & 0xFF;

    trace_ahci_set_signature(ad->hba, ad->port_no, s->nsector, s->sector,
                             s->lcyl, s->hcyl, sig);
}
static void ahci_reset_port(AHCIState *s, int port)
{
    AHCIDevice *d = &s->dev[port];
    AHCIPortRegs *pr = &d->port_regs;
    IDEState *ide_state = &d->port.ifs[0];
    int i;

    trace_ahci_reset_port(s, port);

    ide_bus_reset(&d->port);
    ide_state->ncq_queues = AHCI_MAX_CMDS;

    pr->scr_stat = 0;
    pr->scr_err = 0;
    pr->scr_act = 0;
    pr->tfdata = 0x7F;
    pr->sig = 0xFFFFFFFF;
    d->busy_slot = -1;
    d->init_d2h_sent = false;

    ide_state = &s->dev[port].port.ifs[0];
    if (!ide_state->blk) {
        return;
    }

    /* reset ncq queue */
    for (i = 0; i < AHCI_MAX_CMDS; i++) {
        NCQTransferState *ncq_tfs = &s->dev[port].ncq_tfs[i];
        ncq_tfs->halt = false;
        if (!ncq_tfs->used) {
            continue;
        }

        if (ncq_tfs->aiocb) {
            blk_aio_cancel(ncq_tfs->aiocb);
            ncq_tfs->aiocb = NULL;
        }

        /* Maybe we just finished the request thanks to blk_aio_cancel() */
        if (!ncq_tfs->used) {
            continue;
        }

        qemu_sglist_destroy(&ncq_tfs->sglist);
        ncq_tfs->used = 0;
    }

    s->dev[port].port_state = STATE_RUN;
    if (ide_state->drive_kind == IDE_CD) {
        ahci_set_signature(d, SATA_SIGNATURE_CDROM);
        ide_state->status = SEEK_STAT | WRERR_STAT | READY_STAT;
    } else {
        ahci_set_signature(d, SATA_SIGNATURE_DISK);
        ide_state->status = SEEK_STAT | WRERR_STAT;
    }

    ide_state->error = 1;
    ahci_init_d2h(d);
}
/* Buffer pretty output based on a raw FIS structure. */
static char *ahci_pretty_buffer_fis(const uint8_t *fis, int cmd_len)
{
    int i;
    GString *s = g_string_new("FIS:");

    for (i = 0; i < cmd_len; i++) {
        if ((i & 0xf) == 0) {
            g_string_append_printf(s, "\n0x%02x: ", i);
        }
        g_string_append_printf(s, "%02x ", fis[i]);
    }
    g_string_append_c(s, '\n');

    return g_string_free(s, FALSE);
}
static bool ahci_map_fis_address(AHCIDevice *ad)
{
    AHCIPortRegs *pr = &ad->port_regs;
    map_page(ad->hba->as, &ad->res_fis,
             ((uint64_t)pr->fis_addr_hi << 32) | pr->fis_addr, 256);
    if (ad->res_fis != NULL) {
        pr->cmd |= PORT_CMD_FIS_ON;
        return true;
    }

    pr->cmd &= ~PORT_CMD_FIS_ON;
    return false;
}
static void ahci_unmap_fis_address(AHCIDevice *ad)
{
    if (ad->res_fis == NULL) {
        trace_ahci_unmap_fis_address_null(ad->hba, ad->port_no);
        return;
    }
    ad->port_regs.cmd &= ~PORT_CMD_FIS_ON;
    dma_memory_unmap(ad->hba->as, ad->res_fis, 256,
                     DMA_DIRECTION_FROM_DEVICE, 256);
    ad->res_fis = NULL;
}
static bool ahci_map_clb_address(AHCIDevice *ad)
{
    AHCIPortRegs *pr = &ad->port_regs;
    ad->cur_cmd = NULL;
    map_page(ad->hba->as, &ad->lst,
             ((uint64_t)pr->lst_addr_hi << 32) | pr->lst_addr, 1024);
    if (ad->lst != NULL) {
        pr->cmd |= PORT_CMD_LIST_ON;
        return true;
    }

    pr->cmd &= ~PORT_CMD_LIST_ON;
    return false;
}
static void ahci_unmap_clb_address(AHCIDevice *ad)
{
    if (ad->lst == NULL) {
        trace_ahci_unmap_clb_address_null(ad->hba, ad->port_no);
        return;
    }
    ad->port_regs.cmd &= ~PORT_CMD_LIST_ON;
    dma_memory_unmap(ad->hba->as, ad->lst, 1024,
                     DMA_DIRECTION_FROM_DEVICE, 1024);
    ad->lst = NULL;
}
static void ahci_write_fis_sdb(AHCIState *s, NCQTransferState *ncq_tfs)
{
    AHCIDevice *ad = ncq_tfs->drive;
    AHCIPortRegs *pr = &ad->port_regs;
    IDEState *ide_state;
    SDBFIS *sdb_fis;

    if (!ad->res_fis ||
        !(pr->cmd & PORT_CMD_FIS_RX)) {
        return;
    }

    sdb_fis = (SDBFIS *)&ad->res_fis[RES_FIS_SDBFIS];
    ide_state = &ad->port.ifs[0];

    sdb_fis->type = SATA_FIS_TYPE_SDB;
    /* Interrupt pending & Notification bit */
    sdb_fis->flags = 0x40; /* Interrupt bit, always 1 for NCQ */
    sdb_fis->status = ide_state->status & 0x77;
    sdb_fis->error = ide_state->error;
    /* update SAct field in SDB_FIS */
    sdb_fis->payload = cpu_to_le32(ad->finished);

    /* Update shadow registers (except BSY 0x80 and DRQ 0x08) */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
                 (ad->port.ifs[0].status & 0x77) |
                 (pr->tfdata & 0x88);
    pr->scr_act &= ~ad->finished;
    ad->finished = 0;

    /* Trigger IRQ if interrupt bit is set (which currently, it always is) */
    if (sdb_fis->flags & 0x40) {
        ahci_trigger_irq(s, ad, AHCI_PORT_IRQ_BIT_SDBS);
    }
}
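/*
 * The 0x77 mask used above drops BSY (0x80) and DRQ (0x08) from the status
 * copied into the SDB FIS and into PxTFD, as a Set Device Bits FIS must not
 * report those bits; the (pr->tfdata & 0x88) term preserves whatever BSY/DRQ
 * state the shadow register already held.
 */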
static void ahci_write_fis_pio(AHCIDevice *ad, uint16_t len, bool pio_fis_i)
{
    AHCIPortRegs *pr = &ad->port_regs;
    uint8_t *pio_fis;
    IDEState *s = &ad->port.ifs[0];

    if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) {
        return;
    }

    pio_fis = &ad->res_fis[RES_FIS_PSFIS];

    pio_fis[0] = SATA_FIS_TYPE_PIO_SETUP;
    pio_fis[1] = (pio_fis_i ? (1 << 6) : 0);
    pio_fis[2] = s->status;
    pio_fis[3] = s->error;

    pio_fis[4] = s->sector;
    pio_fis[5] = s->lcyl;
    pio_fis[6] = s->hcyl;
    pio_fis[7] = s->select;
    pio_fis[8] = s->hob_sector;
    pio_fis[9] = s->hob_lcyl;
    pio_fis[10] = s->hob_hcyl;
    pio_fis[11] = 0;
    pio_fis[12] = s->nsector & 0xFF;
    pio_fis[13] = (s->nsector >> 8) & 0xFF;
    pio_fis[14] = 0;
    pio_fis[15] = s->status;
    pio_fis[16] = len & 255;
    pio_fis[17] = len >> 8;
    pio_fis[18] = 0;
    pio_fis[19] = 0;

    /* Update shadow registers: */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
                 ad->port.ifs[0].status;

    if (pio_fis[2] & ERR_STAT) {
        ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_TFES);
    }
}
static bool ahci_write_fis_d2h(AHCIDevice *ad)
{
    AHCIPortRegs *pr = &ad->port_regs;
    uint8_t *d2h_fis;
    int i;
    IDEState *s = &ad->port.ifs[0];

    if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) {
        return false;
    }

    d2h_fis = &ad->res_fis[RES_FIS_RFIS];

    d2h_fis[0] = SATA_FIS_TYPE_REGISTER_D2H;
    d2h_fis[1] = (1 << 6); /* interrupt bit */
    d2h_fis[2] = s->status;
    d2h_fis[3] = s->error;

    d2h_fis[4] = s->sector;
    d2h_fis[5] = s->lcyl;
    d2h_fis[6] = s->hcyl;
    d2h_fis[7] = s->select;
    d2h_fis[8] = s->hob_sector;
    d2h_fis[9] = s->hob_lcyl;
    d2h_fis[10] = s->hob_hcyl;
    d2h_fis[11] = 0;
    d2h_fis[12] = s->nsector & 0xFF;
    d2h_fis[13] = (s->nsector >> 8) & 0xFF;
    for (i = 14; i < 20; i++) {
        d2h_fis[i] = 0;
    }

    /* Update shadow registers: */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
                 ad->port.ifs[0].status;

    if (d2h_fis[2] & ERR_STAT) {
        ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_TFES);
    }

    ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_DHRS);
    return true;
}
static int prdt_tbl_entry_size(const AHCI_SG *tbl)
{
    /* flags_size is zero-based */
    return (le32_to_cpu(tbl->flags_size) & AHCI_PRDT_SIZE_MASK) + 1;
}
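/*
 * Example (illustrative): a PRD entry whose flags_size byte count field
 * reads 0x1FF describes 0x200 (512) bytes, since the count is stored
 * zero-based.
 */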
/**
 * Fetch entries in a guest-provided PRDT and convert it into a QEMU SGlist.
 * @ad: The AHCIDevice for whom we are building the SGList.
 * @sglist: The SGList target to add PRD entries to.
 * @cmd: The AHCI Command Header that describes where the PRDT is.
 * @limit: The remaining size of the S/ATA transaction, in bytes.
 * @offset: The number of bytes already transferred, in bytes.
 *
 * The AHCI PRDT can describe up to 256GiB. S/ATA only support transactions of
 * up to 32MiB as of ATA8-ACS3 rev 1b, assuming a 512 byte sector size. We stop
 * building the sglist from the PRDT as soon as we hit @limit bytes,
 * which is <= INT32_MAX/2GiB.
 */
static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist,
                                AHCICmdHdr *cmd, int64_t limit, uint64_t offset)
{
    uint16_t opts = le16_to_cpu(cmd->opts);
    uint16_t prdtl = le16_to_cpu(cmd->prdtl);
    uint64_t cfis_addr = le64_to_cpu(cmd->tbl_addr);
    uint64_t prdt_addr = cfis_addr + 0x80;
    dma_addr_t prdt_len = (prdtl * sizeof(AHCI_SG));
    dma_addr_t real_prdt_len = prdt_len;
    uint8_t *prdt;
    int i;
    int r = 0;
    uint64_t sum = 0;
    int off_idx = -1;
    int64_t off_pos = -1;
    int tbl_entry_size;
    IDEBus *bus = &ad->port;
    BusState *qbus = BUS(bus);

    trace_ahci_populate_sglist(ad->hba, ad->port_no);

    if (!prdtl) {
        trace_ahci_populate_sglist_no_prdtl(ad->hba, ad->port_no, opts);
        return -1;
    }

    /* map PRDT */
    if (!(prdt = dma_memory_map(ad->hba->as, prdt_addr, &prdt_len,
                                DMA_DIRECTION_TO_DEVICE))){
        trace_ahci_populate_sglist_no_map(ad->hba, ad->port_no);
        return -1;
    }

    if (prdt_len < real_prdt_len) {
        trace_ahci_populate_sglist_short_map(ad->hba, ad->port_no);
        r = -1;
        goto out;
    }

    /* Get entries in the PRDT, init a qemu sglist accordingly */
    if (prdtl > 0) {
        AHCI_SG *tbl = (AHCI_SG *)prdt;
        sum = 0;
        for (i = 0; i < prdtl; i++) {
            tbl_entry_size = prdt_tbl_entry_size(&tbl[i]);
            if (offset < (sum + tbl_entry_size)) {
                off_idx = i;
                off_pos = offset - sum;
                break;
            }
            sum += tbl_entry_size;
        }
        if ((off_idx == -1) || (off_pos < 0) || (off_pos > tbl_entry_size)) {
            trace_ahci_populate_sglist_bad_offset(ad->hba, ad->port_no,
                                                  off_idx, off_pos);
            r = -1;
            goto out;
        }

        qemu_sglist_init(sglist, qbus->parent, (prdtl - off_idx),
                         ad->hba->as);
        qemu_sglist_add(sglist, le64_to_cpu(tbl[off_idx].addr) + off_pos,
                        MIN(prdt_tbl_entry_size(&tbl[off_idx]) - off_pos,
                            limit));

        for (i = off_idx + 1; i < prdtl && sglist->size < limit; i++) {
            qemu_sglist_add(sglist, le64_to_cpu(tbl[i].addr),
                            MIN(prdt_tbl_entry_size(&tbl[i]),
                                limit - sglist->size));
        }
    }

out:
    dma_memory_unmap(ad->hba->as, prdt, prdt_len,
                     DMA_DIRECTION_TO_DEVICE, prdt_len);
    return r;
}
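/*
 * Resumption example for the scan above (illustrative): with three PRD
 * entries of 0x1000 bytes each and offset == 0x1800, the scan stops at
 * entry 1 with off_pos == 0x800, so the sglist starts 0x800 bytes into
 * entry 1 and then appends entry 2, clamped so the total never exceeds
 * @limit.
 */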
static void ncq_err(NCQTransferState *ncq_tfs)
{
    IDEState *ide_state = &ncq_tfs->drive->port.ifs[0];

    ide_state->error = ABRT_ERR;
    ide_state->status = READY_STAT | ERR_STAT;
    ncq_tfs->drive->port_regs.scr_err |= (1 << ncq_tfs->tag);
    qemu_sglist_destroy(&ncq_tfs->sglist);
    ncq_tfs->used = 0;
}
static void ncq_finish(NCQTransferState *ncq_tfs)
{
    /* If we didn't error out, set our finished bit. Errored commands
     * do not get a bit set for the SDB FIS ACT register, nor do they
     * clear the outstanding bit in scr_act (PxSACT). */
    if (!(ncq_tfs->drive->port_regs.scr_err & (1 << ncq_tfs->tag))) {
        ncq_tfs->drive->finished |= (1 << ncq_tfs->tag);
    }

    ahci_write_fis_sdb(ncq_tfs->drive->hba, ncq_tfs);

    trace_ncq_finish(ncq_tfs->drive->hba, ncq_tfs->drive->port_no,
                     ncq_tfs->tag);

    block_acct_done(blk_get_stats(ncq_tfs->drive->port.ifs[0].blk),
                    &ncq_tfs->acct);
    qemu_sglist_destroy(&ncq_tfs->sglist);
    ncq_tfs->used = 0;
}
static void ncq_cb(void *opaque, int ret)
{
    NCQTransferState *ncq_tfs = (NCQTransferState *)opaque;
    IDEState *ide_state = &ncq_tfs->drive->port.ifs[0];

    ncq_tfs->aiocb = NULL;

    if (ret < 0) {
        bool is_read = ncq_tfs->cmd == READ_FPDMA_QUEUED;
        BlockErrorAction action = blk_get_error_action(ide_state->blk,
                                                       is_read, -ret);
        if (action == BLOCK_ERROR_ACTION_STOP) {
            ncq_tfs->halt = true;
            ide_state->bus->error_status = IDE_RETRY_HBA;
        } else if (action == BLOCK_ERROR_ACTION_REPORT) {
            ncq_err(ncq_tfs);
        }
        blk_error_action(ide_state->blk, action, is_read, -ret);
    } else {
        ide_state->status = READY_STAT | SEEK_STAT;
    }

    if (!ncq_tfs->halt) {
        ncq_finish(ncq_tfs);
    }
}
static int is_ncq(uint8_t ata_cmd)
{
    /* Based on SATA 3.2 section 13.6.3.2 */
    switch (ata_cmd) {
    case READ_FPDMA_QUEUED:
    case WRITE_FPDMA_QUEUED:
    case NCQ_NON_DATA:
    case RECEIVE_FPDMA_QUEUED:
    case SEND_FPDMA_QUEUED:
        return 1;
    default:
        return 0;
    }
}
static void execute_ncq_command(NCQTransferState *ncq_tfs)
{
    AHCIDevice *ad = ncq_tfs->drive;
    IDEState *ide_state = &ad->port.ifs[0];
    int port = ad->port_no;

    g_assert(is_ncq(ncq_tfs->cmd));
    ncq_tfs->halt = false;

    switch (ncq_tfs->cmd) {
    case READ_FPDMA_QUEUED:
        trace_execute_ncq_command_read(ad->hba, port, ncq_tfs->tag,
                                       ncq_tfs->sector_count, ncq_tfs->lba);
        dma_acct_start(ide_state->blk, &ncq_tfs->acct,
                       &ncq_tfs->sglist, BLOCK_ACCT_READ);
        ncq_tfs->aiocb = dma_blk_read(ide_state->blk, &ncq_tfs->sglist,
                                      ncq_tfs->lba << BDRV_SECTOR_BITS,
                                      BDRV_SECTOR_SIZE,
                                      ncq_cb, ncq_tfs);
        break;
    case WRITE_FPDMA_QUEUED:
        trace_execute_ncq_command_read(ad->hba, port, ncq_tfs->tag,
                                       ncq_tfs->sector_count, ncq_tfs->lba);
        dma_acct_start(ide_state->blk, &ncq_tfs->acct,
                       &ncq_tfs->sglist, BLOCK_ACCT_WRITE);
        ncq_tfs->aiocb = dma_blk_write(ide_state->blk, &ncq_tfs->sglist,
                                       ncq_tfs->lba << BDRV_SECTOR_BITS,
                                       BDRV_SECTOR_SIZE,
                                       ncq_cb, ncq_tfs);
        break;
    default:
        trace_execute_ncq_command_unsup(ad->hba, port,
                                        ncq_tfs->tag, ncq_tfs->cmd);
        ncq_err(ncq_tfs);
    }
}
static void process_ncq_command(AHCIState *s, int port, const uint8_t *cmd_fis,
                                uint8_t slot)
{
    AHCIDevice *ad = &s->dev[port];
    const NCQFrame *ncq_fis = (NCQFrame *)cmd_fis;
    uint8_t tag = ncq_fis->tag >> 3;
    NCQTransferState *ncq_tfs = &ad->ncq_tfs[tag];
    size_t size;

    g_assert(is_ncq(ncq_fis->command));
    if (ncq_tfs->used) {
        /* error - already in use */
        qemu_log_mask(LOG_GUEST_ERROR, "%s: tag %d already used\n",
                      __func__, tag);
        return;
    }

    ncq_tfs->used = 1;
    ncq_tfs->drive = ad;
    ncq_tfs->slot = slot;
    ncq_tfs->cmdh = &((AHCICmdHdr *)ad->lst)[slot];
    ncq_tfs->cmd = ncq_fis->command;
    ncq_tfs->lba = ((uint64_t)ncq_fis->lba5 << 40) |
                   ((uint64_t)ncq_fis->lba4 << 32) |
                   ((uint64_t)ncq_fis->lba3 << 24) |
                   ((uint64_t)ncq_fis->lba2 << 16) |
                   ((uint64_t)ncq_fis->lba1 << 8) |
                   (uint64_t)ncq_fis->lba0;
    ncq_tfs->tag = tag;

    /* Sanity-check the NCQ packet */
    if (tag != slot) {
        trace_process_ncq_command_mismatch(s, port, tag, slot);
    }

    if (ncq_fis->aux0 || ncq_fis->aux1 || ncq_fis->aux2 || ncq_fis->aux3) {
        trace_process_ncq_command_aux(s, port, tag);
    }
    if (ncq_fis->prio || ncq_fis->icc) {
        trace_process_ncq_command_prioicc(s, port, tag);
    }
    if (ncq_fis->fua & NCQ_FIS_FUA_MASK) {
        trace_process_ncq_command_fua(s, port, tag);
    }
    if (ncq_fis->tag & NCQ_FIS_RARC_MASK) {
        trace_process_ncq_command_rarc(s, port, tag);
    }

    ncq_tfs->sector_count = ((ncq_fis->sector_count_high << 8) |
                             ncq_fis->sector_count_low);
    if (!ncq_tfs->sector_count) {
        ncq_tfs->sector_count = 0x10000;
    }
    size = ncq_tfs->sector_count * BDRV_SECTOR_SIZE;
    ahci_populate_sglist(ad, &ncq_tfs->sglist, ncq_tfs->cmdh, size, 0);

    if (ncq_tfs->sglist.size < size) {
        error_report("ahci: PRDT length for NCQ command (0x%zx) "
                     "is smaller than the requested size (0x%zx)",
                     ncq_tfs->sglist.size, size);
        ncq_err(ncq_tfs);
        ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_OFS);
        return;
    } else if (ncq_tfs->sglist.size != size) {
        trace_process_ncq_command_large(s, port, tag,
                                        ncq_tfs->sglist.size, size);
    }

    trace_process_ncq_command(s, port, tag,
                              ncq_fis->command,
                              ncq_tfs->lba,
                              ncq_tfs->lba + ncq_tfs->sector_count - 1);
    execute_ncq_command(ncq_tfs);
}
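/*
 * Note on the tag extraction above: the NCQ tag occupies bits 7:3 of the
 * FIS byte that NCQFrame names 'tag', hence the >> 3; the low bits carry
 * flags such as RARC, which the sanity checks trace separately. QEMU also
 * expects the command to be issued from the slot matching its tag; a
 * mismatch is only traced, not rejected.
 */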
static AHCICmdHdr *get_cmd_header(AHCIState *s, uint8_t port, uint8_t slot)
{
    if (port >= s->ports || slot >= AHCI_MAX_CMDS) {
        return NULL;
    }

    return s->dev[port].lst ? &((AHCICmdHdr *)s->dev[port].lst)[slot] : NULL;
}
static void handle_reg_h2d_fis(AHCIState *s, int port,
                               uint8_t slot, const uint8_t *cmd_fis)
{
    IDEState *ide_state = &s->dev[port].port.ifs[0];
    AHCICmdHdr *cmd = get_cmd_header(s, port, slot);
    uint16_t opts = le16_to_cpu(cmd->opts);

    if (cmd_fis[1] & 0x0F) {
        trace_handle_reg_h2d_fis_pmp(s, port, cmd_fis[1],
                                     cmd_fis[2], cmd_fis[3]);
        return;
    }

    if (cmd_fis[1] & 0x70) {
        trace_handle_reg_h2d_fis_res(s, port, cmd_fis[1],
                                     cmd_fis[2], cmd_fis[3]);
        return;
    }

    if (!(cmd_fis[1] & SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER)) {
        switch (s->dev[port].port_state) {
        case STATE_RUN:
            if (cmd_fis[15] & ATA_SRST) {
                s->dev[port].port_state = STATE_RESET;
            }
            break;
        case STATE_RESET:
            if (!(cmd_fis[15] & ATA_SRST)) {
                ahci_reset_port(s, port);
            }
            break;
        }
        return;
    }

    /* Check for NCQ command */
    if (is_ncq(cmd_fis[2])) {
        process_ncq_command(s, port, cmd_fis, slot);
        return;
    }

    /* Decompose the FIS:
     * AHCI does not interpret FIS packets, it only forwards them.
     * SATA 1.0 describes how to decode LBA28 and CHS FIS packets.
     * Later specifications, e.g, SATA 3.2, describe LBA48 FIS packets.
     *
     * ATA4 describes sector number for LBA28/CHS commands.
     * ATA6 describes sector number for LBA48 commands.
     * ATA8 deprecates CHS fully, describing only LBA28/48.
     *
     * We dutifully convert the FIS into IDE registers, and allow the
     * core layer to interpret them as needed. */
    ide_state->feature = cmd_fis[3];
    ide_state->sector = cmd_fis[4];      /* LBA 7:0 */
    ide_state->lcyl = cmd_fis[5];        /* LBA 15:8 */
    ide_state->hcyl = cmd_fis[6];        /* LBA 23:16 */
    ide_state->select = cmd_fis[7];      /* LBA 27:24 (LBA28) */
    ide_state->hob_sector = cmd_fis[8];  /* LBA 31:24 */
    ide_state->hob_lcyl = cmd_fis[9];    /* LBA 39:32 */
    ide_state->hob_hcyl = cmd_fis[10];   /* LBA 47:40 */
    ide_state->hob_feature = cmd_fis[11];
    ide_state->nsector = (int64_t)((cmd_fis[13] << 8) | cmd_fis[12]);
    /* 14, 16, 17, 18, 19: Reserved (SATA 1.0) */
    /* 15: Only valid when UPDATE_COMMAND not set. */

    /* Copy the ACMD field (ATAPI packet, if any) from the AHCI command
     * table to ide_state->io_buffer */
    if (opts & AHCI_CMD_ATAPI) {
        memcpy(ide_state->io_buffer, &cmd_fis[AHCI_COMMAND_TABLE_ACMD], 0x10);
        if (trace_event_get_state_backends(TRACE_HANDLE_REG_H2D_FIS_DUMP)) {
            char *pretty_fis = ahci_pretty_buffer_fis(ide_state->io_buffer, 0x10);
            trace_handle_reg_h2d_fis_dump(s, port, pretty_fis);
            g_free(pretty_fis);
        }
    }

    ide_state->error = 0;
    s->dev[port].done_first_drq = false;
    /* Reset transferred byte counter */
    cmd->status = 0;

    /* We're ready to process the command in FIS byte 2. */
    ide_exec_cmd(&s->dev[port].port, cmd_fis[2]);
}
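/*
 * The register copy above is enough for the core layer to rebuild an LBA48
 * address: FIS bytes 4/5/6 give LBA 23:0 and bytes 8/9/10 give LBA 47:24.
 * For example (illustrative), a FIS carrying 04 03 02 in bytes 4..6 and
 * 07 06 05 in bytes 8..10 describes LBA 0x050607020304.
 */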
static int handle_cmd(AHCIState *s, int port, uint8_t slot)
{
    IDEState *ide_state;
    uint64_t tbl_addr;
    AHCICmdHdr *cmd;
    uint8_t *cmd_fis;
    dma_addr_t cmd_len;

    if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) {
        /* Engine currently busy, try again later */
        trace_handle_cmd_busy(s, port);
        return -1;
    }

    if (!s->dev[port].lst) {
        trace_handle_cmd_nolist(s, port);
        return -1;
    }
    cmd = get_cmd_header(s, port, slot);
    /* remember current slot handle for later */
    s->dev[port].cur_cmd = cmd;

    /* The device we are working for */
    ide_state = &s->dev[port].port.ifs[0];
    if (!ide_state->blk) {
        trace_handle_cmd_badport(s, port);
        return -1;
    }

    tbl_addr = le64_to_cpu(cmd->tbl_addr);
    cmd_len = 0x80;
    cmd_fis = dma_memory_map(s->as, tbl_addr, &cmd_len,
                             DMA_DIRECTION_TO_DEVICE);
    if (!cmd_fis) {
        trace_handle_cmd_badfis(s, port);
        return -1;
    } else if (cmd_len != 0x80) {
        ahci_trigger_irq(s, &s->dev[port], AHCI_PORT_IRQ_BIT_HBFS);
        trace_handle_cmd_badmap(s, port, cmd_len);
        goto out;
    }
    if (trace_event_get_state_backends(TRACE_HANDLE_CMD_FIS_DUMP)) {
        char *pretty_fis = ahci_pretty_buffer_fis(cmd_fis, 0x80);
        trace_handle_cmd_fis_dump(s, port, pretty_fis);
        g_free(pretty_fis);
    }
    switch (cmd_fis[0]) {
    case SATA_FIS_TYPE_REGISTER_H2D:
        handle_reg_h2d_fis(s, port, slot, cmd_fis);
        break;
    default:
        trace_handle_cmd_unhandled_fis(s, port,
                                       cmd_fis[0], cmd_fis[1], cmd_fis[2]);
        break;
    }

out:
    dma_memory_unmap(s->as, cmd_fis, cmd_len, DMA_DIRECTION_TO_DEVICE,
                     cmd_len);

    if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) {
        /* async command, complete later */
        s->dev[port].busy_slot = slot;
        return -1;
    }

    /* done handling the command */
    return 0;
}
/* Transfer PIO data between RAM and device */
static void ahci_pio_transfer(const IDEDMA *dma)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];
    uint32_t size = (uint32_t)(s->data_end - s->data_ptr);
    /* write == ram -> device */
    uint16_t opts = le16_to_cpu(ad->cur_cmd->opts);
    int is_write = opts & AHCI_CMD_WRITE;
    int is_atapi = opts & AHCI_CMD_ATAPI;
    int has_sglist = 0;
    bool pio_fis_i;

    /* The PIO Setup FIS is received prior to transfer, but the interrupt
     * is only triggered after data is received.
     *
     * The device only sets the 'I' bit in the PIO Setup FIS for device->host
     * requests (see "DPIOI1" in the SATA spec), or for host->device DRQs after
     * the first (see "DPIOO1"). The latter is consistent with the spec's
     * description of the PACKET protocol, where the command part of ATAPI requests
     * ("DPKT0") has the 'I' bit clear, while the data part of PIO ATAPI requests
     * ("DPKT4a" and "DPKT7") has the 'I' bit set for both directions for all DRQs.
     */
    pio_fis_i = ad->done_first_drq || (!is_atapi && !is_write);
    ahci_write_fis_pio(ad, size, pio_fis_i);

    if (is_atapi && !ad->done_first_drq) {
        /* already prepopulated iobuffer */
        goto out;
    }

    if (ahci_dma_prepare_buf(dma, size)) {
        has_sglist = 1;
    }

    trace_ahci_pio_transfer(ad->hba, ad->port_no, is_write ? "writ" : "read",
                            size, is_atapi ? "atapi" : "ata",
                            has_sglist ? "" : "o");

    if (has_sglist && size) {
        if (is_write) {
            dma_buf_write(s->data_ptr, size, &s->sg);
        } else {
            dma_buf_read(s->data_ptr, size, &s->sg);
        }
    }

    /* Update number of transferred bytes, destroy sglist */
    dma_buf_commit(s, size);

out:
    /* declare that we processed everything */
    s->data_ptr = s->data_end;

    ad->done_first_drq = true;
    if (pio_fis_i) {
        ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_PSS);
    }
}
static void ahci_start_dma(const IDEDMA *dma, IDEState *s,
                           BlockCompletionFunc *dma_cb)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    trace_ahci_start_dma(ad->hba, ad->port_no);
    s->io_buffer_offset = 0;
    dma_cb(s, 0);
}

static void ahci_restart_dma(const IDEDMA *dma)
{
    /* Nothing to do, ahci_start_dma already resets s->io_buffer_offset. */
}
/**
 * IDE/PIO restarts are handled by the core layer, but NCQ commands
 * need an extra kick from the AHCI HBA.
 */
static void ahci_restart(const IDEDMA *dma)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    int i;

    for (i = 0; i < AHCI_MAX_CMDS; i++) {
        NCQTransferState *ncq_tfs = &ad->ncq_tfs[i];
        if (ncq_tfs->halt) {
            execute_ncq_command(ncq_tfs);
        }
    }
}
/**
 * Called in DMA and PIO R/W chains to read the PRDT.
 * Not shared with NCQ pathways.
 */
static int32_t ahci_dma_prepare_buf(const IDEDMA *dma, int32_t limit)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];

    if (ahci_populate_sglist(ad, &s->sg, ad->cur_cmd,
                             limit, s->io_buffer_offset) == -1) {
        trace_ahci_dma_prepare_buf_fail(ad->hba, ad->port_no);
        return -1;
    }
    s->io_buffer_size = s->sg.size;

    trace_ahci_dma_prepare_buf(ad->hba, ad->port_no, limit, s->io_buffer_size);
    return s->io_buffer_size;
}
/**
 * Updates the command header with a bytes-read value.
 * Called via dma_buf_commit, for both DMA and PIO paths.
 * sglist destruction is handled within dma_buf_commit.
 */
static void ahci_commit_buf(const IDEDMA *dma, uint32_t tx_bytes)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);

    tx_bytes += le32_to_cpu(ad->cur_cmd->status);
    ad->cur_cmd->status = cpu_to_le32(tx_bytes);
}
static int ahci_dma_rw_buf(const IDEDMA *dma, bool is_write)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];
    uint8_t *p = s->io_buffer + s->io_buffer_index;
    int l = s->io_buffer_size - s->io_buffer_index;

    if (ahci_populate_sglist(ad, &s->sg, ad->cur_cmd, l, s->io_buffer_offset)) {
        return 0;
    }

    if (is_write) {
        dma_buf_read(p, l, &s->sg);
    } else {
        dma_buf_write(p, l, &s->sg);
    }

    /* free sglist, update byte count */
    dma_buf_commit(s, l);
    s->io_buffer_index += l;

    trace_ahci_dma_rw_buf(ad->hba, ad->port_no, l);
    return 1;
}
static void ahci_cmd_done(const IDEDMA *dma)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);

    trace_ahci_cmd_done(ad->hba, ad->port_no);

    /* no longer busy */
    if (ad->busy_slot != -1) {
        ad->port_regs.cmd_issue &= ~(1 << ad->busy_slot);
        ad->busy_slot = -1;
    }

    /* update d2h status */
    ahci_write_fis_d2h(ad);

    if (ad->port_regs.cmd_issue && !ad->check_bh) {
        ad->check_bh = qemu_bh_new(ahci_check_cmd_bh, ad);
        qemu_bh_schedule(ad->check_bh);
    }
}
static void ahci_irq_set(void *opaque, int n, int level)
{
    qemu_log_mask(LOG_UNIMP, "ahci: IRQ#%d level:%d\n", n, level);
}
static const IDEDMAOps ahci_dma_ops = {
    .start_dma = ahci_start_dma,
    .restart = ahci_restart,
    .restart_dma = ahci_restart_dma,
    .pio_transfer = ahci_pio_transfer,
    .prepare_buf = ahci_dma_prepare_buf,
    .commit_buf = ahci_commit_buf,
    .rw_buf = ahci_dma_rw_buf,
    .cmd_done = ahci_cmd_done,
};
void ahci_init(AHCIState *s, DeviceState *qdev)
{
    s->container = qdev;
    /* XXX BAR size should be 1k, but that breaks, so bump it to 4k for now */
    memory_region_init_io(&s->mem, OBJECT(qdev), &ahci_mem_ops, s,
                          "ahci", AHCI_MEM_BAR_SIZE);
    memory_region_init_io(&s->idp, OBJECT(qdev), &ahci_idp_ops, s,
                          "ahci-idp", 32);
}
void ahci_realize(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports)
{
    qemu_irq *irqs;
    int i;

    s->as = as;
    s->ports = ports;
    s->dev = g_new0(AHCIDevice, ports);
    ahci_reg_init(s);
    irqs = qemu_allocate_irqs(ahci_irq_set, s, s->ports);
    for (i = 0; i < s->ports; i++) {
        AHCIDevice *ad = &s->dev[i];

        ide_bus_new(&ad->port, sizeof(ad->port), qdev, i, 1);
        ide_init2(&ad->port, irqs[i]);

        ad->hba = s;
        ad->port_no = i;
        ad->port.dma = &ad->dma;
        ad->port.dma->ops = &ahci_dma_ops;
        ide_register_restart_cb(&ad->port);
    }
    g_free(irqs);
}
void ahci_uninit(AHCIState *s)
{
    int i, j;

    for (i = 0; i < s->ports; i++) {
        AHCIDevice *ad = &s->dev[i];

        for (j = 0; j < 2; j++) {
            IDEState *s = &ad->port.ifs[j];

            ide_exit(s);
        }
        object_unparent(OBJECT(&ad->port));
    }

    g_free(s->dev);
}
void ahci_reset(AHCIState *s)
{
    AHCIPortRegs *pr;
    int i;

    trace_ahci_reset(s);

    s->control_regs.irqstatus = 0;
    /* AHCI Enable (AE)
     * The implementation of this bit is dependent upon the value of the
     * CAP.SAM bit. If CAP.SAM is '0', then GHC.AE shall be read-write and
     * shall have a reset value of '0'. If CAP.SAM is '1', then AE shall be
     * read-only and shall have a reset value of '1'.
     *
     * We set HOST_CAP_AHCI so we must enable AHCI at reset.
     */
    s->control_regs.ghc = HOST_CTL_AHCI_EN;

    for (i = 0; i < s->ports; i++) {
        pr = &s->dev[i].port_regs;
        pr->irq_stat = 0;
        pr->irq_mask = 0;
        pr->scr_ctl = 0;
        pr->cmd = PORT_CMD_SPIN_UP | PORT_CMD_POWER_ON;
        ahci_reset_port(s, i);
    }
}
= {
1610 .name
= "ncq state",
1612 .fields
= (VMStateField
[]) {
1613 VMSTATE_UINT32(sector_count
, NCQTransferState
),
1614 VMSTATE_UINT64(lba
, NCQTransferState
),
1615 VMSTATE_UINT8(tag
, NCQTransferState
),
1616 VMSTATE_UINT8(cmd
, NCQTransferState
),
1617 VMSTATE_UINT8(slot
, NCQTransferState
),
1618 VMSTATE_BOOL(used
, NCQTransferState
),
1619 VMSTATE_BOOL(halt
, NCQTransferState
),
1620 VMSTATE_END_OF_LIST()
1624 static const VMStateDescription vmstate_ahci_device
= {
1625 .name
= "ahci port",
1627 .fields
= (VMStateField
[]) {
1628 VMSTATE_IDE_BUS(port
, AHCIDevice
),
1629 VMSTATE_IDE_DRIVE(port
.ifs
[0], AHCIDevice
),
1630 VMSTATE_UINT32(port_state
, AHCIDevice
),
1631 VMSTATE_UINT32(finished
, AHCIDevice
),
1632 VMSTATE_UINT32(port_regs
.lst_addr
, AHCIDevice
),
1633 VMSTATE_UINT32(port_regs
.lst_addr_hi
, AHCIDevice
),
1634 VMSTATE_UINT32(port_regs
.fis_addr
, AHCIDevice
),
1635 VMSTATE_UINT32(port_regs
.fis_addr_hi
, AHCIDevice
),
1636 VMSTATE_UINT32(port_regs
.irq_stat
, AHCIDevice
),
1637 VMSTATE_UINT32(port_regs
.irq_mask
, AHCIDevice
),
1638 VMSTATE_UINT32(port_regs
.cmd
, AHCIDevice
),
1639 VMSTATE_UINT32(port_regs
.tfdata
, AHCIDevice
),
1640 VMSTATE_UINT32(port_regs
.sig
, AHCIDevice
),
1641 VMSTATE_UINT32(port_regs
.scr_stat
, AHCIDevice
),
1642 VMSTATE_UINT32(port_regs
.scr_ctl
, AHCIDevice
),
1643 VMSTATE_UINT32(port_regs
.scr_err
, AHCIDevice
),
1644 VMSTATE_UINT32(port_regs
.scr_act
, AHCIDevice
),
1645 VMSTATE_UINT32(port_regs
.cmd_issue
, AHCIDevice
),
1646 VMSTATE_BOOL(done_first_drq
, AHCIDevice
),
1647 VMSTATE_INT32(busy_slot
, AHCIDevice
),
1648 VMSTATE_BOOL(init_d2h_sent
, AHCIDevice
),
1649 VMSTATE_STRUCT_ARRAY(ncq_tfs
, AHCIDevice
, AHCI_MAX_CMDS
,
1650 1, vmstate_ncq_tfs
, NCQTransferState
),
1651 VMSTATE_END_OF_LIST()
static int ahci_state_post_load(void *opaque, int version_id)
{
    int i, j;
    struct AHCIDevice *ad;
    NCQTransferState *ncq_tfs;
    AHCIPortRegs *pr;
    AHCIState *s = opaque;

    for (i = 0; i < s->ports; i++) {
        ad = &s->dev[i];
        pr = &ad->port_regs;

        if (!(pr->cmd & PORT_CMD_START) && (pr->cmd & PORT_CMD_LIST_ON)) {
            error_report("AHCI: DMA engine should be off, but status bit "
                         "indicates it is still running.");
            return -1;
        }
        if (!(pr->cmd & PORT_CMD_FIS_RX) && (pr->cmd & PORT_CMD_FIS_ON)) {
            error_report("AHCI: FIS RX engine should be off, but status bit "
                         "indicates it is still running.");
            return -1;
        }

        /* After a migrate, the DMA/FIS engines are "off" and
         * need to be conditionally restarted */
        pr->cmd &= ~(PORT_CMD_LIST_ON | PORT_CMD_FIS_ON);
        if (ahci_cond_start_engines(ad) != 0) {
            return -1;
        }

        for (j = 0; j < AHCI_MAX_CMDS; j++) {
            ncq_tfs = &ad->ncq_tfs[j];
            ncq_tfs->drive = ad;

            if (ncq_tfs->used != ncq_tfs->halt) {
                return -1;
            }
            if (!ncq_tfs->halt) {
                continue;
            }
            if (!is_ncq(ncq_tfs->cmd)) {
                return -1;
            }
            if (ncq_tfs->slot != ncq_tfs->tag) {
                return -1;
            }
            /* If ncq_tfs->halt is justly set, the engine should be engaged,
             * and the command list buffer should be mapped. */
            ncq_tfs->cmdh = get_cmd_header(s, i, ncq_tfs->slot);
            if (!ncq_tfs->cmdh) {
                return -1;
            }
            ahci_populate_sglist(ncq_tfs->drive, &ncq_tfs->sglist,
                                 ncq_tfs->cmdh,
                                 ncq_tfs->sector_count * BDRV_SECTOR_SIZE,
                                 0);
            if (ncq_tfs->sector_count != ncq_tfs->sglist.size >> 9) {
                return -1;
            }
        }

        /*
         * If an error is present, ad->busy_slot will be valid and not -1.
         * In this case, an operation is waiting to resume and will re-check
         * for additional AHCI commands to execute upon completion.
         *
         * In the case where no error was present, busy_slot will be -1,
         * and we should check to see if there are additional commands waiting.
         */
        if (ad->busy_slot == -1) {
            check_cmd(s, i);
        } else {
            /* We are in the middle of a command, and may need to access
             * the command header in guest memory again. */
            if (ad->busy_slot < 0 || ad->busy_slot >= AHCI_MAX_CMDS) {
                return -1;
            }
            ad->cur_cmd = get_cmd_header(s, i, ad->busy_slot);
        }
    }

    return 0;
}
const VMStateDescription vmstate_ahci = {
    .name = "ahci",
    .version_id = 1,
    .post_load = ahci_state_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(dev, AHCIState, ports,
                                            vmstate_ahci_device, AHCIDevice),
        VMSTATE_UINT32(control_regs.cap, AHCIState),
        VMSTATE_UINT32(control_regs.ghc, AHCIState),
        VMSTATE_UINT32(control_regs.irqstatus, AHCIState),
        VMSTATE_UINT32(control_regs.impl, AHCIState),
        VMSTATE_UINT32(control_regs.version, AHCIState),
        VMSTATE_UINT32(idp_index, AHCIState),
        VMSTATE_INT32_EQUAL(ports, AHCIState, NULL),
        VMSTATE_END_OF_LIST()
    },
};
static const VMStateDescription vmstate_sysbus_ahci = {
    .name = "sysbus-ahci",
    .fields = (VMStateField[]) {
        VMSTATE_AHCI(ahci, SysbusAHCIState),
        VMSTATE_END_OF_LIST()
    },
};
*dev
)
1768 SysbusAHCIState
*s
= SYSBUS_AHCI(dev
);
1770 ahci_reset(&s
->ahci
);
1773 static void sysbus_ahci_init(Object
*obj
)
1775 SysbusAHCIState
*s
= SYSBUS_AHCI(obj
);
1776 SysBusDevice
*sbd
= SYS_BUS_DEVICE(obj
);
1778 ahci_init(&s
->ahci
, DEVICE(obj
));
1780 sysbus_init_mmio(sbd
, &s
->ahci
.mem
);
1781 sysbus_init_irq(sbd
, &s
->ahci
.irq
);
static void sysbus_ahci_realize(DeviceState *dev, Error **errp)
{
    SysbusAHCIState *s = SYSBUS_AHCI(dev);

    ahci_realize(&s->ahci, dev, &address_space_memory, s->num_ports);
}
static Property sysbus_ahci_properties[] = {
    DEFINE_PROP_UINT32("num-ports", SysbusAHCIState, num_ports, 1),
    DEFINE_PROP_END_OF_LIST(),
};
static void sysbus_ahci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_ahci_realize;
    dc->vmsd = &vmstate_sysbus_ahci;
    device_class_set_props(dc, sysbus_ahci_properties);
    dc->reset = sysbus_ahci_reset;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
static const TypeInfo sysbus_ahci_info = {
    .name          = TYPE_SYSBUS_AHCI,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SysbusAHCIState),
    .instance_init = sysbus_ahci_init,
    .class_init    = sysbus_ahci_class_init,
};
static void sysbus_ahci_register_types(void)
{
    type_register_static(&sysbus_ahci_info);
}

type_init(sysbus_ahci_register_types)
int32_t ahci_get_num_ports(PCIDevice *dev)
{
    AHCIPCIState *d = ICH9_AHCI(dev);
    AHCIState *ahci = &d->ahci;

    return ahci->ports;
}
void ahci_ide_create_devs(PCIDevice *dev, DriveInfo **hd)
{
    AHCIPCIState *d = ICH9_AHCI(dev);
    AHCIState *ahci = &d->ahci;
    int i;

    for (i = 0; i < ahci->ports; i++) {
        if (hd[i] == NULL) {
            continue;
        }
        ide_create_drive(&ahci->dev[i].port, 0, hd[i]);
    }
}