4 * Copyright (c) 2010 qiaochong@loongson.cn
5 * Copyright (c) 2010 Roland Elek <elek.roland@gmail.com>
6 * Copyright (c) 2010 Sebastian Herbszt <herbszt@gmx.de>
7 * Copyright (c) 2010 Alexander Graf <agraf@suse.de>
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2 of the License, or (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
31 #include "cpu-common.h"
33 #include <hw/ide/pci.h>
34 #include <hw/ide/ahci.h>
36 /* #define DEBUG_AHCI */
39 #define DPRINTF(port, fmt, ...) \
40 do { fprintf(stderr, "ahci: %s: [%d] ", __FUNCTION__, port); \
41 fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
43 #define DPRINTF(port, fmt, ...) do {} while(0)
46 static void check_cmd(AHCIState
*s
, int port
);
47 static int handle_cmd(AHCIState
*s
,int port
,int slot
);
48 static void ahci_reset_port(AHCIState
*s
, int port
);
49 static void ahci_write_fis_d2h(AHCIDevice
*ad
, uint8_t *cmd_fis
);
50 static void ahci_init_d2h(AHCIDevice
*ad
);
52 static uint32_t ahci_port_read(AHCIState
*s
, int port
, int offset
)
56 pr
= &s
->dev
[port
].port_regs
;
62 case PORT_LST_ADDR_HI
:
63 val
= pr
->lst_addr_hi
;
68 case PORT_FIS_ADDR_HI
:
69 val
= pr
->fis_addr_hi
;
81 val
= ((uint16_t)s
->dev
[port
].port
.ifs
[0].error
<< 8) |
82 s
->dev
[port
].port
.ifs
[0].status
;
88 if (s
->dev
[port
].port
.ifs
[0].bs
) {
89 val
= SATA_SCR_SSTATUS_DET_DEV_PRESENT_PHY_UP
|
90 SATA_SCR_SSTATUS_SPD_GEN1
| SATA_SCR_SSTATUS_IPM_ACTIVE
;
92 val
= SATA_SCR_SSTATUS_DET_NODEV
;
102 pr
->scr_act
&= ~s
->dev
[port
].finished
;
103 s
->dev
[port
].finished
= 0;
113 DPRINTF(port
, "offset: 0x%x val: 0x%x\n", offset
, val
);
118 static void ahci_irq_raise(AHCIState
*s
, AHCIDevice
*dev
)
120 struct AHCIPCIState
*d
= container_of(s
, AHCIPCIState
, ahci
);
122 DPRINTF(0, "raise irq\n");
124 if (msi_enabled(&d
->card
)) {
125 msi_notify(&d
->card
, 0);
127 qemu_irq_raise(s
->irq
);
131 static void ahci_irq_lower(AHCIState
*s
, AHCIDevice
*dev
)
133 struct AHCIPCIState
*d
= container_of(s
, AHCIPCIState
, ahci
);
135 DPRINTF(0, "lower irq\n");
137 if (!msi_enabled(&d
->card
)) {
138 qemu_irq_lower(s
->irq
);
142 static void ahci_check_irq(AHCIState
*s
)
146 DPRINTF(-1, "check irq %#x\n", s
->control_regs
.irqstatus
);
148 for (i
= 0; i
< s
->ports
; i
++) {
149 AHCIPortRegs
*pr
= &s
->dev
[i
].port_regs
;
150 if (pr
->irq_stat
& pr
->irq_mask
) {
151 s
->control_regs
.irqstatus
|= (1 << i
);
155 if (s
->control_regs
.irqstatus
&&
156 (s
->control_regs
.ghc
& HOST_CTL_IRQ_EN
)) {
157 ahci_irq_raise(s
, NULL
);
159 ahci_irq_lower(s
, NULL
);
163 static void ahci_trigger_irq(AHCIState
*s
, AHCIDevice
*d
,
166 DPRINTF(d
->port_no
, "trigger irq %#x -> %x\n",
167 irq_type
, d
->port_regs
.irq_mask
& irq_type
);
169 d
->port_regs
.irq_stat
|= irq_type
;
173 static void map_page(uint8_t **ptr
, uint64_t addr
, uint32_t wanted
)
175 target_phys_addr_t len
= wanted
;
178 cpu_physical_memory_unmap(*ptr
, len
, 1, len
);
181 *ptr
= cpu_physical_memory_map(addr
, &len
, 1);
183 cpu_physical_memory_unmap(*ptr
, len
, 1, len
);
188 static void ahci_port_write(AHCIState
*s
, int port
, int offset
, uint32_t val
)
190 AHCIPortRegs
*pr
= &s
->dev
[port
].port_regs
;
192 DPRINTF(port
, "offset: 0x%x val: 0x%x\n", offset
, val
);
196 map_page(&s
->dev
[port
].lst
,
197 ((uint64_t)pr
->lst_addr_hi
<< 32) | pr
->lst_addr
, 1024);
198 s
->dev
[port
].cur_cmd
= NULL
;
200 case PORT_LST_ADDR_HI
:
201 pr
->lst_addr_hi
= val
;
202 map_page(&s
->dev
[port
].lst
,
203 ((uint64_t)pr
->lst_addr_hi
<< 32) | pr
->lst_addr
, 1024);
204 s
->dev
[port
].cur_cmd
= NULL
;
208 map_page(&s
->dev
[port
].res_fis
,
209 ((uint64_t)pr
->fis_addr_hi
<< 32) | pr
->fis_addr
, 256);
211 case PORT_FIS_ADDR_HI
:
212 pr
->fis_addr_hi
= val
;
213 map_page(&s
->dev
[port
].res_fis
,
214 ((uint64_t)pr
->fis_addr_hi
<< 32) | pr
->fis_addr
, 256);
217 pr
->irq_stat
&= ~val
;
220 pr
->irq_mask
= val
& 0xfdc000ff;
224 pr
->cmd
= val
& ~(PORT_CMD_LIST_ON
| PORT_CMD_FIS_ON
);
226 if (pr
->cmd
& PORT_CMD_START
) {
227 pr
->cmd
|= PORT_CMD_LIST_ON
;
230 if (pr
->cmd
& PORT_CMD_FIS_RX
) {
231 pr
->cmd
|= PORT_CMD_FIS_ON
;
234 /* XXX usually the FIS would be pending on the bus here and
235 issuing deferred until the OS enables FIS receival.
236 Instead, we only submit it once - which works in most
237 cases, but is a hack. */
238 if ((pr
->cmd
& PORT_CMD_FIS_ON
) &&
239 !s
->dev
[port
].init_d2h_sent
) {
240 ahci_init_d2h(&s
->dev
[port
]);
241 s
->dev
[port
].init_d2h_sent
= 1;
247 s
->dev
[port
].port
.ifs
[0].error
= (val
>> 8) & 0xff;
248 s
->dev
[port
].port
.ifs
[0].status
= val
& 0xff;
257 if (((pr
->scr_ctl
& AHCI_SCR_SCTL_DET
) == 1) &&
258 ((val
& AHCI_SCR_SCTL_DET
) == 0)) {
259 ahci_reset_port(s
, port
);
271 pr
->cmd_issue
|= val
;
279 static uint64_t ahci_mem_read(void *opaque
, target_phys_addr_t addr
,
282 AHCIState
*s
= opaque
;
285 if (addr
< AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR
) {
288 val
= s
->control_regs
.cap
;
291 val
= s
->control_regs
.ghc
;
294 val
= s
->control_regs
.irqstatus
;
296 case HOST_PORTS_IMPL
:
297 val
= s
->control_regs
.impl
;
300 val
= s
->control_regs
.version
;
304 DPRINTF(-1, "(addr 0x%08X), val 0x%08X\n", (unsigned) addr
, val
);
305 } else if ((addr
>= AHCI_PORT_REGS_START_ADDR
) &&
306 (addr
< (AHCI_PORT_REGS_START_ADDR
+
307 (s
->ports
* AHCI_PORT_ADDR_OFFSET_LEN
)))) {
308 val
= ahci_port_read(s
, (addr
- AHCI_PORT_REGS_START_ADDR
) >> 7,
309 addr
& AHCI_PORT_ADDR_OFFSET_MASK
);
317 static void ahci_mem_write(void *opaque
, target_phys_addr_t addr
,
318 uint64_t val
, unsigned size
)
320 AHCIState
*s
= opaque
;
322 /* Only aligned reads are allowed on AHCI */
324 fprintf(stderr
, "ahci: Mis-aligned write to addr 0x"
325 TARGET_FMT_plx
"\n", addr
);
329 if (addr
< AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR
) {
330 DPRINTF(-1, "(addr 0x%08X), val 0x%08"PRIX64
"\n", (unsigned) addr
, val
);
333 case HOST_CAP
: /* R/WO, RO */
334 /* FIXME handle R/WO */
336 case HOST_CTL
: /* R/W */
337 if (val
& HOST_CTL_RESET
) {
338 DPRINTF(-1, "HBA Reset\n");
339 ahci_reset(container_of(s
, AHCIPCIState
, ahci
));
341 s
->control_regs
.ghc
= (val
& 0x3) | HOST_CTL_AHCI_EN
;
345 case HOST_IRQ_STAT
: /* R/WC, RO */
346 s
->control_regs
.irqstatus
&= ~val
;
349 case HOST_PORTS_IMPL
: /* R/WO, RO */
350 /* FIXME handle R/WO */
352 case HOST_VERSION
: /* RO */
353 /* FIXME report write? */
356 DPRINTF(-1, "write to unknown register 0x%x\n", (unsigned)addr
);
358 } else if ((addr
>= AHCI_PORT_REGS_START_ADDR
) &&
359 (addr
< (AHCI_PORT_REGS_START_ADDR
+
360 (s
->ports
* AHCI_PORT_ADDR_OFFSET_LEN
)))) {
361 ahci_port_write(s
, (addr
- AHCI_PORT_REGS_START_ADDR
) >> 7,
362 addr
& AHCI_PORT_ADDR_OFFSET_MASK
, val
);
367 static MemoryRegionOps ahci_mem_ops
= {
368 .read
= ahci_mem_read
,
369 .write
= ahci_mem_write
,
370 .endianness
= DEVICE_LITTLE_ENDIAN
,
373 static uint64_t ahci_idp_read(void *opaque
, target_phys_addr_t addr
,
376 AHCIState
*s
= opaque
;
378 if (addr
== s
->idp_offset
) {
381 } else if (addr
== s
->idp_offset
+ 4) {
382 /* data register - do memory read at location selected by index */
383 return ahci_mem_read(opaque
, s
->idp_index
, size
);
389 static void ahci_idp_write(void *opaque
, target_phys_addr_t addr
,
390 uint64_t val
, unsigned size
)
392 AHCIState
*s
= opaque
;
394 if (addr
== s
->idp_offset
) {
395 /* index register - mask off reserved bits */
396 s
->idp_index
= (uint32_t)val
& ((AHCI_MEM_BAR_SIZE
- 1) & ~3);
397 } else if (addr
== s
->idp_offset
+ 4) {
398 /* data register - do memory write at location selected by index */
399 ahci_mem_write(opaque
, s
->idp_index
, val
, size
);
403 static MemoryRegionOps ahci_idp_ops
= {
404 .read
= ahci_idp_read
,
405 .write
= ahci_idp_write
,
406 .endianness
= DEVICE_LITTLE_ENDIAN
,
410 static void ahci_reg_init(AHCIState
*s
)
414 s
->control_regs
.cap
= (s
->ports
- 1) |
415 (AHCI_NUM_COMMAND_SLOTS
<< 8) |
416 (AHCI_SUPPORTED_SPEED_GEN1
<< AHCI_SUPPORTED_SPEED
) |
417 HOST_CAP_NCQ
| HOST_CAP_AHCI
;
419 s
->control_regs
.impl
= (1 << s
->ports
) - 1;
421 s
->control_regs
.version
= AHCI_VERSION_1_0
;
423 for (i
= 0; i
< s
->ports
; i
++) {
424 s
->dev
[i
].port_state
= STATE_RUN
;
428 static uint32_t read_from_sglist(uint8_t *buffer
, uint32_t len
,
432 uint32_t total
= 0, once
;
433 ScatterGatherEntry
*cur_prd
;
436 cur_prd
= sglist
->sg
;
437 sgcount
= sglist
->nsg
;
438 for (i
= 0; len
&& sgcount
; i
++) {
439 once
= MIN(cur_prd
->len
, len
);
440 cpu_physical_memory_read(cur_prd
->base
, buffer
, once
);
451 static uint32_t write_to_sglist(uint8_t *buffer
, uint32_t len
,
455 uint32_t total
= 0, once
;
456 ScatterGatherEntry
*cur_prd
;
459 DPRINTF(-1, "total: 0x%x bytes\n", len
);
461 cur_prd
= sglist
->sg
;
462 sgcount
= sglist
->nsg
;
463 for (i
= 0; len
&& sgcount
; i
++) {
464 once
= MIN(cur_prd
->len
, len
);
465 DPRINTF(-1, "write 0x%x bytes to 0x%lx\n", once
, (long)cur_prd
->base
);
466 cpu_physical_memory_write(cur_prd
->base
, buffer
, once
);
477 static void check_cmd(AHCIState
*s
, int port
)
479 AHCIPortRegs
*pr
= &s
->dev
[port
].port_regs
;
482 if ((pr
->cmd
& PORT_CMD_START
) && pr
->cmd_issue
) {
483 for (slot
= 0; (slot
< 32) && pr
->cmd_issue
; slot
++) {
484 if ((pr
->cmd_issue
& (1 << slot
)) &&
485 !handle_cmd(s
, port
, slot
)) {
486 pr
->cmd_issue
&= ~(1 << slot
);
492 static void ahci_check_cmd_bh(void *opaque
)
494 AHCIDevice
*ad
= opaque
;
496 qemu_bh_delete(ad
->check_bh
);
499 if ((ad
->busy_slot
!= -1) &&
500 !(ad
->port
.ifs
[0].status
& (BUSY_STAT
|DRQ_STAT
))) {
502 ad
->port_regs
.cmd_issue
&= ~(1 << ad
->busy_slot
);
506 check_cmd(ad
->hba
, ad
->port_no
);
509 static void ahci_init_d2h(AHCIDevice
*ad
)
511 uint8_t init_fis
[0x20];
512 IDEState
*ide_state
= &ad
->port
.ifs
[0];
514 memset(init_fis
, 0, sizeof(init_fis
));
519 if (ide_state
->drive_kind
== IDE_CD
) {
520 init_fis
[5] = ide_state
->lcyl
;
521 init_fis
[6] = ide_state
->hcyl
;
524 ahci_write_fis_d2h(ad
, init_fis
);
527 static void ahci_reset_port(AHCIState
*s
, int port
)
529 AHCIDevice
*d
= &s
->dev
[port
];
530 AHCIPortRegs
*pr
= &d
->port_regs
;
531 IDEState
*ide_state
= &d
->port
.ifs
[0];
534 DPRINTF(port
, "reset port\n");
536 ide_bus_reset(&d
->port
);
537 ide_state
->ncq_queues
= AHCI_MAX_CMDS
;
543 d
->init_d2h_sent
= 0;
545 ide_state
= &s
->dev
[port
].port
.ifs
[0];
546 if (!ide_state
->bs
) {
550 /* reset ncq queue */
551 for (i
= 0; i
< AHCI_MAX_CMDS
; i
++) {
552 NCQTransferState
*ncq_tfs
= &s
->dev
[port
].ncq_tfs
[i
];
553 if (!ncq_tfs
->used
) {
557 if (ncq_tfs
->aiocb
) {
558 bdrv_aio_cancel(ncq_tfs
->aiocb
);
559 ncq_tfs
->aiocb
= NULL
;
562 qemu_sglist_destroy(&ncq_tfs
->sglist
);
566 s
->dev
[port
].port_state
= STATE_RUN
;
567 if (!ide_state
->bs
) {
568 s
->dev
[port
].port_regs
.sig
= 0;
569 ide_state
->status
= SEEK_STAT
| WRERR_STAT
;
570 } else if (ide_state
->drive_kind
== IDE_CD
) {
571 s
->dev
[port
].port_regs
.sig
= SATA_SIGNATURE_CDROM
;
572 ide_state
->lcyl
= 0x14;
573 ide_state
->hcyl
= 0xeb;
574 DPRINTF(port
, "set lcyl = %d\n", ide_state
->lcyl
);
575 ide_state
->status
= SEEK_STAT
| WRERR_STAT
| READY_STAT
;
577 s
->dev
[port
].port_regs
.sig
= SATA_SIGNATURE_DISK
;
578 ide_state
->status
= SEEK_STAT
| WRERR_STAT
;
581 ide_state
->error
= 1;
585 static void debug_print_fis(uint8_t *fis
, int cmd_len
)
590 fprintf(stderr
, "fis:");
591 for (i
= 0; i
< cmd_len
; i
++) {
592 if ((i
& 0xf) == 0) {
593 fprintf(stderr
, "\n%02x:",i
);
595 fprintf(stderr
, "%02x ",fis
[i
]);
597 fprintf(stderr
, "\n");
601 static void ahci_write_fis_sdb(AHCIState
*s
, int port
, uint32_t finished
)
603 AHCIPortRegs
*pr
= &s
->dev
[port
].port_regs
;
607 if (!s
->dev
[port
].res_fis
||
608 !(pr
->cmd
& PORT_CMD_FIS_RX
)) {
612 sdb_fis
= &s
->dev
[port
].res_fis
[RES_FIS_SDBFIS
];
613 ide_state
= &s
->dev
[port
].port
.ifs
[0];
616 *(uint32_t*)sdb_fis
= 0;
619 sdb_fis
[0] = ide_state
->error
;
620 sdb_fis
[2] = ide_state
->status
& 0x77;
621 s
->dev
[port
].finished
|= finished
;
622 *(uint32_t*)(sdb_fis
+ 4) = cpu_to_le32(s
->dev
[port
].finished
);
624 ahci_trigger_irq(s
, &s
->dev
[port
], PORT_IRQ_STAT_SDBS
);
627 static void ahci_write_fis_d2h(AHCIDevice
*ad
, uint8_t *cmd_fis
)
629 AHCIPortRegs
*pr
= &ad
->port_regs
;
632 target_phys_addr_t cmd_len
= 0x80;
635 if (!ad
->res_fis
|| !(pr
->cmd
& PORT_CMD_FIS_RX
)) {
641 uint64_t tbl_addr
= le64_to_cpu(ad
->cur_cmd
->tbl_addr
);
642 cmd_fis
= cpu_physical_memory_map(tbl_addr
, &cmd_len
, 0);
646 d2h_fis
= &ad
->res_fis
[RES_FIS_RFIS
];
649 d2h_fis
[1] = (ad
->hba
->control_regs
.irqstatus
? (1 << 6) : 0);
650 d2h_fis
[2] = ad
->port
.ifs
[0].status
;
651 d2h_fis
[3] = ad
->port
.ifs
[0].error
;
653 d2h_fis
[4] = cmd_fis
[4];
654 d2h_fis
[5] = cmd_fis
[5];
655 d2h_fis
[6] = cmd_fis
[6];
656 d2h_fis
[7] = cmd_fis
[7];
657 d2h_fis
[8] = cmd_fis
[8];
658 d2h_fis
[9] = cmd_fis
[9];
659 d2h_fis
[10] = cmd_fis
[10];
660 d2h_fis
[11] = cmd_fis
[11];
661 d2h_fis
[12] = cmd_fis
[12];
662 d2h_fis
[13] = cmd_fis
[13];
663 for (i
= 14; i
< 0x20; i
++) {
667 if (d2h_fis
[2] & ERR_STAT
) {
668 ahci_trigger_irq(ad
->hba
, ad
, PORT_IRQ_STAT_TFES
);
671 ahci_trigger_irq(ad
->hba
, ad
, PORT_IRQ_D2H_REG_FIS
);
674 cpu_physical_memory_unmap(cmd_fis
, cmd_len
, 0, cmd_len
);
678 static int ahci_populate_sglist(AHCIDevice
*ad
, QEMUSGList
*sglist
)
680 AHCICmdHdr
*cmd
= ad
->cur_cmd
;
681 uint32_t opts
= le32_to_cpu(cmd
->opts
);
682 uint64_t prdt_addr
= le64_to_cpu(cmd
->tbl_addr
) + 0x80;
683 int sglist_alloc_hint
= opts
>> AHCI_CMD_HDR_PRDT_LEN
;
684 target_phys_addr_t prdt_len
= (sglist_alloc_hint
* sizeof(AHCI_SG
));
685 target_phys_addr_t real_prdt_len
= prdt_len
;
690 if (!sglist_alloc_hint
) {
691 DPRINTF(ad
->port_no
, "no sg list given by guest: 0x%08x\n", opts
);
696 if (!(prdt
= cpu_physical_memory_map(prdt_addr
, &prdt_len
, 0))){
697 DPRINTF(ad
->port_no
, "map failed\n");
701 if (prdt_len
< real_prdt_len
) {
702 DPRINTF(ad
->port_no
, "mapped less than expected\n");
707 /* Get entries in the PRDT, init a qemu sglist accordingly */
708 if (sglist_alloc_hint
> 0) {
709 AHCI_SG
*tbl
= (AHCI_SG
*)prdt
;
711 qemu_sglist_init(sglist
, sglist_alloc_hint
);
712 for (i
= 0; i
< sglist_alloc_hint
; i
++) {
713 /* flags_size is zero-based */
714 qemu_sglist_add(sglist
, le64_to_cpu(tbl
[i
].addr
),
715 le32_to_cpu(tbl
[i
].flags_size
) + 1);
720 cpu_physical_memory_unmap(prdt
, prdt_len
, 0, prdt_len
);
724 static void ncq_cb(void *opaque
, int ret
)
726 NCQTransferState
*ncq_tfs
= (NCQTransferState
*)opaque
;
727 IDEState
*ide_state
= &ncq_tfs
->drive
->port
.ifs
[0];
729 /* Clear bit for this tag in SActive */
730 ncq_tfs
->drive
->port_regs
.scr_act
&= ~(1 << ncq_tfs
->tag
);
734 ide_state
->error
= ABRT_ERR
;
735 ide_state
->status
= READY_STAT
| ERR_STAT
;
736 ncq_tfs
->drive
->port_regs
.scr_err
|= (1 << ncq_tfs
->tag
);
738 ide_state
->status
= READY_STAT
| SEEK_STAT
;
741 ahci_write_fis_sdb(ncq_tfs
->drive
->hba
, ncq_tfs
->drive
->port_no
,
742 (1 << ncq_tfs
->tag
));
744 DPRINTF(ncq_tfs
->drive
->port_no
, "NCQ transfer tag %d finished\n",
747 bdrv_acct_done(ncq_tfs
->drive
->port
.ifs
[0].bs
, &ncq_tfs
->acct
);
748 qemu_sglist_destroy(&ncq_tfs
->sglist
);
752 static void process_ncq_command(AHCIState
*s
, int port
, uint8_t *cmd_fis
,
755 NCQFrame
*ncq_fis
= (NCQFrame
*)cmd_fis
;
756 uint8_t tag
= ncq_fis
->tag
>> 3;
757 NCQTransferState
*ncq_tfs
= &s
->dev
[port
].ncq_tfs
[tag
];
760 /* error - already in use */
761 fprintf(stderr
, "%s: tag %d already used\n", __FUNCTION__
, tag
);
766 ncq_tfs
->drive
= &s
->dev
[port
];
767 ncq_tfs
->slot
= slot
;
768 ncq_tfs
->lba
= ((uint64_t)ncq_fis
->lba5
<< 40) |
769 ((uint64_t)ncq_fis
->lba4
<< 32) |
770 ((uint64_t)ncq_fis
->lba3
<< 24) |
771 ((uint64_t)ncq_fis
->lba2
<< 16) |
772 ((uint64_t)ncq_fis
->lba1
<< 8) |
773 (uint64_t)ncq_fis
->lba0
;
775 /* Note: We calculate the sector count, but don't currently rely on it.
776 * The total size of the DMA buffer tells us the transfer size instead. */
777 ncq_tfs
->sector_count
= ((uint16_t)ncq_fis
->sector_count_high
<< 8) |
778 ncq_fis
->sector_count_low
;
780 DPRINTF(port
, "NCQ transfer LBA from %"PRId64
" to %"PRId64
", "
781 "drive max %"PRId64
"\n",
782 ncq_tfs
->lba
, ncq_tfs
->lba
+ ncq_tfs
->sector_count
- 2,
783 s
->dev
[port
].port
.ifs
[0].nb_sectors
- 1);
785 ahci_populate_sglist(&s
->dev
[port
], &ncq_tfs
->sglist
);
788 switch(ncq_fis
->command
) {
789 case READ_FPDMA_QUEUED
:
790 DPRINTF(port
, "NCQ reading %d sectors from LBA %"PRId64
", "
792 ncq_tfs
->sector_count
-1, ncq_tfs
->lba
, ncq_tfs
->tag
);
794 DPRINTF(port
, "tag %d aio read %"PRId64
"\n",
795 ncq_tfs
->tag
, ncq_tfs
->lba
);
797 bdrv_acct_start(ncq_tfs
->drive
->port
.ifs
[0].bs
, &ncq_tfs
->acct
,
798 (ncq_tfs
->sector_count
-1) * BDRV_SECTOR_SIZE
,
800 ncq_tfs
->aiocb
= dma_bdrv_read(ncq_tfs
->drive
->port
.ifs
[0].bs
,
801 &ncq_tfs
->sglist
, ncq_tfs
->lba
,
804 case WRITE_FPDMA_QUEUED
:
805 DPRINTF(port
, "NCQ writing %d sectors to LBA %"PRId64
", tag %d\n",
806 ncq_tfs
->sector_count
-1, ncq_tfs
->lba
, ncq_tfs
->tag
);
808 DPRINTF(port
, "tag %d aio write %"PRId64
"\n",
809 ncq_tfs
->tag
, ncq_tfs
->lba
);
811 bdrv_acct_start(ncq_tfs
->drive
->port
.ifs
[0].bs
, &ncq_tfs
->acct
,
812 (ncq_tfs
->sector_count
-1) * BDRV_SECTOR_SIZE
,
814 ncq_tfs
->aiocb
= dma_bdrv_write(ncq_tfs
->drive
->port
.ifs
[0].bs
,
815 &ncq_tfs
->sglist
, ncq_tfs
->lba
,
819 DPRINTF(port
, "error: tried to process non-NCQ command as NCQ\n");
820 qemu_sglist_destroy(&ncq_tfs
->sglist
);
825 static int handle_cmd(AHCIState
*s
, int port
, int slot
)
832 target_phys_addr_t cmd_len
;
834 if (s
->dev
[port
].port
.ifs
[0].status
& (BUSY_STAT
|DRQ_STAT
)) {
835 /* Engine currently busy, try again later */
836 DPRINTF(port
, "engine busy\n");
840 cmd
= &((AHCICmdHdr
*)s
->dev
[port
].lst
)[slot
];
842 if (!s
->dev
[port
].lst
) {
843 DPRINTF(port
, "error: lst not given but cmd handled");
847 /* remember current slot handle for later */
848 s
->dev
[port
].cur_cmd
= cmd
;
850 opts
= le32_to_cpu(cmd
->opts
);
851 tbl_addr
= le64_to_cpu(cmd
->tbl_addr
);
854 cmd_fis
= cpu_physical_memory_map(tbl_addr
, &cmd_len
, 1);
857 DPRINTF(port
, "error: guest passed us an invalid cmd fis\n");
861 /* The device we are working for */
862 ide_state
= &s
->dev
[port
].port
.ifs
[0];
864 if (!ide_state
->bs
) {
865 DPRINTF(port
, "error: guest accessed unused port");
869 debug_print_fis(cmd_fis
, 0x90);
870 //debug_print_fis(cmd_fis, (opts & AHCI_CMD_HDR_CMD_FIS_LEN) * 4);
872 switch (cmd_fis
[0]) {
873 case SATA_FIS_TYPE_REGISTER_H2D
:
876 DPRINTF(port
, "unknown command cmd_fis[0]=%02x cmd_fis[1]=%02x "
877 "cmd_fis[2]=%02x\n", cmd_fis
[0], cmd_fis
[1],
883 switch (cmd_fis
[1]) {
884 case SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER
:
889 DPRINTF(port
, "unknown command cmd_fis[0]=%02x cmd_fis[1]=%02x "
890 "cmd_fis[2]=%02x\n", cmd_fis
[0], cmd_fis
[1],
896 switch (s
->dev
[port
].port_state
) {
898 if (cmd_fis
[15] & ATA_SRST
) {
899 s
->dev
[port
].port_state
= STATE_RESET
;
903 if (!(cmd_fis
[15] & ATA_SRST
)) {
904 ahci_reset_port(s
, port
);
909 if (cmd_fis
[1] == SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER
) {
911 /* Check for NCQ command */
912 if ((cmd_fis
[2] == READ_FPDMA_QUEUED
) ||
913 (cmd_fis
[2] == WRITE_FPDMA_QUEUED
)) {
914 process_ncq_command(s
, port
, cmd_fis
, slot
);
918 /* Decompose the FIS */
919 ide_state
->nsector
= (int64_t)((cmd_fis
[13] << 8) | cmd_fis
[12]);
920 ide_state
->feature
= cmd_fis
[3];
921 if (!ide_state
->nsector
) {
922 ide_state
->nsector
= 256;
925 if (ide_state
->drive_kind
!= IDE_CD
) {
927 * We set the sector depending on the sector defined in the FIS.
928 * Unfortunately, the spec isn't exactly obvious on this one.
930 * Apparently LBA48 commands set fis bytes 10,9,8,6,5,4 to the
931 * 48 bit sector number. ATA_CMD_READ_DMA_EXT is an example for
934 * Non-LBA48 commands however use 7[lower 4 bits],6,5,4 to define a
935 * 28-bit sector number. ATA_CMD_READ_DMA is an example for such
938 * Since the spec doesn't explicitly state what each field should
939 * do, I simply assume non-used fields as reserved and OR everything
940 * together, independent of the command.
942 ide_set_sector(ide_state
, ((uint64_t)cmd_fis
[10] << 40)
943 | ((uint64_t)cmd_fis
[9] << 32)
944 /* This is used for LBA48 commands */
945 | ((uint64_t)cmd_fis
[8] << 24)
946 /* This is used for non-LBA48 commands */
947 | ((uint64_t)(cmd_fis
[7] & 0xf) << 24)
948 | ((uint64_t)cmd_fis
[6] << 16)
949 | ((uint64_t)cmd_fis
[5] << 8)
953 /* Copy the ACMD field (ATAPI packet, if any) from the AHCI command
954 * table to ide_state->io_buffer
956 if (opts
& AHCI_CMD_ATAPI
) {
957 memcpy(ide_state
->io_buffer
, &cmd_fis
[AHCI_COMMAND_TABLE_ACMD
], 0x10);
958 ide_state
->lcyl
= 0x14;
959 ide_state
->hcyl
= 0xeb;
960 debug_print_fis(ide_state
->io_buffer
, 0x10);
961 ide_state
->feature
= IDE_FEATURE_DMA
;
962 s
->dev
[port
].done_atapi_packet
= 0;
963 /* XXX send PIO setup FIS */
966 ide_state
->error
= 0;
968 /* Reset transferred byte counter */
971 /* We're ready to process the command in FIS byte 2. */
972 ide_exec_cmd(&s
->dev
[port
].port
, cmd_fis
[2]);
974 if (s
->dev
[port
].port
.ifs
[0].status
& READY_STAT
) {
975 ahci_write_fis_d2h(&s
->dev
[port
], cmd_fis
);
980 cpu_physical_memory_unmap(cmd_fis
, cmd_len
, 1, cmd_len
);
982 if (s
->dev
[port
].port
.ifs
[0].status
& (BUSY_STAT
|DRQ_STAT
)) {
983 /* async command, complete later */
984 s
->dev
[port
].busy_slot
= slot
;
988 /* done handling the command */
992 /* DMA dev <-> ram */
993 static int ahci_start_transfer(IDEDMA
*dma
)
995 AHCIDevice
*ad
= DO_UPCAST(AHCIDevice
, dma
, dma
);
996 IDEState
*s
= &ad
->port
.ifs
[0];
997 uint32_t size
= (uint32_t)(s
->data_end
- s
->data_ptr
);
998 /* write == ram -> device */
999 uint32_t opts
= le32_to_cpu(ad
->cur_cmd
->opts
);
1000 int is_write
= opts
& AHCI_CMD_WRITE
;
1001 int is_atapi
= opts
& AHCI_CMD_ATAPI
;
1004 if (is_atapi
&& !ad
->done_atapi_packet
) {
1005 /* already prepopulated iobuffer */
1006 ad
->done_atapi_packet
= 1;
1010 if (!ahci_populate_sglist(ad
, &s
->sg
)) {
1014 DPRINTF(ad
->port_no
, "%sing %d bytes on %s w/%s sglist\n",
1015 is_write
? "writ" : "read", size
, is_atapi
? "atapi" : "ata",
1016 has_sglist
? "" : "o");
1018 if (is_write
&& has_sglist
&& (s
->data_ptr
< s
->data_end
)) {
1019 read_from_sglist(s
->data_ptr
, size
, &s
->sg
);
1022 if (!is_write
&& has_sglist
&& (s
->data_ptr
< s
->data_end
)) {
1023 write_to_sglist(s
->data_ptr
, size
, &s
->sg
);
1026 /* update number of transferred bytes */
1027 ad
->cur_cmd
->status
= cpu_to_le32(le32_to_cpu(ad
->cur_cmd
->status
) + size
);
1030 /* declare that we processed everything */
1031 s
->data_ptr
= s
->data_end
;
1034 qemu_sglist_destroy(&s
->sg
);
1037 s
->end_transfer_func(s
);
1039 if (!(s
->status
& DRQ_STAT
)) {
1041 ahci_trigger_irq(ad
->hba
, ad
, PORT_IRQ_STAT_DSS
);
1047 static void ahci_start_dma(IDEDMA
*dma
, IDEState
*s
,
1048 BlockDriverCompletionFunc
*dma_cb
)
1050 AHCIDevice
*ad
= DO_UPCAST(AHCIDevice
, dma
, dma
);
1052 DPRINTF(ad
->port_no
, "\n");
1053 ad
->dma_cb
= dma_cb
;
1054 ad
->dma_status
|= BM_STATUS_DMAING
;
1058 static int ahci_dma_prepare_buf(IDEDMA
*dma
, int is_write
)
1060 AHCIDevice
*ad
= DO_UPCAST(AHCIDevice
, dma
, dma
);
1061 IDEState
*s
= &ad
->port
.ifs
[0];
1064 ahci_populate_sglist(ad
, &s
->sg
);
1066 s
->io_buffer_size
= 0;
1067 for (i
= 0; i
< s
->sg
.nsg
; i
++) {
1068 s
->io_buffer_size
+= s
->sg
.sg
[i
].len
;
1071 DPRINTF(ad
->port_no
, "len=%#x\n", s
->io_buffer_size
);
1072 return s
->io_buffer_size
!= 0;
1075 static int ahci_dma_rw_buf(IDEDMA
*dma
, int is_write
)
1077 AHCIDevice
*ad
= DO_UPCAST(AHCIDevice
, dma
, dma
);
1078 IDEState
*s
= &ad
->port
.ifs
[0];
1079 uint8_t *p
= s
->io_buffer
+ s
->io_buffer_index
;
1080 int l
= s
->io_buffer_size
- s
->io_buffer_index
;
1082 if (ahci_populate_sglist(ad
, &s
->sg
)) {
1087 write_to_sglist(p
, l
, &s
->sg
);
1089 read_from_sglist(p
, l
, &s
->sg
);
1092 /* update number of transferred bytes */
1093 ad
->cur_cmd
->status
= cpu_to_le32(le32_to_cpu(ad
->cur_cmd
->status
) + l
);
1094 s
->io_buffer_index
+= l
;
1096 DPRINTF(ad
->port_no
, "len=%#x\n", l
);
1101 static int ahci_dma_set_unit(IDEDMA
*dma
, int unit
)
1103 /* only a single unit per link */
1107 static int ahci_dma_add_status(IDEDMA
*dma
, int status
)
1109 AHCIDevice
*ad
= DO_UPCAST(AHCIDevice
, dma
, dma
);
1110 ad
->dma_status
|= status
;
1111 DPRINTF(ad
->port_no
, "set status: %x\n", status
);
1113 if (status
& BM_STATUS_INT
) {
1114 ahci_trigger_irq(ad
->hba
, ad
, PORT_IRQ_STAT_DSS
);
1120 static int ahci_dma_set_inactive(IDEDMA
*dma
)
1122 AHCIDevice
*ad
= DO_UPCAST(AHCIDevice
, dma
, dma
);
1124 DPRINTF(ad
->port_no
, "dma done\n");
1126 /* update d2h status */
1127 ahci_write_fis_d2h(ad
, NULL
);
1131 if (!ad
->check_bh
) {
1132 /* maybe we still have something to process, check later */
1133 ad
->check_bh
= qemu_bh_new(ahci_check_cmd_bh
, ad
);
1134 qemu_bh_schedule(ad
->check_bh
);
1140 static void ahci_irq_set(void *opaque
, int n
, int level
)
1144 static void ahci_dma_restart_cb(void *opaque
, int running
, RunState state
)
1148 static int ahci_dma_reset(IDEDMA
*dma
)
1153 static const IDEDMAOps ahci_dma_ops
= {
1154 .start_dma
= ahci_start_dma
,
1155 .start_transfer
= ahci_start_transfer
,
1156 .prepare_buf
= ahci_dma_prepare_buf
,
1157 .rw_buf
= ahci_dma_rw_buf
,
1158 .set_unit
= ahci_dma_set_unit
,
1159 .add_status
= ahci_dma_add_status
,
1160 .set_inactive
= ahci_dma_set_inactive
,
1161 .restart_cb
= ahci_dma_restart_cb
,
1162 .reset
= ahci_dma_reset
,
1165 void ahci_init(AHCIState
*s
, DeviceState
*qdev
, int ports
)
1171 s
->dev
= g_malloc0(sizeof(AHCIDevice
) * ports
);
1173 /* XXX BAR size should be 1k, but that breaks, so bump it to 4k for now */
1174 memory_region_init_io(&s
->mem
, &ahci_mem_ops
, s
, "ahci", AHCI_MEM_BAR_SIZE
);
1175 memory_region_init_io(&s
->idp
, &ahci_idp_ops
, s
, "ahci-idp", 32);
1177 irqs
= qemu_allocate_irqs(ahci_irq_set
, s
, s
->ports
);
1179 for (i
= 0; i
< s
->ports
; i
++) {
1180 AHCIDevice
*ad
= &s
->dev
[i
];
1182 ide_bus_new(&ad
->port
, qdev
, i
);
1183 ide_init2(&ad
->port
, irqs
[i
]);
1187 ad
->port
.dma
= &ad
->dma
;
1188 ad
->port
.dma
->ops
= &ahci_dma_ops
;
1189 ad
->port_regs
.cmd
= PORT_CMD_SPIN_UP
| PORT_CMD_POWER_ON
;
1193 void ahci_uninit(AHCIState
*s
)
1195 memory_region_destroy(&s
->mem
);
1196 memory_region_destroy(&s
->idp
);
1200 void ahci_reset(void *opaque
)
1202 struct AHCIPCIState
*d
= opaque
;
1206 d
->ahci
.control_regs
.irqstatus
= 0;
1207 d
->ahci
.control_regs
.ghc
= 0;
1209 for (i
= 0; i
< d
->ahci
.ports
; i
++) {
1210 pr
= &d
->ahci
.dev
[i
].port_regs
;
1214 ahci_reset_port(&d
->ahci
, i
);