/*
 * QEMU IDE Emulation: PCI Bus support.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Openedhand Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include <hw/hw.h> /* assumed: pulls in the IRQ helpers (qemu_set_irq etc.) */
#include <hw/i386/pc.h>
#include <hw/pci/pci.h>
#include <hw/isa/isa.h>
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
#include "qemu/error-report.h"
#include <hw/ide/pci.h>
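
/*
 * A PRD table is never walked more than one page past its base address;
 * the "fail safe of one page" checks below use this constant as the bound.
 */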
#define BMDMA_PAGE_SIZE 4096

#define BM_MIGRATION_COMPAT_STATUS_BITS \
        (IDE_RETRY_DMA | IDE_RETRY_PIO | \
         IDE_RETRY_READ | IDE_RETRY_FLUSH)
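
/*
 * Start a DMA transfer: latch the completion callback and reset the cached
 * PRD cursor; if the guest has already set the Start/Stop bit
 * (BM_STATUS_DMAING), kick off the transfer immediately.
 */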
static void bmdma_start_dma(IDEDMA *dma, IDEState *s,
                            BlockCompletionFunc *dma_cb)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->dma_cb = dma_cb;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;

    if (bm->status & BM_STATUS_DMAING) {
        bm->dma_cb(bmdma_active_if(bm), 0);
    }
}
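
/*
 * Each Physical Region Descriptor (PRD) fetched below is two little-endian
 * 32-bit words: a physical base address, and a word whose low 16 bits hold
 * the byte count (bit 0 ignored, 0 meaning 64 KiB) and whose bit 31 marks
 * the end of the table, as defined by the Bus Master IDE programming
 * interface (SFF-8038i).
 */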
/**
 * Prepare an sglist based on available PRDs.
 * @limit: How many bytes to prepare total.
 *
 * Returns the number of bytes prepared, -1 on error.
 * IDEState.io_buffer_size will contain the number of bytes described
 * by the PRDs, whether or not we added them to the sglist.
 */
static int32_t bmdma_prepare_buf(IDEDMA *dma, int32_t limit)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    pci_dma_sglist_init(&s->sg, pci_dev,
                        s->nsector / (BMDMA_PAGE_SIZE / 512) + 1);
    s->io_buffer_size = 0;
    for(;;) {
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) {
                return s->sg.size;
            }
            pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0)
                len = 0x10000;
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        l = bm->cur_prd_len;
        if (l > 0) {
            uint64_t sg_len;

            /* Don't add extra bytes to the SGList; consume any remaining
             * PRDs from the guest, but ignore them. */
            sg_len = MIN(limit - s->sg.size, bm->cur_prd_len);
            if (sg_len) {
                qemu_sglist_add(&s->sg, bm->cur_prd_addr, sg_len);
            }

            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_size += l;
        }
    }

    qemu_sglist_destroy(&s->sg);
    s->io_buffer_size = 0;
    return -1;
}
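
/*
 * Unlike bmdma_prepare_buf, which builds a scatter/gather list for the
 * block layer, bmdma_rw_buf itself copies data between s->io_buffer and
 * guest memory, walking the same PRD table one descriptor at a time.
 */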
/* return 0 if buffer completed */
static int bmdma_rw_buf(IDEDMA *dma, int is_write)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    for(;;) {
        l = s->io_buffer_size - s->io_buffer_index;
        if (l <= 0)
            break;
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE)
                return 0;
            pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0)
                len = 0x10000;
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        if (l > bm->cur_prd_len)
            l = bm->cur_prd_len;
        if (l > 0) {
            if (is_write) {
                pci_dma_write(pci_dev, bm->cur_prd_addr,
                              s->io_buffer + s->io_buffer_index, l);
            } else {
                pci_dma_read(pci_dev, bm->cur_prd_addr,
                             s->io_buffer + s->io_buffer_index, l);
            }
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_index += l;
        }
    }
    return 1;
}
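
/*
 * Drop the completion callback; the 'more' flag keeps BM_STATUS_DMAING
 * asserted when the transfer will continue with a further request and
 * clears it on final completion.
 */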
static void bmdma_set_inactive(IDEDMA *dma, bool more)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->dma_cb = NULL;
    if (more) {
        bm->status |= BM_STATUS_DMAING;
    } else {
        bm->status &= ~BM_STATUS_DMAING;
    }
}
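
/*
 * Rewind the PRD cursor to the base of the table so that a restarted
 * transfer re-walks the descriptors from the top.
 */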
static void bmdma_restart_dma(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->cur_addr = bm->addr;
}

static void bmdma_cancel(BMDMAState *bm)
{
    if (bm->status & BM_STATUS_DMAING) {
        /* cancel DMA request */
        bmdma_set_inactive(&bm->dma, false);
    }
}

static void bmdma_reset(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

#ifdef DEBUG_IDE
    printf("ide: dma_reset\n");
#endif
    bmdma_cancel(bm);
    bm->cmd = 0;
    bm->status = 0;
    bm->addr = 0;
    bm->cur_addr = 0;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;
}
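
/*
 * IRQ interposer installed by bmdma_init(): a rising edge latches
 * BM_STATUS_INT in the status register before the level is forwarded to
 * the original bus IRQ line; a falling edge is passed through unchanged.
 */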
static void bmdma_irq(void *opaque, int n, int level)
{
    BMDMAState *bm = opaque;

    if (!level) {
        /* pass through lower */
        qemu_set_irq(bm->irq, level);
        return;
    }

    bm->status |= BM_STATUS_INT;

    /* trigger the real irq */
    qemu_set_irq(bm->irq, level);
}
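
/*
 * Command register write. Bit 0 (BM_CMD_START, the SSBM bit) starts or
 * stops bus mastering; only bits 0 and 3 are writable, hence the final
 * 'val & 0x09' mask.
 */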
void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val)
{
#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, val);
#endif

    /* Ignore writes to SSBM if it keeps the old value */
    if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) {
        if (!(val & BM_CMD_START)) {
            /* First invoke the callbacks of all buffered requests
             * and flag those requests as orphaned. Ideally there
             * are no unbuffered requests (scatter-gather DMA or
             * write requests) pending, so we can avoid draining. */
            IDEBufferedRequest *req;
            IDEState *s = idebus_active_if(bm->bus);
            QLIST_FOREACH(req, &s->buffered_requests, list) {
                if (!req->orphaned) {
#ifdef DEBUG_IDE
                    printf("%s: invoking cb %p of buffered request %p with"
                           " -ECANCELED\n", __func__, req->original_cb, req);
#endif
                    req->original_cb(req->original_opaque, -ECANCELED);
                }
                req->orphaned = true;
            }
            /*
             * We can't cancel scatter-gather DMA in the middle of the
             * operation or a partial (not full) DMA transfer would reach
             * the storage, so we wait for completion instead (we behave
             * as if the DMA was already complete by the time the guest
             * tried to cancel it via bmdma_cmd_writeb with BM_CMD_START
             * not set).
             *
             * In the future we will be able to safely cancel the I/O once
             * the whole DMA operation is submitted to disk as a single
             * aio operation with preadv/pwritev.
             */
            if (bm->bus->dma->aiocb) {
#ifdef DEBUG_IDE
                printf("%s: draining all remaining requests\n", __func__);
#endif
                blk_drain_all();
                assert(bm->bus->dma->aiocb == NULL);
            }
            bm->status &= ~BM_STATUS_DMAING;
        } else {
            bm->cur_addr = bm->addr;
            if (!(bm->status & BM_STATUS_DMAING)) {
                bm->status |= BM_STATUS_DMAING;
                /* start dma transfer if possible */
                if (bm->dma_cb) {
                    bm->dma_cb(bmdma_active_if(bm), 0);
                }
            }
        }
    }

    bm->cmd = val & 0x09;
}
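
/*
 * Byte/word/dword accessors for the 32-bit PRD table pointer register.
 * Writes clear the low two bits ('& ~3') because the table must be
 * dword-aligned.
 */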
static uint64_t bmdma_addr_read(void *opaque, hwaddr addr,
                                unsigned width)
{
    BMDMAState *bm = opaque;
    uint32_t mask = (1ULL << (width * 8)) - 1;
    uint64_t data;

    data = (bm->addr >> (addr * 8)) & mask;
#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, (unsigned)data);
#endif
    return data;
}

static void bmdma_addr_write(void *opaque, hwaddr addr,
                             uint64_t data, unsigned width)
{
    BMDMAState *bm = opaque;
    int shift = addr * 8;
    uint32_t mask = (1ULL << (width * 8)) - 1;

#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, (unsigned)data);
#endif
    bm->addr &= ~(mask << shift);
    bm->addr |= ((data & mask) << shift) & ~3;
}

MemoryRegionOps bmdma_addr_ioport_ops = {
    .read = bmdma_addr_read,
    .write = bmdma_addr_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
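
/*
 * Migration support. The .needed predicates below decide whether the
 * optional VMState subsections are sent at all: the current PRD cursor
 * travels only in the middle of a transfer, and the raw status register
 * only when legacy error bits are in use.
 */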
static bool ide_bmdma_current_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    return (bm->cur_prd_len != 0);
}

static bool ide_bmdma_status_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    /* Older versions abused some bits in the status register for internal
     * error state. If any of these bits are set, we must add a subsection to
     * transfer the real status register. */
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    return ((bm->status & abused_bits) != 0);
}

static void ide_bmdma_pre_save(void *opaque)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    bm->migration_retry_unit = bm->bus->retry_unit;
    bm->migration_retry_sector_num = bm->bus->retry_sector_num;
    bm->migration_retry_nsector = bm->bus->retry_nsector;
    bm->migration_compat_status =
        (bm->status & ~abused_bits) | (bm->bus->error_status & abused_bits);
}

/* This function accesses bm->bus->error_status which is loaded only after
 * BMDMA itself. This is why the function is called from ide_pci_post_load
 * instead of being registered with VMState where it would run too early. */
static int ide_bmdma_post_load(void *opaque, int version_id)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    if (bm->status == 0) {
        bm->status = bm->migration_compat_status & ~abused_bits;
        bm->bus->error_status |= bm->migration_compat_status & abused_bits;
    }
    if (bm->bus->error_status) {
        bm->bus->retry_sector_num = bm->migration_retry_sector_num;
        bm->bus->retry_nsector = bm->migration_retry_nsector;
        bm->bus->retry_unit = bm->migration_retry_unit;
    }

    return 0;
}

static const VMStateDescription vmstate_bmdma_current = {
    .name = "ide bmdma_current",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_bmdma_current_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(cur_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_last, BMDMAState),
        VMSTATE_UINT32(cur_prd_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_len, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma_status = {
    .name = "ide bmdma/status",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_bmdma_status_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(status, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma = {
    .name = "ide bmdma",
    .version_id = 3,
    .minimum_version_id = 0,
    .pre_save = ide_bmdma_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(cmd, BMDMAState),
        VMSTATE_UINT8(migration_compat_status, BMDMAState),
        VMSTATE_UINT32(addr, BMDMAState),
        VMSTATE_INT64(migration_retry_sector_num, BMDMAState),
        VMSTATE_UINT32(migration_retry_nsector, BMDMAState),
        VMSTATE_UINT8(migration_retry_unit, BMDMAState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_bmdma_current,
        &vmstate_bmdma_status,
        NULL
    }
};
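
/*
 * Top-level post_load hook: it runs after both BMDMA channels and the IDE
 * buses have been loaded, which is why ide_bmdma_post_load is driven from
 * here rather than registered with vmstate_bmdma itself (see the comment
 * above that function).
 */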
static int ide_pci_post_load(void *opaque, int version_id)
{
    PCIIDEState *d = opaque;
    int i;

    for(i = 0; i < 2; i++) {
        /* current versions always store 0/1, but older versions
           stored bigger values. We only need the last bit. */
        d->bmdma[i].migration_retry_unit &= 1;
        ide_bmdma_post_load(&d->bmdma[i], -1);
    }

    return 0;
}

const VMStateDescription vmstate_ide_pci = {
    .name = "ide",
    .version_id = 3,
    .minimum_version_id = 0,
    .post_load = ide_pci_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, PCIIDEState),
        VMSTATE_STRUCT_ARRAY(bmdma, PCIIDEState, 2, 0,
                             vmstate_bmdma, BMDMAState),
        VMSTATE_IDE_BUS_ARRAY(bus, PCIIDEState, 2),
        VMSTATE_IDE_DRIVES(bus[0].ifs, PCIIDEState),
        VMSTATE_IDE_DRIVES(bus[1].ifs, PCIIDEState),
        VMSTATE_END_OF_LIST()
    }
};
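
/*
 * Attach up to four drives: table index i maps to primary/secondary bus
 * (bus[i]) and master/slave unit (unit[i]) in the conventional order.
 */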
void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table)
{
    PCIIDEState *d = PCI_IDE(dev);
    static const int bus[4]  = { 0, 0, 1, 1 };
    static const int unit[4] = { 0, 1, 0, 1 };
    int i;

    for (i = 0; i < 4; i++) {
        if (hd_table[i] == NULL) {
            continue;
        }
        ide_create_drive(d->bus + bus[i], unit[i], hd_table[i]);
    }
}

static const struct IDEDMAOps bmdma_ops = {
    .start_dma = bmdma_start_dma,
    .prepare_buf = bmdma_prepare_buf,
    .rw_buf = bmdma_rw_buf,
    .restart_dma = bmdma_restart_dma,
    .set_inactive = bmdma_set_inactive,
    .reset = bmdma_reset,
};
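
/*
 * Wire one BMDMA channel to its IDE bus: install the DMA ops, then
 * interpose bmdma_irq() on the bus IRQ line, saving the original line in
 * bm->irq so it can be raised after BM_STATUS_INT is latched.
 */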
void bmdma_init(IDEBus *bus, BMDMAState *bm, PCIIDEState *d)
{
    if (bus->dma == &bm->dma) {
        return;
    }

    bm->dma.ops = &bmdma_ops;
    bus->dma = &bm->dma;
    bm->irq = bus->irq;
    bus->irq = qemu_allocate_irq(bmdma_irq, bm, 0);
    bm->pci_dev = d;
}

static const TypeInfo pci_ide_type_info = {
    .name = TYPE_PCI_IDE,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(PCIIDEState),
    .abstract = true,
};

static void pci_ide_register_types(void)
{
    type_register_static(&pci_ide_type_info);
}

type_init(pci_ide_register_types)