/*
 * QEMU IDE Emulation: PCI Bus support.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Openedhand Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "hw/isa/isa.h"
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
#include "qemu/error-report.h"
#include "hw/ide/pci.h"
#include "trace.h"

#define BMDMA_PAGE_SIZE 4096

#define BM_MIGRATION_COMPAT_STATUS_BITS \
        (IDE_RETRY_DMA | IDE_RETRY_PIO | \
        IDE_RETRY_READ | IDE_RETRY_FLUSH)

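/*
 * Background note: the BMDMA register block (one per IDE channel)
 * follows the standard PCI IDE Bus Master layout: the command register
 * at offset 0, the status register at offset 2, and the 32-bit PRD
 * (physical region descriptor) table address at offsets 4-7.  The
 * handlers below implement that programming model.
 */
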
static void bmdma_start_dma(IDEDMA *dma, IDEState *s,
                            BlockCompletionFunc *dma_cb)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->dma_cb = dma_cb;
    bm->cur_addr = bm->addr;

    /* start dma transfer if possible */
    if (bm->status & BM_STATUS_DMAING) {
        bm->dma_cb(bmdma_active_if(bm), 0);
    }
}

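/*
 * If the guest has not yet set the Start/Stop Bus Master (SSBM) bit,
 * the callback stored above is invoked later from bmdma_cmd_writeb()
 * once BM_STATUS_DMAING becomes set.
 */
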
/**
 * Prepare an sglist based on available PRDs.
 * @limit: How many bytes to prepare total.
 *
 * Returns the number of bytes prepared, -1 on error.
 * IDEState.io_buffer_size will contain the number of bytes described
 * by the PRDs, whether or not we added them to the sglist.
 */
static int32_t bmdma_prepare_buf(IDEDMA *dma, int32_t limit)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    pci_dma_sglist_init(&s->sg, pci_dev,
                        s->nsector / (BMDMA_PAGE_SIZE / 512) + 1);
    s->io_buffer_size = 0;
    for (;;) {
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) {
                return s->sg.size;
            }
            pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0) {
                len = 0x10000;
            }
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        l = bm->cur_prd_len;
        if (l > 0) {
            uint64_t sg_len;

            /* Don't add extra bytes to the SGList; consume any remaining
             * PRDs from the guest, but ignore them. */
            sg_len = MIN(limit - s->sg.size, bm->cur_prd_len);
            if (sg_len) {
                qemu_sglist_add(&s->sg, bm->cur_prd_addr, sg_len);
            }

            /* Note: we limit the max transfer to 2GiB.  This should
             * accommodate the largest ATA transaction for LBA48
             * (65,536 sectors) and 32K sector sizes. */
            if (s->sg.size > INT32_MAX) {
                error_report("IDE: sglist describes more than 2GiB.");
                break;
            }
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_size += l;
        }
    }

    qemu_sglist_destroy(&s->sg);
    s->io_buffer_size = 0;
    return -1;
}

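/*
 * Each PRD entry read above is 8 bytes in guest memory: a 32-bit
 * physical buffer address followed by a 32-bit field whose low 16 bits
 * hold the byte count (0 meaning 64 KiB, bit 0 ignored) and whose top
 * bit (0x80000000) marks the last entry of the table.
 */
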
/* return 0 if buffer completed */
static int bmdma_rw_buf(IDEDMA *dma, int is_write)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    for (;;) {
        l = s->io_buffer_size - s->io_buffer_index;
        if (l <= 0) {
            break;
        }
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) {
                return 0;
            }
            pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0) {
                len = 0x10000;
            }
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        if (l > bm->cur_prd_len) {
            l = bm->cur_prd_len;
        }
        if (l > 0) {
            if (is_write) {
                pci_dma_write(pci_dev, bm->cur_prd_addr,
                              s->io_buffer + s->io_buffer_index, l);
            } else {
                pci_dma_read(pci_dev, bm->cur_prd_addr,
                             s->io_buffer + s->io_buffer_index, l);
            }
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_index += l;
        }
    }
    return 1;
}

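/*
 * bmdma_rw_buf() is the bounce-buffer path: it copies between
 * s->io_buffer and guest memory one PRD at a time, whereas
 * bmdma_prepare_buf() builds a scatter/gather list so the block layer
 * can DMA directly to or from guest memory.
 */
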
static void bmdma_set_inactive(IDEDMA *dma, bool more)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->dma_cb = NULL;
    if (more) {
        bm->status |= BM_STATUS_DMAING;
    } else {
        bm->status &= ~BM_STATUS_DMAING;
    }
}

static void bmdma_restart_dma(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->cur_addr = bm->addr;
}

static void bmdma_cancel(BMDMAState *bm)
{
    if (bm->status & BM_STATUS_DMAING) {
        /* cancel DMA request */
        bmdma_set_inactive(&bm->dma, false);
    }
}

static void bmdma_reset(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    trace_bmdma_reset();
    bmdma_cancel(bm);
    bm->cmd = 0;
    bm->status = 0;
    bm->addr = 0;
    bm->cur_addr = 0;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;
}

static void bmdma_irq(void *opaque, int n, int level)
{
    BMDMAState *bm = opaque;

    if (!level) {
        /* pass through lower */
        qemu_set_irq(bm->irq, level);
        return;
    }

    bm->status |= BM_STATUS_INT;

    /* trigger the real irq */
    qemu_set_irq(bm->irq, level);
}

void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val)
{
    trace_bmdma_cmd_writeb(val);

    /* Ignore writes to SSBM if it keeps the old value */
    if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) {
        if (!(val & BM_CMD_START)) {
            ide_cancel_dma_sync(idebus_active_if(bm->bus));
            bm->status &= ~BM_STATUS_DMAING;
        } else {
            bm->cur_addr = bm->addr;
            if (!(bm->status & BM_STATUS_DMAING)) {
                bm->status |= BM_STATUS_DMAING;
                /* start dma transfer if possible */
                if (bm->dma_cb) {
                    bm->dma_cb(bmdma_active_if(bm), 0);
                }
            }
        }
    }

    bm->cmd = val & 0x09;
}

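/*
 * Only bit 0 (SSBM, start/stop) and bit 3 (read/write control) of the
 * bus master command register are writable, hence the 0x09 mask above.
 */
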
static uint64_t bmdma_addr_read(void *opaque, hwaddr addr,
                                unsigned width)
{
    BMDMAState *bm = opaque;
    uint32_t mask = (1ULL << (width * 8)) - 1;
    uint64_t data;

    data = (bm->addr >> (addr * 8)) & mask;
    trace_bmdma_addr_read(data);
    return data;
}

static void bmdma_addr_write(void *opaque, hwaddr addr,
                             uint64_t data, unsigned width)
{
    BMDMAState *bm = opaque;
    int shift = addr * 8;
    uint32_t mask = (1ULL << (width * 8)) - 1;

    trace_bmdma_addr_write(data);
    bm->addr &= ~(mask << shift);
    bm->addr |= ((data & mask) << shift) & ~3;
}

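/*
 * The read-modify-write above merges partial-width accesses into the
 * 32-bit PRD table address.  For example, a 2-byte write of 0x1234 at
 * offset 2 clears bits 16-31 and ORs in 0x12340000; the final "& ~3"
 * forces the low two bits to zero so the table stays dword-aligned.
 */
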
MemoryRegionOps bmdma_addr_ioport_ops = {
    .read = bmdma_addr_read,
    .write = bmdma_addr_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static bool ide_bmdma_current_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    return (bm->cur_prd_len != 0);
}

static bool ide_bmdma_status_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    /* Older versions abused some bits in the status register for internal
     * error state. If any of these bits are set, we must add a subsection to
     * transfer the real status register */
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    return ((bm->status & abused_bits) != 0);
}

static int ide_bmdma_pre_save(void *opaque)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    if (!(bm->status & BM_STATUS_DMAING) && bm->dma_cb) {
        bm->bus->error_status =
            ide_dma_cmd_to_retry(bmdma_active_if(bm)->dma_cmd);
    }
    bm->migration_retry_unit = bm->bus->retry_unit;
    bm->migration_retry_sector_num = bm->bus->retry_sector_num;
    bm->migration_retry_nsector = bm->bus->retry_nsector;
    bm->migration_compat_status =
        (bm->status & ~abused_bits) | (bm->bus->error_status & abused_bits);

    return 0;
}

/* This function accesses bm->bus->error_status which is loaded only after
 * BMDMA itself. This is why the function is called from ide_pci_post_load
 * instead of being registered with VMState where it would run too early. */
static int ide_bmdma_post_load(void *opaque, int version_id)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    if (bm->status == 0) {
        bm->status = bm->migration_compat_status & ~abused_bits;
        bm->bus->error_status |= bm->migration_compat_status & abused_bits;
    }
    if (bm->bus->error_status) {
        bm->bus->retry_sector_num = bm->migration_retry_sector_num;
        bm->bus->retry_nsector = bm->migration_retry_nsector;
        bm->bus->retry_unit = bm->migration_retry_unit;
    }

    return 0;
}

static const VMStateDescription vmstate_bmdma_current = {
    .name = "ide bmdma_current",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_bmdma_current_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(cur_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_last, BMDMAState),
        VMSTATE_UINT32(cur_prd_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_len, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma_status = {
    .name = "ide bmdma/status",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_bmdma_status_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(status, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma = {
    .name = "ide bmdma",
    .version_id = 3,
    .minimum_version_id = 0,
    .pre_save = ide_bmdma_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(cmd, BMDMAState),
        VMSTATE_UINT8(migration_compat_status, BMDMAState),
        VMSTATE_UINT32(addr, BMDMAState),
        VMSTATE_INT64(migration_retry_sector_num, BMDMAState),
        VMSTATE_UINT32(migration_retry_nsector, BMDMAState),
        VMSTATE_UINT8(migration_retry_unit, BMDMAState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_bmdma_current,
        &vmstate_bmdma_status,
        NULL
    }
};

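/*
 * The two subsections above are only transmitted when their .needed
 * callback returns true, so migration streams to and from versions
 * that never set those fields remain compatible.
 */
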
static int ide_pci_post_load(void *opaque, int version_id)
{
    PCIIDEState *d = opaque;
    int i;

    for (i = 0; i < 2; i++) {
        /* current versions always store 0/1, but older versions
           stored bigger values. We only need the last bit */
        d->bmdma[i].migration_retry_unit &= 1;
        ide_bmdma_post_load(&d->bmdma[i], -1);
    }

    return 0;
}

const VMStateDescription vmstate_ide_pci = {
    .name = "ide",
    .version_id = 3,
    .minimum_version_id = 0,
    .post_load = ide_pci_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, PCIIDEState),
        VMSTATE_STRUCT_ARRAY(bmdma, PCIIDEState, 2, 0,
                             vmstate_bmdma, BMDMAState),
        VMSTATE_IDE_BUS_ARRAY(bus, PCIIDEState, 2),
        VMSTATE_IDE_DRIVES(bus[0].ifs, PCIIDEState),
        VMSTATE_IDE_DRIVES(bus[1].ifs, PCIIDEState),
        VMSTATE_END_OF_LIST()
    }
};

void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table)
{
    PCIIDEState *d = PCI_IDE(dev);
    static const int bus[4]  = { 0, 0, 1, 1 };
    static const int unit[4] = { 0, 1, 0, 1 };
    int i;

    for (i = 0; i < 4; i++) {
        if (hd_table[i] == NULL) {
            continue;
        }
        ide_create_drive(d->bus + bus[i], unit[i], hd_table[i]);
    }
}

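/*
 * The bus[]/unit[] tables map the four drive slots to primary master,
 * primary slave, secondary master and secondary slave, in that order.
 */
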
static const struct IDEDMAOps bmdma_ops = {
    .start_dma = bmdma_start_dma,
    .prepare_buf = bmdma_prepare_buf,
    .rw_buf = bmdma_rw_buf,
    .restart_dma = bmdma_restart_dma,
    .set_inactive = bmdma_set_inactive,
    .reset = bmdma_reset,
};

void bmdma_init(IDEBus *bus, BMDMAState *bm, PCIIDEState *d)
{
    if (bus->dma == &bm->dma) {
        return;
    }

    bm->dma.ops = &bmdma_ops;
    bus->dma = &bm->dma;
    bm->irq = bus->irq;
    bus->irq = qemu_allocate_irq(bmdma_irq, bm, 0);
    bm->pci_dev = d;
}

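/*
 * Note the IRQ interposition: the bus's original IRQ line is saved in
 * bm->irq and replaced with bmdma_irq(), which latches BM_STATUS_INT
 * before forwarding the level to the saved line.
 */
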
static const TypeInfo pci_ide_type_info = {
    .name = TYPE_PCI_IDE,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(PCIIDEState),
    .abstract = true,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};

static void pci_ide_register_types(void)
{
    type_register_static(&pci_ide_type_info);
}

type_init(pci_ide_register_types)