2 * QEMU IDE disk and CD/DVD-ROM Emulator
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2006 Openedhand Ltd.
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
25 #include "qemu/osdep.h"
27 #include "hw/i386/pc.h"
28 #include "hw/pci/pci.h"
29 #include "hw/isa/isa.h"
30 #include "qemu/error-report.h"
31 #include "qemu/timer.h"
32 #include "sysemu/sysemu.h"
33 #include "sysemu/dma.h"
34 #include "hw/block/block.h"
35 #include "sysemu/block-backend.h"
36 #include "qemu/cutils.h"
37 #include "qemu/error-report.h"
39 #include "hw/ide/internal.h"
/* Canned SMART attribute table, one 12-byte record per attribute.
 * These values were based on a Seagate ST3500418AS but have been modified
 * to make more sense in QEMU. */
static const int smart_attributes[][12] = {
    /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
    /* raw read error rate */
    { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
    /* spin up time (SMART attribute id 0x03) */
    { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* start stop count */
    { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
    /* remapped sectors */
    { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
    /* power on hours (SMART attribute id 0x09) */
    { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* power cycle count */
    { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* airflow-temperature-celsius */
    { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
};
62 const char *IDE_DMA_CMD_lookup
[IDE_DMA__COUNT
] = {
63 [IDE_DMA_READ
] = "DMA READ",
64 [IDE_DMA_WRITE
] = "DMA WRITE",
65 [IDE_DMA_TRIM
] = "DMA TRIM",
66 [IDE_DMA_ATAPI
] = "DMA ATAPI"
69 static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval
)
71 if ((unsigned)enval
< IDE_DMA__COUNT
) {
72 return IDE_DMA_CMD_lookup
[enval
];
74 return "DMA UNKNOWN CMD";
77 static void ide_dummy_transfer_stop(IDEState
*s
);
79 static void padstr(char *str
, const char *src
, int len
)
82 for(i
= 0; i
< len
; i
++) {
91 static void put_le16(uint16_t *p
, unsigned int v
)
96 static void ide_identify_size(IDEState
*s
)
98 uint16_t *p
= (uint16_t *)s
->identify_data
;
99 put_le16(p
+ 60, s
->nb_sectors
);
100 put_le16(p
+ 61, s
->nb_sectors
>> 16);
101 put_le16(p
+ 100, s
->nb_sectors
);
102 put_le16(p
+ 101, s
->nb_sectors
>> 16);
103 put_le16(p
+ 102, s
->nb_sectors
>> 32);
104 put_le16(p
+ 103, s
->nb_sectors
>> 48);
107 static void ide_identify(IDEState
*s
)
110 unsigned int oldsize
;
111 IDEDevice
*dev
= s
->unit
? s
->bus
->slave
: s
->bus
->master
;
113 p
= (uint16_t *)s
->identify_data
;
114 if (s
->identify_set
) {
117 memset(p
, 0, sizeof(s
->identify_data
));
119 put_le16(p
+ 0, 0x0040);
120 put_le16(p
+ 1, s
->cylinders
);
121 put_le16(p
+ 3, s
->heads
);
122 put_le16(p
+ 4, 512 * s
->sectors
); /* XXX: retired, remove ? */
123 put_le16(p
+ 5, 512); /* XXX: retired, remove ? */
124 put_le16(p
+ 6, s
->sectors
);
125 padstr((char *)(p
+ 10), s
->drive_serial_str
, 20); /* serial number */
126 put_le16(p
+ 20, 3); /* XXX: retired, remove ? */
127 put_le16(p
+ 21, 512); /* cache size in sectors */
128 put_le16(p
+ 22, 4); /* ecc bytes */
129 padstr((char *)(p
+ 23), s
->version
, 8); /* firmware version */
130 padstr((char *)(p
+ 27), s
->drive_model_str
, 40); /* model */
131 #if MAX_MULT_SECTORS > 1
132 put_le16(p
+ 47, 0x8000 | MAX_MULT_SECTORS
);
134 put_le16(p
+ 48, 1); /* dword I/O */
135 put_le16(p
+ 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
136 put_le16(p
+ 51, 0x200); /* PIO transfer cycle */
137 put_le16(p
+ 52, 0x200); /* DMA transfer cycle */
138 put_le16(p
+ 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
139 put_le16(p
+ 54, s
->cylinders
);
140 put_le16(p
+ 55, s
->heads
);
141 put_le16(p
+ 56, s
->sectors
);
142 oldsize
= s
->cylinders
* s
->heads
* s
->sectors
;
143 put_le16(p
+ 57, oldsize
);
144 put_le16(p
+ 58, oldsize
>> 16);
146 put_le16(p
+ 59, 0x100 | s
->mult_sectors
);
147 /* *(p + 60) := nb_sectors -- see ide_identify_size */
148 /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
149 put_le16(p
+ 62, 0x07); /* single word dma0-2 supported */
150 put_le16(p
+ 63, 0x07); /* mdma0-2 supported */
151 put_le16(p
+ 64, 0x03); /* pio3-4 supported */
152 put_le16(p
+ 65, 120);
153 put_le16(p
+ 66, 120);
154 put_le16(p
+ 67, 120);
155 put_le16(p
+ 68, 120);
156 if (dev
&& dev
->conf
.discard_granularity
) {
157 put_le16(p
+ 69, (1 << 14)); /* determinate TRIM behavior */
161 put_le16(p
+ 75, s
->ncq_queues
- 1);
163 put_le16(p
+ 76, (1 << 8));
166 put_le16(p
+ 80, 0xf0); /* ata3 -> ata6 supported */
167 put_le16(p
+ 81, 0x16); /* conforms to ata5 */
168 /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
169 put_le16(p
+ 82, (1 << 14) | (1 << 5) | 1);
170 /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
171 put_le16(p
+ 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
172 /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
174 put_le16(p
+ 84, (1 << 14) | (1 << 8) | 0);
176 put_le16(p
+ 84, (1 << 14) | 0);
178 /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
179 if (blk_enable_write_cache(s
->blk
)) {
180 put_le16(p
+ 85, (1 << 14) | (1 << 5) | 1);
182 put_le16(p
+ 85, (1 << 14) | 1);
184 /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
185 put_le16(p
+ 86, (1 << 13) | (1 <<12) | (1 << 10));
186 /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
188 put_le16(p
+ 87, (1 << 14) | (1 << 8) | 0);
190 put_le16(p
+ 87, (1 << 14) | 0);
192 put_le16(p
+ 88, 0x3f | (1 << 13)); /* udma5 set and supported */
193 put_le16(p
+ 93, 1 | (1 << 14) | 0x2000);
194 /* *(p + 100) := nb_sectors -- see ide_identify_size */
195 /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
196 /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
197 /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
199 if (dev
&& dev
->conf
.physical_block_size
)
200 put_le16(p
+ 106, 0x6000 | get_physical_block_exp(&dev
->conf
));
202 /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
203 put_le16(p
+ 108, s
->wwn
>> 48);
204 put_le16(p
+ 109, s
->wwn
>> 32);
205 put_le16(p
+ 110, s
->wwn
>> 16);
206 put_le16(p
+ 111, s
->wwn
);
208 if (dev
&& dev
->conf
.discard_granularity
) {
209 put_le16(p
+ 169, 1); /* TRIM support */
212 put_le16(p
+ 217, dev
->rotation_rate
); /* Nominal media rotation rate */
215 ide_identify_size(s
);
219 memcpy(s
->io_buffer
, p
, sizeof(s
->identify_data
));
222 static void ide_atapi_identify(IDEState
*s
)
226 p
= (uint16_t *)s
->identify_data
;
227 if (s
->identify_set
) {
230 memset(p
, 0, sizeof(s
->identify_data
));
232 /* Removable CDROM, 50us response, 12 byte packets */
233 put_le16(p
+ 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
234 padstr((char *)(p
+ 10), s
->drive_serial_str
, 20); /* serial number */
235 put_le16(p
+ 20, 3); /* buffer type */
236 put_le16(p
+ 21, 512); /* cache size in sectors */
237 put_le16(p
+ 22, 4); /* ecc bytes */
238 padstr((char *)(p
+ 23), s
->version
, 8); /* firmware version */
239 padstr((char *)(p
+ 27), s
->drive_model_str
, 40); /* model */
240 put_le16(p
+ 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
242 put_le16(p
+ 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
243 put_le16(p
+ 53, 7); /* words 64-70, 54-58, 88 valid */
244 put_le16(p
+ 62, 7); /* single word dma0-2 supported */
245 put_le16(p
+ 63, 7); /* mdma0-2 supported */
247 put_le16(p
+ 49, 1 << 9); /* LBA supported, no DMA */
248 put_le16(p
+ 53, 3); /* words 64-70, 54-58 valid */
249 put_le16(p
+ 63, 0x103); /* DMA modes XXX: may be incorrect */
251 put_le16(p
+ 64, 3); /* pio3-4 supported */
252 put_le16(p
+ 65, 0xb4); /* minimum DMA multiword tx cycle time */
253 put_le16(p
+ 66, 0xb4); /* recommended DMA multiword tx cycle time */
254 put_le16(p
+ 67, 0x12c); /* minimum PIO cycle time without flow control */
255 put_le16(p
+ 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
257 put_le16(p
+ 71, 30); /* in ns */
258 put_le16(p
+ 72, 30); /* in ns */
261 put_le16(p
+ 75, s
->ncq_queues
- 1);
263 put_le16(p
+ 76, (1 << 8));
266 put_le16(p
+ 80, 0x1e); /* support up to ATA/ATAPI-4 */
268 put_le16(p
+ 84, (1 << 8)); /* supports WWN for words 108-111 */
269 put_le16(p
+ 87, (1 << 8)); /* WWN enabled */
273 put_le16(p
+ 88, 0x3f | (1 << 13)); /* udma5 set and supported */
277 /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
278 put_le16(p
+ 108, s
->wwn
>> 48);
279 put_le16(p
+ 109, s
->wwn
>> 32);
280 put_le16(p
+ 110, s
->wwn
>> 16);
281 put_le16(p
+ 111, s
->wwn
);
287 memcpy(s
->io_buffer
, p
, sizeof(s
->identify_data
));
290 static void ide_cfata_identify_size(IDEState
*s
)
292 uint16_t *p
= (uint16_t *)s
->identify_data
;
293 put_le16(p
+ 7, s
->nb_sectors
>> 16); /* Sectors per card */
294 put_le16(p
+ 8, s
->nb_sectors
); /* Sectors per card */
295 put_le16(p
+ 60, s
->nb_sectors
); /* Total LBA sectors */
296 put_le16(p
+ 61, s
->nb_sectors
>> 16); /* Total LBA sectors */
299 static void ide_cfata_identify(IDEState
*s
)
304 p
= (uint16_t *)s
->identify_data
;
305 if (s
->identify_set
) {
308 memset(p
, 0, sizeof(s
->identify_data
));
310 cur_sec
= s
->cylinders
* s
->heads
* s
->sectors
;
312 put_le16(p
+ 0, 0x848a); /* CF Storage Card signature */
313 put_le16(p
+ 1, s
->cylinders
); /* Default cylinders */
314 put_le16(p
+ 3, s
->heads
); /* Default heads */
315 put_le16(p
+ 6, s
->sectors
); /* Default sectors per track */
316 /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
317 /* *(p + 8) := nb_sectors -- see ide_cfata_identify_size */
318 padstr((char *)(p
+ 10), s
->drive_serial_str
, 20); /* serial number */
319 put_le16(p
+ 22, 0x0004); /* ECC bytes */
320 padstr((char *) (p
+ 23), s
->version
, 8); /* Firmware Revision */
321 padstr((char *) (p
+ 27), s
->drive_model_str
, 40);/* Model number */
322 #if MAX_MULT_SECTORS > 1
323 put_le16(p
+ 47, 0x8000 | MAX_MULT_SECTORS
);
325 put_le16(p
+ 47, 0x0000);
327 put_le16(p
+ 49, 0x0f00); /* Capabilities */
328 put_le16(p
+ 51, 0x0002); /* PIO cycle timing mode */
329 put_le16(p
+ 52, 0x0001); /* DMA cycle timing mode */
330 put_le16(p
+ 53, 0x0003); /* Translation params valid */
331 put_le16(p
+ 54, s
->cylinders
); /* Current cylinders */
332 put_le16(p
+ 55, s
->heads
); /* Current heads */
333 put_le16(p
+ 56, s
->sectors
); /* Current sectors */
334 put_le16(p
+ 57, cur_sec
); /* Current capacity */
335 put_le16(p
+ 58, cur_sec
>> 16); /* Current capacity */
336 if (s
->mult_sectors
) /* Multiple sector setting */
337 put_le16(p
+ 59, 0x100 | s
->mult_sectors
);
338 /* *(p + 60) := nb_sectors -- see ide_cfata_identify_size */
339 /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
340 put_le16(p
+ 63, 0x0203); /* Multiword DMA capability */
341 put_le16(p
+ 64, 0x0001); /* Flow Control PIO support */
342 put_le16(p
+ 65, 0x0096); /* Min. Multiword DMA cycle */
343 put_le16(p
+ 66, 0x0096); /* Rec. Multiword DMA cycle */
344 put_le16(p
+ 68, 0x00b4); /* Min. PIO cycle time */
345 put_le16(p
+ 82, 0x400c); /* Command Set supported */
346 put_le16(p
+ 83, 0x7068); /* Command Set supported */
347 put_le16(p
+ 84, 0x4000); /* Features supported */
348 put_le16(p
+ 85, 0x000c); /* Command Set enabled */
349 put_le16(p
+ 86, 0x7044); /* Command Set enabled */
350 put_le16(p
+ 87, 0x4000); /* Features enabled */
351 put_le16(p
+ 91, 0x4060); /* Current APM level */
352 put_le16(p
+ 129, 0x0002); /* Current features option */
353 put_le16(p
+ 130, 0x0005); /* Reassigned sectors */
354 put_le16(p
+ 131, 0x0001); /* Initial power mode */
355 put_le16(p
+ 132, 0x0000); /* User signature */
356 put_le16(p
+ 160, 0x8100); /* Power requirement */
357 put_le16(p
+ 161, 0x8001); /* CF command set */
359 ide_cfata_identify_size(s
);
363 memcpy(s
->io_buffer
, p
, sizeof(s
->identify_data
));
366 static void ide_set_signature(IDEState
*s
)
368 s
->select
&= 0xf0; /* clear head */
372 if (s
->drive_kind
== IDE_CD
) {
384 typedef struct TrimAIOCB
{
394 static void trim_aio_cancel(BlockAIOCB
*acb
)
396 TrimAIOCB
*iocb
= container_of(acb
, TrimAIOCB
, common
);
398 /* Exit the loop so ide_issue_trim_cb will not continue */
399 iocb
->j
= iocb
->qiov
->niov
- 1;
400 iocb
->i
= (iocb
->qiov
->iov
[iocb
->j
].iov_len
/ 8) - 1;
402 iocb
->ret
= -ECANCELED
;
405 blk_aio_cancel_async(iocb
->aiocb
);
410 static const AIOCBInfo trim_aiocb_info
= {
411 .aiocb_size
= sizeof(TrimAIOCB
),
412 .cancel_async
= trim_aio_cancel
,
415 static void ide_trim_bh_cb(void *opaque
)
417 TrimAIOCB
*iocb
= opaque
;
419 iocb
->common
.cb(iocb
->common
.opaque
, iocb
->ret
);
421 qemu_bh_delete(iocb
->bh
);
423 qemu_aio_unref(iocb
);
426 static void ide_issue_trim_cb(void *opaque
, int ret
)
428 TrimAIOCB
*iocb
= opaque
;
430 while (iocb
->j
< iocb
->qiov
->niov
) {
432 while (++iocb
->i
< iocb
->qiov
->iov
[j
].iov_len
/ 8) {
434 uint64_t *buffer
= iocb
->qiov
->iov
[j
].iov_base
;
436 /* 6-byte LBA + 2-byte range per entry */
437 uint64_t entry
= le64_to_cpu(buffer
[i
]);
438 uint64_t sector
= entry
& 0x0000ffffffffffffULL
;
439 uint16_t count
= entry
>> 48;
445 /* Got an entry! Submit and exit. */
446 iocb
->aiocb
= blk_aio_pdiscard(iocb
->blk
,
447 sector
<< BDRV_SECTOR_BITS
,
448 count
<< BDRV_SECTOR_BITS
,
449 ide_issue_trim_cb
, opaque
);
462 qemu_bh_schedule(iocb
->bh
);
466 BlockAIOCB
*ide_issue_trim(
467 int64_t offset
, QEMUIOVector
*qiov
,
468 BlockCompletionFunc
*cb
, void *cb_opaque
, void *opaque
)
470 BlockBackend
*blk
= opaque
;
473 iocb
= blk_aio_get(&trim_aiocb_info
, blk
, cb
, cb_opaque
);
475 iocb
->bh
= qemu_bh_new(ide_trim_bh_cb
, iocb
);
480 ide_issue_trim_cb(iocb
, 0);
481 return &iocb
->common
;
484 void ide_abort_command(IDEState
*s
)
486 ide_transfer_stop(s
);
487 s
->status
= READY_STAT
| ERR_STAT
;
491 static void ide_set_retry(IDEState
*s
)
493 s
->bus
->retry_unit
= s
->unit
;
494 s
->bus
->retry_sector_num
= ide_get_sector(s
);
495 s
->bus
->retry_nsector
= s
->nsector
;
498 static void ide_clear_retry(IDEState
*s
)
500 s
->bus
->retry_unit
= -1;
501 s
->bus
->retry_sector_num
= 0;
502 s
->bus
->retry_nsector
= 0;
505 /* prepare data transfer and tell what to do after */
506 void ide_transfer_start(IDEState
*s
, uint8_t *buf
, int size
,
507 EndTransferFunc
*end_transfer_func
)
509 s
->end_transfer_func
= end_transfer_func
;
511 s
->data_end
= buf
+ size
;
513 if (!(s
->status
& ERR_STAT
)) {
514 s
->status
|= DRQ_STAT
;
516 if (s
->bus
->dma
->ops
->start_transfer
) {
517 s
->bus
->dma
->ops
->start_transfer(s
->bus
->dma
);
521 static void ide_cmd_done(IDEState
*s
)
523 if (s
->bus
->dma
->ops
->cmd_done
) {
524 s
->bus
->dma
->ops
->cmd_done(s
->bus
->dma
);
528 static void ide_transfer_halt(IDEState
*s
,
529 void(*end_transfer_func
)(IDEState
*),
532 s
->end_transfer_func
= end_transfer_func
;
533 s
->data_ptr
= s
->io_buffer
;
534 s
->data_end
= s
->io_buffer
;
535 s
->status
&= ~DRQ_STAT
;
541 void ide_transfer_stop(IDEState
*s
)
543 ide_transfer_halt(s
, ide_transfer_stop
, true);
546 static void ide_transfer_cancel(IDEState
*s
)
548 ide_transfer_halt(s
, ide_transfer_cancel
, false);
551 int64_t ide_get_sector(IDEState
*s
)
554 if (s
->select
& 0x40) {
557 sector_num
= ((s
->select
& 0x0f) << 24) | (s
->hcyl
<< 16) |
558 (s
->lcyl
<< 8) | s
->sector
;
560 sector_num
= ((int64_t)s
->hob_hcyl
<< 40) |
561 ((int64_t) s
->hob_lcyl
<< 32) |
562 ((int64_t) s
->hob_sector
<< 24) |
563 ((int64_t) s
->hcyl
<< 16) |
564 ((int64_t) s
->lcyl
<< 8) | s
->sector
;
567 sector_num
= ((s
->hcyl
<< 8) | s
->lcyl
) * s
->heads
* s
->sectors
+
568 (s
->select
& 0x0f) * s
->sectors
+ (s
->sector
- 1);
573 void ide_set_sector(IDEState
*s
, int64_t sector_num
)
576 if (s
->select
& 0x40) {
578 s
->select
= (s
->select
& 0xf0) | (sector_num
>> 24);
579 s
->hcyl
= (sector_num
>> 16);
580 s
->lcyl
= (sector_num
>> 8);
581 s
->sector
= (sector_num
);
583 s
->sector
= sector_num
;
584 s
->lcyl
= sector_num
>> 8;
585 s
->hcyl
= sector_num
>> 16;
586 s
->hob_sector
= sector_num
>> 24;
587 s
->hob_lcyl
= sector_num
>> 32;
588 s
->hob_hcyl
= sector_num
>> 40;
591 cyl
= sector_num
/ (s
->heads
* s
->sectors
);
592 r
= sector_num
% (s
->heads
* s
->sectors
);
595 s
->select
= (s
->select
& 0xf0) | ((r
/ s
->sectors
) & 0x0f);
596 s
->sector
= (r
% s
->sectors
) + 1;
600 static void ide_rw_error(IDEState
*s
) {
601 ide_abort_command(s
);
605 static bool ide_sect_range_ok(IDEState
*s
,
606 uint64_t sector
, uint64_t nb_sectors
)
608 uint64_t total_sectors
;
610 blk_get_geometry(s
->blk
, &total_sectors
);
611 if (sector
> total_sectors
|| nb_sectors
> total_sectors
- sector
) {
617 static void ide_buffered_readv_cb(void *opaque
, int ret
)
619 IDEBufferedRequest
*req
= opaque
;
620 if (!req
->orphaned
) {
622 qemu_iovec_from_buf(req
->original_qiov
, 0, req
->iov
.iov_base
,
623 req
->original_qiov
->size
);
625 req
->original_cb(req
->original_opaque
, ret
);
627 QLIST_REMOVE(req
, list
);
628 qemu_vfree(req
->iov
.iov_base
);
632 #define MAX_BUFFERED_REQS 16
634 BlockAIOCB
*ide_buffered_readv(IDEState
*s
, int64_t sector_num
,
635 QEMUIOVector
*iov
, int nb_sectors
,
636 BlockCompletionFunc
*cb
, void *opaque
)
639 IDEBufferedRequest
*req
;
642 QLIST_FOREACH(req
, &s
->buffered_requests
, list
) {
645 if (c
> MAX_BUFFERED_REQS
) {
646 return blk_abort_aio_request(s
->blk
, cb
, opaque
, -EIO
);
649 req
= g_new0(IDEBufferedRequest
, 1);
650 req
->original_qiov
= iov
;
651 req
->original_cb
= cb
;
652 req
->original_opaque
= opaque
;
653 req
->iov
.iov_base
= qemu_blockalign(blk_bs(s
->blk
), iov
->size
);
654 req
->iov
.iov_len
= iov
->size
;
655 qemu_iovec_init_external(&req
->qiov
, &req
->iov
, 1);
657 aioreq
= blk_aio_preadv(s
->blk
, sector_num
<< BDRV_SECTOR_BITS
,
658 &req
->qiov
, 0, ide_buffered_readv_cb
, req
);
660 QLIST_INSERT_HEAD(&s
->buffered_requests
, req
, list
);
665 * Cancel all pending DMA requests.
666 * Any buffered DMA requests are instantly canceled,
667 * but any pending unbuffered DMA requests must be waited on.
669 void ide_cancel_dma_sync(IDEState
*s
)
671 IDEBufferedRequest
*req
;
673 /* First invoke the callbacks of all buffered requests
674 * and flag those requests as orphaned. Ideally there
675 * are no unbuffered (Scatter Gather DMA Requests or
676 * write requests) pending and we can avoid to drain. */
677 QLIST_FOREACH(req
, &s
->buffered_requests
, list
) {
678 if (!req
->orphaned
) {
679 trace_ide_cancel_dma_sync_buffered(req
->original_cb
, req
);
680 req
->original_cb(req
->original_opaque
, -ECANCELED
);
682 req
->orphaned
= true;
686 * We can't cancel Scatter Gather DMA in the middle of the
687 * operation or a partial (not full) DMA transfer would reach
688 * the storage so we wait for completion instead (we beahve
689 * like if the DMA was completed by the time the guest trying
690 * to cancel dma with bmdma_cmd_writeb with BM_CMD_START not
693 * In the future we'll be able to safely cancel the I/O if the
694 * whole DMA operation will be submitted to disk with a single
695 * aio operation with preadv/pwritev.
697 if (s
->bus
->dma
->aiocb
) {
698 trace_ide_cancel_dma_sync_remaining();
700 assert(s
->bus
->dma
->aiocb
== NULL
);
704 static void ide_sector_read(IDEState
*s
);
706 static void ide_sector_read_cb(void *opaque
, int ret
)
708 IDEState
*s
= opaque
;
712 s
->status
&= ~BUSY_STAT
;
714 if (ret
== -ECANCELED
) {
718 if (ide_handle_rw_error(s
, -ret
, IDE_RETRY_PIO
|
724 block_acct_done(blk_get_stats(s
->blk
), &s
->acct
);
727 if (n
> s
->req_nb_sectors
) {
728 n
= s
->req_nb_sectors
;
731 ide_set_sector(s
, ide_get_sector(s
) + n
);
733 /* Allow the guest to read the io_buffer */
734 ide_transfer_start(s
, s
->io_buffer
, n
* BDRV_SECTOR_SIZE
, ide_sector_read
);
738 static void ide_sector_read(IDEState
*s
)
743 s
->status
= READY_STAT
| SEEK_STAT
;
744 s
->error
= 0; /* not needed by IDE spec, but needed by Windows */
745 sector_num
= ide_get_sector(s
);
749 ide_transfer_stop(s
);
753 s
->status
|= BUSY_STAT
;
755 if (n
> s
->req_nb_sectors
) {
756 n
= s
->req_nb_sectors
;
759 trace_ide_sector_read(sector_num
, n
);
761 if (!ide_sect_range_ok(s
, sector_num
, n
)) {
763 block_acct_invalid(blk_get_stats(s
->blk
), BLOCK_ACCT_READ
);
767 s
->iov
.iov_base
= s
->io_buffer
;
768 s
->iov
.iov_len
= n
* BDRV_SECTOR_SIZE
;
769 qemu_iovec_init_external(&s
->qiov
, &s
->iov
, 1);
771 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
,
772 n
* BDRV_SECTOR_SIZE
, BLOCK_ACCT_READ
);
773 s
->pio_aiocb
= ide_buffered_readv(s
, sector_num
, &s
->qiov
, n
,
774 ide_sector_read_cb
, s
);
777 void dma_buf_commit(IDEState
*s
, uint32_t tx_bytes
)
779 if (s
->bus
->dma
->ops
->commit_buf
) {
780 s
->bus
->dma
->ops
->commit_buf(s
->bus
->dma
, tx_bytes
);
782 s
->io_buffer_offset
+= tx_bytes
;
783 qemu_sglist_destroy(&s
->sg
);
786 void ide_set_inactive(IDEState
*s
, bool more
)
788 s
->bus
->dma
->aiocb
= NULL
;
790 if (s
->bus
->dma
->ops
->set_inactive
) {
791 s
->bus
->dma
->ops
->set_inactive(s
->bus
->dma
, more
);
796 void ide_dma_error(IDEState
*s
)
798 dma_buf_commit(s
, 0);
799 ide_abort_command(s
);
800 ide_set_inactive(s
, false);
804 int ide_handle_rw_error(IDEState
*s
, int error
, int op
)
806 bool is_read
= (op
& IDE_RETRY_READ
) != 0;
807 BlockErrorAction action
= blk_get_error_action(s
->blk
, is_read
, error
);
809 if (action
== BLOCK_ERROR_ACTION_STOP
) {
810 assert(s
->bus
->retry_unit
== s
->unit
);
811 s
->bus
->error_status
= op
;
812 } else if (action
== BLOCK_ERROR_ACTION_REPORT
) {
813 block_acct_failed(blk_get_stats(s
->blk
), &s
->acct
);
814 if (IS_IDE_RETRY_DMA(op
)) {
816 } else if (IS_IDE_RETRY_ATAPI(op
)) {
817 ide_atapi_io_error(s
, -error
);
822 blk_error_action(s
->blk
, action
, is_read
, error
);
823 return action
!= BLOCK_ERROR_ACTION_IGNORE
;
826 static void ide_dma_cb(void *opaque
, int ret
)
828 IDEState
*s
= opaque
;
832 bool stay_active
= false;
834 if (ret
== -ECANCELED
) {
838 if (ide_handle_rw_error(s
, -ret
, ide_dma_cmd_to_retry(s
->dma_cmd
))) {
839 s
->bus
->dma
->aiocb
= NULL
;
840 dma_buf_commit(s
, 0);
845 n
= s
->io_buffer_size
>> 9;
846 if (n
> s
->nsector
) {
847 /* The PRDs were longer than needed for this request. Shorten them so
848 * we don't get a negative remainder. The Active bit must remain set
849 * after the request completes. */
854 sector_num
= ide_get_sector(s
);
856 assert(n
* 512 == s
->sg
.size
);
857 dma_buf_commit(s
, s
->sg
.size
);
859 ide_set_sector(s
, sector_num
);
863 /* end of transfer ? */
864 if (s
->nsector
== 0) {
865 s
->status
= READY_STAT
| SEEK_STAT
;
870 /* launch next transfer */
872 s
->io_buffer_index
= 0;
873 s
->io_buffer_size
= n
* 512;
874 if (s
->bus
->dma
->ops
->prepare_buf(s
->bus
->dma
, s
->io_buffer_size
) < 512) {
875 /* The PRDs were too short. Reset the Active bit, but don't raise an
877 s
->status
= READY_STAT
| SEEK_STAT
;
878 dma_buf_commit(s
, 0);
882 trace_ide_dma_cb(s
, sector_num
, n
, IDE_DMA_CMD_str(s
->dma_cmd
));
884 if ((s
->dma_cmd
== IDE_DMA_READ
|| s
->dma_cmd
== IDE_DMA_WRITE
) &&
885 !ide_sect_range_ok(s
, sector_num
, n
)) {
887 block_acct_invalid(blk_get_stats(s
->blk
), s
->acct
.type
);
891 offset
= sector_num
<< BDRV_SECTOR_BITS
;
892 switch (s
->dma_cmd
) {
894 s
->bus
->dma
->aiocb
= dma_blk_read(s
->blk
, &s
->sg
, offset
,
895 BDRV_SECTOR_SIZE
, ide_dma_cb
, s
);
898 s
->bus
->dma
->aiocb
= dma_blk_write(s
->blk
, &s
->sg
, offset
,
899 BDRV_SECTOR_SIZE
, ide_dma_cb
, s
);
902 s
->bus
->dma
->aiocb
= dma_blk_io(blk_get_aio_context(s
->blk
),
903 &s
->sg
, offset
, BDRV_SECTOR_SIZE
,
904 ide_issue_trim
, s
->blk
, ide_dma_cb
, s
,
905 DMA_DIRECTION_TO_DEVICE
);
913 if (s
->dma_cmd
== IDE_DMA_READ
|| s
->dma_cmd
== IDE_DMA_WRITE
) {
914 block_acct_done(blk_get_stats(s
->blk
), &s
->acct
);
916 ide_set_inactive(s
, stay_active
);
919 static void ide_sector_start_dma(IDEState
*s
, enum ide_dma_cmd dma_cmd
)
921 s
->status
= READY_STAT
| SEEK_STAT
| DRQ_STAT
;
922 s
->io_buffer_size
= 0;
923 s
->dma_cmd
= dma_cmd
;
927 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
,
928 s
->nsector
* BDRV_SECTOR_SIZE
, BLOCK_ACCT_READ
);
931 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
,
932 s
->nsector
* BDRV_SECTOR_SIZE
, BLOCK_ACCT_WRITE
);
938 ide_start_dma(s
, ide_dma_cb
);
941 void ide_start_dma(IDEState
*s
, BlockCompletionFunc
*cb
)
943 s
->io_buffer_index
= 0;
945 if (s
->bus
->dma
->ops
->start_dma
) {
946 s
->bus
->dma
->ops
->start_dma(s
->bus
->dma
, s
, cb
);
950 static void ide_sector_write(IDEState
*s
);
952 static void ide_sector_write_timer_cb(void *opaque
)
954 IDEState
*s
= opaque
;
958 static void ide_sector_write_cb(void *opaque
, int ret
)
960 IDEState
*s
= opaque
;
963 if (ret
== -ECANCELED
) {
968 s
->status
&= ~BUSY_STAT
;
971 if (ide_handle_rw_error(s
, -ret
, IDE_RETRY_PIO
)) {
976 block_acct_done(blk_get_stats(s
->blk
), &s
->acct
);
979 if (n
> s
->req_nb_sectors
) {
980 n
= s
->req_nb_sectors
;
984 ide_set_sector(s
, ide_get_sector(s
) + n
);
985 if (s
->nsector
== 0) {
986 /* no more sectors to write */
987 ide_transfer_stop(s
);
990 if (n1
> s
->req_nb_sectors
) {
991 n1
= s
->req_nb_sectors
;
993 ide_transfer_start(s
, s
->io_buffer
, n1
* BDRV_SECTOR_SIZE
,
997 if (win2k_install_hack
&& ((++s
->irq_count
% 16) == 0)) {
998 /* It seems there is a bug in the Windows 2000 installer HDD
999 IDE driver which fills the disk with empty logs when the
1000 IDE write IRQ comes too early. This hack tries to correct
1001 that at the expense of slower write performances. Use this
1002 option _only_ to install Windows 2000. You must disable it
1004 timer_mod(s
->sector_write_timer
, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) +
1005 (NANOSECONDS_PER_SECOND
/ 1000));
1007 ide_set_irq(s
->bus
);
1011 static void ide_sector_write(IDEState
*s
)
1016 s
->status
= READY_STAT
| SEEK_STAT
| BUSY_STAT
;
1017 sector_num
= ide_get_sector(s
);
1020 if (n
> s
->req_nb_sectors
) {
1021 n
= s
->req_nb_sectors
;
1024 trace_ide_sector_write(sector_num
, n
);
1026 if (!ide_sect_range_ok(s
, sector_num
, n
)) {
1028 block_acct_invalid(blk_get_stats(s
->blk
), BLOCK_ACCT_WRITE
);
1032 s
->iov
.iov_base
= s
->io_buffer
;
1033 s
->iov
.iov_len
= n
* BDRV_SECTOR_SIZE
;
1034 qemu_iovec_init_external(&s
->qiov
, &s
->iov
, 1);
1036 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
,
1037 n
* BDRV_SECTOR_SIZE
, BLOCK_ACCT_WRITE
);
1038 s
->pio_aiocb
= blk_aio_pwritev(s
->blk
, sector_num
<< BDRV_SECTOR_BITS
,
1039 &s
->qiov
, 0, ide_sector_write_cb
, s
);
1042 static void ide_flush_cb(void *opaque
, int ret
)
1044 IDEState
*s
= opaque
;
1046 s
->pio_aiocb
= NULL
;
1048 if (ret
== -ECANCELED
) {
1052 /* XXX: What sector number to set here? */
1053 if (ide_handle_rw_error(s
, -ret
, IDE_RETRY_FLUSH
)) {
1059 block_acct_done(blk_get_stats(s
->blk
), &s
->acct
);
1061 s
->status
= READY_STAT
| SEEK_STAT
;
1063 ide_set_irq(s
->bus
);
1066 static void ide_flush_cache(IDEState
*s
)
1068 if (s
->blk
== NULL
) {
1073 s
->status
|= BUSY_STAT
;
1075 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
, 0, BLOCK_ACCT_FLUSH
);
1077 if (blk_bs(s
->blk
)) {
1078 s
->pio_aiocb
= blk_aio_flush(s
->blk
, ide_flush_cb
, s
);
1080 /* XXX blk_aio_flush() crashes when blk_bs(blk) is NULL, remove this
1081 * temporary workaround when blk_aio_*() functions handle NULL blk_bs.
1087 static void ide_cfata_metadata_inquiry(IDEState
*s
)
1092 p
= (uint16_t *) s
->io_buffer
;
1093 memset(p
, 0, 0x200);
1094 spd
= ((s
->mdata_size
- 1) >> 9) + 1;
1096 put_le16(p
+ 0, 0x0001); /* Data format revision */
1097 put_le16(p
+ 1, 0x0000); /* Media property: silicon */
1098 put_le16(p
+ 2, s
->media_changed
); /* Media status */
1099 put_le16(p
+ 3, s
->mdata_size
& 0xffff); /* Capacity in bytes (low) */
1100 put_le16(p
+ 4, s
->mdata_size
>> 16); /* Capacity in bytes (high) */
1101 put_le16(p
+ 5, spd
& 0xffff); /* Sectors per device (low) */
1102 put_le16(p
+ 6, spd
>> 16); /* Sectors per device (high) */
1105 static void ide_cfata_metadata_read(IDEState
*s
)
1109 if (((s
->hcyl
<< 16) | s
->lcyl
) << 9 > s
->mdata_size
+ 2) {
1110 s
->status
= ERR_STAT
;
1111 s
->error
= ABRT_ERR
;
1115 p
= (uint16_t *) s
->io_buffer
;
1116 memset(p
, 0, 0x200);
1118 put_le16(p
+ 0, s
->media_changed
); /* Media status */
1119 memcpy(p
+ 1, s
->mdata_storage
+ (((s
->hcyl
<< 16) | s
->lcyl
) << 9),
1120 MIN(MIN(s
->mdata_size
- (((s
->hcyl
<< 16) | s
->lcyl
) << 9),
1121 s
->nsector
<< 9), 0x200 - 2));
1124 static void ide_cfata_metadata_write(IDEState
*s
)
1126 if (((s
->hcyl
<< 16) | s
->lcyl
) << 9 > s
->mdata_size
+ 2) {
1127 s
->status
= ERR_STAT
;
1128 s
->error
= ABRT_ERR
;
1132 s
->media_changed
= 0;
1134 memcpy(s
->mdata_storage
+ (((s
->hcyl
<< 16) | s
->lcyl
) << 9),
1136 MIN(MIN(s
->mdata_size
- (((s
->hcyl
<< 16) | s
->lcyl
) << 9),
1137 s
->nsector
<< 9), 0x200 - 2));
1140 /* called when the inserted state of the media has changed */
1141 static void ide_cd_change_cb(void *opaque
, bool load
, Error
**errp
)
1143 IDEState
*s
= opaque
;
1144 uint64_t nb_sectors
;
1146 s
->tray_open
= !load
;
1147 blk_get_geometry(s
->blk
, &nb_sectors
);
1148 s
->nb_sectors
= nb_sectors
;
1151 * First indicate to the guest that a CD has been removed. That's
1152 * done on the next command the guest sends us.
1154 * Then we set UNIT_ATTENTION, by which the guest will
1155 * detect a new CD in the drive. See ide_atapi_cmd() for details.
1157 s
->cdrom_changed
= 1;
1158 s
->events
.new_media
= true;
1159 s
->events
.eject_request
= false;
1160 ide_set_irq(s
->bus
);
1163 static void ide_cd_eject_request_cb(void *opaque
, bool force
)
1165 IDEState
*s
= opaque
;
1167 s
->events
.eject_request
= true;
1169 s
->tray_locked
= false;
1171 ide_set_irq(s
->bus
);
1174 static void ide_cmd_lba48_transform(IDEState
*s
, int lba48
)
1178 /* handle the 'magic' 0 nsector count conversion here. to avoid
1179 * fiddling with the rest of the read logic, we just store the
1180 * full sector count in ->nsector and ignore ->hob_nsector from now
1186 if (!s
->nsector
&& !s
->hob_nsector
)
1189 int lo
= s
->nsector
;
1190 int hi
= s
->hob_nsector
;
1192 s
->nsector
= (hi
<< 8) | lo
;
1197 static void ide_clear_hob(IDEBus
*bus
)
1199 /* any write clears HOB high bit of device control register */
1200 bus
->ifs
[0].select
&= ~(1 << 7);
1201 bus
->ifs
[1].select
&= ~(1 << 7);
/* IOport [W]rite [R]egisters: offsets of the writable registers within
 * the ATA command block. */
enum ATA_IOPORT_WR {
    ATA_IOPORT_WR_DATA = 0,
    ATA_IOPORT_WR_FEATURES = 1,
    ATA_IOPORT_WR_SECTOR_COUNT = 2,
    ATA_IOPORT_WR_SECTOR_NUMBER = 3,
    ATA_IOPORT_WR_CYLINDER_LOW = 4,
    ATA_IOPORT_WR_CYLINDER_HIGH = 5,
    ATA_IOPORT_WR_DEVICE_HEAD = 6,
    ATA_IOPORT_WR_COMMAND = 7,
    ATA_IOPORT_WR_NUM_REGISTERS,
};
1217 const char *ATA_IOPORT_WR_lookup
[ATA_IOPORT_WR_NUM_REGISTERS
] = {
1218 [ATA_IOPORT_WR_DATA
] = "Data",
1219 [ATA_IOPORT_WR_FEATURES
] = "Features",
1220 [ATA_IOPORT_WR_SECTOR_COUNT
] = "Sector Count",
1221 [ATA_IOPORT_WR_SECTOR_NUMBER
] = "Sector Number",
1222 [ATA_IOPORT_WR_CYLINDER_LOW
] = "Cylinder Low",
1223 [ATA_IOPORT_WR_CYLINDER_HIGH
] = "Cylinder High",
1224 [ATA_IOPORT_WR_DEVICE_HEAD
] = "Device/Head",
1225 [ATA_IOPORT_WR_COMMAND
] = "Command"
1228 void ide_ioport_write(void *opaque
, uint32_t addr
, uint32_t val
)
1230 IDEBus
*bus
= opaque
;
1231 IDEState
*s
= idebus_active_if(bus
);
1232 int reg_num
= addr
& 7;
1234 trace_ide_ioport_write(addr
, ATA_IOPORT_WR_lookup
[reg_num
], val
, bus
, s
);
1236 /* ignore writes to command block while busy with previous command */
1237 if (reg_num
!= 7 && (s
->status
& (BUSY_STAT
|DRQ_STAT
))) {
1244 case ATA_IOPORT_WR_FEATURES
:
1246 /* NOTE: data is written to the two drives */
1247 bus
->ifs
[0].hob_feature
= bus
->ifs
[0].feature
;
1248 bus
->ifs
[1].hob_feature
= bus
->ifs
[1].feature
;
1249 bus
->ifs
[0].feature
= val
;
1250 bus
->ifs
[1].feature
= val
;
1252 case ATA_IOPORT_WR_SECTOR_COUNT
:
1254 bus
->ifs
[0].hob_nsector
= bus
->ifs
[0].nsector
;
1255 bus
->ifs
[1].hob_nsector
= bus
->ifs
[1].nsector
;
1256 bus
->ifs
[0].nsector
= val
;
1257 bus
->ifs
[1].nsector
= val
;
1259 case ATA_IOPORT_WR_SECTOR_NUMBER
:
1261 bus
->ifs
[0].hob_sector
= bus
->ifs
[0].sector
;
1262 bus
->ifs
[1].hob_sector
= bus
->ifs
[1].sector
;
1263 bus
->ifs
[0].sector
= val
;
1264 bus
->ifs
[1].sector
= val
;
1266 case ATA_IOPORT_WR_CYLINDER_LOW
:
1268 bus
->ifs
[0].hob_lcyl
= bus
->ifs
[0].lcyl
;
1269 bus
->ifs
[1].hob_lcyl
= bus
->ifs
[1].lcyl
;
1270 bus
->ifs
[0].lcyl
= val
;
1271 bus
->ifs
[1].lcyl
= val
;
1273 case ATA_IOPORT_WR_CYLINDER_HIGH
:
1275 bus
->ifs
[0].hob_hcyl
= bus
->ifs
[0].hcyl
;
1276 bus
->ifs
[1].hob_hcyl
= bus
->ifs
[1].hcyl
;
1277 bus
->ifs
[0].hcyl
= val
;
1278 bus
->ifs
[1].hcyl
= val
;
1280 case ATA_IOPORT_WR_DEVICE_HEAD
:
1281 /* FIXME: HOB readback uses bit 7 */
1282 bus
->ifs
[0].select
= (val
& ~0x10) | 0xa0;
1283 bus
->ifs
[1].select
= (val
| 0x10) | 0xa0;
1285 bus
->unit
= (val
>> 4) & 1;
1288 case ATA_IOPORT_WR_COMMAND
:
1290 ide_exec_cmd(bus
, val
);
1295 static void ide_reset(IDEState
*s
)
1300 blk_aio_cancel(s
->pio_aiocb
);
1301 s
->pio_aiocb
= NULL
;
1304 if (s
->drive_kind
== IDE_CFATA
)
1305 s
->mult_sectors
= 0;
1307 s
->mult_sectors
= MAX_MULT_SECTORS
;
1324 s
->status
= READY_STAT
| SEEK_STAT
;
1328 /* ATAPI specific */
1331 s
->cdrom_changed
= 0;
1332 s
->packet_transfer_size
= 0;
1333 s
->elementary_transfer_size
= 0;
1334 s
->io_buffer_index
= 0;
1335 s
->cd_sector_size
= 0;
1340 s
->io_buffer_size
= 0;
1341 s
->req_nb_sectors
= 0;
1343 ide_set_signature(s
);
1344 /* init the transfer handler so that 0xffff is returned on data
1346 s
->end_transfer_func
= ide_dummy_transfer_stop
;
1347 ide_dummy_transfer_stop(s
);
1348 s
->media_changed
= 0;
1351 static bool cmd_nop(IDEState
*s
, uint8_t cmd
)
1356 static bool cmd_device_reset(IDEState
*s
, uint8_t cmd
)
1358 /* Halt PIO (in the DRQ phase), then DMA */
1359 ide_transfer_cancel(s
);
1360 ide_cancel_dma_sync(s
);
1362 /* Reset any PIO commands, reset signature, etc */
1365 /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
1366 * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
1369 /* Do not overwrite status register */
1373 static bool cmd_data_set_management(IDEState
*s
, uint8_t cmd
)
1375 switch (s
->feature
) {
1378 ide_sector_start_dma(s
, IDE_DMA_TRIM
);
1384 ide_abort_command(s
);
1388 static bool cmd_identify(IDEState
*s
, uint8_t cmd
)
1390 if (s
->blk
&& s
->drive_kind
!= IDE_CD
) {
1391 if (s
->drive_kind
!= IDE_CFATA
) {
1394 ide_cfata_identify(s
);
1396 s
->status
= READY_STAT
| SEEK_STAT
;
1397 ide_transfer_start(s
, s
->io_buffer
, 512, ide_transfer_stop
);
1398 ide_set_irq(s
->bus
);
1401 if (s
->drive_kind
== IDE_CD
) {
1402 ide_set_signature(s
);
1404 ide_abort_command(s
);
1410 static bool cmd_verify(IDEState
*s
, uint8_t cmd
)
1412 bool lba48
= (cmd
== WIN_VERIFY_EXT
);
1414 /* do sector number check ? */
1415 ide_cmd_lba48_transform(s
, lba48
);
1420 static bool cmd_set_multiple_mode(IDEState
*s
, uint8_t cmd
)
1422 if (s
->drive_kind
== IDE_CFATA
&& s
->nsector
== 0) {
1423 /* Disable Read and Write Multiple */
1424 s
->mult_sectors
= 0;
1425 } else if ((s
->nsector
& 0xff) != 0 &&
1426 ((s
->nsector
& 0xff) > MAX_MULT_SECTORS
||
1427 (s
->nsector
& (s
->nsector
- 1)) != 0)) {
1428 ide_abort_command(s
);
1430 s
->mult_sectors
= s
->nsector
& 0xff;
1436 static bool cmd_read_multiple(IDEState
*s
, uint8_t cmd
)
1438 bool lba48
= (cmd
== WIN_MULTREAD_EXT
);
1440 if (!s
->blk
|| !s
->mult_sectors
) {
1441 ide_abort_command(s
);
1445 ide_cmd_lba48_transform(s
, lba48
);
1446 s
->req_nb_sectors
= s
->mult_sectors
;
1451 static bool cmd_write_multiple(IDEState
*s
, uint8_t cmd
)
1453 bool lba48
= (cmd
== WIN_MULTWRITE_EXT
);
1456 if (!s
->blk
|| !s
->mult_sectors
) {
1457 ide_abort_command(s
);
1461 ide_cmd_lba48_transform(s
, lba48
);
1463 s
->req_nb_sectors
= s
->mult_sectors
;
1464 n
= MIN(s
->nsector
, s
->req_nb_sectors
);
1466 s
->status
= SEEK_STAT
| READY_STAT
;
1467 ide_transfer_start(s
, s
->io_buffer
, 512 * n
, ide_sector_write
);
1469 s
->media_changed
= 1;
1474 static bool cmd_read_pio(IDEState
*s
, uint8_t cmd
)
1476 bool lba48
= (cmd
== WIN_READ_EXT
);
1478 if (s
->drive_kind
== IDE_CD
) {
1479 ide_set_signature(s
); /* odd, but ATA4 8.27.5.2 requires it */
1480 ide_abort_command(s
);
1485 ide_abort_command(s
);
1489 ide_cmd_lba48_transform(s
, lba48
);
1490 s
->req_nb_sectors
= 1;
1496 static bool cmd_write_pio(IDEState
*s
, uint8_t cmd
)
1498 bool lba48
= (cmd
== WIN_WRITE_EXT
);
1501 ide_abort_command(s
);
1505 ide_cmd_lba48_transform(s
, lba48
);
1507 s
->req_nb_sectors
= 1;
1508 s
->status
= SEEK_STAT
| READY_STAT
;
1509 ide_transfer_start(s
, s
->io_buffer
, 512, ide_sector_write
);
1511 s
->media_changed
= 1;
1516 static bool cmd_read_dma(IDEState
*s
, uint8_t cmd
)
1518 bool lba48
= (cmd
== WIN_READDMA_EXT
);
1521 ide_abort_command(s
);
1525 ide_cmd_lba48_transform(s
, lba48
);
1526 ide_sector_start_dma(s
, IDE_DMA_READ
);
1531 static bool cmd_write_dma(IDEState
*s
, uint8_t cmd
)
1533 bool lba48
= (cmd
== WIN_WRITEDMA_EXT
);
1536 ide_abort_command(s
);
1540 ide_cmd_lba48_transform(s
, lba48
);
1541 ide_sector_start_dma(s
, IDE_DMA_WRITE
);
1543 s
->media_changed
= 1;
1548 static bool cmd_flush_cache(IDEState
*s
, uint8_t cmd
)
1554 static bool cmd_seek(IDEState
*s
, uint8_t cmd
)
1556 /* XXX: Check that seek is within bounds */
1560 static bool cmd_read_native_max(IDEState
*s
, uint8_t cmd
)
1562 bool lba48
= (cmd
== WIN_READ_NATIVE_MAX_EXT
);
1564 /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1565 if (s
->nb_sectors
== 0) {
1566 ide_abort_command(s
);
1570 ide_cmd_lba48_transform(s
, lba48
);
1571 ide_set_sector(s
, s
->nb_sectors
- 1);
1576 static bool cmd_check_power_mode(IDEState
*s
, uint8_t cmd
)
1578 s
->nsector
= 0xff; /* device active or idle */
1582 static bool cmd_set_features(IDEState
*s
, uint8_t cmd
)
1584 uint16_t *identify_data
;
1587 ide_abort_command(s
);
1591 /* XXX: valid for CDROM ? */
1592 switch (s
->feature
) {
1593 case 0x02: /* write cache enable */
1594 blk_set_enable_write_cache(s
->blk
, true);
1595 identify_data
= (uint16_t *)s
->identify_data
;
1596 put_le16(identify_data
+ 85, (1 << 14) | (1 << 5) | 1);
1598 case 0x82: /* write cache disable */
1599 blk_set_enable_write_cache(s
->blk
, false);
1600 identify_data
= (uint16_t *)s
->identify_data
;
1601 put_le16(identify_data
+ 85, (1 << 14) | 1);
1604 case 0xcc: /* reverting to power-on defaults enable */
1605 case 0x66: /* reverting to power-on defaults disable */
1606 case 0xaa: /* read look-ahead enable */
1607 case 0x55: /* read look-ahead disable */
1608 case 0x05: /* set advanced power management mode */
1609 case 0x85: /* disable advanced power management mode */
1610 case 0x69: /* NOP */
1611 case 0x67: /* NOP */
1612 case 0x96: /* NOP */
1613 case 0x9a: /* NOP */
1614 case 0x42: /* enable Automatic Acoustic Mode */
1615 case 0xc2: /* disable Automatic Acoustic Mode */
1617 case 0x03: /* set transfer mode */
1619 uint8_t val
= s
->nsector
& 0x07;
1620 identify_data
= (uint16_t *)s
->identify_data
;
1622 switch (s
->nsector
>> 3) {
1623 case 0x00: /* pio default */
1624 case 0x01: /* pio mode */
1625 put_le16(identify_data
+ 62, 0x07);
1626 put_le16(identify_data
+ 63, 0x07);
1627 put_le16(identify_data
+ 88, 0x3f);
1629 case 0x02: /* sigle word dma mode*/
1630 put_le16(identify_data
+ 62, 0x07 | (1 << (val
+ 8)));
1631 put_le16(identify_data
+ 63, 0x07);
1632 put_le16(identify_data
+ 88, 0x3f);
1634 case 0x04: /* mdma mode */
1635 put_le16(identify_data
+ 62, 0x07);
1636 put_le16(identify_data
+ 63, 0x07 | (1 << (val
+ 8)));
1637 put_le16(identify_data
+ 88, 0x3f);
1639 case 0x08: /* udma mode */
1640 put_le16(identify_data
+ 62, 0x07);
1641 put_le16(identify_data
+ 63, 0x07);
1642 put_le16(identify_data
+ 88, 0x3f | (1 << (val
+ 8)));
1652 ide_abort_command(s
);
1657 /*** ATAPI commands ***/
1659 static bool cmd_identify_packet(IDEState
*s
, uint8_t cmd
)
1661 ide_atapi_identify(s
);
1662 s
->status
= READY_STAT
| SEEK_STAT
;
1663 ide_transfer_start(s
, s
->io_buffer
, 512, ide_transfer_stop
);
1664 ide_set_irq(s
->bus
);
1668 static bool cmd_exec_dev_diagnostic(IDEState
*s
, uint8_t cmd
)
1670 ide_set_signature(s
);
1672 if (s
->drive_kind
== IDE_CD
) {
1673 s
->status
= 0; /* ATAPI spec (v6) section 9.10 defines packet
1674 * devices to return a clear status register
1675 * with READY_STAT *not* set. */
1678 s
->status
= READY_STAT
| SEEK_STAT
;
1679 /* The bits of the error register are not as usual for this command!
1680 * They are part of the regular output (this is why ERR_STAT isn't set)
1681 * Device 0 passed, Device 1 passed or not present. */
1683 ide_set_irq(s
->bus
);
1689 static bool cmd_packet(IDEState
*s
, uint8_t cmd
)
1691 /* overlapping commands not supported */
1692 if (s
->feature
& 0x02) {
1693 ide_abort_command(s
);
1697 s
->status
= READY_STAT
| SEEK_STAT
;
1698 s
->atapi_dma
= s
->feature
& 1;
1700 s
->dma_cmd
= IDE_DMA_ATAPI
;
1703 ide_transfer_start(s
, s
->io_buffer
, ATAPI_PACKET_SIZE
,
1709 /*** CF-ATA commands ***/
1711 static bool cmd_cfa_req_ext_error_code(IDEState
*s
, uint8_t cmd
)
1713 s
->error
= 0x09; /* miscellaneous error */
1714 s
->status
= READY_STAT
| SEEK_STAT
;
1715 ide_set_irq(s
->bus
);
1720 static bool cmd_cfa_erase_sectors(IDEState
*s
, uint8_t cmd
)
1722 /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1723 * required for Windows 8 to work with AHCI */
1725 if (cmd
== CFA_WEAR_LEVEL
) {
1729 if (cmd
== CFA_ERASE_SECTORS
) {
1730 s
->media_changed
= 1;
1736 static bool cmd_cfa_translate_sector(IDEState
*s
, uint8_t cmd
)
1738 s
->status
= READY_STAT
| SEEK_STAT
;
1740 memset(s
->io_buffer
, 0, 0x200);
1741 s
->io_buffer
[0x00] = s
->hcyl
; /* Cyl MSB */
1742 s
->io_buffer
[0x01] = s
->lcyl
; /* Cyl LSB */
1743 s
->io_buffer
[0x02] = s
->select
; /* Head */
1744 s
->io_buffer
[0x03] = s
->sector
; /* Sector */
1745 s
->io_buffer
[0x04] = ide_get_sector(s
) >> 16; /* LBA MSB */
1746 s
->io_buffer
[0x05] = ide_get_sector(s
) >> 8; /* LBA */
1747 s
->io_buffer
[0x06] = ide_get_sector(s
) >> 0; /* LBA LSB */
1748 s
->io_buffer
[0x13] = 0x00; /* Erase flag */
1749 s
->io_buffer
[0x18] = 0x00; /* Hot count */
1750 s
->io_buffer
[0x19] = 0x00; /* Hot count */
1751 s
->io_buffer
[0x1a] = 0x01; /* Hot count */
1753 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1754 ide_set_irq(s
->bus
);
1759 static bool cmd_cfa_access_metadata_storage(IDEState
*s
, uint8_t cmd
)
1761 switch (s
->feature
) {
1762 case 0x02: /* Inquiry Metadata Storage */
1763 ide_cfata_metadata_inquiry(s
);
1765 case 0x03: /* Read Metadata Storage */
1766 ide_cfata_metadata_read(s
);
1768 case 0x04: /* Write Metadata Storage */
1769 ide_cfata_metadata_write(s
);
1772 ide_abort_command(s
);
1776 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1777 s
->status
= 0x00; /* NOTE: READY is _not_ set */
1778 ide_set_irq(s
->bus
);
1783 static bool cmd_ibm_sense_condition(IDEState
*s
, uint8_t cmd
)
1785 switch (s
->feature
) {
1786 case 0x01: /* sense temperature in device */
1787 s
->nsector
= 0x50; /* +20 C */
1790 ide_abort_command(s
);
1798 /*** SMART commands ***/
1800 static bool cmd_smart(IDEState
*s
, uint8_t cmd
)
1804 if (s
->hcyl
!= 0xc2 || s
->lcyl
!= 0x4f) {
1808 if (!s
->smart_enabled
&& s
->feature
!= SMART_ENABLE
) {
1812 switch (s
->feature
) {
1814 s
->smart_enabled
= 0;
1818 s
->smart_enabled
= 1;
1821 case SMART_ATTR_AUTOSAVE
:
1822 switch (s
->sector
) {
1824 s
->smart_autosave
= 0;
1827 s
->smart_autosave
= 1;
1835 if (!s
->smart_errors
) {
1844 case SMART_READ_THRESH
:
1845 memset(s
->io_buffer
, 0, 0x200);
1846 s
->io_buffer
[0] = 0x01; /* smart struct version */
1848 for (n
= 0; n
< ARRAY_SIZE(smart_attributes
); n
++) {
1849 s
->io_buffer
[2 + 0 + (n
* 12)] = smart_attributes
[n
][0];
1850 s
->io_buffer
[2 + 1 + (n
* 12)] = smart_attributes
[n
][11];
1854 for (n
= 0; n
< 511; n
++) {
1855 s
->io_buffer
[511] += s
->io_buffer
[n
];
1857 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
1859 s
->status
= READY_STAT
| SEEK_STAT
;
1860 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1861 ide_set_irq(s
->bus
);
1864 case SMART_READ_DATA
:
1865 memset(s
->io_buffer
, 0, 0x200);
1866 s
->io_buffer
[0] = 0x01; /* smart struct version */
1868 for (n
= 0; n
< ARRAY_SIZE(smart_attributes
); n
++) {
1870 for (i
= 0; i
< 11; i
++) {
1871 s
->io_buffer
[2 + i
+ (n
* 12)] = smart_attributes
[n
][i
];
1875 s
->io_buffer
[362] = 0x02 | (s
->smart_autosave
? 0x80 : 0x00);
1876 if (s
->smart_selftest_count
== 0) {
1877 s
->io_buffer
[363] = 0;
1880 s
->smart_selftest_data
[3 +
1881 (s
->smart_selftest_count
- 1) *
1884 s
->io_buffer
[364] = 0x20;
1885 s
->io_buffer
[365] = 0x01;
1886 /* offline data collection capacity: execute + self-test*/
1887 s
->io_buffer
[367] = (1 << 4 | 1 << 3 | 1);
1888 s
->io_buffer
[368] = 0x03; /* smart capability (1) */
1889 s
->io_buffer
[369] = 0x00; /* smart capability (2) */
1890 s
->io_buffer
[370] = 0x01; /* error logging supported */
1891 s
->io_buffer
[372] = 0x02; /* minutes for poll short test */
1892 s
->io_buffer
[373] = 0x36; /* minutes for poll ext test */
1893 s
->io_buffer
[374] = 0x01; /* minutes for poll conveyance */
1895 for (n
= 0; n
< 511; n
++) {
1896 s
->io_buffer
[511] += s
->io_buffer
[n
];
1898 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
1900 s
->status
= READY_STAT
| SEEK_STAT
;
1901 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1902 ide_set_irq(s
->bus
);
1905 case SMART_READ_LOG
:
1906 switch (s
->sector
) {
1907 case 0x01: /* summary smart error log */
1908 memset(s
->io_buffer
, 0, 0x200);
1909 s
->io_buffer
[0] = 0x01;
1910 s
->io_buffer
[1] = 0x00; /* no error entries */
1911 s
->io_buffer
[452] = s
->smart_errors
& 0xff;
1912 s
->io_buffer
[453] = (s
->smart_errors
& 0xff00) >> 8;
1914 for (n
= 0; n
< 511; n
++) {
1915 s
->io_buffer
[511] += s
->io_buffer
[n
];
1917 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
1919 case 0x06: /* smart self test log */
1920 memset(s
->io_buffer
, 0, 0x200);
1921 s
->io_buffer
[0] = 0x01;
1922 if (s
->smart_selftest_count
== 0) {
1923 s
->io_buffer
[508] = 0;
1925 s
->io_buffer
[508] = s
->smart_selftest_count
;
1926 for (n
= 2; n
< 506; n
++) {
1927 s
->io_buffer
[n
] = s
->smart_selftest_data
[n
];
1931 for (n
= 0; n
< 511; n
++) {
1932 s
->io_buffer
[511] += s
->io_buffer
[n
];
1934 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
1939 s
->status
= READY_STAT
| SEEK_STAT
;
1940 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1941 ide_set_irq(s
->bus
);
1944 case SMART_EXECUTE_OFFLINE
:
1945 switch (s
->sector
) {
1946 case 0: /* off-line routine */
1947 case 1: /* short self test */
1948 case 2: /* extended self test */
1949 s
->smart_selftest_count
++;
1950 if (s
->smart_selftest_count
> 21) {
1951 s
->smart_selftest_count
= 1;
1953 n
= 2 + (s
->smart_selftest_count
- 1) * 24;
1954 s
->smart_selftest_data
[n
] = s
->sector
;
1955 s
->smart_selftest_data
[n
+ 1] = 0x00; /* OK and finished */
1956 s
->smart_selftest_data
[n
+ 2] = 0x34; /* hour count lsb */
1957 s
->smart_selftest_data
[n
+ 3] = 0x12; /* hour count msb */
1966 ide_abort_command(s
);
/* Per-drive-kind permission bits for ide_cmd_table entries. */
#define HD_OK (1u << IDE_HD)
#define CD_OK (1u << IDE_CD)
#define CFA_OK (1u << IDE_CFATA)
#define HD_CFA_OK (HD_OK | CFA_OK)
#define ALL_OK (HD_OK | CD_OK | CFA_OK)

/* Set the Disk Seek Completed status bit during completion */
#define SET_DSC (1u << 8)
1979 /* See ACS-2 T13/2015-D Table B.2 Command codes */
1980 static const struct {
1981 /* Returns true if the completion code should be run */
1982 bool (*handler
)(IDEState
*s
, uint8_t cmd
);
1984 } ide_cmd_table
[0x100] = {
1985 /* NOP not implemented, mandatory for CD */
1986 [CFA_REQ_EXT_ERROR_CODE
] = { cmd_cfa_req_ext_error_code
, CFA_OK
},
1987 [WIN_DSM
] = { cmd_data_set_management
, HD_CFA_OK
},
1988 [WIN_DEVICE_RESET
] = { cmd_device_reset
, CD_OK
},
1989 [WIN_RECAL
] = { cmd_nop
, HD_CFA_OK
| SET_DSC
},
1990 [WIN_READ
] = { cmd_read_pio
, ALL_OK
},
1991 [WIN_READ_ONCE
] = { cmd_read_pio
, HD_CFA_OK
},
1992 [WIN_READ_EXT
] = { cmd_read_pio
, HD_CFA_OK
},
1993 [WIN_READDMA_EXT
] = { cmd_read_dma
, HD_CFA_OK
},
1994 [WIN_READ_NATIVE_MAX_EXT
] = { cmd_read_native_max
, HD_CFA_OK
| SET_DSC
},
1995 [WIN_MULTREAD_EXT
] = { cmd_read_multiple
, HD_CFA_OK
},
1996 [WIN_WRITE
] = { cmd_write_pio
, HD_CFA_OK
},
1997 [WIN_WRITE_ONCE
] = { cmd_write_pio
, HD_CFA_OK
},
1998 [WIN_WRITE_EXT
] = { cmd_write_pio
, HD_CFA_OK
},
1999 [WIN_WRITEDMA_EXT
] = { cmd_write_dma
, HD_CFA_OK
},
2000 [CFA_WRITE_SECT_WO_ERASE
] = { cmd_write_pio
, CFA_OK
},
2001 [WIN_MULTWRITE_EXT
] = { cmd_write_multiple
, HD_CFA_OK
},
2002 [WIN_WRITE_VERIFY
] = { cmd_write_pio
, HD_CFA_OK
},
2003 [WIN_VERIFY
] = { cmd_verify
, HD_CFA_OK
| SET_DSC
},
2004 [WIN_VERIFY_ONCE
] = { cmd_verify
, HD_CFA_OK
| SET_DSC
},
2005 [WIN_VERIFY_EXT
] = { cmd_verify
, HD_CFA_OK
| SET_DSC
},
2006 [WIN_SEEK
] = { cmd_seek
, HD_CFA_OK
| SET_DSC
},
2007 [CFA_TRANSLATE_SECTOR
] = { cmd_cfa_translate_sector
, CFA_OK
},
2008 [WIN_DIAGNOSE
] = { cmd_exec_dev_diagnostic
, ALL_OK
},
2009 [WIN_SPECIFY
] = { cmd_nop
, HD_CFA_OK
| SET_DSC
},
2010 [WIN_STANDBYNOW2
] = { cmd_nop
, HD_CFA_OK
},
2011 [WIN_IDLEIMMEDIATE2
] = { cmd_nop
, HD_CFA_OK
},
2012 [WIN_STANDBY2
] = { cmd_nop
, HD_CFA_OK
},
2013 [WIN_SETIDLE2
] = { cmd_nop
, HD_CFA_OK
},
2014 [WIN_CHECKPOWERMODE2
] = { cmd_check_power_mode
, HD_CFA_OK
| SET_DSC
},
2015 [WIN_SLEEPNOW2
] = { cmd_nop
, HD_CFA_OK
},
2016 [WIN_PACKETCMD
] = { cmd_packet
, CD_OK
},
2017 [WIN_PIDENTIFY
] = { cmd_identify_packet
, CD_OK
},
2018 [WIN_SMART
] = { cmd_smart
, HD_CFA_OK
| SET_DSC
},
2019 [CFA_ACCESS_METADATA_STORAGE
] = { cmd_cfa_access_metadata_storage
, CFA_OK
},
2020 [CFA_ERASE_SECTORS
] = { cmd_cfa_erase_sectors
, CFA_OK
| SET_DSC
},
2021 [WIN_MULTREAD
] = { cmd_read_multiple
, HD_CFA_OK
},
2022 [WIN_MULTWRITE
] = { cmd_write_multiple
, HD_CFA_OK
},
2023 [WIN_SETMULT
] = { cmd_set_multiple_mode
, HD_CFA_OK
| SET_DSC
},
2024 [WIN_READDMA
] = { cmd_read_dma
, HD_CFA_OK
},
2025 [WIN_READDMA_ONCE
] = { cmd_read_dma
, HD_CFA_OK
},
2026 [WIN_WRITEDMA
] = { cmd_write_dma
, HD_CFA_OK
},
2027 [WIN_WRITEDMA_ONCE
] = { cmd_write_dma
, HD_CFA_OK
},
2028 [CFA_WRITE_MULTI_WO_ERASE
] = { cmd_write_multiple
, CFA_OK
},
2029 [WIN_STANDBYNOW1
] = { cmd_nop
, HD_CFA_OK
},
2030 [WIN_IDLEIMMEDIATE
] = { cmd_nop
, HD_CFA_OK
},
2031 [WIN_STANDBY
] = { cmd_nop
, HD_CFA_OK
},
2032 [WIN_SETIDLE1
] = { cmd_nop
, HD_CFA_OK
},
2033 [WIN_CHECKPOWERMODE1
] = { cmd_check_power_mode
, HD_CFA_OK
| SET_DSC
},
2034 [WIN_SLEEPNOW1
] = { cmd_nop
, HD_CFA_OK
},
2035 [WIN_FLUSH_CACHE
] = { cmd_flush_cache
, ALL_OK
},
2036 [WIN_FLUSH_CACHE_EXT
] = { cmd_flush_cache
, HD_CFA_OK
},
2037 [WIN_IDENTIFY
] = { cmd_identify
, ALL_OK
},
2038 [WIN_SETFEATURES
] = { cmd_set_features
, ALL_OK
| SET_DSC
},
2039 [IBM_SENSE_CONDITION
] = { cmd_ibm_sense_condition
, CFA_OK
| SET_DSC
},
2040 [CFA_WEAR_LEVEL
] = { cmd_cfa_erase_sectors
, HD_CFA_OK
| SET_DSC
},
2041 [WIN_READ_NATIVE_MAX
] = { cmd_read_native_max
, HD_CFA_OK
| SET_DSC
},
2044 static bool ide_cmd_permitted(IDEState
*s
, uint32_t cmd
)
2046 return cmd
< ARRAY_SIZE(ide_cmd_table
)
2047 && (ide_cmd_table
[cmd
].flags
& (1u << s
->drive_kind
));
2050 void ide_exec_cmd(IDEBus
*bus
, uint32_t val
)
2055 s
= idebus_active_if(bus
);
2056 trace_ide_exec_cmd(bus
, s
, val
);
2058 /* ignore commands to non existent slave */
2059 if (s
!= bus
->ifs
&& !s
->blk
) {
2063 /* Only RESET is allowed while BSY and/or DRQ are set,
2064 * and only to ATAPI devices. */
2065 if (s
->status
& (BUSY_STAT
|DRQ_STAT
)) {
2066 if (val
!= WIN_DEVICE_RESET
|| s
->drive_kind
!= IDE_CD
) {
2071 if (!ide_cmd_permitted(s
, val
)) {
2072 ide_abort_command(s
);
2073 ide_set_irq(s
->bus
);
2077 s
->status
= READY_STAT
| BUSY_STAT
;
2079 s
->io_buffer_offset
= 0;
2081 complete
= ide_cmd_table
[val
].handler(s
, val
);
2083 s
->status
&= ~BUSY_STAT
;
2084 assert(!!s
->error
== !!(s
->status
& ERR_STAT
));
2086 if ((ide_cmd_table
[val
].flags
& SET_DSC
) && !s
->error
) {
2087 s
->status
|= SEEK_STAT
;
2091 ide_set_irq(s
->bus
);
/* IOport [R]ead [R]egisters */
enum ATA_IOPORT_RR {
    ATA_IOPORT_RR_DATA = 0,
    ATA_IOPORT_RR_ERROR = 1,
    ATA_IOPORT_RR_SECTOR_COUNT = 2,
    ATA_IOPORT_RR_SECTOR_NUMBER = 3,
    ATA_IOPORT_RR_CYLINDER_LOW = 4,
    ATA_IOPORT_RR_CYLINDER_HIGH = 5,
    ATA_IOPORT_RR_DEVICE_HEAD = 6,
    ATA_IOPORT_RR_STATUS = 7,
    ATA_IOPORT_RR_NUM_REGISTERS,
};
2108 const char *ATA_IOPORT_RR_lookup
[ATA_IOPORT_RR_NUM_REGISTERS
] = {
2109 [ATA_IOPORT_RR_DATA
] = "Data",
2110 [ATA_IOPORT_RR_ERROR
] = "Error",
2111 [ATA_IOPORT_RR_SECTOR_COUNT
] = "Sector Count",
2112 [ATA_IOPORT_RR_SECTOR_NUMBER
] = "Sector Number",
2113 [ATA_IOPORT_RR_CYLINDER_LOW
] = "Cylinder Low",
2114 [ATA_IOPORT_RR_CYLINDER_HIGH
] = "Cylinder High",
2115 [ATA_IOPORT_RR_DEVICE_HEAD
] = "Device/Head",
2116 [ATA_IOPORT_RR_STATUS
] = "Status"
2119 uint32_t ide_ioport_read(void *opaque
, uint32_t addr
)
2121 IDEBus
*bus
= opaque
;
2122 IDEState
*s
= idebus_active_if(bus
);
2127 /* FIXME: HOB readback uses bit 7, but it's always set right now */
2128 //hob = s->select & (1 << 7);
2131 case ATA_IOPORT_RR_DATA
:
2134 case ATA_IOPORT_RR_ERROR
:
2135 if ((!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) ||
2136 (s
!= bus
->ifs
&& !s
->blk
)) {
2141 ret
= s
->hob_feature
;
2144 case ATA_IOPORT_RR_SECTOR_COUNT
:
2145 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
2148 ret
= s
->nsector
& 0xff;
2150 ret
= s
->hob_nsector
;
2153 case ATA_IOPORT_RR_SECTOR_NUMBER
:
2154 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
2159 ret
= s
->hob_sector
;
2162 case ATA_IOPORT_RR_CYLINDER_LOW
:
2163 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
2171 case ATA_IOPORT_RR_CYLINDER_HIGH
:
2172 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
2180 case ATA_IOPORT_RR_DEVICE_HEAD
:
2181 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
2188 case ATA_IOPORT_RR_STATUS
:
2189 if ((!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) ||
2190 (s
!= bus
->ifs
&& !s
->blk
)) {
2195 qemu_irq_lower(bus
->irq
);
2199 trace_ide_ioport_read(addr
, ATA_IOPORT_RR_lookup
[reg_num
], ret
, bus
, s
);
2203 uint32_t ide_status_read(void *opaque
, uint32_t addr
)
2205 IDEBus
*bus
= opaque
;
2206 IDEState
*s
= idebus_active_if(bus
);
2209 if ((!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) ||
2210 (s
!= bus
->ifs
&& !s
->blk
)) {
2216 trace_ide_status_read(addr
, ret
, bus
, s
);
2220 void ide_cmd_write(void *opaque
, uint32_t addr
, uint32_t val
)
2222 IDEBus
*bus
= opaque
;
2226 trace_ide_cmd_write(addr
, val
, bus
);
2228 /* common for both drives */
2229 if (!(bus
->cmd
& IDE_CMD_RESET
) &&
2230 (val
& IDE_CMD_RESET
)) {
2231 /* reset low to high */
2232 for(i
= 0;i
< 2; i
++) {
2234 s
->status
= BUSY_STAT
| SEEK_STAT
;
2237 } else if ((bus
->cmd
& IDE_CMD_RESET
) &&
2238 !(val
& IDE_CMD_RESET
)) {
2240 for(i
= 0;i
< 2; i
++) {
2242 if (s
->drive_kind
== IDE_CD
)
2243 s
->status
= 0x00; /* NOTE: READY is _not_ set */
2245 s
->status
= READY_STAT
| SEEK_STAT
;
2246 ide_set_signature(s
);
2254 * Returns true if the running PIO transfer is a PIO out (i.e. data is
2255 * transferred from the device to the guest), false if it's a PIO in
2257 static bool ide_is_pio_out(IDEState
*s
)
2259 if (s
->end_transfer_func
== ide_sector_write
||
2260 s
->end_transfer_func
== ide_atapi_cmd
) {
2262 } else if (s
->end_transfer_func
== ide_sector_read
||
2263 s
->end_transfer_func
== ide_transfer_stop
||
2264 s
->end_transfer_func
== ide_atapi_cmd_reply_end
||
2265 s
->end_transfer_func
== ide_dummy_transfer_stop
) {
2272 void ide_data_writew(void *opaque
, uint32_t addr
, uint32_t val
)
2274 IDEBus
*bus
= opaque
;
2275 IDEState
*s
= idebus_active_if(bus
);
2278 trace_ide_data_writew(addr
, val
, bus
, s
);
2280 /* PIO data access allowed only when DRQ bit is set. The result of a write
2281 * during PIO out is indeterminate, just ignore it. */
2282 if (!(s
->status
& DRQ_STAT
) || ide_is_pio_out(s
)) {
2287 if (p
+ 2 > s
->data_end
) {
2291 *(uint16_t *)p
= le16_to_cpu(val
);
2294 if (p
>= s
->data_end
) {
2295 s
->status
&= ~DRQ_STAT
;
2296 s
->end_transfer_func(s
);
2300 uint32_t ide_data_readw(void *opaque
, uint32_t addr
)
2302 IDEBus
*bus
= opaque
;
2303 IDEState
*s
= idebus_active_if(bus
);
2307 /* PIO data access allowed only when DRQ bit is set. The result of a read
2308 * during PIO in is indeterminate, return 0 and don't move forward. */
2309 if (!(s
->status
& DRQ_STAT
) || !ide_is_pio_out(s
)) {
2314 if (p
+ 2 > s
->data_end
) {
2318 ret
= cpu_to_le16(*(uint16_t *)p
);
2321 if (p
>= s
->data_end
) {
2322 s
->status
&= ~DRQ_STAT
;
2323 s
->end_transfer_func(s
);
2326 trace_ide_data_readw(addr
, ret
, bus
, s
);
2330 void ide_data_writel(void *opaque
, uint32_t addr
, uint32_t val
)
2332 IDEBus
*bus
= opaque
;
2333 IDEState
*s
= idebus_active_if(bus
);
2336 trace_ide_data_writel(addr
, val
, bus
, s
);
2338 /* PIO data access allowed only when DRQ bit is set. The result of a write
2339 * during PIO out is indeterminate, just ignore it. */
2340 if (!(s
->status
& DRQ_STAT
) || ide_is_pio_out(s
)) {
2345 if (p
+ 4 > s
->data_end
) {
2349 *(uint32_t *)p
= le32_to_cpu(val
);
2352 if (p
>= s
->data_end
) {
2353 s
->status
&= ~DRQ_STAT
;
2354 s
->end_transfer_func(s
);
2358 uint32_t ide_data_readl(void *opaque
, uint32_t addr
)
2360 IDEBus
*bus
= opaque
;
2361 IDEState
*s
= idebus_active_if(bus
);
2365 /* PIO data access allowed only when DRQ bit is set. The result of a read
2366 * during PIO in is indeterminate, return 0 and don't move forward. */
2367 if (!(s
->status
& DRQ_STAT
) || !ide_is_pio_out(s
)) {
2373 if (p
+ 4 > s
->data_end
) {
2377 ret
= cpu_to_le32(*(uint32_t *)p
);
2380 if (p
>= s
->data_end
) {
2381 s
->status
&= ~DRQ_STAT
;
2382 s
->end_transfer_func(s
);
2386 trace_ide_data_readl(addr
, ret
, bus
, s
);
2390 static void ide_dummy_transfer_stop(IDEState
*s
)
2392 s
->data_ptr
= s
->io_buffer
;
2393 s
->data_end
= s
->io_buffer
;
2394 s
->io_buffer
[0] = 0xff;
2395 s
->io_buffer
[1] = 0xff;
2396 s
->io_buffer
[2] = 0xff;
2397 s
->io_buffer
[3] = 0xff;
2400 void ide_bus_reset(IDEBus
*bus
)
2404 ide_reset(&bus
->ifs
[0]);
2405 ide_reset(&bus
->ifs
[1]);
2408 /* pending async DMA */
2409 if (bus
->dma
->aiocb
) {
2410 trace_ide_bus_reset_aio();
2411 blk_aio_cancel(bus
->dma
->aiocb
);
2412 bus
->dma
->aiocb
= NULL
;
2415 /* reset dma provider too */
2416 if (bus
->dma
->ops
->reset
) {
2417 bus
->dma
->ops
->reset(bus
->dma
);
2421 static bool ide_cd_is_tray_open(void *opaque
)
2423 return ((IDEState
*)opaque
)->tray_open
;
2426 static bool ide_cd_is_medium_locked(void *opaque
)
2428 return ((IDEState
*)opaque
)->tray_locked
;
2431 static void ide_resize_cb(void *opaque
)
2433 IDEState
*s
= opaque
;
2434 uint64_t nb_sectors
;
2436 if (!s
->identify_set
) {
2440 blk_get_geometry(s
->blk
, &nb_sectors
);
2441 s
->nb_sectors
= nb_sectors
;
2443 /* Update the identify data buffer. */
2444 if (s
->drive_kind
== IDE_CFATA
) {
2445 ide_cfata_identify_size(s
);
2447 /* IDE_CD uses a different set of callbacks entirely. */
2448 assert(s
->drive_kind
!= IDE_CD
);
2449 ide_identify_size(s
);
2453 static const BlockDevOps ide_cd_block_ops
= {
2454 .change_media_cb
= ide_cd_change_cb
,
2455 .eject_request_cb
= ide_cd_eject_request_cb
,
2456 .is_tray_open
= ide_cd_is_tray_open
,
2457 .is_medium_locked
= ide_cd_is_medium_locked
,
2460 static const BlockDevOps ide_hd_block_ops
= {
2461 .resize_cb
= ide_resize_cb
,
2464 int ide_init_drive(IDEState
*s
, BlockBackend
*blk
, IDEDriveKind kind
,
2465 const char *version
, const char *serial
, const char *model
,
2467 uint32_t cylinders
, uint32_t heads
, uint32_t secs
,
2468 int chs_trans
, Error
**errp
)
2470 uint64_t nb_sectors
;
2473 s
->drive_kind
= kind
;
2475 blk_get_geometry(blk
, &nb_sectors
);
2476 s
->cylinders
= cylinders
;
2479 s
->chs_trans
= chs_trans
;
2480 s
->nb_sectors
= nb_sectors
;
2482 /* The SMART values should be preserved across power cycles
2484 s
->smart_enabled
= 1;
2485 s
->smart_autosave
= 1;
2486 s
->smart_errors
= 0;
2487 s
->smart_selftest_count
= 0;
2488 if (kind
== IDE_CD
) {
2489 blk_set_dev_ops(blk
, &ide_cd_block_ops
, s
);
2490 blk_set_guest_block_size(blk
, 2048);
2492 if (!blk_is_inserted(s
->blk
)) {
2493 error_setg(errp
, "Device needs media, but drive is empty");
2496 if (blk_is_read_only(blk
)) {
2497 error_setg(errp
, "Can't use a read-only drive");
2500 blk_set_dev_ops(blk
, &ide_hd_block_ops
, s
);
2503 pstrcpy(s
->drive_serial_str
, sizeof(s
->drive_serial_str
), serial
);
2505 snprintf(s
->drive_serial_str
, sizeof(s
->drive_serial_str
),
2506 "QM%05d", s
->drive_serial
);
2509 pstrcpy(s
->drive_model_str
, sizeof(s
->drive_model_str
), model
);
2513 strcpy(s
->drive_model_str
, "QEMU DVD-ROM");
2516 strcpy(s
->drive_model_str
, "QEMU MICRODRIVE");
2519 strcpy(s
->drive_model_str
, "QEMU HARDDISK");
2525 pstrcpy(s
->version
, sizeof(s
->version
), version
);
2527 pstrcpy(s
->version
, sizeof(s
->version
), qemu_hw_version());
2531 blk_iostatus_enable(blk
);
2535 static void ide_init1(IDEBus
*bus
, int unit
)
2537 static int drive_serial
= 1;
2538 IDEState
*s
= &bus
->ifs
[unit
];
2542 s
->drive_serial
= drive_serial
++;
2543 /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2544 s
->io_buffer_total_len
= IDE_DMA_BUF_SECTORS
*512 + 4;
2545 s
->io_buffer
= qemu_memalign(2048, s
->io_buffer_total_len
);
2546 memset(s
->io_buffer
, 0, s
->io_buffer_total_len
);
2548 s
->smart_selftest_data
= blk_blockalign(s
->blk
, 512);
2549 memset(s
->smart_selftest_data
, 0, 512);
2551 s
->sector_write_timer
= timer_new_ns(QEMU_CLOCK_VIRTUAL
,
2552 ide_sector_write_timer_cb
, s
);
2555 static int ide_nop_int(IDEDMA
*dma
, int x
)
2560 static void ide_nop(IDEDMA
*dma
)
2564 static int32_t ide_nop_int32(IDEDMA
*dma
, int32_t l
)
2569 static const IDEDMAOps ide_dma_nop_ops
= {
2570 .prepare_buf
= ide_nop_int32
,
2571 .restart_dma
= ide_nop
,
2572 .rw_buf
= ide_nop_int
,
2575 static void ide_restart_dma(IDEState
*s
, enum ide_dma_cmd dma_cmd
)
2577 s
->unit
= s
->bus
->retry_unit
;
2578 ide_set_sector(s
, s
->bus
->retry_sector_num
);
2579 s
->nsector
= s
->bus
->retry_nsector
;
2580 s
->bus
->dma
->ops
->restart_dma(s
->bus
->dma
);
2581 s
->io_buffer_size
= 0;
2582 s
->dma_cmd
= dma_cmd
;
2583 ide_start_dma(s
, ide_dma_cb
);
2586 static void ide_restart_bh(void *opaque
)
2588 IDEBus
*bus
= opaque
;
2593 qemu_bh_delete(bus
->bh
);
2596 error_status
= bus
->error_status
;
2597 if (bus
->error_status
== 0) {
2601 s
= idebus_active_if(bus
);
2602 is_read
= (bus
->error_status
& IDE_RETRY_READ
) != 0;
2604 /* The error status must be cleared before resubmitting the request: The
2605 * request may fail again, and this case can only be distinguished if the
2606 * called function can set a new error status. */
2607 bus
->error_status
= 0;
2609 /* The HBA has generically asked to be kicked on retry */
2610 if (error_status
& IDE_RETRY_HBA
) {
2611 if (s
->bus
->dma
->ops
->restart
) {
2612 s
->bus
->dma
->ops
->restart(s
->bus
->dma
);
2614 } else if (IS_IDE_RETRY_DMA(error_status
)) {
2615 if (error_status
& IDE_RETRY_TRIM
) {
2616 ide_restart_dma(s
, IDE_DMA_TRIM
);
2618 ide_restart_dma(s
, is_read
? IDE_DMA_READ
: IDE_DMA_WRITE
);
2620 } else if (IS_IDE_RETRY_PIO(error_status
)) {
2624 ide_sector_write(s
);
2626 } else if (error_status
& IDE_RETRY_FLUSH
) {
2628 } else if (IS_IDE_RETRY_ATAPI(error_status
)) {
2629 assert(s
->end_transfer_func
== ide_atapi_cmd
);
2630 ide_atapi_dma_restart(s
);
2636 static void ide_restart_cb(void *opaque
, int running
, RunState state
)
2638 IDEBus
*bus
= opaque
;
2644 bus
->bh
= qemu_bh_new(ide_restart_bh
, bus
);
2645 qemu_bh_schedule(bus
->bh
);
2649 void ide_register_restart_cb(IDEBus
*bus
)
2651 if (bus
->dma
->ops
->restart_dma
) {
2652 bus
->vmstate
= qemu_add_vm_change_state_handler(ide_restart_cb
, bus
);
2656 static IDEDMA ide_dma_nop
= {
2657 .ops
= &ide_dma_nop_ops
,
2661 void ide_init2(IDEBus
*bus
, qemu_irq irq
)
2665 for(i
= 0; i
< 2; i
++) {
2667 ide_reset(&bus
->ifs
[i
]);
2670 bus
->dma
= &ide_dma_nop
;
2673 void ide_exit(IDEState
*s
)
2675 timer_del(s
->sector_write_timer
);
2676 timer_free(s
->sector_write_timer
);
2677 qemu_vfree(s
->smart_selftest_data
);
2678 qemu_vfree(s
->io_buffer
);
2681 static const MemoryRegionPortio ide_portio_list
[] = {
2682 { 0, 8, 1, .read
= ide_ioport_read
, .write
= ide_ioport_write
},
2683 { 0, 1, 2, .read
= ide_data_readw
, .write
= ide_data_writew
},
2684 { 0, 1, 4, .read
= ide_data_readl
, .write
= ide_data_writel
},
2685 PORTIO_END_OF_LIST(),
2688 static const MemoryRegionPortio ide_portio2_list
[] = {
2689 { 0, 1, 1, .read
= ide_status_read
, .write
= ide_cmd_write
},
2690 PORTIO_END_OF_LIST(),
2693 void ide_init_ioport(IDEBus
*bus
, ISADevice
*dev
, int iobase
, int iobase2
)
2695 /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
2696 bridge has been setup properly to always register with ISA. */
2697 isa_register_portio_list(dev
, &bus
->portio_list
,
2698 iobase
, ide_portio_list
, bus
, "ide");
2701 isa_register_portio_list(dev
, &bus
->portio2_list
,
2702 iobase2
, ide_portio2_list
, bus
, "ide");
2706 static bool is_identify_set(void *opaque
, int version_id
)
2708 IDEState
*s
= opaque
;
2710 return s
->identify_set
!= 0;
2713 static EndTransferFunc
* transfer_end_table
[] = {
2717 ide_atapi_cmd_reply_end
,
2719 ide_dummy_transfer_stop
,
2722 static int transfer_end_table_idx(EndTransferFunc
*fn
)
2726 for (i
= 0; i
< ARRAY_SIZE(transfer_end_table
); i
++)
2727 if (transfer_end_table
[i
] == fn
)
2733 static int ide_drive_post_load(void *opaque
, int version_id
)
2735 IDEState
*s
= opaque
;
2737 if (s
->blk
&& s
->identify_set
) {
2738 blk_set_enable_write_cache(s
->blk
, !!(s
->identify_data
[85] & (1 << 5)));
2743 static int ide_drive_pio_post_load(void *opaque
, int version_id
)
2745 IDEState
*s
= opaque
;
2747 if (s
->end_transfer_fn_idx
>= ARRAY_SIZE(transfer_end_table
)) {
2750 s
->end_transfer_func
= transfer_end_table
[s
->end_transfer_fn_idx
];
2751 s
->data_ptr
= s
->io_buffer
+ s
->cur_io_buffer_offset
;
2752 s
->data_end
= s
->data_ptr
+ s
->cur_io_buffer_len
;
2753 s
->atapi_dma
= s
->feature
& 1; /* as per cmd_packet */
2758 static int ide_drive_pio_pre_save(void *opaque
)
2760 IDEState
*s
= opaque
;
2763 s
->cur_io_buffer_offset
= s
->data_ptr
- s
->io_buffer
;
2764 s
->cur_io_buffer_len
= s
->data_end
- s
->data_ptr
;
2766 idx
= transfer_end_table_idx(s
->end_transfer_func
);
2768 fprintf(stderr
, "%s: invalid end_transfer_func for DRQ_STAT\n",
2770 s
->end_transfer_fn_idx
= 2;
2772 s
->end_transfer_fn_idx
= idx
;
2778 static bool ide_drive_pio_state_needed(void *opaque
)
2780 IDEState
*s
= opaque
;
2782 return ((s
->status
& DRQ_STAT
) != 0)
2783 || (s
->bus
->error_status
& IDE_RETRY_PIO
);
2786 static bool ide_tray_state_needed(void *opaque
)
2788 IDEState
*s
= opaque
;
2790 return s
->tray_open
|| s
->tray_locked
;
2793 static bool ide_atapi_gesn_needed(void *opaque
)
2795 IDEState
*s
= opaque
;
2797 return s
->events
.new_media
|| s
->events
.eject_request
;
2800 static bool ide_error_needed(void *opaque
)
2802 IDEBus
*bus
= opaque
;
2804 return (bus
->error_status
!= 0);
2807 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2808 static const VMStateDescription vmstate_ide_atapi_gesn_state
= {
2809 .name
="ide_drive/atapi/gesn_state",
2811 .minimum_version_id
= 1,
2812 .needed
= ide_atapi_gesn_needed
,
2813 .fields
= (VMStateField
[]) {
2814 VMSTATE_BOOL(events
.new_media
, IDEState
),
2815 VMSTATE_BOOL(events
.eject_request
, IDEState
),
2816 VMSTATE_END_OF_LIST()
2820 static const VMStateDescription vmstate_ide_tray_state
= {
2821 .name
= "ide_drive/tray_state",
2823 .minimum_version_id
= 1,
2824 .needed
= ide_tray_state_needed
,
2825 .fields
= (VMStateField
[]) {
2826 VMSTATE_BOOL(tray_open
, IDEState
),
2827 VMSTATE_BOOL(tray_locked
, IDEState
),
2828 VMSTATE_END_OF_LIST()
2832 static const VMStateDescription vmstate_ide_drive_pio_state
= {
2833 .name
= "ide_drive/pio_state",
2835 .minimum_version_id
= 1,
2836 .pre_save
= ide_drive_pio_pre_save
,
2837 .post_load
= ide_drive_pio_post_load
,
2838 .needed
= ide_drive_pio_state_needed
,
2839 .fields
= (VMStateField
[]) {
2840 VMSTATE_INT32(req_nb_sectors
, IDEState
),
2841 VMSTATE_VARRAY_INT32(io_buffer
, IDEState
, io_buffer_total_len
, 1,
2842 vmstate_info_uint8
, uint8_t),
2843 VMSTATE_INT32(cur_io_buffer_offset
, IDEState
),
2844 VMSTATE_INT32(cur_io_buffer_len
, IDEState
),
2845 VMSTATE_UINT8(end_transfer_fn_idx
, IDEState
),
2846 VMSTATE_INT32(elementary_transfer_size
, IDEState
),
2847 VMSTATE_INT32(packet_transfer_size
, IDEState
),
2848 VMSTATE_END_OF_LIST()
2852 const VMStateDescription vmstate_ide_drive
= {
2853 .name
= "ide_drive",
2855 .minimum_version_id
= 0,
2856 .post_load
= ide_drive_post_load
,
2857 .fields
= (VMStateField
[]) {
2858 VMSTATE_INT32(mult_sectors
, IDEState
),
2859 VMSTATE_INT32(identify_set
, IDEState
),
2860 VMSTATE_BUFFER_TEST(identify_data
, IDEState
, is_identify_set
),
2861 VMSTATE_UINT8(feature
, IDEState
),
2862 VMSTATE_UINT8(error
, IDEState
),
2863 VMSTATE_UINT32(nsector
, IDEState
),
2864 VMSTATE_UINT8(sector
, IDEState
),
2865 VMSTATE_UINT8(lcyl
, IDEState
),
2866 VMSTATE_UINT8(hcyl
, IDEState
),
2867 VMSTATE_UINT8(hob_feature
, IDEState
),
2868 VMSTATE_UINT8(hob_sector
, IDEState
),
2869 VMSTATE_UINT8(hob_nsector
, IDEState
),
2870 VMSTATE_UINT8(hob_lcyl
, IDEState
),
2871 VMSTATE_UINT8(hob_hcyl
, IDEState
),
2872 VMSTATE_UINT8(select
, IDEState
),
2873 VMSTATE_UINT8(status
, IDEState
),
2874 VMSTATE_UINT8(lba48
, IDEState
),
2875 VMSTATE_UINT8(sense_key
, IDEState
),
2876 VMSTATE_UINT8(asc
, IDEState
),
2877 VMSTATE_UINT8_V(cdrom_changed
, IDEState
, 3),
2878 VMSTATE_END_OF_LIST()
2880 .subsections
= (const VMStateDescription
*[]) {
2881 &vmstate_ide_drive_pio_state
,
2882 &vmstate_ide_tray_state
,
2883 &vmstate_ide_atapi_gesn_state
,
2888 static const VMStateDescription vmstate_ide_error_status
= {
2889 .name
="ide_bus/error",
2891 .minimum_version_id
= 1,
2892 .needed
= ide_error_needed
,
2893 .fields
= (VMStateField
[]) {
2894 VMSTATE_INT32(error_status
, IDEBus
),
2895 VMSTATE_INT64_V(retry_sector_num
, IDEBus
, 2),
2896 VMSTATE_UINT32_V(retry_nsector
, IDEBus
, 2),
2897 VMSTATE_UINT8_V(retry_unit
, IDEBus
, 2),
2898 VMSTATE_END_OF_LIST()
2902 const VMStateDescription vmstate_ide_bus
= {
2905 .minimum_version_id
= 1,
2906 .fields
= (VMStateField
[]) {
2907 VMSTATE_UINT8(cmd
, IDEBus
),
2908 VMSTATE_UINT8(unit
, IDEBus
),
2909 VMSTATE_END_OF_LIST()
2911 .subsections
= (const VMStateDescription
*[]) {
2912 &vmstate_ide_error_status
,
2917 void ide_drive_get(DriveInfo
**hd
, int n
)
2921 for (i
= 0; i
< n
; i
++) {
2922 hd
[i
] = drive_get_by_index(IF_IDE
, i
);