/*
 * QEMU IDE disk and CD/DVD-ROM Emulator
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Openedhand Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
25 #include "qemu/osdep.h"
27 #include "hw/i386/pc.h"
28 #include "hw/pci/pci.h"
29 #include "hw/isa/isa.h"
30 #include "qemu/error-report.h"
31 #include "qemu/timer.h"
32 #include "sysemu/sysemu.h"
33 #include "sysemu/dma.h"
34 #include "hw/block/block.h"
35 #include "sysemu/block-backend.h"
36 #include "qemu/cutils.h"
37 #include "qemu/error-report.h"
39 #include "hw/ide/internal.h"
42 /* These values were based on a Seagate ST3500418AS but have been modified
43 to make more sense in QEMU */
44 static const int smart_attributes
[][12] = {
45 /* id, flags, hflags, val, wrst, raw (6 bytes), threshold */
46 /* raw read error rate*/
47 { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
49 { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
50 /* start stop count */
51 { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
52 /* remapped sectors */
53 { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
55 { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
56 /* power cycle count */
57 { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
58 /* airflow-temperature-celsius */
59 { 190, 0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
62 const char *IDE_DMA_CMD_lookup
[IDE_DMA__COUNT
] = {
63 [IDE_DMA_READ
] = "DMA READ",
64 [IDE_DMA_WRITE
] = "DMA WRITE",
65 [IDE_DMA_TRIM
] = "DMA TRIM",
66 [IDE_DMA_ATAPI
] = "DMA ATAPI"
69 static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval
)
71 if (enval
>= IDE_DMA__BEGIN
&& enval
< IDE_DMA__COUNT
) {
72 return IDE_DMA_CMD_lookup
[enval
];
74 return "DMA UNKNOWN CMD";
77 static void ide_dummy_transfer_stop(IDEState
*s
);
79 static void padstr(char *str
, const char *src
, int len
)
82 for(i
= 0; i
< len
; i
++) {
/* Store v at *p in little-endian byte order (IDENTIFY data is LE on the
 * wire regardless of host endianness). */
static void put_le16(uint16_t *p, unsigned int v)
{
    *p = cpu_to_le16(v);
}
96 static void ide_identify_size(IDEState
*s
)
98 uint16_t *p
= (uint16_t *)s
->identify_data
;
99 put_le16(p
+ 60, s
->nb_sectors
);
100 put_le16(p
+ 61, s
->nb_sectors
>> 16);
101 put_le16(p
+ 100, s
->nb_sectors
);
102 put_le16(p
+ 101, s
->nb_sectors
>> 16);
103 put_le16(p
+ 102, s
->nb_sectors
>> 32);
104 put_le16(p
+ 103, s
->nb_sectors
>> 48);
107 static void ide_identify(IDEState
*s
)
110 unsigned int oldsize
;
111 IDEDevice
*dev
= s
->unit
? s
->bus
->slave
: s
->bus
->master
;
113 p
= (uint16_t *)s
->identify_data
;
114 if (s
->identify_set
) {
117 memset(p
, 0, sizeof(s
->identify_data
));
119 put_le16(p
+ 0, 0x0040);
120 put_le16(p
+ 1, s
->cylinders
);
121 put_le16(p
+ 3, s
->heads
);
122 put_le16(p
+ 4, 512 * s
->sectors
); /* XXX: retired, remove ? */
123 put_le16(p
+ 5, 512); /* XXX: retired, remove ? */
124 put_le16(p
+ 6, s
->sectors
);
125 padstr((char *)(p
+ 10), s
->drive_serial_str
, 20); /* serial number */
126 put_le16(p
+ 20, 3); /* XXX: retired, remove ? */
127 put_le16(p
+ 21, 512); /* cache size in sectors */
128 put_le16(p
+ 22, 4); /* ecc bytes */
129 padstr((char *)(p
+ 23), s
->version
, 8); /* firmware version */
130 padstr((char *)(p
+ 27), s
->drive_model_str
, 40); /* model */
131 #if MAX_MULT_SECTORS > 1
132 put_le16(p
+ 47, 0x8000 | MAX_MULT_SECTORS
);
134 put_le16(p
+ 48, 1); /* dword I/O */
135 put_le16(p
+ 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
136 put_le16(p
+ 51, 0x200); /* PIO transfer cycle */
137 put_le16(p
+ 52, 0x200); /* DMA transfer cycle */
138 put_le16(p
+ 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
139 put_le16(p
+ 54, s
->cylinders
);
140 put_le16(p
+ 55, s
->heads
);
141 put_le16(p
+ 56, s
->sectors
);
142 oldsize
= s
->cylinders
* s
->heads
* s
->sectors
;
143 put_le16(p
+ 57, oldsize
);
144 put_le16(p
+ 58, oldsize
>> 16);
146 put_le16(p
+ 59, 0x100 | s
->mult_sectors
);
147 /* *(p + 60) := nb_sectors -- see ide_identify_size */
148 /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
149 put_le16(p
+ 62, 0x07); /* single word dma0-2 supported */
150 put_le16(p
+ 63, 0x07); /* mdma0-2 supported */
151 put_le16(p
+ 64, 0x03); /* pio3-4 supported */
152 put_le16(p
+ 65, 120);
153 put_le16(p
+ 66, 120);
154 put_le16(p
+ 67, 120);
155 put_le16(p
+ 68, 120);
156 if (dev
&& dev
->conf
.discard_granularity
) {
157 put_le16(p
+ 69, (1 << 14)); /* determinate TRIM behavior */
161 put_le16(p
+ 75, s
->ncq_queues
- 1);
163 put_le16(p
+ 76, (1 << 8));
166 put_le16(p
+ 80, 0xf0); /* ata3 -> ata6 supported */
167 put_le16(p
+ 81, 0x16); /* conforms to ata5 */
168 /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
169 put_le16(p
+ 82, (1 << 14) | (1 << 5) | 1);
170 /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
171 put_le16(p
+ 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
172 /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
174 put_le16(p
+ 84, (1 << 14) | (1 << 8) | 0);
176 put_le16(p
+ 84, (1 << 14) | 0);
178 /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
179 if (blk_enable_write_cache(s
->blk
)) {
180 put_le16(p
+ 85, (1 << 14) | (1 << 5) | 1);
182 put_le16(p
+ 85, (1 << 14) | 1);
184 /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
185 put_le16(p
+ 86, (1 << 13) | (1 <<12) | (1 << 10));
186 /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
188 put_le16(p
+ 87, (1 << 14) | (1 << 8) | 0);
190 put_le16(p
+ 87, (1 << 14) | 0);
192 put_le16(p
+ 88, 0x3f | (1 << 13)); /* udma5 set and supported */
193 put_le16(p
+ 93, 1 | (1 << 14) | 0x2000);
194 /* *(p + 100) := nb_sectors -- see ide_identify_size */
195 /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
196 /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
197 /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
199 if (dev
&& dev
->conf
.physical_block_size
)
200 put_le16(p
+ 106, 0x6000 | get_physical_block_exp(&dev
->conf
));
202 /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
203 put_le16(p
+ 108, s
->wwn
>> 48);
204 put_le16(p
+ 109, s
->wwn
>> 32);
205 put_le16(p
+ 110, s
->wwn
>> 16);
206 put_le16(p
+ 111, s
->wwn
);
208 if (dev
&& dev
->conf
.discard_granularity
) {
209 put_le16(p
+ 169, 1); /* TRIM support */
212 ide_identify_size(s
);
216 memcpy(s
->io_buffer
, p
, sizeof(s
->identify_data
));
219 static void ide_atapi_identify(IDEState
*s
)
223 p
= (uint16_t *)s
->identify_data
;
224 if (s
->identify_set
) {
227 memset(p
, 0, sizeof(s
->identify_data
));
229 /* Removable CDROM, 50us response, 12 byte packets */
230 put_le16(p
+ 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
231 padstr((char *)(p
+ 10), s
->drive_serial_str
, 20); /* serial number */
232 put_le16(p
+ 20, 3); /* buffer type */
233 put_le16(p
+ 21, 512); /* cache size in sectors */
234 put_le16(p
+ 22, 4); /* ecc bytes */
235 padstr((char *)(p
+ 23), s
->version
, 8); /* firmware version */
236 padstr((char *)(p
+ 27), s
->drive_model_str
, 40); /* model */
237 put_le16(p
+ 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
239 put_le16(p
+ 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
240 put_le16(p
+ 53, 7); /* words 64-70, 54-58, 88 valid */
241 put_le16(p
+ 62, 7); /* single word dma0-2 supported */
242 put_le16(p
+ 63, 7); /* mdma0-2 supported */
244 put_le16(p
+ 49, 1 << 9); /* LBA supported, no DMA */
245 put_le16(p
+ 53, 3); /* words 64-70, 54-58 valid */
246 put_le16(p
+ 63, 0x103); /* DMA modes XXX: may be incorrect */
248 put_le16(p
+ 64, 3); /* pio3-4 supported */
249 put_le16(p
+ 65, 0xb4); /* minimum DMA multiword tx cycle time */
250 put_le16(p
+ 66, 0xb4); /* recommended DMA multiword tx cycle time */
251 put_le16(p
+ 67, 0x12c); /* minimum PIO cycle time without flow control */
252 put_le16(p
+ 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
254 put_le16(p
+ 71, 30); /* in ns */
255 put_le16(p
+ 72, 30); /* in ns */
258 put_le16(p
+ 75, s
->ncq_queues
- 1);
260 put_le16(p
+ 76, (1 << 8));
263 put_le16(p
+ 80, 0x1e); /* support up to ATA/ATAPI-4 */
265 put_le16(p
+ 84, (1 << 8)); /* supports WWN for words 108-111 */
266 put_le16(p
+ 87, (1 << 8)); /* WWN enabled */
270 put_le16(p
+ 88, 0x3f | (1 << 13)); /* udma5 set and supported */
274 /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
275 put_le16(p
+ 108, s
->wwn
>> 48);
276 put_le16(p
+ 109, s
->wwn
>> 32);
277 put_le16(p
+ 110, s
->wwn
>> 16);
278 put_le16(p
+ 111, s
->wwn
);
284 memcpy(s
->io_buffer
, p
, sizeof(s
->identify_data
));
287 static void ide_cfata_identify_size(IDEState
*s
)
289 uint16_t *p
= (uint16_t *)s
->identify_data
;
290 put_le16(p
+ 7, s
->nb_sectors
>> 16); /* Sectors per card */
291 put_le16(p
+ 8, s
->nb_sectors
); /* Sectors per card */
292 put_le16(p
+ 60, s
->nb_sectors
); /* Total LBA sectors */
293 put_le16(p
+ 61, s
->nb_sectors
>> 16); /* Total LBA sectors */
296 static void ide_cfata_identify(IDEState
*s
)
301 p
= (uint16_t *)s
->identify_data
;
302 if (s
->identify_set
) {
305 memset(p
, 0, sizeof(s
->identify_data
));
307 cur_sec
= s
->cylinders
* s
->heads
* s
->sectors
;
309 put_le16(p
+ 0, 0x848a); /* CF Storage Card signature */
310 put_le16(p
+ 1, s
->cylinders
); /* Default cylinders */
311 put_le16(p
+ 3, s
->heads
); /* Default heads */
312 put_le16(p
+ 6, s
->sectors
); /* Default sectors per track */
313 /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
314 /* *(p + 8) := nb_sectors -- see ide_cfata_identify_size */
315 padstr((char *)(p
+ 10), s
->drive_serial_str
, 20); /* serial number */
316 put_le16(p
+ 22, 0x0004); /* ECC bytes */
317 padstr((char *) (p
+ 23), s
->version
, 8); /* Firmware Revision */
318 padstr((char *) (p
+ 27), s
->drive_model_str
, 40);/* Model number */
319 #if MAX_MULT_SECTORS > 1
320 put_le16(p
+ 47, 0x8000 | MAX_MULT_SECTORS
);
322 put_le16(p
+ 47, 0x0000);
324 put_le16(p
+ 49, 0x0f00); /* Capabilities */
325 put_le16(p
+ 51, 0x0002); /* PIO cycle timing mode */
326 put_le16(p
+ 52, 0x0001); /* DMA cycle timing mode */
327 put_le16(p
+ 53, 0x0003); /* Translation params valid */
328 put_le16(p
+ 54, s
->cylinders
); /* Current cylinders */
329 put_le16(p
+ 55, s
->heads
); /* Current heads */
330 put_le16(p
+ 56, s
->sectors
); /* Current sectors */
331 put_le16(p
+ 57, cur_sec
); /* Current capacity */
332 put_le16(p
+ 58, cur_sec
>> 16); /* Current capacity */
333 if (s
->mult_sectors
) /* Multiple sector setting */
334 put_le16(p
+ 59, 0x100 | s
->mult_sectors
);
335 /* *(p + 60) := nb_sectors -- see ide_cfata_identify_size */
336 /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
337 put_le16(p
+ 63, 0x0203); /* Multiword DMA capability */
338 put_le16(p
+ 64, 0x0001); /* Flow Control PIO support */
339 put_le16(p
+ 65, 0x0096); /* Min. Multiword DMA cycle */
340 put_le16(p
+ 66, 0x0096); /* Rec. Multiword DMA cycle */
341 put_le16(p
+ 68, 0x00b4); /* Min. PIO cycle time */
342 put_le16(p
+ 82, 0x400c); /* Command Set supported */
343 put_le16(p
+ 83, 0x7068); /* Command Set supported */
344 put_le16(p
+ 84, 0x4000); /* Features supported */
345 put_le16(p
+ 85, 0x000c); /* Command Set enabled */
346 put_le16(p
+ 86, 0x7044); /* Command Set enabled */
347 put_le16(p
+ 87, 0x4000); /* Features enabled */
348 put_le16(p
+ 91, 0x4060); /* Current APM level */
349 put_le16(p
+ 129, 0x0002); /* Current features option */
350 put_le16(p
+ 130, 0x0005); /* Reassigned sectors */
351 put_le16(p
+ 131, 0x0001); /* Initial power mode */
352 put_le16(p
+ 132, 0x0000); /* User signature */
353 put_le16(p
+ 160, 0x8100); /* Power requirement */
354 put_le16(p
+ 161, 0x8001); /* CF command set */
356 ide_cfata_identify_size(s
);
360 memcpy(s
->io_buffer
, p
, sizeof(s
->identify_data
));
363 static void ide_set_signature(IDEState
*s
)
365 s
->select
&= 0xf0; /* clear head */
369 if (s
->drive_kind
== IDE_CD
) {
381 typedef struct TrimAIOCB
{
391 static void trim_aio_cancel(BlockAIOCB
*acb
)
393 TrimAIOCB
*iocb
= container_of(acb
, TrimAIOCB
, common
);
395 /* Exit the loop so ide_issue_trim_cb will not continue */
396 iocb
->j
= iocb
->qiov
->niov
- 1;
397 iocb
->i
= (iocb
->qiov
->iov
[iocb
->j
].iov_len
/ 8) - 1;
399 iocb
->ret
= -ECANCELED
;
402 blk_aio_cancel_async(iocb
->aiocb
);
407 static const AIOCBInfo trim_aiocb_info
= {
408 .aiocb_size
= sizeof(TrimAIOCB
),
409 .cancel_async
= trim_aio_cancel
,
412 static void ide_trim_bh_cb(void *opaque
)
414 TrimAIOCB
*iocb
= opaque
;
416 iocb
->common
.cb(iocb
->common
.opaque
, iocb
->ret
);
418 qemu_bh_delete(iocb
->bh
);
420 qemu_aio_unref(iocb
);
423 static void ide_issue_trim_cb(void *opaque
, int ret
)
425 TrimAIOCB
*iocb
= opaque
;
427 while (iocb
->j
< iocb
->qiov
->niov
) {
429 while (++iocb
->i
< iocb
->qiov
->iov
[j
].iov_len
/ 8) {
431 uint64_t *buffer
= iocb
->qiov
->iov
[j
].iov_base
;
433 /* 6-byte LBA + 2-byte range per entry */
434 uint64_t entry
= le64_to_cpu(buffer
[i
]);
435 uint64_t sector
= entry
& 0x0000ffffffffffffULL
;
436 uint16_t count
= entry
>> 48;
442 /* Got an entry! Submit and exit. */
443 iocb
->aiocb
= blk_aio_pdiscard(iocb
->blk
,
444 sector
<< BDRV_SECTOR_BITS
,
445 count
<< BDRV_SECTOR_BITS
,
446 ide_issue_trim_cb
, opaque
);
459 qemu_bh_schedule(iocb
->bh
);
463 BlockAIOCB
*ide_issue_trim(
464 int64_t offset
, QEMUIOVector
*qiov
,
465 BlockCompletionFunc
*cb
, void *cb_opaque
, void *opaque
)
467 BlockBackend
*blk
= opaque
;
470 iocb
= blk_aio_get(&trim_aiocb_info
, blk
, cb
, cb_opaque
);
472 iocb
->bh
= qemu_bh_new(ide_trim_bh_cb
, iocb
);
477 ide_issue_trim_cb(iocb
, 0);
478 return &iocb
->common
;
481 void ide_abort_command(IDEState
*s
)
483 ide_transfer_stop(s
);
484 s
->status
= READY_STAT
| ERR_STAT
;
488 static void ide_set_retry(IDEState
*s
)
490 s
->bus
->retry_unit
= s
->unit
;
491 s
->bus
->retry_sector_num
= ide_get_sector(s
);
492 s
->bus
->retry_nsector
= s
->nsector
;
495 static void ide_clear_retry(IDEState
*s
)
497 s
->bus
->retry_unit
= -1;
498 s
->bus
->retry_sector_num
= 0;
499 s
->bus
->retry_nsector
= 0;
502 /* prepare data transfer and tell what to do after */
503 void ide_transfer_start(IDEState
*s
, uint8_t *buf
, int size
,
504 EndTransferFunc
*end_transfer_func
)
506 s
->end_transfer_func
= end_transfer_func
;
508 s
->data_end
= buf
+ size
;
510 if (!(s
->status
& ERR_STAT
)) {
511 s
->status
|= DRQ_STAT
;
513 if (s
->bus
->dma
->ops
->start_transfer
) {
514 s
->bus
->dma
->ops
->start_transfer(s
->bus
->dma
);
518 static void ide_cmd_done(IDEState
*s
)
520 if (s
->bus
->dma
->ops
->cmd_done
) {
521 s
->bus
->dma
->ops
->cmd_done(s
->bus
->dma
);
525 static void ide_transfer_halt(IDEState
*s
,
526 void(*end_transfer_func
)(IDEState
*),
529 s
->end_transfer_func
= end_transfer_func
;
530 s
->data_ptr
= s
->io_buffer
;
531 s
->data_end
= s
->io_buffer
;
532 s
->status
&= ~DRQ_STAT
;
538 void ide_transfer_stop(IDEState
*s
)
540 ide_transfer_halt(s
, ide_transfer_stop
, true);
543 static void ide_transfer_cancel(IDEState
*s
)
545 ide_transfer_halt(s
, ide_transfer_cancel
, false);
548 int64_t ide_get_sector(IDEState
*s
)
551 if (s
->select
& 0x40) {
554 sector_num
= ((s
->select
& 0x0f) << 24) | (s
->hcyl
<< 16) |
555 (s
->lcyl
<< 8) | s
->sector
;
557 sector_num
= ((int64_t)s
->hob_hcyl
<< 40) |
558 ((int64_t) s
->hob_lcyl
<< 32) |
559 ((int64_t) s
->hob_sector
<< 24) |
560 ((int64_t) s
->hcyl
<< 16) |
561 ((int64_t) s
->lcyl
<< 8) | s
->sector
;
564 sector_num
= ((s
->hcyl
<< 8) | s
->lcyl
) * s
->heads
* s
->sectors
+
565 (s
->select
& 0x0f) * s
->sectors
+ (s
->sector
- 1);
570 void ide_set_sector(IDEState
*s
, int64_t sector_num
)
573 if (s
->select
& 0x40) {
575 s
->select
= (s
->select
& 0xf0) | (sector_num
>> 24);
576 s
->hcyl
= (sector_num
>> 16);
577 s
->lcyl
= (sector_num
>> 8);
578 s
->sector
= (sector_num
);
580 s
->sector
= sector_num
;
581 s
->lcyl
= sector_num
>> 8;
582 s
->hcyl
= sector_num
>> 16;
583 s
->hob_sector
= sector_num
>> 24;
584 s
->hob_lcyl
= sector_num
>> 32;
585 s
->hob_hcyl
= sector_num
>> 40;
588 cyl
= sector_num
/ (s
->heads
* s
->sectors
);
589 r
= sector_num
% (s
->heads
* s
->sectors
);
592 s
->select
= (s
->select
& 0xf0) | ((r
/ s
->sectors
) & 0x0f);
593 s
->sector
= (r
% s
->sectors
) + 1;
597 static void ide_rw_error(IDEState
*s
) {
598 ide_abort_command(s
);
602 static bool ide_sect_range_ok(IDEState
*s
,
603 uint64_t sector
, uint64_t nb_sectors
)
605 uint64_t total_sectors
;
607 blk_get_geometry(s
->blk
, &total_sectors
);
608 if (sector
> total_sectors
|| nb_sectors
> total_sectors
- sector
) {
614 static void ide_buffered_readv_cb(void *opaque
, int ret
)
616 IDEBufferedRequest
*req
= opaque
;
617 if (!req
->orphaned
) {
619 qemu_iovec_from_buf(req
->original_qiov
, 0, req
->iov
.iov_base
,
620 req
->original_qiov
->size
);
622 req
->original_cb(req
->original_opaque
, ret
);
624 QLIST_REMOVE(req
, list
);
625 qemu_vfree(req
->iov
.iov_base
);
629 #define MAX_BUFFERED_REQS 16
631 BlockAIOCB
*ide_buffered_readv(IDEState
*s
, int64_t sector_num
,
632 QEMUIOVector
*iov
, int nb_sectors
,
633 BlockCompletionFunc
*cb
, void *opaque
)
636 IDEBufferedRequest
*req
;
639 QLIST_FOREACH(req
, &s
->buffered_requests
, list
) {
642 if (c
> MAX_BUFFERED_REQS
) {
643 return blk_abort_aio_request(s
->blk
, cb
, opaque
, -EIO
);
646 req
= g_new0(IDEBufferedRequest
, 1);
647 req
->original_qiov
= iov
;
648 req
->original_cb
= cb
;
649 req
->original_opaque
= opaque
;
650 req
->iov
.iov_base
= qemu_blockalign(blk_bs(s
->blk
), iov
->size
);
651 req
->iov
.iov_len
= iov
->size
;
652 qemu_iovec_init_external(&req
->qiov
, &req
->iov
, 1);
654 aioreq
= blk_aio_preadv(s
->blk
, sector_num
<< BDRV_SECTOR_BITS
,
655 &req
->qiov
, 0, ide_buffered_readv_cb
, req
);
657 QLIST_INSERT_HEAD(&s
->buffered_requests
, req
, list
);
662 * Cancel all pending DMA requests.
663 * Any buffered DMA requests are instantly canceled,
664 * but any pending unbuffered DMA requests must be waited on.
666 void ide_cancel_dma_sync(IDEState
*s
)
668 IDEBufferedRequest
*req
;
670 /* First invoke the callbacks of all buffered requests
671 * and flag those requests as orphaned. Ideally there
672 * are no unbuffered (Scatter Gather DMA Requests or
673 * write requests) pending and we can avoid to drain. */
674 QLIST_FOREACH(req
, &s
->buffered_requests
, list
) {
675 if (!req
->orphaned
) {
676 trace_ide_cancel_dma_sync_buffered(req
->original_cb
, req
);
677 req
->original_cb(req
->original_opaque
, -ECANCELED
);
679 req
->orphaned
= true;
683 * We can't cancel Scatter Gather DMA in the middle of the
684 * operation or a partial (not full) DMA transfer would reach
685 * the storage so we wait for completion instead (we beahve
686 * like if the DMA was completed by the time the guest trying
687 * to cancel dma with bmdma_cmd_writeb with BM_CMD_START not
690 * In the future we'll be able to safely cancel the I/O if the
691 * whole DMA operation will be submitted to disk with a single
692 * aio operation with preadv/pwritev.
694 if (s
->bus
->dma
->aiocb
) {
695 trace_ide_cancel_dma_sync_remaining();
697 assert(s
->bus
->dma
->aiocb
== NULL
);
701 static void ide_sector_read(IDEState
*s
);
703 static void ide_sector_read_cb(void *opaque
, int ret
)
705 IDEState
*s
= opaque
;
709 s
->status
&= ~BUSY_STAT
;
711 if (ret
== -ECANCELED
) {
715 if (ide_handle_rw_error(s
, -ret
, IDE_RETRY_PIO
|
721 block_acct_done(blk_get_stats(s
->blk
), &s
->acct
);
724 if (n
> s
->req_nb_sectors
) {
725 n
= s
->req_nb_sectors
;
728 ide_set_sector(s
, ide_get_sector(s
) + n
);
730 /* Allow the guest to read the io_buffer */
731 ide_transfer_start(s
, s
->io_buffer
, n
* BDRV_SECTOR_SIZE
, ide_sector_read
);
735 static void ide_sector_read(IDEState
*s
)
740 s
->status
= READY_STAT
| SEEK_STAT
;
741 s
->error
= 0; /* not needed by IDE spec, but needed by Windows */
742 sector_num
= ide_get_sector(s
);
746 ide_transfer_stop(s
);
750 s
->status
|= BUSY_STAT
;
752 if (n
> s
->req_nb_sectors
) {
753 n
= s
->req_nb_sectors
;
756 trace_ide_sector_read(sector_num
, n
);
758 if (!ide_sect_range_ok(s
, sector_num
, n
)) {
760 block_acct_invalid(blk_get_stats(s
->blk
), BLOCK_ACCT_READ
);
764 s
->iov
.iov_base
= s
->io_buffer
;
765 s
->iov
.iov_len
= n
* BDRV_SECTOR_SIZE
;
766 qemu_iovec_init_external(&s
->qiov
, &s
->iov
, 1);
768 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
,
769 n
* BDRV_SECTOR_SIZE
, BLOCK_ACCT_READ
);
770 s
->pio_aiocb
= ide_buffered_readv(s
, sector_num
, &s
->qiov
, n
,
771 ide_sector_read_cb
, s
);
774 void dma_buf_commit(IDEState
*s
, uint32_t tx_bytes
)
776 if (s
->bus
->dma
->ops
->commit_buf
) {
777 s
->bus
->dma
->ops
->commit_buf(s
->bus
->dma
, tx_bytes
);
779 s
->io_buffer_offset
+= tx_bytes
;
780 qemu_sglist_destroy(&s
->sg
);
783 void ide_set_inactive(IDEState
*s
, bool more
)
785 s
->bus
->dma
->aiocb
= NULL
;
787 if (s
->bus
->dma
->ops
->set_inactive
) {
788 s
->bus
->dma
->ops
->set_inactive(s
->bus
->dma
, more
);
793 void ide_dma_error(IDEState
*s
)
795 dma_buf_commit(s
, 0);
796 ide_abort_command(s
);
797 ide_set_inactive(s
, false);
801 int ide_handle_rw_error(IDEState
*s
, int error
, int op
)
803 bool is_read
= (op
& IDE_RETRY_READ
) != 0;
804 BlockErrorAction action
= blk_get_error_action(s
->blk
, is_read
, error
);
806 if (action
== BLOCK_ERROR_ACTION_STOP
) {
807 assert(s
->bus
->retry_unit
== s
->unit
);
808 s
->bus
->error_status
= op
;
809 } else if (action
== BLOCK_ERROR_ACTION_REPORT
) {
810 block_acct_failed(blk_get_stats(s
->blk
), &s
->acct
);
811 if (IS_IDE_RETRY_DMA(op
)) {
813 } else if (IS_IDE_RETRY_ATAPI(op
)) {
814 ide_atapi_io_error(s
, -error
);
819 blk_error_action(s
->blk
, action
, is_read
, error
);
820 return action
!= BLOCK_ERROR_ACTION_IGNORE
;
823 static void ide_dma_cb(void *opaque
, int ret
)
825 IDEState
*s
= opaque
;
829 bool stay_active
= false;
831 if (ret
== -ECANCELED
) {
835 if (ide_handle_rw_error(s
, -ret
, ide_dma_cmd_to_retry(s
->dma_cmd
))) {
836 s
->bus
->dma
->aiocb
= NULL
;
837 dma_buf_commit(s
, 0);
842 n
= s
->io_buffer_size
>> 9;
843 if (n
> s
->nsector
) {
844 /* The PRDs were longer than needed for this request. Shorten them so
845 * we don't get a negative remainder. The Active bit must remain set
846 * after the request completes. */
851 sector_num
= ide_get_sector(s
);
853 assert(n
* 512 == s
->sg
.size
);
854 dma_buf_commit(s
, s
->sg
.size
);
856 ide_set_sector(s
, sector_num
);
860 /* end of transfer ? */
861 if (s
->nsector
== 0) {
862 s
->status
= READY_STAT
| SEEK_STAT
;
867 /* launch next transfer */
869 s
->io_buffer_index
= 0;
870 s
->io_buffer_size
= n
* 512;
871 if (s
->bus
->dma
->ops
->prepare_buf(s
->bus
->dma
, s
->io_buffer_size
) < 512) {
872 /* The PRDs were too short. Reset the Active bit, but don't raise an
874 s
->status
= READY_STAT
| SEEK_STAT
;
875 dma_buf_commit(s
, 0);
879 trace_ide_dma_cb(s
, sector_num
, n
, IDE_DMA_CMD_str(s
->dma_cmd
));
881 if ((s
->dma_cmd
== IDE_DMA_READ
|| s
->dma_cmd
== IDE_DMA_WRITE
) &&
882 !ide_sect_range_ok(s
, sector_num
, n
)) {
884 block_acct_invalid(blk_get_stats(s
->blk
), s
->acct
.type
);
888 offset
= sector_num
<< BDRV_SECTOR_BITS
;
889 switch (s
->dma_cmd
) {
891 s
->bus
->dma
->aiocb
= dma_blk_read(s
->blk
, &s
->sg
, offset
,
892 BDRV_SECTOR_SIZE
, ide_dma_cb
, s
);
895 s
->bus
->dma
->aiocb
= dma_blk_write(s
->blk
, &s
->sg
, offset
,
896 BDRV_SECTOR_SIZE
, ide_dma_cb
, s
);
899 s
->bus
->dma
->aiocb
= dma_blk_io(blk_get_aio_context(s
->blk
),
900 &s
->sg
, offset
, BDRV_SECTOR_SIZE
,
901 ide_issue_trim
, s
->blk
, ide_dma_cb
, s
,
902 DMA_DIRECTION_TO_DEVICE
);
910 if (s
->dma_cmd
== IDE_DMA_READ
|| s
->dma_cmd
== IDE_DMA_WRITE
) {
911 block_acct_done(blk_get_stats(s
->blk
), &s
->acct
);
913 ide_set_inactive(s
, stay_active
);
916 static void ide_sector_start_dma(IDEState
*s
, enum ide_dma_cmd dma_cmd
)
918 s
->status
= READY_STAT
| SEEK_STAT
| DRQ_STAT
;
919 s
->io_buffer_size
= 0;
920 s
->dma_cmd
= dma_cmd
;
924 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
,
925 s
->nsector
* BDRV_SECTOR_SIZE
, BLOCK_ACCT_READ
);
928 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
,
929 s
->nsector
* BDRV_SECTOR_SIZE
, BLOCK_ACCT_WRITE
);
935 ide_start_dma(s
, ide_dma_cb
);
938 void ide_start_dma(IDEState
*s
, BlockCompletionFunc
*cb
)
940 s
->io_buffer_index
= 0;
942 if (s
->bus
->dma
->ops
->start_dma
) {
943 s
->bus
->dma
->ops
->start_dma(s
->bus
->dma
, s
, cb
);
947 static void ide_sector_write(IDEState
*s
);
949 static void ide_sector_write_timer_cb(void *opaque
)
951 IDEState
*s
= opaque
;
955 static void ide_sector_write_cb(void *opaque
, int ret
)
957 IDEState
*s
= opaque
;
960 if (ret
== -ECANCELED
) {
965 s
->status
&= ~BUSY_STAT
;
968 if (ide_handle_rw_error(s
, -ret
, IDE_RETRY_PIO
)) {
973 block_acct_done(blk_get_stats(s
->blk
), &s
->acct
);
976 if (n
> s
->req_nb_sectors
) {
977 n
= s
->req_nb_sectors
;
981 ide_set_sector(s
, ide_get_sector(s
) + n
);
982 if (s
->nsector
== 0) {
983 /* no more sectors to write */
984 ide_transfer_stop(s
);
987 if (n1
> s
->req_nb_sectors
) {
988 n1
= s
->req_nb_sectors
;
990 ide_transfer_start(s
, s
->io_buffer
, n1
* BDRV_SECTOR_SIZE
,
994 if (win2k_install_hack
&& ((++s
->irq_count
% 16) == 0)) {
995 /* It seems there is a bug in the Windows 2000 installer HDD
996 IDE driver which fills the disk with empty logs when the
997 IDE write IRQ comes too early. This hack tries to correct
998 that at the expense of slower write performances. Use this
999 option _only_ to install Windows 2000. You must disable it
1001 timer_mod(s
->sector_write_timer
, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) +
1002 (NANOSECONDS_PER_SECOND
/ 1000));
1004 ide_set_irq(s
->bus
);
1008 static void ide_sector_write(IDEState
*s
)
1013 s
->status
= READY_STAT
| SEEK_STAT
| BUSY_STAT
;
1014 sector_num
= ide_get_sector(s
);
1017 if (n
> s
->req_nb_sectors
) {
1018 n
= s
->req_nb_sectors
;
1021 trace_ide_sector_write(sector_num
, n
);
1023 if (!ide_sect_range_ok(s
, sector_num
, n
)) {
1025 block_acct_invalid(blk_get_stats(s
->blk
), BLOCK_ACCT_WRITE
);
1029 s
->iov
.iov_base
= s
->io_buffer
;
1030 s
->iov
.iov_len
= n
* BDRV_SECTOR_SIZE
;
1031 qemu_iovec_init_external(&s
->qiov
, &s
->iov
, 1);
1033 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
,
1034 n
* BDRV_SECTOR_SIZE
, BLOCK_ACCT_WRITE
);
1035 s
->pio_aiocb
= blk_aio_pwritev(s
->blk
, sector_num
<< BDRV_SECTOR_BITS
,
1036 &s
->qiov
, 0, ide_sector_write_cb
, s
);
1039 static void ide_flush_cb(void *opaque
, int ret
)
1041 IDEState
*s
= opaque
;
1043 s
->pio_aiocb
= NULL
;
1045 if (ret
== -ECANCELED
) {
1049 /* XXX: What sector number to set here? */
1050 if (ide_handle_rw_error(s
, -ret
, IDE_RETRY_FLUSH
)) {
1056 block_acct_done(blk_get_stats(s
->blk
), &s
->acct
);
1058 s
->status
= READY_STAT
| SEEK_STAT
;
1060 ide_set_irq(s
->bus
);
1063 static void ide_flush_cache(IDEState
*s
)
1065 if (s
->blk
== NULL
) {
1070 s
->status
|= BUSY_STAT
;
1072 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
, 0, BLOCK_ACCT_FLUSH
);
1074 if (blk_bs(s
->blk
)) {
1075 s
->pio_aiocb
= blk_aio_flush(s
->blk
, ide_flush_cb
, s
);
1077 /* XXX blk_aio_flush() crashes when blk_bs(blk) is NULL, remove this
1078 * temporary workaround when blk_aio_*() functions handle NULL blk_bs.
1084 static void ide_cfata_metadata_inquiry(IDEState
*s
)
1089 p
= (uint16_t *) s
->io_buffer
;
1090 memset(p
, 0, 0x200);
1091 spd
= ((s
->mdata_size
- 1) >> 9) + 1;
1093 put_le16(p
+ 0, 0x0001); /* Data format revision */
1094 put_le16(p
+ 1, 0x0000); /* Media property: silicon */
1095 put_le16(p
+ 2, s
->media_changed
); /* Media status */
1096 put_le16(p
+ 3, s
->mdata_size
& 0xffff); /* Capacity in bytes (low) */
1097 put_le16(p
+ 4, s
->mdata_size
>> 16); /* Capacity in bytes (high) */
1098 put_le16(p
+ 5, spd
& 0xffff); /* Sectors per device (low) */
1099 put_le16(p
+ 6, spd
>> 16); /* Sectors per device (high) */
1102 static void ide_cfata_metadata_read(IDEState
*s
)
1106 if (((s
->hcyl
<< 16) | s
->lcyl
) << 9 > s
->mdata_size
+ 2) {
1107 s
->status
= ERR_STAT
;
1108 s
->error
= ABRT_ERR
;
1112 p
= (uint16_t *) s
->io_buffer
;
1113 memset(p
, 0, 0x200);
1115 put_le16(p
+ 0, s
->media_changed
); /* Media status */
1116 memcpy(p
+ 1, s
->mdata_storage
+ (((s
->hcyl
<< 16) | s
->lcyl
) << 9),
1117 MIN(MIN(s
->mdata_size
- (((s
->hcyl
<< 16) | s
->lcyl
) << 9),
1118 s
->nsector
<< 9), 0x200 - 2));
1121 static void ide_cfata_metadata_write(IDEState
*s
)
1123 if (((s
->hcyl
<< 16) | s
->lcyl
) << 9 > s
->mdata_size
+ 2) {
1124 s
->status
= ERR_STAT
;
1125 s
->error
= ABRT_ERR
;
1129 s
->media_changed
= 0;
1131 memcpy(s
->mdata_storage
+ (((s
->hcyl
<< 16) | s
->lcyl
) << 9),
1133 MIN(MIN(s
->mdata_size
- (((s
->hcyl
<< 16) | s
->lcyl
) << 9),
1134 s
->nsector
<< 9), 0x200 - 2));
1137 /* called when the inserted state of the media has changed */
1138 static void ide_cd_change_cb(void *opaque
, bool load
, Error
**errp
)
1140 IDEState
*s
= opaque
;
1141 uint64_t nb_sectors
;
1143 s
->tray_open
= !load
;
1144 blk_get_geometry(s
->blk
, &nb_sectors
);
1145 s
->nb_sectors
= nb_sectors
;
1148 * First indicate to the guest that a CD has been removed. That's
1149 * done on the next command the guest sends us.
1151 * Then we set UNIT_ATTENTION, by which the guest will
1152 * detect a new CD in the drive. See ide_atapi_cmd() for details.
1154 s
->cdrom_changed
= 1;
1155 s
->events
.new_media
= true;
1156 s
->events
.eject_request
= false;
1157 ide_set_irq(s
->bus
);
1160 static void ide_cd_eject_request_cb(void *opaque
, bool force
)
1162 IDEState
*s
= opaque
;
1164 s
->events
.eject_request
= true;
1166 s
->tray_locked
= false;
1168 ide_set_irq(s
->bus
);
1171 static void ide_cmd_lba48_transform(IDEState
*s
, int lba48
)
1175 /* handle the 'magic' 0 nsector count conversion here. to avoid
1176 * fiddling with the rest of the read logic, we just store the
1177 * full sector count in ->nsector and ignore ->hob_nsector from now
1183 if (!s
->nsector
&& !s
->hob_nsector
)
1186 int lo
= s
->nsector
;
1187 int hi
= s
->hob_nsector
;
1189 s
->nsector
= (hi
<< 8) | lo
;
1194 static void ide_clear_hob(IDEBus
*bus
)
1196 /* any write clears HOB high bit of device control register */
1197 bus
->ifs
[0].select
&= ~(1 << 7);
1198 bus
->ifs
[1].select
&= ~(1 << 7);
/* IOport [W]rite [R]egisters: offsets 0-7 within the ATA command block,
 * as decoded by ide_ioport_write(). */
enum ATA_IOPORT_WR {
    ATA_IOPORT_WR_DATA = 0,
    ATA_IOPORT_WR_FEATURES = 1,
    ATA_IOPORT_WR_SECTOR_COUNT = 2,
    ATA_IOPORT_WR_SECTOR_NUMBER = 3,
    ATA_IOPORT_WR_CYLINDER_LOW = 4,
    ATA_IOPORT_WR_CYLINDER_HIGH = 5,
    ATA_IOPORT_WR_DEVICE_HEAD = 6,
    ATA_IOPORT_WR_COMMAND = 7,
    ATA_IOPORT_WR_NUM_REGISTERS,
};
1214 const char *ATA_IOPORT_WR_lookup
[ATA_IOPORT_WR_NUM_REGISTERS
] = {
1215 [ATA_IOPORT_WR_DATA
] = "Data",
1216 [ATA_IOPORT_WR_FEATURES
] = "Features",
1217 [ATA_IOPORT_WR_SECTOR_COUNT
] = "Sector Count",
1218 [ATA_IOPORT_WR_SECTOR_NUMBER
] = "Sector Number",
1219 [ATA_IOPORT_WR_CYLINDER_LOW
] = "Cylinder Low",
1220 [ATA_IOPORT_WR_CYLINDER_HIGH
] = "Cylinder High",
1221 [ATA_IOPORT_WR_DEVICE_HEAD
] = "Device/Head",
1222 [ATA_IOPORT_WR_COMMAND
] = "Command"
1225 void ide_ioport_write(void *opaque
, uint32_t addr
, uint32_t val
)
1227 IDEBus
*bus
= opaque
;
1228 IDEState
*s
= idebus_active_if(bus
);
1229 int reg_num
= addr
& 7;
1231 trace_ide_ioport_write(addr
, ATA_IOPORT_WR_lookup
[reg_num
], val
, bus
, s
);
1233 /* ignore writes to command block while busy with previous command */
1234 if (reg_num
!= 7 && (s
->status
& (BUSY_STAT
|DRQ_STAT
))) {
1241 case ATA_IOPORT_WR_FEATURES
:
1243 /* NOTE: data is written to the two drives */
1244 bus
->ifs
[0].hob_feature
= bus
->ifs
[0].feature
;
1245 bus
->ifs
[1].hob_feature
= bus
->ifs
[1].feature
;
1246 bus
->ifs
[0].feature
= val
;
1247 bus
->ifs
[1].feature
= val
;
1249 case ATA_IOPORT_WR_SECTOR_COUNT
:
1251 bus
->ifs
[0].hob_nsector
= bus
->ifs
[0].nsector
;
1252 bus
->ifs
[1].hob_nsector
= bus
->ifs
[1].nsector
;
1253 bus
->ifs
[0].nsector
= val
;
1254 bus
->ifs
[1].nsector
= val
;
1256 case ATA_IOPORT_WR_SECTOR_NUMBER
:
1258 bus
->ifs
[0].hob_sector
= bus
->ifs
[0].sector
;
1259 bus
->ifs
[1].hob_sector
= bus
->ifs
[1].sector
;
1260 bus
->ifs
[0].sector
= val
;
1261 bus
->ifs
[1].sector
= val
;
1263 case ATA_IOPORT_WR_CYLINDER_LOW
:
1265 bus
->ifs
[0].hob_lcyl
= bus
->ifs
[0].lcyl
;
1266 bus
->ifs
[1].hob_lcyl
= bus
->ifs
[1].lcyl
;
1267 bus
->ifs
[0].lcyl
= val
;
1268 bus
->ifs
[1].lcyl
= val
;
1270 case ATA_IOPORT_WR_CYLINDER_HIGH
:
1272 bus
->ifs
[0].hob_hcyl
= bus
->ifs
[0].hcyl
;
1273 bus
->ifs
[1].hob_hcyl
= bus
->ifs
[1].hcyl
;
1274 bus
->ifs
[0].hcyl
= val
;
1275 bus
->ifs
[1].hcyl
= val
;
1277 case ATA_IOPORT_WR_DEVICE_HEAD
:
1278 /* FIXME: HOB readback uses bit 7 */
1279 bus
->ifs
[0].select
= (val
& ~0x10) | 0xa0;
1280 bus
->ifs
[1].select
= (val
| 0x10) | 0xa0;
1282 bus
->unit
= (val
>> 4) & 1;
1285 case ATA_IOPORT_WR_COMMAND
:
1287 ide_exec_cmd(bus
, val
);
1292 static void ide_reset(IDEState
*s
)
1297 blk_aio_cancel(s
->pio_aiocb
);
1298 s
->pio_aiocb
= NULL
;
1301 if (s
->drive_kind
== IDE_CFATA
)
1302 s
->mult_sectors
= 0;
1304 s
->mult_sectors
= MAX_MULT_SECTORS
;
1321 s
->status
= READY_STAT
| SEEK_STAT
;
1325 /* ATAPI specific */
1328 s
->cdrom_changed
= 0;
1329 s
->packet_transfer_size
= 0;
1330 s
->elementary_transfer_size
= 0;
1331 s
->io_buffer_index
= 0;
1332 s
->cd_sector_size
= 0;
1337 s
->io_buffer_size
= 0;
1338 s
->req_nb_sectors
= 0;
1340 ide_set_signature(s
);
1341 /* init the transfer handler so that 0xffff is returned on data
1343 s
->end_transfer_func
= ide_dummy_transfer_stop
;
1344 ide_dummy_transfer_stop(s
);
1345 s
->media_changed
= 0;
1348 static bool cmd_nop(IDEState
*s
, uint8_t cmd
)
1353 static bool cmd_device_reset(IDEState
*s
, uint8_t cmd
)
1355 /* Halt PIO (in the DRQ phase), then DMA */
1356 ide_transfer_cancel(s
);
1357 ide_cancel_dma_sync(s
);
1359 /* Reset any PIO commands, reset signature, etc */
1362 /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
1363 * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
1366 /* Do not overwrite status register */
1370 static bool cmd_data_set_management(IDEState
*s
, uint8_t cmd
)
1372 switch (s
->feature
) {
1375 ide_sector_start_dma(s
, IDE_DMA_TRIM
);
1381 ide_abort_command(s
);
1385 static bool cmd_identify(IDEState
*s
, uint8_t cmd
)
1387 if (s
->blk
&& s
->drive_kind
!= IDE_CD
) {
1388 if (s
->drive_kind
!= IDE_CFATA
) {
1391 ide_cfata_identify(s
);
1393 s
->status
= READY_STAT
| SEEK_STAT
;
1394 ide_transfer_start(s
, s
->io_buffer
, 512, ide_transfer_stop
);
1395 ide_set_irq(s
->bus
);
1398 if (s
->drive_kind
== IDE_CD
) {
1399 ide_set_signature(s
);
1401 ide_abort_command(s
);
1407 static bool cmd_verify(IDEState
*s
, uint8_t cmd
)
1409 bool lba48
= (cmd
== WIN_VERIFY_EXT
);
1411 /* do sector number check ? */
1412 ide_cmd_lba48_transform(s
, lba48
);
1417 static bool cmd_set_multiple_mode(IDEState
*s
, uint8_t cmd
)
1419 if (s
->drive_kind
== IDE_CFATA
&& s
->nsector
== 0) {
1420 /* Disable Read and Write Multiple */
1421 s
->mult_sectors
= 0;
1422 } else if ((s
->nsector
& 0xff) != 0 &&
1423 ((s
->nsector
& 0xff) > MAX_MULT_SECTORS
||
1424 (s
->nsector
& (s
->nsector
- 1)) != 0)) {
1425 ide_abort_command(s
);
1427 s
->mult_sectors
= s
->nsector
& 0xff;
1433 static bool cmd_read_multiple(IDEState
*s
, uint8_t cmd
)
1435 bool lba48
= (cmd
== WIN_MULTREAD_EXT
);
1437 if (!s
->blk
|| !s
->mult_sectors
) {
1438 ide_abort_command(s
);
1442 ide_cmd_lba48_transform(s
, lba48
);
1443 s
->req_nb_sectors
= s
->mult_sectors
;
1448 static bool cmd_write_multiple(IDEState
*s
, uint8_t cmd
)
1450 bool lba48
= (cmd
== WIN_MULTWRITE_EXT
);
1453 if (!s
->blk
|| !s
->mult_sectors
) {
1454 ide_abort_command(s
);
1458 ide_cmd_lba48_transform(s
, lba48
);
1460 s
->req_nb_sectors
= s
->mult_sectors
;
1461 n
= MIN(s
->nsector
, s
->req_nb_sectors
);
1463 s
->status
= SEEK_STAT
| READY_STAT
;
1464 ide_transfer_start(s
, s
->io_buffer
, 512 * n
, ide_sector_write
);
1466 s
->media_changed
= 1;
1471 static bool cmd_read_pio(IDEState
*s
, uint8_t cmd
)
1473 bool lba48
= (cmd
== WIN_READ_EXT
);
1475 if (s
->drive_kind
== IDE_CD
) {
1476 ide_set_signature(s
); /* odd, but ATA4 8.27.5.2 requires it */
1477 ide_abort_command(s
);
1482 ide_abort_command(s
);
1486 ide_cmd_lba48_transform(s
, lba48
);
1487 s
->req_nb_sectors
= 1;
1493 static bool cmd_write_pio(IDEState
*s
, uint8_t cmd
)
1495 bool lba48
= (cmd
== WIN_WRITE_EXT
);
1498 ide_abort_command(s
);
1502 ide_cmd_lba48_transform(s
, lba48
);
1504 s
->req_nb_sectors
= 1;
1505 s
->status
= SEEK_STAT
| READY_STAT
;
1506 ide_transfer_start(s
, s
->io_buffer
, 512, ide_sector_write
);
1508 s
->media_changed
= 1;
1513 static bool cmd_read_dma(IDEState
*s
, uint8_t cmd
)
1515 bool lba48
= (cmd
== WIN_READDMA_EXT
);
1518 ide_abort_command(s
);
1522 ide_cmd_lba48_transform(s
, lba48
);
1523 ide_sector_start_dma(s
, IDE_DMA_READ
);
1528 static bool cmd_write_dma(IDEState
*s
, uint8_t cmd
)
1530 bool lba48
= (cmd
== WIN_WRITEDMA_EXT
);
1533 ide_abort_command(s
);
1537 ide_cmd_lba48_transform(s
, lba48
);
1538 ide_sector_start_dma(s
, IDE_DMA_WRITE
);
1540 s
->media_changed
= 1;
1545 static bool cmd_flush_cache(IDEState
*s
, uint8_t cmd
)
1551 static bool cmd_seek(IDEState
*s
, uint8_t cmd
)
1553 /* XXX: Check that seek is within bounds */
1557 static bool cmd_read_native_max(IDEState
*s
, uint8_t cmd
)
1559 bool lba48
= (cmd
== WIN_READ_NATIVE_MAX_EXT
);
1561 /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1562 if (s
->nb_sectors
== 0) {
1563 ide_abort_command(s
);
1567 ide_cmd_lba48_transform(s
, lba48
);
1568 ide_set_sector(s
, s
->nb_sectors
- 1);
1573 static bool cmd_check_power_mode(IDEState
*s
, uint8_t cmd
)
1575 s
->nsector
= 0xff; /* device active or idle */
1579 static bool cmd_set_features(IDEState
*s
, uint8_t cmd
)
1581 uint16_t *identify_data
;
1584 ide_abort_command(s
);
1588 /* XXX: valid for CDROM ? */
1589 switch (s
->feature
) {
1590 case 0x02: /* write cache enable */
1591 blk_set_enable_write_cache(s
->blk
, true);
1592 identify_data
= (uint16_t *)s
->identify_data
;
1593 put_le16(identify_data
+ 85, (1 << 14) | (1 << 5) | 1);
1595 case 0x82: /* write cache disable */
1596 blk_set_enable_write_cache(s
->blk
, false);
1597 identify_data
= (uint16_t *)s
->identify_data
;
1598 put_le16(identify_data
+ 85, (1 << 14) | 1);
1601 case 0xcc: /* reverting to power-on defaults enable */
1602 case 0x66: /* reverting to power-on defaults disable */
1603 case 0xaa: /* read look-ahead enable */
1604 case 0x55: /* read look-ahead disable */
1605 case 0x05: /* set advanced power management mode */
1606 case 0x85: /* disable advanced power management mode */
1607 case 0x69: /* NOP */
1608 case 0x67: /* NOP */
1609 case 0x96: /* NOP */
1610 case 0x9a: /* NOP */
1611 case 0x42: /* enable Automatic Acoustic Mode */
1612 case 0xc2: /* disable Automatic Acoustic Mode */
1614 case 0x03: /* set transfer mode */
1616 uint8_t val
= s
->nsector
& 0x07;
1617 identify_data
= (uint16_t *)s
->identify_data
;
1619 switch (s
->nsector
>> 3) {
1620 case 0x00: /* pio default */
1621 case 0x01: /* pio mode */
1622 put_le16(identify_data
+ 62, 0x07);
1623 put_le16(identify_data
+ 63, 0x07);
1624 put_le16(identify_data
+ 88, 0x3f);
1626 case 0x02: /* sigle word dma mode*/
1627 put_le16(identify_data
+ 62, 0x07 | (1 << (val
+ 8)));
1628 put_le16(identify_data
+ 63, 0x07);
1629 put_le16(identify_data
+ 88, 0x3f);
1631 case 0x04: /* mdma mode */
1632 put_le16(identify_data
+ 62, 0x07);
1633 put_le16(identify_data
+ 63, 0x07 | (1 << (val
+ 8)));
1634 put_le16(identify_data
+ 88, 0x3f);
1636 case 0x08: /* udma mode */
1637 put_le16(identify_data
+ 62, 0x07);
1638 put_le16(identify_data
+ 63, 0x07);
1639 put_le16(identify_data
+ 88, 0x3f | (1 << (val
+ 8)));
1649 ide_abort_command(s
);
1654 /*** ATAPI commands ***/
1656 static bool cmd_identify_packet(IDEState
*s
, uint8_t cmd
)
1658 ide_atapi_identify(s
);
1659 s
->status
= READY_STAT
| SEEK_STAT
;
1660 ide_transfer_start(s
, s
->io_buffer
, 512, ide_transfer_stop
);
1661 ide_set_irq(s
->bus
);
1665 static bool cmd_exec_dev_diagnostic(IDEState
*s
, uint8_t cmd
)
1667 ide_set_signature(s
);
1669 if (s
->drive_kind
== IDE_CD
) {
1670 s
->status
= 0; /* ATAPI spec (v6) section 9.10 defines packet
1671 * devices to return a clear status register
1672 * with READY_STAT *not* set. */
1675 s
->status
= READY_STAT
| SEEK_STAT
;
1676 /* The bits of the error register are not as usual for this command!
1677 * They are part of the regular output (this is why ERR_STAT isn't set)
1678 * Device 0 passed, Device 1 passed or not present. */
1680 ide_set_irq(s
->bus
);
1686 static bool cmd_packet(IDEState
*s
, uint8_t cmd
)
1688 /* overlapping commands not supported */
1689 if (s
->feature
& 0x02) {
1690 ide_abort_command(s
);
1694 s
->status
= READY_STAT
| SEEK_STAT
;
1695 s
->atapi_dma
= s
->feature
& 1;
1697 s
->dma_cmd
= IDE_DMA_ATAPI
;
1700 ide_transfer_start(s
, s
->io_buffer
, ATAPI_PACKET_SIZE
,
1706 /*** CF-ATA commands ***/
1708 static bool cmd_cfa_req_ext_error_code(IDEState
*s
, uint8_t cmd
)
1710 s
->error
= 0x09; /* miscellaneous error */
1711 s
->status
= READY_STAT
| SEEK_STAT
;
1712 ide_set_irq(s
->bus
);
1717 static bool cmd_cfa_erase_sectors(IDEState
*s
, uint8_t cmd
)
1719 /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1720 * required for Windows 8 to work with AHCI */
1722 if (cmd
== CFA_WEAR_LEVEL
) {
1726 if (cmd
== CFA_ERASE_SECTORS
) {
1727 s
->media_changed
= 1;
1733 static bool cmd_cfa_translate_sector(IDEState
*s
, uint8_t cmd
)
1735 s
->status
= READY_STAT
| SEEK_STAT
;
1737 memset(s
->io_buffer
, 0, 0x200);
1738 s
->io_buffer
[0x00] = s
->hcyl
; /* Cyl MSB */
1739 s
->io_buffer
[0x01] = s
->lcyl
; /* Cyl LSB */
1740 s
->io_buffer
[0x02] = s
->select
; /* Head */
1741 s
->io_buffer
[0x03] = s
->sector
; /* Sector */
1742 s
->io_buffer
[0x04] = ide_get_sector(s
) >> 16; /* LBA MSB */
1743 s
->io_buffer
[0x05] = ide_get_sector(s
) >> 8; /* LBA */
1744 s
->io_buffer
[0x06] = ide_get_sector(s
) >> 0; /* LBA LSB */
1745 s
->io_buffer
[0x13] = 0x00; /* Erase flag */
1746 s
->io_buffer
[0x18] = 0x00; /* Hot count */
1747 s
->io_buffer
[0x19] = 0x00; /* Hot count */
1748 s
->io_buffer
[0x1a] = 0x01; /* Hot count */
1750 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1751 ide_set_irq(s
->bus
);
1756 static bool cmd_cfa_access_metadata_storage(IDEState
*s
, uint8_t cmd
)
1758 switch (s
->feature
) {
1759 case 0x02: /* Inquiry Metadata Storage */
1760 ide_cfata_metadata_inquiry(s
);
1762 case 0x03: /* Read Metadata Storage */
1763 ide_cfata_metadata_read(s
);
1765 case 0x04: /* Write Metadata Storage */
1766 ide_cfata_metadata_write(s
);
1769 ide_abort_command(s
);
1773 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1774 s
->status
= 0x00; /* NOTE: READY is _not_ set */
1775 ide_set_irq(s
->bus
);
1780 static bool cmd_ibm_sense_condition(IDEState
*s
, uint8_t cmd
)
1782 switch (s
->feature
) {
1783 case 0x01: /* sense temperature in device */
1784 s
->nsector
= 0x50; /* +20 C */
1787 ide_abort_command(s
);
1795 /*** SMART commands ***/
1797 static bool cmd_smart(IDEState
*s
, uint8_t cmd
)
1801 if (s
->hcyl
!= 0xc2 || s
->lcyl
!= 0x4f) {
1805 if (!s
->smart_enabled
&& s
->feature
!= SMART_ENABLE
) {
1809 switch (s
->feature
) {
1811 s
->smart_enabled
= 0;
1815 s
->smart_enabled
= 1;
1818 case SMART_ATTR_AUTOSAVE
:
1819 switch (s
->sector
) {
1821 s
->smart_autosave
= 0;
1824 s
->smart_autosave
= 1;
1832 if (!s
->smart_errors
) {
1841 case SMART_READ_THRESH
:
1842 memset(s
->io_buffer
, 0, 0x200);
1843 s
->io_buffer
[0] = 0x01; /* smart struct version */
1845 for (n
= 0; n
< ARRAY_SIZE(smart_attributes
); n
++) {
1846 s
->io_buffer
[2 + 0 + (n
* 12)] = smart_attributes
[n
][0];
1847 s
->io_buffer
[2 + 1 + (n
* 12)] = smart_attributes
[n
][11];
1851 for (n
= 0; n
< 511; n
++) {
1852 s
->io_buffer
[511] += s
->io_buffer
[n
];
1854 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
1856 s
->status
= READY_STAT
| SEEK_STAT
;
1857 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1858 ide_set_irq(s
->bus
);
1861 case SMART_READ_DATA
:
1862 memset(s
->io_buffer
, 0, 0x200);
1863 s
->io_buffer
[0] = 0x01; /* smart struct version */
1865 for (n
= 0; n
< ARRAY_SIZE(smart_attributes
); n
++) {
1867 for (i
= 0; i
< 11; i
++) {
1868 s
->io_buffer
[2 + i
+ (n
* 12)] = smart_attributes
[n
][i
];
1872 s
->io_buffer
[362] = 0x02 | (s
->smart_autosave
? 0x80 : 0x00);
1873 if (s
->smart_selftest_count
== 0) {
1874 s
->io_buffer
[363] = 0;
1877 s
->smart_selftest_data
[3 +
1878 (s
->smart_selftest_count
- 1) *
1881 s
->io_buffer
[364] = 0x20;
1882 s
->io_buffer
[365] = 0x01;
1883 /* offline data collection capacity: execute + self-test*/
1884 s
->io_buffer
[367] = (1 << 4 | 1 << 3 | 1);
1885 s
->io_buffer
[368] = 0x03; /* smart capability (1) */
1886 s
->io_buffer
[369] = 0x00; /* smart capability (2) */
1887 s
->io_buffer
[370] = 0x01; /* error logging supported */
1888 s
->io_buffer
[372] = 0x02; /* minutes for poll short test */
1889 s
->io_buffer
[373] = 0x36; /* minutes for poll ext test */
1890 s
->io_buffer
[374] = 0x01; /* minutes for poll conveyance */
1892 for (n
= 0; n
< 511; n
++) {
1893 s
->io_buffer
[511] += s
->io_buffer
[n
];
1895 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
1897 s
->status
= READY_STAT
| SEEK_STAT
;
1898 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1899 ide_set_irq(s
->bus
);
1902 case SMART_READ_LOG
:
1903 switch (s
->sector
) {
1904 case 0x01: /* summary smart error log */
1905 memset(s
->io_buffer
, 0, 0x200);
1906 s
->io_buffer
[0] = 0x01;
1907 s
->io_buffer
[1] = 0x00; /* no error entries */
1908 s
->io_buffer
[452] = s
->smart_errors
& 0xff;
1909 s
->io_buffer
[453] = (s
->smart_errors
& 0xff00) >> 8;
1911 for (n
= 0; n
< 511; n
++) {
1912 s
->io_buffer
[511] += s
->io_buffer
[n
];
1914 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
1916 case 0x06: /* smart self test log */
1917 memset(s
->io_buffer
, 0, 0x200);
1918 s
->io_buffer
[0] = 0x01;
1919 if (s
->smart_selftest_count
== 0) {
1920 s
->io_buffer
[508] = 0;
1922 s
->io_buffer
[508] = s
->smart_selftest_count
;
1923 for (n
= 2; n
< 506; n
++) {
1924 s
->io_buffer
[n
] = s
->smart_selftest_data
[n
];
1928 for (n
= 0; n
< 511; n
++) {
1929 s
->io_buffer
[511] += s
->io_buffer
[n
];
1931 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
1936 s
->status
= READY_STAT
| SEEK_STAT
;
1937 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1938 ide_set_irq(s
->bus
);
1941 case SMART_EXECUTE_OFFLINE
:
1942 switch (s
->sector
) {
1943 case 0: /* off-line routine */
1944 case 1: /* short self test */
1945 case 2: /* extended self test */
1946 s
->smart_selftest_count
++;
1947 if (s
->smart_selftest_count
> 21) {
1948 s
->smart_selftest_count
= 1;
1950 n
= 2 + (s
->smart_selftest_count
- 1) * 24;
1951 s
->smart_selftest_data
[n
] = s
->sector
;
1952 s
->smart_selftest_data
[n
+ 1] = 0x00; /* OK and finished */
1953 s
->smart_selftest_data
[n
+ 2] = 0x34; /* hour count lsb */
1954 s
->smart_selftest_data
[n
+ 3] = 0x12; /* hour count msb */
1963 ide_abort_command(s
);
1967 #define HD_OK (1u << IDE_HD)
1968 #define CD_OK (1u << IDE_CD)
1969 #define CFA_OK (1u << IDE_CFATA)
1970 #define HD_CFA_OK (HD_OK | CFA_OK)
1971 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
1973 /* Set the Disk Seek Completed status bit during completion */
1974 #define SET_DSC (1u << 8)
1976 /* See ACS-2 T13/2015-D Table B.2 Command codes */
1977 static const struct {
1978 /* Returns true if the completion code should be run */
1979 bool (*handler
)(IDEState
*s
, uint8_t cmd
);
1981 } ide_cmd_table
[0x100] = {
1982 /* NOP not implemented, mandatory for CD */
1983 [CFA_REQ_EXT_ERROR_CODE
] = { cmd_cfa_req_ext_error_code
, CFA_OK
},
1984 [WIN_DSM
] = { cmd_data_set_management
, HD_CFA_OK
},
1985 [WIN_DEVICE_RESET
] = { cmd_device_reset
, CD_OK
},
1986 [WIN_RECAL
] = { cmd_nop
, HD_CFA_OK
| SET_DSC
},
1987 [WIN_READ
] = { cmd_read_pio
, ALL_OK
},
1988 [WIN_READ_ONCE
] = { cmd_read_pio
, HD_CFA_OK
},
1989 [WIN_READ_EXT
] = { cmd_read_pio
, HD_CFA_OK
},
1990 [WIN_READDMA_EXT
] = { cmd_read_dma
, HD_CFA_OK
},
1991 [WIN_READ_NATIVE_MAX_EXT
] = { cmd_read_native_max
, HD_CFA_OK
| SET_DSC
},
1992 [WIN_MULTREAD_EXT
] = { cmd_read_multiple
, HD_CFA_OK
},
1993 [WIN_WRITE
] = { cmd_write_pio
, HD_CFA_OK
},
1994 [WIN_WRITE_ONCE
] = { cmd_write_pio
, HD_CFA_OK
},
1995 [WIN_WRITE_EXT
] = { cmd_write_pio
, HD_CFA_OK
},
1996 [WIN_WRITEDMA_EXT
] = { cmd_write_dma
, HD_CFA_OK
},
1997 [CFA_WRITE_SECT_WO_ERASE
] = { cmd_write_pio
, CFA_OK
},
1998 [WIN_MULTWRITE_EXT
] = { cmd_write_multiple
, HD_CFA_OK
},
1999 [WIN_WRITE_VERIFY
] = { cmd_write_pio
, HD_CFA_OK
},
2000 [WIN_VERIFY
] = { cmd_verify
, HD_CFA_OK
| SET_DSC
},
2001 [WIN_VERIFY_ONCE
] = { cmd_verify
, HD_CFA_OK
| SET_DSC
},
2002 [WIN_VERIFY_EXT
] = { cmd_verify
, HD_CFA_OK
| SET_DSC
},
2003 [WIN_SEEK
] = { cmd_seek
, HD_CFA_OK
| SET_DSC
},
2004 [CFA_TRANSLATE_SECTOR
] = { cmd_cfa_translate_sector
, CFA_OK
},
2005 [WIN_DIAGNOSE
] = { cmd_exec_dev_diagnostic
, ALL_OK
},
2006 [WIN_SPECIFY
] = { cmd_nop
, HD_CFA_OK
| SET_DSC
},
2007 [WIN_STANDBYNOW2
] = { cmd_nop
, HD_CFA_OK
},
2008 [WIN_IDLEIMMEDIATE2
] = { cmd_nop
, HD_CFA_OK
},
2009 [WIN_STANDBY2
] = { cmd_nop
, HD_CFA_OK
},
2010 [WIN_SETIDLE2
] = { cmd_nop
, HD_CFA_OK
},
2011 [WIN_CHECKPOWERMODE2
] = { cmd_check_power_mode
, HD_CFA_OK
| SET_DSC
},
2012 [WIN_SLEEPNOW2
] = { cmd_nop
, HD_CFA_OK
},
2013 [WIN_PACKETCMD
] = { cmd_packet
, CD_OK
},
2014 [WIN_PIDENTIFY
] = { cmd_identify_packet
, CD_OK
},
2015 [WIN_SMART
] = { cmd_smart
, HD_CFA_OK
| SET_DSC
},
2016 [CFA_ACCESS_METADATA_STORAGE
] = { cmd_cfa_access_metadata_storage
, CFA_OK
},
2017 [CFA_ERASE_SECTORS
] = { cmd_cfa_erase_sectors
, CFA_OK
| SET_DSC
},
2018 [WIN_MULTREAD
] = { cmd_read_multiple
, HD_CFA_OK
},
2019 [WIN_MULTWRITE
] = { cmd_write_multiple
, HD_CFA_OK
},
2020 [WIN_SETMULT
] = { cmd_set_multiple_mode
, HD_CFA_OK
| SET_DSC
},
2021 [WIN_READDMA
] = { cmd_read_dma
, HD_CFA_OK
},
2022 [WIN_READDMA_ONCE
] = { cmd_read_dma
, HD_CFA_OK
},
2023 [WIN_WRITEDMA
] = { cmd_write_dma
, HD_CFA_OK
},
2024 [WIN_WRITEDMA_ONCE
] = { cmd_write_dma
, HD_CFA_OK
},
2025 [CFA_WRITE_MULTI_WO_ERASE
] = { cmd_write_multiple
, CFA_OK
},
2026 [WIN_STANDBYNOW1
] = { cmd_nop
, HD_CFA_OK
},
2027 [WIN_IDLEIMMEDIATE
] = { cmd_nop
, HD_CFA_OK
},
2028 [WIN_STANDBY
] = { cmd_nop
, HD_CFA_OK
},
2029 [WIN_SETIDLE1
] = { cmd_nop
, HD_CFA_OK
},
2030 [WIN_CHECKPOWERMODE1
] = { cmd_check_power_mode
, HD_CFA_OK
| SET_DSC
},
2031 [WIN_SLEEPNOW1
] = { cmd_nop
, HD_CFA_OK
},
2032 [WIN_FLUSH_CACHE
] = { cmd_flush_cache
, ALL_OK
},
2033 [WIN_FLUSH_CACHE_EXT
] = { cmd_flush_cache
, HD_CFA_OK
},
2034 [WIN_IDENTIFY
] = { cmd_identify
, ALL_OK
},
2035 [WIN_SETFEATURES
] = { cmd_set_features
, ALL_OK
| SET_DSC
},
2036 [IBM_SENSE_CONDITION
] = { cmd_ibm_sense_condition
, CFA_OK
| SET_DSC
},
2037 [CFA_WEAR_LEVEL
] = { cmd_cfa_erase_sectors
, HD_CFA_OK
| SET_DSC
},
2038 [WIN_READ_NATIVE_MAX
] = { cmd_read_native_max
, HD_CFA_OK
| SET_DSC
},
2041 static bool ide_cmd_permitted(IDEState
*s
, uint32_t cmd
)
2043 return cmd
< ARRAY_SIZE(ide_cmd_table
)
2044 && (ide_cmd_table
[cmd
].flags
& (1u << s
->drive_kind
));
2047 void ide_exec_cmd(IDEBus
*bus
, uint32_t val
)
2052 s
= idebus_active_if(bus
);
2053 trace_ide_exec_cmd(bus
, s
, val
);
2055 /* ignore commands to non existent slave */
2056 if (s
!= bus
->ifs
&& !s
->blk
) {
2060 /* Only RESET is allowed while BSY and/or DRQ are set,
2061 * and only to ATAPI devices. */
2062 if (s
->status
& (BUSY_STAT
|DRQ_STAT
)) {
2063 if (val
!= WIN_DEVICE_RESET
|| s
->drive_kind
!= IDE_CD
) {
2068 if (!ide_cmd_permitted(s
, val
)) {
2069 ide_abort_command(s
);
2070 ide_set_irq(s
->bus
);
2074 s
->status
= READY_STAT
| BUSY_STAT
;
2076 s
->io_buffer_offset
= 0;
2078 complete
= ide_cmd_table
[val
].handler(s
, val
);
2080 s
->status
&= ~BUSY_STAT
;
2081 assert(!!s
->error
== !!(s
->status
& ERR_STAT
));
2083 if ((ide_cmd_table
[val
].flags
& SET_DSC
) && !s
->error
) {
2084 s
->status
|= SEEK_STAT
;
2088 ide_set_irq(s
->bus
);
2092 /* IOport [R]ead [R]egisters */
enum ATA_IOPORT_RR {
    ATA_IOPORT_RR_DATA = 0,
    ATA_IOPORT_RR_ERROR = 1,
    ATA_IOPORT_RR_SECTOR_COUNT = 2,
    ATA_IOPORT_RR_SECTOR_NUMBER = 3,
    ATA_IOPORT_RR_CYLINDER_LOW = 4,
    ATA_IOPORT_RR_CYLINDER_HIGH = 5,
    ATA_IOPORT_RR_DEVICE_HEAD = 6,
    ATA_IOPORT_RR_STATUS = 7,
    ATA_IOPORT_RR_NUM_REGISTERS,
};

/* Human-readable register names, used by the trace points. */
const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = {
    [ATA_IOPORT_RR_DATA] = "Data",
    [ATA_IOPORT_RR_ERROR] = "Error",
    [ATA_IOPORT_RR_SECTOR_COUNT] = "Sector Count",
    [ATA_IOPORT_RR_SECTOR_NUMBER] = "Sector Number",
    [ATA_IOPORT_RR_CYLINDER_LOW] = "Cylinder Low",
    [ATA_IOPORT_RR_CYLINDER_HIGH] = "Cylinder High",
    [ATA_IOPORT_RR_DEVICE_HEAD] = "Device/Head",
    [ATA_IOPORT_RR_STATUS] = "Status"
};
2116 uint32_t ide_ioport_read(void *opaque
, uint32_t addr
)
2118 IDEBus
*bus
= opaque
;
2119 IDEState
*s
= idebus_active_if(bus
);
2124 /* FIXME: HOB readback uses bit 7, but it's always set right now */
2125 //hob = s->select & (1 << 7);
2128 case ATA_IOPORT_RR_DATA
:
2131 case ATA_IOPORT_RR_ERROR
:
2132 if ((!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) ||
2133 (s
!= bus
->ifs
&& !s
->blk
)) {
2138 ret
= s
->hob_feature
;
2141 case ATA_IOPORT_RR_SECTOR_COUNT
:
2142 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
2145 ret
= s
->nsector
& 0xff;
2147 ret
= s
->hob_nsector
;
2150 case ATA_IOPORT_RR_SECTOR_NUMBER
:
2151 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
2156 ret
= s
->hob_sector
;
2159 case ATA_IOPORT_RR_CYLINDER_LOW
:
2160 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
2168 case ATA_IOPORT_RR_CYLINDER_HIGH
:
2169 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
2177 case ATA_IOPORT_RR_DEVICE_HEAD
:
2178 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
2185 case ATA_IOPORT_RR_STATUS
:
2186 if ((!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) ||
2187 (s
!= bus
->ifs
&& !s
->blk
)) {
2192 qemu_irq_lower(bus
->irq
);
2196 trace_ide_ioport_read(addr
, ATA_IOPORT_RR_lookup
[reg_num
], ret
, bus
, s
);
2200 uint32_t ide_status_read(void *opaque
, uint32_t addr
)
2202 IDEBus
*bus
= opaque
;
2203 IDEState
*s
= idebus_active_if(bus
);
2206 if ((!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) ||
2207 (s
!= bus
->ifs
&& !s
->blk
)) {
2213 trace_ide_status_read(addr
, ret
, bus
, s
);
2217 void ide_cmd_write(void *opaque
, uint32_t addr
, uint32_t val
)
2219 IDEBus
*bus
= opaque
;
2223 trace_ide_cmd_write(addr
, val
, bus
);
2225 /* common for both drives */
2226 if (!(bus
->cmd
& IDE_CMD_RESET
) &&
2227 (val
& IDE_CMD_RESET
)) {
2228 /* reset low to high */
2229 for(i
= 0;i
< 2; i
++) {
2231 s
->status
= BUSY_STAT
| SEEK_STAT
;
2234 } else if ((bus
->cmd
& IDE_CMD_RESET
) &&
2235 !(val
& IDE_CMD_RESET
)) {
2237 for(i
= 0;i
< 2; i
++) {
2239 if (s
->drive_kind
== IDE_CD
)
2240 s
->status
= 0x00; /* NOTE: READY is _not_ set */
2242 s
->status
= READY_STAT
| SEEK_STAT
;
2243 ide_set_signature(s
);
2251 * Returns true if the running PIO transfer is a PIO out (i.e. data is
2252 * transferred from the device to the guest), false if it's a PIO in
2254 static bool ide_is_pio_out(IDEState
*s
)
2256 if (s
->end_transfer_func
== ide_sector_write
||
2257 s
->end_transfer_func
== ide_atapi_cmd
) {
2259 } else if (s
->end_transfer_func
== ide_sector_read
||
2260 s
->end_transfer_func
== ide_transfer_stop
||
2261 s
->end_transfer_func
== ide_atapi_cmd_reply_end
||
2262 s
->end_transfer_func
== ide_dummy_transfer_stop
) {
2269 void ide_data_writew(void *opaque
, uint32_t addr
, uint32_t val
)
2271 IDEBus
*bus
= opaque
;
2272 IDEState
*s
= idebus_active_if(bus
);
2275 trace_ide_data_writew(addr
, val
, bus
, s
);
2277 /* PIO data access allowed only when DRQ bit is set. The result of a write
2278 * during PIO out is indeterminate, just ignore it. */
2279 if (!(s
->status
& DRQ_STAT
) || ide_is_pio_out(s
)) {
2284 if (p
+ 2 > s
->data_end
) {
2288 *(uint16_t *)p
= le16_to_cpu(val
);
2291 if (p
>= s
->data_end
) {
2292 s
->status
&= ~DRQ_STAT
;
2293 s
->end_transfer_func(s
);
2297 uint32_t ide_data_readw(void *opaque
, uint32_t addr
)
2299 IDEBus
*bus
= opaque
;
2300 IDEState
*s
= idebus_active_if(bus
);
2304 /* PIO data access allowed only when DRQ bit is set. The result of a read
2305 * during PIO in is indeterminate, return 0 and don't move forward. */
2306 if (!(s
->status
& DRQ_STAT
) || !ide_is_pio_out(s
)) {
2311 if (p
+ 2 > s
->data_end
) {
2315 ret
= cpu_to_le16(*(uint16_t *)p
);
2318 if (p
>= s
->data_end
) {
2319 s
->status
&= ~DRQ_STAT
;
2320 s
->end_transfer_func(s
);
2323 trace_ide_data_readw(addr
, ret
, bus
, s
);
2327 void ide_data_writel(void *opaque
, uint32_t addr
, uint32_t val
)
2329 IDEBus
*bus
= opaque
;
2330 IDEState
*s
= idebus_active_if(bus
);
2333 trace_ide_data_writel(addr
, val
, bus
, s
);
2335 /* PIO data access allowed only when DRQ bit is set. The result of a write
2336 * during PIO out is indeterminate, just ignore it. */
2337 if (!(s
->status
& DRQ_STAT
) || ide_is_pio_out(s
)) {
2342 if (p
+ 4 > s
->data_end
) {
2346 *(uint32_t *)p
= le32_to_cpu(val
);
2349 if (p
>= s
->data_end
) {
2350 s
->status
&= ~DRQ_STAT
;
2351 s
->end_transfer_func(s
);
2355 uint32_t ide_data_readl(void *opaque
, uint32_t addr
)
2357 IDEBus
*bus
= opaque
;
2358 IDEState
*s
= idebus_active_if(bus
);
2362 /* PIO data access allowed only when DRQ bit is set. The result of a read
2363 * during PIO in is indeterminate, return 0 and don't move forward. */
2364 if (!(s
->status
& DRQ_STAT
) || !ide_is_pio_out(s
)) {
2370 if (p
+ 4 > s
->data_end
) {
2374 ret
= cpu_to_le32(*(uint32_t *)p
);
2377 if (p
>= s
->data_end
) {
2378 s
->status
&= ~DRQ_STAT
;
2379 s
->end_transfer_func(s
);
2383 trace_ide_data_readl(addr
, ret
, bus
, s
);
2387 static void ide_dummy_transfer_stop(IDEState
*s
)
2389 s
->data_ptr
= s
->io_buffer
;
2390 s
->data_end
= s
->io_buffer
;
2391 s
->io_buffer
[0] = 0xff;
2392 s
->io_buffer
[1] = 0xff;
2393 s
->io_buffer
[2] = 0xff;
2394 s
->io_buffer
[3] = 0xff;
2397 void ide_bus_reset(IDEBus
*bus
)
2401 ide_reset(&bus
->ifs
[0]);
2402 ide_reset(&bus
->ifs
[1]);
2405 /* pending async DMA */
2406 if (bus
->dma
->aiocb
) {
2407 trace_ide_bus_reset_aio();
2408 blk_aio_cancel(bus
->dma
->aiocb
);
2409 bus
->dma
->aiocb
= NULL
;
2412 /* reset dma provider too */
2413 if (bus
->dma
->ops
->reset
) {
2414 bus
->dma
->ops
->reset(bus
->dma
);
2418 static bool ide_cd_is_tray_open(void *opaque
)
2420 return ((IDEState
*)opaque
)->tray_open
;
2423 static bool ide_cd_is_medium_locked(void *opaque
)
2425 return ((IDEState
*)opaque
)->tray_locked
;
2428 static void ide_resize_cb(void *opaque
)
2430 IDEState
*s
= opaque
;
2431 uint64_t nb_sectors
;
2433 if (!s
->identify_set
) {
2437 blk_get_geometry(s
->blk
, &nb_sectors
);
2438 s
->nb_sectors
= nb_sectors
;
2440 /* Update the identify data buffer. */
2441 if (s
->drive_kind
== IDE_CFATA
) {
2442 ide_cfata_identify_size(s
);
2444 /* IDE_CD uses a different set of callbacks entirely. */
2445 assert(s
->drive_kind
!= IDE_CD
);
2446 ide_identify_size(s
);
2450 static const BlockDevOps ide_cd_block_ops
= {
2451 .change_media_cb
= ide_cd_change_cb
,
2452 .eject_request_cb
= ide_cd_eject_request_cb
,
2453 .is_tray_open
= ide_cd_is_tray_open
,
2454 .is_medium_locked
= ide_cd_is_medium_locked
,
2457 static const BlockDevOps ide_hd_block_ops
= {
2458 .resize_cb
= ide_resize_cb
,
2461 int ide_init_drive(IDEState
*s
, BlockBackend
*blk
, IDEDriveKind kind
,
2462 const char *version
, const char *serial
, const char *model
,
2464 uint32_t cylinders
, uint32_t heads
, uint32_t secs
,
2465 int chs_trans
, Error
**errp
)
2467 uint64_t nb_sectors
;
2470 s
->drive_kind
= kind
;
2472 blk_get_geometry(blk
, &nb_sectors
);
2473 s
->cylinders
= cylinders
;
2476 s
->chs_trans
= chs_trans
;
2477 s
->nb_sectors
= nb_sectors
;
2479 /* The SMART values should be preserved across power cycles
2481 s
->smart_enabled
= 1;
2482 s
->smart_autosave
= 1;
2483 s
->smart_errors
= 0;
2484 s
->smart_selftest_count
= 0;
2485 if (kind
== IDE_CD
) {
2486 blk_set_dev_ops(blk
, &ide_cd_block_ops
, s
);
2487 blk_set_guest_block_size(blk
, 2048);
2489 if (!blk_is_inserted(s
->blk
)) {
2490 error_setg(errp
, "Device needs media, but drive is empty");
2493 if (blk_is_read_only(blk
)) {
2494 error_setg(errp
, "Can't use a read-only drive");
2497 blk_set_dev_ops(blk
, &ide_hd_block_ops
, s
);
2500 pstrcpy(s
->drive_serial_str
, sizeof(s
->drive_serial_str
), serial
);
2502 snprintf(s
->drive_serial_str
, sizeof(s
->drive_serial_str
),
2503 "QM%05d", s
->drive_serial
);
2506 pstrcpy(s
->drive_model_str
, sizeof(s
->drive_model_str
), model
);
2510 strcpy(s
->drive_model_str
, "QEMU DVD-ROM");
2513 strcpy(s
->drive_model_str
, "QEMU MICRODRIVE");
2516 strcpy(s
->drive_model_str
, "QEMU HARDDISK");
2522 pstrcpy(s
->version
, sizeof(s
->version
), version
);
2524 pstrcpy(s
->version
, sizeof(s
->version
), qemu_hw_version());
2528 blk_iostatus_enable(blk
);
2532 static void ide_init1(IDEBus
*bus
, int unit
)
2534 static int drive_serial
= 1;
2535 IDEState
*s
= &bus
->ifs
[unit
];
2539 s
->drive_serial
= drive_serial
++;
2540 /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2541 s
->io_buffer_total_len
= IDE_DMA_BUF_SECTORS
*512 + 4;
2542 s
->io_buffer
= qemu_memalign(2048, s
->io_buffer_total_len
);
2543 memset(s
->io_buffer
, 0, s
->io_buffer_total_len
);
2545 s
->smart_selftest_data
= blk_blockalign(s
->blk
, 512);
2546 memset(s
->smart_selftest_data
, 0, 512);
2548 s
->sector_write_timer
= timer_new_ns(QEMU_CLOCK_VIRTUAL
,
2549 ide_sector_write_timer_cb
, s
);
2552 static int ide_nop_int(IDEDMA
*dma
, int x
)
2557 static void ide_nop(IDEDMA
*dma
)
2561 static int32_t ide_nop_int32(IDEDMA
*dma
, int32_t l
)
2566 static const IDEDMAOps ide_dma_nop_ops
= {
2567 .prepare_buf
= ide_nop_int32
,
2568 .restart_dma
= ide_nop
,
2569 .rw_buf
= ide_nop_int
,
2572 static void ide_restart_dma(IDEState
*s
, enum ide_dma_cmd dma_cmd
)
2574 s
->unit
= s
->bus
->retry_unit
;
2575 ide_set_sector(s
, s
->bus
->retry_sector_num
);
2576 s
->nsector
= s
->bus
->retry_nsector
;
2577 s
->bus
->dma
->ops
->restart_dma(s
->bus
->dma
);
2578 s
->io_buffer_size
= 0;
2579 s
->dma_cmd
= dma_cmd
;
2580 ide_start_dma(s
, ide_dma_cb
);
2583 static void ide_restart_bh(void *opaque
)
2585 IDEBus
*bus
= opaque
;
2590 qemu_bh_delete(bus
->bh
);
2593 error_status
= bus
->error_status
;
2594 if (bus
->error_status
== 0) {
2598 s
= idebus_active_if(bus
);
2599 is_read
= (bus
->error_status
& IDE_RETRY_READ
) != 0;
2601 /* The error status must be cleared before resubmitting the request: The
2602 * request may fail again, and this case can only be distinguished if the
2603 * called function can set a new error status. */
2604 bus
->error_status
= 0;
2606 /* The HBA has generically asked to be kicked on retry */
2607 if (error_status
& IDE_RETRY_HBA
) {
2608 if (s
->bus
->dma
->ops
->restart
) {
2609 s
->bus
->dma
->ops
->restart(s
->bus
->dma
);
2611 } else if (IS_IDE_RETRY_DMA(error_status
)) {
2612 if (error_status
& IDE_RETRY_TRIM
) {
2613 ide_restart_dma(s
, IDE_DMA_TRIM
);
2615 ide_restart_dma(s
, is_read
? IDE_DMA_READ
: IDE_DMA_WRITE
);
2617 } else if (IS_IDE_RETRY_PIO(error_status
)) {
2621 ide_sector_write(s
);
2623 } else if (error_status
& IDE_RETRY_FLUSH
) {
2625 } else if (IS_IDE_RETRY_ATAPI(error_status
)) {
2626 assert(s
->end_transfer_func
== ide_atapi_cmd
);
2627 ide_atapi_dma_restart(s
);
2633 static void ide_restart_cb(void *opaque
, int running
, RunState state
)
2635 IDEBus
*bus
= opaque
;
2641 bus
->bh
= qemu_bh_new(ide_restart_bh
, bus
);
2642 qemu_bh_schedule(bus
->bh
);
2646 void ide_register_restart_cb(IDEBus
*bus
)
2648 if (bus
->dma
->ops
->restart_dma
) {
2649 bus
->vmstate
= qemu_add_vm_change_state_handler(ide_restart_cb
, bus
);
2653 static IDEDMA ide_dma_nop
= {
2654 .ops
= &ide_dma_nop_ops
,
2658 void ide_init2(IDEBus
*bus
, qemu_irq irq
)
2662 for(i
= 0; i
< 2; i
++) {
2664 ide_reset(&bus
->ifs
[i
]);
2667 bus
->dma
= &ide_dma_nop
;
2670 void ide_exit(IDEState
*s
)
2672 timer_del(s
->sector_write_timer
);
2673 timer_free(s
->sector_write_timer
);
2674 qemu_vfree(s
->smart_selftest_data
);
2675 qemu_vfree(s
->io_buffer
);
2678 static const MemoryRegionPortio ide_portio_list
[] = {
2679 { 0, 8, 1, .read
= ide_ioport_read
, .write
= ide_ioport_write
},
2680 { 0, 1, 2, .read
= ide_data_readw
, .write
= ide_data_writew
},
2681 { 0, 1, 4, .read
= ide_data_readl
, .write
= ide_data_writel
},
2682 PORTIO_END_OF_LIST(),
2685 static const MemoryRegionPortio ide_portio2_list
[] = {
2686 { 0, 1, 1, .read
= ide_status_read
, .write
= ide_cmd_write
},
2687 PORTIO_END_OF_LIST(),
2690 void ide_init_ioport(IDEBus
*bus
, ISADevice
*dev
, int iobase
, int iobase2
)
2692 /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
2693 bridge has been setup properly to always register with ISA. */
2694 isa_register_portio_list(dev
, &bus
->portio_list
,
2695 iobase
, ide_portio_list
, bus
, "ide");
2698 isa_register_portio_list(dev
, &bus
->portio2_list
,
2699 iobase2
, ide_portio2_list
, bus
, "ide");
2703 static bool is_identify_set(void *opaque
, int version_id
)
2705 IDEState
*s
= opaque
;
2707 return s
->identify_set
!= 0;
/* NOTE(review): the position of each callback in this table is what
 * ide_drive_pio_pre_save() stores in end_transfer_fn_idx, so entry order
 * is migration ABI — never reorder entries or insert in the middle.
 * Several entries (original lines 2711-2713, 2715) are missing from this
 * extract; confirm against the full file before editing. */
2710 static EndTransferFunc
* transfer_end_table
[] = {
2714 ide_atapi_cmd_reply_end
,
2716 ide_dummy_transfer_stop
,
2719 static int transfer_end_table_idx(EndTransferFunc
*fn
)
2723 for (i
= 0; i
< ARRAY_SIZE(transfer_end_table
); i
++)
2724 if (transfer_end_table
[i
] == fn
)
2730 static int ide_drive_post_load(void *opaque
, int version_id
)
2732 IDEState
*s
= opaque
;
2734 if (s
->blk
&& s
->identify_set
) {
2735 blk_set_enable_write_cache(s
->blk
, !!(s
->identify_data
[85] & (1 << 5)));
2740 static int ide_drive_pio_post_load(void *opaque
, int version_id
)
2742 IDEState
*s
= opaque
;
2744 if (s
->end_transfer_fn_idx
>= ARRAY_SIZE(transfer_end_table
)) {
2747 s
->end_transfer_func
= transfer_end_table
[s
->end_transfer_fn_idx
];
2748 s
->data_ptr
= s
->io_buffer
+ s
->cur_io_buffer_offset
;
2749 s
->data_end
= s
->data_ptr
+ s
->cur_io_buffer_len
;
2750 s
->atapi_dma
= s
->feature
& 1; /* as per cmd_packet */
2755 static void ide_drive_pio_pre_save(void *opaque
)
2757 IDEState
*s
= opaque
;
2760 s
->cur_io_buffer_offset
= s
->data_ptr
- s
->io_buffer
;
2761 s
->cur_io_buffer_len
= s
->data_end
- s
->data_ptr
;
2763 idx
= transfer_end_table_idx(s
->end_transfer_func
);
2765 fprintf(stderr
, "%s: invalid end_transfer_func for DRQ_STAT\n",
2767 s
->end_transfer_fn_idx
= 2;
2769 s
->end_transfer_fn_idx
= idx
;
2773 static bool ide_drive_pio_state_needed(void *opaque
)
2775 IDEState
*s
= opaque
;
2777 return ((s
->status
& DRQ_STAT
) != 0)
2778 || (s
->bus
->error_status
& IDE_RETRY_PIO
);
2781 static bool ide_tray_state_needed(void *opaque
)
2783 IDEState
*s
= opaque
;
2785 return s
->tray_open
|| s
->tray_locked
;
2788 static bool ide_atapi_gesn_needed(void *opaque
)
2790 IDEState
*s
= opaque
;
2792 return s
->events
.new_media
|| s
->events
.eject_request
;
2795 static bool ide_error_needed(void *opaque
)
2797 IDEBus
*bus
= opaque
;
2799 return (bus
->error_status
!= 0);
2802 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2803 static const VMStateDescription vmstate_ide_atapi_gesn_state
= {
2804 .name
="ide_drive/atapi/gesn_state",
2806 .minimum_version_id
= 1,
2807 .needed
= ide_atapi_gesn_needed
,
2808 .fields
= (VMStateField
[]) {
2809 VMSTATE_BOOL(events
.new_media
, IDEState
),
2810 VMSTATE_BOOL(events
.eject_request
, IDEState
),
2811 VMSTATE_END_OF_LIST()
2815 static const VMStateDescription vmstate_ide_tray_state
= {
2816 .name
= "ide_drive/tray_state",
2818 .minimum_version_id
= 1,
2819 .needed
= ide_tray_state_needed
,
2820 .fields
= (VMStateField
[]) {
2821 VMSTATE_BOOL(tray_open
, IDEState
),
2822 VMSTATE_BOOL(tray_locked
, IDEState
),
2823 VMSTATE_END_OF_LIST()
2827 static const VMStateDescription vmstate_ide_drive_pio_state
= {
2828 .name
= "ide_drive/pio_state",
2830 .minimum_version_id
= 1,
2831 .pre_save
= ide_drive_pio_pre_save
,
2832 .post_load
= ide_drive_pio_post_load
,
2833 .needed
= ide_drive_pio_state_needed
,
2834 .fields
= (VMStateField
[]) {
2835 VMSTATE_INT32(req_nb_sectors
, IDEState
),
2836 VMSTATE_VARRAY_INT32(io_buffer
, IDEState
, io_buffer_total_len
, 1,
2837 vmstate_info_uint8
, uint8_t),
2838 VMSTATE_INT32(cur_io_buffer_offset
, IDEState
),
2839 VMSTATE_INT32(cur_io_buffer_len
, IDEState
),
2840 VMSTATE_UINT8(end_transfer_fn_idx
, IDEState
),
2841 VMSTATE_INT32(elementary_transfer_size
, IDEState
),
2842 VMSTATE_INT32(packet_transfer_size
, IDEState
),
2843 VMSTATE_END_OF_LIST()
2847 const VMStateDescription vmstate_ide_drive
= {
2848 .name
= "ide_drive",
2850 .minimum_version_id
= 0,
2851 .post_load
= ide_drive_post_load
,
2852 .fields
= (VMStateField
[]) {
2853 VMSTATE_INT32(mult_sectors
, IDEState
),
2854 VMSTATE_INT32(identify_set
, IDEState
),
2855 VMSTATE_BUFFER_TEST(identify_data
, IDEState
, is_identify_set
),
2856 VMSTATE_UINT8(feature
, IDEState
),
2857 VMSTATE_UINT8(error
, IDEState
),
2858 VMSTATE_UINT32(nsector
, IDEState
),
2859 VMSTATE_UINT8(sector
, IDEState
),
2860 VMSTATE_UINT8(lcyl
, IDEState
),
2861 VMSTATE_UINT8(hcyl
, IDEState
),
2862 VMSTATE_UINT8(hob_feature
, IDEState
),
2863 VMSTATE_UINT8(hob_sector
, IDEState
),
2864 VMSTATE_UINT8(hob_nsector
, IDEState
),
2865 VMSTATE_UINT8(hob_lcyl
, IDEState
),
2866 VMSTATE_UINT8(hob_hcyl
, IDEState
),
2867 VMSTATE_UINT8(select
, IDEState
),
2868 VMSTATE_UINT8(status
, IDEState
),
2869 VMSTATE_UINT8(lba48
, IDEState
),
2870 VMSTATE_UINT8(sense_key
, IDEState
),
2871 VMSTATE_UINT8(asc
, IDEState
),
2872 VMSTATE_UINT8_V(cdrom_changed
, IDEState
, 3),
2873 VMSTATE_END_OF_LIST()
2875 .subsections
= (const VMStateDescription
*[]) {
2876 &vmstate_ide_drive_pio_state
,
2877 &vmstate_ide_tray_state
,
2878 &vmstate_ide_atapi_gesn_state
,
2883 static const VMStateDescription vmstate_ide_error_status
= {
2884 .name
="ide_bus/error",
2886 .minimum_version_id
= 1,
2887 .needed
= ide_error_needed
,
2888 .fields
= (VMStateField
[]) {
2889 VMSTATE_INT32(error_status
, IDEBus
),
2890 VMSTATE_INT64_V(retry_sector_num
, IDEBus
, 2),
2891 VMSTATE_UINT32_V(retry_nsector
, IDEBus
, 2),
2892 VMSTATE_UINT8_V(retry_unit
, IDEBus
, 2),
2893 VMSTATE_END_OF_LIST()
2897 const VMStateDescription vmstate_ide_bus
= {
2900 .minimum_version_id
= 1,
2901 .fields
= (VMStateField
[]) {
2902 VMSTATE_UINT8(cmd
, IDEBus
),
2903 VMSTATE_UINT8(unit
, IDEBus
),
2904 VMSTATE_END_OF_LIST()
2906 .subsections
= (const VMStateDescription
*[]) {
2907 &vmstate_ide_error_status
,
2912 void ide_drive_get(DriveInfo
**hd
, int n
)
2916 for (i
= 0; i
< n
; i
++) {
2917 hd
[i
] = drive_get_by_index(IF_IDE
, i
);