2 * QEMU IDE disk and CD/DVD-ROM Emulator
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2006 Openedhand Ltd.
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 #include "qemu/osdep.h"
27 #include "hw/isa/isa.h"
28 #include "migration/vmstate.h"
29 #include "qemu/error-report.h"
30 #include "qemu/main-loop.h"
31 #include "qemu/timer.h"
32 #include "qemu/hw-version.h"
33 #include "qemu/memalign.h"
34 #include "sysemu/sysemu.h"
35 #include "sysemu/blockdev.h"
36 #include "sysemu/dma.h"
37 #include "hw/block/block.h"
38 #include "sysemu/block-backend.h"
39 #include "qapi/error.h"
40 #include "qemu/cutils.h"
41 #include "sysemu/replay.h"
42 #include "sysemu/runstate.h"
43 #include "hw/ide/internal.h"
46 /* These values were based on a Seagate ST3500418AS but have been modified
47 to make more sense in QEMU */
/*
 * SMART attribute table: one row per attribute.
 * Layout per row: id, flags, hflags, current value, worst value,
 * 6 raw data bytes, failure threshold.
 */
static const int smart_attributes[][12] = {
    /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
    /* raw read error rate*/
    { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
    /* spin up */
    { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* start stop count */
    { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
    /* remapped sectors */
    { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
    /* power on hours */
    { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* power cycle count */
    { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* airflow-temperature-celsius */
    { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
};
66 const char *IDE_DMA_CMD_lookup
[IDE_DMA__COUNT
] = {
67 [IDE_DMA_READ
] = "DMA READ",
68 [IDE_DMA_WRITE
] = "DMA WRITE",
69 [IDE_DMA_TRIM
] = "DMA TRIM",
70 [IDE_DMA_ATAPI
] = "DMA ATAPI"
73 static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval
)
75 if ((unsigned)enval
< IDE_DMA__COUNT
) {
76 return IDE_DMA_CMD_lookup
[enval
];
78 return "DMA UNKNOWN CMD";
81 static void ide_dummy_transfer_stop(IDEState
*s
);
/*
 * Copy @src into @str, space-padded to exactly @len bytes, with each
 * pair of bytes swapped (str[i^1]) to match ATA IDENTIFY string
 * byte ordering.  No NUL terminator is written.
 */
static void padstr(char *str, const char *src, int len)
{
    int i, v;
    for(i = 0; i < len; i++) {
        if (*src)
            v = *src++;
        else
            v = ' ';
        str[i^1] = v;
    }
}
95 static void put_le16(uint16_t *p
, unsigned int v
)
100 static void ide_identify_size(IDEState
*s
)
102 uint16_t *p
= (uint16_t *)s
->identify_data
;
103 int64_t nb_sectors_lba28
= s
->nb_sectors
;
104 if (nb_sectors_lba28
>= 1 << 28) {
105 nb_sectors_lba28
= (1 << 28) - 1;
107 put_le16(p
+ 60, nb_sectors_lba28
);
108 put_le16(p
+ 61, nb_sectors_lba28
>> 16);
109 put_le16(p
+ 100, s
->nb_sectors
);
110 put_le16(p
+ 101, s
->nb_sectors
>> 16);
111 put_le16(p
+ 102, s
->nb_sectors
>> 32);
112 put_le16(p
+ 103, s
->nb_sectors
>> 48);
115 static void ide_identify(IDEState
*s
)
118 unsigned int oldsize
;
119 IDEDevice
*dev
= s
->unit
? s
->bus
->slave
: s
->bus
->master
;
121 p
= (uint16_t *)s
->identify_data
;
122 if (s
->identify_set
) {
125 memset(p
, 0, sizeof(s
->identify_data
));
127 put_le16(p
+ 0, 0x0040);
128 put_le16(p
+ 1, s
->cylinders
);
129 put_le16(p
+ 3, s
->heads
);
130 put_le16(p
+ 4, 512 * s
->sectors
); /* XXX: retired, remove ? */
131 put_le16(p
+ 5, 512); /* XXX: retired, remove ? */
132 put_le16(p
+ 6, s
->sectors
);
133 padstr((char *)(p
+ 10), s
->drive_serial_str
, 20); /* serial number */
134 put_le16(p
+ 20, 3); /* XXX: retired, remove ? */
135 put_le16(p
+ 21, 512); /* cache size in sectors */
136 put_le16(p
+ 22, 4); /* ecc bytes */
137 padstr((char *)(p
+ 23), s
->version
, 8); /* firmware version */
138 padstr((char *)(p
+ 27), s
->drive_model_str
, 40); /* model */
139 #if MAX_MULT_SECTORS > 1
140 put_le16(p
+ 47, 0x8000 | MAX_MULT_SECTORS
);
142 put_le16(p
+ 48, 1); /* dword I/O */
143 put_le16(p
+ 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
144 put_le16(p
+ 51, 0x200); /* PIO transfer cycle */
145 put_le16(p
+ 52, 0x200); /* DMA transfer cycle */
146 put_le16(p
+ 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
147 put_le16(p
+ 54, s
->cylinders
);
148 put_le16(p
+ 55, s
->heads
);
149 put_le16(p
+ 56, s
->sectors
);
150 oldsize
= s
->cylinders
* s
->heads
* s
->sectors
;
151 put_le16(p
+ 57, oldsize
);
152 put_le16(p
+ 58, oldsize
>> 16);
154 put_le16(p
+ 59, 0x100 | s
->mult_sectors
);
155 /* *(p + 60) := nb_sectors -- see ide_identify_size */
156 /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
157 put_le16(p
+ 62, 0x07); /* single word dma0-2 supported */
158 put_le16(p
+ 63, 0x07); /* mdma0-2 supported */
159 put_le16(p
+ 64, 0x03); /* pio3-4 supported */
160 put_le16(p
+ 65, 120);
161 put_le16(p
+ 66, 120);
162 put_le16(p
+ 67, 120);
163 put_le16(p
+ 68, 120);
164 if (dev
&& dev
->conf
.discard_granularity
) {
165 put_le16(p
+ 69, (1 << 14)); /* determinate TRIM behavior */
169 put_le16(p
+ 75, s
->ncq_queues
- 1);
171 put_le16(p
+ 76, (1 << 8));
174 put_le16(p
+ 80, 0xf0); /* ata3 -> ata6 supported */
175 put_le16(p
+ 81, 0x16); /* conforms to ata5 */
176 /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
177 put_le16(p
+ 82, (1 << 14) | (1 << 5) | 1);
178 /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
179 put_le16(p
+ 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
180 /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
182 put_le16(p
+ 84, (1 << 14) | (1 << 8) | 0);
184 put_le16(p
+ 84, (1 << 14) | 0);
186 /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
187 if (blk_enable_write_cache(s
->blk
)) {
188 put_le16(p
+ 85, (1 << 14) | (1 << 5) | 1);
190 put_le16(p
+ 85, (1 << 14) | 1);
192 /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
193 put_le16(p
+ 86, (1 << 13) | (1 <<12) | (1 << 10));
194 /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
196 put_le16(p
+ 87, (1 << 14) | (1 << 8) | 0);
198 put_le16(p
+ 87, (1 << 14) | 0);
200 put_le16(p
+ 88, 0x3f | (1 << 13)); /* udma5 set and supported */
201 put_le16(p
+ 93, 1 | (1 << 14) | 0x2000);
202 /* *(p + 100) := nb_sectors -- see ide_identify_size */
203 /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
204 /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
205 /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
207 if (dev
&& dev
->conf
.physical_block_size
)
208 put_le16(p
+ 106, 0x6000 | get_physical_block_exp(&dev
->conf
));
210 /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
211 put_le16(p
+ 108, s
->wwn
>> 48);
212 put_le16(p
+ 109, s
->wwn
>> 32);
213 put_le16(p
+ 110, s
->wwn
>> 16);
214 put_le16(p
+ 111, s
->wwn
);
216 if (dev
&& dev
->conf
.discard_granularity
) {
217 put_le16(p
+ 169, 1); /* TRIM support */
220 put_le16(p
+ 217, dev
->rotation_rate
); /* Nominal media rotation rate */
223 ide_identify_size(s
);
227 memcpy(s
->io_buffer
, p
, sizeof(s
->identify_data
));
230 static void ide_atapi_identify(IDEState
*s
)
234 p
= (uint16_t *)s
->identify_data
;
235 if (s
->identify_set
) {
238 memset(p
, 0, sizeof(s
->identify_data
));
240 /* Removable CDROM, 50us response, 12 byte packets */
241 put_le16(p
+ 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
242 padstr((char *)(p
+ 10), s
->drive_serial_str
, 20); /* serial number */
243 put_le16(p
+ 20, 3); /* buffer type */
244 put_le16(p
+ 21, 512); /* cache size in sectors */
245 put_le16(p
+ 22, 4); /* ecc bytes */
246 padstr((char *)(p
+ 23), s
->version
, 8); /* firmware version */
247 padstr((char *)(p
+ 27), s
->drive_model_str
, 40); /* model */
248 put_le16(p
+ 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
250 put_le16(p
+ 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
251 put_le16(p
+ 53, 7); /* words 64-70, 54-58, 88 valid */
252 put_le16(p
+ 62, 7); /* single word dma0-2 supported */
253 put_le16(p
+ 63, 7); /* mdma0-2 supported */
255 put_le16(p
+ 49, 1 << 9); /* LBA supported, no DMA */
256 put_le16(p
+ 53, 3); /* words 64-70, 54-58 valid */
257 put_le16(p
+ 63, 0x103); /* DMA modes XXX: may be incorrect */
259 put_le16(p
+ 64, 3); /* pio3-4 supported */
260 put_le16(p
+ 65, 0xb4); /* minimum DMA multiword tx cycle time */
261 put_le16(p
+ 66, 0xb4); /* recommended DMA multiword tx cycle time */
262 put_le16(p
+ 67, 0x12c); /* minimum PIO cycle time without flow control */
263 put_le16(p
+ 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
265 put_le16(p
+ 71, 30); /* in ns */
266 put_le16(p
+ 72, 30); /* in ns */
269 put_le16(p
+ 75, s
->ncq_queues
- 1);
271 put_le16(p
+ 76, (1 << 8));
274 put_le16(p
+ 80, 0x1e); /* support up to ATA/ATAPI-4 */
276 put_le16(p
+ 84, (1 << 8)); /* supports WWN for words 108-111 */
277 put_le16(p
+ 87, (1 << 8)); /* WWN enabled */
281 put_le16(p
+ 88, 0x3f | (1 << 13)); /* udma5 set and supported */
285 /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
286 put_le16(p
+ 108, s
->wwn
>> 48);
287 put_le16(p
+ 109, s
->wwn
>> 32);
288 put_le16(p
+ 110, s
->wwn
>> 16);
289 put_le16(p
+ 111, s
->wwn
);
295 memcpy(s
->io_buffer
, p
, sizeof(s
->identify_data
));
298 static void ide_cfata_identify_size(IDEState
*s
)
300 uint16_t *p
= (uint16_t *)s
->identify_data
;
301 put_le16(p
+ 7, s
->nb_sectors
>> 16); /* Sectors per card */
302 put_le16(p
+ 8, s
->nb_sectors
); /* Sectors per card */
303 put_le16(p
+ 60, s
->nb_sectors
); /* Total LBA sectors */
304 put_le16(p
+ 61, s
->nb_sectors
>> 16); /* Total LBA sectors */
307 static void ide_cfata_identify(IDEState
*s
)
312 p
= (uint16_t *)s
->identify_data
;
313 if (s
->identify_set
) {
316 memset(p
, 0, sizeof(s
->identify_data
));
318 cur_sec
= s
->cylinders
* s
->heads
* s
->sectors
;
320 put_le16(p
+ 0, 0x848a); /* CF Storage Card signature */
321 put_le16(p
+ 1, s
->cylinders
); /* Default cylinders */
322 put_le16(p
+ 3, s
->heads
); /* Default heads */
323 put_le16(p
+ 6, s
->sectors
); /* Default sectors per track */
324 /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
325 /* *(p + 8) := nb_sectors -- see ide_cfata_identify_size */
326 padstr((char *)(p
+ 10), s
->drive_serial_str
, 20); /* serial number */
327 put_le16(p
+ 22, 0x0004); /* ECC bytes */
328 padstr((char *) (p
+ 23), s
->version
, 8); /* Firmware Revision */
329 padstr((char *) (p
+ 27), s
->drive_model_str
, 40);/* Model number */
330 #if MAX_MULT_SECTORS > 1
331 put_le16(p
+ 47, 0x8000 | MAX_MULT_SECTORS
);
333 put_le16(p
+ 47, 0x0000);
335 put_le16(p
+ 49, 0x0f00); /* Capabilities */
336 put_le16(p
+ 51, 0x0002); /* PIO cycle timing mode */
337 put_le16(p
+ 52, 0x0001); /* DMA cycle timing mode */
338 put_le16(p
+ 53, 0x0003); /* Translation params valid */
339 put_le16(p
+ 54, s
->cylinders
); /* Current cylinders */
340 put_le16(p
+ 55, s
->heads
); /* Current heads */
341 put_le16(p
+ 56, s
->sectors
); /* Current sectors */
342 put_le16(p
+ 57, cur_sec
); /* Current capacity */
343 put_le16(p
+ 58, cur_sec
>> 16); /* Current capacity */
344 if (s
->mult_sectors
) /* Multiple sector setting */
345 put_le16(p
+ 59, 0x100 | s
->mult_sectors
);
346 /* *(p + 60) := nb_sectors -- see ide_cfata_identify_size */
347 /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
348 put_le16(p
+ 63, 0x0203); /* Multiword DMA capability */
349 put_le16(p
+ 64, 0x0001); /* Flow Control PIO support */
350 put_le16(p
+ 65, 0x0096); /* Min. Multiword DMA cycle */
351 put_le16(p
+ 66, 0x0096); /* Rec. Multiword DMA cycle */
352 put_le16(p
+ 68, 0x00b4); /* Min. PIO cycle time */
353 put_le16(p
+ 82, 0x400c); /* Command Set supported */
354 put_le16(p
+ 83, 0x7068); /* Command Set supported */
355 put_le16(p
+ 84, 0x4000); /* Features supported */
356 put_le16(p
+ 85, 0x000c); /* Command Set enabled */
357 put_le16(p
+ 86, 0x7044); /* Command Set enabled */
358 put_le16(p
+ 87, 0x4000); /* Features enabled */
359 put_le16(p
+ 91, 0x4060); /* Current APM level */
360 put_le16(p
+ 129, 0x0002); /* Current features option */
361 put_le16(p
+ 130, 0x0005); /* Reassigned sectors */
362 put_le16(p
+ 131, 0x0001); /* Initial power mode */
363 put_le16(p
+ 132, 0x0000); /* User signature */
364 put_le16(p
+ 160, 0x8100); /* Power requirement */
365 put_le16(p
+ 161, 0x8001); /* CF command set */
367 ide_cfata_identify_size(s
);
371 memcpy(s
->io_buffer
, p
, sizeof(s
->identify_data
));
374 static void ide_set_signature(IDEState
*s
)
376 s
->select
&= ~(ATA_DEV_HS
); /* clear head */
380 if (s
->drive_kind
== IDE_CD
) {
392 static bool ide_sect_range_ok(IDEState
*s
,
393 uint64_t sector
, uint64_t nb_sectors
)
395 uint64_t total_sectors
;
397 blk_get_geometry(s
->blk
, &total_sectors
);
398 if (sector
> total_sectors
|| nb_sectors
> total_sectors
- sector
) {
404 typedef struct TrimAIOCB
{
414 static void trim_aio_cancel(BlockAIOCB
*acb
)
416 TrimAIOCB
*iocb
= container_of(acb
, TrimAIOCB
, common
);
418 /* Exit the loop so ide_issue_trim_cb will not continue */
419 iocb
->j
= iocb
->qiov
->niov
- 1;
420 iocb
->i
= (iocb
->qiov
->iov
[iocb
->j
].iov_len
/ 8) - 1;
422 iocb
->ret
= -ECANCELED
;
425 blk_aio_cancel_async(iocb
->aiocb
);
430 static const AIOCBInfo trim_aiocb_info
= {
431 .aiocb_size
= sizeof(TrimAIOCB
),
432 .cancel_async
= trim_aio_cancel
,
435 static void ide_trim_bh_cb(void *opaque
)
437 TrimAIOCB
*iocb
= opaque
;
438 BlockBackend
*blk
= iocb
->s
->blk
;
440 iocb
->common
.cb(iocb
->common
.opaque
, iocb
->ret
);
442 qemu_bh_delete(iocb
->bh
);
444 qemu_aio_unref(iocb
);
446 /* Paired with an increment in ide_issue_trim() */
447 blk_dec_in_flight(blk
);
450 static void ide_issue_trim_cb(void *opaque
, int ret
)
452 TrimAIOCB
*iocb
= opaque
;
453 IDEState
*s
= iocb
->s
;
457 block_acct_done(blk_get_stats(s
->blk
), &s
->acct
);
459 block_acct_failed(blk_get_stats(s
->blk
), &s
->acct
);
464 while (iocb
->j
< iocb
->qiov
->niov
) {
466 while (++iocb
->i
< iocb
->qiov
->iov
[j
].iov_len
/ 8) {
468 uint64_t *buffer
= iocb
->qiov
->iov
[j
].iov_base
;
470 /* 6-byte LBA + 2-byte range per entry */
471 uint64_t entry
= le64_to_cpu(buffer
[i
]);
472 uint64_t sector
= entry
& 0x0000ffffffffffffULL
;
473 uint16_t count
= entry
>> 48;
479 if (!ide_sect_range_ok(s
, sector
, count
)) {
480 block_acct_invalid(blk_get_stats(s
->blk
), BLOCK_ACCT_UNMAP
);
485 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
,
486 count
<< BDRV_SECTOR_BITS
, BLOCK_ACCT_UNMAP
);
488 /* Got an entry! Submit and exit. */
489 iocb
->aiocb
= blk_aio_pdiscard(s
->blk
,
490 sector
<< BDRV_SECTOR_BITS
,
491 count
<< BDRV_SECTOR_BITS
,
492 ide_issue_trim_cb
, opaque
);
506 replay_bh_schedule_event(iocb
->bh
);
510 BlockAIOCB
*ide_issue_trim(
511 int64_t offset
, QEMUIOVector
*qiov
,
512 BlockCompletionFunc
*cb
, void *cb_opaque
, void *opaque
)
514 IDEState
*s
= opaque
;
517 /* Paired with a decrement in ide_trim_bh_cb() */
518 blk_inc_in_flight(s
->blk
);
520 iocb
= blk_aio_get(&trim_aiocb_info
, s
->blk
, cb
, cb_opaque
);
522 iocb
->bh
= qemu_bh_new(ide_trim_bh_cb
, iocb
);
527 ide_issue_trim_cb(iocb
, 0);
528 return &iocb
->common
;
531 void ide_abort_command(IDEState
*s
)
533 ide_transfer_stop(s
);
534 s
->status
= READY_STAT
| ERR_STAT
;
538 static void ide_set_retry(IDEState
*s
)
540 s
->bus
->retry_unit
= s
->unit
;
541 s
->bus
->retry_sector_num
= ide_get_sector(s
);
542 s
->bus
->retry_nsector
= s
->nsector
;
545 static void ide_clear_retry(IDEState
*s
)
547 s
->bus
->retry_unit
= -1;
548 s
->bus
->retry_sector_num
= 0;
549 s
->bus
->retry_nsector
= 0;
552 /* prepare data transfer and tell what to do after */
553 bool ide_transfer_start_norecurse(IDEState
*s
, uint8_t *buf
, int size
,
554 EndTransferFunc
*end_transfer_func
)
557 s
->data_end
= buf
+ size
;
559 if (!(s
->status
& ERR_STAT
)) {
560 s
->status
|= DRQ_STAT
;
562 if (!s
->bus
->dma
->ops
->pio_transfer
) {
563 s
->end_transfer_func
= end_transfer_func
;
566 s
->bus
->dma
->ops
->pio_transfer(s
->bus
->dma
);
570 void ide_transfer_start(IDEState
*s
, uint8_t *buf
, int size
,
571 EndTransferFunc
*end_transfer_func
)
573 if (ide_transfer_start_norecurse(s
, buf
, size
, end_transfer_func
)) {
574 end_transfer_func(s
);
578 static void ide_cmd_done(IDEState
*s
)
580 if (s
->bus
->dma
->ops
->cmd_done
) {
581 s
->bus
->dma
->ops
->cmd_done(s
->bus
->dma
);
585 static void ide_transfer_halt(IDEState
*s
)
587 s
->end_transfer_func
= ide_transfer_stop
;
588 s
->data_ptr
= s
->io_buffer
;
589 s
->data_end
= s
->io_buffer
;
590 s
->status
&= ~DRQ_STAT
;
593 void ide_transfer_stop(IDEState
*s
)
595 ide_transfer_halt(s
);
599 int64_t ide_get_sector(IDEState
*s
)
602 if (s
->select
& (ATA_DEV_LBA
)) {
604 sector_num
= ((int64_t)s
->hob_hcyl
<< 40) |
605 ((int64_t) s
->hob_lcyl
<< 32) |
606 ((int64_t) s
->hob_sector
<< 24) |
607 ((int64_t) s
->hcyl
<< 16) |
608 ((int64_t) s
->lcyl
<< 8) | s
->sector
;
611 sector_num
= ((s
->select
& (ATA_DEV_LBA_MSB
)) << 24) |
612 (s
->hcyl
<< 16) | (s
->lcyl
<< 8) | s
->sector
;
616 sector_num
= ((s
->hcyl
<< 8) | s
->lcyl
) * s
->heads
* s
->sectors
+
617 (s
->select
& (ATA_DEV_HS
)) * s
->sectors
+ (s
->sector
- 1);
623 void ide_set_sector(IDEState
*s
, int64_t sector_num
)
626 if (s
->select
& (ATA_DEV_LBA
)) {
628 s
->sector
= sector_num
;
629 s
->lcyl
= sector_num
>> 8;
630 s
->hcyl
= sector_num
>> 16;
631 s
->hob_sector
= sector_num
>> 24;
632 s
->hob_lcyl
= sector_num
>> 32;
633 s
->hob_hcyl
= sector_num
>> 40;
636 s
->select
= (s
->select
& ~(ATA_DEV_LBA_MSB
)) |
637 ((sector_num
>> 24) & (ATA_DEV_LBA_MSB
));
638 s
->hcyl
= (sector_num
>> 16);
639 s
->lcyl
= (sector_num
>> 8);
640 s
->sector
= (sector_num
);
644 cyl
= sector_num
/ (s
->heads
* s
->sectors
);
645 r
= sector_num
% (s
->heads
* s
->sectors
);
648 s
->select
= (s
->select
& ~(ATA_DEV_HS
)) |
649 ((r
/ s
->sectors
) & (ATA_DEV_HS
));
650 s
->sector
= (r
% s
->sectors
) + 1;
654 static void ide_rw_error(IDEState
*s
) {
655 ide_abort_command(s
);
659 static void ide_buffered_readv_cb(void *opaque
, int ret
)
661 IDEBufferedRequest
*req
= opaque
;
662 if (!req
->orphaned
) {
664 assert(req
->qiov
.size
== req
->original_qiov
->size
);
665 qemu_iovec_from_buf(req
->original_qiov
, 0,
666 req
->qiov
.local_iov
.iov_base
,
667 req
->original_qiov
->size
);
669 req
->original_cb(req
->original_opaque
, ret
);
671 QLIST_REMOVE(req
, list
);
672 qemu_vfree(qemu_iovec_buf(&req
->qiov
));
676 #define MAX_BUFFERED_REQS 16
678 BlockAIOCB
*ide_buffered_readv(IDEState
*s
, int64_t sector_num
,
679 QEMUIOVector
*iov
, int nb_sectors
,
680 BlockCompletionFunc
*cb
, void *opaque
)
683 IDEBufferedRequest
*req
;
686 QLIST_FOREACH(req
, &s
->buffered_requests
, list
) {
689 if (c
> MAX_BUFFERED_REQS
) {
690 return blk_abort_aio_request(s
->blk
, cb
, opaque
, -EIO
);
693 req
= g_new0(IDEBufferedRequest
, 1);
694 req
->original_qiov
= iov
;
695 req
->original_cb
= cb
;
696 req
->original_opaque
= opaque
;
697 qemu_iovec_init_buf(&req
->qiov
, blk_blockalign(s
->blk
, iov
->size
),
700 aioreq
= blk_aio_preadv(s
->blk
, sector_num
<< BDRV_SECTOR_BITS
,
701 &req
->qiov
, 0, ide_buffered_readv_cb
, req
);
703 QLIST_INSERT_HEAD(&s
->buffered_requests
, req
, list
);
708 * Cancel all pending DMA requests.
709 * Any buffered DMA requests are instantly canceled,
710 * but any pending unbuffered DMA requests must be waited on.
712 void ide_cancel_dma_sync(IDEState
*s
)
714 IDEBufferedRequest
*req
;
716 /* First invoke the callbacks of all buffered requests
717 * and flag those requests as orphaned. Ideally there
718 * are no unbuffered (Scatter Gather DMA Requests or
719 * write requests) pending and we can avoid to drain. */
720 QLIST_FOREACH(req
, &s
->buffered_requests
, list
) {
721 if (!req
->orphaned
) {
722 trace_ide_cancel_dma_sync_buffered(req
->original_cb
, req
);
723 req
->original_cb(req
->original_opaque
, -ECANCELED
);
725 req
->orphaned
= true;
729 * We can't cancel Scatter Gather DMA in the middle of the
730 * operation or a partial (not full) DMA transfer would reach
731 * the storage so we wait for completion instead (we behave
732 * like if the DMA was completed by the time the guest trying
733 * to cancel dma with bmdma_cmd_writeb with BM_CMD_START not
736 * In the future we'll be able to safely cancel the I/O if the
737 * whole DMA operation will be submitted to disk with a single
738 * aio operation with preadv/pwritev.
740 if (s
->bus
->dma
->aiocb
) {
741 trace_ide_cancel_dma_sync_remaining();
743 assert(s
->bus
->dma
->aiocb
== NULL
);
747 static void ide_sector_read(IDEState
*s
);
749 static void ide_sector_read_cb(void *opaque
, int ret
)
751 IDEState
*s
= opaque
;
755 s
->status
&= ~BUSY_STAT
;
758 if (ide_handle_rw_error(s
, -ret
, IDE_RETRY_PIO
|
764 block_acct_done(blk_get_stats(s
->blk
), &s
->acct
);
767 if (n
> s
->req_nb_sectors
) {
768 n
= s
->req_nb_sectors
;
771 ide_set_sector(s
, ide_get_sector(s
) + n
);
773 /* Allow the guest to read the io_buffer */
774 ide_transfer_start(s
, s
->io_buffer
, n
* BDRV_SECTOR_SIZE
, ide_sector_read
);
778 static void ide_sector_read(IDEState
*s
)
783 s
->status
= READY_STAT
| SEEK_STAT
;
784 s
->error
= 0; /* not needed by IDE spec, but needed by Windows */
785 sector_num
= ide_get_sector(s
);
789 ide_transfer_stop(s
);
793 s
->status
|= BUSY_STAT
;
795 if (n
> s
->req_nb_sectors
) {
796 n
= s
->req_nb_sectors
;
799 trace_ide_sector_read(sector_num
, n
);
801 if (!ide_sect_range_ok(s
, sector_num
, n
)) {
803 block_acct_invalid(blk_get_stats(s
->blk
), BLOCK_ACCT_READ
);
807 qemu_iovec_init_buf(&s
->qiov
, s
->io_buffer
, n
* BDRV_SECTOR_SIZE
);
809 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
,
810 n
* BDRV_SECTOR_SIZE
, BLOCK_ACCT_READ
);
811 s
->pio_aiocb
= ide_buffered_readv(s
, sector_num
, &s
->qiov
, n
,
812 ide_sector_read_cb
, s
);
815 void dma_buf_commit(IDEState
*s
, uint32_t tx_bytes
)
817 if (s
->bus
->dma
->ops
->commit_buf
) {
818 s
->bus
->dma
->ops
->commit_buf(s
->bus
->dma
, tx_bytes
);
820 s
->io_buffer_offset
+= tx_bytes
;
821 qemu_sglist_destroy(&s
->sg
);
824 void ide_set_inactive(IDEState
*s
, bool more
)
826 s
->bus
->dma
->aiocb
= NULL
;
828 if (s
->bus
->dma
->ops
->set_inactive
) {
829 s
->bus
->dma
->ops
->set_inactive(s
->bus
->dma
, more
);
834 void ide_dma_error(IDEState
*s
)
836 dma_buf_commit(s
, 0);
837 ide_abort_command(s
);
838 ide_set_inactive(s
, false);
842 int ide_handle_rw_error(IDEState
*s
, int error
, int op
)
844 bool is_read
= (op
& IDE_RETRY_READ
) != 0;
845 BlockErrorAction action
= blk_get_error_action(s
->blk
, is_read
, error
);
847 if (action
== BLOCK_ERROR_ACTION_STOP
) {
848 assert(s
->bus
->retry_unit
== s
->unit
);
849 s
->bus
->error_status
= op
;
850 } else if (action
== BLOCK_ERROR_ACTION_REPORT
) {
851 block_acct_failed(blk_get_stats(s
->blk
), &s
->acct
);
852 if (IS_IDE_RETRY_DMA(op
)) {
854 } else if (IS_IDE_RETRY_ATAPI(op
)) {
855 ide_atapi_io_error(s
, -error
);
860 blk_error_action(s
->blk
, action
, is_read
, error
);
861 return action
!= BLOCK_ERROR_ACTION_IGNORE
;
864 static void ide_dma_cb(void *opaque
, int ret
)
866 IDEState
*s
= opaque
;
870 bool stay_active
= false;
871 int32_t prep_size
= 0;
873 if (ret
== -EINVAL
) {
879 if (ide_handle_rw_error(s
, -ret
, ide_dma_cmd_to_retry(s
->dma_cmd
))) {
880 s
->bus
->dma
->aiocb
= NULL
;
881 dma_buf_commit(s
, 0);
886 if (s
->io_buffer_size
> s
->nsector
* 512) {
888 * The PRDs were longer than needed for this request.
889 * The Active bit must remain set after the request completes.
894 n
= s
->io_buffer_size
>> 9;
897 sector_num
= ide_get_sector(s
);
899 assert(n
* 512 == s
->sg
.size
);
900 dma_buf_commit(s
, s
->sg
.size
);
902 ide_set_sector(s
, sector_num
);
906 /* end of transfer ? */
907 if (s
->nsector
== 0) {
908 s
->status
= READY_STAT
| SEEK_STAT
;
913 /* launch next transfer */
915 s
->io_buffer_index
= 0;
916 s
->io_buffer_size
= n
* 512;
917 prep_size
= s
->bus
->dma
->ops
->prepare_buf(s
->bus
->dma
, s
->io_buffer_size
);
918 /* prepare_buf() must succeed and respect the limit */
919 assert(prep_size
>= 0 && prep_size
<= n
* 512);
922 * Now prep_size stores the number of bytes in the sglist, and
923 * s->io_buffer_size stores the number of bytes described by the PRDs.
926 if (prep_size
< n
* 512) {
928 * The PRDs are too short for this request. Error condition!
929 * Reset the Active bit and don't raise the interrupt.
931 s
->status
= READY_STAT
| SEEK_STAT
;
932 dma_buf_commit(s
, 0);
936 trace_ide_dma_cb(s
, sector_num
, n
, IDE_DMA_CMD_str(s
->dma_cmd
));
938 if ((s
->dma_cmd
== IDE_DMA_READ
|| s
->dma_cmd
== IDE_DMA_WRITE
) &&
939 !ide_sect_range_ok(s
, sector_num
, n
)) {
941 block_acct_invalid(blk_get_stats(s
->blk
), s
->acct
.type
);
945 offset
= sector_num
<< BDRV_SECTOR_BITS
;
946 switch (s
->dma_cmd
) {
948 s
->bus
->dma
->aiocb
= dma_blk_read(s
->blk
, &s
->sg
, offset
,
949 BDRV_SECTOR_SIZE
, ide_dma_cb
, s
);
952 s
->bus
->dma
->aiocb
= dma_blk_write(s
->blk
, &s
->sg
, offset
,
953 BDRV_SECTOR_SIZE
, ide_dma_cb
, s
);
956 s
->bus
->dma
->aiocb
= dma_blk_io(blk_get_aio_context(s
->blk
),
957 &s
->sg
, offset
, BDRV_SECTOR_SIZE
,
958 ide_issue_trim
, s
, ide_dma_cb
, s
,
959 DMA_DIRECTION_TO_DEVICE
);
967 if (s
->dma_cmd
== IDE_DMA_READ
|| s
->dma_cmd
== IDE_DMA_WRITE
) {
968 block_acct_done(blk_get_stats(s
->blk
), &s
->acct
);
970 ide_set_inactive(s
, stay_active
);
973 static void ide_sector_start_dma(IDEState
*s
, enum ide_dma_cmd dma_cmd
)
975 s
->status
= READY_STAT
| SEEK_STAT
| DRQ_STAT
;
976 s
->io_buffer_size
= 0;
977 s
->dma_cmd
= dma_cmd
;
981 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
,
982 s
->nsector
* BDRV_SECTOR_SIZE
, BLOCK_ACCT_READ
);
985 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
,
986 s
->nsector
* BDRV_SECTOR_SIZE
, BLOCK_ACCT_WRITE
);
992 ide_start_dma(s
, ide_dma_cb
);
995 void ide_start_dma(IDEState
*s
, BlockCompletionFunc
*cb
)
997 s
->io_buffer_index
= 0;
999 if (s
->bus
->dma
->ops
->start_dma
) {
1000 s
->bus
->dma
->ops
->start_dma(s
->bus
->dma
, s
, cb
);
1004 static void ide_sector_write(IDEState
*s
);
1006 static void ide_sector_write_timer_cb(void *opaque
)
1008 IDEState
*s
= opaque
;
1009 ide_set_irq(s
->bus
);
1012 static void ide_sector_write_cb(void *opaque
, int ret
)
1014 IDEState
*s
= opaque
;
1017 s
->pio_aiocb
= NULL
;
1018 s
->status
&= ~BUSY_STAT
;
1021 if (ide_handle_rw_error(s
, -ret
, IDE_RETRY_PIO
)) {
1026 block_acct_done(blk_get_stats(s
->blk
), &s
->acct
);
1029 if (n
> s
->req_nb_sectors
) {
1030 n
= s
->req_nb_sectors
;
1034 ide_set_sector(s
, ide_get_sector(s
) + n
);
1035 if (s
->nsector
== 0) {
1036 /* no more sectors to write */
1037 ide_transfer_stop(s
);
1039 int n1
= s
->nsector
;
1040 if (n1
> s
->req_nb_sectors
) {
1041 n1
= s
->req_nb_sectors
;
1043 ide_transfer_start(s
, s
->io_buffer
, n1
* BDRV_SECTOR_SIZE
,
1047 if (win2k_install_hack
&& ((++s
->irq_count
% 16) == 0)) {
1048 /* It seems there is a bug in the Windows 2000 installer HDD
1049 IDE driver which fills the disk with empty logs when the
1050 IDE write IRQ comes too early. This hack tries to correct
1051 that at the expense of slower write performances. Use this
1052 option _only_ to install Windows 2000. You must disable it
1054 timer_mod(s
->sector_write_timer
, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) +
1055 (NANOSECONDS_PER_SECOND
/ 1000));
1057 ide_set_irq(s
->bus
);
1061 static void ide_sector_write(IDEState
*s
)
1066 s
->status
= READY_STAT
| SEEK_STAT
| BUSY_STAT
;
1067 sector_num
= ide_get_sector(s
);
1070 if (n
> s
->req_nb_sectors
) {
1071 n
= s
->req_nb_sectors
;
1074 trace_ide_sector_write(sector_num
, n
);
1076 if (!ide_sect_range_ok(s
, sector_num
, n
)) {
1078 block_acct_invalid(blk_get_stats(s
->blk
), BLOCK_ACCT_WRITE
);
1082 qemu_iovec_init_buf(&s
->qiov
, s
->io_buffer
, n
* BDRV_SECTOR_SIZE
);
1084 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
,
1085 n
* BDRV_SECTOR_SIZE
, BLOCK_ACCT_WRITE
);
1086 s
->pio_aiocb
= blk_aio_pwritev(s
->blk
, sector_num
<< BDRV_SECTOR_BITS
,
1087 &s
->qiov
, 0, ide_sector_write_cb
, s
);
1090 static void ide_flush_cb(void *opaque
, int ret
)
1092 IDEState
*s
= opaque
;
1094 s
->pio_aiocb
= NULL
;
1097 /* XXX: What sector number to set here? */
1098 if (ide_handle_rw_error(s
, -ret
, IDE_RETRY_FLUSH
)) {
1104 block_acct_done(blk_get_stats(s
->blk
), &s
->acct
);
1106 s
->status
= READY_STAT
| SEEK_STAT
;
1108 ide_set_irq(s
->bus
);
1111 static void ide_flush_cache(IDEState
*s
)
1113 if (s
->blk
== NULL
) {
1118 s
->status
|= BUSY_STAT
;
1120 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
, 0, BLOCK_ACCT_FLUSH
);
1121 s
->pio_aiocb
= blk_aio_flush(s
->blk
, ide_flush_cb
, s
);
1124 static void ide_cfata_metadata_inquiry(IDEState
*s
)
1129 p
= (uint16_t *) s
->io_buffer
;
1130 memset(p
, 0, 0x200);
1131 spd
= ((s
->mdata_size
- 1) >> 9) + 1;
1133 put_le16(p
+ 0, 0x0001); /* Data format revision */
1134 put_le16(p
+ 1, 0x0000); /* Media property: silicon */
1135 put_le16(p
+ 2, s
->media_changed
); /* Media status */
1136 put_le16(p
+ 3, s
->mdata_size
& 0xffff); /* Capacity in bytes (low) */
1137 put_le16(p
+ 4, s
->mdata_size
>> 16); /* Capacity in bytes (high) */
1138 put_le16(p
+ 5, spd
& 0xffff); /* Sectors per device (low) */
1139 put_le16(p
+ 6, spd
>> 16); /* Sectors per device (high) */
1142 static void ide_cfata_metadata_read(IDEState
*s
)
1146 if (((s
->hcyl
<< 16) | s
->lcyl
) << 9 > s
->mdata_size
+ 2) {
1147 s
->status
= ERR_STAT
;
1148 s
->error
= ABRT_ERR
;
1152 p
= (uint16_t *) s
->io_buffer
;
1153 memset(p
, 0, 0x200);
1155 put_le16(p
+ 0, s
->media_changed
); /* Media status */
1156 memcpy(p
+ 1, s
->mdata_storage
+ (((s
->hcyl
<< 16) | s
->lcyl
) << 9),
1157 MIN(MIN(s
->mdata_size
- (((s
->hcyl
<< 16) | s
->lcyl
) << 9),
1158 s
->nsector
<< 9), 0x200 - 2));
1161 static void ide_cfata_metadata_write(IDEState
*s
)
1163 if (((s
->hcyl
<< 16) | s
->lcyl
) << 9 > s
->mdata_size
+ 2) {
1164 s
->status
= ERR_STAT
;
1165 s
->error
= ABRT_ERR
;
1169 s
->media_changed
= 0;
1171 memcpy(s
->mdata_storage
+ (((s
->hcyl
<< 16) | s
->lcyl
) << 9),
1173 MIN(MIN(s
->mdata_size
- (((s
->hcyl
<< 16) | s
->lcyl
) << 9),
1174 s
->nsector
<< 9), 0x200 - 2));
1177 /* called when the inserted state of the media has changed */
1178 static void ide_cd_change_cb(void *opaque
, bool load
, Error
**errp
)
1180 IDEState
*s
= opaque
;
1181 uint64_t nb_sectors
;
1183 s
->tray_open
= !load
;
1184 blk_get_geometry(s
->blk
, &nb_sectors
);
1185 s
->nb_sectors
= nb_sectors
;
1188 * First indicate to the guest that a CD has been removed. That's
1189 * done on the next command the guest sends us.
1191 * Then we set UNIT_ATTENTION, by which the guest will
1192 * detect a new CD in the drive. See ide_atapi_cmd() for details.
1194 s
->cdrom_changed
= 1;
1195 s
->events
.new_media
= true;
1196 s
->events
.eject_request
= false;
1197 ide_set_irq(s
->bus
);
1200 static void ide_cd_eject_request_cb(void *opaque
, bool force
)
1202 IDEState
*s
= opaque
;
1204 s
->events
.eject_request
= true;
1206 s
->tray_locked
= false;
1208 ide_set_irq(s
->bus
);
1211 static void ide_cmd_lba48_transform(IDEState
*s
, int lba48
)
1215 /* handle the 'magic' 0 nsector count conversion here. to avoid
1216 * fiddling with the rest of the read logic, we just store the
1217 * full sector count in ->nsector and ignore ->hob_nsector from now
1223 if (!s
->nsector
&& !s
->hob_nsector
)
1226 int lo
= s
->nsector
;
1227 int hi
= s
->hob_nsector
;
1229 s
->nsector
= (hi
<< 8) | lo
;
1234 static void ide_clear_hob(IDEBus
*bus
)
1236 /* any write clears HOB high bit of device control register */
1237 bus
->cmd
&= ~(IDE_CTRL_HOB
);
/* IOport [W]rite [R]egisters — task-file register offsets for writes. */
enum ATA_IOPORT_WR {
    ATA_IOPORT_WR_DATA = 0,
    ATA_IOPORT_WR_FEATURES = 1,
    ATA_IOPORT_WR_SECTOR_COUNT = 2,
    ATA_IOPORT_WR_SECTOR_NUMBER = 3,
    ATA_IOPORT_WR_CYLINDER_LOW = 4,
    ATA_IOPORT_WR_CYLINDER_HIGH = 5,
    ATA_IOPORT_WR_DEVICE_HEAD = 6,
    ATA_IOPORT_WR_COMMAND = 7,
    ATA_IOPORT_WR_NUM_REGISTERS,
};
1253 const char *ATA_IOPORT_WR_lookup
[ATA_IOPORT_WR_NUM_REGISTERS
] = {
1254 [ATA_IOPORT_WR_DATA
] = "Data",
1255 [ATA_IOPORT_WR_FEATURES
] = "Features",
1256 [ATA_IOPORT_WR_SECTOR_COUNT
] = "Sector Count",
1257 [ATA_IOPORT_WR_SECTOR_NUMBER
] = "Sector Number",
1258 [ATA_IOPORT_WR_CYLINDER_LOW
] = "Cylinder Low",
1259 [ATA_IOPORT_WR_CYLINDER_HIGH
] = "Cylinder High",
1260 [ATA_IOPORT_WR_DEVICE_HEAD
] = "Device/Head",
1261 [ATA_IOPORT_WR_COMMAND
] = "Command"
1264 void ide_ioport_write(void *opaque
, uint32_t addr
, uint32_t val
)
1266 IDEBus
*bus
= opaque
;
1267 IDEState
*s
= idebus_active_if(bus
);
1268 int reg_num
= addr
& 7;
1270 trace_ide_ioport_write(addr
, ATA_IOPORT_WR_lookup
[reg_num
], val
, bus
, s
);
1272 /* ignore writes to command block while busy with previous command */
1273 if (reg_num
!= 7 && (s
->status
& (BUSY_STAT
|DRQ_STAT
))) {
1277 /* NOTE: Device0 and Device1 both receive incoming register writes.
1278 * (They're on the same bus! They have to!) */
1283 case ATA_IOPORT_WR_FEATURES
:
1285 bus
->ifs
[0].hob_feature
= bus
->ifs
[0].feature
;
1286 bus
->ifs
[1].hob_feature
= bus
->ifs
[1].feature
;
1287 bus
->ifs
[0].feature
= val
;
1288 bus
->ifs
[1].feature
= val
;
1290 case ATA_IOPORT_WR_SECTOR_COUNT
:
1292 bus
->ifs
[0].hob_nsector
= bus
->ifs
[0].nsector
;
1293 bus
->ifs
[1].hob_nsector
= bus
->ifs
[1].nsector
;
1294 bus
->ifs
[0].nsector
= val
;
1295 bus
->ifs
[1].nsector
= val
;
1297 case ATA_IOPORT_WR_SECTOR_NUMBER
:
1299 bus
->ifs
[0].hob_sector
= bus
->ifs
[0].sector
;
1300 bus
->ifs
[1].hob_sector
= bus
->ifs
[1].sector
;
1301 bus
->ifs
[0].sector
= val
;
1302 bus
->ifs
[1].sector
= val
;
1304 case ATA_IOPORT_WR_CYLINDER_LOW
:
1306 bus
->ifs
[0].hob_lcyl
= bus
->ifs
[0].lcyl
;
1307 bus
->ifs
[1].hob_lcyl
= bus
->ifs
[1].lcyl
;
1308 bus
->ifs
[0].lcyl
= val
;
1309 bus
->ifs
[1].lcyl
= val
;
1311 case ATA_IOPORT_WR_CYLINDER_HIGH
:
1313 bus
->ifs
[0].hob_hcyl
= bus
->ifs
[0].hcyl
;
1314 bus
->ifs
[1].hob_hcyl
= bus
->ifs
[1].hcyl
;
1315 bus
->ifs
[0].hcyl
= val
;
1316 bus
->ifs
[1].hcyl
= val
;
1318 case ATA_IOPORT_WR_DEVICE_HEAD
:
1320 bus
->ifs
[0].select
= val
| (ATA_DEV_ALWAYS_ON
);
1321 bus
->ifs
[1].select
= val
| (ATA_DEV_ALWAYS_ON
);
1323 bus
->unit
= (val
& (ATA_DEV_SELECT
)) ? 1 : 0;
1326 case ATA_IOPORT_WR_COMMAND
:
1328 qemu_irq_lower(bus
->irq
);
1329 ide_exec_cmd(bus
, val
);
1334 static void ide_reset(IDEState
*s
)
1339 blk_aio_cancel(s
->pio_aiocb
);
1340 s
->pio_aiocb
= NULL
;
1343 if (s
->reset_reverts
) {
1344 s
->reset_reverts
= false;
1345 s
->heads
= s
->drive_heads
;
1346 s
->sectors
= s
->drive_sectors
;
1348 if (s
->drive_kind
== IDE_CFATA
)
1349 s
->mult_sectors
= 0;
1351 s
->mult_sectors
= MAX_MULT_SECTORS
;
1367 s
->select
= (ATA_DEV_ALWAYS_ON
);
1368 s
->status
= READY_STAT
| SEEK_STAT
;
1372 /* ATAPI specific */
1375 s
->cdrom_changed
= 0;
1376 s
->packet_transfer_size
= 0;
1377 s
->elementary_transfer_size
= 0;
1378 s
->io_buffer_index
= 0;
1379 s
->cd_sector_size
= 0;
1384 s
->io_buffer_size
= 0;
1385 s
->req_nb_sectors
= 0;
1387 ide_set_signature(s
);
1388 /* init the transfer handler so that 0xffff is returned on data
1390 s
->end_transfer_func
= ide_dummy_transfer_stop
;
1391 ide_dummy_transfer_stop(s
);
1392 s
->media_changed
= 0;
1395 static bool cmd_nop(IDEState
*s
, uint8_t cmd
)
1400 static bool cmd_device_reset(IDEState
*s
, uint8_t cmd
)
1402 /* Halt PIO (in the DRQ phase), then DMA */
1403 ide_transfer_halt(s
);
1404 ide_cancel_dma_sync(s
);
1406 /* Reset any PIO commands, reset signature, etc */
1409 /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
1410 * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
1413 /* Do not overwrite status register */
1417 static bool cmd_data_set_management(IDEState
*s
, uint8_t cmd
)
1419 switch (s
->feature
) {
1422 ide_sector_start_dma(s
, IDE_DMA_TRIM
);
1428 ide_abort_command(s
);
1432 static bool cmd_identify(IDEState
*s
, uint8_t cmd
)
1434 if (s
->blk
&& s
->drive_kind
!= IDE_CD
) {
1435 if (s
->drive_kind
!= IDE_CFATA
) {
1438 ide_cfata_identify(s
);
1440 s
->status
= READY_STAT
| SEEK_STAT
;
1441 ide_transfer_start(s
, s
->io_buffer
, 512, ide_transfer_stop
);
1442 ide_set_irq(s
->bus
);
1445 if (s
->drive_kind
== IDE_CD
) {
1446 ide_set_signature(s
);
1448 ide_abort_command(s
);
1454 static bool cmd_verify(IDEState
*s
, uint8_t cmd
)
1456 bool lba48
= (cmd
== WIN_VERIFY_EXT
);
1458 /* do sector number check ? */
1459 ide_cmd_lba48_transform(s
, lba48
);
1464 static bool cmd_set_multiple_mode(IDEState
*s
, uint8_t cmd
)
1466 if (s
->drive_kind
== IDE_CFATA
&& s
->nsector
== 0) {
1467 /* Disable Read and Write Multiple */
1468 s
->mult_sectors
= 0;
1469 } else if ((s
->nsector
& 0xff) != 0 &&
1470 ((s
->nsector
& 0xff) > MAX_MULT_SECTORS
||
1471 (s
->nsector
& (s
->nsector
- 1)) != 0)) {
1472 ide_abort_command(s
);
1474 s
->mult_sectors
= s
->nsector
& 0xff;
1480 static bool cmd_read_multiple(IDEState
*s
, uint8_t cmd
)
1482 bool lba48
= (cmd
== WIN_MULTREAD_EXT
);
1484 if (!s
->blk
|| !s
->mult_sectors
) {
1485 ide_abort_command(s
);
1489 ide_cmd_lba48_transform(s
, lba48
);
1490 s
->req_nb_sectors
= s
->mult_sectors
;
1495 static bool cmd_write_multiple(IDEState
*s
, uint8_t cmd
)
1497 bool lba48
= (cmd
== WIN_MULTWRITE_EXT
);
1500 if (!s
->blk
|| !s
->mult_sectors
) {
1501 ide_abort_command(s
);
1505 ide_cmd_lba48_transform(s
, lba48
);
1507 s
->req_nb_sectors
= s
->mult_sectors
;
1508 n
= MIN(s
->nsector
, s
->req_nb_sectors
);
1510 s
->status
= SEEK_STAT
| READY_STAT
;
1511 ide_transfer_start(s
, s
->io_buffer
, 512 * n
, ide_sector_write
);
1513 s
->media_changed
= 1;
1518 static bool cmd_read_pio(IDEState
*s
, uint8_t cmd
)
1520 bool lba48
= (cmd
== WIN_READ_EXT
);
1522 if (s
->drive_kind
== IDE_CD
) {
1523 ide_set_signature(s
); /* odd, but ATA4 8.27.5.2 requires it */
1524 ide_abort_command(s
);
1529 ide_abort_command(s
);
1533 ide_cmd_lba48_transform(s
, lba48
);
1534 s
->req_nb_sectors
= 1;
1540 static bool cmd_write_pio(IDEState
*s
, uint8_t cmd
)
1542 bool lba48
= (cmd
== WIN_WRITE_EXT
);
1545 ide_abort_command(s
);
1549 ide_cmd_lba48_transform(s
, lba48
);
1551 s
->req_nb_sectors
= 1;
1552 s
->status
= SEEK_STAT
| READY_STAT
;
1553 ide_transfer_start(s
, s
->io_buffer
, 512, ide_sector_write
);
1555 s
->media_changed
= 1;
1560 static bool cmd_read_dma(IDEState
*s
, uint8_t cmd
)
1562 bool lba48
= (cmd
== WIN_READDMA_EXT
);
1565 ide_abort_command(s
);
1569 ide_cmd_lba48_transform(s
, lba48
);
1570 ide_sector_start_dma(s
, IDE_DMA_READ
);
1575 static bool cmd_write_dma(IDEState
*s
, uint8_t cmd
)
1577 bool lba48
= (cmd
== WIN_WRITEDMA_EXT
);
1580 ide_abort_command(s
);
1584 ide_cmd_lba48_transform(s
, lba48
);
1585 ide_sector_start_dma(s
, IDE_DMA_WRITE
);
1587 s
->media_changed
= 1;
1592 static bool cmd_flush_cache(IDEState
*s
, uint8_t cmd
)
1598 static bool cmd_seek(IDEState
*s
, uint8_t cmd
)
1600 /* XXX: Check that seek is within bounds */
1604 static bool cmd_read_native_max(IDEState
*s
, uint8_t cmd
)
1606 bool lba48
= (cmd
== WIN_READ_NATIVE_MAX_EXT
);
1608 /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1609 if (s
->nb_sectors
== 0) {
1610 ide_abort_command(s
);
1614 ide_cmd_lba48_transform(s
, lba48
);
1615 ide_set_sector(s
, s
->nb_sectors
- 1);
1620 static bool cmd_check_power_mode(IDEState
*s
, uint8_t cmd
)
1622 s
->nsector
= 0xff; /* device active or idle */
1626 /* INITIALIZE DEVICE PARAMETERS */
1627 static bool cmd_specify(IDEState
*s
, uint8_t cmd
)
1629 if (s
->blk
&& s
->drive_kind
!= IDE_CD
) {
1630 s
->heads
= (s
->select
& (ATA_DEV_HS
)) + 1;
1631 s
->sectors
= s
->nsector
;
1632 ide_set_irq(s
->bus
);
1634 ide_abort_command(s
);
1640 static bool cmd_set_features(IDEState
*s
, uint8_t cmd
)
1642 uint16_t *identify_data
;
1645 ide_abort_command(s
);
1649 /* XXX: valid for CDROM ? */
1650 switch (s
->feature
) {
1651 case 0x02: /* write cache enable */
1652 blk_set_enable_write_cache(s
->blk
, true);
1653 identify_data
= (uint16_t *)s
->identify_data
;
1654 put_le16(identify_data
+ 85, (1 << 14) | (1 << 5) | 1);
1656 case 0x82: /* write cache disable */
1657 blk_set_enable_write_cache(s
->blk
, false);
1658 identify_data
= (uint16_t *)s
->identify_data
;
1659 put_le16(identify_data
+ 85, (1 << 14) | 1);
1662 case 0xcc: /* reverting to power-on defaults enable */
1663 s
->reset_reverts
= true;
1665 case 0x66: /* reverting to power-on defaults disable */
1666 s
->reset_reverts
= false;
1668 case 0xaa: /* read look-ahead enable */
1669 case 0x55: /* read look-ahead disable */
1670 case 0x05: /* set advanced power management mode */
1671 case 0x85: /* disable advanced power management mode */
1672 case 0x69: /* NOP */
1673 case 0x67: /* NOP */
1674 case 0x96: /* NOP */
1675 case 0x9a: /* NOP */
1676 case 0x42: /* enable Automatic Acoustic Mode */
1677 case 0xc2: /* disable Automatic Acoustic Mode */
1679 case 0x03: /* set transfer mode */
1681 uint8_t val
= s
->nsector
& 0x07;
1682 identify_data
= (uint16_t *)s
->identify_data
;
1684 switch (s
->nsector
>> 3) {
1685 case 0x00: /* pio default */
1686 case 0x01: /* pio mode */
1687 put_le16(identify_data
+ 62, 0x07);
1688 put_le16(identify_data
+ 63, 0x07);
1689 put_le16(identify_data
+ 88, 0x3f);
1691 case 0x02: /* sigle word dma mode*/
1692 put_le16(identify_data
+ 62, 0x07 | (1 << (val
+ 8)));
1693 put_le16(identify_data
+ 63, 0x07);
1694 put_le16(identify_data
+ 88, 0x3f);
1696 case 0x04: /* mdma mode */
1697 put_le16(identify_data
+ 62, 0x07);
1698 put_le16(identify_data
+ 63, 0x07 | (1 << (val
+ 8)));
1699 put_le16(identify_data
+ 88, 0x3f);
1701 case 0x08: /* udma mode */
1702 put_le16(identify_data
+ 62, 0x07);
1703 put_le16(identify_data
+ 63, 0x07);
1704 put_le16(identify_data
+ 88, 0x3f | (1 << (val
+ 8)));
1714 ide_abort_command(s
);
1719 /*** ATAPI commands ***/
1721 static bool cmd_identify_packet(IDEState
*s
, uint8_t cmd
)
1723 ide_atapi_identify(s
);
1724 s
->status
= READY_STAT
| SEEK_STAT
;
1725 ide_transfer_start(s
, s
->io_buffer
, 512, ide_transfer_stop
);
1726 ide_set_irq(s
->bus
);
1730 /* EXECUTE DEVICE DIAGNOSTIC */
1731 static bool cmd_exec_dev_diagnostic(IDEState
*s
, uint8_t cmd
)
1734 * Clear the device register per the ATA (v6) specification,
1735 * because ide_set_signature does not clear LBA or drive bits.
1737 s
->select
= (ATA_DEV_ALWAYS_ON
);
1738 ide_set_signature(s
);
1740 if (s
->drive_kind
== IDE_CD
) {
1741 s
->status
= 0; /* ATAPI spec (v6) section 9.10 defines packet
1742 * devices to return a clear status register
1743 * with READY_STAT *not* set. */
1746 s
->status
= READY_STAT
| SEEK_STAT
;
1747 /* The bits of the error register are not as usual for this command!
1748 * They are part of the regular output (this is why ERR_STAT isn't set)
1749 * Device 0 passed, Device 1 passed or not present. */
1751 ide_set_irq(s
->bus
);
1757 static bool cmd_packet(IDEState
*s
, uint8_t cmd
)
1759 /* overlapping commands not supported */
1760 if (s
->feature
& 0x02) {
1761 ide_abort_command(s
);
1765 s
->status
= READY_STAT
| SEEK_STAT
;
1766 s
->atapi_dma
= s
->feature
& 1;
1768 s
->dma_cmd
= IDE_DMA_ATAPI
;
1771 ide_transfer_start(s
, s
->io_buffer
, ATAPI_PACKET_SIZE
,
1777 /*** CF-ATA commands ***/
1779 static bool cmd_cfa_req_ext_error_code(IDEState
*s
, uint8_t cmd
)
1781 s
->error
= 0x09; /* miscellaneous error */
1782 s
->status
= READY_STAT
| SEEK_STAT
;
1783 ide_set_irq(s
->bus
);
1788 static bool cmd_cfa_erase_sectors(IDEState
*s
, uint8_t cmd
)
1790 /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1791 * required for Windows 8 to work with AHCI */
1793 if (cmd
== CFA_WEAR_LEVEL
) {
1797 if (cmd
== CFA_ERASE_SECTORS
) {
1798 s
->media_changed
= 1;
1804 static bool cmd_cfa_translate_sector(IDEState
*s
, uint8_t cmd
)
1806 s
->status
= READY_STAT
| SEEK_STAT
;
1808 memset(s
->io_buffer
, 0, 0x200);
1809 s
->io_buffer
[0x00] = s
->hcyl
; /* Cyl MSB */
1810 s
->io_buffer
[0x01] = s
->lcyl
; /* Cyl LSB */
1811 s
->io_buffer
[0x02] = s
->select
; /* Head */
1812 s
->io_buffer
[0x03] = s
->sector
; /* Sector */
1813 s
->io_buffer
[0x04] = ide_get_sector(s
) >> 16; /* LBA MSB */
1814 s
->io_buffer
[0x05] = ide_get_sector(s
) >> 8; /* LBA */
1815 s
->io_buffer
[0x06] = ide_get_sector(s
) >> 0; /* LBA LSB */
1816 s
->io_buffer
[0x13] = 0x00; /* Erase flag */
1817 s
->io_buffer
[0x18] = 0x00; /* Hot count */
1818 s
->io_buffer
[0x19] = 0x00; /* Hot count */
1819 s
->io_buffer
[0x1a] = 0x01; /* Hot count */
1821 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1822 ide_set_irq(s
->bus
);
1827 static bool cmd_cfa_access_metadata_storage(IDEState
*s
, uint8_t cmd
)
1829 switch (s
->feature
) {
1830 case 0x02: /* Inquiry Metadata Storage */
1831 ide_cfata_metadata_inquiry(s
);
1833 case 0x03: /* Read Metadata Storage */
1834 ide_cfata_metadata_read(s
);
1836 case 0x04: /* Write Metadata Storage */
1837 ide_cfata_metadata_write(s
);
1840 ide_abort_command(s
);
1844 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1845 s
->status
= 0x00; /* NOTE: READY is _not_ set */
1846 ide_set_irq(s
->bus
);
1851 static bool cmd_ibm_sense_condition(IDEState
*s
, uint8_t cmd
)
1853 switch (s
->feature
) {
1854 case 0x01: /* sense temperature in device */
1855 s
->nsector
= 0x50; /* +20 C */
1858 ide_abort_command(s
);
1866 /*** SMART commands ***/
1868 static bool cmd_smart(IDEState
*s
, uint8_t cmd
)
1872 if (s
->hcyl
!= 0xc2 || s
->lcyl
!= 0x4f) {
1876 if (!s
->smart_enabled
&& s
->feature
!= SMART_ENABLE
) {
1880 switch (s
->feature
) {
1882 s
->smart_enabled
= 0;
1886 s
->smart_enabled
= 1;
1889 case SMART_ATTR_AUTOSAVE
:
1890 switch (s
->sector
) {
1892 s
->smart_autosave
= 0;
1895 s
->smart_autosave
= 1;
1903 if (!s
->smart_errors
) {
1912 case SMART_READ_THRESH
:
1913 memset(s
->io_buffer
, 0, 0x200);
1914 s
->io_buffer
[0] = 0x01; /* smart struct version */
1916 for (n
= 0; n
< ARRAY_SIZE(smart_attributes
); n
++) {
1917 s
->io_buffer
[2 + 0 + (n
* 12)] = smart_attributes
[n
][0];
1918 s
->io_buffer
[2 + 1 + (n
* 12)] = smart_attributes
[n
][11];
1922 for (n
= 0; n
< 511; n
++) {
1923 s
->io_buffer
[511] += s
->io_buffer
[n
];
1925 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
1927 s
->status
= READY_STAT
| SEEK_STAT
;
1928 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1929 ide_set_irq(s
->bus
);
1932 case SMART_READ_DATA
:
1933 memset(s
->io_buffer
, 0, 0x200);
1934 s
->io_buffer
[0] = 0x01; /* smart struct version */
1936 for (n
= 0; n
< ARRAY_SIZE(smart_attributes
); n
++) {
1938 for (i
= 0; i
< 11; i
++) {
1939 s
->io_buffer
[2 + i
+ (n
* 12)] = smart_attributes
[n
][i
];
1943 s
->io_buffer
[362] = 0x02 | (s
->smart_autosave
? 0x80 : 0x00);
1944 if (s
->smart_selftest_count
== 0) {
1945 s
->io_buffer
[363] = 0;
1948 s
->smart_selftest_data
[3 +
1949 (s
->smart_selftest_count
- 1) *
1952 s
->io_buffer
[364] = 0x20;
1953 s
->io_buffer
[365] = 0x01;
1954 /* offline data collection capacity: execute + self-test*/
1955 s
->io_buffer
[367] = (1 << 4 | 1 << 3 | 1);
1956 s
->io_buffer
[368] = 0x03; /* smart capability (1) */
1957 s
->io_buffer
[369] = 0x00; /* smart capability (2) */
1958 s
->io_buffer
[370] = 0x01; /* error logging supported */
1959 s
->io_buffer
[372] = 0x02; /* minutes for poll short test */
1960 s
->io_buffer
[373] = 0x36; /* minutes for poll ext test */
1961 s
->io_buffer
[374] = 0x01; /* minutes for poll conveyance */
1963 for (n
= 0; n
< 511; n
++) {
1964 s
->io_buffer
[511] += s
->io_buffer
[n
];
1966 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
1968 s
->status
= READY_STAT
| SEEK_STAT
;
1969 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1970 ide_set_irq(s
->bus
);
1973 case SMART_READ_LOG
:
1974 switch (s
->sector
) {
1975 case 0x01: /* summary smart error log */
1976 memset(s
->io_buffer
, 0, 0x200);
1977 s
->io_buffer
[0] = 0x01;
1978 s
->io_buffer
[1] = 0x00; /* no error entries */
1979 s
->io_buffer
[452] = s
->smart_errors
& 0xff;
1980 s
->io_buffer
[453] = (s
->smart_errors
& 0xff00) >> 8;
1982 for (n
= 0; n
< 511; n
++) {
1983 s
->io_buffer
[511] += s
->io_buffer
[n
];
1985 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
1987 case 0x06: /* smart self test log */
1988 memset(s
->io_buffer
, 0, 0x200);
1989 s
->io_buffer
[0] = 0x01;
1990 if (s
->smart_selftest_count
== 0) {
1991 s
->io_buffer
[508] = 0;
1993 s
->io_buffer
[508] = s
->smart_selftest_count
;
1994 for (n
= 2; n
< 506; n
++) {
1995 s
->io_buffer
[n
] = s
->smart_selftest_data
[n
];
1999 for (n
= 0; n
< 511; n
++) {
2000 s
->io_buffer
[511] += s
->io_buffer
[n
];
2002 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
2007 s
->status
= READY_STAT
| SEEK_STAT
;
2008 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
2009 ide_set_irq(s
->bus
);
2012 case SMART_EXECUTE_OFFLINE
:
2013 switch (s
->sector
) {
2014 case 0: /* off-line routine */
2015 case 1: /* short self test */
2016 case 2: /* extended self test */
2017 s
->smart_selftest_count
++;
2018 if (s
->smart_selftest_count
> 21) {
2019 s
->smart_selftest_count
= 1;
2021 n
= 2 + (s
->smart_selftest_count
- 1) * 24;
2022 s
->smart_selftest_data
[n
] = s
->sector
;
2023 s
->smart_selftest_data
[n
+ 1] = 0x00; /* OK and finished */
2024 s
->smart_selftest_data
[n
+ 2] = 0x34; /* hour count lsb */
2025 s
->smart_selftest_data
[n
+ 3] = 0x12; /* hour count msb */
2034 ide_abort_command(s
);
2038 #define HD_OK (1u << IDE_HD)
2039 #define CD_OK (1u << IDE_CD)
2040 #define CFA_OK (1u << IDE_CFATA)
2041 #define HD_CFA_OK (HD_OK | CFA_OK)
2042 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
2044 /* Set the Disk Seek Completed status bit during completion */
2045 #define SET_DSC (1u << 8)
2047 /* See ACS-2 T13/2015-D Table B.2 Command codes */
2048 static const struct {
2049 /* Returns true if the completion code should be run */
2050 bool (*handler
)(IDEState
*s
, uint8_t cmd
);
2052 } ide_cmd_table
[0x100] = {
2053 /* NOP not implemented, mandatory for CD */
2054 [CFA_REQ_EXT_ERROR_CODE
] = { cmd_cfa_req_ext_error_code
, CFA_OK
},
2055 [WIN_DSM
] = { cmd_data_set_management
, HD_CFA_OK
},
2056 [WIN_DEVICE_RESET
] = { cmd_device_reset
, CD_OK
},
2057 [WIN_RECAL
] = { cmd_nop
, HD_CFA_OK
| SET_DSC
},
2058 [WIN_READ
] = { cmd_read_pio
, ALL_OK
},
2059 [WIN_READ_ONCE
] = { cmd_read_pio
, HD_CFA_OK
},
2060 [WIN_READ_EXT
] = { cmd_read_pio
, HD_CFA_OK
},
2061 [WIN_READDMA_EXT
] = { cmd_read_dma
, HD_CFA_OK
},
2062 [WIN_READ_NATIVE_MAX_EXT
] = { cmd_read_native_max
, HD_CFA_OK
| SET_DSC
},
2063 [WIN_MULTREAD_EXT
] = { cmd_read_multiple
, HD_CFA_OK
},
2064 [WIN_WRITE
] = { cmd_write_pio
, HD_CFA_OK
},
2065 [WIN_WRITE_ONCE
] = { cmd_write_pio
, HD_CFA_OK
},
2066 [WIN_WRITE_EXT
] = { cmd_write_pio
, HD_CFA_OK
},
2067 [WIN_WRITEDMA_EXT
] = { cmd_write_dma
, HD_CFA_OK
},
2068 [CFA_WRITE_SECT_WO_ERASE
] = { cmd_write_pio
, CFA_OK
},
2069 [WIN_MULTWRITE_EXT
] = { cmd_write_multiple
, HD_CFA_OK
},
2070 [WIN_WRITE_VERIFY
] = { cmd_write_pio
, HD_CFA_OK
},
2071 [WIN_VERIFY
] = { cmd_verify
, HD_CFA_OK
| SET_DSC
},
2072 [WIN_VERIFY_ONCE
] = { cmd_verify
, HD_CFA_OK
| SET_DSC
},
2073 [WIN_VERIFY_EXT
] = { cmd_verify
, HD_CFA_OK
| SET_DSC
},
2074 [WIN_SEEK
] = { cmd_seek
, HD_CFA_OK
| SET_DSC
},
2075 [CFA_TRANSLATE_SECTOR
] = { cmd_cfa_translate_sector
, CFA_OK
},
2076 [WIN_DIAGNOSE
] = { cmd_exec_dev_diagnostic
, ALL_OK
},
2077 [WIN_SPECIFY
] = { cmd_specify
, HD_CFA_OK
| SET_DSC
},
2078 [WIN_STANDBYNOW2
] = { cmd_nop
, HD_CFA_OK
},
2079 [WIN_IDLEIMMEDIATE2
] = { cmd_nop
, HD_CFA_OK
},
2080 [WIN_STANDBY2
] = { cmd_nop
, HD_CFA_OK
},
2081 [WIN_SETIDLE2
] = { cmd_nop
, HD_CFA_OK
},
2082 [WIN_CHECKPOWERMODE2
] = { cmd_check_power_mode
, HD_CFA_OK
| SET_DSC
},
2083 [WIN_SLEEPNOW2
] = { cmd_nop
, HD_CFA_OK
},
2084 [WIN_PACKETCMD
] = { cmd_packet
, CD_OK
},
2085 [WIN_PIDENTIFY
] = { cmd_identify_packet
, CD_OK
},
2086 [WIN_SMART
] = { cmd_smart
, HD_CFA_OK
| SET_DSC
},
2087 [CFA_ACCESS_METADATA_STORAGE
] = { cmd_cfa_access_metadata_storage
, CFA_OK
},
2088 [CFA_ERASE_SECTORS
] = { cmd_cfa_erase_sectors
, CFA_OK
| SET_DSC
},
2089 [WIN_MULTREAD
] = { cmd_read_multiple
, HD_CFA_OK
},
2090 [WIN_MULTWRITE
] = { cmd_write_multiple
, HD_CFA_OK
},
2091 [WIN_SETMULT
] = { cmd_set_multiple_mode
, HD_CFA_OK
| SET_DSC
},
2092 [WIN_READDMA
] = { cmd_read_dma
, HD_CFA_OK
},
2093 [WIN_READDMA_ONCE
] = { cmd_read_dma
, HD_CFA_OK
},
2094 [WIN_WRITEDMA
] = { cmd_write_dma
, HD_CFA_OK
},
2095 [WIN_WRITEDMA_ONCE
] = { cmd_write_dma
, HD_CFA_OK
},
2096 [CFA_WRITE_MULTI_WO_ERASE
] = { cmd_write_multiple
, CFA_OK
},
2097 [WIN_STANDBYNOW1
] = { cmd_nop
, HD_CFA_OK
},
2098 [WIN_IDLEIMMEDIATE
] = { cmd_nop
, HD_CFA_OK
},
2099 [WIN_STANDBY
] = { cmd_nop
, HD_CFA_OK
},
2100 [WIN_SETIDLE1
] = { cmd_nop
, HD_CFA_OK
},
2101 [WIN_CHECKPOWERMODE1
] = { cmd_check_power_mode
, HD_CFA_OK
| SET_DSC
},
2102 [WIN_SLEEPNOW1
] = { cmd_nop
, HD_CFA_OK
},
2103 [WIN_FLUSH_CACHE
] = { cmd_flush_cache
, ALL_OK
},
2104 [WIN_FLUSH_CACHE_EXT
] = { cmd_flush_cache
, HD_CFA_OK
},
2105 [WIN_IDENTIFY
] = { cmd_identify
, ALL_OK
},
2106 [WIN_SETFEATURES
] = { cmd_set_features
, ALL_OK
| SET_DSC
},
2107 [IBM_SENSE_CONDITION
] = { cmd_ibm_sense_condition
, CFA_OK
| SET_DSC
},
2108 [CFA_WEAR_LEVEL
] = { cmd_cfa_erase_sectors
, HD_CFA_OK
| SET_DSC
},
2109 [WIN_READ_NATIVE_MAX
] = { cmd_read_native_max
, HD_CFA_OK
| SET_DSC
},
2112 static bool ide_cmd_permitted(IDEState
*s
, uint32_t cmd
)
2114 return cmd
< ARRAY_SIZE(ide_cmd_table
)
2115 && (ide_cmd_table
[cmd
].flags
& (1u << s
->drive_kind
));
2118 void ide_exec_cmd(IDEBus
*bus
, uint32_t val
)
2123 s
= idebus_active_if(bus
);
2124 trace_ide_exec_cmd(bus
, s
, val
);
2126 /* ignore commands to non existent slave */
2127 if (s
!= bus
->ifs
&& !s
->blk
) {
2131 /* Only RESET is allowed while BSY and/or DRQ are set,
2132 * and only to ATAPI devices. */
2133 if (s
->status
& (BUSY_STAT
|DRQ_STAT
)) {
2134 if (val
!= WIN_DEVICE_RESET
|| s
->drive_kind
!= IDE_CD
) {
2139 if (!ide_cmd_permitted(s
, val
)) {
2140 ide_abort_command(s
);
2141 ide_set_irq(s
->bus
);
2145 s
->status
= READY_STAT
| BUSY_STAT
;
2147 s
->io_buffer_offset
= 0;
2149 complete
= ide_cmd_table
[val
].handler(s
, val
);
2151 s
->status
&= ~BUSY_STAT
;
2152 assert(!!s
->error
== !!(s
->status
& ERR_STAT
));
2154 if ((ide_cmd_table
[val
].flags
& SET_DSC
) && !s
->error
) {
2155 s
->status
|= SEEK_STAT
;
2159 ide_set_irq(s
->bus
);
/* IOport [R]ead [R]egisters — task-file register offsets for reads. */
enum ATA_IOPORT_RR {
    ATA_IOPORT_RR_DATA = 0,
    ATA_IOPORT_RR_ERROR = 1,
    ATA_IOPORT_RR_SECTOR_COUNT = 2,
    ATA_IOPORT_RR_SECTOR_NUMBER = 3,
    ATA_IOPORT_RR_CYLINDER_LOW = 4,
    ATA_IOPORT_RR_CYLINDER_HIGH = 5,
    ATA_IOPORT_RR_DEVICE_HEAD = 6,
    ATA_IOPORT_RR_STATUS = 7,
    ATA_IOPORT_RR_NUM_REGISTERS,
};
2176 const char *ATA_IOPORT_RR_lookup
[ATA_IOPORT_RR_NUM_REGISTERS
] = {
2177 [ATA_IOPORT_RR_DATA
] = "Data",
2178 [ATA_IOPORT_RR_ERROR
] = "Error",
2179 [ATA_IOPORT_RR_SECTOR_COUNT
] = "Sector Count",
2180 [ATA_IOPORT_RR_SECTOR_NUMBER
] = "Sector Number",
2181 [ATA_IOPORT_RR_CYLINDER_LOW
] = "Cylinder Low",
2182 [ATA_IOPORT_RR_CYLINDER_HIGH
] = "Cylinder High",
2183 [ATA_IOPORT_RR_DEVICE_HEAD
] = "Device/Head",
2184 [ATA_IOPORT_RR_STATUS
] = "Status"
2187 uint32_t ide_ioport_read(void *opaque
, uint32_t addr
)
2189 IDEBus
*bus
= opaque
;
2190 IDEState
*s
= idebus_active_if(bus
);
2195 hob
= bus
->cmd
& (IDE_CTRL_HOB
);
2197 case ATA_IOPORT_RR_DATA
:
2199 * The pre-GRUB Solaris x86 bootloader relies upon inb
2200 * consuming a word from the drive's sector buffer.
2202 ret
= ide_data_readw(bus
, addr
) & 0xff;
2204 case ATA_IOPORT_RR_ERROR
:
2205 if ((!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) ||
2206 (s
!= bus
->ifs
&& !s
->blk
)) {
2211 ret
= s
->hob_feature
;
2214 case ATA_IOPORT_RR_SECTOR_COUNT
:
2215 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
2218 ret
= s
->nsector
& 0xff;
2220 ret
= s
->hob_nsector
;
2223 case ATA_IOPORT_RR_SECTOR_NUMBER
:
2224 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
2229 ret
= s
->hob_sector
;
2232 case ATA_IOPORT_RR_CYLINDER_LOW
:
2233 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
2241 case ATA_IOPORT_RR_CYLINDER_HIGH
:
2242 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
2250 case ATA_IOPORT_RR_DEVICE_HEAD
:
2251 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
2258 case ATA_IOPORT_RR_STATUS
:
2259 if ((!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) ||
2260 (s
!= bus
->ifs
&& !s
->blk
)) {
2265 qemu_irq_lower(bus
->irq
);
2269 trace_ide_ioport_read(addr
, ATA_IOPORT_RR_lookup
[reg_num
], ret
, bus
, s
);
2273 uint32_t ide_status_read(void *opaque
, uint32_t addr
)
2275 IDEBus
*bus
= opaque
;
2276 IDEState
*s
= idebus_active_if(bus
);
2279 if ((!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) ||
2280 (s
!= bus
->ifs
&& !s
->blk
)) {
2286 trace_ide_status_read(addr
, ret
, bus
, s
);
2290 static void ide_perform_srst(IDEState
*s
)
2292 s
->status
|= BUSY_STAT
;
2294 /* Halt PIO (Via register state); PIO BH remains scheduled. */
2295 ide_transfer_halt(s
);
2297 /* Cancel DMA -- may drain block device and invoke callbacks */
2298 ide_cancel_dma_sync(s
);
2300 /* Cancel PIO callback, reset registers/signature, etc */
2303 /* perform diagnostic */
2304 cmd_exec_dev_diagnostic(s
, WIN_DIAGNOSE
);
2307 static void ide_bus_perform_srst(void *opaque
)
2309 IDEBus
*bus
= opaque
;
2313 for (i
= 0; i
< 2; i
++) {
2315 ide_perform_srst(s
);
2318 bus
->cmd
&= ~IDE_CTRL_RESET
;
2321 void ide_ctrl_write(void *opaque
, uint32_t addr
, uint32_t val
)
2323 IDEBus
*bus
= opaque
;
2327 trace_ide_ctrl_write(addr
, val
, bus
);
2329 /* Device0 and Device1 each have their own control register,
2330 * but QEMU models it as just one register in the controller. */
2331 if (!(bus
->cmd
& IDE_CTRL_RESET
) && (val
& IDE_CTRL_RESET
)) {
2332 for (i
= 0; i
< 2; i
++) {
2334 s
->status
|= BUSY_STAT
;
2336 replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
2337 ide_bus_perform_srst
, bus
);
2344 * Returns true if the running PIO transfer is a PIO out (i.e. data is
2345 * transferred from the device to the guest), false if it's a PIO in
2347 static bool ide_is_pio_out(IDEState
*s
)
2349 if (s
->end_transfer_func
== ide_sector_write
||
2350 s
->end_transfer_func
== ide_atapi_cmd
) {
2352 } else if (s
->end_transfer_func
== ide_sector_read
||
2353 s
->end_transfer_func
== ide_transfer_stop
||
2354 s
->end_transfer_func
== ide_atapi_cmd_reply_end
||
2355 s
->end_transfer_func
== ide_dummy_transfer_stop
) {
2362 void ide_data_writew(void *opaque
, uint32_t addr
, uint32_t val
)
2364 IDEBus
*bus
= opaque
;
2365 IDEState
*s
= idebus_active_if(bus
);
2368 trace_ide_data_writew(addr
, val
, bus
, s
);
2370 /* PIO data access allowed only when DRQ bit is set. The result of a write
2371 * during PIO out is indeterminate, just ignore it. */
2372 if (!(s
->status
& DRQ_STAT
) || ide_is_pio_out(s
)) {
2377 if (p
+ 2 > s
->data_end
) {
2381 *(uint16_t *)p
= le16_to_cpu(val
);
2384 if (p
>= s
->data_end
) {
2385 s
->status
&= ~DRQ_STAT
;
2386 s
->end_transfer_func(s
);
2390 uint32_t ide_data_readw(void *opaque
, uint32_t addr
)
2392 IDEBus
*bus
= opaque
;
2393 IDEState
*s
= idebus_active_if(bus
);
2397 /* PIO data access allowed only when DRQ bit is set. The result of a read
2398 * during PIO in is indeterminate, return 0 and don't move forward. */
2399 if (!(s
->status
& DRQ_STAT
) || !ide_is_pio_out(s
)) {
2404 if (p
+ 2 > s
->data_end
) {
2408 ret
= cpu_to_le16(*(uint16_t *)p
);
2411 if (p
>= s
->data_end
) {
2412 s
->status
&= ~DRQ_STAT
;
2413 s
->end_transfer_func(s
);
2416 trace_ide_data_readw(addr
, ret
, bus
, s
);
2420 void ide_data_writel(void *opaque
, uint32_t addr
, uint32_t val
)
2422 IDEBus
*bus
= opaque
;
2423 IDEState
*s
= idebus_active_if(bus
);
2426 trace_ide_data_writel(addr
, val
, bus
, s
);
2428 /* PIO data access allowed only when DRQ bit is set. The result of a write
2429 * during PIO out is indeterminate, just ignore it. */
2430 if (!(s
->status
& DRQ_STAT
) || ide_is_pio_out(s
)) {
2435 if (p
+ 4 > s
->data_end
) {
2439 *(uint32_t *)p
= le32_to_cpu(val
);
2442 if (p
>= s
->data_end
) {
2443 s
->status
&= ~DRQ_STAT
;
2444 s
->end_transfer_func(s
);
2448 uint32_t ide_data_readl(void *opaque
, uint32_t addr
)
2450 IDEBus
*bus
= opaque
;
2451 IDEState
*s
= idebus_active_if(bus
);
2455 /* PIO data access allowed only when DRQ bit is set. The result of a read
2456 * during PIO in is indeterminate, return 0 and don't move forward. */
2457 if (!(s
->status
& DRQ_STAT
) || !ide_is_pio_out(s
)) {
2463 if (p
+ 4 > s
->data_end
) {
2467 ret
= cpu_to_le32(*(uint32_t *)p
);
2470 if (p
>= s
->data_end
) {
2471 s
->status
&= ~DRQ_STAT
;
2472 s
->end_transfer_func(s
);
2476 trace_ide_data_readl(addr
, ret
, bus
, s
);
2480 static void ide_dummy_transfer_stop(IDEState
*s
)
2482 s
->data_ptr
= s
->io_buffer
;
2483 s
->data_end
= s
->io_buffer
;
2484 s
->io_buffer
[0] = 0xff;
2485 s
->io_buffer
[1] = 0xff;
2486 s
->io_buffer
[2] = 0xff;
2487 s
->io_buffer
[3] = 0xff;
2490 void ide_bus_reset(IDEBus
*bus
)
2494 ide_reset(&bus
->ifs
[0]);
2495 ide_reset(&bus
->ifs
[1]);
2498 /* pending async DMA */
2499 if (bus
->dma
->aiocb
) {
2500 trace_ide_bus_reset_aio();
2501 blk_aio_cancel(bus
->dma
->aiocb
);
2502 bus
->dma
->aiocb
= NULL
;
2505 /* reset dma provider too */
2506 if (bus
->dma
->ops
->reset
) {
2507 bus
->dma
->ops
->reset(bus
->dma
);
2511 static bool ide_cd_is_tray_open(void *opaque
)
2513 return ((IDEState
*)opaque
)->tray_open
;
2516 static bool ide_cd_is_medium_locked(void *opaque
)
2518 return ((IDEState
*)opaque
)->tray_locked
;
2521 static void ide_resize_cb(void *opaque
)
2523 IDEState
*s
= opaque
;
2524 uint64_t nb_sectors
;
2526 if (!s
->identify_set
) {
2530 blk_get_geometry(s
->blk
, &nb_sectors
);
2531 s
->nb_sectors
= nb_sectors
;
2533 /* Update the identify data buffer. */
2534 if (s
->drive_kind
== IDE_CFATA
) {
2535 ide_cfata_identify_size(s
);
2537 /* IDE_CD uses a different set of callbacks entirely. */
2538 assert(s
->drive_kind
!= IDE_CD
);
2539 ide_identify_size(s
);
2543 static const BlockDevOps ide_cd_block_ops
= {
2544 .change_media_cb
= ide_cd_change_cb
,
2545 .eject_request_cb
= ide_cd_eject_request_cb
,
2546 .is_tray_open
= ide_cd_is_tray_open
,
2547 .is_medium_locked
= ide_cd_is_medium_locked
,
2550 static const BlockDevOps ide_hd_block_ops
= {
2551 .resize_cb
= ide_resize_cb
,
2554 int ide_init_drive(IDEState
*s
, BlockBackend
*blk
, IDEDriveKind kind
,
2555 const char *version
, const char *serial
, const char *model
,
2557 uint32_t cylinders
, uint32_t heads
, uint32_t secs
,
2558 int chs_trans
, Error
**errp
)
2560 uint64_t nb_sectors
;
2563 s
->drive_kind
= kind
;
2565 blk_get_geometry(blk
, &nb_sectors
);
2566 s
->cylinders
= cylinders
;
2567 s
->heads
= s
->drive_heads
= heads
;
2568 s
->sectors
= s
->drive_sectors
= secs
;
2569 s
->chs_trans
= chs_trans
;
2570 s
->nb_sectors
= nb_sectors
;
2572 /* The SMART values should be preserved across power cycles
2574 s
->smart_enabled
= 1;
2575 s
->smart_autosave
= 1;
2576 s
->smart_errors
= 0;
2577 s
->smart_selftest_count
= 0;
2578 if (kind
== IDE_CD
) {
2579 blk_set_dev_ops(blk
, &ide_cd_block_ops
, s
);
2581 if (!blk_is_inserted(s
->blk
)) {
2582 error_setg(errp
, "Device needs media, but drive is empty");
2585 if (!blk_is_writable(blk
)) {
2586 error_setg(errp
, "Can't use a read-only drive");
2589 blk_set_dev_ops(blk
, &ide_hd_block_ops
, s
);
2592 pstrcpy(s
->drive_serial_str
, sizeof(s
->drive_serial_str
), serial
);
2594 snprintf(s
->drive_serial_str
, sizeof(s
->drive_serial_str
),
2595 "QM%05d", s
->drive_serial
);
2598 pstrcpy(s
->drive_model_str
, sizeof(s
->drive_model_str
), model
);
2602 strcpy(s
->drive_model_str
, "QEMU DVD-ROM");
2605 strcpy(s
->drive_model_str
, "QEMU MICRODRIVE");
2608 strcpy(s
->drive_model_str
, "QEMU HARDDISK");
2614 pstrcpy(s
->version
, sizeof(s
->version
), version
);
2616 pstrcpy(s
->version
, sizeof(s
->version
), qemu_hw_version());
2620 blk_iostatus_enable(blk
);
2624 static void ide_init1(IDEBus
*bus
, int unit
)
2626 static int drive_serial
= 1;
2627 IDEState
*s
= &bus
->ifs
[unit
];
2631 s
->drive_serial
= drive_serial
++;
2632 /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2633 s
->io_buffer_total_len
= IDE_DMA_BUF_SECTORS
*512 + 4;
2634 s
->io_buffer
= qemu_memalign(2048, s
->io_buffer_total_len
);
2635 memset(s
->io_buffer
, 0, s
->io_buffer_total_len
);
2637 s
->smart_selftest_data
= blk_blockalign(s
->blk
, 512);
2638 memset(s
->smart_selftest_data
, 0, 512);
2640 s
->sector_write_timer
= timer_new_ns(QEMU_CLOCK_VIRTUAL
,
2641 ide_sector_write_timer_cb
, s
);
2644 static int ide_nop_int(const IDEDMA
*dma
, bool is_write
)
2649 static void ide_nop(const IDEDMA
*dma
)
2653 static int32_t ide_nop_int32(const IDEDMA
*dma
, int32_t l
)
2658 static const IDEDMAOps ide_dma_nop_ops
= {
2659 .prepare_buf
= ide_nop_int32
,
2660 .restart_dma
= ide_nop
,
2661 .rw_buf
= ide_nop_int
,
2664 static void ide_restart_dma(IDEState
*s
, enum ide_dma_cmd dma_cmd
)
2666 s
->unit
= s
->bus
->retry_unit
;
2667 ide_set_sector(s
, s
->bus
->retry_sector_num
);
2668 s
->nsector
= s
->bus
->retry_nsector
;
2669 s
->bus
->dma
->ops
->restart_dma(s
->bus
->dma
);
2670 s
->io_buffer_size
= 0;
2671 s
->dma_cmd
= dma_cmd
;
2672 ide_start_dma(s
, ide_dma_cb
);
2675 static void ide_restart_bh(void *opaque
)
2677 IDEBus
*bus
= opaque
;
2682 qemu_bh_delete(bus
->bh
);
2685 error_status
= bus
->error_status
;
2686 if (bus
->error_status
== 0) {
2690 s
= idebus_active_if(bus
);
2691 is_read
= (bus
->error_status
& IDE_RETRY_READ
) != 0;
2693 /* The error status must be cleared before resubmitting the request: The
2694 * request may fail again, and this case can only be distinguished if the
2695 * called function can set a new error status. */
2696 bus
->error_status
= 0;
2698 /* The HBA has generically asked to be kicked on retry */
2699 if (error_status
& IDE_RETRY_HBA
) {
2700 if (s
->bus
->dma
->ops
->restart
) {
2701 s
->bus
->dma
->ops
->restart(s
->bus
->dma
);
2703 } else if (IS_IDE_RETRY_DMA(error_status
)) {
2704 if (error_status
& IDE_RETRY_TRIM
) {
2705 ide_restart_dma(s
, IDE_DMA_TRIM
);
2707 ide_restart_dma(s
, is_read
? IDE_DMA_READ
: IDE_DMA_WRITE
);
2709 } else if (IS_IDE_RETRY_PIO(error_status
)) {
2713 ide_sector_write(s
);
2715 } else if (error_status
& IDE_RETRY_FLUSH
) {
2717 } else if (IS_IDE_RETRY_ATAPI(error_status
)) {
2718 assert(s
->end_transfer_func
== ide_atapi_cmd
);
2719 ide_atapi_dma_restart(s
);
2725 static void ide_restart_cb(void *opaque
, bool running
, RunState state
)
2727 IDEBus
*bus
= opaque
;
2733 bus
->bh
= qemu_bh_new(ide_restart_bh
, bus
);
2734 qemu_bh_schedule(bus
->bh
);
2738 void ide_register_restart_cb(IDEBus
*bus
)
2740 if (bus
->dma
->ops
->restart_dma
) {
2741 bus
->vmstate
= qemu_add_vm_change_state_handler(ide_restart_cb
, bus
);
2745 static IDEDMA ide_dma_nop
= {
2746 .ops
= &ide_dma_nop_ops
,
2750 void ide_init2(IDEBus
*bus
, qemu_irq irq
)
2754 for(i
= 0; i
< 2; i
++) {
2756 ide_reset(&bus
->ifs
[i
]);
2759 bus
->dma
= &ide_dma_nop
;
2762 void ide_exit(IDEState
*s
)
2764 timer_free(s
->sector_write_timer
);
2765 qemu_vfree(s
->smart_selftest_data
);
2766 qemu_vfree(s
->io_buffer
);
2769 static bool is_identify_set(void *opaque
, int version_id
)
2771 IDEState
*s
= opaque
;
2773 return s
->identify_set
!= 0;
2776 static EndTransferFunc
* transfer_end_table
[] = {
2780 ide_atapi_cmd_reply_end
,
2782 ide_dummy_transfer_stop
,
2785 static int transfer_end_table_idx(EndTransferFunc
*fn
)
2789 for (i
= 0; i
< ARRAY_SIZE(transfer_end_table
); i
++)
2790 if (transfer_end_table
[i
] == fn
)
2796 static int ide_drive_post_load(void *opaque
, int version_id
)
2798 IDEState
*s
= opaque
;
2800 if (s
->blk
&& s
->identify_set
) {
2801 blk_set_enable_write_cache(s
->blk
, !!(s
->identify_data
[85] & (1 << 5)));
2806 static int ide_drive_pio_post_load(void *opaque
, int version_id
)
2808 IDEState
*s
= opaque
;
2810 if (s
->end_transfer_fn_idx
>= ARRAY_SIZE(transfer_end_table
)) {
2813 s
->end_transfer_func
= transfer_end_table
[s
->end_transfer_fn_idx
];
2814 s
->data_ptr
= s
->io_buffer
+ s
->cur_io_buffer_offset
;
2815 s
->data_end
= s
->data_ptr
+ s
->cur_io_buffer_len
;
2816 s
->atapi_dma
= s
->feature
& 1; /* as per cmd_packet */
2821 static int ide_drive_pio_pre_save(void *opaque
)
2823 IDEState
*s
= opaque
;
2826 s
->cur_io_buffer_offset
= s
->data_ptr
- s
->io_buffer
;
2827 s
->cur_io_buffer_len
= s
->data_end
- s
->data_ptr
;
2829 idx
= transfer_end_table_idx(s
->end_transfer_func
);
2831 fprintf(stderr
, "%s: invalid end_transfer_func for DRQ_STAT\n",
2833 s
->end_transfer_fn_idx
= 2;
2835 s
->end_transfer_fn_idx
= idx
;
2841 static bool ide_drive_pio_state_needed(void *opaque
)
2843 IDEState
*s
= opaque
;
2845 return ((s
->status
& DRQ_STAT
) != 0)
2846 || (s
->bus
->error_status
& IDE_RETRY_PIO
);
2849 static bool ide_tray_state_needed(void *opaque
)
2851 IDEState
*s
= opaque
;
2853 return s
->tray_open
|| s
->tray_locked
;
2856 static bool ide_atapi_gesn_needed(void *opaque
)
2858 IDEState
*s
= opaque
;
2860 return s
->events
.new_media
|| s
->events
.eject_request
;
2863 static bool ide_error_needed(void *opaque
)
2865 IDEBus
*bus
= opaque
;
2867 return (bus
->error_status
!= 0);
2870 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2871 static const VMStateDescription vmstate_ide_atapi_gesn_state
= {
2872 .name
="ide_drive/atapi/gesn_state",
2874 .minimum_version_id
= 1,
2875 .needed
= ide_atapi_gesn_needed
,
2876 .fields
= (VMStateField
[]) {
2877 VMSTATE_BOOL(events
.new_media
, IDEState
),
2878 VMSTATE_BOOL(events
.eject_request
, IDEState
),
2879 VMSTATE_END_OF_LIST()
2883 static const VMStateDescription vmstate_ide_tray_state
= {
2884 .name
= "ide_drive/tray_state",
2886 .minimum_version_id
= 1,
2887 .needed
= ide_tray_state_needed
,
2888 .fields
= (VMStateField
[]) {
2889 VMSTATE_BOOL(tray_open
, IDEState
),
2890 VMSTATE_BOOL(tray_locked
, IDEState
),
2891 VMSTATE_END_OF_LIST()
2895 static const VMStateDescription vmstate_ide_drive_pio_state
= {
2896 .name
= "ide_drive/pio_state",
2898 .minimum_version_id
= 1,
2899 .pre_save
= ide_drive_pio_pre_save
,
2900 .post_load
= ide_drive_pio_post_load
,
2901 .needed
= ide_drive_pio_state_needed
,
2902 .fields
= (VMStateField
[]) {
2903 VMSTATE_INT32(req_nb_sectors
, IDEState
),
2904 VMSTATE_VARRAY_INT32(io_buffer
, IDEState
, io_buffer_total_len
, 1,
2905 vmstate_info_uint8
, uint8_t),
2906 VMSTATE_INT32(cur_io_buffer_offset
, IDEState
),
2907 VMSTATE_INT32(cur_io_buffer_len
, IDEState
),
2908 VMSTATE_UINT8(end_transfer_fn_idx
, IDEState
),
2909 VMSTATE_INT32(elementary_transfer_size
, IDEState
),
2910 VMSTATE_INT32(packet_transfer_size
, IDEState
),
2911 VMSTATE_END_OF_LIST()
2915 const VMStateDescription vmstate_ide_drive
= {
2916 .name
= "ide_drive",
2918 .minimum_version_id
= 0,
2919 .post_load
= ide_drive_post_load
,
2920 .fields
= (VMStateField
[]) {
2921 VMSTATE_INT32(mult_sectors
, IDEState
),
2922 VMSTATE_INT32(identify_set
, IDEState
),
2923 VMSTATE_BUFFER_TEST(identify_data
, IDEState
, is_identify_set
),
2924 VMSTATE_UINT8(feature
, IDEState
),
2925 VMSTATE_UINT8(error
, IDEState
),
2926 VMSTATE_UINT32(nsector
, IDEState
),
2927 VMSTATE_UINT8(sector
, IDEState
),
2928 VMSTATE_UINT8(lcyl
, IDEState
),
2929 VMSTATE_UINT8(hcyl
, IDEState
),
2930 VMSTATE_UINT8(hob_feature
, IDEState
),
2931 VMSTATE_UINT8(hob_sector
, IDEState
),
2932 VMSTATE_UINT8(hob_nsector
, IDEState
),
2933 VMSTATE_UINT8(hob_lcyl
, IDEState
),
2934 VMSTATE_UINT8(hob_hcyl
, IDEState
),
2935 VMSTATE_UINT8(select
, IDEState
),
2936 VMSTATE_UINT8(status
, IDEState
),
2937 VMSTATE_UINT8(lba48
, IDEState
),
2938 VMSTATE_UINT8(sense_key
, IDEState
),
2939 VMSTATE_UINT8(asc
, IDEState
),
2940 VMSTATE_UINT8_V(cdrom_changed
, IDEState
, 3),
2941 VMSTATE_END_OF_LIST()
2943 .subsections
= (const VMStateDescription
*[]) {
2944 &vmstate_ide_drive_pio_state
,
2945 &vmstate_ide_tray_state
,
2946 &vmstate_ide_atapi_gesn_state
,
2951 static const VMStateDescription vmstate_ide_error_status
= {
2952 .name
="ide_bus/error",
2954 .minimum_version_id
= 1,
2955 .needed
= ide_error_needed
,
2956 .fields
= (VMStateField
[]) {
2957 VMSTATE_INT32(error_status
, IDEBus
),
2958 VMSTATE_INT64_V(retry_sector_num
, IDEBus
, 2),
2959 VMSTATE_UINT32_V(retry_nsector
, IDEBus
, 2),
2960 VMSTATE_UINT8_V(retry_unit
, IDEBus
, 2),
2961 VMSTATE_END_OF_LIST()
2965 const VMStateDescription vmstate_ide_bus
= {
2968 .minimum_version_id
= 1,
2969 .fields
= (VMStateField
[]) {
2970 VMSTATE_UINT8(cmd
, IDEBus
),
2971 VMSTATE_UINT8(unit
, IDEBus
),
2972 VMSTATE_END_OF_LIST()
2974 .subsections
= (const VMStateDescription
*[]) {
2975 &vmstate_ide_error_status
,
2980 void ide_drive_get(DriveInfo
**hd
, int n
)
2984 for (i
= 0; i
< n
; i
++) {
2985 hd
[i
] = drive_get_by_index(IF_IDE
, i
);