2 * QEMU IDE disk and CD/DVD-ROM Emulator
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2006 Openedhand Ltd.
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 #include <hw/i386/pc.h>
27 #include <hw/pci/pci.h>
28 #include <hw/isa/isa.h>
29 #include "qemu/error-report.h"
30 #include "qemu/timer.h"
31 #include "sysemu/sysemu.h"
32 #include "sysemu/dma.h"
33 #include "hw/block/block.h"
34 #include "sysemu/blockdev.h"
36 #include <hw/ide/internal.h>
/* These values were based on a Seagate ST3500418AS but have been modified
   to make more sense in QEMU */
/* Each row is one SMART attribute record as returned by SMART READ DATA.
 * NOTE(review): the closing "};" and two of the row-label comments are
 * missing from this extract. */
static const int smart_attributes[][12] = {
    /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
    /* raw read error rate*/
    { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
    /* presumably spin-up time (SMART attribute id 0x03) -- TODO confirm */
    { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* start stop count */
    { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
    /* remapped sectors */
    { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
    /* presumably power-on hours (SMART attribute id 0x09) -- TODO confirm */
    { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* power cycle count */
    { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* airflow-temperature-celsius */
    { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
58 static int ide_handle_rw_error(IDEState
*s
, int error
, int op
);
59 static void ide_dummy_transfer_stop(IDEState
*s
);
/*
 * Fill 'str' with exactly 'len' bytes taken from the NUL-terminated 'src',
 * padding with spaces once 'src' is exhausted.  Each pair of output bytes
 * is swapped (index i^1) because ATA IDENTIFY strings are stored as
 * big-endian byte pairs inside little-endian words.
 *
 * Defect fixed: the loop body was lost in this extract (the function did
 * not compile); restored per the upstream QEMU implementation.
 */
static void padstr(char *str, const char *src, int len)
{
    int i, v;
    for (i = 0; i < len; i++) {
        if (*src) {
            v = *src++;
        } else {
            v = ' ';
        }
        str[i ^ 1] = v;   /* byte-swap within each 16-bit word */
    }
}
/* Store 'v' at '*p' as a 16-bit word in IDENTIFY-data byte order.
 * NOTE(review): the body is not visible in this extract -- presumably a
 * single little-endian store (cpu_to_le16); confirm against the full file. */
static void put_le16(uint16_t *p, unsigned int v)
/*
 * Build the 512-byte ATA IDENTIFY DEVICE response for a hard disk into
 * s->io_buffer, and cache it in s->identify_data so later calls (while
 * identify_set holds) are served from the cache.
 * NOTE(review): several original lines are missing from this extract
 * (local declarations, closing braces, #endif and else keywords).  In
 * particular the duplicated word-84/87 stores below are the two arms of
 * an if/else (has-WWN vs no-WWN) whose keywords are not visible here.
 */
static void ide_identify(IDEState *s)
    IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
    /* Fast path: serve the cached IDENTIFY data. */
    if (s->identify_set) {
        memcpy(s->io_buffer, s->identify_data, sizeof(s->identify_data));
    memset(s->io_buffer, 0, 512);
    p = (uint16_t *)s->io_buffer;
    put_le16(p + 0, 0x0040);
    put_le16(p + 1, s->cylinders);
    put_le16(p + 3, s->heads);
    put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
    put_le16(p + 5, 512); /* XXX: retired, remove ? */
    put_le16(p + 6, s->sectors);
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 20, 3); /* XXX: retired, remove ? */
    put_le16(p + 21, 512); /* cache size in sectors */
    put_le16(p + 22, 4); /* ecc bytes */
    padstr((char *)(p + 23), s->version, 8); /* firmware version */
    padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
#if MAX_MULT_SECTORS > 1
    put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
    put_le16(p + 48, 1); /* dword I/O */
    put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
    put_le16(p + 51, 0x200); /* PIO transfer cycle */
    put_le16(p + 52, 0x200); /* DMA transfer cycle */
    put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
    put_le16(p + 54, s->cylinders);
    put_le16(p + 55, s->heads);
    put_le16(p + 56, s->sectors);
    oldsize = s->cylinders * s->heads * s->sectors;
    put_le16(p + 57, oldsize);
    put_le16(p + 58, oldsize >> 16);
    put_le16(p + 59, 0x100 | s->mult_sectors);
    put_le16(p + 60, s->nb_sectors);
    put_le16(p + 61, s->nb_sectors >> 16);
    put_le16(p + 62, 0x07); /* single word dma0-2 supported */
    put_le16(p + 63, 0x07); /* mdma0-2 supported */
    put_le16(p + 64, 0x03); /* pio3-4 supported */
    put_le16(p + 65, 120);
    put_le16(p + 66, 120);
    put_le16(p + 67, 120);
    put_le16(p + 68, 120);
    if (dev && dev->conf.discard_granularity) {
        put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
    put_le16(p + 75, s->ncq_queues - 1);
    put_le16(p + 76, (1 << 8));
    put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
    put_le16(p + 81, 0x16); /* conforms to ata5 */
    /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
    put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
    /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
    put_le16(p + 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
    /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
    put_le16(p + 84, (1 << 14) | (1 << 8) | 0); /* has-WWN arm (if not in extract) */
    put_le16(p + 84, (1 << 14) | 0);            /* no-WWN arm */
    /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
    if (bdrv_enable_write_cache(s->bs))
        put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
        put_le16(p + 85, (1 << 14) | 1); /* else arm (else keyword not in extract) */
    /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
    put_le16(p + 86, (1 << 13) | (1 <<12) | (1 << 10));
    /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
    put_le16(p + 87, (1 << 14) | (1 << 8) | 0); /* has-WWN arm */
    put_le16(p + 87, (1 << 14) | 0);            /* no-WWN arm */
    put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
    put_le16(p + 93, 1 | (1 << 14) | 0x2000);
    /* Words 100-103: 48-bit total addressable sectors. */
    put_le16(p + 100, s->nb_sectors);
    put_le16(p + 101, s->nb_sectors >> 16);
    put_le16(p + 102, s->nb_sectors >> 32);
    put_le16(p + 103, s->nb_sectors >> 48);
    if (dev && dev->conf.physical_block_size)
        put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
    /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
    put_le16(p + 108, s->wwn >> 48);
    put_le16(p + 109, s->wwn >> 32);
    put_le16(p + 110, s->wwn >> 16);
    put_le16(p + 111, s->wwn);
    if (dev && dev->conf.discard_granularity) {
        put_le16(p + 169, 1); /* TRIM support */
    /* Cache the freshly built data for subsequent IDENTIFY commands. */
    memcpy(s->identify_data, p, sizeof(s->identify_data));
/*
 * Build the 512-byte IDENTIFY PACKET DEVICE response for an ATAPI CD/DVD
 * drive into s->io_buffer, caching it in s->identify_data.
 * NOTE(review): the if/else around the two word-49/53/62/63 groups
 * (use_dma vs no-DMA) is not visible in this extract; treat the duplicated
 * stores below as the two arms of that conditional.
 */
static void ide_atapi_identify(IDEState *s)
    if (s->identify_set) {
        memcpy(s->io_buffer, s->identify_data, sizeof(s->identify_data));
    memset(s->io_buffer, 0, 512);
    p = (uint16_t *)s->io_buffer;
    /* Removable CDROM, 50us response, 12 byte packets */
    put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 20, 3); /* buffer type */
    put_le16(p + 21, 512); /* cache size in sectors */
    put_le16(p + 22, 4); /* ecc bytes */
    padstr((char *)(p + 23), s->version, 8); /* firmware version */
    padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
    put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
    put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
    put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
    put_le16(p + 62, 7); /* single word dma0-2 supported */
    put_le16(p + 63, 7); /* mdma0-2 supported */
    put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
    put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
    put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
    put_le16(p + 64, 3); /* pio3-4 supported */
    put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
    put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
    put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
    put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
    put_le16(p + 71, 30); /* in ns */
    put_le16(p + 72, 30); /* in ns */
    put_le16(p + 75, s->ncq_queues - 1);
    put_le16(p + 76, (1 << 8));
    put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
    put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
    /* Cache the response for subsequent IDENTIFY PACKET commands. */
    memcpy(s->identify_data, p, sizeof(s->identify_data));
/*
 * Build the IDENTIFY response for a CF-ATA (CompactFlash) card directly in
 * s->identify_data, then copy it to s->io_buffer.
 * NOTE(review): locals, braces, #else/#endif and the identify_set guard
 * are not visible in this extract.
 */
static void ide_cfata_identify(IDEState *s)
    p = (uint16_t *) s->identify_data;
    memset(p, 0, sizeof(s->identify_data));
    cur_sec = s->cylinders * s->heads * s->sectors;
    put_le16(p + 0, 0x848a);                    /* CF Storage Card signature */
    put_le16(p + 1, s->cylinders);              /* Default cylinders */
    put_le16(p + 3, s->heads);                  /* Default heads */
    put_le16(p + 6, s->sectors);                /* Default sectors per track */
    put_le16(p + 7, s->nb_sectors >> 16);       /* Sectors per card */
    put_le16(p + 8, s->nb_sectors);             /* Sectors per card */
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 22, 0x0004);                   /* ECC bytes */
    padstr((char *) (p + 23), s->version, 8);   /* Firmware Revision */
    padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
#if MAX_MULT_SECTORS > 1
    put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
    put_le16(p + 47, 0x0000);                   /* #else arm (not in extract) */
    put_le16(p + 49, 0x0f00);                   /* Capabilities */
    put_le16(p + 51, 0x0002);                   /* PIO cycle timing mode */
    put_le16(p + 52, 0x0001);                   /* DMA cycle timing mode */
    put_le16(p + 53, 0x0003);                   /* Translation params valid */
    put_le16(p + 54, s->cylinders);             /* Current cylinders */
    put_le16(p + 55, s->heads);                 /* Current heads */
    put_le16(p + 56, s->sectors);               /* Current sectors */
    put_le16(p + 57, cur_sec);                  /* Current capacity */
    put_le16(p + 58, cur_sec >> 16);            /* Current capacity */
    if (s->mult_sectors)                        /* Multiple sector setting */
        put_le16(p + 59, 0x100 | s->mult_sectors);
    put_le16(p + 60, s->nb_sectors);            /* Total LBA sectors */
    put_le16(p + 61, s->nb_sectors >> 16);      /* Total LBA sectors */
    put_le16(p + 63, 0x0203);                   /* Multiword DMA capability */
    put_le16(p + 64, 0x0001);                   /* Flow Control PIO support */
    put_le16(p + 65, 0x0096);                   /* Min. Multiword DMA cycle */
    put_le16(p + 66, 0x0096);                   /* Rec. Multiword DMA cycle */
    put_le16(p + 68, 0x00b4);                   /* Min. PIO cycle time */
    put_le16(p + 82, 0x400c);                   /* Command Set supported */
    put_le16(p + 83, 0x7068);                   /* Command Set supported */
    put_le16(p + 84, 0x4000);                   /* Features supported */
    put_le16(p + 85, 0x000c);                   /* Command Set enabled */
    put_le16(p + 86, 0x7044);                   /* Command Set enabled */
    put_le16(p + 87, 0x4000);                   /* Features enabled */
    put_le16(p + 91, 0x4060);                   /* Current APM level */
    put_le16(p + 129, 0x0002);                  /* Current features option */
    put_le16(p + 130, 0x0005);                  /* Reassigned sectors */
    put_le16(p + 131, 0x0001);                  /* Initial power mode */
    put_le16(p + 132, 0x0000);                  /* User signature */
    put_le16(p + 160, 0x8100);                  /* Power requirement */
    put_le16(p + 161, 0x8001);                  /* CF command set */
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
/* Place the ATA device signature in the task-file registers: the head
 * bits of 'select' are cleared, then (per the code visible here) packet
 * devices branch on IDE_CD.  NOTE(review): the register values written in
 * each branch are not visible in this extract. */
static void ide_set_signature(IDEState *s)
    s->select &= 0xf0; /* clear head */
    if (s->drive_kind == IDE_CD) {
/* State for an in-flight TRIM (DATA SET MANAGEMENT) request: wraps the
 * common AIOCB plus the nested discard AIOCB.  NOTE(review): additional
 * fields (bh, ret, qiov, i, j) referenced elsewhere are not visible in
 * this extract. */
typedef struct TrimAIOCB {
    BlockDriverAIOCB common;
    BlockDriverAIOCB *aiocb;

/* Cancel an in-flight TRIM: force the range-walk indices to their final
 * values so ide_issue_trim_cb submits no further discards, drop the
 * completion bottom half, then cancel the nested discard. */
static void trim_aio_cancel(BlockDriverAIOCB *acb)
    TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
    /* Exit the loop in case bdrv_aio_cancel calls ide_issue_trim_cb again. */
    iocb->j = iocb->qiov->niov - 1;
    iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
    /* Tell ide_issue_trim_cb not to trigger the completion, too. */
    qemu_bh_delete(iocb->bh);
    bdrv_aio_cancel(iocb->aiocb);
    qemu_aio_release(iocb);

/* AIOCB descriptor for TRIM requests: size plus the cancel hook above. */
static const AIOCBInfo trim_aiocb_info = {
    .aiocb_size = sizeof(TrimAIOCB),
    .cancel = trim_aio_cancel,
/* Bottom half that delivers the TRIM completion to the caller's callback,
 * then tears down the bh and releases the AIOCB. */
static void ide_trim_bh_cb(void *opaque)
    TrimAIOCB *iocb = opaque;
    iocb->common.cb(iocb->common.opaque, iocb->ret);
    qemu_bh_delete(iocb->bh);
    qemu_aio_release(iocb);

/*
 * Walk the qiov of TRIM range entries; for each 8-byte entry submit one
 * bdrv_aio_discard with itself as completion, so entries are processed one
 * at a time.  When the walk finishes, schedule the completion bh.
 * NOTE(review): the local declarations of i/j (copies of iocb->i/iocb->j)
 * and the ret/error handling lines are not visible in this extract.
 */
static void ide_issue_trim_cb(void *opaque, int ret)
    TrimAIOCB *iocb = opaque;
    while (iocb->j < iocb->qiov->niov) {
        while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
            uint64_t *buffer = iocb->qiov->iov[j].iov_base;
            /* 6-byte LBA + 2-byte range per entry */
            uint64_t entry = le64_to_cpu(buffer[i]);
            uint64_t sector = entry & 0x0000ffffffffffffULL;
            uint16_t count = entry >> 48;
            /* Got an entry! Submit and exit. */
            iocb->aiocb = bdrv_aio_discard(iocb->common.bs, sector, count,
                                           ide_issue_trim_cb, opaque);
    /* All ranges done (or errored): hand completion to the bh. */
    qemu_bh_schedule(iocb->bh);
/* Entry point for a TRIM request: allocate the TrimAIOCB, create the
 * completion bottom half, and kick off the range walk with a successful
 * initial "completion".  NOTE(review): initialisation of iocb->ret/i/j is
 * not visible in this extract. */
BlockDriverAIOCB *ide_issue_trim(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
    iocb = qemu_aio_get(&trim_aiocb_info, bs, cb, opaque);
    iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
    ide_issue_trim_cb(iocb, 0);
    return &iocb->common;
/* Abort the current command: stop any PIO transfer and report
 * READY|ERR to the guest.  NOTE(review): the error-register assignment
 * (ABRT_ERR) usual for this helper is not visible in this extract. */
static inline void ide_abort_command(IDEState *s)
    ide_transfer_stop(s);
    s->status = READY_STAT | ERR_STAT;
/* prepare data transfer and tell what to do after */
/* Arm a PIO data transfer over [buf, buf+size) and record the
 * continuation to run when the guest finishes the window.  DRQ is raised
 * only if no error is pending; the DMA ops hook is notified if present.
 * NOTE(review): the s->data_ptr = buf assignment is not visible in this
 * extract. */
void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
                        EndTransferFunc *end_transfer_func)
    s->end_transfer_func = end_transfer_func;
    s->data_end = buf + size;
    if (!(s->status & ERR_STAT)) {
        s->status |= DRQ_STAT;
    if (s->bus->dma->ops->start_transfer) {
        s->bus->dma->ops->start_transfer(s->bus->dma);
/* Notify the bus's DMA implementation (if it cares) that the current
 * command has completed. */
static void ide_cmd_done(IDEState *s)
    if (s->bus->dma->ops->cmd_done) {
        s->bus->dma->ops->cmd_done(s->bus->dma);
/* End the current PIO transfer: park the transfer window on io_buffer
 * (empty), make this function the no-op continuation, and drop DRQ. */
void ide_transfer_stop(IDEState *s)
    s->end_transfer_func = ide_transfer_stop;
    s->data_ptr = s->io_buffer;
    s->data_end = s->io_buffer;
    s->status &= ~DRQ_STAT;
/*
 * Decode the task-file registers into an absolute sector number, in one of
 * three addressing modes: LBA28, LBA48 (using the hob_* shadow registers),
 * or CHS.  NOTE(review): the if/else structure selecting between the
 * LBA28 and LBA48 decodes (s->lba48) and the CHS else-arm is not fully
 * visible in this extract -- the three decodes below are those arms.
 */
int64_t ide_get_sector(IDEState *s)
    if (s->select & 0x40) {
        /* LBA28: 4 head bits + hcyl + lcyl + sector */
        sector_num = ((s->select & 0x0f) << 24) | (s->hcyl << 16) |
            (s->lcyl << 8) | s->sector;
        /* LBA48: hob_* registers supply the high 24 bits */
        sector_num = ((int64_t)s->hob_hcyl << 40) |
            ((int64_t) s->hob_lcyl << 32) |
            ((int64_t) s->hob_sector << 24) |
            ((int64_t) s->hcyl << 16) |
            ((int64_t) s->lcyl << 8) | s->sector;
        /* CHS: cylinder * heads * sectors + head * sectors + (sector-1) */
        sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
            (s->select & 0x0f) * s->sectors + (s->sector - 1);
/*
 * Inverse of ide_get_sector(): write 'sector_num' back into the task-file
 * registers in the current addressing mode (LBA28 / LBA48 / CHS).
 * NOTE(review): as in ide_get_sector(), the else/lba48 branch keywords are
 * not visible in this extract; the three register-store groups below are
 * the LBA28, LBA48 and CHS arms respectively.
 */
void ide_set_sector(IDEState *s, int64_t sector_num)
    if (s->select & 0x40) {
        /* LBA28 */
        s->select = (s->select & 0xf0) | (sector_num >> 24);
        s->hcyl = (sector_num >> 16);
        s->lcyl = (sector_num >> 8);
        s->sector = (sector_num);
        /* LBA48: low 24 bits in the normal registers, high 24 in hob_* */
        s->sector = sector_num;
        s->lcyl = sector_num >> 8;
        s->hcyl = sector_num >> 16;
        s->hob_sector = sector_num >> 24;
        s->hob_lcyl = sector_num >> 32;
        s->hob_hcyl = sector_num >> 40;
        /* CHS */
        cyl = sector_num / (s->heads * s->sectors);
        r = sector_num % (s->heads * s->sectors);
        s->select = (s->select & 0xf0) | ((r / s->sectors) & 0x0f);
        s->sector = (r % s->sectors) + 1;
/* Report a read/write failure to the guest by aborting the command.
 * NOTE(review): the ide_set_irq call usually paired with this is not
 * visible in this extract. */
static void ide_rw_error(IDEState *s) {
    ide_abort_command(s);

/* Validate that [sector, sector+nb_sectors) lies within the backing
 * device.  The second comparison is overflow-safe: nb_sectors is compared
 * against the remaining space rather than adding to 'sector'. */
static bool ide_sect_range_ok(IDEState *s,
                              uint64_t sector, uint64_t nb_sectors)
    uint64_t total_sectors;
    bdrv_get_geometry(s->bs, &total_sectors);
    if (sector > total_sectors || nb_sectors > total_sectors - sector) {
/*
 * Completion callback for a PIO sector read: account the I/O, route
 * errors through ide_handle_rw_error (retry/stop/report policy), then
 * expose up to req_nb_sectors of io_buffer to the guest and advance the
 * task-file position.  NOTE(review): error-check lines and the n =
 * s->nsector computation are not visible in this extract.
 */
static void ide_sector_read_cb(void *opaque, int ret)
    IDEState *s = opaque;
    s->status &= ~BUSY_STAT;
    bdrv_acct_done(s->bs, &s->acct);
    if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    /* Allow the guest to read the io_buffer */
    ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
    ide_set_sector(s, ide_get_sector(s) + n);

/*
 * Start (or continue) a PIO read: when nsector reaches zero the transfer
 * is stopped; otherwise up to req_nb_sectors are read asynchronously into
 * io_buffer, with accounting, bounds-checked against the device size.
 */
void ide_sector_read(IDEState *s)
    s->status = READY_STAT | SEEK_STAT;
    s->error = 0; /* not needed by IDE spec, but needed by Windows */
    sector_num = ide_get_sector(s);
    ide_transfer_stop(s);
    s->status |= BUSY_STAT;
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
#if defined(DEBUG_IDE)
    printf("sector=%" PRId64 "\n", sector_num);
    if (!ide_sect_range_ok(s, sector_num, n)) {
    s->iov.iov_base = s->io_buffer;
    s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&s->qiov, &s->iov, 1);
    bdrv_acct_start(s->bs, &s->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
    s->pio_aiocb = bdrv_aio_readv(s->bs, sector_num, &s->qiov, n,
                                  ide_sector_read_cb, s);
/* Release the scatter/gather list built for the completed DMA request. */
static void dma_buf_commit(IDEState *s)
    qemu_sglist_destroy(&s->sg);

/* Mark the bus's DMA engine idle; 'more' tells the implementation whether
 * the device expects further transfers for this command. */
void ide_set_inactive(IDEState *s, bool more)
    s->bus->dma->aiocb = NULL;
    if (s->bus->dma->ops->set_inactive) {
        s->bus->dma->ops->set_inactive(s->bus->dma, more);

/* Fail the current DMA command: abort it toward the guest and deactivate
 * the DMA engine. */
void ide_dma_error(IDEState *s)
    ide_abort_command(s);
    ide_set_inactive(s, false);
/*
 * Apply the block-layer error policy to a failed request.  'op' encodes
 * what to retry (IDE_RETRY_PIO/DMA/READ/TRIM...).  Returns nonzero unless
 * the policy is IGNORE (i.e. nonzero means the caller must not complete
 * the request normally).  On STOP the op is stashed in bus->error_status
 * so the request can be restarted after the VM resumes.
 * NOTE(review): the bodies of the REPORT branches (ide_dma_error vs
 * ide_rw_error dispatch) are not visible in this extract.
 */
static int ide_handle_rw_error(IDEState *s, int error, int op)
    bool is_read = (op & IDE_RETRY_READ) != 0;
    BlockErrorAction action = bdrv_get_error_action(s->bs, is_read, error);
    if (action == BLOCK_ERROR_ACTION_STOP) {
        s->bus->dma->ops->set_unit(s->bus->dma, s->unit);
        s->bus->error_status = op;
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        if (op & IDE_RETRY_DMA) {
    bdrv_error_action(s->bs, action, is_read, error);
    return action != BLOCK_ERROR_ACTION_IGNORE;
/*
 * Central DMA completion callback, re-entered once per PRD-table chunk:
 * handles errors via the block error policy, accounts progress, and either
 * finishes the command (nsector == 0) or prepares the next chunk and
 * re-submits the appropriate dma_bdrv_{read,write,io} operation.
 * NOTE(review): many structural lines (error checks, case labels, braces,
 * the sector_num/n bookkeeping between chunks) are not visible in this
 * extract; the dma_bdrv_read/write/trim submissions below are the arms of
 * the switch on s->dma_cmd.
 */
void ide_dma_cb(void *opaque, int ret)
    IDEState *s = opaque;
    bool stay_active = false;
    int op = IDE_RETRY_DMA;
    /* Record what kind of operation would need retrying on error. */
    if (s->dma_cmd == IDE_DMA_READ)
        op |= IDE_RETRY_READ;
    else if (s->dma_cmd == IDE_DMA_TRIM)
        op |= IDE_RETRY_TRIM;
    if (ide_handle_rw_error(s, -ret, op)) {
    n = s->io_buffer_size >> 9;
    if (n > s->nsector) {
        /* The PRDs were longer than needed for this request. Shorten them so
         * we don't get a negative remainder. The Active bit must remain set
         * after the request completes. */
    sector_num = ide_get_sector(s);
    ide_set_sector(s, sector_num);
    /* end of transfer ? */
    if (s->nsector == 0) {
        s->status = READY_STAT | SEEK_STAT;
    /* launch next transfer */
    s->io_buffer_index = 0;
    s->io_buffer_size = n * 512;
    if (s->bus->dma->ops->prepare_buf(s->bus->dma, ide_cmd_is_read(s)) == 0) {
        /* The PRDs were too short. Reset the Active bit, but don't raise an
        s->status = READY_STAT | SEEK_STAT;
    printf("ide_dma_cb: sector_num=%" PRId64 " n=%d, cmd_cmd=%d\n",
           sector_num, n, s->dma_cmd);
    if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
        !ide_sect_range_ok(s, sector_num, n)) {
    switch (s->dma_cmd) {
        s->bus->dma->aiocb = dma_bdrv_read(s->bs, &s->sg, sector_num,
        s->bus->dma->aiocb = dma_bdrv_write(s->bs, &s->sg, sector_num,
        s->bus->dma->aiocb = dma_bdrv_io(s->bs, &s->sg, sector_num,
                                         ide_issue_trim, ide_dma_cb, s,
                                         DMA_DIRECTION_TO_DEVICE);
    /* Completion/error path: finish accounting and deactivate DMA. */
    if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
        bdrv_acct_done(s->bs, &s->acct);
    ide_set_inactive(s, stay_active);
/*
 * Begin a DMA command: reset the I/O buffer state, record the command
 * kind, start block-layer accounting for reads/writes, and hand control
 * to the DMA engine with ide_dma_cb as the completion driver.
 * NOTE(review): the switch/case labels around the two bdrv_acct_start
 * calls (read vs write accounting) are not visible in this extract.
 */
static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
    s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT;
    s->io_buffer_index = 0;
    s->io_buffer_size = 0;
    s->dma_cmd = dma_cmd;
    bdrv_acct_start(s->bs, &s->acct, s->nsector * BDRV_SECTOR_SIZE,
    bdrv_acct_start(s->bs, &s->acct, s->nsector * BDRV_SECTOR_SIZE,
    ide_start_dma(s, ide_dma_cb);

/* Kick the bus's DMA implementation with the given completion callback. */
void ide_start_dma(IDEState *s, BlockDriverCompletionFunc *cb)
    if (s->bus->dma->ops->start_dma) {
        s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
/* Timer callback used by the win2k install hack: fires the delayed IRQ.
 * NOTE(review): the ide_set_irq call is not visible in this extract. */
static void ide_sector_write_timer_cb(void *opaque)
    IDEState *s = opaque;

/*
 * Completion callback for a PIO sector write: account the I/O, apply the
 * error policy, then either stop the transfer (all sectors written) or
 * open the next PIO window, advance the position, and raise the IRQ --
 * possibly delayed via a timer for the Windows 2000 installer workaround.
 * NOTE(review): error checks, the n/n1 = s->nsector computations and some
 * closing braces are not visible in this extract.
 */
static void ide_sector_write_cb(void *opaque, int ret)
    IDEState *s = opaque;
    bdrv_acct_done(s->bs, &s->acct);
    s->status &= ~BUSY_STAT;
    if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    if (s->nsector == 0) {
        /* no more sectors to write */
        ide_transfer_stop(s);
        if (n1 > s->req_nb_sectors) {
            n1 = s->req_nb_sectors;
        ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
    ide_set_sector(s, ide_get_sector(s) + n);
    if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
        /* It seems there is a bug in the Windows 2000 installer HDD
           IDE driver which fills the disk with empty logs when the
           IDE write IRQ comes too early. This hack tries to correct
           that at the expense of slower write performances. Use this
           option _only_ to install Windows 2000. You must disable it
        timer_mod(s->sector_write_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() / 1000));
810 void ide_sector_write(IDEState
*s
)
815 s
->status
= READY_STAT
| SEEK_STAT
| BUSY_STAT
;
816 sector_num
= ide_get_sector(s
);
817 #if defined(DEBUG_IDE)
818 printf("sector=%" PRId64
"\n", sector_num
);
821 if (n
> s
->req_nb_sectors
) {
822 n
= s
->req_nb_sectors
;
825 if (!ide_sect_range_ok(s
, sector_num
, n
)) {
830 s
->iov
.iov_base
= s
->io_buffer
;
831 s
->iov
.iov_len
= n
* BDRV_SECTOR_SIZE
;
832 qemu_iovec_init_external(&s
->qiov
, &s
->iov
, 1);
834 bdrv_acct_start(s
->bs
, &s
->acct
, n
* BDRV_SECTOR_SIZE
, BDRV_ACCT_READ
);
835 s
->pio_aiocb
= bdrv_aio_writev(s
->bs
, sector_num
, &s
->qiov
, n
,
836 ide_sector_write_cb
, s
);
/* Completion callback for FLUSH CACHE: route errors through the block
 * error policy, finish accounting, and report READY to the guest. */
static void ide_flush_cb(void *opaque, int ret)
    IDEState *s = opaque;
    /* XXX: What sector number to set here? */
    if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
    bdrv_acct_done(s->bs, &s->acct);
    s->status = READY_STAT | SEEK_STAT;

/* Issue an asynchronous cache flush to the backing device.
 * NOTE(review): the early-out for a missing backing device is not visible
 * in this extract. */
void ide_flush_cache(IDEState *s)
    s->status |= BUSY_STAT;
    bdrv_acct_start(s->bs, &s->acct, 0, BDRV_ACCT_FLUSH);
    s->pio_aiocb = bdrv_aio_flush(s->bs, ide_flush_cb, s);
/* CF-ATA metadata inquiry: describe the metadata region (format revision,
 * media status, capacity and sector count) in io_buffer. */
static void ide_cfata_metadata_inquiry(IDEState *s)
    p = (uint16_t *) s->io_buffer;
    /* spd = number of 512-byte sectors covering mdata_size, rounded up */
    spd = ((s->mdata_size - 1) >> 9) + 1;
    put_le16(p + 0, 0x0001);                    /* Data format revision */
    put_le16(p + 1, 0x0000);                    /* Media property: silicon */
    put_le16(p + 2, s->media_changed);          /* Media status */
    put_le16(p + 3, s->mdata_size & 0xffff);    /* Capacity in bytes (low) */
    put_le16(p + 4, s->mdata_size >> 16);       /* Capacity in bytes (high) */
    put_le16(p + 5, spd & 0xffff);              /* Sectors per device (low) */
    put_le16(p + 6, spd >> 16);                 /* Sectors per device (high) */

/* CF-ATA metadata read: copy a bounded window of mdata_storage (offset
 * taken from hcyl/lcyl, in 512-byte units) into io_buffer after a range
 * check against mdata_size. */
static void ide_cfata_metadata_read(IDEState *s)
    if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
        s->status = ERR_STAT;
    p = (uint16_t *) s->io_buffer;
    put_le16(p + 0, s->media_changed);          /* Media status */
    memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
           MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
                   s->nsector << 9), 0x200 - 2));

/* CF-ATA metadata write: mirror image of the read path -- copy from the
 * guest buffer into mdata_storage and clear the media-changed flag.
 * NOTE(review): the source argument of the memcpy is not visible in this
 * extract. */
static void ide_cfata_metadata_write(IDEState *s)
    if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
        s->status = ERR_STAT;
    s->media_changed = 0;
    memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
           MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
                   s->nsector << 9), 0x200 - 2));
/* called when the inserted state of the media has changed */
static void ide_cd_change_cb(void *opaque, bool load)
    IDEState *s = opaque;
    s->tray_open = !load;
    /* Refresh the cached medium size for the (possibly new) medium. */
    bdrv_get_geometry(s->bs, &nb_sectors);
    s->nb_sectors = nb_sectors;
    /*
     * First indicate to the guest that a CD has been removed. That's
     * done on the next command the guest sends us.
     *
     * Then we set UNIT_ATTENTION, by which the guest will
     * detect a new CD in the drive. See ide_atapi_cmd() for details.
     */
    s->cdrom_changed = 1;
    s->events.new_media = true;
    s->events.eject_request = false;

/* Host requested ejection: flag the event for the guest; when 'force' is
 * set the tray lock is overridden.  NOTE(review): the if(force) keyword
 * around the tray_locked clear is not visible in this extract. */
static void ide_cd_eject_request_cb(void *opaque, bool force)
    IDEState *s = opaque;
    s->events.eject_request = true;
    s->tray_locked = false;
/* Normalise the sector count for a command: in LBA48 mode combine
 * hob_nsector/nsector into one 16-bit count (0 meaning 65536), in LBA28
 * mode a count of 0 means 256.  NOTE(review): the lba48 branch structure
 * and the 256/65536 assignments are not fully visible in this extract. */
static void ide_cmd_lba48_transform(IDEState *s, int lba48)
    /* handle the 'magic' 0 nsector count conversion here. to avoid
     * fiddling with the rest of the read logic, we just store the
     * full sector count in ->nsector and ignore ->hob_nsector from now
     */
    if (!s->nsector && !s->hob_nsector)
        int hi = s->hob_nsector;
        s->nsector = (hi << 8) | lo;

static void ide_clear_hob(IDEBus *bus)
    /* any write clears HOB high bit of device control register */
    bus->ifs[0].select &= ~(1 << 7);
    bus->ifs[1].select &= ~(1 << 7);
/*
 * Guest write to an IDE command-block register.  Per the ATA spec most
 * registers are written to BOTH drives on the bus (each keeping a hob_*
 * shadow of the previous value for LBA48); the drive/head register
 * selects the active unit, and a write to register 7 dispatches a
 * command.  NOTE(review): the switch statement, its case labels, the
 * ide_clear_hob calls, and the DEBUG_IDE guard around the printf are not
 * visible in this extract -- the register-store groups below are the
 * individual case bodies (feature, nsector, sector, lcyl, hcyl, select,
 * command) in order.
 */
void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
    IDEBus *bus = opaque;
    printf("IDE: write addr=0x%x val=0x%02x\n", addr, val);
    /* ignore writes to command block while busy with previous command */
    if (addr != 7 && (idebus_active_if(bus)->status & (BUSY_STAT|DRQ_STAT)))
    /* NOTE: data is written to the two drives */
    bus->ifs[0].hob_feature = bus->ifs[0].feature;
    bus->ifs[1].hob_feature = bus->ifs[1].feature;
    bus->ifs[0].feature = val;
    bus->ifs[1].feature = val;
    bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
    bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
    bus->ifs[0].nsector = val;
    bus->ifs[1].nsector = val;
    bus->ifs[0].hob_sector = bus->ifs[0].sector;
    bus->ifs[1].hob_sector = bus->ifs[1].sector;
    bus->ifs[0].sector = val;
    bus->ifs[1].sector = val;
    bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
    bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
    bus->ifs[0].lcyl = val;
    bus->ifs[1].lcyl = val;
    bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
    bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
    bus->ifs[0].hcyl = val;
    bus->ifs[1].hcyl = val;
    /* FIXME: HOB readback uses bit 7 */
    bus->ifs[0].select = (val & ~0x10) | 0xa0;
    bus->ifs[1].select = (val | 0x10) | 0xa0;
    /* Bit 4 of the drive/head register selects the active unit. */
    bus->unit = (val >> 4) & 1;
    /* Register 7: command dispatch. */
    ide_exec_cmd(bus, val);
/*
 * ATA command handlers.  Presumably each returns true when the command is
 * complete and its status/IRQ should be finalised by the dispatcher --
 * TODO confirm against ide_exec_cmd() (not in this extract).
 * NOTE(review): throughout this section many structural lines (returns,
 * case labels, braces, else keywords) are missing from the extract.
 */

/* NOP: no operation. */
static bool cmd_nop(IDEState *s, uint8_t cmd)

/* DATA SET MANAGEMENT: only the TRIM feature (bit 0) is handled; it is
 * executed as a DMA command.  Anything else aborts. */
static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
    switch (s->feature) {
        ide_sector_start_dma(s, IDE_DMA_TRIM);
    ide_abort_command(s);

/* IDENTIFY DEVICE: valid only for non-packet devices with media; CD
 * drives respond with the ATAPI signature and an abort instead. */
static bool cmd_identify(IDEState *s, uint8_t cmd)
    if (s->bs && s->drive_kind != IDE_CD) {
        if (s->drive_kind != IDE_CFATA) {
            ide_cfata_identify(s);
        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
        ide_set_irq(s->bus);
        if (s->drive_kind == IDE_CD) {
            ide_set_signature(s);
        ide_abort_command(s);

/* READ VERIFY (EXT): no data is transferred; only the count transform is
 * applied here. */
static bool cmd_verify(IDEState *s, uint8_t cmd)
    bool lba48 = (cmd == WIN_VERIFY_EXT);
    /* do sector number check ? */
    ide_cmd_lba48_transform(s, lba48);

/* SET MULTIPLE MODE: validate that the requested count is a power of two
 * no larger than MAX_MULT_SECTORS; CFATA may disable multiple mode with 0. */
static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
    if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
        /* Disable Read and Write Multiple */
        s->mult_sectors = 0;
    } else if ((s->nsector & 0xff) != 0 &&
        ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
         (s->nsector & (s->nsector - 1)) != 0)) {
        ide_abort_command(s);
        s->mult_sectors = s->nsector & 0xff;

/* READ MULTIPLE (EXT): requires media and a configured multiple count. */
static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
    bool lba48 = (cmd == WIN_MULTREAD_EXT);
    if (!s->bs || !s->mult_sectors) {
        ide_abort_command(s);
    ide_cmd_lba48_transform(s, lba48);
    s->req_nb_sectors = s->mult_sectors;

/* WRITE MULTIPLE (EXT): open a PIO window for the first chunk. */
static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
    bool lba48 = (cmd == WIN_MULTWRITE_EXT);
    if (!s->bs || !s->mult_sectors) {
        ide_abort_command(s);
    ide_cmd_lba48_transform(s, lba48);
    s->req_nb_sectors = s->mult_sectors;
    n = MIN(s->nsector, s->req_nb_sectors);
    s->status = SEEK_STAT | READY_STAT;
    ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
    s->media_changed = 1;

/* READ SECTOR(S) (EXT), PIO, one sector per interrupt. */
static bool cmd_read_pio(IDEState *s, uint8_t cmd)
    bool lba48 = (cmd == WIN_READ_EXT);
    if (s->drive_kind == IDE_CD) {
        ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
        ide_abort_command(s);
    ide_abort_command(s);
    ide_cmd_lba48_transform(s, lba48);
    s->req_nb_sectors = 1;

/* WRITE SECTOR(S) (EXT), PIO, one sector per interrupt. */
static bool cmd_write_pio(IDEState *s, uint8_t cmd)
    bool lba48 = (cmd == WIN_WRITE_EXT);
    ide_abort_command(s);
    ide_cmd_lba48_transform(s, lba48);
    s->req_nb_sectors = 1;
    s->status = SEEK_STAT | READY_STAT;
    ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
    s->media_changed = 1;

/* READ DMA (EXT). */
static bool cmd_read_dma(IDEState *s, uint8_t cmd)
    bool lba48 = (cmd == WIN_READDMA_EXT);
    ide_abort_command(s);
    ide_cmd_lba48_transform(s, lba48);
    ide_sector_start_dma(s, IDE_DMA_READ);

/* WRITE DMA (EXT). */
static bool cmd_write_dma(IDEState *s, uint8_t cmd)
    bool lba48 = (cmd == WIN_WRITEDMA_EXT);
    ide_abort_command(s);
    ide_cmd_lba48_transform(s, lba48);
    ide_sector_start_dma(s, IDE_DMA_WRITE);
    s->media_changed = 1;

/* FLUSH CACHE (EXT): body not visible in this extract. */
static bool cmd_flush_cache(IDEState *s, uint8_t cmd)

/* SEEK: accepted unconditionally. */
static bool cmd_seek(IDEState *s, uint8_t cmd)
    /* XXX: Check that seek is within bounds */

/* READ NATIVE MAX ADDRESS (EXT): report the last addressable sector. */
static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
    bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
    /* Refuse if no sectors are addressable (e.g. medium not inserted) */
    if (s->nb_sectors == 0) {
        ide_abort_command(s);
    ide_cmd_lba48_transform(s, lba48);
    ide_set_sector(s, s->nb_sectors - 1);

/* CHECK POWER MODE: always report active/idle. */
static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
    s->nsector = 0xff; /* device active or idle */

/* SET FEATURES: write-cache toggling updates the cached IDENTIFY word 85;
 * subfeature 0x03 rewrites the supported/selected transfer-mode words
 * (62/63/88).  Unknown subfeatures abort.
 * NOTE(review): break statements and the case-0x06 and final braces are
 * not visible in this extract. */
static bool cmd_set_features(IDEState *s, uint8_t cmd)
    uint16_t *identify_data;
    ide_abort_command(s);
    /* XXX: valid for CDROM ? */
    switch (s->feature) {
    case 0x02: /* write cache enable */
        bdrv_set_enable_write_cache(s->bs, true);
        identify_data = (uint16_t *)s->identify_data;
        put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
    case 0x82: /* write cache disable */
        bdrv_set_enable_write_cache(s->bs, false);
        identify_data = (uint16_t *)s->identify_data;
        put_le16(identify_data + 85, (1 << 14) | 1);
    case 0xcc: /* reverting to power-on defaults enable */
    case 0x66: /* reverting to power-on defaults disable */
    case 0xaa: /* read look-ahead enable */
    case 0x55: /* read look-ahead disable */
    case 0x05: /* set advanced power management mode */
    case 0x85: /* disable advanced power management mode */
    case 0x69: /* NOP */
    case 0x67: /* NOP */
    case 0x96: /* NOP */
    case 0x9a: /* NOP */
    case 0x42: /* enable Automatic Acoustic Mode */
    case 0xc2: /* disable Automatic Acoustic Mode */
    case 0x03: /* set transfer mode */
        uint8_t val = s->nsector & 0x07;
        identify_data = (uint16_t *)s->identify_data;
        switch (s->nsector >> 3) {
        case 0x00: /* pio default */
        case 0x01: /* pio mode */
            put_le16(identify_data + 62, 0x07);
            put_le16(identify_data + 63, 0x07);
            put_le16(identify_data + 88, 0x3f);
        case 0x02: /* single word dma mode */
            put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
            put_le16(identify_data + 63, 0x07);
            put_le16(identify_data + 88, 0x3f);
        case 0x04: /* mdma mode */
            put_le16(identify_data + 62, 0x07);
            put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
            put_le16(identify_data + 88, 0x3f);
        case 0x08: /* udma mode */
            put_le16(identify_data + 62, 0x07);
            put_le16(identify_data + 63, 0x07);
            put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
    ide_abort_command(s);
1346 /*** ATAPI commands ***/
1348 static bool cmd_identify_packet(IDEState
*s
, uint8_t cmd
)
1350 ide_atapi_identify(s
);
1351 s
->status
= READY_STAT
| SEEK_STAT
;
1352 ide_transfer_start(s
, s
->io_buffer
, 512, ide_transfer_stop
);
1353 ide_set_irq(s
->bus
);
1357 static bool cmd_exec_dev_diagnostic(IDEState
*s
, uint8_t cmd
)
1359 ide_set_signature(s
);
1361 if (s
->drive_kind
== IDE_CD
) {
1362 s
->status
= 0; /* ATAPI spec (v6) section 9.10 defines packet
1363 * devices to return a clear status register
1364 * with READY_STAT *not* set. */
1367 s
->status
= READY_STAT
| SEEK_STAT
;
1368 /* The bits of the error register are not as usual for this command!
1369 * They are part of the regular output (this is why ERR_STAT isn't set)
1370 * Device 0 passed, Device 1 passed or not present. */
1372 ide_set_irq(s
->bus
);
1378 static bool cmd_device_reset(IDEState
*s
, uint8_t cmd
)
1380 ide_set_signature(s
);
1381 s
->status
= 0x00; /* NOTE: READY is _not_ set */
1387 static bool cmd_packet(IDEState
*s
, uint8_t cmd
)
1389 /* overlapping commands not supported */
1390 if (s
->feature
& 0x02) {
1391 ide_abort_command(s
);
1395 s
->status
= READY_STAT
| SEEK_STAT
;
1396 s
->atapi_dma
= s
->feature
& 1;
1398 ide_transfer_start(s
, s
->io_buffer
, ATAPI_PACKET_SIZE
,
1404 /*** CF-ATA commands ***/
1406 static bool cmd_cfa_req_ext_error_code(IDEState
*s
, uint8_t cmd
)
1408 s
->error
= 0x09; /* miscellaneous error */
1409 s
->status
= READY_STAT
| SEEK_STAT
;
1410 ide_set_irq(s
->bus
);
1415 static bool cmd_cfa_erase_sectors(IDEState
*s
, uint8_t cmd
)
1417 /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1418 * required for Windows 8 to work with AHCI */
1420 if (cmd
== CFA_WEAR_LEVEL
) {
1424 if (cmd
== CFA_ERASE_SECTORS
) {
1425 s
->media_changed
= 1;
1431 static bool cmd_cfa_translate_sector(IDEState
*s
, uint8_t cmd
)
1433 s
->status
= READY_STAT
| SEEK_STAT
;
1435 memset(s
->io_buffer
, 0, 0x200);
1436 s
->io_buffer
[0x00] = s
->hcyl
; /* Cyl MSB */
1437 s
->io_buffer
[0x01] = s
->lcyl
; /* Cyl LSB */
1438 s
->io_buffer
[0x02] = s
->select
; /* Head */
1439 s
->io_buffer
[0x03] = s
->sector
; /* Sector */
1440 s
->io_buffer
[0x04] = ide_get_sector(s
) >> 16; /* LBA MSB */
1441 s
->io_buffer
[0x05] = ide_get_sector(s
) >> 8; /* LBA */
1442 s
->io_buffer
[0x06] = ide_get_sector(s
) >> 0; /* LBA LSB */
1443 s
->io_buffer
[0x13] = 0x00; /* Erase flag */
1444 s
->io_buffer
[0x18] = 0x00; /* Hot count */
1445 s
->io_buffer
[0x19] = 0x00; /* Hot count */
1446 s
->io_buffer
[0x1a] = 0x01; /* Hot count */
1448 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1449 ide_set_irq(s
->bus
);
1454 static bool cmd_cfa_access_metadata_storage(IDEState
*s
, uint8_t cmd
)
1456 switch (s
->feature
) {
1457 case 0x02: /* Inquiry Metadata Storage */
1458 ide_cfata_metadata_inquiry(s
);
1460 case 0x03: /* Read Metadata Storage */
1461 ide_cfata_metadata_read(s
);
1463 case 0x04: /* Write Metadata Storage */
1464 ide_cfata_metadata_write(s
);
1467 ide_abort_command(s
);
1471 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1472 s
->status
= 0x00; /* NOTE: READY is _not_ set */
1473 ide_set_irq(s
->bus
);
1478 static bool cmd_ibm_sense_condition(IDEState
*s
, uint8_t cmd
)
1480 switch (s
->feature
) {
1481 case 0x01: /* sense temperature in device */
1482 s
->nsector
= 0x50; /* +20 C */
1485 ide_abort_command(s
);
1493 /*** SMART commands ***/
1495 static bool cmd_smart(IDEState
*s
, uint8_t cmd
)
1499 if (s
->hcyl
!= 0xc2 || s
->lcyl
!= 0x4f) {
1503 if (!s
->smart_enabled
&& s
->feature
!= SMART_ENABLE
) {
1507 switch (s
->feature
) {
1509 s
->smart_enabled
= 0;
1513 s
->smart_enabled
= 1;
1516 case SMART_ATTR_AUTOSAVE
:
1517 switch (s
->sector
) {
1519 s
->smart_autosave
= 0;
1522 s
->smart_autosave
= 1;
1530 if (!s
->smart_errors
) {
1539 case SMART_READ_THRESH
:
1540 memset(s
->io_buffer
, 0, 0x200);
1541 s
->io_buffer
[0] = 0x01; /* smart struct version */
1543 for (n
= 0; n
< ARRAY_SIZE(smart_attributes
); n
++) {
1544 s
->io_buffer
[2 + 0 + (n
* 12)] = smart_attributes
[n
][0];
1545 s
->io_buffer
[2 + 1 + (n
* 12)] = smart_attributes
[n
][11];
1549 for (n
= 0; n
< 511; n
++) {
1550 s
->io_buffer
[511] += s
->io_buffer
[n
];
1552 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
1554 s
->status
= READY_STAT
| SEEK_STAT
;
1555 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1556 ide_set_irq(s
->bus
);
1559 case SMART_READ_DATA
:
1560 memset(s
->io_buffer
, 0, 0x200);
1561 s
->io_buffer
[0] = 0x01; /* smart struct version */
1563 for (n
= 0; n
< ARRAY_SIZE(smart_attributes
); n
++) {
1565 for (i
= 0; i
< 11; i
++) {
1566 s
->io_buffer
[2 + i
+ (n
* 12)] = smart_attributes
[n
][i
];
1570 s
->io_buffer
[362] = 0x02 | (s
->smart_autosave
? 0x80 : 0x00);
1571 if (s
->smart_selftest_count
== 0) {
1572 s
->io_buffer
[363] = 0;
1575 s
->smart_selftest_data
[3 +
1576 (s
->smart_selftest_count
- 1) *
1579 s
->io_buffer
[364] = 0x20;
1580 s
->io_buffer
[365] = 0x01;
1581 /* offline data collection capacity: execute + self-test*/
1582 s
->io_buffer
[367] = (1 << 4 | 1 << 3 | 1);
1583 s
->io_buffer
[368] = 0x03; /* smart capability (1) */
1584 s
->io_buffer
[369] = 0x00; /* smart capability (2) */
1585 s
->io_buffer
[370] = 0x01; /* error logging supported */
1586 s
->io_buffer
[372] = 0x02; /* minutes for poll short test */
1587 s
->io_buffer
[373] = 0x36; /* minutes for poll ext test */
1588 s
->io_buffer
[374] = 0x01; /* minutes for poll conveyance */
1590 for (n
= 0; n
< 511; n
++) {
1591 s
->io_buffer
[511] += s
->io_buffer
[n
];
1593 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
1595 s
->status
= READY_STAT
| SEEK_STAT
;
1596 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1597 ide_set_irq(s
->bus
);
1600 case SMART_READ_LOG
:
1601 switch (s
->sector
) {
1602 case 0x01: /* summary smart error log */
1603 memset(s
->io_buffer
, 0, 0x200);
1604 s
->io_buffer
[0] = 0x01;
1605 s
->io_buffer
[1] = 0x00; /* no error entries */
1606 s
->io_buffer
[452] = s
->smart_errors
& 0xff;
1607 s
->io_buffer
[453] = (s
->smart_errors
& 0xff00) >> 8;
1609 for (n
= 0; n
< 511; n
++) {
1610 s
->io_buffer
[511] += s
->io_buffer
[n
];
1612 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
1614 case 0x06: /* smart self test log */
1615 memset(s
->io_buffer
, 0, 0x200);
1616 s
->io_buffer
[0] = 0x01;
1617 if (s
->smart_selftest_count
== 0) {
1618 s
->io_buffer
[508] = 0;
1620 s
->io_buffer
[508] = s
->smart_selftest_count
;
1621 for (n
= 2; n
< 506; n
++) {
1622 s
->io_buffer
[n
] = s
->smart_selftest_data
[n
];
1626 for (n
= 0; n
< 511; n
++) {
1627 s
->io_buffer
[511] += s
->io_buffer
[n
];
1629 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
1634 s
->status
= READY_STAT
| SEEK_STAT
;
1635 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1636 ide_set_irq(s
->bus
);
1639 case SMART_EXECUTE_OFFLINE
:
1640 switch (s
->sector
) {
1641 case 0: /* off-line routine */
1642 case 1: /* short self test */
1643 case 2: /* extended self test */
1644 s
->smart_selftest_count
++;
1645 if (s
->smart_selftest_count
> 21) {
1646 s
->smart_selftest_count
= 1;
1648 n
= 2 + (s
->smart_selftest_count
- 1) * 24;
1649 s
->smart_selftest_data
[n
] = s
->sector
;
1650 s
->smart_selftest_data
[n
+ 1] = 0x00; /* OK and finished */
1651 s
->smart_selftest_data
[n
+ 2] = 0x34; /* hour count lsb */
1652 s
->smart_selftest_data
[n
+ 3] = 0x12; /* hour count msb */
1661 ide_abort_command(s
);
1665 #define HD_OK (1u << IDE_HD)
1666 #define CD_OK (1u << IDE_CD)
1667 #define CFA_OK (1u << IDE_CFATA)
1668 #define HD_CFA_OK (HD_OK | CFA_OK)
1669 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
1671 /* Set the Disk Seek Completed status bit during completion */
1672 #define SET_DSC (1u << 8)
1674 /* See ACS-2 T13/2015-D Table B.2 Command codes */
1675 static const struct {
1676 /* Returns true if the completion code should be run */
1677 bool (*handler
)(IDEState
*s
, uint8_t cmd
);
1679 } ide_cmd_table
[0x100] = {
1680 /* NOP not implemented, mandatory for CD */
1681 [CFA_REQ_EXT_ERROR_CODE
] = { cmd_cfa_req_ext_error_code
, CFA_OK
},
1682 [WIN_DSM
] = { cmd_data_set_management
, ALL_OK
},
1683 [WIN_DEVICE_RESET
] = { cmd_device_reset
, CD_OK
},
1684 [WIN_RECAL
] = { cmd_nop
, HD_CFA_OK
| SET_DSC
},
1685 [WIN_READ
] = { cmd_read_pio
, ALL_OK
},
1686 [WIN_READ_ONCE
] = { cmd_read_pio
, ALL_OK
},
1687 [WIN_READ_EXT
] = { cmd_read_pio
, HD_CFA_OK
},
1688 [WIN_READDMA_EXT
] = { cmd_read_dma
, HD_CFA_OK
},
1689 [WIN_READ_NATIVE_MAX_EXT
] = { cmd_read_native_max
, HD_CFA_OK
| SET_DSC
},
1690 [WIN_MULTREAD_EXT
] = { cmd_read_multiple
, HD_CFA_OK
},
1691 [WIN_WRITE
] = { cmd_write_pio
, HD_CFA_OK
},
1692 [WIN_WRITE_ONCE
] = { cmd_write_pio
, HD_CFA_OK
},
1693 [WIN_WRITE_EXT
] = { cmd_write_pio
, HD_CFA_OK
},
1694 [WIN_WRITEDMA_EXT
] = { cmd_write_dma
, HD_CFA_OK
},
1695 [CFA_WRITE_SECT_WO_ERASE
] = { cmd_write_pio
, CFA_OK
},
1696 [WIN_MULTWRITE_EXT
] = { cmd_write_multiple
, HD_CFA_OK
},
1697 [WIN_WRITE_VERIFY
] = { cmd_write_pio
, HD_CFA_OK
},
1698 [WIN_VERIFY
] = { cmd_verify
, HD_CFA_OK
| SET_DSC
},
1699 [WIN_VERIFY_ONCE
] = { cmd_verify
, HD_CFA_OK
| SET_DSC
},
1700 [WIN_VERIFY_EXT
] = { cmd_verify
, HD_CFA_OK
| SET_DSC
},
1701 [WIN_SEEK
] = { cmd_seek
, HD_CFA_OK
| SET_DSC
},
1702 [CFA_TRANSLATE_SECTOR
] = { cmd_cfa_translate_sector
, CFA_OK
},
1703 [WIN_DIAGNOSE
] = { cmd_exec_dev_diagnostic
, ALL_OK
},
1704 [WIN_SPECIFY
] = { cmd_nop
, HD_CFA_OK
| SET_DSC
},
1705 [WIN_STANDBYNOW2
] = { cmd_nop
, ALL_OK
},
1706 [WIN_IDLEIMMEDIATE2
] = { cmd_nop
, ALL_OK
},
1707 [WIN_STANDBY2
] = { cmd_nop
, ALL_OK
},
1708 [WIN_SETIDLE2
] = { cmd_nop
, ALL_OK
},
1709 [WIN_CHECKPOWERMODE2
] = { cmd_check_power_mode
, ALL_OK
| SET_DSC
},
1710 [WIN_SLEEPNOW2
] = { cmd_nop
, ALL_OK
},
1711 [WIN_PACKETCMD
] = { cmd_packet
, CD_OK
},
1712 [WIN_PIDENTIFY
] = { cmd_identify_packet
, CD_OK
},
1713 [WIN_SMART
] = { cmd_smart
, HD_CFA_OK
| SET_DSC
},
1714 [CFA_ACCESS_METADATA_STORAGE
] = { cmd_cfa_access_metadata_storage
, CFA_OK
},
1715 [CFA_ERASE_SECTORS
] = { cmd_cfa_erase_sectors
, CFA_OK
| SET_DSC
},
1716 [WIN_MULTREAD
] = { cmd_read_multiple
, HD_CFA_OK
},
1717 [WIN_MULTWRITE
] = { cmd_write_multiple
, HD_CFA_OK
},
1718 [WIN_SETMULT
] = { cmd_set_multiple_mode
, HD_CFA_OK
| SET_DSC
},
1719 [WIN_READDMA
] = { cmd_read_dma
, HD_CFA_OK
},
1720 [WIN_READDMA_ONCE
] = { cmd_read_dma
, HD_CFA_OK
},
1721 [WIN_WRITEDMA
] = { cmd_write_dma
, HD_CFA_OK
},
1722 [WIN_WRITEDMA_ONCE
] = { cmd_write_dma
, HD_CFA_OK
},
1723 [CFA_WRITE_MULTI_WO_ERASE
] = { cmd_write_multiple
, CFA_OK
},
1724 [WIN_STANDBYNOW1
] = { cmd_nop
, ALL_OK
},
1725 [WIN_IDLEIMMEDIATE
] = { cmd_nop
, ALL_OK
},
1726 [WIN_STANDBY
] = { cmd_nop
, ALL_OK
},
1727 [WIN_SETIDLE1
] = { cmd_nop
, ALL_OK
},
1728 [WIN_CHECKPOWERMODE1
] = { cmd_check_power_mode
, ALL_OK
| SET_DSC
},
1729 [WIN_SLEEPNOW1
] = { cmd_nop
, ALL_OK
},
1730 [WIN_FLUSH_CACHE
] = { cmd_flush_cache
, ALL_OK
},
1731 [WIN_FLUSH_CACHE_EXT
] = { cmd_flush_cache
, HD_CFA_OK
},
1732 [WIN_IDENTIFY
] = { cmd_identify
, ALL_OK
},
1733 [WIN_SETFEATURES
] = { cmd_set_features
, ALL_OK
| SET_DSC
},
1734 [IBM_SENSE_CONDITION
] = { cmd_ibm_sense_condition
, CFA_OK
| SET_DSC
},
1735 [CFA_WEAR_LEVEL
] = { cmd_cfa_erase_sectors
, HD_CFA_OK
| SET_DSC
},
1736 [WIN_READ_NATIVE_MAX
] = { cmd_read_native_max
, ALL_OK
| SET_DSC
},
1739 static bool ide_cmd_permitted(IDEState
*s
, uint32_t cmd
)
1741 return cmd
< ARRAY_SIZE(ide_cmd_table
)
1742 && (ide_cmd_table
[cmd
].flags
& (1u << s
->drive_kind
));
1745 void ide_exec_cmd(IDEBus
*bus
, uint32_t val
)
1750 #if defined(DEBUG_IDE)
1751 printf("ide: CMD=%02x\n", val
);
1753 s
= idebus_active_if(bus
);
1754 /* ignore commands to non existent slave */
1755 if (s
!= bus
->ifs
&& !s
->bs
)
1758 /* Only DEVICE RESET is allowed while BSY or/and DRQ are set */
1759 if ((s
->status
& (BUSY_STAT
|DRQ_STAT
)) && val
!= WIN_DEVICE_RESET
)
1762 if (!ide_cmd_permitted(s
, val
)) {
1763 ide_abort_command(s
);
1764 ide_set_irq(s
->bus
);
1768 s
->status
= READY_STAT
| BUSY_STAT
;
1771 complete
= ide_cmd_table
[val
].handler(s
, val
);
1773 s
->status
&= ~BUSY_STAT
;
1774 assert(!!s
->error
== !!(s
->status
& ERR_STAT
));
1776 if ((ide_cmd_table
[val
].flags
& SET_DSC
) && !s
->error
) {
1777 s
->status
|= SEEK_STAT
;
1781 ide_set_irq(s
->bus
);
1785 uint32_t ide_ioport_read(void *opaque
, uint32_t addr1
)
1787 IDEBus
*bus
= opaque
;
1788 IDEState
*s
= idebus_active_if(bus
);
1793 /* FIXME: HOB readback uses bit 7, but it's always set right now */
1794 //hob = s->select & (1 << 7);
1801 if ((!bus
->ifs
[0].bs
&& !bus
->ifs
[1].bs
) ||
1802 (s
!= bus
->ifs
&& !s
->bs
))
1807 ret
= s
->hob_feature
;
1810 if (!bus
->ifs
[0].bs
&& !bus
->ifs
[1].bs
)
1813 ret
= s
->nsector
& 0xff;
1815 ret
= s
->hob_nsector
;
1818 if (!bus
->ifs
[0].bs
&& !bus
->ifs
[1].bs
)
1823 ret
= s
->hob_sector
;
1826 if (!bus
->ifs
[0].bs
&& !bus
->ifs
[1].bs
)
1834 if (!bus
->ifs
[0].bs
&& !bus
->ifs
[1].bs
)
1842 if (!bus
->ifs
[0].bs
&& !bus
->ifs
[1].bs
)
1849 if ((!bus
->ifs
[0].bs
&& !bus
->ifs
[1].bs
) ||
1850 (s
!= bus
->ifs
&& !s
->bs
))
1854 qemu_irq_lower(bus
->irq
);
1858 printf("ide: read addr=0x%x val=%02x\n", addr1
, ret
);
1863 uint32_t ide_status_read(void *opaque
, uint32_t addr
)
1865 IDEBus
*bus
= opaque
;
1866 IDEState
*s
= idebus_active_if(bus
);
1869 if ((!bus
->ifs
[0].bs
&& !bus
->ifs
[1].bs
) ||
1870 (s
!= bus
->ifs
&& !s
->bs
))
1875 printf("ide: read status addr=0x%x val=%02x\n", addr
, ret
);
1880 void ide_cmd_write(void *opaque
, uint32_t addr
, uint32_t val
)
1882 IDEBus
*bus
= opaque
;
1887 printf("ide: write control addr=0x%x val=%02x\n", addr
, val
);
1889 /* common for both drives */
1890 if (!(bus
->cmd
& IDE_CMD_RESET
) &&
1891 (val
& IDE_CMD_RESET
)) {
1892 /* reset low to high */
1893 for(i
= 0;i
< 2; i
++) {
1895 s
->status
= BUSY_STAT
| SEEK_STAT
;
1898 } else if ((bus
->cmd
& IDE_CMD_RESET
) &&
1899 !(val
& IDE_CMD_RESET
)) {
1901 for(i
= 0;i
< 2; i
++) {
1903 if (s
->drive_kind
== IDE_CD
)
1904 s
->status
= 0x00; /* NOTE: READY is _not_ set */
1906 s
->status
= READY_STAT
| SEEK_STAT
;
1907 ide_set_signature(s
);
1915 * Returns true if the running PIO transfer is a PIO out (i.e. data is
1916 * transferred from the device to the guest), false if it's a PIO in
1918 static bool ide_is_pio_out(IDEState
*s
)
1920 if (s
->end_transfer_func
== ide_sector_write
||
1921 s
->end_transfer_func
== ide_atapi_cmd
) {
1923 } else if (s
->end_transfer_func
== ide_sector_read
||
1924 s
->end_transfer_func
== ide_transfer_stop
||
1925 s
->end_transfer_func
== ide_atapi_cmd_reply_end
||
1926 s
->end_transfer_func
== ide_dummy_transfer_stop
) {
1933 void ide_data_writew(void *opaque
, uint32_t addr
, uint32_t val
)
1935 IDEBus
*bus
= opaque
;
1936 IDEState
*s
= idebus_active_if(bus
);
1939 /* PIO data access allowed only when DRQ bit is set. The result of a write
1940 * during PIO out is indeterminate, just ignore it. */
1941 if (!(s
->status
& DRQ_STAT
) || ide_is_pio_out(s
)) {
1946 *(uint16_t *)p
= le16_to_cpu(val
);
1949 if (p
>= s
->data_end
)
1950 s
->end_transfer_func(s
);
1953 uint32_t ide_data_readw(void *opaque
, uint32_t addr
)
1955 IDEBus
*bus
= opaque
;
1956 IDEState
*s
= idebus_active_if(bus
);
1960 /* PIO data access allowed only when DRQ bit is set. The result of a read
1961 * during PIO in is indeterminate, return 0 and don't move forward. */
1962 if (!(s
->status
& DRQ_STAT
) || !ide_is_pio_out(s
)) {
1967 ret
= cpu_to_le16(*(uint16_t *)p
);
1970 if (p
>= s
->data_end
)
1971 s
->end_transfer_func(s
);
1975 void ide_data_writel(void *opaque
, uint32_t addr
, uint32_t val
)
1977 IDEBus
*bus
= opaque
;
1978 IDEState
*s
= idebus_active_if(bus
);
1981 /* PIO data access allowed only when DRQ bit is set. The result of a write
1982 * during PIO out is indeterminate, just ignore it. */
1983 if (!(s
->status
& DRQ_STAT
) || ide_is_pio_out(s
)) {
1988 *(uint32_t *)p
= le32_to_cpu(val
);
1991 if (p
>= s
->data_end
)
1992 s
->end_transfer_func(s
);
1995 uint32_t ide_data_readl(void *opaque
, uint32_t addr
)
1997 IDEBus
*bus
= opaque
;
1998 IDEState
*s
= idebus_active_if(bus
);
2002 /* PIO data access allowed only when DRQ bit is set. The result of a read
2003 * during PIO in is indeterminate, return 0 and don't move forward. */
2004 if (!(s
->status
& DRQ_STAT
) || !ide_is_pio_out(s
)) {
2009 ret
= cpu_to_le32(*(uint32_t *)p
);
2012 if (p
>= s
->data_end
)
2013 s
->end_transfer_func(s
);
2017 static void ide_dummy_transfer_stop(IDEState
*s
)
2019 s
->data_ptr
= s
->io_buffer
;
2020 s
->data_end
= s
->io_buffer
;
2021 s
->io_buffer
[0] = 0xff;
2022 s
->io_buffer
[1] = 0xff;
2023 s
->io_buffer
[2] = 0xff;
2024 s
->io_buffer
[3] = 0xff;
2027 static void ide_reset(IDEState
*s
)
2030 printf("ide: reset\n");
2034 bdrv_aio_cancel(s
->pio_aiocb
);
2035 s
->pio_aiocb
= NULL
;
2038 if (s
->drive_kind
== IDE_CFATA
)
2039 s
->mult_sectors
= 0;
2041 s
->mult_sectors
= MAX_MULT_SECTORS
;
2058 s
->status
= READY_STAT
| SEEK_STAT
;
2062 /* ATAPI specific */
2065 s
->cdrom_changed
= 0;
2066 s
->packet_transfer_size
= 0;
2067 s
->elementary_transfer_size
= 0;
2068 s
->io_buffer_index
= 0;
2069 s
->cd_sector_size
= 0;
2074 s
->io_buffer_size
= 0;
2075 s
->req_nb_sectors
= 0;
2077 ide_set_signature(s
);
2078 /* init the transfer handler so that 0xffff is returned on data
2080 s
->end_transfer_func
= ide_dummy_transfer_stop
;
2081 ide_dummy_transfer_stop(s
);
2082 s
->media_changed
= 0;
2085 void ide_bus_reset(IDEBus
*bus
)
2089 ide_reset(&bus
->ifs
[0]);
2090 ide_reset(&bus
->ifs
[1]);
2093 /* pending async DMA */
2094 if (bus
->dma
->aiocb
) {
2096 printf("aio_cancel\n");
2098 bdrv_aio_cancel(bus
->dma
->aiocb
);
2099 bus
->dma
->aiocb
= NULL
;
2102 /* reset dma provider too */
2103 if (bus
->dma
->ops
->reset
) {
2104 bus
->dma
->ops
->reset(bus
->dma
);
2108 static bool ide_cd_is_tray_open(void *opaque
)
2110 return ((IDEState
*)opaque
)->tray_open
;
2113 static bool ide_cd_is_medium_locked(void *opaque
)
2115 return ((IDEState
*)opaque
)->tray_locked
;
2118 static const BlockDevOps ide_cd_block_ops
= {
2119 .change_media_cb
= ide_cd_change_cb
,
2120 .eject_request_cb
= ide_cd_eject_request_cb
,
2121 .is_tray_open
= ide_cd_is_tray_open
,
2122 .is_medium_locked
= ide_cd_is_medium_locked
,
2125 int ide_init_drive(IDEState
*s
, BlockDriverState
*bs
, IDEDriveKind kind
,
2126 const char *version
, const char *serial
, const char *model
,
2128 uint32_t cylinders
, uint32_t heads
, uint32_t secs
,
2131 uint64_t nb_sectors
;
2134 s
->drive_kind
= kind
;
2136 bdrv_get_geometry(bs
, &nb_sectors
);
2137 s
->cylinders
= cylinders
;
2140 s
->chs_trans
= chs_trans
;
2141 s
->nb_sectors
= nb_sectors
;
2143 /* The SMART values should be preserved across power cycles
2145 s
->smart_enabled
= 1;
2146 s
->smart_autosave
= 1;
2147 s
->smart_errors
= 0;
2148 s
->smart_selftest_count
= 0;
2149 if (kind
== IDE_CD
) {
2150 bdrv_set_dev_ops(bs
, &ide_cd_block_ops
, s
);
2151 bdrv_set_guest_block_size(bs
, 2048);
2153 if (!bdrv_is_inserted(s
->bs
)) {
2154 error_report("Device needs media, but drive is empty");
2157 if (bdrv_is_read_only(bs
)) {
2158 error_report("Can't use a read-only drive");
2163 pstrcpy(s
->drive_serial_str
, sizeof(s
->drive_serial_str
), serial
);
2165 snprintf(s
->drive_serial_str
, sizeof(s
->drive_serial_str
),
2166 "QM%05d", s
->drive_serial
);
2169 pstrcpy(s
->drive_model_str
, sizeof(s
->drive_model_str
), model
);
2173 strcpy(s
->drive_model_str
, "QEMU DVD-ROM");
2176 strcpy(s
->drive_model_str
, "QEMU MICRODRIVE");
2179 strcpy(s
->drive_model_str
, "QEMU HARDDISK");
2185 pstrcpy(s
->version
, sizeof(s
->version
), version
);
2187 pstrcpy(s
->version
, sizeof(s
->version
), qemu_get_version());
2191 bdrv_iostatus_enable(bs
);
2195 static void ide_init1(IDEBus
*bus
, int unit
)
2197 static int drive_serial
= 1;
2198 IDEState
*s
= &bus
->ifs
[unit
];
2202 s
->drive_serial
= drive_serial
++;
2203 /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2204 s
->io_buffer_total_len
= IDE_DMA_BUF_SECTORS
*512 + 4;
2205 s
->io_buffer
= qemu_memalign(2048, s
->io_buffer_total_len
);
2206 memset(s
->io_buffer
, 0, s
->io_buffer_total_len
);
2208 s
->smart_selftest_data
= qemu_blockalign(s
->bs
, 512);
2209 memset(s
->smart_selftest_data
, 0, 512);
2211 s
->sector_write_timer
= timer_new_ns(QEMU_CLOCK_VIRTUAL
,
2212 ide_sector_write_timer_cb
, s
);
2215 static int ide_nop_int(IDEDMA
*dma
, int x
)
2220 static void ide_nop_restart(void *opaque
, int x
, RunState y
)
2224 static const IDEDMAOps ide_dma_nop_ops
= {
2225 .prepare_buf
= ide_nop_int
,
2226 .rw_buf
= ide_nop_int
,
2227 .set_unit
= ide_nop_int
,
2228 .restart_cb
= ide_nop_restart
,
2231 static IDEDMA ide_dma_nop
= {
2232 .ops
= &ide_dma_nop_ops
,
2236 void ide_init2(IDEBus
*bus
, qemu_irq irq
)
2240 for(i
= 0; i
< 2; i
++) {
2242 ide_reset(&bus
->ifs
[i
]);
2245 bus
->dma
= &ide_dma_nop
;
2248 static const MemoryRegionPortio ide_portio_list
[] = {
2249 { 0, 8, 1, .read
= ide_ioport_read
, .write
= ide_ioport_write
},
2250 { 0, 2, 2, .read
= ide_data_readw
, .write
= ide_data_writew
},
2251 { 0, 4, 4, .read
= ide_data_readl
, .write
= ide_data_writel
},
2252 PORTIO_END_OF_LIST(),
2255 static const MemoryRegionPortio ide_portio2_list
[] = {
2256 { 0, 1, 1, .read
= ide_status_read
, .write
= ide_cmd_write
},
2257 PORTIO_END_OF_LIST(),
2260 void ide_init_ioport(IDEBus
*bus
, ISADevice
*dev
, int iobase
, int iobase2
)
2262 /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
2263 bridge has been setup properly to always register with ISA. */
2264 isa_register_portio_list(dev
, iobase
, ide_portio_list
, bus
, "ide");
2267 isa_register_portio_list(dev
, iobase2
, ide_portio2_list
, bus
, "ide");
2271 static bool is_identify_set(void *opaque
, int version_id
)
2273 IDEState
*s
= opaque
;
2275 return s
->identify_set
!= 0;
2278 static EndTransferFunc
* transfer_end_table
[] = {
2282 ide_atapi_cmd_reply_end
,
2284 ide_dummy_transfer_stop
,
2287 static int transfer_end_table_idx(EndTransferFunc
*fn
)
2291 for (i
= 0; i
< ARRAY_SIZE(transfer_end_table
); i
++)
2292 if (transfer_end_table
[i
] == fn
)
2298 static int ide_drive_post_load(void *opaque
, int version_id
)
2300 IDEState
*s
= opaque
;
2302 if (s
->identify_set
) {
2303 bdrv_set_enable_write_cache(s
->bs
, !!(s
->identify_data
[85] & (1 << 5)));
2308 static int ide_drive_pio_post_load(void *opaque
, int version_id
)
2310 IDEState
*s
= opaque
;
2312 if (s
->end_transfer_fn_idx
>= ARRAY_SIZE(transfer_end_table
)) {
2315 s
->end_transfer_func
= transfer_end_table
[s
->end_transfer_fn_idx
];
2316 s
->data_ptr
= s
->io_buffer
+ s
->cur_io_buffer_offset
;
2317 s
->data_end
= s
->data_ptr
+ s
->cur_io_buffer_len
;
2322 static void ide_drive_pio_pre_save(void *opaque
)
2324 IDEState
*s
= opaque
;
2327 s
->cur_io_buffer_offset
= s
->data_ptr
- s
->io_buffer
;
2328 s
->cur_io_buffer_len
= s
->data_end
- s
->data_ptr
;
2330 idx
= transfer_end_table_idx(s
->end_transfer_func
);
2332 fprintf(stderr
, "%s: invalid end_transfer_func for DRQ_STAT\n",
2334 s
->end_transfer_fn_idx
= 2;
2336 s
->end_transfer_fn_idx
= idx
;
2340 static bool ide_drive_pio_state_needed(void *opaque
)
2342 IDEState
*s
= opaque
;
2344 return ((s
->status
& DRQ_STAT
) != 0)
2345 || (s
->bus
->error_status
& IDE_RETRY_PIO
);
2348 static bool ide_tray_state_needed(void *opaque
)
2350 IDEState
*s
= opaque
;
2352 return s
->tray_open
|| s
->tray_locked
;
2355 static bool ide_atapi_gesn_needed(void *opaque
)
2357 IDEState
*s
= opaque
;
2359 return s
->events
.new_media
|| s
->events
.eject_request
;
2362 static bool ide_error_needed(void *opaque
)
2364 IDEBus
*bus
= opaque
;
2366 return (bus
->error_status
!= 0);
2369 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2370 static const VMStateDescription vmstate_ide_atapi_gesn_state
= {
2371 .name
="ide_drive/atapi/gesn_state",
2373 .minimum_version_id
= 1,
2374 .fields
= (VMStateField
[]) {
2375 VMSTATE_BOOL(events
.new_media
, IDEState
),
2376 VMSTATE_BOOL(events
.eject_request
, IDEState
),
2377 VMSTATE_END_OF_LIST()
2381 static const VMStateDescription vmstate_ide_tray_state
= {
2382 .name
= "ide_drive/tray_state",
2384 .minimum_version_id
= 1,
2385 .fields
= (VMStateField
[]) {
2386 VMSTATE_BOOL(tray_open
, IDEState
),
2387 VMSTATE_BOOL(tray_locked
, IDEState
),
2388 VMSTATE_END_OF_LIST()
2392 static const VMStateDescription vmstate_ide_drive_pio_state
= {
2393 .name
= "ide_drive/pio_state",
2395 .minimum_version_id
= 1,
2396 .pre_save
= ide_drive_pio_pre_save
,
2397 .post_load
= ide_drive_pio_post_load
,
2398 .fields
= (VMStateField
[]) {
2399 VMSTATE_INT32(req_nb_sectors
, IDEState
),
2400 VMSTATE_VARRAY_INT32(io_buffer
, IDEState
, io_buffer_total_len
, 1,
2401 vmstate_info_uint8
, uint8_t),
2402 VMSTATE_INT32(cur_io_buffer_offset
, IDEState
),
2403 VMSTATE_INT32(cur_io_buffer_len
, IDEState
),
2404 VMSTATE_UINT8(end_transfer_fn_idx
, IDEState
),
2405 VMSTATE_INT32(elementary_transfer_size
, IDEState
),
2406 VMSTATE_INT32(packet_transfer_size
, IDEState
),
2407 VMSTATE_END_OF_LIST()
2411 const VMStateDescription vmstate_ide_drive
= {
2412 .name
= "ide_drive",
2414 .minimum_version_id
= 0,
2415 .post_load
= ide_drive_post_load
,
2416 .fields
= (VMStateField
[]) {
2417 VMSTATE_INT32(mult_sectors
, IDEState
),
2418 VMSTATE_INT32(identify_set
, IDEState
),
2419 VMSTATE_BUFFER_TEST(identify_data
, IDEState
, is_identify_set
),
2420 VMSTATE_UINT8(feature
, IDEState
),
2421 VMSTATE_UINT8(error
, IDEState
),
2422 VMSTATE_UINT32(nsector
, IDEState
),
2423 VMSTATE_UINT8(sector
, IDEState
),
2424 VMSTATE_UINT8(lcyl
, IDEState
),
2425 VMSTATE_UINT8(hcyl
, IDEState
),
2426 VMSTATE_UINT8(hob_feature
, IDEState
),
2427 VMSTATE_UINT8(hob_sector
, IDEState
),
2428 VMSTATE_UINT8(hob_nsector
, IDEState
),
2429 VMSTATE_UINT8(hob_lcyl
, IDEState
),
2430 VMSTATE_UINT8(hob_hcyl
, IDEState
),
2431 VMSTATE_UINT8(select
, IDEState
),
2432 VMSTATE_UINT8(status
, IDEState
),
2433 VMSTATE_UINT8(lba48
, IDEState
),
2434 VMSTATE_UINT8(sense_key
, IDEState
),
2435 VMSTATE_UINT8(asc
, IDEState
),
2436 VMSTATE_UINT8_V(cdrom_changed
, IDEState
, 3),
2437 VMSTATE_END_OF_LIST()
2439 .subsections
= (VMStateSubsection
[]) {
2441 .vmsd
= &vmstate_ide_drive_pio_state
,
2442 .needed
= ide_drive_pio_state_needed
,
2444 .vmsd
= &vmstate_ide_tray_state
,
2445 .needed
= ide_tray_state_needed
,
2447 .vmsd
= &vmstate_ide_atapi_gesn_state
,
2448 .needed
= ide_atapi_gesn_needed
,
2455 static const VMStateDescription vmstate_ide_error_status
= {
2456 .name
="ide_bus/error",
2458 .minimum_version_id
= 1,
2459 .fields
= (VMStateField
[]) {
2460 VMSTATE_INT32(error_status
, IDEBus
),
2461 VMSTATE_END_OF_LIST()
2465 const VMStateDescription vmstate_ide_bus
= {
2468 .minimum_version_id
= 1,
2469 .fields
= (VMStateField
[]) {
2470 VMSTATE_UINT8(cmd
, IDEBus
),
2471 VMSTATE_UINT8(unit
, IDEBus
),
2472 VMSTATE_END_OF_LIST()
2474 .subsections
= (VMStateSubsection
[]) {
2476 .vmsd
= &vmstate_ide_error_status
,
2477 .needed
= ide_error_needed
,
2484 void ide_drive_get(DriveInfo
**hd
, int max_bus
)
2488 if (drive_get_max_bus(IF_IDE
) >= max_bus
) {
2489 fprintf(stderr
, "qemu: too many IDE bus: %d\n", max_bus
);
2493 for(i
= 0; i
< max_bus
* MAX_IDE_DEVS
; i
++) {
2494 hd
[i
] = drive_get(IF_IDE
, i
/ MAX_IDE_DEVS
, i
% MAX_IDE_DEVS
);