qemu.git: hw/ide/core.c
1 /*
2 * QEMU IDE disk and CD/DVD-ROM Emulator
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2006 Openedhand Ltd.
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
25 #include <hw/hw.h>
26 #include <hw/i386/pc.h>
27 #include <hw/pci/pci.h>
28 #include <hw/isa/isa.h>
29 #include "qemu/error-report.h"
30 #include "qemu/timer.h"
31 #include "sysemu/sysemu.h"
32 #include "sysemu/dma.h"
33 #include "hw/block/block.h"
34 #include "sysemu/block-backend.h"
36 #include <hw/ide/internal.h>
38 /* These values were based on a Seagate ST3500418AS but have been modified
39 to make more sense in QEMU */
40 static const int smart_attributes[][12] = {
41 /* id, flags, hflags, val, wrst, raw (6 bytes), threshold */
42 /* raw read error rate*/
43 { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
44 /* spin up */
45 { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
46 /* start stop count */
47 { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
48 /* remapped sectors */
49 { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
50 /* power on hours */
51 { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
52 /* power cycle count */
53 { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
54 /* airflow-temperature-celsius */
55 { 190, 0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
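/* Illustrative reading of one row, following the header above: the
 * "remapped sectors" entry is attribute id 0x05 with flags 0x03, a current
 * and worst-case value of 0x64 (100), an all-zero raw field and a
 * threshold of 0x24 (36). */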
58 static int ide_handle_rw_error(IDEState *s, int error, int op);
59 static void ide_dummy_transfer_stop(IDEState *s);
61 static void padstr(char *str, const char *src, int len)
63 int i, v;
64 for(i = 0; i < len; i++) {
65 if (*src)
66 v = *src++;
67 else
68 v = ' ';
69 str[i^1] = v;
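/* ATA IDENTIFY strings pack two ASCII characters into each 16-bit word
 * with the bytes swapped, which is what the str[i^1] indexing implements.
 * Illustrative: padstr(buf, "QM", 4) leaves buf as { 'M', 'Q', ' ', ' ' }. */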
73 static void put_le16(uint16_t *p, unsigned int v)
75 *p = cpu_to_le16(v);
78 static void ide_identify_size(IDEState *s)
80 uint16_t *p = (uint16_t *)s->identify_data;
81 put_le16(p + 60, s->nb_sectors);
82 put_le16(p + 61, s->nb_sectors >> 16);
83 put_le16(p + 100, s->nb_sectors);
84 put_le16(p + 101, s->nb_sectors >> 16);
85 put_le16(p + 102, s->nb_sectors >> 32);
86 put_le16(p + 103, s->nb_sectors >> 48);
89 static void ide_identify(IDEState *s)
91 uint16_t *p;
92 unsigned int oldsize;
93 IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
95 p = (uint16_t *)s->identify_data;
96 if (s->identify_set) {
97 goto fill_buffer;
99 memset(p, 0, sizeof(s->identify_data));
101 put_le16(p + 0, 0x0040);
102 put_le16(p + 1, s->cylinders);
103 put_le16(p + 3, s->heads);
104 put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
105 put_le16(p + 5, 512); /* XXX: retired, remove ? */
106 put_le16(p + 6, s->sectors);
107 padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
108 put_le16(p + 20, 3); /* XXX: retired, remove ? */
109 put_le16(p + 21, 512); /* cache size in sectors */
110 put_le16(p + 22, 4); /* ecc bytes */
111 padstr((char *)(p + 23), s->version, 8); /* firmware version */
112 padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
113 #if MAX_MULT_SECTORS > 1
114 put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
115 #endif
116 put_le16(p + 48, 1); /* dword I/O */
117 put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
118 put_le16(p + 51, 0x200); /* PIO transfer cycle */
119 put_le16(p + 52, 0x200); /* DMA transfer cycle */
120 put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
121 put_le16(p + 54, s->cylinders);
122 put_le16(p + 55, s->heads);
123 put_le16(p + 56, s->sectors);
124 oldsize = s->cylinders * s->heads * s->sectors;
125 put_le16(p + 57, oldsize);
126 put_le16(p + 58, oldsize >> 16);
127 if (s->mult_sectors)
128 put_le16(p + 59, 0x100 | s->mult_sectors);
129 /* *(p + 60) := nb_sectors -- see ide_identify_size */
130 /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
131 put_le16(p + 62, 0x07); /* single word dma0-2 supported */
132 put_le16(p + 63, 0x07); /* mdma0-2 supported */
133 put_le16(p + 64, 0x03); /* pio3-4 supported */
134 put_le16(p + 65, 120);
135 put_le16(p + 66, 120);
136 put_le16(p + 67, 120);
137 put_le16(p + 68, 120);
138 if (dev && dev->conf.discard_granularity) {
139 put_le16(p + 69, (1 << 14)); /* deterministic TRIM behavior */
142 if (s->ncq_queues) {
143 put_le16(p + 75, s->ncq_queues - 1);
144 /* NCQ supported */
145 put_le16(p + 76, (1 << 8));
148 put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
149 put_le16(p + 81, 0x16); /* conforms to ata5 */
150 /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
151 put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
152 /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
153 put_le16(p + 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
154 /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
155 if (s->wwn) {
156 put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
157 } else {
158 put_le16(p + 84, (1 << 14) | 0);
160 /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
161 if (blk_enable_write_cache(s->blk)) {
162 put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
163 } else {
164 put_le16(p + 85, (1 << 14) | 1);
166 /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
167 put_le16(p + 86, (1 << 13) | (1 <<12) | (1 << 10));
168 /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
169 if (s->wwn) {
170 put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
171 } else {
172 put_le16(p + 87, (1 << 14) | 0);
174 put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
175 put_le16(p + 93, 1 | (1 << 14) | 0x2000);
176 /* *(p + 100) := nb_sectors -- see ide_identify_size */
177 /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
178 /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
179 /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
181 if (dev && dev->conf.physical_block_size)
182 put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
183 if (s->wwn) {
184 /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
185 put_le16(p + 108, s->wwn >> 48);
186 put_le16(p + 109, s->wwn >> 32);
187 put_le16(p + 110, s->wwn >> 16);
188 put_le16(p + 111, s->wwn);
190 if (dev && dev->conf.discard_granularity) {
191 put_le16(p + 169, 1); /* TRIM support */
194 ide_identify_size(s);
195 s->identify_set = 1;
197 fill_buffer:
198 memcpy(s->io_buffer, p, sizeof(s->identify_data));
201 static void ide_atapi_identify(IDEState *s)
203 uint16_t *p;
205 p = (uint16_t *)s->identify_data;
206 if (s->identify_set) {
207 goto fill_buffer;
209 memset(p, 0, sizeof(s->identify_data));
211 /* Removable CDROM, 50us response, 12 byte packets */
212 put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
213 padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
214 put_le16(p + 20, 3); /* buffer type */
215 put_le16(p + 21, 512); /* cache size in sectors */
216 put_le16(p + 22, 4); /* ecc bytes */
217 padstr((char *)(p + 23), s->version, 8); /* firmware version */
218 padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
219 put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
220 #ifdef USE_DMA_CDROM
221 put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
222 put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
223 put_le16(p + 62, 7); /* single word dma0-2 supported */
224 put_le16(p + 63, 7); /* mdma0-2 supported */
225 #else
226 put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
227 put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
228 put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
229 #endif
230 put_le16(p + 64, 3); /* pio3-4 supported */
231 put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
232 put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
233 put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
234 put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
236 put_le16(p + 71, 30); /* in ns */
237 put_le16(p + 72, 30); /* in ns */
239 if (s->ncq_queues) {
240 put_le16(p + 75, s->ncq_queues - 1);
241 /* NCQ supported */
242 put_le16(p + 76, (1 << 8));
245 put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
246 if (s->wwn) {
247 put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
248 put_le16(p + 87, (1 << 8)); /* WWN enabled */
251 #ifdef USE_DMA_CDROM
252 put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
253 #endif
255 if (s->wwn) {
256 /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
257 put_le16(p + 108, s->wwn >> 48);
258 put_le16(p + 109, s->wwn >> 32);
259 put_le16(p + 110, s->wwn >> 16);
260 put_le16(p + 111, s->wwn);
263 s->identify_set = 1;
265 fill_buffer:
266 memcpy(s->io_buffer, p, sizeof(s->identify_data));
269 static void ide_cfata_identify_size(IDEState *s)
271 uint16_t *p = (uint16_t *)s->identify_data;
272 put_le16(p + 7, s->nb_sectors >> 16); /* Sectors per card */
273 put_le16(p + 8, s->nb_sectors); /* Sectors per card */
274 put_le16(p + 60, s->nb_sectors); /* Total LBA sectors */
275 put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
278 static void ide_cfata_identify(IDEState *s)
280 uint16_t *p;
281 uint32_t cur_sec;
283 p = (uint16_t *)s->identify_data;
284 if (s->identify_set) {
285 goto fill_buffer;
287 memset(p, 0, sizeof(s->identify_data));
289 cur_sec = s->cylinders * s->heads * s->sectors;
291 put_le16(p + 0, 0x848a); /* CF Storage Card signature */
292 put_le16(p + 1, s->cylinders); /* Default cylinders */
293 put_le16(p + 3, s->heads); /* Default heads */
294 put_le16(p + 6, s->sectors); /* Default sectors per track */
295 /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
296 /* *(p + 8) := nb_sectors -- see ide_cfata_identify_size */
297 padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
298 put_le16(p + 22, 0x0004); /* ECC bytes */
299 padstr((char *) (p + 23), s->version, 8); /* Firmware Revision */
300 padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
301 #if MAX_MULT_SECTORS > 1
302 put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
303 #else
304 put_le16(p + 47, 0x0000);
305 #endif
306 put_le16(p + 49, 0x0f00); /* Capabilities */
307 put_le16(p + 51, 0x0002); /* PIO cycle timing mode */
308 put_le16(p + 52, 0x0001); /* DMA cycle timing mode */
309 put_le16(p + 53, 0x0003); /* Translation params valid */
310 put_le16(p + 54, s->cylinders); /* Current cylinders */
311 put_le16(p + 55, s->heads); /* Current heads */
312 put_le16(p + 56, s->sectors); /* Current sectors */
313 put_le16(p + 57, cur_sec); /* Current capacity */
314 put_le16(p + 58, cur_sec >> 16); /* Current capacity */
315 if (s->mult_sectors) /* Multiple sector setting */
316 put_le16(p + 59, 0x100 | s->mult_sectors);
317 /* *(p + 60) := nb_sectors -- see ide_cfata_identify_size */
318 /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
319 put_le16(p + 63, 0x0203); /* Multiword DMA capability */
320 put_le16(p + 64, 0x0001); /* Flow Control PIO support */
321 put_le16(p + 65, 0x0096); /* Min. Multiword DMA cycle */
322 put_le16(p + 66, 0x0096); /* Rec. Multiword DMA cycle */
323 put_le16(p + 68, 0x00b4); /* Min. PIO cycle time */
324 put_le16(p + 82, 0x400c); /* Command Set supported */
325 put_le16(p + 83, 0x7068); /* Command Set supported */
326 put_le16(p + 84, 0x4000); /* Features supported */
327 put_le16(p + 85, 0x000c); /* Command Set enabled */
328 put_le16(p + 86, 0x7044); /* Command Set enabled */
329 put_le16(p + 87, 0x4000); /* Features enabled */
330 put_le16(p + 91, 0x4060); /* Current APM level */
331 put_le16(p + 129, 0x0002); /* Current features option */
332 put_le16(p + 130, 0x0005); /* Reassigned sectors */
333 put_le16(p + 131, 0x0001); /* Initial power mode */
334 put_le16(p + 132, 0x0000); /* User signature */
335 put_le16(p + 160, 0x8100); /* Power requirement */
336 put_le16(p + 161, 0x8001); /* CF command set */
338 ide_cfata_identify_size(s);
339 s->identify_set = 1;
341 fill_buffer:
342 memcpy(s->io_buffer, p, sizeof(s->identify_data));
345 static void ide_set_signature(IDEState *s)
347 s->select &= 0xf0; /* clear head */
348 /* put signature */
349 s->nsector = 1;
350 s->sector = 1;
351 if (s->drive_kind == IDE_CD) {
352 s->lcyl = 0x14;
353 s->hcyl = 0xeb;
354 } else if (s->blk) {
355 s->lcyl = 0;
356 s->hcyl = 0;
357 } else {
358 s->lcyl = 0xff;
359 s->hcyl = 0xff;
363 typedef struct TrimAIOCB {
364 BlockAIOCB common;
365 BlockBackend *blk;
366 QEMUBH *bh;
367 int ret;
368 QEMUIOVector *qiov;
369 BlockAIOCB *aiocb;
370 int i, j;
371 } TrimAIOCB;
373 static void trim_aio_cancel(BlockAIOCB *acb)
375 TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
377 /* Exit the loop so ide_issue_trim_cb will not continue */
378 iocb->j = iocb->qiov->niov - 1;
379 iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
381 iocb->ret = -ECANCELED;
383 if (iocb->aiocb) {
384 blk_aio_cancel_async(iocb->aiocb);
385 iocb->aiocb = NULL;
389 static const AIOCBInfo trim_aiocb_info = {
390 .aiocb_size = sizeof(TrimAIOCB),
391 .cancel_async = trim_aio_cancel,
394 static void ide_trim_bh_cb(void *opaque)
396 TrimAIOCB *iocb = opaque;
398 iocb->common.cb(iocb->common.opaque, iocb->ret);
400 qemu_bh_delete(iocb->bh);
401 iocb->bh = NULL;
402 qemu_aio_unref(iocb);
405 static void ide_issue_trim_cb(void *opaque, int ret)
407 TrimAIOCB *iocb = opaque;
408 if (ret >= 0) {
409 while (iocb->j < iocb->qiov->niov) {
410 int j = iocb->j;
411 while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
412 int i = iocb->i;
413 uint64_t *buffer = iocb->qiov->iov[j].iov_base;
415 /* 6-byte LBA + 2-byte range per entry */
416 uint64_t entry = le64_to_cpu(buffer[i]);
417 uint64_t sector = entry & 0x0000ffffffffffffULL;
418 uint16_t count = entry >> 48;
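/* Illustrative: an entry of 0x0008000000001000 describes a range that
 * starts at sector 0x1000 and covers 8 sectors (low 48 bits = LBA,
 * high 16 bits = sector count). */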
420 if (count == 0) {
421 continue;
424 /* Got an entry! Submit and exit. */
425 iocb->aiocb = blk_aio_discard(iocb->blk, sector, count,
426 ide_issue_trim_cb, opaque);
427 return;
430 iocb->j++;
431 iocb->i = -1;
433 } else {
434 iocb->ret = ret;
437 iocb->aiocb = NULL;
438 if (iocb->bh) {
439 qemu_bh_schedule(iocb->bh);
443 BlockAIOCB *ide_issue_trim(BlockBackend *blk,
444 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
445 BlockCompletionFunc *cb, void *opaque)
447 TrimAIOCB *iocb;
449 iocb = blk_aio_get(&trim_aiocb_info, blk, cb, opaque);
450 iocb->blk = blk;
451 iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
452 iocb->ret = 0;
453 iocb->qiov = qiov;
454 iocb->i = -1;
455 iocb->j = 0;
456 ide_issue_trim_cb(iocb, 0);
457 return &iocb->common;
460 static inline void ide_abort_command(IDEState *s)
462 ide_transfer_stop(s);
463 s->status = READY_STAT | ERR_STAT;
464 s->error = ABRT_ERR;
467 /* prepare data transfer and tell what to do after */
468 void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
469 EndTransferFunc *end_transfer_func)
471 s->end_transfer_func = end_transfer_func;
472 s->data_ptr = buf;
473 s->data_end = buf + size;
474 if (!(s->status & ERR_STAT)) {
475 s->status |= DRQ_STAT;
477 if (s->bus->dma->ops->start_transfer) {
478 s->bus->dma->ops->start_transfer(s->bus->dma);
482 static void ide_cmd_done(IDEState *s)
484 if (s->bus->dma->ops->cmd_done) {
485 s->bus->dma->ops->cmd_done(s->bus->dma);
489 void ide_transfer_stop(IDEState *s)
491 s->end_transfer_func = ide_transfer_stop;
492 s->data_ptr = s->io_buffer;
493 s->data_end = s->io_buffer;
494 s->status &= ~DRQ_STAT;
495 ide_cmd_done(s);
498 int64_t ide_get_sector(IDEState *s)
500 int64_t sector_num;
501 if (s->select & 0x40) {
502 /* lba */
503 if (!s->lba48) {
504 sector_num = ((s->select & 0x0f) << 24) | (s->hcyl << 16) |
505 (s->lcyl << 8) | s->sector;
506 } else {
507 sector_num = ((int64_t)s->hob_hcyl << 40) |
508 ((int64_t) s->hob_lcyl << 32) |
509 ((int64_t) s->hob_sector << 24) |
510 ((int64_t) s->hcyl << 16) |
511 ((int64_t) s->lcyl << 8) | s->sector;
513 } else {
514 sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
515 (s->select & 0x0f) * s->sectors + (s->sector - 1);
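/* Illustrative CHS arithmetic, assuming heads=16 and sectors=63:
 * cylinder 2, head 3, sector 4 maps to 2*16*63 + 3*63 + (4-1) = 2208. */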
517 return sector_num;
520 void ide_set_sector(IDEState *s, int64_t sector_num)
522 unsigned int cyl, r;
523 if (s->select & 0x40) {
524 if (!s->lba48) {
525 s->select = (s->select & 0xf0) | (sector_num >> 24);
526 s->hcyl = (sector_num >> 16);
527 s->lcyl = (sector_num >> 8);
528 s->sector = (sector_num);
529 } else {
530 s->sector = sector_num;
531 s->lcyl = sector_num >> 8;
532 s->hcyl = sector_num >> 16;
533 s->hob_sector = sector_num >> 24;
534 s->hob_lcyl = sector_num >> 32;
535 s->hob_hcyl = sector_num >> 40;
537 } else {
538 cyl = sector_num / (s->heads * s->sectors);
539 r = sector_num % (s->heads * s->sectors);
540 s->hcyl = cyl >> 8;
541 s->lcyl = cyl;
542 s->select = (s->select & 0xf0) | ((r / s->sectors) & 0x0f);
543 s->sector = (r % s->sectors) + 1;
547 static void ide_rw_error(IDEState *s) {
548 ide_abort_command(s);
549 ide_set_irq(s->bus);
552 static bool ide_sect_range_ok(IDEState *s,
553 uint64_t sector, uint64_t nb_sectors)
555 uint64_t total_sectors;
557 blk_get_geometry(s->blk, &total_sectors);
558 if (sector > total_sectors || nb_sectors > total_sectors - sector) {
559 return false;
561 return true;
564 static void ide_sector_read_cb(void *opaque, int ret)
566 IDEState *s = opaque;
567 int n;
569 s->pio_aiocb = NULL;
570 s->status &= ~BUSY_STAT;
572 if (ret == -ECANCELED) {
573 return;
575 block_acct_done(blk_get_stats(s->blk), &s->acct);
576 if (ret != 0) {
577 if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
578 IDE_RETRY_READ)) {
579 return;
583 n = s->nsector;
584 if (n > s->req_nb_sectors) {
585 n = s->req_nb_sectors;
588 /* Allow the guest to read the io_buffer */
589 ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
591 ide_set_irq(s->bus);
593 ide_set_sector(s, ide_get_sector(s) + n);
594 s->nsector -= n;
595 s->io_buffer_offset += 512 * n;
598 void ide_sector_read(IDEState *s)
600 int64_t sector_num;
601 int n;
603 s->status = READY_STAT | SEEK_STAT;
604 s->error = 0; /* not needed by IDE spec, but needed by Windows */
605 sector_num = ide_get_sector(s);
606 n = s->nsector;
608 if (n == 0) {
609 ide_transfer_stop(s);
610 return;
613 s->status |= BUSY_STAT;
615 if (n > s->req_nb_sectors) {
616 n = s->req_nb_sectors;
619 #if defined(DEBUG_IDE)
620 printf("sector=%" PRId64 "\n", sector_num);
621 #endif
623 if (!ide_sect_range_ok(s, sector_num, n)) {
624 ide_rw_error(s);
625 return;
628 s->iov.iov_base = s->io_buffer;
629 s->iov.iov_len = n * BDRV_SECTOR_SIZE;
630 qemu_iovec_init_external(&s->qiov, &s->iov, 1);
632 block_acct_start(blk_get_stats(s->blk), &s->acct,
633 n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
634 s->pio_aiocb = blk_aio_readv(s->blk, sector_num, &s->qiov, n,
635 ide_sector_read_cb, s);
638 static void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
640 if (s->bus->dma->ops->commit_buf) {
641 s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
643 qemu_sglist_destroy(&s->sg);
646 void ide_set_inactive(IDEState *s, bool more)
648 s->bus->dma->aiocb = NULL;
649 if (s->bus->dma->ops->set_inactive) {
650 s->bus->dma->ops->set_inactive(s->bus->dma, more);
652 ide_cmd_done(s);
655 void ide_dma_error(IDEState *s)
657 dma_buf_commit(s, 0);
658 ide_abort_command(s);
659 ide_set_inactive(s, false);
660 ide_set_irq(s->bus);
663 static int ide_handle_rw_error(IDEState *s, int error, int op)
665 bool is_read = (op & IDE_RETRY_READ) != 0;
666 BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
668 if (action == BLOCK_ERROR_ACTION_STOP) {
669 s->bus->dma->ops->set_unit(s->bus->dma, s->unit);
670 s->bus->error_status = op;
671 } else if (action == BLOCK_ERROR_ACTION_REPORT) {
672 if (op & IDE_RETRY_DMA) {
673 ide_dma_error(s);
674 } else {
675 ide_rw_error(s);
678 blk_error_action(s->blk, action, is_read, error);
679 return action != BLOCK_ERROR_ACTION_IGNORE;
682 void ide_dma_cb(void *opaque, int ret)
684 IDEState *s = opaque;
685 int n;
686 int64_t sector_num;
687 bool stay_active = false;
689 if (ret == -ECANCELED) {
690 return;
692 if (ret < 0) {
693 int op = IDE_RETRY_DMA;
695 if (s->dma_cmd == IDE_DMA_READ)
696 op |= IDE_RETRY_READ;
697 else if (s->dma_cmd == IDE_DMA_TRIM)
698 op |= IDE_RETRY_TRIM;
700 if (ide_handle_rw_error(s, -ret, op)) {
701 return;
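/* io_buffer_size is a byte count; shifting right by 9 converts it to
 * 512-byte (BDRV_SECTOR_SIZE) sectors. */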
705 n = s->io_buffer_size >> 9;
706 if (n > s->nsector) {
707 /* The PRDs were longer than needed for this request. Shorten them so
708 * we don't get a negative remainder. The Active bit must remain set
709 * after the request completes. */
710 n = s->nsector;
711 stay_active = true;
714 sector_num = ide_get_sector(s);
715 if (n > 0) {
716 assert(s->io_buffer_size == s->sg.size);
717 dma_buf_commit(s, s->io_buffer_size);
718 sector_num += n;
719 ide_set_sector(s, sector_num);
720 s->nsector -= n;
723 /* end of transfer ? */
724 if (s->nsector == 0) {
725 s->status = READY_STAT | SEEK_STAT;
726 ide_set_irq(s->bus);
727 goto eot;
730 /* launch next transfer */
731 n = s->nsector;
732 s->io_buffer_index = 0;
733 s->io_buffer_size = n * 512;
734 if (s->bus->dma->ops->prepare_buf(s->bus->dma, ide_cmd_is_read(s)) < 512) {
735 /* The PRDs were too short. Reset the Active bit, but don't raise an
736 * interrupt. */
737 s->status = READY_STAT | SEEK_STAT;
738 dma_buf_commit(s, 0);
739 goto eot;
742 #ifdef DEBUG_AIO
743 printf("ide_dma_cb: sector_num=%" PRId64 " n=%d, cmd_cmd=%d\n",
744 sector_num, n, s->dma_cmd);
745 #endif
747 if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
748 !ide_sect_range_ok(s, sector_num, n)) {
749 ide_dma_error(s);
750 return;
753 switch (s->dma_cmd) {
754 case IDE_DMA_READ:
755 s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, sector_num,
756 ide_dma_cb, s);
757 break;
758 case IDE_DMA_WRITE:
759 s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, sector_num,
760 ide_dma_cb, s);
761 break;
762 case IDE_DMA_TRIM:
763 s->bus->dma->aiocb = dma_blk_io(s->blk, &s->sg, sector_num,
764 ide_issue_trim, ide_dma_cb, s,
765 DMA_DIRECTION_TO_DEVICE);
766 break;
768 return;
770 eot:
771 if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
772 block_acct_done(blk_get_stats(s->blk), &s->acct);
774 ide_set_inactive(s, stay_active);
777 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
779 s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT;
780 s->io_buffer_index = 0;
781 s->io_buffer_size = 0;
782 s->dma_cmd = dma_cmd;
784 switch (dma_cmd) {
785 case IDE_DMA_READ:
786 block_acct_start(blk_get_stats(s->blk), &s->acct,
787 s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
788 break;
789 case IDE_DMA_WRITE:
790 block_acct_start(blk_get_stats(s->blk), &s->acct,
791 s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
792 break;
793 default:
794 break;
797 ide_start_dma(s, ide_dma_cb);
800 void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
802 if (s->bus->dma->ops->start_dma) {
803 s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
807 static void ide_sector_write_timer_cb(void *opaque)
809 IDEState *s = opaque;
810 ide_set_irq(s->bus);
813 static void ide_sector_write_cb(void *opaque, int ret)
815 IDEState *s = opaque;
816 int n;
818 if (ret == -ECANCELED) {
819 return;
821 block_acct_done(blk_get_stats(s->blk), &s->acct);
823 s->pio_aiocb = NULL;
824 s->status &= ~BUSY_STAT;
826 if (ret != 0) {
827 if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
828 return;
832 n = s->nsector;
833 if (n > s->req_nb_sectors) {
834 n = s->req_nb_sectors;
836 s->nsector -= n;
837 s->io_buffer_offset += 512 * n;
839 if (s->nsector == 0) {
840 /* no more sectors to write */
841 ide_transfer_stop(s);
842 } else {
843 int n1 = s->nsector;
844 if (n1 > s->req_nb_sectors) {
845 n1 = s->req_nb_sectors;
847 ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
848 ide_sector_write);
850 ide_set_sector(s, ide_get_sector(s) + n);
852 if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
853 /* It seems there is a bug in the Windows 2000 installer HDD
854 IDE driver which fills the disk with empty logs when the
855 IDE write IRQ comes too early. This hack tries to correct
856 that at the expense of slower write performance. Use this
857 option _only_ to install Windows 2000. You must disable it
858 for normal use. */
859 timer_mod(s->sector_write_timer,
860 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() / 1000));
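/* QEMU_CLOCK_VIRTUAL counts nanoseconds, so get_ticks_per_sec() / 1000
 * should correspond to roughly a one-millisecond delay before the IRQ. */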
861 } else {
862 ide_set_irq(s->bus);
866 void ide_sector_write(IDEState *s)
868 int64_t sector_num;
869 int n;
871 s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
872 sector_num = ide_get_sector(s);
873 #if defined(DEBUG_IDE)
874 printf("sector=%" PRId64 "\n", sector_num);
875 #endif
876 n = s->nsector;
877 if (n > s->req_nb_sectors) {
878 n = s->req_nb_sectors;
881 if (!ide_sect_range_ok(s, sector_num, n)) {
882 ide_rw_error(s);
883 return;
886 s->iov.iov_base = s->io_buffer;
887 s->iov.iov_len = n * BDRV_SECTOR_SIZE;
888 qemu_iovec_init_external(&s->qiov, &s->iov, 1);
890 block_acct_start(blk_get_stats(s->blk), &s->acct,
891 n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
892 s->pio_aiocb = blk_aio_writev(s->blk, sector_num, &s->qiov, n,
893 ide_sector_write_cb, s);
896 static void ide_flush_cb(void *opaque, int ret)
898 IDEState *s = opaque;
900 s->pio_aiocb = NULL;
902 if (ret == -ECANCELED) {
903 return;
905 if (ret < 0) {
906 /* XXX: What sector number to set here? */
907 if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
908 return;
912 if (s->blk) {
913 block_acct_done(blk_get_stats(s->blk), &s->acct);
915 s->status = READY_STAT | SEEK_STAT;
916 ide_cmd_done(s);
917 ide_set_irq(s->bus);
920 void ide_flush_cache(IDEState *s)
922 if (s->blk == NULL) {
923 ide_flush_cb(s, 0);
924 return;
927 s->status |= BUSY_STAT;
928 block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
929 s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
932 static void ide_cfata_metadata_inquiry(IDEState *s)
934 uint16_t *p;
935 uint32_t spd;
937 p = (uint16_t *) s->io_buffer;
938 memset(p, 0, 0x200);
939 spd = ((s->mdata_size - 1) >> 9) + 1;
941 put_le16(p + 0, 0x0001); /* Data format revision */
942 put_le16(p + 1, 0x0000); /* Media property: silicon */
943 put_le16(p + 2, s->media_changed); /* Media status */
944 put_le16(p + 3, s->mdata_size & 0xffff); /* Capacity in bytes (low) */
945 put_le16(p + 4, s->mdata_size >> 16); /* Capacity in bytes (high) */
946 put_le16(p + 5, spd & 0xffff); /* Sectors per device (low) */
947 put_le16(p + 6, spd >> 16); /* Sectors per device (high) */
950 static void ide_cfata_metadata_read(IDEState *s)
952 uint16_t *p;
954 if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
955 s->status = ERR_STAT;
956 s->error = ABRT_ERR;
957 return;
960 p = (uint16_t *) s->io_buffer;
961 memset(p, 0, 0x200);
963 put_le16(p + 0, s->media_changed); /* Media status */
964 memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
965 MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
966 s->nsector << 9), 0x200 - 2));
969 static void ide_cfata_metadata_write(IDEState *s)
971 if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
972 s->status = ERR_STAT;
973 s->error = ABRT_ERR;
974 return;
977 s->media_changed = 0;
979 memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
980 s->io_buffer + 2,
981 MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
982 s->nsector << 9), 0x200 - 2));
985 /* called when the inserted state of the media has changed */
986 static void ide_cd_change_cb(void *opaque, bool load)
988 IDEState *s = opaque;
989 uint64_t nb_sectors;
991 s->tray_open = !load;
992 blk_get_geometry(s->blk, &nb_sectors);
993 s->nb_sectors = nb_sectors;
996 * First indicate to the guest that a CD has been removed. That's
997 * done on the next command the guest sends us.
999 * Then we set UNIT_ATTENTION, by which the guest will
1000 * detect a new CD in the drive. See ide_atapi_cmd() for details.
1002 s->cdrom_changed = 1;
1003 s->events.new_media = true;
1004 s->events.eject_request = false;
1005 ide_set_irq(s->bus);
1008 static void ide_cd_eject_request_cb(void *opaque, bool force)
1010 IDEState *s = opaque;
1012 s->events.eject_request = true;
1013 if (force) {
1014 s->tray_locked = false;
1016 ide_set_irq(s->bus);
1019 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1021 s->lba48 = lba48;
1023 /* handle the 'magic' 0 nsector count conversion here. to avoid
1024 * fiddling with the rest of the read logic, we just store the
1025 * full sector count in ->nsector and ignore ->hob_nsector from now
1027 if (!s->lba48) {
1028 if (!s->nsector)
1029 s->nsector = 256;
1030 } else {
1031 if (!s->nsector && !s->hob_nsector)
1032 s->nsector = 65536;
1033 else {
1034 int lo = s->nsector;
1035 int hi = s->hob_nsector;
1037 s->nsector = (hi << 8) | lo;
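/* Illustrative: hob_nsector=0x01 with nsector=0x00 yields a 256-sector
 * request (0x100); the all-zero case above means the LBA48 maximum of
 * 65536 sectors. */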
1042 static void ide_clear_hob(IDEBus *bus)
1044 /* any write clears HOB high bit of device control register */
1045 bus->ifs[0].select &= ~(1 << 7);
1046 bus->ifs[1].select &= ~(1 << 7);
1049 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1051 IDEBus *bus = opaque;
1053 #ifdef DEBUG_IDE
1054 printf("IDE: write addr=0x%x val=0x%02x\n", addr, val);
1055 #endif
1057 addr &= 7;
1059 /* ignore writes to command block while busy with previous command */
1060 if (addr != 7 && (idebus_active_if(bus)->status & (BUSY_STAT|DRQ_STAT)))
1061 return;
1063 switch(addr) {
1064 case 0:
1065 break;
1066 case 1:
1067 ide_clear_hob(bus);
1068 /* NOTE: data is written to the two drives */
1069 bus->ifs[0].hob_feature = bus->ifs[0].feature;
1070 bus->ifs[1].hob_feature = bus->ifs[1].feature;
1071 bus->ifs[0].feature = val;
1072 bus->ifs[1].feature = val;
1073 break;
1074 case 2:
1075 ide_clear_hob(bus);
1076 bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1077 bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1078 bus->ifs[0].nsector = val;
1079 bus->ifs[1].nsector = val;
1080 break;
1081 case 3:
1082 ide_clear_hob(bus);
1083 bus->ifs[0].hob_sector = bus->ifs[0].sector;
1084 bus->ifs[1].hob_sector = bus->ifs[1].sector;
1085 bus->ifs[0].sector = val;
1086 bus->ifs[1].sector = val;
1087 break;
1088 case 4:
1089 ide_clear_hob(bus);
1090 bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1091 bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1092 bus->ifs[0].lcyl = val;
1093 bus->ifs[1].lcyl = val;
1094 break;
1095 case 5:
1096 ide_clear_hob(bus);
1097 bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1098 bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1099 bus->ifs[0].hcyl = val;
1100 bus->ifs[1].hcyl = val;
1101 break;
1102 case 6:
1103 /* FIXME: HOB readback uses bit 7 */
1104 bus->ifs[0].select = (val & ~0x10) | 0xa0;
1105 bus->ifs[1].select = (val | 0x10) | 0xa0;
1106 /* select drive */
1107 bus->unit = (val >> 4) & 1;
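/* Bit 4 of the device/head register selects the drive; the 0xa0 mask
 * keeps the two obsolete "always one" bits (7 and 5) set, as older ATA
 * revisions expect. */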
1108 break;
1109 default:
1110 case 7:
1111 /* command */
1112 ide_exec_cmd(bus, val);
1113 break;
1117 static bool cmd_nop(IDEState *s, uint8_t cmd)
1119 return true;
1122 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1124 switch (s->feature) {
1125 case DSM_TRIM:
1126 if (s->blk) {
1127 ide_sector_start_dma(s, IDE_DMA_TRIM);
1128 return false;
1130 break;
1133 ide_abort_command(s);
1134 return true;
1137 static bool cmd_identify(IDEState *s, uint8_t cmd)
1139 if (s->blk && s->drive_kind != IDE_CD) {
1140 if (s->drive_kind != IDE_CFATA) {
1141 ide_identify(s);
1142 } else {
1143 ide_cfata_identify(s);
1145 s->status = READY_STAT | SEEK_STAT;
1146 ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1147 ide_set_irq(s->bus);
1148 return false;
1149 } else {
1150 if (s->drive_kind == IDE_CD) {
1151 ide_set_signature(s);
1153 ide_abort_command(s);
1156 return true;
1159 static bool cmd_verify(IDEState *s, uint8_t cmd)
1161 bool lba48 = (cmd == WIN_VERIFY_EXT);
1163 /* do sector number check ? */
1164 ide_cmd_lba48_transform(s, lba48);
1166 return true;
1169 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1171 if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1172 /* Disable Read and Write Multiple */
1173 s->mult_sectors = 0;
1174 } else if ((s->nsector & 0xff) != 0 &&
1175 ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1176 (s->nsector & (s->nsector - 1)) != 0)) {
1177 ide_abort_command(s);
1178 } else {
1179 s->mult_sectors = s->nsector & 0xff;
1182 return true;
1185 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1187 bool lba48 = (cmd == WIN_MULTREAD_EXT);
1189 if (!s->blk || !s->mult_sectors) {
1190 ide_abort_command(s);
1191 return true;
1194 ide_cmd_lba48_transform(s, lba48);
1195 s->req_nb_sectors = s->mult_sectors;
1196 ide_sector_read(s);
1197 return false;
1200 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1202 bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1203 int n;
1205 if (!s->blk || !s->mult_sectors) {
1206 ide_abort_command(s);
1207 return true;
1210 ide_cmd_lba48_transform(s, lba48);
1212 s->req_nb_sectors = s->mult_sectors;
1213 n = MIN(s->nsector, s->req_nb_sectors);
1215 s->status = SEEK_STAT | READY_STAT;
1216 ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1218 s->media_changed = 1;
1220 return false;
1223 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1225 bool lba48 = (cmd == WIN_READ_EXT);
1227 if (s->drive_kind == IDE_CD) {
1228 ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1229 ide_abort_command(s);
1230 return true;
1233 if (!s->blk) {
1234 ide_abort_command(s);
1235 return true;
1238 ide_cmd_lba48_transform(s, lba48);
1239 s->req_nb_sectors = 1;
1240 ide_sector_read(s);
1242 return false;
1245 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1247 bool lba48 = (cmd == WIN_WRITE_EXT);
1249 if (!s->blk) {
1250 ide_abort_command(s);
1251 return true;
1254 ide_cmd_lba48_transform(s, lba48);
1256 s->req_nb_sectors = 1;
1257 s->status = SEEK_STAT | READY_STAT;
1258 ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1260 s->media_changed = 1;
1262 return false;
1265 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1267 bool lba48 = (cmd == WIN_READDMA_EXT);
1269 if (!s->blk) {
1270 ide_abort_command(s);
1271 return true;
1274 ide_cmd_lba48_transform(s, lba48);
1275 ide_sector_start_dma(s, IDE_DMA_READ);
1277 return false;
1280 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1282 bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1284 if (!s->blk) {
1285 ide_abort_command(s);
1286 return true;
1289 ide_cmd_lba48_transform(s, lba48);
1290 ide_sector_start_dma(s, IDE_DMA_WRITE);
1292 s->media_changed = 1;
1294 return false;
1297 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1299 ide_flush_cache(s);
1300 return false;
1303 static bool cmd_seek(IDEState *s, uint8_t cmd)
1305 /* XXX: Check that seek is within bounds */
1306 return true;
1309 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1311 bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1313 /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1314 if (s->nb_sectors == 0) {
1315 ide_abort_command(s);
1316 return true;
1319 ide_cmd_lba48_transform(s, lba48);
1320 ide_set_sector(s, s->nb_sectors - 1);
1322 return true;
1325 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1327 s->nsector = 0xff; /* device active or idle */
1328 return true;
1331 static bool cmd_set_features(IDEState *s, uint8_t cmd)
1333 uint16_t *identify_data;
1335 if (!s->blk) {
1336 ide_abort_command(s);
1337 return true;
1340 /* XXX: valid for CDROM ? */
1341 switch (s->feature) {
1342 case 0x02: /* write cache enable */
1343 blk_set_enable_write_cache(s->blk, true);
1344 identify_data = (uint16_t *)s->identify_data;
1345 put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1346 return true;
1347 case 0x82: /* write cache disable */
1348 blk_set_enable_write_cache(s->blk, false);
1349 identify_data = (uint16_t *)s->identify_data;
1350 put_le16(identify_data + 85, (1 << 14) | 1);
1351 ide_flush_cache(s);
1352 return false;
1353 case 0xcc: /* reverting to power-on defaults enable */
1354 case 0x66: /* reverting to power-on defaults disable */
1355 case 0xaa: /* read look-ahead enable */
1356 case 0x55: /* read look-ahead disable */
1357 case 0x05: /* set advanced power management mode */
1358 case 0x85: /* disable advanced power management mode */
1359 case 0x69: /* NOP */
1360 case 0x67: /* NOP */
1361 case 0x96: /* NOP */
1362 case 0x9a: /* NOP */
1363 case 0x42: /* enable Automatic Acoustic Mode */
1364 case 0xc2: /* disable Automatic Acoustic Mode */
1365 return true;
1366 case 0x03: /* set transfer mode */
1368 uint8_t val = s->nsector & 0x07;
1369 identify_data = (uint16_t *)s->identify_data;
1371 switch (s->nsector >> 3) {
1372 case 0x00: /* pio default */
1373 case 0x01: /* pio mode */
1374 put_le16(identify_data + 62, 0x07);
1375 put_le16(identify_data + 63, 0x07);
1376 put_le16(identify_data + 88, 0x3f);
1377 break;
1378 case 0x02: /* single word dma mode */
1379 put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1380 put_le16(identify_data + 63, 0x07);
1381 put_le16(identify_data + 88, 0x3f);
1382 break;
1383 case 0x04: /* mdma mode */
1384 put_le16(identify_data + 62, 0x07);
1385 put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1386 put_le16(identify_data + 88, 0x3f);
1387 break;
1388 case 0x08: /* udma mode */
1389 put_le16(identify_data + 62, 0x07);
1390 put_le16(identify_data + 63, 0x07);
1391 put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1392 break;
1393 default:
1394 goto abort_cmd;
1396 return true;
1400 abort_cmd:
1401 ide_abort_command(s);
1402 return true;
1406 /*** ATAPI commands ***/
1408 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1410 ide_atapi_identify(s);
1411 s->status = READY_STAT | SEEK_STAT;
1412 ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1413 ide_set_irq(s->bus);
1414 return false;
1417 static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1419 ide_set_signature(s);
1421 if (s->drive_kind == IDE_CD) {
1422 s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1423 * devices to return a clear status register
1424 * with READY_STAT *not* set. */
1425 s->error = 0x01;
1426 } else {
1427 s->status = READY_STAT | SEEK_STAT;
1428 /* The bits of the error register are not as usual for this command!
1429 * They are part of the regular output (this is why ERR_STAT isn't set)
1430 * Device 0 passed, Device 1 passed or not present. */
1431 s->error = 0x01;
1432 ide_set_irq(s->bus);
1435 return false;
1438 static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1440 ide_set_signature(s);
1441 s->status = 0x00; /* NOTE: READY is _not_ set */
1442 s->error = 0x01;
1444 return false;
1447 static bool cmd_packet(IDEState *s, uint8_t cmd)
1449 /* overlapping commands not supported */
1450 if (s->feature & 0x02) {
1451 ide_abort_command(s);
1452 return true;
1455 s->status = READY_STAT | SEEK_STAT;
1456 s->atapi_dma = s->feature & 1;
1457 s->nsector = 1;
1458 ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1459 ide_atapi_cmd);
1460 return false;
1464 /*** CF-ATA commands ***/
1466 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1468 s->error = 0x09; /* miscellaneous error */
1469 s->status = READY_STAT | SEEK_STAT;
1470 ide_set_irq(s->bus);
1472 return false;
1475 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1477 /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1478 * required for Windows 8 to work with AHCI */
1480 if (cmd == CFA_WEAR_LEVEL) {
1481 s->nsector = 0;
1484 if (cmd == CFA_ERASE_SECTORS) {
1485 s->media_changed = 1;
1488 return true;
1491 static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1493 s->status = READY_STAT | SEEK_STAT;
1495 memset(s->io_buffer, 0, 0x200);
1496 s->io_buffer[0x00] = s->hcyl; /* Cyl MSB */
1497 s->io_buffer[0x01] = s->lcyl; /* Cyl LSB */
1498 s->io_buffer[0x02] = s->select; /* Head */
1499 s->io_buffer[0x03] = s->sector; /* Sector */
1500 s->io_buffer[0x04] = ide_get_sector(s) >> 16; /* LBA MSB */
1501 s->io_buffer[0x05] = ide_get_sector(s) >> 8; /* LBA */
1502 s->io_buffer[0x06] = ide_get_sector(s) >> 0; /* LBA LSB */
1503 s->io_buffer[0x13] = 0x00; /* Erase flag */
1504 s->io_buffer[0x18] = 0x00; /* Hot count */
1505 s->io_buffer[0x19] = 0x00; /* Hot count */
1506 s->io_buffer[0x1a] = 0x01; /* Hot count */
1508 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1509 ide_set_irq(s->bus);
1511 return false;
1514 static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1516 switch (s->feature) {
1517 case 0x02: /* Inquiry Metadata Storage */
1518 ide_cfata_metadata_inquiry(s);
1519 break;
1520 case 0x03: /* Read Metadata Storage */
1521 ide_cfata_metadata_read(s);
1522 break;
1523 case 0x04: /* Write Metadata Storage */
1524 ide_cfata_metadata_write(s);
1525 break;
1526 default:
1527 ide_abort_command(s);
1528 return true;
1531 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1532 s->status = 0x00; /* NOTE: READY is _not_ set */
1533 ide_set_irq(s->bus);
1535 return false;
1538 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1540 switch (s->feature) {
1541 case 0x01: /* sense temperature in device */
1542 s->nsector = 0x50; /* +20 C */
1543 break;
1544 default:
1545 ide_abort_command(s);
1546 return true;
1549 return true;
1553 /*** SMART commands ***/
1555 static bool cmd_smart(IDEState *s, uint8_t cmd)
1557 int n;
1559 if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
1560 goto abort_cmd;
1563 if (!s->smart_enabled && s->feature != SMART_ENABLE) {
1564 goto abort_cmd;
1567 switch (s->feature) {
1568 case SMART_DISABLE:
1569 s->smart_enabled = 0;
1570 return true;
1572 case SMART_ENABLE:
1573 s->smart_enabled = 1;
1574 return true;
1576 case SMART_ATTR_AUTOSAVE:
1577 switch (s->sector) {
1578 case 0x00:
1579 s->smart_autosave = 0;
1580 break;
1581 case 0xf1:
1582 s->smart_autosave = 1;
1583 break;
1584 default:
1585 goto abort_cmd;
1587 return true;
1589 case SMART_STATUS:
1590 if (!s->smart_errors) {
1591 s->hcyl = 0xc2;
1592 s->lcyl = 0x4f;
1593 } else {
1594 s->hcyl = 0x2c;
1595 s->lcyl = 0xf4;
1597 return true;
1599 case SMART_READ_THRESH:
1600 memset(s->io_buffer, 0, 0x200);
1601 s->io_buffer[0] = 0x01; /* smart struct version */
1603 for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1604 s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
1605 s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
1608 /* checksum */
1609 for (n = 0; n < 511; n++) {
1610 s->io_buffer[511] += s->io_buffer[n];
1612 s->io_buffer[511] = 0x100 - s->io_buffer[511];
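/* The last byte is chosen so that all 512 bytes of the structure sum to
 * zero modulo 256, which is how SMART data checksums are defined. */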
1614 s->status = READY_STAT | SEEK_STAT;
1615 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1616 ide_set_irq(s->bus);
1617 return false;
1619 case SMART_READ_DATA:
1620 memset(s->io_buffer, 0, 0x200);
1621 s->io_buffer[0] = 0x01; /* smart struct version */
1623 for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1624 int i;
1625 for (i = 0; i < 11; i++) {
1626 s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
1630 s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
1631 if (s->smart_selftest_count == 0) {
1632 s->io_buffer[363] = 0;
1633 } else {
1634 s->io_buffer[363] =
1635 s->smart_selftest_data[3 +
1636 (s->smart_selftest_count - 1) *
1637 24];
1639 s->io_buffer[364] = 0x20;
1640 s->io_buffer[365] = 0x01;
1641 /* offline data collection capability: execute + self-test */
1642 s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
1643 s->io_buffer[368] = 0x03; /* smart capability (1) */
1644 s->io_buffer[369] = 0x00; /* smart capability (2) */
1645 s->io_buffer[370] = 0x01; /* error logging supported */
1646 s->io_buffer[372] = 0x02; /* minutes for poll short test */
1647 s->io_buffer[373] = 0x36; /* minutes for poll ext test */
1648 s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
1650 for (n = 0; n < 511; n++) {
1651 s->io_buffer[511] += s->io_buffer[n];
1653 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1655 s->status = READY_STAT | SEEK_STAT;
1656 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1657 ide_set_irq(s->bus);
1658 return false;
1660 case SMART_READ_LOG:
1661 switch (s->sector) {
1662 case 0x01: /* summary smart error log */
1663 memset(s->io_buffer, 0, 0x200);
1664 s->io_buffer[0] = 0x01;
1665 s->io_buffer[1] = 0x00; /* no error entries */
1666 s->io_buffer[452] = s->smart_errors & 0xff;
1667 s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
1669 for (n = 0; n < 511; n++) {
1670 s->io_buffer[511] += s->io_buffer[n];
1672 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1673 break;
1674 case 0x06: /* smart self test log */
1675 memset(s->io_buffer, 0, 0x200);
1676 s->io_buffer[0] = 0x01;
1677 if (s->smart_selftest_count == 0) {
1678 s->io_buffer[508] = 0;
1679 } else {
1680 s->io_buffer[508] = s->smart_selftest_count;
1681 for (n = 2; n < 506; n++) {
1682 s->io_buffer[n] = s->smart_selftest_data[n];
1686 for (n = 0; n < 511; n++) {
1687 s->io_buffer[511] += s->io_buffer[n];
1689 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1690 break;
1691 default:
1692 goto abort_cmd;
1694 s->status = READY_STAT | SEEK_STAT;
1695 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1696 ide_set_irq(s->bus);
1697 return false;
1699 case SMART_EXECUTE_OFFLINE:
1700 switch (s->sector) {
1701 case 0: /* off-line routine */
1702 case 1: /* short self test */
1703 case 2: /* extended self test */
1704 s->smart_selftest_count++;
1705 if (s->smart_selftest_count > 21) {
1706 s->smart_selftest_count = 1;
1708 n = 2 + (s->smart_selftest_count - 1) * 24;
1709 s->smart_selftest_data[n] = s->sector;
1710 s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
1711 s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
1712 s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
1713 break;
1714 default:
1715 goto abort_cmd;
1717 return true;
1720 abort_cmd:
1721 ide_abort_command(s);
1722 return true;
1725 #define HD_OK (1u << IDE_HD)
1726 #define CD_OK (1u << IDE_CD)
1727 #define CFA_OK (1u << IDE_CFATA)
1728 #define HD_CFA_OK (HD_OK | CFA_OK)
1729 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
1731 /* Set the Disk Seek Completed status bit during completion */
1732 #define SET_DSC (1u << 8)
1734 /* See ACS-2 T13/2015-D Table B.2 Command codes */
1735 static const struct {
1736 /* Returns true if the completion code should be run */
1737 bool (*handler)(IDEState *s, uint8_t cmd);
1738 int flags;
1739 } ide_cmd_table[0x100] = {
1740 /* NOP not implemented, mandatory for CD */
1741 [CFA_REQ_EXT_ERROR_CODE] = { cmd_cfa_req_ext_error_code, CFA_OK },
1742 [WIN_DSM] = { cmd_data_set_management, ALL_OK },
1743 [WIN_DEVICE_RESET] = { cmd_device_reset, CD_OK },
1744 [WIN_RECAL] = { cmd_nop, HD_CFA_OK | SET_DSC},
1745 [WIN_READ] = { cmd_read_pio, ALL_OK },
1746 [WIN_READ_ONCE] = { cmd_read_pio, ALL_OK },
1747 [WIN_READ_EXT] = { cmd_read_pio, HD_CFA_OK },
1748 [WIN_READDMA_EXT] = { cmd_read_dma, HD_CFA_OK },
1749 [WIN_READ_NATIVE_MAX_EXT] = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
1750 [WIN_MULTREAD_EXT] = { cmd_read_multiple, HD_CFA_OK },
1751 [WIN_WRITE] = { cmd_write_pio, HD_CFA_OK },
1752 [WIN_WRITE_ONCE] = { cmd_write_pio, HD_CFA_OK },
1753 [WIN_WRITE_EXT] = { cmd_write_pio, HD_CFA_OK },
1754 [WIN_WRITEDMA_EXT] = { cmd_write_dma, HD_CFA_OK },
1755 [CFA_WRITE_SECT_WO_ERASE] = { cmd_write_pio, CFA_OK },
1756 [WIN_MULTWRITE_EXT] = { cmd_write_multiple, HD_CFA_OK },
1757 [WIN_WRITE_VERIFY] = { cmd_write_pio, HD_CFA_OK },
1758 [WIN_VERIFY] = { cmd_verify, HD_CFA_OK | SET_DSC },
1759 [WIN_VERIFY_ONCE] = { cmd_verify, HD_CFA_OK | SET_DSC },
1760 [WIN_VERIFY_EXT] = { cmd_verify, HD_CFA_OK | SET_DSC },
1761 [WIN_SEEK] = { cmd_seek, HD_CFA_OK | SET_DSC },
1762 [CFA_TRANSLATE_SECTOR] = { cmd_cfa_translate_sector, CFA_OK },
1763 [WIN_DIAGNOSE] = { cmd_exec_dev_diagnostic, ALL_OK },
1764 [WIN_SPECIFY] = { cmd_nop, HD_CFA_OK | SET_DSC },
1765 [WIN_STANDBYNOW2] = { cmd_nop, ALL_OK },
1766 [WIN_IDLEIMMEDIATE2] = { cmd_nop, ALL_OK },
1767 [WIN_STANDBY2] = { cmd_nop, ALL_OK },
1768 [WIN_SETIDLE2] = { cmd_nop, ALL_OK },
1769 [WIN_CHECKPOWERMODE2] = { cmd_check_power_mode, ALL_OK | SET_DSC },
1770 [WIN_SLEEPNOW2] = { cmd_nop, ALL_OK },
1771 [WIN_PACKETCMD] = { cmd_packet, CD_OK },
1772 [WIN_PIDENTIFY] = { cmd_identify_packet, CD_OK },
1773 [WIN_SMART] = { cmd_smart, HD_CFA_OK | SET_DSC },
1774 [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
1775 [CFA_ERASE_SECTORS] = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
1776 [WIN_MULTREAD] = { cmd_read_multiple, HD_CFA_OK },
1777 [WIN_MULTWRITE] = { cmd_write_multiple, HD_CFA_OK },
1778 [WIN_SETMULT] = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
1779 [WIN_READDMA] = { cmd_read_dma, HD_CFA_OK },
1780 [WIN_READDMA_ONCE] = { cmd_read_dma, HD_CFA_OK },
1781 [WIN_WRITEDMA] = { cmd_write_dma, HD_CFA_OK },
1782 [WIN_WRITEDMA_ONCE] = { cmd_write_dma, HD_CFA_OK },
1783 [CFA_WRITE_MULTI_WO_ERASE] = { cmd_write_multiple, CFA_OK },
1784 [WIN_STANDBYNOW1] = { cmd_nop, ALL_OK },
1785 [WIN_IDLEIMMEDIATE] = { cmd_nop, ALL_OK },
1786 [WIN_STANDBY] = { cmd_nop, ALL_OK },
1787 [WIN_SETIDLE1] = { cmd_nop, ALL_OK },
1788 [WIN_CHECKPOWERMODE1] = { cmd_check_power_mode, ALL_OK | SET_DSC },
1789 [WIN_SLEEPNOW1] = { cmd_nop, ALL_OK },
1790 [WIN_FLUSH_CACHE] = { cmd_flush_cache, ALL_OK },
1791 [WIN_FLUSH_CACHE_EXT] = { cmd_flush_cache, HD_CFA_OK },
1792 [WIN_IDENTIFY] = { cmd_identify, ALL_OK },
1793 [WIN_SETFEATURES] = { cmd_set_features, ALL_OK | SET_DSC },
1794 [IBM_SENSE_CONDITION] = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
1795 [CFA_WEAR_LEVEL] = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
1796 [WIN_READ_NATIVE_MAX] = { cmd_read_native_max, ALL_OK | SET_DSC },
1799 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
1801 return cmd < ARRAY_SIZE(ide_cmd_table)
1802 && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
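/* Illustrative: a command whose table entry carries only HD_CFA_OK is not
 * permitted on a CD-ROM drive, so ide_exec_cmd() below aborts it with
 * ERR_STAT/ABRT_ERR and raises an interrupt. */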
1805 void ide_exec_cmd(IDEBus *bus, uint32_t val)
1807 IDEState *s;
1808 bool complete;
1810 #if defined(DEBUG_IDE)
1811 printf("ide: CMD=%02x\n", val);
1812 #endif
1813 s = idebus_active_if(bus);
1814 /* ignore commands to non-existent slave */
1815 if (s != bus->ifs && !s->blk) {
1816 return;
1820 /* Only DEVICE RESET is allowed while BSY and/or DRQ are set */
1820 if ((s->status & (BUSY_STAT|DRQ_STAT)) && val != WIN_DEVICE_RESET)
1821 return;
1823 if (!ide_cmd_permitted(s, val)) {
1824 ide_abort_command(s);
1825 ide_set_irq(s->bus);
1826 return;
1829 s->status = READY_STAT | BUSY_STAT;
1830 s->error = 0;
1831 s->io_buffer_offset = 0;
1833 complete = ide_cmd_table[val].handler(s, val);
1834 if (complete) {
1835 s->status &= ~BUSY_STAT;
1836 assert(!!s->error == !!(s->status & ERR_STAT));
1838 if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
1839 s->status |= SEEK_STAT;
1842 ide_cmd_done(s);
1843 ide_set_irq(s->bus);
1847 uint32_t ide_ioport_read(void *opaque, uint32_t addr1)
1849 IDEBus *bus = opaque;
1850 IDEState *s = idebus_active_if(bus);
1851 uint32_t addr;
1852 int ret, hob;
1854 addr = addr1 & 7;
1855 /* FIXME: HOB readback uses bit 7, but it's always set right now */
1856 //hob = s->select & (1 << 7);
1857 hob = 0;
1858 switch(addr) {
1859 case 0:
1860 ret = 0xff;
1861 break;
1862 case 1:
1863 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
1864 (s != bus->ifs && !s->blk)) {
1865 ret = 0;
1866 } else if (!hob) {
1867 ret = s->error;
1868 } else {
1869 ret = s->hob_feature;
1871 break;
1872 case 2:
1873 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
1874 ret = 0;
1875 } else if (!hob) {
1876 ret = s->nsector & 0xff;
1877 } else {
1878 ret = s->hob_nsector;
1880 break;
1881 case 3:
1882 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
1883 ret = 0;
1884 } else if (!hob) {
1885 ret = s->sector;
1886 } else {
1887 ret = s->hob_sector;
1889 break;
1890 case 4:
1891 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
1892 ret = 0;
1893 } else if (!hob) {
1894 ret = s->lcyl;
1895 } else {
1896 ret = s->hob_lcyl;
1898 break;
1899 case 5:
1900 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
1901 ret = 0;
1902 } else if (!hob) {
1903 ret = s->hcyl;
1904 } else {
1905 ret = s->hob_hcyl;
1907 break;
1908 case 6:
1909 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
1910 ret = 0;
1911 } else {
1912 ret = s->select;
1914 break;
1915 default:
1916 case 7:
1917 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
1918 (s != bus->ifs && !s->blk)) {
1919 ret = 0;
1920 } else {
1921 ret = s->status;
1923 qemu_irq_lower(bus->irq);
1924 break;
1926 #ifdef DEBUG_IDE
1927 printf("ide: read addr=0x%x val=%02x\n", addr1, ret);
1928 #endif
1929 return ret;
1932 uint32_t ide_status_read(void *opaque, uint32_t addr)
1934 IDEBus *bus = opaque;
1935 IDEState *s = idebus_active_if(bus);
1936 int ret;
1938 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
1939 (s != bus->ifs && !s->blk)) {
1940 ret = 0;
1941 } else {
1942 ret = s->status;
1944 #ifdef DEBUG_IDE
1945 printf("ide: read status addr=0x%x val=%02x\n", addr, ret);
1946 #endif
1947 return ret;
1950 void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val)
1952 IDEBus *bus = opaque;
1953 IDEState *s;
1954 int i;
1956 #ifdef DEBUG_IDE
1957 printf("ide: write control addr=0x%x val=%02x\n", addr, val);
1958 #endif
1959 /* common for both drives */
1960 if (!(bus->cmd & IDE_CMD_RESET) &&
1961 (val & IDE_CMD_RESET)) {
1962 /* reset low to high */
1963 for(i = 0;i < 2; i++) {
1964 s = &bus->ifs[i];
1965 s->status = BUSY_STAT | SEEK_STAT;
1966 s->error = 0x01;
1968 } else if ((bus->cmd & IDE_CMD_RESET) &&
1969 !(val & IDE_CMD_RESET)) {
1970 /* high to low */
1971 for(i = 0;i < 2; i++) {
1972 s = &bus->ifs[i];
1973 if (s->drive_kind == IDE_CD)
1974 s->status = 0x00; /* NOTE: READY is _not_ set */
1975 else
1976 s->status = READY_STAT | SEEK_STAT;
1977 ide_set_signature(s);
1981 bus->cmd = val;
1985 * Returns true if the running PIO transfer is a PIO out (i.e. data is
1986 * transferred from the device to the guest), false if it's a PIO in
1988 static bool ide_is_pio_out(IDEState *s)
1990 if (s->end_transfer_func == ide_sector_write ||
1991 s->end_transfer_func == ide_atapi_cmd) {
1992 return false;
1993 } else if (s->end_transfer_func == ide_sector_read ||
1994 s->end_transfer_func == ide_transfer_stop ||
1995 s->end_transfer_func == ide_atapi_cmd_reply_end ||
1996 s->end_transfer_func == ide_dummy_transfer_stop) {
1997 return true;
2000 abort();
2003 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2005 IDEBus *bus = opaque;
2006 IDEState *s = idebus_active_if(bus);
2007 uint8_t *p;
2009 /* PIO data access allowed only when DRQ bit is set. The result of a write
2010 * during PIO out is indeterminate, just ignore it. */
2011 if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2012 return;
2015 p = s->data_ptr;
2016 *(uint16_t *)p = le16_to_cpu(val);
2017 p += 2;
2018 s->data_ptr = p;
2019 if (p >= s->data_end)
2020 s->end_transfer_func(s);
2023 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2025 IDEBus *bus = opaque;
2026 IDEState *s = idebus_active_if(bus);
2027 uint8_t *p;
2028 int ret;
2030 /* PIO data access allowed only when DRQ bit is set. The result of a read
2031 * during PIO in is indeterminate, return 0 and don't move forward. */
2032 if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2033 return 0;
2036 p = s->data_ptr;
2037 ret = cpu_to_le16(*(uint16_t *)p);
2038 p += 2;
2039 s->data_ptr = p;
2040 if (p >= s->data_end)
2041 s->end_transfer_func(s);
2042 return ret;
2045 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2047 IDEBus *bus = opaque;
2048 IDEState *s = idebus_active_if(bus);
2049 uint8_t *p;
2051 /* PIO data access allowed only when DRQ bit is set. The result of a write
2052 * during PIO out is indeterminate, just ignore it. */
2053 if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2054 return;
2057 p = s->data_ptr;
2058 *(uint32_t *)p = le32_to_cpu(val);
2059 p += 4;
2060 s->data_ptr = p;
2061 if (p >= s->data_end)
2062 s->end_transfer_func(s);
uint32_t ide_data_readl(void *opaque, uint32_t addr)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    uint8_t *p;
    int ret;

    /* PIO data access allowed only when DRQ bit is set. The result of a read
     * during PIO in is indeterminate, return 0 and don't move forward. */
    if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
        return 0;
    }

    p = s->data_ptr;
    ret = cpu_to_le32(*(uint32_t *)p);
    p += 4;
    s->data_ptr = p;
    if (p >= s->data_end)
        s->end_transfer_func(s);
    return ret;
}

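/*
 * Default end_transfer_func installed by ide_reset(): collapse the PIO data
 * window to zero length and pre-fill the start of io_buffer with 0xff so
 * that stray data-port accesses see all-ones (see the note in ide_reset()).
 */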
static void ide_dummy_transfer_stop(IDEState *s)
{
    s->data_ptr = s->io_buffer;
    s->data_end = s->io_buffer;
    s->io_buffer[0] = 0xff;
    s->io_buffer[1] = 0xff;
    s->io_buffer[2] = 0xff;
    s->io_buffer[3] = 0xff;
}

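/*
 * Reset a single drive to power-on defaults: cancel any in-flight PIO
 * request, clear the taskfile, LBA48 HOB and ATAPI/DMA state, set the ATA
 * signature and point data accesses at the dummy transfer handler.
 */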
static void ide_reset(IDEState *s)
{
#ifdef DEBUG_IDE
    printf("ide: reset\n");
#endif

    if (s->pio_aiocb) {
        blk_aio_cancel(s->pio_aiocb);
        s->pio_aiocb = NULL;
    }

    if (s->drive_kind == IDE_CFATA)
        s->mult_sectors = 0;
    else
        s->mult_sectors = MAX_MULT_SECTORS;
    /* ide regs */
    s->feature = 0;
    s->error = 0;
    s->nsector = 0;
    s->sector = 0;
    s->lcyl = 0;
    s->hcyl = 0;

    /* lba48 */
    s->hob_feature = 0;
    s->hob_sector = 0;
    s->hob_nsector = 0;
    s->hob_lcyl = 0;
    s->hob_hcyl = 0;

    s->select = 0xa0;
    s->status = READY_STAT | SEEK_STAT;

    s->lba48 = 0;

    /* ATAPI specific */
    s->sense_key = 0;
    s->asc = 0;
    s->cdrom_changed = 0;
    s->packet_transfer_size = 0;
    s->elementary_transfer_size = 0;
    s->io_buffer_index = 0;
    s->cd_sector_size = 0;
    s->atapi_dma = 0;
    s->tray_locked = 0;
    s->tray_open = 0;
    /* ATA DMA state */
    s->io_buffer_size = 0;
    s->req_nb_sectors = 0;

    ide_set_signature(s);
    /* init the transfer handler so that 0xffff is returned on data
       accesses */
    s->end_transfer_func = ide_dummy_transfer_stop;
    ide_dummy_transfer_stop(s);
    s->media_changed = 0;
}

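/*
 * Reset the whole bus: both interfaces, the HOB latch and any pending async
 * DMA request, then let the DMA provider reset itself if it implements
 * ->reset.
 */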
void ide_bus_reset(IDEBus *bus)
{
    bus->unit = 0;
    bus->cmd = 0;
    ide_reset(&bus->ifs[0]);
    ide_reset(&bus->ifs[1]);
    ide_clear_hob(bus);

    /* pending async DMA */
    if (bus->dma->aiocb) {
#ifdef DEBUG_AIO
        printf("aio_cancel\n");
#endif
        blk_aio_cancel(bus->dma->aiocb);
        bus->dma->aiocb = NULL;
    }

    /* reset dma provider too */
    if (bus->dma->ops->reset) {
        bus->dma->ops->reset(bus->dma);
    }
}

static bool ide_cd_is_tray_open(void *opaque)
{
    return ((IDEState *)opaque)->tray_open;
}

static bool ide_cd_is_medium_locked(void *opaque)
{
    return ((IDEState *)opaque)->tray_locked;
}

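/*
 * Block backend resize callback for hard disks and CFATA devices: refresh
 * nb_sectors from the backend and regenerate the size fields of the cached
 * IDENTIFY data (CD-ROMs use ide_cd_block_ops instead).
 */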
static void ide_resize_cb(void *opaque)
{
    IDEState *s = opaque;
    uint64_t nb_sectors;

    if (!s->identify_set) {
        return;
    }

    blk_get_geometry(s->blk, &nb_sectors);
    s->nb_sectors = nb_sectors;

    /* Update the identify data buffer. */
    if (s->drive_kind == IDE_CFATA) {
        ide_cfata_identify_size(s);
    } else {
        /* IDE_CD uses a different set of callbacks entirely. */
        assert(s->drive_kind != IDE_CD);
        ide_identify_size(s);
    }
}

static const BlockDevOps ide_cd_block_ops = {
    .change_media_cb = ide_cd_change_cb,
    .eject_request_cb = ide_cd_eject_request_cb,
    .is_tray_open = ide_cd_is_tray_open,
    .is_medium_locked = ide_cd_is_medium_locked,
};

static const BlockDevOps ide_hd_block_ops = {
    .resize_cb = ide_resize_cb,
};

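/*
 * Attach a BlockBackend to this IDEState and initialise geometry, SMART
 * state and the serial/model/version strings. Returns 0 on success, or -1
 * (after error_report) when a hard disk has no medium or is read-only.
 */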
int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
                   const char *version, const char *serial, const char *model,
                   uint64_t wwn,
                   uint32_t cylinders, uint32_t heads, uint32_t secs,
                   int chs_trans)
{
    uint64_t nb_sectors;

    s->blk = blk;
    s->drive_kind = kind;

    blk_get_geometry(blk, &nb_sectors);
    s->cylinders = cylinders;
    s->heads = heads;
    s->sectors = secs;
    s->chs_trans = chs_trans;
    s->nb_sectors = nb_sectors;
    s->wwn = wwn;
    /* The SMART values should be preserved across power cycles
       but they aren't. */
    s->smart_enabled = 1;
    s->smart_autosave = 1;
    s->smart_errors = 0;
    s->smart_selftest_count = 0;
    if (kind == IDE_CD) {
        blk_set_dev_ops(blk, &ide_cd_block_ops, s);
        blk_set_guest_block_size(blk, 2048);
    } else {
        if (!blk_is_inserted(s->blk)) {
            error_report("Device needs media, but drive is empty");
            return -1;
        }
        if (blk_is_read_only(blk)) {
            error_report("Can't use a read-only drive");
            return -1;
        }
        blk_set_dev_ops(blk, &ide_hd_block_ops, s);
    }
    if (serial) {
        pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
    } else {
        snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
                 "QM%05d", s->drive_serial);
    }
    if (model) {
        pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
    } else {
        switch (kind) {
        case IDE_CD:
            strcpy(s->drive_model_str, "QEMU DVD-ROM");
            break;
        case IDE_CFATA:
            strcpy(s->drive_model_str, "QEMU MICRODRIVE");
            break;
        default:
            strcpy(s->drive_model_str, "QEMU HARDDISK");
            break;
        }
    }

    if (version) {
        pstrcpy(s->version, sizeof(s->version), version);
    } else {
        pstrcpy(s->version, sizeof(s->version), qemu_get_version());
    }

    ide_reset(s);
    blk_iostatus_enable(blk);
    return 0;
}

static void ide_init1(IDEBus *bus, int unit)
{
    static int drive_serial = 1;
    IDEState *s = &bus->ifs[unit];

    s->bus = bus;
    s->unit = unit;
    s->drive_serial = drive_serial++;
    /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
    s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
    s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
    memset(s->io_buffer, 0, s->io_buffer_total_len);

    s->smart_selftest_data = blk_blockalign(s->blk, 512);
    memset(s->smart_selftest_data, 0, 512);

    s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                         ide_sector_write_timer_cb, s);
}

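/*
 * Placeholder DMA provider: every callback is a no-op, so a freshly
 * initialised bus works in PIO-only mode. ide_init2() installs it as the
 * default; a controller that supports DMA is expected to replace bus->dma
 * with its own implementation.
 */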
static int ide_nop_int(IDEDMA *dma, int x)
{
    return 0;
}

static int32_t ide_nop_int32(IDEDMA *dma, int x)
{
    return 0;
}

static void ide_nop_restart(void *opaque, int x, RunState y)
{
}

static const IDEDMAOps ide_dma_nop_ops = {
    .prepare_buf = ide_nop_int32,
    .rw_buf = ide_nop_int,
    .set_unit = ide_nop_int,
    .restart_cb = ide_nop_restart,
};

static IDEDMA ide_dma_nop = {
    .ops = &ide_dma_nop_ops,
    .aiocb = NULL,
};

void ide_init2(IDEBus *bus, qemu_irq irq)
{
    int i;

    for (i = 0; i < 2; i++) {
        ide_init1(bus, i);
        ide_reset(&bus->ifs[i]);
    }
    bus->irq = irq;
    bus->dma = &ide_dma_nop;
}

static const MemoryRegionPortio ide_portio_list[] = {
    { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
    { 0, 2, 2, .read = ide_data_readw, .write = ide_data_writew },
    { 0, 4, 4, .read = ide_data_readl, .write = ide_data_writel },
    PORTIO_END_OF_LIST(),
};

static const MemoryRegionPortio ide_portio2_list[] = {
    { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write },
    PORTIO_END_OF_LIST(),
};

void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
{
    /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
       bridge has been set up properly to always register with ISA. */
    isa_register_portio_list(dev, iobase, ide_portio_list, bus, "ide");

    if (iobase2) {
        isa_register_portio_list(dev, iobase2, ide_portio2_list, bus, "ide");
    }
}

static bool is_identify_set(void *opaque, int version_id)
{
    IDEState *s = opaque;

    return s->identify_set != 0;
}

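/*
 * end_transfer_func cannot be migrated as a raw function pointer, so it is
 * saved as an index into this table; transfer_end_table_idx() performs the
 * reverse lookup at pre-save time.
 */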
static EndTransferFunc* transfer_end_table[] = {
    ide_sector_read,
    ide_sector_write,
    ide_transfer_stop,
    ide_atapi_cmd_reply_end,
    ide_atapi_cmd,
    ide_dummy_transfer_stop,
};

static int transfer_end_table_idx(EndTransferFunc *fn)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
        if (transfer_end_table[i] == fn)
            return i;

    return -1;
}

static int ide_drive_post_load(void *opaque, int version_id)
{
    IDEState *s = opaque;

    if (s->blk && s->identify_set) {
        blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
    }
    return 0;
}

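/*
 * The PIO state subsection stores the data window as offsets relative to
 * io_buffer and the transfer handler as an index into transfer_end_table:
 * pre_save flattens the pointers, post_load rebuilds them and rejects an
 * out-of-range handler index.
 */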
static int ide_drive_pio_post_load(void *opaque, int version_id)
{
    IDEState *s = opaque;

    if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
        return -EINVAL;
    }
    s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
    s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
    s->data_end = s->data_ptr + s->cur_io_buffer_len;

    return 0;
}

static void ide_drive_pio_pre_save(void *opaque)
{
    IDEState *s = opaque;
    int idx;

    s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
    s->cur_io_buffer_len = s->data_end - s->data_ptr;

    idx = transfer_end_table_idx(s->end_transfer_func);
    if (idx == -1) {
        fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
                        __func__);
        s->end_transfer_fn_idx = 2;
    } else {
        s->end_transfer_fn_idx = idx;
    }
}

static bool ide_drive_pio_state_needed(void *opaque)
{
    IDEState *s = opaque;

    return ((s->status & DRQ_STAT) != 0)
        || (s->bus->error_status & IDE_RETRY_PIO);
}

static bool ide_tray_state_needed(void *opaque)
{
    IDEState *s = opaque;

    return s->tray_open || s->tray_locked;
}

static bool ide_atapi_gesn_needed(void *opaque)
{
    IDEState *s = opaque;

    return s->events.new_media || s->events.eject_request;
}

static bool ide_error_needed(void *opaque)
{
    IDEBus *bus = opaque;

    return (bus->error_status != 0);
}

/* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
static const VMStateDescription vmstate_ide_atapi_gesn_state = {
    .name = "ide_drive/atapi/gesn_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(events.new_media, IDEState),
        VMSTATE_BOOL(events.eject_request, IDEState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ide_tray_state = {
    .name = "ide_drive/tray_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(tray_open, IDEState),
        VMSTATE_BOOL(tray_locked, IDEState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ide_drive_pio_state = {
    .name = "ide_drive/pio_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = ide_drive_pio_pre_save,
    .post_load = ide_drive_pio_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(req_nb_sectors, IDEState),
        VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
                             vmstate_info_uint8, uint8_t),
        VMSTATE_INT32(cur_io_buffer_offset, IDEState),
        VMSTATE_INT32(cur_io_buffer_len, IDEState),
        VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
        VMSTATE_INT32(elementary_transfer_size, IDEState),
        VMSTATE_INT32(packet_transfer_size, IDEState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_ide_drive = {
    .name = "ide_drive",
    .version_id = 3,
    .minimum_version_id = 0,
    .post_load = ide_drive_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(mult_sectors, IDEState),
        VMSTATE_INT32(identify_set, IDEState),
        VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
        VMSTATE_UINT8(feature, IDEState),
        VMSTATE_UINT8(error, IDEState),
        VMSTATE_UINT32(nsector, IDEState),
        VMSTATE_UINT8(sector, IDEState),
        VMSTATE_UINT8(lcyl, IDEState),
        VMSTATE_UINT8(hcyl, IDEState),
        VMSTATE_UINT8(hob_feature, IDEState),
        VMSTATE_UINT8(hob_sector, IDEState),
        VMSTATE_UINT8(hob_nsector, IDEState),
        VMSTATE_UINT8(hob_lcyl, IDEState),
        VMSTATE_UINT8(hob_hcyl, IDEState),
        VMSTATE_UINT8(select, IDEState),
        VMSTATE_UINT8(status, IDEState),
        VMSTATE_UINT8(lba48, IDEState),
        VMSTATE_UINT8(sense_key, IDEState),
        VMSTATE_UINT8(asc, IDEState),
        VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection []) {
        {
            .vmsd = &vmstate_ide_drive_pio_state,
            .needed = ide_drive_pio_state_needed,
        }, {
            .vmsd = &vmstate_ide_tray_state,
            .needed = ide_tray_state_needed,
        }, {
            .vmsd = &vmstate_ide_atapi_gesn_state,
            .needed = ide_atapi_gesn_needed,
        }, {
            /* empty */
        }
    }
};

static const VMStateDescription vmstate_ide_error_status = {
    .name = "ide_bus/error",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(error_status, IDEBus),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_ide_bus = {
    .name = "ide_bus",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(cmd, IDEBus),
        VMSTATE_UINT8(unit, IDEBus),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection []) {
        {
            .vmsd = &vmstate_ide_error_status,
            .needed = ide_error_needed,
        }, {
            /* empty */
        }
    }
};

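/*
 * Collect the DriveInfo entries defined for IF_IDE into hd[] (one slot per
 * bus/unit index), erroring out if more IDE buses were configured on the
 * command line than the caller's array can describe.
 */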
void ide_drive_get(DriveInfo **hd, int n)
{
    int i;
    int highest_bus = drive_get_max_bus(IF_IDE) + 1;
    int max_devs = drive_get_max_devs(IF_IDE);
    int n_buses = max_devs ? (n / max_devs) : n;

    /*
     * Note: The number of actual buses available is not known.
     * We compute this based on the size of the DriveInfo* array, n.
     * If it is less than max_devs * <num_real_buses>,
     * we will stop looking for drives prematurely instead of overfilling
     * the array.
     */

    if (highest_bus > n_buses) {
        error_report("Too many IDE buses defined (%d > %d)",
                     highest_bus, n_buses);
        exit(1);
    }

    for (i = 0; i < n; i++) {
        hd[i] = drive_get_by_index(IF_IDE, i);