nbd: Don't crash when server reports NBD_CMD_READ failure
[qemu/ar7.git] / hw / ide / core.c
blob471d0c928b55ce8bc59acc66ff2d61ac6a1f892b
1 /*
2 * QEMU IDE disk and CD/DVD-ROM Emulator
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2006 Openedhand Ltd.
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
25 #include "qemu/osdep.h"
26 #include "hw/hw.h"
27 #include "hw/i386/pc.h"
28 #include "hw/pci/pci.h"
29 #include "hw/isa/isa.h"
30 #include "qemu/error-report.h"
31 #include "qemu/timer.h"
32 #include "sysemu/sysemu.h"
33 #include "sysemu/dma.h"
34 #include "hw/block/block.h"
35 #include "sysemu/block-backend.h"
36 #include "qemu/cutils.h"
37 #include "qemu/error-report.h"
39 #include "hw/ide/internal.h"
40 #include "trace.h"
42 /* These values were based on a Seagate ST3500418AS but have been modified
43 to make more sense in QEMU */
/* Canned SMART attribute table returned to the guest.  Values are modeled
 * on a Seagate ST3500418AS but tweaked to make sense under emulation. */
static const int smart_attributes[][12] = {
    /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
    /* raw read error rate*/
    { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
    /* spin up */
    { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* start stop count */
    { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
    /* remapped sectors */
    { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
    /* power on hours */
    { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* power cycle count */
    { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* airflow-temperature-celsius */
    { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
};
/* Human-readable names for enum ide_dma_cmd, used by tracing. */
const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT] = {
    [IDE_DMA_READ] = "DMA READ",
    [IDE_DMA_WRITE] = "DMA WRITE",
    [IDE_DMA_TRIM] = "DMA TRIM",
    [IDE_DMA_ATAPI] = "DMA ATAPI"
};
69 static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval)
71 if ((unsigned)enval < IDE_DMA__COUNT) {
72 return IDE_DMA_CMD_lookup[enval];
74 return "DMA UNKNOWN CMD";
77 static void ide_dummy_transfer_stop(IDEState *s);
/* Copy @src into @str, space-padding to @len bytes.  ATA IDENTIFY strings
 * store each 16-bit word with its bytes swapped, hence the i ^ 1 index. */
static void padstr(char *str, const char *src, int len)
{
    int i;

    for (i = 0; i < len; i++) {
        char c = *src ? *src++ : ' ';
        str[i ^ 1] = c;
    }
}
/* Store @v at @p in little-endian byte order (IDENTIFY data is LE). */
static void put_le16(uint16_t *p, unsigned int v)
{
    *p = cpu_to_le16(v);
}
96 static void ide_identify_size(IDEState *s)
98 uint16_t *p = (uint16_t *)s->identify_data;
99 put_le16(p + 60, s->nb_sectors);
100 put_le16(p + 61, s->nb_sectors >> 16);
101 put_le16(p + 100, s->nb_sectors);
102 put_le16(p + 101, s->nb_sectors >> 16);
103 put_le16(p + 102, s->nb_sectors >> 32);
104 put_le16(p + 103, s->nb_sectors >> 48);
/* Build (once, then cache) the ATA IDENTIFY DEVICE response for a hard
 * disk and copy it into s->io_buffer for the guest to read. */
static void ide_identify(IDEState *s)
{
    uint16_t *p;
    unsigned int oldsize;
    IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;

    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
        /* Data already built on a previous IDENTIFY; just re-serve it. */
        goto fill_buffer;
    }
    memset(p, 0, sizeof(s->identify_data));

    put_le16(p + 0, 0x0040);
    put_le16(p + 1, s->cylinders);
    put_le16(p + 3, s->heads);
    put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
    put_le16(p + 5, 512); /* XXX: retired, remove ? */
    put_le16(p + 6, s->sectors);
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 20, 3); /* XXX: retired, remove ? */
    put_le16(p + 21, 512); /* cache size in sectors */
    put_le16(p + 22, 4); /* ecc bytes */
    padstr((char *)(p + 23), s->version, 8); /* firmware version */
    padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
#if MAX_MULT_SECTORS > 1
    put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
#endif
    put_le16(p + 48, 1); /* dword I/O */
    put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
    put_le16(p + 51, 0x200); /* PIO transfer cycle */
    put_le16(p + 52, 0x200); /* DMA transfer cycle */
    put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
    put_le16(p + 54, s->cylinders);
    put_le16(p + 55, s->heads);
    put_le16(p + 56, s->sectors);
    oldsize = s->cylinders * s->heads * s->sectors;
    put_le16(p + 57, oldsize);
    put_le16(p + 58, oldsize >> 16);
    if (s->mult_sectors)
        put_le16(p + 59, 0x100 | s->mult_sectors);
    /* *(p + 60) := nb_sectors       -- see ide_identify_size */
    /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
    put_le16(p + 62, 0x07); /* single word dma0-2 supported */
    put_le16(p + 63, 0x07); /* mdma0-2 supported */
    put_le16(p + 64, 0x03); /* pio3-4 supported */
    put_le16(p + 65, 120);
    put_le16(p + 66, 120);
    put_le16(p + 67, 120);
    put_le16(p + 68, 120);
    if (dev && dev->conf.discard_granularity) {
        put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
    }

    if (s->ncq_queues) {
        put_le16(p + 75, s->ncq_queues - 1);
        /* NCQ supported */
        put_le16(p + 76, (1 << 8));
    }

    put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
    put_le16(p + 81, 0x16); /* conforms to ata5 */
    /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
    put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
    /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
    put_le16(p + 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
    /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
    if (s->wwn) {
        put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
    } else {
        put_le16(p + 84, (1 << 14) | 0);
    }
    /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
    if (blk_enable_write_cache(s->blk)) {
        put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
    } else {
        put_le16(p + 85, (1 << 14) | 1);
    }
    /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
    put_le16(p + 86, (1 << 13) | (1 <<12) | (1 << 10));
    /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
    if (s->wwn) {
        put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
    } else {
        put_le16(p + 87, (1 << 14) | 0);
    }
    put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
    put_le16(p + 93, 1 | (1 << 14) | 0x2000);
    /* *(p + 100) := nb_sectors       -- see ide_identify_size */
    /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
    /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
    /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */

    if (dev && dev->conf.physical_block_size)
        put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
    if (s->wwn) {
        /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
        put_le16(p + 108, s->wwn >> 48);
        put_le16(p + 109, s->wwn >> 32);
        put_le16(p + 110, s->wwn >> 16);
        put_le16(p + 111, s->wwn);
    }
    if (dev && dev->conf.discard_granularity) {
        put_le16(p + 169, 1); /* TRIM support */
    }
    if (dev) {
        put_le16(p + 217, dev->rotation_rate); /* Nominal media rotation rate */
    }

    ide_identify_size(s);
    s->identify_set = 1;

fill_buffer:
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
}
/* Build (once, then cache) the IDENTIFY PACKET DEVICE response for a
 * CD/DVD-ROM drive and copy it into s->io_buffer. */
static void ide_atapi_identify(IDEState *s)
{
    uint16_t *p;

    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
        /* Serve the cached copy built by an earlier IDENTIFY. */
        goto fill_buffer;
    }
    memset(p, 0, sizeof(s->identify_data));

    /* Removable CDROM, 50us response, 12 byte packets */
    put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 20, 3); /* buffer type */
    put_le16(p + 21, 512); /* cache size in sectors */
    put_le16(p + 22, 4); /* ecc bytes */
    padstr((char *)(p + 23), s->version, 8); /* firmware version */
    padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
    put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
#ifdef USE_DMA_CDROM
    put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
    put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
    put_le16(p + 62, 7);  /* single word dma0-2 supported */
    put_le16(p + 63, 7);  /* mdma0-2 supported */
#else
    put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
    put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
    put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
#endif
    put_le16(p + 64, 3); /* pio3-4 supported */
    put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
    put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
    put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
    put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */

    put_le16(p + 71, 30); /* in ns */
    put_le16(p + 72, 30); /* in ns */

    if (s->ncq_queues) {
        put_le16(p + 75, s->ncq_queues - 1);
        /* NCQ supported */
        put_le16(p + 76, (1 << 8));
    }

    put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
    if (s->wwn) {
        put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
        put_le16(p + 87, (1 << 8)); /* WWN enabled */
    }

#ifdef USE_DMA_CDROM
    put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
#endif

    if (s->wwn) {
        /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
        put_le16(p + 108, s->wwn >> 48);
        put_le16(p + 109, s->wwn >> 32);
        put_le16(p + 110, s->wwn >> 16);
        put_le16(p + 111, s->wwn);
    }

    s->identify_set = 1;

fill_buffer:
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
}
290 static void ide_cfata_identify_size(IDEState *s)
292 uint16_t *p = (uint16_t *)s->identify_data;
293 put_le16(p + 7, s->nb_sectors >> 16); /* Sectors per card */
294 put_le16(p + 8, s->nb_sectors); /* Sectors per card */
295 put_le16(p + 60, s->nb_sectors); /* Total LBA sectors */
296 put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
/* Build (once, then cache) the IDENTIFY response for a CompactFlash
 * (CFATA) card and copy it into s->io_buffer. */
static void ide_cfata_identify(IDEState *s)
{
    uint16_t *p;
    uint32_t cur_sec;

    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
        goto fill_buffer;
    }
    memset(p, 0, sizeof(s->identify_data));

    cur_sec = s->cylinders * s->heads * s->sectors;

    put_le16(p + 0, 0x848a);			/* CF Storage Card signature */
    put_le16(p + 1, s->cylinders);		/* Default cylinders */
    put_le16(p + 3, s->heads);			/* Default heads */
    put_le16(p + 6, s->sectors);		/* Default sectors per track */
    /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
    /* *(p + 8) := nb_sectors       -- see ide_cfata_identify_size */
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 22, 0x0004);			/* ECC bytes */
    padstr((char *) (p + 23), s->version, 8);	/* Firmware Revision */
    padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
#if MAX_MULT_SECTORS > 1
    put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
#else
    put_le16(p + 47, 0x0000);
#endif
    put_le16(p + 49, 0x0f00);			/* Capabilities */
    put_le16(p + 51, 0x0002);			/* PIO cycle timing mode */
    put_le16(p + 52, 0x0001);			/* DMA cycle timing mode */
    put_le16(p + 53, 0x0003);			/* Translation params valid */
    put_le16(p + 54, s->cylinders);		/* Current cylinders */
    put_le16(p + 55, s->heads);			/* Current heads */
    put_le16(p + 56, s->sectors);		/* Current sectors */
    put_le16(p + 57, cur_sec);			/* Current capacity */
    put_le16(p + 58, cur_sec >> 16);		/* Current capacity */
    if (s->mult_sectors)			/* Multiple sector setting */
        put_le16(p + 59, 0x100 | s->mult_sectors);
    /* *(p + 60) := nb_sectors       -- see ide_cfata_identify_size */
    /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
    put_le16(p + 63, 0x0203);			/* Multiword DMA capability */
    put_le16(p + 64, 0x0001);			/* Flow Control PIO support */
    put_le16(p + 65, 0x0096);			/* Min. Multiword DMA cycle */
    put_le16(p + 66, 0x0096);			/* Rec. Multiword DMA cycle */
    put_le16(p + 68, 0x00b4);			/* Min. PIO cycle time */
    put_le16(p + 82, 0x400c);			/* Command Set supported */
    put_le16(p + 83, 0x7068);			/* Command Set supported */
    put_le16(p + 84, 0x4000);			/* Features supported */
    put_le16(p + 85, 0x000c);			/* Command Set enabled */
    put_le16(p + 86, 0x7044);			/* Command Set enabled */
    put_le16(p + 87, 0x4000);			/* Features enabled */
    put_le16(p + 91, 0x4060);			/* Current APM level */
    put_le16(p + 129, 0x0002);			/* Current features option */
    put_le16(p + 130, 0x0005);			/* Reassigned sectors */
    put_le16(p + 131, 0x0001);			/* Initial power mode */
    put_le16(p + 132, 0x0000);			/* User signature */
    put_le16(p + 160, 0x8100);			/* Power requirement */
    put_le16(p + 161, 0x8001);			/* CF command set */

    ide_cfata_identify_size(s);
    s->identify_set = 1;

fill_buffer:
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
}
366 static void ide_set_signature(IDEState *s)
368 s->select &= 0xf0; /* clear head */
369 /* put signature */
370 s->nsector = 1;
371 s->sector = 1;
372 if (s->drive_kind == IDE_CD) {
373 s->lcyl = 0x14;
374 s->hcyl = 0xeb;
375 } else if (s->blk) {
376 s->lcyl = 0;
377 s->hcyl = 0;
378 } else {
379 s->lcyl = 0xff;
380 s->hcyl = 0xff;
/* Tracks one guest TRIM command while its range entries are submitted
 * as individual discards. */
typedef struct TrimAIOCB {
    BlockAIOCB common;    /* embedded generic AIOCB header */
    BlockBackend *blk;    /* backend the discards go to */
    QEMUBH *bh;           /* bottom half that delivers final completion */
    int ret;              /* sticky status: first error or -ECANCELED */
    QEMUIOVector *qiov;   /* guest buffer holding 8-byte TRIM range entries */
    BlockAIOCB *aiocb;    /* currently in-flight discard, if any */
    int i, j;             /* cursor: entry i inside iovec element j */
} TrimAIOCB;
/* Async cancel for an in-progress TRIM: park the cursor past the last
 * entry so ide_issue_trim_cb stops iterating, then cancel any discard
 * that is currently in flight. */
static void trim_aio_cancel(BlockAIOCB *acb)
{
    TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);

    /* Exit the loop so ide_issue_trim_cb will not continue */
    iocb->j = iocb->qiov->niov - 1;
    iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;

    iocb->ret = -ECANCELED;

    if (iocb->aiocb) {
        blk_aio_cancel_async(iocb->aiocb);
        iocb->aiocb = NULL;
    }
}
/* AIOCB descriptor for TRIM requests; enables async cancellation. */
static const AIOCBInfo trim_aiocb_info = {
    .aiocb_size         = sizeof(TrimAIOCB),
    .cancel_async       = trim_aio_cancel,
};
415 static void ide_trim_bh_cb(void *opaque)
417 TrimAIOCB *iocb = opaque;
419 iocb->common.cb(iocb->common.opaque, iocb->ret);
421 qemu_bh_delete(iocb->bh);
422 iocb->bh = NULL;
423 qemu_aio_unref(iocb);
/* Completion callback (and initial driver) for TRIM: walks the guest's
 * range entries one at a time, issuing one discard per non-empty entry.
 * The (i, j) cursor survives across invocations; when the list is
 * exhausted or an error occurred, the completion BH is scheduled. */
static void ide_issue_trim_cb(void *opaque, int ret)
{
    TrimAIOCB *iocb = opaque;
    if (ret >= 0) {
        while (iocb->j < iocb->qiov->niov) {
            int j = iocb->j;
            while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
                int i = iocb->i;
                uint64_t *buffer = iocb->qiov->iov[j].iov_base;

                /* 6-byte LBA + 2-byte range per entry */
                uint64_t entry = le64_to_cpu(buffer[i]);
                uint64_t sector = entry & 0x0000ffffffffffffULL;
                uint16_t count = entry >> 48;

                if (count == 0) {
                    /* Empty entry: skip without issuing a discard. */
                    continue;
                }

                /* Got an entry! Submit and exit.  */
                iocb->aiocb = blk_aio_pdiscard(iocb->blk,
                                               sector << BDRV_SECTOR_BITS,
                                               count << BDRV_SECTOR_BITS,
                                               ide_issue_trim_cb, opaque);
                return;
            }

            iocb->j++;
            iocb->i = -1;
        }
    } else {
        /* Record the error; the loop above is skipped from now on. */
        iocb->ret = ret;
    }

    iocb->aiocb = NULL;
    if (iocb->bh) {
        qemu_bh_schedule(iocb->bh);
    }
}
466 BlockAIOCB *ide_issue_trim(
467 int64_t offset, QEMUIOVector *qiov,
468 BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
470 BlockBackend *blk = opaque;
471 TrimAIOCB *iocb;
473 iocb = blk_aio_get(&trim_aiocb_info, blk, cb, cb_opaque);
474 iocb->blk = blk;
475 iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
476 iocb->ret = 0;
477 iocb->qiov = qiov;
478 iocb->i = -1;
479 iocb->j = 0;
480 ide_issue_trim_cb(iocb, 0);
481 return &iocb->common;
484 void ide_abort_command(IDEState *s)
486 ide_transfer_stop(s);
487 s->status = READY_STAT | ERR_STAT;
488 s->error = ABRT_ERR;
491 static void ide_set_retry(IDEState *s)
493 s->bus->retry_unit = s->unit;
494 s->bus->retry_sector_num = ide_get_sector(s);
495 s->bus->retry_nsector = s->nsector;
498 static void ide_clear_retry(IDEState *s)
500 s->bus->retry_unit = -1;
501 s->bus->retry_sector_num = 0;
502 s->bus->retry_nsector = 0;
/* Prepare a PIO data window of @size bytes at @buf and register
 * @end_transfer_func to run once the guest has drained/filled it. */
void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
                        EndTransferFunc *end_transfer_func)
{
    s->end_transfer_func = end_transfer_func;
    s->data_ptr = buf;
    s->data_end = buf + size;
    ide_set_retry(s);
    /* DRQ is only asserted for successful commands. */
    if (!(s->status & ERR_STAT)) {
        s->status |= DRQ_STAT;
    }
    if (s->bus->dma->ops->start_transfer) {
        s->bus->dma->ops->start_transfer(s->bus->dma);
    }
}
521 static void ide_cmd_done(IDEState *s)
523 if (s->bus->dma->ops->cmd_done) {
524 s->bus->dma->ops->cmd_done(s->bus->dma);
/* Common tail for stop/cancel: park the data pointers on an empty
 * window, drop DRQ, and optionally fire the command-done hook. */
static void ide_transfer_halt(IDEState *s,
                              void(*end_transfer_func)(IDEState *),
                              bool notify)
{
    s->end_transfer_func = end_transfer_func;
    s->data_ptr = s->io_buffer;
    s->data_end = s->io_buffer;
    s->status &= ~DRQ_STAT;
    if (notify) {
        ide_cmd_done(s);
    }
}
/* End the current PIO transfer and notify the DMA backend. */
void ide_transfer_stop(IDEState *s)
{
    ide_transfer_halt(s, ide_transfer_stop, true);
}
/* Abandon the current PIO transfer without the command-done notification. */
static void ide_transfer_cancel(IDEState *s)
{
    ide_transfer_halt(s, ide_transfer_cancel, false);
}
/* Decode the current start sector from the task-file registers:
 * LBA48 when enabled, else LBA28, else CHS translation. */
int64_t ide_get_sector(IDEState *s)
{
    int64_t sector_num;
    if (s->select & 0x40) {
        /* lba */
        if (!s->lba48) {
            /* LBA28: 4 head bits + cylinder + sector registers. */
            sector_num = ((s->select & 0x0f) << 24) | (s->hcyl << 16) |
                (s->lcyl << 8) | s->sector;
        } else {
            /* LBA48: previous (hob) register values hold bits 47..24. */
            sector_num = ((int64_t)s->hob_hcyl << 40) |
                ((int64_t) s->hob_lcyl << 32) |
                ((int64_t) s->hob_sector << 24) |
                ((int64_t) s->hcyl << 16) |
                ((int64_t) s->lcyl << 8) | s->sector;
        }
    } else {
        /* CHS addressing; sector numbers are 1-based. */
        sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
            (s->select & 0x0f) * s->sectors + (s->sector - 1);
    }
    return sector_num;
}
/* Write @sector_num back into the task-file registers using the same
 * addressing mode (LBA48/LBA28/CHS) that ide_get_sector() decodes. */
void ide_set_sector(IDEState *s, int64_t sector_num)
{
    unsigned int cyl, r;
    if (s->select & 0x40) {
        if (!s->lba48) {
            /* LBA28: bits 27..24 go into the head field of select. */
            s->select = (s->select & 0xf0) | (sector_num >> 24);
            s->hcyl = (sector_num >> 16);
            s->lcyl = (sector_num >> 8);
            s->sector = (sector_num);
        } else {
            /* LBA48: high bytes land in the hob registers. */
            s->sector = sector_num;
            s->lcyl = sector_num >> 8;
            s->hcyl = sector_num >> 16;
            s->hob_sector = sector_num >> 24;
            s->hob_lcyl = sector_num >> 32;
            s->hob_hcyl = sector_num >> 40;
        }
    } else {
        /* CHS: split into cylinder / head / 1-based sector. */
        cyl = sector_num / (s->heads * s->sectors);
        r = sector_num % (s->heads * s->sectors);
        s->hcyl = cyl >> 8;
        s->lcyl = cyl;
        s->select = (s->select & 0xf0) | ((r / s->sectors) & 0x0f);
        s->sector = (r % s->sectors) + 1;
    }
}
600 static void ide_rw_error(IDEState *s) {
601 ide_abort_command(s);
602 ide_set_irq(s->bus);
605 static bool ide_sect_range_ok(IDEState *s,
606 uint64_t sector, uint64_t nb_sectors)
608 uint64_t total_sectors;
610 blk_get_geometry(s->blk, &total_sectors);
611 if (sector > total_sectors || nb_sectors > total_sectors - sector) {
612 return false;
614 return true;
/* Completion of a buffered read: copy the bounce buffer into the
 * caller's iovec and run its callback -- unless the request was
 * orphaned by ide_cancel_dma_sync(), in which case only the bookkeeping
 * and buffers are torn down. */
static void ide_buffered_readv_cb(void *opaque, int ret)
{
    IDEBufferedRequest *req = opaque;
    if (!req->orphaned) {
        if (!ret) {
            qemu_iovec_from_buf(req->original_qiov, 0, req->iov.iov_base,
                                req->original_qiov->size);
        }
        req->original_cb(req->original_opaque, ret);
    }
    QLIST_REMOVE(req, list);
    qemu_vfree(req->iov.iov_base);
    g_free(req);
}
#define MAX_BUFFERED_REQS 16

/* Issue a read through a private bounce buffer so the request can be
 * safely "orphaned" on cancellation without the guest seeing partial
 * data.  Caps the number of outstanding buffered requests; excess
 * requests fail with -EIO. */
BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
                               QEMUIOVector *iov, int nb_sectors,
                               BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCB *aioreq;
    IDEBufferedRequest *req;
    int c = 0;

    QLIST_FOREACH(req, &s->buffered_requests, list) {
        c++;
    }
    if (c > MAX_BUFFERED_REQS) {
        return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
    }

    req = g_new0(IDEBufferedRequest, 1);
    req->original_qiov = iov;
    req->original_cb = cb;
    req->original_opaque = opaque;
    /* Bounce buffer sized and aligned for the backend. */
    req->iov.iov_base = qemu_blockalign(blk_bs(s->blk), iov->size);
    req->iov.iov_len = iov->size;
    qemu_iovec_init_external(&req->qiov, &req->iov, 1);

    aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
                            &req->qiov, 0, ide_buffered_readv_cb, req);

    QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
    return aioreq;
}
/**
 * Cancel all pending DMA requests.
 * Any buffered DMA requests are instantly canceled,
 * but any pending unbuffered DMA requests must be waited on.
 */
void ide_cancel_dma_sync(IDEState *s)
{
    IDEBufferedRequest *req;

    /* First invoke the callbacks of all buffered requests
     * and flag those requests as orphaned. Ideally there
     * are no unbuffered (Scatter Gather DMA Requests or
     * write requests) pending and we can avoid to drain. */
    QLIST_FOREACH(req, &s->buffered_requests, list) {
        if (!req->orphaned) {
            trace_ide_cancel_dma_sync_buffered(req->original_cb, req);
            req->original_cb(req->original_opaque, -ECANCELED);
        }
        req->orphaned = true;
    }

    /*
     * We can't cancel Scatter Gather DMA in the middle of the
     * operation or a partial (not full) DMA transfer would reach
     * the storage so we wait for completion instead (we behave
     * as if the DMA was completed by the time the guest tried
     * to cancel dma with bmdma_cmd_writeb with BM_CMD_START not
     * set).
     *
     * In the future we'll be able to safely cancel the I/O if the
     * whole DMA operation will be submitted to disk with a single
     * aio operation with preadv/pwritev.
     */
    if (s->bus->dma->aiocb) {
        trace_ide_cancel_dma_sync_remaining();
        blk_drain(s->blk);
        assert(s->bus->dma->aiocb == NULL);
    }
}
704 static void ide_sector_read(IDEState *s);
/* Completion of one PIO read leg: account the I/O, advance the sector
 * pointer, and open a data window so the guest can drain io_buffer
 * (ide_sector_read continues the command afterwards). */
static void ide_sector_read_cb(void *opaque, int ret)
{
    IDEState *s = opaque;
    int n;

    s->pio_aiocb = NULL;
    s->status &= ~BUSY_STAT;

    if (ret == -ECANCELED) {
        return;
    }
    if (ret != 0) {
        /* Error policy may stop the VM or retry; in both cases bail out. */
        if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
                                IDE_RETRY_READ)) {
            return;
        }
    }

    block_acct_done(blk_get_stats(s->blk), &s->acct);

    n = s->nsector;
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }

    ide_set_sector(s, ide_get_sector(s) + n);
    s->nsector -= n;
    /* Allow the guest to read the io_buffer */
    ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
    ide_set_irq(s->bus);
}
/* Start (or continue) a PIO sector read: validate the range, begin
 * accounting and kick off a buffered read of up to req_nb_sectors. */
static void ide_sector_read(IDEState *s)
{
    int64_t sector_num;
    int n;

    s->status = READY_STAT | SEEK_STAT;
    s->error = 0; /* not needed by IDE spec, but needed by Windows */
    sector_num = ide_get_sector(s);
    n = s->nsector;

    if (n == 0) {
        /* All sectors transferred; end the command. */
        ide_transfer_stop(s);
        return;
    }

    s->status |= BUSY_STAT;

    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }

    trace_ide_sector_read(sector_num, n);

    if (!ide_sect_range_ok(s, sector_num, n)) {
        ide_rw_error(s);
        block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
        return;
    }

    s->iov.iov_base = s->io_buffer;
    s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&s->qiov, &s->iov, 1);

    block_acct_start(blk_get_stats(s->blk), &s->acct,
                     n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
    s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
                                      ide_sector_read_cb, s);
}
777 void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
779 if (s->bus->dma->ops->commit_buf) {
780 s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
782 s->io_buffer_offset += tx_bytes;
783 qemu_sglist_destroy(&s->sg);
786 void ide_set_inactive(IDEState *s, bool more)
788 s->bus->dma->aiocb = NULL;
789 ide_clear_retry(s);
790 if (s->bus->dma->ops->set_inactive) {
791 s->bus->dma->ops->set_inactive(s->bus->dma, more);
793 ide_cmd_done(s);
/* Fail the current DMA command: discard staged data, abort the command,
 * deactivate DMA and raise an interrupt so the guest notices. */
void ide_dma_error(IDEState *s)
{
    dma_buf_commit(s, 0);
    ide_abort_command(s);
    ide_set_inactive(s, false);
    ide_set_irq(s->bus);
}
/* Apply the drive's rerror/werror policy to a failed request.
 * @error is a positive errno; @op encodes the retry class (DMA/PIO/
 * ATAPI/flush and read/write direction).  Returns nonzero unless the
 * policy says to ignore the error (in which case the caller should
 * complete the request as if it succeeded). */
int ide_handle_rw_error(IDEState *s, int error, int op)
{
    bool is_read = (op & IDE_RETRY_READ) != 0;
    BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* VM will be stopped; remember what to retry on resume. */
        assert(s->bus->retry_unit == s->unit);
        s->bus->error_status = op;
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        block_acct_failed(blk_get_stats(s->blk), &s->acct);
        if (IS_IDE_RETRY_DMA(op)) {
            ide_dma_error(s);
        } else if (IS_IDE_RETRY_ATAPI(op)) {
            ide_atapi_io_error(s, -error);
        } else {
            ide_rw_error(s);
        }
    }
    blk_error_action(s->blk, action, is_read, error);
    return action != BLOCK_ERROR_ACTION_IGNORE;
}
/* DMA continuation callback: commit the bytes moved by the previous
 * leg, then either finish the command or map the next PRDs and issue
 * the next read/write/trim leg. */
static void ide_dma_cb(void *opaque, int ret)
{
    IDEState *s = opaque;
    int n;
    int64_t sector_num;
    uint64_t offset;
    bool stay_active = false;

    if (ret == -ECANCELED) {
        return;
    }
    if (ret < 0) {
        if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
            s->bus->dma->aiocb = NULL;
            dma_buf_commit(s, 0);
            return;
        }
    }

    n = s->io_buffer_size >> 9;
    if (n > s->nsector) {
        /* The PRDs were longer than needed for this request. Shorten them so
         * we don't get a negative remainder. The Active bit must remain set
         * after the request completes. */
        n = s->nsector;
        stay_active = true;
    }

    sector_num = ide_get_sector(s);
    if (n > 0) {
        assert(n * 512 == s->sg.size);
        dma_buf_commit(s, s->sg.size);
        sector_num += n;
        ide_set_sector(s, sector_num);
        s->nsector -= n;
    }

    /* end of transfer ? */
    if (s->nsector == 0) {
        s->status = READY_STAT | SEEK_STAT;
        ide_set_irq(s->bus);
        goto eot;
    }

    /* launch next transfer */
    n = s->nsector;
    s->io_buffer_index = 0;
    s->io_buffer_size = n * 512;
    if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size) < 512) {
        /* The PRDs were too short. Reset the Active bit, but don't raise an
         * interrupt. */
        s->status = READY_STAT | SEEK_STAT;
        dma_buf_commit(s, 0);
        goto eot;
    }

    trace_ide_dma_cb(s, sector_num, n, IDE_DMA_CMD_str(s->dma_cmd));

    if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
        !ide_sect_range_ok(s, sector_num, n)) {
        ide_dma_error(s);
        block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
        return;
    }

    offset = sector_num << BDRV_SECTOR_BITS;
    switch (s->dma_cmd) {
    case IDE_DMA_READ:
        s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
                                          BDRV_SECTOR_SIZE, ide_dma_cb, s);
        break;
    case IDE_DMA_WRITE:
        s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
                                           BDRV_SECTOR_SIZE, ide_dma_cb, s);
        break;
    case IDE_DMA_TRIM:
        s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
                                        &s->sg, offset, BDRV_SECTOR_SIZE,
                                        ide_issue_trim, s->blk, ide_dma_cb, s,
                                        DMA_DIRECTION_TO_DEVICE);
        break;
    default:
        abort();
    }
    return;

eot:
    if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
        block_acct_done(blk_get_stats(s->blk), &s->acct);
    }
    ide_set_inactive(s, stay_active);
}
919 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
921 s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
922 s->io_buffer_size = 0;
923 s->dma_cmd = dma_cmd;
925 switch (dma_cmd) {
926 case IDE_DMA_READ:
927 block_acct_start(blk_get_stats(s->blk), &s->acct,
928 s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
929 break;
930 case IDE_DMA_WRITE:
931 block_acct_start(blk_get_stats(s->blk), &s->acct,
932 s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
933 break;
934 default:
935 break;
938 ide_start_dma(s, ide_dma_cb);
941 void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
943 s->io_buffer_index = 0;
944 ide_set_retry(s);
945 if (s->bus->dma->ops->start_dma) {
946 s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
950 static void ide_sector_write(IDEState *s);
952 static void ide_sector_write_timer_cb(void *opaque)
954 IDEState *s = opaque;
955 ide_set_irq(s->bus);
/* Completion of one PIO write leg: account the I/O, advance the sector
 * pointer, and either end the command or open the next data window.
 * The IRQ may be delayed via a timer to work around a Windows 2000
 * installer bug. */
static void ide_sector_write_cb(void *opaque, int ret)
{
    IDEState *s = opaque;
    int n;

    if (ret == -ECANCELED) {
        return;
    }

    s->pio_aiocb = NULL;
    s->status &= ~BUSY_STAT;

    if (ret != 0) {
        if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
            return;
        }
    }

    block_acct_done(blk_get_stats(s->blk), &s->acct);

    n = s->nsector;
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }
    s->nsector -= n;

    ide_set_sector(s, ide_get_sector(s) + n);
    if (s->nsector == 0) {
        /* no more sectors to write */
        ide_transfer_stop(s);
    } else {
        int n1 = s->nsector;
        if (n1 > s->req_nb_sectors) {
            n1 = s->req_nb_sectors;
        }
        ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
                           ide_sector_write);
    }

    if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
        /* It seems there is a bug in the Windows 2000 installer HDD
           IDE driver which fills the disk with empty logs when the
           IDE write IRQ comes too early. This hack tries to correct
           that at the expense of slower write performances. Use this
           option _only_ to install Windows 2000. You must disable it
           for normal use. */
        timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                  (NANOSECONDS_PER_SECOND / 1000));
    } else {
        ide_set_irq(s->bus);
    }
}
/* Write the sectors the guest just placed in io_buffer: validate the
 * range, start accounting and submit an async write of up to
 * req_nb_sectors. */
static void ide_sector_write(IDEState *s)
{
    int64_t sector_num;
    int n;

    s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
    sector_num = ide_get_sector(s);

    n = s->nsector;
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }

    trace_ide_sector_write(sector_num, n);

    if (!ide_sect_range_ok(s, sector_num, n)) {
        ide_rw_error(s);
        block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
        return;
    }

    s->iov.iov_base = s->io_buffer;
    s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&s->qiov, &s->iov, 1);

    block_acct_start(blk_get_stats(s->blk), &s->acct,
                     n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
    s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
                                   &s->qiov, 0, ide_sector_write_cb, s);
}
/* Completion of a FLUSH CACHE command: apply error policy on failure,
 * otherwise mark the command done and interrupt the guest. */
static void ide_flush_cb(void *opaque, int ret)
{
    IDEState *s = opaque;

    s->pio_aiocb = NULL;

    if (ret == -ECANCELED) {
        return;
    }
    if (ret < 0) {
        /* XXX: What sector number to set here? */
        if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
            return;
        }
    }

    if (s->blk) {
        block_acct_done(blk_get_stats(s->blk), &s->acct);
    }
    s->status = READY_STAT | SEEK_STAT;
    ide_cmd_done(s);
    ide_set_irq(s->bus);
}
/* Handle FLUSH CACHE: submit an async flush to the backend, or complete
 * immediately when there is no medium to flush. */
static void ide_flush_cache(IDEState *s)
{
    if (s->blk == NULL) {
        ide_flush_cb(s, 0);
        return;
    }

    s->status |= BUSY_STAT;
    ide_set_retry(s);
    block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);

    if (blk_bs(s->blk)) {
        s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
    } else {
        /* XXX blk_aio_flush() crashes when blk_bs(blk) is NULL, remove this
         * temporary workaround when blk_aio_*() functions handle NULL blk_bs.
         */
        ide_flush_cb(s, 0);
    }
}
1087 static void ide_cfata_metadata_inquiry(IDEState *s)
1089 uint16_t *p;
1090 uint32_t spd;
1092 p = (uint16_t *) s->io_buffer;
1093 memset(p, 0, 0x200);
1094 spd = ((s->mdata_size - 1) >> 9) + 1;
1096 put_le16(p + 0, 0x0001); /* Data format revision */
1097 put_le16(p + 1, 0x0000); /* Media property: silicon */
1098 put_le16(p + 2, s->media_changed); /* Media status */
1099 put_le16(p + 3, s->mdata_size & 0xffff); /* Capacity in bytes (low) */
1100 put_le16(p + 4, s->mdata_size >> 16); /* Capacity in bytes (high) */
1101 put_le16(p + 5, spd & 0xffff); /* Sectors per device (low) */
1102 put_le16(p + 6, spd >> 16); /* Sectors per device (high) */
/* Read from the CFATA metadata area into io_buffer.  The hcyl/lcyl
 * registers select the 512-byte metadata sector; out-of-range requests
 * are aborted. */
static void ide_cfata_metadata_read(IDEState *s)
{
    uint16_t *p;

    if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
        s->status = ERR_STAT;
        s->error = ABRT_ERR;
        return;
    }

    p = (uint16_t *) s->io_buffer;
    memset(p, 0, 0x200);

    put_le16(p + 0, s->media_changed);	/* Media status */
    /* Copy at most one sector minus the status word, clamped to what
     * remains of the metadata area and to the requested length. */
    memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
           MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
                   s->nsector << 9), 0x200 - 2));
}
/* CFA Write Metadata Storage: copy from io_buffer (skipping the 2-byte
 * header) into mdata_storage at the hcyl/lcyl sector offset. */
static void ide_cfata_metadata_write(IDEState *s)
{
    if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
        s->status = ERR_STAT;
        s->error = ABRT_ERR;
        return;
    }

    s->media_changed = 0;

    memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
           s->io_buffer + 2,
           MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
                   s->nsector << 9), 0x200 - 2));
}
/* called when the inserted state of the media has changed */
static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
{
    IDEState *s = opaque;
    uint64_t nb_sectors;

    s->tray_open = !load;
    blk_get_geometry(s->blk, &nb_sectors);
    s->nb_sectors = nb_sectors;

    /*
     * First indicate to the guest that a CD has been removed.  That's
     * done on the next command the guest sends us.
     *
     * Then we set UNIT_ATTENTION, by which the guest will
     * detect a new CD in the drive.  See ide_atapi_cmd() for details.
     */
    s->cdrom_changed = 1;
    s->events.new_media = true;
    s->events.eject_request = false;
    ide_set_irq(s->bus);
}
1163 static void ide_cd_eject_request_cb(void *opaque, bool force)
1165 IDEState *s = opaque;
1167 s->events.eject_request = true;
1168 if (force) {
1169 s->tray_locked = false;
1171 ide_set_irq(s->bus);
1174 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1176 s->lba48 = lba48;
1178 /* handle the 'magic' 0 nsector count conversion here. to avoid
1179 * fiddling with the rest of the read logic, we just store the
1180 * full sector count in ->nsector and ignore ->hob_nsector from now
1182 if (!s->lba48) {
1183 if (!s->nsector)
1184 s->nsector = 256;
1185 } else {
1186 if (!s->nsector && !s->hob_nsector)
1187 s->nsector = 65536;
1188 else {
1189 int lo = s->nsector;
1190 int hi = s->hob_nsector;
1192 s->nsector = (hi << 8) | lo;
1197 static void ide_clear_hob(IDEBus *bus)
1199 /* any write clears HOB high bit of device control register */
1200 bus->ifs[0].select &= ~(1 << 7);
1201 bus->ifs[1].select &= ~(1 << 7);
/* IOport [W]rite [R]egisters */
/* Offsets (addr & 7) of the writable task-file registers. */
enum ATA_IOPORT_WR {
    ATA_IOPORT_WR_DATA = 0,
    ATA_IOPORT_WR_FEATURES = 1,
    ATA_IOPORT_WR_SECTOR_COUNT = 2,
    ATA_IOPORT_WR_SECTOR_NUMBER = 3,
    ATA_IOPORT_WR_CYLINDER_LOW = 4,
    ATA_IOPORT_WR_CYLINDER_HIGH = 5,
    ATA_IOPORT_WR_DEVICE_HEAD = 6,
    ATA_IOPORT_WR_COMMAND = 7,
    ATA_IOPORT_WR_NUM_REGISTERS,
};
/* Human-readable register names, used by the trace points. */
const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = {
    [ATA_IOPORT_WR_DATA] = "Data",
    [ATA_IOPORT_WR_FEATURES] = "Features",
    [ATA_IOPORT_WR_SECTOR_COUNT] = "Sector Count",
    [ATA_IOPORT_WR_SECTOR_NUMBER] = "Sector Number",
    [ATA_IOPORT_WR_CYLINDER_LOW] = "Cylinder Low",
    [ATA_IOPORT_WR_CYLINDER_HIGH] = "Cylinder High",
    [ATA_IOPORT_WR_DEVICE_HEAD] = "Device/Head",
    [ATA_IOPORT_WR_COMMAND] = "Command"
};
1228 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1230 IDEBus *bus = opaque;
1231 IDEState *s = idebus_active_if(bus);
1232 int reg_num = addr & 7;
1234 trace_ide_ioport_write(addr, ATA_IOPORT_WR_lookup[reg_num], val, bus, s);
1236 /* ignore writes to command block while busy with previous command */
1237 if (reg_num != 7 && (s->status & (BUSY_STAT|DRQ_STAT))) {
1238 return;
1241 switch (reg_num) {
1242 case 0:
1243 break;
1244 case ATA_IOPORT_WR_FEATURES:
1245 ide_clear_hob(bus);
1246 /* NOTE: data is written to the two drives */
1247 bus->ifs[0].hob_feature = bus->ifs[0].feature;
1248 bus->ifs[1].hob_feature = bus->ifs[1].feature;
1249 bus->ifs[0].feature = val;
1250 bus->ifs[1].feature = val;
1251 break;
1252 case ATA_IOPORT_WR_SECTOR_COUNT:
1253 ide_clear_hob(bus);
1254 bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1255 bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1256 bus->ifs[0].nsector = val;
1257 bus->ifs[1].nsector = val;
1258 break;
1259 case ATA_IOPORT_WR_SECTOR_NUMBER:
1260 ide_clear_hob(bus);
1261 bus->ifs[0].hob_sector = bus->ifs[0].sector;
1262 bus->ifs[1].hob_sector = bus->ifs[1].sector;
1263 bus->ifs[0].sector = val;
1264 bus->ifs[1].sector = val;
1265 break;
1266 case ATA_IOPORT_WR_CYLINDER_LOW:
1267 ide_clear_hob(bus);
1268 bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1269 bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1270 bus->ifs[0].lcyl = val;
1271 bus->ifs[1].lcyl = val;
1272 break;
1273 case ATA_IOPORT_WR_CYLINDER_HIGH:
1274 ide_clear_hob(bus);
1275 bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1276 bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1277 bus->ifs[0].hcyl = val;
1278 bus->ifs[1].hcyl = val;
1279 break;
1280 case ATA_IOPORT_WR_DEVICE_HEAD:
1281 /* FIXME: HOB readback uses bit 7 */
1282 bus->ifs[0].select = (val & ~0x10) | 0xa0;
1283 bus->ifs[1].select = (val | 0x10) | 0xa0;
1284 /* select drive */
1285 bus->unit = (val >> 4) & 1;
1286 break;
1287 default:
1288 case ATA_IOPORT_WR_COMMAND:
1289 /* command */
1290 ide_exec_cmd(bus, val);
1291 break;
/* Reset one drive to its power-on register state, cancelling any
 * in-flight PIO request first. */
static void ide_reset(IDEState *s)
{
    trace_ide_reset(s);

    if (s->pio_aiocb) {
        /* blk_aio_cancel() waits for the request to complete/cancel. */
        blk_aio_cancel(s->pio_aiocb);
        s->pio_aiocb = NULL;
    }

    if (s->drive_kind == IDE_CFATA)
        s->mult_sectors = 0;
    else
        s->mult_sectors = MAX_MULT_SECTORS;
    /* ide regs */
    s->feature = 0;
    s->error = 0;
    s->nsector = 0;
    s->sector = 0;
    s->lcyl = 0;
    s->hcyl = 0;

    /* lba48 */
    s->hob_feature = 0;
    s->hob_sector = 0;
    s->hob_nsector = 0;
    s->hob_lcyl = 0;
    s->hob_hcyl = 0;

    s->select = 0xa0;
    s->status = READY_STAT | SEEK_STAT;

    s->lba48 = 0;

    /* ATAPI specific */
    s->sense_key = 0;
    s->asc = 0;
    s->cdrom_changed = 0;
    s->packet_transfer_size = 0;
    s->elementary_transfer_size = 0;
    s->io_buffer_index = 0;
    s->cd_sector_size = 0;
    s->atapi_dma = 0;
    s->tray_locked = 0;
    s->tray_open = 0;
    /* ATA DMA state */
    s->io_buffer_size = 0;
    s->req_nb_sectors = 0;

    ide_set_signature(s);
    /* init the transfer handler so that 0xffff is returned on data
       accesses */
    s->end_transfer_func = ide_dummy_transfer_stop;
    ide_dummy_transfer_stop(s);
    s->media_changed = 0;
}
1351 static bool cmd_nop(IDEState *s, uint8_t cmd)
1353 return true;
/* DEVICE RESET (ATAPI only, see ide_cmd_table flags). */
static bool cmd_device_reset(IDEState *s, uint8_t cmd)
{
    /* Halt PIO (in the DRQ phase), then DMA */
    ide_transfer_cancel(s);
    ide_cancel_dma_sync(s);

    /* Reset any PIO commands, reset signature, etc */
    ide_reset(s);

    /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
     * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
    s->status = 0x00;

    /* Do not overwrite status register */
    return false;
}
1373 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1375 switch (s->feature) {
1376 case DSM_TRIM:
1377 if (s->blk) {
1378 ide_sector_start_dma(s, IDE_DMA_TRIM);
1379 return false;
1381 break;
1384 ide_abort_command(s);
1385 return true;
1388 static bool cmd_identify(IDEState *s, uint8_t cmd)
1390 if (s->blk && s->drive_kind != IDE_CD) {
1391 if (s->drive_kind != IDE_CFATA) {
1392 ide_identify(s);
1393 } else {
1394 ide_cfata_identify(s);
1396 s->status = READY_STAT | SEEK_STAT;
1397 ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1398 ide_set_irq(s->bus);
1399 return false;
1400 } else {
1401 if (s->drive_kind == IDE_CD) {
1402 ide_set_signature(s);
1404 ide_abort_command(s);
1407 return true;
1410 static bool cmd_verify(IDEState *s, uint8_t cmd)
1412 bool lba48 = (cmd == WIN_VERIFY_EXT);
1414 /* do sector number check ? */
1415 ide_cmd_lba48_transform(s, lba48);
1417 return true;
1420 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1422 if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1423 /* Disable Read and Write Multiple */
1424 s->mult_sectors = 0;
1425 } else if ((s->nsector & 0xff) != 0 &&
1426 ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1427 (s->nsector & (s->nsector - 1)) != 0)) {
1428 ide_abort_command(s);
1429 } else {
1430 s->mult_sectors = s->nsector & 0xff;
1433 return true;
1436 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1438 bool lba48 = (cmd == WIN_MULTREAD_EXT);
1440 if (!s->blk || !s->mult_sectors) {
1441 ide_abort_command(s);
1442 return true;
1445 ide_cmd_lba48_transform(s, lba48);
1446 s->req_nb_sectors = s->mult_sectors;
1447 ide_sector_read(s);
1448 return false;
1451 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1453 bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1454 int n;
1456 if (!s->blk || !s->mult_sectors) {
1457 ide_abort_command(s);
1458 return true;
1461 ide_cmd_lba48_transform(s, lba48);
1463 s->req_nb_sectors = s->mult_sectors;
1464 n = MIN(s->nsector, s->req_nb_sectors);
1466 s->status = SEEK_STAT | READY_STAT;
1467 ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1469 s->media_changed = 1;
1471 return false;
1474 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1476 bool lba48 = (cmd == WIN_READ_EXT);
1478 if (s->drive_kind == IDE_CD) {
1479 ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1480 ide_abort_command(s);
1481 return true;
1484 if (!s->blk) {
1485 ide_abort_command(s);
1486 return true;
1489 ide_cmd_lba48_transform(s, lba48);
1490 s->req_nb_sectors = 1;
1491 ide_sector_read(s);
1493 return false;
1496 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1498 bool lba48 = (cmd == WIN_WRITE_EXT);
1500 if (!s->blk) {
1501 ide_abort_command(s);
1502 return true;
1505 ide_cmd_lba48_transform(s, lba48);
1507 s->req_nb_sectors = 1;
1508 s->status = SEEK_STAT | READY_STAT;
1509 ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1511 s->media_changed = 1;
1513 return false;
1516 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1518 bool lba48 = (cmd == WIN_READDMA_EXT);
1520 if (!s->blk) {
1521 ide_abort_command(s);
1522 return true;
1525 ide_cmd_lba48_transform(s, lba48);
1526 ide_sector_start_dma(s, IDE_DMA_READ);
1528 return false;
1531 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1533 bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1535 if (!s->blk) {
1536 ide_abort_command(s);
1537 return true;
1540 ide_cmd_lba48_transform(s, lba48);
1541 ide_sector_start_dma(s, IDE_DMA_WRITE);
1543 s->media_changed = 1;
1545 return false;
1548 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1550 ide_flush_cache(s);
1551 return false;
1554 static bool cmd_seek(IDEState *s, uint8_t cmd)
1556 /* XXX: Check that seek is within bounds */
1557 return true;
1560 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1562 bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1564 /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1565 if (s->nb_sectors == 0) {
1566 ide_abort_command(s);
1567 return true;
1570 ide_cmd_lba48_transform(s, lba48);
1571 ide_set_sector(s, s->nb_sectors - 1);
1573 return true;
1576 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1578 s->nsector = 0xff; /* device active or idle */
1579 return true;
/* SET FEATURES: write-cache control, transfer-mode selection and a set
 * of accepted-but-ignored subcommands; anything else aborts. */
static bool cmd_set_features(IDEState *s, uint8_t cmd)
{
    uint16_t *identify_data;

    if (!s->blk) {
        ide_abort_command(s);
        return true;
    }

    /* XXX: valid for CDROM ? */
    switch (s->feature) {
    case 0x02: /* write cache enable */
        blk_set_enable_write_cache(s->blk, true);
        identify_data = (uint16_t *)s->identify_data;
        put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
        return true;
    case 0x82: /* write cache disable */
        blk_set_enable_write_cache(s->blk, false);
        identify_data = (uint16_t *)s->identify_data;
        put_le16(identify_data + 85, (1 << 14) | 1);
        /* Disabling the write cache flushes it; completion is async. */
        ide_flush_cache(s);
        return false;
    case 0xcc: /* reverting to power-on defaults enable */
    case 0x66: /* reverting to power-on defaults disable */
    case 0xaa: /* read look-ahead enable */
    case 0x55: /* read look-ahead disable */
    case 0x05: /* set advanced power management mode */
    case 0x85: /* disable advanced power management mode */
    case 0x69: /* NOP */
    case 0x67: /* NOP */
    case 0x96: /* NOP */
    case 0x9a: /* NOP */
    case 0x42: /* enable Automatic Acoustic Mode */
    case 0xc2: /* disable Automatic Acoustic Mode */
        return true;
    case 0x03: /* set transfer mode */
    {
        uint8_t val = s->nsector & 0x07;
        identify_data = (uint16_t *)s->identify_data;

        /* Reflect the selected mode in the identify data words 62/63/88. */
        switch (s->nsector >> 3) {
        case 0x00: /* pio default */
        case 0x01: /* pio mode */
            put_le16(identify_data + 62, 0x07);
            put_le16(identify_data + 63, 0x07);
            put_le16(identify_data + 88, 0x3f);
            break;
        case 0x02: /* single word dma mode */
            put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
            put_le16(identify_data + 63, 0x07);
            put_le16(identify_data + 88, 0x3f);
            break;
        case 0x04: /* mdma mode */
            put_le16(identify_data + 62, 0x07);
            put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
            put_le16(identify_data + 88, 0x3f);
            break;
        case 0x08: /* udma mode */
            put_le16(identify_data + 62, 0x07);
            put_le16(identify_data + 63, 0x07);
            put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
            break;
        default:
            goto abort_cmd;
        }
        return true;
    }
    }

abort_cmd:
    ide_abort_command(s);
    return true;
}
1657 /*** ATAPI commands ***/
1659 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1661 ide_atapi_identify(s);
1662 s->status = READY_STAT | SEEK_STAT;
1663 ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1664 ide_set_irq(s->bus);
1665 return false;
/* EXECUTE DEVICE DIAGNOSTIC.  Note the IRQ is only raised for non-ATAPI
 * devices; packet devices report through the cleared status instead. */
static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
{
    ide_set_signature(s);

    if (s->drive_kind == IDE_CD) {
        s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
                        * devices to return a clear status register
                        * with READY_STAT *not* set. */
        s->error = 0x01;
    } else {
        s->status = READY_STAT | SEEK_STAT;
        /* The bits of the error register are not as usual for this command!
         * They are part of the regular output (this is why ERR_STAT isn't set)
         * Device 0 passed, Device 1 passed or not present. */
        s->error = 0x01;
        ide_set_irq(s->bus);
    }

    return false;
}
/* PACKET: begin an ATAPI command; the 12-byte packet arrives through the
 * data port and is handled by ide_atapi_cmd(). */
static bool cmd_packet(IDEState *s, uint8_t cmd)
{
    /* overlapping commands not supported */
    if (s->feature & 0x02) {
        ide_abort_command(s);
        return true;
    }

    s->status = READY_STAT | SEEK_STAT;
    /* Feature bit 0 selects DMA for the data phase of the packet command. */
    s->atapi_dma = s->feature & 1;
    if (s->atapi_dma) {
        s->dma_cmd = IDE_DMA_ATAPI;
    }
    s->nsector = 1;
    ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
                       ide_atapi_cmd);
    return false;
}
1709 /*** CF-ATA commands ***/
1711 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1713 s->error = 0x09; /* miscellaneous error */
1714 s->status = READY_STAT | SEEK_STAT;
1715 ide_set_irq(s->bus);
1717 return false;
1720 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1722 /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1723 * required for Windows 8 to work with AHCI */
1725 if (cmd == CFA_WEAR_LEVEL) {
1726 s->nsector = 0;
1729 if (cmd == CFA_ERASE_SECTORS) {
1730 s->media_changed = 1;
1733 return true;
/* CFA TRANSLATE SECTOR: return a 512-byte sector information block. */
static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
{
    s->status = READY_STAT | SEEK_STAT;

    memset(s->io_buffer, 0, 0x200);
    s->io_buffer[0x00] = s->hcyl;                   /* Cyl MSB */
    s->io_buffer[0x01] = s->lcyl;                   /* Cyl LSB */
    s->io_buffer[0x02] = s->select;                 /* Head */
    s->io_buffer[0x03] = s->sector;                 /* Sector */
    s->io_buffer[0x04] = ide_get_sector(s) >> 16;   /* LBA MSB */
    s->io_buffer[0x05] = ide_get_sector(s) >> 8;    /* LBA */
    s->io_buffer[0x06] = ide_get_sector(s) >> 0;    /* LBA LSB */
    s->io_buffer[0x13] = 0x00;                      /* Erase flag */
    s->io_buffer[0x18] = 0x00;                      /* Hot count */
    s->io_buffer[0x19] = 0x00;                      /* Hot count */
    s->io_buffer[0x1a] = 0x01;                      /* Hot count */

    ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
    ide_set_irq(s->bus);

    return false;
}
/* CFA ACCESS METADATA STORAGE: dispatch on the feature register to the
 * inquiry/read/write helpers, then transfer the 512-byte result. */
static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
{
    switch (s->feature) {
    case 0x02:  /* Inquiry Metadata Storage */
        ide_cfata_metadata_inquiry(s);
        break;
    case 0x03:  /* Read Metadata Storage */
        ide_cfata_metadata_read(s);
        break;
    case 0x04:  /* Write Metadata Storage */
        ide_cfata_metadata_write(s);
        break;
    default:
        ide_abort_command(s);
        return true;
    }

    ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
    s->status = 0x00; /* NOTE: READY is _not_ set */
    ide_set_irq(s->bus);

    return false;
}
1783 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1785 switch (s->feature) {
1786 case 0x01: /* sense temperature in device */
1787 s->nsector = 0x50; /* +20 C */
1788 break;
1789 default:
1790 ide_abort_command(s);
1791 return true;
1794 return true;
1798 /*** SMART commands ***/
/* SMART: dispatch on the feature register.  All SMART commands require
 * the magic hcyl/lcyl signature (0xc2/0x4f) and, except for
 * SMART_ENABLE, that SMART has been enabled. */
static bool cmd_smart(IDEState *s, uint8_t cmd)
{
    int n;

    if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
        goto abort_cmd;
    }

    if (!s->smart_enabled && s->feature != SMART_ENABLE) {
        goto abort_cmd;
    }

    switch (s->feature) {
    case SMART_DISABLE:
        s->smart_enabled = 0;
        return true;

    case SMART_ENABLE:
        s->smart_enabled = 1;
        return true;

    case SMART_ATTR_AUTOSAVE:
        switch (s->sector) {
        case 0x00:
            s->smart_autosave = 0;
            break;
        case 0xf1:
            s->smart_autosave = 1;
            break;
        default:
            goto abort_cmd;
        }
        return true;

    case SMART_STATUS:
        /* Health is reported through magic cylinder values. */
        if (!s->smart_errors) {
            s->hcyl = 0xc2;
            s->lcyl = 0x4f;
        } else {
            s->hcyl = 0x2c;
            s->lcyl = 0xf4;
        }
        return true;

    case SMART_READ_THRESH:
        memset(s->io_buffer, 0, 0x200);
        s->io_buffer[0] = 0x01; /* smart struct version */

        for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
            s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
            s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
        }

        /* checksum */
        for (n = 0; n < 511; n++) {
            s->io_buffer[511] += s->io_buffer[n];
        }
        s->io_buffer[511] = 0x100 - s->io_buffer[511];

        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
        ide_set_irq(s->bus);
        return false;

    case SMART_READ_DATA:
        memset(s->io_buffer, 0, 0x200);
        s->io_buffer[0] = 0x01; /* smart struct version */

        for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
            int i;
            for (i = 0; i < 11; i++) {
                s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
            }
        }

        s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
        if (s->smart_selftest_count == 0) {
            s->io_buffer[363] = 0;
        } else {
            s->io_buffer[363] =
                s->smart_selftest_data[3 +
                                       (s->smart_selftest_count - 1) *
                                       24];
        }
        s->io_buffer[364] = 0x20;
        s->io_buffer[365] = 0x01;
        /* offline data collection capacity: execute + self-test*/
        s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
        s->io_buffer[368] = 0x03; /* smart capability (1) */
        s->io_buffer[369] = 0x00; /* smart capability (2) */
        s->io_buffer[370] = 0x01; /* error logging supported */
        s->io_buffer[372] = 0x02; /* minutes for poll short test */
        s->io_buffer[373] = 0x36; /* minutes for poll ext test */
        s->io_buffer[374] = 0x01; /* minutes for poll conveyance */

        /* checksum over the first 511 bytes */
        for (n = 0; n < 511; n++) {
            s->io_buffer[511] += s->io_buffer[n];
        }
        s->io_buffer[511] = 0x100 - s->io_buffer[511];

        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
        ide_set_irq(s->bus);
        return false;

    case SMART_READ_LOG:
        switch (s->sector) {
        case 0x01: /* summary smart error log */
            memset(s->io_buffer, 0, 0x200);
            s->io_buffer[0] = 0x01;
            s->io_buffer[1] = 0x00; /* no error entries */
            s->io_buffer[452] = s->smart_errors & 0xff;
            s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;

            for (n = 0; n < 511; n++) {
                s->io_buffer[511] += s->io_buffer[n];
            }
            s->io_buffer[511] = 0x100 - s->io_buffer[511];
            break;
        case 0x06: /* smart self test log */
            memset(s->io_buffer, 0, 0x200);
            s->io_buffer[0] = 0x01;
            if (s->smart_selftest_count == 0) {
                s->io_buffer[508] = 0;
            } else {
                s->io_buffer[508] = s->smart_selftest_count;
                for (n = 2; n < 506; n++) {
                    s->io_buffer[n] = s->smart_selftest_data[n];
                }
            }

            for (n = 0; n < 511; n++) {
                s->io_buffer[511] += s->io_buffer[n];
            }
            s->io_buffer[511] = 0x100 - s->io_buffer[511];
            break;
        default:
            goto abort_cmd;
        }
        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
        ide_set_irq(s->bus);
        return false;

    case SMART_EXECUTE_OFFLINE:
        switch (s->sector) {
        case 0: /* off-line routine */
        case 1: /* short self test */
        case 2: /* extended self test */
            s->smart_selftest_count++;
            if (s->smart_selftest_count > 21) {
                /* The log wraps after 21 entries. */
                s->smart_selftest_count = 1;
            }
            n = 2 + (s->smart_selftest_count - 1) * 24;
            s->smart_selftest_data[n] = s->sector;
            s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
            s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
            s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
            break;
        default:
            goto abort_cmd;
        }
        return true;
    }

abort_cmd:
    ide_abort_command(s);
    return true;
}
/* Per-drive-kind permission bits for ide_cmd_table[].flags. */
#define HD_OK (1u << IDE_HD)
#define CD_OK (1u << IDE_CD)
#define CFA_OK (1u << IDE_CFATA)
#define HD_CFA_OK (HD_OK | CFA_OK)
#define ALL_OK (HD_OK | CD_OK | CFA_OK)

/* Set the Disk Seek Completed status bit during completion */
#define SET_DSC (1u << 8)

/* See ACS-2 T13/2015-D Table B.2 Command codes */
/* Dispatch table indexed by ATA command byte; unlisted entries are
 * NULL handlers and are rejected by ide_cmd_permitted(). */
static const struct {
    /* Returns true if the completion code should be run */
    bool (*handler)(IDEState *s, uint8_t cmd);
    int flags;
} ide_cmd_table[0x100] = {
    /* NOP not implemented, mandatory for CD */
    [CFA_REQ_EXT_ERROR_CODE] = { cmd_cfa_req_ext_error_code, CFA_OK },
    [WIN_DSM] = { cmd_data_set_management, HD_CFA_OK },
    [WIN_DEVICE_RESET] = { cmd_device_reset, CD_OK },
    [WIN_RECAL] = { cmd_nop, HD_CFA_OK | SET_DSC},
    [WIN_READ] = { cmd_read_pio, ALL_OK },
    [WIN_READ_ONCE] = { cmd_read_pio, HD_CFA_OK },
    [WIN_READ_EXT] = { cmd_read_pio, HD_CFA_OK },
    [WIN_READDMA_EXT] = { cmd_read_dma, HD_CFA_OK },
    [WIN_READ_NATIVE_MAX_EXT] = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
    [WIN_MULTREAD_EXT] = { cmd_read_multiple, HD_CFA_OK },
    [WIN_WRITE] = { cmd_write_pio, HD_CFA_OK },
    [WIN_WRITE_ONCE] = { cmd_write_pio, HD_CFA_OK },
    [WIN_WRITE_EXT] = { cmd_write_pio, HD_CFA_OK },
    [WIN_WRITEDMA_EXT] = { cmd_write_dma, HD_CFA_OK },
    [CFA_WRITE_SECT_WO_ERASE] = { cmd_write_pio, CFA_OK },
    [WIN_MULTWRITE_EXT] = { cmd_write_multiple, HD_CFA_OK },
    [WIN_WRITE_VERIFY] = { cmd_write_pio, HD_CFA_OK },
    [WIN_VERIFY] = { cmd_verify, HD_CFA_OK | SET_DSC },
    [WIN_VERIFY_ONCE] = { cmd_verify, HD_CFA_OK | SET_DSC },
    [WIN_VERIFY_EXT] = { cmd_verify, HD_CFA_OK | SET_DSC },
    [WIN_SEEK] = { cmd_seek, HD_CFA_OK | SET_DSC },
    [CFA_TRANSLATE_SECTOR] = { cmd_cfa_translate_sector, CFA_OK },
    [WIN_DIAGNOSE] = { cmd_exec_dev_diagnostic, ALL_OK },
    [WIN_SPECIFY] = { cmd_nop, HD_CFA_OK | SET_DSC },
    [WIN_STANDBYNOW2] = { cmd_nop, HD_CFA_OK },
    [WIN_IDLEIMMEDIATE2] = { cmd_nop, HD_CFA_OK },
    [WIN_STANDBY2] = { cmd_nop, HD_CFA_OK },
    [WIN_SETIDLE2] = { cmd_nop, HD_CFA_OK },
    [WIN_CHECKPOWERMODE2] = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
    [WIN_SLEEPNOW2] = { cmd_nop, HD_CFA_OK },
    [WIN_PACKETCMD] = { cmd_packet, CD_OK },
    [WIN_PIDENTIFY] = { cmd_identify_packet, CD_OK },
    [WIN_SMART] = { cmd_smart, HD_CFA_OK | SET_DSC },
    [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
    [CFA_ERASE_SECTORS] = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
    [WIN_MULTREAD] = { cmd_read_multiple, HD_CFA_OK },
    [WIN_MULTWRITE] = { cmd_write_multiple, HD_CFA_OK },
    [WIN_SETMULT] = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
    [WIN_READDMA] = { cmd_read_dma, HD_CFA_OK },
    [WIN_READDMA_ONCE] = { cmd_read_dma, HD_CFA_OK },
    [WIN_WRITEDMA] = { cmd_write_dma, HD_CFA_OK },
    [WIN_WRITEDMA_ONCE] = { cmd_write_dma, HD_CFA_OK },
    [CFA_WRITE_MULTI_WO_ERASE] = { cmd_write_multiple, CFA_OK },
    [WIN_STANDBYNOW1] = { cmd_nop, HD_CFA_OK },
    [WIN_IDLEIMMEDIATE] = { cmd_nop, HD_CFA_OK },
    [WIN_STANDBY] = { cmd_nop, HD_CFA_OK },
    [WIN_SETIDLE1] = { cmd_nop, HD_CFA_OK },
    [WIN_CHECKPOWERMODE1] = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
    [WIN_SLEEPNOW1] = { cmd_nop, HD_CFA_OK },
    [WIN_FLUSH_CACHE] = { cmd_flush_cache, ALL_OK },
    [WIN_FLUSH_CACHE_EXT] = { cmd_flush_cache, HD_CFA_OK },
    [WIN_IDENTIFY] = { cmd_identify, ALL_OK },
    [WIN_SETFEATURES] = { cmd_set_features, ALL_OK | SET_DSC },
    [IBM_SENSE_CONDITION] = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
    [CFA_WEAR_LEVEL] = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
    [WIN_READ_NATIVE_MAX] = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
};
2044 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
2046 return cmd < ARRAY_SIZE(ide_cmd_table)
2047 && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
/* Execute an ATA command byte written to the command register.
 * Synchronous handlers return true and are completed here; asynchronous
 * ones return false and complete from their callbacks. */
void ide_exec_cmd(IDEBus *bus, uint32_t val)
{
    IDEState *s;
    bool complete;

    s = idebus_active_if(bus);
    trace_ide_exec_cmd(bus, s, val);

    /* ignore commands to non existent slave */
    if (s != bus->ifs && !s->blk) {
        return;
    }

    /* Only RESET is allowed while BSY and/or DRQ are set,
     * and only to ATAPI devices. */
    if (s->status & (BUSY_STAT|DRQ_STAT)) {
        if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
            return;
        }
    }

    if (!ide_cmd_permitted(s, val)) {
        ide_abort_command(s);
        ide_set_irq(s->bus);
        return;
    }

    s->status = READY_STAT | BUSY_STAT;
    s->error = 0;
    s->io_buffer_offset = 0;

    complete = ide_cmd_table[val].handler(s, val);
    if (complete) {
        s->status &= ~BUSY_STAT;
        /* ERR_STAT and the error register must agree on completion. */
        assert(!!s->error == !!(s->status & ERR_STAT));

        if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
            s->status |= SEEK_STAT;
        }

        ide_cmd_done(s);
        ide_set_irq(s->bus);
    }
}
/* IOport [R]ead [R]egisters */
/* Offsets (addr & 7) of the readable task-file registers. */
enum ATA_IOPORT_RR {
    ATA_IOPORT_RR_DATA = 0,
    ATA_IOPORT_RR_ERROR = 1,
    ATA_IOPORT_RR_SECTOR_COUNT = 2,
    ATA_IOPORT_RR_SECTOR_NUMBER = 3,
    ATA_IOPORT_RR_CYLINDER_LOW = 4,
    ATA_IOPORT_RR_CYLINDER_HIGH = 5,
    ATA_IOPORT_RR_DEVICE_HEAD = 6,
    ATA_IOPORT_RR_STATUS = 7,
    ATA_IOPORT_RR_NUM_REGISTERS,
};

/* Human-readable register names, used by the trace points. */
const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = {
    [ATA_IOPORT_RR_DATA] = "Data",
    [ATA_IOPORT_RR_ERROR] = "Error",
    [ATA_IOPORT_RR_SECTOR_COUNT] = "Sector Count",
    [ATA_IOPORT_RR_SECTOR_NUMBER] = "Sector Number",
    [ATA_IOPORT_RR_CYLINDER_LOW] = "Cylinder Low",
    [ATA_IOPORT_RR_CYLINDER_HIGH] = "Cylinder High",
    [ATA_IOPORT_RR_DEVICE_HEAD] = "Device/Head",
    [ATA_IOPORT_RR_STATUS] = "Status"
};
/* Guest read of a task-file register.  An empty bus, or a selected but
 * absent slave, reads as zero for most registers.  Reading the status
 * register also deasserts the IRQ line. */
uint32_t ide_ioport_read(void *opaque, uint32_t addr)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    uint32_t reg_num;
    int ret, hob;

    reg_num = addr & 7;
    /* FIXME: HOB readback uses bit 7, but it's always set right now */
    //hob = s->select & (1 << 7);
    hob = 0;
    switch (reg_num) {
    case ATA_IOPORT_RR_DATA:
        /* The data port is handled by ide_data_read*; reads here float. */
        ret = 0xff;
        break;
    case ATA_IOPORT_RR_ERROR:
        if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
            (s != bus->ifs && !s->blk)) {
            ret = 0;
        } else if (!hob) {
            ret = s->error;
        } else {
            ret = s->hob_feature;
        }
        break;
    case ATA_IOPORT_RR_SECTOR_COUNT:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->nsector & 0xff;
        } else {
            ret = s->hob_nsector;
        }
        break;
    case ATA_IOPORT_RR_SECTOR_NUMBER:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->sector;
        } else {
            ret = s->hob_sector;
        }
        break;
    case ATA_IOPORT_RR_CYLINDER_LOW:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->lcyl;
        } else {
            ret = s->hob_lcyl;
        }
        break;
    case ATA_IOPORT_RR_CYLINDER_HIGH:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->hcyl;
        } else {
            ret = s->hob_hcyl;
        }
        break;
    case ATA_IOPORT_RR_DEVICE_HEAD:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else {
            ret = s->select;
        }
        break;
    default:
    case ATA_IOPORT_RR_STATUS:
        if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
            (s != bus->ifs && !s->blk)) {
            ret = 0;
        } else {
            ret = s->status;
        }
        /* Reading the status register acknowledges the interrupt. */
        qemu_irq_lower(bus->irq);
        break;
    }

    trace_ide_ioport_read(addr, ATA_IOPORT_RR_lookup[reg_num], ret, bus, s);
    return ret;
}
2203 uint32_t ide_status_read(void *opaque, uint32_t addr)
2205 IDEBus *bus = opaque;
2206 IDEState *s = idebus_active_if(bus);
2207 int ret;
2209 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2210 (s != bus->ifs && !s->blk)) {
2211 ret = 0;
2212 } else {
2213 ret = s->status;
2216 trace_ide_status_read(addr, ret, bus, s);
2217 return ret;
/* Write to the device control register: handles the software-reset bit
 * transitions (affects both drives on the bus). */
void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val)
{
    IDEBus *bus = opaque;
    IDEState *s;
    int i;

    trace_ide_cmd_write(addr, val, bus);

    /* common for both drives */
    if (!(bus->cmd & IDE_CMD_RESET) &&
        (val & IDE_CMD_RESET)) {
        /* reset low to high */
        for(i = 0;i < 2; i++) {
            s = &bus->ifs[i];
            s->status = BUSY_STAT | SEEK_STAT;
            s->error = 0x01;
        }
    } else if ((bus->cmd & IDE_CMD_RESET) &&
               !(val & IDE_CMD_RESET)) {
        /* high to low */
        for(i = 0;i < 2; i++) {
            s = &bus->ifs[i];
            if (s->drive_kind == IDE_CD)
                s->status = 0x00; /* NOTE: READY is _not_ set */
            else
                s->status = READY_STAT | SEEK_STAT;
            ide_set_signature(s);
        }
    }

    bus->cmd = val;
}
2254 * Returns true if the running PIO transfer is a PIO out (i.e. data is
2255 * transferred from the device to the guest), false if it's a PIO in
2257 static bool ide_is_pio_out(IDEState *s)
2259 if (s->end_transfer_func == ide_sector_write ||
2260 s->end_transfer_func == ide_atapi_cmd) {
2261 return false;
2262 } else if (s->end_transfer_func == ide_sector_read ||
2263 s->end_transfer_func == ide_transfer_stop ||
2264 s->end_transfer_func == ide_atapi_cmd_reply_end ||
2265 s->end_transfer_func == ide_dummy_transfer_stop) {
2266 return true;
2269 abort();
/* 16-bit guest write to the PIO data port. */
void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    uint8_t *p;

    trace_ide_data_writew(addr, val, bus, s);

    /* PIO data access allowed only when DRQ bit is set. The result of a write
     * during PIO out is indeterminate, just ignore it. */
    if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
        return;
    }

    p = s->data_ptr;
    if (p + 2 > s->data_end) {
        /* Never overrun the transfer buffer. */
        return;
    }

    *(uint16_t *)p = le16_to_cpu(val);
    p += 2;
    s->data_ptr = p;
    if (p >= s->data_end) {
        /* Buffer full: clear DRQ and advance the transfer state machine. */
        s->status &= ~DRQ_STAT;
        s->end_transfer_func(s);
    }
}
/* 16-bit guest read from the PIO data port. */
uint32_t ide_data_readw(void *opaque, uint32_t addr)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    uint8_t *p;
    int ret;

    /* PIO data access allowed only when DRQ bit is set. The result of a read
     * during PIO in is indeterminate, return 0 and don't move forward. */
    if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
        return 0;
    }

    p = s->data_ptr;
    if (p + 2 > s->data_end) {
        /* Never read past the transfer buffer. */
        return 0;
    }

    ret = cpu_to_le16(*(uint16_t *)p);
    p += 2;
    s->data_ptr = p;
    if (p >= s->data_end) {
        /* Buffer drained: clear DRQ and advance the transfer state machine. */
        s->status &= ~DRQ_STAT;
        s->end_transfer_func(s);
    }

    trace_ide_data_readw(addr, ret, bus, s);
    return ret;
}
2330 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2332 IDEBus *bus = opaque;
2333 IDEState *s = idebus_active_if(bus);
2334 uint8_t *p;
2336 trace_ide_data_writel(addr, val, bus, s);
2338 /* PIO data access allowed only when DRQ bit is set. The result of a write
2339 * during PIO out is indeterminate, just ignore it. */
2340 if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2341 return;
2344 p = s->data_ptr;
2345 if (p + 4 > s->data_end) {
2346 return;
2349 *(uint32_t *)p = le32_to_cpu(val);
2350 p += 4;
2351 s->data_ptr = p;
2352 if (p >= s->data_end) {
2353 s->status &= ~DRQ_STAT;
2354 s->end_transfer_func(s);
2358 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2360 IDEBus *bus = opaque;
2361 IDEState *s = idebus_active_if(bus);
2362 uint8_t *p;
2363 int ret;
2365 /* PIO data access allowed only when DRQ bit is set. The result of a read
2366 * during PIO in is indeterminate, return 0 and don't move forward. */
2367 if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2368 ret = 0;
2369 goto out;
2372 p = s->data_ptr;
2373 if (p + 4 > s->data_end) {
2374 return 0;
2377 ret = cpu_to_le32(*(uint32_t *)p);
2378 p += 4;
2379 s->data_ptr = p;
2380 if (p >= s->data_end) {
2381 s->status &= ~DRQ_STAT;
2382 s->end_transfer_func(s);
2385 out:
2386 trace_ide_data_readl(addr, ret, bus, s);
2387 return ret;
2390 static void ide_dummy_transfer_stop(IDEState *s)
2392 s->data_ptr = s->io_buffer;
2393 s->data_end = s->io_buffer;
2394 s->io_buffer[0] = 0xff;
2395 s->io_buffer[1] = 0xff;
2396 s->io_buffer[2] = 0xff;
2397 s->io_buffer[3] = 0xff;
/*
 * Full reset of an IDE bus: reset both drives, drop HOB state, cancel
 * any in-flight async DMA request, and reset the DMA provider if it
 * implements a reset hook.
 */
void ide_bus_reset(IDEBus *bus)
{
    bus->unit = 0;
    bus->cmd = 0;
    ide_reset(&bus->ifs[0]);
    ide_reset(&bus->ifs[1]);
    ide_clear_hob(bus);

    /* pending async DMA */
    if (bus->dma->aiocb) {
        trace_ide_bus_reset_aio();
        /* NOTE(review): assumes blk_aio_cancel drains the request before
         * returning, so no completion runs against reset state — confirm. */
        blk_aio_cancel(bus->dma->aiocb);
        bus->dma->aiocb = NULL;
    }

    /* reset dma provider too */
    if (bus->dma->ops->reset) {
        bus->dma->ops->reset(bus->dma);
    }
}
2421 static bool ide_cd_is_tray_open(void *opaque)
2423 return ((IDEState *)opaque)->tray_open;
2426 static bool ide_cd_is_medium_locked(void *opaque)
2428 return ((IDEState *)opaque)->tray_locked;
2431 static void ide_resize_cb(void *opaque)
2433 IDEState *s = opaque;
2434 uint64_t nb_sectors;
2436 if (!s->identify_set) {
2437 return;
2440 blk_get_geometry(s->blk, &nb_sectors);
2441 s->nb_sectors = nb_sectors;
2443 /* Update the identify data buffer. */
2444 if (s->drive_kind == IDE_CFATA) {
2445 ide_cfata_identify_size(s);
2446 } else {
2447 /* IDE_CD uses a different set of callbacks entirely. */
2448 assert(s->drive_kind != IDE_CD);
2449 ide_identify_size(s);
/* Removable-media callbacks registered for CD-ROM drives. */
static const BlockDevOps ide_cd_block_ops = {
    .change_media_cb = ide_cd_change_cb,
    .eject_request_cb = ide_cd_eject_request_cb,
    .is_tray_open = ide_cd_is_tray_open,
    .is_medium_locked = ide_cd_is_medium_locked,
};

/* Hard disks (and CFATA) only react to backend resizes. */
static const BlockDevOps ide_hd_block_ops = {
    .resize_cb = ide_resize_cb,
};
2464 int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2465 const char *version, const char *serial, const char *model,
2466 uint64_t wwn,
2467 uint32_t cylinders, uint32_t heads, uint32_t secs,
2468 int chs_trans, Error **errp)
2470 uint64_t nb_sectors;
2472 s->blk = blk;
2473 s->drive_kind = kind;
2475 blk_get_geometry(blk, &nb_sectors);
2476 s->cylinders = cylinders;
2477 s->heads = heads;
2478 s->sectors = secs;
2479 s->chs_trans = chs_trans;
2480 s->nb_sectors = nb_sectors;
2481 s->wwn = wwn;
2482 /* The SMART values should be preserved across power cycles
2483 but they aren't. */
2484 s->smart_enabled = 1;
2485 s->smart_autosave = 1;
2486 s->smart_errors = 0;
2487 s->smart_selftest_count = 0;
2488 if (kind == IDE_CD) {
2489 blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2490 blk_set_guest_block_size(blk, 2048);
2491 } else {
2492 if (!blk_is_inserted(s->blk)) {
2493 error_setg(errp, "Device needs media, but drive is empty");
2494 return -1;
2496 if (blk_is_read_only(blk)) {
2497 error_setg(errp, "Can't use a read-only drive");
2498 return -1;
2500 blk_set_dev_ops(blk, &ide_hd_block_ops, s);
2502 if (serial) {
2503 pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
2504 } else {
2505 snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2506 "QM%05d", s->drive_serial);
2508 if (model) {
2509 pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
2510 } else {
2511 switch (kind) {
2512 case IDE_CD:
2513 strcpy(s->drive_model_str, "QEMU DVD-ROM");
2514 break;
2515 case IDE_CFATA:
2516 strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2517 break;
2518 default:
2519 strcpy(s->drive_model_str, "QEMU HARDDISK");
2520 break;
2524 if (version) {
2525 pstrcpy(s->version, sizeof(s->version), version);
2526 } else {
2527 pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2530 ide_reset(s);
2531 blk_iostatus_enable(blk);
2532 return 0;
/*
 * One-time initialization of a single drive slot: assign a unique serial
 * number, allocate the (aligned) PIO/DMA and SMART self-test buffers,
 * and create the delayed sector-write timer.
 */
static void ide_init1(IDEBus *bus, int unit)
{
    /* Monotonic counter shared by all drives; used for "QM%05d" serials. */
    static int drive_serial = 1;
    IDEState *s = &bus->ifs[unit];

    s->bus = bus;
    s->unit = unit;
    s->drive_serial = drive_serial++;
    /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
    s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
    s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
    memset(s->io_buffer, 0, s->io_buffer_total_len);

    s->smart_selftest_data = blk_blockalign(s->blk, 512);
    memset(s->smart_selftest_data, 0, 512);

    s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                         ide_sector_write_timer_cb, s);
}
/* Placeholder IDEDMA callbacks for buses with no DMA controller attached
 * (see ide_dma_nop_ops / ide_dma_nop below). */
static int ide_nop_int(IDEDMA *dma, int x)
{
    return 0;
}

static void ide_nop(IDEDMA *dma)
{
}

static int32_t ide_nop_int32(IDEDMA *dma, int32_t l)
{
    return 0;
}
/* Callback table for the no-op DMA provider: every hook succeeds and
 * transfers nothing. */
static const IDEDMAOps ide_dma_nop_ops = {
    .prepare_buf = ide_nop_int32,
    .restart_dma = ide_nop,
    .rw_buf = ide_nop_int,
};
/*
 * Re-issue an interrupted DMA command: restore the drive/sector/count
 * saved in the bus retry fields, let the HBA re-arm its DMA engine, and
 * start the transfer again from the beginning of the request.
 */
static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
{
    s->unit = s->bus->retry_unit;
    ide_set_sector(s, s->bus->retry_sector_num);
    s->nsector = s->bus->retry_nsector;
    s->bus->dma->ops->restart_dma(s->bus->dma);
    /* Restart from scratch: nothing staged in the I/O buffer yet. */
    s->io_buffer_size = 0;
    s->dma_cmd = dma_cmd;
    ide_start_dma(s, ide_dma_cb);
}
/*
 * Bottom half that resumes a request interrupted while the VM was
 * stopped. Dispatches on the saved bus->error_status flags to re-issue
 * the appropriate operation: HBA kick, DMA (read/write/TRIM), PIO
 * sector I/O, cache flush, or ATAPI DMA. Aborts on unknown flags.
 */
static void ide_restart_bh(void *opaque)
{
    IDEBus *bus = opaque;
    IDEState *s;
    bool is_read;
    int error_status;

    /* One-shot BH: tear it down so ide_restart_cb can schedule a new one. */
    qemu_bh_delete(bus->bh);
    bus->bh = NULL;

    error_status = bus->error_status;
    if (bus->error_status == 0) {
        return;
    }

    s = idebus_active_if(bus);
    is_read = (bus->error_status & IDE_RETRY_READ) != 0;

    /* The error status must be cleared before resubmitting the request: The
     * request may fail again, and this case can only be distinguished if the
     * called function can set a new error status. */
    bus->error_status = 0;

    /* The HBA has generically asked to be kicked on retry */
    if (error_status & IDE_RETRY_HBA) {
        if (s->bus->dma->ops->restart) {
            s->bus->dma->ops->restart(s->bus->dma);
        }
    } else if (IS_IDE_RETRY_DMA(error_status)) {
        if (error_status & IDE_RETRY_TRIM) {
            ide_restart_dma(s, IDE_DMA_TRIM);
        } else {
            ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
        }
    } else if (IS_IDE_RETRY_PIO(error_status)) {
        if (is_read) {
            ide_sector_read(s);
        } else {
            ide_sector_write(s);
        }
    } else if (error_status & IDE_RETRY_FLUSH) {
        ide_flush_cache(s);
    } else if (IS_IDE_RETRY_ATAPI(error_status)) {
        assert(s->end_transfer_func == ide_atapi_cmd);
        ide_atapi_dma_restart(s);
    } else {
        /* Unknown retry flag combination: internal logic error. */
        abort();
    }
}
2636 static void ide_restart_cb(void *opaque, int running, RunState state)
2638 IDEBus *bus = opaque;
2640 if (!running)
2641 return;
2643 if (!bus->bh) {
2644 bus->bh = qemu_bh_new(ide_restart_bh, bus);
2645 qemu_bh_schedule(bus->bh);
/*
 * Hook the bus up to VM run-state changes so interrupted requests are
 * retried on resume — only meaningful if the DMA provider can restart.
 */
void ide_register_restart_cb(IDEBus *bus)
{
    if (bus->dma->ops->restart_dma) {
        bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
    }
}
/* Default DMA provider installed by ide_init2 until an HBA replaces it. */
static IDEDMA ide_dma_nop = {
    .ops = &ide_dma_nop_ops,
    .aiocb = NULL,
};
2661 void ide_init2(IDEBus *bus, qemu_irq irq)
2663 int i;
2665 for(i = 0; i < 2; i++) {
2666 ide_init1(bus, i);
2667 ide_reset(&bus->ifs[i]);
2669 bus->irq = irq;
2670 bus->dma = &ide_dma_nop;
/*
 * Release the per-drive resources allocated in ide_init1(): the
 * sector-write timer and the two aligned buffers.
 */
void ide_exit(IDEState *s)
{
    timer_del(s->sector_write_timer);
    timer_free(s->sector_write_timer);
    qemu_vfree(s->smart_selftest_data);
    qemu_vfree(s->io_buffer);
}
/* Command block: byte-wide task-file registers at offsets 0-7, plus 16-
 * and 32-bit access to the data register at offset 0. */
static const MemoryRegionPortio ide_portio_list[] = {
    { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
    { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
    { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
    PORTIO_END_OF_LIST(),
};

/* Control block: single byte register (alternate status / device control). */
static const MemoryRegionPortio ide_portio2_list[] = {
    { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write },
    PORTIO_END_OF_LIST(),
};
/*
 * Register the legacy ISA I/O ports of one bus: the command block at
 * @iobase and, when @iobase2 is non-zero, the control block at @iobase2.
 */
void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
{
    /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
       bridge has been setup properly to always register with ISA. */
    isa_register_portio_list(dev, &bus->portio_list,
                             iobase, ide_portio_list, bus, "ide");

    if (iobase2) {
        isa_register_portio_list(dev, &bus->portio2_list,
                                 iobase2, ide_portio2_list, bus, "ide");
    }
}
2706 static bool is_identify_set(void *opaque, int version_id)
2708 IDEState *s = opaque;
2710 return s->identify_set != 0;
/*
 * Maps end_transfer_func pointers to small integers for migration (see
 * ide_drive_pio_pre_save / ide_drive_pio_post_load). The order is part
 * of the migration format: indices travel in the "ide_drive/pio_state"
 * subsection, so entries must never be reordered or removed.
 */
static EndTransferFunc* transfer_end_table[] = {
    ide_sector_read,
    ide_sector_write,
    ide_transfer_stop,
    ide_atapi_cmd_reply_end,
    ide_atapi_cmd,
    ide_dummy_transfer_stop,
};
2722 static int transfer_end_table_idx(EndTransferFunc *fn)
2724 int i;
2726 for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2727 if (transfer_end_table[i] == fn)
2728 return i;
2730 return -1;
2733 static int ide_drive_post_load(void *opaque, int version_id)
2735 IDEState *s = opaque;
2737 if (s->blk && s->identify_set) {
2738 blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
2740 return 0;
/*
 * Restore in-flight PIO state after migration: validate the function
 * index received from the wire before using it, then rebuild the data
 * pointers from the saved buffer offsets.
 */
static int ide_drive_pio_post_load(void *opaque, int version_id)
{
    IDEState *s = opaque;

    /* Bounds-check untrusted migration data before indexing the table. */
    if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
        return -EINVAL;
    }
    s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
    s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
    s->data_end = s->data_ptr + s->cur_io_buffer_len;
    s->atapi_dma = s->feature & 1; /* as per cmd_packet */

    return 0;
}
/*
 * Convert the live PIO pointers into migratable buffer offsets and map
 * the current end_transfer_func to its table index. An unrecognized
 * function falls back to index 2 (ide_transfer_stop) with a warning.
 */
static int ide_drive_pio_pre_save(void *opaque)
{
    IDEState *s = opaque;
    int idx;

    s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
    s->cur_io_buffer_len = s->data_end - s->data_ptr;

    idx = transfer_end_table_idx(s->end_transfer_func);
    if (idx == -1) {
        fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
                __func__);
        s->end_transfer_fn_idx = 2;
    } else {
        s->end_transfer_fn_idx = idx;
    }

    return 0;
}
2778 static bool ide_drive_pio_state_needed(void *opaque)
2780 IDEState *s = opaque;
2782 return ((s->status & DRQ_STAT) != 0)
2783 || (s->bus->error_status & IDE_RETRY_PIO);
2786 static bool ide_tray_state_needed(void *opaque)
2788 IDEState *s = opaque;
2790 return s->tray_open || s->tray_locked;
2793 static bool ide_atapi_gesn_needed(void *opaque)
2795 IDEState *s = opaque;
2797 return s->events.new_media || s->events.eject_request;
2800 static bool ide_error_needed(void *opaque)
2802 IDEBus *bus = opaque;
2804 return (bus->error_status != 0);
/* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
static const VMStateDescription vmstate_ide_atapi_gesn_state = {
    .name ="ide_drive/atapi/gesn_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_atapi_gesn_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(events.new_media, IDEState),
        VMSTATE_BOOL(events.eject_request, IDEState),
        VMSTATE_END_OF_LIST()
    }
};

/* CD tray position and medium lock state. */
static const VMStateDescription vmstate_ide_tray_state = {
    .name = "ide_drive/tray_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_tray_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(tray_open, IDEState),
        VMSTATE_BOOL(tray_locked, IDEState),
        VMSTATE_END_OF_LIST()
    }
};

/* In-flight PIO transfer: the staged buffer plus the offsets and
 * end_transfer_func index used by pre_save/post_load above. */
static const VMStateDescription vmstate_ide_drive_pio_state = {
    .name = "ide_drive/pio_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = ide_drive_pio_pre_save,
    .post_load = ide_drive_pio_post_load,
    .needed = ide_drive_pio_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(req_nb_sectors, IDEState),
        VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
                             vmstate_info_uint8, uint8_t),
        VMSTATE_INT32(cur_io_buffer_offset, IDEState),
        VMSTATE_INT32(cur_io_buffer_len, IDEState),
        VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
        VMSTATE_INT32(elementary_transfer_size, IDEState),
        VMSTATE_INT32(packet_transfer_size, IDEState),
        VMSTATE_END_OF_LIST()
    }
};
/* Main per-drive migration state: task-file registers, IDENTIFY data and
 * ATAPI sense; optional state travels in the subsections below. */
const VMStateDescription vmstate_ide_drive = {
    .name = "ide_drive",
    .version_id = 3,
    .minimum_version_id = 0,
    .post_load = ide_drive_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(mult_sectors, IDEState),
        VMSTATE_INT32(identify_set, IDEState),
        VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
        VMSTATE_UINT8(feature, IDEState),
        VMSTATE_UINT8(error, IDEState),
        VMSTATE_UINT32(nsector, IDEState),
        VMSTATE_UINT8(sector, IDEState),
        VMSTATE_UINT8(lcyl, IDEState),
        VMSTATE_UINT8(hcyl, IDEState),
        VMSTATE_UINT8(hob_feature, IDEState),
        VMSTATE_UINT8(hob_sector, IDEState),
        VMSTATE_UINT8(hob_nsector, IDEState),
        VMSTATE_UINT8(hob_lcyl, IDEState),
        VMSTATE_UINT8(hob_hcyl, IDEState),
        VMSTATE_UINT8(select, IDEState),
        VMSTATE_UINT8(status, IDEState),
        VMSTATE_UINT8(lba48, IDEState),
        VMSTATE_UINT8(sense_key, IDEState),
        VMSTATE_UINT8(asc, IDEState),
        VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_ide_drive_pio_state,
        &vmstate_ide_tray_state,
        &vmstate_ide_atapi_gesn_state,
        NULL
    }
};
/* Pending error/retry state of a bus (subsection: only sent while an
 * error is outstanding, see ide_error_needed). */
static const VMStateDescription vmstate_ide_error_status = {
    .name ="ide_bus/error",
    .version_id = 2,
    .minimum_version_id = 1,
    .needed = ide_error_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(error_status, IDEBus),
        VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
        VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
        VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
        VMSTATE_END_OF_LIST()
    }
};

/* Per-bus migration state: device-control register and selected unit. */
const VMStateDescription vmstate_ide_bus = {
    .name = "ide_bus",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(cmd, IDEBus),
        VMSTATE_UINT8(unit, IDEBus),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_ide_error_status,
        NULL
    }
};
2917 void ide_drive_get(DriveInfo **hd, int n)
2919 int i;
2921 for (i = 0; i < n; i++) {
2922 hd[i] = drive_get_by_index(IF_IDE, i);