hw/ide: Rename idebus_active_if() -> ide_bus_active_if()
[qemu/kevin.git] / hw / ide / core.c
blob2d034731cf37bb0f22813ab4c9490ecb36a0550d
1 /*
2 * QEMU IDE disk and CD/DVD-ROM Emulator
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2006 Openedhand Ltd.
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
26 #include "qemu/osdep.h"
27 #include "hw/irq.h"
28 #include "hw/isa/isa.h"
29 #include "migration/vmstate.h"
30 #include "qemu/error-report.h"
31 #include "qemu/main-loop.h"
32 #include "qemu/timer.h"
33 #include "qemu/hw-version.h"
34 #include "qemu/memalign.h"
35 #include "sysemu/sysemu.h"
36 #include "sysemu/blockdev.h"
37 #include "sysemu/dma.h"
38 #include "hw/block/block.h"
39 #include "sysemu/block-backend.h"
40 #include "qapi/error.h"
41 #include "qemu/cutils.h"
42 #include "sysemu/replay.h"
43 #include "sysemu/runstate.h"
44 #include "hw/ide/internal.h"
45 #include "trace.h"
/* These values were based on a Seagate ST3500418AS but have been modified
   to make more sense in QEMU */
/*
 * Canned SMART attribute table. Each row: attribute id, flags, hflags,
 * current value, worst value, six raw data bytes, threshold.
 */
static const int smart_attributes[][12] = {
    /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
    /* raw read error rate*/
    { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
    /* spin up */
    { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* start stop count */
    { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
    /* remapped sectors */
    { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
    /* power on hours */
    { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* power cycle count */
    { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* airflow-temperature-celsius */
    { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
};
/* Printable names for each DMA command, indexed by enum ide_dma_cmd. */
const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT] = {
    [IDE_DMA_READ] = "DMA READ",
    [IDE_DMA_WRITE] = "DMA WRITE",
    [IDE_DMA_TRIM] = "DMA TRIM",
    [IDE_DMA_ATAPI] = "DMA ATAPI"
};
74 static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval)
76 if ((unsigned)enval < IDE_DMA__COUNT) {
77 return IDE_DMA_CMD_lookup[enval];
79 return "DMA UNKNOWN CMD";
82 static void ide_dummy_transfer_stop(IDEState *s);
/*
 * Fill the @len-byte field @str with characters of @src, space-padding
 * past the end of @src.  Adjacent bytes are swapped (index ^ 1) to match
 * the byte order of 16-bit words in ATA IDENTIFY string fields.
 * The result is not NUL-terminated.
 */
static void padstr(char *str, const char *src, int len)
{
    int i;

    for (i = 0; i < len; i++) {
        int c = *src ? *src++ : ' ';
        str[i ^ 1] = c;
    }
}
/* Store @v into *@p as a little-endian 16-bit value. */
static void put_le16(uint16_t *p, unsigned int v)
{
    *p = cpu_to_le16(v);
}
101 static void ide_identify_size(IDEState *s)
103 uint16_t *p = (uint16_t *)s->identify_data;
104 int64_t nb_sectors_lba28 = s->nb_sectors;
105 if (nb_sectors_lba28 >= 1 << 28) {
106 nb_sectors_lba28 = (1 << 28) - 1;
108 put_le16(p + 60, nb_sectors_lba28);
109 put_le16(p + 61, nb_sectors_lba28 >> 16);
110 put_le16(p + 100, s->nb_sectors);
111 put_le16(p + 101, s->nb_sectors >> 16);
112 put_le16(p + 102, s->nb_sectors >> 32);
113 put_le16(p + 103, s->nb_sectors >> 48);
/*
 * Build the 512-byte ATA IDENTIFY DEVICE response for a hard disk into
 * s->io_buffer.  The result is cached in s->identify_data; once
 * s->identify_set is true we only re-copy the cached words.
 */
static void ide_identify(IDEState *s)
{
    uint16_t *p;
    unsigned int oldsize;
    IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;

    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
        goto fill_buffer;
    }
    memset(p, 0, sizeof(s->identify_data));

    put_le16(p + 0, 0x0040);                     /* word 0: ATA device */
    put_le16(p + 1, s->cylinders);
    put_le16(p + 3, s->heads);
    put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
    put_le16(p + 5, 512); /* XXX: retired, remove ? */
    put_le16(p + 6, s->sectors);
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 20, 3); /* XXX: retired, remove ? */
    put_le16(p + 21, 512); /* cache size in sectors */
    put_le16(p + 22, 4); /* ecc bytes */
    padstr((char *)(p + 23), s->version, 8); /* firmware version */
    padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
#if MAX_MULT_SECTORS > 1
    put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
#endif
    put_le16(p + 48, 1); /* dword I/O */
    put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
    put_le16(p + 51, 0x200); /* PIO transfer cycle */
    put_le16(p + 52, 0x200); /* DMA transfer cycle */
    put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
    put_le16(p + 54, s->cylinders);
    put_le16(p + 55, s->heads);
    put_le16(p + 56, s->sectors);
    /* Current CHS capacity in sectors (words 57-58). */
    oldsize = s->cylinders * s->heads * s->sectors;
    put_le16(p + 57, oldsize);
    put_le16(p + 58, oldsize >> 16);
    if (s->mult_sectors)
        put_le16(p + 59, 0x100 | s->mult_sectors);
    /* *(p + 60) := nb_sectors       -- see ide_identify_size */
    /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
    put_le16(p + 62, 0x07); /* single word dma0-2 supported */
    put_le16(p + 63, 0x07); /* mdma0-2 supported */
    put_le16(p + 64, 0x03); /* pio3-4 supported */
    put_le16(p + 65, 120);
    put_le16(p + 66, 120);
    put_le16(p + 67, 120);
    put_le16(p + 68, 120);
    if (dev && dev->conf.discard_granularity) {
        put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
    }

    if (s->ncq_queues) {
        put_le16(p + 75, s->ncq_queues - 1);
        /* NCQ supported */
        put_le16(p + 76, (1 << 8));
    }

    put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
    put_le16(p + 81, 0x16); /* conforms to ata5 */
    /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
    put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
    /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
    put_le16(p + 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
    /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
    if (s->wwn) {
        put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
    } else {
        put_le16(p + 84, (1 << 14) | 0);
    }
    /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
    if (blk_enable_write_cache(s->blk)) {
        put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
    } else {
        put_le16(p + 85, (1 << 14) | 1);
    }
    /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
    put_le16(p + 86, (1 << 13) | (1 <<12) | (1 << 10));
    /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
    if (s->wwn) {
        put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
    } else {
        put_le16(p + 87, (1 << 14) | 0);
    }
    put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
    put_le16(p + 93, 1 | (1 << 14) | 0x2000);
    /* *(p + 100) := nb_sectors       -- see ide_identify_size */
    /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
    /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
    /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */

    if (dev && dev->conf.physical_block_size)
        put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
    if (s->wwn) {
        /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
        put_le16(p + 108, s->wwn >> 48);
        put_le16(p + 109, s->wwn >> 32);
        put_le16(p + 110, s->wwn >> 16);
        put_le16(p + 111, s->wwn);
    }
    if (dev && dev->conf.discard_granularity) {
        put_le16(p + 169, 1); /* TRIM support */
    }
    if (dev) {
        put_le16(p + 217, dev->rotation_rate); /* Nominal media rotation rate */
    }

    ide_identify_size(s);
    s->identify_set = 1;

fill_buffer:
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
}
/*
 * Build the 512-byte IDENTIFY PACKET DEVICE response for an ATAPI
 * CD/DVD drive into s->io_buffer, caching it in s->identify_data.
 */
static void ide_atapi_identify(IDEState *s)
{
    uint16_t *p;

    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
        goto fill_buffer;
    }
    memset(p, 0, sizeof(s->identify_data));

    /* Removable CDROM, 50us response, 12 byte packets */
    put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 20, 3); /* buffer type */
    put_le16(p + 21, 512); /* cache size in sectors */
    put_le16(p + 22, 4); /* ecc bytes */
    padstr((char *)(p + 23), s->version, 8); /* firmware version */
    padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
    put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
#ifdef USE_DMA_CDROM
    put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
    put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
    put_le16(p + 62, 7);  /* single word dma0-2 supported */
    put_le16(p + 63, 7);  /* mdma0-2 supported */
#else
    put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
    put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
    put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
#endif
    put_le16(p + 64, 3); /* pio3-4 supported */
    put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
    put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
    put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
    put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */

    put_le16(p + 71, 30); /* in ns */
    put_le16(p + 72, 30); /* in ns */

    if (s->ncq_queues) {
        put_le16(p + 75, s->ncq_queues - 1);
        /* NCQ supported */
        put_le16(p + 76, (1 << 8));
    }

    put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
    if (s->wwn) {
        put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
        put_le16(p + 87, (1 << 8)); /* WWN enabled */
    }

#ifdef USE_DMA_CDROM
    put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
#endif

    if (s->wwn) {
        /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
        put_le16(p + 108, s->wwn >> 48);
        put_le16(p + 109, s->wwn >> 32);
        put_le16(p + 110, s->wwn >> 16);
        put_le16(p + 111, s->wwn);
    }

    s->identify_set = 1;

fill_buffer:
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
}
299 static void ide_cfata_identify_size(IDEState *s)
301 uint16_t *p = (uint16_t *)s->identify_data;
302 put_le16(p + 7, s->nb_sectors >> 16); /* Sectors per card */
303 put_le16(p + 8, s->nb_sectors); /* Sectors per card */
304 put_le16(p + 60, s->nb_sectors); /* Total LBA sectors */
305 put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
/*
 * Build the IDENTIFY response for a CompactFlash (CFATA) card into
 * s->io_buffer, caching it in s->identify_data.
 */
static void ide_cfata_identify(IDEState *s)
{
    uint16_t *p;
    uint32_t cur_sec;

    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
        goto fill_buffer;
    }
    memset(p, 0, sizeof(s->identify_data));

    /* Current capacity from the CHS translation geometry. */
    cur_sec = s->cylinders * s->heads * s->sectors;

    put_le16(p + 0, 0x848a);			/* CF Storage Card signature */
    put_le16(p + 1, s->cylinders);		/* Default cylinders */
    put_le16(p + 3, s->heads);			/* Default heads */
    put_le16(p + 6, s->sectors);		/* Default sectors per track */
    /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
    /* *(p + 8) := nb_sectors       -- see ide_cfata_identify_size */
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 22, 0x0004);			/* ECC bytes */
    padstr((char *) (p + 23), s->version, 8);	/* Firmware Revision */
    padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
#if MAX_MULT_SECTORS > 1
    put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
#else
    put_le16(p + 47, 0x0000);
#endif
    put_le16(p + 49, 0x0f00);			/* Capabilities */
    put_le16(p + 51, 0x0002);			/* PIO cycle timing mode */
    put_le16(p + 52, 0x0001);			/* DMA cycle timing mode */
    put_le16(p + 53, 0x0003);			/* Translation params valid */
    put_le16(p + 54, s->cylinders);		/* Current cylinders */
    put_le16(p + 55, s->heads);			/* Current heads */
    put_le16(p + 56, s->sectors);		/* Current sectors */
    put_le16(p + 57, cur_sec);			/* Current capacity */
    put_le16(p + 58, cur_sec >> 16);		/* Current capacity */
    if (s->mult_sectors)			/* Multiple sector setting */
        put_le16(p + 59, 0x100 | s->mult_sectors);
    /* *(p + 60) := nb_sectors       -- see ide_cfata_identify_size */
    /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
    put_le16(p + 63, 0x0203);			/* Multiword DMA capability */
    put_le16(p + 64, 0x0001);			/* Flow Control PIO support */
    put_le16(p + 65, 0x0096);			/* Min. Multiword DMA cycle */
    put_le16(p + 66, 0x0096);			/* Rec. Multiword DMA cycle */
    put_le16(p + 68, 0x00b4);			/* Min. PIO cycle time */
    put_le16(p + 82, 0x400c);			/* Command Set supported */
    put_le16(p + 83, 0x7068);			/* Command Set supported */
    put_le16(p + 84, 0x4000);			/* Features supported */
    put_le16(p + 85, 0x000c);			/* Command Set enabled */
    put_le16(p + 86, 0x7044);			/* Command Set enabled */
    put_le16(p + 87, 0x4000);			/* Features enabled */
    put_le16(p + 91, 0x4060);			/* Current APM level */
    put_le16(p + 129, 0x0002);			/* Current features option */
    put_le16(p + 130, 0x0005);			/* Reassigned sectors */
    put_le16(p + 131, 0x0001);			/* Initial power mode */
    put_le16(p + 132, 0x0000);			/* User signature */
    put_le16(p + 160, 0x8100);			/* Power requirement */
    put_le16(p + 161, 0x8001);			/* CF command set */

    ide_cfata_identify_size(s);
    s->identify_set = 1;

fill_buffer:
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
}
375 static void ide_set_signature(IDEState *s)
377 s->select &= ~(ATA_DEV_HS); /* clear head */
378 /* put signature */
379 s->nsector = 1;
380 s->sector = 1;
381 if (s->drive_kind == IDE_CD) {
382 s->lcyl = 0x14;
383 s->hcyl = 0xeb;
384 } else if (s->blk) {
385 s->lcyl = 0;
386 s->hcyl = 0;
387 } else {
388 s->lcyl = 0xff;
389 s->hcyl = 0xff;
393 static bool ide_sect_range_ok(IDEState *s,
394 uint64_t sector, uint64_t nb_sectors)
396 uint64_t total_sectors;
398 blk_get_geometry(s->blk, &total_sectors);
399 if (sector > total_sectors || nb_sectors > total_sectors - sector) {
400 return false;
402 return true;
/* State for one guest TRIM request, possibly spanning many discards. */
typedef struct TrimAIOCB {
    BlockAIOCB common;      /* must be first: returned to the caller */
    IDEState *s;            /* owning drive */
    QEMUBH *bh;             /* bottom half that delivers completion */
    int ret;                /* final status to report */
    QEMUIOVector *qiov;     /* guest-supplied list of TRIM range entries */
    BlockAIOCB *aiocb;      /* in-flight discard, NULL when idle */
    int i, j;               /* resume position: entry i of iovec j */
} TrimAIOCB;
/* Cancellation hook for a TRIM: stop iteration and cancel any discard. */
static void trim_aio_cancel(BlockAIOCB *acb)
{
    TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);

    /* Exit the loop so ide_issue_trim_cb will not continue  */
    iocb->j = iocb->qiov->niov - 1;
    iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;

    iocb->ret = -ECANCELED;

    if (iocb->aiocb) {
        blk_aio_cancel_async(iocb->aiocb);
        iocb->aiocb = NULL;
    }
}
/* AIOCB descriptor for TRIM requests issued via ide_issue_trim(). */
static const AIOCBInfo trim_aiocb_info = {
    .aiocb_size         = sizeof(TrimAIOCB),
    .cancel_async       = trim_aio_cancel,
};
/*
 * Bottom half that delivers the final TRIM completion to the caller
 * and tears down the TrimAIOCB.
 */
static void ide_trim_bh_cb(void *opaque)
{
    TrimAIOCB *iocb = opaque;
    /* Keep a local blk: the unref below may free iocb. */
    BlockBackend *blk = iocb->s->blk;

    iocb->common.cb(iocb->common.opaque, iocb->ret);

    qemu_bh_delete(iocb->bh);
    iocb->bh = NULL;
    qemu_aio_unref(iocb);

    /* Paired with an increment in ide_issue_trim() */
    blk_dec_in_flight(blk);
}
/*
 * Completion callback that walks the guest's TRIM range list one entry
 * at a time.  Each 8-byte entry packs a 6-byte LBA and a 2-byte sector
 * count; (iocb->j, iocb->i) record the resume position across discards.
 * Invoked once directly from ide_issue_trim() with ret == 0 to start.
 */
static void ide_issue_trim_cb(void *opaque, int ret)
{
    TrimAIOCB *iocb = opaque;
    IDEState *s = iocb->s;

    /* i >= 0 means a previously submitted discard just completed. */
    if (iocb->i >= 0) {
        if (ret >= 0) {
            block_acct_done(blk_get_stats(s->blk), &s->acct);
        } else {
            block_acct_failed(blk_get_stats(s->blk), &s->acct);
        }
    }

    if (ret >= 0) {
        while (iocb->j < iocb->qiov->niov) {
            int j = iocb->j;
            while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
                int i = iocb->i;
                uint64_t *buffer = iocb->qiov->iov[j].iov_base;

                /* 6-byte LBA + 2-byte range per entry */
                uint64_t entry = le64_to_cpu(buffer[i]);
                uint64_t sector = entry & 0x0000ffffffffffffULL;
                uint16_t count = entry >> 48;

                /* Zero-length entries are simply skipped. */
                if (count == 0) {
                    continue;
                }

                /* Out-of-range entries abort the whole request. */
                if (!ide_sect_range_ok(s, sector, count)) {
                    block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_UNMAP);
                    iocb->ret = -EINVAL;
                    goto done;
                }

                block_acct_start(blk_get_stats(s->blk), &s->acct,
                                 count << BDRV_SECTOR_BITS, BLOCK_ACCT_UNMAP);

                /* Got an entry! Submit and exit.  */
                iocb->aiocb = blk_aio_pdiscard(s->blk,
                                               sector << BDRV_SECTOR_BITS,
                                               count << BDRV_SECTOR_BITS,
                                               ide_issue_trim_cb, opaque);
                return;
            }

            iocb->j++;
            iocb->i = -1;
        }
    } else {
        iocb->ret = ret;
    }

done:
    /* All entries processed (or error): schedule final completion. */
    iocb->aiocb = NULL;
    if (iocb->bh) {
        replay_bh_schedule_event(iocb->bh);
    }
}
/*
 * Entry point for a DMA TRIM: allocate the TrimAIOCB, pin the backend
 * in-flight counter and kick off the entry walk in ide_issue_trim_cb().
 * @opaque is the IDEState; @qiov holds the guest's TRIM range entries.
 */
BlockAIOCB *ide_issue_trim(
        int64_t offset, QEMUIOVector *qiov,
        BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
{
    IDEState *s = opaque;
    TrimAIOCB *iocb;

    /* Paired with a decrement in ide_trim_bh_cb() */
    blk_inc_in_flight(s->blk);

    iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque);
    iocb->s = s;
    iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
    iocb->ret = 0;
    iocb->qiov = qiov;
    iocb->i = -1;       /* no discard submitted yet */
    iocb->j = 0;        /* start at the first iovec */
    ide_issue_trim_cb(iocb, 0);
    return &iocb->common;
}
532 void ide_abort_command(IDEState *s)
534 ide_transfer_stop(s);
535 s->status = READY_STAT | ERR_STAT;
536 s->error = ABRT_ERR;
539 static void ide_set_retry(IDEState *s)
541 s->bus->retry_unit = s->unit;
542 s->bus->retry_sector_num = ide_get_sector(s);
543 s->bus->retry_nsector = s->nsector;
546 static void ide_clear_retry(IDEState *s)
548 s->bus->retry_unit = -1;
549 s->bus->retry_sector_num = 0;
550 s->bus->retry_nsector = 0;
/* prepare data transfer and tell what to do after */
/*
 * Set up a PIO data window of @size bytes at @buf.  Returns false when
 * the transfer waits for guest port I/O (end_transfer_func saved for
 * later), true when the backend's pio_transfer hook consumed it
 * synchronously and the caller must invoke @end_transfer_func itself.
 */
bool ide_transfer_start_norecurse(IDEState *s, uint8_t *buf, int size,
                                  EndTransferFunc *end_transfer_func)
{
    s->data_ptr = buf;
    s->data_end = buf + size;
    ide_set_retry(s);
    /* Only raise DRQ if the command has not already failed. */
    if (!(s->status & ERR_STAT)) {
        s->status |= DRQ_STAT;
    }
    if (!s->bus->dma->ops->pio_transfer) {
        s->end_transfer_func = end_transfer_func;
        return false;
    }
    s->bus->dma->ops->pio_transfer(s->bus->dma);
    return true;
}
571 void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
572 EndTransferFunc *end_transfer_func)
574 if (ide_transfer_start_norecurse(s, buf, size, end_transfer_func)) {
575 end_transfer_func(s);
/* Notify the DMA backend, if it provides a hook, that a command ended. */
static void ide_cmd_done(IDEState *s)
{
    if (s->bus->dma->ops->cmd_done) {
        s->bus->dma->ops->cmd_done(s->bus->dma);
    }
}
586 static void ide_transfer_halt(IDEState *s)
588 s->end_transfer_func = ide_transfer_stop;
589 s->data_ptr = s->io_buffer;
590 s->data_end = s->io_buffer;
591 s->status &= ~DRQ_STAT;
/* End the current PIO transfer and signal command completion. */
void ide_transfer_stop(IDEState *s)
{
    ide_transfer_halt(s);
    ide_cmd_done(s);
}
/*
 * Decode the current taskfile registers into an absolute sector number,
 * honouring the active addressing mode (LBA48, LBA28 or CHS).
 */
int64_t ide_get_sector(IDEState *s)
{
    int64_t sector_num;
    if (s->select & (ATA_DEV_LBA)) {
        if (s->lba48) {
            /* LBA48: 48-bit address split across the HOB registers. */
            sector_num = ((int64_t)s->hob_hcyl << 40) |
                ((int64_t) s->hob_lcyl << 32) |
                ((int64_t) s->hob_sector << 24) |
                ((int64_t) s->hcyl << 16) |
                ((int64_t) s->lcyl << 8) | s->sector;
        } else {
            /* LBA28 */
            sector_num = ((s->select & (ATA_DEV_LBA_MSB)) << 24) |
                (s->hcyl << 16) | (s->lcyl << 8) | s->sector;
        }
    } else {
        /* CHS */
        sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
            (s->select & (ATA_DEV_HS)) * s->sectors + (s->sector - 1);
    }

    return sector_num;
}
/*
 * Encode @sector_num back into the taskfile registers, using the same
 * addressing mode (LBA48, LBA28 or CHS) that ide_get_sector() decodes.
 */
void ide_set_sector(IDEState *s, int64_t sector_num)
{
    unsigned int cyl, r;
    if (s->select & (ATA_DEV_LBA)) {
        if (s->lba48) {
            s->sector = sector_num;
            s->lcyl = sector_num >> 8;
            s->hcyl = sector_num >> 16;
            s->hob_sector = sector_num >> 24;
            s->hob_lcyl = sector_num >> 32;
            s->hob_hcyl = sector_num >> 40;
        } else {
            /* LBA28: top four bits live in the select register. */
            s->select = (s->select & ~(ATA_DEV_LBA_MSB)) |
                ((sector_num >> 24) & (ATA_DEV_LBA_MSB));
            s->hcyl = (sector_num >> 16);
            s->lcyl = (sector_num >> 8);
            s->sector = (sector_num);
        }
    } else {
        /* CHS: cylinder / head / (1-based) sector split. */
        cyl = sector_num / (s->heads * s->sectors);
        r = sector_num % (s->heads * s->sectors);
        s->hcyl = cyl >> 8;
        s->lcyl = cyl;
        s->select = (s->select & ~(ATA_DEV_HS)) |
            ((r / s->sectors) & (ATA_DEV_HS));
        s->sector = (r % s->sectors) + 1;
    }
}
655 static void ide_rw_error(IDEState *s) {
656 ide_abort_command(s);
657 ide_bus_set_irq(s->bus);
/*
 * Completion for a buffered read: unless the request was orphaned by
 * ide_cancel_dma_sync(), copy the bounce buffer back into the caller's
 * iovec and run the original callback; then free the request.
 */
static void ide_buffered_readv_cb(void *opaque, int ret)
{
    IDEBufferedRequest *req = opaque;
    if (!req->orphaned) {
        if (!ret) {
            assert(req->qiov.size == req->original_qiov->size);
            qemu_iovec_from_buf(req->original_qiov, 0,
                                req->qiov.local_iov.iov_base,
                                req->original_qiov->size);
        }
        req->original_cb(req->original_opaque, ret);
    }
    QLIST_REMOVE(req, list);
    qemu_vfree(qemu_iovec_buf(&req->qiov));
    g_free(req);
}
#define MAX_BUFFERED_REQS 16

/*
 * Read @nb_sectors starting at @sector_num through a private bounce
 * buffer, so the request can later be safely cancelled (orphaned)
 * without the backend writing into guest memory.  Fails with -EIO once
 * more than MAX_BUFFERED_REQS requests are already queued.
 */
BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
                               QEMUIOVector *iov, int nb_sectors,
                               BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCB *aioreq;
    IDEBufferedRequest *req;
    int c = 0;

    QLIST_FOREACH(req, &s->buffered_requests, list) {
        c++;
    }
    if (c > MAX_BUFFERED_REQS) {
        return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
    }

    req = g_new0(IDEBufferedRequest, 1);
    req->original_qiov = iov;
    req->original_cb = cb;
    req->original_opaque = opaque;
    /* Bounce buffer of the same size as the caller's iovec. */
    qemu_iovec_init_buf(&req->qiov, blk_blockalign(s->blk, iov->size),
                        iov->size);

    aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
                            &req->qiov, 0, ide_buffered_readv_cb, req);

    QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
    return aioreq;
}
/*
 * Cancel all pending DMA requests.
 * Any buffered DMA requests are instantly canceled,
 * but any pending unbuffered DMA requests must be waited on.
 */
void ide_cancel_dma_sync(IDEState *s)
{
    IDEBufferedRequest *req;

    /* First invoke the callbacks of all buffered requests
     * and flag those requests as orphaned. Ideally there
     * are no unbuffered (Scatter Gather DMA Requests or
     * write requests) pending and we can avoid to drain. */
    QLIST_FOREACH(req, &s->buffered_requests, list) {
        if (!req->orphaned) {
            trace_ide_cancel_dma_sync_buffered(req->original_cb, req);
            req->original_cb(req->original_opaque, -ECANCELED);
        }
        req->orphaned = true;
    }

    /*
     * We can't cancel Scatter Gather DMA in the middle of the
     * operation or a partial (not full) DMA transfer would reach
     * the storage so we wait for completion instead (we behave
     * like if the DMA was completed by the time the guest trying
     * to cancel dma with bmdma_cmd_writeb with BM_CMD_START not
     * set).
     *
     * In the future we'll be able to safely cancel the I/O if the
     * whole DMA operation will be submitted to disk with a single
     * aio operation with preadv/pwritev.
     */
    if (s->bus->dma->aiocb) {
        trace_ide_cancel_dma_sync_remaining();
        blk_drain(s->blk);
        assert(s->bus->dma->aiocb == NULL);
    }
}
748 static void ide_sector_read(IDEState *s);
/*
 * Completion for a PIO sector read: account the result, advance the
 * sector position and open a PIO window so the guest can read the data
 * (ide_sector_read continues with the next chunk afterwards).
 */
static void ide_sector_read_cb(void *opaque, int ret)
{
    IDEState *s = opaque;
    int n;

    s->pio_aiocb = NULL;
    s->status &= ~BUSY_STAT;

    if (ret != 0) {
        /* Error policy may stop the VM or schedule a retry. */
        if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
                                IDE_RETRY_READ)) {
            return;
        }
    }

    block_acct_done(blk_get_stats(s->blk), &s->acct);

    /* This request transferred at most req_nb_sectors sectors. */
    n = s->nsector;
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }

    ide_set_sector(s, ide_get_sector(s) + n);
    s->nsector -= n;
    /* Allow the guest to read the io_buffer */
    ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
    ide_bus_set_irq(s->bus);
}
/*
 * Start (or continue) a PIO sector read of up to req_nb_sectors
 * sectors; completion is handled in ide_sector_read_cb().
 */
static void ide_sector_read(IDEState *s)
{
    int64_t sector_num;
    int n;

    s->status = READY_STAT | SEEK_STAT;
    s->error = 0; /* not needed by IDE spec, but needed by Windows */
    sector_num = ide_get_sector(s);
    n = s->nsector;

    /* nsector == 0 means the whole transfer is finished. */
    if (n == 0) {
        ide_transfer_stop(s);
        return;
    }

    s->status |= BUSY_STAT;

    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }

    trace_ide_sector_read(sector_num, n);

    if (!ide_sect_range_ok(s, sector_num, n)) {
        ide_rw_error(s);
        block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
        return;
    }

    qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);

    block_acct_start(blk_get_stats(s->blk), &s->acct,
                     n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
    /* Buffered so the request stays cancellable (see ide_buffered_readv). */
    s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
                                      ide_sector_read_cb, s);
}
/*
 * Commit @tx_bytes of DMA transfer to the backend (if it has a
 * commit_buf hook), advance the buffer offset and drop the sglist.
 */
void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
{
    if (s->bus->dma->ops->commit_buf) {
        s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
    }
    s->io_buffer_offset += tx_bytes;
    qemu_sglist_destroy(&s->sg);
}
/*
 * Mark the DMA engine idle.  @more is forwarded to the backend's
 * set_inactive hook (used e.g. to keep the BM Active bit set when the
 * PRDs described more data than the request needed).
 */
void ide_set_inactive(IDEState *s, bool more)
{
    s->bus->dma->aiocb = NULL;
    ide_clear_retry(s);
    if (s->bus->dma->ops->set_inactive) {
        s->bus->dma->ops->set_inactive(s->bus->dma, more);
    }
    ide_cmd_done(s);
}
/* Fail the current DMA command: abort, deactivate DMA and raise IRQ. */
void ide_dma_error(IDEState *s)
{
    dma_buf_commit(s, 0);
    ide_abort_command(s);
    ide_set_inactive(s, false);
    ide_bus_set_irq(s->bus);
}
/*
 * Apply the drive's configured error policy to a failed request.
 * @error is a positive errno; @op is an IDE_RETRY_* bitmask describing
 * the operation so it can be replayed.  Returns nonzero unless the
 * policy is to ignore the error (i.e. the caller should bail out).
 */
int ide_handle_rw_error(IDEState *s, int error, int op)
{
    bool is_read = (op & IDE_RETRY_READ) != 0;
    BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* VM will be stopped; leave the retry state set for resume. */
        assert(s->bus->retry_unit == s->unit);
        s->bus->error_status = op;
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        block_acct_failed(blk_get_stats(s->blk), &s->acct);
        if (IS_IDE_RETRY_DMA(op)) {
            ide_dma_error(s);
        } else if (IS_IDE_RETRY_ATAPI(op)) {
            ide_atapi_io_error(s, -error);
        } else {
            ide_rw_error(s);
        }
    }
    blk_error_action(s->blk, action, is_read, error);
    return action != BLOCK_ERROR_ACTION_IGNORE;
}
/*
 * DMA completion callback and main DMA loop.  Commits the chunk that
 * just finished, advances the sector position, then either terminates
 * the command or prepares the next sglist chunk and resubmits itself.
 */
static void ide_dma_cb(void *opaque, int ret)
{
    IDEState *s = opaque;
    int n;
    int64_t sector_num;
    uint64_t offset;
    bool stay_active = false;
    int32_t prep_size = 0;

    if (ret == -EINVAL) {
        ide_dma_error(s);
        return;
    }

    if (ret < 0) {
        if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
            s->bus->dma->aiocb = NULL;
            dma_buf_commit(s, 0);
            return;
        }
    }

    if (s->io_buffer_size > s->nsector * 512) {
        /*
         * The PRDs were longer than needed for this request.
         * The Active bit must remain set after the request completes.
         */
        n = s->nsector;
        stay_active = true;
    } else {
        n = s->io_buffer_size >> 9;
    }

    sector_num = ide_get_sector(s);
    if (n > 0) {
        /* Commit the chunk that just completed. */
        assert(n * 512 == s->sg.size);
        dma_buf_commit(s, s->sg.size);
        sector_num += n;
        ide_set_sector(s, sector_num);
        s->nsector -= n;
    }

    /* end of transfer ? */
    if (s->nsector == 0) {
        s->status = READY_STAT | SEEK_STAT;
        ide_bus_set_irq(s->bus);
        goto eot;
    }

    /* launch next transfer */
    n = s->nsector;
    s->io_buffer_index = 0;
    s->io_buffer_size = n * 512;
    prep_size = s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size);
    /* prepare_buf() must succeed and respect the limit */
    assert(prep_size >= 0 && prep_size <= n * 512);

    /*
     * Now prep_size stores the number of bytes in the sglist, and
     * s->io_buffer_size stores the number of bytes described by the PRDs.
     */

    if (prep_size < n * 512) {
        /*
         * The PRDs are too short for this request. Error condition!
         * Reset the Active bit and don't raise the interrupt.
         */
        s->status = READY_STAT | SEEK_STAT;
        dma_buf_commit(s, 0);
        goto eot;
    }

    trace_ide_dma_cb(s, sector_num, n, IDE_DMA_CMD_str(s->dma_cmd));

    if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
        !ide_sect_range_ok(s, sector_num, n)) {
        ide_dma_error(s);
        block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
        return;
    }

    offset = sector_num << BDRV_SECTOR_BITS;
    switch (s->dma_cmd) {
    case IDE_DMA_READ:
        s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
                                          BDRV_SECTOR_SIZE, ide_dma_cb, s);
        break;
    case IDE_DMA_WRITE:
        s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
                                           BDRV_SECTOR_SIZE, ide_dma_cb, s);
        break;
    case IDE_DMA_TRIM:
        s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
                                        &s->sg, offset, BDRV_SECTOR_SIZE,
                                        ide_issue_trim, s, ide_dma_cb, s,
                                        DMA_DIRECTION_TO_DEVICE);
        break;
    default:
        abort();
    }
    return;

eot:
    if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
        block_acct_done(blk_get_stats(s->blk), &s->acct);
    }
    ide_set_inactive(s, stay_active);
}
974 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
976 s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
977 s->io_buffer_size = 0;
978 s->dma_cmd = dma_cmd;
980 switch (dma_cmd) {
981 case IDE_DMA_READ:
982 block_acct_start(blk_get_stats(s->blk), &s->acct,
983 s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
984 break;
985 case IDE_DMA_WRITE:
986 block_acct_start(blk_get_stats(s->blk), &s->acct,
987 s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
988 break;
989 default:
990 break;
993 ide_start_dma(s, ide_dma_cb);
/*
 * Kick the DMA backend with completion callback @cb, recording retry
 * state first so the command can be replayed on error.
 */
void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
{
    s->io_buffer_index = 0;
    ide_set_retry(s);
    if (s->bus->dma->ops->start_dma) {
        s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
    }
}
1005 static void ide_sector_write(IDEState *s);
/* Timer callback that delivers the delayed write-completion IRQ
 * (see the win2k_install_hack path in ide_sector_write_cb). */
static void ide_sector_write_timer_cb(void *opaque)
{
    IDEState *s = opaque;
    ide_bus_set_irq(s->bus);
}
/*
 * Completion for a PIO sector write: account the result, advance the
 * position, open the next PIO window if sectors remain, and raise the
 * IRQ (possibly delayed, see the win2k hack below).
 */
static void ide_sector_write_cb(void *opaque, int ret)
{
    IDEState *s = opaque;
    int n;

    s->pio_aiocb = NULL;
    s->status &= ~BUSY_STAT;

    if (ret != 0) {
        if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
            return;
        }
    }

    block_acct_done(blk_get_stats(s->blk), &s->acct);

    /* This request wrote at most req_nb_sectors sectors. */
    n = s->nsector;
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }
    s->nsector -= n;

    ide_set_sector(s, ide_get_sector(s) + n);
    if (s->nsector == 0) {
        /* no more sectors to write */
        ide_transfer_stop(s);
    } else {
        int n1 = s->nsector;
        if (n1 > s->req_nb_sectors) {
            n1 = s->req_nb_sectors;
        }
        ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
                           ide_sector_write);
    }

    if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
        /* It seems there is a bug in the Windows 2000 installer HDD
           IDE driver which fills the disk with empty logs when the
           IDE write IRQ comes too early. This hack tries to correct
           that at the expense of slower write performances. Use this
           option _only_ to install Windows 2000. You must disable it
           for normal use. */
        timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                  (NANOSECONDS_PER_SECOND / 1000));
    } else {
        ide_bus_set_irq(s->bus);
    }
}
/*
 * Start (or continue) a PIO sector write of up to req_nb_sectors
 * sectors from io_buffer; completion is in ide_sector_write_cb().
 */
static void ide_sector_write(IDEState *s)
{
    int64_t sector_num;
    int n;

    s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
    sector_num = ide_get_sector(s);

    n = s->nsector;
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }

    trace_ide_sector_write(sector_num, n);

    if (!ide_sect_range_ok(s, sector_num, n)) {
        ide_rw_error(s);
        block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
        return;
    }

    qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);

    block_acct_start(blk_get_stats(s->blk), &s->acct,
                     n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
    s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
                                   &s->qiov, 0, ide_sector_write_cb, s);
}
/*
 * Completion for a cache flush (also called directly with ret == 0
 * when there is no backing device): report status and raise the IRQ.
 */
static void ide_flush_cb(void *opaque, int ret)
{
    IDEState *s = opaque;

    s->pio_aiocb = NULL;

    if (ret < 0) {
        /* XXX: What sector number to set here? */
        if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
            return;
        }
    }

    if (s->blk) {
        block_acct_done(blk_get_stats(s->blk), &s->acct);
    }
    s->status = READY_STAT | SEEK_STAT;
    ide_cmd_done(s);
    ide_bus_set_irq(s->bus);
}
1112 static void ide_flush_cache(IDEState *s)
1114 if (s->blk == NULL) {
1115 ide_flush_cb(s, 0);
1116 return;
1119 s->status |= BUSY_STAT;
1120 ide_set_retry(s);
1121 block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
1122 s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
1125 static void ide_cfata_metadata_inquiry(IDEState *s)
1127 uint16_t *p;
1128 uint32_t spd;
1130 p = (uint16_t *) s->io_buffer;
1131 memset(p, 0, 0x200);
1132 spd = ((s->mdata_size - 1) >> 9) + 1;
1134 put_le16(p + 0, 0x0001); /* Data format revision */
1135 put_le16(p + 1, 0x0000); /* Media property: silicon */
1136 put_le16(p + 2, s->media_changed); /* Media status */
1137 put_le16(p + 3, s->mdata_size & 0xffff); /* Capacity in bytes (low) */
1138 put_le16(p + 4, s->mdata_size >> 16); /* Capacity in bytes (high) */
1139 put_le16(p + 5, spd & 0xffff); /* Sectors per device (low) */
1140 put_le16(p + 6, spd >> 16); /* Sectors per device (high) */
1143 static void ide_cfata_metadata_read(IDEState *s)
1145 uint16_t *p;
1147 if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1148 s->status = ERR_STAT;
1149 s->error = ABRT_ERR;
1150 return;
1153 p = (uint16_t *) s->io_buffer;
1154 memset(p, 0, 0x200);
1156 put_le16(p + 0, s->media_changed); /* Media status */
1157 memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1158 MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1159 s->nsector << 9), 0x200 - 2));
1162 static void ide_cfata_metadata_write(IDEState *s)
1164 if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1165 s->status = ERR_STAT;
1166 s->error = ABRT_ERR;
1167 return;
1170 s->media_changed = 0;
1172 memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1173 s->io_buffer + 2,
1174 MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1175 s->nsector << 9), 0x200 - 2));
1178 /* called when the inserted state of the media has changed */
1179 static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
1181 IDEState *s = opaque;
1182 uint64_t nb_sectors;
1184 s->tray_open = !load;
1185 blk_get_geometry(s->blk, &nb_sectors);
1186 s->nb_sectors = nb_sectors;
1189 * First indicate to the guest that a CD has been removed. That's
1190 * done on the next command the guest sends us.
1192 * Then we set UNIT_ATTENTION, by which the guest will
1193 * detect a new CD in the drive. See ide_atapi_cmd() for details.
1195 s->cdrom_changed = 1;
1196 s->events.new_media = true;
1197 s->events.eject_request = false;
1198 ide_bus_set_irq(s->bus);
1201 static void ide_cd_eject_request_cb(void *opaque, bool force)
1203 IDEState *s = opaque;
1205 s->events.eject_request = true;
1206 if (force) {
1207 s->tray_locked = false;
1209 ide_bus_set_irq(s->bus);
1212 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1214 s->lba48 = lba48;
1216 /* handle the 'magic' 0 nsector count conversion here. to avoid
1217 * fiddling with the rest of the read logic, we just store the
1218 * full sector count in ->nsector and ignore ->hob_nsector from now
1220 if (!s->lba48) {
1221 if (!s->nsector)
1222 s->nsector = 256;
1223 } else {
1224 if (!s->nsector && !s->hob_nsector)
1225 s->nsector = 65536;
1226 else {
1227 int lo = s->nsector;
1228 int hi = s->hob_nsector;
1230 s->nsector = (hi << 8) | lo;
1235 static void ide_clear_hob(IDEBus *bus)
1237 /* any write clears HOB high bit of device control register */
1238 bus->cmd &= ~(IDE_CTRL_HOB);
/* IOport [W]rite [R]egisters */
enum ATA_IOPORT_WR {
    ATA_IOPORT_WR_DATA = 0,
    ATA_IOPORT_WR_FEATURES = 1,
    ATA_IOPORT_WR_SECTOR_COUNT = 2,
    ATA_IOPORT_WR_SECTOR_NUMBER = 3,
    ATA_IOPORT_WR_CYLINDER_LOW = 4,
    ATA_IOPORT_WR_CYLINDER_HIGH = 5,
    ATA_IOPORT_WR_DEVICE_HEAD = 6,
    ATA_IOPORT_WR_COMMAND = 7,
    ATA_IOPORT_WR_NUM_REGISTERS,
};
1254 const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = {
1255 [ATA_IOPORT_WR_DATA] = "Data",
1256 [ATA_IOPORT_WR_FEATURES] = "Features",
1257 [ATA_IOPORT_WR_SECTOR_COUNT] = "Sector Count",
1258 [ATA_IOPORT_WR_SECTOR_NUMBER] = "Sector Number",
1259 [ATA_IOPORT_WR_CYLINDER_LOW] = "Cylinder Low",
1260 [ATA_IOPORT_WR_CYLINDER_HIGH] = "Cylinder High",
1261 [ATA_IOPORT_WR_DEVICE_HEAD] = "Device/Head",
1262 [ATA_IOPORT_WR_COMMAND] = "Command"
1265 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1267 IDEBus *bus = opaque;
1268 IDEState *s = ide_bus_active_if(bus);
1269 int reg_num = addr & 7;
1271 trace_ide_ioport_write(addr, ATA_IOPORT_WR_lookup[reg_num], val, bus, s);
1273 /* ignore writes to command block while busy with previous command */
1274 if (reg_num != 7 && (s->status & (BUSY_STAT|DRQ_STAT))) {
1275 return;
1278 /* NOTE: Device0 and Device1 both receive incoming register writes.
1279 * (They're on the same bus! They have to!) */
1281 switch (reg_num) {
1282 case 0:
1283 break;
1284 case ATA_IOPORT_WR_FEATURES:
1285 ide_clear_hob(bus);
1286 bus->ifs[0].hob_feature = bus->ifs[0].feature;
1287 bus->ifs[1].hob_feature = bus->ifs[1].feature;
1288 bus->ifs[0].feature = val;
1289 bus->ifs[1].feature = val;
1290 break;
1291 case ATA_IOPORT_WR_SECTOR_COUNT:
1292 ide_clear_hob(bus);
1293 bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1294 bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1295 bus->ifs[0].nsector = val;
1296 bus->ifs[1].nsector = val;
1297 break;
1298 case ATA_IOPORT_WR_SECTOR_NUMBER:
1299 ide_clear_hob(bus);
1300 bus->ifs[0].hob_sector = bus->ifs[0].sector;
1301 bus->ifs[1].hob_sector = bus->ifs[1].sector;
1302 bus->ifs[0].sector = val;
1303 bus->ifs[1].sector = val;
1304 break;
1305 case ATA_IOPORT_WR_CYLINDER_LOW:
1306 ide_clear_hob(bus);
1307 bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1308 bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1309 bus->ifs[0].lcyl = val;
1310 bus->ifs[1].lcyl = val;
1311 break;
1312 case ATA_IOPORT_WR_CYLINDER_HIGH:
1313 ide_clear_hob(bus);
1314 bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1315 bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1316 bus->ifs[0].hcyl = val;
1317 bus->ifs[1].hcyl = val;
1318 break;
1319 case ATA_IOPORT_WR_DEVICE_HEAD:
1320 ide_clear_hob(bus);
1321 bus->ifs[0].select = val | (ATA_DEV_ALWAYS_ON);
1322 bus->ifs[1].select = val | (ATA_DEV_ALWAYS_ON);
1323 /* select drive */
1324 bus->unit = (val & (ATA_DEV_SELECT)) ? 1 : 0;
1325 break;
1326 default:
1327 case ATA_IOPORT_WR_COMMAND:
1328 ide_clear_hob(bus);
1329 qemu_irq_lower(bus->irq);
1330 ide_bus_exec_cmd(bus, val);
1331 break;
1335 static void ide_reset(IDEState *s)
1337 trace_ide_reset(s);
1339 if (s->pio_aiocb) {
1340 blk_aio_cancel(s->pio_aiocb);
1341 s->pio_aiocb = NULL;
1344 if (s->reset_reverts) {
1345 s->reset_reverts = false;
1346 s->heads = s->drive_heads;
1347 s->sectors = s->drive_sectors;
1349 if (s->drive_kind == IDE_CFATA)
1350 s->mult_sectors = 0;
1351 else
1352 s->mult_sectors = MAX_MULT_SECTORS;
1353 /* ide regs */
1354 s->feature = 0;
1355 s->error = 0;
1356 s->nsector = 0;
1357 s->sector = 0;
1358 s->lcyl = 0;
1359 s->hcyl = 0;
1361 /* lba48 */
1362 s->hob_feature = 0;
1363 s->hob_sector = 0;
1364 s->hob_nsector = 0;
1365 s->hob_lcyl = 0;
1366 s->hob_hcyl = 0;
1368 s->select = (ATA_DEV_ALWAYS_ON);
1369 s->status = READY_STAT | SEEK_STAT;
1371 s->lba48 = 0;
1373 /* ATAPI specific */
1374 s->sense_key = 0;
1375 s->asc = 0;
1376 s->cdrom_changed = 0;
1377 s->packet_transfer_size = 0;
1378 s->elementary_transfer_size = 0;
1379 s->io_buffer_index = 0;
1380 s->cd_sector_size = 0;
1381 s->atapi_dma = 0;
1382 s->tray_locked = 0;
1383 s->tray_open = 0;
1384 /* ATA DMA state */
1385 s->io_buffer_size = 0;
1386 s->req_nb_sectors = 0;
1388 ide_set_signature(s);
1389 /* init the transfer handler so that 0xffff is returned on data
1390 accesses */
1391 s->end_transfer_func = ide_dummy_transfer_stop;
1392 ide_dummy_transfer_stop(s);
1393 s->media_changed = 0;
1396 static bool cmd_nop(IDEState *s, uint8_t cmd)
1398 return true;
1401 static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1403 /* Halt PIO (in the DRQ phase), then DMA */
1404 ide_transfer_halt(s);
1405 ide_cancel_dma_sync(s);
1407 /* Reset any PIO commands, reset signature, etc */
1408 ide_reset(s);
1410 /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
1411 * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
1412 s->status = 0x00;
1414 /* Do not overwrite status register */
1415 return false;
1418 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1420 switch (s->feature) {
1421 case DSM_TRIM:
1422 if (s->blk) {
1423 ide_sector_start_dma(s, IDE_DMA_TRIM);
1424 return false;
1426 break;
1429 ide_abort_command(s);
1430 return true;
1433 static bool cmd_identify(IDEState *s, uint8_t cmd)
1435 if (s->blk && s->drive_kind != IDE_CD) {
1436 if (s->drive_kind != IDE_CFATA) {
1437 ide_identify(s);
1438 } else {
1439 ide_cfata_identify(s);
1441 s->status = READY_STAT | SEEK_STAT;
1442 ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1443 ide_bus_set_irq(s->bus);
1444 return false;
1445 } else {
1446 if (s->drive_kind == IDE_CD) {
1447 ide_set_signature(s);
1449 ide_abort_command(s);
1452 return true;
1455 static bool cmd_verify(IDEState *s, uint8_t cmd)
1457 bool lba48 = (cmd == WIN_VERIFY_EXT);
1459 /* do sector number check ? */
1460 ide_cmd_lba48_transform(s, lba48);
1462 return true;
1465 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1467 if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1468 /* Disable Read and Write Multiple */
1469 s->mult_sectors = 0;
1470 } else if ((s->nsector & 0xff) != 0 &&
1471 ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1472 (s->nsector & (s->nsector - 1)) != 0)) {
1473 ide_abort_command(s);
1474 } else {
1475 s->mult_sectors = s->nsector & 0xff;
1478 return true;
1481 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1483 bool lba48 = (cmd == WIN_MULTREAD_EXT);
1485 if (!s->blk || !s->mult_sectors) {
1486 ide_abort_command(s);
1487 return true;
1490 ide_cmd_lba48_transform(s, lba48);
1491 s->req_nb_sectors = s->mult_sectors;
1492 ide_sector_read(s);
1493 return false;
1496 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1498 bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1499 int n;
1501 if (!s->blk || !s->mult_sectors) {
1502 ide_abort_command(s);
1503 return true;
1506 ide_cmd_lba48_transform(s, lba48);
1508 s->req_nb_sectors = s->mult_sectors;
1509 n = MIN(s->nsector, s->req_nb_sectors);
1511 s->status = SEEK_STAT | READY_STAT;
1512 ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1514 s->media_changed = 1;
1516 return false;
1519 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1521 bool lba48 = (cmd == WIN_READ_EXT);
1523 if (s->drive_kind == IDE_CD) {
1524 ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1525 ide_abort_command(s);
1526 return true;
1529 if (!s->blk) {
1530 ide_abort_command(s);
1531 return true;
1534 ide_cmd_lba48_transform(s, lba48);
1535 s->req_nb_sectors = 1;
1536 ide_sector_read(s);
1538 return false;
1541 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1543 bool lba48 = (cmd == WIN_WRITE_EXT);
1545 if (!s->blk) {
1546 ide_abort_command(s);
1547 return true;
1550 ide_cmd_lba48_transform(s, lba48);
1552 s->req_nb_sectors = 1;
1553 s->status = SEEK_STAT | READY_STAT;
1554 ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1556 s->media_changed = 1;
1558 return false;
1561 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1563 bool lba48 = (cmd == WIN_READDMA_EXT);
1565 if (!s->blk) {
1566 ide_abort_command(s);
1567 return true;
1570 ide_cmd_lba48_transform(s, lba48);
1571 ide_sector_start_dma(s, IDE_DMA_READ);
1573 return false;
1576 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1578 bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1580 if (!s->blk) {
1581 ide_abort_command(s);
1582 return true;
1585 ide_cmd_lba48_transform(s, lba48);
1586 ide_sector_start_dma(s, IDE_DMA_WRITE);
1588 s->media_changed = 1;
1590 return false;
1593 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1595 ide_flush_cache(s);
1596 return false;
1599 static bool cmd_seek(IDEState *s, uint8_t cmd)
1601 /* XXX: Check that seek is within bounds */
1602 return true;
1605 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1607 bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1609 /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1610 if (s->nb_sectors == 0) {
1611 ide_abort_command(s);
1612 return true;
1615 ide_cmd_lba48_transform(s, lba48);
1616 ide_set_sector(s, s->nb_sectors - 1);
1618 return true;
1621 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1623 s->nsector = 0xff; /* device active or idle */
1624 return true;
1627 /* INITIALIZE DEVICE PARAMETERS */
1628 static bool cmd_specify(IDEState *s, uint8_t cmd)
1630 if (s->blk && s->drive_kind != IDE_CD) {
1631 s->heads = (s->select & (ATA_DEV_HS)) + 1;
1632 s->sectors = s->nsector;
1633 ide_bus_set_irq(s->bus);
1634 } else {
1635 ide_abort_command(s);
1638 return true;
1641 static bool cmd_set_features(IDEState *s, uint8_t cmd)
1643 uint16_t *identify_data;
1645 if (!s->blk) {
1646 ide_abort_command(s);
1647 return true;
1650 /* XXX: valid for CDROM ? */
1651 switch (s->feature) {
1652 case 0x01: /* 8-bit I/O enable (CompactFlash) */
1653 case 0x81: /* 8-bit I/O disable (CompactFlash) */
1654 if (s->drive_kind != IDE_CFATA) {
1655 goto abort_cmd;
1657 s->io8 = !(s->feature & 0x80);
1658 return true;
1659 case 0x02: /* write cache enable */
1660 blk_set_enable_write_cache(s->blk, true);
1661 identify_data = (uint16_t *)s->identify_data;
1662 put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1663 return true;
1664 case 0x82: /* write cache disable */
1665 blk_set_enable_write_cache(s->blk, false);
1666 identify_data = (uint16_t *)s->identify_data;
1667 put_le16(identify_data + 85, (1 << 14) | 1);
1668 ide_flush_cache(s);
1669 return false;
1670 case 0xcc: /* reverting to power-on defaults enable */
1671 s->reset_reverts = true;
1672 return true;
1673 case 0x66: /* reverting to power-on defaults disable */
1674 s->reset_reverts = false;
1675 return true;
1676 case 0xaa: /* read look-ahead enable */
1677 case 0x55: /* read look-ahead disable */
1678 case 0x05: /* set advanced power management mode */
1679 case 0x85: /* disable advanced power management mode */
1680 case 0x69: /* NOP */
1681 case 0x67: /* NOP */
1682 case 0x96: /* NOP */
1683 case 0x9a: /* NOP */
1684 case 0x42: /* enable Automatic Acoustic Mode */
1685 case 0xc2: /* disable Automatic Acoustic Mode */
1686 return true;
1687 case 0x03: /* set transfer mode */
1689 uint8_t val = s->nsector & 0x07;
1690 identify_data = (uint16_t *)s->identify_data;
1692 switch (s->nsector >> 3) {
1693 case 0x00: /* pio default */
1694 case 0x01: /* pio mode */
1695 put_le16(identify_data + 62, 0x07);
1696 put_le16(identify_data + 63, 0x07);
1697 put_le16(identify_data + 88, 0x3f);
1698 break;
1699 case 0x02: /* sigle word dma mode*/
1700 put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1701 put_le16(identify_data + 63, 0x07);
1702 put_le16(identify_data + 88, 0x3f);
1703 break;
1704 case 0x04: /* mdma mode */
1705 put_le16(identify_data + 62, 0x07);
1706 put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1707 put_le16(identify_data + 88, 0x3f);
1708 break;
1709 case 0x08: /* udma mode */
1710 put_le16(identify_data + 62, 0x07);
1711 put_le16(identify_data + 63, 0x07);
1712 put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1713 break;
1714 default:
1715 goto abort_cmd;
1717 return true;
1721 abort_cmd:
1722 ide_abort_command(s);
1723 return true;
1727 /*** ATAPI commands ***/
1729 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1731 ide_atapi_identify(s);
1732 s->status = READY_STAT | SEEK_STAT;
1733 ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1734 ide_bus_set_irq(s->bus);
1735 return false;
1738 /* EXECUTE DEVICE DIAGNOSTIC */
1739 static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1742 * Clear the device register per the ATA (v6) specification,
1743 * because ide_set_signature does not clear LBA or drive bits.
1745 s->select = (ATA_DEV_ALWAYS_ON);
1746 ide_set_signature(s);
1748 if (s->drive_kind == IDE_CD) {
1749 s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1750 * devices to return a clear status register
1751 * with READY_STAT *not* set. */
1752 s->error = 0x01;
1753 } else {
1754 s->status = READY_STAT | SEEK_STAT;
1755 /* The bits of the error register are not as usual for this command!
1756 * They are part of the regular output (this is why ERR_STAT isn't set)
1757 * Device 0 passed, Device 1 passed or not present. */
1758 s->error = 0x01;
1759 ide_bus_set_irq(s->bus);
1762 return false;
1765 static bool cmd_packet(IDEState *s, uint8_t cmd)
1767 /* overlapping commands not supported */
1768 if (s->feature & 0x02) {
1769 ide_abort_command(s);
1770 return true;
1773 s->status = READY_STAT | SEEK_STAT;
1774 s->atapi_dma = s->feature & 1;
1775 if (s->atapi_dma) {
1776 s->dma_cmd = IDE_DMA_ATAPI;
1778 s->nsector = 1;
1779 ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1780 ide_atapi_cmd);
1781 return false;
1785 /*** CF-ATA commands ***/
1787 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1789 s->error = 0x09; /* miscellaneous error */
1790 s->status = READY_STAT | SEEK_STAT;
1791 ide_bus_set_irq(s->bus);
1793 return false;
1796 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1798 /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1799 * required for Windows 8 to work with AHCI */
1801 if (cmd == CFA_WEAR_LEVEL) {
1802 s->nsector = 0;
1805 if (cmd == CFA_ERASE_SECTORS) {
1806 s->media_changed = 1;
1809 return true;
1812 static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1814 s->status = READY_STAT | SEEK_STAT;
1816 memset(s->io_buffer, 0, 0x200);
1817 s->io_buffer[0x00] = s->hcyl; /* Cyl MSB */
1818 s->io_buffer[0x01] = s->lcyl; /* Cyl LSB */
1819 s->io_buffer[0x02] = s->select; /* Head */
1820 s->io_buffer[0x03] = s->sector; /* Sector */
1821 s->io_buffer[0x04] = ide_get_sector(s) >> 16; /* LBA MSB */
1822 s->io_buffer[0x05] = ide_get_sector(s) >> 8; /* LBA */
1823 s->io_buffer[0x06] = ide_get_sector(s) >> 0; /* LBA LSB */
1824 s->io_buffer[0x13] = 0x00; /* Erase flag */
1825 s->io_buffer[0x18] = 0x00; /* Hot count */
1826 s->io_buffer[0x19] = 0x00; /* Hot count */
1827 s->io_buffer[0x1a] = 0x01; /* Hot count */
1829 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1830 ide_bus_set_irq(s->bus);
1832 return false;
1835 static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1837 switch (s->feature) {
1838 case 0x02: /* Inquiry Metadata Storage */
1839 ide_cfata_metadata_inquiry(s);
1840 break;
1841 case 0x03: /* Read Metadata Storage */
1842 ide_cfata_metadata_read(s);
1843 break;
1844 case 0x04: /* Write Metadata Storage */
1845 ide_cfata_metadata_write(s);
1846 break;
1847 default:
1848 ide_abort_command(s);
1849 return true;
1852 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1853 s->status = 0x00; /* NOTE: READY is _not_ set */
1854 ide_bus_set_irq(s->bus);
1856 return false;
1859 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1861 switch (s->feature) {
1862 case 0x01: /* sense temperature in device */
1863 s->nsector = 0x50; /* +20 C */
1864 break;
1865 default:
1866 ide_abort_command(s);
1867 return true;
1870 return true;
1874 /*** SMART commands ***/
1876 static bool cmd_smart(IDEState *s, uint8_t cmd)
1878 int n;
1880 if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
1881 goto abort_cmd;
1884 if (!s->smart_enabled && s->feature != SMART_ENABLE) {
1885 goto abort_cmd;
1888 switch (s->feature) {
1889 case SMART_DISABLE:
1890 s->smart_enabled = 0;
1891 return true;
1893 case SMART_ENABLE:
1894 s->smart_enabled = 1;
1895 return true;
1897 case SMART_ATTR_AUTOSAVE:
1898 switch (s->sector) {
1899 case 0x00:
1900 s->smart_autosave = 0;
1901 break;
1902 case 0xf1:
1903 s->smart_autosave = 1;
1904 break;
1905 default:
1906 goto abort_cmd;
1908 return true;
1910 case SMART_STATUS:
1911 if (!s->smart_errors) {
1912 s->hcyl = 0xc2;
1913 s->lcyl = 0x4f;
1914 } else {
1915 s->hcyl = 0x2c;
1916 s->lcyl = 0xf4;
1918 return true;
1920 case SMART_READ_THRESH:
1921 memset(s->io_buffer, 0, 0x200);
1922 s->io_buffer[0] = 0x01; /* smart struct version */
1924 for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1925 s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
1926 s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
1929 /* checksum */
1930 for (n = 0; n < 511; n++) {
1931 s->io_buffer[511] += s->io_buffer[n];
1933 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1935 s->status = READY_STAT | SEEK_STAT;
1936 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1937 ide_bus_set_irq(s->bus);
1938 return false;
1940 case SMART_READ_DATA:
1941 memset(s->io_buffer, 0, 0x200);
1942 s->io_buffer[0] = 0x01; /* smart struct version */
1944 for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1945 int i;
1946 for (i = 0; i < 11; i++) {
1947 s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
1951 s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
1952 if (s->smart_selftest_count == 0) {
1953 s->io_buffer[363] = 0;
1954 } else {
1955 s->io_buffer[363] =
1956 s->smart_selftest_data[3 +
1957 (s->smart_selftest_count - 1) *
1958 24];
1960 s->io_buffer[364] = 0x20;
1961 s->io_buffer[365] = 0x01;
1962 /* offline data collection capacity: execute + self-test*/
1963 s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
1964 s->io_buffer[368] = 0x03; /* smart capability (1) */
1965 s->io_buffer[369] = 0x00; /* smart capability (2) */
1966 s->io_buffer[370] = 0x01; /* error logging supported */
1967 s->io_buffer[372] = 0x02; /* minutes for poll short test */
1968 s->io_buffer[373] = 0x36; /* minutes for poll ext test */
1969 s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
1971 for (n = 0; n < 511; n++) {
1972 s->io_buffer[511] += s->io_buffer[n];
1974 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1976 s->status = READY_STAT | SEEK_STAT;
1977 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1978 ide_bus_set_irq(s->bus);
1979 return false;
1981 case SMART_READ_LOG:
1982 switch (s->sector) {
1983 case 0x01: /* summary smart error log */
1984 memset(s->io_buffer, 0, 0x200);
1985 s->io_buffer[0] = 0x01;
1986 s->io_buffer[1] = 0x00; /* no error entries */
1987 s->io_buffer[452] = s->smart_errors & 0xff;
1988 s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
1990 for (n = 0; n < 511; n++) {
1991 s->io_buffer[511] += s->io_buffer[n];
1993 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1994 break;
1995 case 0x06: /* smart self test log */
1996 memset(s->io_buffer, 0, 0x200);
1997 s->io_buffer[0] = 0x01;
1998 if (s->smart_selftest_count == 0) {
1999 s->io_buffer[508] = 0;
2000 } else {
2001 s->io_buffer[508] = s->smart_selftest_count;
2002 for (n = 2; n < 506; n++) {
2003 s->io_buffer[n] = s->smart_selftest_data[n];
2007 for (n = 0; n < 511; n++) {
2008 s->io_buffer[511] += s->io_buffer[n];
2010 s->io_buffer[511] = 0x100 - s->io_buffer[511];
2011 break;
2012 default:
2013 goto abort_cmd;
2015 s->status = READY_STAT | SEEK_STAT;
2016 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
2017 ide_bus_set_irq(s->bus);
2018 return false;
2020 case SMART_EXECUTE_OFFLINE:
2021 switch (s->sector) {
2022 case 0: /* off-line routine */
2023 case 1: /* short self test */
2024 case 2: /* extended self test */
2025 s->smart_selftest_count++;
2026 if (s->smart_selftest_count > 21) {
2027 s->smart_selftest_count = 1;
2029 n = 2 + (s->smart_selftest_count - 1) * 24;
2030 s->smart_selftest_data[n] = s->sector;
2031 s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
2032 s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
2033 s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
2034 break;
2035 default:
2036 goto abort_cmd;
2038 return true;
2041 abort_cmd:
2042 ide_abort_command(s);
2043 return true;
2046 #define HD_OK (1u << IDE_HD)
2047 #define CD_OK (1u << IDE_CD)
2048 #define CFA_OK (1u << IDE_CFATA)
2049 #define HD_CFA_OK (HD_OK | CFA_OK)
2050 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
2052 /* Set the Disk Seek Completed status bit during completion */
2053 #define SET_DSC (1u << 8)
2055 /* See ACS-2 T13/2015-D Table B.2 Command codes */
2056 static const struct {
2057 /* Returns true if the completion code should be run */
2058 bool (*handler)(IDEState *s, uint8_t cmd);
2059 int flags;
2060 } ide_cmd_table[0x100] = {
2061 /* NOP not implemented, mandatory for CD */
2062 [CFA_REQ_EXT_ERROR_CODE] = { cmd_cfa_req_ext_error_code, CFA_OK },
2063 [WIN_DSM] = { cmd_data_set_management, HD_CFA_OK },
2064 [WIN_DEVICE_RESET] = { cmd_device_reset, CD_OK },
2065 [WIN_RECAL] = { cmd_nop, HD_CFA_OK | SET_DSC},
2066 [WIN_READ] = { cmd_read_pio, ALL_OK },
2067 [WIN_READ_ONCE] = { cmd_read_pio, HD_CFA_OK },
2068 [WIN_READ_EXT] = { cmd_read_pio, HD_CFA_OK },
2069 [WIN_READDMA_EXT] = { cmd_read_dma, HD_CFA_OK },
2070 [WIN_READ_NATIVE_MAX_EXT] = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2071 [WIN_MULTREAD_EXT] = { cmd_read_multiple, HD_CFA_OK },
2072 [WIN_WRITE] = { cmd_write_pio, HD_CFA_OK },
2073 [WIN_WRITE_ONCE] = { cmd_write_pio, HD_CFA_OK },
2074 [WIN_WRITE_EXT] = { cmd_write_pio, HD_CFA_OK },
2075 [WIN_WRITEDMA_EXT] = { cmd_write_dma, HD_CFA_OK },
2076 [CFA_WRITE_SECT_WO_ERASE] = { cmd_write_pio, CFA_OK },
2077 [WIN_MULTWRITE_EXT] = { cmd_write_multiple, HD_CFA_OK },
2078 [WIN_WRITE_VERIFY] = { cmd_write_pio, HD_CFA_OK },
2079 [WIN_VERIFY] = { cmd_verify, HD_CFA_OK | SET_DSC },
2080 [WIN_VERIFY_ONCE] = { cmd_verify, HD_CFA_OK | SET_DSC },
2081 [WIN_VERIFY_EXT] = { cmd_verify, HD_CFA_OK | SET_DSC },
2082 [WIN_SEEK] = { cmd_seek, HD_CFA_OK | SET_DSC },
2083 [CFA_TRANSLATE_SECTOR] = { cmd_cfa_translate_sector, CFA_OK },
2084 [WIN_DIAGNOSE] = { cmd_exec_dev_diagnostic, ALL_OK },
2085 [WIN_SPECIFY] = { cmd_specify, HD_CFA_OK | SET_DSC },
2086 [WIN_STANDBYNOW2] = { cmd_nop, HD_CFA_OK },
2087 [WIN_IDLEIMMEDIATE2] = { cmd_nop, HD_CFA_OK },
2088 [WIN_STANDBY2] = { cmd_nop, HD_CFA_OK },
2089 [WIN_SETIDLE2] = { cmd_nop, HD_CFA_OK },
2090 [WIN_CHECKPOWERMODE2] = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2091 [WIN_SLEEPNOW2] = { cmd_nop, HD_CFA_OK },
2092 [WIN_PACKETCMD] = { cmd_packet, CD_OK },
2093 [WIN_PIDENTIFY] = { cmd_identify_packet, CD_OK },
2094 [WIN_SMART] = { cmd_smart, HD_CFA_OK | SET_DSC },
2095 [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
2096 [CFA_ERASE_SECTORS] = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
2097 [WIN_MULTREAD] = { cmd_read_multiple, HD_CFA_OK },
2098 [WIN_MULTWRITE] = { cmd_write_multiple, HD_CFA_OK },
2099 [WIN_SETMULT] = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
2100 [WIN_READDMA] = { cmd_read_dma, HD_CFA_OK },
2101 [WIN_READDMA_ONCE] = { cmd_read_dma, HD_CFA_OK },
2102 [WIN_WRITEDMA] = { cmd_write_dma, HD_CFA_OK },
2103 [WIN_WRITEDMA_ONCE] = { cmd_write_dma, HD_CFA_OK },
2104 [CFA_WRITE_MULTI_WO_ERASE] = { cmd_write_multiple, CFA_OK },
2105 [WIN_STANDBYNOW1] = { cmd_nop, HD_CFA_OK },
2106 [WIN_IDLEIMMEDIATE] = { cmd_nop, HD_CFA_OK },
2107 [WIN_STANDBY] = { cmd_nop, HD_CFA_OK },
2108 [WIN_SETIDLE1] = { cmd_nop, HD_CFA_OK },
2109 [WIN_CHECKPOWERMODE1] = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2110 [WIN_SLEEPNOW1] = { cmd_nop, HD_CFA_OK },
2111 [WIN_FLUSH_CACHE] = { cmd_flush_cache, ALL_OK },
2112 [WIN_FLUSH_CACHE_EXT] = { cmd_flush_cache, HD_CFA_OK },
2113 [WIN_IDENTIFY] = { cmd_identify, ALL_OK },
2114 [WIN_SETFEATURES] = { cmd_set_features, ALL_OK | SET_DSC },
2115 [IBM_SENSE_CONDITION] = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
2116 [CFA_WEAR_LEVEL] = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
2117 [WIN_READ_NATIVE_MAX] = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2120 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
2122 return cmd < ARRAY_SIZE(ide_cmd_table)
2123 && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
2126 void ide_bus_exec_cmd(IDEBus *bus, uint32_t val)
2128 IDEState *s;
2129 bool complete;
2131 s = ide_bus_active_if(bus);
2132 trace_ide_bus_exec_cmd(bus, s, val);
2134 /* ignore commands to non existent slave */
2135 if (s != bus->ifs && !s->blk) {
2136 return;
2139 /* Only RESET is allowed while BSY and/or DRQ are set,
2140 * and only to ATAPI devices. */
2141 if (s->status & (BUSY_STAT|DRQ_STAT)) {
2142 if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
2143 return;
2147 if (!ide_cmd_permitted(s, val)) {
2148 ide_abort_command(s);
2149 ide_bus_set_irq(s->bus);
2150 return;
2153 s->status = READY_STAT | BUSY_STAT;
2154 s->error = 0;
2155 s->io_buffer_offset = 0;
2157 complete = ide_cmd_table[val].handler(s, val);
2158 if (complete) {
2159 s->status &= ~BUSY_STAT;
2160 assert(!!s->error == !!(s->status & ERR_STAT));
2162 if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
2163 s->status |= SEEK_STAT;
2166 ide_cmd_done(s);
2167 ide_bus_set_irq(s->bus);
/* IOport [R]ead [R]egisters */
enum ATA_IOPORT_RR {
    ATA_IOPORT_RR_DATA = 0,
    ATA_IOPORT_RR_ERROR = 1,
    ATA_IOPORT_RR_SECTOR_COUNT = 2,
    ATA_IOPORT_RR_SECTOR_NUMBER = 3,
    ATA_IOPORT_RR_CYLINDER_LOW = 4,
    ATA_IOPORT_RR_CYLINDER_HIGH = 5,
    ATA_IOPORT_RR_DEVICE_HEAD = 6,
    ATA_IOPORT_RR_STATUS = 7,
    ATA_IOPORT_RR_NUM_REGISTERS,
};
2184 const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = {
2185 [ATA_IOPORT_RR_DATA] = "Data",
2186 [ATA_IOPORT_RR_ERROR] = "Error",
2187 [ATA_IOPORT_RR_SECTOR_COUNT] = "Sector Count",
2188 [ATA_IOPORT_RR_SECTOR_NUMBER] = "Sector Number",
2189 [ATA_IOPORT_RR_CYLINDER_LOW] = "Cylinder Low",
2190 [ATA_IOPORT_RR_CYLINDER_HIGH] = "Cylinder High",
2191 [ATA_IOPORT_RR_DEVICE_HEAD] = "Device/Head",
2192 [ATA_IOPORT_RR_STATUS] = "Status"
2195 uint32_t ide_ioport_read(void *opaque, uint32_t addr)
2197 IDEBus *bus = opaque;
2198 IDEState *s = ide_bus_active_if(bus);
2199 uint32_t reg_num;
2200 int ret, hob;
2202 reg_num = addr & 7;
2203 hob = bus->cmd & (IDE_CTRL_HOB);
2204 switch (reg_num) {
2205 case ATA_IOPORT_RR_DATA:
2207 * The pre-GRUB Solaris x86 bootloader relies upon inb
2208 * consuming a word from the drive's sector buffer.
2210 ret = ide_data_readw(bus, addr) & 0xff;
2211 break;
2212 case ATA_IOPORT_RR_ERROR:
2213 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2214 (s != bus->ifs && !s->blk)) {
2215 ret = 0;
2216 } else if (!hob) {
2217 ret = s->error;
2218 } else {
2219 ret = s->hob_feature;
2221 break;
2222 case ATA_IOPORT_RR_SECTOR_COUNT:
2223 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2224 ret = 0;
2225 } else if (!hob) {
2226 ret = s->nsector & 0xff;
2227 } else {
2228 ret = s->hob_nsector;
2230 break;
2231 case ATA_IOPORT_RR_SECTOR_NUMBER:
2232 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2233 ret = 0;
2234 } else if (!hob) {
2235 ret = s->sector;
2236 } else {
2237 ret = s->hob_sector;
2239 break;
2240 case ATA_IOPORT_RR_CYLINDER_LOW:
2241 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2242 ret = 0;
2243 } else if (!hob) {
2244 ret = s->lcyl;
2245 } else {
2246 ret = s->hob_lcyl;
2248 break;
2249 case ATA_IOPORT_RR_CYLINDER_HIGH:
2250 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2251 ret = 0;
2252 } else if (!hob) {
2253 ret = s->hcyl;
2254 } else {
2255 ret = s->hob_hcyl;
2257 break;
2258 case ATA_IOPORT_RR_DEVICE_HEAD:
2259 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2260 ret = 0;
2261 } else {
2262 ret = s->select;
2264 break;
2265 default:
2266 case ATA_IOPORT_RR_STATUS:
2267 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2268 (s != bus->ifs && !s->blk)) {
2269 ret = 0;
2270 } else {
2271 ret = s->status;
2273 qemu_irq_lower(bus->irq);
2274 break;
2277 trace_ide_ioport_read(addr, ATA_IOPORT_RR_lookup[reg_num], ret, bus, s);
2278 return ret;
2281 uint32_t ide_status_read(void *opaque, uint32_t addr)
2283 IDEBus *bus = opaque;
2284 IDEState *s = ide_bus_active_if(bus);
2285 int ret;
2287 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2288 (s != bus->ifs && !s->blk)) {
2289 ret = 0;
2290 } else {
2291 ret = s->status;
2294 trace_ide_status_read(addr, ret, bus, s);
2295 return ret;
/*
 * Soft-reset (SRST) a single drive: stop any transfer in flight, reset
 * register state and run the power-on diagnostic.  The steps are order
 * sensitive: PIO is halted before DMA is drained, and the register reset
 * happens only after all callbacks have run.
 */
static void ide_perform_srst(IDEState *s)
{
    s->status |= BUSY_STAT;

    /* Halt PIO (Via register state); PIO BH remains scheduled. */
    ide_transfer_halt(s);

    /* Cancel DMA -- may drain block device and invoke callbacks */
    ide_cancel_dma_sync(s);

    /* Cancel PIO callback, reset registers/signature, etc */
    ide_reset(s);

    /* perform diagnostic */
    cmd_exec_dev_diagnostic(s, WIN_DIAGNOSE);
}
2315 static void ide_bus_perform_srst(void *opaque)
2317 IDEBus *bus = opaque;
2318 IDEState *s;
2319 int i;
2321 for (i = 0; i < 2; i++) {
2322 s = &bus->ifs[i];
2323 ide_perform_srst(s);
2326 bus->cmd &= ~IDE_CTRL_RESET;
2329 void ide_ctrl_write(void *opaque, uint32_t addr, uint32_t val)
2331 IDEBus *bus = opaque;
2332 IDEState *s;
2333 int i;
2335 trace_ide_ctrl_write(addr, val, bus);
2337 /* Device0 and Device1 each have their own control register,
2338 * but QEMU models it as just one register in the controller. */
2339 if (!(bus->cmd & IDE_CTRL_RESET) && (val & IDE_CTRL_RESET)) {
2340 for (i = 0; i < 2; i++) {
2341 s = &bus->ifs[i];
2342 s->status |= BUSY_STAT;
2344 replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
2345 ide_bus_perform_srst, bus);
2348 bus->cmd = val;
2352 * Returns true if the running PIO transfer is a PIO out (i.e. data is
2353 * transferred from the device to the guest), false if it's a PIO in
2355 static bool ide_is_pio_out(IDEState *s)
2357 if (s->end_transfer_func == ide_sector_write ||
2358 s->end_transfer_func == ide_atapi_cmd) {
2359 return false;
2360 } else if (s->end_transfer_func == ide_sector_read ||
2361 s->end_transfer_func == ide_transfer_stop ||
2362 s->end_transfer_func == ide_atapi_cmd_reply_end ||
2363 s->end_transfer_func == ide_dummy_transfer_stop) {
2364 return true;
2367 abort();
2370 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2372 IDEBus *bus = opaque;
2373 IDEState *s = ide_bus_active_if(bus);
2374 uint8_t *p;
2376 trace_ide_data_writew(addr, val, bus, s);
2378 /* PIO data access allowed only when DRQ bit is set. The result of a write
2379 * during PIO out is indeterminate, just ignore it. */
2380 if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2381 return;
2384 p = s->data_ptr;
2385 if (s->io8) {
2386 if (p + 1 > s->data_end) {
2387 return;
2390 *p++ = val;
2391 } else {
2392 if (p + 2 > s->data_end) {
2393 return;
2396 *(uint16_t *)p = le16_to_cpu(val);
2397 p += 2;
2399 s->data_ptr = p;
2400 if (p >= s->data_end) {
2401 s->status &= ~DRQ_STAT;
2402 s->end_transfer_func(s);
2406 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2408 IDEBus *bus = opaque;
2409 IDEState *s = ide_bus_active_if(bus);
2410 uint8_t *p;
2411 int ret;
2413 /* PIO data access allowed only when DRQ bit is set. The result of a read
2414 * during PIO in is indeterminate, return 0 and don't move forward. */
2415 if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2416 return 0;
2419 p = s->data_ptr;
2420 if (s->io8) {
2421 if (p + 1 > s->data_end) {
2422 return 0;
2425 ret = *p++;
2426 } else {
2427 if (p + 2 > s->data_end) {
2428 return 0;
2431 ret = cpu_to_le16(*(uint16_t *)p);
2432 p += 2;
2434 s->data_ptr = p;
2435 if (p >= s->data_end) {
2436 s->status &= ~DRQ_STAT;
2437 s->end_transfer_func(s);
2440 trace_ide_data_readw(addr, ret, bus, s);
2441 return ret;
2444 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2446 IDEBus *bus = opaque;
2447 IDEState *s = ide_bus_active_if(bus);
2448 uint8_t *p;
2450 trace_ide_data_writel(addr, val, bus, s);
2452 /* PIO data access allowed only when DRQ bit is set. The result of a write
2453 * during PIO out is indeterminate, just ignore it. */
2454 if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2455 return;
2458 p = s->data_ptr;
2459 if (p + 4 > s->data_end) {
2460 return;
2463 *(uint32_t *)p = le32_to_cpu(val);
2464 p += 4;
2465 s->data_ptr = p;
2466 if (p >= s->data_end) {
2467 s->status &= ~DRQ_STAT;
2468 s->end_transfer_func(s);
2472 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2474 IDEBus *bus = opaque;
2475 IDEState *s = ide_bus_active_if(bus);
2476 uint8_t *p;
2477 int ret;
2479 /* PIO data access allowed only when DRQ bit is set. The result of a read
2480 * during PIO in is indeterminate, return 0 and don't move forward. */
2481 if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2482 ret = 0;
2483 goto out;
2486 p = s->data_ptr;
2487 if (p + 4 > s->data_end) {
2488 return 0;
2491 ret = cpu_to_le32(*(uint32_t *)p);
2492 p += 4;
2493 s->data_ptr = p;
2494 if (p >= s->data_end) {
2495 s->status &= ~DRQ_STAT;
2496 s->end_transfer_func(s);
2499 out:
2500 trace_ide_data_readl(addr, ret, bus, s);
2501 return ret;
2504 static void ide_dummy_transfer_stop(IDEState *s)
2506 s->data_ptr = s->io_buffer;
2507 s->data_end = s->io_buffer;
2508 s->io_buffer[0] = 0xff;
2509 s->io_buffer[1] = 0xff;
2510 s->io_buffer[2] = 0xff;
2511 s->io_buffer[3] = 0xff;
/*
 * Hard-reset an IDE bus: clear shared bus state, reset both drives,
 * cancel any in-flight DMA and finally reset the DMA provider.
 */
void ide_bus_reset(IDEBus *bus)
{
    bus->unit = 0;
    bus->cmd = 0;
    ide_reset(&bus->ifs[0]);
    ide_reset(&bus->ifs[1]);
    ide_clear_hob(bus);

    /* pending async DMA */
    if (bus->dma->aiocb) {
        trace_ide_bus_reset_aio();
        blk_aio_cancel(bus->dma->aiocb);
        bus->dma->aiocb = NULL;
    }

    /* reset dma provider too */
    if (bus->dma->ops->reset) {
        bus->dma->ops->reset(bus->dma);
    }
}
2535 static bool ide_cd_is_tray_open(void *opaque)
2537 return ((IDEState *)opaque)->tray_open;
2540 static bool ide_cd_is_medium_locked(void *opaque)
2542 return ((IDEState *)opaque)->tray_locked;
2545 static void ide_resize_cb(void *opaque)
2547 IDEState *s = opaque;
2548 uint64_t nb_sectors;
2550 if (!s->identify_set) {
2551 return;
2554 blk_get_geometry(s->blk, &nb_sectors);
2555 s->nb_sectors = nb_sectors;
2557 /* Update the identify data buffer. */
2558 if (s->drive_kind == IDE_CFATA) {
2559 ide_cfata_identify_size(s);
2560 } else {
2561 /* IDE_CD uses a different set of callbacks entirely. */
2562 assert(s->drive_kind != IDE_CD);
2563 ide_identify_size(s);
/* Block-layer callbacks for CD-ROM drives (media change, tray, locking). */
static const BlockDevOps ide_cd_block_ops = {
    .change_media_cb = ide_cd_change_cb,
    .eject_request_cb = ide_cd_eject_request_cb,
    .is_tray_open = ide_cd_is_tray_open,
    .is_medium_locked = ide_cd_is_medium_locked,
};
/* Block-layer callbacks for hard disks: only resize needs handling. */
static const BlockDevOps ide_hd_block_ops = {
    .resize_cb = ide_resize_cb,
};
2578 int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2579 const char *version, const char *serial, const char *model,
2580 uint64_t wwn,
2581 uint32_t cylinders, uint32_t heads, uint32_t secs,
2582 int chs_trans, Error **errp)
2584 uint64_t nb_sectors;
2586 s->blk = blk;
2587 s->drive_kind = kind;
2589 blk_get_geometry(blk, &nb_sectors);
2590 s->cylinders = cylinders;
2591 s->heads = s->drive_heads = heads;
2592 s->sectors = s->drive_sectors = secs;
2593 s->chs_trans = chs_trans;
2594 s->nb_sectors = nb_sectors;
2595 s->wwn = wwn;
2596 /* The SMART values should be preserved across power cycles
2597 but they aren't. */
2598 s->smart_enabled = 1;
2599 s->smart_autosave = 1;
2600 s->smart_errors = 0;
2601 s->smart_selftest_count = 0;
2602 if (kind == IDE_CD) {
2603 blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2604 } else {
2605 if (!blk_is_inserted(s->blk)) {
2606 error_setg(errp, "Device needs media, but drive is empty");
2607 return -1;
2609 if (!blk_is_writable(blk)) {
2610 error_setg(errp, "Can't use a read-only drive");
2611 return -1;
2613 blk_set_dev_ops(blk, &ide_hd_block_ops, s);
2615 if (serial) {
2616 pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
2617 } else {
2618 snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2619 "QM%05d", s->drive_serial);
2621 if (model) {
2622 pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
2623 } else {
2624 switch (kind) {
2625 case IDE_CD:
2626 strcpy(s->drive_model_str, "QEMU DVD-ROM");
2627 break;
2628 case IDE_CFATA:
2629 strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2630 break;
2631 default:
2632 strcpy(s->drive_model_str, "QEMU HARDDISK");
2633 break;
2637 if (version) {
2638 pstrcpy(s->version, sizeof(s->version), version);
2639 } else {
2640 pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2643 ide_reset(s);
2644 blk_iostatus_enable(blk);
2645 return 0;
/*
 * One-time initialisation of drive slot @unit on @bus: allocate the PIO/DMA
 * io_buffer and SMART self-test buffer, and create the sector-write timer.
 * Freed again by ide_exit().
 */
static void ide_init1(IDEBus *bus, int unit)
{
    /* Serial numbers are handed out globally, across all buses. */
    static int drive_serial = 1;
    IDEState *s = &bus->ifs[unit];

    s->bus = bus;
    s->unit = unit;
    s->drive_serial = drive_serial++;
    /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
    s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
    s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
    memset(s->io_buffer, 0, s->io_buffer_total_len);

    s->smart_selftest_data = blk_blockalign(s->blk, 512);
    memset(s->smart_selftest_data, 0, 512);

    s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                         ide_sector_write_timer_cb, s);
}
/* Default rw_buf callback for the no-op DMA provider: always returns 0. */
static int ide_nop_int(const IDEDMA *dma, bool is_write)
{
    return 0;
}
/* Default restart_dma callback for the no-op DMA provider: does nothing. */
static void ide_nop(const IDEDMA *dma)
{
}
/* Default prepare_buf callback for the no-op DMA provider: 0 bytes ready. */
static int32_t ide_nop_int32(const IDEDMA *dma, int32_t l)
{
    return 0;
}
/* DMA provider used when the controller offers no real DMA engine. */
static const IDEDMAOps ide_dma_nop_ops = {
    .prepare_buf = ide_nop_int32,
    .restart_dma = ide_nop,
    .rw_buf = ide_nop_int,
};
/*
 * Re-arm a DMA transfer from the retry state stashed on the bus
 * (retry_unit/retry_sector_num/retry_nsector) and restart it.
 */
static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
{
    s->unit = s->bus->retry_unit;
    ide_set_sector(s, s->bus->retry_sector_num);
    s->nsector = s->bus->retry_nsector;
    /* Let the HBA rebuild its own transfer state first. */
    s->bus->dma->ops->restart_dma(s->bus->dma);
    s->io_buffer_size = 0;
    s->dma_cmd = dma_cmd;
    ide_start_dma(s, ide_dma_cb);
}
/*
 * Bottom half that resubmits a request which failed with a retryable
 * error (e.g. rerror/werror=stop) once the VM is running again.
 * Dispatches on the saved error_status flags to restart the right kind
 * of operation: HBA kick, DMA, PIO, flush or ATAPI DMA.
 */
static void ide_restart_bh(void *opaque)
{
    IDEBus *bus = opaque;
    IDEState *s;
    bool is_read;
    int error_status;

    qemu_bh_delete(bus->bh);
    bus->bh = NULL;

    error_status = bus->error_status;
    if (bus->error_status == 0) {
        /* Nothing pending to retry. */
        return;
    }

    s = ide_bus_active_if(bus);
    is_read = (bus->error_status & IDE_RETRY_READ) != 0;

    /* The error status must be cleared before resubmitting the request: The
     * request may fail again, and this case can only be distinguished if the
     * called function can set a new error status. */
    bus->error_status = 0;

    /* The HBA has generically asked to be kicked on retry */
    if (error_status & IDE_RETRY_HBA) {
        if (s->bus->dma->ops->restart) {
            s->bus->dma->ops->restart(s->bus->dma);
        }
    } else if (IS_IDE_RETRY_DMA(error_status)) {
        if (error_status & IDE_RETRY_TRIM) {
            ide_restart_dma(s, IDE_DMA_TRIM);
        } else {
            ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
        }
    } else if (IS_IDE_RETRY_PIO(error_status)) {
        if (is_read) {
            ide_sector_read(s);
        } else {
            ide_sector_write(s);
        }
    } else if (error_status & IDE_RETRY_FLUSH) {
        ide_flush_cache(s);
    } else if (IS_IDE_RETRY_ATAPI(error_status)) {
        assert(s->end_transfer_func == ide_atapi_cmd);
        ide_atapi_dma_restart(s);
    } else {
        /* Unknown retry flag combination: state is corrupted. */
        abort();
    }
}
2749 static void ide_restart_cb(void *opaque, bool running, RunState state)
2751 IDEBus *bus = opaque;
2753 if (!running)
2754 return;
2756 if (!bus->bh) {
2757 bus->bh = qemu_bh_new(ide_restart_bh, bus);
2758 qemu_bh_schedule(bus->bh);
/*
 * Register the resume handler for this bus.  Only meaningful when the
 * DMA provider can actually restart a transfer (restart_dma != NULL).
 */
void ide_bus_register_restart_cb(IDEBus *bus)
{
    if (bus->dma->ops->restart_dma) {
        bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
    }
}
/* Placeholder DMA instance installed until a controller provides one. */
static IDEDMA ide_dma_nop = {
    .ops = &ide_dma_nop_ops,
    .aiocb = NULL,
};
2774 void ide_bus_init_output_irq(IDEBus *bus, qemu_irq irq_out)
2776 int i;
2778 for(i = 0; i < 2; i++) {
2779 ide_init1(bus, i);
2780 ide_reset(&bus->ifs[i]);
2782 bus->irq = irq_out;
2783 bus->dma = &ide_dma_nop;
/* Raise the bus IRQ line unless the guest masked interrupts (nIEN set). */
void ide_bus_set_irq(IDEBus *bus)
{
    if (!(bus->cmd & IDE_CTRL_DISABLE_IRQ)) {
        qemu_irq_raise(bus->irq);
    }
}
/* Release the per-drive resources allocated by ide_init1(). */
void ide_exit(IDEState *s)
{
    timer_free(s->sector_write_timer);
    qemu_vfree(s->smart_selftest_data);
    qemu_vfree(s->io_buffer);
}
2800 static bool is_identify_set(void *opaque, int version_id)
2802 IDEState *s = opaque;
2804 return s->identify_set != 0;
/*
 * All possible end_transfer_func values, so the function pointer can be
 * migrated as a small index (see ide_drive_pio_pre_save/post_load).
 * Do not reorder: the index is part of the migration stream format.
 */
static EndTransferFunc* transfer_end_table[] = {
    ide_sector_read,
    ide_sector_write,
    ide_transfer_stop,
    ide_atapi_cmd_reply_end,
    ide_atapi_cmd,
    ide_dummy_transfer_stop,
};
2816 static int transfer_end_table_idx(EndTransferFunc *fn)
2818 int i;
2820 for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2821 if (transfer_end_table[i] == fn)
2822 return i;
2824 return -1;
/*
 * Post-load hook for the main drive section: re-apply the guest's write
 * cache setting to the block backend.
 * NOTE(review): this reads a bit out of the raw identify buffer at byte
 * index 85 -- presumably the write-cache-enable bit maintained by the
 * SET FEATURES handler; confirm the indexing against that code.
 */
static int ide_drive_post_load(void *opaque, int version_id)
{
    IDEState *s = opaque;

    if (s->blk && s->identify_set) {
        blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
    }
    return 0;
}
/*
 * Post-load hook for the PIO subsection: validate the migrated function
 * index and rebuild the data-window pointers from the saved offsets.
 */
static int ide_drive_pio_post_load(void *opaque, int version_id)
{
    IDEState *s = opaque;

    /* Reject streams carrying an out-of-range function index. */
    if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
        return -EINVAL;
    }
    s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
    /* Reconstruct data_ptr/data_end inside the freshly allocated io_buffer. */
    s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
    s->data_end = s->data_ptr + s->cur_io_buffer_len;
    s->atapi_dma = s->feature & 1; /* as per cmd_packet */

    return 0;
}
/*
 * Pre-save hook for the PIO subsection: convert the data-window pointers
 * and the end_transfer_func pointer into migratable offsets/indices.
 */
static int ide_drive_pio_pre_save(void *opaque)
{
    IDEState *s = opaque;
    int idx;

    s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
    s->cur_io_buffer_len = s->data_end - s->data_ptr;

    idx = transfer_end_table_idx(s->end_transfer_func);
    if (idx == -1) {
        fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
                __func__);
        /* Fall back to index 2 == ide_transfer_stop (see transfer_end_table). */
        s->end_transfer_fn_idx = 2;
    } else {
        s->end_transfer_fn_idx = idx;
    }

    return 0;
}
/*
 * Subsection predicate: PIO state is only migrated while a transfer is in
 * flight (DRQ set) or a PIO retry is pending.
 */
static bool ide_drive_pio_state_needed(void *opaque)
{
    IDEState *s = opaque;

    return ((s->status & DRQ_STAT) != 0)
        || (s->bus->error_status & IDE_RETRY_PIO);
}
2880 static bool ide_tray_state_needed(void *opaque)
2882 IDEState *s = opaque;
2884 return s->tray_open || s->tray_locked;
2887 static bool ide_atapi_gesn_needed(void *opaque)
2889 IDEState *s = opaque;
2891 return s->events.new_media || s->events.eject_request;
2894 static bool ide_error_needed(void *opaque)
2896 IDEBus *bus = opaque;
2898 return (bus->error_status != 0);
/* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
static const VMStateDescription vmstate_ide_atapi_gesn_state = {
    .name ="ide_drive/atapi/gesn_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_atapi_gesn_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(events.new_media, IDEState),
        VMSTATE_BOOL(events.eject_request, IDEState),
        VMSTATE_END_OF_LIST()
    }
};
/* CD-ROM tray open/locked state (subsection of vmstate_ide_drive). */
static const VMStateDescription vmstate_ide_tray_state = {
    .name = "ide_drive/tray_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_tray_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(tray_open, IDEState),
        VMSTATE_BOOL(tray_locked, IDEState),
        VMSTATE_END_OF_LIST()
    }
};
/*
 * In-flight PIO transfer state (subsection of vmstate_ide_drive); the
 * pre_save/post_load hooks translate between pointers and offsets/indices.
 */
static const VMStateDescription vmstate_ide_drive_pio_state = {
    .name = "ide_drive/pio_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = ide_drive_pio_pre_save,
    .post_load = ide_drive_pio_post_load,
    .needed = ide_drive_pio_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(req_nb_sectors, IDEState),
        /* The whole io_buffer travels with the stream. */
        VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
                             vmstate_info_uint8, uint8_t),
        VMSTATE_INT32(cur_io_buffer_offset, IDEState),
        VMSTATE_INT32(cur_io_buffer_len, IDEState),
        VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
        VMSTATE_INT32(elementary_transfer_size, IDEState),
        VMSTATE_INT32(packet_transfer_size, IDEState),
        VMSTATE_END_OF_LIST()
    }
};
/* Main per-drive migration state; conditional parts live in subsections. */
const VMStateDescription vmstate_ide_drive = {
    .name = "ide_drive",
    .version_id = 3,
    .minimum_version_id = 0,
    .post_load = ide_drive_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(mult_sectors, IDEState),
        VMSTATE_INT32(identify_set, IDEState),
        /* Only sent once IDENTIFY data exists (see is_identify_set). */
        VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
        VMSTATE_UINT8(feature, IDEState),
        VMSTATE_UINT8(error, IDEState),
        VMSTATE_UINT32(nsector, IDEState),
        VMSTATE_UINT8(sector, IDEState),
        VMSTATE_UINT8(lcyl, IDEState),
        VMSTATE_UINT8(hcyl, IDEState),
        VMSTATE_UINT8(hob_feature, IDEState),
        VMSTATE_UINT8(hob_sector, IDEState),
        VMSTATE_UINT8(hob_nsector, IDEState),
        VMSTATE_UINT8(hob_lcyl, IDEState),
        VMSTATE_UINT8(hob_hcyl, IDEState),
        VMSTATE_UINT8(select, IDEState),
        VMSTATE_UINT8(status, IDEState),
        VMSTATE_UINT8(lba48, IDEState),
        VMSTATE_UINT8(sense_key, IDEState),
        VMSTATE_UINT8(asc, IDEState),
        /* Added in stream version 3. */
        VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_ide_drive_pio_state,
        &vmstate_ide_tray_state,
        &vmstate_ide_atapi_gesn_state,
        NULL
    }
};
/* Pending retry state of a bus (subsection of vmstate_ide_bus). */
static const VMStateDescription vmstate_ide_error_status = {
    .name ="ide_bus/error",
    .version_id = 2,
    .minimum_version_id = 1,
    .needed = ide_error_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(error_status, IDEBus),
        /* Retry bookkeeping, added in stream version 2. */
        VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
        VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
        VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
        VMSTATE_END_OF_LIST()
    }
};
/* Per-bus migration state: control register and selected unit. */
const VMStateDescription vmstate_ide_bus = {
    .name = "ide_bus",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(cmd, IDEBus),
        VMSTATE_UINT8(unit, IDEBus),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_ide_error_status,
        NULL
    }
};
3011 void ide_drive_get(DriveInfo **hd, int n)
3013 int i;
3015 for (i = 0; i < n; i++) {
3016 hd[i] = drive_get_by_index(IF_IDE, i);