ide: fix ATAPI command permissions
[qemu/ar7.git] / hw/ide/core.c
blob 1cc6945d80a8a8fbb2cc50657e57733083c13b66
1 /*
2 * QEMU IDE disk and CD/DVD-ROM Emulator
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2006 Openedhand Ltd.
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
25 #include <hw/hw.h>
26 #include <hw/i386/pc.h>
27 #include <hw/pci/pci.h>
28 #include <hw/isa/isa.h>
29 #include "qemu/error-report.h"
30 #include "qemu/timer.h"
31 #include "sysemu/sysemu.h"
32 #include "sysemu/dma.h"
33 #include "hw/block/block.h"
34 #include "sysemu/block-backend.h"
36 #include <hw/ide/internal.h>
38 /* These values were based on a Seagate ST3500418AS but have been modified
39 to make more sense in QEMU */
40 static const int smart_attributes[][12] = {
41 /* id, flags, hflags, val, wrst, raw (6 bytes), threshold */
42 /* raw read error rate*/
43 { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
44 /* spin up */
45 { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
46 /* start stop count */
47 { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
48 /* remapped sectors */
49 { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
50 /* power on hours */
51 { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
52 /* power cycle count */
53 { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
54 /* airflow-temperature-celsius */
55 { 190, 0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
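/* Each row above is one 12-byte attribute slot: byte 0 is the attribute id,
 * bytes 3/4 the normalized current/worst values (0x64 == 100) and byte 11 the
 * failure threshold.  cmd_smart() below copies bytes 0..10 of a row into the
 * SMART_READ_DATA page and only the id/threshold pair for SMART_READ_THRESH. */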
58 static int ide_handle_rw_error(IDEState *s, int error, int op);
59 static void ide_dummy_transfer_stop(IDEState *s);
61 static void padstr(char *str, const char *src, int len)
63 int i, v;
64 for(i = 0; i < len; i++) {
65 if (*src)
66 v = *src++;
67 else
68 v = ' ';
69 str[i^1] = v;
73 static void put_le16(uint16_t *p, unsigned int v)
75 *p = cpu_to_le16(v);
78 static void ide_identify_size(IDEState *s)
80 uint16_t *p = (uint16_t *)s->identify_data;
81 put_le16(p + 60, s->nb_sectors);
82 put_le16(p + 61, s->nb_sectors >> 16);
83 put_le16(p + 100, s->nb_sectors);
84 put_le16(p + 101, s->nb_sectors >> 16);
85 put_le16(p + 102, s->nb_sectors >> 32);
86 put_le16(p + 103, s->nb_sectors >> 48);
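/* Words 60/61 hold the 28-bit LBA capacity and words 100..103 the 48-bit
 * capacity, low 16-bit word first.  For example, nb_sectors == 0x12345678
 * stores 0x5678 in word 60, 0x1234 in word 61, the same pair in words
 * 100/101, and zeroes in words 102/103. */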
89 static void ide_identify(IDEState *s)
91 uint16_t *p;
92 unsigned int oldsize;
93 IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
95 p = (uint16_t *)s->identify_data;
96 if (s->identify_set) {
97 goto fill_buffer;
99 memset(p, 0, sizeof(s->identify_data));
101 put_le16(p + 0, 0x0040);
102 put_le16(p + 1, s->cylinders);
103 put_le16(p + 3, s->heads);
104 put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
105 put_le16(p + 5, 512); /* XXX: retired, remove ? */
106 put_le16(p + 6, s->sectors);
107 padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
108 put_le16(p + 20, 3); /* XXX: retired, remove ? */
109 put_le16(p + 21, 512); /* cache size in sectors */
110 put_le16(p + 22, 4); /* ecc bytes */
111 padstr((char *)(p + 23), s->version, 8); /* firmware version */
112 padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
113 #if MAX_MULT_SECTORS > 1
114 put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
115 #endif
116 put_le16(p + 48, 1); /* dword I/O */
117 put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
118 put_le16(p + 51, 0x200); /* PIO transfer cycle */
119 put_le16(p + 52, 0x200); /* DMA transfer cycle */
120 put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
121 put_le16(p + 54, s->cylinders);
122 put_le16(p + 55, s->heads);
123 put_le16(p + 56, s->sectors);
124 oldsize = s->cylinders * s->heads * s->sectors;
125 put_le16(p + 57, oldsize);
126 put_le16(p + 58, oldsize >> 16);
127 if (s->mult_sectors)
128 put_le16(p + 59, 0x100 | s->mult_sectors);
129 /* *(p + 60) := nb_sectors -- see ide_identify_size */
130 /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
131 put_le16(p + 62, 0x07); /* single word dma0-2 supported */
132 put_le16(p + 63, 0x07); /* mdma0-2 supported */
133 put_le16(p + 64, 0x03); /* pio3-4 supported */
134 put_le16(p + 65, 120);
135 put_le16(p + 66, 120);
136 put_le16(p + 67, 120);
137 put_le16(p + 68, 120);
138 if (dev && dev->conf.discard_granularity) {
139 put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
142 if (s->ncq_queues) {
143 put_le16(p + 75, s->ncq_queues - 1);
144 /* NCQ supported */
145 put_le16(p + 76, (1 << 8));
148 put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
149 put_le16(p + 81, 0x16); /* conforms to ata5 */
150 /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
151 put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
152 /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
153 put_le16(p + 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
154 /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
155 if (s->wwn) {
156 put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
157 } else {
158 put_le16(p + 84, (1 << 14) | 0);
160 /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
161 if (blk_enable_write_cache(s->blk)) {
162 put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
163 } else {
164 put_le16(p + 85, (1 << 14) | 1);
166 /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
167 put_le16(p + 86, (1 << 13) | (1 <<12) | (1 << 10));
168 /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
169 if (s->wwn) {
170 put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
171 } else {
172 put_le16(p + 87, (1 << 14) | 0);
174 put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
175 put_le16(p + 93, 1 | (1 << 14) | 0x2000);
176 /* *(p + 100) := nb_sectors -- see ide_identify_size */
177 /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
178 /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
179 /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
181 if (dev && dev->conf.physical_block_size)
182 put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
183 if (s->wwn) {
184 /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
185 put_le16(p + 108, s->wwn >> 48);
186 put_le16(p + 109, s->wwn >> 32);
187 put_le16(p + 110, s->wwn >> 16);
188 put_le16(p + 111, s->wwn);
190 if (dev && dev->conf.discard_granularity) {
191 put_le16(p + 169, 1); /* TRIM support */
194 ide_identify_size(s);
195 s->identify_set = 1;
197 fill_buffer:
198 memcpy(s->io_buffer, p, sizeof(s->identify_data));
201 static void ide_atapi_identify(IDEState *s)
203 uint16_t *p;
205 p = (uint16_t *)s->identify_data;
206 if (s->identify_set) {
207 goto fill_buffer;
209 memset(p, 0, sizeof(s->identify_data));
211 /* Removable CDROM, 50us response, 12 byte packets */
212 put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
213 padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
214 put_le16(p + 20, 3); /* buffer type */
215 put_le16(p + 21, 512); /* cache size in sectors */
216 put_le16(p + 22, 4); /* ecc bytes */
217 padstr((char *)(p + 23), s->version, 8); /* firmware version */
218 padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
219 put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
220 #ifdef USE_DMA_CDROM
221 put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
222 put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
223 put_le16(p + 62, 7); /* single word dma0-2 supported */
224 put_le16(p + 63, 7); /* mdma0-2 supported */
225 #else
226 put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
227 put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
228 put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
229 #endif
230 put_le16(p + 64, 3); /* pio3-4 supported */
231 put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
232 put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
233 put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
234 put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
236 put_le16(p + 71, 30); /* in ns */
237 put_le16(p + 72, 30); /* in ns */
239 if (s->ncq_queues) {
240 put_le16(p + 75, s->ncq_queues - 1);
241 /* NCQ supported */
242 put_le16(p + 76, (1 << 8));
245 put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
246 if (s->wwn) {
247 put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
248 put_le16(p + 87, (1 << 8)); /* WWN enabled */
251 #ifdef USE_DMA_CDROM
252 put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
253 #endif
255 if (s->wwn) {
256 /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
257 put_le16(p + 108, s->wwn >> 48);
258 put_le16(p + 109, s->wwn >> 32);
259 put_le16(p + 110, s->wwn >> 16);
260 put_le16(p + 111, s->wwn);
263 s->identify_set = 1;
265 fill_buffer:
266 memcpy(s->io_buffer, p, sizeof(s->identify_data));
269 static void ide_cfata_identify_size(IDEState *s)
271 uint16_t *p = (uint16_t *)s->identify_data;
272 put_le16(p + 7, s->nb_sectors >> 16); /* Sectors per card */
273 put_le16(p + 8, s->nb_sectors); /* Sectors per card */
274 put_le16(p + 60, s->nb_sectors); /* Total LBA sectors */
275 put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
278 static void ide_cfata_identify(IDEState *s)
280 uint16_t *p;
281 uint32_t cur_sec;
283 p = (uint16_t *)s->identify_data;
284 if (s->identify_set) {
285 goto fill_buffer;
287 memset(p, 0, sizeof(s->identify_data));
289 cur_sec = s->cylinders * s->heads * s->sectors;
291 put_le16(p + 0, 0x848a); /* CF Storage Card signature */
292 put_le16(p + 1, s->cylinders); /* Default cylinders */
293 put_le16(p + 3, s->heads); /* Default heads */
294 put_le16(p + 6, s->sectors); /* Default sectors per track */
295 /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
296 /* *(p + 8) := nb_sectors -- see ide_cfata_identify_size */
297 padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
298 put_le16(p + 22, 0x0004); /* ECC bytes */
299 padstr((char *) (p + 23), s->version, 8); /* Firmware Revision */
300 padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
301 #if MAX_MULT_SECTORS > 1
302 put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
303 #else
304 put_le16(p + 47, 0x0000);
305 #endif
306 put_le16(p + 49, 0x0f00); /* Capabilities */
307 put_le16(p + 51, 0x0002); /* PIO cycle timing mode */
308 put_le16(p + 52, 0x0001); /* DMA cycle timing mode */
309 put_le16(p + 53, 0x0003); /* Translation params valid */
310 put_le16(p + 54, s->cylinders); /* Current cylinders */
311 put_le16(p + 55, s->heads); /* Current heads */
312 put_le16(p + 56, s->sectors); /* Current sectors */
313 put_le16(p + 57, cur_sec); /* Current capacity */
314 put_le16(p + 58, cur_sec >> 16); /* Current capacity */
315 if (s->mult_sectors) /* Multiple sector setting */
316 put_le16(p + 59, 0x100 | s->mult_sectors);
317 /* *(p + 60) := nb_sectors -- see ide_cfata_identify_size */
318 /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
319 put_le16(p + 63, 0x0203); /* Multiword DMA capability */
320 put_le16(p + 64, 0x0001); /* Flow Control PIO support */
321 put_le16(p + 65, 0x0096); /* Min. Multiword DMA cycle */
322 put_le16(p + 66, 0x0096); /* Rec. Multiword DMA cycle */
323 put_le16(p + 68, 0x00b4); /* Min. PIO cycle time */
324 put_le16(p + 82, 0x400c); /* Command Set supported */
325 put_le16(p + 83, 0x7068); /* Command Set supported */
326 put_le16(p + 84, 0x4000); /* Features supported */
327 put_le16(p + 85, 0x000c); /* Command Set enabled */
328 put_le16(p + 86, 0x7044); /* Command Set enabled */
329 put_le16(p + 87, 0x4000); /* Features enabled */
330 put_le16(p + 91, 0x4060); /* Current APM level */
331 put_le16(p + 129, 0x0002); /* Current features option */
332 put_le16(p + 130, 0x0005); /* Reassigned sectors */
333 put_le16(p + 131, 0x0001); /* Initial power mode */
334 put_le16(p + 132, 0x0000); /* User signature */
335 put_le16(p + 160, 0x8100); /* Power requirement */
336 put_le16(p + 161, 0x8001); /* CF command set */
338 ide_cfata_identify_size(s);
339 s->identify_set = 1;
341 fill_buffer:
342 memcpy(s->io_buffer, p, sizeof(s->identify_data));
345 static void ide_set_signature(IDEState *s)
347 s->select &= 0xf0; /* clear head */
348 /* put signature */
349 s->nsector = 1;
350 s->sector = 1;
351 if (s->drive_kind == IDE_CD) {
352 s->lcyl = 0x14;
353 s->hcyl = 0xeb;
354 } else if (s->blk) {
355 s->lcyl = 0;
356 s->hcyl = 0;
357 } else {
358 s->lcyl = 0xff;
359 s->hcyl = 0xff;
363 typedef struct TrimAIOCB {
364 BlockAIOCB common;
365 BlockBackend *blk;
366 QEMUBH *bh;
367 int ret;
368 QEMUIOVector *qiov;
369 BlockAIOCB *aiocb;
370 int i, j;
371 } TrimAIOCB;
373 static void trim_aio_cancel(BlockAIOCB *acb)
375 TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
377 /* Exit the loop so ide_issue_trim_cb will not continue */
378 iocb->j = iocb->qiov->niov - 1;
379 iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
381 iocb->ret = -ECANCELED;
383 if (iocb->aiocb) {
384 blk_aio_cancel_async(iocb->aiocb);
385 iocb->aiocb = NULL;
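/* Pushing i/j to the last entry means that when ide_issue_trim_cb runs again
 * it finds nothing left to submit, so it falls through and schedules the
 * completion bottom-half, which reports the -ECANCELED set above. */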
389 static const AIOCBInfo trim_aiocb_info = {
390 .aiocb_size = sizeof(TrimAIOCB),
391 .cancel_async = trim_aio_cancel,
394 static void ide_trim_bh_cb(void *opaque)
396 TrimAIOCB *iocb = opaque;
398 iocb->common.cb(iocb->common.opaque, iocb->ret);
400 qemu_bh_delete(iocb->bh);
401 iocb->bh = NULL;
402 qemu_aio_unref(iocb);
405 static void ide_issue_trim_cb(void *opaque, int ret)
407 TrimAIOCB *iocb = opaque;
408 if (ret >= 0) {
409 while (iocb->j < iocb->qiov->niov) {
410 int j = iocb->j;
411 while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
412 int i = iocb->i;
413 uint64_t *buffer = iocb->qiov->iov[j].iov_base;
415 /* 6-byte LBA + 2-byte range per entry */
416 uint64_t entry = le64_to_cpu(buffer[i]);
417 uint64_t sector = entry & 0x0000ffffffffffffULL;
418 uint16_t count = entry >> 48;
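/* e.g. an entry of 0x0008000000001000 asks to discard 8 sectors
 * starting at LBA 0x1000 */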
420 if (count == 0) {
421 continue;
424 /* Got an entry! Submit and exit. */
425 iocb->aiocb = blk_aio_discard(iocb->blk, sector, count,
426 ide_issue_trim_cb, opaque);
427 return;
430 iocb->j++;
431 iocb->i = -1;
433 } else {
434 iocb->ret = ret;
437 iocb->aiocb = NULL;
438 if (iocb->bh) {
439 qemu_bh_schedule(iocb->bh);
443 BlockAIOCB *ide_issue_trim(BlockBackend *blk,
444 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
445 BlockCompletionFunc *cb, void *opaque)
447 TrimAIOCB *iocb;
449 iocb = blk_aio_get(&trim_aiocb_info, blk, cb, opaque);
450 iocb->blk = blk;
451 iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
452 iocb->ret = 0;
453 iocb->qiov = qiov;
454 iocb->i = -1;
455 iocb->j = 0;
456 ide_issue_trim_cb(iocb, 0);
457 return &iocb->common;
460 static inline void ide_abort_command(IDEState *s)
462 ide_transfer_stop(s);
463 s->status = READY_STAT | ERR_STAT;
464 s->error = ABRT_ERR;
467 /* prepare data transfer and tell what to do after */
468 void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
469 EndTransferFunc *end_transfer_func)
471 s->end_transfer_func = end_transfer_func;
472 s->data_ptr = buf;
473 s->data_end = buf + size;
474 if (!(s->status & ERR_STAT)) {
475 s->status |= DRQ_STAT;
477 if (s->bus->dma->ops->start_transfer) {
478 s->bus->dma->ops->start_transfer(s->bus->dma);
482 static void ide_cmd_done(IDEState *s)
484 if (s->bus->dma->ops->cmd_done) {
485 s->bus->dma->ops->cmd_done(s->bus->dma);
489 void ide_transfer_stop(IDEState *s)
491 s->end_transfer_func = ide_transfer_stop;
492 s->data_ptr = s->io_buffer;
493 s->data_end = s->io_buffer;
494 s->status &= ~DRQ_STAT;
495 ide_cmd_done(s);
498 int64_t ide_get_sector(IDEState *s)
500 int64_t sector_num;
501 if (s->select & 0x40) {
502 /* lba */
503 if (!s->lba48) {
504 sector_num = ((s->select & 0x0f) << 24) | (s->hcyl << 16) |
505 (s->lcyl << 8) | s->sector;
506 } else {
507 sector_num = ((int64_t)s->hob_hcyl << 40) |
508 ((int64_t) s->hob_lcyl << 32) |
509 ((int64_t) s->hob_sector << 24) |
510 ((int64_t) s->hcyl << 16) |
511 ((int64_t) s->lcyl << 8) | s->sector;
513 } else {
514 sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
515 (s->select & 0x0f) * s->sectors + (s->sector - 1);
517 return sector_num;
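/* In CHS mode the translation is cyl * heads * sectors + head * sectors +
 * (sector - 1); e.g. with 16 heads and 63 sectors/track, cylinder 2, head 3,
 * sector 4 selects LBA 2*16*63 + 3*63 + 3 = 2208. */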
520 void ide_set_sector(IDEState *s, int64_t sector_num)
522 unsigned int cyl, r;
523 if (s->select & 0x40) {
524 if (!s->lba48) {
525 s->select = (s->select & 0xf0) | (sector_num >> 24);
526 s->hcyl = (sector_num >> 16);
527 s->lcyl = (sector_num >> 8);
528 s->sector = (sector_num);
529 } else {
530 s->sector = sector_num;
531 s->lcyl = sector_num >> 8;
532 s->hcyl = sector_num >> 16;
533 s->hob_sector = sector_num >> 24;
534 s->hob_lcyl = sector_num >> 32;
535 s->hob_hcyl = sector_num >> 40;
537 } else {
538 cyl = sector_num / (s->heads * s->sectors);
539 r = sector_num % (s->heads * s->sectors);
540 s->hcyl = cyl >> 8;
541 s->lcyl = cyl;
542 s->select = (s->select & 0xf0) | ((r / s->sectors) & 0x0f);
543 s->sector = (r % s->sectors) + 1;
547 static void ide_rw_error(IDEState *s) {
548 ide_abort_command(s);
549 ide_set_irq(s->bus);
552 static bool ide_sect_range_ok(IDEState *s,
553 uint64_t sector, uint64_t nb_sectors)
555 uint64_t total_sectors;
557 blk_get_geometry(s->blk, &total_sectors);
558 if (sector > total_sectors || nb_sectors > total_sectors - sector) {
559 return false;
561 return true;
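/* The check is phrased as nb_sectors > total_sectors - sector (rather than
 * sector + nb_sectors > total_sectors), presumably so that a huge nb_sectors
 * cannot wrap the 64-bit addition and slip past the test. */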
564 static void ide_sector_read(IDEState *s);
566 static void ide_sector_read_cb(void *opaque, int ret)
568 IDEState *s = opaque;
569 int n;
571 s->pio_aiocb = NULL;
572 s->status &= ~BUSY_STAT;
574 if (ret == -ECANCELED) {
575 return;
577 block_acct_done(blk_get_stats(s->blk), &s->acct);
578 if (ret != 0) {
579 if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
580 IDE_RETRY_READ)) {
581 return;
585 n = s->nsector;
586 if (n > s->req_nb_sectors) {
587 n = s->req_nb_sectors;
590 ide_set_sector(s, ide_get_sector(s) + n);
591 s->nsector -= n;
592 /* Allow the guest to read the io_buffer */
593 ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
594 ide_set_irq(s->bus);
597 static void ide_sector_read(IDEState *s)
599 int64_t sector_num;
600 int n;
602 s->status = READY_STAT | SEEK_STAT;
603 s->error = 0; /* not needed by IDE spec, but needed by Windows */
604 sector_num = ide_get_sector(s);
605 n = s->nsector;
607 if (n == 0) {
608 ide_transfer_stop(s);
609 return;
612 s->status |= BUSY_STAT;
614 if (n > s->req_nb_sectors) {
615 n = s->req_nb_sectors;
618 #if defined(DEBUG_IDE)
619 printf("sector=%" PRId64 "\n", sector_num);
620 #endif
622 if (!ide_sect_range_ok(s, sector_num, n)) {
623 ide_rw_error(s);
624 return;
627 s->iov.iov_base = s->io_buffer;
628 s->iov.iov_len = n * BDRV_SECTOR_SIZE;
629 qemu_iovec_init_external(&s->qiov, &s->iov, 1);
631 block_acct_start(blk_get_stats(s->blk), &s->acct,
632 n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
633 s->pio_aiocb = blk_aio_readv(s->blk, sector_num, &s->qiov, n,
634 ide_sector_read_cb, s);
637 void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
639 if (s->bus->dma->ops->commit_buf) {
640 s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
642 s->io_buffer_offset += tx_bytes;
643 qemu_sglist_destroy(&s->sg);
646 void ide_set_inactive(IDEState *s, bool more)
648 s->bus->dma->aiocb = NULL;
649 s->bus->retry_unit = -1;
650 s->bus->retry_sector_num = 0;
651 s->bus->retry_nsector = 0;
652 if (s->bus->dma->ops->set_inactive) {
653 s->bus->dma->ops->set_inactive(s->bus->dma, more);
655 ide_cmd_done(s);
658 void ide_dma_error(IDEState *s)
660 dma_buf_commit(s, 0);
661 ide_abort_command(s);
662 ide_set_inactive(s, false);
663 ide_set_irq(s->bus);
666 static int ide_handle_rw_error(IDEState *s, int error, int op)
668 bool is_read = (op & IDE_RETRY_READ) != 0;
669 BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
671 if (action == BLOCK_ERROR_ACTION_STOP) {
672 assert(s->bus->retry_unit == s->unit);
673 s->bus->error_status = op;
674 } else if (action == BLOCK_ERROR_ACTION_REPORT) {
675 if (op & IDE_RETRY_DMA) {
676 ide_dma_error(s);
677 } else {
678 ide_rw_error(s);
681 blk_error_action(s->blk, action, is_read, error);
682 return action != BLOCK_ERROR_ACTION_IGNORE;
685 static void ide_dma_cb(void *opaque, int ret)
687 IDEState *s = opaque;
688 int n;
689 int64_t sector_num;
690 bool stay_active = false;
692 if (ret == -ECANCELED) {
693 return;
695 if (ret < 0) {
696 int op = IDE_RETRY_DMA;
698 if (s->dma_cmd == IDE_DMA_READ)
699 op |= IDE_RETRY_READ;
700 else if (s->dma_cmd == IDE_DMA_TRIM)
701 op |= IDE_RETRY_TRIM;
703 if (ide_handle_rw_error(s, -ret, op)) {
704 return;
708 n = s->io_buffer_size >> 9;
709 if (n > s->nsector) {
710 /* The PRDs were longer than needed for this request. Shorten them so
711 * we don't get a negative remainder. The Active bit must remain set
712 * after the request completes. */
713 n = s->nsector;
714 stay_active = true;
717 sector_num = ide_get_sector(s);
718 if (n > 0) {
719 assert(n * 512 == s->sg.size);
720 dma_buf_commit(s, s->sg.size);
721 sector_num += n;
722 ide_set_sector(s, sector_num);
723 s->nsector -= n;
726 /* end of transfer ? */
727 if (s->nsector == 0) {
728 s->status = READY_STAT | SEEK_STAT;
729 ide_set_irq(s->bus);
730 goto eot;
733 /* launch next transfer */
734 n = s->nsector;
735 s->io_buffer_index = 0;
736 s->io_buffer_size = n * 512;
737 if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size) < 512) {
738 /* The PRDs were too short. Reset the Active bit, but don't raise an
739 * interrupt. */
740 s->status = READY_STAT | SEEK_STAT;
741 dma_buf_commit(s, 0);
742 goto eot;
745 #ifdef DEBUG_AIO
746 printf("ide_dma_cb: sector_num=%" PRId64 " n=%d, cmd_cmd=%d\n",
747 sector_num, n, s->dma_cmd);
748 #endif
750 if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
751 !ide_sect_range_ok(s, sector_num, n)) {
752 ide_dma_error(s);
753 return;
756 switch (s->dma_cmd) {
757 case IDE_DMA_READ:
758 s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, sector_num,
759 ide_dma_cb, s);
760 break;
761 case IDE_DMA_WRITE:
762 s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, sector_num,
763 ide_dma_cb, s);
764 break;
765 case IDE_DMA_TRIM:
766 s->bus->dma->aiocb = dma_blk_io(s->blk, &s->sg, sector_num,
767 ide_issue_trim, ide_dma_cb, s,
768 DMA_DIRECTION_TO_DEVICE);
769 break;
771 return;
773 eot:
774 if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
775 block_acct_done(blk_get_stats(s->blk), &s->acct);
777 ide_set_inactive(s, stay_active);
780 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
782 s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT;
783 s->io_buffer_size = 0;
784 s->dma_cmd = dma_cmd;
786 switch (dma_cmd) {
787 case IDE_DMA_READ:
788 block_acct_start(blk_get_stats(s->blk), &s->acct,
789 s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
790 break;
791 case IDE_DMA_WRITE:
792 block_acct_start(blk_get_stats(s->blk), &s->acct,
793 s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
794 break;
795 default:
796 break;
799 ide_start_dma(s, ide_dma_cb);
802 void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
804 s->io_buffer_index = 0;
805 s->bus->retry_unit = s->unit;
806 s->bus->retry_sector_num = ide_get_sector(s);
807 s->bus->retry_nsector = s->nsector;
808 if (s->bus->dma->ops->start_dma) {
809 s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
813 static void ide_sector_write(IDEState *s);
815 static void ide_sector_write_timer_cb(void *opaque)
817 IDEState *s = opaque;
818 ide_set_irq(s->bus);
821 static void ide_sector_write_cb(void *opaque, int ret)
823 IDEState *s = opaque;
824 int n;
826 if (ret == -ECANCELED) {
827 return;
829 block_acct_done(blk_get_stats(s->blk), &s->acct);
831 s->pio_aiocb = NULL;
832 s->status &= ~BUSY_STAT;
834 if (ret != 0) {
835 if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
836 return;
840 n = s->nsector;
841 if (n > s->req_nb_sectors) {
842 n = s->req_nb_sectors;
844 s->nsector -= n;
846 ide_set_sector(s, ide_get_sector(s) + n);
847 if (s->nsector == 0) {
848 /* no more sectors to write */
849 ide_transfer_stop(s);
850 } else {
851 int n1 = s->nsector;
852 if (n1 > s->req_nb_sectors) {
853 n1 = s->req_nb_sectors;
855 ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
856 ide_sector_write);
859 if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
860 /* It seems there is a bug in the Windows 2000 installer HDD
861 IDE driver which fills the disk with empty logs when the
862 IDE write IRQ comes too early. This hack tries to correct
863 that at the expense of slower write performance. Use this
864 option _only_ to install Windows 2000. You must disable it
865 for normal use. */
866 timer_mod(s->sector_write_timer,
867 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() / 1000));
868 } else {
869 ide_set_irq(s->bus);
873 static void ide_sector_write(IDEState *s)
875 int64_t sector_num;
876 int n;
878 s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
879 sector_num = ide_get_sector(s);
880 #if defined(DEBUG_IDE)
881 printf("sector=%" PRId64 "\n", sector_num);
882 #endif
883 n = s->nsector;
884 if (n > s->req_nb_sectors) {
885 n = s->req_nb_sectors;
888 if (!ide_sect_range_ok(s, sector_num, n)) {
889 ide_rw_error(s);
890 return;
893 s->iov.iov_base = s->io_buffer;
894 s->iov.iov_len = n * BDRV_SECTOR_SIZE;
895 qemu_iovec_init_external(&s->qiov, &s->iov, 1);
897 block_acct_start(blk_get_stats(s->blk), &s->acct,
898 n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
899 s->pio_aiocb = blk_aio_writev(s->blk, sector_num, &s->qiov, n,
900 ide_sector_write_cb, s);
903 static void ide_flush_cb(void *opaque, int ret)
905 IDEState *s = opaque;
907 s->pio_aiocb = NULL;
909 if (ret == -ECANCELED) {
910 return;
912 if (ret < 0) {
913 /* XXX: What sector number to set here? */
914 if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
915 return;
919 if (s->blk) {
920 block_acct_done(blk_get_stats(s->blk), &s->acct);
922 s->status = READY_STAT | SEEK_STAT;
923 ide_cmd_done(s);
924 ide_set_irq(s->bus);
927 static void ide_flush_cache(IDEState *s)
929 if (s->blk == NULL) {
930 ide_flush_cb(s, 0);
931 return;
934 s->status |= BUSY_STAT;
935 block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
936 s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
939 static void ide_cfata_metadata_inquiry(IDEState *s)
941 uint16_t *p;
942 uint32_t spd;
944 p = (uint16_t *) s->io_buffer;
945 memset(p, 0, 0x200);
946 spd = ((s->mdata_size - 1) >> 9) + 1;
948 put_le16(p + 0, 0x0001); /* Data format revision */
949 put_le16(p + 1, 0x0000); /* Media property: silicon */
950 put_le16(p + 2, s->media_changed); /* Media status */
951 put_le16(p + 3, s->mdata_size & 0xffff); /* Capacity in bytes (low) */
952 put_le16(p + 4, s->mdata_size >> 16); /* Capacity in bytes (high) */
953 put_le16(p + 5, spd & 0xffff); /* Sectors per device (low) */
954 put_le16(p + 6, spd >> 16); /* Sectors per device (high) */
957 static void ide_cfata_metadata_read(IDEState *s)
959 uint16_t *p;
961 if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
962 s->status = ERR_STAT;
963 s->error = ABRT_ERR;
964 return;
967 p = (uint16_t *) s->io_buffer;
968 memset(p, 0, 0x200);
970 put_le16(p + 0, s->media_changed); /* Media status */
971 memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
972 MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
973 s->nsector << 9), 0x200 - 2));
976 static void ide_cfata_metadata_write(IDEState *s)
978 if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
979 s->status = ERR_STAT;
980 s->error = ABRT_ERR;
981 return;
984 s->media_changed = 0;
986 memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
987 s->io_buffer + 2,
988 MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
989 s->nsector << 9), 0x200 - 2));
992 /* called when the inserted state of the media has changed */
993 static void ide_cd_change_cb(void *opaque, bool load)
995 IDEState *s = opaque;
996 uint64_t nb_sectors;
998 s->tray_open = !load;
999 blk_get_geometry(s->blk, &nb_sectors);
1000 s->nb_sectors = nb_sectors;
1003 * First indicate to the guest that a CD has been removed. That's
1004 * done on the next command the guest sends us.
1006 * Then we set UNIT_ATTENTION, by which the guest will
1007 * detect a new CD in the drive. See ide_atapi_cmd() for details.
1009 s->cdrom_changed = 1;
1010 s->events.new_media = true;
1011 s->events.eject_request = false;
1012 ide_set_irq(s->bus);
1015 static void ide_cd_eject_request_cb(void *opaque, bool force)
1017 IDEState *s = opaque;
1019 s->events.eject_request = true;
1020 if (force) {
1021 s->tray_locked = false;
1023 ide_set_irq(s->bus);
1026 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1028 s->lba48 = lba48;
1030 /* handle the 'magic' 0 nsector count conversion here. to avoid
1031 * fiddling with the rest of the read logic, we just store the
1032 * full sector count in ->nsector and ignore ->hob_nsector from now on */
1034 if (!s->lba48) {
1035 if (!s->nsector)
1036 s->nsector = 256;
1037 } else {
1038 if (!s->nsector && !s->hob_nsector)
1039 s->nsector = 65536;
1040 else {
1041 int lo = s->nsector;
1042 int hi = s->hob_nsector;
1044 s->nsector = (hi << 8) | lo;
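/* e.g. an LBA28 command with nsector == 0 transfers 256 sectors, while an
 * LBA48 command with nsector == hob_nsector == 0 transfers 65536. */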
1049 static void ide_clear_hob(IDEBus *bus)
1051 /* any write clears HOB high bit of device control register */
1052 bus->ifs[0].select &= ~(1 << 7);
1053 bus->ifs[1].select &= ~(1 << 7);
1056 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1058 IDEBus *bus = opaque;
1060 #ifdef DEBUG_IDE
1061 printf("IDE: write addr=0x%x val=0x%02x\n", addr, val);
1062 #endif
1064 addr &= 7;
1066 /* ignore writes to command block while busy with previous command */
1067 if (addr != 7 && (idebus_active_if(bus)->status & (BUSY_STAT|DRQ_STAT)))
1068 return;
1070 switch(addr) {
1071 case 0:
1072 break;
1073 case 1:
1074 ide_clear_hob(bus);
1075 /* NOTE: data is written to the two drives */
1076 bus->ifs[0].hob_feature = bus->ifs[0].feature;
1077 bus->ifs[1].hob_feature = bus->ifs[1].feature;
1078 bus->ifs[0].feature = val;
1079 bus->ifs[1].feature = val;
1080 break;
1081 case 2:
1082 ide_clear_hob(bus);
1083 bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1084 bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1085 bus->ifs[0].nsector = val;
1086 bus->ifs[1].nsector = val;
1087 break;
1088 case 3:
1089 ide_clear_hob(bus);
1090 bus->ifs[0].hob_sector = bus->ifs[0].sector;
1091 bus->ifs[1].hob_sector = bus->ifs[1].sector;
1092 bus->ifs[0].sector = val;
1093 bus->ifs[1].sector = val;
1094 break;
1095 case 4:
1096 ide_clear_hob(bus);
1097 bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1098 bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1099 bus->ifs[0].lcyl = val;
1100 bus->ifs[1].lcyl = val;
1101 break;
1102 case 5:
1103 ide_clear_hob(bus);
1104 bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1105 bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1106 bus->ifs[0].hcyl = val;
1107 bus->ifs[1].hcyl = val;
1108 break;
1109 case 6:
1110 /* FIXME: HOB readback uses bit 7 */
1111 bus->ifs[0].select = (val & ~0x10) | 0xa0;
1112 bus->ifs[1].select = (val | 0x10) | 0xa0;
1113 /* select drive */
1114 bus->unit = (val >> 4) & 1;
1115 break;
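/* Drive/head register layout: bit 6 selects LBA addressing (tested as
 * select & 0x40 in ide_get_sector), bit 4 picks the drive and the low nibble
 * holds the head or LBA[27:24]; bits 7 and 5 are forced to 1 by the 0xa0 OR
 * above.  Writing 0xe0, for instance, selects drive 0 in LBA mode. */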
1116 default:
1117 case 7:
1118 /* command */
1119 ide_exec_cmd(bus, val);
1120 break;
1124 static bool cmd_nop(IDEState *s, uint8_t cmd)
1126 return true;
1129 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1131 switch (s->feature) {
1132 case DSM_TRIM:
1133 if (s->blk) {
1134 ide_sector_start_dma(s, IDE_DMA_TRIM);
1135 return false;
1137 break;
1140 ide_abort_command(s);
1141 return true;
1144 static bool cmd_identify(IDEState *s, uint8_t cmd)
1146 if (s->blk && s->drive_kind != IDE_CD) {
1147 if (s->drive_kind != IDE_CFATA) {
1148 ide_identify(s);
1149 } else {
1150 ide_cfata_identify(s);
1152 s->status = READY_STAT | SEEK_STAT;
1153 ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1154 ide_set_irq(s->bus);
1155 return false;
1156 } else {
1157 if (s->drive_kind == IDE_CD) {
1158 ide_set_signature(s);
1160 ide_abort_command(s);
1163 return true;
1166 static bool cmd_verify(IDEState *s, uint8_t cmd)
1168 bool lba48 = (cmd == WIN_VERIFY_EXT);
1170 /* do sector number check ? */
1171 ide_cmd_lba48_transform(s, lba48);
1173 return true;
1176 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1178 if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1179 /* Disable Read and Write Multiple */
1180 s->mult_sectors = 0;
1181 } else if ((s->nsector & 0xff) != 0 &&
1182 ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1183 (s->nsector & (s->nsector - 1)) != 0)) {
1184 ide_abort_command(s);
1185 } else {
1186 s->mult_sectors = s->nsector & 0xff;
1189 return true;
1192 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1194 bool lba48 = (cmd == WIN_MULTREAD_EXT);
1196 if (!s->blk || !s->mult_sectors) {
1197 ide_abort_command(s);
1198 return true;
1201 ide_cmd_lba48_transform(s, lba48);
1202 s->req_nb_sectors = s->mult_sectors;
1203 ide_sector_read(s);
1204 return false;
1207 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1209 bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1210 int n;
1212 if (!s->blk || !s->mult_sectors) {
1213 ide_abort_command(s);
1214 return true;
1217 ide_cmd_lba48_transform(s, lba48);
1219 s->req_nb_sectors = s->mult_sectors;
1220 n = MIN(s->nsector, s->req_nb_sectors);
1222 s->status = SEEK_STAT | READY_STAT;
1223 ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1225 s->media_changed = 1;
1227 return false;
1230 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1232 bool lba48 = (cmd == WIN_READ_EXT);
1234 if (s->drive_kind == IDE_CD) {
1235 ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1236 ide_abort_command(s);
1237 return true;
1240 if (!s->blk) {
1241 ide_abort_command(s);
1242 return true;
1245 ide_cmd_lba48_transform(s, lba48);
1246 s->req_nb_sectors = 1;
1247 ide_sector_read(s);
1249 return false;
1252 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1254 bool lba48 = (cmd == WIN_WRITE_EXT);
1256 if (!s->blk) {
1257 ide_abort_command(s);
1258 return true;
1261 ide_cmd_lba48_transform(s, lba48);
1263 s->req_nb_sectors = 1;
1264 s->status = SEEK_STAT | READY_STAT;
1265 ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1267 s->media_changed = 1;
1269 return false;
1272 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1274 bool lba48 = (cmd == WIN_READDMA_EXT);
1276 if (!s->blk) {
1277 ide_abort_command(s);
1278 return true;
1281 ide_cmd_lba48_transform(s, lba48);
1282 ide_sector_start_dma(s, IDE_DMA_READ);
1284 return false;
1287 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1289 bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1291 if (!s->blk) {
1292 ide_abort_command(s);
1293 return true;
1296 ide_cmd_lba48_transform(s, lba48);
1297 ide_sector_start_dma(s, IDE_DMA_WRITE);
1299 s->media_changed = 1;
1301 return false;
1304 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1306 ide_flush_cache(s);
1307 return false;
1310 static bool cmd_seek(IDEState *s, uint8_t cmd)
1312 /* XXX: Check that seek is within bounds */
1313 return true;
1316 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1318 bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1320 /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1321 if (s->nb_sectors == 0) {
1322 ide_abort_command(s);
1323 return true;
1326 ide_cmd_lba48_transform(s, lba48);
1327 ide_set_sector(s, s->nb_sectors - 1);
1329 return true;
1332 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1334 s->nsector = 0xff; /* device active or idle */
1335 return true;
1338 static bool cmd_set_features(IDEState *s, uint8_t cmd)
1340 uint16_t *identify_data;
1342 if (!s->blk) {
1343 ide_abort_command(s);
1344 return true;
1347 /* XXX: valid for CDROM ? */
1348 switch (s->feature) {
1349 case 0x02: /* write cache enable */
1350 blk_set_enable_write_cache(s->blk, true);
1351 identify_data = (uint16_t *)s->identify_data;
1352 put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1353 return true;
1354 case 0x82: /* write cache disable */
1355 blk_set_enable_write_cache(s->blk, false);
1356 identify_data = (uint16_t *)s->identify_data;
1357 put_le16(identify_data + 85, (1 << 14) | 1);
1358 ide_flush_cache(s);
1359 return false;
1360 case 0xcc: /* reverting to power-on defaults enable */
1361 case 0x66: /* reverting to power-on defaults disable */
1362 case 0xaa: /* read look-ahead enable */
1363 case 0x55: /* read look-ahead disable */
1364 case 0x05: /* set advanced power management mode */
1365 case 0x85: /* disable advanced power management mode */
1366 case 0x69: /* NOP */
1367 case 0x67: /* NOP */
1368 case 0x96: /* NOP */
1369 case 0x9a: /* NOP */
1370 case 0x42: /* enable Automatic Acoustic Mode */
1371 case 0xc2: /* disable Automatic Acoustic Mode */
1372 return true;
1373 case 0x03: /* set transfer mode */
1375 uint8_t val = s->nsector & 0x07;
1376 identify_data = (uint16_t *)s->identify_data;
1378 switch (s->nsector >> 3) {
1379 case 0x00: /* pio default */
1380 case 0x01: /* pio mode */
1381 put_le16(identify_data + 62, 0x07);
1382 put_le16(identify_data + 63, 0x07);
1383 put_le16(identify_data + 88, 0x3f);
1384 break;
1385 case 0x02: /* single word dma mode */
1386 put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1387 put_le16(identify_data + 63, 0x07);
1388 put_le16(identify_data + 88, 0x3f);
1389 break;
1390 case 0x04: /* mdma mode */
1391 put_le16(identify_data + 62, 0x07);
1392 put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1393 put_le16(identify_data + 88, 0x3f);
1394 break;
1395 case 0x08: /* udma mode */
1396 put_le16(identify_data + 62, 0x07);
1397 put_le16(identify_data + 63, 0x07);
1398 put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1399 break;
1400 default:
1401 goto abort_cmd;
1403 return true;
1407 abort_cmd:
1408 ide_abort_command(s);
1409 return true;
1413 /*** ATAPI commands ***/
1415 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1417 ide_atapi_identify(s);
1418 s->status = READY_STAT | SEEK_STAT;
1419 ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1420 ide_set_irq(s->bus);
1421 return false;
1424 static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1426 ide_set_signature(s);
1428 if (s->drive_kind == IDE_CD) {
1429 s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1430 * devices to return a clear status register
1431 * with READY_STAT *not* set. */
1432 s->error = 0x01;
1433 } else {
1434 s->status = READY_STAT | SEEK_STAT;
1435 /* The bits of the error register are not as usual for this command!
1436 * They are part of the regular output (this is why ERR_STAT isn't set)
1437 * Device 0 passed, Device 1 passed or not present. */
1438 s->error = 0x01;
1439 ide_set_irq(s->bus);
1442 return false;
1445 static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1447 ide_set_signature(s);
1448 s->status = 0x00; /* NOTE: READY is _not_ set */
1449 s->error = 0x01;
1451 return false;
1454 static bool cmd_packet(IDEState *s, uint8_t cmd)
1456 /* overlapping commands not supported */
1457 if (s->feature & 0x02) {
1458 ide_abort_command(s);
1459 return true;
1462 s->status = READY_STAT | SEEK_STAT;
1463 s->atapi_dma = s->feature & 1;
1464 s->nsector = 1;
1465 ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1466 ide_atapi_cmd);
1467 return false;
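/* The PACKET command only arms the transfer: the guest then writes the
 * 12-byte (ATAPI_PACKET_SIZE) command packet through the data port and
 * ide_atapi_cmd parses it.  Feature bit 0 requests DMA for the data phase;
 * bit 1 (overlapped commands) is rejected above. */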
1471 /*** CF-ATA commands ***/
1473 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1475 s->error = 0x09; /* miscellaneous error */
1476 s->status = READY_STAT | SEEK_STAT;
1477 ide_set_irq(s->bus);
1479 return false;
1482 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1484 /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1485 * required for Windows 8 to work with AHCI */
1487 if (cmd == CFA_WEAR_LEVEL) {
1488 s->nsector = 0;
1491 if (cmd == CFA_ERASE_SECTORS) {
1492 s->media_changed = 1;
1495 return true;
1498 static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1500 s->status = READY_STAT | SEEK_STAT;
1502 memset(s->io_buffer, 0, 0x200);
1503 s->io_buffer[0x00] = s->hcyl; /* Cyl MSB */
1504 s->io_buffer[0x01] = s->lcyl; /* Cyl LSB */
1505 s->io_buffer[0x02] = s->select; /* Head */
1506 s->io_buffer[0x03] = s->sector; /* Sector */
1507 s->io_buffer[0x04] = ide_get_sector(s) >> 16; /* LBA MSB */
1508 s->io_buffer[0x05] = ide_get_sector(s) >> 8; /* LBA */
1509 s->io_buffer[0x06] = ide_get_sector(s) >> 0; /* LBA LSB */
1510 s->io_buffer[0x13] = 0x00; /* Erase flag */
1511 s->io_buffer[0x18] = 0x00; /* Hot count */
1512 s->io_buffer[0x19] = 0x00; /* Hot count */
1513 s->io_buffer[0x1a] = 0x01; /* Hot count */
1515 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1516 ide_set_irq(s->bus);
1518 return false;
1521 static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1523 switch (s->feature) {
1524 case 0x02: /* Inquiry Metadata Storage */
1525 ide_cfata_metadata_inquiry(s);
1526 break;
1527 case 0x03: /* Read Metadata Storage */
1528 ide_cfata_metadata_read(s);
1529 break;
1530 case 0x04: /* Write Metadata Storage */
1531 ide_cfata_metadata_write(s);
1532 break;
1533 default:
1534 ide_abort_command(s);
1535 return true;
1538 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1539 s->status = 0x00; /* NOTE: READY is _not_ set */
1540 ide_set_irq(s->bus);
1542 return false;
1545 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1547 switch (s->feature) {
1548 case 0x01: /* sense temperature in device */
1549 s->nsector = 0x50; /* +20 C */
1550 break;
1551 default:
1552 ide_abort_command(s);
1553 return true;
1556 return true;
1560 /*** SMART commands ***/
1562 static bool cmd_smart(IDEState *s, uint8_t cmd)
1564 int n;
1566 if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
1567 goto abort_cmd;
1570 if (!s->smart_enabled && s->feature != SMART_ENABLE) {
1571 goto abort_cmd;
1574 switch (s->feature) {
1575 case SMART_DISABLE:
1576 s->smart_enabled = 0;
1577 return true;
1579 case SMART_ENABLE:
1580 s->smart_enabled = 1;
1581 return true;
1583 case SMART_ATTR_AUTOSAVE:
1584 switch (s->sector) {
1585 case 0x00:
1586 s->smart_autosave = 0;
1587 break;
1588 case 0xf1:
1589 s->smart_autosave = 1;
1590 break;
1591 default:
1592 goto abort_cmd;
1594 return true;
1596 case SMART_STATUS:
1597 if (!s->smart_errors) {
1598 s->hcyl = 0xc2;
1599 s->lcyl = 0x4f;
1600 } else {
1601 s->hcyl = 0x2c;
1602 s->lcyl = 0xf4;
1604 return true;
1606 case SMART_READ_THRESH:
1607 memset(s->io_buffer, 0, 0x200);
1608 s->io_buffer[0] = 0x01; /* smart struct version */
1610 for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1611 s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
1612 s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
1615 /* checksum */
1616 for (n = 0; n < 511; n++) {
1617 s->io_buffer[511] += s->io_buffer[n];
1619 s->io_buffer[511] = 0x100 - s->io_buffer[511];
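/* Two's-complement checksum: byte 511 is chosen so that all 512 bytes of the
 * page sum to zero modulo 256.  The same pattern recurs for the other SMART
 * pages below. */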
1621 s->status = READY_STAT | SEEK_STAT;
1622 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1623 ide_set_irq(s->bus);
1624 return false;
1626 case SMART_READ_DATA:
1627 memset(s->io_buffer, 0, 0x200);
1628 s->io_buffer[0] = 0x01; /* smart struct version */
1630 for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1631 int i;
1632 for (i = 0; i < 11; i++) {
1633 s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
1637 s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
1638 if (s->smart_selftest_count == 0) {
1639 s->io_buffer[363] = 0;
1640 } else {
1641 s->io_buffer[363] =
1642 s->smart_selftest_data[3 +
1643 (s->smart_selftest_count - 1) *
1644 24];
1646 s->io_buffer[364] = 0x20;
1647 s->io_buffer[365] = 0x01;
1648 /* offline data collection capacity: execute + self-test*/
1649 s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
1650 s->io_buffer[368] = 0x03; /* smart capability (1) */
1651 s->io_buffer[369] = 0x00; /* smart capability (2) */
1652 s->io_buffer[370] = 0x01; /* error logging supported */
1653 s->io_buffer[372] = 0x02; /* minutes for poll short test */
1654 s->io_buffer[373] = 0x36; /* minutes for poll ext test */
1655 s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
1657 for (n = 0; n < 511; n++) {
1658 s->io_buffer[511] += s->io_buffer[n];
1660 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1662 s->status = READY_STAT | SEEK_STAT;
1663 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1664 ide_set_irq(s->bus);
1665 return false;
1667 case SMART_READ_LOG:
1668 switch (s->sector) {
1669 case 0x01: /* summary smart error log */
1670 memset(s->io_buffer, 0, 0x200);
1671 s->io_buffer[0] = 0x01;
1672 s->io_buffer[1] = 0x00; /* no error entries */
1673 s->io_buffer[452] = s->smart_errors & 0xff;
1674 s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
1676 for (n = 0; n < 511; n++) {
1677 s->io_buffer[511] += s->io_buffer[n];
1679 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1680 break;
1681 case 0x06: /* smart self test log */
1682 memset(s->io_buffer, 0, 0x200);
1683 s->io_buffer[0] = 0x01;
1684 if (s->smart_selftest_count == 0) {
1685 s->io_buffer[508] = 0;
1686 } else {
1687 s->io_buffer[508] = s->smart_selftest_count;
1688 for (n = 2; n < 506; n++) {
1689 s->io_buffer[n] = s->smart_selftest_data[n];
1693 for (n = 0; n < 511; n++) {
1694 s->io_buffer[511] += s->io_buffer[n];
1696 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1697 break;
1698 default:
1699 goto abort_cmd;
1701 s->status = READY_STAT | SEEK_STAT;
1702 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1703 ide_set_irq(s->bus);
1704 return false;
1706 case SMART_EXECUTE_OFFLINE:
1707 switch (s->sector) {
1708 case 0: /* off-line routine */
1709 case 1: /* short self test */
1710 case 2: /* extended self test */
1711 s->smart_selftest_count++;
1712 if (s->smart_selftest_count > 21) {
1713 s->smart_selftest_count = 1;
1715 n = 2 + (s->smart_selftest_count - 1) * 24;
1716 s->smart_selftest_data[n] = s->sector;
1717 s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
1718 s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
1719 s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
1720 break;
1721 default:
1722 goto abort_cmd;
1724 return true;
1727 abort_cmd:
1728 ide_abort_command(s);
1729 return true;
1732 #define HD_OK (1u << IDE_HD)
1733 #define CD_OK (1u << IDE_CD)
1734 #define CFA_OK (1u << IDE_CFATA)
1735 #define HD_CFA_OK (HD_OK | CFA_OK)
1736 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
1738 /* Set the Disk Seek Completed status bit during completion */
1739 #define SET_DSC (1u << 8)
1741 /* See ACS-2 T13/2015-D Table B.2 Command codes */
1742 static const struct {
1743 /* Returns true if the completion code should be run */
1744 bool (*handler)(IDEState *s, uint8_t cmd);
1745 int flags;
1746 } ide_cmd_table[0x100] = {
1747 /* NOP not implemented, mandatory for CD */
1748 [CFA_REQ_EXT_ERROR_CODE] = { cmd_cfa_req_ext_error_code, CFA_OK },
1749 [WIN_DSM] = { cmd_data_set_management, HD_CFA_OK },
1750 [WIN_DEVICE_RESET] = { cmd_device_reset, CD_OK },
1751 [WIN_RECAL] = { cmd_nop, HD_CFA_OK | SET_DSC},
1752 [WIN_READ] = { cmd_read_pio, ALL_OK },
1753 [WIN_READ_ONCE] = { cmd_read_pio, HD_CFA_OK },
1754 [WIN_READ_EXT] = { cmd_read_pio, HD_CFA_OK },
1755 [WIN_READDMA_EXT] = { cmd_read_dma, HD_CFA_OK },
1756 [WIN_READ_NATIVE_MAX_EXT] = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
1757 [WIN_MULTREAD_EXT] = { cmd_read_multiple, HD_CFA_OK },
1758 [WIN_WRITE] = { cmd_write_pio, HD_CFA_OK },
1759 [WIN_WRITE_ONCE] = { cmd_write_pio, HD_CFA_OK },
1760 [WIN_WRITE_EXT] = { cmd_write_pio, HD_CFA_OK },
1761 [WIN_WRITEDMA_EXT] = { cmd_write_dma, HD_CFA_OK },
1762 [CFA_WRITE_SECT_WO_ERASE] = { cmd_write_pio, CFA_OK },
1763 [WIN_MULTWRITE_EXT] = { cmd_write_multiple, HD_CFA_OK },
1764 [WIN_WRITE_VERIFY] = { cmd_write_pio, HD_CFA_OK },
1765 [WIN_VERIFY] = { cmd_verify, HD_CFA_OK | SET_DSC },
1766 [WIN_VERIFY_ONCE] = { cmd_verify, HD_CFA_OK | SET_DSC },
1767 [WIN_VERIFY_EXT] = { cmd_verify, HD_CFA_OK | SET_DSC },
1768 [WIN_SEEK] = { cmd_seek, HD_CFA_OK | SET_DSC },
1769 [CFA_TRANSLATE_SECTOR] = { cmd_cfa_translate_sector, CFA_OK },
1770 [WIN_DIAGNOSE] = { cmd_exec_dev_diagnostic, ALL_OK },
1771 [WIN_SPECIFY] = { cmd_nop, HD_CFA_OK | SET_DSC },
1772 [WIN_STANDBYNOW2] = { cmd_nop, HD_CFA_OK },
1773 [WIN_IDLEIMMEDIATE2] = { cmd_nop, HD_CFA_OK },
1774 [WIN_STANDBY2] = { cmd_nop, HD_CFA_OK },
1775 [WIN_SETIDLE2] = { cmd_nop, HD_CFA_OK },
1776 [WIN_CHECKPOWERMODE2] = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
1777 [WIN_SLEEPNOW2] = { cmd_nop, HD_CFA_OK },
1778 [WIN_PACKETCMD] = { cmd_packet, CD_OK },
1779 [WIN_PIDENTIFY] = { cmd_identify_packet, CD_OK },
1780 [WIN_SMART] = { cmd_smart, HD_CFA_OK | SET_DSC },
1781 [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
1782 [CFA_ERASE_SECTORS] = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
1783 [WIN_MULTREAD] = { cmd_read_multiple, HD_CFA_OK },
1784 [WIN_MULTWRITE] = { cmd_write_multiple, HD_CFA_OK },
1785 [WIN_SETMULT] = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
1786 [WIN_READDMA] = { cmd_read_dma, HD_CFA_OK },
1787 [WIN_READDMA_ONCE] = { cmd_read_dma, HD_CFA_OK },
1788 [WIN_WRITEDMA] = { cmd_write_dma, HD_CFA_OK },
1789 [WIN_WRITEDMA_ONCE] = { cmd_write_dma, HD_CFA_OK },
1790 [CFA_WRITE_MULTI_WO_ERASE] = { cmd_write_multiple, CFA_OK },
1791 [WIN_STANDBYNOW1] = { cmd_nop, HD_CFA_OK },
1792 [WIN_IDLEIMMEDIATE] = { cmd_nop, HD_CFA_OK },
1793 [WIN_STANDBY] = { cmd_nop, HD_CFA_OK },
1794 [WIN_SETIDLE1] = { cmd_nop, HD_CFA_OK },
1795 [WIN_CHECKPOWERMODE1] = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
1796 [WIN_SLEEPNOW1] = { cmd_nop, HD_CFA_OK },
1797 [WIN_FLUSH_CACHE] = { cmd_flush_cache, ALL_OK },
1798 [WIN_FLUSH_CACHE_EXT] = { cmd_flush_cache, HD_CFA_OK },
1799 [WIN_IDENTIFY] = { cmd_identify, ALL_OK },
1800 [WIN_SETFEATURES] = { cmd_set_features, ALL_OK | SET_DSC },
1801 [IBM_SENSE_CONDITION] = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
1802 [CFA_WEAR_LEVEL] = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
1803 [WIN_READ_NATIVE_MAX] = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
1806 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
1808 return cmd < ARRAY_SIZE(ide_cmd_table)
1809 && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
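/* The low bits of .flags form a per-drive-kind permission mask; e.g.
 * WIN_PACKETCMD is marked CD_OK only, so sending it to a hard disk falls
 * through to ide_abort_command() in ide_exec_cmd() below, while WIN_IDENTIFY
 * (ALL_OK) is accepted by every drive kind. */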
1812 void ide_exec_cmd(IDEBus *bus, uint32_t val)
1814 IDEState *s;
1815 bool complete;
1817 #if defined(DEBUG_IDE)
1818 printf("ide: CMD=%02x\n", val);
1819 #endif
1820 s = idebus_active_if(bus);
1821 /* ignore commands to non existent slave */
1822 if (s != bus->ifs && !s->blk) {
1823 return;
1826 /* Only DEVICE RESET is allowed while BSY and/or DRQ are set */
1827 if ((s->status & (BUSY_STAT|DRQ_STAT)) && val != WIN_DEVICE_RESET)
1828 return;
1830 if (!ide_cmd_permitted(s, val)) {
1831 ide_abort_command(s);
1832 ide_set_irq(s->bus);
1833 return;
1836 s->status = READY_STAT | BUSY_STAT;
1837 s->error = 0;
1838 s->io_buffer_offset = 0;
1840 complete = ide_cmd_table[val].handler(s, val);
1841 if (complete) {
1842 s->status &= ~BUSY_STAT;
1843 assert(!!s->error == !!(s->status & ERR_STAT));
1845 if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
1846 s->status |= SEEK_STAT;
1849 ide_cmd_done(s);
1850 ide_set_irq(s->bus);
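/* Handlers return true only when the command completed in place; in that case
 * BUSY is dropped, DSC is set if the table entry requests it and the IRQ is
 * raised here.  Handlers that start an asynchronous transfer return false and
 * finish through their completion callbacks. */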
1854 uint32_t ide_ioport_read(void *opaque, uint32_t addr1)
1856 IDEBus *bus = opaque;
1857 IDEState *s = idebus_active_if(bus);
1858 uint32_t addr;
1859 int ret, hob;
1861 addr = addr1 & 7;
1862 /* FIXME: HOB readback uses bit 7, but it's always set right now */
1863 //hob = s->select & (1 << 7);
1864 hob = 0;
1865 switch(addr) {
1866 case 0:
1867 ret = 0xff;
1868 break;
1869 case 1:
1870 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
1871 (s != bus->ifs && !s->blk)) {
1872 ret = 0;
1873 } else if (!hob) {
1874 ret = s->error;
1875 } else {
1876 ret = s->hob_feature;
1878 break;
1879 case 2:
1880 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
1881 ret = 0;
1882 } else if (!hob) {
1883 ret = s->nsector & 0xff;
1884 } else {
1885 ret = s->hob_nsector;
1887 break;
1888 case 3:
1889 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
1890 ret = 0;
1891 } else if (!hob) {
1892 ret = s->sector;
1893 } else {
1894 ret = s->hob_sector;
1896 break;
1897 case 4:
1898 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
1899 ret = 0;
1900 } else if (!hob) {
1901 ret = s->lcyl;
1902 } else {
1903 ret = s->hob_lcyl;
1905 break;
1906 case 5:
1907 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
1908 ret = 0;
1909 } else if (!hob) {
1910 ret = s->hcyl;
1911 } else {
1912 ret = s->hob_hcyl;
1914 break;
1915 case 6:
1916 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
1917 ret = 0;
1918 } else {
1919 ret = s->select;
1921 break;
1922 default:
1923 case 7:
1924 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
1925 (s != bus->ifs && !s->blk)) {
1926 ret = 0;
1927 } else {
1928 ret = s->status;
1930 qemu_irq_lower(bus->irq);
1931 break;
1933 #ifdef DEBUG_IDE
1934 printf("ide: read addr=0x%x val=%02x\n", addr1, ret);
1935 #endif
1936 return ret;
1939 uint32_t ide_status_read(void *opaque, uint32_t addr)
1941 IDEBus *bus = opaque;
1942 IDEState *s = idebus_active_if(bus);
1943 int ret;
1945 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
1946 (s != bus->ifs && !s->blk)) {
1947 ret = 0;
1948 } else {
1949 ret = s->status;
1951 #ifdef DEBUG_IDE
1952 printf("ide: read status addr=0x%x val=%02x\n", addr, ret);
1953 #endif
1954 return ret;
1957 void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val)
1959 IDEBus *bus = opaque;
1960 IDEState *s;
1961 int i;
1963 #ifdef DEBUG_IDE
1964 printf("ide: write control addr=0x%x val=%02x\n", addr, val);
1965 #endif
1966 /* common for both drives */
1967 if (!(bus->cmd & IDE_CMD_RESET) &&
1968 (val & IDE_CMD_RESET)) {
1969 /* reset low to high */
1970 for(i = 0;i < 2; i++) {
1971 s = &bus->ifs[i];
1972 s->status = BUSY_STAT | SEEK_STAT;
1973 s->error = 0x01;
1975 } else if ((bus->cmd & IDE_CMD_RESET) &&
1976 !(val & IDE_CMD_RESET)) {
1977 /* high to low */
1978 for(i = 0;i < 2; i++) {
1979 s = &bus->ifs[i];
1980 if (s->drive_kind == IDE_CD)
1981 s->status = 0x00; /* NOTE: READY is _not_ set */
1982 else
1983 s->status = READY_STAT | SEEK_STAT;
1984 ide_set_signature(s);
1988 bus->cmd = val;
1992 * Returns true if the running PIO transfer is a PIO out (i.e. data is
1993 * transferred from the device to the guest), false if it's a PIO in
1995 static bool ide_is_pio_out(IDEState *s)
1997 if (s->end_transfer_func == ide_sector_write ||
1998 s->end_transfer_func == ide_atapi_cmd) {
1999 return false;
2000 } else if (s->end_transfer_func == ide_sector_read ||
2001 s->end_transfer_func == ide_transfer_stop ||
2002 s->end_transfer_func == ide_atapi_cmd_reply_end ||
2003 s->end_transfer_func == ide_dummy_transfer_stop) {
2004 return true;
2007 abort();
2010 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2012 IDEBus *bus = opaque;
2013 IDEState *s = idebus_active_if(bus);
2014 uint8_t *p;
2016 /* PIO data access allowed only when DRQ bit is set. The result of a write
2017 * during PIO out is indeterminate, just ignore it. */
2018 if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2019 return;
2022 p = s->data_ptr;
2023 if (p + 2 > s->data_end) {
2024 return;
2027 *(uint16_t *)p = le16_to_cpu(val);
2028 p += 2;
2029 s->data_ptr = p;
2030 if (p >= s->data_end) {
2031 s->status &= ~DRQ_STAT;
2032 s->end_transfer_func(s);
2036 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2038 IDEBus *bus = opaque;
2039 IDEState *s = idebus_active_if(bus);
2040 uint8_t *p;
2041 int ret;
2043 /* PIO data access allowed only when DRQ bit is set. The result of a read
2044 * during PIO in is indeterminate, return 0 and don't move forward. */
2045 if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2046 return 0;
2049 p = s->data_ptr;
2050 if (p + 2 > s->data_end) {
2051 return 0;
2054 ret = cpu_to_le16(*(uint16_t *)p);
2055 p += 2;
2056 s->data_ptr = p;
2057 if (p >= s->data_end) {
2058 s->status &= ~DRQ_STAT;
2059 s->end_transfer_func(s);
2061 return ret;
2064 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2066 IDEBus *bus = opaque;
2067 IDEState *s = idebus_active_if(bus);
2068 uint8_t *p;
2070 /* PIO data access allowed only when DRQ bit is set. The result of a write
2071 * during PIO out is indeterminate, just ignore it. */
2072 if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2073 return;
2074 }
2076 p = s->data_ptr;
2077 if (p + 4 > s->data_end) {
2078 return;
2079 }
2081 *(uint32_t *)p = le32_to_cpu(val);
2082 p += 4;
2083 s->data_ptr = p;
2084 if (p >= s->data_end) {
2085 s->status &= ~DRQ_STAT;
2086 s->end_transfer_func(s);
2087 }
2088 }
2090 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2091 {
2092 IDEBus *bus = opaque;
2093 IDEState *s = idebus_active_if(bus);
2094 uint8_t *p;
2095 int ret;
2097 /* PIO data access allowed only when DRQ bit is set. The result of a read
2098 * during PIO in is indeterminate, return 0 and don't move forward. */
2099 if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2100 return 0;
2101 }
2103 p = s->data_ptr;
2104 if (p + 4 > s->data_end) {
2105 return 0;
2106 }
2108 ret = cpu_to_le32(*(uint32_t *)p);
2109 p += 4;
2110 s->data_ptr = p;
2111 if (p >= s->data_end) {
2112 s->status &= ~DRQ_STAT;
2113 s->end_transfer_func(s);
2114 }
2115 return ret;
2116 }
2118 static void ide_dummy_transfer_stop(IDEState *s)
2119 {
2120 s->data_ptr = s->io_buffer;
2121 s->data_end = s->io_buffer;
2122 s->io_buffer[0] = 0xff;
2123 s->io_buffer[1] = 0xff;
2124 s->io_buffer[2] = 0xff;
2125 s->io_buffer[3] = 0xff;
2126 }
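/*
 * Per-drive reset: cancel any in-flight PIO request, clear the taskfile,
 * ATAPI and DMA state, restore the drive signature and route data-port
 * accesses to the dummy transfer handler.
 */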
2128 static void ide_reset(IDEState *s)
2129 {
2130 #ifdef DEBUG_IDE
2131 printf("ide: reset\n");
2132 #endif
2134 if (s->pio_aiocb) {
2135 blk_aio_cancel(s->pio_aiocb);
2136 s->pio_aiocb = NULL;
2137 }
2139 if (s->drive_kind == IDE_CFATA)
2140 s->mult_sectors = 0;
2141 else
2142 s->mult_sectors = MAX_MULT_SECTORS;
2143 /* ide regs */
2144 s->feature = 0;
2145 s->error = 0;
2146 s->nsector = 0;
2147 s->sector = 0;
2148 s->lcyl = 0;
2149 s->hcyl = 0;
2151 /* lba48 */
2152 s->hob_feature = 0;
2153 s->hob_sector = 0;
2154 s->hob_nsector = 0;
2155 s->hob_lcyl = 0;
2156 s->hob_hcyl = 0;
2158 s->select = 0xa0;
2159 s->status = READY_STAT | SEEK_STAT;
2161 s->lba48 = 0;
2163 /* ATAPI specific */
2164 s->sense_key = 0;
2165 s->asc = 0;
2166 s->cdrom_changed = 0;
2167 s->packet_transfer_size = 0;
2168 s->elementary_transfer_size = 0;
2169 s->io_buffer_index = 0;
2170 s->cd_sector_size = 0;
2171 s->atapi_dma = 0;
2172 s->tray_locked = 0;
2173 s->tray_open = 0;
2174 /* ATA DMA state */
2175 s->io_buffer_size = 0;
2176 s->req_nb_sectors = 0;
2178 ide_set_signature(s);
2179 /* init the transfer handler so that 0xffff is returned on data
2180 accesses */
2181 s->end_transfer_func = ide_dummy_transfer_stop;
2182 ide_dummy_transfer_stop(s);
2183 s->media_changed = 0;
2184 }
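/*
 * Bus-level reset: resets both drives, clears the HOB latch, cancels any
 * pending async DMA on the bus and, if the DMA provider has a reset hook,
 * invokes it as well.
 */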
2186 void ide_bus_reset(IDEBus *bus)
2187 {
2188 bus->unit = 0;
2189 bus->cmd = 0;
2190 ide_reset(&bus->ifs[0]);
2191 ide_reset(&bus->ifs[1]);
2192 ide_clear_hob(bus);
2194 /* pending async DMA */
2195 if (bus->dma->aiocb) {
2196 #ifdef DEBUG_AIO
2197 printf("aio_cancel\n");
2198 #endif
2199 blk_aio_cancel(bus->dma->aiocb);
2200 bus->dma->aiocb = NULL;
2201 }
2203 /* reset dma provider too */
2204 if (bus->dma->ops->reset) {
2205 bus->dma->ops->reset(bus->dma);
2206 }
2207 }
2209 static bool ide_cd_is_tray_open(void *opaque)
2210 {
2211 return ((IDEState *)opaque)->tray_open;
2212 }
2214 static bool ide_cd_is_medium_locked(void *opaque)
2215 {
2216 return ((IDEState *)opaque)->tray_locked;
2217 }
2219 static void ide_resize_cb(void *opaque)
2220 {
2221 IDEState *s = opaque;
2222 uint64_t nb_sectors;
2224 if (!s->identify_set) {
2225 return;
2226 }
2228 blk_get_geometry(s->blk, &nb_sectors);
2229 s->nb_sectors = nb_sectors;
2231 /* Update the identify data buffer. */
2232 if (s->drive_kind == IDE_CFATA) {
2233 ide_cfata_identify_size(s);
2234 } else {
2235 /* IDE_CD uses a different set of callbacks entirely. */
2236 assert(s->drive_kind != IDE_CD);
2237 ide_identify_size(s);
2238 }
2239 }
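/*
 * Block-layer callback tables: CD drives need media change/eject/tray
 * callbacks, while hard disks only have to react to host-side resize.
 */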
2241 static const BlockDevOps ide_cd_block_ops = {
2242 .change_media_cb = ide_cd_change_cb,
2243 .eject_request_cb = ide_cd_eject_request_cb,
2244 .is_tray_open = ide_cd_is_tray_open,
2245 .is_medium_locked = ide_cd_is_medium_locked,
2246 };
2248 static const BlockDevOps ide_hd_block_ops = {
2249 .resize_cb = ide_resize_cb,
2250 };
2252 int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2253 const char *version, const char *serial, const char *model,
2254 uint64_t wwn,
2255 uint32_t cylinders, uint32_t heads, uint32_t secs,
2256 int chs_trans)
2257 {
2258 uint64_t nb_sectors;
2260 s->blk = blk;
2261 s->drive_kind = kind;
2263 blk_get_geometry(blk, &nb_sectors);
2264 s->cylinders = cylinders;
2265 s->heads = heads;
2266 s->sectors = secs;
2267 s->chs_trans = chs_trans;
2268 s->nb_sectors = nb_sectors;
2269 s->wwn = wwn;
2270 /* The SMART values should be preserved across power cycles
2271 but they aren't. */
2272 s->smart_enabled = 1;
2273 s->smart_autosave = 1;
2274 s->smart_errors = 0;
2275 s->smart_selftest_count = 0;
2276 if (kind == IDE_CD) {
2277 blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2278 blk_set_guest_block_size(blk, 2048);
2279 } else {
2280 if (!blk_is_inserted(s->blk)) {
2281 error_report("Device needs media, but drive is empty");
2282 return -1;
2283 }
2284 if (blk_is_read_only(blk)) {
2285 error_report("Can't use a read-only drive");
2286 return -1;
2287 }
2288 blk_set_dev_ops(blk, &ide_hd_block_ops, s);
2289 }
2290 if (serial) {
2291 pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
2292 } else {
2293 snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2294 "QM%05d", s->drive_serial);
2296 if (model) {
2297 pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
2298 } else {
2299 switch (kind) {
2300 case IDE_CD:
2301 strcpy(s->drive_model_str, "QEMU DVD-ROM");
2302 break;
2303 case IDE_CFATA:
2304 strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2305 break;
2306 default:
2307 strcpy(s->drive_model_str, "QEMU HARDDISK");
2308 break;
2309 }
2310 }
2312 if (version) {
2313 pstrcpy(s->version, sizeof(s->version), version);
2314 } else {
2315 pstrcpy(s->version, sizeof(s->version), qemu_get_version());
2316 }
2318 ide_reset(s);
2319 blk_iostatus_enable(blk);
2320 return 0;
2321 }
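/*
 * Illustrative caller sketch (argument values are made up; the real
 * callers live in the qdev/controller code and take geometry and options
 * from the drive's properties):
 *
 *     if (ide_init_drive(s, blk, IDE_HD, NULL, "QM00005", NULL, 0,
 *                        16383, 16, 63, BIOS_ATA_TRANSLATION_AUTO) < 0) {
 *         return -1;
 *     }
 */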
2323 static void ide_init1(IDEBus *bus, int unit)
2324 {
2325 static int drive_serial = 1;
2326 IDEState *s = &bus->ifs[unit];
2328 s->bus = bus;
2329 s->unit = unit;
2330 s->drive_serial = drive_serial++;
2331 /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2332 s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
2333 s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
2334 memset(s->io_buffer, 0, s->io_buffer_total_len);
2336 s->smart_selftest_data = blk_blockalign(s->blk, 512);
2337 memset(s->smart_selftest_data, 0, 512);
2339 s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2340 ide_sector_write_timer_cb, s);
2341 }
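/*
 * No-op DMA callbacks. ide_init2() installs ide_dma_nop as the default
 * provider so bus->dma is always valid until a real controller (e.g.
 * BMDMA or AHCI) takes over.
 */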
2343 static int ide_nop_int(IDEDMA *dma, int x)
2344 {
2345 return 0;
2346 }
2348 static void ide_nop(IDEDMA *dma)
2349 {
2350 }
2352 static int32_t ide_nop_int32(IDEDMA *dma, int32_t l)
2353 {
2354 return 0;
2355 }
2357 static const IDEDMAOps ide_dma_nop_ops = {
2358 .prepare_buf = ide_nop_int32,
2359 .restart_dma = ide_nop,
2360 .rw_buf = ide_nop_int,
2361 };
2363 static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
2364 {
2365 s->unit = s->bus->retry_unit;
2366 ide_set_sector(s, s->bus->retry_sector_num);
2367 s->nsector = s->bus->retry_nsector;
2368 s->bus->dma->ops->restart_dma(s->bus->dma);
2369 s->io_buffer_size = 0;
2370 s->dma_cmd = dma_cmd;
2371 ide_start_dma(s, ide_dma_cb);
2372 }
2374 static void ide_restart_bh(void *opaque)
2375 {
2376 IDEBus *bus = opaque;
2377 IDEState *s;
2378 bool is_read;
2379 int error_status;
2381 qemu_bh_delete(bus->bh);
2382 bus->bh = NULL;
2384 error_status = bus->error_status;
2385 if (bus->error_status == 0) {
2386 return;
2387 }
2389 s = idebus_active_if(bus);
2390 is_read = (bus->error_status & IDE_RETRY_READ) != 0;
2392 /* The error status must be cleared before resubmitting the request: The
2393 * request may fail again, and this case can only be distinguished if the
2394 * called function can set a new error status. */
2395 bus->error_status = 0;
2397 /* The HBA has generically asked to be kicked on retry */
2398 if (error_status & IDE_RETRY_HBA) {
2399 if (s->bus->dma->ops->restart) {
2400 s->bus->dma->ops->restart(s->bus->dma);
2401 }
2402 }
2404 if (error_status & IDE_RETRY_DMA) {
2405 if (error_status & IDE_RETRY_TRIM) {
2406 ide_restart_dma(s, IDE_DMA_TRIM);
2407 } else {
2408 ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
2409 }
2410 } else if (error_status & IDE_RETRY_PIO) {
2411 if (is_read) {
2412 ide_sector_read(s);
2413 } else {
2414 ide_sector_write(s);
2415 }
2416 } else if (error_status & IDE_RETRY_FLUSH) {
2417 ide_flush_cache(s);
2418 } else {
2419 /*
2420 * We've not got any bits to tell us about ATAPI - but
2421 * we do have the end_transfer_func that tells us what
2422 * we're trying to do.
2423 */
2424 if (s->end_transfer_func == ide_atapi_cmd) {
2425 ide_atapi_dma_restart(s);
2426 }
2427 }
2428 }
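/*
 * VM change-state handler: once the VM is running again, schedule a
 * bottom half that resubmits whatever request is recorded in
 * bus->error_status.
 */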
2430 static void ide_restart_cb(void *opaque, int running, RunState state)
2431 {
2432 IDEBus *bus = opaque;
2434 if (!running)
2435 return;
2437 if (!bus->bh) {
2438 bus->bh = qemu_bh_new(ide_restart_bh, bus);
2439 qemu_bh_schedule(bus->bh);
2440 }
2441 }
2443 void ide_register_restart_cb(IDEBus *bus)
2444 {
2445 if (bus->dma->ops->restart_dma) {
2446 qemu_add_vm_change_state_handler(ide_restart_cb, bus);
2447 }
2448 }
2450 static IDEDMA ide_dma_nop = {
2451 .ops = &ide_dma_nop_ops,
2452 .aiocb = NULL,
2453 };
2455 void ide_init2(IDEBus *bus, qemu_irq irq)
2456 {
2457 int i;
2459 for(i = 0; i < 2; i++) {
2460 ide_init1(bus, i);
2461 ide_reset(&bus->ifs[i]);
2462 }
2463 bus->irq = irq;
2464 bus->dma = &ide_dma_nop;
2465 }
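/*
 * Legacy I/O port layout: eight byte-wide command-block registers at the
 * first base address (the data register at offset 0 also accepts 16-bit
 * and 32-bit accesses), plus a single byte-wide control-block register
 * (status read / device control write) at the second base address.
 */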
2467 static const MemoryRegionPortio ide_portio_list[] = {
2468 { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
2469 { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
2470 { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
2471 PORTIO_END_OF_LIST(),
2472 };
2474 static const MemoryRegionPortio ide_portio2_list[] = {
2475 { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write },
2476 PORTIO_END_OF_LIST(),
2477 };
2479 void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
2480 {
2481 /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
2482 bridge has been set up properly to always register with ISA. */
2483 isa_register_portio_list(dev, iobase, ide_portio_list, bus, "ide");
2485 if (iobase2) {
2486 isa_register_portio_list(dev, iobase2, ide_portio2_list, bus, "ide");
2487 }
2488 }
2490 static bool is_identify_set(void *opaque, int version_id)
2491 {
2492 IDEState *s = opaque;
2494 return s->identify_set != 0;
2495 }
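/*
 * end_transfer_func cannot be migrated as a raw function pointer, so the
 * PIO subsection stores its index in this table instead (see
 * ide_drive_pio_pre_save/ide_drive_pio_post_load below).
 */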
2497 static EndTransferFunc* transfer_end_table[] = {
2498 ide_sector_read,
2499 ide_sector_write,
2500 ide_transfer_stop,
2501 ide_atapi_cmd_reply_end,
2502 ide_atapi_cmd,
2503 ide_dummy_transfer_stop,
2504 };
2506 static int transfer_end_table_idx(EndTransferFunc *fn)
2507 {
2508 int i;
2510 for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2511 if (transfer_end_table[i] == fn)
2512 return i;
2514 return -1;
2515 }
2517 static int ide_drive_post_load(void *opaque, int version_id)
2518 {
2519 IDEState *s = opaque;
2521 if (s->blk && s->identify_set) {
2522 blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
2523 }
2524 return 0;
2525 }
2527 static int ide_drive_pio_post_load(void *opaque, int version_id)
2528 {
2529 IDEState *s = opaque;
2531 if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
2532 return -EINVAL;
2533 }
2534 s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
2535 s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
2536 s->data_end = s->data_ptr + s->cur_io_buffer_len;
2537 s->atapi_dma = s->feature & 1; /* as per cmd_packet */
2539 return 0;
2540 }
2542 static void ide_drive_pio_pre_save(void *opaque)
2543 {
2544 IDEState *s = opaque;
2545 int idx;
2547 s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
2548 s->cur_io_buffer_len = s->data_end - s->data_ptr;
2550 idx = transfer_end_table_idx(s->end_transfer_func);
2551 if (idx == -1) {
2552 fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
2553 __func__);
2554 s->end_transfer_fn_idx = 2;
2555 } else {
2556 s->end_transfer_fn_idx = idx;
2557 }
2558 }
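/*
 * The PIO subsection is only migrated while a PIO transfer is in flight
 * (DRQ set) or a stopped PIO request is waiting to be retried; the
 * fallback index 2 used above maps to ide_transfer_stop.
 */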
2560 static bool ide_drive_pio_state_needed(void *opaque)
2561 {
2562 IDEState *s = opaque;
2564 return ((s->status & DRQ_STAT) != 0)
2565 || (s->bus->error_status & IDE_RETRY_PIO);
2566 }
2568 static bool ide_tray_state_needed(void *opaque)
2569 {
2570 IDEState *s = opaque;
2572 return s->tray_open || s->tray_locked;
2573 }
2575 static bool ide_atapi_gesn_needed(void *opaque)
2576 {
2577 IDEState *s = opaque;
2579 return s->events.new_media || s->events.eject_request;
2580 }
2582 static bool ide_error_needed(void *opaque)
2583 {
2584 IDEBus *bus = opaque;
2586 return (bus->error_status != 0);
2587 }
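/*
 * The subsections below are only put on the wire when their .needed
 * callback returns true, so an idle drive migrates with just the base
 * vmstate_ide_drive layout.
 */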
2589 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2590 static const VMStateDescription vmstate_ide_atapi_gesn_state = {
2591 .name = "ide_drive/atapi/gesn_state",
2592 .version_id = 1,
2593 .minimum_version_id = 1,
2594 .needed = ide_atapi_gesn_needed,
2595 .fields = (VMStateField[]) {
2596 VMSTATE_BOOL(events.new_media, IDEState),
2597 VMSTATE_BOOL(events.eject_request, IDEState),
2598 VMSTATE_END_OF_LIST()
2599 }
2600 };
2602 static const VMStateDescription vmstate_ide_tray_state = {
2603 .name = "ide_drive/tray_state",
2604 .version_id = 1,
2605 .minimum_version_id = 1,
2606 .needed = ide_tray_state_needed,
2607 .fields = (VMStateField[]) {
2608 VMSTATE_BOOL(tray_open, IDEState),
2609 VMSTATE_BOOL(tray_locked, IDEState),
2610 VMSTATE_END_OF_LIST()
2611 }
2612 };
2614 static const VMStateDescription vmstate_ide_drive_pio_state = {
2615 .name = "ide_drive/pio_state",
2616 .version_id = 1,
2617 .minimum_version_id = 1,
2618 .pre_save = ide_drive_pio_pre_save,
2619 .post_load = ide_drive_pio_post_load,
2620 .needed = ide_drive_pio_state_needed,
2621 .fields = (VMStateField[]) {
2622 VMSTATE_INT32(req_nb_sectors, IDEState),
2623 VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
2624 vmstate_info_uint8, uint8_t),
2625 VMSTATE_INT32(cur_io_buffer_offset, IDEState),
2626 VMSTATE_INT32(cur_io_buffer_len, IDEState),
2627 VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
2628 VMSTATE_INT32(elementary_transfer_size, IDEState),
2629 VMSTATE_INT32(packet_transfer_size, IDEState),
2630 VMSTATE_END_OF_LIST()
2631 }
2632 };
2634 const VMStateDescription vmstate_ide_drive = {
2635 .name = "ide_drive",
2636 .version_id = 3,
2637 .minimum_version_id = 0,
2638 .post_load = ide_drive_post_load,
2639 .fields = (VMStateField[]) {
2640 VMSTATE_INT32(mult_sectors, IDEState),
2641 VMSTATE_INT32(identify_set, IDEState),
2642 VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
2643 VMSTATE_UINT8(feature, IDEState),
2644 VMSTATE_UINT8(error, IDEState),
2645 VMSTATE_UINT32(nsector, IDEState),
2646 VMSTATE_UINT8(sector, IDEState),
2647 VMSTATE_UINT8(lcyl, IDEState),
2648 VMSTATE_UINT8(hcyl, IDEState),
2649 VMSTATE_UINT8(hob_feature, IDEState),
2650 VMSTATE_UINT8(hob_sector, IDEState),
2651 VMSTATE_UINT8(hob_nsector, IDEState),
2652 VMSTATE_UINT8(hob_lcyl, IDEState),
2653 VMSTATE_UINT8(hob_hcyl, IDEState),
2654 VMSTATE_UINT8(select, IDEState),
2655 VMSTATE_UINT8(status, IDEState),
2656 VMSTATE_UINT8(lba48, IDEState),
2657 VMSTATE_UINT8(sense_key, IDEState),
2658 VMSTATE_UINT8(asc, IDEState),
2659 VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
2660 VMSTATE_END_OF_LIST()
2661 },
2662 .subsections = (const VMStateDescription*[]) {
2663 &vmstate_ide_drive_pio_state,
2664 &vmstate_ide_tray_state,
2665 &vmstate_ide_atapi_gesn_state,
2666 NULL
2667 }
2668 };
2670 static const VMStateDescription vmstate_ide_error_status = {
2671 .name = "ide_bus/error",
2672 .version_id = 2,
2673 .minimum_version_id = 1,
2674 .needed = ide_error_needed,
2675 .fields = (VMStateField[]) {
2676 VMSTATE_INT32(error_status, IDEBus),
2677 VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
2678 VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
2679 VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
2680 VMSTATE_END_OF_LIST()
2681 }
2682 };
2684 const VMStateDescription vmstate_ide_bus = {
2685 .name = "ide_bus",
2686 .version_id = 1,
2687 .minimum_version_id = 1,
2688 .fields = (VMStateField[]) {
2689 VMSTATE_UINT8(cmd, IDEBus),
2690 VMSTATE_UINT8(unit, IDEBus),
2691 VMSTATE_END_OF_LIST()
2692 },
2693 .subsections = (const VMStateDescription*[]) {
2694 &vmstate_ide_error_status,
2695 NULL
2696 }
2697 };
2699 void ide_drive_get(DriveInfo **hd, int n)
2700 {
2701 int i;
2702 int highest_bus = drive_get_max_bus(IF_IDE) + 1;
2703 int max_devs = drive_get_max_devs(IF_IDE);
2704 int n_buses = max_devs ? (n / max_devs) : n;
2706 /*
2707 * Note: The number of actual buses available is not known.
2708 * We compute this based on the size of the DriveInfo* array, n.
2709 * If it is less than max_devs * <num_real_buses>,
2710 * we will stop looking for drives prematurely instead of overfilling
2711 * the array.
2712 */
2714 if (highest_bus > n_buses) {
2715 error_report("Too many IDE buses defined (%d > %d)",
2716 highest_bus, n_buses);
2717 exit(1);
2718 }
2720 for (i = 0; i < n; i++) {
2721 hd[i] = drive_get_by_index(IF_IDE, i);