/*
 * QEMU IDE Emulation: PCI Bus support.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Openedhand Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <hw/hw.h>
#include <hw/pc.h>
#include <hw/pci.h>
#include <hw/isa.h>
#include "block.h"
#include "block_int.h"
#include "dma.h"

#include <hw/ide/pci.h>

#define BMDMA_PAGE_SIZE 4096

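/*
 * Latch the active unit, completion callback, and transfer position in
 * the BMDMA state (so an interrupted request can be restarted later),
 * then kick the callback immediately if the guest has already set the
 * start bit.
 */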
static void bmdma_start_dma(IDEDMA *dma, IDEState *s,
                            BlockDriverCompletionFunc *dma_cb)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->unit = s->unit;
    bm->dma_cb = dma_cb;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;
    bm->sector_num = ide_get_sector(s);
    bm->nsector = s->nsector;

    if (bm->status & BM_STATUS_DMAING) {
        bm->dma_cb(bmdma_active_if(bm), 0);
    }
}

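/*
 * PRD (Physical Region Descriptor) format, per the SFF-8038i bus master
 * IDE convention: each 8-byte little-endian entry holds a 32-bit physical
 * base address plus a second word whose low 16 bits are the byte count
 * (0 means 64 KiB; forced even by the 0xfffe mask) and whose bit 31
 * marks the last entry of the table.
 */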
/* return 0 if buffer completed */
static int bmdma_prepare_buf(IDEDMA *dma, int is_write)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    qemu_sglist_init(&s->sg, s->nsector / (BMDMA_PAGE_SIZE / 512) + 1);
    s->io_buffer_size = 0;
    for(;;) {
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE)
                return s->io_buffer_size != 0;
            cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0)
                len = 0x10000;
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        l = bm->cur_prd_len;
        if (l > 0) {
            qemu_sglist_add(&s->sg, bm->cur_prd_addr, l);
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_size += l;
        }
    }
    return 1;
}

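/*
 * Unlike bmdma_prepare_buf(), which builds a scatter/gather list for
 * zero-copy DMA, this walks the PRD table copying data between guest
 * memory and s->io_buffer, serving callers that stage data in the
 * intermediate buffer (such as the ATAPI path).
 */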
/* return 0 if buffer completed */
static int bmdma_rw_buf(IDEDMA *dma, int is_write)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    for(;;) {
        l = s->io_buffer_size - s->io_buffer_index;
        if (l <= 0)
            break;
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE)
                return 0;
            cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0)
                len = 0x10000;
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        if (l > bm->cur_prd_len)
            l = bm->cur_prd_len;
        if (l > 0) {
            if (is_write) {
                cpu_physical_memory_write(bm->cur_prd_addr,
                                          s->io_buffer + s->io_buffer_index, l);
            } else {
                cpu_physical_memory_read(bm->cur_prd_addr,
                                         s->io_buffer + s->io_buffer_index, l);
            }
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_index += l;
        }
    }
    return 1;
}

static int bmdma_set_unit(IDEDMA *dma, int unit)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    bm->unit = unit;

    return 0;
}

static int bmdma_add_status(IDEDMA *dma, int status)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    bm->status |= status;

    return 0;
}

static int bmdma_set_inactive(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->status &= ~BM_STATUS_DMAING;
    bm->dma_cb = NULL;
    bm->unit = -1;

    return 0;
}

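/*
 * Restart machinery: when a request failed and stopped the VM (e.g.
 * werror=stop), the parameters latched by bmdma_start_dma() are used to
 * reissue the whole transfer from the top of the PRD table on resume.
 */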
static void bmdma_restart_dma(BMDMAState *bm, int is_read)
{
    IDEState *s = bmdma_active_if(bm);

    ide_set_sector(s, bm->sector_num);
    s->io_buffer_index = 0;
    s->io_buffer_size = 0;
    s->nsector = bm->nsector;
    s->is_read = is_read;
    bm->cur_addr = bm->addr;
    bm->dma_cb = ide_dma_cb;
    bmdma_start_dma(&bm->dma, s, bm->dma_cb);
}

static void bmdma_restart_bh(void *opaque)
{
    BMDMAState *bm = opaque;
    int is_read;

    qemu_bh_delete(bm->bh);
    bm->bh = NULL;

    is_read = !!(bm->status & BM_STATUS_RETRY_READ);

    if (bm->status & BM_STATUS_DMA_RETRY) {
        bm->status &= ~(BM_STATUS_DMA_RETRY | BM_STATUS_RETRY_READ);
        bmdma_restart_dma(bm, is_read);
    } else if (bm->status & BM_STATUS_PIO_RETRY) {
        bm->status &= ~(BM_STATUS_PIO_RETRY | BM_STATUS_RETRY_READ);
        if (is_read) {
            ide_sector_read(bmdma_active_if(bm));
        } else {
            ide_sector_write(bmdma_active_if(bm));
        }
    } else if (bm->status & BM_STATUS_RETRY_FLUSH) {
        ide_flush_cache(bmdma_active_if(bm));
    }
}

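/*
 * Registered as the IDEDMA restart_cb; it fires from the VM state change
 * notifier on resume. The actual retry is deferred to a bottom half,
 * presumably so new block-layer requests are not issued from inside the
 * notifier itself.
 */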
static void bmdma_restart_cb(void *opaque, int running, int reason)
{
    IDEDMA *dma = opaque;
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    if (!running)
        return;

    if (!bm->bh) {
        /* pass bm itself: bmdma_restart_bh expects a BMDMAState opaque */
        bm->bh = qemu_bh_new(bmdma_restart_bh, bm);
        qemu_bh_schedule(bm->bh);
    }
}

static void bmdma_cancel(BMDMAState *bm)
{
    if (bm->status & BM_STATUS_DMAING) {
        /* cancel DMA request */
        bmdma_set_inactive(&bm->dma);
    }
}

static int bmdma_reset(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

#ifdef DEBUG_IDE
    printf("ide: dma_reset\n");
#endif
    bmdma_cancel(bm);
    bm->cmd = 0;
    bm->status = 0;
    bm->addr = 0;
    bm->cur_addr = 0;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;
    bm->sector_num = 0;
    bm->nsector = 0;

    return 0;
}

static int bmdma_start_transfer(IDEDMA *dma)
{
    return 0;
}

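/*
 * Interrupt handler interposed between the IDE bus and the real output
 * line: a rising edge latches BM_STATUS_INT in the status register for
 * the guest to observe before the interrupt is forwarded.
 */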
static void bmdma_irq(void *opaque, int n, int level)
{
    BMDMAState *bm = opaque;

    if (!level) {
        /* pass through lower */
        qemu_set_irq(bm->irq, level);
        return;
    }

    bm->status |= BM_STATUS_INT;

    /* trigger the real irq */
    qemu_set_irq(bm->irq, level);
}

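/*
 * Bus master command register: bit 0 (BM_CMD_START, a.k.a. SSBM) starts
 * and stops the engine, bit 3 selects the transfer direction; the other
 * bits are reserved, hence the final "val & 0x09" mask.
 */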
void bmdma_cmd_writeb(void *opaque, uint32_t addr, uint32_t val)
{
    BMDMAState *bm = opaque;
#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, val);
#endif

    /* Ignore writes to SSBM if it keeps the old value */
    if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) {
        if (!(val & BM_CMD_START)) {
            /*
             * We can't cancel scatter/gather DMA in the middle of the
             * operation or a partial (not full) DMA transfer would reach
             * the storage, so we wait for completion instead (we behave
             * as if the DMA had already completed by the time the guest
             * tried to cancel it by writing BM_CMD_START clear).
             *
             * In the future we'll be able to safely cancel the I/O once
             * the whole DMA operation is submitted to disk as a single
             * aio operation with preadv/pwritev.
             */
            if (bm->bus->dma->aiocb) {
                qemu_aio_flush();
#ifdef DEBUG_IDE
                if (bm->bus->dma->aiocb)
                    printf("ide_dma_cancel: aiocb still pending\n");
                if (bm->status & BM_STATUS_DMAING)
                    printf("ide_dma_cancel: BM_STATUS_DMAING still pending\n");
#endif
            }
        } else {
            bm->cur_addr = bm->addr;
            if (!(bm->status & BM_STATUS_DMAING)) {
                bm->status |= BM_STATUS_DMAING;
                /* start dma transfer if possible */
                if (bm->dma_cb)
                    bm->dma_cb(bmdma_active_if(bm), 0);
            }
        }
    }

    bm->cmd = val & 0x09;
}

static void bmdma_addr_read(IORange *ioport, uint64_t addr,
                            unsigned width, uint64_t *data)
{
    BMDMAState *bm = container_of(ioport, BMDMAState, addr_ioport);
    uint32_t mask = (1ULL << (width * 8)) - 1;

    *data = (bm->addr >> (addr * 8)) & mask;
#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, (unsigned)*data);
#endif
}

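/*
 * The PRD table pointer register accepts byte, word, and dword writes;
 * only the bytes covered by the access are updated, and the written bits
 * are masked with ~3 so the table pointer stays dword-aligned.
 */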
static void bmdma_addr_write(IORange *ioport, uint64_t addr,
                             unsigned width, uint64_t data)
{
    BMDMAState *bm = container_of(ioport, BMDMAState, addr_ioport);
    int shift = addr * 8;
    uint32_t mask = (1ULL << (width * 8)) - 1;

#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, (unsigned)data);
#endif
    bm->addr &= ~(mask << shift);
    bm->addr |= ((data & mask) << shift) & ~3;
}

const IORangeOps bmdma_addr_ioport_ops = {
    .read = bmdma_addr_read,
    .write = bmdma_addr_write,
};

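/*
 * The cur_* PRD walk state travels in a vmstate subsection that is only
 * sent while a descriptor is partially consumed, keeping migration
 * streams compatible with versions that predate it.
 */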
static bool ide_bmdma_current_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    return (bm->cur_prd_len != 0);
}

static const VMStateDescription vmstate_bmdma_current = {
    .name = "ide bmdma_current",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(cur_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_last, BMDMAState),
        VMSTATE_UINT32(cur_prd_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_len, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma = {
    .name = "ide bmdma",
    .version_id = 3,
    .minimum_version_id = 0,
    .minimum_version_id_old = 0,
    .fields = (VMStateField []) {
        VMSTATE_UINT8(cmd, BMDMAState),
        VMSTATE_UINT8(status, BMDMAState),
        VMSTATE_UINT32(addr, BMDMAState),
        VMSTATE_INT64(sector_num, BMDMAState),
        VMSTATE_UINT32(nsector, BMDMAState),
        VMSTATE_UINT8(unit, BMDMAState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection []) {
        {
            .vmsd = &vmstate_bmdma_current,
            .needed = ide_bmdma_current_needed,
        }, {
            /* empty */
        }
    }
};

static int ide_pci_post_load(void *opaque, int version_id)
{
    PCIIDEState *d = opaque;
    int i;

    for(i = 0; i < 2; i++) {
        /* current versions always store 0/1, but older versions
           stored bigger values. We only need the last bit */
        d->bmdma[i].unit &= 1;
    }
    return 0;
}

const VMStateDescription vmstate_ide_pci = {
    .name = "ide",
    .version_id = 3,
    .minimum_version_id = 0,
    .minimum_version_id_old = 0,
    .post_load = ide_pci_post_load,
    .fields = (VMStateField []) {
        VMSTATE_PCI_DEVICE(dev, PCIIDEState),
        VMSTATE_STRUCT_ARRAY(bmdma, PCIIDEState, 2, 0,
                             vmstate_bmdma, BMDMAState),
        VMSTATE_IDE_BUS_ARRAY(bus, PCIIDEState, 2),
        VMSTATE_IDE_DRIVES(bus[0].ifs, PCIIDEState),
        VMSTATE_IDE_DRIVES(bus[1].ifs, PCIIDEState),
        VMSTATE_END_OF_LIST()
    }
};

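/*
 * Attach up to four drives: hd_table[0..3] map to primary master,
 * primary slave, secondary master, and secondary slave, in that order;
 * NULL entries are skipped.
 */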
void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table)
{
    PCIIDEState *d = DO_UPCAST(PCIIDEState, dev, dev);
    static const int bus[4] = { 0, 0, 1, 1 };
    static const int unit[4] = { 0, 1, 0, 1 };
    int i;

    for (i = 0; i < 4; i++) {
        if (hd_table[i] == NULL)
            continue;
        ide_create_drive(d->bus+bus[i], unit[i], hd_table[i]);
    }
}

static const struct IDEDMAOps bmdma_ops = {
    .start_dma = bmdma_start_dma,
    .start_transfer = bmdma_start_transfer,
    .prepare_buf = bmdma_prepare_buf,
    .rw_buf = bmdma_rw_buf,
    .set_unit = bmdma_set_unit,
    .add_status = bmdma_add_status,
    .set_inactive = bmdma_set_inactive,
    .restart_cb = bmdma_restart_cb,
    .reset = bmdma_reset,
};

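/*
 * Wire a BMDMA engine to an IDE bus: install the DMA ops and interpose
 * bmdma_irq() on the bus interrupt line so status bits are updated before
 * the original IRQ is raised. A second call for the same bus is a no-op.
 */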
void bmdma_init(IDEBus *bus, BMDMAState *bm)
{
    qemu_irq *irq;

    if (bus->dma == &bm->dma) {
        return;
    }

    bm->dma.ops = &bmdma_ops;
    bus->dma = &bm->dma;
    bm->irq = bus->irq;
    irq = qemu_allocate_irqs(bmdma_irq, bm, 1);
    bus->irq = *irq;
}