Split DMA controller in two
[qemu/qemu_0_9_1_stable.git] / hw / esp.c

/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "vl.h"

/* debug ESP card */
//#define DEBUG_ESP

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O), also
 * produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 */

#ifdef DEBUG_ESP
#define DPRINTF(fmt, args...) \
do { printf("ESP: " fmt , ##args); } while (0)
#else
#define DPRINTF(fmt, args...)
#endif

#define ESP_MASK 0x3f
#define ESP_REGS 16
#define ESP_SIZE (ESP_REGS * 4)
#define TI_BUFSZ 32
/* The HBA is ID 7, so for simplicity limit to 7 devices.  */
#define ESP_MAX_DEVS 7
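
/* Note: the 16 byte-wide ESP registers are mapped on word boundaries, so the
   register window spans ESP_REGS * 4 = 64 bytes and the MMIO handlers below
   recover the register index with (addr & ESP_MASK) >> 2, e.g.
       offset 0x00 -> reg 0 (transfer count low)
       offset 0x04 -> reg 1 (transfer count high)
       offset 0x08 -> reg 2 (FIFO)
       offset 0x0c -> reg 3 (command)  */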

typedef struct ESPState ESPState;

struct ESPState {
    BlockDriverState **bd;
    uint8_t rregs[ESP_REGS];
    uint8_t wregs[ESP_REGS];
    int32_t ti_size;
    uint32_t ti_rptr, ti_wptr;
    uint8_t ti_buf[TI_BUFSZ];
    int sense;
    int dma;
    SCSIDevice *scsi_dev[MAX_DISKS];
    SCSIDevice *current_dev;
    uint8_t cmdbuf[TI_BUFSZ];
    int cmdlen;
    int do_cmd;

    /* The amount of data left in the current DMA transfer.  */
    uint32_t dma_left;
    /* The size of the current DMA transfer.  Zero if no transfer is in
       progress.  */
    uint32_t dma_counter;
    uint8_t *async_buf;
    uint32_t async_len;
    void *dma_opaque;
};
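
/* How the transfer state above is used (summary of the code below): ti_size
   tracks the data still expected for the current SCSI command and is negative
   when data flows from the initiator to the device (see esp_do_dma).
   dma_left is what remains of the DMA transfer programmed by handle_ti, while
   dma_counter keeps that transfer's full size so esp_command_complete can
   raise the deferred completion interrupt once the SCSI layer finishes. */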

#define STAT_DO 0x00
#define STAT_DI 0x01
#define STAT_CD 0x02
#define STAT_ST 0x03
#define STAT_MI 0x06
#define STAT_MO 0x07

#define STAT_TC 0x10
#define STAT_PE 0x20
#define STAT_GE 0x40
#define STAT_IN 0x80

#define INTR_FC 0x08
#define INTR_BS 0x10
#define INTR_DC 0x20
#define INTR_RST 0x80

#define SEQ_0 0x0
#define SEQ_CD 0x4
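
/* The STAT_* values are reported through rregs[4] (status register), the
   INTR_* bits through rregs[5] (interrupt register) and the SEQ_* values
   through rregs[6] (sequence step), which is how the handlers below publish
   command results to the guest driver. */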

static int get_cmd(ESPState *s, uint8_t *buf)
{
    uint32_t dmalen;
    int target;

    dmalen = s->rregs[0] | (s->rregs[1] << 8);
    target = s->wregs[4] & 7;
    DPRINTF("get_cmd: len %d target %d\n", dmalen, target);
    if (s->dma) {
        espdma_memory_read(s->dma_opaque, buf, dmalen);
    } else {
        buf[0] = 0;
        memcpy(&buf[1], s->ti_buf, dmalen);
        dmalen++;
    }

    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;

    if (s->current_dev) {
        /* Started a new command before the old one finished.  Cancel it. */
        scsi_cancel_io(s->current_dev, 0);
        s->async_len = 0;
    }

    if (target >= MAX_DISKS || !s->scsi_dev[target]) {
        // No such drive
        s->rregs[4] = STAT_IN;
        s->rregs[5] = INTR_DC;
        s->rregs[6] = SEQ_0;
        espdma_raise_irq(s->dma_opaque);
        return 0;
    }
    s->current_dev = s->scsi_dev[target];
    return dmalen;
}

static void do_cmd(ESPState *s, uint8_t *buf)
{
    int32_t datalen;
    int lun;

    DPRINTF("do_cmd: busid 0x%x\n", buf[0]);
    lun = buf[0] & 7;
    datalen = scsi_send_command(s->current_dev, 0, &buf[1], lun);
    s->ti_size = datalen;
    if (datalen != 0) {
        s->rregs[4] = STAT_IN | STAT_TC;
        s->dma_left = 0;
        s->dma_counter = 0;
        if (datalen > 0) {
            s->rregs[4] |= STAT_DI;
            scsi_read_data(s->current_dev, 0);
        } else {
            s->rregs[4] |= STAT_DO;
            scsi_write_data(s->current_dev, 0);
        }
    }
    s->rregs[5] = INTR_BS | INTR_FC;
    s->rregs[6] = SEQ_CD;
    espdma_raise_irq(s->dma_opaque);
}

static void handle_satn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    len = get_cmd(s, buf);
    if (len)
        do_cmd(s, buf);
}

static void handle_satn_stop(ESPState *s)
{
    s->cmdlen = get_cmd(s, s->cmdbuf);
    if (s->cmdlen) {
        DPRINTF("Set ATN & Stop: cmdlen %d\n", s->cmdlen);
        s->do_cmd = 1;
        s->rregs[4] = STAT_IN | STAT_TC | STAT_CD;
        s->rregs[5] = INTR_BS | INTR_FC;
        s->rregs[6] = SEQ_CD;
        espdma_raise_irq(s->dma_opaque);
    }
}

static void write_response(ESPState *s)
{
    DPRINTF("Transfer status (sense=%d)\n", s->sense);
    s->ti_buf[0] = s->sense;
    s->ti_buf[1] = 0;
    if (s->dma) {
        espdma_memory_write(s->dma_opaque, s->ti_buf, 2);
        s->rregs[4] = STAT_IN | STAT_TC | STAT_ST;
        s->rregs[5] = INTR_BS | INTR_FC;
        s->rregs[6] = SEQ_CD;
    } else {
        s->ti_size = 2;
        s->ti_rptr = 0;
        s->ti_wptr = 0;
        s->rregs[7] = 2;
    }
    espdma_raise_irq(s->dma_opaque);
}

static void esp_dma_done(ESPState *s)
{
    s->rregs[4] |= STAT_IN | STAT_TC;
    s->rregs[5] = INTR_BS;
    s->rregs[6] = 0;
    s->rregs[7] = 0;
    s->rregs[0] = 0;
    s->rregs[1] = 0;
    espdma_raise_irq(s->dma_opaque);
}

static void esp_do_dma(ESPState *s)
{
    uint32_t len;
    int to_device;

    to_device = (s->ti_size < 0);
    len = s->dma_left;
    if (s->do_cmd) {
        DPRINTF("command len %d + %d\n", s->cmdlen, len);
        espdma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        espdma_memory_read(s->dma_opaque, s->async_buf, len);
    } else {
        espdma_memory_write(s->dma_opaque, s->async_buf, len);
    }
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device)
        s->ti_size += len;
    else
        s->ti_size -= len;
    if (s->async_len == 0) {
        if (to_device) {
            // ti_size is negative
            scsi_write_data(s->current_dev, 0);
        } else {
            scsi_read_data(s->current_dev, 0);
            /* If there is still data to be read from the device then
               complete the DMA operation immediately.  Otherwise defer
               until the scsi layer has completed.  */
            if (s->dma_left == 0 && s->ti_size > 0) {
                esp_dma_done(s);
            }
        }
    } else {
        /* Partially filled a scsi buffer.  Complete immediately.  */
        esp_dma_done(s);
    }
}
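
/* Summary of the DMA flow implemented here: handle_ti programs dma_left and
   calls esp_do_dma; esp_do_dma moves up to min(dma_left, async_len) bytes
   between the DMA engine and the buffer handed out by the SCSI layer; the
   SCSI layer then calls esp_command_complete (below) either with a fresh
   buffer, which restarts esp_do_dma, or with the final completion, which
   ends the transfer via esp_dma_done. */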

static void esp_command_complete(void *opaque, int reason, uint32_t tag,
                                 uint32_t arg)
{
    ESPState *s = (ESPState *)opaque;

    if (reason == SCSI_REASON_DONE) {
        DPRINTF("SCSI Command complete\n");
        if (s->ti_size != 0)
            DPRINTF("SCSI command completed unexpectedly\n");
        s->ti_size = 0;
        s->dma_left = 0;
        s->async_len = 0;
        if (arg)
            DPRINTF("Command failed\n");
        s->sense = arg;
        s->rregs[4] = STAT_ST;
        esp_dma_done(s);
        s->current_dev = NULL;
    } else {
        DPRINTF("transfer %d/%d\n", s->dma_left, s->ti_size);
        s->async_len = arg;
        s->async_buf = scsi_get_buf(s->current_dev, 0);
        if (s->dma_left) {
            esp_do_dma(s);
        } else if (s->dma_counter != 0 && s->ti_size <= 0) {
            /* If this was the last part of a DMA transfer then the
               completion interrupt is deferred to here.  */
            esp_dma_done(s);
        }
    }
}

static void handle_ti(ESPState *s)
{
    uint32_t dmalen, minlen;

    dmalen = s->rregs[0] | (s->rregs[1] << 8);
    if (dmalen == 0) {
        dmalen = 0x10000;
    }
    s->dma_counter = dmalen;

    if (s->do_cmd)
        minlen = (dmalen < 32) ? dmalen : 32;
    else if (s->ti_size < 0)
        minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
    else
        minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
    DPRINTF("Transfer Information len %d\n", minlen);
    if (s->dma) {
        s->dma_left = minlen;
        s->rregs[4] &= ~STAT_TC;
        esp_do_dma(s);
    } else if (s->do_cmd) {
        DPRINTF("command len %d\n", s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
}

static void esp_reset(void *opaque)
{
    ESPState *s = opaque;

    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->rregs[0x0e] = 0x4; // Indicate fas100a
    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;
    s->dma = 0;
    s->do_cmd = 0;
}

static uint32_t esp_mem_readb(void *opaque, target_phys_addr_t addr)
{
    ESPState *s = opaque;
    uint32_t saddr;

    saddr = (addr & ESP_MASK) >> 2;
    DPRINTF("read reg[%d]: 0x%2.2x\n", saddr, s->rregs[saddr]);
    switch (saddr) {
    case 2:
        // FIFO
        if (s->ti_size > 0) {
            s->ti_size--;
            if ((s->rregs[4] & 6) == 0) {
                /* Data in/out.  */
                fprintf(stderr, "esp: PIO data read not implemented\n");
                s->rregs[2] = 0;
            } else {
                s->rregs[2] = s->ti_buf[s->ti_rptr++];
            }
            espdma_raise_irq(s->dma_opaque);
        }
        if (s->ti_size == 0) {
            s->ti_rptr = 0;
            s->ti_wptr = 0;
        }
        break;
    case 5:
        // interrupt
        // Clear interrupt/error status bits
        s->rregs[4] &= ~(STAT_IN | STAT_GE | STAT_PE);
        espdma_clear_irq(s->dma_opaque);
        break;
    default:
        break;
    }
    return s->rregs[saddr];
}

static void esp_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    ESPState *s = opaque;
    uint32_t saddr;

    saddr = (addr & ESP_MASK) >> 2;
    DPRINTF("write reg[%d]: 0x%2.2x -> 0x%2.2x\n", saddr, s->wregs[saddr], val);
    switch (saddr) {
    case 0:
    case 1:
        s->rregs[4] &= ~STAT_TC;
        break;
    case 2:
        // FIFO
        if (s->do_cmd) {
            s->cmdbuf[s->cmdlen++] = val & 0xff;
        } else if ((s->rregs[4] & 6) == 0) {
            uint8_t buf;
            buf = val & 0xff;
            s->ti_size--;
            fprintf(stderr, "esp: PIO data write not implemented\n");
        } else {
            s->ti_size++;
            s->ti_buf[s->ti_wptr++] = val & 0xff;
        }
        break;
    case 3:
        s->rregs[saddr] = val;
        // Command
        if (val & 0x80) {
            s->dma = 1;
            /* Reload DMA counter.  */
            s->rregs[0] = s->wregs[0];
            s->rregs[1] = s->wregs[1];
        } else {
            s->dma = 0;
        }
        switch(val & 0x7f) {
        case 0:
            DPRINTF("NOP (%2.2x)\n", val);
            break;
        case 1:
            DPRINTF("Flush FIFO (%2.2x)\n", val);
            //s->ti_size = 0;
            s->rregs[5] = INTR_FC;
            s->rregs[6] = 0;
            break;
        case 2:
            DPRINTF("Chip reset (%2.2x)\n", val);
            esp_reset(s);
            break;
        case 3:
            DPRINTF("Bus reset (%2.2x)\n", val);
            s->rregs[5] = INTR_RST;
            if (!(s->wregs[8] & 0x40)) {
                espdma_raise_irq(s->dma_opaque);
            }
            break;
        case 0x10:
            handle_ti(s);
            break;
        case 0x11:
            DPRINTF("Initiator Command Complete Sequence (%2.2x)\n", val);
            write_response(s);
            break;
        case 0x12:
            DPRINTF("Message Accepted (%2.2x)\n", val);
            write_response(s);
            s->rregs[5] = INTR_DC;
            s->rregs[6] = 0;
            break;
        case 0x1a:
            DPRINTF("Set ATN (%2.2x)\n", val);
            break;
        case 0x42:
            DPRINTF("Set ATN (%2.2x)\n", val);
            handle_satn(s);
            break;
        case 0x43:
            DPRINTF("Set ATN & stop (%2.2x)\n", val);
            handle_satn_stop(s);
            break;
        default:
            DPRINTF("Unhandled ESP command (%2.2x)\n", val);
            break;
        }
        break;
    case 4 ... 7:
        break;
    case 8:
        s->rregs[saddr] = val;
        break;
    case 9 ... 10:
        break;
    case 11:
        s->rregs[saddr] = val & 0x15;
        break;
    case 12 ... 15:
        s->rregs[saddr] = val;
        break;
    default:
        break;
    }
    s->wregs[saddr] = val;
}

static CPUReadMemoryFunc *esp_mem_read[3] = {
    esp_mem_readb,
    esp_mem_readb,
    esp_mem_readb,
};

static CPUWriteMemoryFunc *esp_mem_write[3] = {
    esp_mem_writeb,
    esp_mem_writeb,
    esp_mem_writeb,
};

static void esp_save(QEMUFile *f, void *opaque)
{
    ESPState *s = opaque;

    qemu_put_buffer(f, s->rregs, ESP_REGS);
    qemu_put_buffer(f, s->wregs, ESP_REGS);
    qemu_put_be32s(f, &s->ti_size);
    qemu_put_be32s(f, &s->ti_rptr);
    qemu_put_be32s(f, &s->ti_wptr);
    qemu_put_buffer(f, s->ti_buf, TI_BUFSZ);
    qemu_put_be32s(f, &s->sense);
    qemu_put_be32s(f, &s->dma);
    qemu_put_buffer(f, s->cmdbuf, TI_BUFSZ);
    qemu_put_be32s(f, &s->cmdlen);
    qemu_put_be32s(f, &s->do_cmd);
    qemu_put_be32s(f, &s->dma_left);
    // There should be no transfers in progress, so dma_counter is not saved
}

static int esp_load(QEMUFile *f, void *opaque, int version_id)
{
    ESPState *s = opaque;

    if (version_id != 3)
        return -EINVAL; // Cannot emulate 2

    qemu_get_buffer(f, s->rregs, ESP_REGS);
    qemu_get_buffer(f, s->wregs, ESP_REGS);
    qemu_get_be32s(f, &s->ti_size);
    qemu_get_be32s(f, &s->ti_rptr);
    qemu_get_be32s(f, &s->ti_wptr);
    qemu_get_buffer(f, s->ti_buf, TI_BUFSZ);
    qemu_get_be32s(f, &s->sense);
    qemu_get_be32s(f, &s->dma);
    qemu_get_buffer(f, s->cmdbuf, TI_BUFSZ);
    qemu_get_be32s(f, &s->cmdlen);
    qemu_get_be32s(f, &s->do_cmd);
    qemu_get_be32s(f, &s->dma_left);

    return 0;
}

void esp_scsi_attach(void *opaque, BlockDriverState *bd, int id)
{
    ESPState *s = (ESPState *)opaque;

    if (id < 0) {
        for (id = 0; id < ESP_MAX_DEVS; id++) {
            if (s->scsi_dev[id] == NULL)
                break;
        }
    }
    if (id >= ESP_MAX_DEVS) {
        DPRINTF("Bad Device ID %d\n", id);
        return;
    }
    if (s->scsi_dev[id]) {
        DPRINTF("Destroying device %d\n", id);
        scsi_disk_destroy(s->scsi_dev[id]);
    }
    DPRINTF("Attaching block device %d\n", id);
    /* Command queueing is not implemented.  */
    s->scsi_dev[id] = scsi_disk_init(bd, 0, esp_command_complete, s);
}

void *esp_init(BlockDriverState **bd, target_phys_addr_t espaddr,
               void *dma_opaque)
{
    ESPState *s;
    int esp_io_memory;

    s = qemu_mallocz(sizeof(ESPState));
    if (!s)
        return NULL;

    s->bd = bd;
    s->dma_opaque = dma_opaque;
    sparc32_dma_set_reset_data(dma_opaque, esp_reset, s);

    esp_io_memory = cpu_register_io_memory(0, esp_mem_read, esp_mem_write, s);
    cpu_register_physical_memory(espaddr, ESP_SIZE, esp_io_memory);

    esp_reset(s);

    register_savevm("esp", espaddr, 3, esp_save, esp_load, s);
    qemu_register_reset(esp_reset, s);

    return s;
}
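
/*
 * Usage sketch (hypothetical board code, not part of this file): a Sparc32
 * machine model would create its DMA controller first and then do something
 * like
 *
 *     esp = esp_init(scsi_bds, esp_base, espdma);
 *     esp_scsi_attach(esp, bs, -1);
 *
 * where scsi_bds, esp_base, espdma and bs are placeholders supplied by the
 * board setup; passing a negative id lets esp_scsi_attach pick the first
 * free target.
 */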