/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "sysbus.h"
#include "pci.h"
#include "scsi.h"
#include "esp.h"
#include "trace.h"
#include "qemu-log.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 */

#define ESP_REGS 16
#define TI_BUFSZ 16

typedef struct ESPState ESPState;

struct ESPState {
    uint8_t rregs[ESP_REGS];
    uint8_t wregs[ESP_REGS];
    qemu_irq irq;
    uint8_t chip_id;
    int32_t ti_size;
    uint32_t ti_rptr, ti_wptr;
    uint32_t status;
    uint32_t dma;
    uint8_t ti_buf[TI_BUFSZ];
    SCSIBus bus;
    SCSIDevice *current_dev;
    SCSIRequest *current_req;
    uint8_t cmdbuf[TI_BUFSZ];
    uint32_t cmdlen;
    uint32_t do_cmd;

    /* The amount of data left in the current DMA transfer. */
    uint32_t dma_left;
    /* The size of the current DMA transfer. Zero if no transfer is in
       progress. */
    uint32_t dma_counter;
    int dma_enabled;

    uint32_t async_len;
    uint8_t *async_buf;

    ESPDMAMemoryReadWriteFunc dma_memory_read;
    ESPDMAMemoryReadWriteFunc dma_memory_write;
    void *dma_opaque;
    void (*dma_cb)(ESPState *s);
};

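/*
 * Several register offsets below decode to a different register depending
 * on the access direction (e.g. 0x4 reads back RSTAT but latches WBUSID on
 * a write), which is why the device keeps separate rregs[]/wregs[] arrays
 * above.
 */
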
#define ESP_TCLO   0x0
#define ESP_TCMID  0x1
#define ESP_FIFO   0x2
#define ESP_CMD    0x3
#define ESP_RSTAT  0x4
#define ESP_WBUSID 0x4
#define ESP_RINTR  0x5
#define ESP_WSEL   0x5
#define ESP_RSEQ   0x6
#define ESP_WSYNTP 0x6
#define ESP_RFLAGS 0x7
#define ESP_WSYNO  0x7
#define ESP_CFG1   0x8
#define ESP_RRES1  0x9
#define ESP_WCCF   0x9
#define ESP_RRES2  0xa
#define ESP_WTEST  0xa
#define ESP_CFG2   0xb
#define ESP_CFG3   0xc
#define ESP_RES3   0xd
#define ESP_TCHI   0xe
#define ESP_RES4   0xf

#define CMD_DMA 0x80
#define CMD_CMD 0x7f

#define CMD_NOP      0x00
#define CMD_FLUSH    0x01
#define CMD_RESET    0x02
#define CMD_BUSRESET 0x03
#define CMD_TI       0x10
#define CMD_ICCS     0x11
#define CMD_MSGACC   0x12
#define CMD_PAD      0x18
#define CMD_SATN     0x1a
#define CMD_RSTATN   0x1b
#define CMD_SEL      0x41
#define CMD_SELATN   0x42
#define CMD_SELATNS  0x43
#define CMD_ENSEL    0x44
#define CMD_DISSEL   0x45

#define STAT_DO 0x00
#define STAT_DI 0x01
#define STAT_CD 0x02
#define STAT_ST 0x03
#define STAT_MO 0x06
#define STAT_MI 0x07
#define STAT_PIO_MASK 0x06

#define STAT_TC 0x10
#define STAT_PE 0x20
#define STAT_GE 0x40
#define STAT_INT 0x80

#define BUSID_DID 0x07

#define INTR_FC 0x08
#define INTR_BS 0x10
#define INTR_DC 0x20
#define INTR_RST 0x80

#define SEQ_0 0x0
#define SEQ_CD 0x4

#define CFG1_RESREPT 0x40

#define TCHI_FAS100A 0x4
#define TCHI_AM53C974 0x12

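/*
 * TCHI doubles as a chip ID after reset (see esp_hard_reset): the sysbus
 * variant reports TCHI_FAS100A and the PCI variant TCHI_AM53C974, which is
 * presumably how guest drivers tell the chip variants apart.
 */
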
static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

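/*
 * Commands that require the external DMA engine are deferred while it is
 * disabled: the handler parks itself in dma_cb and esp_dma_enable() replays
 * it exactly once when the enable line is raised.
 */
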
static void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

static void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

static uint32_t get_cmd(ESPState *s, uint8_t *buf)
{
    uint32_t dmalen;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
        s->dma_memory_read(s->dma_opaque, buf, dmalen);
    } else {
        dmalen = s->ti_size;
        memcpy(buf, s->ti_buf, dmalen);
        buf[0] = buf[2] >> 5;
    }
    trace_esp_get_cmd(dmalen, target);

    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
        s->async_len = 0;
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return 0;
    }
    return dmalen;
}

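/*
 * Note on the sign of ti_size: scsi_req_enqueue() returns a positive length
 * for transfers from the device and a negative one for transfers to the
 * device, which is what the STAT_DI/STAT_DO selection below and the
 * "ti_size < 0" tests in the DMA path rely on.
 */
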
static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
{
    int32_t datalen;
    int lun;
    SCSIDevice *current_lun;

    trace_esp_do_busid_cmd(busid);
    lun = busid & 7;
    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
    s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->dma_left = 0;
        s->dma_counter = 0;
        if (datalen > 0) {
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
        }
        scsi_req_continue(s->current_req);
    }
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

static void do_cmd(ESPState *s, uint8_t *buf)
{
    uint8_t busid = buf[0];

    do_busid_cmd(s, &buf[1], busid);
}

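/*
 * The three selection commands map onto the handlers below, roughly: SEL
 * transfers the CDB without a message byte, SELATN sends a one-byte
 * IDENTIFY message first (stripped off by do_cmd above), and SELATNS stops
 * after the message so the CDB is fed in separately via the FIFO or DMA
 * (see the do_cmd flag and handle_ti).
 */
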
static void handle_satn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    len = get_cmd(s, buf);
    if (len) {
        do_cmd(s, buf);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    len = get_cmd(s, buf);
    if (len) {
        do_busid_cmd(s, buf, 0);
    }
}

static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->cmdlen = get_cmd(s, s->cmdbuf);
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);
    s->ti_buf[0] = s->status;
    s->ti_buf[1] = 0;
    if (s->dma) {
        s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
    } else {
        s->ti_size = 2;
        s->ti_rptr = 0;
        s->ti_wptr = 0;
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}

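/*
 * esp_dma_done() signals the end of a DMA transfer: it sets the
 * terminal-count status, clears the transfer counter and raises the
 * interrupt that the guest then acknowledges by reading RINTR.
 */
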
static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] = INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    s->rregs[ESP_TCLO] = 0;
    s->rregs[ESP_TCMID] = 0;
    esp_raise_irq(s);
}

static void esp_do_dma(ESPState *s)
{
    uint32_t len;
    int to_device;

    to_device = (s->ti_size < 0);
    len = s->dma_left;
    if (s->do_cmd) {
        trace_esp_do_dma(s->cmdlen, len);
        s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        s->dma_memory_read(s->dma_opaque, s->async_buf, len);
    } else {
        s->dma_memory_write(s->dma_opaque, s->async_buf, len);
    }
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /* If there is still data to be read from the device then
           complete the DMA operation immediately. Otherwise defer
           until the scsi layer has completed. */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
}

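/*
 * esp_do_dma() moves at most async_len bytes per call; once the scsi
 * layer's buffer is drained it calls scsi_req_continue() and waits for
 * esp_transfer_data() below to supply the next chunk, so a large transfer
 * completes across several invocations.
 */
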
static void esp_command_complete(SCSIRequest *req, uint32_t status,
                                 size_t resid)
{
    ESPState *s = req->hba_private;

    trace_esp_command_complete();
    if (s->ti_size != 0) {
        trace_esp_command_complete_unexpected();
    }
    s->ti_size = 0;
    s->dma_left = 0;
    s->async_len = 0;
    if (status) {
        trace_esp_command_complete_fail();
    }
    s->status = status;
    s->rregs[ESP_RSTAT] = STAT_ST;
    esp_dma_done(s);
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

static void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;

    trace_esp_transfer_data(s->dma_left, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);
    if (s->dma_left) {
        esp_do_dma(s);
    } else if (s->dma_counter != 0 && s->ti_size <= 0) {
        /* If this was the last part of a DMA transfer then the
           completion interrupt is deferred to here. */
        esp_dma_done(s);
    }
}

static void handle_ti(ESPState *s)
{
    uint32_t dmalen, minlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
    if (dmalen == 0) {
        /* A transfer counter of zero selects the maximum, 64 KiB. */
        dmalen = 0x10000;
    }
    s->dma_counter = dmalen;

    if (s->do_cmd) {
        minlen = (dmalen < 32) ? dmalen : 32;
    } else if (s->ti_size < 0) {
        minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
    } else {
        minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
    }
    trace_esp_handle_ti(minlen);
    if (s->dma) {
        s->dma_left = minlen;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else if (s->do_cmd) {
        trace_esp_handle_ti_cmd(s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
}

static void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->rregs[ESP_TCHI] = s->chip_id;
    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    esp_hard_reset(s);
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}

static uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t old_val;

    trace_esp_mem_readb(saddr, s->rregs[saddr]);
    switch (saddr) {
    case ESP_FIFO:
        if (s->ti_size > 0) {
            s->ti_size--;
            if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
                /* Data out. */
                qemu_log_mask(LOG_UNIMP,
                              "esp: PIO data read not implemented\n");
                s->rregs[ESP_FIFO] = 0;
            } else {
                s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
            }
            esp_raise_irq(s);
        }
        if (s->ti_size == 0) {
            s->ti_rptr = 0;
            s->ti_wptr = 0;
        }
        break;
    case ESP_RINTR:
        /* Clear sequence step, interrupt register and all status bits
           except TC */
        old_val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_lower_irq(s);

        return old_val;
    default:
        break;
    }
    return s->rregs[saddr];
}

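/*
 * Reading RINTR above has read-to-clear semantics: interrupt and
 * sequence-step state is reset and the IRQ line dropped, so a guest
 * interrupt handler acknowledges the chip simply by reading the register.
 */
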
static void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            s->cmdbuf[s->cmdlen++] = val & 0xff;
        } else if (s->ti_size == TI_BUFSZ - 1) {
            trace_esp_error_fifo_overrun();
        } else {
            s->ti_size++;
            s->ti_buf[s->ti_wptr++] = val & 0xff;
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter. */
            s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
            s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            //s->ti_size = 0;
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            s->rregs[ESP_RINTR] = INTR_RST;
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] = INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    case ESP_CFG2 ... ESP_RES4:
        s->rregs[saddr] = val;
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static bool esp_mem_accepts(void *opaque, target_phys_addr_t addr,
                            unsigned size, bool is_write)
{
    return (size == 1) || (is_write && size == 4);
}

static const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 3,
    .minimum_version_id = 3,
    .minimum_version_id_old = 3,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32(ti_rptr, ESPState),
        VMSTATE_UINT32(ti_wptr, ESPState),
        VMSTATE_BUFFER(ti_buf, ESPState),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_BUFFER(cmdbuf, ESPState),
        VMSTATE_UINT32(cmdlen, ESPState),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32(dma_left, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

typedef struct {
    SysBusDevice busdev;
    MemoryRegion iomem;
    uint32_t it_shift;
    ESPState esp;
} SysBusESPState;

static void sysbus_esp_mem_write(void *opaque, target_phys_addr_t addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(&sysbus->esp, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, target_phys_addr_t addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(&sysbus->esp, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

void esp_init(target_phys_addr_t espaddr, int it_shift,
              ESPDMAMemoryReadWriteFunc dma_memory_read,
              ESPDMAMemoryReadWriteFunc dma_memory_write,
              void *dma_opaque, qemu_irq irq, qemu_irq *reset,
              qemu_irq *dma_enable)
{
    DeviceState *dev;
    SysBusDevice *s;
    SysBusESPState *sysbus;
    ESPState *esp;

    dev = qdev_create(NULL, "esp");
    sysbus = DO_UPCAST(SysBusESPState, busdev.qdev, dev);
    esp = &sysbus->esp;
    esp->dma_memory_read = dma_memory_read;
    esp->dma_memory_write = dma_memory_write;
    esp->dma_opaque = dma_opaque;
    sysbus->it_shift = it_shift;
    /* XXX for now until rc4030 has been changed to use DMA enable signal */
    esp->dma_enabled = 1;
    qdev_init_nofail(dev);
    s = sysbus_from_qdev(dev);
    sysbus_connect_irq(s, 0, irq);
    sysbus_mmio_map(s, 0, espaddr);
    *reset = qdev_get_gpio_in(dev, 0);
    *dma_enable = qdev_get_gpio_in(dev, 1);
}

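/*
 * esp_init() is the legacy helper for boards (e.g. the Sparc32 machines
 * this device originally modelled) that wire the chip up with external
 * DMA callbacks and GPIO lines rather than instantiating the qdev device
 * directly.
 */
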
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    DeviceState *d = opaque;
    SysBusESPState *sysbus = container_of(d, SysBusESPState, busdev.qdev);
    ESPState *s = &sysbus->esp;

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        esp_dma_enable(s, irq, level);
        break;
    }
}

static int sysbus_esp_init(SysBusDevice *dev)
{
    SysBusESPState *sysbus = FROM_SYSBUS(SysBusESPState, dev);
    ESPState *s = &sysbus->esp;

    sysbus_init_irq(dev, &s->irq);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, &sysbus_esp_mem_ops, sysbus,
                          "esp", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(dev, &sysbus->iomem);

    qdev_init_gpio_in(&dev->qdev, sysbus_esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, &dev->qdev, &esp_scsi_info);
    return scsi_bus_legacy_handle_cmdline(&s->bus);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = DO_UPCAST(SysBusESPState, busdev.qdev, dev);
    esp_hard_reset(&sysbus->esp);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 0,
    .minimum_version_id = 0,
    .minimum_version_id_old = 0,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);

    k->init = sysbus_esp_init;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
}

static TypeInfo sysbus_esp_info = {
    .name = "esp",
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SysBusESPState),
    .class_init = sysbus_esp_class_init,
};

#define DMA_CMD   0x0
#define DMA_STC   0x1
#define DMA_SPA   0x2
#define DMA_WBC   0x3
#define DMA_WAC   0x4
#define DMA_STAT  0x5
#define DMA_SMDLA 0x6
#define DMA_WMAC  0x7

#define DMA_CMD_MASK   0x03
#define DMA_CMD_DIAG   0x04
#define DMA_CMD_MDL    0x10
#define DMA_CMD_INTE_P 0x20
#define DMA_CMD_INTE_D 0x40
#define DMA_CMD_DIR    0x80

#define DMA_STAT_PWDN    0x01
#define DMA_STAT_ERROR   0x02
#define DMA_STAT_ABORT   0x04
#define DMA_STAT_DONE    0x08
#define DMA_STAT_SCSIINT 0x10
#define DMA_STAT_BCMBLT  0x20

#define SBAC_STATUS 0x1000

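/*
 * The am53c974 presents the SCSI core at I/O offsets 0x00-0x3f (one 32-bit
 * slot per ESP register), the DMA engine registers above at 0x40-0x5f and
 * the SBAC register at 0x70; see esp_pci_io_read/esp_pci_io_write below
 * for the decoding.
 */
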
typedef struct PCIESPState {
    PCIDevice dev;
    MemoryRegion io;
    uint32_t dma_regs[8];
    uint32_t sbac;
    ESPState esp;
} PCIESPState;

static void esp_pci_handle_idle(PCIESPState *pci, uint32_t val)
{
    trace_esp_pci_dma_idle(val);
    esp_dma_enable(&pci->esp, 0, 0);
}

static void esp_pci_handle_blast(PCIESPState *pci, uint32_t val)
{
    trace_esp_pci_dma_blast(val);
    qemu_log_mask(LOG_UNIMP, "am53c974: cmd BLAST not implemented\n");
}

static void esp_pci_handle_abort(PCIESPState *pci, uint32_t val)
{
    trace_esp_pci_dma_abort(val);
    if (pci->esp.current_req) {
        scsi_req_cancel(pci->esp.current_req);
    }
}

static void esp_pci_handle_start(PCIESPState *pci, uint32_t val)
{
    trace_esp_pci_dma_start(val);

    pci->dma_regs[DMA_WBC] = pci->dma_regs[DMA_STC];
    pci->dma_regs[DMA_WAC] = pci->dma_regs[DMA_SPA];
    pci->dma_regs[DMA_WMAC] = pci->dma_regs[DMA_SMDLA];

    pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_BCMBLT | DMA_STAT_SCSIINT
                               | DMA_STAT_DONE | DMA_STAT_ABORT
                               | DMA_STAT_ERROR | DMA_STAT_PWDN);

    esp_dma_enable(&pci->esp, 0, 1);
}

static void esp_pci_dma_write(PCIESPState *pci, uint32_t saddr, uint32_t val)
{
    trace_esp_pci_dma_write(saddr, pci->dma_regs[saddr], val);
    switch (saddr) {
    case DMA_CMD:
        pci->dma_regs[saddr] = val;
        switch (val & DMA_CMD_MASK) {
        case 0x0: /* IDLE */
            esp_pci_handle_idle(pci, val);
            break;
        case 0x1: /* BLAST */
            esp_pci_handle_blast(pci, val);
            break;
        case 0x2: /* ABORT */
            esp_pci_handle_abort(pci, val);
            break;
        case 0x3: /* START */
            esp_pci_handle_start(pci, val);
            break;
        default: /* can't happen */
            abort();
        }
        break;
    case DMA_STC:
    case DMA_SPA:
    case DMA_SMDLA:
        pci->dma_regs[saddr] = val;
        break;
    case DMA_STAT:
        if (!(pci->sbac & SBAC_STATUS)) {
            /* clear some bits on write */
            uint32_t mask = DMA_STAT_ERROR | DMA_STAT_ABORT | DMA_STAT_DONE;
            pci->dma_regs[DMA_STAT] &= ~(val & mask);
        }
        break;
    default:
        trace_esp_pci_error_invalid_write_dma(val, saddr);
        return;
    }
}

static uint32_t esp_pci_dma_read(PCIESPState *pci, uint32_t saddr)
{
    uint32_t val;

    val = pci->dma_regs[saddr];
    if (saddr == DMA_STAT) {
        if (pci->esp.rregs[ESP_RSTAT] & STAT_INT) {
            val |= DMA_STAT_SCSIINT;
        }
        if (pci->sbac & SBAC_STATUS) {
            pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_ERROR | DMA_STAT_ABORT |
                                         DMA_STAT_DONE);
        }
    }

    trace_esp_pci_dma_read(saddr, val);
    return val;
}

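/*
 * Depending on SBAC_STATUS, the guest clears the sticky DMA_STAT bits
 * either by reading the register (read-to-clear, handled above) or by
 * writing ones to them (write-to-clear, handled in esp_pci_dma_write).
 */
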
static void esp_pci_io_write(void *opaque, target_phys_addr_t addr,
                             uint64_t val, unsigned int size)
{
    PCIESPState *pci = opaque;

    if (size < 4 || addr & 3) {
        /* Need to upgrade the request: we only support 4-byte accesses,
           so merge the partial write into the current register value. */
        uint32_t current = 0, mask;
        int shift;

        if (addr < 0x40) {
            current = pci->esp.wregs[addr >> 2];
        } else if (addr < 0x60) {
            current = pci->dma_regs[(addr - 0x40) >> 2];
        } else if (addr < 0x74) {
            current = pci->sbac;
        }

        shift = (4 - size) * 8;
        mask = (~(uint32_t)0 << shift) >> shift;

        shift = ((4 - (addr & 3)) & 3) * 8;
        val <<= shift;
        val |= current & ~(mask << shift);
        addr &= ~3;
        size = 4;
    }

    if (addr < 0x40) {
        /* SCSI core reg */
        esp_reg_write(&pci->esp, addr >> 2, val);
    } else if (addr < 0x60) {
        /* PCI DMA CCB */
        esp_pci_dma_write(pci, (addr - 0x40) >> 2, val);
    } else if (addr == 0x70) {
        /* DMA SCSI Bus and control */
        trace_esp_pci_sbac_write(pci->sbac, val);
        pci->sbac = val;
    } else {
        trace_esp_pci_error_invalid_write((int)addr);
    }
}

static uint64_t esp_pci_io_read(void *opaque, target_phys_addr_t addr,
                                unsigned int size)
{
    PCIESPState *pci = opaque;
    uint32_t ret;

    if (addr < 0x40) {
        /* SCSI core reg */
        ret = esp_reg_read(&pci->esp, addr >> 2);
    } else if (addr < 0x60) {
        /* PCI DMA CCB */
        ret = esp_pci_dma_read(pci, (addr - 0x40) >> 2);
    } else if (addr == 0x70) {
        /* DMA SCSI Bus and control */
        trace_esp_pci_sbac_read(pci->sbac);
        ret = pci->sbac;
    } else {
        /* Invalid region */
        trace_esp_pci_error_invalid_read((int)addr);
        ret = 0;
    }

    /* return only the bytes that were requested */
    ret >>= (addr & 3) * 8;
    ret &= ~(~(uint64_t)0 << (8 * size));

    return ret;
}

static void esp_pci_dma_memory_rw(PCIESPState *pci, uint8_t *buf, int len,
                                  DMADirection dir)
{
    dma_addr_t addr;
    DMADirection expected_dir;

    if (pci->dma_regs[DMA_CMD] & DMA_CMD_DIR) {
        expected_dir = DMA_DIRECTION_FROM_DEVICE;
    } else {
        expected_dir = DMA_DIRECTION_TO_DEVICE;
    }

    if (dir != expected_dir) {
        trace_esp_pci_error_invalid_dma_direction();
        return;
    }

    if (pci->dma_regs[DMA_CMD] & DMA_CMD_MDL) {
        qemu_log_mask(LOG_UNIMP, "am53c974: MDL transfer not implemented\n");
    }

    addr = pci->dma_regs[DMA_SPA];
    if (pci->dma_regs[DMA_WBC] < len) {
        len = pci->dma_regs[DMA_WBC];
    }

    pci_dma_rw(&pci->dev, addr, buf, len, dir);

    /* update status registers */
    pci->dma_regs[DMA_WBC] -= len;
    pci->dma_regs[DMA_WAC] += len;
}

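/*
 * The two thunks below adapt the generic ESP DMA callbacks to PCI bus
 * master accesses: a "memory read" fills the chip's buffer from guest
 * memory (a transfer to the device), a "memory write" stores to guest
 * memory, and the working byte count/address registers track progress.
 */
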
static void esp_pci_dma_memory_read(void *opaque, uint8_t *buf, int len)
{
    PCIESPState *pci = opaque;
    esp_pci_dma_memory_rw(pci, buf, len, DMA_DIRECTION_TO_DEVICE);
}

static void esp_pci_dma_memory_write(void *opaque, uint8_t *buf, int len)
{
    PCIESPState *pci = opaque;
    esp_pci_dma_memory_rw(pci, buf, len, DMA_DIRECTION_FROM_DEVICE);
}

static const MemoryRegionOps esp_pci_io_ops = {
    .read = esp_pci_io_read,
    .write = esp_pci_io_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};

static void esp_pci_hard_reset(DeviceState *dev)
{
    PCIESPState *pci = DO_UPCAST(PCIESPState, dev.qdev, dev);
    esp_hard_reset(&pci->esp);
    pci->dma_regs[DMA_CMD] &= ~(DMA_CMD_DIR | DMA_CMD_INTE_D | DMA_CMD_INTE_P
                              | DMA_CMD_MDL | DMA_CMD_DIAG | DMA_CMD_MASK);
    pci->dma_regs[DMA_WBC] &= ~0xffff;
    pci->dma_regs[DMA_WAC] = 0xffffffff;
    pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_BCMBLT | DMA_STAT_SCSIINT
                               | DMA_STAT_DONE | DMA_STAT_ABORT
                               | DMA_STAT_ERROR);
    pci->dma_regs[DMA_WMAC] = 0xfffffffd;
}

static const VMStateDescription vmstate_esp_pci_scsi = {
    .name = "pciespscsi",
    .version_id = 0,
    .minimum_version_id = 0,
    .minimum_version_id_old = 0,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(dev, PCIESPState),
        VMSTATE_BUFFER_UNSAFE(dma_regs, PCIESPState, 0, 8 * sizeof(uint32_t)),
        VMSTATE_STRUCT(esp, PCIESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void esp_pci_command_complete(SCSIRequest *req, uint32_t status,
                                     size_t resid)
{
    ESPState *s = req->hba_private;
    PCIESPState *pci = container_of(s, PCIESPState, esp);

    esp_command_complete(req, status, resid);
    pci->dma_regs[DMA_WBC] = 0;
    pci->dma_regs[DMA_STAT] |= DMA_STAT_DONE;
}

static const struct SCSIBusInfo esp_pci_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_pci_command_complete,
    .cancel = esp_request_cancelled,
};

static int esp_pci_scsi_init(PCIDevice *dev)
{
    PCIESPState *pci = DO_UPCAST(PCIESPState, dev, dev);
    ESPState *s = &pci->esp;
    uint8_t *pci_conf;

    pci_conf = pci->dev.config;

    /* Interrupt pin A */
    pci_conf[PCI_INTERRUPT_PIN] = 0x01;

    s->dma_memory_read = esp_pci_dma_memory_read;
    s->dma_memory_write = esp_pci_dma_memory_write;
    s->dma_opaque = pci;
    s->chip_id = TCHI_AM53C974;
    memory_region_init_io(&pci->io, &esp_pci_io_ops, pci, "esp-io", 0x80);

    pci_register_bar(&pci->dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &pci->io);
    s->irq = pci->dev.irq[0];

    scsi_bus_new(&s->bus, &dev->qdev, &esp_pci_scsi_info);
    if (!dev->qdev.hotplugged) {
        return scsi_bus_legacy_handle_cmdline(&s->bus);
    }
    return 0;
}

static void esp_pci_scsi_uninit(PCIDevice *d)
{
    PCIESPState *pci = DO_UPCAST(PCIESPState, dev, d);

    memory_region_destroy(&pci->io);
}

static void esp_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = esp_pci_scsi_init;
    k->exit = esp_pci_scsi_uninit;
    k->vendor_id = PCI_VENDOR_ID_AMD;
    k->device_id = PCI_DEVICE_ID_AMD_SCSI;
    k->revision = 0x10;
    k->class_id = PCI_CLASS_STORAGE_SCSI;
    dc->desc = "AMD Am53c974 PCscsi-PCI SCSI adapter";
    dc->reset = esp_pci_hard_reset;
    dc->vmsd = &vmstate_esp_pci_scsi;
}

static TypeInfo esp_pci_info = {
    .name = "am53c974",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(PCIESPState),
    .class_init = esp_pci_class_init,
};

static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
    type_register_static(&esp_pci_info);
}

type_init(esp_register_types)