esp: fix esp_reg_read() trace event
[qemu/ar7.git] / hw / scsi / esp.c
blob16c18535770559ad439b11c132fa2bf04f63d7e0
1 /*
2 * QEMU ESP/NCR53C9x emulation
4 * Copyright (c) 2005-2006 Fabrice Bellard
5 * Copyright (c) 2012 Herve Poussineau
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
26 #include "qemu/osdep.h"
27 #include "hw/sysbus.h"
28 #include "migration/vmstate.h"
29 #include "hw/irq.h"
30 #include "hw/scsi/esp.h"
31 #include "trace.h"
32 #include "qemu/log.h"
33 #include "qemu/module.h"
36 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
37 * also produced as NCR89C100. See
38 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
39 * and
40 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
42 * On Macintosh Quadra it is a NCR53C96.
/*
 * Assert the chip's interrupt line. STAT_INT in RSTAT is used as a latch
 * so the IRQ is raised (and traced) only on the 0 -> 1 transition.
 */
static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}
/*
 * Deassert the interrupt line; mirror of esp_raise_irq(). Only acts (and
 * traces) when STAT_INT was previously latched.
 */
static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}
/* Assert DREQ: tells the external (P)DMA engine that data can be moved. */
static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
}
/* Deassert DREQ once the PDMA transfer for the current phase is finished. */
static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
}
73 void esp_dma_enable(ESPState *s, int irq, int level)
75 if (level) {
76 s->dma_enabled = 1;
77 trace_esp_dma_enable();
78 if (s->dma_cb) {
79 s->dma_cb(s);
80 s->dma_cb = NULL;
82 } else {
83 trace_esp_dma_disable();
84 s->dma_enabled = 0;
/*
 * SCSI-layer callback: a request owned by this HBA was cancelled.
 * Drop our reference and forget the current target if it was ours.
 */
void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
/*
 * Arm a PDMA transfer: record which internal buffer the host will access
 * (origin), where in it the transfer starts, and how many bytes remain.
 */
static void set_pdma(ESPState *s, enum pdma_origin_id origin,
                     uint32_t index, uint32_t len)
{
    s->pdma_origin = origin;
    s->pdma_start = index;
    s->pdma_cur = index;
    s->pdma_len = len;
}
108 static uint8_t *get_pdma_buf(ESPState *s)
110 switch (s->pdma_origin) {
111 case PDMA:
112 return s->pdma_buf;
113 case TI:
114 return s->ti_buf;
115 case CMD:
116 return s->cmdbuf;
117 case ASYNC:
118 return s->async_buf;
120 return NULL;
/*
 * Common start-of-selection work: reset the transfer-info buffer pointers,
 * cancel any request still in flight, and look up the target selected via
 * the WBUSID register.
 *
 * Returns 0 on success, or -1 (after raising a disconnect interrupt) when
 * no device answers at the selected id.
 */
static int get_cmd_cb(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
        s->async_len = 0;
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }
    return 0;
}
/*
 * Fetch the CDB for a selection into buf (capacity buflen).
 *
 * DMA path: the length comes from the transfer counter registers. With a
 * dma_memory_read helper the bytes are pulled synchronously; otherwise a
 * PDMA transfer is armed, DREQ is raised and 0 is returned — the command
 * completes later via s->pdma_cb.
 *
 * PIO path: the command was already accumulated in ti_buf; byte 2's upper
 * bits are folded into byte 0 (bus id message from the FIFO layout).
 *
 * Returns the number of command bytes, or 0 if deferred/failed.
 */
static uint32_t get_cmd(ESPState *s, uint8_t *buf, uint8_t buflen)
{
    uint32_t dmalen;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = s->rregs[ESP_TCLO];
        dmalen |= s->rregs[ESP_TCMID] << 8;
        dmalen |= s->rregs[ESP_TCHI] << 16;
        if (dmalen > buflen) {
            return 0;
        }
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
        } else {
            /*
             * NOTE(review): this copies from the caller's buf, which has
             * not been filled yet on this path — the PDMA host write will
             * overwrite pdma_buf anyway, so the copy looks redundant at
             * best; confirm against upstream history before relying on it.
             */
            memcpy(s->pdma_buf, buf, dmalen);
            set_pdma(s, PDMA, 0, dmalen);
            esp_raise_drq(s);
            return 0;
        }
    } else {
        dmalen = s->ti_size;
        if (dmalen > TI_BUFSZ) {
            return 0;
        }
        memcpy(buf, s->ti_buf, dmalen);
        /* Recover the bus-id message from the FIFO byte layout. */
        buf[0] = buf[2] >> 5;
    }
    trace_esp_get_cmd(dmalen, target);

    if (get_cmd_cb(s) < 0) {
        return 0;
    }
    return dmalen;
}
/*
 * Enqueue the CDB in buf for the LUN encoded in busid (low 3 bits) on the
 * currently selected target, then report the bus-service/function-complete
 * interrupt. datalen > 0 means the device will send data (DI phase),
 * datalen < 0 means the initiator must send data (DO phase).
 */
static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
{
    int32_t datalen;
    int lun;
    SCSIDevice *current_lun;

    trace_esp_do_busid_cmd(busid);
    lun = busid & 7;
    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
    s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->dma_left = 0;
        s->dma_counter = 0;
        if (datalen > 0) {
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
        }
        scsi_req_continue(s->current_req);
    }
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}
216 static void do_cmd(ESPState *s, uint8_t *buf)
218 uint8_t busid = buf[0];
220 do_busid_cmd(s, &buf[1], busid);
/*
 * PDMA completion for "select with ATN": once the host has pushed the
 * command bytes, run the command if any bytes actually arrived.
 */
static void satn_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    if (s->pdma_cur != s->pdma_start) {
        do_cmd(s, get_pdma_buf(s) + s->pdma_start);
    }
}
/*
 * CMD_SELATN: select with ATN. Defers itself via dma_cb if DMA is gated
 * off; otherwise fetches the CDB (possibly arming PDMA) and executes it.
 */
static void handle_satn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    s->pdma_cb = satn_pdma_cb;
    len = get_cmd(s, buf, sizeof(buf));
    if (len) {
        do_cmd(s, buf);
    }
}
/*
 * PDMA completion for "select without ATN": no IDENTIFY message byte, so
 * the whole buffer is the CDB and LUN 0 (busid 0) is assumed.
 */
static void s_without_satn_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    if (s->pdma_cur != s->pdma_start) {
        do_busid_cmd(s, get_pdma_buf(s) + s->pdma_start, 0);
    }
}
/*
 * CMD_SEL: select without ATN. Same deferral logic as handle_satn(), but
 * the fetched buffer is a bare CDB executed against busid 0.
 */
static void handle_s_without_atn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    s->pdma_cb = s_without_satn_pdma_cb;
    len = get_cmd(s, buf, sizeof(buf));
    if (len) {
        do_busid_cmd(s, buf, 0);
    }
}
/*
 * PDMA completion for "select with ATN and stop": the command bytes stay
 * parked in cmdbuf (do_cmd flag set) and the guest is interrupted so it
 * can issue a Transfer Information command to run them later.
 */
static void satn_stop_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    s->cmdlen = s->pdma_cur - s->pdma_start;
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}
/*
 * CMD_SELATNS: select with ATN and stop. Fetches the CDB into cmdbuf but
 * does not execute it — sets do_cmd and interrupts, mirroring
 * satn_stop_pdma_cb() for the synchronous-DMA path.
 */
static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->pdma_cb = satn_stop_pdma_cb;
    s->cmdlen = get_cmd(s, s->cmdbuf, sizeof(s->cmdbuf));
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}
/*
 * PDMA completion for write_response(): the host has consumed the
 * status/message pair, so report transfer-complete and interrupt.
 */
static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}
/*
 * CMD_ICCS helper: deliver the 2-byte status + message-in pair, either by
 * DMA (synchronous helper or PDMA deferral) or by exposing it through the
 * FIFO for PIO readers.
 */
static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);
    s->ti_buf[0] = s->status;
    s->ti_buf[1] = 0;
    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            /* Host must pull the two bytes via PDMA; finish in the cb. */
            set_pdma(s, TI, 0, 2);
            s->pdma_cb = write_response_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        /* PIO: make the pair readable through the FIFO. */
        s->ti_size = 2;
        s->ti_rptr = 0;
        s->ti_wptr = 2;
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}
/*
 * Finish a DMA transfer: flag terminal count, clear the sequence/FIFO
 * registers and the transfer counter, and interrupt the guest.
 */
static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] = INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    s->rregs[ESP_TCLO] = 0;
    s->rregs[ESP_TCMID] = 0;
    s->rregs[ESP_TCHI] = 0;
    esp_raise_irq(s);
}
/*
 * PDMA completion for data-phase transfers armed by esp_do_dma().
 * len is how many bytes the host actually moved (pdma_cur - pdma_start);
 * the same bookkeeping as the synchronous path in esp_do_dma() is applied.
 */
static void do_dma_pdma_cb(ESPState *s)
{
    int to_device = (s->ti_size < 0);
    int len = s->pdma_cur - s->pdma_start;
    if (s->do_cmd) {
        /* The transfer was command bytes, not data: run the command. */
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately. Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
}
/*
 * Core DMA engine. Two cases:
 *  - do_cmd set: the pending bytes are a CDB being appended to cmdbuf;
 *    once transferred (synchronously or via PDMA) the command is run.
 *  - otherwise: move up to dma_left bytes between guest memory and the
 *    SCSI layer's async buffer, direction given by the sign of ti_size
 *    (negative = to device). Without dma_memory_read/write helpers the
 *    transfer is handed to the host via PDMA and finished in
 *    do_dma_pdma_cb().
 */
static void esp_do_dma(ESPState *s)
{
    uint32_t len;
    int to_device;

    len = s->dma_left;
    if (s->do_cmd) {
        /*
         * handle_ti_cmd() case: esp_do_dma() is called only from
         * handle_ti_cmd() with do_cmd != NULL (see the assert())
         */
        trace_esp_do_dma(s->cmdlen, len);
        assert(s->cmdlen <= sizeof(s->cmdbuf) &&
               len <= sizeof(s->cmdbuf) - s->cmdlen);
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
        } else {
            set_pdma(s, CMD, s->cmdlen, len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    to_device = (s->ti_size < 0);
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            set_pdma(s, ASYNC, 0, len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            set_pdma(s, ASYNC, 0, len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    }
    /* Synchronous path: account for the bytes just moved. */
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately. Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
}
/*
 * Finish the current SCSI request: record its status, move to the STATUS
 * phase, raise the completion interrupt and release our request reference.
 */
static void esp_report_command_complete(ESPState *s, uint32_t status)
{
    trace_esp_command_complete();
    if (s->ti_size != 0) {
        /* The guest did not consume/produce all expected data. */
        trace_esp_command_complete_unexpected();
    }
    s->ti_size = 0;
    s->dma_left = 0;
    s->async_len = 0;
    if (status) {
        trace_esp_command_complete_fail();
    }
    s->status = status;
    s->rregs[ESP_RSTAT] = STAT_ST;
    esp_dma_done(s);
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
/*
 * SCSI-layer completion callback. If the guest has not yet acknowledged
 * the previous interrupt (STAT_INT still set), stash the status and
 * deliver it from the RINTR read handler instead.
 */
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;

    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        /*
         * Defer handling command complete until the previous
         * interrupt has been handled.
         */
        trace_esp_command_complete_deferred();
        s->deferred_status = req->status;
        s->deferred_complete = true;
        return;
    }
    esp_report_command_complete(s, req->status);
}
/*
 * SCSI-layer callback: a new chunk of request data is available. Resume
 * the in-progress DMA if any, or deliver the deferred end-of-transfer
 * interrupt when the whole DMA already drained.
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;

    assert(!s->do_cmd);
    trace_esp_transfer_data(s->dma_left, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);
    if (s->dma_left) {
        esp_do_dma(s);
    } else if (s->dma_counter != 0 && s->ti_size <= 0) {
        /*
         * If this was the last part of a DMA transfer then the
         * completion interrupt is deferred to here.
         */
        esp_dma_done(s);
    }
}
/*
 * CMD_TI: Transfer Information. Computes the transfer length from the
 * counter registers (0 means the full 64 KiB), clamps it to what the
 * current phase can move, and either kicks the DMA engine or executes a
 * command previously parked by "select with ATN and stop".
 */
static void handle_ti(ESPState *s)
{
    uint32_t dmalen, minlen;

    if (s->dma && !s->dma_enabled) {
        /* DMA gated off by the board: retry when it is re-enabled. */
        s->dma_cb = handle_ti;
        return;
    }

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;
    if (dmalen == 0) {
        dmalen = 0x10000;
    }
    s->dma_counter = dmalen;

    if (s->do_cmd) {
        minlen = (dmalen < ESP_CMDBUF_SZ) ? dmalen : ESP_CMDBUF_SZ;
    } else if (s->ti_size < 0) {
        /* Negative ti_size = transfer to the device. */
        minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
    } else {
        minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
    }
    trace_esp_handle_ti(minlen);
    if (s->dma) {
        s->dma_left = minlen;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else if (s->do_cmd) {
        trace_esp_handle_ti_cmd(s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
    }
}
/*
 * Power-on/hard reset: clear all register banks and transfer state.
 * CFG1 is preset to 7 (the chip's default bus id).
 */
void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}
/*
 * CMD_RESET / bus-reset line handler: drop both output lines, then apply
 * the same state reset as a hard reset.
 */
static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}
588 static void parent_esp_reset(ESPState *s, int irq, int level)
590 if (level) {
591 esp_soft_reset(s);
/*
 * Read one chip register. Reads are side-effecting: the FIFO pops a byte
 * from ti_buf, and RINTR clears the interrupt/sequence state (and may
 * deliver a deferred command completion). The value is traced after the
 * side effects so the trace shows what the guest actually saw.
 */
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out. */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else if (s->ti_rptr < s->ti_wptr) {
            /* Pop the next byte queued for PIO data-in. */
            s->ti_size--;
            s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
        }
        if (s->ti_rptr == s->ti_wptr) {
            /* FIFO drained: rewind the pointers. */
            s->ti_rptr = 0;
            s->ti_wptr = 0;
        }
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_lower_irq(s);
        if (s->deferred_complete) {
            /* Deliver the completion that arrived while INT was pending. */
            esp_report_command_complete(s, s->deferred_status);
            s->deferred_complete = false;
        }
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}
/*
 * Write one chip register. Transfer-counter writes clear STAT_TC; FIFO
 * writes accumulate either command bytes (when do_cmd) or PIO data; the
 * CMD register latches the DMA flag, reloads the counter, and dispatches
 * the chip command in the inner switch. The shadow wregs[] copy is only
 * updated for addresses handled by the switch (default returns early).
 */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            /* Accumulating a command parked by select-with-ATN-and-stop. */
            if (s->cmdlen < ESP_CMDBUF_SZ) {
                s->cmdbuf[s->cmdlen++] = val & 0xff;
            } else {
                trace_esp_error_fifo_overrun();
            }
        } else if (s->ti_wptr == TI_BUFSZ - 1) {
            trace_esp_error_fifo_overrun();
        } else {
            s->ti_size++;
            s->ti_buf[s->ti_wptr++] = val & 0xff;
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter. */
            s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
            s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
            s->rregs[ESP_TCHI] = s->wregs[ESP_TCHI];
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            /*s->ti_size = 0;*/
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            s->rregs[ESP_RINTR] = INTR_RST;
            /* CFG1_RESREPT suppresses the reset-interrupt report. */
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            trace_esp_mem_writeb_cmd_ti(val);
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] = INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}
776 static bool esp_mem_accepts(void *opaque, hwaddr addr,
777 unsigned size, bool is_write,
778 MemTxAttrs attrs)
780 return (size == 1) || (is_write && size == 4);
/*
 * Migration predicate: the PDMA subsection is only needed when the board
 * has no DMA helpers (i.e. it relies on programmed DMA) and DMA is live.
 */
static bool esp_pdma_needed(void *opaque)
{
    ESPState *s = opaque;
    return s->dma_memory_read == NULL && s->dma_memory_write == NULL &&
           s->dma_enabled;
}
/* Migration subsection carrying in-flight PDMA transfer state. */
static const VMStateDescription vmstate_esp_pdma = {
    .name = "esp/pdma",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = esp_pdma_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(pdma_buf, ESPState),
        VMSTATE_INT32(pdma_origin, ESPState),
        VMSTATE_UINT32(pdma_len, ESPState),
        VMSTATE_UINT32(pdma_start, ESPState),
        VMSTATE_UINT32(pdma_cur, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
/* Record the vmstate version we are saving with (read back by wrappers). */
static int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(opaque);

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
/* Normalize the stored version id to the current one after loading. */
static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
/* Main ESP migration description; PDMA state travels in a subsection. */
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 5,
    .minimum_version_id = 3,
    .pre_save = esp_pre_save,
    .post_load = esp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32(ti_rptr, ESPState),
        VMSTATE_UINT32(ti_wptr, ESPState),
        VMSTATE_BUFFER(ti_buf, ESPState),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32(deferred_status, ESPState),
        VMSTATE_BOOL(deferred_complete, ESPState),
        VMSTATE_UINT32(dma, ESPState),
        /* cmdbuf was split across versions: first 16 bytes, then the rest. */
        VMSTATE_PARTIAL_BUFFER(cmdbuf, ESPState, 16),
        VMSTATE_BUFFER_START_MIDDLE_V(cmdbuf, ESPState, 16, 4),
        VMSTATE_UINT32(cmdlen, ESPState),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32(dma_left, ESPState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_esp_pdma,
        NULL
    }
};
851 static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
852 uint64_t val, unsigned int size)
854 SysBusESPState *sysbus = opaque;
855 ESPState *s = ESP(&sysbus->esp);
856 uint32_t saddr;
858 saddr = addr >> sysbus->it_shift;
859 esp_reg_write(s, saddr, val);
862 static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
863 unsigned int size)
865 SysBusESPState *sysbus = opaque;
866 ESPState *s = ESP(&sysbus->esp);
867 uint32_t saddr;
869 saddr = addr >> sysbus->it_shift;
870 return esp_reg_read(s, saddr);
/* Register-bank MMIO ops; esp_mem_accepts restricts the access sizes. */
static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
/*
 * PDMA register write: the host pushes 1 or 2 bytes into the buffer armed
 * by set_pdma(), decrementing both the PDMA byte count and the chip's
 * transfer counter. When the PDMA count hits zero, DREQ is dropped and
 * the queued completion callback runs.
 *
 * NOTE(review): a 2-byte access with pdma_len == 1 would decrement the
 * counters below zero and write one byte past the armed length — assumes
 * guests never do that; confirm against the boards that use PDMA.
 */
static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t dmalen;
    uint8_t *buf = get_pdma_buf(s);

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;
    if (dmalen == 0 || s->pdma_len == 0) {
        return;
    }
    switch (size) {
    case 1:
        buf[s->pdma_cur++] = val;
        s->pdma_len--;
        dmalen--;
        break;
    case 2:
        /* Big-endian byte order: high byte first. */
        buf[s->pdma_cur++] = val >> 8;
        buf[s->pdma_cur++] = val;
        s->pdma_len -= 2;
        dmalen -= 2;
        break;
    }
    /* Write the updated transfer counter back to the registers. */
    s->rregs[ESP_TCLO] = dmalen & 0xff;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;
    if (s->pdma_len == 0 && s->pdma_cb) {
        esp_lower_drq(s);
        s->pdma_cb(s);
        s->pdma_cb = NULL;
    }
}
/*
 * PDMA register read: the host pulls 1 or 2 bytes from the armed buffer.
 * Unlike the write path, only pdma_len is decremented (the transfer
 * counter registers are untouched here). Completion is signalled the
 * same way: drop DREQ and run the queued callback.
 */
static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint8_t *buf = get_pdma_buf(s);
    uint64_t val = 0;

    if (s->pdma_len == 0) {
        return 0;
    }
    switch (size) {
    case 1:
        val = buf[s->pdma_cur++];
        s->pdma_len--;
        break;
    case 2:
        /* Big-endian byte order: high byte first. */
        val = buf[s->pdma_cur++];
        val = (val << 8) | buf[s->pdma_cur++];
        s->pdma_len -= 2;
        break;
    }

    if (s->pdma_len == 0 && s->pdma_cb) {
        esp_lower_drq(s);
        s->pdma_cb(s);
        s->pdma_cb = NULL;
    }
    return val;
}
/* PDMA window MMIO ops: 1- or 2-byte accesses only. */
static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 2,
};
/* SCSI bus glue: wire the SCSI layer's callbacks to this HBA. */
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};
966 static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
968 SysBusESPState *sysbus = SYSBUS_ESP(opaque);
969 ESPState *s = ESP(&sysbus->esp);
971 switch (irq) {
972 case 0:
973 parent_esp_reset(s, irq, level);
974 break;
975 case 1:
976 esp_dma_enable(opaque, irq, level);
977 break;
/*
 * Realize the sysbus wrapper: realize the embedded ESP core, export the
 * IRQ/DREQ lines, map the register bank (ESP_REGS registers spaced by
 * 1 << it_shift bytes) and the 2-byte PDMA window, wire the two GPIO
 * inputs, and create the SCSI bus.
 */
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    /* The board must have configured the register stride. */
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 2);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
}
1008 static void sysbus_esp_hard_reset(DeviceState *dev)
1010 SysBusESPState *sysbus = SYSBUS_ESP(dev);
1011 ESPState *s = ESP(&sysbus->esp);
1013 esp_hard_reset(s);
/* Instance init: embed the ESP core as a QOM child named "esp". */
static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}
/* Wrapper vmstate: version byte (since v2) plus the embedded ESP state. */
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
/* Class init for the sysbus wrapper: hooks, vmstate and device category. */
static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
/* QOM type registration record for the sysbus-attached ESP device. */
static const TypeInfo sysbus_esp_info = {
    .name = TYPE_SYSBUS_ESP,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init = sysbus_esp_class_init,
};
/* Class init for the bare ESP core device. */
static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
/* QOM type registration record for the embedded ESP core. */
static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};
/* Register both QOM types with the type system at module-init time. */
static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
    type_register_static(&esp_info);
}

type_init(esp_register_types)